repo_id stringlengths 21 96 | file_path stringlengths 31 155 | content stringlengths 1 92.9M | __index_level_0__ int64 0 0 |
|---|---|---|---|
rapidsai_public_repos/cuml/cpp/src/decisiontree | rapidsai_public_repos/cuml/cpp/src/decisiontree/batched-levelalgo/quantiles.cuh | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cub/cub.cuh>
#include <iostream>
#include <memory>
#include <raft/core/handle.hpp>
#include <raft/util/cuda_utils.cuh>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/fill.h>
#include <raft/core/nvtx.hpp>
#include "quantiles.h"
namespace ML {
namespace DT {
// Forward declaration (defined in kernels/quantiles.cu): extracts at most
// `max_n_bins` unique quantile cut-points from one column of sorted data.
// Launched with a single block per column in computeQuantiles() below.
template <typename T>
__global__ void computeQuantilesKernel(
  T* quantiles, int* n_bins, const T* sorted_data, const int max_n_bins, const int n_rows);
/**
 * @brief Compute per-column quantile cut-points used as candidate split values.
 *
 * Each of the `n_cols` columns of the column-major `data` matrix
 * (`n_rows` x `n_cols`, device memory) is radix-sorted into a scratch buffer,
 * then a single-block kernel picks at most `max_n_bins` equi-depth quantiles
 * and de-duplicates them in place.
 *
 * All work is enqueued on the handle's stream. Since each column's sort, its
 * quantile kernel and the next column's sort run on that same stream, stream
 * ordering already serializes them — no per-iteration host synchronization is
 * needed (the original per-column cudaStreamSynchronize calls only stalled the
 * host). A single sync after the loop guarantees results are ready on return.
 *
 * @return tuple of (Quantiles view, owning quantiles vector, owning bin-count
 *         vector); the view's raw pointers alias the two shared device
 *         vectors, so keep those alive as long as the view is used.
 */
template <typename T>
auto computeQuantiles(
  const raft::handle_t& handle, const T* data, int max_n_bins, int n_rows, int n_cols)
{
  raft::common::nvtx::push_range("computeQuantiles");
  auto stream               = handle.get_stream();
  size_t temp_storage_bytes = 0;  // scratch size required by device radix sort
  rmm::device_uvector<T> sorted_column(n_rows, stream);
  // acquire device vectors to store the quantiles + per-column unique bin counts
  auto quantiles_array = std::make_shared<rmm::device_uvector<T>>(n_cols * max_n_bins, stream);
  auto n_bins_array    = std::make_shared<rmm::device_uvector<int>>(n_cols, stream);
  // query temp_storage_bytes needed for sorting a single column
  RAFT_CUDA_TRY(cub::DeviceRadixSort::SortKeys(
    nullptr, temp_storage_bytes, data, sorted_column.data(), n_rows, 0, 8 * sizeof(T), stream));
  // allocate the sort scratch once and reuse it for every column
  rmm::device_uvector<char> d_temp_storage(temp_storage_bytes, stream);
  for (int col = 0; col < n_cols; col++) {
    raft::common::nvtx::push_range("sorting columns");
    int col_offset = col * n_rows;
    RAFT_CUDA_TRY(cub::DeviceRadixSort::SortKeys((void*)(d_temp_storage.data()),
                                                 temp_storage_bytes,
                                                 data + col_offset,
                                                 sorted_column.data(),
                                                 n_rows,
                                                 0,
                                                 8 * sizeof(T),
                                                 stream));
    raft::common::nvtx::pop_range();  // sorting columns
    // one block per column; up to 1024 threads stride over the bins
    int n_blocks        = 1;
    int n_threads       = min(1024, max_n_bins);
    int quantile_offset = col * max_n_bins;
    int bins_offset     = col;
    raft::common::nvtx::push_range("computeQuantilesKernel @quantile.cuh");
    computeQuantilesKernel<<<n_blocks, n_threads, 0, stream>>>(
      quantiles_array->data() + quantile_offset,
      n_bins_array->data() + bins_offset,
      sorted_column.data(),
      max_n_bins,
      n_rows);
    // catch launch-configuration errors immediately; execution errors surface
    // at the synchronization after the loop
    RAFT_CUDA_TRY(cudaGetLastError());
    raft::common::nvtx::pop_range();  // computeQuantilesKernel
  }
  // single sync so all quantiles are materialized before we hand out pointers
  RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
  // encapsulate the device pointers under a Quantiles struct
  Quantiles<T, int> quantiles;
  quantiles.quantiles_array = quantiles_array->data();
  quantiles.n_bins_array    = n_bins_array->data();
  raft::common::nvtx::pop_range();  // computeQuantiles
  return std::make_tuple(quantiles, quantiles_array, n_bins_array);
}
} // namespace DT
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src/decisiontree | rapidsai_public_repos/cuml/cpp/src/decisiontree/batched-levelalgo/bins.cuh | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/util/cuda_utils.cuh>
namespace ML {
namespace DT {
/** Histogram bin for classification objectives: a plain sample counter. */
struct CountBin {
  int x;  // number of samples accumulated in this bin

  CountBin(CountBin const&) = default;
  HDI CountBin(int x_) : x(x_) {}
  HDI CountBin() : x(0) {}

  /**
   * Atomically bump the bin `b` of class `label` inside a histogram laid out
   * as label-major: offset = label * n_bins + b.
   */
  DI static void IncrementHistogram(CountBin* hist, int n_bins, int b, int label)
  {
    CountBin::AtomicAdd(hist + (label * n_bins + b), {1});
  }
  /** Device-side atomic accumulation of one bin into another. */
  DI static void AtomicAdd(CountBin* address, CountBin val) { atomicAdd(&address->x, val.x); }
  HDI CountBin& operator+=(const CountBin& b)
  {
    this->x += b.x;
    return *this;
  }
  HDI CountBin operator+(CountBin b) const
  {
    CountBin out = b;
    out += *this;
    return out;
  }
};
/**
 * Histogram bin for regression objectives: accumulates the sum of label
 * values together with the number of samples that fell into the bin.
 */
struct AggregateBin {
  double label_sum;  // running sum of label values in this bin
  int count;         // number of samples in this bin

  AggregateBin(AggregateBin const&) = default;
  HDI AggregateBin() : label_sum(0.0), count(0) {}
  HDI AggregateBin(double label_sum, int count) : label_sum(label_sum), count(count) {}

  /** Atomically add one sample with value `label` to bin `b` (`n_bins` is unused here). */
  DI static void IncrementHistogram(AggregateBin* hist, int n_bins, int b, double label)
  {
    AggregateBin::AtomicAdd(hist + b, {label, 1});
  }
  /** Device-side atomic accumulation; atomicAdd on double requires SM 6.0+. */
  DI static void AtomicAdd(AggregateBin* address, AggregateBin val)
  {
    atomicAdd(&address->label_sum, val.label_sum);
    atomicAdd(&address->count, val.count);
  }
  HDI AggregateBin& operator+=(const AggregateBin& b)
  {
    this->label_sum += b.label_sum;
    this->count += b.count;
    return *this;
  }
  HDI AggregateBin operator+(AggregateBin b) const
  {
    AggregateBin out = b;
    out += *this;
    return out;
  }
};
} // namespace DT
} // namespace ML | 0 |
rapidsai_public_repos/cuml/cpp/src/decisiontree/batched-levelalgo | rapidsai_public_repos/cuml/cpp/src/decisiontree/batched-levelalgo/kernels/gini-double.cu | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/tree/flatnode.h>
#include "../bins.cuh"
#include "../objectives.cuh"
namespace ML {
namespace DT {
// Concrete type bindings for this translation unit: double-precision data,
// integer class labels, Gini impurity objective. builder_kernels_impl.cuh
// (included below, outside these namespaces) instantiates the builder
// kernels with these aliases.
using _DataT      = double;
using _LabelT     = int;
using _IdxT       = int;
using _ObjectiveT = GiniObjectiveFunction<_DataT, _LabelT, _IdxT>;
using _BinT       = CountBin;
using _DatasetT   = Dataset<_DataT, _LabelT, _IdxT>;
using _NodeT      = SparseTreeNode<_DataT, _LabelT, _IdxT>;
}  // namespace DT
}  // namespace ML
#include "builder_kernels_impl.cuh"
| 0 |
rapidsai_public_repos/cuml/cpp/src/decisiontree/batched-levelalgo | rapidsai_public_repos/cuml/cpp/src/decisiontree/batched-levelalgo/kernels/builder_kernels.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../bins.cuh"
#include "../objectives.cuh"
#include "../quantiles.h"
#include <raft/random/rng.cuh>
#include <cub/cub.cuh>
namespace ML {
namespace DT {
// The range of instances belonging to a particular node
// This structure refers to a range in the device array dataset.row_ids
struct InstanceRange {
  std::size_t begin;  // first index into dataset.row_ids owned by the node
  std::size_t count;  // number of consecutive row ids owned by the node
};
// One pending unit of work: a tree node that still has to be processed.
struct NodeWorkItem {
  size_t idx;  // Index of the work item in the tree
  int depth;   // depth of this node in the tree (presumably root == 0 — confirm at call site)
  InstanceRange instances;  // the dataset rows that reached this node
};
/**
* This struct has information about workload of a single threadblock of
* computeSplit kernels of classification and regression
*/
template <typename IdxT>
struct WorkloadInfo {
  IdxT nodeid;          // Node in the batch on which the threadblock needs to work
  IdxT large_nodeid;    // counts only large nodes (nodes that require more than one block along
                        // x-dim for histogram calculation)
  IdxT offset_blockid;  // Offset threadblock id among all the blocks that are
                        // working on this node
  IdxT num_blocks;      // Total number of blocks that are working on the node
};
/**
 * @brief Reject a candidate split that either gains too little impurity or
 * would produce a child with fewer than `min_samples_leaf` rows.
 */
template <typename SplitT, typename DataT, typename IdxT>
HDI bool SplitNotValid(const SplitT& split,
                       DataT min_impurity_decrease,
                       IdxT min_samples_leaf,
                       std::size_t num_rows)
{
  const IdxT n_left          = split.nLeft;
  const IdxT n_right         = IdxT(num_rows) - n_left;
  const bool gain_too_small  = split.best_metric_val <= min_impurity_decrease;
  const bool child_too_small = (n_left < min_samples_leaf) || (n_right < min_samples_leaf);
  return gain_too_small || child_too_small;
}
/* Returns 'dataset' rounded up to a correctly-aligned pointer of type OutT* */
template <typename OutT, typename InT>
DI OutT* alignPointer(InT dataset)
{
  const size_t raw     = reinterpret_cast<size_t>(dataset);
  const size_t aligned = raft::alignTo(raw, sizeof(OutT));
  return reinterpret_cast<OutT*>(aligned);
}
// Forward declaration (defined in builder_kernels_impl.cuh): applies the
// pre-computed best split of each work item by partitioning its rows of
// dataset.row_ids; one threadblock per work item.
template <typename DataT, typename LabelT, typename IdxT, int TPB>
__global__ void nodeSplitKernel(const IdxT max_depth,
                                const IdxT min_samples_leaf,
                                const IdxT min_samples_split,
                                const IdxT max_leaves,
                                const DataT min_impurity_decrease,
                                const Dataset<DataT, LabelT, IdxT> dataset,
                                const NodeWorkItem* work_items,
                                const Split<DataT, IdxT>* splits);
// Forward declaration (defined in builder_kernels_impl.cuh): fills the
// per-leaf prediction vectors from each leaf's label histogram.
template <typename DatasetT, typename NodeT, typename ObjectiveT, typename DataT>
__global__ void leafKernel(ObjectiveT objective,
                           DatasetT dataset,
                           const NodeT* tree,
                           const InstanceRange* instance_ranges,
                           DataT* leaves);
// 32-bit FNV1a hash
// Reference: http://www.isthe.com/chongo/tech/comp/fnv/index.html
const uint32_t fnv1a32_prime = uint32_t(16777619);
const uint32_t fnv1a32_basis = uint32_t(2166136261);
/** Mix the four bytes of `txt` (lowest byte first) into `hash`, one at a time. */
HDI uint32_t fnv1a32(uint32_t hash, uint32_t txt)
{
  for (int shift = 0; shift < 32; shift += 8) {
    hash ^= (txt >> shift) & 0xFF;
    hash *= fnv1a32_prime;
  }
  return hash;
}
// returns the lowest index in `array` whose value is greater or equal to `element`
// NOTE: unlike std::lower_bound, the result is clamped to len - 1: when every
// value is smaller than `element`, the last index is returned. Callers use the
// result directly as a bin index, so it must stay within [0, len).
// Requires len >= 1.
template <typename DataT, typename IdxT>
HDI IdxT lower_bound(DataT* array, IdxT len, DataT element)
{
  IdxT start = 0;
  IdxT end   = len - 1;  // inclusive upper bound of the search window
  IdxT mid;
  // binary search over the closed interval [start, end]
  while (start < end) {
    mid = (start + end) / 2;
    if (array[mid] < element) {
      start = mid + 1;
    } else {
      end = mid;
    }
  }
  return start;
}
/** Binary functor: yields 1 when the two operands differ, 0 when they are equal. */
template <typename IdxT>
struct CustomDifference {
  __device__ IdxT operator()(const IdxT& lhs, const IdxT& rhs)
  {
    return (lhs == rhs) ? IdxT(0) : IdxT(1);
  }
};
/**
* @brief Generates 'k' unique samples of features from 'n' feature sample-space.
* Does this for each work-item (node), feeding a unique seed for each (treeid, nodeid
* (=blockIdx.x), threadIdx.x). Method used is a random, parallel, sampling with replacement of
* excess of 'k' samples (hence the name) and then eliminating the dupicates by ordering them. The
* excess number of samples (=`n_parallel_samples`) is calculated such that after ordering there is
* at least 'k' uniques.
*/
template <typename IdxT, int MAX_SAMPLES_PER_THREAD, int BLOCK_THREADS = 128>
__global__ void excess_sample_with_replacement_kernel(
  IdxT* colids,
  const NodeWorkItem* work_items,
  size_t work_items_size,
  IdxT treeid,
  uint64_t seed,
  size_t n /* total cols to sample from*/,
  size_t k /* number of unique cols to sample */,
  int n_parallel_samples /* number of cols to sample with replacement */)
{
  // one threadblock per work item (node)
  if (blockIdx.x >= work_items_size) return;
  const uint32_t nodeid = work_items[blockIdx.x].idx;
  // hash (threadIdx, treeid, nodeid) into a distinct RNG subsequence per thread
  uint64_t subsequence(fnv1a32_basis);
  subsequence = fnv1a32(subsequence, uint32_t(threadIdx.x));
  subsequence = fnv1a32(subsequence, uint32_t(treeid));
  subsequence = fnv1a32(subsequence, uint32_t(nodeid));
  raft::random::PCGenerator gen(seed, subsequence, uint64_t(0));
  // uniform integer draw over the full feature range [0, n)
  raft::random::UniformIntDistParams<IdxT, uint64_t> uniform_int_dist_params;
  uniform_int_dist_params.start = 0;
  uniform_int_dist_params.end = n;
  uniform_int_dist_params.diff =
    uint64_t(uniform_int_dist_params.end - uniform_int_dist_params.start);
  IdxT n_uniques = 0;
  // per-thread slices of the block-wide blocked arrangement of samples
  IdxT items[MAX_SAMPLES_PER_THREAD];        // candidate column ids
  IdxT col_indices[MAX_SAMPLES_PER_THREAD];  // output slot of each kept item
  IdxT mask[MAX_SAMPLES_PER_THREAD];         // 1 where items[] holds a kept unique
  // populate this
  for (int i = 0; i < MAX_SAMPLES_PER_THREAD; ++i)
    mask[i] = 0;
  do {
    // blocked arrangement
    for (int cta_sample_idx = MAX_SAMPLES_PER_THREAD * threadIdx.x, thread_local_sample_idx = 0;
         thread_local_sample_idx < MAX_SAMPLES_PER_THREAD;
         ++cta_sample_idx, ++thread_local_sample_idx) {
      // mask of the previous iteration, if exists, is re-used here
      // so previously generated unique random numbers are used.
      // newly generated random numbers may or may not duplicate the previously generated ones
      // but this ensures some forward progress in order to generate at least 'k' unique random
      // samples.
      if (mask[thread_local_sample_idx] == 0 and cta_sample_idx < n_parallel_samples)
        raft::random::custom_next(
          gen, &items[thread_local_sample_idx], uniform_int_dist_params, IdxT(0), IdxT(0));
      else if (mask[thread_local_sample_idx] ==
               0)  // indices that exceed `n_parallel_samples` will not generate
        items[thread_local_sample_idx] = n - 1;
      else
        continue;  // this case is for samples whose mask == 1 (saving previous iteration's random
                   // number generated)
    }
    // Specialize BlockRadixSort type for our thread block
    typedef cub::BlockRadixSort<IdxT, BLOCK_THREADS, MAX_SAMPLES_PER_THREAD> BlockRadixSortT;
    // BlockAdjacentDifference
    typedef cub::BlockAdjacentDifference<IdxT, BLOCK_THREADS> BlockAdjacentDifferenceT;
    // BlockScan
    typedef cub::BlockScan<IdxT, BLOCK_THREADS> BlockScanT;
    // Shared memory (union: the three collectives are used strictly one after
    // another, separated by __syncthreads, so the storage can be shared)
    __shared__ union TempStorage {
      typename BlockRadixSortT::TempStorage sort;
      typename BlockAdjacentDifferenceT::TempStorage diff;
      typename BlockScanT::TempStorage scan;
    } temp_storage;
    // collectively sort items so duplicates become adjacent
    BlockRadixSortT(temp_storage.sort).Sort(items);
    __syncthreads();
    // compute the mask
    // compute the adjacent differences according to the functor
    // TODO: Replace deprecated 'FlagHeads' with 'SubtractLeft' when it is available
    BlockAdjacentDifferenceT(temp_storage.diff)
      .SubtractLeft(items, mask, CustomDifference<IdxT>(), mask[0]);
    __syncthreads();
    // do a scan on the mask to get the indices for gathering
    BlockScanT(temp_storage.scan).ExclusiveSum(mask, col_indices, n_uniques);
    __syncthreads();
  } while (n_uniques < k);
  // write the items[] of only the ones with mask[]=1 to col[offset + col_idx[]]
  IdxT col_offset = k * blockIdx.x;
  for (int i = 0; i < MAX_SAMPLES_PER_THREAD; ++i) {
    if (mask[i] and col_indices[i] < k) { colids[col_offset + col_indices[i]] = items[i]; }
  }
}
// algo L of the reservoir sampling algorithm
/**
* @brief For each work item select 'k' features without replacement from 'n' features using algo-L.
* On exit each row of the colids array will contain k random integers from the [0..n-1] range.
*
* Each thread works on single row. The parameters work_items_size, treeid and seed are
* used to initialize a unique random seed for each work item.
*
* @param colids the generated random indices, size [work_items_size, k] row major layout
* @param work_items
* @param treeid
* @param seed
* @param n total cos to sample from
* @param k number of cols to sample
* algorithm of reservoir sampling. wiki :
* https://en.wikipedia.org/wiki/Reservoir_sampling#An_optimal_algorithm
*/
template <typename IdxT>
__global__ void algo_L_sample_kernel(int* colids,
                                     const NodeWorkItem* work_items,
                                     size_t work_items_size,
                                     IdxT treeid,
                                     uint64_t seed,
                                     size_t n /* total cols to sample from*/,
                                     size_t k /* cols to sample */)
{
  // one thread handles one work item (node)
  int tid = threadIdx.x + blockIdx.x * blockDim.x;
  if (tid >= work_items_size) return;
  const uint32_t nodeid = work_items[tid].idx;
  // distinct RNG subsequence per (tree, node) pair
  uint64_t subsequence = (uint64_t(treeid) << 32) | uint64_t(nodeid);
  raft::random::PCGenerator gen(seed, subsequence, uint64_t(0));
  raft::random::UniformIntDistParams<IdxT, uint64_t> uniform_int_dist_params;
  uniform_int_dist_params.start = 0;
  uniform_int_dist_params.end = k;
  uniform_int_dist_params.diff =
    uint64_t(uniform_int_dist_params.end - uniform_int_dist_params.start);
  float fp_uniform_val;
  IdxT int_uniform_val;
  // fp_uniform_val will have a random value between 0 and 1
  gen.next(fp_uniform_val);
  // W is algo-L's running acceptance weight, updated after every replacement
  double W = raft::myExp(raft::myLog(fp_uniform_val) / k);
  size_t col(0);
  // initially fill the reservoir array in increasing order of cols till k
  while (1) {
    colids[tid * k + col] = col;
    if (col == k - 1)
      break;
    else
      ++col;
  }
  // skip ahead by a geometrically-distributed number of columns, replacing a
  // random reservoir slot with each column that is landed on
  while (col < n) {
    // fp_uniform_val will have a random value between 0 and 1
    gen.next(fp_uniform_val);
    col += static_cast<int>(raft::myLog(fp_uniform_val) / raft::myLog(1 - W)) + 1;
    if (col < n) {
      // int_uniform_val picks the reservoir slot to overwrite; presumably lies
      // in [0, k) since `end` = k appears to be exclusive — confirm in raft
      raft::random::custom_next(gen, &int_uniform_val, uniform_int_dist_params, IdxT(0), IdxT(0));
      colids[tid * k + int_uniform_val] = col;  // the bad memory coalescing here is hidden
      // fp_uniform_val will have a random value between 0 and 1
      gen.next(fp_uniform_val);
      W *= raft::myExp(raft::myLog(fp_uniform_val) / k);
    }
  }
}
// Selection sampling: for each work item, draw M of N column indices without
// replacement; one thread per work item. Output rows of `colids` are sorted
// ascending by construction.
template <typename IdxT>
__global__ void adaptive_sample_kernel(int* colids,
                                       const NodeWorkItem* work_items,
                                       size_t work_items_size,
                                       IdxT treeid,
                                       uint64_t seed,
                                       int N,
                                       int M)
{
  int tid = threadIdx.x + blockIdx.x * blockDim.x;
  if (tid >= work_items_size) return;
  const uint32_t nodeid = work_items[tid].idx;
  // distinct RNG subsequence per (tree, node) pair
  uint64_t subsequence = (uint64_t(treeid) << 32) | uint64_t(nodeid);
  raft::random::PCGenerator gen(seed, subsequence, uint64_t(0));
  int selected_count = 0;
  for (int i = 0; i < N; i++) {
    uint32_t toss = 0;
    gen.next(toss);
    // accept candidate i when toss/2^32 < (M - selected)/(N - i); the
    // comparison is done in 64-bit fixed point to avoid floating point
    uint64_t lhs = uint64_t(M - selected_count);
    lhs <<= 32;
    uint64_t rhs = uint64_t(toss) * (N - i);
    if (lhs > rhs) {
      colids[tid * M + selected_count] = i;
      selected_count++;
      if (selected_count == M) break;  // all M slots filled
    }
  }
}
// Forward declaration (defined in builder_kernels_impl.cuh): evaluates split
// gains per (node, candidate column). Each threadblock accumulates a
// shared-memory histogram for one feature of one node; nodes spread over
// several blocks combine partial histograms via `histograms` in global memory.
template <typename DataT,
          typename LabelT,
          typename IdxT,
          int TPB,
          typename ObjectiveT,
          typename BinT>
__global__ void computeSplitKernel(BinT* histograms,
                                   IdxT n_bins,
                                   IdxT max_depth,
                                   IdxT min_samples_split,
                                   IdxT max_leaves,
                                   const Dataset<DataT, LabelT, IdxT> dataset,
                                   const Quantiles<DataT, IdxT> quantiles,
                                   const NodeWorkItem* work_items,
                                   IdxT colStart,
                                   const IdxT* colids,
                                   int* done_count,
                                   int* mutex,
                                   volatile Split<DataT, IdxT>* splits,
                                   ObjectiveT objective,
                                   IdxT treeid,
                                   const WorkloadInfo<IdxT>* workload_info,
                                   uint64_t seed);
} // namespace DT
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src/decisiontree/batched-levelalgo | rapidsai_public_repos/cuml/cpp/src/decisiontree/batched-levelalgo/kernels/entropy-double.cu | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/tree/flatnode.h>
#include "../bins.cuh"
#include "../objectives.cuh"
namespace ML {
namespace DT {
// Concrete type bindings for this translation unit: double-precision data,
// integer class labels, entropy objective. builder_kernels_impl.cuh
// (included below, outside these namespaces) instantiates the builder
// kernels with these aliases.
using _DataT      = double;
using _LabelT     = int;
using _IdxT       = int;
using _ObjectiveT = EntropyObjectiveFunction<_DataT, _LabelT, _IdxT>;
using _BinT       = CountBin;
using _DatasetT   = Dataset<_DataT, _LabelT, _IdxT>;
using _NodeT      = SparseTreeNode<_DataT, _LabelT, _IdxT>;
}  // namespace DT
}  // namespace ML
#include "builder_kernels_impl.cuh"
| 0 |
rapidsai_public_repos/cuml/cpp/src/decisiontree/batched-levelalgo | rapidsai_public_repos/cuml/cpp/src/decisiontree/batched-levelalgo/kernels/quantiles.cu |
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../quantiles.cuh"
#include <thrust/execution_policy.h>
#include <thrust/unique.h>
namespace ML {
namespace DT {
/**
 * @brief Extract up to `max_n_bins` equi-depth quantiles from one sorted column.
 *
 * Launch configuration: one block per column; threads stride over the bins.
 * Each thread writes the cut-point of bins tid, tid + blockDim.x, ...; then,
 * after a barrier, thread 0 compacts duplicate cut-points in place and
 * records the unique count.
 *
 * @param quantiles    [out] device array of at least `max_n_bins` values
 * @param n_bins       [out] number of unique quantiles actually written
 * @param sorted_data  [in]  column values sorted ascending, length `n_rows`
 * @param max_n_bins   requested number of bins
 * @param n_rows       number of rows in the column
 */
template <typename T>
__global__ void computeQuantilesKernel(
  T* quantiles, int* n_bins, const T* sorted_data, const int max_n_bins, const int n_rows)
{
  double bin_width = static_cast<double>(n_rows) / max_n_bins;
  for (int bin = threadIdx.x; bin < max_n_bins; bin += blockDim.x) {
    // get index by interpolation: last element of each equi-depth bin, clamped
    int idx        = int(round((bin + 1) * bin_width)) - 1;
    idx            = min(max(0, idx), n_rows - 1);
    quantiles[bin] = sorted_data[idx];
  }
  // all candidate quantiles must be written before thread 0 de-duplicates them
  __syncthreads();
  if (threadIdx.x == 0) {
    // make quantiles unique, in-place
    // thrust::seq to explicitly disable cuda dynamic parallelism here
    auto new_last = thrust::unique(thrust::seq, quantiles, quantiles + max_n_bins);
    // get the unique count
    *n_bins = new_last - quantiles;
  }
  // no trailing barrier needed: kernel exit already synchronizes the block
}
// explicit instantiations for the two supported floating-point data types
template __global__ void computeQuantilesKernel<float>(
  float* quantiles, int* n_bins, const float* sorted_data, const int max_n_bins, const int n_rows);
template __global__ void computeQuantilesKernel<double>(double* quantiles,
                                                        int* n_bins,
                                                        const double* sorted_data,
                                                        const int max_n_bins,
                                                        const int n_rows);
} // end namespace DT
} // end namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src/decisiontree/batched-levelalgo | rapidsai_public_repos/cuml/cpp/src/decisiontree/batched-levelalgo/kernels/inverse_gaussian-float.cu | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/tree/flatnode.h>
#include "../bins.cuh"
#include "../objectives.cuh"
namespace ML {
namespace DT {
// Concrete type bindings for this translation unit: single-precision data and
// labels, inverse-Gaussian regression objective. builder_kernels_impl.cuh
// (included below, outside these namespaces) instantiates the builder
// kernels with these aliases.
using _DataT      = float;
using _LabelT     = float;
using _IdxT       = int;
using _ObjectiveT = InverseGaussianObjectiveFunction<_DataT, _LabelT, _IdxT>;
using _BinT       = AggregateBin;
using _DatasetT   = Dataset<_DataT, _LabelT, _IdxT>;
using _NodeT      = SparseTreeNode<_DataT, _LabelT, _IdxT>;
}  // namespace DT
}  // namespace ML
#include "builder_kernels_impl.cuh"
| 0 |
rapidsai_public_repos/cuml/cpp/src/decisiontree/batched-levelalgo | rapidsai_public_repos/cuml/cpp/src/decisiontree/batched-levelalgo/kernels/inverse_gaussian-double.cu | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/tree/flatnode.h>
#include "../bins.cuh"
#include "../objectives.cuh"
namespace ML {
namespace DT {
// Concrete type bindings for this translation unit: double-precision data and
// labels, inverse-Gaussian regression objective. builder_kernels_impl.cuh
// (included below, outside these namespaces) instantiates the builder
// kernels with these aliases.
using _DataT      = double;
using _LabelT     = double;
using _IdxT       = int;
using _ObjectiveT = InverseGaussianObjectiveFunction<_DataT, _LabelT, _IdxT>;
using _BinT       = AggregateBin;
using _DatasetT   = Dataset<_DataT, _LabelT, _IdxT>;
using _NodeT      = SparseTreeNode<_DataT, _LabelT, _IdxT>;
}  // namespace DT
}  // namespace ML
#include "builder_kernels_impl.cuh"
| 0 |
rapidsai_public_repos/cuml/cpp/src/decisiontree/batched-levelalgo | rapidsai_public_repos/cuml/cpp/src/decisiontree/batched-levelalgo/kernels/builder_kernels_impl.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cstdio>
#include <common/grid_sync.cuh>
#include <cub/cub.cuh>
#include <raft/util/cuda_utils.cuh>
#include <thrust/binary_search.h>
#include "builder_kernels.cuh"
namespace ML {
namespace DT {
static constexpr int TPB_DEFAULT = 128;
/**
* @brief Partition the samples to left/right nodes based on the best split
* @return the position of the left child node in the nodes list. However, this
* value is valid only for threadIdx.x == 0.
* @note this should be called by only one block from all participating blocks
* 'smem' should be at least of size `sizeof(IdxT) * TPB * 2`
*/
template <typename DataT, typename LabelT, typename IdxT, int TPB>
DI void partitionSamples(const Dataset<DataT, LabelT, IdxT>& dataset,
                         const Split<DataT, IdxT>& split,
                         const NodeWorkItem& work_item,
                         char* smem)
{
  typedef cub::BlockScan<int, TPB> BlockScanT;
  // two independent scans per iteration (left misfits / right misfits)
  __shared__ typename BlockScanT::TempStorage temp1, temp2;
  // volatile: row_ids is read back immediately after other threads write it
  volatile auto* row_ids = reinterpret_cast<volatile IdxT*>(dataset.row_ids);
  // for compaction: two TPB-sized index buffers carved out of dynamic smem
  size_t smemSize  = sizeof(IdxT) * TPB;
  auto* lcomp      = reinterpret_cast<IdxT*>(smem);
  auto* rcomp      = reinterpret_cast<IdxT*>(smem + smemSize);
  auto range_start = work_item.instances.begin;
  auto range_len   = work_item.instances.count;
  // column of the chosen split feature (column-major layout, leading dim M)
  auto* col = dataset.data + split.colid * std::size_t(dataset.M);
  // [range_start, part) is the future left child, [part, end) the right one
  auto loffset = range_start, part = loffset + split.nLeft, roffset = part;
  auto end = range_start + range_len;
  int lflag = 0, rflag = 0, llen = 0, rlen = 0, minlen = 0;
  auto tid = threadIdx.x;
  // advance a TPB-wide window over each side, swapping misplaced rows in pairs
  while (loffset < part && roffset < end) {
    // find the samples in the left that belong to right and vice-versa
    auto loff = loffset + tid, roff = roffset + tid;
    if (llen == minlen) lflag = loff < part ? col[row_ids[loff]] > split.quesval : 0;
    if (rlen == minlen) rflag = roff < end ? col[row_ids[roff]] <= split.quesval : 0;
    // scan to compute the locations for each 'misfit' in the two partitions
    int lidx, ridx;
    BlockScanT(temp1).ExclusiveSum(lflag, lidx, llen);
    BlockScanT(temp2).ExclusiveSum(rflag, ridx, rlen);
    __syncthreads();
    // only min(llen, rlen) swap pairs can be formed this iteration
    minlen = llen < rlen ? llen : rlen;
    // compaction to figure out the right locations to swap
    if (lflag) lcomp[lidx] = loff;
    if (rflag) rcomp[ridx] = roff;
    __syncthreads();
    // reset the appropriate flags for the longer of the two
    if (lidx < minlen) lflag = 0;
    if (ridx < minlen) rflag = 0;
    // only the side whose misfits were fully consumed advances its window
    if (llen == minlen) loffset += TPB;
    if (rlen == minlen) roffset += TPB;
    // swap the 'misfit's
    if (tid < minlen) {
      auto a              = row_ids[lcomp[tid]];
      auto b              = row_ids[rcomp[tid]];
      row_ids[lcomp[tid]] = b;
      row_ids[rcomp[tid]] = a;
    }
  }
}
/**
 * @brief One threadblock per work item: applies the pre-computed best split by
 * partitioning the node's rows of dataset.row_ids into left/right halves.
 * Work items whose split is invalid (insufficient impurity gain or a child
 * smaller than min_samples_leaf) are skipped. Dynamic shared memory must
 * provide at least 2 * TPB * sizeof(IdxT) bytes for partitionSamples.
 */
template <typename DataT, typename LabelT, typename IdxT, int TPB>
__global__ void nodeSplitKernel(const IdxT max_depth,
                                const IdxT min_samples_leaf,
                                const IdxT min_samples_split,
                                const IdxT max_leaves,
                                const DataT min_impurity_decrease,
                                const Dataset<DataT, LabelT, IdxT> dataset,
                                const NodeWorkItem* work_items,
                                const Split<DataT, IdxT>* splits)
{
  extern __shared__ char smem[];
  const auto item = work_items[blockIdx.x];
  const auto best = splits[blockIdx.x];
  const bool skip =
    SplitNotValid(best, min_impurity_decrease, min_samples_leaf, IdxT(item.instances.count));
  if (!skip) { partitionSamples<DataT, LabelT, IdxT, TPB>(dataset, best, item, smem); }
}
// One threadblock per tree node; only leaf nodes do any work. Builds a
// shared-memory histogram over the leaf's rows, then converts it into the
// leaf's prediction vector. Dynamic shared memory must hold
// dataset.num_outputs BinT entries.
template <typename DatasetT, typename NodeT, typename ObjectiveT, typename DataT>
__global__ void leafKernel(ObjectiveT objective,
                           DatasetT dataset,
                           const NodeT* tree,
                           const InstanceRange* instance_ranges,
                           DataT* leaves)
{
  using BinT = typename ObjectiveT::BinT;
  extern __shared__ char shared_memory[];
  auto histogram = reinterpret_cast<BinT*>(shared_memory);
  auto node_id   = blockIdx.x;
  auto& node     = tree[node_id];
  auto range     = instance_ranges[node_id];
  // whole-block early out (uniform branch: every thread sees the same node)
  if (!node.IsLeaf()) return;
  auto tid = threadIdx.x;
  // zero-initialize the shared histogram (one bin per output/class)
  for (int i = tid; i < dataset.num_outputs; i += blockDim.x) {
    histogram[i] = BinT();
  }
  __syncthreads();
  // accumulate the labels of every row that reached this leaf
  for (auto i = range.begin + tid; i < range.begin + range.count; i += blockDim.x) {
    auto label = dataset.labels[dataset.row_ids[i]];
    BinT::IncrementHistogram(histogram, 1, 0, label);
  }
  __syncthreads();
  // a single thread folds the histogram into the leaf's prediction vector
  if (tid == 0) {
    ObjectiveT::SetLeafVector(
      histogram, dataset.num_outputs, leaves + dataset.num_outputs * node_id);
  }
}
/**
* @brief For every threadblock, converts the smem pdf-histogram to
* cdf-histogram inplace using inclusive block-sum-scan and returns
* the total_sum
* @return The total sum aggregated over the sumscan,
* as well as the modified cdf-histogram pointer
*/
template <typename BinT, typename IdxT, int TPB>
DI BinT pdf_to_cdf(BinT* shared_histogram, IdxT n_bins)
{
  // Blockscan instance preparation
  typedef cub::BlockScan<BinT, TPB> BlockScan;
  __shared__ typename BlockScan::TempStorage temp_storage;
  // variable to accumulate aggregate of sumscans of previous iterations
  BinT total_aggregate = BinT();
  // loop bound rounded up to a multiple of TPB so every thread reaches the
  // collective BlockScan call; padding lanes contribute empty bins
  for (IdxT tix = threadIdx.x; tix < raft::ceildiv(n_bins, TPB) * TPB; tix += blockDim.x) {
    BinT result;
    BinT block_aggregate;
    BinT element = tix < n_bins ? shared_histogram[tix] : BinT();
    BlockScan(temp_storage).InclusiveSum(element, result, block_aggregate);
    __syncthreads();
    // shift this tile's inclusive scan by the total of all previous tiles
    if (tix < n_bins) { shared_histogram[tix] = result + total_aggregate; }
    total_aggregate += block_aggregate;
  }
  // return the total sum
  return total_aggregate;
}
/**
 * @brief For one tree node (work item) and one sampled feature, builds a
 *        per-class histogram of the node's rows over the feature's quantile
 *        bins, scans it into a CDF, evaluates the best split candidate, and
 *        atomically merges it into splits[nid].
 *
 * Launch layout (as used below): blockIdx.x indexes an entry of
 * `workload_info` (one node may be covered by several consecutive blocks) and
 * blockIdx.y indexes the sampled feature. Dynamic shared memory must be large
 * enough for n_bins * objective.NumClasses() BinT histogram bins, n_bins
 * DataT quantiles, and one int used for the multi-block completion handshake.
 *
 * When a node spans more than one block, partial histograms are accumulated
 * into the global `histograms` scratch buffer and only the last block to
 * finish (per MLCommon::signalDone) computes the split.
 *
 * NOTE(review): max_depth, min_samples_split, max_leaves, treeid and seed are
 * not referenced in this kernel body — presumably kept for interface parity
 * with sibling kernels; confirm before relying on them here.
 */
template <typename DataT,
          typename LabelT,
          typename IdxT,
          int TPB,
          typename ObjectiveT,
          typename BinT>
__global__ void computeSplitKernel(BinT* histograms,
                                   IdxT max_n_bins,
                                   IdxT max_depth,
                                   IdxT min_samples_split,
                                   IdxT max_leaves,
                                   const Dataset<DataT, LabelT, IdxT> dataset,
                                   const Quantiles<DataT, IdxT> quantiles,
                                   const NodeWorkItem* work_items,
                                   IdxT colStart,
                                   const IdxT* colids,
                                   int* done_count,
                                   int* mutex,
                                   volatile Split<DataT, IdxT>* splits,
                                   ObjectiveT objective,
                                   IdxT treeid,
                                   const WorkloadInfo<IdxT>* workload_info,
                                   uint64_t seed)
{
  // dynamic shared memory
  extern __shared__ char smem[];
  // Read workload info for this block
  WorkloadInfo<IdxT> workload_info_cta = workload_info[blockIdx.x];
  IdxT nid                             = workload_info_cta.nodeid;
  IdxT large_nid                       = workload_info_cta.large_nodeid;
  const auto work_item                 = work_items[nid];
  auto range_start                     = work_item.instances.begin;
  auto range_len                       = work_item.instances.count;
  // this block is block number `offset_blockid` of the `num_blocks` blocks
  // cooperating on node `nid`
  IdxT offset_blockid = workload_info_cta.offset_blockid;
  IdxT num_blocks     = workload_info_cta.num_blocks;
  // obtaining the feature to test split on
  IdxT col;
  if (dataset.n_sampled_cols == dataset.N) {
    // all columns are used: the feature id is the column index itself
    col = colStart + blockIdx.y;
  } else {
    // column sampling: look up the feature id sampled for this node
    IdxT colIndex = colStart + blockIdx.y;
    col           = colids[nid * dataset.n_sampled_cols + colIndex];
  }
  // getting the n_bins for that feature
  int n_bins                = quantiles.n_bins_array[col];
  auto end                  = range_start + range_len;
  auto shared_histogram_len = n_bins * objective.NumClasses();
  // carve the dynamic shared memory into histogram | quantiles | done-flag
  auto* shared_histogram = alignPointer<BinT>(smem);
  auto* shared_quantiles = alignPointer<DataT>(shared_histogram + shared_histogram_len);
  auto* shared_done      = alignPointer<int>(shared_quantiles + n_bins);
  // threads of all cooperating blocks stride the node's row range together
  IdxT stride = blockDim.x * num_blocks;
  IdxT tid    = threadIdx.x + offset_blockid * blockDim.x;
  // populating shared memory with initial values
  for (IdxT i = threadIdx.x; i < shared_histogram_len; i += blockDim.x)
    shared_histogram[i] = BinT();
  for (IdxT b = threadIdx.x; b < n_bins; b += blockDim.x)
    shared_quantiles[b] = quantiles.quantiles_array[max_n_bins * col + b];
  // synchronizing above changes across block
  __syncthreads();
  // compute pdf shared histogram for all bins for all classes in shared mem
  // Must be 64 bit - can easily grow larger than a 32 bit int
  std::size_t col_offset = std::size_t(col) * dataset.M;
  for (auto i = range_start + tid; i < end; i += stride) {
    // each thread works over a data point and strides to the next
    auto row   = dataset.row_ids[i];
    auto data  = dataset.data[row + col_offset];
    auto label = dataset.labels[row];
    // `start` is lowest index such that data <= shared_quantiles[start]
    IdxT start = lower_bound(shared_quantiles, n_bins, data);
    // ++shared_histogram[start]
    BinT::IncrementHistogram(shared_histogram, n_bins, start, label);
  }
  // synchronizing above changes across block
  __syncthreads();
  if (num_blocks > 1) {
    // update the corresponding global location
    // global scratch is laid out as one slice per (large node, feature) pair
    auto histograms_offset =
      ((large_nid * gridDim.y) + blockIdx.y) * max_n_bins * objective.NumClasses();
    for (IdxT i = threadIdx.x; i < shared_histogram_len; i += blockDim.x) {
      BinT::AtomicAdd(histograms + histograms_offset + i, shared_histogram[i]);
    }
    __threadfence();  // for commit guarantee
    __syncthreads();
    // last threadblock will go ahead and compute the best split
    bool last = MLCommon::signalDone(
      done_count + nid * gridDim.y + blockIdx.y, num_blocks, offset_blockid == 0, shared_done);
    // if not the last threadblock, exit
    if (!last) return;
    // store the complete global histogram in shared memory of last block
    for (IdxT i = threadIdx.x; i < shared_histogram_len; i += blockDim.x)
      shared_histogram[i] = histograms[histograms_offset + i];
    __syncthreads();
  }
  // PDF to CDF inplace in `shared_histogram`
  for (IdxT c = 0; c < objective.NumClasses(); ++c) {
    // left to right scan operation for scanning
    // "lesser-than-or-equal" counts
    // NOTE(review): total_sum (the count over all bins) is not used here
    BinT total_sum = pdf_to_cdf<BinT, IdxT, TPB>(shared_histogram + n_bins * c, n_bins);
    // now, `shared_histogram[n_bins * c + i]` will have count of datapoints of class `c`
    // that are less than or equal to `shared_quantiles[i]`.
  }
  __syncthreads();
  // calculate the best candidate bins (one for each thread in the block) in current feature and
  // corresponding information gain for splitting
  Split<DataT, IdxT> sp =
    objective.Gain(shared_histogram, shared_quantiles, col, range_len, n_bins);
  __syncthreads();
  // calculate best bins among candidate bins per feature using warp reduce
  // then atomically update across features to get best split per node
  // (in split[nid])
  sp.evalBestSplit(smem, splits + nid, mutex + nid);
}
// Explicit template instantiations to avoid code duplication. The
// underscore-prefixed names (_DataT, _LabelT, _IdxT, _ObjectiveT, _BinT,
// _DatasetT, _NodeT) are `using` aliases defined by the .cu translation unit
// that includes this header, so each including TU instantiates the kernels
// for exactly one (data type, label type, objective) combination.
template __global__ void nodeSplitKernel<_DataT, _LabelT, _IdxT, TPB_DEFAULT>(
  const _IdxT max_depth,
  const _IdxT min_samples_leaf,
  const _IdxT min_samples_split,
  const _IdxT max_leaves,
  const _DataT min_impurity_decrease,
  const Dataset<_DataT, _LabelT, _IdxT> dataset,
  const NodeWorkItem* work_items,
  const Split<_DataT, _IdxT>* splits);
template __global__ void leafKernel<_DatasetT, _NodeT, _ObjectiveT, _DataT>(
  _ObjectiveT objective,
  _DatasetT dataset,
  const _NodeT* tree,
  const InstanceRange* instance_ranges,
  _DataT* leaves);
template __global__ void
computeSplitKernel<_DataT, _LabelT, _IdxT, TPB_DEFAULT, _ObjectiveT, _BinT>(
  _BinT* histograms,
  _IdxT n_bins,
  _IdxT max_depth,
  _IdxT min_samples_split,
  _IdxT max_leaves,
  const Dataset<_DataT, _LabelT, _IdxT> dataset,
  const Quantiles<_DataT, _IdxT> quantiles,
  const NodeWorkItem* work_items,
  _IdxT colStart,
  const _IdxT* colids,
  int* done_count,
  int* mutex,
  volatile Split<_DataT, _IdxT>* splits,
  _ObjectiveT objective,
  _IdxT treeid,
  const WorkloadInfo<_IdxT>* workload_info,
  uint64_t seed);
} // namespace DT
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src/decisiontree/batched-levelalgo | rapidsai_public_repos/cuml/cpp/src/decisiontree/batched-levelalgo/kernels/gini-float.cu | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/tree/flatnode.h>
#include "../bins.cuh"
#include "../objectives.cuh"
namespace ML {
namespace DT {
// Type bindings consumed by builder_kernels_impl.cuh (included below): this
// translation unit instantiates the batched-level tree-builder kernels for
// Gini-impurity classification on float features with int class labels.
using _DataT      = float;
using _LabelT     = int;
using _IdxT       = int;
using _ObjectiveT = GiniObjectiveFunction<_DataT, _LabelT, _IdxT>;
using _BinT       = CountBin;
using _DatasetT   = Dataset<_DataT, _LabelT, _IdxT>;
using _NodeT      = SparseTreeNode<_DataT, _LabelT, _IdxT>;
} // namespace DT
} // namespace ML
#include "builder_kernels_impl.cuh"
| 0 |
rapidsai_public_repos/cuml/cpp/src/decisiontree/batched-levelalgo | rapidsai_public_repos/cuml/cpp/src/decisiontree/batched-levelalgo/kernels/gamma-double.cu | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/tree/flatnode.h>
#include "../bins.cuh"
#include "../objectives.cuh"
namespace ML {
namespace DT {
// Type bindings consumed by builder_kernels_impl.cuh (included below): this
// translation unit instantiates the batched-level tree-builder kernels for
// Gamma-objective regression on double features with double labels.
using _DataT      = double;
using _LabelT     = double;
using _IdxT       = int;
using _ObjectiveT = GammaObjectiveFunction<_DataT, _LabelT, _IdxT>;
using _BinT       = AggregateBin;
using _DatasetT   = Dataset<_DataT, _LabelT, _IdxT>;
using _NodeT      = SparseTreeNode<_DataT, _LabelT, _IdxT>;
} // namespace DT
} // namespace ML
#include "builder_kernels_impl.cuh"
| 0 |
rapidsai_public_repos/cuml/cpp/src/decisiontree/batched-levelalgo | rapidsai_public_repos/cuml/cpp/src/decisiontree/batched-levelalgo/kernels/poisson-float.cu | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/tree/flatnode.h>
#include "../bins.cuh"
#include "../objectives.cuh"
namespace ML {
namespace DT {
// Type bindings consumed by builder_kernels_impl.cuh (included below): this
// translation unit instantiates the batched-level tree-builder kernels for
// Poisson-objective regression on float features with float labels.
using _DataT      = float;
using _LabelT     = float;
using _IdxT       = int;
using _ObjectiveT = PoissonObjectiveFunction<_DataT, _LabelT, _IdxT>;
using _BinT       = AggregateBin;
using _DatasetT   = Dataset<_DataT, _LabelT, _IdxT>;
using _NodeT      = SparseTreeNode<_DataT, _LabelT, _IdxT>;
} // namespace DT
} // namespace ML
#include "builder_kernels_impl.cuh"
| 0 |
rapidsai_public_repos/cuml/cpp/src/decisiontree/batched-levelalgo | rapidsai_public_repos/cuml/cpp/src/decisiontree/batched-levelalgo/kernels/mse-float.cu | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/tree/flatnode.h>
#include "../bins.cuh"
#include "../objectives.cuh"
namespace ML {
namespace DT {
// Type bindings consumed by builder_kernels_impl.cuh (included below): this
// translation unit instantiates the batched-level tree-builder kernels for
// MSE-objective regression on float features with float labels.
using _DataT      = float;
using _LabelT     = float;
using _IdxT       = int;
using _ObjectiveT = MSEObjectiveFunction<_DataT, _LabelT, _IdxT>;
using _BinT       = AggregateBin;
using _DatasetT   = Dataset<_DataT, _LabelT, _IdxT>;
using _NodeT      = SparseTreeNode<_DataT, _LabelT, _IdxT>;
} // namespace DT
} // namespace ML
#include "builder_kernels_impl.cuh"
| 0 |
rapidsai_public_repos/cuml/cpp/src/decisiontree/batched-levelalgo | rapidsai_public_repos/cuml/cpp/src/decisiontree/batched-levelalgo/kernels/poisson-double.cu | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/tree/flatnode.h>
#include "../bins.cuh"
#include "../objectives.cuh"
namespace ML {
namespace DT {
// Type bindings consumed by builder_kernels_impl.cuh (included below): this
// translation unit instantiates the batched-level tree-builder kernels for
// Poisson-objective regression on double features with double labels.
using _DataT      = double;
using _LabelT     = double;
using _IdxT       = int;
using _ObjectiveT = PoissonObjectiveFunction<_DataT, _LabelT, _IdxT>;
using _BinT       = AggregateBin;
using _DatasetT   = Dataset<_DataT, _LabelT, _IdxT>;
using _NodeT      = SparseTreeNode<_DataT, _LabelT, _IdxT>;
} // namespace DT
} // namespace ML
#include "builder_kernels_impl.cuh"
| 0 |
rapidsai_public_repos/cuml/cpp/src/decisiontree/batched-levelalgo | rapidsai_public_repos/cuml/cpp/src/decisiontree/batched-levelalgo/kernels/mse-double.cu | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/tree/flatnode.h>
#include "../bins.cuh"
#include "../objectives.cuh"
namespace ML {
namespace DT {
// Type bindings consumed by builder_kernels_impl.cuh (included below): this
// translation unit instantiates the batched-level tree-builder kernels for
// MSE-objective regression on double features with double labels.
using _DataT      = double;
using _LabelT     = double;
using _IdxT       = int;
using _ObjectiveT = MSEObjectiveFunction<_DataT, _LabelT, _IdxT>;
using _BinT       = AggregateBin;
using _DatasetT   = Dataset<_DataT, _LabelT, _IdxT>;
using _NodeT      = SparseTreeNode<_DataT, _LabelT, _IdxT>;
} // namespace DT
} // namespace ML
#include "builder_kernels_impl.cuh"
| 0 |
rapidsai_public_repos/cuml/cpp/src/decisiontree/batched-levelalgo | rapidsai_public_repos/cuml/cpp/src/decisiontree/batched-levelalgo/kernels/entropy-float.cu | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/tree/flatnode.h>
#include "../bins.cuh"
#include "../objectives.cuh"
namespace ML {
namespace DT {
// Type bindings consumed by builder_kernels_impl.cuh (included below): this
// translation unit instantiates the batched-level tree-builder kernels for
// entropy (information gain) classification on float features, int labels.
using _DataT      = float;
using _LabelT     = int;
using _IdxT       = int;
using _ObjectiveT = EntropyObjectiveFunction<_DataT, _LabelT, _IdxT>;
using _BinT       = CountBin;
using _DatasetT   = Dataset<_DataT, _LabelT, _IdxT>;
using _NodeT      = SparseTreeNode<_DataT, _LabelT, _IdxT>;
} // namespace DT
} // namespace ML
#include "builder_kernels_impl.cuh"
| 0 |
rapidsai_public_repos/cuml/cpp/src/decisiontree/batched-levelalgo | rapidsai_public_repos/cuml/cpp/src/decisiontree/batched-levelalgo/kernels/gamma-float.cu | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/tree/flatnode.h>
#include "../bins.cuh"
#include "../objectives.cuh"
namespace ML {
namespace DT {
// Type bindings consumed by builder_kernels_impl.cuh (included below): this
// translation unit instantiates the batched-level tree-builder kernels for
// Gamma-objective regression on float features with float labels.
using _DataT      = float;
using _LabelT     = float;
using _IdxT       = int;
using _ObjectiveT = GammaObjectiveFunction<_DataT, _LabelT, _IdxT>;
using _BinT       = AggregateBin;
using _DatasetT   = Dataset<_DataT, _LabelT, _IdxT>;
using _NodeT      = SparseTreeNode<_DataT, _LabelT, _IdxT>;
} // namespace DT
} // namespace ML
#include "builder_kernels_impl.cuh"
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/fil/internal.cuh | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** @file internal.cuh cuML-internal interface to Forest Inference Library. */
#pragma once
#include <bitset>
#include <cstdint>
#include <cuml/fil/fil.h>
#include <iostream>
#include <numeric>
#include <raft/core/error.hpp>
#include <raft/util/cuda_utils.cuh>
#include <rmm/device_uvector.hpp>
#include <treelite/c_api.h>
#include <treelite/tree.h>
#include <utility>
#include <vector>
namespace raft {
class handle_t;
}
// needed for node_traits<...>
namespace treelite {
template <typename, typename>
struct ModelImpl;
}
namespace ML {
namespace fil {
const int BITS_PER_BYTE = 8;
/// modpow2(a, log2_b) == a % b, where b == pow(2, log2_b).
/// Implemented as a bit mask, which matches '%' for non-negative `a`.
__host__ __device__ __forceinline__ int modpow2(int a, int log2_b)
{
  const int mask = (1 << log2_b) - 1;
  return a & mask;
}
/**
 * output_t are bit flags that define the output produced by the FIL predictor.
 * A valid output_t value is a combination of the following, joined with '|'
 * (bitwise or); each flag names a stage, and each stage operates on the
 * output of the previous one:
 * - one of RAW or AVG, indicating how to combine individual tree outputs into the forest output
 * - optional SIGMOID for applying the sigmoid transform
 * - optional CLASS, to output the class label
 * - optional SOFTMAX, to normalize multi-class margins into probabilities
 */
enum output_t {
  /** raw output: the sum of the tree outputs; use for GBM models for
      regression, or for binary classification for the value before the
      transformation; note that this value is 0, and may be omitted
      when combined with other flags */
  RAW = 0x0,
  /** average output: divide the sum of the tree outputs by the number of trees
      before further transformations; use for random forests for regression
      and binary classification for the probability */
  AVG = 0x1,
  /** sigmoid transformation: apply 1/(1+exp(-x)) to the sum or average of tree
      outputs; use for GBM binary classification models for probability */
  SIGMOID = 0x10,
  /** output class label: either apply threshold to the output of the previous stage (for binary
      classification), or select the class with the most votes to get the class label (for
      multi-class classification). */
  CLASS = 0x100,
  /** softmax: apply softmax to class margins when predicting probability
      in multiclass classification. Softmax is made robust by subtracting max
      from margins before applying. */
  SOFTMAX = 0x1000,
  // common flag combinations, named for convenience
  SIGMOID_CLASS     = SIGMOID | CLASS,
  AVG_CLASS         = AVG | CLASS,
  AVG_SIGMOID_CLASS = AVG | SIGMOID | CLASS,
  AVG_SOFTMAX       = AVG | SOFTMAX,
  AVG_CLASS_SOFTMAX = AVG | CLASS | SOFTMAX,
  /** bitwise OR of every flag; NOTE(review): presumably used as a mask to
      validate flag combinations — confirm at use sites */
  ALL_SET = AVG | SIGMOID | CLASS | SOFTMAX
};
/** val_t is the payload within a FIL leaf. Being a union, .f and .idx alias
    the same storage; which member is meaningful depends on the node kind and
    leaf algorithm. Default-initializes to NAN via the .f member. */
template <typename real_t>
union val_t {
  /** floating-point threshold value for parent node or output value
      (e.g. class probability or regression summand) for leaf node */
  real_t f = NAN;
  /** class label, leaf vector index or categorical node set offset */
  int idx;
};
/** base_node contains common implementation details for dense and sparse nodes.
    Each node is a val_t payload plus an int `bits` field packed as follows:
    bit 31 = is_leaf, bit 30 = def_left, bit 29 = is_categorical,
    bits 0..28 = feature id (FID_MASK). Aligned to 2 * sizeof(real_t) so nodes
    pack tightly in device arrays. */
template <typename real_t>
struct alignas(2 * sizeof(real_t)) base_node {
  using real_type = real_t;  // floating-point type
  /** val, for parent nodes, is a threshold or category list offset. For leaf
      nodes, it is the tree prediction (see see leaf_output_t<leaf_algo_t>::T) */
  val_t<real_t> val;
  /** bits encode various information about the node, with the exact nature of
      this information depending on the node type; it includes e.g. whether the
      node is a leaf or inner node, and for inner nodes, additional information,
      e.g. the default direction, feature id or child index */
  int bits;
  static const int IS_LEAF_OFFSET        = 31;
  static const int IS_LEAF_MASK          = 1 << IS_LEAF_OFFSET;
  static const int DEF_LEFT_OFFSET       = IS_LEAF_OFFSET - 1;
  static const int DEF_LEFT_MASK         = 1 << DEF_LEFT_OFFSET;
  static const int IS_CATEGORICAL_OFFSET = DEF_LEFT_OFFSET - 1;
  static const int IS_CATEGORICAL_MASK   = 1 << IS_CATEGORICAL_OFFSET;
  static const int FID_MASK              = (1 << IS_CATEGORICAL_OFFSET) - 1;
  /** returns the leaf payload as the requested type (int index, real_t value,
      or the whole val_t union); the compile-time if selects the member */
  template <class o_t>
  __host__ __device__ o_t output() const
  {
    static_assert(
      std::is_same_v<o_t, int> || std::is_same_v<o_t, real_t> || std::is_same_v<o_t, val_t<real_t>>,
      "invalid o_t type parameter in node.output()");
    if constexpr (std::is_same_v<o_t, int>) {
      return val.idx;
    } else if constexpr (std::is_same_v<o_t, real_t>) {
      return val.f;
    } else if constexpr (std::is_same_v<o_t, val_t<real_t>>) {
      return val;
    }
    // control flow should not reach here
    return o_t();
  }
  /// categorical set offset (inner categorical nodes)
  __host__ __device__ int set() const { return val.idx; }
  /// numerical split threshold (inner numerical nodes)
  __host__ __device__ real_t thresh() const { return val.f; }
  /// raw split payload
  __host__ __device__ val_t<real_t> split() const { return val; }
  /// feature id tested by this (inner) node
  __host__ __device__ int fid() const { return bits & FID_MASK; }
  /// default direction for NaN feature values: true means branch left
  __host__ __device__ bool def_left() const { return bits & DEF_LEFT_MASK; }
  __host__ __device__ bool is_leaf() const { return bits & IS_LEAF_MASK; }
  __host__ __device__ bool is_categorical() const { return bits & IS_CATEGORICAL_MASK; }
  __host__ __device__ base_node() : val{}, bits(0) {}
  /** packs the flags and feature id into `bits` and stores either the leaf
      output or the split payload in `val`; rejects feature ids too large for
      the 29-bit field */
  base_node(val_t<real_t> output,
            val_t<real_t> split,
            int fid,
            bool def_left,
            bool is_leaf,
            bool is_categorical)
  {
    RAFT_EXPECTS((fid & FID_MASK) == fid, "internal error: feature ID doesn't fit into base_node");
    bits = (fid & FID_MASK) | (def_left ? DEF_LEFT_MASK : 0) | (is_leaf ? IS_LEAF_MASK : 0) |
           (is_categorical ? IS_CATEGORICAL_MASK : 0);
    if (is_leaf)
      val = output;
    else
      val = split;
  }
};
/** dense_node is a single node of a dense forest. Dense trees store no child
    pointers: nodes live in implicit binary-heap order, so the left child of
    node `curr` is at index 2 * curr + 1 (and the right child follows it). */
template <typename real_t>
struct alignas(2 * sizeof(real_t)) dense_node : base_node<real_t> {
  dense_node() = default;
  /// ignoring left_index, this is useful to unify import from treelite
  dense_node(val_t<real_t> output,
             val_t<real_t> split,
             int fid,
             bool def_left,
             bool is_leaf,
             bool is_categorical,
             int left_index = -1)
    : base_node<real_t>(output, split, fid, def_left, is_leaf, is_categorical)
  {
  }
  /** index of the left child, where curr is the index of the current node */
  __host__ __device__ int left(int curr) const { return 2 * curr + 1; }
};
/** sparse_node16 is a 16-byte node in a sparse forest: a base_node plus an
    explicit left-child index stored in a separate int field (the right child
    is assumed to directly follow the left one — see left()). */
template <typename real_t>
struct alignas(16) sparse_node16 : base_node<real_t> {
  // index of the left child within the forest's node array
  int left_idx;
  __host__ __device__ sparse_node16() : left_idx(0) {}
  sparse_node16(val_t<real_t> output,
                val_t<real_t> split,
                int fid,
                bool def_left,
                bool is_leaf,
                bool is_categorical,
                int left_index)
    : base_node<real_t>(output, split, fid, def_left, is_leaf, is_categorical), left_idx(left_index)
  {
  }
  __host__ __device__ int left_index() const { return left_idx; }
  /** index of the left child, where curr is the index of the current node */
  __host__ __device__ int left(int curr) const { return left_idx; }
};
/** sparse_node8 is a node of reduced size (8 bytes) in a sparse forest.
    To fit in 8 bytes it is float-only and re-packs `bits`: the feature id is
    narrowed to the low FID_NUM_BITS (13) bits, freeing LEFT_NUM_BITS (16)
    bits to store the left-child index inline (bits 13..28); the three flag
    bits (29..31) keep their base_node meaning. */
struct alignas(8) sparse_node8 : base_node<float> {
  static const int LEFT_NUM_BITS = 16;
  static const int FID_NUM_BITS  = IS_CATEGORICAL_OFFSET - LEFT_NUM_BITS;
  static const int LEFT_OFFSET   = FID_NUM_BITS;
  // shadows base_node::FID_MASK with the narrower 13-bit mask
  static const int FID_MASK  = (1 << FID_NUM_BITS) - 1;
  static const int LEFT_MASK = ((1 << LEFT_NUM_BITS) - 1) << LEFT_OFFSET;
  __host__ __device__ int fid() const { return bits & FID_MASK; }
  __host__ __device__ int left_index() const { return (bits & LEFT_MASK) >> LEFT_OFFSET; }
  sparse_node8() = default;
  /** builds the base node, then verifies both the feature id and the
      left-child index fit in their narrowed fields before packing the index */
  sparse_node8(val_t<float> output,
               val_t<float> split,
               int fid,
               bool def_left,
               bool is_leaf,
               bool is_categorical,
               int left_index)
    : base_node<float>(output, split, fid, def_left, is_leaf, is_categorical)
  {
    RAFT_EXPECTS((fid & FID_MASK) == fid,
                 "internal error: feature ID doesn't fit into sparse_node8");
    RAFT_EXPECTS(((left_index << LEFT_OFFSET) & LEFT_MASK) == (left_index << LEFT_OFFSET),
                 "internal error: left child index doesn't fit into sparse_node8");
    bits |= left_index << LEFT_OFFSET;
  }
  /** index of the left child, where curr is the index of the current node */
  __host__ __device__ int left(int curr) const { return left_index(); }
};
template <typename node_t>
struct storage;
template <typename node_t>
struct dense_forest;
template <typename node_t>
struct sparse_forest;
/** node_traits maps a node type to its storage/forest companion types and its
    storage_type_t enum value. This primary template serves the two sparse
    node types (sparse_node16 and sparse_node8); dense nodes use the
    specialization that follows it in this file. */
template <typename node_t>
struct node_traits {
  using real_type = typename node_t::real_type;
  using storage   = ML::fil::storage<node_t>;
  using forest    = sparse_forest<node_t>;
  static const bool IS_DENSE = false;
  static constexpr storage_type_t storage_type_enum =
    std::is_same_v<sparse_node16<real_type>, node_t> ? SPARSE : SPARSE8;
  // NOTE(review): defined elsewhere; presumably validates that the treelite
  // model can be represented with this node type — confirm at definition
  template <typename threshold_t, typename leaf_t>
  static void check(const treelite::ModelImpl<threshold_t, leaf_t>& model);
};
/** node_traits specialization for dense nodes; check() is a no-op because
    dense storage imposes no extra constraints on the imported model here */
template <typename real_t>
struct node_traits<dense_node<real_t>> {
  using storage = storage<dense_node<real_t>>;
  using forest  = dense_forest<dense_node<real_t>>;
  static const bool IS_DENSE                    = true;
  static const storage_type_t storage_type_enum = DENSE;
  template <typename threshold_t, typename leaf_t>
  static void check(const treelite::ModelImpl<threshold_t, leaf_t>& model)
  {
  }
};
/** leaf_algo_t describes what the leaves in a FIL forest store (predict)
    and how FIL aggregates them into class margins/regression result/best class.
    MIN_LEAF_ALGO/MAX_LEAF_ALGO bound the valid values for iteration.
 **/
enum leaf_algo_t {
  /** lower bound of the valid range, for iteration purposes */
  MIN_LEAF_ALGO = 0,
  /** storing a class probability or regression summand. We add all margins
      together and determine regression result or use threshold to determine
      one of the two classes. **/
  FLOAT_UNARY_BINARY = 0,
  /** storing a class label. Trees vote on the resulting class.
      Probabilities are just normalized votes. */
  CATEGORICAL_LEAF = 1,
  /** 1-vs-rest, or tree-per-class, where trees are assigned round-robin to
      consecutive categories and predict a floating-point margin. Used in
      Gradient Boosted Decision Trees. We sum margins for each group separately
      **/
  GROVE_PER_CLASS = 2,
  /** 1-vs-rest, or tree-per-class, where trees are assigned round-robin to
      consecutive categories and predict a floating-point margin. Used in
      Gradient Boosted Decision Trees. We sum margins for each group separately
      This is a more specific version of GROVE_PER_CLASS.
      _FEW_CLASSES means fewer (or as many) classes than threads. **/
  GROVE_PER_CLASS_FEW_CLASSES = 3,
  /** 1-vs-rest, or tree-per-class, where trees are assigned round-robin to
      consecutive categories and predict a floating-point margin. Used in
      Gradient Boosted Decision Trees. We sum margins for each group separately
      This is a more specific version of GROVE_PER_CLASS.
      _MANY_CLASSES means more classes than threads. **/
  GROVE_PER_CLASS_MANY_CLASSES = 4,
  /** Leaf contains an index into a vector of class probabilities. **/
  VECTOR_LEAF = 5,
  // to be extended
  /** upper bound of the valid range, for iteration purposes */
  MAX_LEAF_ALGO = 5
};
template <typename node_t>
struct tree;
/** leaf_output_t maps a (leaf_algo, real_t) pair to the type produced when a
    leaf is read: a floating-point margin/summand for the FLOAT/GROVE
    algorithms, or an int (class label, resp. vector-leaf index) otherwise.
    The primary template is empty so unmapped combinations fail to compile. */
template <leaf_algo_t leaf_algo, typename real_t>
struct leaf_output_t {};
// regression summand / binary-classification margin
template <typename real_t>
struct leaf_output_t<leaf_algo_t::FLOAT_UNARY_BINARY, real_t> {
  typedef real_t T;
};
// class label voted by the tree
template <typename real_t>
struct leaf_output_t<leaf_algo_t::CATEGORICAL_LEAF, real_t> {
  typedef int T;
};
// per-class margin (grove-per-class variants)
template <typename real_t>
struct leaf_output_t<leaf_algo_t::GROVE_PER_CLASS_FEW_CLASSES, real_t> {
  typedef real_t T;
};
template <typename real_t>
struct leaf_output_t<leaf_algo_t::GROVE_PER_CLASS_MANY_CLASSES, real_t> {
  typedef real_t T;
};
// index into the vector-leaf probability table
template <typename real_t>
struct leaf_output_t<leaf_algo_t::VECTOR_LEAF, real_t> {
  typedef int T;
};
/** forest_params_t holds the parameters used to initialize the forest in the
    predictor (shape of the trees, output post-processing and launch tuning).
    Note that threshold and global_bias are double regardless of the forest's
    floating-point type. */
struct forest_params_t {
  // total number of nodes; ignored for dense forests
  int num_nodes;
  // maximum depth; ignored for sparse forests
  int depth;
  // ntrees is the number of trees
  int num_trees;
  // num_cols is the number of columns in the data
  int num_cols;
  // leaf_algo determines what the leaves store (predict)
  leaf_algo_t leaf_algo;
  // algo is the inference algorithm;
  // sparse forests do not distinguish between NAIVE and TREE_REORG
  algo_t algo;
  // output is the desired output type
  output_t output;
  // threshold is used to for classification if leaf_algo == FLOAT_UNARY_BINARY && (output &
  // OUTPUT_CLASS) != 0 && !predict_proba, and is ignored otherwise
  double threshold;
  // global_bias is added to the sum of tree predictions
  // (after averaging, if it is used, but before any further transformations)
  double global_bias;
  // only used for CATEGORICAL_LEAF inference. since we're storing the
  // labels in leaves instead of the whole vector, this keeps track
  // of the number of classes
  int num_classes;
  // blocks_per_sm, if nonzero, works as a limit to improve cache hit rate for larger forests
  // suggested values (if nonzero) are from 2 to 7
  // if zero, launches ceildiv(num_rows, NITEMS) blocks
  int blocks_per_sm;
  // threads_per_tree determines how many threads work on a single tree
  // at once inside a block (sharing trees means splitting input rows)
  int threads_per_tree;
  // n_items is how many input samples (items) any thread processes. If 0 is given,
  // choose most (up to MAX_N_ITEMS) that fit into shared memory.
  int n_items;
};
/// FIL_TPB is the number of threads per block to use with FIL kernels
const int FIL_TPB = 256;
// 1 << 24 is the largest integer representable exactly as a float.
// To avoid edge cases, 16'777'214 is the most FIL will use.
constexpr std::int32_t MAX_FIL_INT_FLOAT = (1 << 24) - 2;
/// fetch_bit returns bit number `bit` (as 0 or 1) from a packed bit array,
/// where bits are stored LSB-first within each byte.
__host__ __device__ __forceinline__ int fetch_bit(const uint8_t* array, uint32_t bit)
{
  uint32_t byte_idx    = bit / BITS_PER_BYTE;
  uint32_t bit_in_byte = bit % BITS_PER_BYTE;
  return (array[byte_idx] >> bit_in_byte) & 1;
}
/** categorical_sets is a non-owning, device-readable view of the categorical
    split data: a concatenated bit pool plus per-feature category counts.
    The backing memory is owned by cat_sets_owner (host) or
    cat_sets_device_owner (device). */
struct categorical_sets {
  // arrays are const to use fast GPU read instructions by default
  // arrays from each node ID are concatenated first, then from all categories
  const uint8_t* bits = nullptr;
  // number of matching categories FIL stores in the bit array, per feature ID
  const float* fid_num_cats = nullptr;
  std::size_t bits_size     = 0;
  // either 0 or num_cols. When 0, indicates intended empty array.
  std::size_t fid_num_cats_size = 0;
  __host__ __device__ __forceinline__ bool cats_present() const
  {
    // If this is constructed from cat_sets_owner, will return true; but false by default
    // We have converted all empty categorical nodes to NAN-threshold numerical nodes.
    return fid_num_cats != nullptr;
  }
  // the bit-pool offset stored in a node fits in 32 bits because tree index
  // and within-tree node index are both ints; hence the uint32_t bit index
  // passed to fetch_bit below
  template <typename node_t>
  __host__ __device__ __forceinline__ int category_matches(
    node_t node, typename node_t::real_type category) const
  {
    // standard boolean packing. This layout has better ILP
    // node.set() is global across feature IDs and is an offset (as opposed
    // to set number). If we run out of uint32_t and we have hundreds of
    // features with similar categorical feature count, we may consider
    // storing node ID within nodes with same feature ID and look up
    // {.fid_num_cats, .first_node_offset} = ...[feature_id]
    /* category < 0.0f or category > INT_MAX is equivalent to out-of-dictionary category
      (not matching, branch left). -0.0f represents category 0.
      If (float)(int)category != category, we will discard the fractional part.
      E.g. 3.8f represents category 3 regardless of fid_num_cats value.
      FIL will reject a model where an integer within [0, fid_num_cats] cannot be represented
      precisely as a 32-bit float.
    */
    using real_t = typename node_t::real_type;
    return category < static_cast<real_t>(fid_num_cats[node.fid()]) && category >= real_t(0) &&
           fetch_bit(bits + node.set(), static_cast<uint32_t>(static_cast<int>(category)));
  }
  /// number of bytes needed to store a bit mask over num_cats categories
  static int sizeof_mask_from_num_cats(int num_cats)
  {
    return raft::ceildiv(num_cats, BITS_PER_BYTE);
  }
  /// number of bytes one category set over the given feature occupies in `bits`
  int sizeof_mask(int feature_id) const
  {
    return sizeof_mask_from_num_cats(static_cast<int>(fid_num_cats[feature_id]));
  }
};
// lets any tree determine a child index for a node in a generic fashion
// used in fil_test.cu for its child_index() in CPU predicting
struct tree_base {
  categorical_sets cat_sets;
  /** returns the index of the child to descend to from `node` (located at
      node_idx) given the feature value `val`:
      - NaN values follow the node's default direction (branch left iff
        def_left());
      - categorical nodes branch right iff the category bit is set;
      - numerical nodes branch right iff val >= threshold.
      The right child directly follows the left one, hence `+ cond`. */
  template <bool CATS_SUPPORTED, typename node_t>
  __host__ __device__ __forceinline__ int child_index(const node_t& node,
                                                      int node_idx,
                                                      typename node_t::real_type val) const
  {
    bool cond;
    if (isnan(val)) {
      cond = !node.def_left();
    } else if (CATS_SUPPORTED && node.is_categorical()) {
      cond = cat_sets.category_matches(node, val);
    } else {
      cond = val >= node.thresh();
    }
    return node.left(node_idx) + cond;
  }
};
// Per-feature bookkeeping gathered while importing a model.
// max_matching == -1 means the feature has no matching categories.
struct cat_feature_counters {
  int max_matching = -1;  // largest matching category seen for this feature
  int n_nodes      = 0;   // number of categorical nodes that use this feature
  // merge two counters: keep the larger category bound, sum the node counts
  static cat_feature_counters combine(cat_feature_counters a, cat_feature_counters b)
  {
    cat_feature_counters merged;
    merged.max_matching = a.max_matching > b.max_matching ? a.max_matching : b.max_matching;
    merged.n_nodes      = a.n_nodes + b.n_nodes;
    return merged;
  }
};
// used only during model import. For inference, trimmed down using cat_sets_owner::accessor()
// in internal.cuh, as opposed to fil_test.cu, because importing from treelite will require it
struct cat_sets_owner {
  // arrays from each node ID are concatenated first, then from all categories
  std::vector<uint8_t> bits;
  // largest matching category in the model, per feature ID. uses int because GPU code can only fit
  // int
  std::vector<float> fid_num_cats;
  // how many categorical nodes use a given feature id. Used for model shape string.
  std::vector<std::size_t> n_nodes;
  // per tree, size and offset of bit pool within the overall bit pool
  std::vector<std::size_t> bit_pool_offsets;

  /// non-owning view over the host arrays (e.g. for uploading to the device)
  categorical_sets accessor() const
  {
    return {
      .bits              = bits.data(),
      .fid_num_cats      = fid_num_cats.data(),
      .bits_size         = bits.size(),
      .fid_num_cats_size = fid_num_cats.size(),
    };
  }

  /// records, for each feature, its category count (max matching category + 1)
  /// and the number of categorical nodes that use the feature
  void consume_counters(const std::vector<cat_feature_counters>& counters)
  {
    fid_num_cats.reserve(fid_num_cats.size() + counters.size());
    n_nodes.reserve(n_nodes.size() + counters.size());
    for (cat_feature_counters cf : counters) {
      fid_num_cats.push_back(static_cast<float>(cf.max_matching + 1));
      n_nodes.push_back(cf.n_nodes);
    }
  }

  /// converts per-tree bit pool sizes into an exclusive scan of offsets and
  /// sizes the overall bit pool to the total
  void consume_bit_pool_sizes(const std::vector<std::size_t>& bit_pool_sizes)
  {
    // guard: with no trees there is nothing to record, and the unconditional
    // .back() / size() - 1 below would be undefined behavior on an empty vector
    if (bit_pool_sizes.empty()) { return; }
    bit_pool_offsets.reserve(bit_pool_offsets.size() + bit_pool_sizes.size());
    bit_pool_offsets.push_back(0);
    for (std::size_t i = 0; i < bit_pool_sizes.size() - 1; ++i) {
      bit_pool_offsets.push_back(bit_pool_offsets.back() + bit_pool_sizes[i]);
    }
    bits.resize(bit_pool_offsets.back() + bit_pool_sizes.back());
  }

  cat_sets_owner() {}
  // take the by-value vectors by move to avoid an extra copy
  cat_sets_owner(std::vector<uint8_t> bits_, std::vector<float> fid_num_cats_)
    : bits(std::move(bits_)), fid_num_cats(std::move(fid_num_cats_))
  {
  }
};
std::ostream& operator<<(std::ostream& os, const cat_sets_owner& cso);
/** cat_sets_device_owner owns device copies of the categorical-split bit pool
    and per-feature category counts; accessor() exposes a non-owning
    categorical_sets view over the device arrays. */
struct cat_sets_device_owner {
  // arrays from each node ID are concatenated first, then from all categories
  rmm::device_uvector<uint8_t> bits;
  // largest matching category in the model, per feature ID
  rmm::device_uvector<float> fid_num_cats;
  categorical_sets accessor() const
  {
    return {
      .bits              = bits.data(),
      .fid_num_cats      = fid_num_cats.data(),
      .bits_size         = bits.size(),
      .fid_num_cats_size = fid_num_cats.size(),
    };
  }
  /// constructs empty device arrays associated with the given stream
  cat_sets_device_owner(cudaStream_t stream) : bits(0, stream), fid_num_cats(0, stream) {}
  /** allocates device storage on `stream` and asynchronously copies the data
      referenced by cat_sets (cudaMemcpyDefault handles host or device
      sources). Note: no synchronization happens here — the caller must order
      further use on the same stream or synchronize it. */
  cat_sets_device_owner(categorical_sets cat_sets, cudaStream_t stream)
    : bits(cat_sets.bits_size, stream), fid_num_cats(cat_sets.fid_num_cats_size, stream)
  {
    // the bit pool offset is stored in a node's int field, so its size may
    // not exceed INT_MAX + 1
    ASSERT(bits.size() <= std::size_t(INT_MAX) + std::size_t(1),
           "too many categories/categorical nodes: cannot store bits offset in node");
    if (cat_sets.fid_num_cats_size > 0) {
      ASSERT(cat_sets.fid_num_cats != nullptr, "internal error: cat_sets.fid_num_cats is nil");
      RAFT_CUDA_TRY(cudaMemcpyAsync(fid_num_cats.data(),
                                    cat_sets.fid_num_cats,
                                    fid_num_cats.size() * sizeof(float),
                                    cudaMemcpyDefault,
                                    stream));
    }
    if (cat_sets.bits_size > 0) {
      ASSERT(cat_sets.bits != nullptr, "internal error: cat_sets.bits is nil");
      RAFT_CUDA_TRY(cudaMemcpyAsync(
        bits.data(), cat_sets.bits, bits.size() * sizeof(uint8_t), cudaMemcpyDefault, stream));
    }
  }
  /// releases the device memory held by both arrays
  void release()
  {
    bits.release();
    fid_num_cats.release();
  }
};
/** init uses params, trees and nodes to initialize the forest
* with nodes stored in pf
* @tparam fil_node_t node type to use with the forest;
* must be sparse_node16, sparse_node8 or dense_node
* @param h cuML handle used by this function
* @param pf pointer to where to store the newly created forest
* @param trees for sparse forests, indices of tree roots in the nodes array, of length
params->ntrees; ignored for dense forests
* @param nodes nodes for the forest, of length params->num_nodes for sparse
or (2**(params->depth + 1) - 1) * params->ntrees for dense forests
* @param params pointer to parameters used to initialize the forest
* @param vector_leaf optional vector leaves
*/
template <typename fil_node_t, typename real_t = typename fil_node_t::real_type>
void init(const raft::handle_t& h,
forest_t<real_t>* pf,
const categorical_sets& cat_sets,
const std::vector<real_t>& vector_leaf,
const int* trees,
const fil_node_t* nodes,
const forest_params_t* params);
struct predict_params;
} // namespace fil
static const int MAX_SHM_STD = 48 * 1024; // maximum architecture-independent size
std::string output2str(fil::output_t output);
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/fil/common.cuh | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** @file common.cuh Common GPU functionality */
#pragma once
#include <cub/cub.cuh>
#include <stdexcept>
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <cuml/fil/fil.h>
#include <raft/util/cuda_utils.cuh>
#include "internal.cuh"
namespace ML {
namespace fil {
/** number of nodes in a complete binary tree of the given depth (root-only tree has depth 0) */
__host__ __device__ __forceinline__ int tree_num_nodes(int depth)
{
  // a complete binary tree with depth+1 levels contains 2^(depth+1) - 1 nodes
  int num_levels = depth + 1;
  return (1 << num_levels) - 1;
}
/** total node count of a dense forest: num_trees complete binary trees of the given depth */
__host__ __device__ __forceinline__ int forest_num_nodes(int num_trees, int depth)
{
  int nodes_per_tree = tree_num_nodes(depth);
  return nodes_per_tree * num_trees;
}
/** data shared by the dense and sparse storage classes */
template <typename real_t>
struct storage_base {
  // categorical feature sets (bit pools + per-feature category counts)
  categorical_sets sets_;
  // leaf-vector array; presumably only populated for VECTOR_LEAF models — confirm at call sites
  real_t* vector_leaf_;
  bool cats_present() const { return sets_.cats_present(); }
};
/** represents a dense tree */
template <typename real_t>
struct tree<dense_node<real_t>> : tree_base {
  using real_type = real_t;
  // node_pitch is the stride (in nodes) between consecutive nodes of this tree;
  // it is > 1 when trees are interleaved (see the reorg layouts in storage below)
  __host__ __device__ tree(categorical_sets cat_sets, dense_node<real_t>* nodes, int node_pitch)
    : tree_base{cat_sets}, nodes_(nodes), node_pitch_(node_pitch)
  {
  }
  // i-th node of this tree in breadth-first, left-to-right order
  __host__ __device__ const dense_node<real_t>& operator[](int i) const
  {
    return nodes_[i * node_pitch_];
  }
  dense_node<real_t>* nodes_ = nullptr;
  int node_pitch_             = 0;
};
/** partial specialization of storage. Stores the forest on GPU as a collection of dense nodes */
template <typename real_t>
struct storage<dense_node<real_t>> : storage_base<real_t> {
  using real_type = real_t;
  using node_t    = dense_node<real_t>;
  // tree_stride: offset (in nodes) between the roots of consecutive trees;
  // node_pitch: stride between consecutive nodes within one tree (see tree<dense_node>)
  __host__ __device__ storage(categorical_sets cat_sets,
                              real_t* vector_leaf,
                              node_t* nodes,
                              int num_trees,
                              int tree_stride,
                              int node_pitch)
    : storage_base<real_t>{cat_sets, vector_leaf},
      nodes_(nodes),
      num_trees_(num_trees),
      tree_stride_(tree_stride),
      node_pitch_(node_pitch)
  {
  }
  __host__ __device__ int num_trees() const { return num_trees_; }
  // lightweight view of the i-th tree
  __host__ __device__ tree<node_t> operator[](int i) const
  {
    // sets_ is a dependent name (in template sense)
    return tree<node_t>(this->sets_, nodes_ + i * tree_stride_, node_pitch_);
  }
  node_t* nodes_   = nullptr;
  int num_trees_   = 0;
  int tree_stride_ = 0;
  int node_pitch_  = 0;
};
/** sparse tree */
template <typename node_t>
struct tree : tree_base {
  using real_type = typename node_t::real_type;
  // nodes points at this tree's root; sparse nodes are stored contiguously per tree
  __host__ __device__ tree(categorical_sets cat_sets, node_t* nodes)
    : tree_base{cat_sets}, nodes_(nodes)
  {
  }
  __host__ __device__ const node_t& operator[](int i) const { return nodes_[i]; }
  node_t* nodes_ = nullptr;
};
/** storage stores the forest on GPU as a collection of sparse nodes */
template <typename node_t_>
struct storage : storage_base<typename node_t_::real_type> {
  using node_t    = node_t_;
  using real_type = typename node_t::real_type;
  // trees_[i] is the index of tree i's root within the flat nodes_ array
  int* trees_    = nullptr;
  node_t* nodes_ = nullptr;
  int num_trees_ = 0;
  __host__ __device__ storage(
    categorical_sets cat_sets, real_type* vector_leaf, int* trees, node_t* nodes, int num_trees)
    : storage_base<real_type>{cat_sets, vector_leaf},
      trees_(trees),
      nodes_(nodes),
      num_trees_(num_trees)
  {
  }
  __host__ __device__ int num_trees() const { return num_trees_; }
  // lightweight view of the i-th tree, rooted at nodes_[trees_[i]]
  __host__ __device__ tree<node_t> operator[](int i) const
  {
    // sets_ is a dependent name (in template sense)
    return tree<node_t>(this->sets_, &nodes_[trees_[i]]);
  }
};
// shorthand aliases for the concrete storage instantiations used by FIL
using dense_storage_f32    = storage<dense_node<float>>;
using dense_storage_f64    = storage<dense_node<double>>;
using sparse_storage16_f32 = storage<sparse_node16<float>>;
using sparse_storage16_f64 = storage<sparse_node16<double>>;
using sparse_storage8      = storage<sparse_node8>;
/// all model parameters mostly required to compute shared memory footprint,
/// also the footprint itself
struct shmem_size_params {
  /// for class probabilities, this is the number of classes considered;
  /// num_classes is ignored otherwise
  int num_classes = 1;
  // leaf_algo determines what the leaves store (predict) and how FIL
  // aggregates them into class margins/predicted class/regression answer
  leaf_algo_t leaf_algo = leaf_algo_t::FLOAT_UNARY_BINARY;
  /// how many columns an input row has
  int num_cols = 0;
  /// whether to predict class probabilities or classes (or regress)
  bool predict_proba = false;
  /// are the input columns are prefetched into shared
  /// memory before inferring the row in question
  bool cols_in_shmem = true;
  // are there categorical inner nodes? doesn't currently affect shared memory size,
  // but participates in template dispatch and may affect it later
  bool cats_present = false;
  /// log2_threads_per_tree determines how many threads work on a single tree
  /// at once inside a block (sharing trees means splitting input rows)
  int log2_threads_per_tree = 0;
  /// n_items is how many input samples (items) any thread processes. If 0 is given,
  /// choose the reasonable most (<= MAX_N_ITEMS) that fit into shared memory. See init_n_items()
  int n_items = 0;
  // block_dim_x is the CUDA block size. Set by dispatch_on_leaf_algo(...)
  int block_dim_x = 0;
  /// shm_sz is the associated shared memory footprint
  int shm_sz = INT_MAX;
  /// sizeof_real is the size in bytes of all floating-point variables during inference
  std::size_t sizeof_real = 4;
  // row stride (in elements) of input rows cached in shared memory; padded to an odd
  // count — presumably to avoid shared-memory bank conflicts (confirm against infer_k)
  __host__ __device__ int sdata_stride()
  {
    return num_cols | 1;  // pad to odd
  }
  // bytes of shared memory used for caching input columns (0 when not cached);
  // note: computed in size_t (sizeof_real) then implicitly narrowed to int
  __host__ __device__ int cols_shmem_size()
  {
    return cols_in_shmem ? sizeof_real * sdata_stride() * n_items << log2_threads_per_tree : 0;
  }
  template <int NITEMS, typename real_t, leaf_algo_t leaf_algo>
  size_t get_smem_footprint();
};
// predict_params are parameters for prediction
struct predict_params : shmem_size_params {
  // inherits the shared-memory sizing parameters; everything below is per-predict() state
  predict_params(shmem_size_params ssp) : shmem_size_params(ssp) {}
  // Model parameters.
  algo_t algo;
  // number of outputs for the forest per each data row
  int num_outputs;
  // Data parameters; preds and data are pointers to either float or double.
  void* preds;
  const void* data;
  // number of data rows (instances) to predict on
  int64_t num_rows;
  // to signal infer kernel to apply softmax and also average prior to that
  // for GROVE_PER_CLASS for predict_proba
  output_t transform;
  // number of blocks to launch; 0 presumably means "derive from num_rows" — confirm in infer.cu
  int num_blocks;
};
/** successor of `algo` in enumerator order; used to iterate leaf algorithms at compile time */
constexpr leaf_algo_t next_leaf_algo(leaf_algo_t algo)
{
  // the enum converts to int implicitly; step to the next enumerator value
  int next_value = algo + 1;
  return static_cast<leaf_algo_t>(next_value);
}
// compile-time bundle of the kernel template parameters; the nested aliases below let the
// dispatch chain rewrite one parameter at a time while keeping the others fixed
template <bool COLS_IN_SHMEM_  = false,
          bool CATS_SUPPORTED_ = false,
          leaf_algo_t LEAF_ALGO_ = MIN_LEAF_ALGO,
          int N_ITEMS_           = 1>
struct KernelTemplateParams {
  static const bool COLS_IN_SHMEM     = COLS_IN_SHMEM_;
  static const bool CATS_SUPPORTED    = CATS_SUPPORTED_;
  static const leaf_algo_t LEAF_ALGO  = LEAF_ALGO_;
  static const int N_ITEMS            = N_ITEMS_;
  // same params with CATS_SUPPORTED replaced
  template <bool _cats_supported>
  using ReplaceCatsSupported =
    KernelTemplateParams<COLS_IN_SHMEM, _cats_supported, LEAF_ALGO, N_ITEMS>;
  // same params with LEAF_ALGO advanced to the next enumerator
  using NextLeafAlgo =
    KernelTemplateParams<COLS_IN_SHMEM, CATS_SUPPORTED, next_leaf_algo(LEAF_ALGO), N_ITEMS>;
  // same params with LEAF_ALGO replaced outright
  template <leaf_algo_t NEW_LEAF_ALGO>
  using ReplaceLeafAlgo =
    KernelTemplateParams<COLS_IN_SHMEM, CATS_SUPPORTED, NEW_LEAF_ALGO, N_ITEMS>;
  // same params with N_ITEMS incremented by one
  using IncNItems = KernelTemplateParams<COLS_IN_SHMEM, CATS_SUPPORTED, LEAF_ALGO, N_ITEMS + 1>;
};
// inherit from this struct to pass the functor to dispatch_on_fil_template_params()
// compiler will prevent defining a .run() method with a different output type
// inherit from this struct to pass the functor to dispatch_on_fil_template_params()
// compiler will prevent defining a .run() method with a different output type
template <typename T>
struct dispatch_functor {
  typedef T return_t;
  // derived classes specialize run<KernelParams>() for each template-parameter bundle
  template <class KernelParams = KernelTemplateParams<>>
  T run(predict_params);
};
namespace dispatch {
// recursively (at compile time) tries N_ITEMS = 1..MAX_N_ITEMS until the runtime
// params.n_items matches, then invokes the functor with the fully-resolved KernelParams
template <class KernelParams, class Func, class T = typename Func::return_t>
T dispatch_on_n_items(Func func, predict_params params)
{
  if (params.n_items == KernelParams::N_ITEMS) {
    return func.template run<KernelParams>(params);
  } else if constexpr (KernelParams::N_ITEMS < MAX_N_ITEMS) {
    // try the next N_ITEMS value; `if constexpr` bounds the template recursion
    return dispatch_on_n_items<class KernelParams::IncNItems>(func, params);
  } else {
    ASSERT(false, "n_items > %d or < 1", MAX_N_ITEMS);
  }
  return T();  // appeasing the compiler
}
// recursively (at compile time) walks the leaf_algo_t enumerators until the runtime
// params.leaf_algo matches; also sets block_dim_x and, for GROVE_PER_CLASS, refines the
// leaf algorithm into the few-/many-classes variant before dispatching on n_items
template <class KernelParams, class Func, class T = typename Func::return_t>
T dispatch_on_leaf_algo(Func func, predict_params params)
{
  if (params.leaf_algo == KernelParams::LEAF_ALGO) {
    if constexpr (KernelParams::LEAF_ALGO == GROVE_PER_CLASS) {
      if (params.num_classes <= FIL_TPB) {
        // round the block size down to a multiple of num_classes
        params.block_dim_x = FIL_TPB - FIL_TPB % params.num_classes;
        using Next         = typename KernelParams::ReplaceLeafAlgo<GROVE_PER_CLASS_FEW_CLASSES>;
        return dispatch_on_n_items<Next>(func, params);
      } else {
        params.block_dim_x = FIL_TPB;
        using Next         = typename KernelParams::ReplaceLeafAlgo<GROVE_PER_CLASS_MANY_CLASSES>;
        return dispatch_on_n_items<Next>(func, params);
      }
    } else {
      params.block_dim_x = FIL_TPB;
      return dispatch_on_n_items<KernelParams>(func, params);
    }
  } else if constexpr (next_leaf_algo(KernelParams::LEAF_ALGO) <= MAX_LEAF_ALGO) {
    // try the next leaf algorithm; `if constexpr` bounds the template recursion
    return dispatch_on_leaf_algo<class KernelParams::NextLeafAlgo>(func, params);
  } else {
    ASSERT(false, "internal error: dispatch: invalid leaf_algo %d", params.leaf_algo);
  }
  return T();  // appeasing the compiler
}
// bakes the runtime cats_present flag into the CATS_SUPPORTED template parameter,
// then continues dispatching on the leaf algorithm
template <class KernelParams, class Func, class T = typename Func::return_t>
T dispatch_on_cats_supported(Func func, predict_params params)
{
  if (params.cats_present) {
    return dispatch_on_leaf_algo<typename KernelParams::ReplaceCatsSupported<true>>(func, params);
  }
  return dispatch_on_leaf_algo<typename KernelParams::ReplaceCatsSupported<false>>(func, params);
}
// bakes the runtime cols_in_shmem flag into the COLS_IN_SHMEM template parameter,
// starting the dispatch chain from default KernelTemplateParams
template <class Func, class T = typename Func::return_t>
T dispatch_on_cols_in_shmem(Func func, predict_params params)
{
  if (params.cols_in_shmem) {
    return dispatch_on_cats_supported<KernelTemplateParams<true>>(func, params);
  }
  return dispatch_on_cats_supported<KernelTemplateParams<false>>(func, params);
}
} // namespace dispatch
// public entry point of the runtime -> template-parameter dispatch chain:
// cols_in_shmem -> cats_supported -> leaf_algo -> n_items -> Func::run<KernelParams>()
template <class Func, class T = typename Func::return_t>
T dispatch_on_fil_template_params(Func func, predict_params params)
{
  return dispatch::dispatch_on_cols_in_shmem(func, params);
}
// For an example of Func declaration, see this.
// the .run(predict_params) method will be defined in infer.cu
// dispatch functor returning the shared-memory footprint (bytes) for a given
// template-parameter bundle; the .run() definitions live in infer.cu
struct compute_smem_footprint : dispatch_functor<int> {
  template <class KernelParams = KernelTemplateParams<>>
  int run(predict_params);
};
template <int NITEMS,
leaf_algo_t leaf_algo,
bool cols_in_shmem,
bool CATS_SUPPORTED,
class storage_type>
__global__ void infer_k(storage_type forest, predict_params params);
// infer() calls the inference kernel with the parameters on the stream
template <typename storage_type>
void infer(storage_type forest, predict_params params, cudaStream_t stream);
} // namespace fil
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/fil/fil.cu | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** @file fil.cu fil.cu implements the forest data types (dense and sparse), including their
creation and prediction (the main inference kernel is defined in infer.cu). */
#include "common.cuh" // for predict_params, storage, storage
#include "internal.cuh" // for cat_sets_device_owner, categorical_sets, output_t,
#include <cuml/fil/fil.h> // for algo_t,
#include <raft/core/error.hpp> // for ASSERT
#include <raft/core/handle.hpp> // for handle_t
#include <raft/util/cudart_utils.hpp> // for RAFT_CUDA_TRY, cudaStream_t,
#include <rmm/device_uvector.hpp> // for device_uvector
#include <thrust/host_vector.h> // for host_vector
#include <cmath> // for expf
#include <cstddef> // for size_t
#include <cstdint> // for uint8_t
namespace ML {
namespace fil {
/** logistic sigmoid: maps any real x into (0, 1) */
template <typename real_t>
__host__ __device__ real_t sigmoid(real_t x)
{
  const real_t one = real_t(1);
  return one / (one + exp(-x));
}
/** performs additional transformations on the array of forest predictions
(preds) of size n; the transformations are defined by output, and include
averaging (multiplying by inv_num_trees), adding global_bias (always done),
sigmoid and applying threshold. in case of complement_proba,
fills in the complement probability */
template <typename real_t>
__global__ void transform_k(real_t* preds,
                            size_t n,
                            output_t output,
                            real_t inv_num_trees,
                            real_t threshold,
                            real_t global_bias,
                            bool complement_proba)
{
  // one thread per output value; guard the grid tail
  size_t i = threadIdx.x + size_t(blockIdx.x) * blockDim.x;
  if (i >= n) return;
  // in complement mode, only even threads work: each owns the (p, 1-p) pair at i, i+1
  if (complement_proba && i % 2 != 0) return;
  real_t result = preds[i];
  if ((output & output_t::AVG) != 0) result *= inv_num_trees;
  result += global_bias;
  if ((output & output_t::SIGMOID) != 0) result = sigmoid(result);
  // will not be done on CATEGORICAL_LEAF because the whole kernel will not run
  if ((output & output_t::CLASS) != 0) { result = result > threshold ? real_t(1) : real_t(0); }
  // sklearn outputs numpy array in 'C' order, with the number of classes being last dimension
  // that is also the default order, so we should use the same one
  if (complement_proba) {
    preds[i]     = real_t(1) - result;
    preds[i + 1] = result;
  } else
    preds[i] = result;
}
// needed to avoid expanding the dispatch template into unresolved
// compute_smem_footprint::run() calls. In infer.cu, we don't export those symbols,
// but rather one symbol for the whole template specialization, as below.
extern template int dispatch_on_fil_template_params(compute_smem_footprint, predict_params);
// forest is the base type for all forests and contains data and methods common
// to both dense and sparse forests
template <typename real_t>
struct forest {
  forest(const raft::handle_t& h) : cat_sets_(h.get_stream()), vector_leaf_(0, h.get_stream()) {}
  // query the device's opt-in per-block shared memory limit and sanity-check it against
  // the 16-bit indexing used inside infer_k
  void init_shmem_size(int device)
  {
    /// the most shared memory a kernel can request on the GPU in question
    RAFT_CUDA_TRY(
      cudaDeviceGetAttribute(&max_shm_, cudaDevAttrMaxSharedMemoryPerBlockOptin, device));
    /* Our GPUs have been growing the shared memory size generation after
     generation. Eventually, a CUDA GPU might come by that supports more
     shared memory that would fit into unsigned 16-bit int. For such a GPU,
     we would have otherwise silently overflowed the index calculation due
     to short division. It would have failed cpp tests, but we might forget
     about this source of bugs, if not for the failing assert. */
    ASSERT(max_shm_ < int(proba_ssp_.sizeof_real) * std::numeric_limits<uint16_t>::max(),
           "internal error: please use a larger type inside"
           " infer_k for column count");
  }
  // pick the largest n_items (and cols_in_shmem) whose footprint fits in max_shm_,
  // separately for the predict and predict_proba configurations
  void init_n_items(int device)
  {
    // searching for the most items per block while respecting the shared
    // memory limits creates a full linear programming problem.
    // solving it in a single equation looks less tractable than this
    for (bool predict_proba : {false, true}) {
      shmem_size_params& ssp_ = predict_proba ? proba_ssp_ : class_ssp_;
      ssp_.predict_proba      = predict_proba;
      shmem_size_params ssp   = ssp_;
      // if n_items was not provided, try from 1 to MAX_N_ITEMS. Otherwise, use as-is.
      int min_n_items = ssp.n_items == 0 ? 1 : ssp.n_items;
      int max_n_items =
        ssp.n_items == 0 ? (algo_ == algo_t::BATCH_TREE_REORG ? MAX_N_ITEMS : 1) : ssp.n_items;
      for (bool cols_in_shmem : {false, true}) {
        ssp.cols_in_shmem = cols_in_shmem;
        for (ssp.n_items = min_n_items; ssp.n_items <= max_n_items; ++ssp.n_items) {
          ssp.shm_sz = dispatch_on_fil_template_params(compute_smem_footprint(), ssp);
          // keep the last (largest) configuration that still fits
          if (ssp.shm_sz < max_shm_) ssp_ = ssp;
        }
      }
      ASSERT(max_shm_ >= ssp_.shm_sz,
             "FIL out of shared memory. Perhaps the maximum number of \n"
             "supported classes is exceeded? 5'000 would still be safe.");
    }
  }
  // derive a fixed grid size from blocks_per_sm, capped by occupancy and SM count
  void init_fixed_block_count(int device, int blocks_per_sm)
  {
    int max_threads_per_sm, sm_count;
    RAFT_CUDA_TRY(
      cudaDeviceGetAttribute(&max_threads_per_sm, cudaDevAttrMaxThreadsPerMultiProcessor, device));
    blocks_per_sm = std::min(blocks_per_sm, max_threads_per_sm / FIL_TPB);
    RAFT_CUDA_TRY(cudaDeviceGetAttribute(&sm_count, cudaDevAttrMultiProcessorCount, device));
    fixed_block_count_ = blocks_per_sm * sm_count;
  }
  // copies params into members, sizes shared memory, and uploads vector leaves and
  // categorical sets to the device (asynchronously on the handle's stream)
  void init_common(const raft::handle_t& h,
                   const categorical_sets& cat_sets,
                   const std::vector<real_t>& vector_leaf,
                   const forest_params_t* params)
  {
    depth_                          = params->depth;
    num_trees_                      = params->num_trees;
    algo_                           = params->algo;
    output_                         = params->output;
    threshold_                      = static_cast<real_t>(params->threshold);
    global_bias_                    = static_cast<real_t>(params->global_bias);
    proba_ssp_.n_items              = params->n_items;
    proba_ssp_.log2_threads_per_tree = log2(params->threads_per_tree);
    proba_ssp_.leaf_algo            = params->leaf_algo;
    proba_ssp_.num_cols             = params->num_cols;
    proba_ssp_.num_classes          = params->num_classes;
    proba_ssp_.cats_present         = cat_sets.cats_present();
    proba_ssp_.sizeof_real          = sizeof(real_t);
    class_ssp_                      = proba_ssp_;

    int device          = h.get_device();
    cudaStream_t stream = h.get_stream();
    init_shmem_size(device);
    init_n_items(device);  // n_items takes priority over blocks_per_sm
    init_fixed_block_count(device, params->blocks_per_sm);

    // vector leaf
    if (!vector_leaf.empty()) {
      vector_leaf_.resize(vector_leaf.size() * sizeof(real_t), stream);

      RAFT_CUDA_TRY(cudaMemcpyAsync(vector_leaf_.data(),
                                    vector_leaf.data(),
                                    vector_leaf.size() * sizeof(real_t),
                                    cudaMemcpyHostToDevice,
                                    stream));
    }

    // categorical features
    cat_sets_ = cat_sets_device_owner(cat_sets, stream);
  }

  virtual void infer(predict_params params, cudaStream_t stream) = 0;

  // run inference and apply the output transform (averaging/sigmoid/threshold/complement)
  // as dictated by output_ and the leaf algorithm; see the big comment below for semantics
  void predict(
    const raft::handle_t& h, real_t* preds, const real_t* data, size_t num_rows, bool predict_proba)
  {
    // Initialize prediction parameters.
    predict_params params(predict_proba ? proba_ssp_ : class_ssp_);
    params.algo     = algo_;
    params.preds    = preds;
    params.data     = data;
    params.num_rows = num_rows;
    // ignored unless predict_proba is true and algo is GROVE_PER_CLASS
    params.transform = output_;
    // fixed_block_count_ == 0 means the number of thread blocks is
    // proportional to the number of rows
    params.num_blocks = fixed_block_count_;

    /**
    The binary classification / regression (FLOAT_UNARY_BINARY) predict_proba() works as follows
      (always 2 outputs):
    RAW: output the sum of tree predictions
    AVG is set: divide by the number of trees (averaging)
    SIGMOID is set: apply sigmoid
    CLASS is set: ignored
    SOFTMAX is set: error
    write the output of the previous stages and its complement

    The binary classification / regression (FLOAT_UNARY_BINARY) predict() works as follows
      (always 1 output):
    RAW (no values set): output the sum of tree predictions
    AVG is set: divide by the number of trees (averaging)
    SIGMOID is set: apply sigmoid
    CLASS is set: apply threshold (equivalent to choosing best class)
    SOFTMAX is set: error

    The multi-class classification / regression (CATEGORICAL_LEAF) predict_proba() works as follows
      (always num_classes outputs):
    RAW (no values set): output class votes
    AVG is set: divide by the number of trees (averaging, output class probability)
    SIGMOID is set: apply sigmoid
    CLASS is set: ignored
    SOFTMAX is set: error

    The multi-class classification / regression (CATEGORICAL_LEAF) predict() works as follows
      (always 1 output):
    RAW (no values set): output the label of the class with highest probability, else output label
    0. SOFTMAX is set: error All other flags (AVG, SIGMOID, CLASS) are ignored

    The multi-class classification / regression (GROVE_PER_CLASS) predict_proba() works as follows
      (always num_classes outputs):
    RAW (no values set): output class votes
    AVG is set: divide by the number of trees (averaging, output class probability)
    SIGMOID is set: apply sigmoid; if SOFTMAX is also set: error
    CLASS is set: ignored
    SOFTMAX is set: softmax is applied after averaging and global_bias

    The multi-class classification / regression (GROVE_PER_CLASS) predict() works as follows
      (always 1 output):
    RAW (no values set): output the label of the class with highest margin,
      equal margins resolved in favor of smaller label integer
    All other flags (AVG, SIGMOID, CLASS, SOFTMAX) are ignored

    The multi-class classification / regression (VECTOR_LEAF) predict_proba() works as follows
      (always num_classes outputs):
    RAW (no values set): output class votes
    AVG is set: divide by the number of trees (averaging, output class probability)
    SIGMOID is set: apply sigmoid; if SOFTMAX is also set: error
    CLASS is set: ignored
    SOFTMAX is set: softmax is applied after averaging and global_bias
    All other flags (SIGMOID, CLASS, SOFTMAX) are ignored

    The multi-class classification / regression (VECTOR_LEAF) predict() works as follows
      (always 1 output):
    RAW (no values set): output the label of the class with highest margin,
      equal margins resolved in favor of smaller label integer
    All other flags (AVG, SIGMOID, CLASS, SOFTMAX) are ignored
    */
    output_t ot = output_;
    // Treelite applies bias before softmax, but we do after.
    // Simulating treelite order, which cancels out bias.
    // If non-proba prediction used, it still will not matter
    // for the same reason softmax will not.
    real_t global_bias = (ot & output_t::SOFTMAX) != 0 ? real_t(0) : global_bias_;
    bool complement_proba = false, do_transform;

    if (predict_proba) {
      // no threshold on probabilities
      ot = output_t(ot & ~output_t::CLASS);

      switch (params.leaf_algo) {
        case leaf_algo_t::FLOAT_UNARY_BINARY:
          params.num_outputs = 2;
          complement_proba   = true;
          do_transform       = true;
          break;
        case leaf_algo_t::GROVE_PER_CLASS:
          // for GROVE_PER_CLASS, averaging happens in infer_k
          ot                 = output_t(ot & ~output_t::AVG);
          params.num_outputs = params.num_classes;
          do_transform =
            (ot != output_t::RAW && ot != output_t::SOFTMAX) || global_bias != real_t(0);
          break;
        case leaf_algo_t::CATEGORICAL_LEAF:
          params.num_outputs = params.num_classes;
          do_transform       = ot != output_t::RAW || global_bias_ != real_t(0);
          break;
        case leaf_algo_t::VECTOR_LEAF:
          // for VECTOR_LEAF, averaging happens in infer_k
          ot                 = output_t(ot & ~output_t::AVG);
          params.num_outputs = params.num_classes;
          do_transform =
            (ot != output_t::RAW && ot != output_t::SOFTMAX) || global_bias != real_t(0);
          break;
        default: ASSERT(false, "internal error: predict: invalid leaf_algo %d", params.leaf_algo);
      }
    } else {
      if (params.leaf_algo == leaf_algo_t::FLOAT_UNARY_BINARY) {
        do_transform = ot != output_t::RAW || global_bias_ != real_t(0);
      } else {
        // GROVE_PER_CLASS, CATEGORICAL_LEAF: moot since choosing best class and
        // all transforms are monotonic. also, would break current code
        do_transform = false;
      }
      params.num_outputs = 1;
    }

    // Predict using the forest.
    cudaStream_t stream = h.get_stream();
    infer(params, stream);

    if (do_transform) {
      size_t num_values_to_transform = (size_t)num_rows * (size_t)params.num_outputs;
      transform_k<<<raft::ceildiv(num_values_to_transform, (size_t)FIL_TPB), FIL_TPB, 0, stream>>>(
        preds,
        num_values_to_transform,
        ot,
        num_trees_ > 0 ? (real_t(1) / num_trees_) : real_t(1),
        threshold_,
        global_bias,
        complement_proba);
      RAFT_CUDA_TRY(cudaPeekAtLastError());
    }
  }

  virtual void free(const raft::handle_t& h)
  {
    cat_sets_.release();
    vector_leaf_.release();
  }

  virtual ~forest() {}

  int num_trees_         = 0;
  int depth_             = 0;
  algo_t algo_           = algo_t::NAIVE;
  output_t output_       = output_t::RAW;
  int fixed_block_count_ = 0;
  int max_shm_           = 0;
  real_t threshold_      = 0.5;
  real_t global_bias_    = 0;
  // shared-memory configurations: class_ssp_ for predict(), proba_ssp_ for predict_proba()
  shmem_size_params class_ssp_;
  shmem_size_params proba_ssp_;
  // vector_leaf_ is only used if {class,proba}_ssp_.leaf_algo is VECTOR_LEAF,
  // otherwise it is empty
  rmm::device_uvector<real_t> vector_leaf_;
  cat_sets_device_owner cat_sets_;
};
// dispatch functor that, for every kernel instantiation the model may use, raises the
// kernel's dynamic shared memory limit to max_shm (opt-in beyond the default limit)
template <typename storage_type>
struct opt_into_arch_dependent_shmem : dispatch_functor<void> {
  const int max_shm;
  opt_into_arch_dependent_shmem(int max_shm_) : max_shm(max_shm_) {}

  template <typename KernelParams = KernelTemplateParams<>>
  void run(predict_params p)
  {
    auto kernel = infer_k<KernelParams::N_ITEMS,
                          KernelParams::LEAF_ALGO,
                          KernelParams::COLS_IN_SHMEM,
                          KernelParams::CATS_SUPPORTED,
                          storage_type>;
    // p.shm_sz might be > max_shm or < MAX_SHM_STD, but we should not check for either, because
    // we don't run on both proba_ssp_ and class_ssp_ (only class_ssp_). This should be quick.
    RAFT_CUDA_TRY(
      cudaFuncSetAttribute(kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, max_shm));
  }
};
/** dense forest: every tree is complete; node layout depends on algo_ (see transform_trees) */
template <typename real_t>
struct dense_forest<dense_node<real_t>> : forest<real_t> {
  using node_t = dense_node<real_t>;
  dense_forest(const raft::handle_t& h) : forest<real_t>(h), nodes_(0, h.get_stream()) {}

  // reorders nodes from per-tree contiguous layout into the interleaved layout used by
  // (BATCH_)TREE_REORG; fills h_nodes_
  void transform_trees(const node_t* nodes)
  {
    /* Populate node information:
       For each tree, the nodes are still stored in the breadth-first,
       left-to-right order. However, instead of storing the nodes of the same
       tree adjacently, it uses a different layout. In this layout, the roots
       of all trees (node 0) are stored first, followed by left children of
       the roots of all trees (node 1), followed by the right children of the
       roots of all trees (node 2), and so on.
    */
    int global_node = 0;
    for (int tree = 0; tree < this->num_trees_; ++tree) {
      int tree_node = 0;
      // the counters `level` and `branch` are not used for computing node
      // indices, they are only here to highlight the node ordering within
      // each tree
      for (int level = 0; level <= this->depth_; ++level) {
        for (int branch = 0; branch < 1 << level; ++branch) {
          h_nodes_[tree_node * this->num_trees_ + tree] = nodes[global_node];
          ++tree_node;
          ++global_node;
        }
      }
    }
  }

  /// const int* trees is ignored and only provided for compatibility with
  /// sparse_forest<node_t>::init()
  void init(const raft::handle_t& h,
            const categorical_sets& cat_sets,
            const std::vector<real_t>& vector_leaf,
            const int* trees,
            const node_t* nodes,
            const forest_params_t* params)
  {
    this->init_common(h, cat_sets, vector_leaf, params);
    if (this->algo_ == algo_t::NAIVE) this->algo_ = algo_t::BATCH_TREE_REORG;

    int num_nodes = forest_num_nodes(this->num_trees_, this->depth_);
    nodes_.resize(num_nodes, h.get_stream());
    h_nodes_.resize(num_nodes);
    if (this->algo_ == algo_t::NAIVE) {
      std::copy(nodes, nodes + num_nodes, h_nodes_.begin());
    } else {
      transform_trees(nodes);
    }

    RAFT_CUDA_TRY(cudaMemcpyAsync(nodes_.data(),
                                  h_nodes_.data(),
                                  num_nodes * sizeof(node_t),
                                  cudaMemcpyHostToDevice,
                                  h.get_stream()));

    // predict_proba is a runtime parameter, and opt-in is unconditional
    dispatch_on_fil_template_params(opt_into_arch_dependent_shmem<storage<node_t>>(this->max_shm_),
                                    static_cast<predict_params>(this->class_ssp_));
    // copy must be finished before freeing the host data
    h.sync_stream();
    h_nodes_.clear();
    h_nodes_.shrink_to_fit();
  }

  virtual void infer(predict_params params, cudaStream_t stream) override
  {
    // NAIVE keeps per-tree contiguous nodes (stride = tree size, pitch = 1);
    // the reorg layouts interleave nodes across trees (stride = 1, pitch = num_trees)
    storage<node_t> forest(this->cat_sets_.accessor(),
                           this->vector_leaf_.data(),
                           nodes_.data(),
                           this->num_trees_,
                           this->algo_ == algo_t::NAIVE ? tree_num_nodes(this->depth_) : 1,
                           this->algo_ == algo_t::NAIVE ? 1 : this->num_trees_);
    fil::infer(forest, params, stream);
  }

  virtual void free(const raft::handle_t& h) override
  {
    nodes_.release();
    forest<real_t>::free(h);
  }

  rmm::device_uvector<node_t> nodes_;
  thrust::host_vector<node_t> h_nodes_;
};
/** sparse forest: nodes stored per tree, with trees_[i] giving each root's index */
template <typename node_t>
struct sparse_forest : forest<typename node_t::real_type> {
  using real_type = typename node_t::real_type;
  sparse_forest(const raft::handle_t& h)
    : forest<typename node_t::real_type>(h), trees_(0, h.get_stream()), nodes_(0, h.get_stream())
  {
  }

  // uploads tree root indices and nodes to the device and opts kernels into
  // arch-dependent shared memory
  void init(const raft::handle_t& h,
            const categorical_sets& cat_sets,
            const std::vector<real_type>& vector_leaf,
            const int* trees,
            const node_t* nodes,
            const forest_params_t* params)
  {
    this->init_common(h, cat_sets, vector_leaf, params);
    if (this->algo_ == algo_t::ALGO_AUTO) this->algo_ = algo_t::NAIVE;
    this->depth_ = 0;  // a placeholder value
    num_nodes_   = params->num_nodes;

    // trees
    trees_.resize(this->num_trees_, h.get_stream());
    RAFT_CUDA_TRY(cudaMemcpyAsync(trees_.data(),
                                  trees,
                                  sizeof(int) * this->num_trees_,
                                  cudaMemcpyHostToDevice,
                                  h.get_stream()));

    // nodes
    nodes_.resize(num_nodes_, h.get_stream());
    RAFT_CUDA_TRY(cudaMemcpyAsync(
      nodes_.data(), nodes, sizeof(node_t) * num_nodes_, cudaMemcpyHostToDevice, h.get_stream()));

    // predict_proba is a runtime parameter, and opt-in is unconditional
    dispatch_on_fil_template_params(opt_into_arch_dependent_shmem<storage<node_t>>(this->max_shm_),
                                    static_cast<predict_params>(this->class_ssp_));
  }

  virtual void infer(predict_params params, cudaStream_t stream) override
  {
    storage<node_t> forest(this->cat_sets_.accessor(),
                           this->vector_leaf_.data(),
                           trees_.data(),
                           nodes_.data(),
                           this->num_trees_);
    fil::infer(forest, params, stream);
  }

  void free(const raft::handle_t& h) override
  {
    trees_.release();
    nodes_.release();
    forest<real_type>::free(h);
  }

  int num_nodes_ = 0;
  rmm::device_uvector<int> trees_;
  rmm::device_uvector<node_t> nodes_;
};
/** Validates interdependent fields of *params before a forest is built.
    @param params forest parameters to validate
    @param dense  true for dense node storage, false for sparse
    Fires an ASSERT (which throws) describing the first violated constraint. */
void check_params(const forest_params_t* params, bool dense)
{
  if (dense) {
    ASSERT(params->depth >= 0, "depth must be non-negative for dense forests");
  } else {
    ASSERT(params->num_nodes >= 0, "num_nodes must be non-negative for sparse forests");
    ASSERT(params->algo == algo_t::NAIVE || params->algo == algo_t::ALGO_AUTO,
           "only ALGO_AUTO and NAIVE algorithms are supported "
           "for sparse forests");
  }
  ASSERT(params->num_trees >= 0, "num_trees must be non-negative");
  ASSERT(params->num_cols >= 0, "num_cols must be non-negative");
  switch (params->algo) {
    case algo_t::ALGO_AUTO:
    case algo_t::NAIVE:
    case algo_t::TREE_REORG:
    case algo_t::BATCH_TREE_REORG: break;
    default: ASSERT(false, "algo should be ALGO_AUTO, NAIVE, TREE_REORG or BATCH_TREE_REORG");
  }
  switch (params->leaf_algo) {
    case leaf_algo_t::FLOAT_UNARY_BINARY:
      if ((params->output & output_t::CLASS) != 0) {
        ASSERT(params->num_classes == 2,
               "only supporting binary"
               " classification using FLOAT_UNARY_BINARY");
      } else {
        ASSERT(params->num_classes == 1,
               "num_classes must be 1 for "
               "regression");
      }
      ASSERT((params->output & output_t::SOFTMAX) == 0,
             "softmax does not make sense for leaf_algo == FLOAT_UNARY_BINARY");
      break;
    case leaf_algo_t::GROVE_PER_CLASS:
      ASSERT(params->threads_per_tree == 1, "multiclass not supported with threads_per_tree > 1");
      ASSERT(params->num_classes > 2,
             "num_classes > 2 is required for leaf_algo == GROVE_PER_CLASS");
      ASSERT(params->num_trees % params->num_classes == 0,
             "num_classes must divide num_trees evenly for GROVE_PER_CLASS");
      break;
    case leaf_algo_t::CATEGORICAL_LEAF:
      ASSERT(params->threads_per_tree == 1, "multiclass not supported with threads_per_tree > 1");
      ASSERT(params->num_classes >= 2,
             "num_classes >= 2 is required for "
             "leaf_algo == CATEGORICAL_LEAF");
      ASSERT((params->output & output_t::SOFTMAX) == 0,
             "softmax not supported for leaf_algo == CATEGORICAL_LEAF");
      break;
    case leaf_algo_t::VECTOR_LEAF:
      ASSERT(params->num_classes >= 2,
             "num_classes >= 2 is required for "
             "leaf_algo == VECTOR_LEAF");
      break;
    default:
      // fix: VECTOR_LEAF is accepted (see case above) but was missing from the
      // old error message, misleading callers about the valid set
      ASSERT(false,
             "leaf_algo must be FLOAT_UNARY_BINARY, CATEGORICAL_LEAF,"
             " GROVE_PER_CLASS or VECTOR_LEAF");
  }
  // output_t::RAW == 0, and doesn't have a separate flag
  if ((params->output & ~output_t::ALL_SET) != 0) {
    ASSERT(false, "output should be a combination of RAW, AVG, SIGMOID, CLASS and SOFTMAX");
  }
  // nonzero iff at least one of the SIGMOID/SOFTMAX bits is clear, i.e. not both set
  ASSERT(~params->output & (output_t::SIGMOID | output_t::SOFTMAX),
         "combining softmax and sigmoid is not supported");
  ASSERT(params->blocks_per_sm >= 0, "blocks_per_sm must be nonnegative");
  ASSERT(params->n_items >= 0, "n_items must be non-negative");
  ASSERT(params->threads_per_tree > 0, "threads_per_tree must be positive");
  ASSERT((params->threads_per_tree & (params->threads_per_tree - 1)) == 0,
         "threads_per_tree must be a power of 2");
  ASSERT(params->threads_per_tree <= FIL_TPB,
         "threads_per_tree must not "
         "exceed block size %d",
         FIL_TPB);
}
/** initializes a forest of any type
 * When fil_node_t == dense_node, const int* trees is ignored
 * @param pf receives ownership of the newly created forest on success
 * Exception-safe: if the forest's init throws (e.g. via ASSERT), the
 * partially-constructed forest is deleted before the exception propagates.
 */
template <typename fil_node_t, typename real_t>
void init(const raft::handle_t& h,
          forest_t<real_t>* pf,
          const categorical_sets& cat_sets,
          const std::vector<real_t>& vector_leaf,
          const int* trees,
          const fil_node_t* nodes,
          const forest_params_t* params)
{
  check_params(params, node_traits<fil_node_t>::IS_DENSE);
  using forest_type = typename node_traits<fil_node_t>::forest;
  forest_type* f    = new forest_type(h);
  try {
    f->init(h, cat_sets, vector_leaf, trees, nodes, params);
  } catch (...) {
    delete f;  // avoid leaking the forest when init fails
    throw;
  }
  *pf = f;
}
// explicit instantiations for init(): one per supported (node type, real type)
// combination; sparse_node8 only exists in a float flavor
template void init<dense_node<float>, float>(const raft::handle_t& h,
                                             forest_t<float>* pf,
                                             const categorical_sets& cat_sets,
                                             const std::vector<float>& vector_leaf,
                                             const int* trees,
                                             const dense_node<float>* nodes,
                                             const forest_params_t* params);
template void init<dense_node<double>, double>(const raft::handle_t& h,
                                               forest_t<double>* pf,
                                               const categorical_sets& cat_sets,
                                               const std::vector<double>& vector_leaf,
                                               const int* trees,
                                               const dense_node<double>* nodes,
                                               const forest_params_t* params);
template void init<sparse_node16<float>, float>(const raft::handle_t& h,
                                                forest_t<float>* pf,
                                                const categorical_sets& cat_sets,
                                                const std::vector<float>& vector_leaf,
                                                const int* trees,
                                                const sparse_node16<float>* nodes,
                                                const forest_params_t* params);
template void init<sparse_node16<double>, double>(const raft::handle_t& h,
                                                  forest_t<double>* pf,
                                                  const categorical_sets& cat_sets,
                                                  const std::vector<double>& vector_leaf,
                                                  const int* trees,
                                                  const sparse_node16<double>* nodes,
                                                  const forest_params_t* params);
template void init<sparse_node8, float>(const raft::handle_t& h,
                                        forest_t<float>* pf,
                                        const categorical_sets& cat_sets,
                                        const std::vector<float>& vector_leaf,
                                        const int* trees,
                                        const sparse_node8* nodes,
                                        const forest_params_t* params);
/** Destroys a forest created by init(): releases its device resources, then
    deletes the object. A null handle is a no-op (mirrors `delete` semantics),
    which makes the call safe on never-initialized or already-cleared handles. */
template <typename real_t>
void free(const raft::handle_t& h, forest_t<real_t> f)
{
  if (f == nullptr) return;
  f->free(h);
  delete f;
}
template void free<float>(const raft::handle_t& h, forest_t<float> f);
template void free<double>(const raft::handle_t& h, forest_t<double> f);
/** Runs forest inference on num_rows rows of device data, writing predictions
    (or probabilities, when predict_proba is set) to preds. Thin dispatcher to
    the virtual forest::predict. */
template <typename real_t>
void predict(const raft::handle_t& h,
             forest_t<real_t> f,
             real_t* preds,
             const real_t* data,
             size_t num_rows,
             bool predict_proba)
{
  f->predict(h, preds, data, num_rows, predict_proba);
}
// explicit instantiations for the two supported precisions
template void predict<float>(const raft::handle_t& h,
                             forest_t<float> f,
                             float* preds,
                             const float* data,
                             size_t num_rows,
                             bool predict_proba);
template void predict<double>(const raft::handle_t& h,
                              forest_t<double> f,
                              double* preds,
                              const double* data,
                              size_t num_rows,
                              bool predict_proba);
} // namespace fil
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/fil/infer.cu | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "common.cuh"
#include "internal.cuh"
#include <cuml/fil/multi_sum.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <thrust/functional.h>
#include <algorithm>
#include <cmath>
#ifndef CUDA_PRAGMA_UNROLL
#ifdef __CUDA_ARCH__
#define CUDA_PRAGMA_UNROLL _Pragma("unroll")
#else
#define CUDA_PRAGMA_UNROLL
#endif // __CUDA_ARCH__
#endif // CUDA_PRAGMA_UNROLL
#define INLINE_CONFIG __forceinline__
namespace ML {
namespace fil {
// vec wraps float[N], int[N] or double[N] for cub::BlockReduce
template <int N, typename T>
struct vec;
/** Vectorized lifts a scalar binary operator to an elementwise operator on
    vec<NITEMS, T>, so scalar functors (cub::Sum, cub::Max, ...) can be used in
    reductions over per-thread row groups. */
template <typename BinaryOp>
struct Vectorized {
  BinaryOp op;  // the scalar operator being lifted
  __host__ __device__ Vectorized(BinaryOp op_) : op(op_) {}
  template <int NITEMS, typename T>
  constexpr __host__ __device__ __forceinline__ vec<NITEMS, T> operator()(vec<NITEMS, T> a,
                                                                          vec<NITEMS, T> b) const
  {
    vec<NITEMS, T> c;
    CUDA_PRAGMA_UNROLL
    for (int i = 0; i < NITEMS; i++)
      c[i] = op(a[i], b[i]);
    return c;
  }
};
// deduction helper: vectorized(cub::Sum()) instead of Vectorized<cub::Sum>(...)
template <typename BinaryOp>
constexpr __host__ __device__ Vectorized<BinaryOp> vectorized(BinaryOp op)
{
  return Vectorized<BinaryOp>(op);
}
/** Fixed-size value array of N elements of T; each lane holds the value for
    one of the NITEMS rows a thread processes. Default-constructs to T()
    (zero for arithmetic T). Supports +, +=, / and /= elementwise. */
template <int N, typename T>
struct vec {
  static const int NITEMS = N;
  T data[N];
  // broadcast-construct: every lane set to t
  explicit __host__ __device__ vec(T t)
  {
    CUDA_PRAGMA_UNROLL
    for (int i = 0; i < N; ++i)
      data[i] = t;
  }
  __host__ __device__ vec() : vec(T()) {}
  __host__ __device__ T& operator[](int i) { return data[i]; }
  __host__ __device__ T operator[](int i) const { return data[i]; }
  friend __host__ __device__ vec<N, T> operator+(const vec<N, T>& a, const vec<N, T>& b)
  {
    return vectorized(cub::Sum())(a, b);
  }
  friend __host__ __device__ void operator+=(vec<N, T>& a, const vec<N, T>& b) { a = a + b; }
  // divide by a scalar (broadcast) or anything convertible to vec<N, T>
  template <typename Vec>
  friend __host__ __device__ vec<N, T> operator/(vec<N, T>& a, const Vec& b)
  {
    return vectorized(thrust::divides<T>())(a, vec<N, T>(b));
  }
  template <typename Vec>
  friend __host__ __device__ void operator/=(vec<N, T>& a, const Vec& b)
  {
    a = a / b;
  }
};
/** (class id, margin) pair used in argmax reductions; defaults to margin
    -INFINITY so it is the identity element for cub::ArgMax. */
template <typename real_t>
struct best_margin_label : cub::KeyValuePair<int, real_t> {
  __host__ __device__ best_margin_label(cub::KeyValuePair<int, real_t> pair)
    : cub::KeyValuePair<int, real_t>(pair)
  {
  }
  __host__ __device__ best_margin_label(int c = 0, real_t f = -INFINITY)
    : cub::KeyValuePair<int, real_t>({c, f})
  {
  }
};
/** Pairs a single class id c with each row's margin, producing a vec of
    (class, margin) candidates ready for argmax reduction. */
template <int NITEMS, typename real_t>
__device__ __forceinline__ vec<NITEMS, best_margin_label<real_t>> to_vec(int c,
                                                                         vec<NITEMS, real_t> margin)
{
  vec<NITEMS, best_margin_label<real_t>> ret;
  CUDA_PRAGMA_UNROLL
  for (int i = 0; i < NITEMS; ++i)
    ret[i] = best_margin_label<real_t>(c, margin[i]);
  return ret;
}
/** Elementwise cub::ArgMax over vecs of (class, margin) pairs: keeps, per row,
    the pair with the larger margin. */
struct ArgMax {
  template <int NITEMS, typename real_t>
  __host__ __device__ __forceinline__ vec<NITEMS, best_margin_label<real_t>> operator()(
    vec<NITEMS, best_margin_label<real_t>> a, vec<NITEMS, best_margin_label<real_t>> b) const
  {
    vec<NITEMS, best_margin_label<real_t>> c;
    CUDA_PRAGMA_UNROLL
    for (int i = 0; i < NITEMS; i++)
      c[i] = cub::ArgMax()(a[i], b[i]);
    return c;
  }
};
/** tree_leaf_output returns the leaf outputs from the tree with leaf indices
    given by leaves for n_rows items. FULL_ITEMS indicates whether n_rows ==
    NITEMS, to allow the compiler to skip the conditional when unrolling the
    loop. Rows beyond n_rows (when !FULL_NITEMS) keep output 0. */
template <typename output_type, bool FULL_NITEMS, int NITEMS, typename tree_type>
__device__ __forceinline__ vec<NITEMS, output_type> tree_leaf_output(tree_type tree,
                                                                     int n_rows,
                                                                     int (&leaves)[NITEMS])
{
  vec<NITEMS, output_type> out(0);
  CUDA_PRAGMA_UNROLL
  for (int j = 0; j < NITEMS; ++j) {
    if (FULL_NITEMS || j < n_rows) {
      /** dependent names are not considered templates by default, unless it's a
          member of a current [template] instantiation. As output<>() is a
          member function inherited from the base class, template
          output<output_type>() is required. */
      out[j] = tree[leaves[j]].template output<output_type>();
    }
  }
  return out;
}
/** Traverses one tree for up to NITEMS rows in lockstep and returns the leaf
    output per row. input is a row-major block of NITEMS rows x cols features;
    only the first n_rows rows are real (the rest must still be readable). */
template <int NITEMS, bool CATS_SUPPORTED, typename output_type, typename tree_type>
__device__ __forceinline__ vec<NITEMS, output_type> infer_one_tree(
  tree_type tree, const typename tree_type::real_type* input, int cols, int n_rows)
{
  // find the leaf nodes for each row
  int curr[NITEMS];
  // the first n_rows are active: bit j of mask == row j still descending
  int mask = (1 << n_rows) - 1;
  for (int j = 0; j < NITEMS; ++j)
    curr[j] = 0;
  do {
    CUDA_PRAGMA_UNROLL
    for (int j = 0; j < NITEMS; ++j) {
      auto n = tree[curr[j]];
      // clear bit j once row j's current node is a leaf
      mask &= ~(n.is_leaf() << j);
      if ((mask & (1 << j)) != 0) {
        curr[j] = tree.child_index<CATS_SUPPORTED>(n, curr[j], input[j * cols + n.fid()]);
      }
    }
  } while (mask != 0);
  // get the output from the leaves; dispatch on n_rows so the full-NITEMS case
  // compiles without the per-row bound check
  if (n_rows == NITEMS) {
    return tree_leaf_output<output_type, true>(tree, n_rows, curr);
  } else {
    return tree_leaf_output<output_type, false>(tree, n_rows, curr);
  }
}
/** Single-row specialization: walks the tree from the root to a leaf for one
    input row and returns its output. Categorical splits always enabled here
    (child_index<true>). */
template <typename output_type, typename tree_type>
__device__ __forceinline__ vec<1, output_type> infer_one_tree(
  tree_type tree, const typename tree_type::real_type* input, int cols, int rows)
{
  int curr = 0;
  for (;;) {
    auto n = tree[curr];
    if (n.is_leaf()) break;
    bool cond = tree.child_index<true>(n, curr, input[n.fid()]);
    curr      = n.left(curr) + cond;
  }
  vec<1, output_type> out;
  /** dependent names are not considered templates by default,
      unless it's a member of a current [template] instantiation.**/
  out[0] = tree[curr].template output<output_type>();
  return out;
}
/**
The shared memory requirements for finalization stage may differ based
on the set of PTX architectures the kernels were compiled for, as well as
the CUDA compute capability of the device chosen for computation.
TODO (levsnv): run a test kernel during forest init to determine the compute capability
chosen for the inference, for an accurate sizeof(BlockReduce::TempStorage),
which is used in determining max NITEMS or max input data columns.
600 is the __CUDA_ARCH__ for Pascal (6.0) GPUs, which is not defined in
host code.
6.0 is the earliest compute capability supported by FIL and RAPIDS in general.
See https://rapids.ai/start.html as well as cmake defaults.
*/
// values below are defaults as of this change.
/** Host-side upper bound on the cub::BlockReduce temp storage needed to sum
    vec<NITEMS, real_t> across a block. Pinned to PTX arch 600 (Pascal), the
    oldest arch FIL supports, so the estimate is stable in host code. */
template <int NITEMS, typename real_t>
size_t block_reduce_footprint_host()
{
  using reduce_t =
    cub::BlockReduce<vec<NITEMS, real_t>, FIL_TPB, cub::BLOCK_REDUCE_WARP_REDUCTIONS, 1, 1, 600>;
  return sizeof(typename reduce_t::TempStorage);
}
/** Same as block_reduce_footprint_host, but for the argmax reduction over
    (class, margin) pairs used when outputting a single class label. */
template <int NITEMS, typename real_t>
size_t block_reduce_best_class_footprint_host()
{
  using reduce_t = cub::BlockReduce<vec<NITEMS, best_margin_label<real_t>>,
                                    FIL_TPB,
                                    cub::BLOCK_REDUCE_WARP_REDUCTIONS,
                                    1,
                                    1,
                                    600>;
  return sizeof(typename reduce_t::TempStorage);
}
// the device template should achieve the best performance, using up-to-date
// CUB defaults
/** Block-wide reduction of `value` with `op`; `storage` must point to shared
    memory at least sizeof(BlockReduceT::TempStorage) bytes. The result is only
    valid in thread 0 (cub contract). Uses CUB's up-to-date defaults on device. */
template <typename T, typename BinaryOp>
__device__ __forceinline__ T block_reduce(T value, BinaryOp op, void* storage)
{
  using BlockReduceT = cub::BlockReduce<T, FIL_TPB>;
  auto* temp         = static_cast<typename BlockReduceT::TempStorage*>(storage);
  return BlockReduceT(*temp).Reduce(value, op, blockDim.x);
}
/** Default aggregator (FLOAT_UNARY_BINARY): each thread accumulates a plain
    sum of its trees' scalar outputs in registers; finalize() reduces across
    the block and writes one value per row. */
template <int NITEMS,
          typename real_t,
          leaf_algo_t leaf_algo>  // = FLOAT_UNARY_BINARY
struct tree_aggregator_t {
  vec<NITEMS, real_t> acc;  // per-row partial sums, in registers
  void* tmp_storage;        // shared memory for the finalize reduction
  /** shared memory footprint of the accumulator during
      the finalization of forest inference kernel, when infer_k output
      value is computed.
      num_classes is used for other template parameters */
  static size_t smem_finalize_footprint(size_t data_row_size,
                                        int num_classes,
                                        int log2_threads_per_tree,
                                        bool predict_proba)
  {
    // multi_sum path needs one vec per thread; block_reduce path needs cub storage
    return log2_threads_per_tree != 0 ? FIL_TPB * NITEMS * sizeof(real_t)
                                      : block_reduce_footprint_host<NITEMS, real_t>();
  }
  /** shared memory footprint of the accumulator during
      the accumulation of forest inference, when individual trees
      are inferred and partial aggregates are accumulated.
      num_classes is used for other template parameters */
  static size_t smem_accumulate_footprint(int num_classes) { return 0; }
  /**
      num_classes is used for other template parameters */
  __device__ __forceinline__ tree_aggregator_t(predict_params params,
                                               void* accumulate_workspace,
                                               void* finalize_workspace,
                                               real_t* vector_leaf)
    : tmp_storage(finalize_workspace)
  {
  }
  __device__ __forceinline__ void accumulate(vec<NITEMS, real_t> single_tree_prediction,
                                             int tree,
                                             int thread_num_rows)
  {
    acc += single_tree_prediction;
  }
  /** Reduces partial sums across the block (if more than one thread worked per
      tree group) and writes one output per row with the given stride. */
  __device__ INLINE_CONFIG void finalize(real_t* block_out,
                                         int block_num_rows,
                                         int output_stride,
                                         output_t transform,
                                         int num_trees,
                                         int log2_threads_per_tree)
  {
    if (FIL_TPB != 1 << log2_threads_per_tree) {  // anything to reduce?
      // ensure input columns can be overwritten (no threads traversing trees)
      __syncthreads();
      if (log2_threads_per_tree == 0) {
        acc = block_reduce(acc, vectorized(cub::Sum()), tmp_storage);
      } else {
        auto per_thread          = (vec<NITEMS, real_t>*)tmp_storage;
        per_thread[threadIdx.x] = acc;
        __syncthreads();
        // We have two pertinent cases for splitting FIL_TPB == 256 values:
        // 1. 2000 columns, which fit few threads/tree in shared memory,
        // so ~256 groups. These are the models that will run the slowest.
        // multi_sum performance is not sensitive to the radix here.
        // 2. 50 columns, so ~32 threads/tree, so ~8 groups. These are the most
        // popular.
        acc =
          multi_sum<5>(per_thread, 1 << log2_threads_per_tree, FIL_TPB >> log2_threads_per_tree);
      }
    }
    if (threadIdx.x * NITEMS >= block_num_rows) return;
    CUDA_PRAGMA_UNROLL
    for (int row = 0; row < NITEMS; ++row) {
      int out_preds_i = threadIdx.x * NITEMS + row;
      if (out_preds_i < block_num_rows) block_out[out_preds_i * output_stride] = acc[row];
    }
  }
};
// tmp_storage may overlap shared memory addressed by [begin, end)
// allreduce_shmem ensures no race conditions
/** Block-wide reduce of [begin, end) with op; unlike block_reduce, broadcasts
    the result to every thread via tmp_storage. */
template <typename Iterator, typename BinaryOp>
__device__ __forceinline__ auto allreduce_shmem(Iterator begin,
                                                Iterator end,
                                                BinaryOp op,
                                                void* tmp_storage)
{
  typedef typename std::iterator_traits<Iterator>::value_type value_type;
  // default-constructed vec zero-initializes (vec() delegates to vec(T())).
  // NOTE(review): threads with an empty slice therefore contribute zeros;
  // harmless for the Sum/Max uses in block_softmax, but verify before reusing
  // with an op whose identity is not zero.
  value_type thread_partial;
  for (Iterator it = begin + threadIdx.x; it < end; it += blockDim.x)
    thread_partial = op(thread_partial, *it);
  __syncthreads();  // free shared memory [begin, end)
  auto res = block_reduce(thread_partial, op, tmp_storage);
  // broadcast sum to all threads
  __syncthreads();  // free up tmp_storage
  if (threadIdx.x == 0) *(value_type*)tmp_storage = res;
  __syncthreads();
  return *(value_type*)tmp_storage;
}
// *begin and *end shall be struct vec
// tmp_storage may overlap shared memory addressed by [begin, end)
/** Reduces per-class margins [begin, end) to the argmax class per row and
    writes the class id (as real_t) to out[row]. Thread 0 does the write. */
template <typename Iterator, typename real_t>
__device__ __forceinline__ void write_best_class(
  Iterator begin, Iterator end, void* tmp_storage, real_t* out, int num_rows)
{
  // reduce per-class candidate margins to one best class candidate
  // per thread (for each of the NITEMS rows)
  auto best = vec<begin->NITEMS, best_margin_label<real_t>>();
  for (int c = threadIdx.x; c < end - begin; c += blockDim.x)
    best = vectorized(cub::ArgMax())(best, to_vec(c, begin[c]));
  // [begin, end) may overlap tmp_storage
  __syncthreads();
  // find best class per block (for each of the NITEMS rows)
  best = block_reduce(best, vectorized(cub::ArgMax()), tmp_storage);
  // write it out to global memory
  if (threadIdx.x > 0) return;
  CUDA_PRAGMA_UNROLL
  for (int row = 0; row < best.NITEMS; ++row)
    if (row < num_rows) out[row] = best[row].key;
}
/// needed for softmax: exp(margin - max), the numerically-stable exponent
struct shifted_exp {
  // NOTE(review): returns double even for real_t == float, so the float path
  // promotes through double exp and narrows on assignment — confirm whether
  // this extra precision is intentional before changing to real_t/expf.
  template <typename real_t>
  __device__ double operator()(real_t margin, real_t max) const
  {
    return exp(margin - max);
  }
};
// *begin and *end shall be struct vec
// tmp_storage may NOT overlap shared memory addressed by [begin, end)
/** In-place softmax over the per-class vecs in [begin, end), cooperatively
    across the block: subtract the max, exponentiate, divide by the sum. */
template <typename Iterator>
__device__ __forceinline__ void block_softmax(Iterator begin, Iterator end, void* tmp_storage)
{
  // subtract max before exponentiating for numerical stability
  using value_type = typename std::iterator_traits<Iterator>::value_type;
  value_type max   = allreduce_shmem(begin, end, vectorized(cub::Max()), tmp_storage);
  for (Iterator it = begin + threadIdx.x; it < end; it += blockDim.x)
    *it = vectorized(shifted_exp())(*it, max);
  // sum of exponents
  value_type soe = allreduce_shmem(begin, end, vectorized(cub::Sum()), tmp_storage);
  // softmax phase 2: normalization
  for (Iterator it = begin + threadIdx.x; it < end; it += blockDim.x)
    *it /= soe;
}
// *begin and *end shall be struct vec
// tmp_storage may NOT overlap shared memory addressed by [begin, end)
/** Optionally averages margins over trees_per_class, optionally applies
    softmax, then writes the per-class values row-major to global memory. */
template <typename Iterator, typename real_t>
__device__ __forceinline__ void normalize_softmax_and_write(Iterator begin,
                                                            Iterator end,
                                                            output_t transform,
                                                            int trees_per_class,
                                                            void* tmp_storage,
                                                            real_t* out,
                                                            int num_rows)
{
  if ((transform & output_t::AVG) != 0) {
    for (Iterator it = begin + threadIdx.x; it < end; it += blockDim.x)
      *it /= trees_per_class;
  }
  if ((transform & output_t::SOFTMAX) != 0) block_softmax(begin, end, tmp_storage);
  // write result to global memory: out[row][class], row-major
  CUDA_PRAGMA_UNROLL
  for (int row = 0; row < begin->NITEMS; ++row) {
    for (int c = threadIdx.x; c < end - begin; c += blockDim.x)
      if (row < num_rows) out[row * (end - begin) + c] = begin[c][row];
  }
}
// *begin and *end shall be struct vec
// tmp_storage may NOT overlap shared memory addressed by [begin, end)
// in case num_outputs > 1
/** Final output step shared by multiclass aggregators: writes either a single
    best-class label per row (num_outputs == 1) or the full per-class
    margin/probability vector. */
template <typename Iterator, typename real_t>
__device__ __forceinline__ void class_margins_to_global_memory(Iterator begin,
                                                               Iterator end,
                                                               output_t transform,
                                                               int trees_per_class,
                                                               void* tmp_storage,
                                                               real_t* out,
                                                               int num_rows,
                                                               int num_outputs)
{
  if (num_outputs == 1) {  // will output class
    // reduce per-class candidate margins to one best class candidate
    // per thread (for each of the NITEMS rows)
    write_best_class(begin, end, tmp_storage, out, num_rows);
  } else {  // output softmax-ed margin
    normalize_softmax_and_write(begin, end, transform, trees_per_class, tmp_storage, out, num_rows);
  }
}
/** GROVE_PER_CLASS aggregator for num_classes <= FIL_TPB: each thread sums its
    trees in registers; finalize() transposes via shared memory and multi_sum
    so per_thread[0..num_classes) holds per-class margins. */
template <int NITEMS, typename real_t>
struct tree_aggregator_t<NITEMS, real_t, GROVE_PER_CLASS_FEW_CLASSES> {
  vec<NITEMS, real_t> acc;          // per-row partial sums for this thread's trees
  int num_classes;
  vec<NITEMS, real_t>* per_thread;  // shared: one slot per thread, then per class
  void* tmp_storage;                // shared: cub reduction scratch
  static size_t smem_finalize_footprint(size_t data_row_size,
                                        int num_classes,
                                        int log2_threads_per_tree,
                                        bool predict_proba)
  {
    // phase 1: per-thread slots (only full groups of num_classes threads)
    size_t phase1 = (FIL_TPB - FIL_TPB % num_classes) * sizeof(vec<NITEMS, real_t>);
    size_t phase2 = predict_proba ? block_reduce_footprint_host<NITEMS, real_t>()
                                  : block_reduce_best_class_footprint_host<NITEMS, real_t>();
    // predict_proba keeps both live at once (tmp_storage after per_thread)
    return predict_proba ? phase1 + phase2 : std::max(phase1, phase2);
  }
  static size_t smem_accumulate_footprint(int num_classes) { return 0; }
  __device__ __forceinline__ tree_aggregator_t(predict_params params,
                                               void* accumulate_workspace,
                                               void* finalize_workspace,
                                               real_t* vector_leaf)
    : num_classes(params.num_classes),
      per_thread((vec<NITEMS, real_t>*)finalize_workspace),
      tmp_storage(params.predict_proba ? per_thread + num_classes : finalize_workspace)
  {
  }
  __device__ __forceinline__ void accumulate(vec<NITEMS, real_t> single_tree_prediction,
                                             int tree,
                                             int thread_num_rows)
  {
    acc += single_tree_prediction;
  }
  __device__ INLINE_CONFIG void finalize(real_t* out,
                                         int num_rows,
                                         int num_outputs,
                                         output_t transform,
                                         int num_trees,
                                         int log2_threads_per_tree)
  {
    __syncthreads();  // free up input row in case it was in shared memory
    // load margin into shared memory
    per_thread[threadIdx.x] = acc;
    __syncthreads();
    // threads handling the same class (stride num_classes) are summed together
    acc = multi_sum<6>(per_thread, num_classes, blockDim.x / num_classes);
    if (threadIdx.x < num_classes) per_thread[threadIdx.x] = acc;
    __syncthreads();  // per_thread needs to be fully populated
    class_margins_to_global_memory(per_thread,
                                   per_thread + num_classes,
                                   transform,
                                   num_trees / num_classes,
                                   tmp_storage,
                                   out,
                                   num_rows,
                                   num_outputs);
  }
};
/** GROVE_PER_CLASS aggregator for num_classes > FIL_TPB: margins live in a
    shared per-class array; since consecutive threads process trees of
    consecutive classes, each slot is written by at most one thread per step. */
template <int NITEMS, typename real_t>
struct tree_aggregator_t<NITEMS, real_t, GROVE_PER_CLASS_MANY_CLASSES> {
  vec<NITEMS, real_t> acc;
  /// at first, per class margin, then, possibly, different softmax partials
  vec<NITEMS, real_t>* per_class_margin;
  void* tmp_storage;
  int num_classes;
  static size_t smem_finalize_footprint(size_t data_row_size,
                                        int num_classes,
                                        int log2_threads_per_tree,
                                        bool predict_proba)
  {
    size_t phase1 = data_row_size + smem_accumulate_footprint(num_classes);
    size_t phase2 = predict_proba ? block_reduce_footprint_host<NITEMS, real_t>()
                                  : block_reduce_best_class_footprint_host<NITEMS, real_t>();
    return predict_proba ? phase1 + phase2 : std::max(phase1, phase2);
  }
  static __host__ __device__ size_t smem_accumulate_footprint(int num_classes)
  {
    return num_classes * sizeof(vec<NITEMS, real_t>);
  }
  __device__ __forceinline__ tree_aggregator_t(predict_params params,
                                               void* accumulate_workspace,
                                               void* finalize_workspace,
                                               real_t* vector_leaf)
    : per_class_margin((vec<NITEMS, real_t>*)accumulate_workspace),
      tmp_storage(params.predict_proba ? per_class_margin + num_classes : finalize_workspace),
      num_classes(params.num_classes)
  {
    // cooperative zero-init of the shared per-class margins
    for (int c = threadIdx.x; c < num_classes; c += blockDim.x)
      per_class_margin[c] = vec<NITEMS, real_t>(0);
    // __syncthreads() is called in infer_k
  }
  __device__ __forceinline__ void accumulate(vec<NITEMS, real_t> single_tree_prediction,
                                             int tree,
                                             int thread_num_rows)
  {
    // since threads are assigned to consecutive classes, no need for atomics
    if (thread_num_rows > 0) { per_class_margin[tree % num_classes] += single_tree_prediction; }
    __syncthreads();
  }
  __device__ INLINE_CONFIG void finalize(real_t* out,
                                         int num_rows,
                                         int num_outputs,
                                         output_t transform,
                                         int num_trees,
                                         int log2_threads_per_tree)
  {
    class_margins_to_global_memory(per_class_margin,
                                   per_class_margin + num_classes,
                                   transform,
                                   num_trees,
                                   tmp_storage,
                                   out,
                                   num_rows,
                                   num_outputs);
  }
};
/** VECTOR_LEAF aggregator: trees predict an index into a vector_leaf table of
    per-class values. Leaf indices are staged in shared memory, then threads
    are re-assigned to (thread group, class) pairs so each per_class_margin
    element has exactly one writer — no atomics needed. */
template <int NITEMS, typename real_t>
struct tree_aggregator_t<NITEMS, real_t, VECTOR_LEAF> {
  // per_class_margin is a row-major matrix
  // of size num_threads_per_class * num_classes
  // used to accumulate class values
  vec<NITEMS, real_t>* per_class_margin;
  vec<NITEMS, int>* vector_leaf_indices;  // staged leaf indices, one per thread
  int* thread_num_rows;                   // rows each thread actually processed
  int num_classes;
  int num_threads_per_class;
  real_t* vector_leaf;                    // global table: [leaf_idx][class]
  void* tmp_storage;
  static size_t smem_finalize_footprint(size_t data_row_size,
                                        int num_classes,
                                        int log2_threads_per_tree,
                                        bool predict_proba)
  {
    size_t phase1 = data_row_size + smem_accumulate_footprint(num_classes);
    size_t phase2 = predict_proba ? block_reduce_footprint_host<NITEMS, real_t>()
                                  : block_reduce_best_class_footprint_host<NITEMS, real_t>();
    return predict_proba ? phase1 + phase2 : std::max(phase1, phase2);
  }
  static size_t smem_accumulate_footprint(int num_classes)
  {
    return sizeof(vec<NITEMS, real_t>) * num_classes * max(1, FIL_TPB / num_classes) +
           sizeof(vec<NITEMS, int>) * FIL_TPB + sizeof(int) * FIL_TPB;
  }
  __device__ __forceinline__ tree_aggregator_t(predict_params params,
                                               void* accumulate_workspace,
                                               void* finalize_workspace,
                                               real_t* vector_leaf)
    : num_classes(params.num_classes),
      num_threads_per_class(max(1, blockDim.x / params.num_classes)),
      vector_leaf(vector_leaf),
      tmp_storage(finalize_workspace)
  {
    // Assign workspace: [margins | leaf indices | row counts]
    char* ptr        = (char*)accumulate_workspace;
    per_class_margin = (vec<NITEMS, real_t>*)ptr;
    ptr += sizeof(vec<NITEMS, real_t>) * num_classes * num_threads_per_class;
    vector_leaf_indices = (vec<NITEMS, int>*)ptr;
    ptr += sizeof(vec<NITEMS, int>) * blockDim.x;
    thread_num_rows = (int*)ptr;
    // Initialise shared memory
    for (int i = threadIdx.x; i < num_classes * num_threads_per_class; i += blockDim.x) {
      per_class_margin[i] = vec<NITEMS, real_t>();
    }
    vector_leaf_indices[threadIdx.x] = vec<NITEMS, int>();
    thread_num_rows[threadIdx.x]     = 0;
    // __syncthreads() is called in infer_k
  }
  __device__ __forceinline__ void accumulate(vec<NITEMS, int> single_tree_prediction,
                                             int tree,
                                             int num_rows)
  {
    // Perform a transpose in shared memory
    // Assign each thread to a class, so they can accumulate without atomics
    __syncthreads();  // previous iteration's reads of the staging arrays done
    // Write indices to shared memory
    vector_leaf_indices[threadIdx.x] = single_tree_prediction;
    thread_num_rows[threadIdx.x]     = num_rows;
    __syncthreads();
    // i here refers to each element of the matrix per_class_margin
    for (int i = threadIdx.x; i < num_classes * num_threads_per_class; i += blockDim.x) {
      // if num_threads_per_class == 1, then c == i
      int c = i % num_classes;
      // iterate over original thread inputs with stride num_threads_per_class
      // j is the original thread input
      // we have num_classes threads for each j
      for (int j = i / num_classes; j < blockDim.x; j += num_threads_per_class) {
        for (int item = 0; item < thread_num_rows[j]; ++item) {
          real_t pred = vector_leaf[vector_leaf_indices[j][item] * num_classes + c];
          per_class_margin[i][item] += pred;
        }
      }
    }
  }
  __device__ INLINE_CONFIG void finalize(real_t* out,
                                         int num_rows,
                                         int num_outputs,
                                         output_t transform,
                                         int num_trees,
                                         int log2_threads_per_tree)
  {
    if (num_classes < blockDim.x) {
      __syncthreads();
      // Efficient implementation for small number of classes
      auto acc = multi_sum<6>(per_class_margin, num_classes, max(1, blockDim.x / num_classes));
      if (threadIdx.x < num_classes) per_class_margin[threadIdx.x] = acc;
      __syncthreads();
    }
    class_margins_to_global_memory(per_class_margin,
                                   per_class_margin + num_classes,
                                   transform,
                                   num_trees,
                                   tmp_storage,
                                   out,
                                   num_rows,
                                   num_outputs);
  }
};
/** CATEGORICAL_LEAF aggregator: trees vote for a class; votes are tallied in
    shared memory with atomics, then either the full vote counts or the
    majority class is written per row. */
template <int NITEMS, typename real_t>
struct tree_aggregator_t<NITEMS, real_t, CATEGORICAL_LEAF> {
  // could switch to uint16_t to save shared memory
  // provided raft::myAtomicAdd(short*) simulated with appropriate shifts
  int* votes;  // shared, laid out [class][item]
  int num_classes;
  static size_t smem_finalize_footprint(size_t data_row_size,
                                        int num_classes,
                                        int log2_threads_per_tree,
                                        bool predict_proba)
  {
    // not accounting for lingering accumulate_footprint during finalize()
    return 0;
  }
  static size_t smem_accumulate_footprint(int num_classes)
  {
    return sizeof(int) * num_classes * NITEMS;
  }
  __device__ __forceinline__ tree_aggregator_t(predict_params params,
                                               void* accumulate_workspace,
                                               void* finalize_workspace,
                                               real_t* vector_leaf)
    : num_classes(params.num_classes), votes((int*)accumulate_workspace)
  {
    // NOTE(review): the stride FIL_TPB * NITEMS leaves classes in
    // [FIL_TPB, FIL_TPB * NITEMS) un-zeroed when num_classes > FIL_TPB —
    // confirm num_classes is bounded accordingly, or that FIL_TPB was meant.
    for (int c = threadIdx.x; c < num_classes; c += FIL_TPB * NITEMS)
      CUDA_PRAGMA_UNROLL
      for (int item = 0; item < NITEMS; ++item)
        votes[c * NITEMS + item] = 0;
    // __syncthreads() is called in infer_k
  }
  __device__ __forceinline__ void accumulate(vec<NITEMS, int> single_tree_prediction,
                                             int tree,
                                             int thread_num_rows)
  {
    if (thread_num_rows == 0) return;
    CUDA_PRAGMA_UNROLL
    for (int item = 0; item < NITEMS; ++item) {
      // one vote per item for the class this tree predicted
      raft::myAtomicAdd(votes + single_tree_prediction[item] * NITEMS + item, 1);
    }
  }
  // class probabilities or regression. for regression, num_classes
  // is just the number of outputs for each data instance
  __device__ __forceinline__ void finalize_multiple_outputs(real_t* out, int num_rows)
  {
    __syncthreads();
    for (int c = threadIdx.x; c < num_classes; c += blockDim.x) {
      CUDA_PRAGMA_UNROLL
      for (int row = 0; row < num_rows; ++row)
        out[row * num_classes + c] = votes[c * NITEMS + row];
    }
  }
  // using this when predicting a single class label, as opposed to sparse class vector
  // or class probabilities or regression
  __device__ __forceinline__ void finalize_class_label(real_t* out, int num_rows)
  {
    __syncthreads();  // make sure all votes[] are final
    int item = threadIdx.x;
    int row  = item;
    if (item < NITEMS && row < num_rows) {
      // linear scan for the class with the most votes; ties go to the lower id
      int max_votes  = 0;
      int best_class = 0;
      for (int c = 0; c < num_classes; ++c) {
        if (votes[c * NITEMS + item] > max_votes) {
          max_votes  = votes[c * NITEMS + item];
          best_class = c;
        }
      }
      out[row] = best_class;
    }
  }
  __device__ INLINE_CONFIG void finalize(real_t* out,
                                         int num_rows,
                                         int num_outputs,
                                         output_t transform,
                                         int num_trees,
                                         int log2_threads_per_tree)
  {
    if (num_outputs > 1) {
      // only supporting num_outputs == num_classes
      finalize_multiple_outputs(out, num_rows);
    } else {
      finalize_class_label(out, num_rows);
    }
  }
};
/** Caches up to rows_per_block rows of input into shared memory (sdata),
    padding rows beyond block_num_rows with zeros. The padded rows matter:
    when columns are cached, infer_one_tree traverses all NITEMS rows
    including the dummy ones, so they must hold defined values.
    sdata_stride may be num_cols + 1 to reduce bank conflicts. */
template <typename real_t>
__device__ INLINE_CONFIG void load_data(real_t* sdata,
                                        const real_t* block_input,
                                        predict_params params,
                                        int rows_per_block,
                                        int block_num_rows)
{
  int num_cols     = params.num_cols;
  int sdata_stride = params.sdata_stride();
  // cache the row for all threads to reuse
  // 2021: latest SMs still do not have >256KiB of shared memory/block required to
  // exceed the uint16_t
  CUDA_PRAGMA_UNROLL
  for (uint16_t input_idx = threadIdx.x; input_idx < block_num_rows * num_cols;
       input_idx += blockDim.x) {
    // for even num_cols, we need to pad sdata_stride to reduce bank conflicts
    // assuming here that sdata_stride == num_cols + 1
    // then, idx / num_cols * sdata_stride + idx % num_cols == idx + idx / num_cols
    uint16_t sdata_idx =
      sdata_stride == num_cols ? input_idx : input_idx + input_idx / (uint16_t)num_cols;
    sdata[sdata_idx] = block_input[input_idx];
  }
  // zero the padding rows; bug fix: the loop must start at a per-thread offset
  // (+ threadIdx.x). Previously every thread started at the same index, so only
  // every blockDim.x-th element was zeroed and the rest stayed uninitialized.
  CUDA_PRAGMA_UNROLL
  for (int idx = block_num_rows * sdata_stride + threadIdx.x; idx < rows_per_block * sdata_stride;
       idx += blockDim.x)
    sdata[idx] = 0.0f;
}
/** Main inference kernel. Each block processes rows_per_block =
    NITEMS << log2_threads_per_tree rows against the whole forest, grid-stride
    over row blocks. Each thread handles NITEMS rows; optionally the rows'
    columns are cached in shared memory (cols_in_shmem). Aggregation strategy
    is selected at compile time via leaf_algo. */
template <int NITEMS,
          leaf_algo_t leaf_algo,
          bool cols_in_shmem,
          bool CATS_SUPPORTED,
          class storage_type>
__global__ void infer_k(storage_type forest, predict_params params)
{
  using real_t = typename storage_type::real_type;
  extern __shared__ char smem[];
  real_t* sdata      = reinterpret_cast<real_t*>(smem);
  int sdata_stride   = params.sdata_stride();
  int rows_per_block = NITEMS << params.log2_threads_per_tree;
  int num_cols       = params.num_cols;
  // first row of this thread's NITEMS-row slice within the block's rows
  int thread_row0 = NITEMS * modpow2(threadIdx.x, params.log2_threads_per_tree);
  for (int64_t block_row0 = blockIdx.x * rows_per_block; block_row0 < params.num_rows;
       block_row0 += rows_per_block * gridDim.x) {
    int block_num_rows =
      max(0, (int)min((int64_t)rows_per_block, (int64_t)params.num_rows - block_row0));
    const real_t* block_input =
      reinterpret_cast<const real_t*>(params.data) + block_row0 * num_cols;
    if constexpr (cols_in_shmem)
      load_data(sdata, block_input, params, rows_per_block, block_num_rows);
    // aggregator workspaces live after the cached columns in shared memory
    tree_aggregator_t<NITEMS, real_t, leaf_algo> acc(
      params, (char*)sdata + params.cols_shmem_size(), sdata, forest.vector_leaf_);
    __syncthreads();  // for both row cache init and acc init
    // one block works on NITEMS * threads_per_tree rows and the whole forest
    // one thread works on NITEMS rows
    int thread_tree0    = threadIdx.x >> params.log2_threads_per_tree;
    int tree_stride     = blockDim.x >> params.log2_threads_per_tree;
    int thread_num_rows = max(0, min(NITEMS, block_num_rows - thread_row0));
    for (int tree = thread_tree0; tree - thread_tree0 < forest.num_trees(); tree += tree_stride) {
      /* tree - thread_tree0 < forest.num_trees() is a necessary but block-uniform
         condition for "tree < forest.num_trees()". It lets use __syncthreads()
         and is made exact below.
         Same with thread_num_rows > 0
      */
      using pred_t = typename leaf_output_t<leaf_algo, real_t>::T;
      vec<NITEMS, pred_t> prediction;
      if (tree < forest.num_trees() && thread_num_rows != 0) {
        // with shared-memory columns, all NITEMS rows (incl. zero-padded
        // dummies) are traversed; otherwise only the real rows
        prediction = infer_one_tree<NITEMS, CATS_SUPPORTED, pred_t>(
          forest[tree],
          cols_in_shmem ? sdata + thread_row0 * sdata_stride : block_input + thread_row0 * num_cols,
          cols_in_shmem ? sdata_stride : num_cols,
          cols_in_shmem ? NITEMS : thread_num_rows);
      }
      // All threads must enter accumulate
      // Dummy threads can be marked as having 0 rows
      acc.accumulate(prediction, tree, tree < forest.num_trees() ? thread_num_rows : 0);
    }
    acc.finalize(reinterpret_cast<real_t*>(params.preds) + params.num_outputs * block_row0,
                 block_num_rows,
                 params.num_outputs,
                 params.transform,
                 forest.num_trees(),
                 params.log2_threads_per_tree);
    __syncthreads();  // free up acc's shared memory resources for next row set
  }
}
/// Shared-memory bytes one inference block needs: the accumulate and finalize
/// phases of infer_k reuse the same allocation, so the block requires the
/// larger of the two footprints (the row cache is only live during accumulate).
template <int NITEMS, typename real_t, leaf_algo_t leaf_algo>
size_t shmem_size_params::get_smem_footprint()
{
  using aggregator = tree_aggregator_t<NITEMS, real_t, leaf_algo>;
  size_t for_finalize = aggregator::smem_finalize_footprint(
    cols_shmem_size(), num_classes, log2_threads_per_tree, predict_proba);
  size_t for_accumulate = aggregator::smem_accumulate_footprint(num_classes) + cols_shmem_size();
  return std::max(for_finalize, for_accumulate);
}
/// Dispatches the shared-memory footprint computation on the runtime
/// precision (4 = float, 8 = double) of the forest's real type.
template <class KernelParams>
int compute_smem_footprint::run(predict_params ssp)
{
  if (ssp.sizeof_real == 4) {
    return ssp.template get_smem_footprint<KernelParams::N_ITEMS, float, KernelParams::LEAF_ALGO>();
  }
  if (ssp.sizeof_real == 8) {
    return ssp
      .template get_smem_footprint<KernelParams::N_ITEMS, double, KernelParams::LEAF_ALGO>();
  }
  ASSERT(false,
         "internal error: sizeof_real == %d, but must be 4 or 8",
         static_cast<int>(ssp.sizeof_real));
  return 0;  // unreachable; keeps the compiler satisfied after the assert
}
// make sure to instantiate all possible get_smem_footprint instantiations
template int dispatch_on_fil_template_params(compute_smem_footprint, predict_params);
// dispatch_functor that launches infer_k with the compile-time kernel
// parameters chosen at runtime by dispatch_on_fil_template_params
template <typename storage_type>
struct infer_k_storage_template : dispatch_functor<void> {
  storage_type forest;  // device-side forest representation (passed by value to the kernel)
  cudaStream_t stream;  // stream on which the kernel is launched
  infer_k_storage_template(storage_type forest_, cudaStream_t stream_)
    : forest(forest_), stream(stream_)
  {
  }
  template <class KernelParams = KernelTemplateParams<>>
  void run(predict_params params)
  {
    // if the caller did not fix a grid size, default to ceildiv(num_rows, n_items)
    // blocks — NOTE(review): presumably n_items equals rows handled per block here;
    // confirm against the launch-parameter setup in fil.cu
    params.num_blocks = params.num_blocks != 0
                          ? params.num_blocks
                          : raft::ceildiv(int(params.num_rows), params.n_items);
    infer_k<KernelParams::N_ITEMS,
            KernelParams::LEAF_ALGO,
            KernelParams::COLS_IN_SHMEM,
            KernelParams::CATS_SUPPORTED>
      <<<params.num_blocks, params.block_dim_x, params.shm_sz, stream>>>(forest, params);
    // surface launch-configuration errors without clearing the sticky error state
    RAFT_CUDA_TRY(cudaPeekAtLastError());
  }
};
/// Runs FIL inference: picks the infer_k instantiation matching `params`
/// and launches it on `stream` over the given forest.
template <typename storage_type>
void infer(storage_type forest, predict_params params, cudaStream_t stream)
{
  infer_k_storage_template<storage_type> launcher(forest, stream);
  dispatch_on_fil_template_params(launcher, params);
}
template void infer<dense_storage_f32>(dense_storage_f32 forest,
predict_params params,
cudaStream_t stream);
template void infer<dense_storage_f64>(dense_storage_f64 forest,
predict_params params,
cudaStream_t stream);
template void infer<sparse_storage16_f32>(sparse_storage16_f32 forest,
predict_params params,
cudaStream_t stream);
template void infer<sparse_storage16_f64>(sparse_storage16_f64 forest,
predict_params params,
cudaStream_t stream);
template void infer<sparse_storage8>(sparse_storage8 forest,
predict_params params,
cudaStream_t stream);
} // namespace fil
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/fil/treelite_import.cu | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** @file treelite_import.cu converts from treelite format to a FIL-centric CPU-RAM format, so that
* fil.cu can make a `forest` object out of it. */
#include "common.cuh" // for node_traits, num_trees, tree_num_nodes
#include "internal.cuh" // for MAX_FIL_INT_FLOAT, BITS_PER_BYTE, cat_feature_counters, cat_sets, cat_sets_owner, categorical_sets, leaf_algo_t
#include <cuml/common/logger.hpp> // for CUML_LOG_WARN
#include <cuml/fil/fil.h> // for algo_t, from_treelite, storage_type_repr, storage_type_t, treelite_params_t
#include <cuml/fil/fnv_hash.h> // for fowler_noll_vo_fingerprint64_32
#include <raft/core/error.hpp> // for ASSERT
#include <raft/core/handle.hpp> // for handle_t
#include <raft/util/cudart_utils.hpp> // for RAFT_CUDA_TRY
#include <treelite/base.h> // for Operator, SplitFeatureType, kGE, kGT, kLE, kLT, kNumerical
#include <treelite/c_api.h> // for ModelHandle
#include <treelite/tree.h> // for Tree, Model, ModelImpl, ModelParam
#include <omp.h> // for omp
#include <algorithm> // for std::max
#include <bitset> // for std::bitset
#include <cmath> // for NAN
#include <cstddef> // for std::size_t
#include <cstdint> // for uint8_t
#include <iosfwd> // for ios, stringstream
#include <limits> // for std::numeric_limits
#include <stack> // for std::stack
#include <string> // for std::string
#include <type_traits> // for std::is_same
namespace ML {
namespace fil {
namespace tl = treelite;
/// Debug printer for a cat_sets_owner: dumps the categorical bit pool as
/// binary byte groups, then the per-feature maximum matching category
/// (fid_num_cats stores count, so max category = count - 1).
std::ostream& operator<<(std::ostream& os, const cat_sets_owner& cso)
{
  os << "\nbits { ";
  for (std::size_t byte_idx = 0; byte_idx < cso.bits.size(); ++byte_idx) {
    os << std::bitset<BITS_PER_BYTE>(cso.bits[byte_idx]) << " ";
  }
  os << " }\nmax_matching {";
  for (std::size_t fid = 0; fid < cso.fid_num_cats.size(); ++fid) {
    os << static_cast<int>(cso.fid_num_cats[fid]) - 1 << " ";
  }
  os << " }";
  return os;
}
/// Treelite stores the root of every tree at node id 0, so no lookup is needed.
template <typename T, typename L>
int tree_root(const tl::Tree<T, L>& tree)
{
  constexpr int TREELITE_ROOT_ID = 0;
  return TREELITE_ROOT_ID;
}
// a no-op placeholder for values and callables alike: swallows any arguments
// and does nothing, so `empty` can stand in both for an unused traversal state
// and for an omitted visitor callback in walk_tree
struct empty {
  template <typename... Args>
  void operator()(Args...)
  {
  }
};
/** walk a Treelite tree, visiting each inner node with visit_inner and each leaf node with
    visit_leaf. See walk_tree::element::state documentation for how TraversalState is retained
during traversal. Any per-tree state during traversal should be captured by the lambdas themselves.
    visit_inner(int node_id, TraversalState state) should return a pair of new states, one for
    each child node. visit_leaf(int, TraversalState) returns nothing. If visit_inner only takes
    a node id (no state), both visitors are wrapped to thread an `empty` dummy state through.
**/
template <typename T, typename L, typename InnerFunc, typename LeafFunc = empty>
inline void walk_tree(const tl::Tree<T, L>& tree,
                      InnerFunc visit_inner,
                      LeafFunc visit_leaf = empty())
{
  if constexpr (std::is_invocable<InnerFunc, int>()) {
    /// wrapper for empty path state
    walk_tree(
      tree,
      [&](int nid, empty val) {
        visit_inner(nid);
        return std::pair<empty, empty>();
      },
      [&](int nid, empty val) { visit_leaf(nid); });
  } else {
    // per-child state type is whatever visit_inner returns as pair members
    using TraversalState = decltype(visit_inner(int(), {}).first);
    /// needed to visit a node
    struct element {
      int tl_node_id;
      /// Retained while visiting nodes on a single path from root to leaf.
      /// This generalizes the node index that's carried over during inference tree traversal.
      TraversalState state;
    };
    // iterative DFS: descend into right children directly, stack left subtrees
    std::stack<element> stack;
    stack.push(element{tree_root(tree), TraversalState()});
    while (!stack.empty()) {
      element i = stack.top();
      stack.pop();
      while (!tree.IsLeaf(i.tl_node_id)) {
        auto [left_state, right_state] = visit_inner(i.tl_node_id, i.state);
        stack.push(element{tree.LeftChild(i.tl_node_id), left_state});
        i = element{tree.RightChild(i.tl_node_id), right_state};
      }
      visit_leaf(i.tl_node_id, i.state);
    }
  }
}
/// maximum leaf depth of a single treelite tree (0 for a lone-leaf tree);
/// aborts if any path exceeds DEPTH_LIMIT, which indicates malformed input
template <typename T, typename L>
inline int max_depth(const tl::Tree<T, L>& tree)
{
  int tree_depth = 0;
  walk_tree(
    tree,
    [](int node_id, int node_depth) {
      // trees of this depth aren't used, so it most likely means bad input data,
      // e.g. cycles in the forest
      constexpr int DEPTH_LIMIT = 500;
      ASSERT(node_depth < DEPTH_LIMIT, "node_depth limit reached, might be a cycle in the tree");
      // both children live one level deeper
      return std::pair(node_depth + 1, node_depth + 1);
    },
    [&](int node_id, int node_depth) { tree_depth = std::max(node_depth, tree_depth); });
  return tree_depth;
}
/// maximum leaf depth across all trees of the model, computed in parallel
/// with an OpenMP max reduction over the per-tree depths
template <typename T, typename L>
int max_depth(const tl::ModelImpl<T, L>& model)
{
  int depth = 0;
  const auto& trees = model.trees;
#pragma omp parallel for reduction(max : depth)
  for (size_t i = 0; i < trees.size(); ++i) {
    const auto& tree = trees[i];
    depth = std::max(depth, max_depth(tree));
  }
  return depth;
}
void elementwise_combine(std::vector<cat_feature_counters>& dst,
const std::vector<cat_feature_counters>& extra)
{
std::transform(dst.begin(), dst.end(), extra.begin(), dst.begin(), cat_feature_counters::combine);
}
// constructs a vector of size n_cols (number of features, or columns) from a Treelite tree,
// where each feature has a maximum matching category and node count (from this tree alone).
template <typename T, typename L>
inline std::vector<cat_feature_counters> cat_counter_vec(const tl::Tree<T, L>& tree, int n_cols)
{
  std::vector<cat_feature_counters> res(n_cols);
  walk_tree(tree, [&](int node_id) {
    if (tree.SplitType(node_id) == tl::SplitFeatureType::kCategorical) {
      std::vector<std::uint32_t> mmv = tree.MatchingCategories(node_id);
      int max_matching_cat;
      if (mmv.size() > 0) {
        // in `struct cat_feature_counters` and GPU structures, int(max_matching_cat) is safe
        // because all precise int floats fit into ints, which are asserted to be 32 bits
        // NOTE(review): .back() as the maximum assumes treelite returns matching
        // categories sorted ascending — confirm against the treelite API
        max_matching_cat = mmv.back();
        ASSERT(max_matching_cat <= MAX_FIL_INT_FLOAT,
               "FIL cannot infer on "
               "more than %d matching categories",
               MAX_FIL_INT_FLOAT);
      } else {
        // categorical node with an empty category list: no matching category
        max_matching_cat = -1;
      }
      // fold this node into the per-feature counter (max category, node count)
      cat_feature_counters& counters = res[tree.SplitIndex(node_id)];
      counters = cat_feature_counters::combine(counters, cat_feature_counters{max_matching_cat, 1});
    }
  });
  return res;
}
// computes the overall categorical bit pool size (in bytes) needed by one tree
// imported from Treelite: the sum of mask sizes over all categorical nodes
// that have a non-empty category list
template <typename T, typename L>
inline std::size_t bit_pool_size(const tl::Tree<T, L>& tree, const categorical_sets& cat_sets)
{
  std::size_t total_bytes = 0;
  walk_tree(tree, [&](int node_id) {
    bool is_categorical = tree.SplitType(node_id) == tl::SplitFeatureType::kCategorical;
    if (is_categorical && !tree.MatchingCategories(node_id).empty()) {
      total_bytes += cat_sets.sizeof_mask(tree.SplitIndex(node_id));
    }
  });
  return total_bytes;
}
/// sizes and allocates the owner of all categorical bit masks for the model:
/// first accumulates per-feature category counters over all trees (parallel,
/// custom OpenMP reduction), then computes each tree's bit pool size from the
/// resulting per-feature mask sizes
template <typename T, typename L>
cat_sets_owner allocate_cat_sets_owner(const tl::ModelImpl<T, L>& model)
{
// user-defined reduction: merge per-thread counter vectors elementwise
#pragma omp declare reduction( \
  cat_counter_vec_red : std::vector<cat_feature_counters> : elementwise_combine( \
    omp_out, omp_in)) initializer(omp_priv = omp_orig)
  const auto& trees = model.trees;
  cat_sets_owner cat_sets;
  std::vector<cat_feature_counters> counters(model.num_feature);
#pragma omp parallel for reduction(cat_counter_vec_red : counters)
  for (std::size_t i = 0; i < trees.size(); ++i) {
    elementwise_combine(counters, cat_counter_vec(trees[i], model.num_feature));
  }
  // per-feature counters determine each feature's mask size
  cat_sets.consume_counters(counters);
  // second pass: bit pool sizes depend on the mask sizes computed above
  std::vector<std::size_t> bit_pool_sizes(trees.size());
#pragma omp parallel for
  for (std::size_t i = 0; i < trees.size(); ++i) {
    bit_pool_sizes[i] = bit_pool_size(trees[i], cat_sets.accessor());
  }
  cat_sets.consume_bit_pool_sizes(bit_pool_sizes);
  return cat_sets;
}
/** Translates a treelite comparison operator into FIL's fixed convention by
 *  nudging the threshold and/or flagging a child swap.
 *  @param pthreshold in/out split threshold; may be bumped to the next
 *         representable value to turn <= into < (and > into >=)
 *  @param swap_child_nodes in/out; toggled when left/right must be exchanged
 *  @param comparison_op treelite operator; only <, >, <=, >= are supported
 */
template <typename real_t>
void adjust_threshold(real_t* pthreshold, bool* swap_child_nodes, tl::Operator comparison_op)
{
  // in treelite (take left node if val [op] threshold),
  // the meaning of the condition is reversed compared to FIL;
  // thus, "<" in treelite corresponds to comparison ">=" used by FIL
  // https://github.com/dmlc/treelite/blob/master/include/treelite/tree.h#L243
  if (isnan(*pthreshold)) {
    *swap_child_nodes = !*swap_child_nodes;
    return;
  }
  switch (comparison_op) {
    case tl::Operator::kLT: break;
    case tl::Operator::kLE:
      // x <= y is equivalent to x < y', where y' is the next representable value.
      // std::nextafter (not nextafterf) so double thresholds keep full precision.
      *pthreshold = std::nextafter(*pthreshold, std::numeric_limits<real_t>::infinity());
      break;
    case tl::Operator::kGT:
      // x > y is equivalent to x >= y', where y' is the next representable value
      // left and right still need to be swapped
      *pthreshold = std::nextafter(*pthreshold, std::numeric_limits<real_t>::infinity());
      [[fallthrough]];
    case tl::Operator::kGE:
      // swap left and right
      *swap_child_nodes = !*swap_child_nodes;
      break;
    default: ASSERT(false, "only <, >, <= and >= comparisons are supported");
  }
}
/** if the vector consists of zeros and a single one, return the position
    for the one (assumed class label). Else, asserts false.
    If the vector contains a NAN, asserts false (NAN compares unequal to both
    0.0 and 1.0). */
template <typename L>
int find_class_label_from_one_hot(L* vector, int len)
{
  int label_pos = 0;  // only meaningful once found_label is set
  bool found_label = false;
  for (int pos = 0; pos < len; ++pos) {
    L entry = vector[pos];
    if (entry == static_cast<L>(1.0)) {
      ASSERT(!found_label, "label vector contains multiple 1.0f");
      label_pos = pos;
      found_label = true;
    } else {
      ASSERT(entry == static_cast<L>(0.0),
             "label vector contains values other than 0.0 and 1.0");
    }
  }
  ASSERT(found_label, "did not find 1.0f in vector");
  return label_pos;
}
/** fills in the payload of a FIL leaf node from a treelite leaf, according to
 *  the forest's leaf algorithm:
 *  - CATEGORICAL_LEAF: decodes the one-hot leaf vector into a class index
 *  - VECTOR_LEAF: appends the per-class vector to *vector_leaf, stores its slot
 *  - FLOAT_UNARY_BINARY / GROVE_PER_CLASS: stores the scalar leaf value
 *  @param leaf_counter in/out (VECTOR_LEAF only): leaf vectors consumed so far
 */
template <typename fil_node_t, typename T, typename L>
void tl2fil_leaf_payload(fil_node_t* fil_node,
                         int fil_node_id,
                         const tl::Tree<T, L>& tl_tree,
                         int tl_node_id,
                         const forest_params_t& forest_params,
                         std::vector<typename fil_node_t::real_type>* vector_leaf,
                         size_t* leaf_counter)
{
  auto vec = tl_tree.LeafVector(tl_node_id);
  switch (forest_params.leaf_algo) {
    case leaf_algo_t::CATEGORICAL_LEAF:
      ASSERT(vec.size() == static_cast<std::size_t>(forest_params.num_classes),
             "inconsistent number of classes in treelite leaves");
      fil_node->val.idx = find_class_label_from_one_hot(&vec[0], vec.size());
      break;
    case leaf_algo_t::VECTOR_LEAF: {
      ASSERT(vec.size() == static_cast<std::size_t>(forest_params.num_classes),
             "inconsistent number of classes in treelite leaves");
      // store the slot index; the vector itself lives in *vector_leaf
      fil_node->val.idx = *leaf_counter;
      for (int k = 0; k < forest_params.num_classes; k++) {
        (*vector_leaf)[*leaf_counter * forest_params.num_classes + k] = vec[k];
      }
      (*leaf_counter)++;
      break;
    }
    case leaf_algo_t::FLOAT_UNARY_BINARY:
    case leaf_algo_t::GROVE_PER_CLASS:
      fil_node->val.f = static_cast<typename fil_node_t::real_type>(tl_tree.LeafValue(tl_node_id));
      // scalar-leaf algorithms require that no leaf carries a vector
      ASSERT(!tl_tree.HasLeafVector(tl_node_id),
             "some but not all treelite leaves have leaf_vector()");
      break;
    default: ASSERT(false, "internal error: invalid leaf_algo");
  };
}
/// result of converting one treelite inner node: the FIL node itself, plus
/// whether the children must be swapped to preserve the comparison semantics
template <typename fil_node_t>
struct conversion_state {
  fil_node_t node;
  bool swap_child_nodes;
};
// converts one treelite inner (split) node into a FIL node. Modifies cat_sets:
// fills in this node's category bit mask and advances *bit_pool_offset by the
// mask size, so calls for one tree must happen in traversal order.
template <typename fil_node_t, typename T, typename L>
conversion_state<fil_node_t> tl2fil_inner_node(int fil_left_child,
                                               const tl::Tree<T, L>& tree,
                                               int tl_node_id,
                                               cat_sets_owner* cat_sets,
                                               std::size_t* bit_pool_offset)
{
  using real_t = typename fil_node_t::real_type;
  int tl_left = tree.LeftChild(tl_node_id), tl_right = tree.RightChild(tl_node_id);
  val_t<real_t> split = {.f = std::numeric_limits<real_t>::quiet_NaN()};
  int feature_id = tree.SplitIndex(tl_node_id);
  bool is_categorical = tree.SplitType(tl_node_id) == tl::SplitFeatureType::kCategorical &&
                        tree.MatchingCategories(tl_node_id).size() > 0;
  bool swap_child_nodes = false;
  if (tree.SplitType(tl_node_id) == tl::SplitFeatureType::kNumerical) {
    split.f = static_cast<real_t>(tree.Threshold(tl_node_id));
    // translate treelite's comparison operator into FIL's fixed convention
    adjust_threshold(&split.f, &swap_child_nodes, tree.ComparisonOp(tl_node_id));
  } else if (tree.SplitType(tl_node_id) == tl::SplitFeatureType::kCategorical) {
    // for FIL, the list of categories is always for the right child
    swap_child_nodes = !tree.CategoriesListRightChild(tl_node_id);
    if (tree.MatchingCategories(tl_node_id).size() > 0) {
      // claim this node's slice of the tree's bit pool
      int sizeof_mask = cat_sets->accessor().sizeof_mask(feature_id);
      split.idx = *bit_pool_offset;
      *bit_pool_offset += sizeof_mask;
      // cat_sets->bits have been zero-initialized
      uint8_t* bits = &cat_sets->bits[split.idx];
      for (std::uint32_t category : tree.MatchingCategories(tl_node_id)) {
        bits[category / BITS_PER_BYTE] |= 1 << (category % BITS_PER_BYTE);
      }
    } else {
      // always branch left in FIL. Already accounted for Treelite branching direction above.
      split.f = std::numeric_limits<real_t>::quiet_NaN();
    }
  } else {
    ASSERT(false, "only numerical and categorical split nodes are supported");
  }
  // swapping children also flips which child takes missing values
  bool default_left = tree.DefaultLeft(tl_node_id) ^ swap_child_nodes;
  fil_node_t node(
    val_t<real_t>{}, split, feature_id, default_left, false, is_categorical, fil_left_child);
  return conversion_state<fil_node_t>{node, swap_child_nodes};
}
/** converts one treelite tree into FIL nodes written at nodes[root ...];
 *  returns root. For dense storage the children of node i live at 2i+1 / 2i+2
 *  relative to the tree root; for sparse storage they are allocated in
 *  consecutive pairs via sparse_index. */
template <typename fil_node_t, typename T, typename L>
int tree2fil(std::vector<fil_node_t>& nodes,
             int root,
             const tl::Tree<T, L>& tree,
             std::size_t tree_idx,
             const forest_params_t& forest_params,
             std::vector<typename fil_node_t::real_type>* vector_leaf,
             std::size_t* leaf_counter,
             cat_sets_owner* cat_sets)
{
  // needed if the node is sparse, to place within memory for the FIL tree
  int sparse_index = 1;
  walk_tree(
    tree,
    [&](int node_id, int fil_node_id) {
      // reserve space for child nodes
      // left is the offset of the left child node relative to the tree root
      // in the array of all nodes of the FIL sparse forest
      int left = node_traits<fil_node_t>::IS_DENSE ? 2 * fil_node_id + 1 : sparse_index;
      sparse_index += 2;
      conversion_state<fil_node_t> cs = tl2fil_inner_node<fil_node_t>(
        left, tree, node_id, cat_sets, &cat_sets->bit_pool_offsets[tree_idx]);
      nodes[root + fil_node_id] = cs.node;
      // if treelite's left child maps to FIL's right child, swap the child slots
      return cs.swap_child_nodes ? std::pair(left + 1, left) : std::pair(left, left + 1);
    },
    [&](int node_id, int fil_node_id) {
      // construct the leaf node; its payload is filled in by tl2fil_leaf_payload
      nodes[root + fil_node_id] = fil_node_t({}, {}, 0, false, true, false, 0);
      tl2fil_leaf_payload(&nodes[root + fil_node_id],
                          root + fil_node_id,
                          tree,
                          node_id,
                          forest_params,
                          vector_leaf,
                          leaf_counter);
    });
  return root;
}
// per-depth-level node counts used to build the forest depth histogram
struct level_entry {
  int n_branch_nodes, n_leaves;
};
// accumulates, into hist, the number of branch nodes and leaves at each depth
// of the given tree; hist grows on demand and may already hold counts from
// previously processed trees
template <typename T, typename L>
inline void node_depth_hist(const tl::Tree<T, L>& tree, std::vector<level_entry>& hist)
{
  // make sure hist has a (zero-initialized) entry for this depth
  auto ensure_level = [&](std::size_t depth) {
    if (depth >= hist.size()) hist.resize(depth + 1, {0, 0});
  };
  walk_tree(
    tree,
    [&](int node_id, std::size_t depth) {
      ensure_level(depth);
      hist[depth].n_branch_nodes++;
      return std::pair(depth + 1, depth + 1);
    },
    [&](int node_id, std::size_t depth) {
      ensure_level(depth);
      hist[depth].n_leaves++;
    });
}
/// builds a human-readable summary of the forest shape: per-depth histogram,
/// node totals, average nodes per tree, leaf-depth statistics, and an FNV
/// fingerprint of the histogram for cheap shape comparison
template <typename T, typename L>
std::stringstream depth_hist_and_max(const tl::ModelImpl<T, L>& model)
{
  using namespace std;
  vector<level_entry> hist;
  // accumulate the histogram over all trees
  for (const auto& tree : model.trees)
    node_depth_hist(tree, hist);
  int min_leaf_depth = -1, leaves_times_depth = 0, total_branches = 0, total_leaves = 0;
  stringstream forest_shape;
  // stash default stream formatting so setw/setprecision effects can be undone
  ios default_state(nullptr);
  default_state.copyfmt(forest_shape);
  forest_shape << "Depth histogram:" << endl << "depth branches leaves nodes" << endl;
  for (std::size_t level = 0; level < hist.size(); ++level) {
    level_entry e = hist[level];
    forest_shape << setw(5) << level << setw(9) << e.n_branch_nodes << setw(7) << e.n_leaves
                 << setw(8) << e.n_branch_nodes + e.n_leaves << endl;
    forest_shape.copyfmt(default_state);
    // the shallowest level that has any leaf
    if (e.n_leaves && min_leaf_depth == -1) min_leaf_depth = level;
    leaves_times_depth += e.n_leaves * level;
    total_branches += e.n_branch_nodes;
    total_leaves += e.n_leaves;
  }
  int total_nodes = total_branches + total_leaves;
  forest_shape << "Total: branches: " << total_branches << " leaves: " << total_leaves
               << " nodes: " << total_nodes << endl;
  // hist[0].n_branch_nodes counts one root per tree, i.e. the number of trees
  forest_shape << "Avg nodes per tree: " << setprecision(2)
               << total_nodes / static_cast<double>(hist[0].n_branch_nodes) << endl;
  forest_shape.copyfmt(default_state);
  forest_shape << "Leaf depth: min: " << min_leaf_depth << " avg: " << setprecision(2) << fixed
               << leaves_times_depth / static_cast<double>(total_leaves)
               << " max: " << hist.size() - 1 << endl;
  forest_shape.copyfmt(default_state);
  // fingerprint the raw histogram bytes
  vector<char> hist_bytes(hist.size() * sizeof(hist[0]));
  memcpy(&hist_bytes[0], &hist[0], hist_bytes.size());
  // std::hash does not promise to not be identity. Xoring plain numbers which
  // add up to one another erases information, hence, std::hash is unsuitable here
  forest_shape << "Depth histogram fingerprint: " << hex
               << fowler_noll_vo_fingerprint64_32(hist_bytes.begin(), hist_bytes.end()) << endl;
  forest_shape.copyfmt(default_state);
  return forest_shape;
}
/// size of the leaf vectors in the model, or 0 if leaves hold scalar values;
/// probes one leaf of the first tree (the rightmost one), assuming all leaves
/// are consistent — tl2fil_leaf_payload asserts this during conversion
template <typename T, typename L>
size_t tl_leaf_vector_size(const tl::ModelImpl<T, L>& model)
{
  const tl::Tree<T, L>& tree = model.trees[0];
  // walk the rightmost path down to a leaf
  int node_key = tree_root(tree);
  while (!tree.IsLeaf(node_key)) {
    node_key = tree.RightChild(node_key);
  }
  return tree.HasLeafVector(node_key) ? tree.LeafVector(node_key).size() : 0;
}
// tl2fil_common is the part of conversion from a treelite model
// common for dense and sparse forests: picks the leaf algorithm, number of
// classes, and output transform from the treelite metadata and user params
template <typename T, typename L>
void tl2fil_common(forest_params_t* params,
                   const tl::ModelImpl<T, L>& model,
                   const treelite_params_t* tl_params)
{
  // fill in forest-independent params
  params->algo = tl_params->algo;
  params->threshold = tl_params->threshold;
  // fill in forest-dependent params
  params->depth = max_depth(model);  // also checks for cycles
  const tl::ModelParam& param = model.param;
  // assuming either all leaves use the .leaf_vector() or all leaves use .leaf_value()
  size_t leaf_vec_size = tl_leaf_vector_size(model);
  std::string pred_transform(param.pred_transform);
  if (leaf_vec_size > 0) {
    // vector leaves: one probability-like value per class in each leaf
    ASSERT(leaf_vec_size == model.task_param.num_class, "treelite model inconsistent");
    params->num_classes = leaf_vec_size;
    params->leaf_algo = leaf_algo_t::VECTOR_LEAF;
    ASSERT(pred_transform == "max_index" || pred_transform == "identity_multiclass",
           "only max_index and identity_multiclass values of pred_transform "
           "are supported for multi-class models");
  } else {
    if (model.task_param.num_class > 1) {
      // scalar leaves + multiple classes: xgboost-style one-grove-per-class
      params->num_classes = static_cast<int>(model.task_param.num_class);
      ASSERT(tl_params->output_class, "output_class==true is required for multi-class models");
      ASSERT(pred_transform == "identity_multiclass" || pred_transform == "max_index" ||
               pred_transform == "softmax" || pred_transform == "multiclass_ova",
             "only identity_multiclass, max_index, multiclass_ova and softmax "
             "values of pred_transform are supported for xgboost-style "
             "multi-class classification models.");
      // this function should not know how many threads per block will be used
      params->leaf_algo = leaf_algo_t::GROVE_PER_CLASS;
    } else {
      // scalar leaves, binary classification or regression
      params->num_classes = tl_params->output_class ? 2 : 1;
      ASSERT(pred_transform == "sigmoid" || pred_transform == "identity",
             "only sigmoid and identity values of pred_transform "
             "are supported for binary classification and regression models.");
      params->leaf_algo = leaf_algo_t::FLOAT_UNARY_BINARY;
    }
  }
  params->num_cols = model.num_feature;
  ASSERT(param.sigmoid_alpha == 1.0f, "sigmoid_alpha not supported");
  params->global_bias = param.global_bias;
  params->output = output_t::RAW;
  /** output_t::CLASS denotes using a threshold in FIL, when
      predict_proba == false. For all multiclass models, the best class is
      selected using argmax instead. This happens when either
      leaf_algo == CATEGORICAL_LEAF or num_classes > 2.
  **/
  if (tl_params->output_class && params->leaf_algo != CATEGORICAL_LEAF &&
      params->num_classes <= 2) {
    params->output = output_t(params->output | output_t::CLASS);
  }
  // "random forest" in treelite means tree output averaging
  if (model.average_tree_output) { params->output = output_t(params->output | output_t::AVG); }
  if (pred_transform == "sigmoid" || pred_transform == "multiclass_ova") {
    params->output = output_t(params->output | output_t::SIGMOID);
  }
  if (pred_transform == "softmax") params->output = output_t(params->output | output_t::SOFTMAX);
  params->num_trees = model.trees.size();
  params->blocks_per_sm = tl_params->blocks_per_sm;
  params->threads_per_tree = tl_params->threads_per_tree;
  params->n_items = tl_params->n_items;
}
/// layout-specific sanity checks: sparse_node8 packs the feature id and the
/// left-child offset into narrow bit fields, so the model must fit those
/// limits; all other node layouts need no checks
template <typename node_t>
template <typename threshold_t, typename leaf_t>
void node_traits<node_t>::check(const treelite::ModelImpl<threshold_t, leaf_t>& model)
{
  if constexpr (!std::is_same<node_t, sparse_node8>()) return;
  const int MAX_FEATURES = 1 << sparse_node8::FID_NUM_BITS;
  const int MAX_TREE_NODES = (1 << sparse_node8::LEFT_NUM_BITS) - 1;
  // check the number of features
  int num_features = model.num_feature;
  ASSERT(num_features <= MAX_FEATURES,
         "model has %d features, "
         "but only %d supported for 8-byte sparse nodes",
         num_features,
         MAX_FEATURES);
  // check the number of tree nodes
  const std::vector<tl::Tree<threshold_t, leaf_t>>& trees = model.trees;
  for (std::size_t i = 0; i < trees.size(); ++i) {
    int num_nodes = trees[i].num_nodes;
    ASSERT(num_nodes <= MAX_TREE_NODES,
           "tree %zu has %d nodes, "
           "but only %d supported for 8-byte sparse nodes",
           i,
           num_nodes,
           MAX_TREE_NODES);
  }
}
/// holds the intermediate CPU-RAM representation of a FIL forest while it is
/// being converted from a treelite model; init() does the conversion,
/// init_forest() uploads the result into a GPU forest object
template <typename fil_node_t, typename threshold_t, typename leaf_t>
struct tl2fil_t {
  using real_t = typename fil_node_t::real_type;
  std::vector<int> roots_;              // per-tree offsets into nodes_
  std::vector<fil_node_t> nodes_;       // all trees' nodes, concatenated
  std::vector<real_t> vector_leaf_;     // flattened leaf vectors (VECTOR_LEAF only)
  forest_params_t params_;
  cat_sets_owner cat_sets_;             // categorical bit masks for the whole forest
  const tl::ModelImpl<threshold_t, leaf_t>& model_;
  const treelite_params_t& tl_params_;
  tl2fil_t(const tl::ModelImpl<threshold_t, leaf_t>& model_, const treelite_params_t& tl_params_)
    : model_(model_), tl_params_(tl_params_)
  {
  }
  // converts the treelite model into the CPU-RAM members above
  void init()
  {
    static const bool IS_DENSE = node_traits<fil_node_t>::IS_DENSE;
    tl2fil_common(&params_, model_, &tl_params_);
    node_traits<fil_node_t>::check(model_);
    std::size_t num_trees = model_.trees.size();
    std::size_t total_nodes = 0;
    roots_.reserve(num_trees);
    for (auto& tree : model_.trees) {
      roots_.push_back(total_nodes);
      // dense trees reserve space for a full tree of params_.depth levels
      total_nodes += IS_DENSE ? tree_num_nodes(params_.depth) : tree.num_nodes;
    }
    if (params_.leaf_algo == VECTOR_LEAF) {
      // upper bound: a binary tree with n nodes has at most (n + 1) / 2 leaves
      std::size_t max_leaves = IS_DENSE ? num_trees * (tree_num_nodes(params_.depth) + 1) / 2
                                        : (total_nodes + num_trees) / 2;
      vector_leaf_.resize(max_leaves * params_.num_classes);
    }
    cat_sets_ = allocate_cat_sets_owner(model_);
    nodes_.resize(total_nodes);
// convert the nodes_; each tree writes a disjoint range, so this is safe
#pragma omp parallel for
    for (std::size_t tree_idx = 0; tree_idx < num_trees; ++tree_idx) {
      // Max number of leaves processed so far
      size_t leaf_counter = (roots_[tree_idx] + tree_idx) / 2;
      tree2fil(nodes_,
               roots_[tree_idx],
               model_.trees[tree_idx],
               tree_idx,
               params_,
               &vector_leaf_,
               &leaf_counter,
               &cat_sets_);
    }
    params_.num_nodes = nodes_.size();
  }
  /// initializes FIL forest object, to be ready to infer
  void init_forest(const raft::handle_t& handle, forest_t<real_t>* pforest)
  {
    ML::fil::init(
      handle, pforest, cat_sets_.accessor(), vector_leaf_, roots_.data(), nodes_.data(), &params_);
    // sync is necessary as nodes_ are used in init(),
    // but destructed at the end of this function
    handle.sync_stream(handle.get_stream());
    if (tl_params_.pforest_shape_str) {
      *tl_params_.pforest_shape_str = sprintf_shape(model_, nodes_, roots_, cat_sets_);
    }
  }
};
/// Converts a treelite model into a FIL forest of the given node layout:
/// builds the intermediate CPU representation, then uploads it to the GPU.
template <typename fil_node_t, typename threshold_t, typename leaf_t>
void convert(const raft::handle_t& handle,
             forest_t<typename fil_node_t::real_type>* pforest,
             const tl::ModelImpl<threshold_t, leaf_t>& model,
             const treelite_params_t& tl_params)
{
  tl2fil_t<fil_node_t, threshold_t, leaf_t> converter(model, tl_params);
  converter.init();
  converter.init_forest(handle, pforest);
}
/// whether FIL instantiates nodes/trees/forests for this real type
template <typename real_t>
constexpr bool type_supported()
{
  // not using std::is_floating_point because we did not instantiate fp16-based nodes/trees/forests
  return std::is_same<real_t, float>::value || std::is_same<real_t, double>::value;
}
/// selects the inference precision (possibly from the model's own types when
/// PRECISION_NATIVE is requested), initializes the matching alternative of the
/// forest_variant, and delegates to the precision-typed from_treelite below
template <typename threshold_t, typename leaf_t>
void from_treelite(const raft::handle_t& handle,
                   forest_variant* pforest_variant,
                   const tl::ModelImpl<threshold_t, leaf_t>& model,
                   const treelite_params_t* tl_params)
{
  precision_t precision = tl_params->precision;
  // choose the precision based on model if required
  if (precision == PRECISION_NATIVE) {
    // float32 only if both threshold and leaf types promote to float
    precision = std::is_same_v<decltype(threshold_t(0) + leaf_t(0)), float> ? PRECISION_FLOAT32
                                                                            : PRECISION_FLOAT64;
  }
  switch (precision) {
    case PRECISION_FLOAT32: {
      // activate the float32 alternative of the variant before taking its address
      *pforest_variant = (forest_t<float>)nullptr;
      forest_t<float>* pforest = &std::get<forest_t<float>>(*pforest_variant);
      from_treelite(handle, pforest, model, tl_params);
      break;
    }
    case PRECISION_FLOAT64: {
      *pforest_variant = (forest_t<double>)nullptr;
      forest_t<double>* pforest = &std::get<forest_t<double>>(*pforest_variant);
      from_treelite(handle, pforest, model, tl_params);
      break;
    }
    default:
      ASSERT(false,
             "bad value of tl_params->precision, must be one of "
             "PRECISION_{NATIVE,FLOAT32,FLOAT64}");
  }
}
/// converts a treelite model into a forest of the requested real type,
/// choosing a storage layout (dense/sparse) automatically when asked to
template <typename threshold_t, typename leaf_t, typename real_t>
void from_treelite(const raft::handle_t& handle,
                   forest_t<real_t>* pforest,
                   const tl::ModelImpl<threshold_t, leaf_t>& model,
                   const treelite_params_t* tl_params)
{
  // Invariants on threshold and leaf types
  static_assert(type_supported<threshold_t>(),
                "Model must contain float32 or float64 thresholds for splits");
  ASSERT(type_supported<leaf_t>(), "Models with integer leaf output are not yet supported");
  storage_type_t storage_type = tl_params->storage_type;
  // build dense trees by default
  if (storage_type == storage_type_t::AUTO) {
    if (tl_params->algo == algo_t::ALGO_AUTO || tl_params->algo == algo_t::NAIVE) {
      int depth = max_depth(model);
      // max 2**25 dense nodes, 256 MiB dense model size. Categorical mask size is unlimited and not
      // affected by storage format.
      const int LOG2_MAX_DENSE_NODES = 25;
      // dense node count ~= num_trees * 2^(depth+1); compare in log space
      int log2_num_dense_nodes = depth + 1 + int(ceil(std::log2(model.trees.size())));
      storage_type = log2_num_dense_nodes > LOG2_MAX_DENSE_NODES ? storage_type_t::SPARSE
                                                                 : storage_type_t::DENSE;
    } else {
      // only dense storage is supported for other algorithms
      storage_type = storage_type_t::DENSE;
    }
  }
  switch (storage_type) {
    case storage_type_t::DENSE:
      convert<dense_node<real_t>>(handle, pforest, model, *tl_params);
      break;
    case storage_type_t::SPARSE:
      convert<sparse_node16<real_t>>(handle, pforest, model, *tl_params);
      break;
    case storage_type_t::SPARSE8:
      // SPARSE8 is only supported for float32
      if constexpr (std::is_same_v<real_t, float>) {
        convert<sparse_node8>(handle, pforest, model, *tl_params);
      } else {
        ASSERT(false, "SPARSE8 is only supported for float32 treelite models");
      }
      break;
    default: ASSERT(false, "tl_params->sparse must be one of AUTO, DENSE or SPARSE");
  }
}
/// C-API entry point: unwraps the opaque ModelHandle and dispatches on the
/// model's concrete threshold/leaf types
void from_treelite(const raft::handle_t& handle,
                   forest_variant* pforest,
                   ModelHandle model,
                   const treelite_params_t* tl_params)
{
  const tl::Model& model_ref = *(tl::Model*)model;
  model_ref.Dispatch([&](const auto& model_inner) {
    // model_inner is of the concrete type tl::ModelImpl<threshold_t, leaf_t>
    from_treelite(handle, pforest, model_inner, tl_params);
  });
}
// builds a printable description of the forest (depth histogram, model size,
// categorical-node statistics); allocates caller-owned char* using malloc()
template <typename threshold_t, typename leaf_t, typename node_t>
char* sprintf_shape(const tl::ModelImpl<threshold_t, leaf_t>& model,
                    const std::vector<node_t>& nodes,
                    const std::vector<int>& trees,
                    const cat_sets_owner cat_sets)
{
  std::stringstream forest_shape = depth_hist_and_max(model);
  // roots + nodes + categorical bit pool, in MB
  double size_mb = (trees.size() * sizeof(trees.front()) + nodes.size() * sizeof(nodes.front()) +
                    cat_sets.bits.size()) /
                   1e6;
  forest_shape << storage_type_repr[node_traits<node_t>::storage_type_enum] << " model size "
               << std::setprecision(2) << size_mb << " MB" << std::endl;
  if (cat_sets.bits.size() > 0) {
    forest_shape << "number of categorical nodes for each feature id: {";
    std::size_t total_cat_nodes = 0;
    for (std::size_t n : cat_sets.n_nodes) {
      forest_shape << n << " ";
      total_cat_nodes += n;
    }
    forest_shape << "}" << std::endl << "total categorical nodes: " << total_cat_nodes << std::endl;
    forest_shape << "maximum matching category for each feature id: {";
    // fid_num_cats stores the category count, hence the - 1
    for (float fid_num_cats : cat_sets.fid_num_cats)
      forest_shape << static_cast<int>(fid_num_cats) - 1 << " ";
    forest_shape << "}" << std::endl;
  }
  // stream may be discontiguous
  std::string forest_shape_str = forest_shape.str();
  // now copy to a non-owning allocation
  char* shape_out = (char*)malloc(forest_shape_str.size() + 1);  // incl. \0
  memcpy((void*)shape_out, forest_shape_str.c_str(), forest_shape_str.size() + 1);
  return shape_out;
}
} // namespace fil
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/metrics/pairwise_distance_canberra.cuh |
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/handle.hpp>
#include <raft/distance/distance.cuh>
namespace ML {
namespace Metrics {
/**
 * @brief Computes all pairwise Canberra distances between the m rows of x and
 * the n rows of y (k features each) into dist. Dimensions and layout follow
 * raft::distance::distance conventions; isRowMajor selects the layout.
 * metric_arg is accepted for signature uniformity with the other metrics.
 */
void pairwise_distance_canberra(const raft::handle_t& handle,
                                const double* x,
                                const double* y,
                                double* dist,
                                int m,
                                int n,
                                int k,
                                bool isRowMajor,
                                double metric_arg);

/// Single-precision overload of the above.
void pairwise_distance_canberra(const raft::handle_t& handle,
                                const float* x,
                                const float* y,
                                float* dist,
                                int m,
                                int n,
                                int k,
                                bool isRowMajor,
                                float metric_arg);
} // namespace Metrics
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/metrics/pairwise_distance_hellinger.cuh |
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/handle.hpp>
#include <raft/distance/distance.cuh>
namespace ML {
namespace Metrics {
/**
 * @brief Computes all pairwise Hellinger distances between the m rows of x
 * and the n rows of y (k features each) into dist. Dimensions and layout
 * follow raft::distance::distance conventions; isRowMajor selects the layout.
 * metric_arg is accepted for signature uniformity with the other metrics.
 */
void pairwise_distance_hellinger(const raft::handle_t& handle,
                                 const double* x,
                                 const double* y,
                                 double* dist,
                                 int m,
                                 int n,
                                 int k,
                                 bool isRowMajor,
                                 double metric_arg);

/// Single-precision overload of the above.
void pairwise_distance_hellinger(const raft::handle_t& handle,
                                 const float* x,
                                 const float* y,
                                 float* dist,
                                 int m,
                                 int n,
                                 int k,
                                 bool isRowMajor,
                                 float metric_arg);
} // namespace Metrics
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/metrics/entropy.cu |
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/metrics/metrics.hpp>
#include <raft/core/handle.hpp>
#include <raft/stats/entropy.cuh>
namespace ML {
namespace Metrics {
/**
 * @brief Computes the entropy of the integer label array y (n elements) whose
 * values lie in [lower_class_range, upper_class_range], on the handle's
 * CUDA stream.
 */
double entropy(const raft::handle_t& handle,
               const int* y,
               const int n,
               const int lower_class_range,
               const int upper_class_range)
{
  // Thin wrapper: the computation is fully delegated to raft.
  return raft::stats::entropy(y, n, lower_class_range, upper_class_range, handle.get_stream());
}
} // namespace Metrics
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/metrics/trustworthiness.cu | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <raft/stats/trustworthiness_score.cuh>
#include <cuml/metrics/metrics.hpp>
#include <raft/core/handle.hpp>
#include <raft/distance/distance.cuh>
namespace ML {
namespace Metrics {
/**
* @brief Compute the trustworthiness score
*
* @param h Raft handle
* @param X Data in original dimension
* @param X_embedded Data in target dimension (embedding)
* @param n Number of samples
* @param m Number of features in high/original dimension
* @param d Number of features in low/embedded dimension
* @param n_neighbors Number of neighbors considered by trustworthiness score
* @param batchSize Batch size
* @tparam distance_type: Distance type to consider
* @return Trustworthiness score
*/
template <typename math_t, raft::distance::DistanceType distance_type>
double trustworthiness_score(const raft::handle_t& h,
                             const math_t* X,
                             math_t* X_embedded,
                             int n,
                             int m,
                             int d,
                             int n_neighbors,
                             int batchSize)
{
  // Thin wrapper: the full computation lives in raft (parameters documented
  // in the doxygen block above).
  return raft::stats::trustworthiness_score<math_t, distance_type>(
    h, X, X_embedded, n, m, d, n_neighbors, batchSize);
}

// Explicit instantiation for the only configuration instantiated here:
// float data with the L2SqrtUnexpanded distance.
template double trustworthiness_score<float, raft::distance::DistanceType::L2SqrtUnexpanded>(
  const raft::handle_t& h,
  const float* X,
  float* X_embedded,
  int n,
  int m,
  int d,
  int n_neighbors,
  int batchSize);
}; // end namespace Metrics
}; // end namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/metrics/pairwise_distance_l1.cuh |
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/handle.hpp>
#include <raft/distance/distance.cuh>
namespace ML {
namespace Metrics {
/**
 * @brief Computes all pairwise L1 (Manhattan) distances between the m rows of
 * x and the n rows of y (k features each) into dist. Dimensions and layout
 * follow raft::distance::distance conventions; isRowMajor selects the layout.
 * metric_arg is accepted for signature uniformity with the other metrics.
 */
void pairwise_distance_l1(const raft::handle_t& handle,
                          const double* x,
                          const double* y,
                          double* dist,
                          int m,
                          int n,
                          int k,
                          bool isRowMajor,
                          double metric_arg);

/// Single-precision overload of the above.
void pairwise_distance_l1(const raft::handle_t& handle,
                          const float* x,
                          const float* y,
                          float* dist,
                          int m,
                          int n,
                          int k,
                          bool isRowMajor,
                          float metric_arg);
} // namespace Metrics
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/metrics/pairwise_distance_minkowski.cu |
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "pairwise_distance_minkowski.cuh"
#include <raft/core/handle.hpp>
#include <raft/distance/distance.cuh>
#include <rmm/device_uvector.hpp>
namespace ML {
namespace Metrics {
/**
 * @brief Computes all pairwise Minkowski (Lp) distances between the m rows of
 * x and the n rows of y (k features each) into dist. metric_arg — the
 * Minkowski order p — is forwarded to raft's LpUnexpanded distance.
 */
void pairwise_distance_minkowski(const raft::handle_t& handle,
                                 const double* x,
                                 const double* y,
                                 double* dist,
                                 int m,
                                 int n,
                                 int k,
                                 bool isRowMajor,
                                 double metric_arg)
{
  raft::distance::distance<raft::distance::DistanceType::LpUnexpanded, double, double, double, int>(
    handle, x, y, dist, m, n, k, isRowMajor, metric_arg);
}

/// Single-precision overload of the above.
void pairwise_distance_minkowski(const raft::handle_t& handle,
                                 const float* x,
                                 const float* y,
                                 float* dist,
                                 int m,
                                 int n,
                                 int k,
                                 bool isRowMajor,
                                 float metric_arg)
{
  raft::distance::distance<raft::distance::DistanceType::LpUnexpanded, float, float, float, int>(
    handle, x, y, dist, m, n, k, isRowMajor, metric_arg);
}
} // namespace Metrics
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/metrics/adjusted_rand_index.cu |
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/metrics/metrics.hpp>
#include <raft/core/handle.hpp>
#include <raft/stats/adjusted_rand_index.cuh>
namespace ML {
namespace Metrics {
/**
 * @brief Computes the adjusted Rand index between two clustering label arrays
 * y and y_hat of length n. Contingency counts are accumulated as
 * unsigned long long to avoid overflow for large n.
 */
double adjusted_rand_index(const raft::handle_t& handle,
                           const int64_t* y,
                           const int64_t* y_hat,
                           const int64_t n)
{
  return raft::stats::adjusted_rand_index<int64_t, unsigned long long>(
    y, y_hat, n, handle.get_stream());
}

/// 32-bit label overload of the above.
double adjusted_rand_index(const raft::handle_t& handle,
                           const int* y,
                           const int* y_hat,
                           const int n)
{
  return raft::stats::adjusted_rand_index<int, unsigned long long>(
    y, y_hat, n, handle.get_stream());
}
} // namespace Metrics
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/metrics/pairwise_distance_euclidean.cuh |
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/handle.hpp>
#include <raft/distance/distance.cuh>
namespace ML {
namespace Metrics {
/**
 * @brief Computes all pairwise Euclidean-family distances between the m rows
 * of x and the n rows of y (k features each) into dist. Unlike the other
 * per-metric wrappers, this one takes a `metric` parameter selecting among
 * the four L2 variants (expanded/unexpanded, with/without sqrt). Dimensions
 * and layout follow raft::distance::distance conventions.
 */
void pairwise_distance_euclidean(const raft::handle_t& handle,
                                 const double* x,
                                 const double* y,
                                 double* dist,
                                 int m,
                                 int n,
                                 int k,
                                 raft::distance::DistanceType metric,
                                 bool isRowMajor,
                                 double metric_arg);

/// Single-precision overload of the above.
void pairwise_distance_euclidean(const raft::handle_t& handle,
                                 const float* x,
                                 const float* y,
                                 float* dist,
                                 int m,
                                 int n,
                                 int k,
                                 raft::distance::DistanceType metric,
                                 bool isRowMajor,
                                 float metric_arg);
} // namespace Metrics
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/metrics/pairwise_distance_cosine.cuh |
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/handle.hpp>
#include <raft/distance/distance.cuh>
namespace ML {
namespace Metrics {
/**
 * @brief Computes all pairwise cosine distances between the m rows of x and
 * the n rows of y (k features each) into dist. Dimensions and layout follow
 * raft::distance::distance conventions; isRowMajor selects the layout.
 * metric_arg is accepted for signature uniformity with the other metrics.
 */
void pairwise_distance_cosine(const raft::handle_t& handle,
                              const double* x,
                              const double* y,
                              double* dist,
                              int m,
                              int n,
                              int k,
                              bool isRowMajor,
                              double metric_arg);

/// Single-precision overload of the above.
void pairwise_distance_cosine(const raft::handle_t& handle,
                              const float* x,
                              const float* y,
                              float* dist,
                              int m,
                              int n,
                              int k,
                              bool isRowMajor,
                              float metric_arg);
} // namespace Metrics
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/metrics/pairwise_distance_chebyshev.cuh |
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/handle.hpp>
#include <raft/distance/distance.cuh>
namespace ML {
namespace Metrics {
/**
 * @brief Computes all pairwise Chebyshev (L-infinity) distances between the m
 * rows of x and the n rows of y (k features each) into dist. Dimensions and
 * layout follow raft::distance::distance conventions; isRowMajor selects the
 * layout. metric_arg is accepted for signature uniformity.
 */
void pairwise_distance_chebyshev(const raft::handle_t& handle,
                                 const double* x,
                                 const double* y,
                                 double* dist,
                                 int m,
                                 int n,
                                 int k,
                                 bool isRowMajor,
                                 double metric_arg);

/// Single-precision overload of the above.
void pairwise_distance_chebyshev(const raft::handle_t& handle,
                                 const float* x,
                                 const float* y,
                                 float* dist,
                                 int m,
                                 int n,
                                 int k,
                                 bool isRowMajor,
                                 float metric_arg);
} // namespace Metrics
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/metrics/pairwise_distance.cu |
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "pairwise_distance_canberra.cuh"
#include "pairwise_distance_chebyshev.cuh"
#include "pairwise_distance_correlation.cuh"
#include "pairwise_distance_cosine.cuh"
#include "pairwise_distance_euclidean.cuh"
#include "pairwise_distance_hamming.cuh"
#include "pairwise_distance_hellinger.cuh"
#include "pairwise_distance_jensen_shannon.cuh"
#include "pairwise_distance_kl_divergence.cuh"
#include "pairwise_distance_l1.cuh"
#include "pairwise_distance_minkowski.cuh"
#include "pairwise_distance_russell_rao.cuh"
#include <cuml/metrics/metrics.hpp>
#include <raft/core/handle.hpp>
#include <raft/distance/distance.cuh>
#include <raft/sparse/distance/distance.cuh>
namespace ML {
namespace Metrics {
/**
 * @brief Dispatches a dense double-precision pairwise-distance request to the
 * per-metric wrapper for `metric`. Each wrapper is compiled in its own
 * translation unit (see the includes at the top of this file). Throws for
 * metrics without a wrapper.
 */
void pairwise_distance(const raft::handle_t& handle,
                       const double* x,
                       const double* y,
                       double* dist,
                       int m,
                       int n,
                       int k,
                       raft::distance::DistanceType metric,
                       bool isRowMajor,
                       double metric_arg)
{
  using raft::distance::DistanceType;
  // All four L2 variants share one wrapper that receives the exact variant.
  const bool is_l2 =
    metric == DistanceType::L2Expanded || metric == DistanceType::L2SqrtExpanded ||
    metric == DistanceType::L2Unexpanded || metric == DistanceType::L2SqrtUnexpanded;
  if (is_l2) {
    pairwise_distance_euclidean(handle, x, y, dist, m, n, k, metric, isRowMajor, metric_arg);
  } else if (metric == DistanceType::CosineExpanded) {
    pairwise_distance_cosine(handle, x, y, dist, m, n, k, isRowMajor, metric_arg);
  } else if (metric == DistanceType::L1) {
    pairwise_distance_l1(handle, x, y, dist, m, n, k, isRowMajor, metric_arg);
  } else if (metric == DistanceType::Linf) {
    pairwise_distance_chebyshev(handle, x, y, dist, m, n, k, isRowMajor, metric_arg);
  } else if (metric == DistanceType::HellingerExpanded) {
    pairwise_distance_hellinger(handle, x, y, dist, m, n, k, isRowMajor, metric_arg);
  } else if (metric == DistanceType::LpUnexpanded) {
    pairwise_distance_minkowski(handle, x, y, dist, m, n, k, isRowMajor, metric_arg);
  } else if (metric == DistanceType::Canberra) {
    pairwise_distance_canberra(handle, x, y, dist, m, n, k, isRowMajor, metric_arg);
  } else if (metric == DistanceType::CorrelationExpanded) {
    pairwise_distance_correlation(handle, x, y, dist, m, n, k, isRowMajor, metric_arg);
  } else if (metric == DistanceType::HammingUnexpanded) {
    pairwise_distance_hamming(handle, x, y, dist, m, n, k, isRowMajor, metric_arg);
  } else if (metric == DistanceType::JensenShannon) {
    pairwise_distance_jensen_shannon(handle, x, y, dist, m, n, k, isRowMajor, metric_arg);
  } else if (metric == DistanceType::KLDivergence) {
    pairwise_distance_kl_divergence(handle, x, y, dist, m, n, k, isRowMajor, metric_arg);
  } else if (metric == DistanceType::RusselRaoExpanded) {
    pairwise_distance_russell_rao(handle, x, y, dist, m, n, k, isRowMajor, metric_arg);
  } else {
    THROW("Unknown or unsupported distance metric '%d'!", (int)metric);
  }
}
/**
 * @brief Dispatches a dense single-precision pairwise-distance request to the
 * per-metric wrapper for `metric`. Each wrapper is compiled in its own
 * translation unit (see the includes at the top of this file). Throws for
 * metrics without a wrapper.
 */
void pairwise_distance(const raft::handle_t& handle,
                       const float* x,
                       const float* y,
                       float* dist,
                       int m,
                       int n,
                       int k,
                       raft::distance::DistanceType metric,
                       bool isRowMajor,
                       float metric_arg)
{
  using raft::distance::DistanceType;
  // All four L2 variants share one wrapper that receives the exact variant.
  const bool is_l2 =
    metric == DistanceType::L2Expanded || metric == DistanceType::L2SqrtExpanded ||
    metric == DistanceType::L2Unexpanded || metric == DistanceType::L2SqrtUnexpanded;
  if (is_l2) {
    pairwise_distance_euclidean(handle, x, y, dist, m, n, k, metric, isRowMajor, metric_arg);
  } else if (metric == DistanceType::CosineExpanded) {
    pairwise_distance_cosine(handle, x, y, dist, m, n, k, isRowMajor, metric_arg);
  } else if (metric == DistanceType::L1) {
    pairwise_distance_l1(handle, x, y, dist, m, n, k, isRowMajor, metric_arg);
  } else if (metric == DistanceType::Linf) {
    pairwise_distance_chebyshev(handle, x, y, dist, m, n, k, isRowMajor, metric_arg);
  } else if (metric == DistanceType::HellingerExpanded) {
    pairwise_distance_hellinger(handle, x, y, dist, m, n, k, isRowMajor, metric_arg);
  } else if (metric == DistanceType::LpUnexpanded) {
    pairwise_distance_minkowski(handle, x, y, dist, m, n, k, isRowMajor, metric_arg);
  } else if (metric == DistanceType::Canberra) {
    pairwise_distance_canberra(handle, x, y, dist, m, n, k, isRowMajor, metric_arg);
  } else if (metric == DistanceType::CorrelationExpanded) {
    pairwise_distance_correlation(handle, x, y, dist, m, n, k, isRowMajor, metric_arg);
  } else if (metric == DistanceType::HammingUnexpanded) {
    pairwise_distance_hamming(handle, x, y, dist, m, n, k, isRowMajor, metric_arg);
  } else if (metric == DistanceType::JensenShannon) {
    pairwise_distance_jensen_shannon(handle, x, y, dist, m, n, k, isRowMajor, metric_arg);
  } else if (metric == DistanceType::KLDivergence) {
    pairwise_distance_kl_divergence(handle, x, y, dist, m, n, k, isRowMajor, metric_arg);
  } else if (metric == DistanceType::RusselRaoExpanded) {
    pairwise_distance_russell_rao(handle, x, y, dist, m, n, k, isRowMajor, metric_arg);
  } else {
    THROW("Unknown or unsupported distance metric '%d'!", (int)metric);
  }
}
/**
 * @brief Computes pairwise distances between two CSR matrices that share
 * n_cols columns, writing the result to dist.
 *
 * NOTE(review): the operands are deliberately swapped in the raft call below —
 * y is the left operand and the output view is (y_nrows x x_nrows). Callers
 * appear to rely on this transposed layout; confirm against the bindings
 * before changing.
 */
template <typename value_idx = int, typename value_t = float>
void pairwiseDistance_sparse(const raft::handle_t& handle,
                             value_t* x,
                             value_t* y,
                             value_t* dist,
                             value_idx x_nrows,
                             value_idx y_nrows,
                             value_idx n_cols,
                             value_idx x_nnz,
                             value_idx y_nnz,
                             value_idx* x_indptr,
                             value_idx* y_indptr,
                             value_idx* x_indices,
                             value_idx* y_indices,
                             raft::distance::DistanceType metric,
                             float metric_arg)
{
  // Output laid out as (y_nrows, x_nrows) to match the swapped call below.
  auto out = raft::make_device_matrix_view<value_t, value_idx>(dist, y_nrows, x_nrows);

  // Wrap the raw CSR arrays of x in non-owning raft views.
  auto x_structure = raft::make_device_compressed_structure_view<value_idx, value_idx, value_idx>(
    x_indptr, x_indices, x_nrows, n_cols, x_nnz);
  auto x_csr_view = raft::make_device_csr_matrix_view<const value_t>(x, x_structure);

  // Wrap the raw CSR arrays of y in non-owning raft views.
  auto y_structure = raft::make_device_compressed_structure_view<value_idx, value_idx, value_idx>(
    y_indptr, y_indices, y_nrows, n_cols, y_nnz);
  auto y_csr_view = raft::make_device_csr_matrix_view<const value_t>(y, y_structure);

  raft::sparse::distance::pairwise_distance(
    handle, y_csr_view, x_csr_view, out, metric, metric_arg);
}
/// Single-precision entry point: forwards to the int/float instantiation of
/// the templated pairwiseDistance_sparse above.
void pairwiseDistance_sparse(const raft::handle_t& handle,
                             float* x,
                             float* y,
                             float* dist,
                             int x_nrows,
                             int y_nrows,
                             int n_cols,
                             int x_nnz,
                             int y_nnz,
                             int* x_indptr,
                             int* y_indptr,
                             int* x_indices,
                             int* y_indices,
                             raft::distance::DistanceType metric,
                             float metric_arg)
{
  pairwiseDistance_sparse<int, float>(handle,
                                      x,
                                      y,
                                      dist,
                                      x_nrows,
                                      y_nrows,
                                      n_cols,
                                      x_nnz,
                                      y_nnz,
                                      x_indptr,
                                      y_indptr,
                                      x_indices,
                                      y_indices,
                                      metric,
                                      metric_arg);
}
/// Double-precision entry point: forwards to the int/double instantiation of
/// the templated pairwiseDistance_sparse above.
/// NOTE(review): metric_arg is float even in this double overload, matching
/// the template's float parameter — confirm this precision loss is intended.
void pairwiseDistance_sparse(const raft::handle_t& handle,
                             double* x,
                             double* y,
                             double* dist,
                             int x_nrows,
                             int y_nrows,
                             int n_cols,
                             int x_nnz,
                             int y_nnz,
                             int* x_indptr,
                             int* y_indptr,
                             int* x_indices,
                             int* y_indices,
                             raft::distance::DistanceType metric,
                             float metric_arg)
{
  pairwiseDistance_sparse<int, double>(handle,
                                       x,
                                       y,
                                       dist,
                                       x_nrows,
                                       y_nrows,
                                       n_cols,
                                       x_nnz,
                                       y_nnz,
                                       x_indptr,
                                       y_indptr,
                                       x_indices,
                                       y_indices,
                                       metric,
                                       metric_arg);
}
} // namespace Metrics
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/metrics/pairwise_distance_hamming.cu |
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "pairwise_distance_hamming.cuh"
#include <raft/core/handle.hpp>
#include <raft/distance/distance.cuh>
#include <rmm/device_uvector.hpp>
namespace ML {
namespace Metrics {
/**
 * @brief Computes all pairwise Hamming distances between the m rows of x and
 * the n rows of y (k features each) into dist. metric_arg is unused — it is
 * not forwarded to raft — and exists only for signature uniformity with the
 * other per-metric wrappers.
 */
void pairwise_distance_hamming(const raft::handle_t& handle,
                               const double* x,
                               const double* y,
                               double* dist,
                               int m,
                               int n,
                               int k,
                               bool isRowMajor,
                               double metric_arg)
{
  // Call the distance function
  raft::distance::
    distance<raft::distance::DistanceType::HammingUnexpanded, double, double, double, int>(
      handle, x, y, dist, m, n, k, isRowMajor);
}

/// Single-precision overload of the above.
void pairwise_distance_hamming(const raft::handle_t& handle,
                               const float* x,
                               const float* y,
                               float* dist,
                               int m,
                               int n,
                               int k,
                               bool isRowMajor,
                               float metric_arg)
{
  // Call the distance function
  raft::distance::
    distance<raft::distance::DistanceType::HammingUnexpanded, float, float, float, int>(
      handle, x, y, dist, m, n, k, isRowMajor);
}
} // namespace Metrics
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/metrics/v_measure.cu |
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/metrics/metrics.hpp>
#include <raft/core/handle.hpp>
#include <raft/stats/v_measure.cuh>
namespace ML {
namespace Metrics {
/**
 * @brief Computes the V-measure between ground-truth labels y and predicted
 * labels y_hat (n elements, values in [lower_class_range,
 * upper_class_range]); beta weights the homogeneity/completeness trade-off.
 * Runs on the handle's stream via raft.
 */
double v_measure(const raft::handle_t& handle,
                 const int* y,
                 const int* y_hat,
                 const int n,
                 const int lower_class_range,
                 const int upper_class_range,
                 double beta)
{
  return raft::stats::v_measure(
    y, y_hat, n, lower_class_range, upper_class_range, handle.get_stream(), beta);
}
} // namespace Metrics
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/metrics/pairwise_distance_hellinger.cu |
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "pairwise_distance_hellinger.cuh"
#include <raft/core/handle.hpp>
#include <raft/distance/distance.cuh>
#include <rmm/device_uvector.hpp>
namespace ML {
namespace Metrics {
/**
 * @brief Computes all pairwise Hellinger distances between the m rows of x
 * and the n rows of y (k features each) into dist. metric_arg is unused — it
 * is not forwarded to raft — and exists only for signature uniformity with
 * the other per-metric wrappers.
 */
void pairwise_distance_hellinger(const raft::handle_t& handle,
                                 const double* x,
                                 const double* y,
                                 double* dist,
                                 int m,
                                 int n,
                                 int k,
                                 bool isRowMajor,
                                 double metric_arg)
{
  // Call the distance function
  raft::distance::
    distance<raft::distance::DistanceType::HellingerExpanded, double, double, double, int>(
      handle, x, y, dist, m, n, k, isRowMajor);
}

/// Single-precision overload of the above.
void pairwise_distance_hellinger(const raft::handle_t& handle,
                                 const float* x,
                                 const float* y,
                                 float* dist,
                                 int m,
                                 int n,
                                 int k,
                                 bool isRowMajor,
                                 float metric_arg)
{
  raft::distance::
    distance<raft::distance::DistanceType::HellingerExpanded, float, float, float, int>(
      handle, x, y, dist, m, n, k, isRowMajor);
}
} // namespace Metrics
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/metrics/pairwise_distance_russell_rao.cuh |
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/handle.hpp>
#include <raft/distance/distance.cuh>
namespace ML {
namespace Metrics {
/**
 * @brief Computes all pairwise Russell-Rao distances between the m rows of x
 * and the n rows of y (k features each) into dist. Dimensions and layout
 * follow raft::distance::distance conventions; isRowMajor selects the layout.
 * metric_arg is accepted for signature uniformity with the other metrics.
 */
void pairwise_distance_russell_rao(const raft::handle_t& handle,
                                   const double* x,
                                   const double* y,
                                   double* dist,
                                   int m,
                                   int n,
                                   int k,
                                   bool isRowMajor,
                                   double metric_arg);

/// Single-precision overload of the above.
void pairwise_distance_russell_rao(const raft::handle_t& handle,
                                   const float* x,
                                   const float* y,
                                   float* dist,
                                   int m,
                                   int n,
                                   int k,
                                   bool isRowMajor,
                                   float metric_arg);
} // namespace Metrics
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/metrics/kl_divergence.cu |
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/metrics/metrics.hpp>
#include <raft/core/handle.hpp>
#include <raft/stats/kl_divergence.cuh>
namespace ML {
namespace Metrics {
/**
 * @brief Computes the KL divergence between the arrays y and y_hat (n entries
 * each) on the handle's stream via raft.
 */
double kl_divergence(const raft::handle_t& handle, const double* y, const double* y_hat, int n)
{
  return raft::stats::kl_divergence(y, y_hat, n, handle.get_stream());
}

/// Single-precision overload of the above.
float kl_divergence(const raft::handle_t& handle, const float* y, const float* y_hat, int n)
{
  return raft::stats::kl_divergence(y, y_hat, n, handle.get_stream());
}
} // namespace Metrics
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/metrics/pairwise_distance_canberra.cu |
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "pairwise_distance_canberra.cuh"
#include <raft/core/handle.hpp>
#include <raft/distance/distance.cuh>
#include <rmm/device_uvector.hpp>
namespace ML {
namespace Metrics {
/**
 * @brief Computes all pairwise Canberra distances between the m rows of x and
 * the n rows of y (k features each) into dist. metric_arg is unused — it is
 * not forwarded to raft — and exists only for signature uniformity with the
 * other per-metric wrappers.
 */
void pairwise_distance_canberra(const raft::handle_t& handle,
                                const double* x,
                                const double* y,
                                double* dist,
                                int m,
                                int n,
                                int k,
                                bool isRowMajor,
                                double metric_arg)
{
  // Call the distance function
  raft::distance::distance<raft::distance::DistanceType::Canberra, double, double, double, int>(
    handle, x, y, dist, m, n, k, isRowMajor);
}

/// Single-precision overload of the above.
void pairwise_distance_canberra(const raft::handle_t& handle,
                                const float* x,
                                const float* y,
                                float* dist,
                                int m,
                                int n,
                                int k,
                                bool isRowMajor,
                                float metric_arg)
{
  // Call the distance function
  raft::distance::distance<raft::distance::DistanceType::Canberra, float, float, float, int>(
    handle, x, y, dist, m, n, k, isRowMajor);
}
} // namespace Metrics
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/metrics/pairwise_distance_jensen_shannon.cu |
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "pairwise_distance_jensen_shannon.cuh"
#include <raft/core/handle.hpp>
#include <raft/distance/distance.cuh>
#include <rmm/device_uvector.hpp>
namespace ML {
namespace Metrics {
/**
 * @brief Computes all pairwise Jensen-Shannon distances between the m rows of
 * x and the n rows of y (k features each) into dist. metric_arg is unused —
 * it is not forwarded to raft — and exists only for signature uniformity with
 * the other per-metric wrappers.
 */
void pairwise_distance_jensen_shannon(const raft::handle_t& handle,
                                      const double* x,
                                      const double* y,
                                      double* dist,
                                      int m,
                                      int n,
                                      int k,
                                      bool isRowMajor,
                                      double metric_arg)
{
  raft::distance::
    distance<raft::distance::DistanceType::JensenShannon, double, double, double, int>(
      handle, x, y, dist, m, n, k, isRowMajor);
}

/// Single-precision overload of the above.
void pairwise_distance_jensen_shannon(const raft::handle_t& handle,
                                      const float* x,
                                      const float* y,
                                      float* dist,
                                      int m,
                                      int n,
                                      int k,
                                      bool isRowMajor,
                                      float metric_arg)
{
  raft::distance::distance<raft::distance::DistanceType::JensenShannon, float, float, float, int>(
    handle, x, y, dist, m, n, k, isRowMajor);
}
} // namespace Metrics
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/metrics/silhouette_score_batched_double.cu |
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/metrics/metrics.hpp>
#include <raft/core/handle.hpp>
#include <raft/distance/distance_types.hpp>
#include <raft/stats/silhouette_score.cuh>
namespace ML {
namespace Metrics {
namespace Batched {
double silhouette_score(const raft::handle_t& handle,
                        double* X,
                        int n_rows,
                        int n_cols,
                        int* y,
                        int n_labels,
                        double* scores,
                        int chunk,
                        raft::distance::DistanceType metric)
{
  // Batched double-precision silhouette score: delegates to RAFT, which
  // evaluates `chunk` rows at a time — see raft::stats::silhouette_score_batched
  // for the exact chunking semantics.
  const double result = raft::stats::silhouette_score_batched<double, int, int>(
    handle, X, n_rows, n_cols, y, n_labels, scores, chunk, metric);
  return result;
}
} // namespace Batched
} // namespace Metrics
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/metrics/completeness_score.cu |
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/metrics/metrics.hpp>
#include <raft/core/handle.hpp>
#include <raft/stats/homogeneity_score.cuh>
namespace ML {
namespace Metrics {
double completeness_score(const raft::handle_t& handle,
                          const int* y,
                          const int* y_hat,
                          const int n,
                          const int lower_class_range,
                          const int upper_class_range)
{
  // Completeness is the dual of homogeneity under argument exchange:
  // completeness(y, y_hat) == homogeneity(y_hat, y). The swapped argument
  // order passed to RAFT below is therefore deliberate — NOT a bug.
  return raft::stats::homogeneity_score(
    y_hat, y, n, lower_class_range, upper_class_range, handle.get_stream());
}
} // namespace Metrics
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/metrics/pairwise_distance_hamming.cuh |
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/handle.hpp>
#include <raft/distance/distance.cuh>
namespace ML {
namespace Metrics {
// Pairwise Hamming distances between two input matrices; double- and
// single-precision overloads. Declarations only — implemented in
// pairwise_distance_hamming.cu as thin wrappers over raft::distance::distance.
// NOTE(review): metric_arg is presumably unused for this metric (as in the
// other wrappers) — confirm against the implementation.
void pairwise_distance_hamming(const raft::handle_t& handle,
                               const double* x,
                               const double* y,
                               double* dist,
                               int m,
                               int n,
                               int k,
                               bool isRowMajor,
                               double metric_arg);
// Single-precision overload.
void pairwise_distance_hamming(const raft::handle_t& handle,
                               const float* x,
                               const float* y,
                               float* dist,
                               int m,
                               int n,
                               int k,
                               bool isRowMajor,
                               float metric_arg);
} // namespace Metrics
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/metrics/pairwise_distance_kl_divergence.cuh |
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/handle.hpp>
#include <raft/distance/distance.cuh>
namespace ML {
namespace Metrics {
// Pairwise KL-divergence between two input matrices; double- and
// single-precision overloads. Implemented in pairwise_distance_kl_divergence.cu,
// where metric_arg is unused (kept only for signature uniformity across the
// pairwise_distance_* wrappers).
void pairwise_distance_kl_divergence(const raft::handle_t& handle,
                                     const double* x,
                                     const double* y,
                                     double* dist,
                                     int m,
                                     int n,
                                     int k,
                                     bool isRowMajor,
                                     double metric_arg);
// Single-precision overload.
void pairwise_distance_kl_divergence(const raft::handle_t& handle,
                                     const float* x,
                                     const float* y,
                                     float* dist,
                                     int m,
                                     int n,
                                     int k,
                                     bool isRowMajor,
                                     float metric_arg);
} // namespace Metrics
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/metrics/pairwise_distance_minkowski.cuh |
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/handle.hpp>
#include <raft/distance/distance.cuh>
namespace ML {
namespace Metrics {
// Pairwise Minkowski distances between two input matrices; double- and
// single-precision overloads. Declarations only.
// NOTE(review): for Minkowski, metric_arg is presumably the order `p` of the
// norm — confirm against pairwise_distance_minkowski.cu (not shown here).
void pairwise_distance_minkowski(const raft::handle_t& handle,
                                 const double* x,
                                 const double* y,
                                 double* dist,
                                 int m,
                                 int n,
                                 int k,
                                 bool isRowMajor,
                                 double metric_arg);
// Single-precision overload.
void pairwise_distance_minkowski(const raft::handle_t& handle,
                                 const float* x,
                                 const float* y,
                                 float* dist,
                                 int m,
                                 int n,
                                 int k,
                                 bool isRowMajor,
                                 float metric_arg);
} // namespace Metrics
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/metrics/pairwise_distance_chebyshev.cu |
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "pairwise_distance_chebyshev.cuh"
#include <raft/core/handle.hpp>
#include <raft/distance/distance.cuh>
#include <rmm/device_uvector.hpp>
namespace ML {
namespace Metrics {
void pairwise_distance_chebyshev(const raft::handle_t& handle,
                                 const double* x,
                                 const double* y,
                                 double* dist,
                                 int m,
                                 int n,
                                 int k,
                                 bool isRowMajor,
                                 double metric_arg)
{
  // Chebyshev (L-infinity) pairwise distances, double precision; delegated to
  // RAFT. metric_arg is unused (signature uniformity only).
  using DT = raft::distance::DistanceType;
  raft::distance::distance<DT::Linf, double, double, double, int>(
    handle, x, y, dist, m, n, k, isRowMajor);
}
void pairwise_distance_chebyshev(const raft::handle_t& handle,
                                 const float* x,
                                 const float* y,
                                 float* dist,
                                 int m,
                                 int n,
                                 int k,
                                 bool isRowMajor,
                                 float metric_arg)
{
  // Chebyshev (L-infinity) pairwise distances, single precision; delegated to
  // RAFT. metric_arg is unused (signature uniformity only).
  using DT = raft::distance::DistanceType;
  raft::distance::distance<DT::Linf, float, float, float, int>(
    handle, x, y, dist, m, n, k, isRowMajor);
}
} // namespace Metrics
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/metrics/silhouette_score_batched_float.cu |
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/metrics/metrics.hpp>
#include <raft/core/handle.hpp>
#include <raft/distance/distance_types.hpp>
#include <raft/stats/silhouette_score.cuh>
namespace ML {
namespace Metrics {
namespace Batched {
float silhouette_score(const raft::handle_t& handle,
                       float* X,
                       int n_rows,
                       int n_cols,
                       int* y,
                       int n_labels,
                       float* scores,
                       int chunk,
                       raft::distance::DistanceType metric)
{
  // Batched single-precision silhouette score: delegates to RAFT, which
  // evaluates `chunk` rows at a time — see raft::stats::silhouette_score_batched
  // for the exact chunking semantics.
  const float result = raft::stats::silhouette_score_batched<float, int, int>(
    handle, X, n_rows, n_cols, y, n_labels, scores, chunk, metric);
  return result;
}
} // namespace Batched
} // namespace Metrics
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/metrics/homogeneity_score.cu |
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/metrics/metrics.hpp>
#include <raft/core/handle.hpp>
#include <raft/stats/homogeneity_score.cuh>
namespace ML {
namespace Metrics {
double homogeneity_score(const raft::handle_t& handle,
                         const int* y,
                         const int* y_hat,
                         const int n,
                         const int lower_class_range,
                         const int upper_class_range)
{
  // Homogeneity between ground-truth labels y and predicted labels y_hat,
  // computed by RAFT on this handle's stream. Label values are assumed to lie
  // in [lower_class_range, upper_class_range] — see RAFT's contract.
  auto stream = handle.get_stream();
  return raft::stats::homogeneity_score(
    y, y_hat, n, lower_class_range, upper_class_range, stream);
}
} // namespace Metrics
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/metrics/pairwise_distance_kl_divergence.cu |
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "pairwise_distance_kl_divergence.cuh"
#include <raft/core/handle.hpp>
#include <raft/distance/distance.cuh>
#include <rmm/device_uvector.hpp>
namespace ML {
namespace Metrics {
void pairwise_distance_kl_divergence(const raft::handle_t& handle,
                                     const double* x,
                                     const double* y,
                                     double* dist,
                                     int m,
                                     int n,
                                     int k,
                                     bool isRowMajor,
                                     double metric_arg)
{
  // Double-precision KL-divergence pairwise distances, delegated to RAFT.
  // metric_arg is unused (signature uniformity only).
  using DT = raft::distance::DistanceType;
  raft::distance::distance<DT::KLDivergence, double, double, double, int>(
    handle, x, y, dist, m, n, k, isRowMajor);
}
void pairwise_distance_kl_divergence(const raft::handle_t& handle,
                                     const float* x,
                                     const float* y,
                                     float* dist,
                                     int m,
                                     int n,
                                     int k,
                                     bool isRowMajor,
                                     float metric_arg)
{
  // Single-precision KL-divergence pairwise distances, delegated to RAFT.
  // metric_arg is unused (signature uniformity only).
  using DT = raft::distance::DistanceType;
  raft::distance::distance<DT::KLDivergence, float, float, float, int>(
    handle, x, y, dist, m, n, k, isRowMajor);
}
} // namespace Metrics
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/metrics/pairwise_distance_euclidean.cu |
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "pairwise_distance_euclidean.cuh"
#include <raft/core/handle.hpp>
#include <raft/distance/distance.cuh>
#include <rmm/device_uvector.hpp>
namespace ML {
namespace Metrics {
void pairwise_distance_euclidean(const raft::handle_t& handle,
                                 const double* x,
                                 const double* y,
                                 double* dist,
                                 int m,
                                 int n,
                                 int k,
                                 raft::distance::DistanceType metric,
                                 bool isRowMajor,
                                 double metric_arg)
{
  // Dispatch the requested Euclidean variant (expanded/unexpanded, with or
  // without the final sqrt) to the matching RAFT specialization.
  // metric_arg is unused; it keeps the wrapper signatures uniform.
  using DT = raft::distance::DistanceType;
  switch (metric) {
    case DT::L2Expanded:
      raft::distance::distance<DT::L2Expanded, double, double, double, int>(
        handle, x, y, dist, m, n, k, isRowMajor);
      break;
    case DT::L2SqrtExpanded:
      raft::distance::distance<DT::L2SqrtExpanded, double, double, double, int>(
        handle, x, y, dist, m, n, k, isRowMajor);
      break;
    case DT::L2Unexpanded:
      raft::distance::distance<DT::L2Unexpanded, double, double, double, int>(
        handle, x, y, dist, m, n, k, isRowMajor);
      break;
    case DT::L2SqrtUnexpanded:
      raft::distance::distance<DT::L2SqrtUnexpanded, double, double, double, int>(
        handle, x, y, dist, m, n, k, isRowMajor);
      break;
    default: THROW("Unknown or unsupported distance metric '%d'!", (int)metric);
  }
}
void pairwise_distance_euclidean(const raft::handle_t& handle,
                                 const float* x,
                                 const float* y,
                                 float* dist,
                                 int m,
                                 int n,
                                 int k,
                                 raft::distance::DistanceType metric,
                                 bool isRowMajor,
                                 float metric_arg)
{
  // Single-precision counterpart of the double overload above: dispatch the
  // requested Euclidean variant to the matching RAFT specialization.
  // metric_arg is unused; it keeps the wrapper signatures uniform.
  using DT = raft::distance::DistanceType;
  switch (metric) {
    case DT::L2Expanded:
      raft::distance::distance<DT::L2Expanded, float, float, float, int>(
        handle, x, y, dist, m, n, k, isRowMajor);
      break;
    case DT::L2SqrtExpanded:
      raft::distance::distance<DT::L2SqrtExpanded, float, float, float, int>(
        handle, x, y, dist, m, n, k, isRowMajor);
      break;
    case DT::L2Unexpanded:
      raft::distance::distance<DT::L2Unexpanded, float, float, float, int>(
        handle, x, y, dist, m, n, k, isRowMajor);
      break;
    case DT::L2SqrtUnexpanded:
      raft::distance::distance<DT::L2SqrtUnexpanded, float, float, float, int>(
        handle, x, y, dist, m, n, k, isRowMajor);
      break;
    default: THROW("Unknown or unsupported distance metric '%d'!", (int)metric);
  }
}
} // namespace Metrics
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/metrics/pairwise_distance_l1.cu |
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "pairwise_distance_l1.cuh"
#include <raft/core/handle.hpp>
#include <raft/distance/distance.cuh>
#include <rmm/device_uvector.hpp>
namespace ML {
namespace Metrics {
void pairwise_distance_l1(const raft::handle_t& handle,
                          const double* x,
                          const double* y,
                          double* dist,
                          int m,
                          int n,
                          int k,
                          bool isRowMajor,
                          double metric_arg)
{
  // Double-precision L1 (Manhattan) pairwise distances, delegated to RAFT.
  // metric_arg is unused (signature uniformity only).
  using DT = raft::distance::DistanceType;
  raft::distance::distance<DT::L1, double, double, double, int>(
    handle, x, y, dist, m, n, k, isRowMajor);
}
void pairwise_distance_l1(const raft::handle_t& handle,
                          const float* x,
                          const float* y,
                          float* dist,
                          int m,
                          int n,
                          int k,
                          bool isRowMajor,
                          float metric_arg)
{
  // Single-precision L1 (Manhattan) pairwise distances, delegated to RAFT.
  // metric_arg is unused (signature uniformity only).
  using DT = raft::distance::DistanceType;
  raft::distance::distance<DT::L1, float, float, float, int>(
    handle, x, y, dist, m, n, k, isRowMajor);
}
} // namespace Metrics
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/metrics/pairwise_distance_russell_rao.cu |
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "pairwise_distance_russell_rao.cuh"
#include <raft/core/handle.hpp>
#include <raft/distance/distance.cuh>
#include <rmm/device_uvector.hpp>
namespace ML {
namespace Metrics {
void pairwise_distance_russell_rao(const raft::handle_t& handle,
                                   const double* x,
                                   const double* y,
                                   double* dist,
                                   int m,
                                   int n,
                                   int k,
                                   bool isRowMajor,
                                   double metric_arg)
{
  // Double-precision Russell-Rao pairwise distances, delegated to RAFT
  // (expanded formulation). metric_arg is unused (signature uniformity only).
  using DT = raft::distance::DistanceType;
  raft::distance::distance<DT::RusselRaoExpanded, double, double, double, int>(
    handle, x, y, dist, m, n, k, isRowMajor);
}
void pairwise_distance_russell_rao(const raft::handle_t& handle,
                                   const float* x,
                                   const float* y,
                                   float* dist,
                                   int m,
                                   int n,
                                   int k,
                                   bool isRowMajor,
                                   float metric_arg)
{
  // Single-precision Russell-Rao pairwise distances, delegated to RAFT
  // (expanded formulation). metric_arg is unused (signature uniformity only).
  using DT = raft::distance::DistanceType;
  raft::distance::distance<DT::RusselRaoExpanded, float, float, float, int>(
    handle, x, y, dist, m, n, k, isRowMajor);
}
} // namespace Metrics
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/metrics/r2_score.cu | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/metrics/metrics.hpp>
#include <raft/core/handle.hpp>
#include <raft/stats/r2_score.cuh>
namespace ML {
namespace Metrics {
float r2_score_py(const raft::handle_t& handle, float* y, float* y_hat, int n)
{
  // Coefficient of determination (R^2) between reference values y and
  // predictions y_hat, computed by RAFT on this handle's stream.
  auto stream = handle.get_stream();
  return raft::stats::r2_score(y, y_hat, n, stream);
}
double r2_score_py(const raft::handle_t& handle, double* y, double* y_hat, int n)
{
  // Double-precision overload of the R^2 wrapper above.
  auto stream = handle.get_stream();
  return raft::stats::r2_score(y, y_hat, n, stream);
}
} // namespace Metrics
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/metrics/pairwise_distance_correlation.cu |
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "pairwise_distance_correlation.cuh"
#include <raft/core/handle.hpp>
#include <raft/distance/distance.cuh>
#include <rmm/device_uvector.hpp>
namespace ML {
namespace Metrics {
void pairwise_distance_correlation(const raft::handle_t& handle,
                                   const double* x,
                                   const double* y,
                                   double* dist,
                                   int m,
                                   int n,
                                   int k,
                                   bool isRowMajor,
                                   double metric_arg)
{
  // Double-precision correlation pairwise distances, delegated to RAFT
  // (expanded formulation). metric_arg is unused (signature uniformity only).
  using DT = raft::distance::DistanceType;
  raft::distance::distance<DT::CorrelationExpanded, double, double, double, int>(
    handle, x, y, dist, m, n, k, isRowMajor);
}
void pairwise_distance_correlation(const raft::handle_t& handle,
                                   const float* x,
                                   const float* y,
                                   float* dist,
                                   int m,
                                   int n,
                                   int k,
                                   bool isRowMajor,
                                   float metric_arg)
{
  // Single-precision correlation pairwise distances, delegated to RAFT
  // (expanded formulation). metric_arg is unused (signature uniformity only).
  using DT = raft::distance::DistanceType;
  raft::distance::distance<DT::CorrelationExpanded, float, float, float, int>(
    handle, x, y, dist, m, n, k, isRowMajor);
}
} // namespace Metrics
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/metrics/accuracy_score.cu |
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/metrics/metrics.hpp>
#include <raft/core/handle.hpp>
#include <raft/stats/accuracy.cuh>
namespace ML {
namespace Metrics {
float accuracy_score_py(const raft::handle_t& handle,
                        const int* predictions,
                        const int* ref_predictions,
                        int n)
{
  // Classification accuracy of `predictions` against `ref_predictions`,
  // delegated to RAFT's accuracy primitive on this handle's stream.
  auto stream = handle.get_stream();
  return raft::stats::accuracy(predictions, ref_predictions, n, stream);
}
} // namespace Metrics
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/metrics/pairwise_distance_jensen_shannon.cuh |
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/handle.hpp>
#include <raft/distance/distance.cuh>
namespace ML {
namespace Metrics {
// Pairwise Jensen-Shannon distances between two input matrices; double- and
// single-precision overloads. Implemented in pairwise_distance_jensen_shannon.cu,
// where metric_arg is unused (kept only for signature uniformity across the
// pairwise_distance_* wrappers).
void pairwise_distance_jensen_shannon(const raft::handle_t& handle,
                                      const double* x,
                                      const double* y,
                                      double* dist,
                                      int m,
                                      int n,
                                      int k,
                                      bool isRowMajor,
                                      double metric_arg);
// Single-precision overload.
void pairwise_distance_jensen_shannon(const raft::handle_t& handle,
                                      const float* x,
                                      const float* y,
                                      float* dist,
                                      int m,
                                      int n,
                                      int k,
                                      bool isRowMajor,
                                      float metric_arg);
} // namespace Metrics
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/metrics/pairwise_distance_correlation.cuh |
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/handle.hpp>
#include <raft/distance/distance.cuh>
namespace ML {
namespace Metrics {
// Pairwise correlation distances between two input matrices; double- and
// single-precision overloads. Implemented in pairwise_distance_correlation.cu,
// where metric_arg is unused (kept only for signature uniformity across the
// pairwise_distance_* wrappers).
void pairwise_distance_correlation(const raft::handle_t& handle,
                                   const double* x,
                                   const double* y,
                                   double* dist,
                                   int m,
                                   int n,
                                   int k,
                                   bool isRowMajor,
                                   double metric_arg);
// Single-precision overload.
void pairwise_distance_correlation(const raft::handle_t& handle,
                                   const float* x,
                                   const float* y,
                                   float* dist,
                                   int m,
                                   int n,
                                   int k,
                                   bool isRowMajor,
                                   float metric_arg);
} // namespace Metrics
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/metrics/silhouette_score.cu |
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/metrics/metrics.hpp>
#include <raft/core/handle.hpp>
#include <raft/distance/distance_types.hpp>
#include <raft/stats/silhouette_score.cuh>
namespace ML {
namespace Metrics {
double silhouette_score(const raft::handle_t& handle,
                        double* y,
                        int nRows,
                        int nCols,
                        int* labels,
                        int nLabels,
                        double* silScores,
                        raft::distance::DistanceType metric)
{
  // Non-batched silhouette score over the nRows x nCols data in `y` with the
  // given cluster labels; delegated to RAFT on this handle's stream.
  auto stream = handle.get_stream();
  return raft::stats::silhouette_score<double, int>(
    handle, y, nRows, nCols, labels, nLabels, silScores, stream, metric);
}
} // namespace Metrics
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/metrics/rand_index.cu |
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <raft/core/handle.hpp>
#include <cuml/metrics/metrics.hpp>
#include <raft/core/handle.hpp>
#include <raft/stats/rand_index.cuh>
namespace ML {
namespace Metrics {
double rand_index(const raft::handle_t& handle, const double* y, const double* y_hat, int n)
{
  // Rand index between two cluster assignments; RAFT expects the element
  // count as an unsigned 64-bit value.
  auto stream = handle.get_stream();
  return raft::stats::rand_index(y, y_hat, static_cast<uint64_t>(n), stream);
}
} // namespace Metrics
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/metrics/mutual_info_score.cu |
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <raft/core/handle.hpp>
#include <cuml/metrics/metrics.hpp>
#include <raft/core/handle.hpp>
#include <raft/stats/mutual_info_score.cuh>
namespace ML {
namespace Metrics {
double mutual_info_score(const raft::handle_t& handle,
                         const int* y,
                         const int* y_hat,
                         const int n,
                         const int lower_class_range,
                         const int upper_class_range)
{
  // Mutual information between the label arrays y and y_hat, computed by
  // RAFT on this handle's stream. Label values are assumed to lie in
  // [lower_class_range, upper_class_range] — see RAFT's contract.
  auto stream = handle.get_stream();
  return raft::stats::mutual_info_score(
    y, y_hat, n, lower_class_range, upper_class_range, stream);
}
} // namespace Metrics
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/metrics/pairwise_distance_cosine.cu |
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "pairwise_distance_cosine.cuh"
#include <raft/core/handle.hpp>
#include <raft/distance/distance.cuh>
#include <rmm/device_uvector.hpp>
namespace ML {
namespace Metrics {
void pairwise_distance_cosine(const raft::handle_t& handle,
                              const double* x,
                              const double* y,
                              double* dist,
                              int m,
                              int n,
                              int k,
                              bool isRowMajor,
                              double metric_arg)
{
  // Double-precision cosine pairwise distances, delegated to RAFT (expanded
  // formulation). metric_arg is unused (signature uniformity only).
  using DT = raft::distance::DistanceType;
  raft::distance::distance<DT::CosineExpanded, double, double, double, int>(
    handle, x, y, dist, m, n, k, isRowMajor);
}
void pairwise_distance_cosine(const raft::handle_t& handle,
                              const float* x,
                              const float* y,
                              float* dist,
                              int m,
                              int n,
                              int k,
                              bool isRowMajor,
                              float metric_arg)
{
  // Single-precision cosine pairwise distances, delegated to RAFT (expanded
  // formulation). metric_arg is unused (signature uniformity only).
  using DT = raft::distance::DistanceType;
  raft::distance::distance<DT::CosineExpanded, float, float, float, int>(
    handle, x, y, dist, m, n, k, isRowMajor);
}
} // namespace Metrics
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/random_projection/rproj.cu | /*
* Copyright (c) 2018-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "rproj.cuh"
#include <cuml/random_projection/rproj_c.h>
#include <raft/core/handle.hpp>
namespace ML {
// Explicit template instantiations of the random-projection fit/transform
// entry points for float and double, compiled into this translation unit.
template void RPROJfit(const raft::handle_t& handle,
                       rand_mat<float>* random_matrix,
                       paramsRPROJ* params);
template void RPROJfit(const raft::handle_t& handle,
                       rand_mat<double>* random_matrix,
                       paramsRPROJ* params);
// Transform instantiations: project `input` through the fitted random matrix.
template void RPROJtransform(const raft::handle_t& handle,
                             float* input,
                             rand_mat<float>* random_matrix,
                             float* output,
                             paramsRPROJ* params);
template void RPROJtransform(const raft::handle_t& handle,
                             double* input,
                             rand_mat<double>* random_matrix,
                             double* output,
                             paramsRPROJ* params);
}; // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/random_projection/rproj_utils.cuh | /*
* Copyright (c) 2018-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuml/random_projection/rproj_c.h>

#include <raft/random/rng.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>

#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>

#include <sys/time.h>

#include <cassert>
#include <random>
#include <unordered_set>
const int TPB_X = 256;
inline void sample_without_replacement(size_t n_population,
                                       size_t n_samples,
                                       int* indices,
                                       size_t& indices_idx)
{
  // Appends n_samples distinct indices drawn uniformly from [0, n_population)
  // to `indices`, starting at position indices_idx, and advances indices_idx.
  // Uniqueness is guaranteed only within a single call, not across calls.
  //
  // Guard added: with n_samples > n_population the rejection loop below can
  // never terminate, so reject that input up front.
  assert(n_samples <= n_population);

  std::random_device dev;
  std::mt19937 gen(dev());
  std::uniform_int_distribution<int> uni_dist(0, n_population - 1);

  std::unordered_set<int> seen;  // indices already emitted by this call
  for (size_t i = 0; i < n_samples; i++) {
    // Rejection-sample until we hit an index not drawn earlier in this call.
    int rand_idx = uni_dist(gen);
    while (seen.find(rand_idx) != seen.end()) {
      rand_idx = uni_dist(gen);
    }
    seen.insert(rand_idx);
    indices[indices_idx] = rand_idx;
    indices_idx++;
  }
}
__global__ void sum_bools(bool* in_bools, int n, int* out_val)
{
  // Counts the `true` entries of in_bools[0..n) into *out_val (which the
  // caller must zero beforehand). One thread per element; the launch is
  // expected to use blocks of TPB_X threads.
  const int idx = (blockIdx.x * TPB_X) + threadIdx.x;
  if (idx >= n) return;
  if (in_bools[idx]) { raft::myAtomicAdd(out_val, 1); }
}
inline size_t binomial(const raft::handle_t& h, size_t n, double p, int random_state)
{
  // Draws one sample from Binomial(n, p) on the GPU: generate n Bernoulli
  // variates and count them with the sum_bools kernel.
  //
  // NOTE(review): the RNG seed mixes random_state with wall-clock time, so
  // results are intentionally NOT reproducible for a fixed random_state.
  struct timeval tp;
  gettimeofday(&tp, NULL);
  long long seed = tp.tv_sec * 1000 + tp.tv_usec;
  auto rng = raft::random::Rng(random_state + seed);

  rmm::device_uvector<bool> rand_array(n, h.get_stream());
  rmm::device_scalar<int> successes(h.get_stream());

  // Generates variates with probability (1 - p), so the kernel below counts
  // *failures*; presumably Rng::bernoulli yields `true` with the given
  // probability — confirm against the RAFT docs.
  rng.bernoulli(rand_array.data(), n, 1 - p, h.get_stream());

  // Fix: the memset's error status was previously ignored; check it like the
  // other CUDA calls in this function.
  RAFT_CUDA_TRY(cudaMemsetAsync(successes.data(), 0, sizeof(int), h.get_stream()));

  dim3 grid_n(raft::ceildiv(n, (size_t)TPB_X), 1, 1);
  dim3 blk(TPB_X, 1, 1);
  sum_bools<<<grid_n, blk, 0, h.get_stream()>>>(rand_array.data(), n, successes.data());
  RAFT_CUDA_TRY(cudaPeekAtLastError());

  int ret = 0;
  raft::update_host(&ret, successes.data(), 1, h.get_stream());
  h.sync_stream();
  RAFT_CUDA_TRY(cudaPeekAtLastError());

  // `ret` counts failures (see above), so the number of successes is n - ret.
  return n - ret;
}
// Resolves the density sentinel: -1.0 ("auto") maps to the 1/sqrt(n_features)
// default for sparse random projections; any other value passes through
// unchanged.
inline double check_density(double density, size_t n_features)
{
  const bool auto_density = (density == -1.0);
  return auto_density ? 1.0 / sqrt(static_cast<double>(n_features)) : density;
}
namespace ML {
/**
* @brief computes minimum target dimension to preserve information according to error tolerance
* (eps parameter)
* @param[in] n_samples: number of samples
* @param[in] eps: error tolerance
* @return minimum target dimension
*/
size_t johnson_lindenstrauss_min_dim(size_t n_samples, double eps)
{
  ASSERT(eps > 0.0 && eps < 1.0, "Parameter eps: must be in range (0, 1)");
  ASSERT(n_samples > 0, "Parameter n_samples: must be strictly positive");
  // Johnson-Lindenstrauss lower bound: 4 * ln(n_samples) / (eps^2/2 - eps^3/3).
  double denominator = (pow(eps, 2.0) / 2.0) - (pow(eps, 3) / 3.0);
  // Truncated toward zero by the size_t conversion.
  size_t res = 4.0 * log(n_samples) / denominator;
  return res;
}
// Validates a fully-built paramsRPROJ, aborting via ASSERT on invalid values.
// Expects build_parameters() to have resolved any -1 "auto" sentinels first.
inline void check_parameters(paramsRPROJ& params)
{
  ASSERT(params.n_components > 0, "Parameter n_components: must be strictly positive");
  ASSERT(params.n_features > 0, "Parameter n_features: must be strictly positive");
  ASSERT(params.n_features >= params.n_components,
         "Parameters n_features and n_components: n_features must superior "
         "or equal to n_components. If you set eps parameter, please modify its "
         "value."
         "\nCurrent values :\n\tn_features : %d\n\tn_components : %d\n\teps : %lf",
         params.n_features,
         params.n_components,
         params.eps);
  // Density only constrains the sparse (non-gaussian) method.
  ASSERT(params.gaussian_method || (params.density > 0.0 && params.density <= 1.0),
         "Parameter density: must be in range (0, 1]");
}
// Resolves the -1 "auto" sentinels in a paramsRPROJ before fitting:
// n_components falls back to the Johnson-Lindenstrauss lower bound, and for
// the sparse method the density sentinel is resolved via check_density().
inline void build_parameters(paramsRPROJ& params)
{
  const bool auto_components = (params.n_components == -1);
  if (auto_components) {
    params.n_components = johnson_lindenstrauss_min_dim(params.n_samples, params.eps);
  }
  const bool is_sparse = !params.gaussian_method;
  if (is_sparse) { params.density = check_density(params.density, params.n_features); }
}
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/random_projection/rproj.cuh | /*
* Copyright (c) 2018-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "rproj_utils.cuh"
#include <cuml/random_projection/rproj_c.h>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
// TODO: This needs to be removed.
#include <raft/sparse/detail/cusparse_wrappers.h>
// #TODO: Replace with public header when ready
#include <raft/linalg/detail/cublas_wrappers.hpp>
#include <cstddef>
#include <random>
#include <unordered_set>
#include <vector>
namespace ML {
/**
* @brief generates a gaussian random matrix
* @param[in] h: cuML handle
* @param[out] random_matrix: the random matrix to be allocated and generated
* @param[in] params: data structure that includes all the parameters of the model
*/
template <typename math_t>
void gaussian_random_matrix(const raft::handle_t& h,
                            rand_mat<math_t>* random_matrix,
                            paramsRPROJ& params)
{
  cudaStream_t stream = h.get_stream();
  // Dense matrix of n_components * n_features i.i.d. draws from a normal
  // distribution with mean 0 and standard deviation 1/sqrt(n_components).
  int len = params.n_components * params.n_features;
  random_matrix->dense_data.resize(len, stream);
  auto rng = raft::random::Rng(params.random_state);
  math_t scale = 1.0 / sqrt(double(params.n_components));
  rng.normal(random_matrix->dense_data.data(), len, math_t(0), scale, stream);
}
/**
* @brief generates a sparse random matrix
* @param[in] h: cuML handle
* @param[out] random_matrix: the random matrix to be allocated and generated
* @param[in] params: data structure that includes all the parameters of the model
*/
template <typename math_t>
void sparse_random_matrix(const raft::handle_t& h,
                          rand_mat<math_t>* random_matrix,
                          paramsRPROJ& params)
{
  cudaStream_t stream = h.get_stream();
  if (params.density == 1.0f) {
    // Fully dense case: every entry is +/- 1/sqrt(n_components) with equal
    // probability, so no sparse structure is needed.
    int len = params.n_components * params.n_features;
    random_matrix->dense_data.resize(len, stream);
    auto rng = raft::random::Rng(params.random_state);
    math_t scale = 1.0 / sqrt(math_t(params.n_components));
    rng.scaled_bernoulli(random_matrix->dense_data.data(), len, math_t(0.5), scale, stream);
  } else {
    // Sparse case: build the CSR layout (indptr/indices) on the host, one row
    // per component. Buffers are allocated for the worst case (dense rows).
    std::size_t indices_alloc = params.n_features * params.n_components;
    std::size_t indptr_alloc = (params.n_components + 1);
    std::vector<int> indices(indices_alloc);
    std::vector<int> indptr(indptr_alloc);
    std::size_t offset = 0;
    std::size_t indices_idx = 0;
    std::size_t indptr_idx = 0;
    for (int i = 0; i < params.n_components; i++) {
      // Number of non-zeros in this row ~ Binomial(n_features, density).
      int n_nonzero = binomial(h, params.n_features, params.density, params.random_state);
      // Choose which feature columns are non-zero, without repeats.
      sample_without_replacement(params.n_features, n_nonzero, indices.data(), indices_idx);
      indptr[indptr_idx] = offset;
      indptr_idx++;
      offset += n_nonzero;
    }
    indptr[indptr_idx] = offset;  // closing row pointer = total nnz
    auto len = offset;
    random_matrix->indices.resize(len, stream);
    raft::update_device(random_matrix->indices.data(), indices.data(), len, stream);
    len = indptr_idx + 1;
    random_matrix->indptr.resize(len, stream);
    raft::update_device(random_matrix->indptr.data(), indptr.data(), len, stream);
    // Non-zero values: +/- sqrt(1/density)/sqrt(n_components) with equal
    // probability, compensating for the sparsity so norms are preserved in
    // expectation.
    len = offset;
    random_matrix->sparse_data.resize(len, stream);
    auto rng = raft::random::Rng(params.random_state);
    math_t scale = sqrt(1.0 / params.density) / sqrt(params.n_components);
    rng.scaled_bernoulli(random_matrix->sparse_data.data(), len, math_t(0.5), scale, stream);
  }
}
/**
* @brief fits the model by generating appropriate random matrix
* @param[in] handle: cuML handle
* @param[out] random_matrix: the random matrix to be allocated and generated
* @param[in] params: data structure that includes all the parameters of the model
*/
template <typename math_t>
void RPROJfit(const raft::handle_t& handle, rand_mat<math_t>* random_matrix, paramsRPROJ* params)
{
  // Discard any previously generated matrix, then resolve the "auto"
  // sentinels and validate the resulting parameters.
  random_matrix->reset();
  build_parameters(*params);
  check_parameters(*params);
  // Generate either a dense gaussian or a (possibly) sparse matrix, and tag
  // the container with the representation that was filled.
  if (!params->gaussian_method) {
    sparse_random_matrix<math_t>(handle, random_matrix, *params);
    random_matrix->type = sparse;
  } else {
    gaussian_random_matrix<math_t>(handle, random_matrix, *params);
    random_matrix->type = dense;
  }
}
/**
* @brief transforms data according to generated random matrix
* @param[in] handle: cuML handle
* @param[in] input: unprojected original dataset
* @param[in] random_matrix: the random matrix to be allocated and generated
* @param[out] output: projected dataset
* @param[in] params: data structure that includes all the parameters of the model
*/
template <typename math_t>
void RPROJtransform(const raft::handle_t& handle,
                    math_t* input,
                    rand_mat<math_t>* random_matrix,
                    math_t* output,
                    paramsRPROJ* params)
{
  cudaStream_t stream = handle.get_stream();
  check_parameters(*params);
  if (random_matrix->type == dense) {
    // Dense path: output(m x n) = input(m x k) * R(k x n) via cuBLAS GEMM.
    // cuBLAS operands are column-major, hence the leading dimensions below
    // are the respective row counts.
    cublasHandle_t cublas_handle = handle.get_cublas_handle();
    const math_t alfa = 1;
    const math_t beta = 0;
    auto& m = params->n_samples;
    auto& n = params->n_components;
    auto& k = params->n_features;
    auto& lda = m;
    auto& ldb = k;
    auto& ldc = m;
    // #TODO: Call from public API when ready
    RAFT_CUBLAS_TRY(raft::linalg::detail::cublasgemm(cublas_handle,
                                                     CUBLAS_OP_N,
                                                     CUBLAS_OP_N,
                                                     params->n_samples,
                                                     n,
                                                     k,
                                                     &alfa,
                                                     input,
                                                     lda,
                                                     random_matrix->dense_data.data(),
                                                     ldb,
                                                     &beta,
                                                     output,
                                                     ldc,
                                                     stream));
  } else if (random_matrix->type == sparse) {
    // Sparse path: dense input times the CSR random matrix (indptr/indices/
    // sparse_data filled by sparse_random_matrix) via cuSPARSE gemmi.
    auto cusparse_handle = handle.get_cusparse_handle();
    const math_t alfa = 1;
    const math_t beta = 0;
    auto& m = params->n_samples;
    auto& n = params->n_components;
    auto& k = params->n_features;
    std::size_t nnz = random_matrix->sparse_data.size();
    auto& lda = m;
    auto& ldc = m;
    // TODO: Need to wrap this in a RAFT public API.
    RAFT_CUSPARSE_TRY(raft::sparse::detail::cusparsegemmi(cusparse_handle,
                                                          m,
                                                          n,
                                                          k,
                                                          nnz,
                                                          &alfa,
                                                          input,
                                                          lda,
                                                          random_matrix->sparse_data.data(),
                                                          random_matrix->indptr.data(),
                                                          random_matrix->indices.data(),
                                                          &beta,
                                                          output,
                                                          ldc,
                                                          stream));
  } else {
    // No matrix generated yet: RPROJfit must be called before transform.
    ASSERT(false,
           "Could not find a random matrix. Please perform a fit operation "
           "before applying transformation");
  }
}
}; // namespace ML
// end namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/tsvd/tsvd.cu | /*
* Copyright (c) 2018-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "tsvd.cuh"
#include <cuml/decomposition/tsvd.hpp>
#include <raft/core/handle.hpp>
namespace ML {
// Single-GPU TSVD fit, float overload: thin wrapper forwarding to the
// templated implementation on the handle's stream.
void tsvdFit(raft::handle_t& handle,
             float* input,
             float* components,
             float* singular_vals,
             const paramsTSVD& prms)
{
  tsvdFit(handle, input, components, singular_vals, prms, handle.get_stream());
}
// Single-GPU TSVD fit, double overload: same forwarding wrapper.
void tsvdFit(raft::handle_t& handle,
             double* input,
             double* components,
             double* singular_vals,
             const paramsTSVD& prms)
{
  tsvdFit(handle, input, components, singular_vals, prms, handle.get_stream());
}
// Single-GPU TSVD fit+transform, float overload: thin wrapper forwarding to
// the templated implementation on the handle's stream.
void tsvdFitTransform(raft::handle_t& handle,
                      float* input,
                      float* trans_input,
                      float* components,
                      float* explained_var,
                      float* explained_var_ratio,
                      float* singular_vals,
                      const paramsTSVD& prms)
{
  tsvdFitTransform(handle,
                   input,
                   trans_input,
                   components,
                   explained_var,
                   explained_var_ratio,
                   singular_vals,
                   prms,
                   handle.get_stream());
}
// Single-GPU TSVD fit+transform, double overload: same forwarding wrapper.
void tsvdFitTransform(raft::handle_t& handle,
                      double* input,
                      double* trans_input,
                      double* components,
                      double* explained_var,
                      double* explained_var_ratio,
                      double* singular_vals,
                      const paramsTSVD& prms)
{
  tsvdFitTransform(handle,
                   input,
                   trans_input,
                   components,
                   explained_var,
                   explained_var_ratio,
                   singular_vals,
                   prms,
                   handle.get_stream());
}
// Single-GPU TSVD transform, float overload: thin wrapper forwarding to the
// templated implementation on the handle's stream.
void tsvdTransform(raft::handle_t& handle,
                   float* input,
                   float* components,
                   float* trans_input,
                   const paramsTSVD& prms)
{
  tsvdTransform(handle, input, components, trans_input, prms, handle.get_stream());
}
// Single-GPU TSVD transform, double overload: same forwarding wrapper.
void tsvdTransform(raft::handle_t& handle,
                   double* input,
                   double* components,
                   double* trans_input,
                   const paramsTSVD& prms)
{
  tsvdTransform(handle, input, components, trans_input, prms, handle.get_stream());
}
// Single-GPU TSVD inverse transform, float overload: thin wrapper forwarding
// to the templated implementation on the handle's stream.
void tsvdInverseTransform(raft::handle_t& handle,
                          float* trans_input,
                          float* components,
                          float* input,
                          const paramsTSVD& prms)
{
  tsvdInverseTransform(handle, trans_input, components, input, prms, handle.get_stream());
}
// Single-GPU TSVD inverse transform, double overload: same forwarding wrapper.
void tsvdInverseTransform(raft::handle_t& handle,
                          double* trans_input,
                          double* components,
                          double* input,
                          const paramsTSVD& prms)
{
  tsvdInverseTransform(handle, trans_input, components, input, prms, handle.get_stream());
}
}; // end namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/tsvd/tsvd_mg.cu | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "tsvd.cuh"
#include <cuml/decomposition/sign_flip_mg.hpp>
#include <cuml/decomposition/tsvd.hpp>
#include <cuml/decomposition/tsvd_mg.hpp>
#include <cumlprims/opg/linalg/mm_aTa.hpp>
#include <cumlprims/opg/stats/mean.hpp>
#include <cumlprims/opg/stats/mean_center.hpp>
#include <cumlprims/opg/stats/stddev.hpp>
#include <raft/core/handle.hpp>
#include <raft/core/comms.hpp>
#include <raft/linalg/eltwise.cuh>
#include <raft/matrix/math.cuh>
#include <raft/stats/mean_center.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <cstddef>
using namespace MLCommon;
namespace ML {
namespace TSVD {
namespace opg {
template <typename T>
void fit_impl(raft::handle_t& handle,
              std::vector<Matrix::Data<T>*>& input_data,
              Matrix::PartDescriptor& input_desc,
              T* components,
              T* singular_vals,
              paramsTSVDMG& prms,
              cudaStream_t* streams,
              std::uint32_t n_streams,
              bool verbose)
{
  // Multi-GPU TSVD fit worker: form the n_cols x n_cols Gram matrix A^T * A
  // across all partitions, eigendecompose it, and keep the top n_components.
  const auto& comm = handle.get_comms();
  cublasHandle_t cublas_handle = handle.get_cublas_handle();
  auto len = prms.n_cols * prms.n_cols;
  rmm::device_uvector<T> cov_data(len, streams[0]);
  auto cov_data_size = cov_data.size();
  Matrix::Data<T> cov{cov_data.data(), cov_data_size};
  // Distributed A^T * A, overlapped across the provided streams.
  LinAlg::opg::mm_aTa(handle, cov, input_data, input_desc, streams, n_streams);
  rmm::device_uvector<T> components_all(len, streams[0]);
  rmm::device_uvector<T> explained_var_all(prms.n_cols, streams[0]);
  // Eigenvectors of the Gram matrix give the components (right singular
  // vectors of A); eigenvalues give the squared singular values.
  ML::calEig(handle, cov.ptr, components_all.data(), explained_var_all.data(), prms, streams[0]);
  // Keep only the first n_components rows of the component matrix.
  raft::matrix::truncZeroOrigin(
    components_all.data(), prms.n_cols, components, prms.n_components, prms.n_cols, streams[0]);
  T scalar = T(1);
  // Singular values are the square roots of the Gram eigenvalues.
  raft::matrix::seqRoot(
    explained_var_all.data(), singular_vals, scalar, prms.n_components, streams[0]);
}
/**
* @brief performs MNMG fit operation for the tsvd
* @input param handle: the internal cuml handle object
* @input param rank_sizes: includes all the partition size information for the rank
* @input param n_parts: number of partitions
* @input param input: input data
* @output param components: principal components of the input data
* @output param singular_vals: singular values of the data
* @input param prms: data structure that includes all the parameters from input size to algorithm
* @input param verbose
*/
template <typename T>
void fit_impl(raft::handle_t& handle,
              Matrix::RankSizePair** rank_sizes,
              std::uint32_t n_parts,
              Matrix::Data<T>** input,
              T* components,
              T* singular_vals,
              paramsTSVDMG& prms,
              bool verbose)
{
  // Adapts the raw pointer-array API into the descriptor-based worker,
  // creating (and tearing down) one CUDA stream per partition.
  int rank = handle.get_comms().get_rank();
  std::vector<Matrix::RankSizePair*> ranksAndSizes(rank_sizes, rank_sizes + n_parts);
  std::vector<Matrix::Data<T>*> input_data(input, input + n_parts);
  Matrix::PartDescriptor input_desc(prms.n_rows, prms.n_cols, ranksAndSizes, rank);
  // TODO: These streams should come from raft::handle_t
  // NOTE(review): `streams` is a variable-length array — a compiler
  // extension, not standard C++; consider std::vector<cudaStream_t>.
  auto n_streams = n_parts;
  cudaStream_t streams[n_streams];
  for (std::uint32_t i = 0; i < n_streams; i++) {
    RAFT_CUDA_TRY(cudaStreamCreate(&streams[i]));
  }
  fit_impl(
    handle, input_data, input_desc, components, singular_vals, prms, streams, n_streams, verbose);
  // Drain all work before destroying the temporary streams.
  for (std::uint32_t i = 0; i < n_streams; i++) {
    handle.sync_stream(streams[i]);
  }
  for (std::uint32_t i = 0; i < n_streams; i++) {
    RAFT_CUDA_TRY(cudaStreamDestroy(streams[i]));
  }
}
template <typename T>
void transform_impl(raft::handle_t& handle,
                    std::vector<Matrix::Data<T>*>& input,
                    Matrix::PartDescriptor input_desc,
                    T* components,
                    std::vector<Matrix::Data<T>*>& trans_input,
                    paramsTSVDMG& prms,
                    cudaStream_t* streams,
                    std::uint32_t n_streams,
                    bool verbose)
{
  // Projects each locally-owned partition: trans = input * components^T
  // (CUBLAS_OP_T on the component matrix), round-robining the GEMMs over
  // the available streams.
  int rank = handle.get_comms().get_rank();
  std::vector<Matrix::RankSizePair*> local_blocks = input_desc.blocksOwnedBy(rank);
  for (std::size_t i = 0; i < input.size(); i++) {
    auto si = i % n_streams;  // round-robin stream assignment
    T alpha = T(1);
    T beta = T(0);
    raft::linalg::gemm(handle,
                       input[i]->ptr,
                       local_blocks[i]->size,
                       prms.n_cols,
                       components,
                       trans_input[i]->ptr,
                       local_blocks[i]->size,
                       prms.n_components,
                       CUBLAS_OP_N,
                       CUBLAS_OP_T,
                       alpha,
                       beta,
                       streams[si]);
  }
  // Wait for every stream so outputs are complete before returning.
  for (std::uint32_t i = 0; i < n_streams; i++) {
    handle.sync_stream(streams[i]);
  }
}
/**
* @brief performs MNMG transform operation for the tsvd.
* @input param handle: the internal cuml handle object
* @input param rank_sizes: includes all the partition size information for the rank
* @input param n_parts: number of partitions
* @input param input: input data
* @input param components: principal components of the input data
* @output param trans_input: transformed input data
* @input param prms: data structure that includes all the parameters from input size to algorithm
* @input param verbose
*/
template <typename T>
void transform_impl(raft::handle_t& handle,
                    Matrix::RankSizePair** rank_sizes,
                    std::uint32_t n_parts,
                    Matrix::Data<T>** input,
                    T* components,
                    Matrix::Data<T>** trans_input,
                    paramsTSVDMG& prms,
                    bool verbose)
{
  // Adapts the raw pointer-array API into the descriptor-based worker,
  // creating (and tearing down) one CUDA stream per partition.
  int rank = handle.get_comms().get_rank();
  std::vector<Matrix::RankSizePair*> ranksAndSizes(rank_sizes, rank_sizes + n_parts);
  std::vector<Matrix::Data<T>*> input_data(input, input + n_parts);
  Matrix::PartDescriptor input_desc(prms.n_rows, prms.n_cols, ranksAndSizes, rank);
  std::vector<Matrix::Data<T>*> trans_data(trans_input, trans_input + n_parts);
  // TODO: These streams should come from raft::handle_t
  // NOTE(review): variable-length array (compiler extension, not standard C++).
  auto n_streams = n_parts;
  cudaStream_t streams[n_streams];
  for (std::uint32_t i = 0; i < n_streams; i++) {
    RAFT_CUDA_TRY(cudaStreamCreate(&streams[i]));
  }
  transform_impl(
    handle, input_data, input_desc, components, trans_data, prms, streams, n_streams, verbose);
  // Drain all work before destroying the temporary streams.
  for (std::uint32_t i = 0; i < n_streams; i++) {
    handle.sync_stream(streams[i]);
  }
  for (std::uint32_t i = 0; i < n_streams; i++) {
    RAFT_CUDA_TRY(cudaStreamDestroy(streams[i]));
  }
}
template <typename T>
void inverse_transform_impl(raft::handle_t& handle,
                            std::vector<Matrix::Data<T>*>& trans_input,
                            Matrix::PartDescriptor trans_input_desc,
                            T* components,
                            std::vector<Matrix::Data<T>*>& input,
                            paramsTSVDMG& prms,
                            cudaStream_t* streams,
                            std::uint32_t n_streams,
                            bool verbose)
{
  // Reconstructs each partition in the original feature space:
  // input = trans_input * components (no transpose), GEMMs round-robined
  // over the available streams.
  std::vector<Matrix::RankSizePair*> local_blocks = trans_input_desc.partsToRanks;
  for (std::size_t i = 0; i < local_blocks.size(); i++) {
    auto si = i % n_streams;  // round-robin stream assignment
    T alpha = T(1);
    T beta = T(0);
    raft::linalg::gemm(handle,
                       trans_input[i]->ptr,
                       local_blocks[i]->size,
                       prms.n_components,
                       components,
                       input[i]->ptr,
                       local_blocks[i]->size,
                       prms.n_cols,
                       CUBLAS_OP_N,
                       CUBLAS_OP_N,
                       alpha,
                       beta,
                       streams[si]);
  }
  // Wait for every stream so outputs are complete before returning.
  for (std::uint32_t i = 0; i < n_streams; i++) {
    handle.sync_stream(streams[i]);
  }
}
/**
* @brief performs MNMG inverse transform operation for the output.
* @input param handle: the internal cuml handle object
* @input param rank_sizes: includes all the partition size information for the rank
* @input param n_parts: number of partitions
* @input param trans_input: transformed input data
* @input param components: principal components of the input data
* @output param input: input data
* @input param prms: data structure that includes all the parameters from input size to algorithm
* @input param verbose
*/
template <typename T>
void inverse_transform_impl(raft::handle_t& handle,
                            Matrix::RankSizePair** rank_sizes,
                            std::uint32_t n_parts,
                            Matrix::Data<T>** trans_input,
                            T* components,
                            Matrix::Data<T>** input,
                            paramsTSVDMG& prms,
                            bool verbose)
{
  // Adapts the raw pointer-array API into the descriptor-based worker,
  // creating (and tearing down) one CUDA stream per partition. Note the
  // descriptor here is built over the transformed (n_components-wide) data.
  int rank = handle.get_comms().get_rank();
  std::vector<Matrix::RankSizePair*> ranksAndSizes(rank_sizes, rank_sizes + n_parts);
  Matrix::PartDescriptor trans_desc(prms.n_rows, prms.n_components, ranksAndSizes, rank);
  std::vector<Matrix::Data<T>*> trans_data(trans_input, trans_input + n_parts);
  std::vector<Matrix::Data<T>*> input_data(input, input + n_parts);
  // TODO: These streams should come from raft::handle_t
  // NOTE(review): variable-length array (compiler extension, not standard C++).
  auto n_streams = n_parts;
  cudaStream_t streams[n_streams];
  for (std::uint32_t i = 0; i < n_streams; i++) {
    RAFT_CUDA_TRY(cudaStreamCreate(&streams[i]));
  }
  inverse_transform_impl(
    handle, trans_data, trans_desc, components, input_data, prms, streams, n_streams, verbose);
  // Drain all work before destroying the temporary streams.
  for (std::uint32_t i = 0; i < n_streams; i++) {
    handle.sync_stream(streams[i]);
  }
  for (std::uint32_t i = 0; i < n_streams; i++) {
    RAFT_CUDA_TRY(cudaStreamDestroy(streams[i]));
  }
}
/**
* @brief performs MNMG fit and transform operation for the tsvd.
* @input param handle: the internal cuml handle object
* @input param rank_sizes: includes all the partition size information for the rank
* @input param n_parts: number of partitions
* @input param input: input data
* @output param trans_input: transformed input data
* @output param components: principal components of the input data
* @output param explained_var: explained var
* @output param explained_var_ratio: the explained var ratio
* @output param singular_vals: singular values of the data
* @input param prms: data structure that includes all the parameters from input size to algorithm
* @input param verbose
*/
template <typename T>
void fit_transform_impl(raft::handle_t& handle,
                        cudaStream_t* streams,
                        size_t n_streams,
                        std::vector<Matrix::Data<T>*>& input_data,
                        Matrix::PartDescriptor& input_desc,
                        std::vector<Matrix::Data<T>*>& trans_data,
                        Matrix::PartDescriptor& trans_desc,
                        T* components,
                        T* explained_var,
                        T* explained_var_ratio,
                        T* singular_vals,
                        paramsTSVDMG& prms,
                        bool verbose)
{
  // Fit the components, project the input, then flip component signs so the
  // decomposition is deterministic across runs/ranks.
  fit_impl(
    handle, input_data, input_desc, components, singular_vals, prms, streams, n_streams, verbose);
  transform_impl(
    handle, input_data, input_desc, components, trans_data, prms, streams, n_streams, verbose);
  PCA::opg::sign_flip(
    handle, trans_data, input_desc, components, prms.n_components, streams, n_streams);
  // explained_var = per-component variance of the transformed data.
  rmm::device_uvector<T> mu_trans(prms.n_components, streams[0]);
  Matrix::Data<T> mu_trans_data{mu_trans.data(), prms.n_components};
  Stats::opg::mean(handle, mu_trans_data, trans_data, trans_desc, streams, n_streams);
  Matrix::Data<T> explained_var_data{explained_var, prms.n_components};
  Stats::opg::var(
    handle, explained_var_data, trans_data, trans_desc, mu_trans_data.ptr, streams, n_streams);
  // Total variance of the original input columns, used as the denominator of
  // the explained-variance ratio.
  // NOTE(review): the mean/variance buffers below are sized prms.n_rows but
  // the sum reduces over prms.n_cols entries — this looks like it assumes
  // n_rows >= n_cols (or should be sized n_cols); confirm against the
  // Stats::opg::mean/var contracts.
  rmm::device_uvector<T> mu(prms.n_rows, streams[0]);
  Matrix::Data<T> mu_data{mu.data(), prms.n_rows};
  Stats::opg::mean(handle, mu_data, input_data, input_desc, streams, n_streams);
  rmm::device_uvector<T> var_input(prms.n_rows, streams[0]);
  Matrix::Data<T> var_input_data{var_input.data(), prms.n_rows};
  Stats::opg::var(handle, var_input_data, input_data, input_desc, mu_data.ptr, streams, n_streams);
  rmm::device_uvector<T> total_vars(1, streams[0]);
  raft::stats::sum(
    total_vars.data(), var_input_data.ptr, std::size_t(1), prms.n_cols, false, streams[0]);
  T total_vars_h;
  raft::update_host(&total_vars_h, total_vars.data(), std::size_t(1), streams[0]);
  // Host needs the total before scaling, hence the synchronization.
  handle.sync_stream(streams[0]);
  T scalar = T(1) / total_vars_h;
  raft::linalg::scalarMultiply(
    explained_var_ratio, explained_var, scalar, prms.n_components, streams[0]);
}
// Public MNMG TSVD fit, float overload: forwards to the templated wrapper.
void fit(raft::handle_t& handle,
         Matrix::RankSizePair** rank_sizes,
         std::uint32_t n_parts,
         Matrix::floatData_t** input,
         float* components,
         float* singular_vals,
         paramsTSVDMG& prms,
         bool verbose)
{
  fit_impl(handle, rank_sizes, n_parts, input, components, singular_vals, prms, verbose);
}
// Public MNMG TSVD fit, double overload: forwards to the templated wrapper.
void fit(raft::handle_t& handle,
         Matrix::RankSizePair** rank_sizes,
         std::uint32_t n_parts,
         Matrix::doubleData_t** input,
         double* components,
         double* singular_vals,
         paramsTSVDMG& prms,
         bool verbose)
{
  fit_impl(handle, rank_sizes, n_parts, input, components, singular_vals, prms, verbose);
}
// Public MNMG TSVD fit+transform, float overload: creates one stream per
// locally-owned block, runs the templated implementation, then tears the
// streams down.
void fit_transform(raft::handle_t& handle,
                   std::vector<Matrix::Data<float>*>& input_data,
                   Matrix::PartDescriptor& input_desc,
                   std::vector<Matrix::Data<float>*>& trans_data,
                   Matrix::PartDescriptor& trans_desc,
                   float* components,
                   float* explained_var,
                   float* explained_var_ratio,
                   float* singular_vals,
                   paramsTSVDMG& prms,
                   bool verbose)
{
  // TODO: These streams should come from raft::handle_t
  // NOTE(review): variable-length array (compiler extension, not standard C++).
  int rank = handle.get_comms().get_rank();
  size_t n_streams = input_desc.blocksOwnedBy(rank).size();
  cudaStream_t streams[n_streams];
  for (std::size_t i = 0; i < n_streams; i++) {
    RAFT_CUDA_TRY(cudaStreamCreate(&streams[i]));
  }
  fit_transform_impl(handle,
                     streams,
                     n_streams,
                     input_data,
                     input_desc,
                     trans_data,
                     trans_desc,
                     components,
                     explained_var,
                     explained_var_ratio,
                     singular_vals,
                     prms,
                     verbose);
  // Drain all work before destroying the temporary streams.
  for (std::size_t i = 0; i < n_streams; i++) {
    handle.sync_stream(streams[i]);
  }
  for (std::size_t i = 0; i < n_streams; i++) {
    RAFT_CUDA_TRY(cudaStreamDestroy(streams[i]));
  }
}
// Public MNMG TSVD fit+transform, double overload: creates one stream per
// locally-owned block, runs the templated implementation, then tears the
// streams down.
void fit_transform(raft::handle_t& handle,
                   std::vector<Matrix::Data<double>*>& input_data,
                   Matrix::PartDescriptor& input_desc,
                   std::vector<Matrix::Data<double>*>& trans_data,
                   Matrix::PartDescriptor& trans_desc,
                   double* components,
                   double* explained_var,
                   double* explained_var_ratio,
                   double* singular_vals,
                   paramsTSVDMG& prms,
                   bool verbose)
{
  // TODO: These streams should come from raft::handle_t
  // NOTE(review): variable-length array (compiler extension, not standard C++).
  int rank = handle.get_comms().get_rank();
  size_t n_streams = input_desc.blocksOwnedBy(rank).size();
  cudaStream_t streams[n_streams];
  for (std::size_t i = 0; i < n_streams; i++) {
    RAFT_CUDA_TRY(cudaStreamCreate(&streams[i]));
  }
  fit_transform_impl(handle,
                     streams,
                     n_streams,
                     input_data,
                     input_desc,
                     trans_data,
                     trans_desc,
                     components,
                     explained_var,
                     explained_var_ratio,
                     singular_vals,
                     prms,
                     verbose);
  // Drain all work before destroying the temporary streams.
  for (std::size_t i = 0; i < n_streams; i++) {
    handle.sync_stream(streams[i]);
  }
  for (std::size_t i = 0; i < n_streams; i++) {
    RAFT_CUDA_TRY(cudaStreamDestroy(streams[i]));
  }
}
// Public MNMG TSVD transform, float overload: forwards to the templated wrapper.
void transform(raft::handle_t& handle,
               Matrix::RankSizePair** rank_sizes,
               std::uint32_t n_parts,
               Matrix::Data<float>** input,
               float* components,
               Matrix::Data<float>** trans_input,
               paramsTSVDMG& prms,
               bool verbose)
{
  transform_impl(handle, rank_sizes, n_parts, input, components, trans_input, prms, verbose);
}
// Public MNMG TSVD transform, double overload: forwards to the templated wrapper.
void transform(raft::handle_t& handle,
               Matrix::RankSizePair** rank_sizes,
               std::uint32_t n_parts,
               Matrix::Data<double>** input,
               double* components,
               Matrix::Data<double>** trans_input,
               paramsTSVDMG& prms,
               bool verbose)
{
  transform_impl(handle, rank_sizes, n_parts, input, components, trans_input, prms, verbose);
}
// Public MNMG TSVD inverse transform, float overload: forwards to the
// templated wrapper.
// NOTE(review): unlike the other public entry points here, these overloads
// take n_parts as std::size_t (float) vs std::uint32_t (double) and pass
// `prms` by value rather than by reference — likely unintentional drift;
// confirm against the declarations in tsvd_mg.hpp before changing.
void inverse_transform(raft::handle_t& handle,
                       Matrix::RankSizePair** rank_sizes,
                       std::size_t n_parts,
                       Matrix::Data<float>** trans_input,
                       float* components,
                       Matrix::Data<float>** input,
                       paramsTSVDMG prms,
                       bool verbose)
{
  inverse_transform_impl(
    handle, rank_sizes, n_parts, trans_input, components, input, prms, verbose);
}
// Public MNMG TSVD inverse transform, double overload: forwards to the
// templated wrapper.
void inverse_transform(raft::handle_t& handle,
                       Matrix::RankSizePair** rank_sizes,
                       std::uint32_t n_parts,
                       Matrix::Data<double>** trans_input,
                       double* components,
                       Matrix::Data<double>** input,
                       paramsTSVDMG prms,
                       bool verbose)
{
  inverse_transform_impl(
    handle, rank_sizes, n_parts, trans_input, components, input, prms, verbose);
}
} // namespace opg
} // namespace TSVD
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/tsvd/tsvd.cuh | /*
* Copyright (c) 2018-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuml/decomposition/params.hpp>
#include <raft/core/handle.hpp>
#include <raft/linalg/add.cuh>
#include <raft/linalg/eig.cuh>
#include <raft/linalg/eltwise.cuh>
#include <raft/linalg/gemm.cuh>
#include <raft/linalg/rsvd.cuh>
#include <raft/linalg/transpose.cuh>
#include <raft/matrix/math.cuh>
#include <raft/matrix/matrix.cuh>
#include <raft/stats/mean.cuh>
#include <raft/stats/stddev.cuh>
#include <raft/stats/sum.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
namespace ML {
// Computes components, singular values, explained variances and their ratios
// via randomized SVD (raft::linalg::rsvdFixedRank) with fixed-rank
// oversampling. Only valid when n_components is well below n_cols (see the
// ASSERTs below).
template <typename math_t>
void calCompExpVarsSvd(const raft::handle_t& handle,
                       math_t* in,
                       math_t* components,
                       math_t* singular_vals,
                       math_t* explained_vars,
                       math_t* explained_var_ratio,
                       const paramsTSVD& prms,
                       cudaStream_t stream)
{
  auto cusolver_handle = handle.get_cusolver_dn_handle();
  auto cublas_handle = handle.get_cublas_handle();
  // RSVD needs headroom: require n_components <= 80% of n_cols.
  auto diff = prms.n_cols - prms.n_components;
  math_t ratio = math_t(diff) / math_t(prms.n_cols);
  ASSERT(ratio >= math_t(0.2),
         "Number of components should be less than at least 80 percent of the "
         "number of features");
  // Oversampling parameter p = 10% of n_cols; p >= 5 implies n_cols >= 50.
  std::size_t p = static_cast<std::size_t>(math_t(0.1) * math_t(prms.n_cols));
  // int p = int(math_t(prms.n_cols) / math_t(4));
  ASSERT(p >= 5, "RSVD should be used where the number of columns are at least 50");
  auto total_random_vecs = prms.n_components + p;
  // NOTE(review): this second ASSERT reuses the "at least 50 columns" message
  // but actually checks n_components + p < n_cols — the message is misleading.
  ASSERT(total_random_vecs < prms.n_cols,
         "RSVD should be used where the number of columns are at least 50");
  rmm::device_uvector<math_t> components_temp(prms.n_cols * prms.n_components, stream);
  math_t* left_eigvec = nullptr;  // left singular vectors not requested
  raft::linalg::rsvdFixedRank(handle,
                              in,
                              prms.n_rows,
                              prms.n_cols,
                              singular_vals,
                              left_eigvec,
                              components_temp.data(),
                              prms.n_components,
                              p,
                              true,
                              false,
                              true,
                              false,
                              (math_t)prms.tol,
                              prms.n_iterations,
                              stream);
  // Transpose the right singular vectors into component-major layout.
  raft::linalg::transpose(
    handle, components_temp.data(), components, prms.n_cols, prms.n_components, stream);
  // NOTE(review): power(..., 1) copies singular values rather than squaring
  // them — explained variance is usually s^2/(n-1); confirm intent.
  raft::matrix::power(singular_vals, explained_vars, math_t(1), prms.n_components, stream);
  raft::matrix::ratio(handle, explained_vars, explained_var_ratio, prms.n_components, stream);
}
// Eigendecomposition of the n_cols x n_cols matrix `in` (covariance/Gram),
// selecting Jacobi or divide-and-conquer per prms.algorithm. The reversal
// calls reorder the eigenpairs so the dominant ones come first
// (eigensolvers typically return ascending eigenvalues), and the transpose
// puts eigenvectors into row-major component layout.
template <typename math_t, typename enum_solver = solver>
void calEig(const raft::handle_t& handle,
            math_t* in,
            math_t* components,
            math_t* explained_var,
            const paramsTSVDTemplate<enum_solver>& prms,
            cudaStream_t stream)
{
  auto cusolver_handle = handle.get_cusolver_dn_handle();
  if (prms.algorithm == enum_solver::COV_EIG_JACOBI) {
    // Iterative Jacobi solver, controlled by tol / n_iterations.
    raft::linalg::eigJacobi(handle,
                            in,
                            prms.n_cols,
                            prms.n_cols,
                            components,
                            explained_var,
                            stream,
                            (math_t)prms.tol,
                            prms.n_iterations);
  } else {
    // Direct divide-and-conquer solver (default).
    raft::linalg::eigDC(handle, in, prms.n_cols, prms.n_cols, components, explained_var, stream);
  }
  raft::matrix::colReverse(components, prms.n_cols, prms.n_cols, stream);
  raft::linalg::transpose(components, prms.n_cols, stream);
  raft::matrix::rowReverse(explained_var, prms.n_cols, std::size_t(1), stream);
}
/**
* @defgroup sign flip for PCA and tSVD. This is used to stabilize the sign of column major eigen
* vectors
* @param input: input matrix that will be used to determine the sign.
* @param n_rows: number of rows of input matrix
* @param n_cols: number of columns of input matrix
* @param components: components matrix.
* @param n_cols_comp: number of columns of components matrix
* @param stream cuda stream
* @{
*/
template <typename math_t>
void signFlip(math_t* input,
              std::size_t n_rows,
              std::size_t n_cols,
              math_t* components,
              std::size_t n_cols_comp,
              cudaStream_t stream)
{
  // One device thread per column of `input` (column-major, m rows each).
  auto counting = thrust::make_counting_iterator(0);
  auto m = n_rows;
  thrust::for_each(
    rmm::exec_policy(stream), counting, counting + n_cols, [=] __device__(std::size_t idx) {
      auto d_i = idx * m;
      auto end = d_i + m;
      // Locate the entry with the largest absolute value in this column.
      math_t max = 0.0;
      std::size_t max_index = 0;
      for (auto i = d_i; i < end; i++) {
        math_t val = input[i];
        if (val < 0.0) { val = -val; }
        if (val > max) {
          max = val;
          max_index = i;
        }
      }
      // If that entry is negative, negate the whole column and the
      // corresponding entries of `components` (a stride-n_cols walk starting
      // at idx). Flipping both keeps the decomposition mathematically
      // equivalent while making the sign convention deterministic.
      if (input[max_index] < 0.0) {
        for (auto i = d_i; i < end; i++) {
          input[i] = -input[i];
        }
        auto len = n_cols * n_cols_comp;
        for (auto i = idx; i < len; i = i + n_cols) {
          components[i] = -components[i];
        }
      }
    });
}
/**
* @brief perform fit operation for the tsvd. Generates eigenvectors, explained vars, singular vals,
* etc.
* @param[in] handle: the internal cuml handle object
* @param[in] input: the data is fitted to PCA. Size n_rows x n_cols. The size of the data is
* indicated in prms.
* @param[out] components: the principal components of the input data. Size n_cols * n_components.
* @param[out] singular_vals: singular values of the data. Size n_components * 1
* @param[in] prms: data structure that includes all the parameters from input size to algorithm.
* @param[in] stream cuda stream
*/
template <typename math_t>
void tsvdFit(const raft::handle_t& handle,
             math_t* input,
             math_t* components,
             math_t* singular_vals,
             const paramsTSVD& prms,
             cudaStream_t stream)
{
  auto cublas_handle = handle.get_cublas_handle();
  ASSERT(prms.n_cols > 1, "Parameter n_cols: number of columns cannot be less than two");
  ASSERT(prms.n_rows > 1, "Parameter n_rows: number of rows cannot be less than two");
  ASSERT(prms.n_components > 0,
         "Parameter n_components: number of components cannot be less than one");
  // Clamp n_components to n_cols: the Gram matrix has at most n_cols eigenpairs.
  auto n_components = prms.n_components;
  if (prms.n_components > prms.n_cols) n_components = prms.n_cols;
  // Gram matrix: input^T * input (n_cols x n_cols).
  size_t len = prms.n_cols * prms.n_cols;
  rmm::device_uvector<math_t> input_cross_mult(len, stream);
  math_t alpha = math_t(1);
  math_t beta = math_t(0);
  raft::linalg::gemm(handle,
                     input,
                     prms.n_rows,
                     prms.n_cols,
                     input,
                     input_cross_mult.data(),
                     prms.n_cols,
                     prms.n_cols,
                     CUBLAS_OP_T,
                     CUBLAS_OP_N,
                     alpha,
                     beta,
                     stream);
  rmm::device_uvector<math_t> components_all(len, stream);
  rmm::device_uvector<math_t> explained_var_all(prms.n_cols, stream);
  // Eigenvectors of the Gram matrix are the right singular vectors of input.
  calEig(
    handle, input_cross_mult.data(), components_all.data(), explained_var_all.data(), prms, stream);
  // Keep the top n_components rows of the eigenvector matrix.
  raft::matrix::truncZeroOrigin(
    components_all.data(), prms.n_cols, components, n_components, prms.n_cols, stream);
  math_t scalar = math_t(1);
  // Singular values are the square roots of the Gram eigenvalues.
  raft::matrix::seqRoot(explained_var_all.data(), singular_vals, scalar, n_components, stream);
}
/**
* @brief performs fit and transform operations for the tsvd. Generates transformed data,
* eigenvectors, explained vars, singular vals, etc.
* @param[in] handle: the internal cuml handle object
* @param[in] input: the data is fitted to PCA. Size n_rows x n_cols. The size of the data is
* indicated in prms.
* @param[out] trans_input: the transformed data. Size n_rows * n_components.
* @param[out] components: the principal components of the input data. Size n_cols * n_components.
* @param[out] explained_var: explained variances (eigenvalues) of the principal components. Size
* n_components * 1.
* @param[out] explained_var_ratio: the ratio of the explained variance and total variance. Size
* n_components * 1.
* @param[out] singular_vals: singular values of the data. Size n_components * 1
* @param[in] prms: data structure that includes all the parameters from input size to algorithm.
* @param[in] stream cuda stream
*/
template <typename math_t>
void tsvdFitTransform(const raft::handle_t& handle,
                      math_t* input,
                      math_t* trans_input,
                      math_t* components,
                      math_t* explained_var,
                      math_t* explained_var_ratio,
                      math_t* singular_vals,
                      const paramsTSVD& prms,
                      cudaStream_t stream)
{
  // Fit, project the input into the component space, and fix component signs
  // for deterministic output.
  tsvdFit(handle, input, components, singular_vals, prms, stream);
  tsvdTransform(handle, input, components, trans_input, prms, stream);
  signFlip(trans_input, prms.n_rows, prms.n_components, components, prms.n_cols, stream);
  // Per-component variance of the transformed data = explained variance.
  rmm::device_uvector<math_t> mu_trans(prms.n_components, stream);
  raft::stats::mean(
    mu_trans.data(), trans_input, prms.n_components, prms.n_rows, true, false, stream);
  raft::stats::vars(explained_var,
                    trans_input,
                    mu_trans.data(),
                    prms.n_components,
                    prms.n_rows,
                    true,
                    false,
                    stream);
  // Total variance of the original data, used to normalize the ratio.
  rmm::device_uvector<math_t> mu(prms.n_cols, stream);
  rmm::device_uvector<math_t> vars(prms.n_cols, stream);
  raft::stats::mean(mu.data(), input, prms.n_cols, prms.n_rows, true, false, stream);
  raft::stats::vars(vars.data(), input, mu.data(), prms.n_cols, prms.n_rows, true, false, stream);
  rmm::device_scalar<math_t> total_vars(stream);
  raft::stats::sum(total_vars.data(), vars.data(), std::size_t(1), prms.n_cols, false, stream);
  // Bring the scalar back to host; sync is required before reading it.
  math_t total_vars_h;
  raft::update_host(&total_vars_h, total_vars.data(), 1, stream);
  handle.sync_stream(stream);
  // explained_var_ratio = explained_var / total variance
  math_t scalar = math_t(1) / total_vars_h;
  raft::linalg::scalarMultiply(
    explained_var_ratio, explained_var, scalar, prms.n_components, stream);
}
/**
* @brief performs transform operation for the tsvd. Transforms the data to eigenspace.
* @param[in] handle the internal cuml handle object
* @param[in] input: the data is transformed. Size n_rows x n_components.
* @param[in] components: principal components of the input data. Size n_cols * n_components.
* @param[out] trans_input: output that is transformed version of input
* @param[in] prms: data structure that includes all the parameters from input size to algorithm.
* @param[in] stream cuda stream
*/
template <typename math_t>
void tsvdTransform(const raft::handle_t& handle,
                   math_t* input,
                   math_t* components,
                   math_t* trans_input,
                   const paramsTSVD& prms,
                   cudaStream_t stream)
{
  ASSERT(prms.n_cols > 1, "Parameter n_cols: number of columns cannot be less than two");
  ASSERT(prms.n_rows > 0, "Parameter n_rows: number of rows cannot be less than one");
  ASSERT(prms.n_components > 0,
         "Parameter n_components: number of components cannot be less than one");
  // trans_input = input * components^T
  // (n_rows x n_cols) * (n_cols x n_components) -> n_rows x n_components
  math_t alpha = math_t(1);
  math_t beta  = math_t(0);
  raft::linalg::gemm(handle,
                     input,
                     prms.n_rows,
                     prms.n_cols,
                     components,
                     trans_input,
                     prms.n_rows,
                     prms.n_components,
                     CUBLAS_OP_N,
                     CUBLAS_OP_T,
                     alpha,
                     beta,
                     stream);
}
/**
* @brief performs inverse transform operation for the tsvd. Transforms the transformed data back to
* original data.
* @param[in] handle the internal cuml handle object
* @param[in] trans_input: the data is fitted to PCA. Size n_rows x n_components.
* @param[in] components: transpose of the principal components of the input data. Size n_components
* * n_cols.
* @param[out] input: the data is fitted to PCA. Size n_rows x n_cols.
* @param[in] prms: data structure that includes all the parameters from input size to algorithm.
* @param[in] stream cuda stream
*/
template <typename math_t>
void tsvdInverseTransform(const raft::handle_t& handle,
                          math_t* trans_input,
                          math_t* components,
                          math_t* input,
                          const paramsTSVD& prms,
                          cudaStream_t stream)
{
  // Fix: message previously said "cannot be less than one" while the check
  // requires at least two columns — now consistent with tsvdTransform.
  ASSERT(prms.n_cols > 1, "Parameter n_cols: number of columns cannot be less than two");
  ASSERT(prms.n_rows > 0, "Parameter n_rows: number of rows cannot be less than one");
  ASSERT(prms.n_components > 0,
         "Parameter n_components: number of components cannot be less than one");
  // input = trans_input * components
  // (n_rows x n_components) * (n_components x n_cols) -> n_rows x n_cols
  math_t alpha = math_t(1);
  math_t beta  = math_t(0);
  raft::linalg::gemm(handle,
                     trans_input,
                     prms.n_rows,
                     prms.n_components,
                     components,
                     input,
                     prms.n_rows,
                     prms.n_cols,
                     CUBLAS_OP_N,
                     CUBLAS_OP_N,
                     alpha,
                     beta,
                     stream);
}
}; // end namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/arima/batched_arima.cu | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <cmath>
#include <iostream>
#include <vector>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/fill.h>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/logical.h>
#include <cuml/tsa/batched_arima.hpp>
#include <cuml/tsa/batched_kalman.hpp>
#include <common/nvtx.hpp>
#include <linalg/batched/matrix.cuh>
#include <raft/core/handle.hpp>
#include <raft/core/nvtx.hpp>
#include <raft/linalg/matrix_vector_op.cuh>
#include <raft/stats/information_criterion.cuh>
#include <raft/stats/stats_types.hpp>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>
#include <timeSeries/arima_helpers.cuh>
#include <timeSeries/fillna.cuh>
namespace ML {
/** Pack the separate ARIMA parameter arrays into one contiguous device vector. */
void pack(raft::handle_t& handle,
          const ARIMAParams<double>& params,
          const ARIMAOrder& order,
          int batch_size,
          double* param_vec)
{
  params.pack(order, batch_size, param_vec, handle.get_stream());
}
/** Unpack a contiguous device parameter vector into the separate ARIMA parameter arrays. */
void unpack(raft::handle_t& handle,
            ARIMAParams<double>& params,
            const ARIMAOrder& order,
            int batch_size,
            const double* param_vec)
{
  params.unpack(order, batch_size, param_vec, handle.get_stream());
}
/** Apply the (seasonal) differencing prescribed by `order` to a batch of series. */
void batched_diff(raft::handle_t& handle,
                  double* d_y_diff,
                  const double* d_y,
                  int batch_size,
                  int n_obs,
                  const ARIMAOrder& order)
{
  MLCommon::TimeSeries::prepare_data(
    d_y_diff, d_y, batch_size, n_obs, order.d, order.D, order.s, handle.get_stream());
}
/** Unary device functor: returns whether a value is NaN (missing observation). */
template <typename T>
struct is_missing {
  typedef T argument_type;
  typedef T result_type;
  // Returns T (0/1) rather than bool, per the thrust functor usage below.
  __thrust_exec_check_disable__ __device__ const T operator()(const T& x) const { return isnan(x); }
};  // end is_missing
/** Return true iff any of the n_elem values in d_y is NaN (a missing observation). */
bool detect_missing(raft::handle_t& handle, const double* d_y, int n_elem)
{
  auto exec = thrust::cuda::par.on(handle.get_stream());
  return thrust::any_of(exec, d_y, d_y + n_elem, is_missing<double>());
}
/**
 * In-sample prediction and out-of-sample forecast for a batch of ARIMA models.
 * Predictions for indices [start, end) are written to d_y_p (leading dim
 * end - start per batch member); optional confidence intervals in
 * d_lower/d_upper when level > 0.
 */
void predict(raft::handle_t& handle,
             const ARIMAMemory<double>& arima_mem,
             const double* d_y,
             const double* d_exog,
             const double* d_exog_fut,
             int batch_size,
             int n_obs,
             int start,
             int end,
             const ARIMAOrder& order,
             const ARIMAParams<double>& params,
             double* d_y_p,
             bool pre_diff,
             double level,
             double* d_lower,
             double* d_upper)
{
  raft::common::nvtx::range fun_scope(__func__);
  const auto stream = handle.get_stream();

  // Difference the data up-front (outside the Kalman filter) only when
  // requested and no confidence intervals are needed (level == 0).
  bool diff = order.need_diff() && pre_diff && level == 0;

  // Number of out-of-sample steps to forecast.
  int num_steps = std::max(end - n_obs, 0);

  // Prepare data
  int n_obs_kf;
  const double* d_y_kf;
  const double* d_exog_kf;
  const double* d_exog_fut_kf = d_exog_fut;
  ARIMAOrder order_after_prep = order;
  rmm::device_uvector<double> exog_fut_buffer(0, stream);
  if (diff) {
    // Difference endog (and exog, incl. its future values) and run the
    // Kalman filter on the differenced series with d = D = 0.
    n_obs_kf = n_obs - order.n_diff();
    MLCommon::TimeSeries::prepare_data(
      arima_mem.y_diff, d_y, batch_size, n_obs, order.d, order.D, order.s, stream);
    if (order.n_exog > 0) {
      MLCommon::TimeSeries::prepare_data(arima_mem.exog_diff,
                                         d_exog,
                                         order.n_exog * batch_size,
                                         n_obs,
                                         order.d,
                                         order.D,
                                         order.s,
                                         stream);
      if (num_steps > 0) {
        exog_fut_buffer.resize(num_steps * order.n_exog * batch_size, stream);
        MLCommon::TimeSeries::prepare_future_data(exog_fut_buffer.data(),
                                                  d_exog,
                                                  d_exog_fut,
                                                  order.n_exog * batch_size,
                                                  n_obs,
                                                  num_steps,
                                                  order.d,
                                                  order.D,
                                                  order.s,
                                                  stream);
        d_exog_fut_kf = exog_fut_buffer.data();
      }
    }
    order_after_prep.d = 0;
    order_after_prep.D = 0;

    d_y_kf    = arima_mem.y_diff;
    d_exog_kf = arima_mem.exog_diff;
  } else {
    // No pre-differencing: the Kalman filter handles the raw series.
    n_obs_kf  = n_obs;
    d_y_kf    = d_y;
    d_exog_kf = d_exog;
  }

  double* d_pred = arima_mem.pred;

  // Create temporary array for the forecasts
  rmm::device_uvector<double> fc_buffer(num_steps * batch_size, stream);
  double* d_y_fc = fc_buffer.data();

  // Compute the residual and forecast
  std::vector<double> loglike = std::vector<double>(batch_size);
  /// TODO: use device loglike to avoid useless copy ; part of #2233
  batched_loglike(handle,
                  arima_mem,
                  d_y_kf,
                  d_exog_kf,
                  batch_size,
                  n_obs_kf,
                  order_after_prep,
                  params,
                  loglike.data(),
                  false,
                  true,
                  MLE,
                  0,
                  num_steps,
                  d_y_fc,
                  d_exog_fut_kf,
                  level,
                  d_lower,
                  d_upper);

  auto counting = thrust::make_counting_iterator(0);
  // Per-batch-member leading dimension of the output.
  int predict_ld = end - start;

  //
  // In-sample prediction
  //

  // The prediction loop starts by filling undefined predictions with NaN,
  // then computes the predictions from the observations and residuals
  if (start < n_obs) {
    // First index with a defined prediction when pre-differencing was applied.
    int res_offset = diff ? order.d + order.s * order.D : 0;
    int p_start    = std::max(start, res_offset);
    int p_end      = std::min(n_obs, end);
    int dD         = diff ? order.d + order.D : 0;
    // Lags used to un-difference predictions (1 for simple, s for seasonal).
    int period1 = order.d ? 1 : order.s;
    int period2 = order.d == 2 ? 1 : order.s;

    thrust::for_each(
      thrust::cuda::par.on(stream), counting, counting + batch_size, [=] __device__(int bid) {
        // NOTE(review): every batch member writes d_y_p[0]; looks like a
        // leftover initialization (racy but same value) — TODO confirm intent.
        d_y_p[0] = 0.0;
        // Undefined predictions before res_offset are set to NaN.
        for (int i = 0; i < res_offset - start; i++) {
          d_y_p[bid * predict_ld + i] = nan("");
        }
        for (int i = p_start; i < p_end; i++) {
          if (dD == 0) {
            d_y_p[bid * predict_ld + i - start] = d_pred[bid * n_obs + i];
          } else if (dD == 1) {
            // Un-difference once (simple or seasonal).
            d_y_p[bid * predict_ld + i - start] =
              d_y[bid * n_obs + i - period1] + d_pred[bid * n_obs_kf + i - res_offset];
          } else {
            // Un-difference twice (d=2, or d=1 with D=1).
            d_y_p[bid * predict_ld + i - start] =
              d_y[bid * n_obs + i - period1] + d_y[bid * n_obs + i - period2] -
              d_y[bid * n_obs + i - period1 - period2] + d_pred[bid * n_obs_kf + i - res_offset];
          }
        }
      });
  }

  //
  // Finalize out-of-sample forecast and copy in-sample predictions
  //

  if (num_steps) {
    if (diff) {
      // Un-difference the forecast so it is in the scale of the original data.
      MLCommon::TimeSeries::finalize_forecast(
        d_y_fc, d_y, num_steps, batch_size, n_obs, n_obs, order.d, order.D, order.s, stream);
    }

    // Copy forecast in d_y_p
    thrust::for_each(
      thrust::cuda::par.on(stream), counting, counting + batch_size, [=] __device__(int bid) {
        for (int i = 0; i < num_steps; i++) {
          d_y_p[bid * predict_ld + n_obs - start + i] = d_y_fc[num_steps * bid + i];
        }
      });
    /// TODO: 2D copy kernel?
  }
}
/**
* Kernel to compute the sum-of-squares log-likelihood estimation
*
* @param[in] d_y Series to fit
* @param[in] d_mu mu parameters
* @param[in] d_ar AR parameters
* @param[in] d_ma MA parameters
* @param[in] d_sar Seasonal AR parameters
* @param[in] d_sma Seasonal MA parameters
* @param[out] d_loglike Evaluated log-likelihood
* @param[in] n_obs Number of observations in a time series
* @param[in] n_phi Number of phi coefficients (combined AR-SAR)
* @param[in] n_theta Number of theta coefficients (combined MA-SMA)
* @param[in] p Number of AR parameters
* @param[in] q Number of MA parameters
* @param[in] P Number of seasonal AR parameters
* @param[in] Q Number of seasonal MA parameters
* @param[in] s Seasonal period or 0
* @param[in] k Whether to use an intercept
* @param[in] start_sum At which index to start the sum
* @param[in] start_y First used y index (observation)
* @param[in] start_v First used v index (residual)
*/
template <typename DataT>
__global__ void sum_of_squares_kernel(const DataT* d_y,
                                      const DataT* d_mu,
                                      const DataT* d_ar,
                                      const DataT* d_ma,
                                      const DataT* d_sar,
                                      const DataT* d_sma,
                                      DataT* d_loglike,
                                      int n_obs,
                                      int n_phi,
                                      int n_theta,
                                      int p,
                                      int q,
                                      int P,
                                      int Q,
                                      int s,
                                      int k,
                                      int start_sum,
                                      int start_y,
                                      int start_v)
{
  // One block per batch member; thread t holds the lag-(t+1) coefficient.
  // Load phi, theta and mu to registers
  DataT phi, theta;
  if (threadIdx.x < n_phi) {
    phi = MLCommon::TimeSeries::reduced_polynomial<true>(
      blockIdx.x, d_ar, p, d_sar, P, s, threadIdx.x + 1);
  }
  if (threadIdx.x < n_theta) {
    theta = MLCommon::TimeSeries::reduced_polynomial<false>(
      blockIdx.x, d_ma, q, d_sma, Q, s, threadIdx.x + 1);
  }

  DataT mu = k ? d_mu[blockIdx.x] : (DataT)0;

  // Shared memory layout: [ b_y (n_obs - start_y) | b_vs | reduction scratch ]
  // Shared memory: load y and initialize the residuals
  extern __shared__ DataT shared_mem[];
  DataT* b_y  = shared_mem;
  DataT* b_vs = shared_mem + n_obs - start_y;
  for (int i = threadIdx.x; i < n_obs - start_y; i += blockDim.x) {
    b_y[i] = d_y[n_obs * blockIdx.x + i + start_y];
  }
  // Residuals before start_sum are taken as zero.
  for (int i = threadIdx.x; i < start_sum - start_v; i += blockDim.x) {
    b_vs[i] = (DataT)0;
  }

  // Main loop: sequential over time; each step reduces the lag contributions
  // of all threads, then thread 0 computes and stores the new residual.
  char* temp_smem = (char*)(shared_mem + 2 * n_obs - start_y - start_v);
  DataT res, ssq = 0;
  for (int i = start_sum; i < n_obs; i++) {
    __syncthreads();
    res = (DataT)0;
    res -= threadIdx.x < n_phi ? phi * b_y[i - threadIdx.x - 1 - start_y] : (DataT)0;
    res -= threadIdx.x < n_theta ? theta * b_vs[i - threadIdx.x - 1 - start_v] : (DataT)0;
    res = raft::blockReduce(res, temp_smem);
    if (threadIdx.x == 0) {
      res += b_y[i - start_y] - mu;
      b_vs[i - start_v] = res;
      ssq += res * res;
    }
  }

  // Compute log-likelihood and write it to global memory
  if (threadIdx.x == 0) {
    d_loglike[blockIdx.x] =
      -0.5 * static_cast<DataT>(n_obs) * raft::myLog(ssq / static_cast<DataT>(n_obs - start_sum));
  }
}
/**
* Sum-of-squares estimation method
*
* @param[in] handle cuML handle
* @param[in] d_y Series to fit: shape = (n_obs, batch_size)
* @param[in] batch_size Number of time series
* @param[in] n_obs Number of observations in a time series
* @param[in] order ARIMA hyper-parameters
* @param[in] Tparams Transformed parameters
* @param[out] d_loglike Evaluated log-likelihood (device)
* @param[in] truncate Number of observations to skip in the sum
*/
void conditional_sum_of_squares(raft::handle_t& handle,
                                const double* d_y,
                                int batch_size,
                                int n_obs,
                                const ARIMAOrder& order,
                                const ARIMAParams<double>& Tparams,
                                double* d_loglike,
                                int truncate)
{
  raft::common::nvtx::range fun_scope(__func__);
  auto stream = handle.get_stream();

  int n_phi   = order.n_phi();
  int n_theta = order.n_theta();
  // The sum must start after the longest lag and any user-requested truncation.
  int max_lags  = std::max(n_phi, n_theta);
  int start_sum = std::max(max_lags, truncate);
  // First observation / residual index actually needed by the kernel.
  int start_y = start_sum - n_phi;
  int start_v = start_sum - n_theta;

  // Compute the sum-of-squares and the log-likelihood
  // One block per series; enough warps to cover all lag coefficients, plus
  // shared memory for y, the residuals and the block-reduce scratch space.
  int n_warps            = std::max(raft::ceildiv<int>(max_lags, 32), 1);
  size_t shared_mem_size = (2 * n_obs - start_y - start_v + n_warps) * sizeof(double);
  sum_of_squares_kernel<<<batch_size, 32 * n_warps, shared_mem_size, stream>>>(d_y,
                                                                               Tparams.mu,
                                                                               Tparams.ar,
                                                                               Tparams.ma,
                                                                               Tparams.sar,
                                                                               Tparams.sma,
                                                                               d_loglike,
                                                                               n_obs,
                                                                               n_phi,
                                                                               n_theta,
                                                                               order.p,
                                                                               order.q,
                                                                               order.P,
                                                                               order.Q,
                                                                               order.s,
                                                                               order.k,
                                                                               start_sum,
                                                                               start_y,
                                                                               start_v);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
/**
 * Compute the log-likelihood of a batch of ARIMA models given unpacked
 * parameters, optionally applying the Jones transform (trans) and optionally
 * producing forecasts (fc_steps / d_fc) and confidence intervals.
 */
void batched_loglike(raft::handle_t& handle,
                     const ARIMAMemory<double>& arima_mem,
                     const double* d_y,
                     const double* d_exog,
                     int batch_size,
                     int n_obs,
                     const ARIMAOrder& order,
                     const ARIMAParams<double>& params,
                     double* loglike,
                     bool trans,
                     bool host_loglike,
                     LoglikeMethod method,
                     int truncate,
                     int fc_steps,
                     double* d_fc,
                     const double* d_exog_fut,
                     double level,
                     double* d_lower,
                     double* d_upper)
{
  raft::common::nvtx::range fun_scope(__func__);

  auto stream = handle.get_stream();

  double* d_pred = arima_mem.pred;

  // Transformed-parameter workspace; mu and beta are not transformed.
  ARIMAParams<double> Tparams = {params.mu,
                                 params.beta,
                                 arima_mem.Tparams_ar,
                                 arima_mem.Tparams_ma,
                                 arima_mem.Tparams_sar,
                                 arima_mem.Tparams_sma,
                                 arima_mem.Tparams_sigma2};

  ASSERT(method == MLE || fc_steps == 0, "Only MLE method is valid for forecasting");

  /* Create log-likelihood device array if host pointer is provided */
  double* d_loglike = host_loglike ? arima_mem.loglike : loglike;

  if (trans) {
    MLCommon::TimeSeries::batched_jones_transform(
      order, batch_size, false, params, Tparams, stream);
  } else {
    // non-transformed case: just use original parameters
    Tparams.ar     = params.ar;
    Tparams.ma     = params.ma;
    Tparams.sar    = params.sar;
    Tparams.sma    = params.sma;
    Tparams.sigma2 = params.sigma2;
  }

  if (method == CSS) {
    conditional_sum_of_squares(handle, d_y, batch_size, n_obs, order, Tparams, d_loglike, truncate);
  } else {
    batched_kalman_filter(handle,
                          arima_mem,
                          d_y,
                          d_exog,
                          n_obs,
                          Tparams,
                          order,
                          batch_size,
                          d_loglike,
                          d_pred,
                          fc_steps,
                          d_fc,
                          d_exog_fut,
                          level,
                          d_lower,
                          d_upper);
  }

  if (host_loglike) {
    /* Transfer log-likelihood device -> host */
    raft::update_host(loglike, d_loglike, batch_size, stream);
  }
}
/**
 * Overload taking a packed parameter vector (d_params): unpacks it into the
 * workspace arrays, then delegates to the unpacked-parameter overload.
 */
void batched_loglike(raft::handle_t& handle,
                     const ARIMAMemory<double>& arima_mem,
                     const double* d_y,
                     const double* d_exog,
                     int batch_size,
                     int n_obs,
                     const ARIMAOrder& order,
                     const double* d_params,
                     double* loglike,
                     bool trans,
                     bool host_loglike,
                     LoglikeMethod method,
                     int truncate)
{
  raft::common::nvtx::range fun_scope(__func__);

  // unpack parameters
  auto stream = handle.get_stream();

  ARIMAParams<double> params = {arima_mem.params_mu,
                                arima_mem.params_beta,
                                arima_mem.params_ar,
                                arima_mem.params_ma,
                                arima_mem.params_sar,
                                arima_mem.params_sma,
                                arima_mem.params_sigma2};

  params.unpack(order, batch_size, d_params, stream);

  batched_loglike(handle,
                  arima_mem,
                  d_y,
                  d_exog,
                  batch_size,
                  n_obs,
                  order,
                  params,
                  loglike,
                  trans,
                  host_loglike,
                  method,
                  truncate);
}
/**
 * Numerical gradient of the batched log-likelihood w.r.t. the packed parameter
 * vector, via one-sided finite differences with step h (first-order accuracy).
 */
void batched_loglike_grad(raft::handle_t& handle,
                          const ARIMAMemory<double>& arima_mem,
                          const double* d_y,
                          const double* d_exog,
                          int batch_size,
                          int n_obs,
                          const ARIMAOrder& order,
                          const double* d_x,
                          double* d_grad,
                          double h,
                          bool trans,
                          LoglikeMethod method,
                          int truncate)
{
  raft::common::nvtx::range fun_scope(__func__);
  auto stream   = handle.get_stream();
  auto counting = thrust::make_counting_iterator(0);
  // Number of parameters per batch member.
  int N = order.complexity();

  // Initialize the perturbed x vector
  double* d_x_pert = arima_mem.x_pert;
  raft::copy(d_x_pert, d_x, N * batch_size, stream);

  double* d_ll_base = arima_mem.loglike_base;
  double* d_ll_pert = arima_mem.loglike_pert;

  // Evaluate the log-likelihood with the given parameter vector
  batched_loglike(handle,
                  arima_mem,
                  d_y,
                  d_exog,
                  batch_size,
                  n_obs,
                  order,
                  d_x,
                  d_ll_base,
                  trans,
                  false,
                  method,
                  truncate);

  // One full log-likelihood evaluation per parameter dimension.
  for (int i = 0; i < N; i++) {
    // Add the perturbation to the i-th parameter
    thrust::for_each(
      thrust::cuda::par.on(stream), counting, counting + batch_size, [=] __device__(int bid) {
        d_x_pert[N * bid + i] = d_x[N * bid + i] + h;
      });

    // Evaluate the log-likelihood with the positive perturbation
    batched_loglike(handle,
                    arima_mem,
                    d_y,
                    d_exog,
                    batch_size,
                    n_obs,
                    order,
                    d_x_pert,
                    d_ll_pert,
                    trans,
                    false,
                    method,
                    truncate);

    // First derivative with a first-order accuracy
    thrust::for_each(
      thrust::cuda::par.on(stream), counting, counting + batch_size, [=] __device__(int bid) {
        d_grad[N * bid + i] = (d_ll_pert[bid] - d_ll_base[bid]) / h;
      });

    // Reset the i-th parameter
    thrust::for_each(
      thrust::cuda::par.on(stream), counting, counting + batch_size, [=] __device__(int bid) {
        d_x_pert[N * bid + i] = d_x[N * bid + i];
      });
  }
}
/**
 * Compute an information criterion (AIC/AICc/BIC, selected by ic_type) for a
 * batch of fitted ARIMA models; result in d_ic (device, one value per member).
 */
void information_criterion(raft::handle_t& handle,
                           const ARIMAMemory<double>& arima_mem,
                           const double* d_y,
                           const double* d_exog,
                           int batch_size,
                           int n_obs,
                           const ARIMAOrder& order,
                           const ARIMAParams<double>& params,
                           double* d_ic,
                           int ic_type)
{
  raft::common::nvtx::range fun_scope(__func__);
  auto stream = handle.get_stream();

  /* Compute log-likelihood in d_ic */
  batched_loglike(
    handle, arima_mem, d_y, d_exog, batch_size, n_obs, order, params, d_ic, false, false, MLE);

  /* Compute information criterion from log-likelihood and base term */
  // In-place: d_ic holds the log-likelihood on input, the criterion on output.
  raft::stats::information_criterion_batched(d_ic,
                                             d_ic,
                                             static_cast<raft::stats::IC_Type>(ic_type),
                                             order.complexity(),
                                             batch_size,
                                             n_obs - order.n_diff(),
                                             stream);
}
/**
* Test that the parameters are valid for the inverse transform
*
* @tparam isAr Are these (S)AR or (S)MA parameters?
* @param[in] params Parameters
* @param[in] pq p for AR, q for MA, P for SAR, Q for SMA
*/
template <bool isAr>
DI bool test_invparams(const double* params, int pq)
{
  // NOTE(review): fixed-size local arrays imply the caller must ensure
  // pq <= 8, otherwise the loops below overflow — TODO confirm upstream bound.
  double new_params[8];
  double tmp[8];

  constexpr double coef = isAr ? 1 : -1;

  for (int i = 0; i < pq; i++) {
    tmp[i]        = params[i];
    new_params[i] = tmp[i];
  }

  // Perform inverse transform and stop before atanh step
  // (Jones inverse recursion: peel off one coefficient per iteration.)
  for (int j = pq - 1; j > 0; --j) {
    double a = new_params[j];
    for (int k = 0; k < j; ++k) {
      tmp[k] = (new_params[k] + coef * a * new_params[j - k - 1]) / (1 - (a * a));
    }
    for (int iter = 0; iter < j; ++iter) {
      new_params[iter] = tmp[iter];
    }
  }

  // Verify that the values are between -1 and 1
  bool result = true;
  for (int i = 0; i < pq; i++) {
    result = result && !(new_params[i] <= -1 || new_params[i] >= 1);
  }
  return result;
}
/**
* Auxiliary function of _start_params: least square approximation of an
* ARMA model (with or without seasonality)
* @note: in this function the non-seasonal case has s=1, not s=0!
*/
void _arma_least_squares(raft::handle_t& handle,
                         double* d_ar,
                         double* d_ma,
                         double* d_sigma2,
                         const MLCommon::LinAlg::Batched::Matrix<double>& bm_y,
                         int p,
                         int q,
                         int s,
                         bool estimate_sigma2,
                         int k          = 0,
                         double* d_mu   = nullptr)
{
  const auto& handle_impl = handle;
  auto stream             = handle_impl.get_stream();
  auto cublas_handle      = handle_impl.get_cublas_handle();

  auto counting = thrust::make_counting_iterator(0);

  int batch_size = bm_y.batches();
  int n_obs      = bm_y.shape().first;

  // ps/qs: lag extents in observations; p_ar: order of the initial AR fit
  // used to approximate the residuals; r: total burn-in of observations.
  int ps = p * s, qs = q * s;
  int p_ar = std::max(ps, 2 * qs);
  int r    = std::max(p_ar + qs, ps);

  if ((q && p_ar >= n_obs - p_ar) || p + q + k >= n_obs - r) {
    // Too few observations for the estimate, fill with 0 (1 for sigma2)
    if (k) RAFT_CUDA_TRY(cudaMemsetAsync(d_mu, 0, sizeof(double) * batch_size, stream));
    if (p) RAFT_CUDA_TRY(cudaMemsetAsync(d_ar, 0, sizeof(double) * p * batch_size, stream));
    if (q) RAFT_CUDA_TRY(cudaMemsetAsync(d_ma, 0, sizeof(double) * q * batch_size, stream));
    if (estimate_sigma2) {
      thrust::device_ptr<double> sigma2_thrust = thrust::device_pointer_cast(d_sigma2);
      thrust::fill(thrust::cuda::par.on(stream), sigma2_thrust, sigma2_thrust + batch_size, 1.0);
    }
    return;
  }

  /* Matrix formed by lag matrices of y and the residuals respectively,
   * side by side. The left side will be used to estimate AR, the right
   * side to estimate MA */
  MLCommon::LinAlg::Batched::Matrix<double> bm_ls_ar_res(
    n_obs - r, p + q + k, batch_size, cublas_handle, stream, false);
  int ar_offset  = r - ps;
  int res_offset = r - p_ar - qs;

  // Get residuals from an AR(p_ar) model to estimate the MA parameters
  if (q) {
    // Create lagged y
    int ls_height = n_obs - p_ar;
    MLCommon::LinAlg::Batched::Matrix<double> bm_ls =
      MLCommon::LinAlg::Batched::b_lagged_mat(bm_y, p_ar);

    /* Matrix for the initial AR fit, initialized by copy of y
     * (note: this is because gels works in-place ; the matrix has larger
     * dimensions than the actual AR fit) */
    MLCommon::LinAlg::Batched::Matrix<double> bm_ar_fit =
      MLCommon::LinAlg::Batched::b_2dcopy(bm_y, p_ar, 0, ls_height, 1);

    // Residual, initialized as offset y to avoid one kernel call
    MLCommon::LinAlg::Batched::Matrix<double> bm_residual(bm_ar_fit);

    // Initial AR fit
    MLCommon::LinAlg::Batched::b_gels(bm_ls, bm_ar_fit);

    // Compute residual (technically a gemv)
    MLCommon::LinAlg::Batched::b_gemm(
      false, false, ls_height, 1, p_ar, -1.0, bm_ls, bm_ar_fit, 1.0, bm_residual);

    // Lags of the residual
    MLCommon::LinAlg::Batched::b_lagged_mat(
      bm_residual, bm_ls_ar_res, q, n_obs - r, res_offset, (n_obs - r) * (k + p), s);
  }

  // Fill the first column of the matrix with 1 if we fit an intercept
  if (k) {
    double* d_ls_ar_res = bm_ls_ar_res.raw_data();
    thrust::for_each(
      thrust::cuda::par.on(stream), counting, counting + batch_size, [=] __device__(int bid) {
        double* b_ls_ar_res = d_ls_ar_res + bid * (n_obs - r) * (p + q + k);
        for (int i = 0; i < n_obs - r; i++) {
          b_ls_ar_res[i] = 1.0;
        }
      });
  }

  // Lags of y
  MLCommon::LinAlg::Batched::b_lagged_mat(
    bm_y, bm_ls_ar_res, p, n_obs - r, ar_offset, (n_obs - r) * k, s);

  /* Initializing the vector for the ARMA fit
   * (note: also in-place as described for AR fit) */
  MLCommon::LinAlg::Batched::Matrix<double> bm_arma_fit =
    MLCommon::LinAlg::Batched::b_2dcopy(bm_y, r, 0, n_obs - r, 1);

  // The residuals will be computed only if sigma2 is requested
  MLCommon::LinAlg::Batched::Matrix<double> bm_final_residual(
    n_obs - r, 1, batch_size, cublas_handle, stream, false);
  if (estimate_sigma2) {
    raft::copy(
      bm_final_residual.raw_data(), bm_arma_fit.raw_data(), (n_obs - r) * batch_size, stream);
  }

  // ARMA fit
  MLCommon::LinAlg::Batched::b_gels(bm_ls_ar_res, bm_arma_fit);

  // Copy the results in the parameter vectors
  // (layout of the solution: [intercept | AR coefs | MA coefs] per batch)
  const double* d_arma_fit = bm_arma_fit.raw_data();
  thrust::for_each(
    thrust::cuda::par.on(stream), counting, counting + batch_size, [=] __device__(int bid) {
      const double* b_arma_fit = d_arma_fit + bid * (n_obs - r);
      if (k) { d_mu[bid] = b_arma_fit[0]; }
      if (p) {
        double* b_ar = d_ar + bid * p;
        for (int i = 0; i < p; i++) {
          b_ar[i] = b_arma_fit[i + k];
        }
      }
      if (q) {
        double* b_ma = d_ma + bid * q;
        for (int i = 0; i < q; i++) {
          b_ma[i] = b_arma_fit[i + p + k];
        }
      }
    });

  if (estimate_sigma2) {
    // Compute final residual (technically a gemv)
    MLCommon::LinAlg::Batched::b_gemm(false,
                                      false,
                                      n_obs - r,
                                      1,
                                      p + q + k,
                                      -1.0,
                                      bm_ls_ar_res,
                                      bm_arma_fit,
                                      1.0,
                                      bm_final_residual);

    // Compute variance
    double* d_residual = bm_final_residual.raw_data();
    thrust::for_each(
      thrust::cuda::par.on(stream), counting, counting + batch_size, [=] __device__(int bid) {
        double acc              = 0.0;
        const double* b_residual = d_residual + (n_obs - r) * bid;
        for (int i = q; i < n_obs - r; i++) {
          double res = b_residual[i];
          acc += res * res;
        }
        d_sigma2[bid] = acc / static_cast<double>(n_obs - r - q);
      });
  }

  // If (S)AR or (S)MA are not valid for the inverse transform, set them to zero
  thrust::for_each(
    thrust::cuda::par.on(stream), counting, counting + batch_size, [=] __device__(int bid) {
      if (p) {
        double* b_ar = d_ar + bid * p;
        bool valid   = test_invparams<true>(b_ar, p);
        if (!valid) {
          for (int ip = 0; ip < p; ip++)
            b_ar[ip] = 0;
        }
      }
      if (q) {
        double* b_ma = d_ma + bid * q;
        bool valid   = test_invparams<false>(b_ma, q);
        if (!valid) {
          for (int iq = 0; iq < q; iq++)
            b_ma[iq] = 0;
        }
      }
    });
}
/**
* Auxiliary function of estimate_x0: compute the starting parameters for
* the series pre-processed by estimate_x0
*/
void _start_params(raft::handle_t& handle,
                   ARIMAParams<double>& params,
                   MLCommon::LinAlg::Batched::Matrix<double>& bm_y,
                   const MLCommon::LinAlg::Batched::Matrix<double>& bm_exog,
                   const ARIMAOrder& order)
{
  int batch_size      = bm_exog.batches();
  cudaStream_t stream = bm_exog.stream();

  // Estimate exog coefficients and subtract component to endog.
  // Exog coefficients are estimated by fitting a linear regression with X=exog, y=endog
  if (order.n_exog > 0) {
    // In most cases, the system will be overdetermined and we can use gels
    if (bm_exog.shape().first > static_cast<unsigned int>(order.n_exog)) {
      // Make a copy of the exogenous series for in-place gels
      MLCommon::LinAlg::Batched::Matrix<double> bm_exog_copy(bm_exog);
      // Make a copy of the endogenous series for in-place gels
      MLCommon::LinAlg::Batched::Matrix<double> bm_y_copy(bm_y);
      // Least-squares solution of overdetermined system
      rmm::device_uvector<int> info(batch_size, stream);
      b_gels(bm_exog_copy, bm_y_copy, info.data());

      // Make a batched matrix around the exogenous coefficients
      // (a view over params.beta — no allocation).
      rmm::device_uvector<double*> beta_pointers(batch_size, stream);
      MLCommon::LinAlg::Batched::Matrix<double> bm_exog_coef(order.n_exog,
                                                             1,
                                                             batch_size,
                                                             bm_exog.cublasHandle(),
                                                             beta_pointers.data(),
                                                             params.beta,
                                                             stream,
                                                             false);
      // Copy the solution of the system to the parameters array
      b_2dcopy(bm_y_copy, bm_exog_coef, 0, 0, order.n_exog, 1);

      // Set parameters to zero when solving was not successful
      // (gels reports per-batch failure via devInfo > 0).
      auto counting       = thrust::make_counting_iterator(0);
      int* devInfoArray   = info.data();
      double* d_exog_coef = bm_exog_coef.raw_data();
      const int& n_exog   = order.n_exog;
      thrust::for_each(
        thrust::cuda::par.on(stream), counting, counting + batch_size, [=] __device__(int bid) {
          if (devInfoArray[bid] > 0) {
            for (int i = 0; i < n_exog; i++) {
              d_exog_coef[bid * n_exog + i] = 0.0;
            }
          }
        });

      // Compute exogenous component and store the result in bm_y_copy
      b_gemm(false,
             false,
             bm_exog.shape().first,
             1,
             bm_exog.shape().second,
             1.0,
             bm_exog,
             bm_exog_coef,
             0.0,
             bm_y_copy);

      // Subtract exogenous component to endogenous variable
      b_aA_op_B(bm_y, bm_y_copy, bm_y, [] __device__(double a, double b) { return a - b; });
    }
    // In other cases, we initialize to zero
    else {
      RAFT_CUDA_TRY(
        cudaMemsetAsync(params.beta, 0, order.n_exog * batch_size * sizeof(double), stream));
    }
  }

  // Estimate an ARMA fit without seasonality
  if (order.p + order.q + order.k)
    _arma_least_squares(handle,
                        params.ar,
                        params.ma,
                        params.sigma2,
                        bm_y,
                        order.p,
                        order.q,
                        1,
                        true,
                        order.k,
                        params.mu);

  // Estimate a seasonal ARMA fit independently
  // (sigma2 is only estimated here if the non-seasonal fit did not run).
  if (order.P + order.Q)
    _arma_least_squares(handle,
                        params.sar,
                        params.sma,
                        params.sigma2,
                        bm_y,
                        order.P,
                        order.Q,
                        order.s,
                        order.p + order.q + order.k == 0);
}
/**
 * Estimate starting parameters for a batch of ARIMA models: fill missing
 * values, apply the required differencing to endog/exog, then delegate the
 * actual estimation to _start_params.
 */
void estimate_x0(raft::handle_t& handle,
                 ARIMAParams<double>& params,
                 const double* d_y,
                 const double* d_exog,
                 int batch_size,
                 int n_obs,
                 const ARIMAOrder& order,
                 bool missing)
{
  raft::common::nvtx::range fun_scope(__func__);
  const auto& handle_impl = handle;
  auto stream             = handle_impl.get_stream();
  auto cublas_handle      = handle_impl.get_cublas_handle();

  /// TODO: solve exogenous coefficients with only valid rows instead of interpolation?
  //  Pros: better coefficients
  //  Cons: harder to test, a bit more complicated

  // Least squares can't deal with missing values: create copy with naive
  // replacements for missing values
  const double* d_y_no_missing;
  rmm::device_uvector<double> y_no_missing(0, stream);
  if (missing) {
    y_no_missing.resize(n_obs * batch_size, stream);
    d_y_no_missing = y_no_missing.data();

    raft::copy(y_no_missing.data(), d_y, n_obs * batch_size, stream);
    MLCommon::TimeSeries::fillna(y_no_missing.data(), batch_size, n_obs, stream);
  } else {
    d_y_no_missing = d_y;
  }

  // Difference if necessary, copy otherwise
  MLCommon::LinAlg::Batched::Matrix<double> bm_yd(
    n_obs - order.d - order.s * order.D, 1, batch_size, cublas_handle, stream, false);
  MLCommon::TimeSeries::prepare_data(
    bm_yd.raw_data(), d_y_no_missing, batch_size, n_obs, order.d, order.D, order.s, stream);
  // Difference or copy exog
  MLCommon::LinAlg::Batched::Matrix<double> bm_exog_diff(
    n_obs - order.d - order.s * order.D, order.n_exog, batch_size, cublas_handle, stream, false);
  if (order.n_exog > 0) {
    MLCommon::TimeSeries::prepare_data(bm_exog_diff.raw_data(),
                                       d_exog,
                                       order.n_exog * batch_size,
                                       n_obs,
                                       order.d,
                                       order.D,
                                       order.s,
                                       stream);
  }

  // Do the computation of the initial parameters
  _start_params(handle, params, bm_yd, bm_exog_diff, order);
}
} // namespace ML | 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/arima/batched_kalman.cu | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <vector>
#include <cuml/tsa/batched_kalman.hpp>
#include <cub/cub.cuh>
#include <thrust/execution_policy.h>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
#include <raft/core/handle.hpp>
#include <raft/linalg/add.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
// #TODO: Replace with public header when ready
#include <raft/linalg/detail/cublas_wrappers.hpp>
#include <rmm/device_uvector.hpp>
#include <linalg/batched/matrix.cuh>
#include <linalg/block.cuh>
#include <raft/core/nvtx.hpp>
#include <timeSeries/arima_helpers.cuh>
namespace ML {
//! Thread-local Matrix-Vector multiplication.
// out = A * v, where A is an n x n column-major matrix in thread-local storage.
template <int n>
DI void Mv_l(const double* A, const double* v, double* out)
{
  for (int row = 0; row < n; row++) {
    double acc = 0.0;
    for (int col = 0; col < n; col++) {
      acc += A[col * n + row] * v[col];
    }
    out[row] = acc;
  }
}
// out = alpha * A * v (scaled thread-local matrix-vector product,
// A is n x n column-major).
template <int n>
DI void Mv_l(double alpha, const double* A, const double* v, double* out)
{
  for (int row = 0; row < n; row++) {
    double acc = 0.0;
    for (int col = 0; col < n; col++) {
      acc += A[col * n + row] * v[col];
    }
    out[row] = alpha * acc;
  }
}
//! Thread-local Matrix-Matrix multiplication.
// out = op(A) * op(B) for n x n column-major matrices, where op is identity
// or transpose depending on the aT/bT template flags.
template <int n, bool aT = false, bool bT = false>
DI void MM_l(const double* A, const double* B, double* out)
{
  for (int j = 0; j < n; j++) {
    for (int i = 0; i < n; i++) {
      double acc = 0.0;
      for (int k = 0; k < n; k++) {
        double a_ik = aT ? A[k + i * n] : A[i + k * n];
        double b_kj = bT ? B[j + k * n] : B[k + j * n];
        acc += a_ik * b_kj;
      }
      out[i + j * n] = acc;
    }
  }
}
/** Improve stability by making a covariance matrix symmetric and forcing
* diagonal elements to be positive
*/
template <int n>
DI void numerical_stability(double* A)
{
  // Symmetrize: replace each off-diagonal pair with its average,
  // i.e. A = 0.5 * (A + A')
  for (int j = 1; j < n; j++) {
    for (int i = 0; i < j; i++) {
      double avg   = 0.5 * (A[j * n + i] + A[i * n + j]);
      A[j * n + i] = avg;
      A[i * n + j] = avg;
    }
  }
  // Force a non-negative diagonal: Aii = abs(Aii)
  for (int i = 0; i < n; i++) {
    A[i * n + i] = abs(A[i * n + i]);
  }
}
/**
* Kalman loop kernel. Each thread computes kalman filter for a single series
* and stores relevant matrices in registers.
*
* @tparam rd Dimension of the state vector
* @param[in] ys Batched time series
* @param[in] nobs Number of observation per series
* @param[in] T Batched transition matrix. (r x r)
* @param[in] Z Batched "design" vector (1 x r)
* @param[in] RQR Batched R*Q*R' (r x r)
* @param[in] P Batched P (r x r)
* @param[in] alpha Batched state vector (r x 1)
* @param[in] intercept Do we fit an intercept?
* @param[in] d_mu Batched intercept (1)
* @param[in] batch_size Batch size
* @param[in] d_obs_inter Observation intercept
* @param[in] d_obs_inter_fut Observation intercept for forecasts
* @param[out] d_pred Predictions (nobs)
* @param[out] d_loglike Log-likelihood (1)
* @param[in] n_diff d + s*D
* @param[in] fc_steps Number of steps to forecast
* @param[out] d_fc Array to store the forecast
* @param[in] conf_int Whether to compute confidence intervals
* @param[out] d_F_fc Batched variance of forecast errors (fc_steps)
*/
template <int rd>
__global__ void batched_kalman_loop_kernel(const double* ys,
                                           int nobs,
                                           const double* T,
                                           const double* Z,
                                           const double* RQR,
                                           const double* P,
                                           const double* alpha,
                                           bool intercept,
                                           const double* d_mu,
                                           int batch_size,
                                           const double* d_obs_inter,
                                           const double* d_obs_inter_fut,
                                           double* d_pred,
                                           double* d_loglike,
                                           int n_diff,
                                           int fc_steps = 0,
                                           double* d_fc = nullptr,
                                           bool conf_int = false,
                                           double* d_F_fc = nullptr)
{
  // One thread per batch member. All state matrices (rd x rd) are kept in
  // thread-local arrays, which is why rd is a compile-time template parameter.
  constexpr int rd2 = rd * rd;
  double l_RQR[rd2];
  double l_T[rd2];
  double l_Z[rd];
  double l_P[rd2];
  double l_alpha[rd];
  double l_K[rd];
  double l_tmp[rd2];
  double l_TP[rd2];

  int bid = blockDim.x * blockIdx.x + threadIdx.x;

  if (bid < batch_size) {
    // Load global mem into registers
    {
      int b_rd_offset  = bid * rd;
      int b_rd2_offset = bid * rd2;
      for (int i = 0; i < rd2; i++) {
        l_RQR[i] = RQR[b_rd2_offset + i];
        l_T[i]   = T[b_rd2_offset + i];
        l_P[i]   = P[b_rd2_offset + i];
      }
      for (int i = 0; i < rd; i++) {
        // Z is only read when there is a differencing component; for
        // n_diff == 0 the code below uses element 0 of the state directly
        if (n_diff > 0) l_Z[i] = Z[b_rd_offset + i];
        l_alpha[i] = alpha[b_rd_offset + i];
      }
    }

    double b_sum_logFs = 0.0;
    double b_ll_s2     = 0.0;
    int n_obs_ll       = 0;
    const double* b_ys = ys + bid * nobs;
    double* b_pred     = d_pred + bid * nobs;
    double mu          = intercept ? d_mu[bid] : 0.0;

    for (int it = 0; it < nobs; it++) {
      double _Fs, vs_it;
      bool missing;
      {
        // 1. v = y - Z*alpha
        double pred = 0.0;
        if (d_obs_inter != nullptr) { pred += d_obs_inter[bid * nobs + it]; }
        if (n_diff == 0)
          pred += l_alpha[0];
        else {
          for (int i = 0; i < rd; i++) {
            pred += l_alpha[i] * l_Z[i];
          }
        }
        b_pred[it] = pred;
        double yt = b_ys[it];
        // NaN marks a missing observation: the innovation-dependent update
        // steps (K, alpha correction, L) are skipped for this time step
        missing = isnan(yt);

        if (!missing) {
          vs_it = yt - pred;

          // 2. F = Z*P*Z'
          if (n_diff == 0)
            _Fs = l_P[0];
          else {
            _Fs = 0.0;
            for (int i = 0; i < rd; i++) {
              for (int j = 0; j < rd; j++) {
                _Fs += l_P[j * rd + i] * l_Z[i] * l_Z[j];
              }
            }
          }

          // The first n_diff observations are excluded from the
          // log-likelihood (diffuse-initialized differencing states)
          if (it >= n_diff) {
            b_sum_logFs += log(_Fs);
            b_ll_s2 += vs_it * vs_it / _Fs;
            n_obs_ll++;
          }
        }
      }

      // 3. K = 1/Fs[it] * T*P*Z'
      // TP = T*P
      MM_l<rd>(l_T, l_P, l_TP);
      if (!missing) {
        // K = 1/Fs[it] * TP*Z'
        double _1_Fs = 1.0 / _Fs;
        if (n_diff == 0) {
          // Z = e1, so TP*Z' is the first column of TP
          for (int i = 0; i < rd; i++) {
            l_K[i] = _1_Fs * l_TP[i];
          }
        } else {
          Mv_l<rd>(_1_Fs, l_TP, l_Z, l_K);
        }
      }

      // 4. alpha = T*alpha + K*vs[it] + c
      // tmp = T*alpha
      Mv_l<rd>(l_T, l_alpha, l_tmp);
      // alpha = tmp + K*vs[it]
      for (int i = 0; i < rd; i++) {
        l_alpha[i] = l_tmp[i] + (missing ? 0.0 : l_K[i] * vs_it);
      }
      // alpha = alpha + c  (intercept enters at position n_diff)
      l_alpha[n_diff] += mu;

      // 5. L = T - K * Z
      // L = T (L is tmp)
      for (int i = 0; i < rd2; i++) {
        l_tmp[i] = l_T[i];
      }
      if (!missing) {
        // L = L - K * Z
        if (n_diff == 0) {
          // Z = e1: only the first column of L is modified
          for (int i = 0; i < rd; i++) {
            l_tmp[i] -= l_K[i];
          }
        } else {
          for (int i = 0; i < rd; i++) {
            for (int j = 0; j < rd; j++) {
              l_tmp[j * rd + i] -= l_K[i] * l_Z[j];
            }
          }
        }
      }

      // 6. P = T*P*L' + R*Q*R'
      // P = TP*L'
      MM_l<rd, false, true>(l_TP, l_tmp, l_P);
      // P = P + RQR
      for (int i = 0; i < rd2; i++) {
        l_P[i] += l_RQR[i];
      }

      // Numerical stability: enforce symmetry of P and positivity of diagonal
      numerical_stability<rd>(l_P);
    }

    // Compute log-likelihood
    // NOTE(review): if every counted observation of a series is missing,
    // n_obs_ll is 0 and this divides by zero -- presumably ruled out by the
    // caller; confirm upstream validation.
    {
      double n_obs_ll_f = static_cast<double>(n_obs_ll);
      b_ll_s2 /= n_obs_ll_f;
      d_loglike[bid] = -.5 * (b_sum_logFs + n_obs_ll_f * (b_ll_s2 + log(2 * M_PI)));
    }

    // Forecast
    {
      double* b_fc   = fc_steps ? d_fc + bid * fc_steps : nullptr;
      double* b_F_fc = conf_int ? d_F_fc + bid * fc_steps : nullptr;
      for (int it = 0; it < fc_steps; it++) {
        double pred = 0.0;
        if (d_obs_inter_fut != nullptr) { pred += d_obs_inter_fut[bid * fc_steps + it]; }
        if (n_diff == 0)
          pred += l_alpha[0];
        else {
          for (int i = 0; i < rd; i++) {
            pred += l_alpha[i] * l_Z[i];
          }
        }
        b_fc[it] = pred;

        // alpha = T*alpha + c  (state propagation without observations)
        Mv_l<rd>(l_T, l_alpha, l_tmp);
        for (int i = 0; i < rd; i++) {
          l_alpha[i] = l_tmp[i];
        }
        l_alpha[n_diff] += mu;

        // P only needs to be propagated when confidence intervals are
        // requested: it is used solely to compute the forecast variance F
        if (conf_int) {
          if (n_diff == 0)
            b_F_fc[it] = l_P[0];
          else {
            double _Fs = 0.0;
            for (int i = 0; i < rd; i++) {
              for (int j = 0; j < rd; j++) {
                _Fs += l_P[j * rd + i] * l_Z[i] * l_Z[j];
              }
            }
            b_F_fc[it] = _Fs;
          }

          // P = T*P*T' + RR'
          // TP = T*P
          MM_l<rd>(l_T, l_P, l_TP);
          // P = TP*T'
          MM_l<rd, false, true>(l_TP, l_T, l_P);
          // P = P + RR'
          for (int i = 0; i < rd2; i++) {
            l_P[i] += l_RQR[i];
          }

          // Numerical stability: enforce symmetry of P and positivity of diagonal
          numerical_stability<rd>(l_P);
        }
      }
    }
  }
}
/**
* This union allows for efficient reuse of shared memory in the Kalman
* filter.
*/
template <typename GemmPolicy, typename GemvPolicy, typename CovPolicy, typename T>
union KalmanLoopSharedMemory {
  // A union lets these scratch buffers share the same shared-memory bytes;
  // the kernel separates the uses of different members with block-wide
  // barriers (see the "necessary to reuse shared memory" syncs).
  MLCommon::LinAlg::ReductionStorage<GemmPolicy::BlockSize, T> reduction_storage;
  MLCommon::LinAlg::GemmStorage<GemmPolicy, T> gemm_storage;
  // Two gemv buffers: steps 3 and 4 of the filter compute K and T*alpha
  // back-to-back before the next barrier
  MLCommon::LinAlg::GemvStorage<GemvPolicy, T> gemv_storage[2];
  MLCommon::LinAlg::CovStabilityStorage<CovPolicy, T> cov_stability_storage;
};
/**
* Kalman loop kernel. Each block computes kalman filter for a single series.
*
* @tparam GemmPolicy Execution policy for GEMM
* @tparam GemvPolicy Execution policy for GEMV
* @tparam CovPolicy Execution policy for the covariance stability operation
* @param[in] d_ys Batched time series
* @param[in] batch_size Batch size
* @param[in] n_obs Number of observation per series
* @param[in] d_T Batched transition matrix. (r x r)
* @param[in] d_Z Batched "design" vector (1 x r)
* @param[in] d_RQR Batched R*Q*R' (r x r)
* @param[in] d_P Batched P (r x r)
* @param[in] d_alpha Batched state vector (r x 1)
* @param[in] d_m_tmp Batched temporary matrix (r x r)
* @param[in] d_TP Batched temporary matrix to store TP (r x r)
* @param[in] intercept Do we fit an intercept?
* @param[in] d_mu Batched intercept (1)
* @param[in] rd State vector dimension
* @param[in] d_obs_inter Observation intercept
* @param[in] d_obs_inter_fut Observation intercept for forecasts
* @param[out] d_pred Predictions (nobs)
* @param[out] d_loglike Log-likelihood (1)
* @param[in] n_diff d + s*D
* @param[in] fc_steps Number of steps to forecast
* @param[out] d_fc Array to store the forecast
* @param[in] conf_int Whether to compute confidence intervals
* @param[out] d_F_fc Batched variance of forecast errors (fc_steps)
*/
template <typename GemmPolicy, typename GemvPolicy, typename CovPolicy>
__global__ void _batched_kalman_device_loop_large_kernel(const double* d_ys,
                                                         int batch_size,
                                                         int n_obs,
                                                         const double* d_T,
                                                         const double* d_Z,
                                                         const double* d_RQR,
                                                         double* d_P,
                                                         double* d_alpha,
                                                         double* d_m_tmp,
                                                         double* d_TP,
                                                         bool intercept,
                                                         const double* d_mu,
                                                         int rd,
                                                         const double* d_obs_inter,
                                                         const double* d_obs_inter_fut,
                                                         double* d_pred,
                                                         double* d_loglike,
                                                         int n_diff,
                                                         int fc_steps,
                                                         double* d_fc,
                                                         bool conf_int,
                                                         double* d_F_fc)
{
  int rd2 = rd * rd;

  // Dynamic shared memory allocation: four vectors of rd doubles.
  // The layout must match the 4 * rd * sizeof(double) size passed at launch.
  extern __shared__ char dyna_shared_mem[];
  double* shared_vec0  = (double*)dyna_shared_mem;
  double* shared_Z     = (double*)(dyna_shared_mem + rd * sizeof(double));
  double* shared_alpha = (double*)(dyna_shared_mem + 2 * rd * sizeof(double));
  double* shared_K     = (double*)(dyna_shared_mem + 3 * rd * sizeof(double));

  __shared__ KalmanLoopSharedMemory<GemmPolicy, GemvPolicy, CovPolicy, double> shared_mem;

  // One block per batch member, grid-striding over the batch. The rd x rd
  // matrices (P, TP, tmp) stay in global memory; only vectors live in shared.
  for (int bid = blockIdx.x; bid < batch_size; bid += gridDim.x) {
    /* Load Z and alpha to shared memory */
    for (int i = threadIdx.x; i < rd; i += GemmPolicy::BlockSize) {
      shared_Z[i]     = d_Z[bid * rd + i];
      shared_alpha[i] = d_alpha[bid * rd + i];
    }
    __syncthreads();  // ensure alpha and Z are loaded before 1.

    /* Initialization */
    double mu_     = intercept ? d_mu[bid] : 0.0;
    double sum_logFs = 0.0;
    double ll_s2     = 0.0;
    int n_obs_ll     = 0;
    int it           = 0;

    /* Skip missing observations at the start */
    // Only possible when there is no observation intercept: then the
    // prediction is constant until the first valid observation updates alpha.
    if (d_obs_inter == nullptr) {
      {
        double pred0;
        if (n_diff == 0) {
          pred0 = shared_alpha[0];
        } else {
          pred0 = 0.0;
          pred0 += MLCommon::LinAlg::_block_dot<GemmPolicy::BlockSize, true>(
            rd, shared_Z, shared_alpha, shared_mem.reduction_storage);
          __syncthreads();  // necessary to reuse shared memory
        }
        for (; it < n_obs && isnan(d_ys[bid * n_obs + it]); it++) {
          if (threadIdx.x == 0) d_pred[bid * n_obs + it] = pred0;
        }
      }
    }

    /* Kalman loop */
    for (; it < n_obs; it++) {
      double vt, _F;
      bool missing;
      {
        // 1. pred = Z*alpha + obs_intercept
        //    v = y - pred
        double pred = 0.0;
        if (d_obs_inter != nullptr) { pred += d_obs_inter[bid * n_obs + it]; }
        if (n_diff == 0) {
          pred += shared_alpha[0];
        } else {
          pred += MLCommon::LinAlg::_block_dot<GemmPolicy::BlockSize, true>(
            rd, shared_Z, shared_alpha, shared_mem.reduction_storage);
          __syncthreads();  // necessary to reuse shared memory
        }
        double yt = d_ys[bid * n_obs + it];
        // NaN marks a missing observation: innovation-dependent updates are
        // skipped for this time step
        missing = isnan(yt);

        if (!missing) {
          vt = yt - pred;

          // 2. F = Z*P*Z'
          if (n_diff == 0) {
            _F = (d_P + bid * rd2)[0];
          } else {
            _F = MLCommon::LinAlg::_block_xAxt<GemmPolicy::BlockSize, true, false>(
              rd, shared_Z, d_P + bid * rd2, shared_mem.reduction_storage);
            __syncthreads();  // necessary to reuse shared memory
          }
        }

        // Log-likelihood terms are accumulated by thread 0 only; the first
        // n_diff observations are excluded (diffuse differencing states)
        if (threadIdx.x == 0) {
          d_pred[bid * n_obs + it] = pred;
          if (it >= n_diff && !missing) {
            sum_logFs += log(_F);
            ll_s2 += vt * vt / _F;
            n_obs_ll++;
          }
        }
      }

      // 3. K = 1/Fs[it] * T*P*Z'
      // TP = T*P (also used later)
      MLCommon::LinAlg::_block_gemm<GemmPolicy>(false,
                                                false,
                                                rd,
                                                rd,
                                                rd,
                                                1.0,
                                                d_T + bid * rd2,
                                                d_P + bid * rd2,
                                                d_TP + bid * rd2,
                                                shared_mem.gemm_storage);
      __syncthreads();  // for consistency of TP
      if (!missing) {
        // K = 1/Fs[it] * TP*Z'
        double _1_Fs = 1.0 / _F;
        if (n_diff == 0) {
          // Z = e1: TP*Z' is the first column of TP
          MLCommon::LinAlg::_block_ax(rd, _1_Fs, d_TP + bid * rd2, shared_K);
        } else {
          MLCommon::LinAlg::_block_gemv<GemvPolicy, false>(
            rd, rd, _1_Fs, d_TP + bid * rd2, shared_Z, shared_K, shared_mem.gemv_storage[0]);
        }
      }

      // 4. alpha = T*alpha + K*vs[it] + c
      // vec1 = T*alpha
      MLCommon::LinAlg::_block_gemv<GemvPolicy, false>(
        rd, rd, 1.0, d_T + bid * rd2, shared_alpha, shared_vec0, shared_mem.gemv_storage[1]);
      __syncthreads();  // For consistency of K and vec1
      // alpha = vec1 + K*vs[it] + c  (intercept enters at position n_diff)
      for (int i = threadIdx.x; i < rd; i += GemmPolicy::BlockSize) {
        double c_       = (i == n_diff) ? mu_ : 0.0;
        shared_alpha[i] = shared_vec0[i] + c_ + (missing ? 0.0 : vt * shared_K[i]);
      }

      // 5. L = T - K * Z
      if (n_diff == 0) {
        // Z = e1: K*Z only affects the first column (indices i < rd)
        for (int i = threadIdx.x; i < rd2; i += GemmPolicy::BlockSize) {
          double _KZ           = (i < rd && !missing) ? shared_K[i] : 0.0;
          d_m_tmp[bid * rd2 + i] = d_T[bid * rd2 + i] - _KZ;
        }
      } else {
        for (int i = threadIdx.x; i < rd2; i += GemmPolicy::BlockSize) {
          // column-major: element (i % rd, i / rd) of the outer product K*Z
          double _KZ           = missing ? 0.0 : shared_K[i % rd] * shared_Z[i / rd];
          d_m_tmp[bid * rd2 + i] = d_T[bid * rd2 + i] - _KZ;
        }
      }

      // 6. P = T*P*L' + R*Q*R'
      __syncthreads();  // For consistency of L
      // P = TP*L'
      MLCommon::LinAlg::_block_gemm<GemmPolicy>(false,
                                                true,
                                                rd,
                                                rd,
                                                rd,
                                                1.0,
                                                d_TP + bid * rd2,
                                                d_m_tmp + bid * rd2,
                                                d_P + bid * rd2,
                                                shared_mem.gemm_storage);
      __syncthreads();  // For consistency of P
      // tmp = P + R*Q*R'
      /// TODO: shared mem R instead of precomputed matrix?
      for (int i = threadIdx.x; i < rd2; i += GemmPolicy::BlockSize) {
        d_m_tmp[bid * rd2 + i] = d_P[bid * rd2 + i] + d_RQR[bid * rd2 + i];
      }
      __syncthreads();

      // Numerical stability: enforce symmetry of P and positivity of diagonal
      // P = 0.5 * (tmp + tmp')
      // Pii = abs(Pii)
      MLCommon::LinAlg::_block_covariance_stability<CovPolicy>(
        rd, d_m_tmp + bid * rd2, d_P + bid * rd2, shared_mem.cov_stability_storage);
      __syncthreads();
    }

    /* Forecast */
    for (int it = 0; it < fc_steps; it++) {
      // pred = Z * alpha + obs_intercept
      double pred = 0.0;
      if (d_obs_inter_fut != nullptr) { pred += d_obs_inter_fut[bid * fc_steps + it]; }
      if (n_diff == 0) {
        pred += shared_alpha[0];
      } else {
        pred += MLCommon::LinAlg::_block_dot<GemmPolicy::BlockSize, false>(
          rd, shared_Z, shared_alpha, shared_mem.reduction_storage);
        __syncthreads();  // necessary to reuse shared memory
      }
      if (threadIdx.x == 0) d_fc[bid * fc_steps + it] = pred;

      // alpha = T*alpha + c
      // vec0 = T*alpha
      MLCommon::LinAlg::_block_gemv<GemvPolicy, false>(
        rd, rd, 1.0, d_T + bid * rd2, shared_alpha, shared_vec0, shared_mem.gemv_storage[0]);
      __syncthreads();  // for consistency of v_tmp + reuse of shared mem
      // alpha = vec0 + c
      for (int i = threadIdx.x; i < rd; i += GemmPolicy::BlockSize) {
        double c_       = (i == n_diff) ? mu_ : 0.0;
        shared_alpha[i] = shared_vec0[i] + c_;
      }

      // Forecast error variance F = Z*P*Z' (only needed for conf. intervals)
      double _F;
      if (conf_int) {
        if (n_diff == 0) {
          _F = d_P[bid * rd2];
        } else {
          _F = MLCommon::LinAlg::_block_xAxt<GemmPolicy::BlockSize, false, false>(
            rd, shared_Z, d_P + bid * rd2, shared_mem.reduction_storage);
          __syncthreads();  // necessary to reuse shared memory
        }
        if (threadIdx.x == 0) d_F_fc[bid * fc_steps + it] = _F;
      }

      // P = T*P*T' + R*Q*R'
      // TP = T*P
      MLCommon::LinAlg::_block_gemm<GemmPolicy>(false,
                                                false,
                                                rd,
                                                rd,
                                                rd,
                                                1.0,
                                                d_T + bid * rd2,
                                                d_P + bid * rd2,
                                                d_TP + bid * rd2,
                                                shared_mem.gemm_storage);
      __syncthreads();  // for consistency of TP
      // P = TP * T'
      MLCommon::LinAlg::_block_gemm<GemmPolicy>(false,
                                                true,
                                                rd,
                                                rd,
                                                rd,
                                                1.0,
                                                d_TP + bid * rd2,
                                                d_T + bid * rd2,
                                                d_P + bid * rd2,
                                                shared_mem.gemm_storage);
      __syncthreads();  // for consistency of P
      // P = P + R*Q*R'
      /// TODO: shared mem R instead of precomputed matrix?
      for (int i = threadIdx.x; i < rd2; i += GemmPolicy::BlockSize) {
        d_m_tmp[bid * rd2 + i] = d_P[bid * rd2 + i] + d_RQR[bid * rd2 + i];
      }
      __syncthreads();

      // Numerical stability: enforce symmetry of P and positivity of diagonal
      // P = 0.5 * (tmp + tmp')
      // Pii = abs(Pii)
      MLCommon::LinAlg::_block_covariance_stability<CovPolicy>(
        rd, d_m_tmp + bid * rd2, d_P + bid * rd2, shared_mem.cov_stability_storage);
    }

    /* Compute log-likelihood */
    // NOTE(review): if every counted observation is missing, n_obs_ll is 0
    // and this divides by zero -- presumably ruled out upstream; confirm.
    if (threadIdx.x == 0) {
      double n_obs_ll_f = static_cast<double>(n_obs_ll);
      ll_s2 /= n_obs_ll_f;
      d_loglike[bid] = -.5 * (sum_logFs + n_obs_ll_f * (ll_s2 + log(2 * M_PI)));
    }
  }
}
/**
* Kalman loop for large matrices (r > 8).
*
* @param[in] arima_mem Pre-allocated temporary memory
* @param[in] d_ys Batched time series
* @param[in] nobs Number of observation per series
* @param[in] T Batched transition matrix. (r x r)
* @param[in] Z Batched "design" vector (1 x r)
* @param[in] RQR Batched R*Q*R' (r x r)
* @param[in] P Batched P (r x r)
* @param[in] alpha Batched state vector (r x 1)
* @param[in] intercept Do we fit an intercept?
* @param[in] d_mu Batched intercept (1)
* @param[in] rd Dimension of the state vector
* @param[in] d_obs_inter Observation intercept
* @param[in] d_obs_inter_fut Observation intercept for forecasts
* @param[out] d_pred Predictions (nobs)
* @param[out] d_loglike Log-likelihood (1)
* @param[in] n_diff d + s*D
* @param[in] fc_steps Number of steps to forecast
* @param[out] d_fc Array to store the forecast
* @param[in] conf_int Whether to compute confidence intervals
* @param[out] d_F_fc Batched variance of forecast errors (fc_steps)
*/
template <typename GemmPolicy, typename GemvPolicy, typename CovPolicy>
void _batched_kalman_device_loop_large(const ARIMAMemory<double>& arima_mem,
                                       const double* d_ys,
                                       int n_obs,
                                       const MLCommon::LinAlg::Batched::Matrix<double>& T,
                                       const MLCommon::LinAlg::Batched::Matrix<double>& Z,
                                       const MLCommon::LinAlg::Batched::Matrix<double>& RQR,
                                       MLCommon::LinAlg::Batched::Matrix<double>& P,
                                       MLCommon::LinAlg::Batched::Matrix<double>& alpha,
                                       bool intercept,
                                       const double* d_mu,
                                       int rd,
                                       const double* d_obs_inter,
                                       const double* d_obs_inter_fut,
                                       double* d_pred,
                                       double* d_loglike,
                                       int n_diff,
                                       int fc_steps = 0,
                                       double* d_fc = nullptr,
                                       bool conf_int = false,
                                       double* d_F_fc = nullptr)
{
  // All three policies must agree on the block size: the kernel launches a
  // single block shape used by the gemm, gemv and cov-stability primitives
  static_assert(GemmPolicy::BlockSize == GemvPolicy::BlockSize,
                "Gemm and gemv policies: block size mismatch");
  static_assert(GemmPolicy::BlockSize == CovPolicy::BlockSize,
                "Gemm and cov stability policies: block size mismatch");

  auto stream       = T.stream();
  auto cublasHandle = T.cublasHandle();
  int batch_size    = T.batches();

  // Temporary matrices (backed by pre-allocated ARIMAMemory buffers)
  MLCommon::LinAlg::Batched::Matrix<double> m_tmp(rd,
                                                  rd,
                                                  batch_size,
                                                  cublasHandle,
                                                  arima_mem.m_tmp_batches,
                                                  arima_mem.m_tmp_dense,
                                                  stream,
                                                  false);
  MLCommon::LinAlg::Batched::Matrix<double> TP(
    rd, rd, batch_size, cublasHandle, arima_mem.TP_batches, arima_mem.TP_dense, stream, false);

  // One block per series (grid-stride in the kernel covers batch_size > grid)
  int grid_size          = std::min(batch_size, 65536);
  // Dynamic shared memory: 4 vectors of rd doubles (vec0, Z, alpha, K)
  size_t shared_mem_size = 4 * rd * sizeof(double);
  _batched_kalman_device_loop_large_kernel<GemmPolicy, GemvPolicy, CovPolicy>
    <<<grid_size, GemmPolicy::BlockSize, shared_mem_size, stream>>>(d_ys,
                                                                    batch_size,
                                                                    n_obs,
                                                                    T.raw_data(),
                                                                    Z.raw_data(),
                                                                    RQR.raw_data(),
                                                                    P.raw_data(),
                                                                    alpha.raw_data(),
                                                                    m_tmp.raw_data(),
                                                                    TP.raw_data(),
                                                                    intercept,
                                                                    d_mu,
                                                                    rd,
                                                                    d_obs_inter,
                                                                    d_obs_inter_fut,
                                                                    d_pred,
                                                                    d_loglike,
                                                                    n_diff,
                                                                    fc_steps,
                                                                    d_fc,
                                                                    conf_int,
                                                                    d_F_fc);
  // Surface launch-configuration errors (e.g. excessive shared memory),
  // consistent with the rd <= 8 path in batched_kalman_loop
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
/// Wrapper around functions that execute the Kalman loop (for performance)
/// Wrapper around functions that execute the Kalman loop (for performance)
///
/// Dispatches to a register-based kernel (one thread per series) for small
/// state dimensions (rd <= 8), and to a block-per-series kernel with
/// size-tuned execution policies otherwise. Interface and dispatch ranges
/// are unchanged; the repetitive launch argument lists are deduplicated
/// with local macros, and the SM count is now queried on the current device
/// (previously hard-coded to device 0) with its error code checked.
void batched_kalman_loop(raft::handle_t& handle,
                         const ARIMAMemory<double>& arima_mem,
                         const double* ys,
                         int nobs,
                         const MLCommon::LinAlg::Batched::Matrix<double>& T,
                         const MLCommon::LinAlg::Batched::Matrix<double>& Z,
                         const MLCommon::LinAlg::Batched::Matrix<double>& RQR,
                         MLCommon::LinAlg::Batched::Matrix<double>& P0,
                         MLCommon::LinAlg::Batched::Matrix<double>& alpha,
                         bool intercept,
                         const double* d_mu,
                         const ARIMAOrder& order,
                         const double* d_obs_inter,
                         const double* d_obs_inter_fut,
                         double* d_pred,
                         double* d_loglike,
                         int fc_steps = 0,
                         double* d_fc = nullptr,
                         bool conf_int = false,
                         double* d_F_fc = nullptr)
{
  const int batch_size = T.batches();
  auto stream          = T.stream();
  int rd               = order.rd();
  int n_diff           = order.n_diff();
  dim3 numThreadsPerBlock(32, 1);
  dim3 numBlocks(raft::ceildiv<int>(batch_size, numThreadsPerBlock.x), 1);

  if (rd <= 8) {
    // Small state dimension: the kernel keeps the state in registers and is
    // instantiated for each supported rd value.
#define CUML_KALMAN_SMALL_CASE(RD_VALUE)                                 \
  case RD_VALUE:                                                         \
    batched_kalman_loop_kernel<RD_VALUE>                                 \
      <<<numBlocks, numThreadsPerBlock, 0, stream>>>(ys,                 \
                                                     nobs,               \
                                                     T.raw_data(),       \
                                                     Z.raw_data(),       \
                                                     RQR.raw_data(),     \
                                                     P0.raw_data(),      \
                                                     alpha.raw_data(),   \
                                                     intercept,          \
                                                     d_mu,               \
                                                     batch_size,         \
                                                     d_obs_inter,        \
                                                     d_obs_inter_fut,    \
                                                     d_pred,             \
                                                     d_loglike,          \
                                                     n_diff,             \
                                                     fc_steps,           \
                                                     d_fc,               \
                                                     conf_int,           \
                                                     d_F_fc);            \
    break

    switch (rd) {
      CUML_KALMAN_SMALL_CASE(1);
      CUML_KALMAN_SMALL_CASE(2);
      CUML_KALMAN_SMALL_CASE(3);
      CUML_KALMAN_SMALL_CASE(4);
      CUML_KALMAN_SMALL_CASE(5);
      CUML_KALMAN_SMALL_CASE(6);
      CUML_KALMAN_SMALL_CASE(7);
      CUML_KALMAN_SMALL_CASE(8);
    }
#undef CUML_KALMAN_SMALL_CASE
    RAFT_CUDA_TRY(cudaPeekAtLastError());
  } else {
    // Bug fix: query the SM count of the *current* device (the previous code
    // passed device 0 unconditionally and ignored the error code), used below
    // to decide whether there are enough blocks to fill the GPU.
    int device_id;
    RAFT_CUDA_TRY(cudaGetDevice(&device_id));
    int num_sm;
    RAFT_CUDA_TRY(cudaDeviceGetAttribute(&num_sm, cudaDevAttrMultiProcessorCount, device_id));

    // Forward the arguments common to every policy combination
#define CUML_KALMAN_LARGE_LOOP(GemmP, GemvP, CovP)                          \
  _batched_kalman_device_loop_large<GemmP, GemvP, CovP>(arima_mem,          \
                                                        ys,                 \
                                                        nobs,               \
                                                        T,                  \
                                                        Z,                  \
                                                        RQR,                \
                                                        P0,                 \
                                                        alpha,              \
                                                        intercept,          \
                                                        d_mu,               \
                                                        rd,                 \
                                                        d_obs_inter,        \
                                                        d_obs_inter_fut,    \
                                                        d_pred,             \
                                                        d_loglike,          \
                                                        n_diff,             \
                                                        fc_steps,           \
                                                        d_fc,               \
                                                        conf_int,           \
                                                        d_F_fc)

    if (rd <= 16) {
      if (batch_size <= 2 * num_sm) {
        // Few series: use wide blocks to extract intra-series parallelism
        using GemmPolicy = MLCommon::LinAlg::BlockGemmPolicy<1, 16, 1, 1, 16, 16>;
        using GemvPolicy = MLCommon::LinAlg::BlockGemvPolicy<16, 16>;
        using CovPolicy  = MLCommon::LinAlg::BlockPolicy<1, 1, 16, 16>;
        CUML_KALMAN_LARGE_LOOP(GemmPolicy, GemvPolicy, CovPolicy);
      } else {
        using GemmPolicy = MLCommon::LinAlg::BlockGemmPolicy<1, 16, 1, 4, 16, 4>;
        using GemvPolicy = MLCommon::LinAlg::BlockGemvPolicy<16, 4>;
        using CovPolicy  = MLCommon::LinAlg::BlockPolicy<1, 4, 16, 4>;
        CUML_KALMAN_LARGE_LOOP(GemmPolicy, GemvPolicy, CovPolicy);
      }
    } else if (rd <= 32) {
      if (batch_size <= 2 * num_sm) {
        using GemmPolicy = MLCommon::LinAlg::BlockGemmPolicy<1, 32, 1, 4, 32, 8>;
        using GemvPolicy = MLCommon::LinAlg::BlockGemvPolicy<32, 8>;
        using CovPolicy  = MLCommon::LinAlg::BlockPolicy<1, 4, 32, 8>;
        CUML_KALMAN_LARGE_LOOP(GemmPolicy, GemvPolicy, CovPolicy);
      } else {
        using GemmPolicy = MLCommon::LinAlg::BlockGemmPolicy<1, 32, 1, 8, 32, 4>;
        using GemvPolicy = MLCommon::LinAlg::BlockGemvPolicy<32, 4>;
        using CovPolicy  = MLCommon::LinAlg::BlockPolicy<1, 8, 32, 4>;
        CUML_KALMAN_LARGE_LOOP(GemmPolicy, GemvPolicy, CovPolicy);
      }
    } else if (rd > 64 && rd <= 128) {
      // NOTE(review): rd in (32, 64] and rd > 128 both fall to the final
      // else branch -- this preserves the original dispatch; confirm the
      // 64-wide policies there are intended for rd > 128 as well.
      using GemmPolicy = MLCommon::LinAlg::BlockGemmPolicy<1, 16, 1, 16, 128, 2>;
      using GemvPolicy = MLCommon::LinAlg::BlockGemvPolicy<128, 2>;
      using CovPolicy  = MLCommon::LinAlg::BlockPolicy<1, 8, 64, 4>;
      CUML_KALMAN_LARGE_LOOP(GemmPolicy, GemvPolicy, CovPolicy);
    } else {
      using GemmPolicy = MLCommon::LinAlg::BlockGemmPolicy<1, 32, 1, 16, 64, 4>;
      using GemvPolicy = MLCommon::LinAlg::BlockGemvPolicy<64, 4>;
      using CovPolicy  = MLCommon::LinAlg::BlockPolicy<1, 16, 64, 4>;
      CUML_KALMAN_LARGE_LOOP(GemmPolicy, GemvPolicy, CovPolicy);
    }
#undef CUML_KALMAN_LARGE_LOOP
  }
}
/**
* Kernel to finalize the computation of confidence intervals
*
* @note: One block per batch member, one thread per forecast time step
*
* @param[in] d_fc Mean forecasts
* @param[inout] d_lower Input: F_{n+t}
* Output: lower bound of the confidence intervals
* @param[out] d_upper Upper bound of the confidence intervals
* @param[in] n_elem Total number of elements (fc_steps * batch_size)
* @param[in] multiplier Coefficient associated with the confidence level
*/
__global__ void confidence_intervals(
  const double* d_fc, double* d_lower, double* d_upper, int n_elem, double multiplier)
{
  // Grid-stride loop over all (series, step) elements.
  // Bug fix: the start index previously omitted the block offset (it began at
  // threadIdx.x), so with more than one block every block redundantly
  // processed the same leading elements and the remainder was never written.
  for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n_elem;
       idx += blockDim.x * gridDim.x) {
    double fc     = d_fc[idx];
    // On input d_lower holds the forecast error variance F_{n+t}
    double margin = multiplier * sqrt(d_lower[idx]);
    d_lower[idx]  = fc - margin;
    d_upper[idx]  = fc + margin;
  }
}
// Solve the batched Lyapunov equation for X. Small state dimensions
// (r <= 5) use a direct Kronecker-product solve backed by pre-allocated
// scratch memory; larger dimensions fall back to the generic solver.
void _lyapunov_wrapper(raft::handle_t& handle,
                       const ARIMAMemory<double>& arima_mem,
                       const MLCommon::LinAlg::Batched::Matrix<double>& A,
                       MLCommon::LinAlg::Batched::Matrix<double>& Q,
                       MLCommon::LinAlg::Batched::Matrix<double>& X,
                       int r)
{
  if (r > 5) {
    // Note: the other Lyapunov solver is doing temporary mem allocations,
    // but when r > 5, allocation overhead shouldn't be a bottleneck
    X = MLCommon::LinAlg::Batched::b_lyapunov(A, Q);
    return;
  }

  //
  // Use direct solution with Kronecker product
  //
  auto stream       = handle.get_stream();
  auto cublasHandle = handle.get_cublas_handle();
  int n_batches     = A.batches();
  int r2            = r * r;

  MLCommon::LinAlg::Batched::Matrix<double> I_m_AxA(r2,
                                                    r2,
                                                    n_batches,
                                                    cublasHandle,
                                                    arima_mem.I_m_AxA_batches,
                                                    arima_mem.I_m_AxA_dense,
                                                    stream,
                                                    false);
  MLCommon::LinAlg::Batched::Matrix<double> I_m_AxA_inv(r2,
                                                        r2,
                                                        n_batches,
                                                        cublasHandle,
                                                        arima_mem.I_m_AxA_inv_batches,
                                                        arima_mem.I_m_AxA_inv_dense,
                                                        stream,
                                                        false);
  MLCommon::LinAlg::Batched::_direct_lyapunov_helper(
    A, Q, X, I_m_AxA, I_m_AxA_inv, arima_mem.I_m_AxA_P, arima_mem.I_m_AxA_info, r);
}
/// Internal Kalman filter implementation that assumes data exists on GPU.
void _batched_kalman_filter(raft::handle_t& handle,
const ARIMAMemory<double>& arima_mem,
const double* d_ys,
const double* d_exog,
int nobs,
const ARIMAOrder& order,
const MLCommon::LinAlg::Batched::Matrix<double>& Zb,
const MLCommon::LinAlg::Batched::Matrix<double>& Tb,
const MLCommon::LinAlg::Batched::Matrix<double>& Rb,
double* d_pred,
double* d_loglike,
const double* d_sigma2,
bool intercept,
const double* d_mu,
const double* d_beta,
int fc_steps,
double* d_fc,
const double* d_exog_fut,
double level,
double* d_lower,
double* d_upper)
{
const size_t batch_size = Zb.batches();
auto stream = handle.get_stream();
auto cublasHandle = handle.get_cublas_handle();
auto counting = thrust::make_counting_iterator(0);
int n_diff = order.n_diff();
int rd = order.rd();
int r = order.r();
// Compute observation intercept (exogenous component).
// The observation intercept is a linear combination of the values of
// exogenous variables for this observation.
rmm::device_uvector<double> obs_intercept(0, stream);
rmm::device_uvector<double> obs_intercept_fut(0, stream);
if (order.n_exog > 0) {
obs_intercept.resize(nobs * batch_size, stream);
double alpha = 1.0;
double beta = 0.0;
// #TODO: Call from public API when ready
RAFT_CUBLAS_TRY(raft::linalg::detail::cublasgemmStridedBatched(cublasHandle,
CUBLAS_OP_N,
CUBLAS_OP_N,
nobs,
1,
order.n_exog,
&alpha,
d_exog,
nobs,
nobs * order.n_exog,
d_beta,
order.n_exog,
order.n_exog,
&beta,
obs_intercept.data(),
nobs,
nobs,
batch_size,
stream));
if (fc_steps > 0) {
obs_intercept_fut.resize(fc_steps * batch_size, stream);
// #TODO: Call from public API when ready
RAFT_CUBLAS_TRY(raft::linalg::detail::cublasgemmStridedBatched(cublasHandle,
CUBLAS_OP_N,
CUBLAS_OP_N,
fc_steps,
1,
order.n_exog,
&alpha,
d_exog_fut,
fc_steps,
fc_steps * order.n_exog,
d_beta,
order.n_exog,
order.n_exog,
&beta,
obs_intercept_fut.data(),
fc_steps,
fc_steps,
batch_size,
stream));
}
}
MLCommon::LinAlg::Batched::Matrix<double> RQb(
rd, 1, batch_size, cublasHandle, arima_mem.RQ_batches, arima_mem.RQ_dense, stream, true);
double* d_RQ = RQb.raw_data();
const double* d_R = Rb.raw_data();
thrust::for_each(
thrust::cuda::par.on(stream), counting, counting + batch_size, [=] __device__(int bid) {
double sigma2 = d_sigma2[bid];
for (int i = 0; i < rd; i++) {
d_RQ[bid * rd + i] = d_R[bid * rd + i] * sigma2;
}
});
MLCommon::LinAlg::Batched::Matrix<double> RQR(
rd, rd, batch_size, cublasHandle, arima_mem.RQR_batches, arima_mem.RQR_dense, stream, false);
MLCommon::LinAlg::Batched::b_gemm(false, true, rd, rd, 1, 1.0, RQb, Rb, 0.0, RQR);
// Durbin Koopman "Time Series Analysis" pg 138
raft::common::nvtx::push_range("Init P");
MLCommon::LinAlg::Batched::Matrix<double> P(
rd, rd, batch_size, cublasHandle, arima_mem.P_batches, arima_mem.P_dense, stream, true);
{
double* d_P = P.raw_data();
if (n_diff > 0) {
// Initialize the diffuse part with a large variance
/// TODO: pass this as a parameter
constexpr double kappa = 1e6;
thrust::for_each(
thrust::cuda::par.on(stream), counting, counting + batch_size, [=] __device__(int bid) {
double* b_P = d_P + rd * rd * bid;
for (int i = 0; i < n_diff; i++) {
b_P[(rd + 1) * i] = kappa;
}
});
// Initialize the stationary part by solving a Lyapunov equation
MLCommon::LinAlg::Batched::Matrix<double> Ts(
r, r, batch_size, cublasHandle, arima_mem.Ts_batches, arima_mem.Ts_dense, stream, false);
MLCommon::LinAlg::Batched::Matrix<double> RQRs(r,
r,
batch_size,
cublasHandle,
arima_mem.RQRs_batches,
arima_mem.RQRs_dense,
stream,
false);
MLCommon::LinAlg::Batched::Matrix<double> Ps(
r, r, batch_size, cublasHandle, arima_mem.Ps_batches, arima_mem.Ps_dense, stream, false);
MLCommon::LinAlg::Batched::b_2dcopy(Tb, Ts, n_diff, n_diff, r, r);
MLCommon::LinAlg::Batched::b_2dcopy(RQR, RQRs, n_diff, n_diff, r, r);
// Ps = MLCommon::LinAlg::Batched::b_lyapunov(Ts, RQRs);
_lyapunov_wrapper(handle, arima_mem, Ts, RQRs, Ps, r);
MLCommon::LinAlg::Batched::b_2dcopy(Ps, P, 0, 0, r, r, n_diff, n_diff);
} else {
// Initialize by solving a Lyapunov equation
// P = MLCommon::LinAlg::Batched::b_lyapunov(Tb, RQR);
_lyapunov_wrapper(handle, arima_mem, Tb, RQR, P, rd);
}
}
raft::common::nvtx::pop_range();
// Initialize the state alpha by solving (I - T*) x* = c with:
// | mu |
// c = | 0 |
// | . |
// | 0 |
// T* = T[d+s*D:, d+s*D:]
// x* = alpha_0[d+s*D:]
MLCommon::LinAlg::Batched::Matrix<double> alpha(rd,
1,
batch_size,
handle.get_cublas_handle(),
arima_mem.alpha_batches,
arima_mem.alpha_dense,
stream,
false);
if (intercept) {
// Compute I-T*
MLCommon::LinAlg::Batched::Matrix<double> ImT(
r, r, batch_size, cublasHandle, arima_mem.ImT_batches, arima_mem.ImT_dense, stream, false);
const double* d_T = Tb.raw_data();
double* d_ImT = ImT.raw_data();
thrust::for_each(
thrust::cuda::par.on(stream), counting, counting + batch_size, [=] __device__(int bid) {
const double* b_T = d_T + rd * rd * bid;
double* b_ImT = d_ImT + r * r * bid;
for (int i = 0; i < r; i++) {
for (int j = 0; j < r; j++) {
b_ImT[r * j + i] = (i == j ? 1.0 : 0.0) - b_T[rd * (j + n_diff) + i + n_diff];
}
}
});
// For r=1, prevent I-T from being too close to [[0]] -> no solution
if (r == 1) {
thrust::for_each(
thrust::cuda::par.on(stream), counting, counting + batch_size, [=] __device__(int bid) {
if (abs(d_ImT[bid]) < 1e-3) d_ImT[bid] = raft::signPrim(d_ImT[bid]) * 1e-3;
});
}
// Compute (I-T*)^-1
MLCommon::LinAlg::Batched::Matrix<double> ImT_inv(r,
r,
batch_size,
cublasHandle,
arima_mem.ImT_inv_batches,
arima_mem.ImT_inv_dense,
stream,
false);
MLCommon::LinAlg::Batched::Matrix<double>::inv(
ImT, ImT_inv, arima_mem.ImT_inv_P, arima_mem.ImT_inv_info);
// Compute (I-T*)^-1 * c -> multiply 1st column by mu
const double* d_ImT_inv = ImT_inv.raw_data();
double* d_alpha = alpha.raw_data();
thrust::for_each(
thrust::cuda::par.on(stream), counting, counting + batch_size, [=] __device__(int bid) {
const double* b_ImT_inv = d_ImT_inv + r * r * bid;
double* b_alpha = d_alpha + rd * bid;
double mu = d_mu[bid];
for (int i = 0; i < n_diff; i++) {
b_alpha[i] = 0;
}
for (int i = 0; i < r; i++) {
b_alpha[i + n_diff] = b_ImT_inv[i] * mu;
}
});
} else {
// Memset alpha to 0
RAFT_CUDA_TRY(cudaMemsetAsync(alpha.raw_data(), 0, sizeof(double) * rd * batch_size, stream));
}
batched_kalman_loop(handle,
arima_mem,
d_ys,
nobs,
Tb,
Zb,
RQR,
P,
alpha,
intercept,
d_mu,
order,
obs_intercept.data(),
obs_intercept_fut.data(),
d_pred,
d_loglike,
fc_steps,
d_fc,
level > 0,
d_lower);
if (level > 0) {
constexpr int TPB_conf = 256;
int n_blocks = raft::ceildiv<int>(fc_steps * batch_size, TPB_conf);
confidence_intervals<<<n_blocks, TPB_conf, 0, stream>>>(
d_fc, d_lower, d_upper, fc_steps * batch_size, sqrt(2.0) * erfinv(level));
RAFT_CUDA_TRY(cudaPeekAtLastError());
}
}
/**
 * @brief Initialize the batched state-space matrices Z, R and T of the Kalman
 * filter representation of a batch of (seasonal) ARIMA models.
 *
 * @param[in]  handle  cuML handle (provides the CUDA stream)
 * @param[in]  d_ar    AR parameters (device, batched)
 * @param[in]  d_ma    MA parameters (device, batched)
 * @param[in]  d_sar   Seasonal AR parameters (device, batched)
 * @param[in]  d_sma   Seasonal MA parameters (device, batched)
 * @param[in]  nb      Batch size
 * @param[in]  order   ARIMA hyper-parameters
 * @param[in]  rd      Dimension of the state vector
 * @param[out] d_Z_b   Z matrices (device, 1 x rd per batch member)
 * @param[out] d_R_b   R matrices (device, rd x 1 per batch member)
 * @param[out] d_T_b   T matrices (device, rd x rd per batch member)
 */
void init_batched_kalman_matrices(raft::handle_t& handle,
                                  const double* d_ar,
                                  const double* d_ma,
                                  const double* d_sar,
                                  const double* d_sma,
                                  int nb,
                                  const ARIMAOrder& order,
                                  int rd,
                                  double* d_Z_b,
                                  double* d_R_b,
                                  double* d_T_b)
{
  raft::common::nvtx::range fun_scope(__func__);

  auto stream = handle.get_stream();

  // Note: Z is unused yet but kept to avoid reintroducing it later when
  // adding support for exogeneous variables

  // Zero-initialize the output matrices. cudaMemsetAsync takes an int byte
  // value (not a double), and all-zero bytes represent 0.0 for IEEE-754
  // doubles; check the return codes like the rest of this file does.
  RAFT_CUDA_TRY(cudaMemsetAsync(d_Z_b, 0, rd * nb * sizeof(double), stream));
  RAFT_CUDA_TRY(cudaMemsetAsync(d_R_b, 0, rd * nb * sizeof(double), stream));
  RAFT_CUDA_TRY(cudaMemsetAsync(d_T_b, 0, rd * rd * nb * sizeof(double), stream));

  int n_diff = order.n_diff();
  int r = order.r();

  auto counting = thrust::make_counting_iterator(0);
  auto n_theta = order.n_theta();
  auto n_phi = order.n_phi();
  thrust::for_each(thrust::cuda::par.on(stream), counting, counting + nb, [=] __device__(int bid) {
    // See TSA pg. 54 for Z, R, T matrices
    // Z = [ 1 | 0 . . 0 1 0 . . 0 1 | 1 0 . . 0 ]
    //       d |         s*D         |     r
    for (int i = 0; i < order.d; i++)
      d_Z_b[bid * rd + i] = 1.0;
    for (int i = 1; i <= order.D; i++)
      d_Z_b[bid * rd + order.d + i * order.s - 1] = 1.0;
    d_Z_b[bid * rd + n_diff] = 1.0;

    //     |     0     |
    //     |     .     |  d + s*D
    //     |     0     |_ _
    // R = |     1     |
    //     |  theta_1  |  r
    //     |     .     |
    //     |theta_{r-1}|
    //
    d_R_b[bid * rd + n_diff] = 1.0;
    for (int i = 0; i < n_theta; i++) {
      d_R_b[bid * rd + n_diff + i + 1] = MLCommon::TimeSeries::reduced_polynomial<false>(
        bid, d_ma, order.q, d_sma, order.Q, order.s, i + 1);
    }

    //     | 1 | 0 .. 0 1 | 1                 | d
    //     |_ _|_ _ _ _ _ |_ _ _ _ _ _ _ _ _  |_ _
    //     |   | 0 .. 0 1 | 1                 |
    //     |   | 1      0 |                   |
    //     |   |  .     . |                   | s*D
    //     |   |   .    . |                   |
    // T = |   |    1   0 |                   |
    //     |_ _|_ _ _ _ _ |_ _ _ _ _ _ _ _ _  |_ _
    //     |   |          | phi_1  1          |
    //     |   |          |  .       1    0   |
    //     |   |          |  .         .      | r
    //     |   |          |  .     0     .    |
    //     |   |          |  .             1  |
    //     |   |          | phi_r  0  . .  0  |
    //
    // (non-comprehensive example with d=1 and D=1)
    //
    double* batch_T = d_T_b + bid * rd * rd;
    // 1. Differencing component
    for (int i = 0; i < order.d; i++) {
      for (int j = i; j < order.d; j++) {
        batch_T[j * rd + i] = 1.0;
      }
    }
    for (int id = 0; id < order.d; id++) {
      batch_T[n_diff * rd + id] = 1.0;
      for (int iD = 1; iD <= order.D; iD++) {
        batch_T[(order.d + order.s * iD - 1) * rd + id] = 1.0;
      }
    }
    // 2. Seasonal differencing component
    for (int iD = 0; iD < order.D; iD++) {
      int offset = order.d + iD * order.s;
      for (int i = 0; i < order.s - 1; i++) {
        batch_T[(offset + i) * rd + offset + i + 1] = 1.0;
      }
      batch_T[(offset + order.s - 1) * rd + offset] = 1.0;
      batch_T[n_diff * rd + offset] = 1.0;
    }
    if (order.D == 2) { batch_T[(n_diff - 1) * rd + order.d] = 1.0; }
    // 3. Auto-Regressive component
    for (int i = 0; i < n_phi; i++) {
      batch_T[n_diff * (rd + 1) + i] = MLCommon::TimeSeries::reduced_polynomial<true>(
        bid, d_ar, order.p, d_sar, order.P, order.s, i + 1);
    }
    for (int i = 0; i < r - 1; i++) {
      batch_T[(n_diff + i + 1) * rd + n_diff + i] = 1.0;
    }

    // If rd=2 and phi_2=-1, I-TxT is singular
    if (rd == 2 && order.p == 2 && abs(batch_T[1] + 1) < 0.01) { batch_T[1] = -0.99; }
  });
}
// Public entry point: build the batched state-space matrices for the given
// ARIMA order/parameters, then run the batched Kalman filter.
void batched_kalman_filter(raft::handle_t& handle,
                           const ARIMAMemory<double>& arima_mem,
                           const double* d_ys,
                           const double* d_exog,
                           int nobs,
                           const ARIMAParams<double>& params,
                           const ARIMAOrder& order,
                           int batch_size,
                           double* d_loglike,
                           double* d_pred,
                           int fc_steps,
                           double* d_fc,
                           const double* d_exog_fut,
                           double level,
                           double* d_lower,
                           double* d_upper)
{
  raft::common::nvtx::range fun_scope(__func__);

  auto cublas_h = handle.get_cublas_handle();
  auto stream   = handle.get_stream();

  // Dimension of the state vector; see (3.18) in TSA by D&K
  int rd = order.rd();

  // Wrap the pre-allocated buffers for the state-space matrices Z, T and R
  MLCommon::LinAlg::Batched::Matrix<double> Z_mat(
    1, rd, batch_size, cublas_h, arima_mem.Z_batches, arima_mem.Z_dense, stream, false);
  MLCommon::LinAlg::Batched::Matrix<double> T_mat(
    rd, rd, batch_size, cublas_h, arima_mem.T_batches, arima_mem.T_dense, stream, false);
  MLCommon::LinAlg::Batched::Matrix<double> R_mat(
    rd, 1, batch_size, cublas_h, arima_mem.R_batches, arima_mem.R_dense, stream, false);

  // Fill Z, R and T from the (seasonal) AR/MA parameters
  init_batched_kalman_matrices(handle,
                               params.ar,
                               params.ma,
                               params.sar,
                               params.sma,
                               batch_size,
                               order,
                               rd,
                               Z_mat.raw_data(),
                               R_mat.raw_data(),
                               T_mat.raw_data());

  ////////////////////////////////////////////////////////////
  // Computation: filter, log-likelihood, optional forecasts + intervals
  _batched_kalman_filter(handle,
                         arima_mem,
                         d_ys,
                         d_exog,
                         nobs,
                         order,
                         Z_mat,
                         T_mat,
                         R_mat,
                         d_pred,
                         d_loglike,
                         params.sigma2,
                         static_cast<bool>(order.k),
                         params.mu,
                         params.beta,
                         fc_steps,
                         d_fc,
                         d_exog_fut,
                         level,
                         d_lower,
                         d_upper);
}
// Apply the (inverse) Jones transform to a host-side packed parameter vector:
// copy to device, unpack, transform, repack and copy back to host.
void batched_jones_transform(raft::handle_t& handle,
                             const ARIMAMemory<double>& arima_mem,
                             const ARIMAOrder& order,
                             int batch_size,
                             bool isInv,
                             const double* h_params,
                             double* h_Tparams)
{
  auto stream = handle.get_stream();

  // Number of parameters for a single batch member
  const int n_params = order.complexity();

  // Pre-allocated device scratch for the packed parameter vectors
  double* d_packed  = arima_mem.d_params;
  double* d_Tpacked = arima_mem.d_Tparams;

  // Unpacked parameter views. The mu and beta buffers are shared between the
  // raw and the transformed parameter sets.
  ARIMAParams<double> params  = {arima_mem.params_mu,
                                 arima_mem.params_beta,
                                 arima_mem.params_ar,
                                 arima_mem.params_ma,
                                 arima_mem.params_sar,
                                 arima_mem.params_sma,
                                 arima_mem.params_sigma2};
  ARIMAParams<double> Tparams = {params.mu,
                                 params.beta,
                                 arima_mem.Tparams_ar,
                                 arima_mem.Tparams_ma,
                                 arima_mem.Tparams_sar,
                                 arima_mem.Tparams_sma,
                                 arima_mem.Tparams_sigma2};

  raft::update_device(d_packed, h_params, n_params * batch_size, stream);
  params.unpack(order, batch_size, d_packed, stream);

  MLCommon::TimeSeries::batched_jones_transform(order, batch_size, isInv, params, Tparams, stream);

  Tparams.pack(order, batch_size, d_Tpacked, stream);
  // NOTE(review): the copy back to host is stream-ordered (asynchronous);
  // the caller presumably synchronizes the stream before reading h_Tparams.
  raft::update_host(h_Tparams, d_Tpacked, n_params * batch_size, stream);
}
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/pca/pca.cuh | /*
* Copyright (c) 2018-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuml/decomposition/params.hpp>
#include <raft/core/handle.hpp>
#include <raft/linalg/eig.cuh>
#include <raft/linalg/eltwise.cuh>
#include <raft/linalg/transpose.cuh>
#include <raft/matrix/math.cuh>
#include <raft/matrix/matrix.cuh>
#include <raft/stats/cov.cuh>
#include <raft/stats/mean.cuh>
#include <raft/stats/mean_center.cuh>
#include <raft/util/cuda_utils.cuh>
#include <rmm/device_uvector.hpp>
#include <tsvd/tsvd.cuh>
namespace ML {
// Eigen-decompose the n_cols x n_cols matrix `in` and truncate the results
// to the leading prms.n_components eigen-pairs: writes the kept eigenvectors
// to `components`, the kept eigenvalues to `explained_var`, and each
// eigenvalue's share of the total to `explained_var_ratio`.
template <typename math_t, typename enum_solver = solver>
void truncCompExpVars(const raft::handle_t& handle,
                      math_t* in,
                      math_t* components,
                      math_t* explained_var,
                      math_t* explained_var_ratio,
                      const paramsTSVDTemplate<enum_solver>& prms,
                      cudaStream_t stream)
{
  // Full-size scratch buffers for the untruncated decomposition
  const auto n_eig_elems = prms.n_cols * prms.n_cols;
  rmm::device_uvector<math_t> eig_vectors(n_eig_elems, stream);
  rmm::device_uvector<math_t> eig_values(prms.n_cols, stream);
  rmm::device_uvector<math_t> eig_value_ratios(prms.n_cols, stream);

  // Eigen decomposition with the solver selected in prms
  calEig<math_t, enum_solver>(handle, in, eig_vectors.data(), eig_values.data(), prms, stream);

  // Keep only the first n_components eigenvectors
  raft::matrix::truncZeroOrigin(
    eig_vectors.data(), prms.n_cols, components, prms.n_components, prms.n_cols, stream);

  // Ratio of each eigenvalue relative to the whole set
  raft::matrix::ratio(handle, eig_values.data(), eig_value_ratios.data(), prms.n_cols, stream);

  // Truncate the variances and ratios to the first n_components entries
  raft::matrix::truncZeroOrigin(
    eig_values.data(), prms.n_cols, explained_var, prms.n_components, std::size_t(1), stream);
  raft::matrix::truncZeroOrigin(eig_value_ratios.data(),
                                prms.n_cols,
                                explained_var_ratio,
                                prms.n_components,
                                std::size_t(1),
                                stream);
}
/**
* @brief perform fit operation for the pca. Generates eigenvectors, explained vars, singular vals,
* etc.
* @param[in] handle: cuml handle object
* @param[in] input: the data is fitted to PCA. Size n_rows x n_cols. The size of the data is
* indicated in prms.
* @param[out] components: the principal components of the input data. Size n_cols * n_components.
* @param[out] explained_var: explained variances (eigenvalues) of the principal components. Size
* n_components * 1.
* @param[out] explained_var_ratio: the ratio of the explained variance and total variance. Size
* n_components * 1.
* @param[out] singular_vals: singular values of the data. Size n_components * 1
* @param[out] mu: mean of all the features (all the columns in the data). Size n_cols * 1.
* @param[out] noise_vars: variance of the noise. Size 1 * 1 (scalar).
* @param[in] prms: data structure that includes all the parameters from input size to algorithm.
* @param[in] stream cuda stream
*/
template <typename math_t>
void pcaFit(const raft::handle_t& handle,
            math_t* input,
            math_t* components,
            math_t* explained_var,
            math_t* explained_var_ratio,
            math_t* singular_vals,
            math_t* mu,
            math_t* noise_vars,
            const paramsPCA& prms,
            cudaStream_t stream)
{
  // Note: the previously fetched (and unused) cublas handle was removed;
  // downstream raft/cuml calls obtain it from `handle` themselves.
  ASSERT(prms.n_cols > 1, "Parameter n_cols: number of columns cannot be less than two");
  ASSERT(prms.n_rows > 1, "Parameter n_rows: number of rows cannot be less than two");
  ASSERT(prms.n_components > 0,
         "Parameter n_components: number of components cannot be less than one");

  // Clamp the number of components to the number of features
  auto n_components = prms.n_components;
  if (n_components > prms.n_cols) n_components = prms.n_cols;

  // Column means of the input
  raft::stats::mean(mu, input, prms.n_cols, prms.n_rows, true, false, stream);

  // n_cols x n_cols covariance matrix; the meanAdd at the end of this
  // function undoes the centering applied during this step
  auto len = prms.n_cols * prms.n_cols;
  rmm::device_uvector<math_t> cov(len, stream);
  raft::stats::cov(
    handle, cov.data(), input, mu, prms.n_cols, prms.n_rows, true, false, true, stream);

  // Eigen decomposition truncated to n_components
  truncCompExpVars(
    handle, cov.data(), components, explained_var, explained_var_ratio, prms, stream);

  // Singular values derived from the explained variances, scaled by (n - 1)
  math_t scalar = (prms.n_rows - 1);
  raft::matrix::seqRoot(explained_var, singular_vals, scalar, n_components, stream, true);

  // Restore the original (un-centered) input
  raft::stats::meanAdd(input, input, mu, prms.n_cols, prms.n_rows, false, true, stream);
}
/**
* @brief perform fit and transform operations for the pca. Generates transformed data,
* eigenvectors, explained vars, singular vals, etc.
* @param[in] handle: cuml handle object
* @param[in] input: the data is fitted to PCA. Size n_rows x n_cols. The size of the data is
* indicated in prms.
* @param[out] trans_input: the transformed data. Size n_rows * n_components.
* @param[out] components: the principal components of the input data. Size n_cols * n_components.
* @param[out] explained_var: explained variances (eigenvalues) of the principal components. Size
* n_components * 1.
* @param[out] explained_var_ratio: the ratio of the explained variance and total variance. Size
* n_components * 1.
* @param[out] singular_vals: singular values of the data. Size n_components * 1
* @param[out] mu: mean of all the features (all the columns in the data). Size n_cols * 1.
* @param[out] noise_vars: variance of the noise. Size 1 * 1 (scalar).
* @param[in] prms: data structure that includes all the parameters from input size to algorithm.
* @param[in] stream cuda stream
*/
template <typename math_t>
void pcaFitTransform(const raft::handle_t& handle,
                     math_t* input,
                     math_t* trans_input,
                     math_t* components,
                     math_t* explained_var,
                     math_t* explained_var_ratio,
                     math_t* singular_vals,
                     math_t* mu,
                     math_t* noise_vars,
                     const paramsPCA& prms,
                     cudaStream_t stream)
{
  // Fit: computes components, explained variances/ratios, singular values and mu
  pcaFit(handle,
         input,
         components,
         explained_var,
         explained_var_ratio,
         singular_vals,
         mu,
         noise_vars,
         prms,
         stream);
  // Project the input onto the fitted components
  pcaTransform(handle, input, components, trans_input, singular_vals, mu, prms, stream);
  // Apply a deterministic sign convention to components and transformed data
  signFlip(trans_input, prms.n_rows, prms.n_components, components, prms.n_cols, stream);
}
// TODO: implement pcaGetCovariance function
// Placeholder: not implemented yet; calling this always fails with an assertion.
template <typename math_t>
void pcaGetCovariance()
{
  ASSERT(false, "pcaGetCovariance: will be implemented!");
}

// TODO: implement pcaGetPrecision function
// Placeholder: not implemented yet; calling this always fails with an assertion.
template <typename math_t>
void pcaGetPrecision()
{
  ASSERT(false, "pcaGetPrecision: will be implemented!");
}
/**
* @brief performs inverse transform operation for the pca. Transforms the transformed data back to
* original data.
* @param[in] handle: the internal cuml handle object
* @param[in] trans_input: the data is fitted to PCA. Size n_rows x n_components.
* @param[in] components: transpose of the principal components of the input data. Size n_components
* * n_cols.
* @param[in] singular_vals: singular values of the data. Size n_components * 1
* @param[in] mu: mean of features (every column).
* @param[out] input: the data is fitted to PCA. Size n_rows x n_cols.
* @param[in] prms: data structure that includes all the parameters from input size to algorithm.
* @param[in] stream cuda stream
*/
// Map data from component space back to the original feature space and
// re-add the feature means. The caller's `components` buffer is not modified.
template <typename math_t>
void pcaInverseTransform(const raft::handle_t& handle,
                         math_t* trans_input,
                         math_t* components,
                         math_t* singular_vals,
                         math_t* mu,
                         math_t* input,
                         const paramsPCA& prms,
                         cudaStream_t stream)
{
  ASSERT(prms.n_cols > 1, "Parameter n_cols: number of columns cannot be less than two");
  ASSERT(prms.n_rows > 0, "Parameter n_rows: number of rows cannot be less than one");
  ASSERT(prms.n_components > 0,
         "Parameter n_components: number of components cannot be less than one");

  // Work on a copy so the input components are left untouched
  auto n_comp_elems = prms.n_cols * prms.n_components;
  rmm::device_uvector<math_t> comps_work{n_comp_elems, stream};
  raft::copy(comps_work.data(), components, prms.n_cols * prms.n_components, stream);

  if (prms.whiten) {
    // Undo the whitening scaling: divide by sqrt(n_samples - 1) and multiply
    // back by the singular values
    math_t sqrt_n_samples = sqrt(prms.n_rows - 1);
    math_t scale          = prms.n_rows - 1 > 0 ? math_t(1 / sqrt_n_samples) : 0;
    raft::linalg::scalarMultiply(
      comps_work.data(), comps_work.data(), scale, prms.n_cols * prms.n_components, stream);
    raft::matrix::matrixVectorBinaryMultSkipZero(
      comps_work.data(), singular_vals, prms.n_cols, prms.n_components, true, true, stream);
  }

  // Back-project to feature space, then restore the feature means
  tsvdInverseTransform(handle, trans_input, comps_work.data(), input, prms, stream);
  raft::stats::meanAdd(input, input, mu, prms.n_cols, prms.n_rows, false, true, stream);
}
// TODO: implement pcaScore function
// Placeholder: not implemented yet; calling this always fails with an assertion.
template <typename math_t>
void pcaScore()
{
  ASSERT(false, "pcaScore: will be implemented!");
}

// TODO: implement pcaScoreSamples function
// Placeholder: not implemented yet; calling this always fails with an assertion.
template <typename math_t>
void pcaScoreSamples()
{
  ASSERT(false, "pcaScoreSamples: will be implemented!");
}
/**
* @brief performs transform operation for the pca. Transforms the data to eigenspace.
* @param[in] handle: the internal cuml handle object
* @param[in] input: the data is transformed. Size n_rows x n_components.
* @param[in] components: principal components of the input data. Size n_cols * n_components.
* @param[out] trans_input: the transformed data. Size n_rows * n_components.
* @param[in] singular_vals: singular values of the data. Size n_components * 1.
* @param[in] mu: mean value of the input data
* @param[in] prms: data structure that includes all the parameters from input size to algorithm.
* @param[in] stream cuda stream
*/
// Project the input onto the fitted components (optionally whitened).
// The input is temporarily mean-centered and restored before returning;
// the caller's `components` buffer is not modified.
template <typename math_t>
void pcaTransform(const raft::handle_t& handle,
                  math_t* input,
                  math_t* components,
                  math_t* trans_input,
                  math_t* singular_vals,
                  math_t* mu,
                  const paramsPCA& prms,
                  cudaStream_t stream)
{
  ASSERT(prms.n_cols > 1, "Parameter n_cols: number of columns cannot be less than two");
  ASSERT(prms.n_rows > 0, "Parameter n_rows: number of rows cannot be less than one");
  ASSERT(prms.n_components > 0,
         "Parameter n_components: number of components cannot be less than one");

  // Work on a copy so the input components stay unmodified
  auto n_comp_elems = prms.n_cols * prms.n_components;
  rmm::device_uvector<math_t> comps_work{n_comp_elems, stream};
  raft::copy(comps_work.data(), components, prms.n_cols * prms.n_components, stream);

  if (prms.whiten) {
    // Whitening: scale by sqrt(n_samples - 1) and divide by the singular values
    math_t scale = math_t(sqrt(prms.n_rows - 1));
    raft::linalg::scalarMultiply(
      comps_work.data(), comps_work.data(), scale, prms.n_cols * prms.n_components, stream);
    raft::matrix::matrixVectorBinaryDivSkipZero(
      comps_work.data(), singular_vals, prms.n_cols, prms.n_components, true, true, stream);
  }

  // Center, project onto the components, then restore the input means
  raft::stats::meanCenter(input, input, mu, prms.n_cols, prms.n_rows, false, true, stream);
  tsvdTransform(handle, input, comps_work.data(), trans_input, prms, stream);
  raft::stats::meanAdd(input, input, mu, prms.n_cols, prms.n_rows, false, true, stream);
}
}; // end namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/pca/sign_flip_mg.cu | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/decomposition/sign_flip_mg.hpp>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <raft/core/comms.hpp>
#include <raft/core/handle.hpp>
#include <raft/util/cuda_utils.cuh>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
#include <cstddef>
using namespace MLCommon;
namespace ML {
namespace PCA {
namespace opg {
// TODO: replace these thrust code with cuda kernels or prims
/**
 * @brief For each slice of `input`, find the element with the largest
 * absolute value and write its original (signed) value to `max_vals`.
 *
 * - row_major == false: `input` is treated as a column-major n_rows x n_cols
 *   matrix; one result per column is produced (max_vals holds n_cols values).
 * - row_major == true: thread idx scans input[idx], input[idx + n_rows],
 *   input[idx + 2*n_rows], ... over n_rows*n_cols elements, i.e. one result
 *   per "row" of column-major storage (max_vals holds n_rows values).
 *
 * NOTE(review): callers in this file pass the same pointer for `input` and
 * `max_vals` in the row_major case. This looks safe because thread idx only
 * writes max_vals[idx] (== input[idx]) at the very end, and no other thread
 * reads that cell — but verify if the access pattern ever changes.
 */
template <typename T>
void findMaxAbsOfColumns(T* input,
                         std::size_t n_rows,
                         std::size_t n_cols,
                         T* max_vals,
                         cudaStream_t stream,
                         bool row_major = false)
{
  auto counting = thrust::make_counting_iterator(0);
  auto m        = n_rows;
  auto n        = n_cols;

  auto execution_policy = rmm::exec_policy(stream);
  if (row_major) {
    thrust::for_each(
      execution_policy, counting, counting + n_rows, [=] __device__(std::size_t idx) {
        T max                 = 0.0;
        std::size_t max_index = 0;
        std::size_t d_i       = idx;
        std::size_t end       = d_i + (m * n);

        // Strided scan: visits one element of each of the n column groups
        for (auto i = d_i; i < end; i = i + m) {
          T val = input[i];
          if (val < 0.0) { val = -val; }
          if (val > max) {
            max       = val;
            max_index = i;
          }
        }
        // Store the signed value of the max-|.| element
        max_vals[idx] = input[max_index];
      });
  } else {
    thrust::for_each(
      execution_policy, counting, counting + n_cols, [=] __device__(std::size_t idx) {
        T max                 = 0.0;
        std::size_t max_index = 0;
        std::size_t d_i       = idx * m;
        std::size_t end       = d_i + m;

        // Contiguous scan over one column of the column-major matrix
        for (auto i = d_i; i < end; i++) {
          T val = input[i];
          if (val < 0.0) { val = -val; }
          if (val > max) {
            max       = val;
            max_index = i;
          }
        }
        // Store the signed value of the max-|.| element
        max_vals[idx] = input[max_index];
      });
  }
}
// TODO: replace these thrust code with cuda kernels or prims
// Negate every column of the column-major n_rows x n_cols matrix `input`
// whose corresponding max-|value| entry in `max_vals` is negative.
template <typename T>
void flip(T* input, std::size_t n_rows, std::size_t n_cols, T* max_vals, cudaStream_t stream)
{
  auto rows  = n_rows;
  auto first = thrust::make_counting_iterator(0);
  thrust::for_each(
    rmm::exec_policy(stream), first, first + n_cols, [=] __device__(std::size_t col) {
      if (max_vals[col] < 0.0) {
        // Flip the sign of the whole column
        auto begin = col * rows;
        auto stop  = begin + rows;
        for (auto i = begin; i < stop; i++) {
          input[i] = -input[i];
        }
      }
    });
}
/**
* @brief sign flip for PCA and tSVD. This is used to stabilize the sign of column major eigen
* vectors
* @input param handle: the internal cuml handle object
* @input/output param input param input: input matrix that will be used to determine the sign.
* @input param input_desc: MNMG description of the input
* @input/output param components: components matrix.
* @input param n_components: number of columns of components matrix
* @input param streams: cuda streams
* @input param n_streams: number of streams
* @{
*/
// MNMG sign-flip implementation: determine, per component column, the sign of
// the globally largest-|value| element across all ranks and blocks, then flip
// every column whose reference element is negative (in both the distributed
// data and the components matrix). `max_vals` is deliberately reused in place
// across the successive reductions (see findMaxAbsOfColumns for why that is
// safe).
template <typename T>
void sign_flip_imp(raft::handle_t& handle,
                   std::vector<Matrix::Data<T>*>& input,
                   Matrix::PartDescriptor& input_desc,
                   T* components,
                   std::size_t n_components,
                   cudaStream_t* streams,
                   std::uint32_t n_stream)
{
  int rank = handle.get_comms().get_rank();

  const auto& comm = handle.get_comms();

  // Data blocks owned by this rank
  std::vector<Matrix::RankSizePair*> local_blocks = input_desc.blocksOwnedBy(rank);

  // Sized for the larger of the per-block and per-rank reduction stages
  rmm::device_uvector<T> max_vals(
    std::max(size_t(comm.get_size()), local_blocks.size()) * n_components, streams[0]);

  // 1) Per-block column-wise max-|value| (one n_components group per block),
  //    round-robined over the available streams
  for (std::size_t i = 0; i < input.size(); i++) {
    T* mv_loc = max_vals.data() + (i * n_components);
    findMaxAbsOfColumns(
      input[i]->ptr, local_blocks[i]->size, n_components, mv_loc, streams[i % n_stream]);
  }

  for (std::uint32_t i = 0; i < n_stream; i++) {
    handle.sync_stream(streams[i]);
  }

  // 2) Reduce across this rank's blocks (in place, row_major=true)
  findMaxAbsOfColumns(
    max_vals.data(), n_components, local_blocks.size(), max_vals.data(), streams[0], true);

  // 3) Gather every rank's local maxima, then reduce across ranks
  comm.allgather(max_vals.data(), max_vals.data(), n_components, streams[0]);
  comm.sync_stream(streams[0]);

  findMaxAbsOfColumns(
    max_vals.data(), n_components, comm.get_size(), max_vals.data(), streams[0], true);

  // 4) Flip negative-reference columns in the distributed data...
  for (std::size_t i = 0; i < local_blocks.size(); i++) {
    flip(
      input[i]->ptr, local_blocks[i]->size, n_components, max_vals.data(), streams[i % n_stream]);
  }

  for (std::uint32_t i = 0; i < n_stream; i++) {
    handle.sync_stream(streams[i]);
  }

  // ...and in the components matrix
  flip(components, input_desc.N, n_components, max_vals.data(), streams[0]);
}
// Single-precision entry point; forwards to the templated sign_flip_imp.
void sign_flip(raft::handle_t& handle,
               std::vector<Matrix::Data<float>*>& input_data,
               Matrix::PartDescriptor& input_desc,
               float* components,
               std::size_t n_components,
               cudaStream_t* streams,
               std::uint32_t n_stream)
{
  sign_flip_imp(handle, input_data, input_desc, components, n_components, streams, n_stream);
}

// Double-precision entry point; forwards to the templated sign_flip_imp.
void sign_flip(raft::handle_t& handle,
               std::vector<Matrix::Data<double>*>& input_data,
               Matrix::PartDescriptor& input_desc,
               double* components,
               std::size_t n_components,
               cudaStream_t* streams,
               std::uint32_t n_stream)
{
  sign_flip_imp(handle, input_data, input_desc, components, n_components, streams, n_stream);
}
} // namespace opg
} // namespace PCA
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/pca/pca.cu | /*
* Copyright (c) 2018-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "pca.cuh"
#include <cuml/decomposition/pca.hpp>
#include <raft/core/handle.hpp>
namespace ML {
// Single- and double-precision C++ API entry points. Each overload forwards
// to the templated implementation in pca.cuh, supplying the handle's stream.

// PCA fit (float)
void pcaFit(raft::handle_t& handle,
            float* input,
            float* components,
            float* explained_var,
            float* explained_var_ratio,
            float* singular_vals,
            float* mu,
            float* noise_vars,
            const paramsPCA& prms)
{
  pcaFit(handle,
         input,
         components,
         explained_var,
         explained_var_ratio,
         singular_vals,
         mu,
         noise_vars,
         prms,
         handle.get_stream());
}

// PCA fit (double)
void pcaFit(raft::handle_t& handle,
            double* input,
            double* components,
            double* explained_var,
            double* explained_var_ratio,
            double* singular_vals,
            double* mu,
            double* noise_vars,
            const paramsPCA& prms)
{
  pcaFit(handle,
         input,
         components,
         explained_var,
         explained_var_ratio,
         singular_vals,
         mu,
         noise_vars,
         prms,
         handle.get_stream());
}

// PCA fit + transform (float)
void pcaFitTransform(raft::handle_t& handle,
                     float* input,
                     float* trans_input,
                     float* components,
                     float* explained_var,
                     float* explained_var_ratio,
                     float* singular_vals,
                     float* mu,
                     float* noise_vars,
                     const paramsPCA& prms)
{
  pcaFitTransform(handle,
                  input,
                  trans_input,
                  components,
                  explained_var,
                  explained_var_ratio,
                  singular_vals,
                  mu,
                  noise_vars,
                  prms,
                  handle.get_stream());
}

// PCA fit + transform (double)
void pcaFitTransform(raft::handle_t& handle,
                     double* input,
                     double* trans_input,
                     double* components,
                     double* explained_var,
                     double* explained_var_ratio,
                     double* singular_vals,
                     double* mu,
                     double* noise_vars,
                     const paramsPCA& prms)
{
  pcaFitTransform(handle,
                  input,
                  trans_input,
                  components,
                  explained_var,
                  explained_var_ratio,
                  singular_vals,
                  mu,
                  noise_vars,
                  prms,
                  handle.get_stream());
}

// PCA inverse transform (float)
void pcaInverseTransform(raft::handle_t& handle,
                         float* trans_input,
                         float* components,
                         float* singular_vals,
                         float* mu,
                         float* input,
                         const paramsPCA& prms)
{
  pcaInverseTransform(
    handle, trans_input, components, singular_vals, mu, input, prms, handle.get_stream());
}

// PCA inverse transform (double)
void pcaInverseTransform(raft::handle_t& handle,
                         double* trans_input,
                         double* components,
                         double* singular_vals,
                         double* mu,
                         double* input,
                         const paramsPCA& prms)
{
  pcaInverseTransform(
    handle, trans_input, components, singular_vals, mu, input, prms, handle.get_stream());
}

// PCA transform (float)
void pcaTransform(raft::handle_t& handle,
                  float* input,
                  float* components,
                  float* trans_input,
                  float* singular_vals,
                  float* mu,
                  const paramsPCA& prms)
{
  pcaTransform(
    handle, input, components, trans_input, singular_vals, mu, prms, handle.get_stream());
}

// PCA transform (double)
void pcaTransform(raft::handle_t& handle,
                  double* input,
                  double* components,
                  double* trans_input,
                  double* singular_vals,
                  double* mu,
                  const paramsPCA& prms)
{
  pcaTransform(
    handle, input, components, trans_input, singular_vals, mu, prms, handle.get_stream());
}
}; // end namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/pca/pca_mg.cu | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "pca.cuh"

#include <cuml/decomposition/pca.hpp>
#include <cuml/decomposition/pca_mg.hpp>
#include <cuml/decomposition/sign_flip_mg.hpp>

#include <cumlprims/opg/linalg/qr_based_svd.hpp>
#include <cumlprims/opg/matrix/matrix_utils.hpp>
#include <cumlprims/opg/stats/cov.hpp>
#include <cumlprims/opg/stats/mean.hpp>
#include <cumlprims/opg/stats/mean_center.hpp>

#include <raft/core/comms.hpp>
#include <raft/core/handle.hpp>
#include <raft/linalg/transpose.cuh>
#include <raft/matrix/math.cuh>
#include <raft/stats/mean_center.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>

#include <cstddef>
#include <vector>
using namespace MLCommon;
namespace ML {
namespace PCA {
namespace opg {
// Covariance/eigen-decomposition MNMG PCA fit, working on caller-provided
// CUDA streams. Writes components, explained variances/ratios, singular
// values and the column means.
template <typename T>
void fit_impl(raft::handle_t& handle,
              std::vector<Matrix::Data<T>*>& input_data,
              Matrix::PartDescriptor& input_desc,
              T* components,
              T* explained_var,
              T* explained_var_ratio,
              T* singular_vals,
              T* mu,
              T* noise_vars,
              paramsPCAMG prms,
              cudaStream_t* streams,
              std::uint32_t n_streams,
              bool verbose)
{
  const auto& communicator = handle.get_comms();

  // Column means of the distributed input
  Matrix::Data<T> mean_data{mu, prms.n_cols};
  Stats::opg::mean(handle, mean_data, input_data, input_desc, streams, n_streams);

  // n_cols x n_cols covariance matrix; the mean_add at the end re-adds the
  // means removed during this step
  rmm::device_uvector<T> cov_buf(prms.n_cols * prms.n_cols, streams[0]);
  Matrix::Data<T> cov{cov_buf.data(), cov_buf.size()};
  Stats::opg::cov(handle, cov, input_data, input_desc, mean_data, true, streams, n_streams);

  // Truncated eigen decomposition: components + explained variances/ratios
  ML::truncCompExpVars<T, mg_solver>(
    handle, cov.ptr, components, explained_var, explained_var_ratio, prms, streams[0]);

  // Singular values derived from the explained variances, scaled by (n - 1)
  T var_scale = (prms.n_rows - 1);
  raft::matrix::seqRoot(
    explained_var, singular_vals, var_scale, prms.n_components, streams[0], true);

  // Restore the original (un-centered) input
  Stats::opg::mean_add(input_data, input_desc, mean_data, communicator, streams, n_streams);
}
/**
* @brief performs MNMG fit operation for the pca
* @input param handle: the internal cuml handle object
* @input param rank_sizes: includes all the partition size information for the rank
* @input param input: input data
* @input param components: principal components of the input data
* @output param explained_var: explained var
* @output param explained_var_ratio: the explained var ratio
* @output param singular_vals: singular values of the data
* @output param mu: mean of every column in input
* @output param noise_vars: variance of the noise
* @input param prms: data structure that includes all the parameters from input size to algorithm
* @input param verbose
*/
// Dispatching MNMG PCA fit: creates the per-block CUDA streams, then runs
// either the covariance/eigen path (COV_EIG_JACOBI / COV_EIG_DQ) or the
// QR-based SVD path. See the doxygen block above for parameter semantics.
template <typename T>
void fit_impl(raft::handle_t& handle,
              std::vector<Matrix::Data<T>*>& input_data,
              Matrix::PartDescriptor& input_desc,
              T* components,
              T* explained_var,
              T* explained_var_ratio,
              T* singular_vals,
              T* mu,
              T* noise_vars,
              paramsPCAMG prms,
              bool verbose)
{
  int rank = handle.get_comms().get_rank();

  // TODO: These streams should come from raft::handle_t
  // Reference issue https://github.com/rapidsai/cuml/issues/2470
  // Use std::vector rather than a variable-length array: VLAs with a runtime
  // bound are a non-standard compiler extension in C++.
  auto n_streams = input_desc.blocksOwnedBy(rank).size();
  std::vector<cudaStream_t> streams(n_streams);
  for (std::uint32_t i = 0; i < n_streams; i++) {
    RAFT_CUDA_TRY(cudaStreamCreate(&streams[i]));
  }

  if (prms.algorithm == mg_solver::COV_EIG_JACOBI || prms.algorithm == mg_solver::COV_EIG_DQ) {
    // Covariance + eigen decomposition path
    fit_impl(handle,
             input_data,
             input_desc,
             components,
             explained_var,
             explained_var_ratio,
             singular_vals,
             mu,
             noise_vars,
             prms,
             streams.data(),
             n_streams,
             verbose);

    for (std::uint32_t i = 0; i < n_streams; i++) {
      handle.sync_stream(streams[i]);
    }
  } else if (prms.algorithm == mg_solver::QR) {
    const raft::handle_t& h = handle;
    cudaStream_t stream     = h.get_stream();
    const auto& comm        = h.get_comms();

    // Center the data
    Matrix::Data<T> mu_data{mu, prms.n_cols};
    Stats::opg::mean(handle, mu_data, input_data, input_desc, streams.data(), n_streams);
    Stats::opg::mean_center(input_data, input_desc, mu_data, comm, streams.data(), n_streams);
    for (std::uint32_t i = 0; i < n_streams; i++) {
      handle.sync_stream(streams[i]);
    }

    // Allocate Q, S and V and call QR
    std::vector<Matrix::Data<T>*> uMatrixParts;
    Matrix::opg::allocate(h, uMatrixParts, input_desc, rank, stream);
    rmm::device_uvector<T> sVector(prms.n_cols, stream);
    rmm::device_uvector<T> vMatrix(prms.n_cols * prms.n_cols, stream);
    RAFT_CUDA_TRY(cudaMemset(vMatrix.data(), 0, prms.n_cols * prms.n_cols * sizeof(T)));

    LinAlg::opg::svdQR(h,
                       sVector.data(),
                       uMatrixParts,
                       vMatrix.data(),
                       true,
                       true,
                       prms.tol,
                       prms.n_iterations,
                       input_data,
                       input_desc,
                       rank);

    // Deterministic sign convention for the singular vectors
    sign_flip(
      handle, uMatrixParts, input_desc, vMatrix.data(), prms.n_cols, streams.data(), n_streams);

    // Calculate instance variables: explained variance = s^2 / (n - 1),
    // ratios relative to the total, then truncate to n_components
    rmm::device_uvector<T> explained_var_all(prms.n_cols, stream);
    rmm::device_uvector<T> explained_var_ratio_all(prms.n_cols, stream);

    T scalar = 1.0 / (prms.n_rows - 1);
    raft::matrix::power(sVector.data(), explained_var_all.data(), scalar, prms.n_cols, stream);
    raft::matrix::ratio(
      handle, explained_var_all.data(), explained_var_ratio_all.data(), prms.n_cols, stream);

    raft::matrix::truncZeroOrigin(
      sVector.data(), prms.n_cols, singular_vals, prms.n_components, std::size_t(1), stream);
    raft::matrix::truncZeroOrigin(explained_var_all.data(),
                                  prms.n_cols,
                                  explained_var,
                                  prms.n_components,
                                  std::size_t(1),
                                  stream);
    raft::matrix::truncZeroOrigin(explained_var_ratio_all.data(),
                                  prms.n_cols,
                                  explained_var_ratio,
                                  prms.n_components,
                                  std::size_t(1),
                                  stream);

    // Components are the leading rows of V^T
    raft::linalg::transpose(vMatrix.data(), prms.n_cols, stream);
    raft::matrix::truncZeroOrigin(
      vMatrix.data(), prms.n_cols, components, prms.n_components, prms.n_cols, stream);

    Matrix::opg::deallocate(h, uMatrixParts, input_desc, rank, stream);

    // Re-add mean to centered data
    Stats::opg::mean_add(input_data, input_desc, mu_data, comm, streams.data(), n_streams);
  }

  for (std::uint32_t i = 0; i < n_streams; i++) {
    handle.sync_stream(streams[i]);
  }

  for (std::uint32_t i = 0; i < n_streams; i++) {
    RAFT_CUDA_TRY(cudaStreamDestroy(streams[i]));
  }
}
template <typename T>
void transform_impl(raft::handle_t& handle,
                    std::vector<Matrix::Data<T>*>& input,
                    const Matrix::PartDescriptor input_desc,
                    T* components,
                    std::vector<Matrix::Data<T>*>& trans_input,
                    T* singular_vals,
                    T* mu,
                    const paramsPCAMG prms,
                    cudaStream_t* streams,
                    std::uint32_t n_streams,
                    bool verbose)
{
  std::vector<Matrix::RankSizePair*> local_blocks = input_desc.partsToRanks;

  if (prms.whiten) {
    // Whitening: scale the components in place by sqrt(n_rows - 1) and divide
    // by the singular values. The inverse transformation is applied at the
    // end of this function to restore `components`.
    T scalar = T(sqrt(prms.n_rows - 1));
    raft::linalg::scalarMultiply(
      components, components, scalar, prms.n_cols * prms.n_components, streams[0]);
    raft::matrix::matrixVectorBinaryDivSkipZero(
      components, singular_vals, prms.n_cols, prms.n_components, true, true, streams[0]);
  }

  // For each local partition: center, project onto the components, un-center.
  // Work is round-robined over the available streams.
  for (std::size_t i = 0; i < input.size(); i++) {
    auto si = i % n_streams;

    raft::stats::meanCenter(input[i]->ptr,
                            input[i]->ptr,
                            mu,
                            prms.n_cols,
                            local_blocks[i]->size,
                            false,
                            true,
                            streams[si]);

    T alpha = T(1);
    T beta  = T(0);

    // trans_input = input * components^T  (projection onto principal axes)
    raft::linalg::gemm(handle,
                       input[i]->ptr,
                       local_blocks[i]->size,
                       prms.n_cols,
                       components,
                       trans_input[i]->ptr,
                       local_blocks[i]->size,
                       prms.n_components,
                       CUBLAS_OP_N,
                       CUBLAS_OP_T,
                       alpha,
                       beta,
                       streams[si]);

    // Restore the caller's (uncentered) input.
    raft::stats::meanAdd(input[i]->ptr,
                         input[i]->ptr,
                         mu,
                         prms.n_cols,
                         local_blocks[i]->size,
                         false,
                         true,
                         streams[si]);
  }

  if (prms.whiten) {
    // Undo the in-place whitening scaling applied above.
    raft::matrix::matrixVectorBinaryMultSkipZero(
      components, singular_vals, prms.n_cols, prms.n_components, true, true, streams[0]);
    T scalar = T(1 / sqrt(prms.n_rows - 1));
    raft::linalg::scalarMultiply(
      components, components, scalar, prms.n_cols * prms.n_components, streams[0]);
  }

  for (std::uint32_t i = 0; i < n_streams; i++) {
    handle.sync_stream(streams[i]);
  }
}
/**
* @brief performs MNMG transform operation for the pca.
* @input param handle: the internal cuml handle object
* @input param rank_sizes: includes all the partition size information for the rank
* @input param n_parts: number of partitions
* @input param input: input data
* @input param components: principal components of the input data
* @output param trans_input: transformed input data
* @input param singular_vals: singular values of the data
* @input param mu: mean of every column in input
* @input param prms: data structure that includes all the parameters from input size to algorithm
* @input param verbose
*/
/**
 * Pointer-based entry point: wraps raw partition arrays into the vector-based
 * overload, creating one CUDA stream per partition.
 */
template <typename T>
void transform_impl(raft::handle_t& handle,
                    Matrix::RankSizePair** rank_sizes,
                    std::uint32_t n_parts,
                    Matrix::Data<T>** input,
                    T* components,
                    Matrix::Data<T>** trans_input,
                    T* singular_vals,
                    T* mu,
                    paramsPCAMG prms,
                    bool verbose)
{
  // We want to update the API of this function, and other functions with
  // regards to https://github.com/rapidsai/cuml/issues/2471
  int rank = handle.get_comms().get_rank();

  std::vector<Matrix::RankSizePair*> ranksAndSizes(rank_sizes, rank_sizes + n_parts);
  std::vector<Matrix::Data<T>*> input_data(input, input + n_parts);
  Matrix::PartDescriptor input_desc(prms.n_rows, prms.n_cols, ranksAndSizes, rank);
  std::vector<Matrix::Data<T>*> trans_data(trans_input, trans_input + n_parts);

  // TODO: These streams should come from raft::handle_t
  auto n_streams = n_parts;
  // std::vector instead of a variable-length array (VLAs are a non-standard
  // C++ extension).
  std::vector<cudaStream_t> streams(n_streams);
  for (std::uint32_t i = 0; i < n_streams; i++) {
    RAFT_CUDA_TRY(cudaStreamCreate(&streams[i]));
  }

  transform_impl(handle,
                 input_data,
                 input_desc,
                 components,
                 trans_data,
                 singular_vals,
                 mu,
                 prms,
                 streams.data(),
                 n_streams,
                 verbose);

  for (std::uint32_t i = 0; i < n_streams; i++) {
    handle.sync_stream(streams[i]);
  }

  for (std::uint32_t i = 0; i < n_streams; i++) {
    RAFT_CUDA_TRY(cudaStreamDestroy(streams[i]));
  }
}
template <typename T>
void inverse_transform_impl(raft::handle_t& handle,
                            std::vector<Matrix::Data<T>*>& trans_input,
                            Matrix::PartDescriptor trans_input_desc,
                            T* components,
                            std::vector<Matrix::Data<T>*>& input,
                            T* singular_vals,
                            T* mu,
                            paramsPCAMG prms,
                            cudaStream_t* streams,
                            std::uint32_t n_streams,
                            bool verbose)
{
  std::vector<Matrix::RankSizePair*> local_blocks = trans_input_desc.partsToRanks;

  if (prms.whiten) {
    // Undo whitening on the components in place (restored after the loop).
    // NOTE(review): these calls size the components buffer with prms.n_rows,
    // while transform_impl uses prms.n_cols for the same matrix — one of the
    // two looks inconsistent; confirm the intended extent.
    T scalar = T(1 / sqrt(prms.n_rows - 1));
    raft::linalg::scalarMultiply(
      components, components, scalar, prms.n_rows * prms.n_components, streams[0]);
    raft::matrix::matrixVectorBinaryMultSkipZero(
      components, singular_vals, prms.n_rows, prms.n_components, true, true, streams[0]);
  }

  // For each local partition: input = trans_input * components, then re-add
  // the column means. Work is round-robined over the available streams.
  for (std::size_t i = 0; i < local_blocks.size(); i++) {
    auto si  = i % n_streams;
    T alpha  = T(1);
    T beta   = T(0);

    raft::linalg::gemm(handle,
                       trans_input[i]->ptr,
                       local_blocks[i]->size,
                       prms.n_components,
                       components,
                       input[i]->ptr,
                       local_blocks[i]->size,
                       prms.n_cols,
                       CUBLAS_OP_N,
                       CUBLAS_OP_N,
                       alpha,
                       beta,
                       streams[si]);

    raft::stats::meanAdd(input[i]->ptr,
                         input[i]->ptr,
                         mu,
                         prms.n_cols,
                         local_blocks[i]->size,
                         false,
                         true,
                         streams[si]);
  }

  if (prms.whiten) {
    // Restore the components to their original (whitened-transform) state.
    raft::matrix::matrixVectorBinaryDivSkipZero(
      components, singular_vals, prms.n_rows, prms.n_components, true, true, streams[0]);
    T scalar = T(sqrt(prms.n_rows - 1));
    raft::linalg::scalarMultiply(
      components, components, scalar, prms.n_rows * prms.n_components, streams[0]);
  }

  for (std::uint32_t i = 0; i < n_streams; i++) {
    handle.sync_stream(streams[i]);
  }
}
/**
* @brief performs MNMG inverse transform operation for the pca.
* @input param handle: the internal cuml handle object
* @input param rank_sizes: includes all the partition size information for the rank
* @input param n_parts: number of partitions
* @input param trans_input: transformed input data
* @input param components: principal components of the input data
* @output param input: input data
* @input param singular_vals: singular values of the data
* @input param mu: mean of every column in input
* @input param prms: data structure that includes all the parameters from input size to algorithm
* @input param verbose
*/
/**
 * Pointer-based entry point: wraps raw partition arrays into the vector-based
 * overload, creating one CUDA stream per partition.
 */
template <typename T>
void inverse_transform_impl(raft::handle_t& handle,
                            Matrix::RankSizePair** rank_sizes,
                            std::uint32_t n_parts,
                            Matrix::Data<T>** trans_input,
                            T* components,
                            Matrix::Data<T>** input,
                            T* singular_vals,
                            T* mu,
                            paramsPCAMG prms,
                            bool verbose)
{
  int rank = handle.get_comms().get_rank();

  std::vector<Matrix::RankSizePair*> ranksAndSizes(rank_sizes, rank_sizes + n_parts);
  // The transformed input has n_components columns.
  Matrix::PartDescriptor trans_desc(prms.n_rows, prms.n_components, ranksAndSizes, rank);
  std::vector<Matrix::Data<T>*> trans_data(trans_input, trans_input + n_parts);
  std::vector<Matrix::Data<T>*> input_data(input, input + n_parts);

  // TODO: These streams should come from raft::handle_t
  auto n_streams = n_parts;
  // std::vector instead of a variable-length array (VLAs are a non-standard
  // C++ extension).
  std::vector<cudaStream_t> streams(n_streams);
  for (std::uint32_t i = 0; i < n_streams; i++) {
    RAFT_CUDA_TRY(cudaStreamCreate(&streams[i]));
  }

  inverse_transform_impl(handle,
                         trans_data,
                         trans_desc,
                         components,
                         input_data,
                         singular_vals,
                         mu,
                         prms,
                         streams.data(),
                         n_streams,
                         verbose);

  for (std::uint32_t i = 0; i < n_streams; i++) {
    handle.sync_stream(streams[i]);
  }

  for (std::uint32_t i = 0; i < n_streams; i++) {
    RAFT_CUDA_TRY(cudaStreamDestroy(streams[i]));
  }
}
/**
* @brief performs MNMG fit and transform operation for the pca.
* @input param handle: the internal cuml handle object
* @input param rank_sizes: includes all the partition size information for the rank
* @input param n_parts: number of partitions
* @input param input: input data
* @output param trans_input: transformed input data
* @output param components: principal components of the input data
* @output param explained_var: explained var
* @output param explained_var_ratio: the explained var ratio
* @output param singular_vals: singular values of the data
* @output param mu: mean of every column in input
* @output param noise_vars: variance of the noise
* @input param prms: data structure that includes all the parameters from input size to algorithm
* @input param verbose
*/
/**
 * Fit the PCA model and transform the input in one call. Wraps the raw
 * partition arrays, runs fit_impl then transform_impl, and finally applies a
 * deterministic sign flip to the transformed data and components.
 */
template <typename T>
void fit_transform_impl(raft::handle_t& handle,
                        Matrix::RankSizePair** rank_sizes,
                        std::uint32_t n_parts,
                        Matrix::Data<T>** input,
                        Matrix::Data<T>** trans_input,
                        T* components,
                        T* explained_var,
                        T* explained_var_ratio,
                        T* singular_vals,
                        T* mu,
                        T* noise_vars,
                        paramsPCAMG prms,
                        bool verbose)
{
  int rank = handle.get_comms().get_rank();

  std::vector<Matrix::RankSizePair*> ranksAndSizes(rank_sizes, rank_sizes + n_parts);
  std::vector<Matrix::Data<T>*> input_data(input, input + n_parts);
  Matrix::PartDescriptor input_desc(prms.n_rows, prms.n_cols, ranksAndSizes, rank);
  std::vector<Matrix::Data<T>*> trans_data(trans_input, trans_input + n_parts);

  // TODO: These streams should come from raft::handle_t
  auto n_streams = n_parts;
  // std::vector instead of a variable-length array (VLAs are a non-standard
  // C++ extension).
  std::vector<cudaStream_t> streams(n_streams);
  for (std::uint32_t i = 0; i < n_streams; i++) {
    RAFT_CUDA_TRY(cudaStreamCreate(&streams[i]));
  }

  fit_impl(handle,
           input_data,
           input_desc,
           components,
           explained_var,
           explained_var_ratio,
           singular_vals,
           mu,
           noise_vars,
           prms,
           streams.data(),
           n_streams,
           verbose);

  transform_impl(handle,
                 input_data,
                 input_desc,
                 components,
                 trans_data,
                 singular_vals,
                 mu,
                 prms,
                 streams.data(),
                 n_streams,
                 verbose);

  sign_flip(
    handle, trans_data, input_desc, components, prms.n_components, streams.data(), n_streams);

  for (std::uint32_t i = 0; i < n_streams; i++) {
    handle.sync_stream(streams[i]);
  }

  for (std::uint32_t i = 0; i < n_streams; i++) {
    RAFT_CUDA_TRY(cudaStreamDestroy(streams[i]));
  }
}
void fit(raft::handle_t& handle,
         std::vector<Matrix::Data<float>*>& input_data,
         Matrix::PartDescriptor& input_desc,
         float* components,
         float* explained_var,
         float* explained_var_ratio,
         float* singular_vals,
         float* mu,
         float* noise_vars,
         paramsPCAMG prms,
         bool verbose)
{
  // Single-precision entry point; forwards to the templated implementation.
  fit_impl<float>(handle, input_data, input_desc, components, explained_var,
                  explained_var_ratio, singular_vals, mu, noise_vars, prms, verbose);
}
void fit(raft::handle_t& handle,
         std::vector<Matrix::Data<double>*>& input_data,
         Matrix::PartDescriptor& input_desc,
         double* components,
         double* explained_var,
         double* explained_var_ratio,
         double* singular_vals,
         double* mu,
         double* noise_vars,
         paramsPCAMG prms,
         bool verbose)
{
  // Double-precision entry point; forwards to the templated implementation.
  fit_impl<double>(handle, input_data, input_desc, components, explained_var,
                   explained_var_ratio, singular_vals, mu, noise_vars, prms, verbose);
}
void fit_transform(raft::handle_t& handle,
                   Matrix::RankSizePair** rank_sizes,
                   std::uint32_t n_parts,
                   Matrix::floatData_t** input,
                   Matrix::floatData_t** trans_input,
                   float* components,
                   float* explained_var,
                   float* explained_var_ratio,
                   float* singular_vals,
                   float* mu,
                   float* noise_vars,
                   paramsPCAMG prms,
                   bool verbose)
{
  // Single-precision entry point; forwards to the templated implementation.
  fit_transform_impl<float>(handle, rank_sizes, n_parts, input, trans_input, components,
                            explained_var, explained_var_ratio, singular_vals, mu,
                            noise_vars, prms, verbose);
}
void fit_transform(raft::handle_t& handle,
                   Matrix::RankSizePair** rank_sizes,
                   std::uint32_t n_parts,
                   Matrix::doubleData_t** input,
                   Matrix::doubleData_t** trans_input,
                   double* components,
                   double* explained_var,
                   double* explained_var_ratio,
                   double* singular_vals,
                   double* mu,
                   double* noise_vars,
                   paramsPCAMG prms,
                   bool verbose)
{
  // Double-precision entry point; forwards to the templated implementation.
  fit_transform_impl<double>(handle, rank_sizes, n_parts, input, trans_input, components,
                             explained_var, explained_var_ratio, singular_vals, mu,
                             noise_vars, prms, verbose);
}
void transform(raft::handle_t& handle,
               Matrix::RankSizePair** rank_sizes,
               std::uint32_t n_parts,
               Matrix::Data<float>** input,
               float* components,
               Matrix::Data<float>** trans_input,
               float* singular_vals,
               float* mu,
               paramsPCAMG prms,
               bool verbose)
{
  // Single-precision entry point; forwards to the templated implementation.
  transform_impl<float>(handle, rank_sizes, n_parts, input, components, trans_input,
                        singular_vals, mu, prms, verbose);
}
void transform(raft::handle_t& handle,
               Matrix::RankSizePair** rank_sizes,
               std::uint32_t n_parts,
               Matrix::Data<double>** input,
               double* components,
               Matrix::Data<double>** trans_input,
               double* singular_vals,
               double* mu,
               paramsPCAMG prms,
               bool verbose)
{
  // Double-precision entry point; forwards to the templated implementation.
  transform_impl<double>(handle, rank_sizes, n_parts, input, components, trans_input,
                         singular_vals, mu, prms, verbose);
}
void inverse_transform(raft::handle_t& handle,
                       Matrix::RankSizePair** rank_sizes,
                       std::uint32_t n_parts,
                       Matrix::Data<float>** trans_input,
                       float* components,
                       Matrix::Data<float>** input,
                       float* singular_vals,
                       float* mu,
                       paramsPCAMG prms,
                       bool verbose)
{
  // Single-precision entry point; forwards to the templated implementation.
  inverse_transform_impl<float>(handle, rank_sizes, n_parts, trans_input, components,
                                input, singular_vals, mu, prms, verbose);
}
void inverse_transform(raft::handle_t& handle,
                       Matrix::RankSizePair** rank_sizes,
                       std::uint32_t n_parts,
                       Matrix::Data<double>** trans_input,
                       double* components,
                       Matrix::Data<double>** input,
                       double* singular_vals,
                       double* mu,
                       paramsPCAMG prms,
                       bool verbose)
{
  // Double-precision entry point; forwards to the templated implementation.
  inverse_transform_impl<double>(handle, rank_sizes, n_parts, trans_input, components,
                                 input, singular_vals, mu, prms, verbose);
}
} // namespace opg
} // namespace PCA
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/solver/cd_mg.cu | /*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "shuffle.h"
#include <cuml/linear_model/preprocess_mg.hpp>
#include <cuml/solvers/cd_mg.hpp>
#include <functions/softThres.cuh>
#include <cumlprims/opg/linalg/mv_aTb.hpp>
#include <cumlprims/opg/linalg/norm.hpp>
#include "shuffle.h"
#include <raft/core/comms.hpp>
#include <raft/core/handle.hpp>
#include <raft/linalg/add.cuh>
#include <raft/linalg/eltwise.cuh>
#include <raft/linalg/gemm.cuh>
#include <raft/linalg/multiply.cuh>
#include <raft/linalg/subtract.cuh>
#include <raft/matrix/math.cuh>
#include <raft/matrix/matrix.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <cstddef>
using namespace MLCommon;
namespace ML {
namespace CD {
namespace opg {
/**
 * Multi-node multi-GPU coordinate-descent fit for elastic-net regression.
 *
 * Maintains the residual r = y - X * w incrementally: for each feature column
 * the current contribution is added back into r, the coordinate is re-solved
 * via a distributed dot product (mv_aTb) plus soft thresholding, and the new
 * contribution is subtracted again. Rank 0 draws the feature visiting order
 * and broadcasts it so all ranks stay in lockstep.
 *
 * Fixes vs. the previous revision:
 *  - total_M was initialized from the double literal 0.0 (it is a size_t);
 *  - the convergence check divided by coef_max even when coef_max == 0.
 */
template <typename T>
void fit_impl(raft::handle_t& handle,
              std::vector<Matrix::Data<T>*>& input_data,
              Matrix::PartDescriptor& input_desc,
              std::vector<Matrix::Data<T>*>& labels,
              T* coef,
              T* intercept,
              bool fit_intercept,
              bool normalize,
              int epochs,
              T alpha,
              T l1_ratio,
              bool shuffle,
              T tol,
              cudaStream_t* streams,
              int n_streams,
              bool verbose)
{
  const auto& comm = handle.get_comms();

  std::vector<Matrix::RankSizePair*> partsToRanks = input_desc.blocksOwnedBy(comm.get_rank());

  // Total number of rows stored locally on this rank.
  size_t total_M = 0;
  for (std::size_t i = 0; i < partsToRanks.size(); i++) {
    total_M += partsToRanks[i]->size;
  }

  // pred holds one feature's contribution x_ci * w_ci; residual holds
  // r = y - X * w, maintained incrementally below.
  rmm::device_uvector<T> pred(total_M, streams[0]);
  rmm::device_uvector<T> residual(total_M, streams[0]);
  // Per-column squared norms plus the L2 penalty (CD denominators).
  rmm::device_uvector<T> squared(input_desc.N, streams[0]);
  rmm::device_uvector<T> mu_input(0, streams[0]);
  rmm::device_uvector<T> norm2_input(0, streams[0]);
  rmm::device_uvector<T> mu_labels(0, streams[0]);

  // Host-side mirror of the coefficients, updated one entry at a time.
  std::vector<T> h_coef(input_desc.N, T(0));

  if (fit_intercept) {
    // Center (and optionally normalize) the data; the statistics are reused
    // by postProcessData to recover the intercept and undo the centering.
    mu_input.resize(input_desc.N, streams[0]);
    mu_labels.resize(1, streams[0]);
    if (normalize) { norm2_input.resize(input_desc.N, streams[0]); }

    GLM::opg::preProcessData(handle,
                             input_data,
                             input_desc,
                             labels,
                             mu_input.data(),
                             mu_labels.data(),
                             norm2_input.data(),
                             fit_intercept,
                             normalize,
                             streams,
                             n_streams,
                             verbose);
  }

  // Feature visiting order: rank 0 draws the permutation and broadcasts it so
  // every rank walks the columns in the same order.
  std::vector<int> ri(input_desc.N);
  std::mt19937 g(rand());
  size_t memsize = input_desc.N * sizeof(int);
  // Pinned host buffer so the permutation can be broadcast via the comms.
  int* ri_h = (int*)malloc(memsize);
  RAFT_CUDA_TRY(cudaHostRegister(ri_h, memsize, cudaHostRegisterDefault));

  if (comm.get_rank() == 0) {
    ML::Solver::initShuffle(ri, g);
    for (std::size_t i = 0; i < input_desc.N; i++) {
      ri_h[i] = ri[i];
    }
  }
  comm.bcast(ri_h, input_desc.N, 0, streams[0]);
  comm.sync_stream(streams[0]);

  // Split the regularization into its L2 and L1 parts, scaled by the global
  // row count M.
  T l2_alpha = (1 - l1_ratio) * alpha * input_desc.M;
  alpha      = l1_ratio * alpha * input_desc.M;

  if (normalize) {
    // Normalized columns have unit norm, so the denominator is 1 + l2_alpha
    // for every feature.
    T scalar = T(1.0) + l2_alpha;
    raft::matrix::setValue(squared.data(), squared.data(), scalar, input_desc.N, streams[0]);
  } else {
    Matrix::Data<T> squared_data{squared.data(), size_t(input_desc.N)};
    LinAlg::opg::colNorm2NoSeq(handle, squared_data, input_data, input_desc, streams, n_streams);
    raft::linalg::addScalar(squared.data(), squared.data(), l2_alpha, input_desc.N, streams[0]);
  }

  // Lightweight descriptors viewing a single feature column at a time.
  std::vector<Matrix::Data<T>*> input_data_temp;
  Matrix::PartDescriptor input_desc_temp = input_desc;
  input_desc_temp.N                      = size_t(1);
  std::vector<Matrix::Data<T>*> residual_temp;
  Matrix::Data<T> coef_loc_data;

  // Initialize the residual with the labels (w starts at zero).
  T* rs = residual.data();
  for (std::size_t i = 0; i < partsToRanks.size(); i++) {
    raft::copy(rs, labels[i]->ptr, partsToRanks[i]->size, streams[0]);
    Matrix::Data<T>* rs_data = new Matrix::Data<T>();
    rs_data->ptr             = rs;
    rs_data->totalSize       = partsToRanks[i]->size;
    residual_temp.push_back(rs_data);

    Matrix::Data<T>* temp_data = new Matrix::Data<T>();
    temp_data->totalSize       = partsToRanks[i]->size;
    input_data_temp.push_back(temp_data);

    rs += partsToRanks[i]->size;
  }

  for (int i = 0; i < epochs; i++) {
    if (i > 0 && shuffle) {
      // Re-shuffle the visiting order on rank 0 and broadcast it.
      if (comm.get_rank() == 0) {
        Solver::shuffle(ri, g);
        for (std::size_t k = 0; k < input_desc.N; k++) {
          ri_h[k] = ri[k];
        }
      }
      comm.bcast(ri_h, input_desc.N, 0, streams[0]);
      comm.sync_stream(streams[0]);
    }

    T coef_max   = 0.0;
    T d_coef_max = 0.0;
    T coef_prev  = 0.0;

    for (std::size_t j = 0; j < input_desc.N; j++) {
      int ci          = ri_h[j];
      T* coef_loc     = coef + ci;
      T* squared_loc  = squared.data() + ci;
      T* input_col_loc;
      T* pred_loc     = pred.data();
      T* residual_loc = residual.data();

      // Add the current feature's contribution back into the residual:
      // r += x_ci * w_ci (per local partition, round-robined over streams).
      for (std::size_t k = 0; k < input_data.size(); k++) {
        input_col_loc                 = input_data[k]->ptr + (ci * partsToRanks[k]->size);
        input_data_temp[k]->ptr       = input_col_loc;
        input_data_temp[k]->totalSize = partsToRanks[k]->size;

        raft::linalg::multiplyScalar(
          pred_loc, input_col_loc, h_coef[ci], partsToRanks[k]->size, streams[k % n_streams]);
        raft::linalg::add(
          residual_loc, residual_loc, pred_loc, partsToRanks[k]->size, streams[k % n_streams]);

        pred_loc     = pred_loc + partsToRanks[k]->size;
        residual_loc = residual_loc + partsToRanks[k]->size;
      }
      for (int k = 0; k < n_streams; k++) {
        handle.sync_stream(streams[k]);
      }

      // w_ci = soft_threshold(x_ci^T r, alpha) / (||x_ci||^2 + l2_alpha)
      coef_loc_data.ptr       = coef_loc;
      coef_loc_data.totalSize = size_t(1);
      LinAlg::opg::mv_aTb(
        handle, coef_loc_data, input_data_temp, input_desc_temp, residual_temp, streams, n_streams);

      if (l1_ratio > T(0.0)) Functions::softThres(coef_loc, coef_loc, alpha, 1, streams[0]);

      raft::linalg::eltwiseDivideCheckZero(coef_loc, coef_loc, squared_loc, 1, streams[0]);

      coef_prev = h_coef[ci];
      raft::update_host(&(h_coef[ci]), coef_loc, 1, streams[0]);
      handle.sync_stream(streams[0]);

      // Track the largest coefficient and largest update for convergence.
      T diff = abs(coef_prev - h_coef[ci]);
      if (diff > d_coef_max) d_coef_max = diff;
      if (abs(h_coef[ci]) > coef_max) coef_max = abs(h_coef[ci]);

      // Remove the updated contribution again: r -= x_ci * w_ci.
      pred_loc     = pred.data();
      residual_loc = residual.data();
      for (std::size_t k = 0; k < input_data.size(); k++) {
        input_col_loc = input_data[k]->ptr + (ci * partsToRanks[k]->size);
        raft::linalg::multiplyScalar(
          pred_loc, input_col_loc, h_coef[ci], partsToRanks[k]->size, streams[k % n_streams]);
        raft::linalg::subtract(
          residual_loc, residual_loc, pred_loc, partsToRanks[k]->size, streams[k % n_streams]);
        pred_loc     = pred_loc + partsToRanks[k]->size;
        residual_loc = residual_loc + partsToRanks[k]->size;
      }
      for (int k = 0; k < n_streams; k++) {
        handle.sync_stream(streams[k]);
      }
    }

    // Stop when all coefficients are zero, or when the largest update
    // relative to the largest coefficient drops below tol. The else-if guard
    // avoids dividing by zero when coef_max == 0.
    bool flag_continue = true;
    if (coef_max == T(0)) {
      flag_continue = false;
    } else if ((d_coef_max / coef_max) < tol) {
      flag_continue = false;
    }
    if (!flag_continue) { break; }
  }

  RAFT_CUDA_TRY(cudaHostUnregister(ri_h));
  free(ri_h);

  for (std::size_t i = 0; i < partsToRanks.size(); i++) {
    delete residual_temp[i];
    delete input_data_temp[i];
  }

  if (fit_intercept) {
    // Recover the intercept and undo the centering/normalization in place.
    GLM::opg::postProcessData(handle,
                              input_data,
                              input_desc,
                              labels,
                              coef,
                              intercept,
                              mu_input.data(),
                              mu_labels.data(),
                              norm2_input.data(),
                              fit_intercept,
                              normalize,
                              streams,
                              n_streams,
                              verbose);
  } else {
    *intercept = T(0);
  }
}
/**
 * @brief performs MNMG fit operation for elastic-net regression using
 * coordinate descent
 * @input param handle: the internal cuml handle object
 * @input param input_data: input data partitions owned by this rank
 * @input param input_desc: descriptor of the input data partitioning
 * @input param labels: labels data
 * @output param coef: learned regression coefficients
 * @output param intercept: intercept value
 * @input param fit_intercept: fit intercept or not
 * @input param normalize: normalize the data or not
 * @input param epochs: maximum number of passes over the features
 * @input param alpha: regularization strength
 * @input param l1_ratio: mix of L1 vs L2 regularization (1.0 = pure lasso)
 * @input param shuffle: whether to visit features in random order each epoch
 * @input param tol: early-stopping tolerance on coefficient change
 * @input param verbose
*/
/**
 * Stream-creating wrapper: sets up one CUDA stream per data block owned by
 * this rank, runs the stream-aware fit_impl, then synchronizes and destroys
 * the streams. Fixes a stray empty statement and replaces the non-standard
 * variable-length array with std::vector.
 */
template <typename T>
void fit_impl(raft::handle_t& handle,
              std::vector<Matrix::Data<T>*>& input_data,
              Matrix::PartDescriptor& input_desc,
              std::vector<Matrix::Data<T>*>& labels,
              T* coef,
              T* intercept,
              bool fit_intercept,
              bool normalize,
              int epochs,
              T alpha,
              T l1_ratio,
              bool shuffle,
              T tol,
              bool verbose)
{
  int rank = handle.get_comms().get_rank();

  // TODO: These streams should come from raft::handle_t
  // Tracking issue: https://github.com/rapidsai/cuml/issues/2470
  int n_streams = input_desc.blocksOwnedBy(rank).size();
  std::vector<cudaStream_t> streams(n_streams);
  for (int i = 0; i < n_streams; i++) {
    RAFT_CUDA_TRY(cudaStreamCreate(&streams[i]));
  }

  fit_impl(handle,
           input_data,
           input_desc,
           labels,
           coef,
           intercept,
           fit_intercept,
           normalize,
           epochs,
           alpha,
           l1_ratio,
           shuffle,
           tol,
           streams.data(),
           n_streams,
           verbose);

  for (int i = 0; i < n_streams; i++) {
    handle.sync_stream(streams[i]);
  }

  for (int i = 0; i < n_streams; i++) {
    RAFT_CUDA_TRY(cudaStreamDestroy(streams[i]));
  }
}
/**
 * Computes preds[i] = input[i] * coef + intercept for every local partition,
 * round-robining the GEMM + scalar-add over the available streams.
 */
template <typename T>
void predict_impl(raft::handle_t& handle,
                  std::vector<Matrix::Data<T>*>& input_data,
                  Matrix::PartDescriptor& input_desc,
                  T* coef,
                  T intercept,
                  std::vector<Matrix::Data<T>*>& preds,
                  cudaStream_t* streams,
                  int n_streams,
                  bool verbose)
{
  std::vector<Matrix::RankSizePair*> local_blocks = input_desc.partsToRanks;

  const T one  = T(1);
  const T zero = T(0);
  for (std::size_t i = 0; i < input_data.size(); i++) {
    cudaStream_t work_stream = streams[i % n_streams];
    auto n_rows_block        = local_blocks[i]->size;

    // preds = input * coef (column vector of length input_desc.N)
    raft::linalg::gemm(handle,
                       input_data[i]->ptr,
                       n_rows_block,
                       input_desc.N,
                       coef,
                       preds[i]->ptr,
                       n_rows_block,
                       size_t(1),
                       CUBLAS_OP_N,
                       CUBLAS_OP_N,
                       one,
                       zero,
                       work_stream);

    // preds += intercept
    raft::linalg::addScalar(preds[i]->ptr, preds[i]->ptr, intercept, n_rows_block, work_stream);
  }
}
/**
 * Pointer-based entry point: wraps raw partition arrays into the vector-based
 * overload, creating one CUDA stream per partition. Replaces the non-standard
 * variable-length array with std::vector.
 */
template <typename T>
void predict_impl(raft::handle_t& handle,
                  Matrix::RankSizePair** rank_sizes,
                  size_t n_parts,
                  Matrix::Data<T>** input,
                  size_t n_rows,
                  size_t n_cols,
                  T* coef,
                  T intercept,
                  Matrix::Data<T>** preds,
                  bool verbose)
{
  int rank = handle.get_comms().get_rank();

  std::vector<Matrix::RankSizePair*> ranksAndSizes(rank_sizes, rank_sizes + n_parts);
  std::vector<Matrix::Data<T>*> input_data(input, input + n_parts);
  Matrix::PartDescriptor input_desc(n_rows, n_cols, ranksAndSizes, rank);
  std::vector<Matrix::Data<T>*> preds_data(preds, preds + n_parts);

  // TODO: These streams should come from raft::handle_t
  // Tracking issue: https://github.com/rapidsai/cuml/issues/2470
  int n_streams = n_parts;
  std::vector<cudaStream_t> streams(n_streams);
  for (int i = 0; i < n_streams; i++) {
    RAFT_CUDA_TRY(cudaStreamCreate(&streams[i]));
  }

  predict_impl(
    handle, input_data, input_desc, coef, intercept, preds_data, streams.data(), n_streams, verbose);

  for (int i = 0; i < n_streams; i++) {
    handle.sync_stream(streams[i]);
  }

  for (int i = 0; i < n_streams; i++) {
    RAFT_CUDA_TRY(cudaStreamDestroy(streams[i]));
  }
}
void fit(raft::handle_t& handle,
         std::vector<Matrix::Data<float>*>& input_data,
         Matrix::PartDescriptor& input_desc,
         std::vector<Matrix::Data<float>*>& labels,
         float* coef,
         float* intercept,
         bool fit_intercept,
         bool normalize,
         int epochs,
         float alpha,
         float l1_ratio,
         bool shuffle,
         float tol,
         bool verbose)
{
  // Single-precision entry point; forwards to the templated implementation.
  fit_impl<float>(handle, input_data, input_desc, labels, coef, intercept, fit_intercept,
                  normalize, epochs, alpha, l1_ratio, shuffle, tol, verbose);
}
void fit(raft::handle_t& handle,
         std::vector<Matrix::Data<double>*>& input_data,
         Matrix::PartDescriptor& input_desc,
         std::vector<Matrix::Data<double>*>& labels,
         double* coef,
         double* intercept,
         bool fit_intercept,
         bool normalize,
         int epochs,
         double alpha,
         double l1_ratio,
         bool shuffle,
         double tol,
         bool verbose)
{
  // Double-precision entry point; forwards to the templated implementation.
  fit_impl<double>(handle, input_data, input_desc, labels, coef, intercept, fit_intercept,
                   normalize, epochs, alpha, l1_ratio, shuffle, tol, verbose);
}
void predict(raft::handle_t& handle,
             Matrix::RankSizePair** rank_sizes,
             size_t n_parts,
             Matrix::Data<float>** input,
             size_t n_rows,
             size_t n_cols,
             float* coef,
             float intercept,
             Matrix::Data<float>** preds,
             bool verbose)
{
  // Single-precision entry point; forwards to the templated implementation.
  predict_impl<float>(handle, rank_sizes, n_parts, input, n_rows, n_cols, coef,
                      intercept, preds, verbose);
}
void predict(raft::handle_t& handle,
             Matrix::RankSizePair** rank_sizes,
             size_t n_parts,
             Matrix::Data<double>** input,
             size_t n_rows,
             size_t n_cols,
             double* coef,
             double intercept,
             Matrix::Data<double>** preds,
             bool verbose)
{
  // Double-precision entry point; forwards to the templated implementation.
  predict_impl<double>(handle, rank_sizes, n_parts, input, n_rows, n_cols, coef,
                       intercept, preds, verbose);
}
} // namespace opg
} // namespace CD
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/solver/learning_rate.h | /*
* Copyright (c) 2018-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuml/solvers/params.hpp>
#include <math.h>
namespace ML {
namespace Solver {
/**
 * @brief Returns the larger of a and b (equivalent to std::max).
 */
template <typename math_t>
math_t max(math_t a, math_t b)
{
  // Fixed: removed a stray empty statement after the return.
  return (a < b) ? b : a;
}
/**
 * @brief Inverse-scaling learning-rate schedule: eta / t^power_t.
 */
template <typename math_t>
math_t invScaling(math_t eta, math_t power_t, int t)
{
  const math_t decay = pow(t, power_t);
  return eta / decay;
}
/**
 * @brief Difference term a - b used by the optimal-rate initialization.
 */
template <typename math_t>
math_t regDLoss(math_t a, math_t b)
{
  const math_t diff = a - b;
  return diff;
}
/**
 * @brief Computes the initial counter value for the 'optimal' learning-rate
 * schedule from the regularization strength alpha.
 */
template <typename math_t>
math_t calOptimalInit(math_t alpha)
{
  const math_t typw = sqrt(math_t(1.0) / sqrt(alpha));
  const math_t eta0 = typw / max(math_t(1.0), regDLoss(-typw, math_t(1.0)));
  return math_t(1.0) / (eta0 * alpha);
}
/**
 * @brief 'Optimal' learning-rate schedule: 1 / (alpha * (optimal_init + t - 1)).
 */
template <typename math_t>
math_t optimal(math_t alpha, math_t optimal_init, int t)
{
  const math_t denom = alpha * (optimal_init + t - 1);
  return math_t(1.0) / denom;
}
/**
 * @brief Dispatches to the learning-rate schedule selected by lr_type.
 * Returns 0 for unrecognized schedule types (e.g. adaptive).
 */
template <typename math_t>
math_t calLearningRate(ML::lr_type lr_type, math_t eta, math_t power_t, math_t alpha, math_t t)
{
  switch (lr_type) {
    case ML::lr_type::CONSTANT: return eta;
    case ML::lr_type::INVSCALING: return invScaling(eta, power_t, t);
    case ML::lr_type::OPTIMAL: return optimal(alpha, eta, t);
    default: return math_t(0);
  }
}
}; // namespace Solver
}; // namespace ML
// end namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/solver/lars_impl.cuh | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <iostream>
#include <limits>
#include <numeric>
#include <vector>
#include <cub/cub.cuh>
#include <cuml/common/logger.hpp>
#include <raft/core/handle.hpp>
#include <raft/linalg/add.cuh>
#include <raft/linalg/cholesky_r1_update.cuh>
#include <raft/util/cache_util.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
// #TODO: Replace with public header when ready
#include <raft/linalg/detail/cublas_wrappers.hpp>
#include <raft/linalg/gemv.cuh>
#include <raft/linalg/map_then_reduce.cuh>
#include <raft/linalg/unary_op.cuh>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/extrema.h>
#include <thrust/fill.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
namespace ML {
namespace Solver {
namespace Lars {
// Status returned by the LARS fit helpers: kError is produced on non-finite
// correlations and kStop when the tolerance limit is reached (see
// selectMostCorrelated); kOk means training continues. kCollinear presumably
// flags a feature collinear with the active set — its producer is not in view.
enum class LarsFitStatus { kOk, kCollinear, kError, kStop };
/**
* @brief Select the largest element from the inactive working set.
*
* The inactive set consist of cor[n_active..n-1]. This function returns the
* index of the most correlated element. The value of the largest element is
* returned in cj.
*
* The correlation value is checked for numeric error and convergence, and the
* return status indicates whether training should continue.
*
* @param n_active number of active elements (n_active <= n )
* @param n number of elements in vector cor
* @param correlation device array of correlations, size [n]
* @param cj host pointer to return the value of the largest element
 * @param workspace buffer, size >= n_cols
 * @param max_idx host pointer the index of the max correlation is returned here
 * @param n_rows number of training rows (used to scale the tolerance check)
* @param indices host pointer of feature column indices, size [n_cols]
* @param n_iter iteration counter
* @param stream CUDA stream
*
* @return fit status
*/
template <typename math_t, typename idx_t = int>
LarsFitStatus selectMostCorrelated(idx_t n_active,
                                   idx_t n,
                                   math_t* correlation,
                                   math_t* cj,
                                   rmm::device_uvector<math_t>& workspace,
                                   idx_t* max_idx,
                                   idx_t n_rows,
                                   idx_t* indices,
                                   idx_t n_iter,
                                   cudaStream_t stream)
{
  const idx_t align_bytes = 16 * sizeof(math_t);
  // We might need to start a few elements earlier to ensure that the unary
  // op has aligned access for vectorized load.
  int start = raft::alignDown<idx_t>(n_active, align_bytes) / sizeof(math_t);
  // Take |cor| over the inactive tail, then find its maximum on the device.
  raft::linalg::unaryOp(
    workspace.data(), correlation + start, n, [] __device__(math_t a) { return abs(a); }, stream);
  thrust::device_ptr<math_t> ptr(workspace.data() + n_active - start);
  auto max_ptr = thrust::max_element(thrust::cuda::par.on(stream), ptr, ptr + n - n_active);
  raft::update_host(cj, max_ptr.get(), 1, stream);
  raft::interruptible::synchronize(stream);

  *max_idx = n_active + (max_ptr - ptr);  // the index of the maximum element

  CUML_LOG_DEBUG(
    "Iteration %d, selected feature %d with correlation %f", n_iter, indices[*max_idx], *cj);

  if (!std::isfinite(*cj)) {
    CUML_LOG_ERROR("Correlation is not finite, aborting.");
    return LarsFitStatus::kError;
  }

  // Tolerance for early stopping. Note we intentionally use here fp32 epsilon,
  // otherwise the tolerance is too small (which could result in numeric error
  // in Cholesky rank one update if eps < 0, or exploding regression parameters
  // if eps > 0).
  const math_t tolerance = std::numeric_limits<float>::epsilon();
  if (abs(*cj) / n_rows < tolerance) {
    // Fixed typo in the log message ("tolarence" -> "tolerance").
    CUML_LOG_WARN("Reached tolerance limit with %e", abs(*cj));
    return LarsFitStatus::kStop;
  }

  return LarsFitStatus::kOk;
}
/**
* @brief Swap two feature vectors.
*
 * The function swaps feature columns j and k or the corresponding rows
 * and columns of the Gram matrix. The elements of the cor and indices arrays
* are also swapped.
*
* @param handle cuBLAS handle
* @param j column index
* @param k column index
* @param X device array of feature vectors in column major format, size
* [n_cols * ld_X]
* @param n_rows number of training vectors
* @param n_cols number of features
* @param ld_X leading dimension of X
* @param cor device array of correlations, size [n_cols]
* @param indices host array of indices, size [n_cols]
* @param G device pointer of Gram matrix (or nullptr), size [n_cols * ld_G]
* @param ld_G leading dimension of G
* @param stream CUDA stream
*/
template <typename math_t, typename idx_t = int>
void swapFeatures(cublasHandle_t handle,
                  idx_t j,
                  idx_t k,
                  math_t* X,
                  idx_t n_rows,
                  idx_t n_cols,
                  idx_t ld_X,
                  math_t* cor,
                  idx_t* indices,
                  math_t* G,
                  idx_t ld_G,
                  cudaStream_t stream)
{
  // Keep the host-side bookkeeping of original feature indices in sync with
  // the device-side permutation performed below.
  std::swap(indices[j], indices[k]);
  if (G) {
    // Swap both columns j,k and rows j,k of G, so the Gram matrix stays
    // symmetric and consistent with the permuted feature order.
    // #TODO: Call from public API when ready
    RAFT_CUBLAS_TRY(
      raft::linalg::detail::cublasSwap(handle, n_cols, G + ld_G * j, 1, G + ld_G * k, 1, stream));
    // #TODO: Call from public API when ready
    RAFT_CUBLAS_TRY(
      raft::linalg::detail::cublasSwap(handle, n_cols, G + j, ld_G, G + k, ld_G, stream));
  } else {
    // Only swap X if G is nullptr. Only in that case will we use the feature
    // columns, otherwise all the necessary information is already there in G.
    // #TODO: Call from public API when ready
    RAFT_CUBLAS_TRY(
      raft::linalg::detail::cublasSwap(handle, n_rows, X + ld_X * j, 1, X + ld_X * k, 1, stream));
  }
  // swap (c[j], c[k]) so correlations follow their features
  // #TODO: Call from public API when ready
  RAFT_CUBLAS_TRY(raft::linalg::detail::cublasSwap(handle, 1, cor + j, 1, cor + k, 1, stream));
}
/**
* @brief Move feature at idx=j into the active set.
*
* We have an active set with n_active elements, and an inactive set with
* n_valid_cols - n_active elements. The matrix X [n_samples, n_features] is
* partitioned in a way that the first n_active columns store the active set.
* Similarly the vectors correlation and indices are partitioned in a way
* that the first n_active elements belong to the active set:
* - active set: X[:,:n_active], correlation[:n_active], indices[:n_active]
* - inactive set: X[:,n_active:], correlation[n_active:], indices[n_active:].
*
* This function moves the feature column X[:,idx] into the active set by
* replacing the first inactive element with idx. The indices and correlation
* vectors are modified accordingly. The sign array is updated with the sign
* of correlation[n_active].
*
* @param handle cuBLAS handle
* @param n_active number of active elements, will be increased by one after
* we move the new element j into the active set
* @param j index of the new element (n_active <= j < n_cols)
* @param X device array of feature vectors in column major format, size
* [n_cols * ld_X]
* @param n_rows number of training vectors
* @param n_cols number of valid features columns (ignoring those features which
* are detected to be collinear with the active set)
* @param ld_X leading dimension of X
* @param cor device array of correlations, size [n_cols]
* @param indices host array of indices, size [n_cols]
* @param G device pointer of Gram matrix (or nullptr), size [n_cols * ld_G]
* @param ld_G leading dimension of G
* @param sign device pointer to sign array, size[n]
* @param stream CUDA stream
*/
template <typename math_t, typename idx_t = int>
void moveToActive(cublasHandle_t handle,
                  idx_t* n_active,
                  idx_t j,
                  math_t* X,
                  idx_t n_rows,
                  idx_t n_cols,
                  idx_t ld_X,
                  math_t* cor,
                  idx_t* indices,
                  math_t* G,
                  idx_t ld_G,
                  math_t* sign,
                  cudaStream_t stream)
{
  // The first free slot of the active partition is at position *n_active;
  // bring feature j there (swapping X or G, cor and indices accordingly).
  const idx_t slot = *n_active;
  swapFeatures(handle, slot, j, X, n_rows, n_cols, ld_X, cor, indices, G, ld_G, stream);

  // Record the sign (-1, 0 or +1) of the correlation of the newly activated
  // feature: sign[slot] = sgn(cor[slot]).
  auto sign_op = [] __device__(math_t c) -> math_t {
    return (math_t(0) < c) - (c < math_t(0));
  };
  raft::linalg::unaryOp(sign + slot, cor + slot, 1, sign_op, stream);

  // The active set has grown by one element.
  *n_active = slot + 1;
}
/**
* @brief Update the Cholesky decomposition of the Gram matrix of the active set
*
* G0 = X.T * X, Gram matrix without signs. We use the part that corresponds to
* the active set, [n_A x n_A]
*
* At each step on the LARS path we add one column to the active set, therefore
* the Gram matrix grows incrementally. We update the Cholesky decomposition
* G0 = U.T * U.
*
* The Cholesky decomposition can use the same storage as G0, if the input
* pointers are same.
*
* @param handle RAFT handle
* @param n_active number of active elements
* @param X device array of feature vectors in column major format, size
* [n_rows * n_cols]
* @param n_rows number of training vectors
* @param n_cols number of features
* @param ld_X leading dimension of X (stride of columns)
* @param U device pointer to the Cholesky decomposition of G0,
* size [n_cols * ld_U]
* @param ld_U leading dimension of U
* @param G0 device pointer to Gram matrix G0 = X.T*X (can be nullptr),
* size [n_cols * ld_G].
* @param ld_G leading dimension of G
* @param workspace workspace for the Cholesky update
 * @param eps numerical regularization parameter for choleskyRank1Update
* @param stream CUDA stream
*/
template <typename math_t, typename idx_t = int>
void updateCholesky(const raft::handle_t& handle,
                    idx_t n_active,
                    const math_t* X,
                    idx_t n_rows,
                    idx_t n_cols,
                    idx_t ld_X,
                    math_t* U,
                    idx_t ld_U,
                    const math_t* G0,
                    idx_t ld_G,
                    rmm::device_uvector<math_t>& workspace,
                    math_t eps,
                    cudaStream_t stream)
{
  const cublasFillMode_t fillmode = CUBLAS_FILL_MODE_UPPER;
  if (G0 == nullptr) {
    // No precomputed Gram matrix: compute the new column of G0 (inner product
    // of the newest active column with all columns of X) and store it in U.
    math_t* G_row       = U + (n_active - 1) * ld_U;
    const math_t* X_row = X + (n_active - 1) * ld_X;
    math_t one          = 1;
    math_t zero         = 0;
    // G_row = X.T * X_row. Note that the leading dimension of X is ld_X, not
    // n_rows: passing n_rows here reads the wrong elements whenever
    // ld_X > n_rows (ld_X is an explicit parameter precisely because the two
    // may differ).
    // #TODO: Call from public API when ready
    RAFT_CUBLAS_TRY(raft::linalg::detail::cublasgemv(handle.get_cublas_handle(),
                                                     CUBLAS_OP_T,
                                                     n_rows,
                                                     n_cols,
                                                     &one,
                                                     X,
                                                     ld_X,
                                                     X_row,
                                                     1,
                                                     &zero,
                                                     G_row,
                                                     1,
                                                     stream));
  } else if (G0 != U) {
    // Copy the new column of G0 into U, because the factorization works in
    // place.
    raft::copy(U + (n_active - 1) * ld_U, G0 + (n_active - 1) * ld_G, n_active, stream);
  }  // Otherwise the new data is already in place in U.

  // Update the Cholesky decomposition with the newly appended column.
  int n_work = workspace.size();
  if (n_work == 0) {
    // Query workspace size and allocate it (only on the first call; the
    // buffer is reused on subsequent iterations).
    raft::linalg::choleskyRank1Update(
      handle, U, n_active, ld_U, nullptr, &n_work, fillmode, stream);
    workspace.resize(n_work, stream);
  }
  raft::linalg::choleskyRank1Update(
    handle, U, n_active, ld_U, workspace.data(), &n_work, fillmode, stream, eps);
}
/**
* @brief Solve for ws = S * GA^(-1) * 1_A using a Cholesky decomposition.
*
* See calcEquiangularVec for more details on the formulas. In this function we
* calculate ws = S * (S * G0 * S)^{-1} 1_A = G0^{-1} (S 1_A) = G0^{-1} sign_A.
*
* @param handle RAFT handle
* @param n_active number of active elements
* @param n_cols number of features
* @param sign array with sign of the active set, size [n_cols]
* @param U device pointer to the Cholesky decomposition of G0,
* size [n_cols * n_cols]
* @param ld_U leading dimension of U (column stride)
* @param ws device pointer, size [n_active]
* @param stream CUDA stream
*/
template <typename math_t, typename idx_t = int>
void calcW0(const raft::handle_t& handle,
            idx_t n_active,
            idx_t n_cols,
            const math_t* sign,
            const math_t* U,
            idx_t ld_U,
            math_t* ws,
            cudaStream_t stream)
{
  const cublasFillMode_t fillmode = CUBLAS_FILL_MODE_UPPER;
  // First we calculate x by solving equation U.T x = sign_A.
  // The solve works in place, so seed ws with the right-hand side.
  raft::copy(ws, sign, n_active, stream);
  math_t alpha = 1;
  // Triangular solve with the transposed factor (forward substitution).
  // ws is a single column (nrhs = 1); ld_U is a valid leading dimension for
  // it since ld_U >= n_active.
  // #TODO: Call from public API when ready
  RAFT_CUBLAS_TRY(raft::linalg::detail::cublastrsm(handle.get_cublas_handle(),
                                                   CUBLAS_SIDE_LEFT,
                                                   fillmode,
                                                   CUBLAS_OP_T,
                                                   CUBLAS_DIAG_NON_UNIT,
                                                   n_active,
                                                   1,
                                                   &alpha,
                                                   U,
                                                   ld_U,
                                                   ws,
                                                   ld_U,
                                                   stream));
  // ws stores x, the solution of U.T x = sign_A. Now we solve U * ws = x
  // (back substitution), yielding ws = (U.T U)^{-1} sign_A = G0^{-1} sign_A.
  // #TODO: Call from public API when ready
  RAFT_CUBLAS_TRY(raft::linalg::detail::cublastrsm(handle.get_cublas_handle(),
                                                   CUBLAS_SIDE_LEFT,
                                                   fillmode,
                                                   CUBLAS_OP_N,
                                                   CUBLAS_DIAG_NON_UNIT,
                                                   n_active,
                                                   1,
                                                   &alpha,
                                                   U,
                                                   ld_U,
                                                   ws,
                                                   ld_U,
                                                   stream));
  // Now ws = G0^(-1) sign_A = S GA^{-1} 1_A.
}
/**
* @brief Calculate A = (1_A * GA^{-1} * 1_A)^{-1/2}.
*
* See calcEquiangularVec for more details on the formulas.
*
* @param handle RAFT handle
* @param A device pointer to store the result
* @param n_active number of active elements
* @param sign array with sign of the active set, size [n_cols]
* @param ws device pointer, size [n_active]
* @param stream CUDA stream
*/
template <typename math_t, typename idx_t = int>
void calcA(const raft::handle_t& handle,
           math_t* A,
           idx_t n_active,
           const math_t* sign,
           const math_t* ws,
           cudaStream_t stream)
{
  // Accumulate sum_w = sum_i ws[i] * sign[i] into the device scalar A.
  auto elementwise_prod = [] __device__(math_t w, math_t s) { return w * s; };
  raft::linalg::mapThenSumReduce(A, n_active, elementwise_prod, stream, ws, sign);
  // Replace it in place with A = 1 / sqrt(sum_w), see eq (2.5) in
  // Efron et al. (2004).
  auto inv_sqrt = [] __device__(math_t a) { return 1 / sqrt(a); };
  raft::linalg::unaryOp(A, A, 1, inv_sqrt, stream);
}
/**
* @brief Calculate the equiangular vector u, w and A according to [1].
*
* We introduce the following variables (Python like indexing):
* - n_A number of elements in the active set
* - S = diag(sign_A): diagonal matrix with the signs, size [n_A x n_A]
* - X_A = X[:,:n_A] * S, column vectors of the active set size [n_A x n_A]
* - G0 = X.T * X, Gram matrix without signs. We just use the part that
* corresponds to the active set, [n_A x n_A]
* - GA = X_A.T * X_A is the Gram matrix of the active set, size [n_A x n_A]
* GA = S * G0[:n_A, :n_A] * S
* - 1_A = np.ones(n_A)
* - A = (1_A * GA^{-1} * 1_A)^{-1/2}, scalar, see eq (2.5) in [1]
* - w = A GA^{-1} * 1_A, vector of size [n_A] see eq (2.6) in [1]
* - ws = S * w, vector of size [n_A]
*
* The equiangular vector can be expressed the following way (equation 2.6):
* u = X_A * w = X[:,:n_A] S * w = X[:,:n_A] * ws.
*
* The equiangular vector later appears only in an expression like X.T u, which
* can be reformulated as X.T u = X.T X[:,:n_A] S * w = G[:n_A,:n_A] * ws.
* If the gram matrix is given, then we do not need to calculate u, it will be
* sufficient to calculate ws and A.
*
* We use Cholesky decomposition G0 = U.T * U to solve to calculate A and w
* which depend on GA^{-1}.
*
* References:
* [1] B. Efron, T. Hastie, I. Johnstone, R Tibshirani, Least Angle Regression
* The Annals of Statistics (2004) Vol 32, No 2, 407-499
* http://statweb.stanford.edu/~tibs/ftp/lars.pdf
*
* @param handle RAFT handle
* @param n_active number of active elements
* @param X device array of feature vectors in column major format, size
* [ld_X * n_cols]
* @param n_rows number of training vectors
* @param n_cols number of features
* @param ld_X leading dimension of array X (column stride, ld_X >= n_rows)
* @param sign array with sign of the active set, size [n_cols]
* @param U device pointer to the Cholesky decomposition of G0,
* size [ld_U * n_cols]
* @param ld_U leading dimension of array U (ld_U >= n_cols)
* @param G0 device pointer to Gram matrix G0 = X.T*X (can be nullptr),
* size [ld_G * n_cols]. Note the difference between G0 and
* GA = X_A.T * X_A
* @param ld_G leading dimension of array G0 (ld_G >= n_cols)
* @param workspace workspace for the Cholesky update
* @param ws device pointer, size [n_active]
* @param A device pointer to a scalar
* @param u_eq device pointer to the equiangular vector, only used if
* Gram==nullptr, size [n_rows].
 * @param eps numerical regularization parameter for the Cholesky decomposition
* @param stream CUDA stream
*
* @return fit status
*/
template <typename math_t, typename idx_t = int>
LarsFitStatus calcEquiangularVec(const raft::handle_t& handle,
                                 idx_t n_active,
                                 math_t* X,
                                 idx_t n_rows,
                                 idx_t n_cols,
                                 idx_t ld_X,
                                 math_t* sign,
                                 math_t* U,
                                 idx_t ld_U,
                                 math_t* G0,
                                 idx_t ld_G,
                                 rmm::device_uvector<math_t>& workspace,
                                 math_t* ws,
                                 math_t* A,
                                 math_t* u_eq,
                                 math_t eps,
                                 cudaStream_t stream)
{
  // Since we added a new vector to the active set, we update the Cholesky
  // decomposition (U)
  updateCholesky(
    handle, n_active, X, n_rows, n_cols, ld_X, U, ld_U, G0, ld_G, workspace, eps, stream);
  // Calculate ws = S GA^{-1} 1_A using U
  calcW0(handle, n_active, n_cols, sign, U, ld_U, ws, stream);
  // A = (1_A GA^{-1} 1_A)^{-1/2}, eq (2.5) in Efron et al. (2004)
  calcA(handle, A, n_active, sign, ws, stream);
  // ws *= Aa (A is a device scalar, hence the captured-pointer dereference)
  raft::linalg::unaryOp(
    ws, ws, n_active, [A] __device__(math_t w) { return (*A) * w; }, stream);
  // Check for numeric error: bring the first element of ws and the newest
  // diagonal element of U to the host. This synchronizes the stream.
  math_t ws_host;
  raft::update_host(&ws_host, ws, 1, stream);
  math_t diag_host;  // U[n_active-1, n_active-1]
  raft::update_host(&diag_host, U + ld_U * (n_active - 1) + n_active - 1, 1, stream);
  handle.sync_stream(stream);
  // Heuristic threshold: a (near) zero diagonal element means the newest
  // column is (almost) a linear combination of the active set.
  if (diag_host < 1e-7) {
    CUML_LOG_WARN(
      "Vanising diagonal in Cholesky factorization (%e). This indicates "
      "collinear features. Dropping current regressor.",
      diag_host);
    return LarsFitStatus::kCollinear;
  }
  if (!std::isfinite(ws_host)) {
    CUML_LOG_WARN("ws=%f is not finite at iteration %d", ws_host, n_active);
    return LarsFitStatus::kError;
  }
  if (G0 == nullptr) {
    // Calculate u_eq only in the case if the Gram matrix is not stored.
    // u_eq = X[:, :n_active] * ws, the equiangular vector of eq (2.6).
    math_t one  = 1;
    math_t zero = 0;
    // #TODO: Call from public API when ready
    RAFT_CUBLAS_TRY(raft::linalg::detail::cublasgemv(handle.get_cublas_handle(),
                                                     CUBLAS_OP_N,
                                                     n_rows,
                                                     n_active,
                                                     &one,
                                                     X,
                                                     ld_X,
                                                     ws,
                                                     1,
                                                     &zero,
                                                     u_eq,
                                                     1,
                                                     stream));
  }
  return LarsFitStatus::kOk;
}
/**
* @brief Calculate the maximum step size (gamma) in the equiangular direction.
*
* Let mu = X beta.T be the current prediction vector. The modified solution
* after taking step gamma is defined as mu' = mu + gamma u. With this
* solution the correlation of the covariates in the active set will decrease
* equally, to a new value |c_j(gamma)| = Cmax - gamma A. At the same time
* the correlation of the values in the inactive set changes according to the
* following formula: c_j(gamma) = c_j - gamma a_j. We increase gamma until
* one of correlations from the inactive set becomes equal with the
* correlation from the active set.
*
* References:
* [1] B. Efron, T. Hastie, I. Johnstone, R Tibshirani, Least Angle Regression
* The Annals of Statistics (2004) Vol 32, No 2, 407-499
* http://statweb.stanford.edu/~tibs/ftp/lars.pdf
*
* @param handle RAFT handle
* @param max_iter maximum number of iterations
* @param n_rows number of samples
* @param n_cols number of valid feature columns
* @param n_active size of the active set (n_active <= max_iter <= n_cols)
* @param cj value of the maximum correlation
* @param A device pointer to a scalar, as defined by eq 2.5 in [1]
* @param cor device pointer to correlation vector, size [n_active]
* @param G device pointer to Gram matrix of the active set (without signs)
* size [n_active * ld_G]
* @param ld_G leading dimension of G (ld_G >= n_cols)
* @param X device array of training vectors in column major format,
* size [n_rows * n_cols]. Only used if the gram matrix is not available.
* @param ld_X leading dimension of X (ld_X >= n_rows)
* @param u device pointer to equiangular vector size [n_rows]. Only used if the
* Gram matrix G is not available.
* @param ws device pointer to the ws vector defined in calcEquiangularVec,
* size [n_active]
* @param gamma device pointer to a scalar. The max step size is returned here.
* @param a_vec device pointer, size [n_cols]
* @param stream CUDA stream
*/
template <typename math_t, typename idx_t = int>
void calcMaxStep(const raft::handle_t& handle,
                 idx_t max_iter,
                 idx_t n_rows,
                 idx_t n_cols,
                 idx_t n_active,
                 math_t cj,
                 const math_t* A,
                 math_t* cor,
                 const math_t* G,
                 idx_t ld_G,
                 const math_t* X,
                 idx_t ld_X,
                 const math_t* u,
                 const math_t* ws,
                 math_t* gamma,
                 math_t* a_vec,
                 cudaStream_t stream)
{
  // In the active set each element has the same correlation, whose absolute
  // value is given by Cmax.
  math_t Cmax = std::abs(cj);
  if (n_active == n_cols) {
    // Last iteration, the inactive set is empty we use equation (2.21):
    // gamma = Cmax / A (A is a device scalar, hence the unaryOp).
    raft::linalg::unaryOp(
      gamma, A, 1, [Cmax] __device__(math_t A) { return Cmax / A; }, stream);
  } else {
    const int n_inactive = n_cols - n_active;
    if (G == nullptr) {
      // Calculate a = X.T[:,n_active:] * u (2.11)
      math_t one  = 1;
      math_t zero = 0;
      // #TODO: Call from public API when ready
      RAFT_CUBLAS_TRY(raft::linalg::detail::cublasgemv(handle.get_cublas_handle(),
                                                       CUBLAS_OP_T,
                                                       n_rows,
                                                       n_inactive,
                                                       &one,
                                                       X + n_active * ld_X,
                                                       ld_X,
                                                       u,
                                                       1,
                                                       &zero,
                                                       a_vec,
                                                       1,
                                                       stream));
    } else {
      // Calculate a = X.T[:,n_A:] * u = X.T[:, n_A:] * X[:,:n_A] * ws
      //             = G[n_A:,:n_A] * ws (2.11)
      math_t one  = 1;
      math_t zero = 0;
      // #TODO: Call from public API when ready
      RAFT_CUBLAS_TRY(raft::linalg::detail::cublasgemv(handle.get_cublas_handle(),
                                                       CUBLAS_OP_N,
                                                       n_inactive,
                                                       n_active,
                                                       &one,
                                                       G + n_active,
                                                       ld_G,
                                                       ws,
                                                       1,
                                                       &zero,
                                                       a_vec,
                                                       1,
                                                       stream));
    }
    // tiny guards against division by zero when A == a_j; huge marks
    // non-positive candidates so they never win the min-reduction.
    const math_t tiny = std::numeric_limits<math_t>::min();
    const math_t huge = std::numeric_limits<math_t>::max();
    //
    // gamma = min^+_{j \in inactive} {(Cmax - cor_j) / (A-a_j),
    //                                 (Cmax + cor_j) / (A+a_j)}  (2.13)
    auto map = [Cmax, A, tiny, huge] __device__(math_t c, math_t a) -> math_t {
      math_t tmp1 = (Cmax - c) / (*A - a + tiny);
      math_t tmp2 = (Cmax + c) / (*A + a + tiny);
      // We consider only positive elements while we search for the minimum
      math_t val = (tmp1 > 0) ? tmp1 : huge;
      if (tmp2 > 0 && tmp2 < val) val = tmp2;
      return val;
    };
    raft::linalg::mapThenReduce(
      gamma, n_inactive, huge, map, cub::Min(), stream, cor + n_active, a_vec);
  }
}
/**
* @brief Initialize for Lars training.
*
* We calculate the initial correlation, initialize the indices array, and set
* up pointers to store the Cholesky factorization.
*
* @param handle RAFT handle
* @param X device array of training vectors in column major format,
* size [ld_X * n_cols].
* @param n_rows number of samples
* @param n_cols number of valid feature columns
* @param ld_X leading dimension of X (ld_X >= n_rows)
* @param y device pointer to regression targets, size [n_rows]
* @param Gram device pointer to Gram matrix (X.T * X), size [n_cols * ld_G],
* can be nullptr
* @param ld_G leading dimension of G (ld_G >= n_cols)
* @param U_buffer device buffer that will be initialized to store the Cholesky
* factorization. Only used if Gram is nullptr.
* @param U device pointer to U
* @param ld_U leading dimension of U
* @param indices host buffer to store feature column indices
* @param cor device pointer to correlation vector, size [n_cols]
* @param max_iter host pointer to the maximum number of iterations
* @param coef_path device pointer to store coefficients along the
* regularization path size [(max_iter + 1) * max_iter], can be nullptr
* @param stream CUDA stream
*/
template <typename math_t, typename idx_t>
void larsInit(const raft::handle_t& handle,
              const math_t* X,
              idx_t n_rows,
              idx_t n_cols,
              idx_t ld_X,
              const math_t* y,
              math_t* Gram,
              idx_t ld_G,
              rmm::device_uvector<math_t>& U_buffer,
              math_t** U,
              idx_t* ld_U,
              std::vector<idx_t>& indices,
              rmm::device_uvector<math_t>& cor,
              int* max_iter,
              math_t* coef_path,
              cudaStream_t stream)
{
  // We cannot do more iterations than we have features.
  if (n_cols < *max_iter) { *max_iter = n_cols; }
  if (Gram == nullptr) {
    // No Gram matrix given: allocate a dedicated buffer for the Cholesky
    // factor, with the leading dimension padded for aligned column starts.
    const idx_t align_bytes = 256;
    *ld_U                   = raft::alignTo<idx_t>(*max_iter, align_bytes);
    try {
      U_buffer.resize((*ld_U) * (*max_iter), stream);
    } catch (std::bad_alloc const&) {
      THROW(
        "Not enough GPU memory! The memory usage depends quadraticaly on the "
        "n_nonzero_coefs parameter, try to decrease it.");
    }
    *U = U_buffer.data();
  } else {
    // Set U as G. During the solution in larsFit, the Cholesky factorization
    // U will overwrite G.
    *U    = Gram;
    *ld_U = ld_G;
  }
  // indices[i] == i initially; swapped as features are (de)activated.
  std::iota(indices.data(), indices.data() + n_cols, 0);

  math_t one  = 1;
  math_t zero = 0;
  // Set initial correlation to X.T * y (residual equals y when beta == 0)
  // #TODO: Call from public API when ready
  RAFT_CUBLAS_TRY(raft::linalg::detail::cublasgemv(handle.get_cublas_handle(),
                                                   CUBLAS_OP_T,
                                                   n_rows,
                                                   n_cols,
                                                   &one,
                                                   X,
                                                   ld_X,
                                                   y,
                                                   1,
                                                   &zero,
                                                   cor.data(),
                                                   1,
                                                   stream));
  // Zero out the coefficient path so untouched entries read as 0.
  if (coef_path) {
    RAFT_CUDA_TRY(
      cudaMemsetAsync(coef_path, 0, sizeof(math_t) * (*max_iter + 1) * (*max_iter), stream));
  }
}
/**
* @brief Update regression coefficient and correlations
*
* After we calculated the equiangular vector and the step size (gamma) we
* adjust the regression coefficients here.
*
* See calcEquiangularVec for definition of ws.
*
* @param handle RAFT handle
* @param max_iter maximum number of iterations
* @param n_cols number of valid feature columns
* @param n_active number of elements in the active set (n_active <= n_cols)
* @param gamma device pointer to the maximum step size (scalar)
* @param ws device pointer to the ws vector, size [n_cols]
* @param cor device pointer to the correlations, size [n_cols]
* @param a_vec device pointer to a = X.T[:,n_A:] * u, size [n_cols]
* @param beta pointer to regression coefficients, size [max_iter]
* @param coef_path device pointer to all the coefficients along the
* regularization path, size [(max_iter + 1) * max_iter]
* @param stream CUDA stream
*/
template <typename math_t, typename idx_t>
void updateCoef(const raft::handle_t& handle,
                idx_t max_iter,
                idx_t n_cols,
                idx_t n_active,
                math_t* gamma,
                const math_t* ws,
                math_t* cor,
                math_t* a_vec,
                math_t* beta,
                math_t* coef_path,
                cudaStream_t stream)
{
  // gamma lives on the device, so both lambdas capture the pointer and
  // dereference it per element.
  auto sub_step = [gamma] __device__(math_t c, math_t a) { return c - *gamma * a; };
  auto add_step = [gamma] __device__(math_t b, math_t w) { return b + *gamma * w; };

  // Correlations only need adjusting for the inactive set:
  // cor[n_active:] -= gamma * a_vec
  const int n_inactive = n_cols - n_active;
  if (n_inactive > 0) {
    raft::linalg::binaryOp(cor + n_active, cor + n_active, a_vec, n_inactive, sub_step, stream);
  }

  // Step along the equiangular direction: beta[:n_active] += gamma * ws
  raft::linalg::binaryOp(beta, beta, ws, n_active, add_step, stream);

  // Optionally snapshot the coefficients for this point of the path.
  if (coef_path) { raft::copy(coef_path + n_active * max_iter, beta, n_active, stream); }
}
/**
 * @brief Train a regressor using Least Angle Regression.
*
* Least Angle Regression (LAR or LARS) is a model selection algorithm. It
* builds up the model using the following algorithm:
*
* 1. We start with all the coefficients equal to zero.
* 2. At each step we select the predictor that has the largest absolute
* correlation with the residual.
* 3. We take the largest step possible in the direction which is equiangular
* with all the predictors selected so far. The largest step is determined
* such that using this step a new predictor will have as much correlation
* with the residual as any of the currently active predictors.
* 4. Stop if max_iter reached or all the predictors are used, or if the
* correlation between any unused predictor and the residual is lower than
* a tolerance.
*
* The solver is based on [1]. The equations referred in the comments correspond
* to the equations in the paper.
*
* Note: this algorithm assumes that the offset is removed from X and y, and
* each feature is normalized:
* - sum_i y_i = 0,
* - sum_i x_{i,j} = 0, sum_i x_{i,j}^2=1 for j=0..n_col-1
*
* References:
* [1] B. Efron, T. Hastie, I. Johnstone, R Tibshirani, Least Angle Regression
* The Annals of Statistics (2004) Vol 32, No 2, 407-499
* http://statweb.stanford.edu/~tibs/ftp/lars.pdf
*
* @param handle RAFT handle
* @param X device array of training vectors in column major format,
* size [n_rows * n_cols]. Note that the columns of X will be permuted if
* the Gram matrix is not specified. It is expected that X is normalized so
* that each column has zero mean and unit variance.
* @param n_rows number of training samples
* @param n_cols number of feature columns
* @param y device array of the regression targets, size [n_rows]. y should
* be normalized to have zero mean.
* @param beta device array of regression coefficients, has to be allocated on
* entry, size [max_iter]
* @param active_idx device array containing the indices of active variables.
* Must be allocated on entry. Size [max_iter]
* @param alphas device array to return the maximum correlation along the
* regularization path. Must be allocated on entry, size [max_iter+1].
* @param n_active host pointer to return the number of active elements (scalar)
* @param Gram device array containing Gram matrix containing X.T * X. Can be
* nullptr.
* @param max_iter maximum number of iterations, this equals with the maximum
* number of coefficients returned. max_iter <= n_cols.
* @param coef_path coefficients along the regularization path are returned
* here. Must be nullptr, or a device array already allocated on entry.
* Size [max_iter * (max_iter+1)].
* @param verbosity verbosity level
* @param ld_X leading dimension of X (stride of columns)
* @param ld_G leading dimesion of G
* @param eps numeric parameter for Cholesky rank one update
*/
template <typename math_t, typename idx_t>
void larsFit(const raft::handle_t& handle,
             math_t* X,
             idx_t n_rows,
             idx_t n_cols,
             const math_t* y,
             math_t* beta,
             idx_t* active_idx,
             math_t* alphas,
             idx_t* n_active,
             math_t* Gram      = nullptr,
             int max_iter      = 500,
             math_t* coef_path = nullptr,
             int verbosity     = 0,
             idx_t ld_X        = 0,
             idx_t ld_G        = 0,
             math_t eps        = -1)
{
  ASSERT(n_cols > 0, "Parameter n_cols: number of columns cannot be less than one");
  ASSERT(n_rows > 0, "Parameter n_rows: number of rows cannot be less than one");
  ML::Logger::get().setLevel(verbosity);

  // Set default ld parameters if needed.
  if (ld_X == 0) ld_X = n_rows;
  if (Gram && ld_G == 0) ld_G = n_cols;

  cudaStream_t stream = handle.get_stream();
  // We will use either U_buffer.data() to store the Cholesky factorization, or
  // store it in place at Gram. Pointer U will point to the actual storage.
  rmm::device_uvector<math_t> U_buffer(0, stream);
  idx_t ld_U = 0;
  math_t* U  = nullptr;

  // Indices of elements in the active set.
  std::vector<idx_t> indices(n_cols);
  // Sign of the correlation at the time when the element was added to the
  // active set.
  rmm::device_uvector<math_t> sign(n_cols, stream);
  // Correlation between the residual mu = y - X.T*beta and columns of X
  rmm::device_uvector<math_t> cor(n_cols, stream);
  // Temporary arrays used by the solver
  rmm::device_scalar<math_t> A(stream);           // scalar from eq (2.5)
  rmm::device_uvector<math_t> a_vec(n_cols, stream);
  rmm::device_scalar<math_t> gamma(stream);       // step size along u
  rmm::device_uvector<math_t> u_eq(n_rows, stream);
  rmm::device_uvector<math_t> ws(max_iter, stream);
  rmm::device_uvector<math_t> workspace(n_cols, stream);

  // Computes the initial correlation (X.T * y) and sets up U / ld_U.
  // Note: larsInit may reduce max_iter to n_cols.
  larsInit(handle,
           X,
           n_rows,
           n_cols,
           ld_X,
           y,
           Gram,
           ld_G,
           U_buffer,
           &U,
           &ld_U,
           indices,
           cor,
           &max_iter,
           coef_path,
           stream);

  // If we detect collinear features, then we will move them to the end of the
  // correlation array and mark them as invalid (simply by decreasing
  // n_valid_cols). At every iteration the solver is only working with the valid
  // columns stored at X[:,:n_valid_cols], and G[:n_valid_cols, :n_valid_cols]
  // cor[:n_valid_cols].
  int n_valid_cols = n_cols;

  *n_active = 0;
  for (int i = 0; i < max_iter; i++) {
    math_t cj;
    idx_t j;
    // Step 1: pick the inactive feature with the largest |correlation|.
    // Also checks for convergence / numeric error.
    LarsFitStatus status = selectMostCorrelated(
      *n_active, n_valid_cols, cor.data(), &cj, workspace, &j, n_rows, indices.data(), i, stream);
    if (status != LarsFitStatus::kOk) { break; }

    // Step 2: move feature j into the active partition (increments *n_active).
    moveToActive(handle.get_cublas_handle(),
                 n_active,
                 j,
                 X,
                 n_rows,
                 n_valid_cols,
                 ld_X,
                 cor.data(),
                 indices.data(),
                 Gram,
                 ld_G,
                 sign.data(),
                 stream);

    // Step 3: update the Cholesky factor and compute ws, A and (if no Gram
    // matrix) the equiangular vector u_eq.
    status = calcEquiangularVec(handle,
                                *n_active,
                                X,
                                n_rows,
                                n_valid_cols,
                                ld_X,
                                sign.data(),
                                U,
                                ld_U,
                                Gram,
                                ld_G,
                                workspace,
                                ws.data(),
                                A.data(),
                                u_eq.data(),
                                eps,
                                stream);

    if (status == LarsFitStatus::kError) {
      if (*n_active > 1) { CUML_LOG_WARN("Returning with last valid model."); }
      // Undo the activation of the failing feature before stopping.
      *n_active -= 1;
      break;
    } else if (status == LarsFitStatus::kCollinear) {
      // We move the current feature to the invalid set
      swapFeatures(handle.get_cublas_handle(),
                   n_valid_cols - 1,
                   *n_active - 1,
                   X,
                   n_rows,
                   n_cols,
                   ld_X,
                   cor.data(),
                   indices.data(),
                   Gram,
                   ld_G,
                   stream);
      *n_active -= 1;
      n_valid_cols--;
      continue;
    }

    // Step 4: largest step gamma we can take in the equiangular direction
    // before an inactive feature catches up in correlation (eq 2.13).
    calcMaxStep(handle,
                max_iter,
                n_rows,
                n_valid_cols,
                *n_active,
                cj,
                A.data(),
                cor.data(),
                Gram,
                ld_G,
                X,
                ld_X,
                u_eq.data(),
                ws.data(),
                gamma.data(),
                a_vec.data(),
                stream);

    // Step 5: beta[:n_active] += gamma * ws and update inactive correlations.
    updateCoef(handle,
               max_iter,
               n_valid_cols,
               *n_active,
               gamma.data(),
               ws.data(),
               cor.data(),
               a_vec.data(),
               beta,
               coef_path,
               stream);
  }

  if (*n_active > 0) {
    // Apply sklearn definition of alphas = cor / n_rows
    raft::linalg::unaryOp(
      alphas,
      cor.data(),
      *n_active,
      [n_rows] __device__(math_t c) { return abs(c) / n_rows; },
      stream);
    // Calculate the final correlation. We use the correlation from the last
    // iteration and apply the changed during the last LARS iteration:
    // alpha[n_active] = cor[n_active-1] - gamma * A
    math_t* gamma_ptr = gamma.data();
    math_t* A_ptr     = A.data();
    raft::linalg::unaryOp(
      alphas + *n_active,
      cor.data() + *n_active - 1,
      1,
      [gamma_ptr, A_ptr, n_rows] __device__(math_t c) {
        return abs(c - (*gamma_ptr) * (*A_ptr)) / n_rows;
      },
      stream);
    // Report which original feature columns ended up in the model.
    raft::update_device(active_idx, indices.data(), *n_active, stream);
  } else {
    THROW("Model is not fitted.");
  }
}
/**
* @brief Predict with least angle regressor.
*
* @param handle RAFT handle
* @param X device array of training vectors in column major format,
* size [n_rows * n_cols].
* @param n_rows number of training samples
* @param n_cols number of feature columns
* @param ld_X leading dimension of X (stride of columns)
* @param beta device array of regression coefficients, size [n_active]
* @param n_active the number of regression coefficients
* @param active_idx device array containing the indices of active variables.
* Only these columns of X will be used for prediction, size [n_active].
* @param intercept
* @param preds device array to store the predictions, size [n_rows]. Must be
* allocated on entry.
*/
template <typename math_t, typename idx_t>
void larsPredict(const raft::handle_t& handle,
                 const math_t* X,
                 idx_t n_rows,
                 idx_t n_cols,
                 idx_t ld_X,
                 const math_t* beta,
                 idx_t n_active,
                 idx_t* active_idx,
                 math_t intercept,
                 math_t* preds)
{
  cudaStream_t stream = handle.get_stream();
  // Scratch buffers; one of them is filled below and X or beta is then
  // re-pointed at it for the final gemv.
  rmm::device_uvector<math_t> beta_sorted(0, stream);
  rmm::device_uvector<math_t> X_active_cols(0, stream);
  auto execution_policy = handle.get_thrust_policy();

  // Nothing to predict with / for.
  if (n_active == 0 || n_rows == 0) return;

  if (n_active == n_cols) {
    // All features are active: instead of gathering columns, reorder the
    // coefficients back into the original column order of X.
    // We make a copy of the beta coefs and sort them
    beta_sorted.resize(n_active, stream);
    rmm::device_uvector<idx_t> idx_sorted(n_active, stream);
    raft::copy(beta_sorted.data(), beta, n_active, stream);
    raft::copy(idx_sorted.data(), active_idx, n_active, stream);
    thrust::device_ptr<math_t> beta_ptr(beta_sorted.data());
    thrust::device_ptr<idx_t> idx_ptr(idx_sorted.data());
    thrust::sort_by_key(execution_policy, idx_ptr, idx_ptr + n_active, beta_ptr);
    beta = beta_sorted.data();
  } else {
    // We collect active columns of X to contiguous space
    X_active_cols.resize(n_active * ld_X, stream);
    const int TPB = 64;
    raft::cache::get_vecs<<<raft::ceildiv(n_active * ld_X, TPB), TPB, 0, stream>>>(
      X, ld_X, active_idx, n_active, X_active_cols.data());
    RAFT_CUDA_TRY(cudaGetLastError());
    X = X_active_cols.data();
  }
  // Initialize preds = intercept, then accumulate X * beta on top via gemv
  // (beta == 1 for both alpha and the accumulation factor below).
  thrust::device_ptr<math_t> pred_ptr(preds);
  thrust::fill(execution_policy, pred_ptr, pred_ptr + n_rows, intercept);
  math_t one = 1;
  // #TODO: Call from public API when ready
  RAFT_CUBLAS_TRY(raft::linalg::detail::cublasgemv(handle.get_cublas_handle(),
                                                   CUBLAS_OP_N,
                                                   n_rows,
                                                   n_active,
                                                   &one,
                                                   X,
                                                   ld_X,
                                                   beta,
                                                   1,
                                                   &one,
                                                   preds,
                                                   1,
                                                   stream));
}
}; // namespace Lars
}; // namespace Solver
}; // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/solver/lars.cu | /*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "lars_impl.cuh"
#include <cuml/solvers/lars.hpp>
#include <raft/core/handle.hpp>
namespace ML {
namespace Solver {
namespace Lars {
// Explicit instantiation
// The templated implementations live in lars_impl.cuh; instantiating them here
// for float/double emits the symbols exported by the library.
// larsFit: train a Least Angle Regression model.
template void larsFit<float, int>(const raft::handle_t& handle,
                                  float* X,
                                  int n_rows,
                                  int n_cols,
                                  const float* y,
                                  float* beta,
                                  int* active_idx,
                                  float* alphas,
                                  int* n_active,
                                  float* Gram,
                                  int max_iter,
                                  float* coef_path,
                                  int verbosity,
                                  int ld_X,
                                  int ld_G,
                                  float eps);
template void larsFit<double, int>(const raft::handle_t& handle,
                                   double* X,
                                   int n_rows,
                                   int n_cols,
                                   const double* y,
                                   double* beta,
                                   int* active_idx,
                                   double* alphas,
                                   int* n_active,
                                   double* Gram,
                                   int max_iter,
                                   double* coef_path,
                                   int verbosity,
                                   int ld_X,
                                   int ld_G,
                                   double eps);
// larsPredict: compute predictions from a fitted LARS model.
template void larsPredict(const raft::handle_t& handle,
                          const float* X,
                          int n_rows,
                          int n_cols,
                          int ld_X,
                          const float* beta,
                          int n_active,
                          int* active_idx,
                          float intercept,
                          float* preds);
template void larsPredict(const raft::handle_t& handle,
                          const double* X,
                          int n_rows,
                          int n_cols,
                          int ld_X,
                          const double* beta,
                          int n_active,
                          int* active_idx,
                          double intercept,
                          double* preds);
}; // namespace Lars
}; // namespace Solver
}; // end namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/solver/cd.cuh | /*
* Copyright (c) 2018-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "shuffle.h"
#include <cuml/solvers/params.hpp>
#include <functions/linearReg.cuh>
#include <functions/penalty.cuh>
#include <functions/softThres.cuh>
#include <glm/preprocess.cuh>
#include <raft/core/handle.hpp>
#include <raft/core/nvtx.hpp>
#include <raft/linalg/add.cuh>
#include <raft/linalg/axpy.cuh>
#include <raft/linalg/eltwise.cuh>
#include <raft/linalg/gemm.cuh>
#include <raft/linalg/gemv.cuh>
#include <raft/linalg/map.cuh>
#include <raft/linalg/multiply.cuh>
#include <raft/linalg/power.cuh>
#include <raft/linalg/sqrt.cuh>
#include <raft/linalg/subtract.cuh>
#include <raft/linalg/unary_op.cuh>
#include <raft/matrix/math.cuh>
#include <raft/matrix/matrix.cuh>
#include <raft/stats/sum.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
namespace ML {
namespace Solver {
using namespace MLCommon;
namespace {
/**
 * Convergence-tracking state for one coordinate-descent epoch.
 * A single instance lives in device memory; it is updated per column by
 * cdUpdateCoefKernel and read back on the host once per epoch (see cdFit).
 */
template <typename math_t>
struct ConvState {
  // Scratch slot for the coefficient currently being updated: holds its
  // previous value on entry to cdUpdateCoefKernel and the *negated* new value
  // on exit, so the follow-up axpy in cdFit can subtract the column's
  // contribution from the residual.
  math_t coef;
  // Largest absolute coefficient value produced so far in the current epoch.
  math_t coefMax;
  // Largest absolute coefficient change seen so far in the current epoch.
  math_t diffMax;
};
/**
 * Update a single CD coefficient and the corresponding convergence criteria.
 *
 * Launched as a single thread (<<<1,1>>>, matching the __launch_bounds__(1, 1)
 * qualifier) from cdFit. On entry, *coefLoc holds dot(X[:, ci], residual) and
 * convStateLoc->coef holds the coefficient's previous value. On exit, *coefLoc
 * holds the soft-thresholded, normalized new coefficient, and
 * convStateLoc->coef holds its negation so the caller's axpy can subtract the
 * column's contribution from the residual.
 *
 * @param[inout] coefLoc pointer to the coefficient (arr ptr + column index offset)
 * @param[in] squaredLoc pointer to the precomputed denominator for this column
 *                       (squared column norm plus the L2 regularization term)
 * @param[inout] convStateLoc pointer to the structure holding the convergence state
 * @param[in] l1_alpha L1 regularization coef
 */
template <typename math_t>
__global__ void __launch_bounds__(1, 1) cdUpdateCoefKernel(math_t* coefLoc,
                                                           const math_t* squaredLoc,
                                                           ConvState<math_t>* convStateLoc,
                                                           const math_t l1_alpha)
{
  auto coef = *coefLoc;
  // Soft-thresholding: shrink the correlation towards zero by l1_alpha.
  auto r = coef > l1_alpha ? coef - l1_alpha : (coef < -l1_alpha ? coef + l1_alpha : 0);
  auto squared = *squaredLoc;
  // Normalize by the regularized column norm; guard against a near-zero norm.
  r = squared > math_t(1e-5) ? r / squared : math_t(0);
  // Track the largest coefficient change this epoch (used by the tolerance test).
  auto diff = raft::myAbs(convStateLoc->coef - r);
  if (convStateLoc->diffMax < diff) convStateLoc->diffMax = diff;
  // Track the largest absolute coefficient value this epoch.
  auto absv = raft::myAbs(r);
  if (convStateLoc->coefMax < absv) convStateLoc->coefMax = absv;
  // Store the negated new value for the caller's residual-update axpy.
  convStateLoc->coef = -r;
  *coefLoc = r;
}
} // namespace
/**
* Fits a linear, lasso, and elastic-net regression model using Coordinate Descent solver.
*
* i.e. finds coefficients that minimize the following loss function:
*
* f(coef) = 1/2 * || labels - input * coef ||^2
* + 1/2 * alpha * (1 - l1_ratio) * ||coef||^2
* + alpha * l1_ratio * ||coef||_1
*
*
* @param handle
* Reference of raft::handle_t
* @param input
* pointer to an array in column-major format (size of n_rows, n_cols)
* @param n_rows
* n_samples or rows in input
* @param n_cols
* n_features or columns in X
* @param labels
* pointer to an array for labels (size of n_rows)
* @param coef
* pointer to an array for coefficients (size of n_cols). This will be filled with
* coefficients once the function is executed.
* @param intercept
* pointer to a scalar for intercept. This will be filled
* once the function is executed
* @param fit_intercept
* boolean parameter to control if the intercept will be fitted or not
* @param normalize
* boolean parameter to control if the data will be normalized or not;
* NB: the input is scaled by the column-wise biased sample standard deviation estimator.
* @param epochs
* Maximum number of iterations that solver will run
* @param loss
* enum to use different loss functions. Only linear regression loss functions is supported
* right now
* @param alpha
* L1 parameter
* @param l1_ratio
* ratio of alpha will be used for L1. (1 - l1_ratio) * alpha will be used for L2
* @param shuffle
* boolean parameter to control whether coordinates will be picked randomly or not
* @param tol
* tolerance to stop the solver
* @param sample_weight
* device pointer to sample weight vector of length n_rows (nullptr or uniform weights)
* This vector is modified during the computation
*/
template <typename math_t>
void cdFit(const raft::handle_t& handle,
           math_t* input,
           int n_rows,
           int n_cols,
           math_t* labels,
           math_t* coef,
           math_t* intercept,
           bool fit_intercept,
           bool normalize,
           int epochs,
           ML::loss_funct loss,
           math_t alpha,
           math_t l1_ratio,
           bool shuffle,
           math_t tol,
           math_t* sample_weight = nullptr)
{
  raft::common::nvtx::range fun_scope("ML::Solver::cdFit-%d-%d", n_rows, n_cols);
  ASSERT(n_cols > 0, "Parameter n_cols: number of columns cannot be less than one");
  ASSERT(n_rows > 1, "Parameter n_rows: number of rows cannot be less than two");
  ASSERT(loss == ML::loss_funct::SQRD_LOSS,
         "Parameter loss: Only SQRT_LOSS function is supported for now");
  cudaStream_t stream = handle.get_stream();
  // Workspace: residual r = labels - input * coef (maintained incrementally),
  // and the per-column denominators dot(X[:,ci], X[:,ci]) + l2_alpha.
  rmm::device_uvector<math_t> residual(n_rows, stream);
  rmm::device_uvector<math_t> squared(n_cols, stream);
  rmm::device_uvector<math_t> mu_input(0, stream);
  rmm::device_uvector<math_t> mu_labels(0, stream);
  rmm::device_uvector<math_t> norm2_input(0, stream);
  math_t h_sum_sw = 0;
  if (sample_weight != nullptr) {
    // Normalize the weights so they sum to n_rows; keep the original sum on
    // the host so the weights can be restored before returning.
    rmm::device_scalar<math_t> sum_sw(stream);
    raft::stats::sum(sum_sw.data(), sample_weight, 1, n_rows, true, stream);
    raft::update_host(&h_sum_sw, sum_sw.data(), 1, stream);
    raft::linalg::multiplyScalar(
      sample_weight, sample_weight, (math_t)n_rows / h_sum_sw, n_rows, stream);
  }
  if (fit_intercept) {
    // Center (and optionally scale) input/labels; the intercept is recovered
    // by postProcessData at the end.
    mu_input.resize(n_cols, stream);
    mu_labels.resize(1, stream);
    if (normalize) { norm2_input.resize(n_cols, stream); }
    GLM::preProcessData(handle,
                        input,
                        n_rows,
                        n_cols,
                        labels,
                        intercept,
                        mu_input.data(),
                        mu_labels.data(),
                        norm2_input.data(),
                        fit_intercept,
                        normalize,
                        sample_weight);
  }
  if (sample_weight != nullptr) {
    // Weighted least squares via row scaling: multiply each row of input and
    // labels by sqrt(weight). This is undone after the fit.
    raft::linalg::sqrt(sample_weight, sample_weight, n_rows, stream);
    raft::matrix::matrixVectorBinaryMult(
      input, sample_weight, n_rows, n_cols, false, false, stream);
    raft::linalg::map_k(
      labels,
      n_rows,
      [] __device__(math_t a, math_t b) { return a * b; },
      stream,
      labels,
      sample_weight);
  }
  // Column visit order; reshuffled at the start of every epoch when `shuffle`.
  std::vector<int> ri(n_cols);
  std::mt19937 g(rand());
  initShuffle(ri, g);
  // Regularization strengths, scaled by the number of rows.
  math_t l2_alpha = (1 - l1_ratio) * alpha * n_rows;
  math_t l1_alpha = l1_ratio * alpha * n_rows;
  // Precompute the per-column denominators (the residual is initialized below)
  if (normalize) {
    // if we normalized the data, we know sample variance for each column is 1,
    // thus no need to compute the norm again.
    math_t scalar = math_t(n_rows) + l2_alpha;
    raft::matrix::setValue(squared.data(), squared.data(), scalar, n_cols, stream);
  } else {
    raft::linalg::colNorm(
      squared.data(), input, n_cols, n_rows, raft::linalg::L2Norm, false, stream);
    raft::linalg::addScalar(squared.data(), squared.data(), l2_alpha, n_cols, stream);
  }
  // Initial residual equals labels, since coef starts contributing nothing here.
  raft::copy(residual.data(), labels, n_rows, stream);
  ConvState<math_t> h_convState;
  rmm::device_uvector<ConvState<math_t>> convStateBuf(1, stream);
  auto convStateLoc = convStateBuf.data();
  rmm::device_scalar<math_t> cublas_alpha(1.0, stream);
  rmm::device_scalar<math_t> cublas_beta(0.0, stream);
  for (int i = 0; i < epochs; i++) {
    raft::common::nvtx::range epoch_scope("ML::Solver::cdFit::epoch-%d", i);
    if (i > 0 && shuffle) { Solver::shuffle(ri, g); }
    // Reset the per-epoch convergence statistics on the device.
    RAFT_CUDA_TRY(cudaMemsetAsync(convStateLoc, 0, sizeof(ConvState<math_t>), stream));
    for (int j = 0; j < n_cols; j++) {
      raft::common::nvtx::range iter_scope("ML::Solver::cdFit::col-%d", j);
      int ci = ri[j];
      math_t* coef_loc = coef + ci;
      math_t* squared_loc = squared.data() + ci;
      math_t* input_col_loc = input + (ci * n_rows);
      // remember current coef (read later by cdUpdateCoefKernel)
      raft::copy(&(convStateLoc->coef), coef_loc, 1, stream);
      // calculate the residual without the contribution from column ci
      // residual[:] += coef[ci] * X[:, ci]
      raft::linalg::axpy<math_t, true>(
        handle, n_rows, coef_loc, input_col_loc, 1, residual.data(), 1, stream);
      // coef[ci] = dot(X[:, ci], residual[:])
      raft::linalg::gemv<math_t, true>(handle,
                                       false,
                                       1,
                                       n_rows,
                                       cublas_alpha.data(),
                                       input_col_loc,
                                       1,
                                       residual.data(),
                                       1,
                                       cublas_beta.data(),
                                       coef_loc,
                                       1,
                                       stream);
      // Calculate the new coefficient that minimizes f along coordinate line ci
      // coef[ci] = SoftTreshold(dot(X[:, ci], residual[:]), l1_alpha) / dot(X[:, ci], X[:, ci]))
      // Also, update the convergence criteria.
      cdUpdateCoefKernel<math_t><<<dim3(1, 1, 1), dim3(1, 1, 1), 0, stream>>>(
        coef_loc, squared_loc, convStateLoc, l1_alpha);
      RAFT_CUDA_TRY(cudaGetLastError());
      // Restore the residual using the updated coefficient
      // (convStateLoc->coef holds -coef[ci] at this point, so this subtracts).
      raft::linalg::axpy<math_t, true>(
        handle, n_rows, &(convStateLoc->coef), input_col_loc, 1, residual.data(), 1, stream);
    }
    raft::update_host(&h_convState, convStateLoc, 1, stream);
    handle.sync_stream(stream);
    // Stop when all coefficients are below tol, or the largest relative change
    // fell below tol (the first test also guards the division by coefMax).
    if (h_convState.coefMax < tol || (h_convState.diffMax / h_convState.coefMax) < tol) break;
  }
  if (sample_weight != nullptr) {
    // Undo the sqrt(weight) row scaling of input/labels and restore the
    // original (unnormalized) sample weights.
    raft::matrix::matrixVectorBinaryDivSkipZero(
      input, sample_weight, n_rows, n_cols, false, false, stream);
    raft::linalg::map_k(
      labels,
      n_rows,
      [] __device__(math_t a, math_t b) { return a / b; },
      stream,
      labels,
      sample_weight);
    raft::linalg::powerScalar(sample_weight, sample_weight, (math_t)2, n_rows, stream);
    raft::linalg::multiplyScalar(sample_weight, sample_weight, h_sum_sw / n_rows, n_rows, stream);
  }
  if (fit_intercept) {
    // Un-center the data and compute the final intercept.
    GLM::postProcessData(handle,
                         input,
                         n_rows,
                         n_cols,
                         labels,
                         coef,
                         intercept,
                         mu_input.data(),
                         mu_labels.data(),
                         norm2_input.data(),
                         fit_intercept,
                         normalize);
  } else {
    *intercept = math_t(0);
  }
}
/**
 * Computes predictions for a linear / lasso / elastic-net model previously
 * fitted with cdFit: preds = input * coef + intercept.
 * @param handle
 *        cuml handle
 * @param input
 *        pointer to an array in column-major format (size of n_rows, n_cols)
 * @param n_rows
 *        n_samples or rows in input
 * @param n_cols
 *        n_features or columns in X
 * @param coef
 *        pointer to an array for coefficients (size of n_cols). Calculated in cdFit function.
 * @param intercept
 *        intercept value calculated in cdFit function
 * @param preds
 *        pointer to an array for predictions (size of n_rows). Filled by this
 *        function.
 * @param loss
 *        enum to use different loss functions. Only linear regression loss
 *        functions is supported right now.
 */
template <typename math_t>
void cdPredict(const raft::handle_t& handle,
               const math_t* input,
               int n_rows,
               int n_cols,
               const math_t* coef,
               math_t intercept,
               math_t* preds,
               ML::loss_funct loss)
{
  ASSERT(n_cols > 0, "Parameter n_cols: number of columns cannot be less than one");
  ASSERT(n_rows > 1, "Parameter n_rows: number of rows cannot be less than two");
  ASSERT(loss == ML::loss_funct::SQRD_LOSS,
         "Parameter loss: Only SQRT_LOSS function is supported for now");
  cudaStream_t stream = handle.get_stream();
  // Dense prediction: preds = input * coef + intercept.
  Functions::linearRegH(handle, input, n_rows, n_cols, coef, preds, intercept, stream);
}
}; // namespace Solver
}; // namespace ML
// end namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/solver/solver.cu | /*
* Copyright (c) 2018-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cd.cuh"
#include "sgd.cuh"
#include <cuml/solvers/params.hpp>
#include <cuml/solvers/solver.hpp>
#include <raft/core/handle.hpp>
namespace ML {
namespace Solver {
using namespace ML;
// Single-precision SGD entry point: translates the integer codes exposed by
// the public API into the strongly typed enums used by the templated solver,
// then forwards to the implementation in sgd.cuh.
void sgdFit(raft::handle_t& handle,
            float* input,
            int n_rows,
            int n_cols,
            float* labels,
            float* coef,
            float* intercept,
            bool fit_intercept,
            int batch_size,
            int epochs,
            int lr_type,
            float eta0,
            float power_t,
            int loss,
            int penalty,
            float alpha,
            float l1_ratio,
            bool shuffle,
            float tol,
            int n_iter_no_change)
{
  // Loss code -> loss enum.
  ML::loss_funct loss_funct = ML::loss_funct::SQRD_LOSS;
  switch (loss) {
    case 0: loss_funct = ML::loss_funct::SQRD_LOSS; break;
    case 1: loss_funct = ML::loss_funct::LOG; break;
    case 2: loss_funct = ML::loss_funct::HINGE; break;
    default: ASSERT(false, "glm.cu: other functions are not supported yet.");
  }
  // Penalty code -> penalty enum.
  MLCommon::Functions::penalty pen = MLCommon::Functions::penalty::NONE;
  switch (penalty) {
    case 0: pen = MLCommon::Functions::penalty::NONE; break;
    case 1: pen = MLCommon::Functions::penalty::L1; break;
    case 2: pen = MLCommon::Functions::penalty::L2; break;
    case 3: pen = MLCommon::Functions::penalty::ELASTICNET; break;
    default: ASSERT(false, "glm.cu: penalty is not supported yet.");
  }
  // Learning-rate code -> schedule enum.
  ML::lr_type learning_rate_type = ML::lr_type::OPTIMAL;
  switch (lr_type) {
    case 0: learning_rate_type = ML::lr_type::OPTIMAL; break;
    case 1: learning_rate_type = ML::lr_type::CONSTANT; break;
    case 2: learning_rate_type = ML::lr_type::INVSCALING; break;
    case 3: learning_rate_type = ML::lr_type::ADAPTIVE; break;
    default: ASSERT(false, "glm.cu: this learning rate type is not supported.");
  }
  sgdFit(handle,
         input,
         n_rows,
         n_cols,
         labels,
         coef,
         intercept,
         fit_intercept,
         batch_size,
         epochs,
         learning_rate_type,
         eta0,
         power_t,
         loss_funct,
         pen,
         alpha,
         l1_ratio,
         shuffle,
         tol,
         n_iter_no_change,
         handle.get_stream());
}
// Double-precision SGD entry point: translates the integer codes exposed by
// the public API into the strongly typed enums used by the templated solver,
// then forwards to the implementation in sgd.cuh.
void sgdFit(raft::handle_t& handle,
            double* input,
            int n_rows,
            int n_cols,
            double* labels,
            double* coef,
            double* intercept,
            bool fit_intercept,
            int batch_size,
            int epochs,
            int lr_type,
            double eta0,
            double power_t,
            int loss,
            int penalty,
            double alpha,
            double l1_ratio,
            bool shuffle,
            double tol,
            int n_iter_no_change)
{
  // Loss code -> loss enum.
  ML::loss_funct loss_funct = ML::loss_funct::SQRD_LOSS;
  switch (loss) {
    case 0: loss_funct = ML::loss_funct::SQRD_LOSS; break;
    case 1: loss_funct = ML::loss_funct::LOG; break;
    case 2: loss_funct = ML::loss_funct::HINGE; break;
    default: ASSERT(false, "glm.cu: other functions are not supported yet.");
  }
  // Penalty code -> penalty enum.
  MLCommon::Functions::penalty pen = MLCommon::Functions::penalty::NONE;
  switch (penalty) {
    case 0: pen = MLCommon::Functions::penalty::NONE; break;
    case 1: pen = MLCommon::Functions::penalty::L1; break;
    case 2: pen = MLCommon::Functions::penalty::L2; break;
    case 3: pen = MLCommon::Functions::penalty::ELASTICNET; break;
    default: ASSERT(false, "glm.cu: penalty is not supported yet.");
  }
  // Learning-rate code -> schedule enum.
  ML::lr_type learning_rate_type = ML::lr_type::OPTIMAL;
  switch (lr_type) {
    case 0: learning_rate_type = ML::lr_type::OPTIMAL; break;
    case 1: learning_rate_type = ML::lr_type::CONSTANT; break;
    case 2: learning_rate_type = ML::lr_type::INVSCALING; break;
    case 3: learning_rate_type = ML::lr_type::ADAPTIVE; break;
    default: ASSERT(false, "glm.cu: this learning rate type is not supported.");
  }
  sgdFit(handle,
         input,
         n_rows,
         n_cols,
         labels,
         coef,
         intercept,
         fit_intercept,
         batch_size,
         epochs,
         learning_rate_type,
         eta0,
         power_t,
         loss_funct,
         pen,
         alpha,
         l1_ratio,
         shuffle,
         tol,
         n_iter_no_change,
         handle.get_stream());
}
// Single-precision SGD prediction entry point: maps the integer loss code onto
// the solver's loss enum and forwards to the templated implementation.
void sgdPredict(raft::handle_t& handle,
                const float* input,
                int n_rows,
                int n_cols,
                const float* coef,
                float intercept,
                float* preds,
                int loss)
{
  ML::loss_funct loss_funct = ML::loss_funct::SQRD_LOSS;
  switch (loss) {
    case 0: loss_funct = ML::loss_funct::SQRD_LOSS; break;
    case 1: loss_funct = ML::loss_funct::LOG; break;
    case 2: loss_funct = ML::loss_funct::HINGE; break;
    default: ASSERT(false, "glm.cu: other functions are not supported yet.");
  }
  sgdPredict(
    handle, input, n_rows, n_cols, coef, intercept, preds, loss_funct, handle.get_stream());
}
// Double-precision SGD prediction entry point: maps the integer loss code onto
// the solver's loss enum and forwards to the templated implementation.
void sgdPredict(raft::handle_t& handle,
                const double* input,
                int n_rows,
                int n_cols,
                const double* coef,
                double intercept,
                double* preds,
                int loss)
{
  ML::loss_funct loss_funct = ML::loss_funct::SQRD_LOSS;
  switch (loss) {
    case 0: loss_funct = ML::loss_funct::SQRD_LOSS; break;
    case 1: loss_funct = ML::loss_funct::LOG; break;
    case 2: loss_funct = ML::loss_funct::HINGE; break;
    default: ASSERT(false, "glm.cu: other functions are not supported yet.");
  }
  sgdPredict(
    handle, input, n_rows, n_cols, coef, intercept, preds, loss_funct, handle.get_stream());
}
// Single-precision binary classification entry point: maps the integer loss
// code onto the solver's loss enum and forwards to the templated implementation.
void sgdPredictBinaryClass(raft::handle_t& handle,
                           const float* input,
                           int n_rows,
                           int n_cols,
                           const float* coef,
                           float intercept,
                           float* preds,
                           int loss)
{
  ML::loss_funct loss_funct = ML::loss_funct::SQRD_LOSS;
  switch (loss) {
    case 0: loss_funct = ML::loss_funct::SQRD_LOSS; break;
    case 1: loss_funct = ML::loss_funct::LOG; break;
    case 2: loss_funct = ML::loss_funct::HINGE; break;
    default: ASSERT(false, "glm.cu: other functions are not supported yet.");
  }
  sgdPredictBinaryClass(
    handle, input, n_rows, n_cols, coef, intercept, preds, loss_funct, handle.get_stream());
}
// Double-precision binary classification entry point: maps the integer loss
// code onto the solver's loss enum and forwards to the templated implementation.
void sgdPredictBinaryClass(raft::handle_t& handle,
                           const double* input,
                           int n_rows,
                           int n_cols,
                           const double* coef,
                           double intercept,
                           double* preds,
                           int loss)
{
  ML::loss_funct loss_funct = ML::loss_funct::SQRD_LOSS;
  switch (loss) {
    case 0: loss_funct = ML::loss_funct::SQRD_LOSS; break;
    case 1: loss_funct = ML::loss_funct::LOG; break;
    case 2: loss_funct = ML::loss_funct::HINGE; break;
    default: ASSERT(false, "glm.cu: other functions are not supported yet.");
  }
  sgdPredictBinaryClass(
    handle, input, n_rows, n_cols, coef, intercept, preds, loss_funct, handle.get_stream());
}
// Single-precision coordinate-descent entry point. Only the squared loss
// (integer code 0) is implemented by the CD solver.
void cdFit(raft::handle_t& handle,
           float* input,
           int n_rows,
           int n_cols,
           float* labels,
           float* coef,
           float* intercept,
           bool fit_intercept,
           bool normalize,
           int epochs,
           int loss,
           float alpha,
           float l1_ratio,
           bool shuffle,
           float tol,
           float* sample_weight)
{
  ASSERT(loss == 0, "Parameter loss: Only SQRT_LOSS function is supported for now");
  const ML::loss_funct loss_funct = ML::loss_funct::SQRD_LOSS;
  cdFit<float>(handle,
               input,
               n_rows,
               n_cols,
               labels,
               coef,
               intercept,
               fit_intercept,
               normalize,
               epochs,
               loss_funct,
               alpha,
               l1_ratio,
               shuffle,
               tol,
               sample_weight);
}
// Double-precision coordinate-descent entry point. Only the squared loss
// (integer code 0) is implemented by the CD solver.
void cdFit(raft::handle_t& handle,
           double* input,
           int n_rows,
           int n_cols,
           double* labels,
           double* coef,
           double* intercept,
           bool fit_intercept,
           bool normalize,
           int epochs,
           int loss,
           double alpha,
           double l1_ratio,
           bool shuffle,
           double tol,
           double* sample_weight)
{
  ASSERT(loss == 0, "Parameter loss: Only SQRT_LOSS function is supported for now");
  const ML::loss_funct loss_funct = ML::loss_funct::SQRD_LOSS;
  cdFit<double>(handle,
                input,
                n_rows,
                n_cols,
                labels,
                coef,
                intercept,
                fit_intercept,
                normalize,
                epochs,
                loss_funct,
                alpha,
                l1_ratio,
                shuffle,
                tol,
                sample_weight);
}
// Single-precision coordinate-descent prediction entry point. Only the squared
// loss (integer code 0) is supported.
void cdPredict(raft::handle_t& handle,
               const float* input,
               int n_rows,
               int n_cols,
               const float* coef,
               float intercept,
               float* preds,
               int loss)
{
  ASSERT(loss == 0, "glm.cu: other functions are not supported yet.");
  const ML::loss_funct loss_funct = ML::loss_funct::SQRD_LOSS;
  cdPredict<float>(handle, input, n_rows, n_cols, coef, intercept, preds, loss_funct);
}
// Double-precision coordinate-descent prediction entry point. Only the squared
// loss (integer code 0) is supported.
void cdPredict(raft::handle_t& handle,
               const double* input,
               int n_rows,
               int n_cols,
               const double* coef,
               double intercept,
               double* preds,
               int loss)
{
  ASSERT(loss == 0, "glm.cu: other functions are not supported yet.");
  const ML::loss_funct loss_funct = ML::loss_funct::SQRD_LOSS;
  cdPredict<double>(handle, input, n_rows, n_cols, coef, intercept, preds, loss_funct);
}
} // namespace Solver
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/solver/shuffle.h | /*
* Copyright (c) 2018-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <algorithm>
#include <cstddef>
#include <numeric>
#include <random>
#include <vector>
namespace ML {
namespace Solver {
/**
 * Seed the generator and fill rand_indices with the identity permutation
 * 0, 1, ..., n-1 (later permuted by Solver::shuffle).
 *
 * @param rand_indices vector to initialize; its current size determines n
 * @param g Mersenne Twister engine, re-seeded with random_state
 * @param random_state seed value (truncated to int), default 0
 */
template <typename math_t>
void initShuffle(std::vector<math_t>& rand_indices, std::mt19937& g, math_t random_state = 0)
{
  g.seed((int)random_state);
  // std::iota replaces the hand-rolled fill loop.
  std::iota(rand_indices.begin(), rand_indices.end(), math_t(0));
}
/**
 * Randomly permute the contents of rand_indices in place, drawing randomness
 * from the supplied Mersenne Twister engine.
 */
template <typename math_t>
void shuffle(std::vector<math_t>& rand_indices, std::mt19937& g)
{
  auto first = rand_indices.begin();
  auto last  = rand_indices.end();
  std::shuffle(first, last, g);
}
}; // namespace Solver
}; // namespace ML
// end namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/solver/sgd.cuh | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "learning_rate.h"
#include "shuffle.h"
#include <cuml/solvers/params.hpp>
#include <functions/hinge.cuh>
#include <functions/linearReg.cuh>
#include <functions/logisticReg.cuh>
#include <glm/preprocess.cuh>
#include <raft/core/handle.hpp>
#include <raft/linalg/add.cuh>
#include <raft/linalg/eltwise.cuh>
#include <raft/linalg/gemv.cuh>
#include <raft/linalg/norm.cuh>
#include <raft/linalg/subtract.cuh>
#include <raft/linalg/unary_op.cuh>
#include <raft/matrix/matrix.cuh>
#include <raft/stats/mean.cuh>
#include <raft/stats/mean_center.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>
namespace ML {
namespace Solver {
using namespace MLCommon;
/**
 * Fits a linear model (with optional L1/L2/elastic-net regularization) using the minibatch Stochastic Gradient Descent solver
* @param handle
* Reference of raft::handle_t
* @param input
* pointer to an array in column-major format (size of n_rows, n_cols)
* @param n_rows
* n_samples or rows in input
* @param n_cols
* n_features or columns in X
* @param labels
* pointer to an array for labels (size of n_rows)
* @param coef
* pointer to an array for coefficients (size of n_cols). This will be filled with
* coefficients once the function is executed.
* @param intercept
* pointer to a scalar for intercept. This will be filled
* once the function is executed
* @param fit_intercept
* boolean parameter to control if the intercept will be fitted or not
* @param batch_size
* number of rows in the minibatch
* @param epochs
* number of iterations that the solver will run
* @param lr_type
* type of the learning rate function (i.e. OPTIMAL, CONSTANT, INVSCALING, ADAPTIVE)
* @param eta0
* learning rate for constant lr_type. It's used to calculate learning rate function for
* other types of lr_type
* @param power_t
* power value in the INVSCALING lr_type
* @param loss
* enum to use different loss functions.
* @param penalty
* None, L1, L2, or Elastic-net penalty
* @param alpha
* alpha value in L1
* @param l1_ratio
* ratio of alpha will be used for L1. (1 - l1_ratio) * alpha will be used for L2.
 * @param shuffle
 *        boolean parameter to control whether training rows are visited in a randomly shuffled order on each epoch
* @param tol
* tolerance to stop the solver
* @param n_iter_no_change
* solver stops if there is no update greater than tol after n_iter_no_change iterations
* @param stream
* cuda stream
*/
template <typename math_t>
void sgdFit(const raft::handle_t& handle,
            math_t* input,
            int n_rows,
            int n_cols,
            math_t* labels,
            math_t* coef,
            math_t* intercept,
            bool fit_intercept,
            int batch_size,
            int epochs,
            ML::lr_type lr_type,
            math_t eta0,
            math_t power_t,
            ML::loss_funct loss,
            Functions::penalty penalty,
            math_t alpha,
            math_t l1_ratio,
            bool shuffle,
            math_t tol,
            int n_iter_no_change,
            cudaStream_t stream)
{
  ASSERT(n_cols > 0, "Parameter n_cols: number of columns cannot be less than one");
  ASSERT(n_rows > 1, "Parameter n_rows: number of rows cannot be less than two");
  // NOTE(review): cublas_handle is acquired but never used in this function —
  // candidate for removal.
  cublasHandle_t cublas_handle = handle.get_cublas_handle();
  rmm::device_uvector<math_t> mu_input(0, stream);
  rmm::device_uvector<math_t> mu_labels(0, stream);
  rmm::device_uvector<math_t> norm2_input(0, stream);
  if (fit_intercept) {
    // Center input/labels; the intercept is recovered by postProcessData below.
    mu_input.resize(n_cols, stream);
    mu_labels.resize(1, stream);
    GLM::preProcessData(handle,
                        input,
                        n_rows,
                        n_cols,
                        labels,
                        intercept,
                        mu_input.data(),
                        mu_labels.data(),
                        norm2_input.data(),
                        fit_intercept,
                        false);
  }
  // Minibatch workspace: gradient vector, row indices of the current batch,
  // and contiguous copies of the gathered rows/labels.
  rmm::device_uvector<math_t> grads(n_cols, stream);
  rmm::device_uvector<int> indices(batch_size, stream);
  rmm::device_uvector<math_t> input_batch(batch_size * n_cols, stream);
  rmm::device_uvector<math_t> labels_batch(batch_size, stream);
  rmm::device_scalar<math_t> loss_value(stream);
  math_t prev_loss_value = math_t(0);
  math_t curr_loss_value = math_t(0);
  // Row visit order; reshuffled at the start of every epoch when `shuffle`.
  std::vector<int> rand_indices(n_rows);
  std::mt19937 g(rand());
  initShuffle(rand_indices, g);
  // t counts minibatch updates across epochs (input to the learning-rate schedules).
  math_t t = math_t(1);
  math_t learning_rate = math_t(0);
  if (lr_type == ML::lr_type::ADAPTIVE) {
    learning_rate = eta0;
  } else if (lr_type == ML::lr_type::OPTIMAL) {
    eta0 = calOptimalInit(alpha);
  }
  int n_iter_no_change_curr = 0;
  for (int i = 0; i < epochs; i++) {
    int cbs = 0;
    int j = 0;
    if (i > 0 && shuffle) { Solver::shuffle(rand_indices, g); }
    while (j < n_rows) {
      // Current batch size; the last batch of an epoch may be smaller.
      if ((j + batch_size) > n_rows) {
        cbs = n_rows - j;
      } else {
        cbs = batch_size;
      }
      if (cbs == 0) break;
      // Gather the rows of this minibatch into contiguous device buffers.
      raft::update_device(indices.data(), &rand_indices[j], cbs, stream);
      raft::matrix::copyRows(
        input, n_rows, n_cols, input_batch.data(), indices.data(), cbs, stream);
      raft::matrix::copyRows(labels, n_rows, 1, labels_batch.data(), indices.data(), cbs, stream);
      // Gradient of the selected loss (plus penalty) w.r.t. coef on this batch.
      if (loss == ML::loss_funct::SQRD_LOSS) {
        Functions::linearRegLossGrads(handle,
                                      input_batch.data(),
                                      cbs,
                                      n_cols,
                                      labels_batch.data(),
                                      coef,
                                      grads.data(),
                                      penalty,
                                      alpha,
                                      l1_ratio,
                                      stream);
      } else if (loss == ML::loss_funct::LOG) {
        Functions::logisticRegLossGrads(handle,
                                        input_batch.data(),
                                        cbs,
                                        n_cols,
                                        labels_batch.data(),
                                        coef,
                                        grads.data(),
                                        penalty,
                                        alpha,
                                        l1_ratio,
                                        stream);
      } else if (loss == ML::loss_funct::HINGE) {
        Functions::hingeLossGrads(handle,
                                  input_batch.data(),
                                  cbs,
                                  n_cols,
                                  labels_batch.data(),
                                  coef,
                                  grads.data(),
                                  penalty,
                                  alpha,
                                  l1_ratio,
                                  stream);
      } else {
        ASSERT(false, "sgd.cuh: Other loss functions have not been implemented yet!");
      }
      // ADAPTIVE keeps its own rate (halved on plateaus below); the other
      // schedules recompute it from the update counter t.
      if (lr_type != ML::lr_type::ADAPTIVE)
        learning_rate = calLearningRate(lr_type, eta0, power_t, alpha, t);
      // Gradient step: coef -= learning_rate * grads.
      raft::linalg::scalarMultiply(grads.data(), grads.data(), learning_rate, n_cols, stream);
      raft::linalg::subtract(coef, coef, grads.data(), n_cols, stream);
      j = j + cbs;
      t = t + 1;
    }
    if (tol > math_t(0)) {
      // Evaluate the full-dataset loss to drive the early-stopping criterion.
      if (loss == ML::loss_funct::SQRD_LOSS) {
        Functions::linearRegLoss(handle,
                                 input,
                                 n_rows,
                                 n_cols,
                                 labels,
                                 coef,
                                 loss_value.data(),
                                 penalty,
                                 alpha,
                                 l1_ratio,
                                 stream);
      } else if (loss == ML::loss_funct::LOG) {
        Functions::logisticRegLoss(handle,
                                   input,
                                   n_rows,
                                   n_cols,
                                   labels,
                                   coef,
                                   loss_value.data(),
                                   penalty,
                                   alpha,
                                   l1_ratio,
                                   stream);
      } else if (loss == ML::loss_funct::HINGE) {
        Functions::hingeLoss(handle,
                             input,
                             n_rows,
                             n_cols,
                             labels,
                             coef,
                             loss_value.data(),
                             penalty,
                             alpha,
                             l1_ratio,
                             stream);
      }
      raft::update_host(&curr_loss_value, loss_value.data(), 1, stream);
      handle.sync_stream(stream);
      if (i > 0) {
        if (curr_loss_value > (prev_loss_value - tol)) {
          // No sufficient improvement this epoch.
          n_iter_no_change_curr = n_iter_no_change_curr + 1;
          if (n_iter_no_change_curr > n_iter_no_change) {
            // ADAPTIVE: shrink the learning rate and keep training; otherwise stop.
            if (lr_type == ML::lr_type::ADAPTIVE && learning_rate > math_t(1e-6)) {
              learning_rate = learning_rate / math_t(5);
              n_iter_no_change_curr = 0;
            } else {
              break;
            }
          }
        } else {
          n_iter_no_change_curr = 0;
        }
      }
      prev_loss_value = curr_loss_value;
    }
  }
  if (fit_intercept) {
    // Un-center the data and compute the final intercept.
    GLM::postProcessData(handle,
                         input,
                         n_rows,
                         n_cols,
                         labels,
                         coef,
                         intercept,
                         mu_input.data(),
                         mu_labels.data(),
                         norm2_input.data(),
                         fit_intercept,
                         false);
  } else {
    *intercept = math_t(0);
  }
}
/**
 * Make predictions with a model fitted by sgdFit: preds = h(input * coef + intercept),
 * where h is determined by the loss function.
 * @param handle
 *        Reference of raft::handle_t
 * @param input
 *        pointer to an array in column-major format (size of n_rows, n_cols)
 * @param n_rows
 *        n_samples or rows in input
 * @param n_cols
 *        n_features or columns in X
 * @param coef
 *        pointer to an array for coefficients (size of n_cols). Calculated in cdFit function.
 * @param intercept
 *        intercept value calculated in cdFit function
 * @param preds
 *        pointer to an array for predictions (size of n_rows). Filled by this function.
 * @param loss
 *        enum selecting the loss function the model was trained with.
 * @param stream
 *        cuda stream
 */
template <typename math_t>
void sgdPredict(const raft::handle_t& handle,
                const math_t* input,
                int n_rows,
                int n_cols,
                const math_t* coef,
                math_t intercept,
                math_t* preds,
                ML::loss_funct loss,
                cudaStream_t stream)
{
  ASSERT(n_cols > 0, "Parameter n_cols: number of columns cannot be less than one");
  ASSERT(n_rows > 1, "Parameter n_rows: number of rows cannot be less than two");
  switch (loss) {
    case ML::loss_funct::SQRD_LOSS:
      Functions::linearRegH(handle, input, n_rows, n_cols, coef, preds, intercept, stream);
      break;
    case ML::loss_funct::LOG:
      Functions::logisticRegH(handle, input, n_rows, n_cols, coef, preds, intercept, stream);
      break;
    case ML::loss_funct::HINGE:
      Functions::hingeH(handle, input, n_rows, n_cols, coef, preds, intercept, stream);
      break;
    default:
      // Any other value is a no-op, matching the original if/else chain.
      break;
  }
}
/**
 * Make binary classifications with a model fitted by sgdFit: raw decision
 * values are computed with sgdPredict and then thresholded in place to {0, 1}.
 * @param handle
 *        Reference of raft::handle_t
 * @param input
 *        pointer to an array in column-major format (size of n_rows, n_cols)
 * @param n_rows
 *        n_samples or rows in input
 * @param n_cols
 *        n_features or columns in X
 * @param coef
 *        pointer to an array for coefficients (size of n_cols). Calculated in cdFit function.
 * @param intercept
 *        intercept value calculated in cdFit function
 * @param preds
 *        pointer to an array for predictions (size of n_rows). Filled by this function.
 * @param loss
 *        enum selecting the loss function the model was trained with.
 * @param stream
 *        cuda stream
 */
template <typename math_t>
void sgdPredictBinaryClass(const raft::handle_t& handle,
                           const math_t* input,
                           int n_rows,
                           int n_cols,
                           const math_t* coef,
                           math_t intercept,
                           math_t* preds,
                           ML::loss_funct loss,
                           cudaStream_t stream)
{
  sgdPredict(handle, input, n_rows, n_cols, coef, intercept, preds, loss, stream);
  // Decision boundary: 0.5 for squared/logistic outputs, 0 for hinge scores.
  bool thresholded = true;
  math_t boundary  = math_t(0);
  if (loss == ML::loss_funct::SQRD_LOSS || loss == ML::loss_funct::LOG) {
    boundary = math_t(0.5);
  } else if (loss != ML::loss_funct::HINGE) {
    // Any other loss value leaves preds untouched, as the original code did.
    thresholded = false;
  }
  if (thresholded) {
    raft::linalg::unaryOp(
      preds,
      preds,
      n_rows,
      [boundary] __device__(math_t in) { return in >= boundary ? math_t(1) : math_t(0); },
      stream);
  }
}
}; // namespace Solver
}; // namespace ML
// end namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/datasets/make_regression.cu | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/datasets/make_regression.hpp>
#include <raft/core/handle.hpp>
#include <raft/random/make_regression.cuh>
namespace ML {
namespace Datasets {
/**
 * Type-generic dispatcher for make_regression.
 * Forwards to the RAFT implementation on the handle's stream. The unused
 * cuBLAS/cuSOLVER handle lookups have been removed: RAFT obtains whatever
 * resources it needs directly from `handle`.
 */
template <typename DataT, typename IdxT>
void make_regression_helper(const raft::handle_t& handle,
                            DataT* out,
                            DataT* values,
                            IdxT n_rows,
                            IdxT n_cols,
                            IdxT n_informative,
                            DataT* coef,
                            IdxT n_targets,
                            DataT bias,
                            IdxT effective_rank,
                            DataT tail_strength,
                            DataT noise,
                            bool shuffle,
                            uint64_t seed)
{
  raft::random::make_regression(handle,
                                out,
                                values,
                                n_rows,
                                n_cols,
                                n_informative,
                                handle.get_stream(),
                                coef,
                                n_targets,
                                bias,
                                effective_rank,
                                tail_strength,
                                noise,
                                shuffle,
                                seed);
}
/// Public entry point: float data with 64-bit indexing. Thin forwarder to
/// make_regression_helper; see cuml/datasets/make_regression.hpp for docs.
void make_regression(const raft::handle_t& handle,
                     float* out,
                     float* values,
                     int64_t n_rows,
                     int64_t n_cols,
                     int64_t n_informative,
                     float* coef,
                     int64_t n_targets,
                     float bias,
                     int64_t effective_rank,
                     float tail_strength,
                     float noise,
                     bool shuffle,
                     uint64_t seed)
{
  make_regression_helper(handle,
                         out,
                         values,
                         n_rows,
                         n_cols,
                         n_informative,
                         coef,
                         n_targets,
                         bias,
                         effective_rank,
                         tail_strength,
                         noise,
                         shuffle,
                         seed);
}
/// Public entry point: double data with 64-bit indexing. Thin forwarder to
/// make_regression_helper; see cuml/datasets/make_regression.hpp for docs.
void make_regression(const raft::handle_t& handle,
                     double* out,
                     double* values,
                     int64_t n_rows,
                     int64_t n_cols,
                     int64_t n_informative,
                     double* coef,
                     int64_t n_targets,
                     double bias,
                     int64_t effective_rank,
                     double tail_strength,
                     double noise,
                     bool shuffle,
                     uint64_t seed)
{
  make_regression_helper(handle,
                         out,
                         values,
                         n_rows,
                         n_cols,
                         n_informative,
                         coef,
                         n_targets,
                         bias,
                         effective_rank,
                         tail_strength,
                         noise,
                         shuffle,
                         seed);
}
/// Public entry point: float data with 32-bit indexing. Thin forwarder to
/// make_regression_helper; see cuml/datasets/make_regression.hpp for docs.
void make_regression(const raft::handle_t& handle,
                     float* out,
                     float* values,
                     int n_rows,
                     int n_cols,
                     int n_informative,
                     float* coef,
                     int n_targets,
                     float bias,
                     int effective_rank,
                     float tail_strength,
                     float noise,
                     bool shuffle,
                     uint64_t seed)
{
  make_regression_helper(handle,
                         out,
                         values,
                         n_rows,
                         n_cols,
                         n_informative,
                         coef,
                         n_targets,
                         bias,
                         effective_rank,
                         tail_strength,
                         noise,
                         shuffle,
                         seed);
}
/// Public entry point: double data with 32-bit indexing. Thin forwarder to
/// make_regression_helper; see cuml/datasets/make_regression.hpp for docs.
void make_regression(const raft::handle_t& handle,
                     double* out,
                     double* values,
                     int n_rows,
                     int n_cols,
                     int n_informative,
                     double* coef,
                     int n_targets,
                     double bias,
                     int effective_rank,
                     double tail_strength,
                     double noise,
                     bool shuffle,
                     uint64_t seed)
{
  make_regression_helper(handle,
                         out,
                         values,
                         n_rows,
                         n_cols,
                         n_informative,
                         coef,
                         n_targets,
                         bias,
                         effective_rank,
                         tail_strength,
                         noise,
                         shuffle,
                         seed);
}
} // namespace Datasets
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/datasets/make_arima.cu | /*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/datasets/make_arima.hpp>
#include <raft/core/handle.hpp>
#include <random/make_arima.cuh>
namespace ML {
namespace Datasets {
/// Type-generic dispatcher: forwards ARIMA time-series generation to the
/// MLCommon implementation on the handle's stream. See
/// cuml/datasets/make_arima.hpp for the meaning of order/scale parameters.
template <typename DataT, typename IdxT>
inline void make_arima_helper(const raft::handle_t& handle,
                              DataT* out,
                              IdxT batch_size,
                              IdxT n_obs,
                              ARIMAOrder order,
                              DataT scale,
                              DataT noise_scale,
                              DataT intercept_scale,
                              uint64_t seed)
{
  auto stream = handle.get_stream();
  MLCommon::Random::make_arima(
    out, batch_size, n_obs, order, stream, scale, noise_scale, intercept_scale, seed);
}
/// Public entry point: single-precision ARIMA dataset generation.
/// Thin forwarder to make_arima_helper.
void make_arima(const raft::handle_t& handle,
                float* out,
                int batch_size,
                int n_obs,
                ARIMAOrder order,
                float scale,
                float noise_scale,
                float intercept_scale,
                uint64_t seed)
{
  make_arima_helper(
    handle, out, batch_size, n_obs, order, scale, noise_scale, intercept_scale, seed);
}
/// Public entry point: double-precision ARIMA dataset generation.
/// Thin forwarder to make_arima_helper.
void make_arima(const raft::handle_t& handle,
                double* out,
                int batch_size,
                int n_obs,
                ARIMAOrder order,
                double scale,
                double noise_scale,
                double intercept_scale,
                uint64_t seed)
{
  make_arima_helper(
    handle, out, batch_size, n_obs, order, scale, noise_scale, intercept_scale, seed);
}
} // namespace Datasets
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/datasets/make_blobs.cu | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/datasets/make_blobs.hpp>
#include <raft/core/handle.hpp>
#include <raft/random/make_blobs.cuh>
namespace ML {
namespace Datasets {
/// Public entry point: float data, int64 labels/indexing. Thin forwarder to
/// raft::random::make_blobs on the handle's stream; see
/// cuml/datasets/make_blobs.hpp for parameter docs.
void make_blobs(const raft::handle_t& handle,
                float* out,
                int64_t* labels,
                int64_t n_rows,
                int64_t n_cols,
                int64_t n_clusters,
                bool row_major,
                const float* centers,
                const float* cluster_std,
                const float cluster_std_scalar,
                bool shuffle,
                float center_box_min,
                float center_box_max,
                uint64_t seed)
{
  raft::random::make_blobs(out,
                           labels,
                           n_rows,
                           n_cols,
                           n_clusters,
                           handle.get_stream(),
                           row_major,
                           centers,
                           cluster_std,
                           cluster_std_scalar,
                           shuffle,
                           center_box_min,
                           center_box_max,
                           seed);
}
/// Public entry point: double data, int64 labels/indexing. Thin forwarder to
/// raft::random::make_blobs on the handle's stream; see
/// cuml/datasets/make_blobs.hpp for parameter docs.
void make_blobs(const raft::handle_t& handle,
                double* out,
                int64_t* labels,
                int64_t n_rows,
                int64_t n_cols,
                int64_t n_clusters,
                bool row_major,
                const double* centers,
                const double* cluster_std,
                const double cluster_std_scalar,
                bool shuffle,
                double center_box_min,
                double center_box_max,
                uint64_t seed)
{
  raft::random::make_blobs(out,
                           labels,
                           n_rows,
                           n_cols,
                           n_clusters,
                           handle.get_stream(),
                           row_major,
                           centers,
                           cluster_std,
                           cluster_std_scalar,
                           shuffle,
                           center_box_min,
                           center_box_max,
                           seed);
}
/// Public entry point: float data, int labels/indexing. Thin forwarder to
/// raft::random::make_blobs on the handle's stream; see
/// cuml/datasets/make_blobs.hpp for parameter docs.
void make_blobs(const raft::handle_t& handle,
                float* out,
                int* labels,
                int n_rows,
                int n_cols,
                int n_clusters,
                bool row_major,
                const float* centers,
                const float* cluster_std,
                const float cluster_std_scalar,
                bool shuffle,
                float center_box_min,
                float center_box_max,
                uint64_t seed)
{
  raft::random::make_blobs(out,
                           labels,
                           n_rows,
                           n_cols,
                           n_clusters,
                           handle.get_stream(),
                           row_major,
                           centers,
                           cluster_std,
                           cluster_std_scalar,
                           shuffle,
                           center_box_min,
                           center_box_max,
                           seed);
}
/// Public entry point: double data, int labels/indexing. Thin forwarder to
/// raft::random::make_blobs on the handle's stream; see
/// cuml/datasets/make_blobs.hpp for parameter docs.
void make_blobs(const raft::handle_t& handle,
                double* out,
                int* labels,
                int n_rows,
                int n_cols,
                int n_clusters,
                bool row_major,
                const double* centers,
                const double* cluster_std,
                const double cluster_std_scalar,
                bool shuffle,
                double center_box_min,
                double center_box_max,
                uint64_t seed)
{
  raft::random::make_blobs(out,
                           labels,
                           n_rows,
                           n_cols,
                           n_clusters,
                           handle.get_stream(),
                           row_major,
                           centers,
                           cluster_std,
                           cluster_std_scalar,
                           shuffle,
                           center_box_min,
                           center_box_max,
                           seed);
}
} // namespace Datasets
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/knn/knn_opg_common.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuml/common/logger.hpp>
#include <cuml/neighbors/knn_mg.hpp>
#include <selection/knn.cuh>
#include <cumlprims/opg/matrix/data.hpp>
#include <cumlprims/opg/matrix/part_descriptor.hpp>
#include <raft/core/comms.hpp>
#include <raft/core/handle.hpp>
#include <raft/spatial/knn/knn.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <cstddef>
#include <memory>
#include <set>
namespace ML {
namespace KNN {
namespace opg {
namespace knn_common {
/**
* The enumeration of KNN distributed operations
*/
enum knn_operation {
  knn,            /**< Simple KNN: neighbor indices and distances only, no label work */
  classification, /**< KNN classification */
  class_proba,    /**< KNN classification probabilities */
  regression      /**< KNN regression */
};
/**
* A structure to store parameters for distributed KNN
*/
template <typename in_t, typename ind_t, typename dist_t, typename out_t>
struct opg_knn_param {
  /**
   * @param knn_op type of distributed KNN operation to perform
   * @param idx_data index partitions owned by this rank
   * @param idx_desc partition descriptor for the index matrix
   * @param query_data query partitions owned by this rank
   * @param query_desc partition descriptor for the query matrix
   * @param rowMajorIndex is the index data row major?
   * @param rowMajorQuery is the query data row major?
   * @param k number of nearest neighbors
   * @param batch_size maximum number of query rows processed at once
   * @param verbose enable verbose logging
   */
  opg_knn_param(knn_operation knn_op,
                std::vector<Matrix::Data<in_t>*>* idx_data,
                Matrix::PartDescriptor* idx_desc,
                std::vector<Matrix::Data<in_t>*>* query_data,
                Matrix::PartDescriptor* query_desc,
                bool rowMajorIndex,
                bool rowMajorQuery,
                size_t k,
                size_t batch_size,
                bool verbose)
  {
    this->knn_op        = knn_op;
    this->idx_data      = idx_data;
    this->idx_desc      = idx_desc;
    this->query_data    = query_data;
    this->query_desc    = query_desc;
    this->rowMajorIndex = rowMajorIndex;
    this->rowMajorQuery = rowMajorQuery;
    this->k             = k;
    this->batch_size    = batch_size;
    this->verbose       = verbose;
  }

  knn_operation knn_op; /**< Type of KNN distributed operation */
  std::vector<Matrix::Data<dist_t>*>* out_D = nullptr;    /**< KNN distances output array */
  std::vector<Matrix::Data<ind_t>*>* out_I  = nullptr;    /**< KNN indices output array */
  std::vector<Matrix::Data<in_t>*>* idx_data = nullptr;   /**< Index input array */
  Matrix::PartDescriptor* idx_desc = nullptr;             /**< Descriptor for index input array */
  std::vector<Matrix::Data<in_t>*>* query_data = nullptr; /**< Query input array */
  Matrix::PartDescriptor* query_desc = nullptr;           /**< Descriptor for query input array */
  bool rowMajorIndex; /**< Is index row major? */
  bool rowMajorQuery; /**< Is query row major? */
  size_t k          = 0; /**< Number of nearest neighbors */
  size_t batch_size = 0; /**< Batch size */
  bool verbose;          /**< verbose */

  std::size_t n_outputs = 0; /**< Number of outputs per query (cl&re) */
  // `y` and `out` are only assigned by the KNN_RE_params / KNN_CL_params
  // subclasses; default them to nullptr (like the other pointer members) so
  // they are never read indeterminate for a plain-KNN parameter object.
  std::vector<std::vector<out_t*>>* y = nullptr;    /**< Labels input array (cl&re) */
  std::vector<Matrix::Data<out_t>*>* out = nullptr; /**< KNN outputs output array (cl&re) */
  std::vector<int>* n_unique = nullptr;       /**< Number of unique labels (classification) */
  std::vector<out_t*>* uniq_labels = nullptr; /**< Unique labels (classification) */
  std::vector<std::vector<float*>>* probas =
    nullptr; /**< KNN classification probabilities output array (class-probas) */
};
/**
 * Parameters for the plain KNN operation: adds the distance (out_D) and
 * index (out_I) output arrays on top of the common opg_knn_param fields.
 */
template <typename in_t, typename ind_t, typename dist_t, typename out_t>
struct KNN_params : public opg_knn_param<in_t, ind_t, dist_t, out_t> {
  KNN_params(knn_operation knn_op,
             std::vector<Matrix::Data<in_t>*>* idx_data,
             Matrix::PartDescriptor* idx_desc,
             std::vector<Matrix::Data<in_t>*>* query_data,
             Matrix::PartDescriptor* query_desc,
             bool rowMajorIndex,
             bool rowMajorQuery,
             size_t k,
             size_t batch_size,
             bool verbose,
             std::vector<Matrix::Data<dist_t>*>* out_D,
             std::vector<Matrix::Data<ind_t>*>* out_I)
    : opg_knn_param<in_t, ind_t, dist_t, out_t>(knn_op,
                                                idx_data,
                                                idx_desc,
                                                query_data,
                                                query_desc,
                                                rowMajorIndex,
                                                rowMajorQuery,
                                                k,
                                                batch_size,
                                                verbose)
  {
    this->out_D = out_D;
    this->out_I = out_I;
  }
};
/**
 * Parameters for KNN regression: adds the number of targets (n_outputs),
 * the label partitions (y) and the prediction output array (out).
 */
template <typename in_t, typename ind_t, typename dist_t, typename out_t>
struct KNN_RE_params : public opg_knn_param<in_t, ind_t, dist_t, out_t> {
  KNN_RE_params(knn_operation knn_op,
                std::vector<Matrix::Data<in_t>*>* idx_data,
                Matrix::PartDescriptor* idx_desc,
                std::vector<Matrix::Data<in_t>*>* query_data,
                Matrix::PartDescriptor* query_desc,
                bool rowMajorIndex,
                bool rowMajorQuery,
                size_t k,
                size_t batch_size,
                bool verbose,
                std::size_t n_outputs,
                std::vector<std::vector<out_t*>>* y,
                std::vector<Matrix::Data<out_t>*>* out)
    : opg_knn_param<in_t, ind_t, dist_t, out_t>(knn_op,
                                                idx_data,
                                                idx_desc,
                                                query_data,
                                                query_desc,
                                                rowMajorIndex,
                                                rowMajorQuery,
                                                k,
                                                batch_size,
                                                verbose)
  {
    this->n_outputs = n_outputs;
    this->y         = y;
    this->out       = out;
  }
};
/**
 * Parameters for KNN classification / class-probabilities: adds labels (y),
 * per-target unique-label metadata (n_unique, uniq_labels), the prediction
 * output (out) and the probability output arrays (probas).
 */
template <typename in_t, typename ind_t, typename dist_t, typename out_t>
struct KNN_CL_params : public opg_knn_param<in_t, ind_t, dist_t, out_t> {
  KNN_CL_params(knn_operation knn_op,
                std::vector<Matrix::Data<in_t>*>* idx_data,
                Matrix::PartDescriptor* idx_desc,
                std::vector<Matrix::Data<in_t>*>* query_data,
                Matrix::PartDescriptor* query_desc,
                bool rowMajorIndex,
                bool rowMajorQuery,
                size_t k,
                size_t batch_size,
                bool verbose,
                std::size_t n_outputs,
                std::vector<std::vector<out_t*>>* y,
                std::vector<int>* n_unique,
                std::vector<out_t*>* uniq_labels,
                std::vector<Matrix::Data<out_t>*>* out,
                std::vector<std::vector<float*>>* probas)
    : opg_knn_param<in_t, ind_t, dist_t, out_t>(knn_op,
                                                idx_data,
                                                idx_desc,
                                                query_data,
                                                query_desc,
                                                rowMajorIndex,
                                                rowMajorQuery,
                                                k,
                                                batch_size,
                                                verbose)
  {
    this->n_outputs   = n_outputs;
    this->y           = y;
    this->n_unique    = n_unique;
    this->uniq_labels = uniq_labels;
    this->out         = out;
    this->probas      = probas;
  }
};
/**
* A structure to store utilities for distributed KNN operations
*/
template <typename in_t, typename ind_t, typename dist_t, typename out_t>
struct opg_knn_work {
  // Caches the rank/partition layout from the descriptors and creates empty
  // device buffers (grown on demand later) on the handle's stream.
  opg_knn_work(opg_knn_param<in_t, ind_t, dist_t, out_t>& params, raft::handle_t& handle)
    : res_D(0, handle.get_stream()), res_I(0, handle.get_stream()), res(0, handle.get_stream())
  {
    this->my_rank           = handle.get_comms().get_rank();
    this->idxRanks          = params.idx_desc->uniqueRanks();
    this->idxPartsToRanks   = params.idx_desc->partsToRanks;
    this->local_idx_parts   = params.idx_desc->blocksOwnedBy(handle.get_comms().get_rank());
    this->queryPartsToRanks = params.query_desc->partsToRanks;
  }

  int my_rank;            /**< Rank of this worker */
  std::set<int> idxRanks; /**< Set of ranks having at least 1 index partition */
  std::vector<Matrix::RankSizePair*> idxPartsToRanks;   /**< Index parts to rank */
  std::vector<Matrix::RankSizePair*> local_idx_parts;   /**< List of index parts stored locally */
  std::vector<Matrix::RankSizePair*> queryPartsToRanks; /**< Query parts to rank */

  rmm::device_uvector<dist_t> res_D; /**< Temporary allocation to exchange distances */
  rmm::device_uvector<ind_t> res_I;  /**< Temporary allocation to exchange indices */
  rmm::device_uvector<out_t> res;    /**< Temporary allocation to exchange outputs (cl&re) */
};
/*!
Main function, computes distributed KNN operation
@param[in] params Parameters for distributed KNN operation
@param[in] handle RAFT handle
*/
template <typename in_t, typename ind_t, typename dist_t, typename out_t>
void opg_knn(opg_knn_param<in_t, ind_t, dist_t, out_t>& params, raft::handle_t& handle)
{
  opg_knn_work<in_t, ind_t, dist_t, out_t> work(params, handle);

  ASSERT(params.k <= 1024, "k must be <= 1024");
  ASSERT(params.batch_size > 0, "max_batch_size must be > 0");
  // idx_desc->M is the total number of *index* rows across all partitions;
  // the previous message incorrectly said "query rows".
  ASSERT(params.k < params.idx_desc->M, "k must be less than the total number of index rows");
  for (Matrix::RankSizePair* rsp : work.idxPartsToRanks) {
    ASSERT(rsp->size >= params.k,
           "k must be <= the number of rows in the smallest index partition.");
  }

  int local_parts_completed = 0;
  // Loop through query parts for all ranks
  for (int i = 0; i < params.query_desc->totalBlocks(); i++) {  // For each query partitions
    Matrix::RankSizePair* partition = work.queryPartsToRanks[i];
    int part_rank                   = partition->rank;
    size_t part_n_rows              = partition->size;

    size_t total_batches     = raft::ceildiv(part_n_rows, params.batch_size);
    size_t total_n_processed = 0;

    // For each batch in a query partition
    for (std::size_t cur_batch = 0; cur_batch < total_batches; cur_batch++) {
      // Last batch may be short
      size_t cur_batch_size = params.batch_size;
      if (cur_batch == total_batches - 1)
        cur_batch_size = part_n_rows - (cur_batch * params.batch_size);

      if (work.my_rank == part_rank) CUML_LOG_DEBUG("Root Rank is %d", work.my_rank);

      /**
       * Root broadcasts batch to all other ranks
       */
      CUML_LOG_DEBUG("Rank %d: Performing Broadcast", work.my_rank);

      rmm::device_uvector<in_t> part_data(0, handle.get_stream());

      size_t batch_input_elms   = cur_batch_size * params.query_desc->N;
      size_t batch_input_offset = batch_input_elms * cur_batch;

      in_t* cur_query_ptr{nullptr};

      rmm::device_uvector<in_t> tmp_batch_buf(0, handle.get_stream());
      // current partition's owner rank broadcasts
      if (part_rank == work.my_rank) {
        Matrix::Data<in_t>* data = params.query_data->at(local_parts_completed);

        // If query is column major and total_batches > 0, create a
        // temporary buffer for the batch so that we can stack rows.
        if (!params.rowMajorQuery && total_batches > 1) {
          tmp_batch_buf.resize(batch_input_elms, handle.get_stream());
          for (std::size_t col_data = 0; col_data < params.query_desc->N; col_data++) {
            raft::copy(tmp_batch_buf.data() + (col_data * cur_batch_size),
                       data->ptr + ((col_data * part_n_rows) + total_n_processed),
                       cur_batch_size,
                       handle.get_stream());
          }
          cur_query_ptr = tmp_batch_buf.data();
        } else {
          cur_query_ptr = data->ptr + batch_input_offset;
        }

        // all other (index) ranks receive
      } else if (work.idxRanks.find(work.my_rank) != work.idxRanks.end()) {
        part_data.resize(batch_input_elms, handle.get_stream());
        cur_query_ptr = part_data.data();
      }

      bool my_rank_is_idx = work.idxRanks.find(work.my_rank) != work.idxRanks.end();

      /**
       * Send query to index partitions
       */
      if (work.my_rank == part_rank || my_rank_is_idx)
        broadcast_query(work, handle, part_rank, cur_query_ptr, batch_input_elms);

      if (my_rank_is_idx) {
        /**
         * All index ranks perform local KNN
         */
        CUML_LOG_DEBUG("Rank %d: Performing Local KNN", work.my_rank);

        size_t batch_knn_elms = params.k * cur_batch_size;

        if (params.knn_op != knn_operation::knn) {
          // No labels for KNN only operation
          work.res.resize(batch_knn_elms * params.n_outputs, handle.get_stream());
        }
        work.res_I.resize(batch_knn_elms, handle.get_stream());
        work.res_D.resize(batch_knn_elms, handle.get_stream());

        // Perform a local KNN search
        perform_local_knn(params, work, handle, cur_query_ptr, cur_batch_size);

        if (params.knn_op != knn_operation::knn) {
          // Get the right labels for indices obtained after a KNN merge
          copy_label_outputs_from_index_parts(params, work, handle, cur_batch_size);
        }
      }

      if (part_rank == work.my_rank || my_rank_is_idx) {
        /**
         * Ranks exchange results.
         * Each rank having index partition(s) sends
         * its local results (my_rank_is_idx)
         * Additionally the owner of currently processed query partition
         * receives and performs a reduce even if it has
         * no index partition (part_rank == my_rank)
         */
        CUML_LOG_DEBUG("Rank %d: Exchanging results", work.my_rank);
        exchange_results(params, work, handle, part_rank, cur_batch_size);
      }

      /**
       * Root rank performs local reduce
       */
      if (part_rank == work.my_rank) {
        CUML_LOG_DEBUG("Rank %d: Performing Reduce", work.my_rank);

        // Reduce all local results to a global result for a given query batch
        reduce(params, work, handle, local_parts_completed, total_n_processed, cur_batch_size);

        CUML_LOG_DEBUG("Rank %d: Finished Reduce", work.my_rank);
      }

      total_n_processed += cur_batch_size;
    }
    if (work.my_rank == part_rank) local_parts_completed++;
  }
}
/*!
 Broadcast query batch across all the workers
 @param[in] work Work state (rank layout and buffers) for distributed KNN operation
 @param[in] handle RAFT handle
 @param[in] part_rank Rank of currently processed query batch
 @param[in] broadcast Pointer to broadcast
 @param[in] broadcast_size Size of broadcast
 */
template <typename in_t, typename ind_t, typename dist_t, typename out_t>
void broadcast_query(opg_knn_work<in_t, ind_t, dist_t, out_t>& work,
                     raft::handle_t& handle,
                     int part_rank,
                     in_t* broadcast,
                     size_t broadcast_size)
{
  int request_idx = 0;
  std::vector<raft::comms::request_t> requests;
  if (part_rank == work.my_rank) {  // Either broadcast to other workers
    // One isend per index rank, skipping ourselves if we also hold index data
    int idx_rank_size = work.idxRanks.size();
    if (work.idxRanks.find(work.my_rank) != work.idxRanks.end()) { --idx_rank_size; }

    requests.resize(idx_rank_size);

    for (int rank : work.idxRanks) {
      if (rank != work.my_rank) {
        handle.get_comms().isend(broadcast, broadcast_size, rank, 0, requests.data() + request_idx);
        ++request_idx;
      }
    }

  } else {  // Or receive from broadcaster
    requests.resize(1);
    handle.get_comms().irecv(
      broadcast, broadcast_size, part_rank, 0, requests.data() + request_idx);
    ++request_idx;
  }

  try {
    handle.get_comms().waitall(requests.size(), requests.data());
  } catch (raft::exception& e) {
    // NOTE(review): comms failures are only logged here, not propagated —
    // presumably fatal errors surface elsewhere in the comms layer; confirm.
    CUML_LOG_DEBUG("FAILURE!");
  }
}
/*!
Perform a local KNN search for a given query batch
@param[in] params Parameters for distributed KNN operation
@param[in] work Current work for distributed KNN
@param[in] handle RAFT handle
@param[in] query Pointer to query
@param[in] query_size Size of query
*/
template <typename in_t, typename ind_t, typename dist_t, typename out_t>
void perform_local_knn(opg_knn_param<in_t, ind_t, dist_t, out_t>& params,
                       opg_knn_work<in_t, ind_t, dist_t, out_t>& work,
                       raft::handle_t& handle,
                       in_t* query,
                       size_t query_size)
{
  // Collect pointers/sizes of all index partitions held by this rank
  std::vector<in_t*> ptrs(params.idx_data->size());
  std::vector<std::size_t> sizes(params.idx_data->size());

  for (std::size_t cur_idx = 0; cur_idx < params.idx_data->size(); cur_idx++) {
    ptrs[cur_idx]  = params.idx_data->at(cur_idx)->ptr;
    sizes[cur_idx] = work.local_idx_parts[cur_idx]->size;
  }

  // Offset nearest neighbor index matrix by partition indices
  std::vector<size_t> start_indices = params.idx_desc->startIndices(work.my_rank);
  // PartDescriptor uses size_t while FAISS uses int64_t
  // so we need to do a quick conversion.
  std::vector<int64_t> start_indices_long;
  for (size_t start_index : start_indices)
    start_indices_long.push_back((int64_t)start_index);

  // ID ranges need to be offset by each local partition's
  // starting indices. Results (res_I) therefore contain *global* row ids.
  // NOTE: the distance metric is hard-coded to L2SqrtExpanded.
  raft::spatial::knn::brute_force_knn<std::int64_t, float, std::size_t>(
    handle,
    ptrs,
    sizes,
    params.idx_desc->N,
    query,
    query_size,
    work.res_I.data(),
    work.res_D.data(),
    params.k,
    params.rowMajorIndex,
    params.rowMajorQuery,
    &start_indices_long,
    raft::distance::DistanceType::L2SqrtExpanded);
  handle.sync_stream(handle.get_stream());
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
/**
* This function copies the labels associated to the locally merged indices
* from the index partitions to a merged array of labels
* @param[out] out merged labels
* @param[in] knn_indices merged indices
* @param[in] parts unmerged labels in partitions
* @param[in] offsets array splitting the partitions making it possible
* to identify the origin partition of an nearest neighbor index
* @param[in] cur_batch_size current batch size
* @param[in] n_parts number of partitions
* @param[in] n_labels number of labels to write (batch_size * n_outputs)
*/
template <int TPB_X, typename ind_t, typename out_t>
__global__ void copy_label_outputs_from_index_parts_kernel(out_t* out,
                                                           ind_t* knn_indices,
                                                           out_t** parts,
                                                           uint64_t* offsets,
                                                           size_t cur_batch_size,
                                                           int n_parts,
                                                           int n_labels)
{
  // One thread per output label; launched on a 1-D grid of TPB_X-wide blocks
  // (see copy_label_outputs_from_index_parts).
  uint64_t i = (blockIdx.x * TPB_X) + threadIdx.x;
  if (i >= n_labels) return;
  uint64_t nn_idx = knn_indices[i];
  // Linear scan over partition start offsets to find the owning partition,
  // then clamp to a valid partition id.
  int part_idx    = 0;
  for (; part_idx < n_parts && nn_idx >= offsets[part_idx]; part_idx++) {}
  part_idx        = std::min(std::max(0, part_idx - 1), n_parts - 1);
  // Convert the global row id into a partition-local offset and gather.
  uint64_t offset = nn_idx - offsets[part_idx];
  out[i]          = parts[part_idx][offset];
}
/*!
Get the right labels for indices obtained after a KNN merge
@param[in] params Parameters for distributed KNN operation
@param[in] work Current work for distributed KNN
@param[in] handle RAFT handle
@param[in] batch_size Batch size
*/
template <typename in_t, typename ind_t, typename dist_t, typename out_t>
void copy_label_outputs_from_index_parts(opg_knn_param<in_t, ind_t, dist_t, out_t>& params,
                                         opg_knn_work<in_t, ind_t, dist_t, out_t>& work,
                                         raft::handle_t& handle,
                                         size_t batch_size)
{
  const int TPB_X = 256;

  int n_labels = batch_size * params.k;
  dim3 grid(raft::ceildiv(n_labels, TPB_X));
  dim3 blk(TPB_X);

  // Build, on host, the starting global row offset of each locally-owned
  // index partition (offsets advance over *all* partitions in order).
  uint64_t offset = 0;
  std::vector<uint64_t> offsets_h;
  for (auto& rsp : work.idxPartsToRanks) {
    if (rsp->rank == work.my_rank) { offsets_h.push_back(offset); }
    offset += rsp->size;
  }
  std::size_t n_parts = offsets_h.size();
  rmm::device_uvector<uint64_t> offsets_d(n_parts, handle.get_stream());
  raft::update_device(offsets_d.data(), offsets_h.data(), n_parts, handle.get_stream());

  std::vector<out_t*> parts_h(n_parts);
  rmm::device_uvector<out_t*> parts_d(n_parts, handle.get_stream());
  // One kernel launch per output/target: gather labels matching res_I into res
  for (std::size_t o = 0; o < params.n_outputs; o++) {
    for (std::size_t p = 0; p < n_parts; p++) {
      parts_h[p] = params.y->at(p)[o];
    }
    raft::update_device(parts_d.data(), parts_h.data(), n_parts, handle.get_stream());

    copy_label_outputs_from_index_parts_kernel<TPB_X, ind_t, out_t>
      <<<grid, blk, 0, handle.get_stream()>>>(work.res.data() + (o * n_labels),
                                              work.res_I.data(),
                                              parts_d.data(),
                                              offsets_d.data(),
                                              batch_size,
                                              n_parts,
                                              n_labels);
  }
  handle.sync_stream(handle.get_stream());
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
/*!
Exchange results of local KNN search and operation for a given query batch
All non-root index ranks send the results for the current
query batch to the root rank for the batch.
@param[in] params Parameters for distributed KNN operation
@param[in] work Current work for distributed KNN
@param[in] handle RAFT handle
@param[in] part_rank Rank of currently processed query batch
@param[in] batch_size Batch size
*/
template <typename in_t, typename ind_t, typename dist_t, typename out_t>
void exchange_results(opg_knn_param<in_t, ind_t, dist_t, out_t>& params,
                      opg_knn_work<in_t, ind_t, dist_t, out_t>& work,
                      raft::handle_t& handle,
                      int part_rank,
                      size_t batch_size)
{
  size_t batch_elms = batch_size * params.k;

  int request_idx = 0;
  std::vector<raft::comms::request_t> requests;
  if (part_rank != work.my_rank) {  // Either send local KNN results
    // Send order must mirror the receive order below: indices, distances,
    // then one message per output target.
    requests.resize(2);
    handle.get_comms().isend(
      work.res_I.data(), batch_elms, part_rank, 0, requests.data() + request_idx);
    ++request_idx;

    handle.get_comms().isend(
      work.res_D.data(), batch_elms, part_rank, 0, requests.data() + request_idx);
    ++request_idx;

    if (params.knn_op != knn_operation::knn) {
      requests.resize(2 + params.n_outputs);
      for (std::size_t o = 0; o < params.n_outputs; o++) {
        handle.get_comms().isend(work.res.data() + (o * batch_elms),
                                 batch_elms,
                                 part_rank,
                                 0,
                                 requests.data() + request_idx);
        ++request_idx;
      }
    }
  } else {  // Or, as the owner of currently processed query batch,
            // receive results from other workers for reduce
    bool part_rank_is_idx = work.idxRanks.find(part_rank) != work.idxRanks.end();
    size_t idx_rank_size  = work.idxRanks.size();

    // if root rank is an index, it will already have
    // query data, so no need to receive from it.
    work.res_I.resize(batch_elms * idx_rank_size, handle.get_stream());
    work.res_D.resize(batch_elms * idx_rank_size, handle.get_stream());
    if (params.knn_op != knn_operation::knn) {
      work.res.resize(batch_elms * params.n_outputs * idx_rank_size, handle.get_stream());
    }

    if (part_rank_is_idx) {
      /**
       * If this worker (in charge of reduce),
       * has some local results as well,
       * copy them at right location
       */
      --idx_rank_size;
      int i = 0;
      for (int rank : work.idxRanks) {
        if (rank == work.my_rank) {
          size_t batch_offset = batch_elms * i;

          // Indices and distances are stored in rank order
          raft::copy_async(
            work.res_I.data() + batch_offset, work.res_I.data(), batch_elms, handle.get_stream());
          raft::copy_async(
            work.res_D.data() + batch_offset, work.res_D.data(), batch_elms, handle.get_stream());

          if (params.knn_op != knn_operation::knn) {
            // Stage through a temporary: source and destination regions of
            // work.res could otherwise overlap.
            rmm::device_uvector<out_t> tmp_res(params.n_outputs * batch_elms, handle.get_stream());
            raft::copy_async(tmp_res.data(), work.res.data(), tmp_res.size(), handle.get_stream());

            for (std::size_t o = 0; o < params.n_outputs; ++o) {
              // Outputs are stored in target order and then in rank order
              raft::copy_async(
                work.res.data() + (o * work.idxRanks.size() * batch_elms) + batch_offset,
                tmp_res.data() + (o * batch_elms),
                batch_elms,
                handle.get_stream());
            }
          }
          handle.sync_stream(handle.get_stream());
          break;
        }
        i++;
      }
    }

    size_t request_size = 2 * idx_rank_size;
    if (params.knn_op != knn_operation::knn) request_size = (2 + params.n_outputs) * idx_rank_size;
    requests.resize(request_size);

    int num_received = 0;
    for (int rank : work.idxRanks) {
      if (rank != work.my_rank) {
        size_t batch_offset = batch_elms * num_received;

        // Indices and distances are stored in rank order
        handle.get_comms().irecv(
          work.res_I.data() + batch_offset, batch_elms, rank, 0, requests.data() + request_idx);
        ++request_idx;
        handle.get_comms().irecv(
          work.res_D.data() + batch_offset, batch_elms, rank, 0, requests.data() + request_idx);
        ++request_idx;

        if (params.knn_op != knn_operation::knn) {
          for (std::size_t o = 0; o < params.n_outputs; o++) {
            // Outputs are stored in target order and then in rank order
            out_t* r = work.res.data() + (o * work.idxRanks.size() * batch_elms) + batch_offset;
            handle.get_comms().irecv(r, batch_elms, rank, 0, requests.data() + request_idx);
            ++request_idx;
          }
        }
      }
      if (rank != work.my_rank || part_rank_is_idx) {
        /**
         * Increase index for each new reception
         * Also increase index when the worker doing a reduce operation
         * has some index data (previously copied at right location).
         */
        ++num_received;
      }
    }
  }
  try {
    handle.get_comms().waitall(requests.size(), requests.data());
  } catch (raft::exception& e) {
    // NOTE(review): failures are only logged at debug level, not propagated;
    // confirm the comms layer surfaces fatal errors through another path.
    CUML_LOG_DEBUG("FAILURE!");
  }
}
/*!
Reduce all local results to a global result for a given query batch
@param[in] params Parameters for distributed KNN operation
@param[in] work Current work for distributed KNN
@param[in] handle RAFT handle
@param[in] part_idx Partition index of query batch
@param[in] processed_in_part Number of queries already processed in part (serves as offset)
@param[in] batch_size Batch size
*/
template <typename in_t,
          typename ind_t,
          typename dist_t,
          typename out_t,
          typename trans_t = int64_t>
void reduce(opg_knn_param<in_t, ind_t, dist_t, out_t>& params,
            opg_knn_work<in_t, ind_t, dist_t, out_t>& work,
            raft::handle_t& handle,
            int part_idx,
            size_t processed_in_part,
            size_t batch_size)
{
  // Per-rank translation offsets for knn_merge_parts: all zeros, since res_I
  // already holds global row ids (offset during perform_local_knn).
  rmm::device_uvector<trans_t> trans(work.idxRanks.size(), handle.get_stream());
  RAFT_CUDA_TRY(
    cudaMemsetAsync(trans.data(), 0, work.idxRanks.size() * sizeof(trans_t), handle.get_stream()));

  size_t batch_offset = processed_in_part * params.k;

  ind_t* indices    = nullptr;
  dist_t* distances = nullptr;

  rmm::device_uvector<ind_t> indices_b(0, handle.get_stream());
  rmm::device_uvector<dist_t> distances_b(0, handle.get_stream());

  if (params.knn_op == knn_operation::knn) {
    // Plain KNN: merge straight into the user-provided output arrays
    indices   = params.out_I->at(part_idx)->ptr + batch_offset;
    distances = params.out_D->at(part_idx)->ptr + batch_offset;
  } else {
    // cl&re: merge into temporaries; final outputs are produced further below
    indices_b.resize(batch_size * params.k, handle.get_stream());
    distances_b.resize(batch_size * params.k, handle.get_stream());
    indices   = indices_b.data();
    distances = distances_b.data();
  }

  // Merge all KNN local results
  raft::spatial::knn::knn_merge_parts(work.res_D.data(),
                                      work.res_I.data(),
                                      distances,
                                      indices,
                                      batch_size,
                                      work.idxRanks.size(),
                                      params.k,
                                      handle.get_stream(),
                                      trans.data());
  handle.sync_stream(handle.get_stream());
  RAFT_CUDA_TRY(cudaPeekAtLastError());

  if (params.knn_op != knn_operation::knn) {
    rmm::device_uvector<out_t> merged_outputs_b(params.n_outputs * batch_size * params.k,
                                                handle.get_stream());
    // Get the right labels for indices obtained after local KNN searches
    merge_labels(params,
                 work,
                 handle,
                 merged_outputs_b.data(),
                 indices,
                 work.res.data(),
                 work.res_I.data(),
                 batch_size);

    out_t* outputs = nullptr;
    std::vector<float*> probas_with_offsets;

    if (params.knn_op != knn_operation::class_proba) {
      // classification / regression write predictions into params.out
      outputs = params.out->at(part_idx)->ptr + (processed_in_part * params.n_outputs);
    } else {
      // class-probas write one probability array per target
      std::vector<float*>& probas_part = params.probas->at(part_idx);
      for (std::size_t i = 0; i < params.n_outputs; i++) {
        float* ptr           = probas_part[i];
        int n_unique_classes = params.n_unique->at(i);
        probas_with_offsets.push_back(ptr + (processed_in_part * n_unique_classes));
      }
    }

    // Perform final classification, regression or class-proba operation
    perform_local_operation(
      params, work, handle, outputs, probas_with_offsets, merged_outputs_b.data(), batch_size);
    handle.sync_stream(handle.get_stream());
    RAFT_CUDA_TRY(cudaPeekAtLastError());
  }
}
/**
* This function copies the labels associated to the merged indices
* from the unmerged to a merged (n_ranks times smaller) array of labels
* @param[out] outputs merged labels
* @param[in] knn_indices merged indices
* @param[in] unmerged_outputs unmerged labels
* @param[in] unmerged_knn_indices unmerged indices
* @param[in] offsets array splitting the partitions making it possible
* to identify the origin partition of an nearest neighbor index
* @param[in] parts_to_ranks get rank index from index partition index,
* informative to find positions as the unmerged arrays are built
* so that ranks are in order (unlike partitions)
* @param[in] nearest_neighbors number of nearest neighbors to look for in query
* @param[in] n_outputs number of targets
* @param[in] n_labels number of labels to write (batch_size * n_outputs)
* @param[in] n_parts number of index partitions
* @param[in] n_ranks number of index ranks
*/
// Maps each merged KNN index back to its label(s).
// One thread per merged (query, neighbor) pair; launched with a 1D grid of
// TPB_X-wide blocks covering n_labels threads.
// NOTE(review): despite its name, the `dist_t` template parameter here carries
// the *index* type (the host-side caller instantiates it with ind_t).
template <int TPB_X, typename dist_t, typename out_t>
__global__ void merge_labels_kernel(out_t* outputs,
                                    dist_t* knn_indices,
                                    out_t* unmerged_outputs,
                                    dist_t* unmerged_knn_indices,
                                    size_t* offsets,
                                    int* parts_to_ranks,
                                    int nearest_neighbors,
                                    int n_outputs,
                                    int n_labels,
                                    int n_parts,
                                    int n_ranks)
{
  uint64_t i = (blockIdx.x * TPB_X) + threadIdx.x;
  if (i >= n_labels) return;  // tail guard: grid may overshoot n_labels
  uint64_t nn_idx = knn_indices[i];
  // Linear scan over the (small) partition-offset table to find which index
  // partition this neighbor index falls into.
  int part_idx = 0;
  for (; part_idx < n_parts && nn_idx >= offsets[part_idx]; part_idx++) {}
  // The loop overshoots by one; clamp into [0, n_parts - 1].
  part_idx = std::min(std::max(0, part_idx - 1), n_parts - 1);
  int rank_idx = parts_to_ranks[part_idx];
  // Row of the query batch this thread's neighbor belongs to.
  int inbatch_idx = i / nearest_neighbors;
  // Start of the unmerged neighbor list produced by rank `rank_idx` for this
  // query row (unmerged arrays are laid out rank-major).
  uint64_t elm_idx = (rank_idx * n_labels) + inbatch_idx * nearest_neighbors;
  // Find the matching unmerged index and copy its label for every output.
  for (int k = 0; k < nearest_neighbors; k++) {
    if (nn_idx == unmerged_knn_indices[elm_idx + k]) {
      for (int o = 0; o < n_outputs; o++) {
        outputs[(o * n_labels) + i] = unmerged_outputs[(o * n_ranks * n_labels) + elm_idx + k];
      }
      return;
    }
  }
}
/*!
Get the right labels for indices obtained after local KNN searches
@param[in] params Parameters for distributed KNN operation
@param[in] work Current work for distributed KNN
@param[in] handle RAFT handle
  @param[out] output merged labels output array
  @param[in] knn_indices merged KNN indices
  @param[in] unmerged_outputs unmerged labels input array
  @param[in] unmerged_knn_indices unmerged KNN indices
  @param[in] batch_size Batch size
*/
template <typename opg_knn_param_t, typename opg_knn_work_t, typename ind_t, typename out_t>
void merge_labels(opg_knn_param_t& params,
                  opg_knn_work_t& work,
                  raft::handle_t& handle,
                  out_t* output,
                  ind_t* knn_indices,
                  out_t* unmerged_outputs,
                  ind_t* unmerged_knn_indices,
                  int batch_size)
{
  const int TPB_X = 256;
  int n_labels = batch_size * params.k;
  dim3 grid(raft::ceildiv(n_labels, TPB_X));
  dim3 blk(TPB_X);

  // Build the per-partition start offsets. Use a 64-bit accumulator: the
  // total number of index rows across all partitions can exceed INT_MAX, and
  // the previous `int` accumulator silently truncated in that case (the
  // destination vector is already uint64_t).
  uint64_t offset = 0;
  std::vector<uint64_t> offsets_h;
  for (auto& rsp : work.idxPartsToRanks) {
    offsets_h.push_back(offset);
    offset += rsp->size;
  }
  rmm::device_uvector<uint64_t> offsets_d(offsets_h.size(), handle.get_stream());
  raft::update_device(offsets_d.data(), offsets_h.data(), offsets_h.size(), handle.get_stream());

  // For every index partition, record the position of its owning rank within
  // work.idxRanks (unmerged buffers are laid out in rank order).
  std::vector<int> parts_to_ranks_h;
  for (auto& rsp : work.idxPartsToRanks) {
    int i = 0;
    for (int rank : work.idxRanks) {
      if (rank == rsp->rank) { parts_to_ranks_h.push_back(i); }
      ++i;
    }
  }
  rmm::device_uvector<int> parts_to_ranks_d(parts_to_ranks_h.size(), handle.get_stream());
  raft::update_device(
    parts_to_ranks_d.data(), parts_to_ranks_h.data(), parts_to_ranks_h.size(), handle.get_stream());

  merge_labels_kernel<TPB_X><<<grid, blk, 0, handle.get_stream()>>>(output,
                                                                    knn_indices,
                                                                    unmerged_outputs,
                                                                    unmerged_knn_indices,
                                                                    offsets_d.data(),
                                                                    parts_to_ranks_d.data(),
                                                                    params.k,
                                                                    params.n_outputs,
                                                                    n_labels,
                                                                    work.idxPartsToRanks.size(),
                                                                    work.idxRanks.size());
  // Surface launch-configuration errors immediately; execution errors are
  // caught at the caller's next synchronizing call.
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
/*!
Perform final classification, regression or class-proba operation for a given query batch
@param[in] params Parameters for distributed KNN operation
@param[in] work Current work for distributed KNN
@param[in] handle RAFT handle
@param[out] outputs KNN outputs output array
@param[out] probas_with_offsets KNN class-probas output array (class-proba only)
@param[in] labels KNN labels input array
@param[in] batch_size Batch size
*/
/*!
 Final regression step for a query batch: average the k neighbor labels.
 Enabled only for floating-point output types.
 @param[in]  params              Parameters for distributed KNN operation
 @param[in]  work                Current work for distributed KNN (unused here)
 @param[in]  handle              RAFT handle
 @param[out] outputs             regression results, one value per output target
 @param[out] probas_with_offsets unused in the regression path
 @param[in]  labels              flat buffer of neighbor labels, one slice per target
 @param[in]  batch_size          number of query rows in this batch
 */
template <typename in_t,
          typename ind_t,
          typename dist_t,
          typename out_t,
          typename std::enable_if<std::is_floating_point<out_t>::value>::type* = nullptr>
void perform_local_operation(opg_knn_param<in_t, ind_t, dist_t, out_t>& params,
                             opg_knn_work<in_t, ind_t, dist_t, out_t>& work,
                             raft::handle_t& handle,
                             out_t* outputs,
                             std::vector<float*>& probas_with_offsets,
                             out_t* labels,
                             size_t batch_size)
{
  const size_t n_labels = batch_size * params.k;
  // Each output target occupies a contiguous n_labels-wide slice of `labels`.
  std::vector<out_t*> label_slices;
  label_slices.reserve(params.n_outputs);
  for (std::size_t o = 0; o < params.n_outputs; o++) {
    label_slices.push_back(labels + o * n_labels);
  }
  MLCommon::Selection::knn_regress<float, 32, true>(
    handle, outputs, nullptr, label_slices, n_labels, batch_size, params.k);
}
/*!
Perform final classification, regression or class-proba operation for a given query batch
@param[in] params Parameters for distributed KNN operation
@param[in] work Current work for distributed KNN
@param[in] handle RAFT handle
@param[out] outputs KNN outputs output array
@param[out] probas_with_offsets KNN class-probas output array (class-proba only)
@param[in] labels KNN labels input array
@param[in] batch_size Batch size
*/
/*!
 Final classification / class-probability step for a query batch.
 Enabled only for integral output (label) types.
 @param[in]  params              Parameters for distributed KNN operation
 @param[in]  work                Current work for distributed KNN (unused here)
 @param[in]  handle              RAFT handle
 @param[out] outputs             predicted classes (classification only)
 @param[out] probas_with_offsets per-target probability buffers (class-proba only)
 @param[in]  labels              flat buffer of neighbor labels, one slice per target
 @param[in]  batch_size          number of query rows in this batch
 */
template <typename in_t,
          typename ind_t,
          typename dist_t,
          typename out_t,
          typename std::enable_if<std::is_integral<out_t>::value>::type* = nullptr>
void perform_local_operation(opg_knn_param<in_t, ind_t, dist_t, out_t>& params,
                             opg_knn_work<in_t, ind_t, dist_t, out_t>& work,
                             raft::handle_t& handle,
                             out_t* outputs,
                             std::vector<float*>& probas_with_offsets,
                             out_t* labels,
                             size_t batch_size)
{
  const size_t n_labels = batch_size * params.k;
  // One pointer per output target into the flat label buffer.
  std::vector<out_t*> label_slices(params.n_outputs);
  for (std::size_t o = 0; o < params.n_outputs; o++) {
    label_slices[o] = labels + o * n_labels;
  }

  if (params.knn_op == knn_operation::classification) {
    MLCommon::Selection::knn_classify<32, true>(handle,
                                                outputs,
                                                nullptr,
                                                label_slices,
                                                n_labels,
                                                batch_size,
                                                params.k,
                                                *(params.uniq_labels),
                                                *(params.n_unique));
  } else if (params.knn_op == knn_operation::class_proba) {
    MLCommon::Selection::class_probs<32, true>(handle,
                                               probas_with_offsets,
                                               nullptr,
                                               label_slices,
                                               n_labels,
                                               batch_size,
                                               params.k,
                                               *(params.uniq_labels),
                                               *(params.n_unique));
  } else {
    // Unexpected operation for an integral output type.
    CUML_LOG_DEBUG("FAILURE!");
  }
}
}; // namespace knn_common
}; // namespace opg
}; // namespace KNN
}; // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/knn/knn_regress_mg.cu | /*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "knn_opg_common.cuh"
namespace ML {
namespace KNN {
namespace opg {
using namespace knn_common;
template struct KNN_RE_params<float, int64_t, float, float>;
// Multi-GPU KNN regression entry point: packs all arguments into the
// regression parameter struct and runs the generic opg KNN driver.
void knn_regress(raft::handle_t& handle,
                 std::vector<Matrix::Data<float>*>* out,
                 std::vector<Matrix::floatData_t*>& idx_data,
                 Matrix::PartDescriptor& idx_desc,
                 std::vector<Matrix::floatData_t*>& query_data,
                 Matrix::PartDescriptor& query_desc,
                 std::vector<std::vector<float*>>& y,
                 bool rowMajorIndex,
                 bool rowMajorQuery,
                 int k,
                 int n_outputs,
                 size_t batch_size,
                 bool verbose)
{
  KNN_RE_params<float, int64_t, float, float> re_params(knn_operation::regression,
                                                        &idx_data,
                                                        &idx_desc,
                                                        &query_data,
                                                        &query_desc,
                                                        rowMajorIndex,
                                                        rowMajorQuery,
                                                        k,
                                                        batch_size,
                                                        verbose,
                                                        n_outputs,
                                                        &y,
                                                        out);
  opg_knn(re_params, handle);
}
}; // namespace opg
}; // namespace KNN
}; // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/knn/knn_mg.cu | /*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "knn_opg_common.cuh"
namespace ML {
namespace KNN {
namespace opg {
using namespace knn_common;
template struct KNN_params<float, int64_t, float, int>;
// Multi-GPU brute-force KNN entry point: packs all arguments into the KNN
// parameter struct and runs the generic opg KNN driver.
void knn(raft::handle_t& handle,
         std::vector<Matrix::Data<int64_t>*>* out_I,
         std::vector<Matrix::floatData_t*>* out_D,
         std::vector<Matrix::floatData_t*>& idx_data,
         Matrix::PartDescriptor& idx_desc,
         std::vector<Matrix::floatData_t*>& query_data,
         Matrix::PartDescriptor& query_desc,
         bool rowMajorIndex,
         bool rowMajorQuery,
         int k,
         size_t batch_size,
         bool verbose)
{
  KNN_params<float, int64_t, float, int> knn_params(knn_operation::knn,
                                                    &idx_data,
                                                    &idx_desc,
                                                    &query_data,
                                                    &query_desc,
                                                    rowMajorIndex,
                                                    rowMajorQuery,
                                                    k,
                                                    batch_size,
                                                    verbose,
                                                    out_D,
                                                    out_I);
  opg_knn(knn_params, handle);
}
}; // namespace opg
}; // namespace KNN
}; // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/knn/knn_sparse.cu | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/neighbors/knn_sparse.hpp>
#include <raft/core/handle.hpp>
#include <raft/sparse/selection/knn.cuh>
namespace ML {
namespace Sparse {
// Brute-force KNN over CSR sparse inputs. Thin pass-through to
// raft::sparse::selection::brute_force_knn; all buffers are device pointers
// owned by the caller. Outputs are written to output_indices/output_dists
// (n_query_rows * k each).
void brute_force_knn(raft::handle_t& handle,
                     const int* idx_indptr,
                     const int* idx_indices,
                     const float* idx_data,
                     size_t idx_nnz,
                     int n_idx_rows,
                     int n_idx_cols,
                     const int* query_indptr,
                     const int* query_indices,
                     const float* query_data,
                     size_t query_nnz,
                     int n_query_rows,
                     int n_query_cols,
                     int* output_indices,
                     float* output_dists,
                     int k,
                     size_t batch_size_index,  // approx 1M
                     size_t batch_size_query,
                     raft::distance::DistanceType metric,
                     float metricArg)
{
  raft::sparse::selection::brute_force_knn(idx_indptr,
                                           idx_indices,
                                           idx_data,
                                           idx_nnz,
                                           n_idx_rows,
                                           n_idx_cols,
                                           query_indptr,
                                           query_indices,
                                           query_data,
                                           query_nnz,
                                           n_query_rows,
                                           n_query_cols,
                                           output_indices,
                                           output_dists,
                                           k,
                                           handle,
                                           batch_size_index,
                                           batch_size_query,
                                           metric,
                                           metricArg);
}
}; // namespace Sparse
}; // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/knn/knn_classify_mg.cu | /*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "knn_opg_common.cuh"
namespace ML {
namespace KNN {
namespace opg {
using namespace knn_common;
template struct KNN_CL_params<float, int64_t, float, int>;
// Multi-GPU KNN classification entry point. When probas_only is set, the
// driver computes per-class probabilities instead of hard class predictions.
void knn_classify(raft::handle_t& handle,
                  std::vector<Matrix::Data<int>*>* out,
                  std::vector<std::vector<float*>>* probas,
                  std::vector<Matrix::floatData_t*>& idx_data,
                  Matrix::PartDescriptor& idx_desc,
                  std::vector<Matrix::floatData_t*>& query_data,
                  Matrix::PartDescriptor& query_desc,
                  std::vector<std::vector<int*>>& y,
                  std::vector<int*>& uniq_labels,
                  std::vector<int>& n_unique,
                  bool rowMajorIndex,
                  bool rowMajorQuery,
                  bool probas_only,
                  int k,
                  size_t batch_size,
                  bool verbose)
{
  knn_operation operation;
  if (probas_only) {
    operation = knn_operation::class_proba;
  } else {
    operation = knn_operation::classification;
  }
  KNN_CL_params<float, int64_t, float, int> cl_params(operation,
                                                      &idx_data,
                                                      &idx_desc,
                                                      &query_data,
                                                      &query_desc,
                                                      rowMajorIndex,
                                                      rowMajorQuery,
                                                      k,
                                                      batch_size,
                                                      verbose,
                                                      n_unique.size(),
                                                      &y,
                                                      &n_unique,
                                                      &uniq_labels,
                                                      out,
                                                      probas);
  opg_knn(cl_params, handle);
}
}; // namespace opg
}; // namespace KNN
}; // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/knn/knn.cu | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuda_runtime.h>
#include <raft/core/handle.hpp>
#include <raft/label/classlabels.cuh>
#include <raft/spatial/knn/ann.cuh>
#include <raft/spatial/knn/ball_cover.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/spatial/knn/knn.cuh>
#include <rmm/device_uvector.hpp>
#include <cuml/common/logger.hpp>
#include <cuml/neighbors/knn.hpp>
#include <ml_mg_utils.cuh>
#include <selection/knn.cuh>
#include <cstddef>
#include <sstream>
#include <vector>
namespace ML {
// Brute-force KNN over one or more dense index partitions. Thin pass-through
// to raft's brute_force_knn; `input` and `sizes` describe the partitions and
// must have the same length. Results for all partitions are merged into
// res_I/res_D (n * k each).
void brute_force_knn(const raft::handle_t& handle,
                     std::vector<float*>& input,
                     std::vector<int>& sizes,
                     int D,
                     float* search_items,
                     int n,
                     int64_t* res_I,
                     float* res_D,
                     int k,
                     bool rowMajorIndex,
                     bool rowMajorQuery,
                     raft::distance::DistanceType metric,
                     float metric_arg)
{
  ASSERT(input.size() == sizes.size(), "input and sizes vectors must be the same size");
  // nullptr: no index translation table needed for the merged results.
  raft::spatial::knn::brute_force_knn<int64_t, float, int>(handle,
                                                           input,
                                                           sizes,
                                                           D,
                                                           search_items,
                                                           n,
                                                           res_I,
                                                           res_D,
                                                           k,
                                                           rowMajorIndex,
                                                           rowMajorQuery,
                                                           nullptr,
                                                           metric,
                                                           metric_arg);
}
// Builds the random-ball-cover index in place. Pass-through to raft.
void rbc_build_index(const raft::handle_t& handle,
                     raft::spatial::knn::BallCoverIndex<int64_t, float, uint32_t>& index)
{
  raft::spatial::knn::rbc_build_index(handle, index);
}
// Queries a previously built random-ball-cover index for the k nearest
// neighbors of each of the n_search_items query rows. Pass-through to raft;
// out_inds/out_dists must hold n_search_items * k entries.
void rbc_knn_query(const raft::handle_t& handle,
                   raft::spatial::knn::BallCoverIndex<int64_t, float, uint32_t>& index,
                   uint32_t k,
                   const float* search_items,
                   uint32_t n_search_items,
                   int64_t* out_inds,
                   float* out_dists)
{
  raft::spatial::knn::rbc_knn_query(
    handle, index, k, search_items, n_search_items, out_inds, out_dists);
}
// Builds an approximate-nearest-neighbor index (IVF variants) over the
// n x D device array index_array. Pass-through to raft.
void approx_knn_build_index(raft::handle_t& handle,
                            raft::spatial::knn::knnIndex* index,
                            raft::spatial::knn::knnIndexParam* params,
                            raft::distance::DistanceType metric,
                            float metricArg,
                            float* index_array,
                            int n,
                            int D)
{
  raft::spatial::knn::approx_knn_build_index(
    handle, index, params, metric, metricArg, index_array, n, D);
}
// Searches a previously built approximate index for the k nearest neighbors
// of each of the n query rows. Pass-through to raft; distances/indices must
// hold n * k entries.
void approx_knn_search(raft::handle_t& handle,
                       float* distances,
                       int64_t* indices,
                       raft::spatial::knn::knnIndex* index,
                       int k,
                       float* query_array,
                       int n)
{
  raft::spatial::knn::approx_knn_search(handle, distances, indices, index, k, query_array, n);
}
// Single-GPU KNN classification: extracts the unique labels for every output
// target, then performs a majority vote over the precomputed knn_indices.
void knn_classify(raft::handle_t& handle,
                  int* out,
                  int64_t* knn_indices,
                  std::vector<int*>& y,
                  size_t n_index_rows,
                  size_t n_query_rows,
                  int k)
{
  cudaStream_t stream = handle.get_stream();

  // unique_buffers owns the device storage referenced by uniq_labels.
  std::vector<rmm::device_uvector<int>> unique_buffers;
  std::vector<int*> uniq_labels(y.size());
  std::vector<int> n_unique(y.size());
  for (std::size_t i = 0; i < y.size(); i++) {
    unique_buffers.emplace_back(0, stream);
    n_unique[i] = raft::label::getUniquelabels(unique_buffers.back(), y[i], n_index_rows, stream);
    uniq_labels[i] = unique_buffers.back().data();
  }

  MLCommon::Selection::knn_classify(
    handle, out, knn_indices, y, n_index_rows, n_query_rows, k, uniq_labels, n_unique);
}
// Single-GPU KNN regression: averages neighbor labels over the precomputed
// knn_indices. Pass-through to MLCommon::Selection::knn_regress.
void knn_regress(raft::handle_t& handle,
                 float* out,
                 int64_t* knn_indices,
                 std::vector<float*>& y,
                 size_t n_index_rows,
                 size_t n_query_rows,
                 int k)
{
  MLCommon::Selection::knn_regress(handle, out, knn_indices, y, n_index_rows, n_query_rows, k);
}
// Single-GPU KNN class-probability computation: extracts the unique labels
// for every output target, then computes per-class vote fractions over the
// precomputed knn_indices.
void knn_class_proba(raft::handle_t& handle,
                     std::vector<float*>& out,
                     int64_t* knn_indices,
                     std::vector<int*>& y,
                     size_t n_index_rows,
                     size_t n_query_rows,
                     int k)
{
  cudaStream_t stream = handle.get_stream();

  // unique_buffers owns the device storage referenced by uniq_labels.
  std::vector<rmm::device_uvector<int>> unique_buffers;
  std::vector<int*> uniq_labels(y.size());
  std::vector<int> n_unique(y.size());
  for (std::size_t i = 0; i < y.size(); i++) {
    unique_buffers.emplace_back(0, stream);
    n_unique[i] = raft::label::getUniquelabels(unique_buffers.back(), y[i], n_index_rows, stream);
    uniq_labels[i] = unique_buffers.back().data();
  }

  MLCommon::Selection::class_probs(
    handle, out, knn_indices, y, n_index_rows, n_query_rows, k, uniq_labels, n_unique);
}
}; // END NAMESPACE ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/knn/knn_api.cpp | /*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/neighbors/knn_api.h>
#include <common/cumlHandle.hpp>
#include <cuml/neighbors/knn.hpp>
#include <vector>
extern "C" {
namespace ML {
/**
* @brief Flat C API function to perform a brute force knn on a series of input
* arrays and combine the results into a single output array for indexes and
* distances.
*
* @param[in] handle the cuml handle to use
* @param[in] input an array of pointers to the input arrays
* @param[in] sizes an array of sizes of input arrays
* @param[in] n_params array size of input and sizes
* @param[in] D the dimensionality of the arrays
* @param[in] search_items array of items to search of dimensionality D
* @param[in] n number of rows in search_items
* @param[out] res_I the resulting index array of size n * k
* @param[out] res_D the resulting distance array of size n * k
* @param[in] k the number of nearest neighbors to return
* @param[in] rowMajorIndex is the index array in row major layout?
* @param[in] rowMajorQuery is the query array in row major layout?
* @param[in] metric_type distance metric to use. Specify the metric using the
* integer value of the enum `ML::MetricType`.
* @param[in] metric_arg the value of `p` for Minkowski (l-p) distances. This
* is ignored if the metric_type is not Minkowski.
* @param[in] expanded should lp-based distances be returned in their expanded
* form (e.g., without raising to the 1/p power).
*/
cumlError_t knn_search(const cumlHandle_t handle,
                       float** input,
                       int* sizes,
                       int n_params,
                       int D,
                       float* search_items,
                       int n,
                       int64_t* res_I,
                       float* res_D,
                       int k,
                       bool rowMajorIndex,
                       bool rowMajorQuery,
                       int metric_type,
                       float metric_arg,
                       bool expanded)
{
  cumlError_t status;
  raft::handle_t* handle_ptr;
  std::tie(handle_ptr, status) = ML::handleMap.lookupHandlePointer(handle);

  raft::distance::DistanceType metric_distance_type =
    static_cast<raft::distance::DistanceType>(metric_type);

  // BUG FIX: the vectors were previously constructed with n_params elements
  // and then push_back'ed, producing 2*n_params entries whose first half were
  // null pointers / zero sizes. Reserve instead, so exactly the n_params
  // caller-provided arrays are forwarded.
  std::vector<float*> input_vec;
  std::vector<int> sizes_vec;
  input_vec.reserve(n_params);
  sizes_vec.reserve(n_params);
  for (int i = 0; i < n_params; i++) {
    input_vec.push_back(input[i]);
    sizes_vec.push_back(sizes[i]);
  }

  // NOTE(review): `expanded` is accepted for API compatibility but is not
  // forwarded; the C++ overload no longer takes it.
  if (status == CUML_SUCCESS) {
    try {
      ML::brute_force_knn(*handle_ptr,
                          input_vec,
                          sizes_vec,
                          D,
                          search_items,
                          n,
                          res_I,
                          res_D,
                          k,
                          rowMajorIndex,
                          rowMajorQuery,
                          metric_distance_type,
                          metric_arg);
    } catch (...) {
      status = CUML_ERROR_UNKNOWN;
    }
  }
  return status;
}
}; // END NAMESPACE ML
}
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/hierarchy/linkage.cu | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/cluster/linkage.hpp>
#include <raft/cluster/single_linkage.cuh>
#include <raft/core/handle.hpp>
namespace ML {
// Single-linkage hierarchical clustering using full pairwise distances.
// Pass-through to raft; `out` receives the dendrogram and flat labels.
// The `0` argument is the neighbor-expansion factor `c`, unused in
// PAIRWISE mode.
void single_linkage_pairwise(const raft::handle_t& handle,
                             const float* X,
                             size_t m,
                             size_t n,
                             raft::cluster::linkage_output<int>* out,
                             raft::distance::DistanceType metric,
                             int n_clusters)
{
  raft::cluster::single_linkage<int, float, raft::cluster::LinkageDistance::PAIRWISE>(
    handle, X, m, n, metric, out, 0, n_clusters);
}
// Single-linkage hierarchical clustering over a KNN graph (memory-efficient
// variant). `c` controls the neighborhood expansion used to keep the graph
// connected. Pass-through to raft.
void single_linkage_neighbors(const raft::handle_t& handle,
                              const float* X,
                              size_t m,
                              size_t n,
                              raft::cluster::linkage_output<int>* out,
                              raft::distance::DistanceType metric,
                              int c,
                              int n_clusters)
{
  raft::cluster::single_linkage<int, float, raft::cluster::LinkageDistance::KNN_GRAPH>(
    handle, X, m, n, metric, out, c, n_clusters);
}
// Empty derived structs that force instantiation of the raft
// distance_graph_impl templates for the <int, float> and <int, double>
// combinations in this translation unit.
struct distance_graph_impl_int_float
  : public raft::cluster::detail::
      distance_graph_impl<raft::cluster::LinkageDistance::PAIRWISE, int, float> {};
struct distance_graph_impl_int_double
  : public raft::cluster::detail::
      distance_graph_impl<raft::cluster::LinkageDistance::PAIRWISE, int, double> {};
}; // end namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/svm/svm_api.cpp | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/svm/svm_api.h>
#include <common/cumlHandle.hpp>
#include <cuml/svm/svc.hpp>
#include <raft/distance/distance_types.hpp>
#include <tuple>
extern "C" {
// C API wrapper: fit a single-precision SVM classifier.
// Rebuilds the C++ parameter structs from the flat C arguments, runs svcFit,
// and copies the resulting model fields into the caller's output pointers.
// The returned buffers (dual_coefs, x_support, support_idx, unique_labels)
// are device allocations owned by cuML.
cumlError_t cumlSpSvcFit(cumlHandle_t handle,
                         float* input,
                         int n_rows,
                         int n_cols,
                         float* labels,
                         float C,
                         float cache_size,
                         int max_iter,
                         int nochange_steps,
                         float tol,
                         int verbosity,
                         cumlSvmKernelType kernel,
                         int degree,
                         float gamma,
                         float coef0,
                         int* n_support,
                         float* b,
                         float** dual_coefs,
                         float** x_support,
                         int** support_idx,
                         int* n_classes,
                         float** unique_labels)
{
  // Solver parameters.
  ML::SVM::SvmParameter param;
  param.C = C;
  param.cache_size = cache_size;
  param.max_iter = max_iter;
  param.nochange_steps = nochange_steps;
  param.tol = tol;
  param.verbosity = verbosity;
  // Kernel description.
  raft::distance::kernels::KernelParams kernel_param;
  kernel_param.kernel = (raft::distance::kernels::KernelType)kernel;
  kernel_param.degree = degree;
  kernel_param.gamma = gamma;
  kernel_param.coef0 = coef0;
  ML::SVM::SvmModel<float> model;
  cumlError_t status;
  raft::handle_t* handle_ptr;
  std::tie(handle_ptr, status) = ML::handleMap.lookupHandlePointer(handle);
  if (status == CUML_SUCCESS) {
    try {
      // nullptr: no sample weights.
      ML::SVM::svcFit(*handle_ptr,
                      input,
                      n_rows,
                      n_cols,
                      labels,
                      param,
                      kernel_param,
                      model,
                      static_cast<float*>(nullptr));
      // Copy the fitted model fields out to the C caller.
      *n_support = model.n_support;
      *b = model.b;
      *dual_coefs = model.dual_coefs;
      *x_support = model.support_matrix.data;
      *support_idx = model.support_idx;
      *n_classes = model.n_classes;
      *unique_labels = model.unique_labels;
    }
    // TODO: Implement this
    // catch (const MLCommon::Exception& e)
    //{
    //  //log e.what()?
    //  status = e.getErrorCode();
    //}
    catch (...) {
      status = CUML_ERROR_UNKNOWN;
    }
  }
  return status;
}
// C API wrapper: fit a double-precision SVM classifier.
// Mirrors cumlSpSvcFit with double inputs/outputs; see that function for the
// ownership notes on the returned buffers.
cumlError_t cumlDpSvcFit(cumlHandle_t handle,
                         double* input,
                         int n_rows,
                         int n_cols,
                         double* labels,
                         double C,
                         double cache_size,
                         int max_iter,
                         int nochange_steps,
                         double tol,
                         int verbosity,
                         cumlSvmKernelType kernel,
                         int degree,
                         double gamma,
                         double coef0,
                         int* n_support,
                         double* b,
                         double** dual_coefs,
                         double** x_support,
                         int** support_idx,
                         int* n_classes,
                         double** unique_labels)
{
  // Solver parameters.
  ML::SVM::SvmParameter param;
  param.C = C;
  param.cache_size = cache_size;
  param.max_iter = max_iter;
  param.nochange_steps = nochange_steps;
  param.tol = tol;
  param.verbosity = verbosity;
  // Kernel description.
  raft::distance::kernels::KernelParams kernel_param;
  kernel_param.kernel = (raft::distance::kernels::KernelType)kernel;
  kernel_param.degree = degree;
  kernel_param.gamma = gamma;
  kernel_param.coef0 = coef0;
  ML::SVM::SvmModel<double> model;
  cumlError_t status;
  raft::handle_t* handle_ptr;
  std::tie(handle_ptr, status) = ML::handleMap.lookupHandlePointer(handle);
  if (status == CUML_SUCCESS) {
    try {
      // nullptr: no sample weights.
      ML::SVM::svcFit(*handle_ptr,
                      input,
                      n_rows,
                      n_cols,
                      labels,
                      param,
                      kernel_param,
                      model,
                      static_cast<double*>(nullptr));
      // Copy the fitted model fields out to the C caller.
      *n_support = model.n_support;
      *b = model.b;
      *dual_coefs = model.dual_coefs;
      *x_support = model.support_matrix.data;
      *support_idx = model.support_idx;
      *n_classes = model.n_classes;
      *unique_labels = model.unique_labels;
    }
    // TODO: Implement this
    // catch (const MLCommon::Exception& e)
    //{
    //  //log e.what()?
    //  status = e.getErrorCode();
    //}
    catch (...) {
      status = CUML_ERROR_UNKNOWN;
    }
  }
  return status;
}
// C API wrapper: predict with a single-precision SVM classifier.
// Reassembles the kernel description and the trained model from the flat C
// arguments (the caller keeps ownership of all buffers) and runs svcPredict.
cumlError_t cumlSpSvcPredict(cumlHandle_t handle,
                             float* input,
                             int n_rows,
                             int n_cols,
                             cumlSvmKernelType kernel,
                             int degree,
                             float gamma,
                             float coef0,
                             int n_support,
                             float b,
                             float* dual_coefs,
                             float* x_support,
                             int n_classes,
                             float* unique_labels,
                             float* preds,
                             float buffer_size,
                             int predict_class)
{
  raft::distance::kernels::KernelParams kernel_param;
  kernel_param.kernel = static_cast<raft::distance::kernels::KernelType>(kernel);
  kernel_param.degree = degree;
  kernel_param.gamma = gamma;
  kernel_param.coef0 = coef0;

  ML::SVM::SvmModel<float> model;
  model.n_support = n_support;
  model.b = b;
  model.dual_coefs = dual_coefs;
  model.support_matrix = {.data = x_support};
  model.support_idx = nullptr;
  model.n_classes = n_classes;
  model.unique_labels = unique_labels;

  cumlError_t status;
  raft::handle_t* handle_ptr;
  std::tie(handle_ptr, status) = ML::handleMap.lookupHandlePointer(handle);
  if (status != CUML_SUCCESS) { return status; }

  try {
    ML::SVM::svcPredict(
      *handle_ptr, input, n_rows, n_cols, kernel_param, model, preds, buffer_size, predict_class);
  }
  // TODO: propagate specific error codes once a cuML exception type exists.
  catch (...) {
    status = CUML_ERROR_UNKNOWN;
  }
  return status;
}
// C API wrapper: predict with a double-precision SVM classifier.
// Reassembles the kernel description and the trained model from the flat C
// arguments (the caller keeps ownership of all buffers) and runs svcPredict.
cumlError_t cumlDpSvcPredict(cumlHandle_t handle,
                             double* input,
                             int n_rows,
                             int n_cols,
                             cumlSvmKernelType kernel,
                             int degree,
                             double gamma,
                             double coef0,
                             int n_support,
                             double b,
                             double* dual_coefs,
                             double* x_support,
                             int n_classes,
                             double* unique_labels,
                             double* preds,
                             double buffer_size,
                             int predict_class)
{
  raft::distance::kernels::KernelParams kernel_param;
  kernel_param.kernel = static_cast<raft::distance::kernels::KernelType>(kernel);
  kernel_param.degree = degree;
  kernel_param.gamma = gamma;
  kernel_param.coef0 = coef0;

  ML::SVM::SvmModel<double> model;
  model.n_support = n_support;
  model.b = b;
  model.dual_coefs = dual_coefs;
  model.support_matrix = {.data = x_support};
  model.support_idx = nullptr;
  model.n_classes = n_classes;
  model.unique_labels = unique_labels;

  cumlError_t status;
  raft::handle_t* handle_ptr;
  std::tie(handle_ptr, status) = ML::handleMap.lookupHandlePointer(handle);
  if (status != CUML_SUCCESS) { return status; }

  try {
    ML::SVM::svcPredict(
      *handle_ptr, input, n_rows, n_cols, kernel_param, model, preds, buffer_size, predict_class);
  }
  // TODO: propagate specific error codes once a cuML exception type exists.
  catch (...) {
    status = CUML_ERROR_UNKNOWN;
  }
  return status;
}
}
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/svm/ws_util.cu | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cub/cub.cuh>
#include <limits.h>
#include <raft/util/cuda_utils.cuh>
namespace ML {
namespace SVM {
/**
 * Marks the rows whose indices appear in `idx` as unavailable for selection.
 * Launch with a 1D grid covering at least n_selected threads; extra threads
 * fall through the guard.
 */
__global__ void set_unavailable(bool* available, int n_rows, const int* idx, int n_selected)
{
  const int tid = blockDim.x * blockIdx.x + threadIdx.x;
  if (tid >= n_selected) return;
  available[idx[tid]] = false;
}
/**
 * For every newly selected index, if it was already in the working set,
 * bumps its priority by one. Launch with a 1D grid covering at least
 * n_selected threads.
 */
__global__ void update_priority(int* new_priority,
                                int n_selected,
                                const int* new_idx,
                                int n_ws,
                                const int* idx,
                                const int* priority)
{
  const int tid = blockDim.x * blockIdx.x + threadIdx.x;
  if (tid >= n_selected) return;
  const int target = new_idx[tid];
  // The working set is small (~1024 elements), so a linear scan suffices.
  for (int i = 0; i < n_ws; i++) {
    if (idx[i] == target) { new_priority[tid] = priority[i] + 1; }
  }
}
} // namespace SVM
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/svm/svr_impl.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
/** @file svr_impl.cuh
* @brief Implementation of the stateless C++ functions to fit an SVM regressor.
*/
#include "kernelcache.cuh"
#include "smosolver.cuh"
#include "svc_impl.cuh"

#include <cublas_v2.h>

#include <cuml/svm/svm_model.h>
#include <cuml/svm/svm_parameter.h>

#include <raft/core/handle.hpp>
#include <raft/distance/kernels.cuh>
#include <raft/linalg/unary_op.cuh>

#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/iterator/counting_iterator.h>

#include <iostream>
#include <memory>
namespace ML {
namespace SVM {
/**
 * Fit an SVM regressor on a dense or CSR matrix view.
 * @param handle        RAFT handle
 * @param matrix        input matrix view (dense strided or CSR)
 * @param n_rows        number of rows (> 0)
 * @param n_cols        number of columns (> 0)
 * @param y             device array of n_rows target values
 * @param param         SVM solver parameters
 * @param kernel_params kernel description
 * @param model         output: fitted model (buffers allocated by the solver)
 * @param sample_weight optional device array of n_rows sample weights, may be null
 */
template <typename math_t, typename MatrixViewType>
void svrFitX(const raft::handle_t& handle,
             MatrixViewType matrix,
             int n_rows,
             int n_cols,
             math_t* y,
             const SvmParameter& param,
             raft::distance::kernels::KernelParams& kernel_params,
             SvmModel<math_t>& model,
             const math_t* sample_weight)
{
  ASSERT(n_cols > 0, "Parameter n_cols: number of columns cannot be less than one");
  ASSERT(n_rows > 0, "Parameter n_rows: number of rows cannot be less than one");

  // KernelCache could use multiple streams, not implemented currently
  // See Issue #948.
  // ML::detail::streamSyncer _(handle_impl.getImpl());
  const raft::handle_t& handle_impl = handle;

  // Own the kernel through unique_ptr so it is released even if Solve throws;
  // the previous raw `delete` leaked the kernel on exceptions.
  std::unique_ptr<raft::distance::kernels::GramMatrixBase<math_t>> kernel(
    raft::distance::kernels::KernelFactory<math_t>::create(kernel_params));

  SmoSolver<math_t> smo(handle_impl, param, kernel_params.kernel, kernel.get());
  smo.Solve(matrix,
            n_rows,
            n_cols,
            y,
            sample_weight,
            &(model.dual_coefs),
            &(model.n_support),
            &(model.support_matrix),
            &(model.support_idx),
            &(model.b),
            param.max_iter);
  model.n_cols = n_cols;
}
/**
 * @brief Fit an epsilon-SVR model on dense, column-major training data.
 *
 * Wraps the raw device pointer into a strided matrix view and defers to the
 * generic implementation svrFitX. No data is copied here.
 *
 * @param [in] handle cuML handle
 * @param [in] X device pointer to column-major training data, size [n_rows x n_cols]
 * @param [in] n_rows number of training vectors
 * @param [in] n_cols number of features
 * @param [in] y device array of target values, size [n_rows]
 * @param [in] param SVM training parameters
 * @param [in] kernel_params parameters of the kernel function
 * @param [out] model trained SVM model
 * @param [in] sample_weight optional device array of sample weights (nullptr if unused)
 */
template <typename math_t>
void svrFit(const raft::handle_t& handle,
            math_t* X,
            int n_rows,
            int n_cols,
            math_t* y,
            const SvmParameter& param,
            raft::distance::kernels::KernelParams& kernel_params,
            SvmModel<math_t>& model,
            const math_t* sample_weight)
{
  auto X_view = raft::make_device_strided_matrix_view<math_t, int, raft::layout_f_contiguous>(
    X, n_rows, n_cols, 0);
  svrFitX(handle, X_view, n_rows, n_cols, y, param, kernel_params, model, sample_weight);
}
/**
 * @brief Fit an epsilon-SVR model on sparse (CSR) training data.
 *
 * Assembles a CSR matrix view over the caller-owned buffers and defers to the
 * generic implementation svrFitX. No data is copied here.
 *
 * @param [in] handle cuML handle
 * @param [in] indptr CSR row offsets, size [n_rows + 1]
 * @param [in] indices CSR column indices, size [nnz]
 * @param [in] data CSR values, size [nnz]
 * @param [in] n_rows number of training vectors
 * @param [in] n_cols number of features
 * @param [in] nnz number of stored non-zeros
 * @param [in] y device array of target values, size [n_rows]
 * @param [in] param SVM training parameters
 * @param [in] kernel_params parameters of the kernel function
 * @param [out] model trained SVM model
 * @param [in] sample_weight optional device array of sample weights (nullptr if unused)
 */
template <typename math_t>
void svrFitSparse(const raft::handle_t& handle,
                  int* indptr,
                  int* indices,
                  math_t* data,
                  int n_rows,
                  int n_cols,
                  int nnz,
                  math_t* y,
                  const SvmParameter& param,
                  raft::distance::kernels::KernelParams& kernel_params,
                  SvmModel<math_t>& model,
                  const math_t* sample_weight)
{
  auto structure = raft::make_device_compressed_structure_view<int, int, int>(
    indptr, indices, n_rows, n_cols, nnz);
  svrFitX(handle,
          raft::make_device_csr_matrix_view(data, structure),
          n_rows,
          n_cols,
          y,
          param,
          kernel_params,
          model,
          sample_weight);
}
}; // end namespace SVM
}; // end namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/svm/results.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <iostream>
#include <limits>
#include <math.h>
#include <memory>
#include "sparse_util.cuh"
#include "ws_util.cuh"
#include <cub/device/device_select.cuh>
#include <cuml/svm/svm_model.h>
#include <raft/core/handle.hpp>
#include <raft/linalg/add.cuh>
#include <raft/linalg/init.cuh>
#include <raft/linalg/map_then_reduce.cuh>
#include <raft/linalg/unary_op.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
namespace ML {
namespace SVM {
/**
 * @brief Evaluate a unary predicate on each element of alpha and store the result.
 *
 * One thread per element; threads past the end simply return. Launch with at
 * least ceildiv(n, blockDim.x) blocks so every element is covered.
 *
 * @param [out] flag device array of predicate results, size [n]
 * @param [in] alpha device input array, size [n]
 * @param [in] n number of elements
 * @param [in] op device lambda mapping math_t -> bool
 */
template <typename math_t, typename Lambda>
__global__ void set_flag(bool* flag, const math_t* alpha, int n, Lambda op)
{
  const int idx = blockDim.x * blockIdx.x + threadIdx.x;
  if (idx >= n) return;
  flag[idx] = op(alpha[idx]);
}
template <typename math_t, typename MatrixViewType>
class Results {
 public:
  /**
   * Helper class to collect the parameters of the SVC/SVR model after it is
   * fitted using SMO.
   *
   * All device work is issued on the stream taken from the handle; several
   * methods sync that stream before returning host-side counts.
   *
   * @param handle cuML handle implementation
   * @param matrix training vectors in matrix format
   * @param n_rows number of training vectors
   * @param n_cols number of features
   * @param y target labels (values +/-1), size [n_train]
   * @param C penalty parameter array, size [n_train]
   * @param svmType type of the SVM problem (C_SVC or EPSILON_SVR)
   */
  Results(const raft::handle_t& handle,
          MatrixViewType matrix,
          int n_rows,
          int n_cols,
          const math_t* y,
          const math_t* C,
          SvmType svmType)
    // Members are initialized in declaration order: `stream` is declared (and
    // therefore initialized) before every buffer that is constructed with it.
    : rmm_alloc(rmm::mr::get_current_device_resource()),
      stream(handle.get_stream()),
      handle(handle),
      n_rows(n_rows),
      n_cols(n_cols),
      matrix(matrix),
      y(y),
      C(C),
      svmType(svmType),
      n_train(svmType == EPSILON_SVR ? n_rows * 2 : n_rows),
      cub_storage(0, stream),
      d_num_selected(stream),
      d_val_reduced(stream),
      f_idx(n_train, stream),
      idx_selected(n_train, stream),
      val_selected(n_train, stream),
      val_tmp(n_train, stream),
      flag(n_train, stream)
  {
    InitCubBuffers();
    // f_idx = [0, 1, ..., n_train-1]; used as the value array when selecting
    // support vector indices.
    raft::linalg::range(f_idx.data(), n_train, stream);
    RAFT_CUDA_TRY(cudaPeekAtLastError());
  }

  /**
   * Collect the parameters found during training.
   *
   * After fitting, the non-zero dual coefs, the corresponding support vectors,
   * and the constant b represent the parameters of the support vector classifier.
   *
   * On entry the output arrays should not be allocated.
   * All output arrays will be allocated on the device.
   * Note that b is not an array but a host scalar.
   *
   * @param [in] alpha dual coefficients, size [n_train]
   * @param [in] f optimality indicator vector, size [n_train]
   * @param [out] dual_coefs size [n_support]
   * @param [out] n_support number of support vectors
   * @param [out] idx the original training set indices of the support vectors, size [n_support]
   * @param [out] support_matrix support vector storage (dense or CSR), size [n_support, n_cols]
   * @param [out] b scalar constant in the decision function
   */
  void Get(const math_t* alpha,
           const math_t* f,
           math_t** dual_coefs,
           int* n_support,
           int** idx,
           SupportStorage<math_t>* support_matrix,
           math_t* b)
  {
    CombineCoefs(alpha, val_tmp.data());
    GetDualCoefs(val_tmp.data(), dual_coefs, n_support);
    *b = CalcB(alpha, f, *n_support);
    if (*n_support > 0) {
      *idx            = GetSupportVectorIndices(val_tmp.data(), *n_support);
      *support_matrix = CollectSupportVectorMatrix(*idx, *n_support);
    } else {
      // NOTE(review): GetDualCoefs already performed a zero-byte allocation in
      // this case; the pointer is dropped here without deallocation.
      *dual_coefs     = nullptr;
      *idx            = nullptr;
      *support_matrix = {};
    }
    // Make sure that all pending GPU calculations finished before we return
    handle.sync_stream(stream);
  }

  /**
   * Collect support vectors into a matrix storage.
   *
   * Dense inputs are always stored densely. Sparse inputs are densified only
   * while the dense representation stays below ~1 GiB; otherwise the support
   * vectors are extracted into CSR storage.
   *
   * @param [in] idx indices of support vectors, size [n_support]
   * @param [in] n_support number of support vectors
   * @return newly allocated storage holding the support vectors,
   *   size [n_support * n_cols] when dense
   */
  SupportStorage<math_t> CollectSupportVectorMatrix(const int* idx, int n_support)
  {
    SupportStorage<math_t> support_matrix;
    // allow ~1GB dense support matrix
    if (isDenseType<MatrixViewType>() ||
        ((size_t)n_support * n_cols * sizeof(math_t) < (1 << 30))) {
      support_matrix.data =
        (math_t*)rmm_alloc->allocate(n_support * n_cols * sizeof(math_t), stream);
      ML::SVM::extractRows<math_t>(matrix, support_matrix.data, idx, n_support, handle);
    } else {
      ML::SVM::extractRows<math_t>(matrix,
                                   &(support_matrix.indptr),
                                   &(support_matrix.indices),
                                   &(support_matrix.data),
                                   &(support_matrix.nnz),
                                   idx,
                                   n_support,
                                   handle);
    }
    return support_matrix;
  }

  /**
   * @brief Combine alpha parameters and labels to get SVM coefficients.
   *
   * The output coefficients are the values that can be used directly
   * to calculate the decision function:
   * \f[ f(\bm(x)) = \sum_{i=1}^{n_rows} coef_i K(\bm{x}_i,\bm{x}) + b. \f]
   *
   * Here coefs includes coefficients with zero value.
   *
   * For a classifier, \f$ coef_i = y_i * \alpha_i (i \in [0..n-1])\f$,
   * For a regressor \f$ coef_i = y_i * \alpha_i + y_{i+n/2} * alpha_{i+n/2},
   * (i \in [0..n/2-1]) \f$
   *
   * @param [in] alpha device array of dual coefficients, size [n_train]
   * @param [out] coef device array of SVM coefficients; only the first n_rows
   *   entries are meaningful after the SVR fold-down
   */
  void CombineCoefs(const math_t* alpha, math_t* coef)
  {
    // Calculate dual coefficients = alpha * y
    raft::linalg::binaryOp(
      coef, alpha, y, n_train, [] __device__(math_t a, math_t y) { return a * y; }, stream);
    if (svmType == EPSILON_SVR) {
      // for regression the final coefficients are
      // coef[0..n-rows-1] = alpha[0..nrows-1] - alpha[nrows..2*n_rows-1]
      // (the sign flip comes from the -1 labels of the second half)
      raft::linalg::add(coef, coef, coef + n_rows, n_rows, stream);
    }
  }

  /** Return non zero dual coefficients.
   *
   * Only the first n_rows combined coefficients are scanned (see CombineCoefs).
   *
   * @param [in] val_tmp device pointer with dual coefficients
   * @param [out] dual_coefs device pointer of non-zero dual coefficients,
   *   unallocated on entry, on exit size [n_support]
   * @param [out] n_support number of support vectors
   */
  void GetDualCoefs(const math_t* val_tmp, math_t** dual_coefs, int* n_support)
  {
    // Return only the non-zero coefficients
    auto select_op = [] __device__(math_t a) { return 0 != a; };
    *n_support     = SelectByCoef(val_tmp, n_rows, val_tmp, select_op, val_selected.data());
    *dual_coefs    = (math_t*)rmm_alloc->allocate(*n_support * sizeof(math_t), stream);
    raft::copy(*dual_coefs, val_selected.data(), *n_support, stream);
    handle.sync_stream(stream);
  }

  /**
   * Flag support vectors and also collect their indices.
   * Support vectors are the vectors where alpha > 0 (coef != 0).
   *
   * @param [in] coef dual coefficients, size [n_rows]
   * @param [in] n_support number of support vectors
   * @return device array with the indices of the support vectors, size [n_support]
   */
  int* GetSupportVectorIndices(const math_t* coef, int n_support)
  {
    auto select_op = [] __device__(math_t a) -> bool { return 0 != a; };
    SelectByCoef(coef, n_rows, f_idx.data(), select_op, idx_selected.data());
    int* idx = (int*)rmm_alloc->allocate(n_support * sizeof(int), stream);
    raft::copy(idx, idx_selected.data(), n_support, stream);
    return idx;
  }

  /**
   * Calculate the b constant in the decision function.
   *
   * @param [in] alpha dual coefficients, size [n_train]
   * @param [in] f optimality indicator vector, size [n_train]
   * @param [in] n_support number of support vectors
   * @return the value of b
   */
  math_t CalcB(const math_t* alpha, const math_t* f, int n_support)
  {
    if (n_support == 0) {
      // Degenerate case: no support vectors, fall back to the mean of -f.
      math_t f_sum;
      cub::DeviceReduce::Sum(
        cub_storage.data(), cub_bytes, f, d_val_reduced.data(), n_train, stream);
      raft::update_host(&f_sum, d_val_reduced.data(), 1, stream);
      return -f_sum / n_train;
    }
    // We know that for an unbound support vector i, the decision function
    // (before taking the sign) has value F(x_i) = y_i, where
    // F(x_i) = \sum_j y_j \alpha_j K(x_j, x_i) + b, and j runs through all
    // support vectors. The constant b can be expressed from these formulas.
    // Note that F and f denote different quantities. The lower case f is the
    // optimality indicator vector defined as
    // f_i = - y_i + \sum_j y_j \alpha_j K(x_j, x_i).
    // For unbound support vectors f_i = -b.

    // Select f for unbound support vectors (0 < alpha < C)
    int n_free = SelectUnboundSV(alpha, n_train, f, val_selected.data());
    if (n_free > 0) {
      cub::DeviceReduce::Sum(
        cub_storage.data(), cub_bytes, val_selected.data(), d_val_reduced.data(), n_free, stream);
      math_t sum;
      raft::update_host(&sum, d_val_reduced.data(), 1, stream);
      return -sum / n_free;
    } else {
      // All support vectors are bound. Let's define
      // b_up = min {f_i | i \in I_upper} and
      // b_low = max {f_i | i \in I_lower}
      // Any value in the interval [b_low, b_up] would be allowable for b,
      // we will select in the middle point b = -(b_low + b_up)/2
      math_t b_up  = SelectReduce(alpha, f, true, set_upper);
      math_t b_low = SelectReduce(alpha, f, false, set_lower);
      return -(b_up + b_low) / 2;
    }
  }

  /**
   * @brief Select values for unbound support vectors (not bound by C).
   * @tparam valType type of values that will be selected
   * @param [in] alpha dual coefficients, size [n]
   * @param [in] n number of dual coefficients
   * @param [in] val values to filter, size [n]
   * @param [out] out buffer size [n]
   * @return number of selected elements
   */
  template <typename valType>
  int SelectUnboundSV(const math_t* alpha, int n, const valType* val, valType* out)
  {
    // Unbound: 0 < alpha_i < C_i (element-wise penalty)
    auto select = [] __device__(math_t a, math_t C) -> bool { return 0 < a && a < C; };
    raft::linalg::binaryOp(flag.data(), alpha, C, n, select, stream);
    cub::DeviceSelect::Flagged(
      cub_storage.data(), cub_bytes, val, flag.data(), out, d_num_selected.data(), n, stream);
    int n_selected;
    raft::update_host(&n_selected, d_num_selected.data(), 1, stream);
    handle.sync_stream(stream);
    return n_selected;
  }

  // Memory resource used for the output allocations handed back to the caller.
  rmm::mr::device_memory_resource* rmm_alloc;

 private:
  const raft::handle_t& handle;
  cudaStream_t stream;

  int n_rows;             //!< number of rows in the training vector matrix
  int n_cols;             //!< number of features
  MatrixViewType matrix;  //!< training vector matrix
  const math_t* y;        //!< labels
  const math_t* C;        //!< penalty parameter
  SvmType svmType;        //!< SVM problem type: SVC or SVR
  int n_train;            //!< number of training vectors (including duplicates for SVR)

  const int TPB = 256;  // threads per block

  // Temporary variables used by cub in GetResults
  rmm::device_scalar<int> d_num_selected;
  rmm::device_scalar<math_t> d_val_reduced;
  rmm::device_uvector<char> cub_storage;
  size_t cub_bytes = 0;

  // Helper arrays for collecting the results
  rmm::device_uvector<int> f_idx;
  rmm::device_uvector<int> idx_selected;
  rmm::device_uvector<math_t> val_selected;
  rmm::device_uvector<math_t> val_tmp;
  rmm::device_uvector<bool> flag;

  /* Allocate cub temporary buffers for GetResults.
   *
   * A single shared buffer is sized to the maximum workspace requirement of
   * all select/reduce operations issued later.
   */
  void InitCubBuffers()
  {
    size_t cub_bytes2 = 0;
    // Query the size of required workspace buffer
    math_t* p = nullptr;
    cub::DeviceSelect::Flagged(NULL,
                               cub_bytes,
                               f_idx.data(),
                               flag.data(),
                               f_idx.data(),
                               d_num_selected.data(),
                               n_train,
                               stream);
    cub::DeviceSelect::Flagged(
      NULL, cub_bytes2, p, flag.data(), p, d_num_selected.data(), n_train, stream);
    cub_bytes = max(cub_bytes, cub_bytes2);
    cub::DeviceReduce::Sum(
      NULL, cub_bytes2, val_selected.data(), d_val_reduced.data(), n_train, stream);
    cub_bytes = max(cub_bytes, cub_bytes2);
    cub::DeviceReduce::Min(
      NULL, cub_bytes2, val_selected.data(), d_val_reduced.data(), n_train, stream);
    cub_bytes = max(cub_bytes, cub_bytes2);
    cub_storage.resize(cub_bytes, stream);
  }

  /**
   * Filter values based on the corresponding coefficient values.
   * @tparam select_op lambda selection criteria
   * @tparam valType type of values that will be selected
   * @param [in] coef dual coefficients, size [n]
   * @param [in] n number of dual coefficients
   * @param [in] val values to filter, size [n]
   * @param [out] out buffer size [n]
   * @return number of selected elements
   */
  template <typename select_op, typename valType>
  int SelectByCoef(const math_t* coef, int n, const valType* val, select_op op, valType* out)
  {
    set_flag<<<raft::ceildiv(n, TPB), TPB, 0, stream>>>(flag.data(), coef, n, op);
    RAFT_CUDA_TRY(cudaPeekAtLastError());
    cub::DeviceSelect::Flagged(
      cub_storage.data(), cub_bytes, val, flag.data(), out, d_num_selected.data(), n, stream);
    int n_selected;
    raft::update_host(&n_selected, d_num_selected.data(), 1, stream);
    handle.sync_stream(stream);
    return n_selected;
  }

  /** Select values from f, and do a min or max reduction on them.
   *
   * @param [in] alpha dual coefficients, size [n_train]
   * @param [in] f optimality indicator vector, size [n_train]
   * @param [in] min whether to reduce with min (true) or max (false)
   * @param [in] flag_op kernel that flags values for selection
   *   (presumably set_upper/set_lower from smo_sets.cuh — defined outside this file)
   * @return the reduced value
   */
  math_t SelectReduce(const math_t* alpha,
                      const math_t* f,
                      bool min,
                      void (*flag_op)(bool*, int, const math_t*, const math_t*, const math_t*))
  {
    flag_op<<<raft::ceildiv(n_train, TPB), TPB, 0, stream>>>(flag.data(), n_train, alpha, y, C);
    RAFT_CUDA_TRY(cudaPeekAtLastError());
    cub::DeviceSelect::Flagged(cub_storage.data(),
                               cub_bytes,
                               f,
                               flag.data(),
                               val_selected.data(),
                               d_num_selected.data(),
                               n_train,
                               stream);
    int n_selected;
    raft::update_host(&n_selected, d_num_selected.data(), 1, stream);
    handle.sync_stream(stream);
    math_t res = 0;
    ASSERT(n_selected > 0,
           "Incorrect training: cannot calculate the constant in the decision "
           "function");
    if (min) {
      cub::DeviceReduce::Min(cub_storage.data(),
                             cub_bytes,
                             val_selected.data(),
                             d_val_reduced.data(),
                             n_selected,
                             stream);
    } else {
      cub::DeviceReduce::Max(cub_storage.data(),
                             cub_bytes,
                             val_selected.data(),
                             d_val_reduced.data(),
                             n_selected,
                             stream);
    }
    raft::update_host(&res, d_val_reduced.data(), 1, stream);
    return res;
  }
};  // class Results
}; // namespace SVM
}; // end namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/svm/smosolver.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuml/common/logger.hpp>
// #TODO: Replace with public header when ready
#include <raft/linalg/detail/cublas_wrappers.hpp>
#include <raft/linalg/gemv.cuh>
#include <raft/linalg/unary_op.cuh>
#include <iostream>
#include <limits>
#include <raft/core/handle.hpp>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <string>
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/fill.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/sequence.h>
#include <type_traits>
#include "kernelcache.cuh"
#include "smo_sets.cuh"
#include "smoblocksolve.cuh"
#include "workingset.cuh"
#include "ws_util.cuh"
#include <raft/distance/distance_types.hpp>
#include <raft/distance/kernels.cuh>
#include <raft/linalg/gemv.cuh>
#include <raft/linalg/unary_op.cuh>
#include <raft/sparse/linalg/norm.cuh>
#include "results.cuh"
#include <cassert>
#include <sstream>
#include <string>
#include <chrono>
#include <cstdlib>
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
namespace ML {
namespace SVM {
/**
* @brief Solve the quadratic optimization problem using two level decomposition
* and Sequential Minimal Optimization (SMO).
*
* The general decomposition idea by Osuna is to choose q examples from all the
* training examples, and solve the QP problem for this subset (discussed in
* section 11.2 by Joachims [1]). SMO is the extreme case where we choose q=2.
*
* Here we follow [2] and [3] and use two level decomposition. First we set
* q_1=1024, and solve the QP sub-problem for that (let's call it QP1). This is
* the outer iteration, implemented in SmoSolver::Solve.
*
* To solve QP1, we use another decomposition, specifically the SMO (q_2 = 2),
* which is implemented in SmoBlockSolve.
*
* References:
* - [1] Joachims, T. Making large-scale support vector machine learning
* practical. In B. Scholkopf, C. Burges, & A. Smola (Eds.), Advances in
* kernel methods: Support vector machines. Cambridge, MA: MIT Press (1998)
* - [2] J. Vanek et al. A GPU-Architecture Optimized Hierarchical Decomposition
* Algorithm for Support VectorMachine Training, IEEE Transactions on
* Parallel and Distributed Systems, vol 28, no 12, 3330, (2017)
* - [3] Z. Wen et al. ThunderSVM: A Fast SVM Library on GPUs and CPUs, Journal
* of Machine Learning Research, 19, 1-5 (2018)
*/
template <typename math_t>
class SmoSolver {
public:
SmoSolver(const raft::handle_t& handle,
SvmParameter param,
raft::distance::kernels::KernelType kernel_type,
raft::distance::kernels::GramMatrixBase<math_t>* kernel)
: handle(handle),
C(param.C),
tol(param.tol),
kernel(kernel),
kernel_type(kernel_type),
cache_size(param.cache_size),
nochange_steps(param.nochange_steps),
epsilon(param.epsilon),
svmType(param.svmType),
stream(handle.get_stream()),
return_buff(2, stream),
alpha(0, stream),
C_vec(0, stream),
delta_alpha(0, stream),
f(0, stream),
y_label(0, stream)
{
ML::Logger::get().setLevel(param.verbosity);
}
void GetNonzeroDeltaAlpha(const math_t* vec,
int n_ws,
const int* idx,
math_t* nz_vec,
int* n_nz,
int* nz_idx,
cudaStream_t stream)
{
thrust::device_ptr<math_t> vec_ptr(const_cast<math_t*>(vec));
thrust::device_ptr<math_t> nz_vec_ptr(nz_vec);
thrust::device_ptr<int> idx_ptr(const_cast<int*>(idx));
thrust::device_ptr<int> nz_idx_ptr(nz_idx);
auto nonzero = [] __device__(math_t a) { return a != 0; };
thrust::device_ptr<int> nz_end = thrust::copy_if(
thrust::cuda::par.on(stream), idx_ptr, idx_ptr + n_ws, vec_ptr, nz_idx_ptr, nonzero);
*n_nz = nz_end - nz_idx_ptr;
thrust::copy_if(thrust::cuda::par.on(stream), vec_ptr, vec_ptr + n_ws, nz_vec_ptr, nonzero);
}
#define SMO_WS_SIZE 1024
/**
* @brief Solve the quadratic optimization problem.
*
* The output arrays (dual_coefs, support_matrix, idx) will be allocated on the
* device, they should be unallocated on entry.
*
* @param [in] matrix training vectors in matrix format(MLCommon::Matrix::Matrix),
* size [n_rows x * n_cols]
* @param [in] n_rows number of rows (training vectors)
* @param [in] n_cols number of columns (features)
* @param [in] y labels (values +/-1), size [n_rows]
* @param [in] sample_weight device array of sample weights (or nullptr if not
* applicable)
* @param [out] dual_coefs size [n_support] on exit
* @param [out] n_support number of support vectors
* @param [out] support_matrix support vectors in matrix format, size [n_support, n_cols]
* @param [out] idx the original training set indices of the support vectors, size [n_support]
* @param [out] b scalar constant for the decision function
* @param [in] max_outer_iter maximum number of outer iteration (default 100 * n_rows)
* @param [in] max_inner_iter maximum number of inner iterations (default 10000)
*/
template <typename MatrixViewType>
void Solve(MatrixViewType matrix,
int n_rows,
int n_cols,
math_t* y,
const math_t* sample_weight,
math_t** dual_coefs,
int* n_support,
SupportStorage<math_t>* support_matrix,
int** idx,
math_t* b,
int max_outer_iter = -1,
int max_inner_iter = 10000)
{
// Prepare data structures for SMO
WorkingSet<math_t> ws(handle, stream, n_rows, SMO_WS_SIZE, svmType);
n_ws = ws.GetSize();
Initialize(&y, sample_weight, n_rows, n_cols);
KernelCache<math_t, MatrixViewType> cache(
handle, matrix, n_rows, n_cols, n_ws, kernel, kernel_type, cache_size, svmType);
// Init counters
max_outer_iter = GetDefaultMaxIter(n_train, max_outer_iter);
n_iter = 0;
int n_inner_iter = 0;
diff_prev = 0;
n_small_diff = 0;
n_increased_diff = 0;
report_increased_diff = true;
bool keep_going = true;
rmm::device_uvector<math_t> nz_da(n_ws, stream);
rmm::device_uvector<int> nz_da_idx(n_ws, stream);
while (n_iter < max_outer_iter && keep_going) {
RAFT_CUDA_TRY(cudaMemsetAsync(delta_alpha.data(), 0, n_ws * sizeof(math_t), stream));
raft::common::nvtx::push_range("SmoSolver::ws_select");
ws.Select(f.data(), alpha.data(), y, C_vec.data());
raft::common::nvtx::pop_range();
RAFT_CUDA_TRY(cudaPeekAtLastError());
raft::common::nvtx::push_range("SmoSolver::Kernel");
cache.InitWorkingSet(ws.GetIndices());
math_t* cacheTile = cache.getSquareTileWithoutCaching();
raft::common::nvtx::pop_range();
raft::common::nvtx::push_range("SmoSolver::SmoBlockSolve");
SmoBlockSolve<math_t, SMO_WS_SIZE><<<1, n_ws, 0, stream>>>(y,
n_train,
alpha.data(),
n_ws,
delta_alpha.data(),
f.data(),
cacheTile,
cache.getKernelIndices(true),
C_vec.data(),
tol,
return_buff.data(),
max_inner_iter,
svmType);
RAFT_CUDA_TRY(cudaPeekAtLastError());
raft::update_host(host_return_buff, return_buff.data(), 2, stream);
raft::common::nvtx::pop_range();
raft::common::nvtx::push_range("SmoSolver::UpdateF");
raft::common::nvtx::push_range("SmoSolver::UpdateF::getNnzDaRows");
int nnz_da;
GetNonzeroDeltaAlpha(delta_alpha.data(),
n_ws,
cache.getKernelIndices(false),
nz_da.data(),
&nnz_da,
nz_da_idx.data(),
stream);
RAFT_CUDA_TRY(cudaPeekAtLastError());
// The following should be performed only for elements with nonzero delta_alpha
if (nnz_da > 0) {
auto batch_descriptor = cache.InitFullTileBatching(nz_da_idx.data(), nnz_da);
while (cache.getNextBatchKernel(batch_descriptor)) {
raft::common::nvtx::pop_range();
raft::common::nvtx::push_range("SmoSolver::UpdateF::updateBatch");
// do (partial) update
UpdateF(f.data() + batch_descriptor.offset,
batch_descriptor.batch_size,
nz_da.data(),
nnz_da,
batch_descriptor.kernel_data);
RAFT_CUDA_TRY(cudaPeekAtLastError());
}
}
handle.sync_stream(stream);
raft::common::nvtx::pop_range();
raft::common::nvtx::pop_range(); // ("SmoSolver::UpdateF");
math_t diff = host_return_buff[0];
keep_going = CheckStoppingCondition(diff);
n_inner_iter += host_return_buff[1];
n_iter++;
if (n_iter % 500 == 0) { CUML_LOG_DEBUG("SMO iteration %d, diff %lf", n_iter, (double)diff); }
}
CUML_LOG_DEBUG(
"SMO solver finished after %d outer iterations, total inner %d"
" iterations, and diff %lf",
n_iter,
n_inner_iter,
diff_prev);
Results<math_t, MatrixViewType> res(handle, matrix, n_rows, n_cols, y, C_vec.data(), svmType);
res.Get(alpha.data(), f.data(), dual_coefs, n_support, idx, support_matrix, b);
ReleaseBuffers();
}
/**
* @brief Update the f vector after a block solve step.
*
* \f[ f_i = f_i + \sum_{k\in WS} K_{i,k} * \Delta \alpha_k, \f]
* where i = [0..n_train-1], WS is the set of workspace indices,
* and \f$K_{i,k}\f$ is the kernel function evaluated for training vector x_i and workspace vector
* x_k.
*
* @param f size [n_train]
* @param n_rows
* @param delta_alpha size [n_ws]
* @param n_ws
* @param cacheTile kernel function evaluated for the following set K[X,x_ws],
* size [n_rows, n_ws]
*/
void UpdateF(math_t* f, int n_rows, const math_t* delta_alpha, int n_ws, const math_t* cacheTile)
{
// multipliers used in the equation : f = 1*cachtile * delta_alpha + 1*f
math_t one = 1;
// #TODO: Call from public API when ready
RAFT_CUBLAS_TRY(raft::linalg::detail::cublasgemv(handle.get_cublas_handle(),
CUBLAS_OP_N,
n_rows,
n_ws,
&one,
cacheTile,
n_rows,
delta_alpha,
1,
&one,
f,
1,
stream));
if (svmType == EPSILON_SVR) {
// SVR has doubled the number of training vectors and we need to update
// alpha for both batches individually
// #TODO: Call from public API when ready
RAFT_CUBLAS_TRY(raft::linalg::detail::cublasgemv(handle.get_cublas_handle(),
CUBLAS_OP_N,
n_rows,
n_ws,
&one,
cacheTile,
n_rows,
delta_alpha,
1,
&one,
f + n_rows,
1,
stream));
}
}
/** @brief Initialize the problem to solve.
*
* Both SVC and SVR are solved as a classification problem.
* The optimization target (W) does not appear directly in the SMO
* formulation, only its derivative through f (optimality indicator vector):
* \f[ f_i = y_i \frac{\partial W }{\partial \alpha_i}. \f]
*
* The f_i values are initialized here, and updated at every solver iteration
* when alpha changes. The update step is the same for SVC and SVR, only the
* init step differs.
*
* Additionally, we zero init the dual coefficients (alpha), and initialize
* class labels for SVR.
*
* @param[inout] y on entry class labels or target values,
* on exit device pointer to class labels
* @param[in] sample_weight sample weights (can be nullptr, otherwise device
* array of size [n_rows])
* @param[in] n_rows
* @param[in] n_cols
*/
void Initialize(math_t** y, const math_t* sample_weight, int n_rows, int n_cols)
{
this->n_rows = n_rows;
this->n_cols = n_cols;
n_train = (svmType == EPSILON_SVR) ? n_rows * 2 : n_rows;
ResizeBuffers(n_train, n_cols);
// Zero init alpha
RAFT_CUDA_TRY(cudaMemsetAsync(alpha.data(), 0, n_train * sizeof(math_t), stream));
InitPenalty(C_vec.data(), sample_weight, n_rows);
// Init f (and also class labels for SVR)
switch (svmType) {
case C_SVC: SvcInit(*y); break;
case EPSILON_SVR:
SvrInit(*y, n_rows, y_label.data(), f.data());
// We return the pointer to the class labels (the target values are
// not needed anymore, they are incorporated in f).
*y = y_label.data();
break;
default: THROW("SMO initialization not implemented SvmType=%d", svmType);
}
}
void InitPenalty(math_t* C_vec, const math_t* sample_weight, int n_rows)
{
if (sample_weight == nullptr) {
thrust::device_ptr<math_t> c_ptr(C_vec);
thrust::fill(thrust::cuda::par.on(stream), c_ptr, c_ptr + n_train, C);
} else {
math_t C = this->C;
raft::linalg::unaryOp(
C_vec, sample_weight, n_rows, [C] __device__(math_t w) { return C * w; }, stream);
if (n_train > n_rows) {
// Set the same penalty parameter for the duplicate set of vectors
raft::linalg::unaryOp(
C_vec + n_rows,
sample_weight,
n_rows,
[C] __device__(math_t w) { return C * w; },
stream);
}
}
}
/** @brief Initialize Support Vector Classification
*
* We would like to maximize the following quantity
* \f[ W(\mathbf{\alpha}) = -\mathbf{\alpha}^T \mathbf{1}
* + \frac{1}{2} \mathbf{\alpha}^T Q \mathbf{\alpha}, \f]
*
* We initialize f as:
* \f[ f_i = y_i \frac{\partial W(\mathbf{\alpha})}{\partial \alpha_i} =
* -y_i + y_j \alpha_j K(\mathbf{x}_i, \mathbf{x}_j) \f]
*
* @param [in] y device pointer of class labels size [n_rows]
*/
void SvcInit(const math_t* y)
{
raft::linalg::unaryOp(
f.data(), y, n_rows, [] __device__(math_t y) { return -y; }, stream);
}
/**
* @brief Initializes the solver for epsilon-SVR.
*
* For regression we are optimizing the following quantity
* \f[
* W(\alpha^+, \alpha^-) =
* \epsilon \sum_{i=1}^l (\alpha_i^+ + \alpha_i^-)
* - \sum_{i=1}^l yc_i (\alpha_i^+ - \alpha_i^-)
* + \frac{1}{2} \sum_{i,j=1}^l
* (\alpha_i^+ - \alpha_i^-)(\alpha_j^+ - \alpha_j^-) K(\bm{x}_i, \bm{x}_j)
* \f]
*
* Then \f$ f_i = y_i \frac{\partial W(\alpha}{\partial \alpha_i} \f$
* \f$ = yc_i*epsilon - yr_i \f$
*
* Additionally we set class labels for the training vectors.
*
* References:
* [1] B. Schölkopf et. al (1998): New support vector algorithms,
* NeuroCOLT2 Technical Report Series, NC2-TR-1998-031, Section 6
* [2] A.J. Smola, B. Schölkopf (2004): A tutorial on support vector
* regression, Statistics and Computing 14, 199–222
* [3] Orchel M. (2011) Support Vector Regression as a Classification Problem
* with a Priori Knowledge in the Form of Detractors,
* Man-Machine Interactions 2. Advances in Intelligent and Soft Computing,
* vol 103
*
* @param [in] yr device pointer with values for regression, size [n_rows]
* @param [in] n_rows
* @param [out] yc device pointer to classes associated to the dual
* coefficients, size [n_rows*2]
* @param [out] f device pointer f size [n_rows*2]
*/
void SvrInit(const math_t* yr, int n_rows, math_t* yc, math_t* f)
{
// Init class labels to [1, 1, 1, ..., -1, -1, -1, ...]
thrust::device_ptr<math_t> yc_ptr(yc);
thrust::constant_iterator<math_t> one(1);
thrust::copy(thrust::cuda::par.on(stream), one, one + n_rows, yc_ptr);
thrust::constant_iterator<math_t> minus_one(-1);
thrust::copy(thrust::cuda::par.on(stream), minus_one, minus_one + n_rows, yc_ptr + n_rows);
// f_i = epsilon - y_i, for i \in [0..n_rows-1]
math_t epsilon = this->epsilon;
raft::linalg::unaryOp(
f, yr, n_rows, [epsilon] __device__(math_t y) { return epsilon - y; }, stream);
// f_i = -epsilon - y_i, for i \in [n_rows..2*n_rows-1]
raft::linalg::unaryOp(
f + n_rows, yr, n_rows, [epsilon] __device__(math_t y) { return -epsilon - y; }, stream);
}
private:
const raft::handle_t& handle;
cudaStream_t stream;
int n_rows = 0; //!< training data number of rows
int n_cols = 0; //!< training data number of columns
int n_ws = 0; //!< size of the working set
int n_train = 0; //!< number of training vectors (including duplicates for SVR)
// Buffers for the domain [n_train]
rmm::device_uvector<math_t> alpha; //!< dual coordinates
rmm::device_uvector<math_t> f; //!< optimality indicator vector
rmm::device_uvector<math_t> y_label; //!< extra label for regression
rmm::device_uvector<math_t> C_vec; //!< penalty parameter vector
// Buffers for the working set [n_ws]
//! change in alpha parameter during a blocksolve step
rmm::device_uvector<math_t> delta_alpha;
// Buffers to return some parameters from the kernel (iteration number, and
// convergence information)
rmm::device_uvector<math_t> return_buff;
math_t host_return_buff[2];
math_t C;
math_t tol; //!< tolerance for stopping condition
math_t epsilon; //!< epsilon parameter for epsiolon-SVR
raft::distance::kernels::GramMatrixBase<math_t>* kernel;
raft::distance::kernels::KernelType kernel_type;
float cache_size; //!< size of kernel cache in MiB
SvmType svmType; ///!< Type of the SVM problem to solve
// Variables to track convergence of training
math_t diff_prev;
int n_small_diff;
int nochange_steps;
int n_increased_diff;
int n_iter;
bool report_increased_diff;
  /**
   * @brief Decide whether the outer SMO loop should keep iterating.
   *
   * Inspects the current duality gap `diff` and compares it with the value
   * from the previous iteration (member `diff_prev`) to detect convergence,
   * stagnation, and numerical problems. Mutates the convergence-tracking
   * members `diff_prev`, `n_small_diff`, `n_increased_diff` and
   * `report_increased_diff`.
   *
   * @param [in] diff current duality gap reported by the block solver
   * @return true if the solver should continue iterating
   * @throws raft exception (via THROW) if diff is NaN
   */
  bool CheckStoppingCondition(math_t diff)
  {
    if (diff > diff_prev * 1.5 && n_iter > 0) {
      // Ideally, diff should decrease monotonically. In practice we can have
      // small fluctuations (10% increase is not uncommon). Here we consider a
      // 50% increase in the diff value large enough to indicate a problem.
      // The 50% value is an educated guess that triggers the convergence debug
      // message for problematic use cases while avoids false alarms in many
      // other cases.
      n_increased_diff++;
    }
    // Warn (at most once per fit) if more than 10% of iterations increased the gap.
    if (report_increased_diff && n_iter > 100 && n_increased_diff > n_iter * 0.1) {
      CUML_LOG_DEBUG(
        "Solver is not converging monotonically. This might be caused by "
        "insufficient normalization of the feature columns. In that case "
        "MinMaxScaler((0,1)) could help. Alternatively, for nonlinear kernels, "
        "you can try to increase the gamma parameter. To limit execution time, "
        "you can also adjust the number of iterations using the max_iter "
        "parameter.");
      report_increased_diff = false;
    }
    bool keep_going = true;
    // Count consecutive iterations where the gap barely changed (stagnation).
    // Note diff_prev is only advanced when a significant change is seen.
    if (abs(diff - diff_prev) < 0.001 * tol) {
      n_small_diff++;
    } else {
      diff_prev    = diff;
      n_small_diff = 0;
    }
    // Stop (with an error message) after too many stagnant iterations.
    if (n_small_diff > nochange_steps) {
      CUML_LOG_ERROR(
        "SMO error: Stopping due to unchanged diff over %d"
        " consecutive steps",
        nochange_steps);
      keep_going = false;
    }
    // Normal convergence: duality gap below tolerance.
    if (diff < tol) keep_going = false;
    if (isnan(diff)) {
      std::string txt;
      if (std::is_same<float, math_t>::value) {
        txt +=
          " This might be caused by floating point overflow. In such case using"
          " fp64 could help. Alternatively, try gamma='scale' kernel"
          " parameter.";
      }
      THROW("SMO error: NaN found during fitting.%s", txt.c_str());
    }
    return keep_going;
  }
/// Return the number of maximum iterations.
int GetDefaultMaxIter(int n_train, int max_outer_iter)
{
if (max_outer_iter == -1) {
max_outer_iter = n_train < std::numeric_limits<int>::max() / 100
? n_train * 100
: std::numeric_limits<int>::max();
max_outer_iter = max(100000, max_outer_iter);
}
// else we have user defined iteration count which we do not change
return max_outer_iter;
}
  /**
   * @brief (Re)allocate the solver buffers for the actual problem size.
   *
   * @param [in] n_train number of training vectors (for SVR this is twice
   *   the number of input rows, hence only known at solve time)
   * @param [in] n_cols number of features (currently unused in this method)
   */
  void ResizeBuffers(int n_train, int n_cols)
  {
    // This needs to know n_train, therefore it can be only called during solve
    alpha.resize(n_train, stream);
    C_vec.resize(n_train, stream);
    f.resize(n_train, stream);
    delta_alpha.resize(n_ws, stream);
    // SVR duplicates the training set, so it needs its own label buffer
    if (svmType == EPSILON_SVR) y_label.resize(n_train, stream);
  }
  /**
   * @brief Free the solver working buffers once fitting is done.
   *
   * NOTE(review): C_vec is not released here, unlike the other buffers
   * allocated in ResizeBuffers — presumably intentional, but worth confirming.
   */
  void ReleaseBuffers()
  {
    alpha.release();
    delta_alpha.release();
    f.release();
    y_label.release();
  }
};
}; // end namespace SVM
}; // end namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/svm/kernelcache.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "sparse_util.cuh"
#include <cuml/svm/svm_parameter.h>
#include <raft/core/handle.hpp>
#include <raft/distance/kernels.cuh>
#include <raft/linalg/init.cuh>
#include <raft/util/cache.cuh>
#include <raft/util/cache_util.cuh>
#include <raft/linalg/gemm.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/reverse.h>
#include <cuml/common/logger.hpp>
#include <cub/cub.cuh>
#include <algorithm>
#include <cstddef>
namespace ML {
namespace SVM {
namespace { // unnamed namespace to avoid multiple definition error
/**
* @brief Re-raise working set indexes to SVR scope [0..2*n_rows)
*
* On exit, out is the permutation of n_ws such that out[k]%n_rows == n_ws_perm[k]
* In case n_ws_perm contains duplicates they are considered to
* represent the subspace [0..n_rows) first and [n_rows..2*n_rows) second
*
* @param [in] ws array with working set indices, size [n_ws]
* @param [in] n_ws number of elements in the working set
* @param [in] n_rows number of rows in the original problem
* @param [in] n_ws_perm array with indices of vectors in the working set, size [n_ws]
* @param [out] out array with workspace idx to column idx mapping, size [n_ws]
*/
__global__ void mapColumnIndicesToSVRSpace(
  const int* ws, int n_ws, int n_rows, const int* n_ws_perm, int* out)
{
  // One thread per working-set element.
  int tid = threadIdx.x + blockIdx.x * blockDim.x;
  if (tid < n_ws) {
    int wsx       = ws[tid];
    int idx       = wsx % n_rows;  // index projected down to [0..n_rows)
    bool is_upper = idx < wsx;     // true iff wsx came from [n_rows..2*n_rows)
    int k         = -1;
    // we have only max 1024 elements, we do a linear search
    for (int i = 0; i < n_ws; i++) {
      if (n_ws_perm[i] == idx && (is_upper || k < 0)) k = i;
      // since the array is derived from ws, the search will always return
      // a) the first occurrence k within [0..n_rows)
      // b) the last occurrence k within [n_rows..2*n_rows)
    }
    // NOTE(review): assumes idx is always found (k >= 0); this holds because
    // n_ws_perm is derived from ws % n_rows — confirm if callers change.
    out[k] = wsx;
  }
}
/**
 * @brief Device functor that gathers source[i] for a given index i.
 *
 * Used with thrust::transform to select a subset of values by index
 * (see KernelCache::selectValueSubset).
 *
 * Note: no longer derives from thrust::unary_function — that adapter has been
 * deprecated since Thrust 1.17 and removed in recent CCCL releases, and
 * thrust::transform does not require the nested typedefs it provided.
 */
template <typename math_t>
struct select_at_index {
  const math_t* dot_;  //!< device pointer to the source array
  select_at_index(const math_t* dot) : dot_(dot) {}
  __device__ math_t operator()(const int& i) const { return dot_[i]; }
};
/**
 * @brief Helper class to allow batch-wise interaction with Cache
*
* Allows for partial row updates with an underlying cache.
* It is assumed that 'AssignAndStoreVecs' operations are always performed
* for all batches starting at batch 0.
*
*/
template <typename math_t>
class BatchCache : public raft::cache::Cache<math_t> {
 public:
  /**
   * @brief BatchCache Constructor
   *
   * @param [in] n_rows number of elements in a single vector that is stored in a
   *   cache entry
   * @param [in] cache_size in MiB
   * @param [in] stream cuda stream
   */
  BatchCache(int n_rows, float cache_size, cudaStream_t stream)
    : raft::cache::Cache<math_t>(stream, n_rows, cache_size), d_temp_storage(0, stream)
  {
  }
  ~BatchCache() {}
  /**
   * @brief Initialize BatchCache
   *
   * This will initialize internal tmp structures for cub
   *
   * @param [in] batch_size_base maximum number of rows in a batch
   * @param [in] n_ws number of elements in the working set
   * @param [out] tmp_buffer temporary buffer of size [2*n_ws]
   * @param [in] stream cuda stream
   */
  void Initialize(int batch_size_base, int n_ws, int* tmp_buffer, cudaStream_t stream)
  {
    this->batch_size_base = batch_size_base;
    RAFT_CUDA_TRY(cudaMemsetAsync(tmp_buffer, 0, n_ws * 2 * sizeof(int), stream));
    // Init cub buffers: temp-storage size query only (first argument is NULL,
    // so no sorting happens here).
    // NOTE(review): the status returned by cub is ignored here — consider
    // wrapping in RAFT_CUDA_TRY like the surrounding calls.
    cub::DeviceRadixSort::SortPairs(NULL,
                                    d_temp_storage_size,
                                    tmp_buffer,
                                    tmp_buffer,
                                    tmp_buffer,
                                    tmp_buffer,
                                    n_ws,
                                    0,
                                    sizeof(int) * 8,
                                    stream);
    d_temp_storage.resize(d_temp_storage_size, stream);
  }
  /**
   * @brief Prepare sort order of indices
   *
   * This will reorder the keys w.r.t. the state of the cache in a way that
   * - the keys are partitioned in cached, uncached
   * - the uncached elements are sorted based on the value returned
   *   in cache_idx (which refers to the target cache set for uncached)
   * This will ensure that neither cache retrieval nor cache updates will
   * require additional reordering of keys.
   *
   * @param [inout] keys key indices to be reordered
   * @param [in] n number of keys
   * @param [out] cache_idx tmp buffer for cache indices [n]
   * @param [out] reorder_buffer tmp buffer for reordering of size [2*n]
   * @param [in] stream cuda stream
   */
  void PreparePartitionedIdxOrder(
    int* keys, int n, int* cache_idx, int* reorder_buffer, cudaStream_t stream)
  {
    int n_cached = 0;
    raft::cache::Cache<math_t>::GetCacheIdxPartitioned(keys, n, cache_idx, &n_cached, stream);
    int n_uncached = n - n_cached;
    if (n_uncached > 1) {
      // we also need to make sure that the next cache assignment
      // does not need to rearrange. This way the resulting ws_idx_mod
      // keys won't change during the cache update
      cub::DeviceRadixSort::SortPairs(d_temp_storage.data(),
                                      d_temp_storage_size,
                                      cache_idx + n_cached,
                                      reorder_buffer,
                                      keys + n_cached,
                                      reorder_buffer + n,
                                      n_uncached,
                                      0,
                                      sizeof(int) * 8,
                                      stream);
      // We can skip cache_idx as we are only interested in keys here
      raft::copy(keys + n_cached, reorder_buffer + n, n_uncached, stream);
    }
  }
  /**
   * @brief Get cache indices for keys
   *
   * This will return the cache indices for cached keys as well as the
   * cache set for uncached keys.
   * When this is called with keys coming from 'PreparePartitionedIdxOrder'
   * the keys should be unchanged upon return.
   *
   * @param [in] keys key indices
   * @param [in] n number of keys
   * @param [out] cache_idx buffer for cache indices [n]
   * @param [out] n_cached number of cached keys
   * @param [in] stream cuda stream
   */
  void GetCacheIdxPartitionedStable(
    int* keys, int n, int* cache_idx, int* n_cached, cudaStream_t stream)
  {
    raft::cache::Cache<math_t>::GetCacheIdxPartitioned(keys, n, cache_idx, n_cached, stream);
    int n_uncached = n - *n_cached;
    if (n_uncached > 1) {
      // reverse the uncached values (due to cub::DevicePartition::Flagged,
      // which emits rejected items in reverse order)
      thrust::device_ptr<int> keys_v(keys + *n_cached);
      thrust::reverse(thrust::cuda::par.on(stream), keys_v, keys_v + n_uncached);
      thrust::device_ptr<int> cache_idx_v(cache_idx + *n_cached);
      thrust::reverse(thrust::cuda::par.on(stream), cache_idx_v, cache_idx_v + n_uncached);
    }
  }
  /**
   * @brief Retrieve cached rows
   *
   * This will retrieve cached rows at the given positions for a given batch.
   *
   * @param [in] batch_idx batch id
   * @param [in] batch_size batch size
   * @param [in] idx indices to be retrieved
   * @param [in] n number of indices
   * @param [out] out buffer for cache rows, should be at least [n*batch_size]
   * @param [in] stream cuda stream
   */
  void GetVecs(
    int batch_idx, int batch_size, const int* idx, int n, math_t* out, cudaStream_t stream)
  {
    if (n > 0) {
      // Each batch occupies a contiguous slab of the cache storage.
      size_t offset                        = raft::cache::Cache<math_t>::GetSize() * batch_size_base * batch_idx;
      rmm::device_uvector<math_t>& cache   = raft::cache::Cache<math_t>::cache;
      raft::cache::get_vecs<<<raft::ceildiv(n * batch_size, TPB), TPB, 0, stream>>>(
        cache.data() + offset, batch_size, idx, n, out);
      RAFT_CUDA_TRY(cudaPeekAtLastError());
    }
  }
  /**
   * @brief Store rows to cache
   *
   * This will store new rows for a given batch.
   *
   * @param [in] batch_idx batch id
   * @param [in] batch_size batch size
   * @param [in] keys keys for rows to store
   * @param [in] n number of keys
   * @param [inout] cache_idx cache set ids
   * @param [in] tile rows store, should be at least [n*batch_size]
   * @param [in] stream cuda stream
   */
  void AssignAndStoreVecs(int batch_idx,
                          int batch_size,
                          int* keys,
                          int n,
                          int* cache_idx,
                          const math_t* tile,
                          cudaStream_t stream)
  {
    // here we assume that the input keys are already ordered by cache_idx
    // this will prevent AssignCacheIdx to modify it further
    if (n > 0) {
      if (batch_idx == 0) {
        // we only need to do this for the initial batch
        raft::cache::Cache<math_t>::AssignCacheIdx(keys, n, cache_idx, stream);
      }
      size_t offset                      = raft::cache::Cache<math_t>::GetSize() * batch_size_base * batch_idx;
      rmm::device_uvector<math_t>& cache = raft::cache::Cache<math_t>::cache;
      raft::cache::store_vecs<<<raft::ceildiv(n * batch_size, TPB), TPB, 0, stream>>>(
        tile,
        n,
        batch_size,
        nullptr,
        n,
        cache_idx,
        cache.data() + offset,
        raft::cache::Cache<math_t>::GetSize());
      RAFT_CUDA_TRY(cudaPeekAtLastError());
    }
  }
 private:
  int batch_size_base;  //!< maximum number of rows per batch
  // tmp storage for cub sort
  rmm::device_uvector<char> d_temp_storage;
  size_t d_temp_storage_size = 0;
  const int TPB = 256;  //!< threads per block for kernels launched
};
} // end unnamed namespace
/**
* @brief KernelCache to provide kernel tiles
*
* We calculate the kernel matrix elements for the vectors in the working set.
*
* Two tiles can be calculated:
* - SquareTile[i,j] = K(x_i, x_j) where i,j are vector indices from the working set
* - FullTile[i,j] = K(x_i, x_j) where i=0.._rows-1, and j is a vector index from the working set
* The smaller square tile is calculated without caching. The larger tile can load already cached
* columns from the cache. The large tile can be also computed batch wise to limit memory usage.
*
* This cache supports large matrix dimensions as well as sparse data.
* - For large n_rows the FullTile will be processed batch-wise.
* - For large n_cols the intermediate storages are kept sparse.
*
*/
template <typename math_t, typename MatrixViewType>
class KernelCache {
 public:
  /**
   * Construct an object to manage kernel cache
   *
   * @param handle reference to raft::handle_t implementation
   * @param matrix device matrix of training vectors [n_rows x n_cols]
   * @param n_rows number of training vectors
   * @param n_cols number of features
   * @param n_ws size of working set
   * @param kernel pointer to kernel
   * @param kernel_type kernel type
   * @param cache_size (default 200 MiB)
   * @param svmType is this SVR or SVC (default)
   * @param kernel_tile_byte_limit maximum kernel size (default 1GB)
   *   Larger kernels will result in batching.
   * @param dense_extract_byte_limit sparse rows will be extracted as dense
   *   up to this limit to speed up kernel computation. Only valid
   *   for sparse input. (default 1GB)
   */
  KernelCache(const raft::handle_t& handle,
              MatrixViewType matrix,
              int n_rows,
              int n_cols,
              int n_ws,
              raft::distance::kernels::GramMatrixBase<math_t>* kernel,
              raft::distance::kernels::KernelType kernel_type,
              float cache_size                = 200,
              SvmType svmType                 = C_SVC,
              size_t kernel_tile_byte_limit   = 1 << 30,
              size_t dense_extract_byte_limit = 1 << 30)
    : batch_cache(n_rows, cache_size, handle.get_stream()),
      handle(handle),
      kernel(kernel),
      kernel_type(kernel_type),
      matrix(matrix),
      n_rows(n_rows),
      n_cols(n_cols),
      n_ws(n_ws),
      svmType(svmType),
      kernel_tile(0, handle.get_stream()),
      matrix_l2(0, handle.get_stream()),
      matrix_l2_ws(0, handle.get_stream()),
      ws_idx_mod(n_ws, handle.get_stream()),
      ws_idx_mod_svr(svmType == EPSILON_SVR ? n_ws : 0, handle.get_stream()),
      x_ws_csr(nullptr),
      x_ws_dense(0, handle.get_stream()),
      indptr_batched(0, handle.get_stream()),
      ws_cache_idx(n_ws * 2, handle.get_stream())
  {
    ASSERT(kernel != nullptr, "Kernel pointer required for KernelCache!");
    stream           = handle.get_stream();
    batching_enabled = false;
    is_csr           = !isDenseType<MatrixViewType>();
    sparse_extract   = false;
    batch_size_base  = n_rows;
    // enable batching for kernel > 1 GB (default)
    if ((size_t)n_rows * n_ws * sizeof(math_t) > kernel_tile_byte_limit) {
      batching_enabled = true;
      // only select based on desired big-kernel size
      // NOTE(review): unsigned long result is narrowed to int; fine while the
      // quotient stays below INT_MAX — confirm for very large byte limits.
      batch_size_base = std::max(1ul, kernel_tile_byte_limit / n_ws / sizeof(math_t));
    }
    batch_cache.Initialize(batch_size_base, n_ws, ws_cache_idx.data(), stream);
    // NOTE(review): reserve (not resize) — kernel_tile.data() is later used up
    // to this capacity while size() stays 0; relies on rmm::device_uvector
    // keeping the reserved allocation. Confirm if rmm semantics change.
    kernel_tile.reserve(n_ws * std::max<size_t>(batch_size_base, n_ws), stream);
    // enable sparse row extraction for sparse input where n_ws * n_cols > 1 GB
    // Warning: kernel computation will be much slower!
    if (is_csr && ((size_t)n_cols * n_ws * sizeof(math_t) > dense_extract_byte_limit)) {
      sparse_extract = true;
    }
    if (sparse_extract) {
      x_ws_csr =
        std::make_unique<raft::device_csr_matrix<math_t, int, int, int>>(handle, n_ws, n_cols);
      // we need to make an initial sparsity init before we can retrieve the structure_view
      x_ws_csr->initialize_sparsity(10);
    } else {
      x_ws_dense.resize(n_ws * static_cast<size_t>(n_cols), stream);
    }
    // store matrix l2 norm for RBF kernels
    if (kernel_type == raft::distance::kernels::KernelType::RBF) {
      matrix_l2.resize(n_rows, stream);
      matrix_l2_ws.resize(n_ws, stream);
      ML::SVM::matrixRowNorm(handle, matrix, matrix_l2.data(), raft::linalg::NormType::L2Norm);
    }
    // additional row pointer information needed for batched CSR access
    // copy matrix row pointer to host to compute partial nnz on the fly
    if (is_csr && batching_enabled) {
      host_indptr.resize(n_rows + 1);
      indptr_batched.resize(batch_size_base + 1, stream);
      copyIndptrToHost(matrix, host_indptr.data(), stream);
    }
  }
  ~KernelCache(){};
  /**
   * Helper object to pass batch information of cache while iterating batches
   */
  struct BatchDescriptor {
    int batch_id;          //!< current batch index (-1 before first batch)
    int offset;            //!< row offset of the current batch
    int batch_size;        //!< number of rows in the current batch
    math_t* kernel_data;   //!< pointer to the kernel tile for this batch
    int* nz_da_idx;        //!< requested working-set indices
    int nnz_da;            //!< number of requested indices
    int n_cached;          //!< how many of the requested indices were cached
  };
  // debugging
  enum CacheState {
    READY                 = 0,
    WS_INITIALIZED        = 1,
    BATCHING_INITIALIZED  = 2,
  };
  /**
   * @brief Initialize cache for new working set
   *
   * Will initialize the cache for a new working set.
   * In particular the indices will be re-ordered to allow for cache retrieval and update.
   * The re-ordered indices will stored and are accessible via 'getKernelIndices'.
   *
   * @param [in] ws_idx indices of size [n_ws]
   */
  void InitWorkingSet(const int* ws_idx)
  {
    ASSERT(cache_state != CacheState::WS_INITIALIZED, "Working set has already been initialized!");
    ASSERT(cache_state != CacheState::BATCHING_INITIALIZED, "Previous batching step incomplete!");
    this->ws_idx = ws_idx;
    if (svmType == EPSILON_SVR) {
      raft::copy(ws_idx_mod_svr.data(), ws_idx, n_ws, stream);
      GetVecIndices(ws_idx, n_ws, ws_idx_mod.data());
    } else {
      raft::copy(ws_idx_mod.data(), ws_idx, n_ws, stream);
    }
    if (batch_cache.GetSize() > 0) {
      // perform reordering of indices to partition into cached/uncached
      // batch_id 0 should behave the same as all other batches
      // provide currently unused 'kernel_tile' as temporary storage
      batch_cache.PreparePartitionedIdxOrder(
        ws_idx_mod.data(), n_ws, ws_cache_idx.data(), (int*)kernel_tile.data(), stream);
      // re-compute original indices that got flattened by GetVecIndices
      if (svmType == EPSILON_SVR) {
        mapColumnIndicesToSVRSpace<<<raft::ceildiv(n_ws, TPB), TPB, 0, stream>>>(
          ws_idx, n_ws, n_rows, ws_idx_mod.data(), ws_idx_mod_svr.data());
        RAFT_CUDA_TRY(cudaPeekAtLastError());
      }
    }
    cache_state = CacheState::WS_INITIALIZED;
  }
  /**
   * @brief Retrieve kernel indices
   *
   * Returns the reordered (!) workspace indices corresponding
   * to the order used in the provided kernel matrices.
   *
   * If allow_svr is true, input indices >= n_rows (only valid for SVR)
   * will be returned as such. Otherwise they will be projected to [0,nrows).
   *
   * This should only be called after 'InitWorkingSet'.
   *
   * @param [in] allow_svr allows indices >= n_rows (only SVR)
   * @return pointer to indices corresponding to kernel
   */
  int* getKernelIndices(int allow_svr)
  {
    ASSERT(cache_state != CacheState::READY, "Working set not initialized!");
    if (allow_svr && svmType == EPSILON_SVR) {
      return ws_idx_mod_svr.data();
    } else {
      return ws_idx_mod.data();
    }
  }
  /**
   * @brief Retrieve kernel matrix for n_ws*n_ws square
   *
   * Computes and returns the square kernel tile corresponding to the indices
   * provided by 'InitWorkingSet'.
   *
   * TODO: utilize cache read without update
   *
   * @return pointer to kernel matrix
   */
  math_t* getSquareTileWithoutCaching()
  {
    ASSERT(cache_state != CacheState::READY, "Working set not initialized!");
    ASSERT(cache_state != CacheState::BATCHING_INITIALIZED, "Previous batching step incomplete!");
    // gather the working-set rows into the tmp buffer (sparse or dense)
    if (sparse_extract) {
      ML::SVM::extractRows<math_t>(matrix, *x_ws_csr, ws_idx_mod.data(), n_ws, handle);
    } else {
      ML::SVM::extractRows<math_t>(matrix, x_ws_dense.data(), ws_idx_mod.data(), n_ws, handle);
    }
    // extract dot array for RBF
    if (kernel_type == raft::distance::kernels::KernelType::RBF) {
      selectValueSubset(matrix_l2_ws.data(), matrix_l2.data(), ws_idx_mod.data(), n_ws);
    }
    // compute kernel
    {
      if (sparse_extract) {
        auto ws_view = getViewWithFixedDimension(*x_ws_csr, n_ws, n_cols);
        KernelOp(handle,
                 kernel,
                 ws_view,
                 ws_view,
                 kernel_tile.data(),
                 matrix_l2_ws.data(),
                 matrix_l2_ws.data());
      } else {
        KernelOp(handle,
                 kernel,
                 x_ws_dense.data(),
                 n_ws,
                 n_cols,
                 x_ws_dense.data(),
                 n_ws,
                 kernel_tile.data(),
                 matrix_l2_ws.data(),
                 matrix_l2_ws.data());
      }
    }
    return kernel_tile.data();
  }
  /**
   * @brief Initialize the (batched) kernel retrieval for full rows
   *
   * Note: Values of nz_da_idx should be a subset of kernel indices with unmodified ordering!
   *
   * @param [in] nz_da_idx sub-set of working set indices to be requested
   * @param [in] nnz_da size of nz_da_idx
   * @return initialized batch descriptor object for iterating batches
   */
  BatchDescriptor InitFullTileBatching(int* nz_da_idx, int nnz_da)
  {
    ASSERT(cache_state != CacheState::READY, "Working set not initialized!");
    ASSERT(cache_state != CacheState::BATCHING_INITIALIZED, "Previous batching step incomplete!");
    int n_cached = 0;
    if (batch_cache.GetSize() > 0) {
      // we only do this once per working set
      batch_cache.GetCacheIdxPartitionedStable(
        nz_da_idx, nnz_da, ws_cache_idx.data() + n_ws, &n_cached, stream);
      // the second instance will be permuted during the assign step
      raft::copy(ws_cache_idx.data(), ws_cache_idx.data() + n_ws, nnz_da, stream);
    }
    int n_uncached = nnz_da - n_cached;
    if (n_uncached > 0) {
      // extract only the rows that are not in the cache
      if (sparse_extract) {
        ML::SVM::extractRows<math_t>(matrix, *x_ws_csr, nz_da_idx + n_cached, n_uncached, handle);
      } else {
        ML::SVM::extractRows<math_t>(
          matrix, x_ws_dense.data(), nz_da_idx + n_cached, n_uncached, handle);
      }
      // extract dot array for RBF
      if (kernel_type == raft::distance::kernels::KernelType::RBF) {
        selectValueSubset(matrix_l2_ws.data(), matrix_l2.data(), nz_da_idx + n_cached, n_uncached);
      }
    }
    cache_state = CacheState::BATCHING_INITIALIZED;
    return {.batch_id    = -1,
            .offset      = 0,
            .batch_size  = 0,
            .kernel_data = nullptr,
            .nz_da_idx   = nz_da_idx,
            .nnz_da      = nnz_da,
            .n_cached    = n_cached};
  }
  // workaround to create a view based on an owning csr_matrix that fixes
  // the initial dimensions
  // TODO: remove once not needed anymore
  raft::device_csr_matrix_view<math_t, int, int, int> getViewWithFixedDimension(
    raft::device_csr_matrix<math_t, int, int, int>& tmp_matrix, int n_rows, int n_cols)
  {
    auto csr_struct_in = tmp_matrix.structure_view();
    auto csr_struct_out =
      raft::make_device_compressed_structure_view<int, int, int>(csr_struct_in.get_indptr().data(),
                                                                 csr_struct_in.get_indices().data(),
                                                                 n_rows,
                                                                 n_cols,
                                                                 csr_struct_in.get_nnz());
    return raft::make_device_csr_matrix_view(tmp_matrix.get_elements().data(), csr_struct_out);
  }
  /**
   * @brief Iterate batches of full kernel tile [n_rows, nnz_da]
   *
   * In order to keep the cache consistent the function should always be called
   * until it returns false and all batches have been processed.
   *
   * @param [inout] batch_descriptor batching state information
   * @return true if there is still a batch to be processed
   */
  bool getNextBatchKernel(BatchDescriptor& batch_descriptor)
  {
    ASSERT(cache_state == CacheState::BATCHING_INITIALIZED, "Batching step not initialized!");
    int offset = batch_descriptor.offset + batch_descriptor.batch_size;
    if (offset >= n_rows) {
      // all batches processed — allow a new working set
      cache_state = CacheState::READY;
      return false;
    }
    int batch_size = std::min(batch_size_base, n_rows - offset);
    int batch_id   = offset / batch_size_base;
    ASSERT(offset % batch_size_base == 0, "Inconsistent offset!");
    ASSERT(batch_id == batch_descriptor.batch_id + 1, "Inconsistent batch_id!");
    int nnz_da     = batch_descriptor.nnz_da;
    int n_cached   = batch_descriptor.n_cached;
    int n_uncached = nnz_da - n_cached;
    // fill in n_cached ids from cache
    if (n_cached > 0) {
      batch_cache.GetVecs(
        batch_id, batch_size, ws_cache_idx.data(), n_cached, kernel_tile.data(), stream);
    }
    if (n_uncached > 0) {
      int* ws_idx_new   = batch_descriptor.nz_da_idx + n_cached;
      math_t* tile_new  = kernel_tile.data() + (size_t)n_cached * batch_size;
      auto batch_matrix = getMatrixBatch(
        matrix, batch_size, offset, host_indptr.data(), indptr_batched.data(), stream);
      // compute kernel
      math_t* norm_with_offset = matrix_l2.data() != nullptr ? matrix_l2.data() + offset : nullptr;
      if (sparse_extract) {
        auto ws_view = getViewWithFixedDimension(*x_ws_csr, n_uncached, n_cols);
        KernelOp(
          handle, kernel, batch_matrix, ws_view, tile_new, norm_with_offset, matrix_l2_ws.data());
      } else {
        KernelOp(handle,
                 kernel,
                 batch_matrix,
                 x_ws_dense.data(),
                 n_uncached,
                 tile_new,
                 norm_with_offset,
                 matrix_l2_ws.data());
      }
      RAFT_CUDA_TRY(cudaPeekAtLastError());
      if (batch_cache.GetSize() > 0 && n_uncached > 0) {
        // AssignCacheIdx should not permute ws_idx_new anymore as we have sorted
        // it already during InitWorkingSet
        batch_cache.AssignAndStoreVecs(batch_id,
                                       batch_size,
                                       ws_idx_new,
                                       n_uncached,
                                       ws_cache_idx.data() + n_ws + n_cached,
                                       tile_new,
                                       stream);
      }
    }
    batch_descriptor.batch_id    = batch_id;
    batch_descriptor.offset      = offset;
    batch_descriptor.batch_size  = batch_size;
    batch_descriptor.kernel_data = kernel_tile.data();
    return true;
  }
  /** @brief Select a subset of values
   *
   * Select a subset of values
   *
   * @param [out] target array of values selected, size at least [num_indices]
   * @param [in] source source array
   * @param [in] indices indices within range [0,source.size)
   * @param [in] num_indices number of indices
   */
  void selectValueSubset(math_t* target, const math_t* source, const int* indices, int num_indices)
  {
    thrust::device_ptr<const int> indices_ptr(indices);
    thrust::device_ptr<math_t> target_ptr(target);
    thrust::transform(thrust::cuda::par.on(stream),
                      indices_ptr,
                      indices_ptr + num_indices,
                      target_ptr,
                      select_at_index(source));
  }
  /** @brief Get the original training vector idx.
   *
   * Only used for SVR (for SVC this is identity operation).
   *
   * For SVR we have duplicate set of training vectors, we return the original
   * idx, which is simply ws_idx % n_rows.
   *
   * @param [in] ws_idx array of working set indices, size [n_ws]
   * @param [in] n_ws number of elements in the working set
   * @param [out] vec_idx original training vector indices, size [n_ws]
   */
  void GetVecIndices(const int* ws_idx, int n_ws, int* vec_idx)
  {
    int n = n_rows;
    raft::linalg::unaryOp(
      vec_idx, ws_idx, n_ws, [n] __device__(math_t y) { return y < n ? y : y - n; }, stream);
  }
 private:
  MatrixViewType matrix;
  const int* ws_idx;  //!< ptr to the original working set
  bool batching_enabled;
  bool is_csr;
  bool sparse_extract;
  int batch_size_base;
  // cache state
  CacheState cache_state = CacheState::READY;
  rmm::device_uvector<math_t> kernel_tile;
  // permutation of working set indices to partition cached/uncached
  rmm::device_uvector<int> ws_idx_mod;
  rmm::device_uvector<int> ws_idx_mod_svr;
  // tmp storage for row extractions
  // needs to be a ptr atm as there is no way to resize rows
  std::unique_ptr<raft::device_csr_matrix<math_t, int, int, int>> x_ws_csr;
  rmm::device_uvector<math_t> x_ws_dense;
  // matrix l2 norm for RBF kernels
  rmm::device_uvector<math_t> matrix_l2;
  rmm::device_uvector<math_t> matrix_l2_ws;
  // additional row pointer information needed for batched CSR access
  // copy matrix row pointer to host to compute partial nnz on the fly
  std::vector<int> host_indptr;
  rmm::device_uvector<int> indptr_batched;
  raft::distance::kernels::GramMatrixBase<math_t>* kernel;
  raft::distance::kernels::KernelType kernel_type;
  int n_rows;  //!< number of rows in x
  int n_cols;  //!< number of columns in x
  int n_ws;    //!< number of elements in the working set
  // cache position of a workspace vectors
  // will fit n_ws twice in order to backup values
  rmm::device_uvector<int> ws_cache_idx;
  // NOTE(review): stored by value (not a reference) — this copies the handle;
  // SmoSolver keeps a `const raft::handle_t&` instead. Presumably intentional,
  // but worth confirming.
  const raft::handle_t handle;
  BatchCache<math_t> batch_cache;
  cudaStream_t stream;
  SvmType svmType;
  const int TPB = 256;  //!< threads per block for kernels launched
};
}; // end namespace SVM
}; // end namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/svm/svr.cu | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <iostream>
#include "kernelcache.cuh"
#include "smosolver.cuh"
#include "svr_impl.cuh"
#include <cuml/svm/svc.hpp>
#include <raft/core/handle.hpp>
#include <raft/distance/distance_types.hpp>
#include <raft/distance/kernels.cuh>
#include <raft/label/classlabels.cuh>
#include <raft/linalg/unary_op.cuh>
namespace ML {
namespace SVM {
using namespace raft::distance::kernels;
// Explicit instantiation for the library
// Dense-input SVR fit: X is a dense device matrix [n_rows x n_cols].
template void svrFit<float>(const raft::handle_t& handle,
                            float* X,
                            int n_rows,
                            int n_cols,
                            float* y,
                            const SvmParameter& param,
                            KernelParams& kernel_params,
                            SvmModel<float>& model,
                            const float* sample_weight);
template void svrFit<double>(const raft::handle_t& handle,
                             double* X,
                             int n_rows,
                             int n_cols,
                             double* y,
                             const SvmParameter& param,
                             KernelParams& kernel_params,
                             SvmModel<double>& model,
                             const double* sample_weight);
// Sparse-input SVR fit: X given in CSR form (indptr/indices/data, nnz).
template void svrFitSparse<float>(const raft::handle_t& handle,
                                  int* indptr,
                                  int* indices,
                                  float* data,
                                  int n_rows,
                                  int n_cols,
                                  int nnz,
                                  float* y,
                                  const SvmParameter& param,
                                  KernelParams& kernel_params,
                                  SvmModel<float>& model,
                                  const float* sample_weight);
template void svrFitSparse<double>(const raft::handle_t& handle,
                                   int* indptr,
                                   int* indices,
                                   double* data,
                                   int n_rows,
                                   int n_cols,
                                   int nnz,
                                   double* y,
                                   const SvmParameter& param,
                                   KernelParams& kernel_params,
                                   SvmModel<double>& model,
                                   const double* sample_weight);
}; // namespace SVM
}; // end namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/src | rapidsai_public_repos/cuml/cpp/src/svm/smoblocksolve.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**@file smoblocksolve.cuh contains the implementation of the blocked SMO solver
 */
#pragma once
#include "smo_sets.cuh"
#include <cuml/svm/svm_parameter.h>
#include <raft/util/cuda_utils.cuh>
#include <selection/kselection.cuh>
#include <stdlib.h>
namespace ML {
namespace SVM {
/**
* @brief Solve the optimization problem for the actual working set.
*
* Based on Platt's SMO [1], using improvements from Keerthy and Shevade [2].
* A concise summary of the math can be found in Appendix A1 of [3].
* We solve the QP subproblem for the vectors in the working set (WS).
*
* Let us first discuss classification (C-SVC):
*
* We would like to maximize the following quantity
* \f[ W(\mathbf{\alpha}) = -\mathbf{\alpha}^T \mathbf{1}
* + \frac{1}{2} \mathbf{\alpha}^T Q \mathbf{\alpha}, \f]
* subject to
* \f[ \mathbf{\alpha}^T \mathbf{y} = 0 \\
* \mathbf{0} \le \mathbf{\alpha}\le C \mathbf{1},\f]
* where \f$ Q_{i,j} = y_i y_j K(\mathbf{x}_i, \mathbf{x}_j)\f$
*
* This corresponds to Lagrangian for the dual is:
* \f[ L = \frac{1}{2} \sum_{i,j}\alpha_i Q \alpha_j - \sum_i \alpha_i
* -\sum_i \delta_i \alpha_i + \sum_i\mu_i(\alpha_i -C)
* - \beta \sum_i \alpha_i y_i
*\f]
*
* Let us define the optimality indicator vector
* \f[ f_i = y_i
* \frac{\partial W(\mathbf{\alpha})}{\partial \alpha_i} =
* -y_i + y_j \alpha_j K(\mathbf{x}_i, \mathbf{x}_j) =
* -y_i + y_i Q_{i,j} \alpha_j.
* \f]
* The Karush-Kuhn-Tucker conditions are necessary and sufficient for optimality.
* According to [2], the conditions simplify to
* \f[ \beta \le f_i, \forall i \in I_\mathrm{upper}, \quad
* \beta \ge f_i \forall i \in I_\mathrm{lower}. \f]
*
* If \f$ \max\{f_i | i \in I_\mathrm{lower}\} \le \min\{f_i| i\in I_\mathrm{upper}\}\f$,
* then we are converged because any beta value in this interval would lead to
* an optimal solution. Otherwise we modify the alpha parameters until the
 * corresponding changes in f lead to an optimal solution.
*
* Before the first iteration, one should set \f$ \alpha_i = 0\f$, and
* \f$ f_i = -y_i \f$, for each \f$ i \in [0..n_{rows}]\f$.
*
* To find the optimal alpha parameters, we use the SMO method: we select two
* vectors u and l from the WS and update the dual coefficients of these vectors.
* We iterate several times, and accumulate the change in the dual coeffs in
* \f$\Delta\alpha\f$.
*
* In every iteration we select the two vectors using the following formulas
* \f[ u = \mathrm{argmin}_{i=1}^{n_{ws}}\left[ f_i |
* x_i \in X_\mathrm{upper} \right] \f]
*
* \f[ l = \mathrm{argmax}_{i=1}^{n_{ws}} \left[
* \frac{(f_u-f_i)^2}{\eta_i}| f_u < f_i \land x_i \in
* X_{\mathrm{lower}}\right], \f]
* where \f[\eta_i = K(x_u, x_u) + K(x_i, x_i) - 2K(x_u, x_i). \f]
*
* We update the values of the dual coefs according to (additionally we clip
* values so that the coefficients stay in the [0, C] interval)
* \f[ \Delta \alpha_l = - y_l \frac{f_l - f_u}{\eta_l} = -y_l q, \f]
* \f[ \alpha_l += \Delta \alpha_l, \f]
* \f[ \Delta \alpha_u = -y_u y_l \Delta \alpha_l = y_u q, \f]
* \f[ \alpha_u += \Delta \alpha_u. \f]
*
* We also update the optimality indicator vector for the WS:
* \f[ f_i += \Delta\alpha_u y_u K(x_u,x_i) + \Delta\alpha_l y_l K(x_l, x_i) \f]
*
* During the inner iterations, the f values are updated only for the WS
* (since we are solving the optimization subproblem for the WS subproblem).
* For consistency, f is kept as an input parameter, the changed values are
* not saved back to global memory. After this solver finishes, all the f
* values (WS and outside WS) f should be updated using the delta_alpha output
* parameter.
*
* For SVR, we do the same steps to solve the problem. The difference is the
* optimization objective (W), which enters only as the initial value of f:
*
* \f[
* W(\alpha^+, \alpha^-) =
* \epsilon \sum_{i=1}^l (\alpha_i^+ + \alpha_i^-)
* - \sum_{i=1}^l yc_i (\alpha_i^+ - \alpha_i^-)
* + \frac{1}{2} \sum_{i,j=1}^l
* (\alpha_i^+ - \alpha_i^-)(\alpha_j^+ - \alpha_j^-) K(\bm{x}_i, \bm{x}_j)
* \f]
*
* References:
* - [1] J. C. Platt Sequential Minimal Optimization: A Fast Algorithm for
* Training Support Vector Machines, Technical Report MS-TR-98-14 (1998)
* - [2] S.S. Keerthi et al. Improvements to Platt's SMO Algorithm for SVM
* Classifier Design, Neural Computation 13, 637-649 (2001)
* - [3] Z. Wen et al. ThunderSVM: A Fast SVM Library on GPUs and CPUs, Journal
* of Machine Learning Research, 19, 1-5 (2018)
*
* @tparam math_t floating point data type
* @tparam WSIZE working set size (max 1024)
* @param [in] y_array target labels size [n_train]
* @param [in] n_train number of training vectors
* @param [inout] alpha dual coefficients, size [n_train]
* @param [in] n_ws number of elements in the working set
* @param [out] delta_alpha change in the dual coeff of vectors in the working
* set, size [n_ws]
* @param [in] f_array optimality indicator vector, size [n_train]
* @param [in] kernel kernel function calculated between the working set and all
* other training vectors, size [n_ws * n_ws]
 * @param [in] ws_idx indices of training vectors in the working set, size [n_ws]
* @param [in] C_vec penalty parameter vector including class and sample weights
* size [n_train]
* @param [in] eps tolerance, iterations will stop if the duality gap is smaller
* than this value (or if the gap is smaller than 0.1 times the initial gap)
* @param [out] return_buff, two values are returned: duality gap and the number
* of iterations
* @param [in] max_iter maximum number of iterations
* @param [in] svmType type of the SVM problem to solve
*/
template <typename math_t, int WSIZE>
__global__ __launch_bounds__(WSIZE) void SmoBlockSolve(math_t* y_array,
                                                       int n_train,
                                                       math_t* alpha,
                                                       int n_ws,
                                                       math_t* delta_alpha,
                                                       math_t* f_array,
                                                       const math_t* kernel,
                                                       const int* ws_idx,
                                                       const math_t* C_vec,
                                                       math_t eps,
                                                       math_t* return_buff,
                                                       int max_iter = 10000,
                                                       SvmType svmType = C_SVC)
{
  // NOTE(review): each thread owns exactly one working-set element (tid indexes
  // ws_idx, Kd and delta_alpha), so this kernel assumes blockDim.x == n_ws
  // (n_ws <= WSIZE) and a single block per working set -- confirm at launch site.
  typedef MLCommon::Selection::KVPair<math_t, int> Pair;
  typedef cub::BlockReduce<Pair, WSIZE> BlockReduce;
  typedef cub::BlockReduce<math_t, WSIZE> BlockReduceFloat;
  // Both reduction types share a single shared-memory buffer through this
  // union; a __syncthreads() is required between consecutive reductions
  // (and before re-reading shared results written by thread 0).
  __shared__ union {
    typename BlockReduce::TempStorage pair;
    typename BlockReduceFloat::TempStorage single;
  } temp_storage;
  // From Platt [1]: "Under unusual circumstances \eta will not be positive.
  // A negative \eta will occur if the kernel K does not obey Mercer's
  // condition [...]. A zero \eta can occur even with a correct kernel, if more
  // than one training example has the input vector x." We set a lower limit to
  // \eta, to ensure correct behavior of SMO.
  constexpr math_t ETA_EPS = 1.0e-12; // minimum value for \eta
  __shared__ math_t f_u;           // f value of the selected upper element
  __shared__ int u;                // working-set index of the upper element
  __shared__ int l;                // working-set index of the lower element
  __shared__ math_t tmp_u, tmp_l;  // clipping bounds for the update step q
  __shared__ math_t Kd[WSIZE]; // diagonal elements of the kernel matrix
  int tid = threadIdx.x;
  int idx = ws_idx[tid];
  // store values in registers
  math_t y = y_array[idx];
  math_t f = f_array[idx];
  math_t a = alpha[idx];
  math_t a_save = a;  // kept to compute delta_alpha at the end
  math_t C = C_vec[idx];
  __shared__ math_t diff_end;  // stopping threshold, fixed after iteration 0
  __shared__ math_t diff;      // current duality gap estimate (f_max - f_u)
  Kd[tid] = kernel[tid + tid * n_ws];
  int n_iter = 0;
  for (; n_iter < max_iter; n_iter++) {
    // mask values outside of X_upper
    math_t f_tmp = in_upper(a, y, C) ? f : INFINITY;
    Pair pair{f_tmp, tid};
    // u = argmin f over the upper set (first element of the working pair)
    Pair res = BlockReduce(temp_storage.pair).Reduce(pair, cub::Min(), n_ws);
    if (tid == 0) {
      f_u = res.val;
      u = res.key;
    }
    // select f_max to check stopping condition
    f_tmp = in_lower(a, y, C) ? f : -INFINITY;
    __syncthreads(); // needed because we are reusing the shared memory buffer
                     // and also the u shared value
    math_t Kui = kernel[u * n_ws + tid];
    math_t f_max = BlockReduceFloat(temp_storage.single).Reduce(f_tmp, cub::Max(), n_ws);
    if (tid == 0) {
      // f_max-f_u is used to check stopping condition.
      diff = f_max - f_u;
      if (n_iter == 0) {
        return_buff[0] = diff;
        // Stop when the gap drops below eps, or below 10% of the initial gap.
        diff_end = max(eps, 0.1f * diff);
      }
    }
    __syncthreads();
    if (diff < diff_end) { break; }
    // Select l as the element with the largest value of (f_u - f)^2 / eta
    // among the lower set, i.e. the largest expected objective improvement.
    if (f_u < f && in_lower(a, y, C)) {
      math_t eta_ui = max(Kd[tid] + Kd[u] - 2 * Kui, ETA_EPS);
      f_tmp = (f_u - f) * (f_u - f) / eta_ui;
    } else {
      f_tmp = -INFINITY;
    }
    pair = Pair{f_tmp, tid};
    res = BlockReduce(temp_storage.pair).Reduce(pair, cub::Max(), n_ws);
    if (tid == 0) { l = res.key; }
    __syncthreads();
    math_t Kli = kernel[l * n_ws + tid];
    // Update alpha
    // Let's set q = \frac{f_l - f_u}{\eta_{ul}}
    // Ideally we would have a'_u = a_u + y_u*q and a'_l = a_l - y_l*q
    // We know that 0 <= a <= C, and the updated values (a') should also stay in
    // this range. Therefore
    // 0 <= a_u + y_u *q <= C --> -a_u <= y_u * q <= C - a_u
    // Based on the value of y_u we have two branches:
    // y == 1: -a_u <= q <= C-a_u and y == -1: a_u >= q >= a_u - C
    // Knowing that q > 0 (since f_l > f_u and \eta_ul > 0), and 0 <= a_u <= C,
    // the constraints are simplified as
    // y == 1: q <= C-a_u, and y == -1: q <= a_u
    // Similarly we can say for a'_l:
    // y == 1: q <= a_l, and y ==- 1: q <= C - a_l
    // We clip q accordingly before we do the update of a.
    if (threadIdx.x == u) tmp_u = y > 0 ? C - a : a;
    if (threadIdx.x == l) {
      tmp_l = y > 0 ? a : C - a;
      // note: Kui == Kul for this thread
      math_t eta_ul = max(Kd[u] + Kd[l] - 2 * Kui, ETA_EPS);
      tmp_l = min(tmp_l, (f - f_u) / eta_ul);
    }
    __syncthreads();  // publish tmp_u and tmp_l to all threads
    math_t q = min(tmp_u, tmp_l);
    if (threadIdx.x == u) a += q * y;
    if (threadIdx.x == l) a -= q * y;
    // Every thread updates its own f using the kernel columns of u and l.
    f += q * (Kui - Kli);
    if (q == 0) {
      // Probably fp underflow
      break;
    }
  }
  // save results to global memory before exit
  alpha[idx] = a;
  // it is actually y * \Delta \alpha
  // This is equivalent with: delta_alpha[tid] = (a - a_save) * y;
  delta_alpha[tid] = (a - a_save) * y;
  // f is recalculated in f_update, therefore we do not need to save that
  return_buff[1] = n_iter;
}
}; // end namespace SVM
}; // end namespace ML
| 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.