text
stringlengths
2.5k
6.39M
kind
stringclasses
3 values
// CUDA launcher functions for the IDR(s) Krylov solver kernels (Ginkgo).
// NOTE(review): recovered from a whitespace-mangled dump; the code below is
// re-wrapped for readability but token-for-token identical to the original.
#include <ctime>
#include <random>

#include <ginkgo/core/base/exception_helpers.hpp>
#include <ginkgo/core/base/math.hpp>

#include "core/components/fill_array_kernels.hpp"
#include "cuda/base/config.hpp"
#include "cuda/base/cublas_bindings.hpp"
#include "cuda/base/curand_bindings.hpp"
#include "cuda/base/math.hpp"
#include "cuda/base/types.hpp"
#include "cuda/components/atomic.cuh"
#include "cuda/components/cooperative_groups.cuh"
#include "cuda/components/reduction.cuh"
#include "cuda/components/thread_ids.cuh"

namespace gko {
namespace kernels {
namespace cuda {
/**
 * @brief The IDR solver namespace.
 *
 * @ingroup idr
 */
namespace idr {

// Launch configuration constants: 1-D block size for elementwise kernels and
// a 32x32 tile for the multi-RHS dot-product kernel.
constexpr int default_block_size = 512;
constexpr int default_dot_dim = 32;
constexpr int default_dot_size = default_dot_dim * default_dot_dim;

// Kernel definitions shared between the CUDA and HIP backends.
#include "common/cuda_hip/solver/idr_kernels.hpp.inc"

namespace {

// Initializes the subspace matrix M (and the per-RHS stopping status) on the
// device; one thread per entry of M (subspace_dim rows x m_stride columns).
template <typename ValueType>
void initialize_m(const size_type nrhs, matrix::Dense<ValueType>* m,
                  Array<stopping_status>* stop_status)
{
    const auto subspace_dim = m->get_size()[0];
    const auto m_stride = m->get_stride();
    const auto grid_dim = ceildiv(m_stride * subspace_dim, default_block_size);
    initialize_m_kernel<<<grid_dim, default_block_size>>>(
        subspace_dim, nrhs, as_cuda_type(m->get_values()), m_stride,
        as_cuda_type(stop_status->get_data()));
}

// Fills the subspace vectors with random values.
// - deterministic: host-side generation with a fixed seed (ranlux48(15)), so
//   repeated runs produce identical subspaces.
// - otherwise: device-side cuRAND generation seeded with time(NULL).
//   NOTE(review): both paths appear to draw from a (0, 1) distribution —
//   confirm curand::rand_vector uses mean/stddev semantics matching
//   std::normal_distribution<>(0.0, 1.0).
template <typename ValueType>
void initialize_subspace_vectors(matrix::Dense<ValueType>* subspace_vectors,
                                 bool deterministic)
{
    if (deterministic) {
        auto subspace_vectors_data = matrix_data<ValueType>(
            subspace_vectors->get_size(), std::normal_distribution<>(0.0, 1.0),
            std::ranlux48(15));
        subspace_vectors->read(subspace_vectors_data);
    } else {
        auto gen = curand::rand_generator(time(NULL), CURAND_RNG_PSEUDO_DEFAULT);
        curand::rand_vector(
            gen,
            subspace_vectors->get_size()[0] * subspace_vectors->get_stride(),
            0.0, 1.0, subspace_vectors->get_values());
    }
}

// Orthonormalizes the rows of the subspace-vector matrix with a single
// thread block (the kernel is templated on the block size).
template <typename ValueType>
void orthonormalize_subspace_vectors(matrix::Dense<ValueType>* subspace_vectors)
{
    orthonormalize_subspace_vectors_kernel<default_block_size>
        <<<1, default_block_size>>>(
            subspace_vectors->get_size()[0], subspace_vectors->get_size()[1],
            as_cuda_type(subspace_vectors->get_values()),
            subspace_vectors->get_stride());
}

// Solves the lower-triangular system M c = f for all right-hand sides that
// have not yet converged; one thread per RHS.
template <typename ValueType>
void solve_lower_triangular(const size_type nrhs,
                            const matrix::Dense<ValueType>* m,
                            const matrix::Dense<ValueType>* f,
                            matrix::Dense<ValueType>* c,
                            const Array<stopping_status>* stop_status)
{
    const auto subspace_dim = m->get_size()[0];
    const auto grid_dim = ceildiv(nrhs, default_block_size);
    solve_lower_triangular_kernel<<<grid_dim, default_block_size>>>(
        subspace_dim, nrhs, as_cuda_type(m->get_const_values()),
        m->get_stride(), as_cuda_type(f->get_const_values()), f->get_stride(),
        as_cuda_type(c->get_values()), c->get_stride(),
        as_cuda_type(stop_status->get_const_data()));
}

// Orthogonalizes g_k against the previous basis vectors p_0..p_{k-1} and
// updates u accordingly, then writes the finished g_k back into column k of g.
// For nrhs == 1 real problems the dot products go through cuBLAS; otherwise a
// custom multi-dot kernel accumulates alpha (zero-filled first because the
// kernel accumulates atomically into it).
template <typename ValueType>
void update_g_and_u(std::shared_ptr<const CudaExecutor> exec,
                    const size_type nrhs, const size_type k,
                    const matrix::Dense<ValueType>* p,
                    const matrix::Dense<ValueType>* m,
                    matrix::Dense<ValueType>* alpha,
                    matrix::Dense<ValueType>* g, matrix::Dense<ValueType>* g_k,
                    matrix::Dense<ValueType>* u,
                    const Array<stopping_status>* stop_status)
{
    const auto size = g->get_size()[0];
    const auto p_stride = p->get_stride();
    const dim3 grid_dim(ceildiv(nrhs, default_dot_dim),
                        exec->get_num_multiprocessor() * 2);
    const dim3 block_dim(default_dot_dim, default_dot_dim);

    for (size_type i = 0; i < k; i++) {
        const auto p_i = p->get_const_values() + i * p_stride;
        if (nrhs > 1 || is_complex<ValueType>()) {
            // alpha is accumulated by the kernel, so reset it first.
            components::fill_array(exec, alpha->get_values(), nrhs,
                                   zero<ValueType>());
            multidot_kernel<<<grid_dim, block_dim>>>(
                size, nrhs, as_cuda_type(p_i),
                as_cuda_type(g_k->get_values()), g_k->get_stride(),
                as_cuda_type(alpha->get_values()),
                as_cuda_type(stop_status->get_const_data()));
        } else {
            cublas::dot(exec->get_cublas_handle(), size, p_i, 1,
                        g_k->get_values(), g_k->get_stride(),
                        alpha->get_values());
        }
        update_g_k_and_u_kernel<default_block_size>
            <<<ceildiv(size * g_k->get_stride(), default_block_size),
               default_block_size>>>(
                k, i, size, nrhs, as_cuda_type(alpha->get_const_values()),
                as_cuda_type(m->get_const_values()), m->get_stride(),
                as_cuda_type(g->get_const_values()), g->get_stride(),
                as_cuda_type(g_k->get_values()), g_k->get_stride(),
                as_cuda_type(u->get_values()), u->get_stride(),
                as_cuda_type(stop_status->get_const_data()));
    }
    update_g_kernel<default_block_size>
        <<<ceildiv(size * g_k->get_stride(), default_block_size),
           default_block_size>>>(
            k, size, nrhs, as_cuda_type(g_k->get_const_values()),
            g_k->get_stride(), as_cuda_type(g->get_values()), g->get_stride(),
            as_cuda_type(stop_status->get_const_data()));
}

// Recomputes row entries M(i, k) = <p_i, g_k> for i = k..subspace_dim-1,
// using the same cuBLAS / multi-dot split as update_g_and_u.
template <typename ValueType>
void update_m(std::shared_ptr<const CudaExecutor> exec, const size_type nrhs,
              const size_type k, const matrix::Dense<ValueType>* p,
              const matrix::Dense<ValueType>* g_k, matrix::Dense<ValueType>* m,
              const Array<stopping_status>* stop_status)
{
    const auto size = g_k->get_size()[0];
    const auto subspace_dim = m->get_size()[0];
    const auto p_stride = p->get_stride();
    const auto m_stride = m->get_stride();
    const dim3 grid_dim(ceildiv(nrhs, default_dot_dim),
                        exec->get_num_multiprocessor() * 2);
    const dim3 block_dim(default_dot_dim, default_dot_dim);

    for (size_type i = k; i < subspace_dim; i++) {
        const auto p_i = p->get_const_values() + i * p_stride;
        auto m_i = m->get_values() + i * m_stride + k * nrhs;
        if (nrhs > 1 || is_complex<ValueType>()) {
            // m_i is accumulated by the kernel, so reset it first.
            components::fill_array(exec, m_i, nrhs, zero<ValueType>());
            multidot_kernel<<<grid_dim, block_dim>>>(
                size, nrhs, as_cuda_type(p_i),
                as_cuda_type(g_k->get_const_values()), g_k->get_stride(),
                as_cuda_type(m_i),
                as_cuda_type(stop_status->get_const_data()));
        } else {
            cublas::dot(exec->get_cublas_handle(), size, p_i, 1,
                        g_k->get_const_values(), g_k->get_stride(), m_i);
        }
    }
}

// Applies the step-k update to the solution x, residual r and rhs projection
// f, then zeroes out row k of f for the still-active right-hand sides.
template <typename ValueType>
void update_x_r_and_f(std::shared_ptr<const CudaExecutor> exec,
                      const size_type nrhs, const size_type k,
                      const matrix::Dense<ValueType>* m,
                      const matrix::Dense<ValueType>* g,
                      const matrix::Dense<ValueType>* u,
                      matrix::Dense<ValueType>* f, matrix::Dense<ValueType>* r,
                      matrix::Dense<ValueType>* x,
                      const Array<stopping_status>* stop_status)
{
    const auto size = x->get_size()[0];
    const auto subspace_dim = m->get_size()[0];
    const auto grid_dim = ceildiv(size * x->get_stride(), default_block_size);
    update_x_r_and_f_kernel<<<grid_dim, default_block_size>>>(
        k, size, subspace_dim, nrhs, as_cuda_type(m->get_const_values()),
        m->get_stride(), as_cuda_type(g->get_const_values()), g->get_stride(),
        as_cuda_type(u->get_const_values()), u->get_stride(),
        as_cuda_type(f->get_values()), f->get_stride(),
        as_cuda_type(r->get_values()), r->get_stride(),
        as_cuda_type(x->get_values()), x->get_stride(),
        as_cuda_type(stop_status->get_const_data()));
    components::fill_array(exec, f->get_values() + k * f->get_stride(), nrhs,
                           zero<ValueType>());
}

}  // namespace

// Public entry point: sets up M, the (orthonormalized) random subspace
// vectors and the stopping status before the IDR iteration starts.
template <typename ValueType>
void initialize(std::shared_ptr<const CudaExecutor> exec, const size_type nrhs,
                matrix::Dense<ValueType>* m,
                matrix::Dense<ValueType>* subspace_vectors, bool deterministic,
                Array<stopping_status>* stop_status)
{
    initialize_m(nrhs, m, stop_status);
    initialize_subspace_vectors(subspace_vectors, deterministic);
    orthonormalize_subspace_vectors(subspace_vectors);
}

GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_IDR_INITIALIZE_KERNEL);


// IDR step 1: solve M c = f, then form v = residual - sum_i c_i g_i.
template <typename ValueType>
void step_1(std::shared_ptr<const CudaExecutor> exec, const size_type nrhs,
            const size_type k, const matrix::Dense<ValueType>* m,
            const matrix::Dense<ValueType>* f,
            const matrix::Dense<ValueType>* residual,
            const matrix::Dense<ValueType>* g, matrix::Dense<ValueType>* c,
            matrix::Dense<ValueType>* v,
            const Array<stopping_status>* stop_status)
{
    solve_lower_triangular(nrhs, m, f, c, stop_status);
    const auto num_rows = v->get_size()[0];
    const auto subspace_dim = m->get_size()[0];
    const auto grid_dim = ceildiv(nrhs * num_rows, default_block_size);
    step_1_kernel<<<grid_dim, default_block_size>>>(
        k, num_rows, subspace_dim, nrhs,
        as_cuda_type(residual->get_const_values()), residual->get_stride(),
        as_cuda_type(c->get_const_values()), c->get_stride(),
        as_cuda_type(g->get_const_values()), g->get_stride(),
        as_cuda_type(v->get_values()), v->get_stride(),
        as_cuda_type(stop_status->get_const_data()));
}

GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_IDR_STEP_1_KERNEL);


// IDR step 2: build the new direction u_k from omega, the preconditioned
// vector and the triangular-solve coefficients c.
template <typename ValueType>
void step_2(std::shared_ptr<const CudaExecutor> exec, const size_type nrhs,
            const size_type k, const matrix::Dense<ValueType>* omega,
            const matrix::Dense<ValueType>* preconditioned_vector,
            const matrix::Dense<ValueType>* c, matrix::Dense<ValueType>* u,
            const Array<stopping_status>* stop_status)
{
    const auto num_rows = preconditioned_vector->get_size()[0];
    const auto subspace_dim = u->get_size()[1] / nrhs;
    const auto grid_dim = ceildiv(nrhs * num_rows, default_block_size);
    step_2_kernel<<<grid_dim, default_block_size>>>(
        k, num_rows, subspace_dim, nrhs,
        as_cuda_type(omega->get_const_values()),
        as_cuda_type(preconditioned_vector->get_const_values()),
        preconditioned_vector->get_stride(),
        as_cuda_type(c->get_const_values()), c->get_stride(),
        as_cuda_type(u->get_values()), u->get_stride(),
        as_cuda_type(stop_status->get_const_data()));
}

GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_IDR_STEP_2_KERNEL);


// IDR step 3: orthogonalize g_k/u_k, refresh M, and update x, r and f.
template <typename ValueType>
void step_3(std::shared_ptr<const CudaExecutor> exec, const size_type nrhs,
            const size_type k, const matrix::Dense<ValueType>* p,
            matrix::Dense<ValueType>* g, matrix::Dense<ValueType>* g_k,
            matrix::Dense<ValueType>* u, matrix::Dense<ValueType>* m,
            matrix::Dense<ValueType>* f, matrix::Dense<ValueType>* alpha,
            matrix::Dense<ValueType>* residual, matrix::Dense<ValueType>* x,
            const Array<stopping_status>* stop_status)
{
    update_g_and_u(exec, nrhs, k, p, m, alpha, g, g_k, u, stop_status);
    update_m(exec, nrhs, k, p, g_k, m, stop_status);
    update_x_r_and_f(exec, nrhs, k, m, g, u, f, residual, x, stop_status);
}

GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_IDR_STEP_3_KERNEL);


// Computes the relaxation factor omega per right-hand side; one warp-sized
// block stride over the nrhs entries.
template <typename ValueType>
void compute_omega(
    std::shared_ptr<const CudaExecutor> exec, const size_type nrhs,
    const remove_complex<ValueType> kappa, const matrix::Dense<ValueType>* tht,
    const matrix::Dense<remove_complex<ValueType>>* residual_norm,
    matrix::Dense<ValueType>* omega,
    const Array<stopping_status>* stop_status)
{
    const auto grid_dim = ceildiv(nrhs, config::warp_size);
    compute_omega_kernel<<<grid_dim, config::warp_size>>>(
        nrhs, kappa, as_cuda_type(tht->get_const_values()),
        as_cuda_type(residual_norm->get_const_values()),
        as_cuda_type(omega->get_values()),
        as_cuda_type(stop_status->get_const_data()));
}

GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_IDR_COMPUTE_OMEGA_KERNEL);


}  // namespace idr
}  // namespace cuda
}  // namespace kernels
}  // namespace gko
the_stack
namespace mn {

// Per-block dense SoA storage: channels are (sdis, gradx, grady, gradz).
using block_signed_distance_field_ =
    structural<structural_type::dense,
               decorator<structural_allocation_policy::full_allocation,
                         structural_padding_policy::sum_pow2_align>,
               BlockDomain, attrib_layout::soa, f32_, f32_, f32_,
               f32_>;  ///< sdis, gradx, grady, gradz
// Whole-domain grid of SDF blocks (AoS over blocks).
using signed_distance_field_ =
    structural<structural_type::dense,
               decorator<structural_allocation_policy::full_allocation,
                         structural_padding_policy::sum_pow2_align>,
               GridDomain, attrib_layout::aos, block_signed_distance_field_>;

// Velocity treatment applied when a grid node is inside the collision object.
enum class boundary_t { Sticky, Slip, Separate };

// Rigid collision object represented by a node-sampled signed distance field
// plus a time-dependent rigid transform (rotation, translation, scaling).
struct SignedDistanceGrid : Instance<signed_distance_field_> {
    using base_t = Instance<signed_distance_field_>;
    constexpr auto &self() noexcept { return static_cast<base_t &>(*this); }

    // Allocate device storage and reset the rigid-motion state to identity.
    template <typename Allocator>
    SignedDistanceGrid(Allocator allocator)
        : base_t{spawn<signed_distance_field_, orphan_signature>(allocator)} {
        _rotMat.set(0.f);
        _rotMat(0, 0) = _rotMat(1, 1) = _rotMat(2, 2) = 1.f;
        _trans.set(0.f);
        _transVel.set(0.f);
        _omega.set(0.f);
        _dsdt = 0.f;
        _scale = 1.f;
        _friction = 0.3f;
        _type = boundary_t::Sticky;
    }

    // Upload the host-side SDF buffer into this instance (async on `stream`).
    void init(base_t &hostData, cudaStream_t stream) {
        checkCudaErrors(cudaMemcpyAsync(&this->ch(_0, 0, 0, 0).val_1d(_0, 0),
                                        &hostData.ch(_0, 0, 0, 0).val_1d(_0, 0),
                                        signed_distance_field_::base_t::size,
                                        cudaMemcpyDefault, stream));
    }

    // Rigid-body velocity of material point X: v = omega x (X - trans) + v_t.
    constexpr vec3 get_material_velocity(const vec3 &X) {
        vec3 radius = X - _trans;
        vec3 vel{};
        vec_crossMul_vec_3D(vel.data(), _omega.data(), radius.data());
        vel += _transVel;
        return vel;
    }

    // Rotation matrix for angle `omega` about axis `dim` (0/1/2 -> x/y/z).
    __forceinline__ __device__ auto rot_angle_to_matrix(const float omega,
                                                        const int dim)
        -> vec3x3 {
        vec3x3 res;
        res.set(0.f);
        if (dim == 0) {
            res(0, 0) = 1;
            res(1, 1) = res(2, 2) = cosf(omega);
            res(2, 1) = res(1, 2) = sinf(omega);
            res(1, 2) = -res(1, 2);
        } else if (dim == 1) {
            res(1, 1) = 1;
            res(0, 0) = res(2, 2) = cosf(omega);
            res(2, 0) = res(0, 2) = sinf(omega);
            res(2, 0) = -res(2, 0);
        } else if (dim == 2) {
            res(2, 2) = 1;
            res(0, 0) = res(1, 1) = cosf(omega);
            res(1, 0) = res(0, 1) = sinf(omega);
            res(0, 1) = -res(0, 1);
        }
        return res;
    }

    // Standard 3D cross product v1 x v2.
    // BUGFIX(review): the second term of every component previously used '+'
    // instead of '-', which is not a cross product (compare the correct
    // helper vec_crossMul_vec_3D used in get_material_velocity). The wrong
    // value fed v_object = omega x r in detect_and_resolve_collision.
    constexpr auto vec3_cross_vec3(vec3 v1, vec3 v2) {
        vec3 res{v1[1] * v2[2] - v1[2] * v2[1],
                 v1[2] * v2[0] - v1[0] * v2[2],
                 v1[0] * v2[1] - v1[1] * v2[0]};
        return res;
    }

    //< return signed distance value + set normal
    // Trilinear interpolation of the signed distance and its gradient at X,
    // sampled from the 8 surrounding grid nodes.
    // NOTE(review): the normal is normalized by its length without a zero
    // guard — a zero gradient would produce NaNs; confirm inputs exclude it.
    __forceinline__ __device__ float
    get_signed_distance_and_normal(const vec3 &X, vec3 &normal) {
        //< g_cid <=> global cell ID
        ivec3 g_cid = (X / config::g_dx).cast<int>();
        // 1. init
        float sdis_res{0.f};
        normal.set(0.f);
        // 1.1 prepare
        float W[2][2][2];  //< linear interpolation weight
        {
            vec3 dis_lb = X - (g_cid.cast<float>() *
                               config::g_dx);  //< distance to the left-corner node
            float W_1d[3][2];                  //< 1d weight, [dim][node]
            for (int d = 0; d < 3; ++d) {
                W_1d[d][0] = 1 - dis_lb[d] / config::g_dx;
                W_1d[d][1] = dis_lb[d] / config::g_dx;
            }
            for (int i = 0; i < 2; ++i)
                for (int j = 0; j < 2; ++j)
                    for (int k = 0; k < 2; ++k)
                        W[i][j][k] = W_1d[0][i] * W_1d[1][j] * W_1d[2][k];
        }
        // 2. compute signed distance and normal
        for (int i = 0; i < 2; ++i)
            for (int j = 0; j < 2; ++j)
                for (int k = 0; k < 2; ++k) {
                    sdis_res +=
                        W[i][j][k] *
                        (this->ch(_0, ((g_cid[0] + i) / config::g_blocksize),
                                  ((g_cid[1] + j) / config::g_blocksize),
                                  ((g_cid[2] + k) / config::g_blocksize))
                             .val(_0, (g_cid[0] + i) % config::g_blocksize,
                                  (g_cid[1] + j) % config::g_blocksize,
                                  (g_cid[2] + k) % config::g_blocksize));
                    normal[0] +=
                        W[i][j][k] *
                        (this->ch(_0, ((g_cid[0] + i) / config::g_blocksize),
                                  ((g_cid[1] + j) / config::g_blocksize),
                                  ((g_cid[2] + k) / config::g_blocksize))
                             .val(_1, (g_cid[0] + i) % config::g_blocksize,
                                  (g_cid[1] + j) % config::g_blocksize,
                                  (g_cid[2] + k) % config::g_blocksize));
                    normal[1] +=
                        W[i][j][k] *
                        (this->ch(_0, ((g_cid[0] + i) / config::g_blocksize),
                                  ((g_cid[1] + j) / config::g_blocksize),
                                  ((g_cid[2] + k) / config::g_blocksize))
                             .val(_2, (g_cid[0] + i) % config::g_blocksize,
                                  (g_cid[1] + j) % config::g_blocksize,
                                  (g_cid[2] + k) % config::g_blocksize));
                    normal[2] +=
                        W[i][j][k] *
                        (this->ch(_0, ((g_cid[0] + i) / config::g_blocksize),
                                  ((g_cid[1] + j) / config::g_blocksize),
                                  ((g_cid[2] + k) / config::g_blocksize))
                             .val(_3, (g_cid[0] + i) % config::g_blocksize,
                                  (g_cid[1] + j) % config::g_blocksize,
                                  (g_cid[2] + k) % config::g_blocksize));
                }
        normal /= sqrtf(normal.l2NormSqr());
        return sdis_res;
    }

    // Returns true (and the SDF gradient in `normal`) iff X lies inside the
    // valid domain margin AND inside the object (signed distance <= 0).
    __forceinline__ __device__ bool query_sdf(vec3 &normal, const vec3 &X) {
        if (X[0] < g_bc * config::g_dx * config::g_blocksize ||
            X[0] >= (GridDomain::range(_0) - g_bc) * config::g_blocksize *
                        config::g_dx ||
            X[1] < g_bc * config::g_dx * config::g_blocksize ||
            X[1] >= (GridDomain::range(_1) - g_bc) * config::g_blocksize *
                        config::g_dx ||
            X[2] < g_bc * config::g_dx * config::g_blocksize ||
            X[2] >= (GridDomain::range(_2) - g_bc) * config::g_blocksize *
                        config::g_dx)
            return false;
        return get_signed_distance_and_normal(X, normal) <= 0.f;
    }

    //< detect if there is collision with the object, if there is, reset grid
    // velocity < call this inside grid update kernel
    /* Takes a position and its velocity,
     * project the grid velocity, //? [to check] and returns a normal if the
     * collision happened as a SLIP collsion.
     *
     * derivation:
     *
     * x = \phi(X,t) = R(t)s(t)X+b(t)
     * X = \phi^{-1}(x,t) = (1/s) R^{-1} (x-b)
     * V(X,t) = \frac{\partial \phi}{\partial t}
     *        = R'sX + Rs'X + RsX' + b'
     * v(x,t) = V(\phi^{-1}(x,t),t)
     *        = R'R^{-1}(x-b) + (s'/s)(x-b) + RsX' + b'
     *        = omega \cross (x-b) + (s'/s)(x-b) + RsV + b'*/
    __forceinline__ __device__ void
    detect_and_resolve_collision(const ivec3 block_id, const ivec3 cell_id,
                                 float currentTime, vec3 &vel) {
        vec3 x_minus_trans =
            (block_id * config::g_blocksize + cell_id).cast<float>() *
                config::g_dx -
            (_trans + _transVel * currentTime);
        // material space
        vec3 X;
        vec3x3 rotMat = _rotMat;
        {
            vec3 X0 = x_minus_trans * (1.f / (1.f + _dsdt * currentTime));
            // accumulate the x/y/z rotations at the current time
            vec3x3 rot_tmp = rot_angle_to_matrix(_omega[0] * currentTime, 0);
            vec3x3 prevRot = rotMat;
            matrixMatrixMultiplication3d(prevRot.data(), rot_tmp.data(),
                                         rotMat.data());
            rot_tmp = rot_angle_to_matrix(_omega[1] * currentTime, 1);
            prevRot = rotMat;
            matrixMatrixMultiplication3d(prevRot.data(), rot_tmp.data(),
                                         rotMat.data());
            rot_tmp = rot_angle_to_matrix(_omega[2] * currentTime, 2);
            prevRot = rotMat;
            matrixMatrixMultiplication3d(prevRot.data(), rot_tmp.data(),
                                         rotMat.data());
            matT_mul_vec_3D(X.data(), rotMat.data(), X0.data());
        }
        X = X * _scale + _trans;
        //< enforce BC if inside LS
        vec3 obj_normal;
        bool hit = query_sdf(obj_normal, X);  //< material space normal
        if (hit) {
            ///< calculate object velocity in deformation space
            vec3 v_object = vec3_cross_vec3(_omega, x_minus_trans);
            v_object += x_minus_trans * (_dsdt / _scale);
            {
                vec3 rot_V;
                matrixVectorMultiplication3d(rotMat.data(),
                                             get_material_velocity(X).data(),
                                             rot_V.data());
                v_object += rot_V * _scale + _transVel;
            }
            // work in the object's rest frame
            vel -= v_object;
            /// sticky
            if (_type == boundary_t::Sticky)
                vel.set(0.f);
            /// slip
            else if (_type == boundary_t::Slip) {
                {
                    vec3 n;
                    matrixVectorMultiplication3d(rotMat.data(),
                                                 obj_normal.data(), n.data());
                    obj_normal = n;
                }
                float v_dot_n = obj_normal.dot(vel);
                vel -= (obj_normal * v_dot_n);
                if (_friction > 0.f) {
                    if (v_dot_n < 0) {
                        float velNorm = sqrtf(vel.l2NormSqr());
                        if (-v_dot_n * _friction < velNorm)
                            vel += (vel / velNorm * (v_dot_n * _friction));
                        else
                            vel.set(0.f);
                    }
                }
            }
            /// separate (only remove the normal component when approaching)
            else if (_type == boundary_t::Separate) {
                if (obj_normal[0] == 0.f && obj_normal[1] == 0.f &&
                    obj_normal[2] == 0.f) {
                    vel.set(0.f);
                    return;
                }
                {
                    vec3 n;
                    matrixVectorMultiplication3d(rotMat.data(),
                                                 obj_normal.data(), n.data());
                    obj_normal = n;
                }
                float v_dot_n = obj_normal.dot(vel);
                if (v_dot_n < 0) {
                    vel -= (obj_normal * v_dot_n);
                    if (_friction != 0) {
                        float velNorm = sqrtf(vel.l2NormSqr());
                        if (-v_dot_n * _friction < velNorm)
                            vel += (vel / velNorm * (v_dot_n * _friction));
                        else
                            vel.set(0.f);
                    }
                }
            } else
                printf("[ERROR] Wrong Boundary Type!\n");
            // back to world frame
            vel += v_object;
        }
    }

    vec3x3 _rotMat;                  // rest-frame rotation
    vec3 _trans, _transVel, _omega;  // translation, its velocity, angular vel.
    float _dsdt, _scale;             // scaling rate and current scale
    float _friction;                 // Coulomb friction coefficient
    boundary_t _type;                // boundary treatment
};

// Copies one channel (`Chn`: 0 = sdis, 1..3 = gradient components) of a flat
// row-major host buffer into the block-structured host instance.
template <place_id Chn>
void fillSignedDistanceField(std::integral_constant<place_id, Chn> chn,
                             const std::vector<float> &s_dis,
                             Instance<signed_distance_field_> &hostData) {
    int insideNodeNum = 0;
    for (auto bx = 0; bx < GridDomain::range(_0); ++bx)
        for (auto by = 0; by < GridDomain::range(_1); ++by)
            for (auto bz = 0; bz < GridDomain::range(_2); ++bz) {
                auto sdis_block = hostData.ch(_0, bx, by, bz);
                for (auto cx = 0; cx < config::g_blocksize; ++cx) {
                    auto i = bx * config::g_blocksize + cx;
                    for (auto cy = 0; cy < config::g_blocksize; ++cy) {
                        auto j = by * config::g_blocksize + cy;
                        for (auto cz = 0; cz < config::g_blocksize; ++cz) {
                            auto k = bz * config::g_blocksize + cz;
                            auto idx = (i * GridDomain::range(_1) *
                                            config::g_blocksize *
                                            GridDomain::range(_2) *
                                            config::g_blocksize) +
                                       (j * GridDomain::range(_2) *
                                        config::g_blocksize) +
                                       k;
                            sdis_block.val(chn, cx, cy, cz) = s_dis[idx];
                            if (Chn == 0)
                                if (s_dis[idx] <= 0.f)
                                    insideNodeNum++;
                        }
                    }
                }
            }
    if (Chn == 0)
        fmt::print("[Collision Object]\n\t[from saved signed_distance_field] "
                   "Finish init signed distance buffer, inside node num = {}.\n",
                   insideNodeNum);
    else
        fmt::print("[Collision Object]\n\t[from saved signed_distance_field] "
                   "Finish init signed distance gradient [{}].\n",
                   Chn);
}

// Loads the SDF and its three gradient components from four binary files
// ("<name>_sdf.bin", "<name>_grad_{0,1,2}.bin") into the host instance.
void initFromSignedDistanceFile(std::string filename,
                                vec<std::size_t, 3> resolution,
                                Instance<signed_distance_field_> &hostData) {
    std::string fileAddr = std::string(AssetDirPath) + "vdbSDF/";
    std::vector<float> sdisf(resolution.prod());
    auto readFile = [&](std::string suffix) {
        auto fn = fopen((fileAddr + filename + suffix).c_str(), "rb");
        // BUGFIX(review): guard against a missing file — fread on a null
        // FILE* is undefined behavior.
        if (fn == nullptr) {
            printf("Error in loading file [%s]: cannot open\n",
                   (filename + suffix).c_str());
            exit(0);
        }
        std::size_t readNum = std::fread((float *)sdisf.data(), sizeof(float),
                                         sdisf.size(), fn);
        if (readNum != (std::size_t)resolution.prod()) {
            printf("Error in loading file [%s]: read in %d entries, should be %d\n",
                   filename.c_str(), (int)readNum, (int)resolution.prod());
            exit(0);
        }
        std::fclose(fn);
    };
    readFile("_sdf.bin");
    fillSignedDistanceField(_0, sdisf, hostData);
    readFile("_grad_0.bin");
    fillSignedDistanceField(_1, sdisf, hostData);
    readFile("_grad_1.bin");
    fillSignedDistanceField(_2, sdisf, hostData);
    readFile("_grad_2.bin");
    fillSignedDistanceField(_3, sdisf, hostData);
}

}  // namespace mn

#endif
the_stack
#include "CUFLU.h" #if ( MODEL == HYDRO ) // external functions #ifdef __CUDACC__ #include "CUFLU_Shared_FluUtility.cu" #else // #ifdef __CUDACC__ void Hydro_Rotate3D( real InOut[], const int XYZ, const bool Forward, const int Mag_Offset ); void Hydro_Con2Flux( const int XYZ, real Flux[], const real In[], const real MinPres, const EoS_DE2P_t EoS_DensEint2Pres, const double EoS_AuxArray_Flt[], const int EoS_AuxArray_Int[], const real *const EoS_Table[EOS_NTABLE_MAX], const real* const PresIn ); #endif // #ifdef __CUDACC__ ... else ... //------------------------------------------------------------------------------------------------------- // Function : Hydro_RiemannSolver_HLLE // Description : Approximate Riemann solver of Harten, Lax, and van Leer // // Note : 1. Input data should be conserved variables // 2. Ref : (a) Riemann Solvers and Numerical Methods for Fluid Dynamics - A Practical Introduction // ~ by Eleuterio F. Toro // (b) Stone et al., ApJS, 178, 137 (2008) // (c) Einfeldt et al., J. Comput. Phys., 92, 273 (1991) // 3. Wave-speed estimator is set by HLLE_WAVESPEED in CUFLU.h // 4. Support general EoS // 5. 
Shared by MHM, MHM_RP, and CTU schemes // // Parameter : XYZ : Target spatial direction : (0/1/2) --> (x/y/z) // Flux_Out : Array to store the output flux // L/R_In : Input left/right states (conserved variables) // MinDens/Pres : Density and pressure floors // EoS_DensEint2Pres : EoS routine to compute the gas pressure // EoS_DensPres2CSqr : EoS routine to compute the sound speed squared // EoS_AuxArray_* : Auxiliary arrays for the EoS routines // EoS_Table : EoS tables //------------------------------------------------------------------------------------------------------- GPU_DEVICE void Hydro_RiemannSolver_HLLE( const int XYZ, real Flux_Out[], const real L_In[], const real R_In[], const real MinDens, const real MinPres, const EoS_DE2P_t EoS_DensEint2Pres, const EoS_DP2C_t EoS_DensPres2CSqr, const double EoS_AuxArray_Flt[], const int EoS_AuxArray_Int[], const real* const EoS_Table[EOS_NTABLE_MAX] ) { // 1. reorder the input variables for different spatial directions real L[NCOMP_TOTAL_PLUS_MAG], R[NCOMP_TOTAL_PLUS_MAG]; for (int v=0; v<NCOMP_TOTAL_PLUS_MAG; v++) { L[v] = L_In[v]; R[v] = R_In[v]; } Hydro_Rotate3D( L, XYZ, true, MAG_OFFSET ); Hydro_Rotate3D( R, XYZ, true, MAG_OFFSET ); # ifdef CHECK_NEGATIVE_IN_FLUID if ( Hydro_CheckNegative(L[0]) ) printf( "ERROR : invalid density (%14.7e) at file <%s>, line <%d>, function <%s>\n", L[0], __FILE__, __LINE__, __FUNCTION__ ); if ( Hydro_CheckNegative(R[0]) ) printf( "ERROR : invalid density (%14.7e) at file <%s>, line <%d>, function <%s>\n", R[0], __FILE__, __LINE__, __FUNCTION__ ); # endif // 2. estimate the maximum wave speeds // 2-1. 
compute the left/right states const real ZERO = (real)0.0; const real ONE = (real)1.0; const real _TWO = (real)0.5; const bool CheckMinPres_Yes = true; real _RhoL, _RhoR, u_L, u_R, Emag_L, Emag_R, P_L, P_R, a2_L, a2_R, Cf_L, Cf_R, MaxV_L, MaxV_R; # ifdef MHD real Bx_L, By_L, Bz_L, Bx_R, By_R, Bz_R, Bx2_L, Bt2_L, Bx2_R, Bt2_R, B2_L, B2_R; real Cax2_L, Cat2_L, Ca2_plus_a2_L, Ca2_min_a2_L, Cf2_min_Cs2_L, Cf2_L; // "plus"="+", "min"="-", Cs=slow wave real Cax2_R, Cat2_R, Ca2_plus_a2_R, Ca2_min_a2_R, Cf2_min_Cs2_R, Cf2_R; # endif _RhoL = ONE / L[0]; _RhoR = ONE / R[0]; u_L = _RhoL*L[1]; u_R = _RhoR*R[1]; # ifdef MHD Bx_L = L[ MAG_OFFSET + 0 ]; By_L = L[ MAG_OFFSET + 1 ]; Bz_L = L[ MAG_OFFSET + 2 ]; Bx_R = R[ MAG_OFFSET + 0 ]; By_R = R[ MAG_OFFSET + 1 ]; Bz_R = R[ MAG_OFFSET + 2 ]; Bx2_L = SQR( Bx_L ); Bx2_R = SQR( Bx_R ); Bt2_L = SQR( By_L ) + SQR( Bz_L ); Bt2_R = SQR( By_R ) + SQR( Bz_R ); B2_L = Bx2_L + Bt2_L; B2_R = Bx2_R + Bt2_R; Emag_L = _TWO*B2_L; Emag_R = _TWO*B2_R; # else Emag_L = NULL_REAL; Emag_R = NULL_REAL; # endif P_L = Hydro_Con2Pres( L[0], L[1], L[2], L[3], L[4], L+NCOMP_FLUID, CheckMinPres_Yes, MinPres, Emag_L, EoS_DensEint2Pres, EoS_AuxArray_Flt, EoS_AuxArray_Int, EoS_Table, NULL ); P_R = Hydro_Con2Pres( R[0], R[1], R[2], R[3], R[4], R+NCOMP_FLUID, CheckMinPres_Yes, MinPres, Emag_R, EoS_DensEint2Pres, EoS_AuxArray_Flt, EoS_AuxArray_Int, EoS_Table, NULL ); a2_L = EoS_DensPres2CSqr( L[0], P_L, L+NCOMP_FLUID, EoS_AuxArray_Flt, EoS_AuxArray_Int, EoS_Table ); a2_R = EoS_DensPres2CSqr( R[0], P_R, R+NCOMP_FLUID, EoS_AuxArray_Flt, EoS_AuxArray_Int, EoS_Table ); # ifdef CHECK_NEGATIVE_IN_FLUID if ( Hydro_CheckNegative(P_L) ) printf( "ERROR : invalid pressure (%14.7e) at file <%s>, line <%d>, function <%s>\n", P_L, __FILE__, __LINE__, __FUNCTION__ ); if ( Hydro_CheckNegative(P_R) ) printf( "ERROR : invalid pressure (%14.7e) at file <%s>, line <%d>, function <%s>\n", P_R, __FILE__, __LINE__, __FUNCTION__ ); # endif # if ( defined GAMER_DEBUG && defined MHD ) // 
longitudinal B field in the left and right states should be the same if ( Bx_L != Bx_R ) printf( "ERROR : Bx_L (%24.17e) != Bx_R (%24.17e) for XYZ %d at file <%s>, line <%d>, function <%s>!!\n", Bx_L, Bx_R, XYZ, __FILE__, __LINE__, __FUNCTION__ ); # endif // fast wave speed (Cf) # ifdef MHD // left state Cax2_L = Bx2_L*_RhoL; Cat2_L = Bt2_L*_RhoL; Ca2_plus_a2_L = Cat2_L + Cax2_L + a2_L; Ca2_min_a2_L = Cat2_L + Cax2_L - a2_L; Cf2_min_Cs2_L = SQRT( SQR(Ca2_min_a2_L) + (real)4.0*a2_L*Cat2_L ); if ( Cat2_L == ZERO ) { if ( Cax2_L >= a2_L ) Cf2_L = Cax2_L; else Cf2_L = a2_L; } else { if ( Cax2_L == ZERO ) Cf2_L = a2_L + Cat2_L; else Cf2_L = _TWO*( Ca2_plus_a2_L + Cf2_min_Cs2_L ); } // if ( Cat2_L == ZERO ) ... else ... Cf_L = SQRT( Cf2_L ); // Cf2_L is positive definite using the above formula // right state Cax2_R = Bx2_R*_RhoR; Cat2_R = Bt2_R*_RhoR; Ca2_plus_a2_R = Cat2_R + Cax2_R + a2_R; Ca2_min_a2_R = Cat2_R + Cax2_R - a2_R; Cf2_min_Cs2_R = SQRT( SQR(Ca2_min_a2_R) + (real)4.0*a2_R*Cat2_R ); if ( Cat2_R == ZERO ) { if ( Cax2_R >= a2_R ) Cf2_R = Cax2_R; else Cf2_R = a2_R; } else { if ( Cax2_R == ZERO ) Cf2_R = a2_R + Cat2_R; else Cf2_R = _TWO*( Ca2_plus_a2_R + Cf2_min_Cs2_R ); } // if ( Cat2_R == ZERO ) ... else ... Cf_R = SQRT( Cf2_R ); // Cf2_R is positive definite using the above formula # else // #ifdef MHD Cf_L = SQRT( a2_L ); Cf_R = SQRT( a2_R ); # endif // #ifdef MHD ... else ... // 2-2a. 
use the Roe average eigenvalues # if ( HLLE_WAVESPEED == HLL_WAVESPEED_ROE ) // Roe averages real H_L, H_R, RhoL_sqrt, RhoR_sqrt, _RhoL_sqrt, _RhoR_sqrt, _RhoLR_sqrt_sum; real Rho_Roe, _Rho_Roe, u_Roe, v_Roe, w_Roe, H_Roe; # ifdef MHD real Bx_Roe, By_Roe, Bz_Roe, Bt2_Roe, B2_Roe, B2_Rho_Roe; # endif H_L = ( L[4] + P_L )*_RhoL; H_R = ( R[4] + P_R )*_RhoR; # ifdef MHD H_L += _TWO*B2_L*_RhoL; H_R += _TWO*B2_R*_RhoR; # endif RhoL_sqrt = SQRT( L[0] ); RhoR_sqrt = SQRT( R[0] ); _RhoL_sqrt = ONE/RhoL_sqrt; _RhoR_sqrt = ONE/RhoR_sqrt; _RhoLR_sqrt_sum = ONE/(RhoL_sqrt + RhoR_sqrt); Rho_Roe = RhoL_sqrt*RhoR_sqrt; _Rho_Roe = ONE/Rho_Roe; u_Roe = _RhoLR_sqrt_sum*( _RhoL_sqrt*L[1] + _RhoR_sqrt*R[1] ); v_Roe = _RhoLR_sqrt_sum*( _RhoL_sqrt*L[2] + _RhoR_sqrt*R[2] ); w_Roe = _RhoLR_sqrt_sum*( _RhoL_sqrt*L[3] + _RhoR_sqrt*R[3] ); H_Roe = _RhoLR_sqrt_sum*( RhoL_sqrt*H_L + RhoR_sqrt*H_R ); # ifdef MHD Bx_Roe = Bx_L; By_Roe = _RhoLR_sqrt_sum*( RhoL_sqrt*By_R + RhoR_sqrt*By_L ); Bz_Roe = _RhoLR_sqrt_sum*( RhoL_sqrt*Bz_R + RhoR_sqrt*Bz_L ); Bt2_Roe = SQR( By_Roe ) + SQR( Bz_Roe ); B2_Roe = SQR( Bx_Roe ) + Bt2_Roe; B2_Rho_Roe = B2_Roe*_Rho_Roe; # endif // fast wave speed (Cf_Roe) //###NOTE: we have assumed a constant-gamma EoS here // --> otherwise, one needs to specify how to convert (H-0.5*V2, Rho) to Cs^2 // --> see Eq. [A4] in Coleman 2020 # if ( EOS != EOS_GAMMA ) # error : ERROR : HLL_WAVESPEED_ROE only works with EOS_GAMMA !! # endif const real Gamma = (real)EoS_AuxArray_Flt[0]; const real Gamma_m1 = (real)EoS_AuxArray_Flt[1]; const real _Gamma = (real)EoS_AuxArray_Flt[3]; # ifdef MHD const real Gamma_m2 = Gamma - (real)2.0; # endif real V2_Roe, a2_Roe, Cf2_Roe, Cf_Roe; # ifdef MHD real X, Y; // Eqs. 
(B15) and (B16) in ref-b real Cax2_Roe, Cat2_Roe, Ca2_plus_a2_Roe, Ca2_min_a2_Roe, Cf2_min_Cs2_Roe; # endif V2_Roe = SQR( u_Roe ) + SQR( v_Roe ) + SQR( w_Roe ); a2_Roe = Gamma_m1*( H_Roe - _TWO*V2_Roe ); # ifdef MHD a2_Roe -= Gamma_m1*B2_Rho_Roe; // H = 0.5*v^2 + B^2/rho + gamma/(gamma-1)*P/rho # endif a2_Roe = Gamma*_Rho_Roe*Hydro_CheckMinPres( a2_Roe*Rho_Roe*_Gamma, MinPres ); // apply pressure floor # ifdef CHECK_NEGATIVE_IN_FLUID if ( Hydro_CheckNegative(a2_Roe) ) printf( "ERROR : invalid a2_Roe (%14.7e) at file <%s>, line <%d>, function <%s>\n", a2_Roe, __FILE__, __LINE__, __FUNCTION__ ); # endif // #ifdef CHECK_NEGATIVE_IN_FLUID # ifdef MHD X = _TWO*( SQR(By_R-By_L) + SQR(Bz_R-Bz_L) )*SQR( _RhoLR_sqrt_sum ); X *= Gamma_m2; # ifdef EULERY Y = _TWO*( L[0] + R[0] )*_Rho_Roe; # else Y = ONE; # endif // #ifdef EULER ... else ... Y *= Gamma_m2; a2_Roe -= X; Cax2_Roe = SQR(Bx_Roe)*_Rho_Roe; Cat2_Roe = ( Gamma_m1 - Y )*Bt2_Roe*_Rho_Roe; Ca2_plus_a2_Roe = Cat2_Roe + Cax2_Roe + a2_Roe; Ca2_min_a2_Roe = Cat2_Roe + Cax2_Roe - a2_Roe; Cf2_min_Cs2_Roe = SQRT( SQR(Ca2_min_a2_Roe) + (real)4.0*a2_Roe*Cat2_Roe ); # ifdef CHECK_NEGATIVE_IN_FLUID if ( Hydro_CheckNegative(a2_Roe) ) printf( "ERROR : invalid a2_Roe (%14.7e) at file <%s>, line <%d>, function <%s>\n", a2_Roe, __FILE__, __LINE__, __FUNCTION__ ); # ifdef MHD if ( Hydro_CheckNegative(Gamma_m1-Y) ) printf( "ERROR : invalid Gamma_m1-Y (%14.7e, Gamma_m1 %14.7e, Y %14.7e) at file <%s>, line <%d>, function <%s>\n", Gamma_m1-Y, Gamma_m1, Y, __FILE__, __LINE__, __FUNCTION__ ); # endif # endif // #ifdef CHECK_NEGATIVE_IN_FLUID if ( Cat2_Roe == ZERO ) { if ( Cax2_Roe == a2_Roe ) Cf2_Roe = a2_Roe; else if ( Cax2_Roe > a2_Roe ) Cf2_Roe = Cax2_Roe; else Cf2_Roe = a2_Roe; } else { if ( Cax2_Roe == ZERO ) Cf2_Roe = a2_Roe + Cat2_Roe; else Cf2_Roe = _TWO*( Ca2_plus_a2_Roe + Cf2_min_Cs2_Roe ); } // if ( Cat2_Roe == ZERO ) ... else ... # else // #ifdef MHD Cf2_Roe = a2_Roe; # endif // #ifdef MHD ... else ... 
Cf_Roe = SQRT( Cf2_Roe ); // maximum and minimum eigenvalues const real EVal_min = u_Roe - Cf_Roe; const real EVal_max = u_Roe + Cf_Roe; // left/right maximum wave speeds MaxV_L = FMIN( EVal_min, u_L-Cf_L ); MaxV_R = FMAX( EVal_max, u_R+Cf_R ); MaxV_L = FMIN( MaxV_L, ZERO ); MaxV_R = FMAX( MaxV_R, ZERO ); // 2-2b. use the primitive variable Riemann solver (PVRS) # elif ( HLLE_WAVESPEED == HLL_WAVESPEED_PVRS ) # ifdef MHD # error : HLL_WAVESPEED_PVRS does not support MHD !! # endif // As=a=sound speed in PVRS real Rho_PVRS, As_PVRS, RhoAs_PVRS, P_PVRS, Gamma_SL, Gamma_SR, q_L, q_R; Rho_PVRS = _TWO*( L[0] + R[0] ); As_PVRS = _TWO*( Cf_L + Cf_R ); // Cf=As for hydro RhoAs_PVRS = Rho_PVRS * As_PVRS; P_PVRS = _TWO*( ( P_L + P_R ) + ( u_L - u_R )*RhoAs_PVRS ); P_PVRS = Hydro_CheckMinPres( P_PVRS, MinPres ); // for EOS_GAMMA/EOS_ISOTHERMAL, the calculations of Gamma_SL/R can be greatly simplified // --> results should be exactly the same except for round-off errors # if ( EOS == EOS_GAMMA ) Gamma_SL = (real)EoS_AuxArray_Flt[0]; Gamma_SR = (real)EoS_AuxArray_Flt[0]; # elif ( EOS == EOS_ISOTHERMAL ) Gamma_SL = ONE; Gamma_SR = ONE; # else real u_PVRS, Rho_As_PVRS, Rho_SL, Rho_SR, _P; u_PVRS = _TWO*( ( u_L + u_R ) + ( P_L - P_R )/RhoAs_PVRS ); Rho_As_PVRS = Rho_PVRS / As_PVRS; Rho_SL = L[0] + ( u_L - u_PVRS )*Rho_As_PVRS; Rho_SR = R[0] + ( u_PVRS - u_R )*Rho_As_PVRS; Rho_SL = FMAX( Rho_SL, MinDens ); Rho_SR = FMAX( Rho_SR, MinDens ); _P = ONE / P_PVRS; // see Eq. [9.8] in Toro 1999 for passive scalars Gamma_SL = EoS_DensPres2CSqr( Rho_SL, P_PVRS, L+NCOMP_FLUID, EoS_AuxArray_Flt, EoS_AuxArray_Int, EoS_Table )*Rho_SL*_P; Gamma_SR = EoS_DensPres2CSqr( Rho_SR, P_PVRS, R+NCOMP_FLUID, EoS_AuxArray_Flt, EoS_AuxArray_Int, EoS_Table )*Rho_SR*_P; # endif // EOS q_L = ( P_PVRS <= P_L ) ? ONE : SQRT( ONE + _TWO*( Gamma_SL + ONE )/Gamma_SL*( P_PVRS/P_L - ONE ) ); q_R = ( P_PVRS <= P_R ) ? 
ONE : SQRT( ONE + _TWO*( Gamma_SR + ONE )/Gamma_SR*( P_PVRS/P_R - ONE ) ); MaxV_L = u_L - Cf_L*q_L; // Cf=As for hydro MaxV_R = u_R + Cf_R*q_R; MaxV_L = FMIN( MaxV_L, ZERO ); MaxV_R = FMAX( MaxV_R, ZERO ); # ifdef CHECK_NEGATIVE_IN_FLUID if ( Hydro_CheckNegative(q_L) ) printf( "ERROR : invalid q_L (%14.7e) at file <%s>, line <%d>, function <%s>\n", q_L, __FILE__, __LINE__, __FUNCTION__ ); if ( Hydro_CheckNegative(q_R) ) printf( "ERROR : invalid q_R (%14.7e) at file <%s>, line <%d>, function <%s>\n", q_R, __FILE__, __LINE__, __FUNCTION__ ); # endif // 2-2c. use the min/max of the left and right eigenvalues # elif ( HLLE_WAVESPEED == HLL_WAVESPEED_DAVIS ) MaxV_L = FMIN( u_L-Cf_L, u_R-Cf_R ); MaxV_R = FMAX( u_L+Cf_L, u_R+Cf_R ); MaxV_L = FMIN( MaxV_L, ZERO ); MaxV_R = FMAX( MaxV_R, ZERO ); # else # error : ERROR : unsupported HLLE_WAVESPEED !! # endif // HLLE_WAVESPEED // 3. evaluate the left and right fluxes along the maximum wave speeds # ifdef MHD const int idx_wave[NWAVE] = { 0, 1, 2, 3, 4, MAG_OFFSET+1, MAG_OFFSET+2 }; # else const int idx_wave[NWAVE] = { 0, 1, 2, 3, 4 }; # endif real Flux_L[NCOMP_TOTAL_PLUS_MAG], Flux_R[NCOMP_TOTAL_PLUS_MAG]; // use NCOMP_TOTAL_PLUS_MAG for Hydro_Con2Flux() Hydro_Con2Flux( 0, Flux_L, L, MinPres, NULL, NULL, NULL, NULL, &P_L ); Hydro_Con2Flux( 0, Flux_R, R, MinPres, NULL, NULL, NULL, NULL, &P_R ); for (int v=0; v<NWAVE; v++) { Flux_L[ idx_wave[v] ] -= MaxV_L*L[ idx_wave[v] ]; Flux_R[ idx_wave[v] ] -= MaxV_R*R[ idx_wave[v] ]; } // 4. evaluate the HLLE fluxes // deal with the special case of MaxV_L=MaxV_R=0 //###REVISE: should it return zero flux due to symmetry? 
if ( MaxV_L == ZERO && MaxV_R == ZERO ) { for (int v=0; v<NWAVE; v++) Flux_Out[ idx_wave[v] ] = Flux_L[ idx_wave[v] ]; // assuming Flux_L=Flux_R } else { const real _MaxV_R_minus_L = ONE / ( MaxV_R - MaxV_L ); for (int v=0; v<NWAVE; v++) Flux_Out[ idx_wave[v] ] = _MaxV_R_minus_L*( MaxV_R*Flux_L[ idx_wave[v] ] - MaxV_L*Flux_R[ idx_wave[v] ] ); } // longitudinal magnetic flux is always zero # ifdef MHD Flux_Out[MAG_OFFSET] = ZERO; # endif // 5. evaluate the fluxes of passive scalars # if ( NCOMP_PASSIVE > 0 ) if ( Flux_Out[FLUX_DENS] >= ZERO ) { const real vx = Flux_Out[FLUX_DENS]*_RhoL; for (int v=NCOMP_FLUID; v<NCOMP_TOTAL; v++) Flux_Out[v] = L[v]*vx; } else { const real vx = Flux_Out[FLUX_DENS]*_RhoR; for (int v=NCOMP_FLUID; v<NCOMP_TOTAL; v++) Flux_Out[v] = R[v]*vx; } # endif // 7. restore the correct order Hydro_Rotate3D( Flux_Out, XYZ, false, MAG_OFFSET ); } // FUNCTION : Hydro_RiemannSolver_HLLE #endif // #if ( MODEL == HYDRO ) #endif // #ifndef __CUFLU_RIEMANNSOLVER_HLLE__
the_stack
#include <stdio.h>

// Texture references bound elsewhere to the tree-node / body arrays; the walk
// reads nodes through the texture cache when TEXTURES is defined (see below).
texture<float4, 1, cudaReadModeElementType> texNodeSize;
texture<float4, 1, cudaReadModeElementType> texNodeCenter;
texture<float4, 1, cudaReadModeElementType> texMultipole;
texture<float4, 1, cudaReadModeElementType> texBody;

// Returns 1 when particle j lies within distance h_i of pos_i, else 0.
// Pure squared-distance comparison, no sqrt.
__device__ int ngb_cnt(float3 pos_i, float h_i, float3 pos_j)
{
  float3 dr = {pos_i.x - pos_j.x,
               pos_i.y - pos_j.y,
               pos_i.z - pos_j.z};
  float ds2 = dr.x*dr.x + dr.y*dr.y + dr.z*dr.z;
  return (ds2 <= h_i*h_i);
}

// Earlier box-overlap variant of the opening test, kept for reference:
// __device__ bool split_node_ngb(float4 node_pos,
//                                float4 group_pos)
// {
//   float s = node_pos.w + group_pos.w;
//
//   float3 dr = {fabs(group_pos.x - node_pos.x),
//                fabs(group_pos.y - node_pos.y),
//                fabs(group_pos.z - node_pos.z)};
//
//   return ((dr.x < s) && (dr.y < s) && (dr.z < s));
// }

// Cell-opening test: squared distance between the group's box and the node's
// box (clamped to zero per axis where the boxes overlap), compared against
// groupCenter.w.  NOTE(review): since it is compared with a squared distance,
// groupCenter.w is presumably an already-squared search radius — confirm at
// the call site (the caller stores h_group into group_pos.w unsquared).
__device__ bool split_node_ngb(float4 nodeCenter, float4 nodeSize,
                               float4 groupCenter, float4 groupSize)
{
  //Compute the distance between the group and the cell
  float3 dr = {fabs(groupCenter.x - nodeCenter.x) - (groupSize.x + nodeSize.x),
               fabs(groupCenter.y - nodeCenter.y) - (groupSize.y + nodeSize.y),
               fabs(groupCenter.z - nodeCenter.z) - (groupSize.z + nodeSize.z)};

  // Branchless per-component max(dr, 0): (dr + |dr|) * 0.5
  dr.x += fabs(dr.x); dr.x *= 0.5f;
  dr.y += fabs(dr.y); dr.y *= 0.5f;
  dr.z += fabs(dr.z); dr.z *= 0.5f;

  //Distance squared, no need to do sqrt since opening criteria has been squared
  float ds2 = dr.x*dr.x + dr.y*dr.y + dr.z*dr.z;

  return (ds2 <= groupCenter.w);
}

#define TEXTURES     // fetch nodes via tex1Dfetch instead of global loads
#define OLDPREFIX    // use calc_prefix instead of inclusive_scan_block
#define DOGRAV

// Breadth-first tree walk that counts direct particle interactions for one
// group of bodies.  DIM2/SHIFT are compile-time log2 block size and the LMEM
// stack shift; shmem is a 15*DIM int scratch area shared by the whole block,
// lmem is this block's private stack in global memory.  Returns the (reduced)
// neighbour count; direCount accumulates direct-interaction counts and is set
// to -1 on LMEM stack overflow so the caller can retry with a bigger stack.
// NOTE(review): the DONGBCOUNT branches call ngb_count(pos_i, h_i, ...) but
// the function defined above is ngb_cnt, pos_i is a float4, and h_i is never
// declared — this only compiles because DONGBCOUNT is not defined.
template<int DIM2, int SHIFT>
__device__ uint get_n_ngb(int DIM2x, int DIM2y, int tid, int tx, int ty,
                          int body_i, float4 pos_i, real4 group_pos,
                          uint2 node_begend, real4 *body_pos,
                          int *shmem, int *lmem,
                          int &ngb, int &direCount,
                          volatile float4 *boxSizeInfo, float4 groupSize,
                          volatile float4 *boxCenterInfo,
                          float &ds2_min, float h_group)
{
  ngb = -1;
  int n_ngb = 0;

  // float ds2_min = 1.0e10f;
  ds2_min = 1.0e10f;

  /*********** set necessary thread constants **********/
  const int DIMx = 1 << DIM2x;   // threads along x within the tile
  const int DIMy = 1 << DIM2y;   // tiles stacked along y
  const int DIM  = 1 << DIM2;    // total threads in the block
  const int offs = ty << DIM2x;  // this thread's tile base in shared arrays

  /*********** shared memory distribution **********/
  // All pointers below alias regions of the single shmem buffer:
  //                                                  begin,   end,   size
  //                                                  -----------------------
  int    *approx    = (int*)&shmem    [     0];   //  0*DIM,  2*DIM, 2*DIM
  int    *direct    = (int*)&approx   [ 2*DIM];   //  2*DIM,  3*DIM, 1*DIM
  int    *nodes     = (int*)&direct   [   DIM];   //  3*DIM, 13*DIM, 10*DIM
  int    *prefix    = (int*)&nodes    [10*DIM];   // 13*DIM, 15*DIM, 2*DIM
  int    *body_list = (int*   )&nodes    [  DIM]; //  4*DIM,  8*DIM, 4*DIM (overlaps nodes!)
  float  *sh_mass   = (float* )&body_list[4*DIM]; //  8*DIM,  9*DIM, 1*DIM
  float3 *sh_pos    = (float3*)&sh_mass  [  DIM]; //  9*DIM, 12*DIM, 3*DIM
  int    *sh_jid    = (int*   )&sh_pos   [  DIM];

  //Reduction at the end (re-aliases the same scratch area)
  float *sh_ds2 = (float*)&shmem[DIM];
  int   *sh_ngb = (int*  )&sh_ds2[DIM];

  /*********** stack **********/
  int *nstack = lmem;   // per-block node stack in global memory

  /*********** begin tree-walk **********/
  int n_direct = 0;

  for (int root_node = node_begend.x; root_node < node_begend.y; root_node += DIM)
  {
    int n_nodes0    = min(node_begend.y - root_node, DIM);
    int n_stack0    = 0;
    int n_stack_pre = 0;
    {
      // seed the stack: one root node per thread
      nstack[ACCS<SHIFT>(n_stack0)] = root_node + tid;
      n_stack0++;
    }

    /*********** walk each level **********/
    while (n_nodes0 > 0)
    {
      int n_nodes1 = 0;
      int n_offset = 0;
      int n_stack1 = n_stack0;
      int c_stack0 = n_stack_pre;

      /*********** walk a level **********/
      while (c_stack0 < n_stack0)
      {
        /***
         **** --> fetch the list of nodes from LMEM
         ***/
        bool use_node = tid < n_nodes0;
        {
          prefix[tid] = nstack[ACCS<SHIFT>(c_stack0)];
          c_stack0++;
        }
        __syncthreads();
        int node = prefix[min(tid, n_nodes0 - 1)];

        if (n_nodes0 > 0)
        { //Work around pre 4.1 compiler bug
          n_nodes0 -= DIM;
        }

        /***
         **** --> process each of the nodes in the list in parallel
         ***/
#ifndef TEXTURES
        float4 nodeSize = get_float4(boxSizeInfo[node]);    //Fetch the size of the box. Size.w = child info
        float4 node_pos = get_float4(boxCenterInfo[node]);  //Fetch the center of the box. center.w = opening info
#else
        float4 nodeSize = tex1Dfetch(texNodeSize,   node);
        float4 node_pos = tex1Dfetch(texNodeCenter, node);
#endif

        int node_data = __float_as_int(nodeSize.w);

        //TODO Fix This
        group_pos.w = h_group;   //The looking radius (NOTE(review): not squared here)

        //Check if a cell has to be opened
        bool split = split_node_ngb(node_pos, nodeSize, group_pos, groupSize);  //Check if node should be split
        bool leaf  = node_pos.w <= 0;  //Small AND equal incase of a 1 particle cell  //Check if it is a leaf
        // split = true;

        uint mask  = BTEST((split && !leaf) && use_node);     // mask = #FFFFFFFF if use_node+split+not_a_leaf==true, otherwise zero
        int child  =   node_data & 0x0FFFFFFF;                //Index to the first child of the node
        int nchild = (((node_data & 0xF0000000) >> 28)) & mask;  //The number of children this node has

        /***
         **** --> calculate prefix
         ***/
        int *prefix0 = &prefix[  0];
        int *prefix1 = &prefix[DIM];

#ifdef OLDPREFIX
        int n_total = calc_prefix<DIM2>(prefix, tid, nchild);
        prefix[tid] += n_offset - nchild;
        __syncthreads();
#else
        inclusive_scan_block<ADDOP<int>, int>(prefix, nchild, tid);  // inclusive scan to compute memory offset of each child
        int n_total = prefix[blockDim.x - 1];                        // fetch total number of children, i.e. offset of the last child -1
        __syncthreads();                                             // thread barrier to make sure that warps completed their jobs
        prefix[tid] += n_offset - nchild;                            // convert inclusive into exclusive scan for referencing purpose
        __syncthreads();                                             // thread barrier
#endif

        for (int i = n_offset; i < n_offset + n_total; i += DIM)  //nullify part of the array that will be filled with children
          nodes[tid + i] = 0;                                     //but do not touch those parts which has already been filled
        __syncthreads();                                          //Thread barrier to make sure all warps finished writing data

        bool flag = (split && !leaf) && use_node;  //Flag = use_node + split + not_a_leaf; use only non-leaf nodes that are to be split
        if (flag) nodes[prefix[tid]] = child;      //Thread with the node that is about to be split
        __syncthreads();                           //writes the first child in the array of nodes

        /*** in the following lines we calculate the indexes of all the
             children that have to be walked from the index of the first child ***/
        if (flag && nodes[prefix[tid] + 1] == 0) nodes[prefix[tid] + 1] = child + 1; __syncthreads();
        if (flag && nodes[prefix[tid] + 2] == 0) nodes[prefix[tid] + 2] = child + 2; __syncthreads();
        if (flag && nodes[prefix[tid] + 3] == 0) nodes[prefix[tid] + 3] = child + 3; __syncthreads();
        if (flag && nodes[prefix[tid] + 4] == 0) nodes[prefix[tid] + 4] = child + 4; __syncthreads();
        if (flag && nodes[prefix[tid] + 5] == 0) nodes[prefix[tid] + 5] = child + 5; __syncthreads();
        if (flag && nodes[prefix[tid] + 6] == 0) nodes[prefix[tid] + 6] = child + 6; __syncthreads();
        if (flag && nodes[prefix[tid] + 7] == 0) nodes[prefix[tid] + 7] = child + 7; __syncthreads();

        n_offset += n_total;   //Increase the offset in the array by the number of newly added nodes

        /***
         **** --> save list of nodes to LMEM
         ***/
        /*** if half of shared memory or more is filled with the the nodes, dump these into slowmem stack ***/
        while (n_offset >= DIM)
        {
          n_offset -= DIM;
          const int offs1 = ACCS<SHIFT>(n_stack1);
          nstack[offs1] = nodes[n_offset + tid];
          n_stack1++;
          n_nodes1 += DIM;

          if ((n_stack1 - c_stack0) >= (LMEM_STACK_SIZE << SHIFT))
          {
            //We overwrote our current stack
            direCount = -1;
            return 0;
          }
        }

        __syncthreads();

#if 1
        /***********************************/
        /******       DIRECT          ******/
        /***********************************/

        int *sh_body = &approx[DIM];

        flag      = split && leaf && use_node;                            //flag = split + leaf + use_node
        int jbody = node_data & BODYMASK;                                 //the first body in the leaf
        int nbody = (((node_data & INVBMASK) >> LEAFBIT)+1) & BTEST(flag);  //number of bodies in the leaf masked with the flag

        body_list[tid] = direct[tid];   //copy list of bodies from previous pass to body_list
        sh_body  [tid] = jbody;         //store the leaf's first body id into shared memory

        // step 1
#ifdef OLDPREFIX
        calc_prefix<DIM2>(prefix0, tid, flag);
#else
        inclusive_scan_block<ADDOP<int>, int>(prefix0, (int)flag, tid);  // inclusive scan on flags to construct array
#endif
        if (flag) prefix1[prefix0[tid] - 1] = tid;  //with tids whose leaves have to be opened
        __syncthreads();                            //thread barrier, make sure all warps completed the job

        // step 2
#ifdef OLDPREFIX
        int n_bodies = calc_prefix<DIM2>(prefix0, tid, nbody);
#else
        inclusive_scan_block<ADDOP<int>, int>(prefix0, nbody, tid);  // inclusive scan to compute memory offset for each body
        int n_bodies = prefix0[blockDim.x - 1];                      //Total number of bodies extracted from the leaves
        __syncthreads();                                             // thread barrier to make sure that warps completed their jobs
#endif

        direct [tid]  = prefix0[tid];  //Store a copy of inclusive scan in direct
        prefix0[tid] -= nbody;         //convert inclusive into exclusive scan
        prefix0[tid] += 1;             //add unity, since later prefix0[tid] == 0 used to check barrier

        int nl_pre = 0;                //Number of leaves that have already been processed

#define NJMAX (DIM*4)
        while (n_bodies > 0)
        {
          int nb = min(n_bodies, NJMAX - n_direct);  //Make sure number of bodies to be extracted does not exceed
                                                     //the amount of allocated shared memory

          // step 0
          //nullify part of the body_list that will be filled with bodies
          for (int i = n_direct; i < n_direct + nb; i += DIM)
          { //from the leaves that are being processed
            body_list[i + tid] = 0;
          }
          __syncthreads();

          //step 1:
          if (flag && (direct[tid] <= nb) && (prefix0[tid] > 0))  //make sure that the thread indeed carries a leaf
            body_list[n_direct + prefix0[tid] - 1] = 1;           //whose bodies will be extracted
          __syncthreads();

          //step 2:
#ifdef OLDPREFIX
          int nl = calc_prefix<DIM2>(nb, &body_list[n_direct], tid);
#else
          int nl = inclusive_scan_array<ADDOP<int>, int>  // inclusive scan to compute number of leaves to process
            (&body_list[n_direct], nb, tid);              // to make sure that there is enough shared memory for bodies
#endif
          nb = direct[prefix1[nl_pre + nl - 1]];          // number of bodies stored in these leaves

          // step 3:
          for (int i = n_direct; i < n_direct + nb; i += DIM)
          {  //segmented fill of the body_list
            int j = prefix1[nl_pre + body_list[i + tid] - 1];  // compute the first body in shared j-body array
            body_list[i + tid] = (i + tid - n_direct) -        //add to the index of the first j-body in a child
                                 (prefix0[j] - 1) + sh_body[j];  //the index of the first child in body_list array
          }
          __syncthreads();

          /**************************************************
           *  example of what is accomplished in steps 0-4   *
           *       ---------------------------               *
           * step 0: body_list = 000000000000000000000       *
           * step 1: body_list = 100010001000000100100       *
           * step 2: body_list = 111122223333333444555       *
           * step 3: body_list = 012301230123456012012       *
           *   assuming that sh_body[j] = 0                  *
           ***************************************************/

          n_bodies     -= nb;   //subtract from n_bodies number of bodies that have been extracted
          nl_pre       += nl;   //increase the number of leaves that were processed
          direct [tid] -= nb;   //subtract the number of extracted bodies in this pass
          prefix0[tid]  = max(prefix0[tid] - nb, 0);  //same here, but do not let the number be negative (GT200 bug!?)
          n_direct     += nb;   //increase the number of bodies to be processed

          while (n_direct >= DIM)
          {
            n_direct -= DIM;

            float4 posj = body_pos[body_list[n_direct + tid]];
            // float4 posj = tex1Dfetch(texBody, body_list[n_direct + tid]);
            sh_pos[tid] = (float3){posj.x, posj.y, posj.z};
            sh_jid[tid] = body_list[n_direct + tid];

            __syncthreads();
#pragma unroll
            for (int j = 0; j < DIMx; j++)
            {
              //TODO should we check on selfGrav?
              // NOTE(review): selfGrav is computed but never used below.
              int selfGrav = (body_i != sh_jid[offs + j]);
              // if (body_i != sh_jid[offs + j])  //If statement replaced by multiplication
              {
                direCount++;
#ifdef DONGBCOUNT
                // NOTE(review): ngb_count/h_i are undefined — dead code unless fixed.
                n_ngb += ngb_count(pos_i, h_i, sh_pos[offs + j]);
#endif
              }
            } //End for j < DIMx
            __syncthreads();
          } // end while n_direct >= DIM
        } // end while n_bodies > 0

        direct[tid] = body_list[tid];
        __syncthreads();
#endif
      } //end lvl

      n_nodes1 += n_offset;
      if (n_offset > 0)
      {
        nstack[ACCS<SHIFT>(n_stack1)] = nodes[tid];
        n_stack1++;
        if ((n_stack1 - c_stack0) >= (LMEM_STACK_SIZE << SHIFT))
        {
          //We overwrote our current stack
          direCount = -1;
          return 0;
        }
      }
      __syncthreads();

      /***
       **** --> copy nodes1 to nodes0: done by reassigning the pointers
       ***/
      n_nodes0    = n_nodes1;
      n_stack_pre = n_stack0;
      n_stack0    = n_stack1;
    } //end while levels
  } //end for

  // process the remaining (< DIM) bodies left over from the direct lists
  if (n_direct > 0)
  {
    if (tid < n_direct)
    {
      float4 posj = body_pos[direct[tid]];
      // float4 posj = tex1Dfetch(texBody, direct[tid]);
      sh_pos[tid] = (float3){posj.x, posj.y, posj.z};
      sh_jid[tid] = direct[tid];
    }
    else
    {
      // pad unused slots with an invalid id and a far-away position
      sh_jid[tid] = -1;
      sh_pos[tid] = (float3){1.0e10f, 1.0e10f, 1.0e10f};
    }

    __syncthreads();
#pragma unroll
    for (int j = 0; j < DIMx; j++)
    {
      if ((sh_jid[offs + j] >= 0))
      {
        // NOTE(review): selfGrav unused here as well.
        int selfGrav = (body_i != sh_jid[offs + j]);
        direCount++;
#ifdef DONGBCOUNT
        n_ngb += ngb_count(pos_i, h_i, sh_pos[offs + j]);
#endif
      }
    }
    __syncthreads();
  }

  /***
   **** --> reduce data between threads
   ***/
  //Sum the interaction counters and the number of neighbours.
  // NOTE(review): direCount (int) is stored in/read from a float array
  // (sh_ds2) — exact only while counts stay within float's 2^24 integer range.
  sh_ds2[tid] = direCount;
  sh_ngb[tid] = n_ngb;
  __syncthreads();

  if (ty == 0)
  {
#pragma unroll
    for (int i = 1; i < DIMy; i++)
    {
      int idx = (i << DIM2x) + tx;
      direCount += sh_ds2[idx];
      n_ngb     += sh_ngb[idx];
    }
  }
  __syncthreads();

  return n_ngb;
}

// Entry kernel: blocks grab groups from a global work counter
// (active_inout[n_bodies]) until all n_active_groups are done.  Each group
// runs the tree walk with a per-block LMEM stack; on stack overflow
// (direCount < 0) the block serialises on a global lock
// (active_inout[n_bodies+1]) and retries with the single extra-large stack.
extern "C" __global__ void
__launch_bounds__(NTHREAD)
dev_get_n_ngb(const int n_active_groups,
              int    n_bodies,
              uint2  node_begend,
              real4 *body_pos,
              real4 *group_body_pos,
              int   *ngb_out,
              int   *active_inout,
              int2  *interactions,
              uint2 *group_list,
              float4 *boxSizeInfo,
              float4 *boxCenterInfo,
              int   *MEM_BUF)
{
  const int blockDim2 = NTHREAD2;
  __shared__ int shmem[15*(1 << blockDim2)];
  // __shared__ int shmem[24*(1 << blockDim2)]; is possible on FERMI
  // int lmem[LMEM_STACK_SIZE];

  /*********** check if this block is linked to a leaf **********/
  int bid = gridDim.x * blockIdx.y + blockIdx.x;

  while (true)
  {
    // thread 0 atomically claims the next group id and publishes it via shmem
    if (threadIdx.x == 0)
    {
      bid = atomicAdd(&active_inout[n_bodies], 1);
      shmem[0] = bid;
    }
    __syncthreads();

    bid = shmem[0];
    if (bid >= n_active_groups) return;

    int tid = threadIdx.y * blockDim.x + threadIdx.x;
    int grpOffset = 0;

    // volatile int *lmem = &MEM_BUF[blockIdx.x*LMEM_STACK_SIZE*blockDim.x + threadIdx.x*LMEM_STACK_SIZE];
    // int *lmem = &MEM_BUF[blockIdx.x*LMEM_STACK_SIZE*blockDim.x + threadIdx.x*LMEM_STACK_SIZE];
    int *lmem = &MEM_BUF[blockIdx.x*LMEM_STACK_SIZE*blockDim.x];  // this block's stack region

    /*********** set necessary thread constants **********/
    uint2 grpInfo = group_list[bid];
    uint body_i = grpInfo.x;                    // first body of the group
    uint nb_i   = (grpInfo.y - grpInfo.x) + 1;  // number of bodies in the group

    // choose a 2D thread tiling: DIMx >= nb_i (min 16), DIMy = rest of block
    int DIM2x = 0;
    while (((nb_i - 1) >> DIM2x) > 0) DIM2x++;
    DIM2x = max(DIM2x,4);
    int DIM2y = blockDim2 - DIM2x;

    int tx = tid & ((1 << DIM2x) - 1);
    int ty = tid >> DIM2x;

    body_i += tx%nb_i;   // threads past nb_i wrap onto the same bodies

    //float4 pos_i = tex1Dfetch(bodies_pos_ref, body_i);   // texture read: 4 floats
    float4 pos_i = group_body_pos[body_i];

    real4 group_pos;
    real4 curGroupSize;
    computeGroupProps(group_pos, curGroupSize, pos_i, shmem);

    int   ngb_i;
    uint  n_ngb;
    float ds2;
    float h_group = 0;   // NOTE(review): search radius is fixed at 0 here — confirm intended
    int   direCount = 0;

    n_ngb = get_n_ngb<blockDim2, 0>(DIM2x, DIM2y, tid, tx, ty,
                                    body_i, pos_i,
                                    group_pos,
                                    node_begend,
                                    body_pos,
                                    shmem, lmem, ngb_i, direCount,
                                    boxSizeInfo,
                                    curGroupSize, boxCenterInfo, ds2, h_group);

    if (direCount < 0)
    {
      //Try to get access to the big stack, only one block per time is allowed
      if (threadIdx.x == 0)
      {
        int res = atomicExch(&active_inout[n_bodies+1], 1);  //If the old value (res) is 0 we can go otherwise sleep
        int waitCounter = 0;
        while (res != 0)
        {
          //Sleep
          for (int i=0; i < (1024); i++)
          {
            waitCounter += 1;
          }
          //Test again
          shmem[0] = waitCounter;   // dummy store so the busy-wait is not optimised away
          res = atomicExch(&active_inout[n_bodies+1], 1);
        }
      }

      __syncthreads();

      lmem = &MEM_BUF[gridDim.x*LMEM_STACK_SIZE*blockDim.x];  //Use the extra large buffer
      direCount = 0;
      n_ngb = get_n_ngb<blockDim2, 8>(DIM2x, DIM2y, tid, tx, ty,
                                      body_i, pos_i,
                                      group_pos,
                                      node_begend,
                                      body_pos,
                                      shmem, lmem, ngb_i, direCount,
                                      boxSizeInfo,
                                      curGroupSize, boxCenterInfo, ds2, h_group);

      lmem = &MEM_BUF[blockIdx.x*LMEM_STACK_SIZE*blockDim.x];  //Back to normal location

      if (threadIdx.x == 0)
      {
        atomicExch(&active_inout[n_bodies+1], 0);  //Release the lock
      }
    } //end if apprCount < 0

    if (tid < nb_i)
    {
      ngb_out     [body_i]   = ngb_i;
      active_inout[body_i]   = 1;
      interactions[body_i].x = n_ngb;
      interactions[body_i].y = direCount ;
    }
  } //end while
}
the_stack
using namespace std;

typedef uint8_t uint8;
typedef unsigned int uint32;
typedef unsigned long long int uint64;

#define STREAM_BLOCK 16
#define BLOCK_SIZE 32           // 2D block edge for simple per-pixel kernels
#define BLOCK_D_SIZE 64
#define INTEGRAL_BLOCK_SIZE 8
#define XDIM_MAX_THREADS 1024   // 1D block size for the census transform
#define SHARED_MEMORY 49152     // 48 KB default shared-memory budget per block
#define XDIM_H_THREADS 512
#define XDIM_Q_THREADS 256      // 1D block size for the SAD/matching kernels

// Census transform, chunk-interleaved layout: for each interior pixel,
// compares the window centre against every pixel of its wsize x wsize window
// and packs the resulting bits into `chunks` uint64 words stored at
// (pixel*chunks + ch).  One image row slice is staged through shared memory
// per window row.  Blocks overlap by `wsize` columns (via `shift`) so window
// reads stay inside the block's slice.
// NOTE(review): `cens ^= 1UL << pos` — 1UL is 32-bit on LLP64 platforms
// (e.g. Windows); 1ULL would be portable since pos can reach 62.
// NOTE(review): the flush test `(pos & 63) == 63` emits a chunk after 63
// bits, so each uint64 carries at most 63 census bits; the SAD kernels use
// the same layout, so matching is consistent, but confirm `chunks` is sized
// for 63 bits/chunk.
// NOTE(review): the `__syncthreads()` calls sit inside `if(Col < cols && ...)`,
// which diverges within the right-edge block — confirm all threads of a block
// reach the barriers.
__global__ void CensusTransformKernel(const float* image, uint64* census ,int rows, int cols, int wsize, int chunks )
{
  const int shift = blockIdx.x*wsize;   // overlap consecutive blocks by wsize columns
  extern __shared__ float cens_slice[];

  int Row = blockIdx.y;
  int Col = blockIdx.x*blockDim.x+ threadIdx.x-shift;
  int wc = wsize/2;                     // half window (centre offset)

  float center;
  uint pos = 0;       // bit position within the current chunk
  uint64 cens=0;      // bits accumulated for the current chunk
  int ch=0;           // current chunk index

  if(Col < cols-wsize && Row< rows-wsize)
    center = image[(Row+wc)*cols + Col+wc ];

  if(Col < cols && Row< rows-wsize){
    for(int i=0; i<wsize; i++){
      // stage one window row of the image slice in shared memory
      cens_slice[threadIdx.x] = image[(Row+i)*cols + Col ];
      __syncthreads();

      if(threadIdx.x < blockDim.x-wsize && Col<cols-wsize){
        for(int ww=0; ww<wsize;ww++){
          if( center < cens_slice[threadIdx.x+ww])
            cens ^= 1UL << pos;
          pos++;
          if( (pos & 63) == 63 ){
            // chunk full: store and start the next one
            census[ ((Row+wc)*cols + (Col+wc))*chunks+ch ] = cens;
            ch++;
            cens=0;
            pos=0;
          }
        }
      }
      __syncthreads();
    }
    // store the final, partially-filled chunk
    if(threadIdx.x < blockDim.x-wsize && ch<chunks){
      census[ ((Row+wc)*cols + (Col+wc))*chunks+ch] = cens;
    }
  }
}

// Matching-cost kernel for the chunk-interleaved census layout: for each
// pixel and disparity d, cost = Hamming distance between the left descriptor
// and the right descriptor shifted by d, written to cost[d][row][col].
// Shared memory holds the right descriptors of this block plus an ndisp-wide
// left apron (rc_sm), and the block's left descriptors at rc_sm[sm_offset..]
// (lbs).  Requires (2*blockDim.x+ndisp)*chunks uint64s of dynamic shared
// memory (checked by the caller against SHARED_MEMORY).
__global__ void CensusSADKernel(uint64* censusl, uint64* censusr, float* cost, int rows, int cols, int ndisp, int wsize,int maxcost,int chunks,int sm_offset )
{
  extern __shared__ uint64 rc_sm[];

  const int Row = blockIdx.y;
  const int Col =blockIdx.x*blockDim.x + threadIdx.x;
  int wc = wsize/2;

  uint64* lbs = &rc_sm[sm_offset];   // left-descriptor region of shared memory

  // all blocks except the first keep an ndisp-column apron on their left
  int threaddispl = 0;
  if(blockIdx.x >0){
    threaddispl=ndisp;
  }

  // number of passes needed to load the ndisp-wide apron with blockDim.x threads
  float rp = ceil( (float)ndisp/blockDim.x );
  for(int b=0; b<rp; b++){
    if(blockIdx.x > 0 && threadIdx.x < ndisp && (int)(Col -(ndisp-b*blockDim.x))>=0 ){
      for (int ch=0; ch< chunks; ch++){
        rc_sm[(threadIdx.x+b*blockDim.x)*chunks+ch] =
          censusr[ ((Row+wc)*cols + (Col -(ndisp-b*blockDim.x)+wc))*chunks +ch ];
      }
    }
  }
  __syncthreads();

  if(Row < rows-wsize && Col < cols-wsize){
    const int index = ((Row+wc)*cols+ (Col+wc))*chunks;

    // load this pixel's left and right descriptors
    for(int ch=0; ch< chunks; ch++){
      lbs[threadIdx.x*chunks+ch] = censusl[index+ch];
      rc_sm[(threaddispl+ threadIdx.x )*chunks+ch] = censusr[index+ch ];
    }
    // NOTE(review): this __syncthreads() is inside a divergent branch
    // (Col condition varies within the block) — confirm barrier safety.
    __syncthreads();

    for (int d=0; d< ndisp; d++){
      const int dindex = threaddispl+threadIdx.x-d;   // shared-memory slot of the right pixel at disparity d
      if(Col < cols-wsize && dindex >=0 && (int)Col-d>=0){
        float sum =0;
        for(int ch=0; ch<chunks; ch++){
          // Hamming distance: popcount of XOR per chunk
          uint64 r = lbs[ threadIdx.x*chunks+ch ]^rc_sm[dindex*chunks + ch ];
          sum +=(float)__popcll(r);
        }
        cost[d*rows*cols+(Row+wc)*cols + (Col+wc)] = sum;
      }
    }
  }
}

// Generic (unlimited-descriptor) census transform: identical bit packing to
// CensusTransformKernel, but chunks are stored plane-wise at
// census[(ch*rows + row)*cols + col] so shared memory needs only one chunk
// at a time in the matching kernel.
// NOTE(review): unlike CensusTransformKernel, the final store has no
// `ch < chunks` guard — confirm `chunks` always covers wsize*wsize bits at
// 63 bits per chunk.
__global__ void CensusTransformKernelgen(const float* image, uint64* census ,int rows, int cols, int wsize, int chunks )
{
  const int shift = blockIdx.x*wsize;
  extern __shared__ float cens_slice[];

  int Row = blockIdx.y;
  int Col = blockIdx.x*blockDim.x+ threadIdx.x-shift;
  int wc = wsize/2;

  float center;
  uint pos = 0;
  uint64 cens=0;
  int ch=0;

  if(Col < cols-wsize && Row< rows-wsize){
    center = image[(Row+wc)*cols + Col+wc ];
  }
  __syncthreads();

  if(Col < cols && Row< rows-wsize){
    for(int i=0; i<wsize; i++){
      cens_slice[threadIdx.x] = image[(Row+i)*cols + Col ];
      __syncthreads();

      if(threadIdx.x < blockDim.x-wsize){
        for(int ww=0; ww<wsize;ww++){
          if( center < cens_slice[threadIdx.x+ww])
            cens ^= 1UL << pos;   // NOTE(review): 1ULL would be portable
          pos++;
          if( (pos & 63) == 63 ){
            census[ (ch*rows+ (Row+wc))*cols + (Col+wc) ] = cens;
            ch++;
            cens=0;
            pos=0;
          }
        }
      }
      __syncthreads();
    }
    if(threadIdx.x < blockDim.x-wsize){
      census[ (ch*rows+ (Row+wc))*cols + (Col+wc) ] = cens;
    }
  }
}

// Generic matching-cost kernel for the plane-wise census layout: iterates
// over chunks, keeping only one chunk of right descriptors (plus the ndisp
// apron) in shared memory, and accumulates the Hamming distance into the
// cost volume by subtracting (64 - popcount) from the caller-initialised
// maxcost value (see inti_cost).  Needs (blockDim.x+ndisp) uint64s of
// dynamic shared memory.
// NOTE(review): the validity test here is `dind > 0` while CensusSADKernel
// uses `dindex >= 0` — the dind == 0 slot is skipped; confirm intended.
__global__ void CensusSADKernelgen(uint64* censusl, uint64* censusr, float* cost, int rows, int cols, int ndisp, int wsize,int maxcost,int chunks )
{
  extern __shared__ uint64 rc_sm[];

  const int Row = blockIdx.y;
  const int Col =blockIdx.x*blockDim.x + threadIdx.x;
  int wc = wsize/2;

  uint64 lbs;          // this pixel's left descriptor chunk (register)

  int threaddispl = 0;
  if(blockIdx.x >0){
    threaddispl=ndisp;
  }

  float rp = ceil( (float)ndisp/blockDim.x );

  for(int ch=0; ch<chunks;ch++){
    // load the ndisp-wide left apron of right descriptors for this chunk
    for(int b=0; b<rp; b++){
      if(blockIdx.x > 0 && threadIdx.x < ndisp && (int)(Col -(ndisp-b*blockDim.x)) >=0){
        rc_sm[(threadIdx.x+b*blockDim.x)] =
          censusr[ (ch*rows + (Row+wc))*cols + (Col -(ndisp-b*blockDim.x)+wc) ];
      }
    }

    if(Row < rows-wsize && Col < cols-wsize){
      const int index = (ch*rows + (Row+wc))*cols + (Col+wc);
      lbs = censusl[index];
      rc_sm[threaddispl+ threadIdx.x] = censusr[index ];
    }
    __syncthreads();

    if(Row < rows-wsize && Col < cols-wsize){
      for (int d=0; d< ndisp; d++){
        const int dind = threaddispl+threadIdx.x-d;
        if(dind >0){
          // accumulate Hamming distance: cost starts at maxcost = 64*chunks,
          // subtracting (64 - popcount) per chunk leaves sum of popcounts
          cost[(d*rows+(Row+wc))*cols + (Col+wc)] -= 64 - (float)__popcll(lbs^rc_sm[dind]);
        }
      }
    }
    __syncthreads();
  }
}

// Fills the whole cost volume with maxcost.  Launched on a 2D
// BLOCK_SIZE x BLOCK_SIZE grid over the image; each thread writes its pixel
// for every disparity.
// NOTE(review): "inti" is presumably a typo for "init" — name kept since
// call sites use it.
__global__ void inti_cost( float* cost, int rows, int cols, int ndisp,float maxcost )
{
  int Row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
  int Col = blockIdx.x * BLOCK_SIZE + threadIdx.x;

  if( Row < rows && Col < cols){
    for(int i=0; i<ndisp; i++){
      cost[ i*rows*cols+Row*cols+Col ] = maxcost;
    }
  }
}

// Prints the command-line help text.
void usage(void)
{
  std::cout << "Census genmeric CUDA implementation" << std::endl;
  std::cout << "Arguments" << std::endl;
  std::cout << "-l:\t\t Left image | File containing names of the left images" << std::endl;
  std::cout << "-r:\t\t Right image | File containing the names of the right images" << std::endl;
  std::cout << "-ndisp:\t\t Number of Disparities" << std::endl;
  std::cout << "-wsize:\t\t Window size" << std::endl;
  std::cout << "-dopost:\t Default false. If set, activates sgm cost optimization" << std::endl;
  std::cout << "-list:\t\t Default is single file. If set, left and right files should be lists of images." << std::endl;
  std::cout << "-out:\t\t Output directory for disparity images." << std::endl;
  std::cout << "-out_type:\t Output image type. Supports pgm|pfm|png|disp(uint16 png format)." << std::endl;
  std::cout << "-postconf:\t Optional configuration file for post-processing."
<< std::endl;
    std::cout << "-h:\t\t Prints this help" << std::endl;
}

/**
 * Host driver for a census-transform + SGM stereo matcher.
 *
 * Parses command-line options, allocates pinned host and device buffers,
 * then for each image pair: computes census transforms, a SAD matching-cost
 * volume, optional SGM aggregation + sub-pixel/median/mean filtering, and
 * writes the resulting disparity map.
 *
 * Fixes relative to the previous revision:
 *  - imgl/imgr were released with cudaFreeHost() twice (double free of
 *    pinned memory); they are now freed exactly once.
 *  - the host-side Gaussian kernel buffer and its device copy (kernel_d)
 *    were leaked; both are now released.
 */
int main(int argc, char* argv[])
{
    string leftfile;
    string rightfile;
    string out = string(".");        // output directory
    string out_t = string("disp");   // output format tag handed to imgio
    int wsize = 9;                   // census window size
    int ndisp = 256;                 // number of disparity hypotheses
    bool post = false;               // enable SGM + filtering post-processing
    bool single = true;              // single pair vs. list-file mode
    int argsassigned = 0;
    int required = 0;

    // SGM / post-processing parameters (overridable via -postconf).
    postparams params;
    params.pi1 = 30;
    params.pi2 = 500;
    params.tau_so = 1;
    params.alpha1 = 2;
    params.sgm_q1 = 3;
    params.sgm_q2 = 2;
    params.alpha2 = 6;
    params.sigma = 5.99;
    params.kernel_size = 5;

    int direction = -1;

    // NOTE(review): flags that take a value read argv[++i] without checking
    // i+1 < argc; a malformed command line can read past argv.
    for (int i = 0; i < argc; i++) {
        if (!strcmp(argv[i], "-l")) {
            leftfile = string(argv[++i]);
            argsassigned++;
            required++;
        } else if (!strcmp(argv[i], "-r")) {
            rightfile = string(argv[++i]);
            argsassigned++;
            required++;
        } else if (!strcmp(argv[i], "-ndisp")) {
            ndisp = atoi(argv[++i]);
            argsassigned++;
            required++;
        } else if (!strcmp(argv[i], "-wsize")) {
            wsize = atoi(argv[++i]);
            argsassigned++;
            required++;
        } else if (!strcmp(argv[i], "-dopost")) {
            post = true;
            argsassigned++;
        } else if (!strcmp(argv[i], "-list")) {
            single = false;
            argsassigned++;
        } else if (!strcmp(argv[i], "-out")) {
            out = string(argv[++i]);
            argsassigned++;
        } else if (!strcmp(argv[i], "-out_type")) {
            out_t = string(argv[++i]);
            argsassigned++;
        } else if (!strcmp(argv[i], "-postconf")) {
            parseConf(params, string(argv[++i]));
            argsassigned++;
        } else if (!strcmp(argv[i], "-h")) {
            usage();
            return 0;
        }
    }

    if (argsassigned == 0) {
        usage();
        return 0;
    }
    if (argsassigned == 1) {
        // Exactly one flag given: fall back to the bundled demo image lists.
        leftfile = string("../../leftimg.txt");
        rightfile = string("../../rightimg.txt");
    } else if (required < 4) {
        usage();
        return 0;
    }

    std::vector<string> limg;
    std::vector<string> rimg;
    if (single) {
        limg.push_back(leftfile);
        rimg.push_back(rightfile);
    } else {
        limg = getImages(leftfile);
        rimg = getImages(rightfile);
    }

    imgio* imgutil = new imgio();
    // Read image dimensions from the first left image; all pairs are assumed
    // to share the same size.
    imgutil->read_image_meta(limg[0].c_str());

    //######################### Allocate memory on the device ###########################################//
    // Pinned host buffers for the input images (enables async H2D copies).
    float* imgl;
    size_t ibytes = imgutil->getWidth() * imgutil->getHeight() * sizeof(float);
    cudaMallocHost((void**)&imgl, ibytes);
    float* imgr;
    cudaMallocHost((void**)&imgr, ibytes);

    // Matching-cost volumes: width x height x ndisp.
    float* cost_d;
    size_t bytes = imgutil->getWidth() * imgutil->getHeight() * ndisp * sizeof(float);
    cudaMalloc((void**)&cost_d, bytes);
    float* post_cost_d;
    cudaMalloc((void**)&post_cost_d, bytes);

    // Round the census vector length up to a multiple of 64 so each descriptor
    // is a whole number of 64-bit words.
    int vecsize = wsize * wsize;
    if (vecsize % 64 > 0)
        vecsize += 64 - (vecsize & 63);
    int tchuncks = vecsize / 64;
    float maxcost = tchuncks * 64;

    uint64* census_l_d;
    cudaMalloc(&census_l_d, imgutil->getWidth() * imgutil->getHeight() * tchuncks * sizeof(uint64));
    cudaMemsetAsync(census_l_d, 0, imgutil->getWidth() * imgutil->getHeight() * tchuncks * sizeof(uint64), 0);
    uint64* census_r_d;
    cudaMalloc(&census_r_d, imgutil->getWidth() * imgutil->getHeight() * tchuncks * sizeof(uint64));
    cudaMemsetAsync(census_r_d, 0, imgutil->getWidth() * imgutil->getHeight() * tchuncks * sizeof(uint64), 0);

    // Disparity buffers: pinned host result + two device ping-pong maps.
    float* disp_h;
    size_t dbytes = imgutil->getWidth() * imgutil->getHeight() * sizeof(float);
    cudaMallocHost((void**)&disp_h, dbytes);
    float* disp_d;
    cudaMalloc(&disp_d, dbytes);
    cudaMemsetAsync(disp_d, 0, imgutil->getWidth() * imgutil->getHeight() * sizeof(float), 0);
    float* disp_tmp;
    cudaMalloc(&disp_tmp, dbytes);
    cudaMemsetAsync(disp_tmp, 0, imgutil->getWidth() * imgutil->getHeight() * sizeof(float), 0);

    float* imgl_d;
    cudaMalloc(&imgl_d, imgutil->getWidth() * imgutil->getHeight() * sizeof(float));
    float* imgr_d;
    cudaMalloc(&imgr_d, imgutil->getWidth() * imgutil->getHeight() * sizeof(float));

    // Launch configurations for the individual kernels.
    dim3 swapBlock(BLOCK_D_SIZE, 16, 1);
    dim3 swapGrid(ceil((float)imgutil->getWidth() * imgutil->getHeight() / BLOCK_D_SIZE),
                  ceil((float)ndisp / BLOCK_D_SIZE));
    dim3 dimBlockCens(XDIM_MAX_THREADS);
    float blockx = (float)imgutil->getWidth() / XDIM_MAX_THREADS;
    dim3 dimGridCens(ceil((float)blockx) + (blockx * wsize) / XDIM_MAX_THREADS,
                     imgutil->getHeight() - wsize);
    dim3 dimBlock(XDIM_Q_THREADS);
    dim3 dimGrid(ceil((float)imgutil->getWidth() / XDIM_Q_THREADS), imgutil->getHeight() - wsize);
    dim3 argBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 argGrid(ceil((float)imgutil->getWidth() / BLOCK_SIZE),
                 ceil((float)imgutil->getHeight() / BLOCK_SIZE));

    int width = imgutil->getWidth();
    int height = imgutil->getHeight();
    int size1 = height * ndisp;  // elements per SGM column sweep
    int size2 = width * ndisp;   // elements per SGM row sweep

    float* tmp_d;
    cudaMalloc(&tmp_d, width * ndisp * sizeof(float));
    cudaMemsetAsync(tmp_d, 0, width * ndisp * sizeof(float), 0);
    float* left_cross;
    cudaMalloc(&left_cross, 4 * height * width * sizeof(float));
    cudaMemsetAsync(left_cross, 0, 4 * height * width * sizeof(float), 0);
    float* right_cross;
    cudaMalloc(&right_cross, 4 * height * width * sizeof(float));
    cudaMemsetAsync(right_cross, 0, 4 * height * width * sizeof(float), 0);

    // Gaussian kernel used by the mean2d smoothing pass.
    int kr = ceil(params.sigma * 3);
    int ks = kr * 2 + 1;
    float* kernel = (float*)calloc(ks * ks, sizeof(float));
    for (int i = 0; i < ks; i++) {
        for (int j = 0; j < ks; j++) {
            // NOTE(review): (i-1)-kr centers the Gaussian at index kr+1 rather
            // than kr; looks like an off-by-one, kept as-is to preserve output.
            int y = (i - 1) - kr;
            int x = (j - 1) - kr;
            kernel[i * ks + j] = exp(-(x * x + y * y) / (2 * params.sigma * params.sigma));
        }
    }
    float* kernel_d;
    cudaMalloc(&kernel_d, ks * ks * sizeof(float));
    cudaMemcpy(kernel_d, kernel, ks * ks * sizeof(float), cudaMemcpyHostToDevice);

    // Two streams so the left/right H2D copies can overlap.
    cudaStream_t stream1;
    cudaStream_t stream2;
    cudaStreamCreate(&stream1);
    cudaStreamCreate(&stream2);

    //#######################################################################################################################//
    for (size_t i = 0; i < limg.size(); i++) {
        imgutil->read_image(limg[i], imgl);
        imgutil->read_image(rimg[i], imgr);

        // Reset the cost volume while the input copies are in flight.
        inti_cost<<<argGrid, argBlock, 0, stream1>>>(cost_d, height, width, ndisp, maxcost);
        cudaMemcpyAsync(imgl_d, imgl, width * height * sizeof(float), cudaMemcpyHostToDevice, stream1);
        cudaMemcpyAsync(imgr_d, imgr, width * height * sizeof(float), cudaMemcpyHostToDevice, stream2);

        // Use the shared-memory SAD kernel when the census rows fit; otherwise
        // fall back to the slower generic variant.
        if (((2 * XDIM_Q_THREADS + ndisp) * tchuncks) * sizeof(uint64) < SHARED_MEMORY) {
            CensusTransformKernel<<<dimGridCens, dimBlockCens, XDIM_MAX_THREADS * sizeof(float)>>>(
                imgl_d, census_l_d, height, width, wsize, tchuncks);
            CensusTransformKernel<<<dimGridCens, dimBlockCens, XDIM_MAX_THREADS * sizeof(float)>>>(
                imgr_d, census_r_d, height, width, wsize, tchuncks);
            CensusSADKernel<<<dimGrid, dimBlock, ((2 * XDIM_Q_THREADS + ndisp) * tchuncks) * sizeof(uint64)>>>(
                census_l_d, census_r_d, cost_d, height, width, ndisp, wsize, maxcost, tchuncks,
                ((XDIM_Q_THREADS + ndisp) * tchuncks));
        } else {
            // Generic no limit. slower
            CensusTransformKernelgen<<<dimGridCens, dimBlockCens, XDIM_MAX_THREADS * sizeof(float)>>>(
                imgl_d, census_l_d, height, width, wsize, tchuncks);
            CensusTransformKernelgen<<<dimGridCens, dimBlockCens, XDIM_MAX_THREADS * sizeof(float)>>>(
                imgr_d, census_r_d, height, width, wsize, tchuncks);
            CensusSADKernelgen<<<dimGrid, dimBlock, (XDIM_Q_THREADS + ndisp) * sizeof(uint64)>>>(
                census_l_d, census_r_d, cost_d, height, width, ndisp, wsize, maxcost, tchuncks);
        }

        if (post) {
            // SGM aggregation over four scan directions, then sub-pixel,
            // median and Gaussian-mean refinement.
            swap_axis<<<swapGrid, swapBlock>>>(cost_d, post_cost_d, height, width, ndisp);
            cudaMemset(cost_d, 0, height * width * ndisp * sizeof(float));
            for (int step = 0; step < width; step++) {
                sgm_loop<0><<<(size1 - 1) / ndisp + 1, ndisp, 2 * ndisp * sizeof(float)>>>(
                    imgl_d, imgr_d, post_cost_d, cost_d, tmp_d,
                    params.pi1, params.pi2, params.tau_so, params.alpha1,
                    params.sgm_q1, params.sgm_q2, direction, height, width, ndisp, step);
            }
            for (int step = 0; step < width; step++) {
                sgm_loop<1><<<(size1 - 1) / ndisp + 1, ndisp, 2 * ndisp * sizeof(float)>>>(
                    imgl_d, imgr_d, post_cost_d, cost_d, tmp_d,
                    params.pi1, params.pi2, params.tau_so, params.alpha1,
                    params.sgm_q1, params.sgm_q2, direction, height, width, ndisp, step);
            }
            for (int step = 0; step < height; step++) {
                sgm_loop<2><<<(size2 - 1) / ndisp + 1, ndisp, 2 * ndisp * sizeof(float)>>>(
                    imgl_d, imgr_d, post_cost_d, cost_d, tmp_d,
                    params.pi1, params.pi2, params.tau_so, params.alpha1,
                    params.sgm_q1, params.sgm_q2, direction, height, width, ndisp, step);
            }
            for (int step = 0; step < height; step++) {
                sgm_loop<3><<<(size2 - 1) / ndisp + 1, ndisp, 2 * ndisp * sizeof(float)>>>(
                    imgl_d, imgr_d, post_cost_d, cost_d, tmp_d,
                    params.pi1, params.pi2, params.tau_so, params.alpha1,
                    params.sgm_q1, params.sgm_q2, direction, height, width, ndisp, step);
            }
            argmin<<<argGrid, argBlock>>>(disp_d, cost_d, height, width, ndisp);
            subpixel_enchancement<<<(height * width - 1) / TB + 1, TB>>>(
                disp_d, cost_d, disp_tmp, height * width, height * width, ndisp);
            median2d<<<(height * width - 1) / TB + 1, TB>>>(
                disp_tmp, disp_d, height * width, height, width, params.kernel_size / 2);
            mean2d<<<(height * width - 1) / TB + 1, TB>>>(
                disp_d, kernel_d, disp_tmp, height * width, ks / 2, height, width, params.alpha2);
        } else {
            argmin_d<<<argGrid, argBlock>>>(disp_tmp, cost_d, height, width, ndisp);
        }

        // Blocking copy: also synchronizes the device before the error check.
        cudaMemcpy(disp_h, disp_tmp, height * width * sizeof(float), cudaMemcpyDeviceToHost);

        cudaError_t err = cudaGetLastError();
        if (err != cudaSuccess)
            printf("Error: %s\n", cudaGetErrorString(err));

        imgutil->write_image(out + string("/") + limg[i].substr(limg[i].find_last_of("/") + 1),
                             disp_h, out_t);
    }

    // Cleanup — each buffer released exactly once (previous revision freed
    // imgl/imgr twice and leaked kernel/kernel_d).
    cudaStreamDestroy(stream1);
    cudaStreamDestroy(stream2);
    cudaFree(left_cross);
    cudaFree(right_cross);
    cudaFree(tmp_d);
    cudaFreeHost(imgl);
    cudaFreeHost(imgr);
    cudaFreeHost(disp_h);
    cudaFree(disp_d);
    cudaFree(disp_tmp);
    cudaFree(imgl_d);
    cudaFree(imgr_d);
    cudaFree(cost_d);
    cudaFree(post_cost_d);
    cudaFree(census_l_d);
    cudaFree(census_r_d);
    cudaFree(kernel_d);
    free(kernel);
    delete imgutil;

    return 0;
}
the_stack
using namespace FW;

//------------------------------------------------------------------------
// Global variables.
//------------------------------------------------------------------------

// Kernel input block, stored in constant memory as raw int4 words (the host
// presumably uploads it with cudaMemcpyToSymbol — TODO confirm). The array
// size rounds sizeof(AmbientInput) up to a whole number of int4 elements.
__constant__ int4 c_input[(sizeof(AmbientInput) + sizeof(int4) - 1) / sizeof(int4)];

// Global work counter: warps atomically fetch request indices from here
// (persistent-threads style work distribution, see ambientKernel below).
__device__ S32 g_warpCounter;

//------------------------------------------------------------------------
// Common helper functions.
//------------------------------------------------------------------------

// Reinterpret the constant-memory blob as the typed input struct.
__device__ inline const AmbientInput& getInput(void)
{
    return *(const AmbientInput*)c_input;
}

// Performance-counter hooks; deliberately empty no-ops in this build so the
// instrumented call sites in the included .inl files compile away.
__device__ inline void updateCounter(PerfCounter counter, int amount = 1) { }
__device__ inline void updateCountersForGlobalAccess (int sizeLog2, S32* addr) { }
__device__ inline void updateCountersForLocalAccess  (int sizeLog2, int id) { }

//------------------------------------------------------------------------
// Utility routines.
//------------------------------------------------------------------------

#include "Util.inl"
#include "Raycast.inl"
#include "AttribLookup.inl"

//------------------------------------------------------------------------
// Private definitions.
//------------------------------------------------------------------------

// Component-wise float3 copy. Used instead of operator= because Aux lives in
// volatile shared memory, where whole-struct float3 assignment is unavailable.
#define F3COPY(a,b) a.x=b.x, a.y=b.y, a.z=b.z

// Per-thread scratch record kept in shared memory (one entry per thread).
struct Aux
{
    float rx;       // 2D rotation applied to the AO sample table (x component)
    float ry;       // 2D rotation applied to the AO sample table (y component)
    float3 normal;  // surface normal at the request position
    float3 orig;    // ray origin; later reused as the per-lane illumination accumulator
    float pad;      // padding — presumably to shape the struct's shared-memory layout, TODO confirm
};

// Precomputed AO sample directions:
// 2 times S16 packed into a 32-bit value (unpacked in ambientKernel below).
__constant__ S32 c_aotable[256] =
{
    0x9029cc81,0x92f8c801,0xa31fb481,0xa5eeb001,0xac40c201,0xae5ca701,0xb4aecb01,
    0xb6c9b901,0xbb009501,0xbc68a881,0xbf37bc01,0xc2bac081,0xc7a4b301,0xc90c9c81,
    0xcbdb9801,0xd012a101,0xd4488f01,0xd5b0af01,0xd664ca01,0xd87fb801,0xdcb69101,
    0xe0edb501,0xe2549881,0xe73fc701,0xe95aa601,0xed918b01,0xeef9bc81,0xf1c8a001,
    0xf54bc881,0xf5fe8e01,0xfa35bb01,0xfb9d8081,0x8384f081,0x8654ec01,0x8a8ada01,
    0x8ec1e301,0x972ffe01,0x9b65d701,0x9ccdef01,0x9f9cf801,0xa3d3d101,0xa80af501,
    0xa971d881,0xb077e601,0xb615fc81,0xb8e5e001,0xbd1bce01,0xc152fb01,0xc589d401,
    0xc9c0e901,0xcdf6dd01,0xcf5ee481,0xd22df201,0xd5560000,0xda9bfd01,0xdc02d481,
    0xded1d001,0xe308ee01,0xe8a6e081,0xeb76f401,0xefacd901,0xf3e3e501,0xf81adc01,
    0xfc51fa01,0xfe6c9401,0x02a2a901,0x06d89d01,0x0840a481,0x08f4c101,0x0b0fb201,
    0x0f469a01,0x1161c401,0x137da301,0x14e48c81,0x17b38801,0x1beabe01,0x20219701,
    0x2189b081,0x2458ac01,0x27dbc281,0x2aaac001,0x2cc5a501,0x30fc9c01,0x3533ba01,
    0x39699301,0x3ad1ac81,0x3b85c901,0x3da0a801,0x4123c481,0x460eb701,0x47759f01,
    0x4c60c301,0x4e7bb101,0x541ab881,0x5f56ab01,0x6e16c601,0x0086d301,0x01eeec81,
    0x04bde801,0x0d2bf701,0x0e92df01,0x1598f101,0x19cfcd01,0x1b36f881,0x1e06e201,
    0x223cd601,0x2673eb01,0x2ee1f601,0x3317db01,0x347fff01,0x374ee401,0x3fbced01,
    0x43f2d201,0x4829ea01,0x4dc7f481,0x5097f001,0x54cdde01,0x5904e701,0x5a6cd081,
    0x5d3bcc01,0x6172f901,0x65a8d501,0x6710e881,0x69dffc01,0x724df301,0x73b4dc81,
    0x7683d801,0x7abae101,0x810f09ff,0x86ae147f,0x897d0fff,0x8db32dff,0x91ea06ff,
    0x9352207f,0x9a5818ff,0x9e8e24ff,0x9ff6087f,0xa2c51bff,0xab3312ff,0xac9a2c7f,
    0xaf6927ff,0xb3a000ff,0xb93f1eff,0xbc0e03ff,0xc04430ff,0xc47b0cff,0xc8b221ff,
    0xcce915ff,0xd11f2aff,0xd287027f,0xd6be227f,0xddc41aff,0xe1fa23ff,0xe63108ff,
    0xea682cff,0xebcf047f,0xee9f11ff,0xf2d529ff,0xf70c02ff,0xfb432fff,0x962133ff,
    0x9c733fff,0xa6fc39ff,0xad4e48ff,0xb2ec447f,0xb5bc51ff,0xb7d736ff,0xb9f269ff,
    0xbe2942ff,0xc2606fff,0xc5e3387f,0xc6975dff,0xcacd66ff,0xcc35507f,0xcf044bff,
    0xd77244ff,0xd8d9707f,0xd98d35ff,0xdba86bff,0xdf2b3eff,0xdfdf59ff,0xe41662ff,
    0xe57d4c7f,0xe84d47ff,0xec837dff,0xf0ba56ff,0xf2226eff,0xf4f177ff,0xf874347f,
    0xf92850ff,0xfd5e74ff,0xfec6587f,0xff7a1dff,0x03af26ff,0x0517107f,0x07e60bff,
    0x105414ff,0x11bb287f,0x18c105ff,0x1cf832ff,0x1e5f1c7f,0x212f17ff,0x256520ff,
    0x299c0eff,0x2b042eff,0x320a10ff,0x37a8187f,0x3a7701ff,0x3eae25ff,0x42e50aff,
    0x471b1fff,0x4b520dff,0x50f0007f,0x53c013ff,0x57f628ff,0x5c2d1cff,0x5d95247f,
    0x606431ff,0x649b19ff,0x68d122ff,0x6a390c7f,0x6d0807ff,0x757616ff,0x7de304ff,
    0x019441ff,0x05cb65ff,0x0a024aff,0x0b697c7f,0x0c1d38ff,0x0e385fff,0x126f4dff,
    0x148a3bff,0x16a67aff,0x180d407f,0x1add53ff,0x1f1368ff,0x234a5cff,0x24b2647f,
    0x278171ff,0x2bb849ff,0x2dd337ff,0x3156547f,0x34254fff,0x364034ff,0x385c6dff,
    0x3c9346ff,0x3dfa607f,0x444c3c7f,0x450058ff,0x493764ff,0x4a9e487f,0x4d6e5bff,
    0x4f893aff,0x55db52ff,0x5e4940ff,0x66b643ff
};

//------------------------------------------------------------------------

// Persistent-threads ambient-occlusion kernel: each warp repeatedly grabs one
// request index from g_warpCounter, locates the requested octree node, casts
// a set of hemisphere rays from the AO table, and warp-reduces the result.
//
// NOTE(review): inter-lane communication (aux0 broadcast, the volatile
// shared-memory tree reduction below) relies on implicit warp-synchronous
// execution with no __syncwarp(); this is pre-Volta-style code — confirm the
// target architecture before reuse.
extern "C" __global__ void ambientKernel(void)
{
    const AmbientInput& input = getInput();

    // One Aux record per thread; aux0 aliases the first record of this warp's
    // row and is used to broadcast the fetched work index to all lanes.
    __shared__ Aux auxbuf[AMBK_BLOCK_WIDTH * AMBK_BLOCK_HEIGHT];
    volatile S32& aux0 = *((S32*)&auxbuf[AMBK_BLOCK_WIDTH * threadIdx.y]);
    volatile Aux& aux = auxbuf[threadIdx.x + AMBK_BLOCK_WIDTH * threadIdx.y];
    const OctreeMatrices& mtx = getInput().octreeMatrices; // appears unused in this kernel

    // fetch first warp of work
    if (threadIdx.x == 0)
        aux0 = atomicAdd(&g_warpCounter, 1);
    int warp = aux0;
    if (warp >= input.numRequests)
        return; // terminate before starting at all

    // notice that work is being done in this warp slot
    ((S32*)input.activeWarps)[threadIdx.y + blockIdx.x * AMBK_BLOCK_HEIGHT] = 1;

    CastResult castRes;
    CastStack stack;

    // main warp loop
    for (;;)
    {
        // request index
        int ridx = warp;
        if (ridx >= input.numRequests)
            return;

        {
            AmbientRequest& req = ((AmbientRequest*)input.requestPtr)[ridx];

            // construct node position
            const U64* node = (const U64*)getInput().rootNode;
            S32 stackPtr = CAST_STACK_DEPTH - 1;
            int rlevel = req.level;
            int cidx = 0;

            // find the node: descend from the root, picking the child whose
            // bit of the integer position matches at each level.
            do
            {
                // determine child idx
                U32 smask = 1 << stackPtr;
                cidx = 0;
                if (req.pos.x & smask) cidx |= 1;
                if (req.pos.y & smask) cidx |= 2;
                if (req.pos.z & smask) cidx |= 4;

                if (stackPtr <= rlevel)
                    break;

                // move down
                U32 nodeData = *(const U32*)node;
                S32 bits = nodeData << (8-cidx);
                stack.write(stackPtr, (S32*)node, 0.0f);
                stackPtr--;
                int ofs = nodeData >> 17;
                // bit 16 selects a far-pointer indirection
                node += (nodeData & 0x10000) ? *(const S32*)(node + ofs) : ofs;
                node += popc8(bits & 0xFF);
            } while (stackPtr >= 0); // always true

            // construct request position in float
            // (integer position bits placed into the [1,2) float mantissa)
            float3 rpos;
            rpos.x = __int_as_float(req.pos.x + 0x3f800000u);
            rpos.y = __int_as_float(req.pos.y + 0x3f800000u);
            rpos.z = __int_as_float(req.pos.z + 0x3f800000u);

            // set up position struct
            castRes.node     = (S32*)node;
            castRes.stackPtr = stackPtr;
            castRes.childIdx = cidx;
            castRes.pos      = rpos;
            float3 orig = rpos;

            // sample color and normal at request position, adjust ray origin
            F32 vsize = __int_as_float((127 - ::min(CAST_STACK_DEPTH - rlevel, 13)) << 23);
            float4 color; // dummy
            float3 normal;
            lookupVoxelColorNormal(color, normal, castRes, stack);
            normal = normalize(normal);
            // offset the origin by one voxel along the dominant normal axis
            float nlen = 1.f / fmaxf3(fabsf(normal.x), fabsf(normal.y), fabsf(normal.z));
            orig += normal * (vsize * nlen);
            F3COPY(aux.normal, normal);
            F3COPY(aux.orig, orig);

            // construct 2d rotation for samples (hash-seeded rejection
            // sampling of a random unit vector in the disk)
            U32 ix = __float_as_int(rpos.x);
            U32 iy = __float_as_int(rpos.y);
            U32 iz = __float_as_int(rpos.z);
            jenkinsMix(ix, iy, iz);
            ix ^= req.level;
            float rx, ry, rlen;
            do
            {
                jenkinsMix(ix, iy, iz);
                rx = (float)ix / (4.f * (1u << 30)) * 2.f - 1.f;
                ry = (float)iy / (4.f * (1u << 30)) * 2.f - 1.f;
                rlen = rx*rx+ry*ry;
            } while (rlen > 1.f);
            rlen = rsqrtf(rlen);
            aux.rx = rx * rlen;
            aux.ry = ry * rlen;
        }

        // construct ray
        Ray ray;
        F3COPY(ray.orig, aux.orig);
        ray.orig_sz = 0.f;
        ray.dir_sz = 0.f;

        // light vector
        float3 L = { -.4f, .5f, -.3f };
        L = normalize(L);

        // cast the ao rays
        float3 illum;
#ifdef FLIP_NORMALS
        for (int pass = 0; pass < 2; pass++)
#endif
        {
            illum = make_float3(0.f, 0.f, 0.f);
            // each lane handles every 32nd table entry
            for (int i=threadIdx.x; i < input.raysPerNode; i += 32)
            {
                // use ao table: unpack two S16 sample coordinates
                S32 ao32 = c_aotable[i];
                float sy = (float)ao32 * __int_as_float(0x30000000);
                ao32 <<= 16;
                float sx = (float)ao32 * __int_as_float(0x30000000);

                // rotate in 2d
                float x = aux.rx*sx + aux.ry*sy;
                float y = aux.ry*sx - aux.rx*sy;

                // construct basis for normal
                float3 normal;
                F3COPY(normal, aux.normal);
                float3 b1 = normalize(perpendicular(normal));
                float3 b2 = cross(normal, b1);

                // set ray direction
                float z = sqrtf(fabsf(1.f - x*x - y*y));
                ray.dir = x*b1 + y*b2 + z*normal;
                ray.dir *= input.rayLength;
#ifdef FLIP_NORMALS
                if (pass == 1)
                    ray.dir *= -1.0f;
#endif
                // cast the ray
                CastResult castResRay;
                CastStack stackRay;
                castRay(castResRay, stackRay, ray);

                float ill = smoothstep(castResRay.t * 2.f - 1.f); // taper off in last 50%
                illum.x += ill;
                illum.y += ill;
                illum.z += ill;
            }

            // calculate result
            illum *= (1.f / input.raysPerNode);

            // sum over warp: butterfly-style tree reduction through volatile
            // shared memory; lane 0 of the row ends up with the full sum.
            F3COPY(aux.orig, illum);
            if (!(threadIdx.x & 1))  aux.orig.x+=(&aux+ 1)->orig.x,aux.orig.y+=(&aux+ 1)->orig.y,aux.orig.z+=(&aux+ 1)->orig.z;
            if (!(threadIdx.x & 2))  aux.orig.x+=(&aux+ 2)->orig.x,aux.orig.y+=(&aux+ 2)->orig.y,aux.orig.z+=(&aux+ 2)->orig.z;
            if (!(threadIdx.x & 4))  aux.orig.x+=(&aux+ 4)->orig.x,aux.orig.y+=(&aux+ 4)->orig.y,aux.orig.z+=(&aux+ 4)->orig.z;
            if (!(threadIdx.x & 8))  aux.orig.x+=(&aux+ 8)->orig.x,aux.orig.y+=(&aux+ 8)->orig.y,aux.orig.z+=(&aux+ 8)->orig.z;
            if (!(threadIdx.x & 16)) aux.orig.x+=(&aux+16)->orig.x,aux.orig.y+=(&aux+16)->orig.y,aux.orig.z+=(&aux+16)->orig.z;
#ifdef FLIP_NORMALS
            // stop after the first pass if enough light got through;
            // otherwise retry with the origin mirrored about the request point
            if (auxbuf[AMBK_BLOCK_WIDTH * threadIdx.y].orig.x >= 0.1f)
                break;
            AmbientRequest& req = ((AmbientRequest*)input.requestPtr)[ridx];
            ray.orig.x = __int_as_float(req.pos.x + 0x3f800000u) * 2.0f - ray.orig.x;
            ray.orig.y = __int_as_float(req.pos.y + 0x3f800000u) * 2.0f - ray.orig.y;
            ray.orig.z = __int_as_float(req.pos.z + 0x3f800000u) * 2.0f - ray.orig.z;
#endif
        }

        // write result (lane 0 holds the reduced sum)
        if (threadIdx.x == 0)
        {
            AmbientResult& res = ((AmbientResult*)input.resultPtr)[ridx];
            F3COPY(res.ao, aux.orig);
        }

        // fetch more work
        if (threadIdx.x == 0)
            aux0 = atomicAdd(&g_warpCounter, 1);
        warp = aux0;
    }
}

//------------------------------------------------------------------------
the_stack
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/transform.hpp"
#include "opencv2/core/cuda/color.hpp"
#include "cvt_color_internal.h"

namespace cv { namespace cuda { namespace device
{
    // Launch-configuration overrides for the generic transform() engine.
    // Each specialization tunes block shape / per-thread work (smart_shift)
    // for the 4-channel functors, which move more bytes per pixel than the
    // defaults assume.
    OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(bgra_to_rgba_traits<uchar>::functor_type)
    {
        enum { smart_block_dim_x = 8 };
        enum { smart_block_dim_y = 8 };
        enum { smart_shift = 4 };
    };

    OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(bgra_to_bgr555_traits::functor_type)
    {
        enum { smart_block_dim_y = 8 };
        enum { smart_shift = 4 };
    };
    OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(rgba_to_bgr555_traits::functor_type)
    {
        enum { smart_block_dim_y = 8 };
        enum { smart_shift = 4 };
    };
    OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(bgra_to_bgr565_traits::functor_type)
    {
        enum { smart_block_dim_y = 8 };
        enum { smart_shift = 4 };
    };
    OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(rgba_to_bgr565_traits::functor_type)
    {
        enum { smart_block_dim_y = 8 };
        enum { smart_shift = 4 };
    };

    OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(bgr555_to_bgra_traits::functor_type)
    {
        enum { smart_block_dim_y = 8 };
        enum { smart_shift = 4 };
    };
    OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(bgr555_to_rgba_traits::functor_type)
    {
        enum { smart_block_dim_y = 8 };
        enum { smart_shift = 4 };
    };
    OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(bgr565_to_bgra_traits::functor_type)
    {
        enum { smart_block_dim_y = 8 };
        enum { smart_shift = 4 };
    };
    OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(bgr565_to_rgba_traits::functor_type)
    {
        enum { smart_block_dim_y = 8 };
        enum { smart_shift = 4 };
    };

    OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(gray_to_bgra_traits<uchar>::functor_type)
    {
        enum { smart_block_dim_y = 8 };
        enum { smart_shift = 4 };
    };
    OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(gray_to_bgr555_traits::functor_type)
    {
        enum { smart_shift = 4 };
    };
    OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(gray_to_bgr565_traits::functor_type)
    {
        enum { smart_shift = 4 };
    };

    OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(bgra_to_yuv4_traits<uchar>::functor_type)
    {
        enum { smart_block_dim_y = 8 };
        enum { smart_shift = 4 };
    };
    OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(rgba_to_yuv4_traits<uchar>::functor_type)
    {
        enum { smart_block_dim_y = 8 };
        enum { smart_shift = 4 };
    };
    OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(yuv4_to_bgra_traits<uchar>::functor_type)
    {
        enum { smart_block_dim_y = 8 };
        enum { smart_shift = 4 };
    };
    OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(yuv4_to_rgba_traits<uchar>::functor_type)
    {
        enum { smart_block_dim_y = 8 };
        enum { smart_shift = 4 };
    };

    OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(bgra_to_YCrCb4_traits<uchar>::functor_type)
    {
        enum { smart_block_dim_y = 8 };
        enum { smart_shift = 4 };
    };
    OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(rgba_to_YCrCb4_traits<uchar>::functor_type)
    {
        enum { smart_block_dim_y = 8 };
        enum { smart_shift = 4 };
    };
    OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(YCrCb4_to_bgra_traits<uchar>::functor_type)
    {
        enum { smart_block_dim_y = 8 };
        enum { smart_shift = 4 };
    };
    OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(YCrCb4_to_rgba_traits<uchar>::functor_type)
    {
        enum { smart_block_dim_y = 8 };
        enum { smart_shift = 4 };
    };

    OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(bgra_to_xyz4_traits<uchar>::functor_type)
    {
        enum { smart_block_dim_y = 8 };
        enum { smart_shift = 4 };
    };
    OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(rgba_to_xyz4_traits<uchar>::functor_type)
    {
        enum { smart_block_dim_y = 8 };
        enum { smart_shift = 4 };
    };
    OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(xyz4_to_bgra_traits<uchar>::functor_type)
    {
        enum { smart_block_dim_y = 8 };
        enum { smart_shift = 4 };
    };
    OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(xyz4_to_rgba_traits<uchar>::functor_type)
    {
        enum { smart_block_dim_y = 8 };
        enum { smart_shift = 4 };
    };

    OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(bgra_to_hsv4_traits<uchar>::functor_type)
    {
        enum { smart_block_dim_y = 8 };
        enum { smart_shift = 4 };
    };
    OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(rgba_to_hsv4_traits<uchar>::functor_type)
    {
        enum { smart_block_dim_y = 8 };
        enum { smart_shift = 4 };
    };
    OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(hsv4_to_bgra_traits<uchar>::functor_type)
    {
        enum { smart_block_dim_y = 8 };
        enum { smart_shift = 4 };
    };
    OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(hsv4_to_rgba_traits<uchar>::functor_type)
    {
        enum { smart_block_dim_y = 8 };
        enum { smart_shift = 4 };
    };

    OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(bgra_to_hls4_traits<uchar>::functor_type)
    {
        enum { smart_block_dim_y = 8 };
        enum { smart_shift = 4 };
    };
    OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(rgba_to_hls4_traits<uchar>::functor_type)
    {
        enum { smart_block_dim_y = 8 };
        enum { smart_shift = 4 };
    };
    OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(hls4_to_bgra_traits<uchar>::functor_type)
    {
        enum { smart_block_dim_y = 8 };
        enum { smart_shift = 4 };
    };
    OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(hls4_to_rgba_traits<uchar>::functor_type)
    {
        enum { smart_block_dim_y = 8 };
        enum { smart_shift = 4 };
    };

    // Generators for the exported conversion entry points: each expansion
    // defines one host function name(src, dst, stream) that instantiates the
    // corresponding color-conversion functor and runs it through transform().
    #define OPENCV_CUDA_IMPLEMENT_CVTCOLOR(name, traits) \
        void name(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream) \
        { \
            traits::functor_type functor = traits::create_functor(); \
            typedef typename traits::functor_type::argument_type src_t; \
            typedef typename traits::functor_type::result_type dst_t; \
            cv::cuda::device::transform((PtrStepSz<src_t>)src, (PtrStepSz<dst_t>)dst, functor, WithOutMask(), stream); \
        }

    // Single (non-templated) variant.
    #define OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(name) \
        OPENCV_CUDA_IMPLEMENT_CVTCOLOR(name, name ## _traits)

    // 8u / 16u / 32f variants.
    #define OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(name) \
        OPENCV_CUDA_IMPLEMENT_CVTCOLOR(name ## _8u, name ## _traits<uchar>) \
        OPENCV_CUDA_IMPLEMENT_CVTCOLOR(name ## _16u, name ## _traits<ushort>) \
        OPENCV_CUDA_IMPLEMENT_CVTCOLOR(name ## _32f, name ## _traits<float>)

    // 8u / 32f variants only.
    #define OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(name) \
        OPENCV_CUDA_IMPLEMENT_CVTCOLOR(name ## _8u, name ## _traits<uchar>) \
        OPENCV_CUDA_IMPLEMENT_CVTCOLOR(name ## _32f, name ## _traits<float>)

    // 8u / 32f plus the full-range ("_full") variants.
    #define OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(name) \
        OPENCV_CUDA_IMPLEMENT_CVTCOLOR(name ## _8u, name ## _traits<uchar>) \
        OPENCV_CUDA_IMPLEMENT_CVTCOLOR(name ## _32f, name ## _traits<float>) \
        OPENCV_CUDA_IMPLEMENT_CVTCOLOR(name ## _full_8u, name ## _full_traits<uchar>) \
        OPENCV_CUDA_IMPLEMENT_CVTCOLOR(name ## _full_32f, name ## _full_traits<float>)

    // RGB/BGR channel-order and alpha conversions.
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgr_to_rgb)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgr_to_bgra)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgr_to_rgba)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgra_to_bgr)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgra_to_rgb)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgra_to_rgba)

    // 16-bit packed (BGR555/BGR565) conversions.
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(bgr_to_bgr555)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(bgr_to_bgr565)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(rgb_to_bgr555)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(rgb_to_bgr565)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(bgra_to_bgr555)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(bgra_to_bgr565)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(rgba_to_bgr555)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(rgba_to_bgr565)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_rgb)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_rgb)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_bgr)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_bgr)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_rgba)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_rgba)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_bgra)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_bgra)

    // Grayscale conversions.
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(gray_to_bgr)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(gray_to_bgra)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(gray_to_bgr555)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(gray_to_bgr565)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_gray)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_gray)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(rgb_to_gray)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgr_to_gray)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(rgba_to_gray)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgra_to_gray)

    // YUV conversions.
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(rgb_to_yuv)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(rgba_to_yuv)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(rgb_to_yuv4)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(rgba_to_yuv4)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgr_to_yuv)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgra_to_yuv)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgr_to_yuv4)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgra_to_yuv4)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(yuv_to_rgb)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(yuv_to_rgba)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(yuv4_to_rgb)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(yuv4_to_rgba)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(yuv_to_bgr)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(yuv_to_bgra)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(yuv4_to_bgr)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(yuv4_to_bgra)

    // YCrCb conversions.
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(rgb_to_YCrCb)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(rgba_to_YCrCb)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(rgb_to_YCrCb4)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(rgba_to_YCrCb4)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgr_to_YCrCb)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgra_to_YCrCb)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgr_to_YCrCb4)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgra_to_YCrCb4)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(YCrCb_to_rgb)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(YCrCb_to_rgba)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(YCrCb4_to_rgb)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(YCrCb4_to_rgba)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(YCrCb_to_bgr)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(YCrCb_to_bgra)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(YCrCb4_to_bgr)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(YCrCb4_to_bgra)

    // CIE XYZ conversions.
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(rgb_to_xyz)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(rgba_to_xyz)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(rgb_to_xyz4)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(rgba_to_xyz4)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgr_to_xyz)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgra_to_xyz)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgr_to_xyz4)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(bgra_to_xyz4)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(xyz_to_rgb)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(xyz4_to_rgb)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(xyz_to_rgba)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(xyz4_to_rgba)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(xyz_to_bgr)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(xyz4_to_bgr)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(xyz_to_bgra)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL(xyz4_to_bgra)

    // HSV conversions (with full-range variants).
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgb_to_hsv)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgba_to_hsv)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgb_to_hsv4)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgba_to_hsv4)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgr_to_hsv)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgra_to_hsv)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgr_to_hsv4)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgra_to_hsv4)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv_to_rgb)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv_to_rgba)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv4_to_rgb)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv4_to_rgba)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv_to_bgr)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv_to_bgra)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv4_to_bgr)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv4_to_bgra)

    // HLS conversions (with full-range variants).
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgb_to_hls)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgba_to_hls)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgb_to_hls4)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgba_to_hls4)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgr_to_hls)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgra_to_hls)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgr_to_hls4)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgra_to_hls4)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls_to_rgb)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls_to_rgba)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls4_to_rgb)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls4_to_rgba)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls_to_bgr)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls_to_bgra)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls4_to_bgr)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls4_to_bgra)

    // CIE Lab conversions ("l"-prefixed names go through the linear-RGB path).
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_lab)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_lab)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_lab4)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_lab4)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_lab)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_lab)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_lab4)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_lab4)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lrgb_to_lab)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lrgba_to_lab)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lrgb_to_lab4)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lrgba_to_lab4)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lbgr_to_lab)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lbgra_to_lab)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lbgr_to_lab4)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lbgra_to_lab4)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab_to_rgb)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_rgb)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab_to_rgba)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_rgba)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab_to_bgr)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_bgr)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab_to_bgra)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_bgra)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab_to_lrgb)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_lrgb)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab_to_lrgba)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_lrgba)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab_to_lbgr)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_lbgr)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab_to_lbgra)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_lbgra)

    // CIE Luv conversions.
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_luv)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_luv)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_luv4)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_luv4)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_luv)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_luv)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_luv4)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_luv4)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lrgb_to_luv)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lrgba_to_luv)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lrgb_to_luv4)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lrgba_to_luv4)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lbgr_to_luv)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lbgra_to_luv)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lbgr_to_luv4)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(lbgra_to_luv4)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv_to_rgb)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_rgb)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv_to_rgba)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_rgba)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv_to_bgr)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_bgr)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv_to_bgra)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_bgra)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv_to_lrgb)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_lrgb)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv_to_lrgba)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_lrgba)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv_to_lbgr)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_lbgr)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv_to_lbgra)
    OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_lbgra)

    // The generator macros are implementation details; keep them file-local.
    #undef OPENCV_CUDA_IMPLEMENT_CVTCOLOR
    #undef OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ONE
    #undef OPENCV_CUDA_IMPLEMENT_CVTCOLOR_ALL
    #undef OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F
    #undef OPENCV_CUDA_IMPLEMENT_CVTCOLOR_8U32F_FULL
}}} // namespace cv { namespace cuda { namespace cudev

#endif /* CUDA_DISABLER */
the_stack
// Validates input/gradOutput shapes for volumetric (3D) average pooling.
// Accepts a 4D (C, T, H, W) or 5D (N, C, T, H, W) tensor; raises via THArgCheck
// otherwise. Also checks kernel-vs-input size and pad-vs-kernel constraints,
// and (when gradOutput is given) that gradOutput matches the computed output shape.
static inline void THNN_(VolumetricAveragePooling_shapeCheck)(
  THCState *state,
  THCTensor *input,
  THCTensor *gradOutput,
  int kT, int kW, int kH,
  int dT, int dW, int dH,
  int padT, int padW, int padH,
  bool ceil_mode)
{
  int inputSlices;
  int inputTime;
  int inputHeight;
  int inputWidth;

  int ndim = input->nDimension;
  int dimN = 0;
  int dimt = 1;
  int dimh = 2;
  int dimw = 3;

  // 5D input has a leading batch dimension; shift all dim indices by one.
  if (input->nDimension == 5)
  {
    dimN++;
    dimt++;
    dimh++;
    dimw++;
  }

  if (THCTensor_(nDimension)(state, input) == 4)
  {
    THArgCheck(
      input->size[dimw] >= kW && input->size[dimh] >= kH &&
      input->size[dimt] >= kT, 2,
      "input image (T: %d H: %d W: %d) smaller than "
      "kernel size (kT: %d kH: %d kW: %d)",
      input->size[dimt], input->size[dimh], input->size[dimw],
      kT, kH, kW);

    /* sizes */
    inputSlices = THCTensor_(size)(state, input, 0);
    inputTime   = THCTensor_(size)(state, input, 1);
    inputHeight = THCTensor_(size)(state, input, 2);
    inputWidth  = THCTensor_(size)(state, input, 3);
  }
  else if (THCTensor_(nDimension)(state, input) == 5)
  {
    THArgCheck(
      input->size[dimw] >= kW && input->size[dimh] >= kH &&
      input->size[dimt] >= kT, 2,
      "input image (T: %d H: %d W: %d) smaller than "
      "kernel size (kT: %d kH: %d kW: %d)",
      input->size[dimt], input->size[dimh], input->size[dimw],
      kT, kH, kW);

    /* sizes */
    inputSlices = THCTensor_(size)(state, input, 1);
    inputTime   = THCTensor_(size)(state, input, 2);
    inputHeight = THCTensor_(size)(state, input, 3);
    inputWidth  = THCTensor_(size)(state, input, 4);
  }
  else
  {
    THArgCheck(false, 2, "4D or 5D tensor expected, but got: %d",
               input->nDimension);
  }

  // The second argument is the index of padH.
  THArgCheck(kT/2 >= padT && kW/2 >= padW && kH/2 >= padH, 11,
             "pad should not be greater than half of kernel size, but got "
             "padT = %d, padW = %d, padH = %d, kT = %d, kW = %d, kH = %d",
             padT, padW, padH, kT, kW, kH);

  // Standard pooling output-size formula; ceil_mode rounds partial windows up.
  int outputTime;
  int outputHeight;
  int outputWidth;
  if (ceil_mode)
  {
    outputTime   = ceil(float(inputTime   - kT + 2*padT) / float(dT)) + 1;
    outputHeight = ceil(float(inputHeight - kH + 2*padH) / float(dH)) + 1;
    outputWidth  = ceil(float(inputWidth  - kW + 2*padW) / float(dW)) + 1;
  }
  else
  {
    outputTime   = floor(float(inputTime   - kT + 2*padT) / float(dT)) + 1;
    outputHeight = floor(float(inputHeight - kH + 2*padH) / float(dH)) + 1;
    outputWidth  = floor(float(inputWidth  - kW + 2*padW) / float(dW)) + 1;
  }
  if (padT || padW || padH)
  {
    // ensure that the last pooling starts inside the image
    // needed to avoid problems in ceil mode
    if ((outputTime - 1)*dT >= inputTime + padT)
      --outputTime;
    if ((outputHeight - 1)*dH >= inputHeight + padH)
      --outputHeight;
    if ((outputWidth - 1)*dW >= inputWidth + padW)
      --outputWidth;
  }

  if (gradOutput != NULL)
  {
    THCUNN_check_dim_size(state, gradOutput, ndim, dimN, inputSlices);
    THCUNN_check_dim_size(state, gradOutput, ndim, dimt, outputTime);
    THCUNN_check_dim_size(state, gradOutput, ndim, dimh, outputHeight);
    THCUNN_check_dim_size(state, gradOutput, ndim, dimw, outputWidth);
  }
}

// Forward pass of volumetric average pooling. Resizes `output` to the computed
// shape, collapses batch and channel dims into one, and launches the forward
// kernel in chunks because gridDim.z is limited to 65535.
void THNN_(VolumetricAveragePooling_updateOutput)(
  THCState *state,
  THCTensor *input,
  THCTensor *output,
  int kT, int kW, int kH,
  int dT, int dW, int dH,
  int padT, int padW, int padH,
  bool ceil_mode,
  bool count_include_pad)
{
  int batchSize;
  int inputSlices;
  int inputTime;
  int inputHeight;
  int inputWidth;

  // NOTE(review): dimt/dimh/dimw are computed here but never used in this
  // function (shapeCheck recomputes its own) — dead locals kept as-is.
  int dimt = 1;
  int dimh = 2;
  int dimw = 3;

  if (input->nDimension == 5)
  {
    dimt++;
    dimh++;
    dimw++;
  }

  THNN_(VolumetricAveragePooling_shapeCheck)
       (state, input, NULL, kT, kW, kH, dT, dW, dH,
        padT, padW, padH, ceil_mode);

  if (THCTensor_(nDimension)(state, input) == 4)
  {
    /* sizes */
    batchSize   = 1;
    inputSlices = THCTensor_(size)(state, input, 0);
    inputTime   = THCTensor_(size)(state, input, 1);
    inputHeight = THCTensor_(size)(state, input, 2);
    inputWidth  = THCTensor_(size)(state, input, 3);
  }
  else if (THCTensor_(nDimension)(state, input) == 5)
  {
    /* sizes */
    batchSize   = THCTensor_(size)(state, input, 0);
    inputSlices = THCTensor_(size)(state, input, 1);
    inputTime   = THCTensor_(size)(state, input, 2);
    inputHeight = THCTensor_(size)(state, input, 3);
    inputWidth  = THCTensor_(size)(state, input, 4);
  }

  // Same output-size computation as shapeCheck (kept duplicated as in original).
  int outputTime;
  int outputHeight;
  int outputWidth;
  if (ceil_mode)
  {
    outputTime   = ceil(float(inputTime   - kT + 2*padT) / float(dT)) + 1;
    outputHeight = ceil(float(inputHeight - kH + 2*padH) / float(dH)) + 1;
    outputWidth  = ceil(float(inputWidth  - kW + 2*padW) / float(dW)) + 1;
  }
  else
  {
    outputTime   = floor(float(inputTime   - kT + 2*padT) / float(dT)) + 1;
    outputHeight = floor(float(inputHeight - kH + 2*padH) / float(dH)) + 1;
    outputWidth  = floor(float(inputWidth  - kW + 2*padW) / float(dW)) + 1;
  }
  if (padT || padH || padW)
  {
    // ensure that the last pooling starts inside the image
    // needed to avoid problems in ceil mode
    if ((outputTime - 1)*dT >= inputTime + padT)
      --outputTime;
    if ((outputHeight - 1)*dH >= inputHeight + padH)
      --outputHeight;
    if ((outputWidth - 1)*dW >= inputWidth + padW)
      --outputWidth;
  }

  if (input->nDimension == 4) /* 4D */
  {
    /* resize output */
    THCTensor_(resize4d)(state, output, inputSlices,
                         outputTime, outputHeight, outputWidth);
  }
  else /* 5D */
  {
    THCTensor_(resize5d)(state, output, batchSize, inputSlices,
                         outputTime, outputHeight, outputWidth);
  }

  input = THCTensor_(newContiguous)(state, input);

  // Collapse batch and feature dimensions
  THCDeviceTensor<real, 4> cudaInput;
  THCDeviceTensor<real, 4> cudaOutput;
  if (THCTensor_(nDimension)(state, input) == 4)
  {
    cudaInput  = toDeviceTensor<real, 4>(state, input);
    cudaOutput = toDeviceTensor<real, 4>(state, output);
  }
  else
  {
    cudaInput  = toDeviceTensor<real, 5>(state, input).downcastOuter<4>();
    cudaOutput = toDeviceTensor<real, 5>(state, output).downcastOuter<4>();
  }

  // gridDim.z is capped at 65535, so launch in chunks over the collapsed
  // (time x slices x batch) dimension, passing the running offset to the kernel.
  int totalZ = outputTime * inputSlices * batchSize;
  int offsetZ = 0;
  dim3 block(32, 8);
  while (totalZ > 0)
  {
    dim3 grid(THCCeilDiv(outputWidth, static_cast<int>(block.x)),
              THCCeilDiv(outputHeight, static_cast<int>(block.y)),
              totalZ > 65535 ? 65535 : totalZ);

    // Specialized kernels for small kernel widths; generic kernel otherwise.
    switch (kW)
    {
      LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(1);
      LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(2);
      LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(3);
      LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(4);
      LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(5);
      LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(6);
      LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(7);
      default:
        cuda_VolumetricAveragePooling_updateOutput<real, accreal><<<grid, block>>>(
          cudaInput, cudaOutput, kT, kH, kW, dT, dH, dW,
          padT, padH, padW, count_include_pad, offsetZ
        );
        break;
    }
    totalZ -= 65535;
    offsetZ += 65535;
    THCudaCheck(cudaGetLastError());
  }

  THCTensor_(free)(state, input);
}

// Backward pass of volumetric average pooling. Picks between a fast stride-1
// kernel, an atomicAdd kernel for overlapping windows, and a plain kernel.
void THNN_(VolumetricAveragePooling_updateGradInput)(
  THCState *state,
  THCTensor *input,
  THCTensor *gradOutput,
  THCTensor *gradInput,
  int kT, int kW, int kH,
  int dT, int dW, int dH,
  int padT, int padW, int padH,
  bool ceil_mode,
  bool count_include_pad)
{
  THNN_(VolumetricAveragePooling_shapeCheck)
       (state, input, gradOutput, kT, kW, kH, dT, dW, dH,
        padT, padW, padH, ceil_mode);

  // Overlapping windows mean multiple output cells write the same input cell,
  // which requires the atomicAdd kernel below.
  bool kernelsOverlap = (dT < kT) || (dH < kH) || (dW < kW);

  // Resize and initialize result tensor.
  THCTensor_(resizeAs)(state, gradInput, input);
  THCTensor_(zero)(state, gradInput);

  int batchSize;
  int inputSlices;
  int inputTime;
  int inputHeight;
  int inputWidth;

  int outputTime;
  int outputHeight;
  int outputWidth;

  if (THCTensor_(nDimension)(state, input) == 4) /* 4D */
  {
    batchSize = 1;
    inputSlices  = THCTensor_(size)(state, input, 0);
    inputTime    = THCTensor_(size)(state, input, 1);
    inputHeight  = THCTensor_(size)(state, input, 2);
    inputWidth   = THCTensor_(size)(state, input, 3);

    outputTime   = THCTensor_(size)(state, gradOutput, 1);
    outputHeight = THCTensor_(size)(state, gradOutput, 2);
    outputWidth  = THCTensor_(size)(state, gradOutput, 3);
  }
  else
  {
    batchSize    = THCTensor_(size)(state, input, 0);
    inputSlices  = THCTensor_(size)(state, input, 1);
    inputTime    = THCTensor_(size)(state, input, 2);
    inputHeight  = THCTensor_(size)(state, input, 3);
    inputWidth   = THCTensor_(size)(state, input, 4);

    outputTime   = THCTensor_(size)(state, gradOutput, 2);
    outputHeight = THCTensor_(size)(state, gradOutput, 3);
    outputWidth  = THCTensor_(size)(state, gradOutput, 4);
  }

  gradOutput = THCTensor_(newContiguous)(state, gradOutput);

  // Collapse batch and feature dimensions
  THCDeviceTensor<real, 4> cudaGradInput;
  THCDeviceTensor<real, 4> cudaGradOutput;
  if (THCTensor_(nDimension)(state, input) == 4)
  {
    cudaGradInput  = toDeviceTensor<real, 4>(state, gradInput);
    cudaGradOutput = toDeviceTensor<real, 4>(state, gradOutput);
  }
  else
  {
    cudaGradInput =
      toDeviceTensor<real, 5>(state, gradInput).downcastOuter<4>();
    cudaGradOutput =
      toDeviceTensor<real, 5>(state, gradOutput).downcastOuter<4>();
  }

  dim3 block(32, 8);

  // Optimizing for stride 1 is probably only of limited value, but this
  // specialization yields 3x speedup over the atomicAdd implementation.
  // Padding must be 0, otherwise, pool size may change.
  if (dT == 1 && dH == 1 && dW == 1 && padT == 0 && padH == 0 && padW == 0)
  {
    // Stride-1, no-pad fast path: iterate over input cells; each input cell's
    // gradient is the sum of contributing outputs scaled by 1/(kT*kH*kW).
    int totalZ = inputTime * inputSlices * batchSize;
    int offsetZ = 0;
    while (totalZ > 0)
    {
      dim3 grid(THCCeilDiv(inputWidth, static_cast<int>(block.x)),
                THCCeilDiv(inputHeight, static_cast<int>(block.y)),
                totalZ > 65535 ? 65535 : totalZ);
      cuda_VolumetricAveragePooling_updateGradInput_Stride1<real, accreal><<<grid, block>>>(
        cudaGradOutput, cudaGradInput, kT, kH, kW, 1.0f/(kT * kH * kW), offsetZ);
      THCudaCheck(cudaGetLastError());
      totalZ -= 65535;
      offsetZ += 65535;
    }
  }
  else
  {
    int totalZ = outputTime * inputSlices * batchSize;
    int offsetZ = 0;
    while (totalZ > 0)
    {
      dim3 grid(THCCeilDiv(outputWidth, static_cast<int>(block.x)),
                THCCeilDiv(outputHeight, static_cast<int>(block.y)),
                totalZ > 65535 ? 65535 : totalZ);
      if (kernelsOverlap)
      {
        cuda_VolumetricAveragePooling_updateGradInput_atomicAdd<real, accreal><<<grid, block>>>(
          cudaGradOutput, cudaGradInput, kT, kH, kW, dT, dH, dW,
          padT, padH, padW, count_include_pad, offsetZ);
      }
      else
      {
        cuda_VolumetricAveragePooling_updateGradInput<real, accreal><<<grid, block>>>(
          cudaGradOutput, cudaGradInput, kT, kH, kW, dT, dH, dW,
          padT, padH, padW, count_include_pad, offsetZ);
      }
      THCudaCheck(cudaGetLastError());
      totalZ -= 65535;
      offsetZ += 65535;
    }
  }

  THCTensor_(free)(state, gradOutput);
}

#endif
the_stack
* @file reader_impl.cu * @brief cuDF-IO CSV reader class implementation */ #include "csv_common.h" #include "csv_gpu.h" #include <io/comp/io_uncomp.h> #include <io/utilities/column_buffer.hpp> #include <io/utilities/hostdevice_vector.hpp> #include <io/utilities/parsing_utils.cuh> #include <io/utilities/type_conversion.hpp> #include <cudf/detail/utilities/cuda.cuh> #include <cudf/detail/utilities/vector_factories.hpp> #include <cudf/detail/utilities/visitor_overload.hpp> #include <cudf/io/csv.hpp> #include <cudf/io/datasource.hpp> #include <cudf/io/detail/csv.hpp> #include <cudf/io/types.hpp> #include <cudf/strings/replace.hpp> #include <cudf/table/table.hpp> #include <cudf/utilities/error.hpp> #include <cudf/utilities/span.hpp> #include <rmm/cuda_stream_view.hpp> #include <thrust/host_vector.h> #include <algorithm> #include <iostream> #include <memory> #include <numeric> #include <string> #include <tuple> #include <unordered_map> #include <unordered_set> #include <utility> #include <vector> using std::string; using std::vector; using cudf::device_span; using cudf::host_span; using cudf::detail::make_device_uvector_async; namespace cudf { namespace io { namespace detail { namespace csv { using namespace cudf::io::csv; using namespace cudf::io; namespace { /** * @brief Offsets of CSV rows in device memory, accessed through a shrinkable span. * * Row offsets are stored this way to avoid reallocation/copies when discarding front or back * elements. 
*/ class selected_rows_offsets { rmm::device_uvector<uint64_t> all; device_span<uint64_t const> selected; public: selected_rows_offsets(rmm::device_uvector<uint64_t>&& data, device_span<uint64_t const> selected_span) : all{std::move(data)}, selected{selected_span} { } selected_rows_offsets(rmm::cuda_stream_view stream) : all{0, stream}, selected{all} {} operator device_span<uint64_t const>() const { return selected; } void shrink(size_t size) { CUDF_EXPECTS(size <= selected.size(), "New size must be smaller"); selected = selected.subspan(0, size); } void erase_first_n(size_t n) { CUDF_EXPECTS(n <= selected.size(), "Too many elements to remove"); selected = selected.subspan(n, selected.size() - n); } auto size() const { return selected.size(); } auto data() const { return selected.data(); } }; /** * @brief Removes the first and Last quote in the string */ string removeQuotes(string str, char quotechar) { // Exclude first and last quotation char const size_t first_quote = str.find(quotechar); if (first_quote != string::npos) { str.erase(first_quote, 1); } const size_t last_quote = str.rfind(quotechar); if (last_quote != string::npos) { str.erase(last_quote, 1); } return str; } /** * @brief Parse the first row to set the column names in the raw_csv parameter. 
* The first row can be either the header row, or the first data row */ std::vector<std::string> get_column_names(std::vector<char> const& header, parse_options_view const& parse_opts, int header_row, std::string prefix) { std::vector<std::string> col_names; // If there is only a single character then it would be the terminator if (header.size() <= 1) { return col_names; } std::vector<char> first_row = header; int num_cols = 0; bool quotation = false; for (size_t pos = 0, prev = 0; pos < first_row.size(); ++pos) { // Flip the quotation flag if current character is a quotechar if (first_row[pos] == parse_opts.quotechar) { quotation = !quotation; } // Check if end of a column/row else if (pos == first_row.size() - 1 || (!quotation && first_row[pos] == parse_opts.terminator) || (!quotation && first_row[pos] == parse_opts.delimiter)) { // This is the header, add the column name if (header_row >= 0) { // Include the current character, in case the line is not terminated int col_name_len = pos - prev + 1; // Exclude the delimiter/terminator is present if (first_row[pos] == parse_opts.delimiter || first_row[pos] == parse_opts.terminator) { --col_name_len; } // Also exclude '\r' character at the end of the column name if it's // part of the terminator if (col_name_len > 0 && parse_opts.terminator == '\n' && first_row[pos] == '\n' && first_row[pos - 1] == '\r') { --col_name_len; } const string new_col_name(first_row.data() + prev, col_name_len); col_names.push_back(removeQuotes(new_col_name, parse_opts.quotechar)); // Stop parsing when we hit the line terminator; relevant when there is // a blank line following the header. 
In this case, first_row includes // multiple line terminators at the end, as the new recStart belongs to // a line that comes after the blank line(s) if (!quotation && first_row[pos] == parse_opts.terminator) { break; } } else { // This is the first data row, add the automatically generated name col_names.push_back(prefix + std::to_string(num_cols)); } num_cols++; // Skip adjacent delimiters if delim_whitespace is set while (parse_opts.multi_delimiter && pos < first_row.size() && first_row[pos] == parse_opts.delimiter && first_row[pos + 1] == parse_opts.delimiter) { ++pos; } prev = pos + 1; } } return col_names; } template <typename C> void erase_except_last(C& container, rmm::cuda_stream_view stream) { cudf::detail::device_single_thread( [span = device_span<typename C::value_type>{container}] __device__() mutable { span.front() = span.back(); }, stream); container.resize(1, stream); } size_t find_first_row_start(char row_terminator, host_span<char const> data) { // For now, look for the first terminator (assume the first terminator isn't within a quote) // TODO: Attempt to infer this from the data size_t pos = 0; while (pos < data.size() && data[pos] != row_terminator) { ++pos; } return std::min(pos + 1, data.size()); } /** * @brief Finds row positions in the specified input data, and loads the selected data onto GPU. * * This function scans the input data to record the row offsets (relative to the start of the * input data). A row is actually the data/offset between two termination symbols. 
 *
 * @param data Uncompressed input data in host memory
 * @param range_begin Only include rows starting after this position
 * @param range_end Only include rows starting before this position
 * @param skip_rows Number of rows to skip from the start
 * @param num_rows Number of rows to read; -1: all remaining data
 * @param load_whole_file Hint that the entire data will be needed on gpu
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @return Input data and row offsets in the device memory
 */
std::pair<rmm::device_uvector<char>, selected_rows_offsets> load_data_and_gather_row_offsets(
  csv_reader_options const& reader_opts,
  parse_options const& parse_opts,
  std::vector<char>& header,
  host_span<char const> data,
  size_t range_begin,
  size_t range_end,
  size_t skip_rows,
  int64_t num_rows,
  bool load_whole_file,
  rmm::cuda_stream_view stream)
{
  constexpr size_t max_chunk_bytes = 64 * 1024 * 1024;  // 64MB

  size_t buffer_size = std::min(max_chunk_bytes, data.size());
  size_t max_blocks =
    std::max<size_t>((buffer_size / cudf::io::csv::gpu::rowofs_block_bytes) + 1, 2);
  // Host-pinned + device scratch holding one context word per character block.
  hostdevice_vector<uint64_t> row_ctx(max_blocks, stream);
  // Keep one byte before range_begin so the previous row terminator is visible.
  size_t buffer_pos  = std::min(range_begin - std::min(range_begin, sizeof(char)), data.size());
  size_t pos         = std::min(range_begin, data.size());
  size_t header_rows = (reader_opts.get_header() >= 0) ? reader_opts.get_header() + 1 : 0;
  uint64_t ctx       = 0;

  // For compatibility with the previous parser, a row is considered in-range if the
  // previous row terminator is within the given range
  range_end += (range_end < data.size());

  // Reserve memory by allocating and then resetting the size
  rmm::device_uvector<char> d_data{
    (load_whole_file) ? data.size() : std::min(buffer_size * 2, data.size()), stream};
  d_data.resize(0, stream);
  rmm::device_uvector<uint64_t> all_row_offsets{0, stream};
  // Process the input in chunks of at most max_chunk_bytes, carrying the parser
  // context `ctx` across chunk boundaries.
  do {
    size_t target_pos = std::min(pos + max_chunk_bytes, data.size());
    size_t chunk_size = target_pos - pos;

    auto const previous_data_size = d_data.size();
    d_data.resize(target_pos - buffer_pos, stream);
    CUDF_CUDA_TRY(cudaMemcpyAsync(d_data.begin() + previous_data_size,
                                  data.begin() + buffer_pos + previous_data_size,
                                  target_pos - buffer_pos - previous_data_size,
                                  cudaMemcpyDefault,
                                  stream.value()));

    // Pass 1: Count the potential number of rows in each character block for each
    // possible parser state at the beginning of the block.
    uint32_t num_blocks = cudf::io::csv::gpu::gather_row_offsets(parse_opts.view(),
                                                                 row_ctx.device_ptr(),
                                                                 device_span<uint64_t>(),
                                                                 d_data,
                                                                 chunk_size,
                                                                 pos,
                                                                 buffer_pos,
                                                                 data.size(),
                                                                 range_begin,
                                                                 range_end,
                                                                 skip_rows,
                                                                 stream);
    CUDF_CUDA_TRY(cudaMemcpyAsync(row_ctx.host_ptr(),
                                  row_ctx.device_ptr(),
                                  num_blocks * sizeof(uint64_t),
                                  cudaMemcpyDeviceToHost,
                                  stream.value()));
    // Required: host loop below reads row_ctx filled by the async copy above.
    stream.synchronize();

    // Sum up the rows in each character block, selecting the row count that
    // corresponds to the current input context. Also stores the now known input
    // context per character block that will be needed by the second pass.
    for (uint32_t i = 0; i < num_blocks; i++) {
      uint64_t ctx_next = cudf::io::csv::gpu::select_row_context(ctx, row_ctx[i]);
      row_ctx[i]        = ctx;
      ctx               = ctx_next;
    }
    // NOTE(review): the low 2 bits of ctx appear to carry parser state; the row
    // count is kept in the upper bits — hence the shift.
    size_t total_rows = ctx >> 2;
    if (total_rows > skip_rows) {
      // At least one row in range in this batch
      all_row_offsets.resize(total_rows - skip_rows, stream);
      CUDF_CUDA_TRY(cudaMemcpyAsync(row_ctx.device_ptr(),
                                    row_ctx.host_ptr(),
                                    num_blocks * sizeof(uint64_t),
                                    cudaMemcpyHostToDevice,
                                    stream.value()));

      // Pass 2: Output row offsets
      cudf::io::csv::gpu::gather_row_offsets(parse_opts.view(),
                                             row_ctx.device_ptr(),
                                             all_row_offsets,
                                             d_data,
                                             chunk_size,
                                             pos,
                                             buffer_pos,
                                             data.size(),
                                             range_begin,
                                             range_end,
                                             skip_rows,
                                             stream);
      // With byte range, we want to keep only one row out of the specified range
      if (range_end < data.size()) {
        CUDF_CUDA_TRY(cudaMemcpyAsync(row_ctx.host_ptr(),
                                      row_ctx.device_ptr(),
                                      num_blocks * sizeof(uint64_t),
                                      cudaMemcpyDeviceToHost,
                                      stream.value()));
        stream.synchronize();

        size_t rows_out_of_range = 0;
        for (uint32_t i = 0; i < num_blocks; i++) {
          rows_out_of_range += row_ctx[i];
        }
        if (rows_out_of_range != 0) {
          // Keep one row out of range (used to infer length of previous row)
          auto new_row_offsets_size =
            all_row_offsets.size() - std::min(rows_out_of_range - 1, all_row_offsets.size());
          all_row_offsets.resize(new_row_offsets_size, stream);
          // Implies we reached the end of the range
          break;
        }
      }
      // num_rows does not include blank rows
      if (num_rows >= 0) {
        if (all_row_offsets.size() > header_rows + static_cast<size_t>(num_rows)) {
          size_t num_blanks = cudf::io::csv::gpu::count_blank_rows(
            parse_opts.view(), d_data, all_row_offsets, stream);
          if (all_row_offsets.size() - num_blanks > header_rows + static_cast<size_t>(num_rows)) {
            // Got the desired number of rows
            break;
          }
        }
      }
    } else {
      // Discard data (all rows below skip_rows), keeping one character for history
      size_t discard_bytes = std::max(d_data.size(), sizeof(char)) - sizeof(char);
      if (discard_bytes != 0) {
        erase_except_last(d_data, stream);
        buffer_pos += discard_bytes;
      }
    }
    pos = target_pos;
  } while (pos < data.size());

  auto const non_blank_row_offsets =
    io::csv::gpu::remove_blank_rows(parse_opts.view(), d_data, all_row_offsets, stream);
  auto row_offsets = selected_rows_offsets{std::move(all_row_offsets), non_blank_row_offsets};

  // Remove header rows and extract header
  const size_t header_row_index = std::max<size_t>(header_rows, 1) - 1;
  if (header_row_index + 1 < row_offsets.size()) {
    CUDF_CUDA_TRY(cudaMemcpyAsync(row_ctx.host_ptr(),
                                  row_offsets.data() + header_row_index,
                                  2 * sizeof(uint64_t),
                                  cudaMemcpyDeviceToHost,
                                  stream.value()));
    stream.synchronize();

    const auto header_start = buffer_pos + row_ctx[0];
    const auto header_end   = buffer_pos + row_ctx[1];
    CUDF_EXPECTS(header_start <= header_end && header_end <= data.size(),
                 "Invalid csv header location");
    header.assign(data.begin() + header_start, data.begin() + header_end);
    if (header_rows > 0) { row_offsets.erase_first_n(header_rows); }
  }
  // Apply num_rows limit
  if (num_rows >= 0 && static_cast<size_t>(num_rows) < row_offsets.size() - 1) {
    row_offsets.shrink(num_rows + 1);
  }
  return {std::move(d_data), std::move(row_offsets)};
}

// Reads the (possibly decompressed, possibly byte-range-restricted) source into
// device memory and gathers the selected row offsets; also extracts the header.
std::pair<rmm::device_uvector<char>, selected_rows_offsets> select_data_and_row_offsets(
  cudf::io::datasource* source,
  csv_reader_options const& reader_opts,
  std::vector<char>& header,
  parse_options const& parse_opts,
  rmm::cuda_stream_view stream)
{
  auto range_offset      = reader_opts.get_byte_range_offset();
  auto range_size        = reader_opts.get_byte_range_size();
  auto range_size_padded = reader_opts.get_byte_range_size_with_padding();
  auto skip_rows         = reader_opts.get_skiprows();
  auto skip_end_rows     = reader_opts.get_skipfooter();
  auto num_rows          = reader_opts.get_nrows();

  if (range_offset > 0 || range_size > 0) {
    CUDF_EXPECTS(reader_opts.get_compression() == compression_type::NONE,
                 "Reading compressed data using `byte range` is unsupported");
  }

  // Transfer source data to GPU
  if (!source->is_empty()) {
    auto data_size = (range_size_padded != 0) ? range_size_padded : source->size();
    auto buffer    = source->host_read(range_offset, data_size);

    auto h_data = host_span<char const>(  //
      reinterpret_cast<const char*>(buffer->data()),
      buffer->size());

    std::vector<char> h_uncomp_data_owner;
    if (reader_opts.get_compression() != compression_type::NONE) {
      h_uncomp_data_owner = get_uncompressed_data(h_data, reader_opts.get_compression());
      h_data              = h_uncomp_data_owner;
    }
    // None of the parameters for row selection is used, we are parsing the entire file
    const bool load_whole_file =
      range_offset == 0 && range_size == 0 && skip_rows <= 0 && skip_end_rows <= 0 && num_rows == -1;

    // With byte range, find the start of the first data row
    size_t const data_start_offset =
      (range_offset != 0) ? find_first_row_start(parse_opts.terminator, h_data) : 0;

    // TODO: Allow parsing the header outside the mapped range
    CUDF_EXPECTS((range_offset == 0 || reader_opts.get_header() < 0),
                 "byte_range offset with header not supported");

    // Gather row offsets
    auto data_row_offsets = load_data_and_gather_row_offsets(reader_opts,
                                                             parse_opts,
                                                             header,
                                                             h_data,
                                                             data_start_offset,
                                                             (range_size) ? range_size : h_data.size(),
                                                             (skip_rows > 0) ? skip_rows : 0,
                                                             num_rows,
                                                             load_whole_file,
                                                             stream);
    auto& row_offsets     = data_row_offsets.second;
    // Exclude the rows that are to be skipped from the end
    if (skip_end_rows > 0 && static_cast<size_t>(skip_end_rows) < row_offsets.size()) {
      row_offsets.shrink(row_offsets.size() - skip_end_rows);
    }
    return data_row_offsets;
  }
  return {rmm::device_uvector<char>{0, stream}, selected_rows_offsets{stream}};
}

// Applies user-provided dtypes (given as a list) to all enabled columns.
// A single-element list applies that one dtype to every enabled column.
void select_data_types(host_span<data_type const> user_dtypes,
                       host_span<column_parse::flags> column_flags,
                       host_span<data_type> column_types)
{
  if (user_dtypes.empty()) { return; }

  CUDF_EXPECTS(user_dtypes.size() == 1 || user_dtypes.size() == column_flags.size(),
               "Specify data types for all columns in file, or use a dictionary/map");

  for (auto col_idx = 0u; col_idx < column_flags.size(); ++col_idx) {
    if (column_flags[col_idx] & column_parse::enabled) {
      // If it's a single dtype, assign that dtype to all active columns
      auto const& dtype     = user_dtypes.size() == 1 ? user_dtypes[0] : user_dtypes[col_idx];
      column_types[col_idx] = dtype;
      // Reset the inferred flag, no need to infer the types from the data
      column_flags[col_idx] &= ~column_parse::inferred;
    }
  }
}

// Applies user-provided dtypes (given as a name->type map) to matching
// enabled columns; columns not in the map keep their inferred flag.
void get_data_types_from_column_names(std::map<std::string, data_type> const& user_dtypes,
                                      host_span<std::string const> column_names,
                                      host_span<column_parse::flags> column_flags,
                                      host_span<data_type> column_types)
{
  if (user_dtypes.empty()) { return; }
  for (auto col_idx = 0u; col_idx < column_flags.size(); ++col_idx) {
    if (column_flags[col_idx] & column_parse::enabled) {
      auto const col_type_it = user_dtypes.find(column_names[col_idx]);
      if (col_type_it != user_dtypes.end()) {
        // Assign the type from the map
        column_types[col_idx] = col_type_it->second;
        // Reset the inferred flag, no need to infer the types from the data
        column_flags[col_idx] &= ~column_parse::inferred;
      }
    }
  }
}

// Infers a data type for each column still flagged `inferred` from per-column
// value statistics collected on the GPU. Empty input infers STRING.
void infer_column_types(parse_options const& parse_opts,
                        host_span<column_parse::flags const> column_flags,
                        device_span<char const> data,
                        device_span<uint64_t const> row_offsets,
                        int32_t num_records,
                        data_type timestamp_type,
                        host_span<data_type> column_types,
                        rmm::cuda_stream_view stream)
{
  if (num_records == 0) {
    for (auto col_idx = 0u; col_idx < column_flags.size(); ++col_idx) {
      if (column_flags[col_idx] & column_parse::inferred) {
        column_types[col_idx] = data_type(cudf::type_id::STRING);
      }
    }
    return;
  }

  auto const num_inferred_columns =
    std::count_if(column_flags.begin(), column_flags.end(), [](auto& flags) {
      return flags & column_parse::inferred;
    });
  if (num_inferred_columns == 0) { return; }

  auto const column_stats =
    cudf::io::csv::gpu::detect_column_types(parse_opts.view(),
                                            data,
                                            make_device_uvector_async(column_flags, stream),
                                            row_offsets,
                                            num_inferred_columns,
                                            stream);
  // Required: column_stats is consumed on the host below.
  stream.synchronize();

  auto inf_col_idx = 0;
  for (auto col_idx = 0u; col_idx < column_flags.size(); ++col_idx) {
    if (not(column_flags[col_idx] & column_parse::inferred)) { continue; }
    auto const& stats = column_stats[inf_col_idx++];
    unsigned long long int_count_total =
      stats.big_int_count + stats.negative_small_int_count + stats.positive_small_int_count;

    if (stats.null_count == num_records) {
      // Entire column is NULL; allocate the smallest amount of memory
      column_types[col_idx] = data_type(cudf::type_id::INT8);
    } else if (stats.string_count > 0L) {
      column_types[col_idx] = data_type(cudf::type_id::STRING);
    } else if (stats.datetime_count > 0L) {
      column_types[col_idx] = timestamp_type.id() == cudf::type_id::EMPTY
                                ? data_type(cudf::type_id::TIMESTAMP_NANOSECONDS)
                                : timestamp_type;
    } else if (stats.bool_count > 0L) {
      column_types[col_idx] = data_type(cudf::type_id::BOOL8);
    } else if (stats.float_count > 0L ||
               (stats.float_count == 0L && int_count_total > 0L && stats.null_count > 0L)) {
      // The second condition has been added to conform to
      // pandas which states that a column of integers with
      // a single NULL record need to be treated as floats.
      column_types[col_idx] = data_type(cudf::type_id::FLOAT64);
    } else if (stats.big_int_count == 0) {
      column_types[col_idx] = data_type(cudf::type_id::INT64);
    } else if (stats.big_int_count != 0 && stats.negative_small_int_count != 0) {
      // Mixed large and negative integers cannot fit either INT64 or UINT64.
      column_types[col_idx] = data_type(cudf::type_id::STRING);
    } else {
      // Integers are stored as 64-bit to conform to PANDAS
      column_types[col_idx] = data_type(cudf::type_id::UINT64);
    }
  }
}

// Allocates output buffers for the active columns and launches the GPU decode
// of row/column data into them.
std::vector<column_buffer> decode_data(parse_options const& parse_opts,
                                       std::vector<column_parse::flags> const& column_flags,
                                       std::vector<std::string> const& column_names,
                                       device_span<char const> data,
                                       device_span<uint64_t const> row_offsets,
                                       host_span<data_type const> column_types,
                                       int32_t num_records,
                                       int32_t num_actual_columns,
                                       int32_t num_active_columns,
                                       rmm::cuda_stream_view stream,
                                       rmm::mr::device_memory_resource* mr)
{
  // Alloc output; columns' data memory is still expected for empty dataframe
  std::vector<column_buffer> out_buffers;
  out_buffers.reserve(column_types.size());

  for (int col = 0, active_col = 0; col < num_actual_columns; ++col) {
    if (column_flags[col] & column_parse::enabled) {
      // STRING columns get re-allocated later, so use the default resource
      // for their intermediate buffer instead of the caller-provided `mr`.
      const bool is_final_allocation = column_types[active_col].id() != type_id::STRING;
      auto out_buffer =
        column_buffer(column_types[active_col],
                      num_records,
                      true,
                      stream,
                      is_final_allocation ? mr : rmm::mr::get_current_device_resource());

      out_buffer.name         = column_names[col];
      out_buffer.null_count() = UNKNOWN_NULL_COUNT;
      out_buffers.emplace_back(std::move(out_buffer));
      active_col++;
    }
  }

  thrust::host_vector<void*> h_data(num_active_columns);
  thrust::host_vector<bitmask_type*> h_valid(num_active_columns);

  for (int i = 0; i < num_active_columns; ++i) {
    h_data[i]  = out_buffers[i].data();
    h_valid[i] = out_buffers[i].null_mask();
  }

  cudf::io::csv::gpu::decode_row_column_data(parse_opts.view(),
                                             data,
                                             make_device_uvector_async(column_flags, stream),
                                             row_offsets,
                                             make_device_uvector_async(column_types, stream),
                                             make_device_uvector_async(h_data, stream),
                                             make_device_uvector_async(h_valid, stream),
                                             stream);

  return out_buffers;
}

// Resolves each column's final dtype: user-specified (list or name map) first,
// then GPU-based inference for the rest; returns only the active columns' types.
std::vector<data_type> determine_column_types(csv_reader_options const& reader_opts,
                                              parse_options const& parse_opts,
                                              host_span<std::string const> column_names,
                                              device_span<char const> data,
                                              device_span<uint64_t const> row_offsets,
                                              int32_t num_records,
                                              host_span<column_parse::flags> column_flags,
                                              rmm::cuda_stream_view stream)
{
  std::vector<data_type> column_types(column_flags.size());

  std::visit(cudf::detail::visitor_overload{
               [&](const std::vector<data_type>& user_dtypes) {
                 return select_data_types(user_dtypes, column_flags, column_types);
               },
               [&](const std::map<std::string, data_type>& user_dtypes) {
                 return get_data_types_from_column_names(
                   user_dtypes, column_names, column_flags, column_types);
               }},
             reader_opts.get_dtypes());

  infer_column_types(parse_opts,
                     column_flags,
                     data,
                     row_offsets,
                     num_records,
                     reader_opts.get_timestamp_type(),
                     column_types,
                     stream);

  // compact column_types to only include active columns
  std::vector<data_type> active_col_types;
  std::copy_if(column_types.cbegin(),
               column_types.cend(),
               std::back_inserter(active_col_types),
               [&column_flags, &types = std::as_const(column_types)](auto& dtype) {
                 auto const idx = std::distance(types.data(), &dtype);
                 return column_flags[idx] & column_parse::enabled;
               });

  return
active_col_types; } table_with_metadata read_csv(cudf::io::datasource* source, csv_reader_options const& reader_opts, parse_options const& parse_opts, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { std::vector<char> header; auto const data_row_offsets = select_data_and_row_offsets(source, reader_opts, header, parse_opts, stream); auto const& data = data_row_offsets.first; auto const& row_offsets = data_row_offsets.second; // Exclude the end-of-data row from number of rows with actual data auto num_records = std::max(row_offsets.size(), 1ul) - 1; auto column_flags = std::vector<column_parse::flags>(); auto column_names = std::vector<std::string>(); auto num_actual_columns = static_cast<int32_t>(reader_opts.get_names().size()); auto num_active_columns = num_actual_columns; // Check if the user gave us a list of column names if (not reader_opts.get_names().empty()) { column_flags.resize(reader_opts.get_names().size(), column_parse::enabled | column_parse::inferred); column_names = reader_opts.get_names(); } else { column_names = get_column_names( header, parse_opts.view(), reader_opts.get_header(), reader_opts.get_prefix()); num_actual_columns = num_active_columns = column_names.size(); column_flags.resize(num_actual_columns, column_parse::enabled | column_parse::inferred); // Rename empty column names to "Unnamed: col_index" for (size_t col_idx = 0; col_idx < column_names.size(); ++col_idx) { if (column_names[col_idx].empty()) { column_names[col_idx] = string("Unnamed: ") + std::to_string(col_idx); } } // Looking for duplicates std::unordered_map<string, int> col_names_histogram; for (auto& col_name : column_names) { // Operator [] inserts a default-initialized value if the given key is not // present if (++col_names_histogram[col_name] > 1) { if (reader_opts.is_enabled_mangle_dupe_cols()) { // Rename duplicates of column X as X.1, X.2, ...; First appearance // stays as X do { col_name += "." 
+ std::to_string(col_names_histogram[col_name] - 1); } while (col_names_histogram[col_name]++); } else { // All duplicate columns will be ignored; First appearance is parsed const auto idx = &col_name - column_names.data(); column_flags[idx] = column_parse::disabled; } } } // Update the number of columns to be processed, if some might have been // removed if (!reader_opts.is_enabled_mangle_dupe_cols()) { num_active_columns = col_names_histogram.size(); } } // User can specify which columns should be parsed if (!reader_opts.get_use_cols_indexes().empty() || !reader_opts.get_use_cols_names().empty()) { std::fill(column_flags.begin(), column_flags.end(), column_parse::disabled); for (const auto index : reader_opts.get_use_cols_indexes()) { column_flags[index] = column_parse::enabled | column_parse::inferred; } num_active_columns = std::unordered_set<int>(reader_opts.get_use_cols_indexes().begin(), reader_opts.get_use_cols_indexes().end()) .size(); for (const auto& name : reader_opts.get_use_cols_names()) { const auto it = std::find(column_names.begin(), column_names.end(), name); if (it != column_names.end()) { auto curr_it = it - column_names.begin(); if (column_flags[curr_it] == column_parse::disabled) { column_flags[curr_it] = column_parse::enabled | column_parse::inferred; num_active_columns++; } } } } // User can specify which columns should be read as datetime if (!reader_opts.get_parse_dates_indexes().empty() || !reader_opts.get_parse_dates_names().empty()) { for (const auto index : reader_opts.get_parse_dates_indexes()) { column_flags[index] |= column_parse::as_datetime; } for (const auto& name : reader_opts.get_parse_dates_names()) { auto it = std::find(column_names.begin(), column_names.end(), name); if (it != column_names.end()) { column_flags[it - column_names.begin()] |= column_parse::as_datetime; } } } // User can specify which columns should be parsed as hexadecimal if (!reader_opts.get_parse_hex_indexes().empty() || 
!reader_opts.get_parse_hex_names().empty()) { for (const auto index : reader_opts.get_parse_hex_indexes()) { column_flags[index] |= column_parse::as_hexadecimal; } for (const auto& name : reader_opts.get_parse_hex_names()) { auto it = std::find(column_names.begin(), column_names.end(), name); if (it != column_names.end()) { column_flags[it - column_names.begin()] |= column_parse::as_hexadecimal; } } } // Return empty table rather than exception if nothing to load if (num_active_columns == 0) { return {std::make_unique<table>(), {}}; } auto const column_types = determine_column_types( reader_opts, parse_opts, column_names, data, row_offsets, num_records, column_flags, stream); auto metadata = table_metadata{}; auto out_columns = std::vector<std::unique_ptr<cudf::column>>(); out_columns.reserve(column_types.size()); if (num_records != 0) { auto out_buffers = decode_data( // parse_opts, column_flags, column_names, data, row_offsets, column_types, num_records, num_actual_columns, num_active_columns, stream, mr); for (size_t i = 0; i < column_types.size(); ++i) { metadata.column_names.emplace_back(out_buffers[i].name); if (column_types[i].id() == type_id::STRING && parse_opts.quotechar != '\0' && parse_opts.doublequote == true) { // PANDAS' default behavior of enabling doublequote for two consecutive // quotechars in quoted fields results in reduction to a single quotechar // TODO: Would be much more efficient to perform this operation in-place // during the conversion stage const std::string quotechar(1, parse_opts.quotechar); const std::string dblquotechar(2, parse_opts.quotechar); std::unique_ptr<column> col = cudf::make_strings_column(*out_buffers[i]._strings, stream); out_columns.emplace_back( cudf::strings::replace(col->view(), dblquotechar, quotechar, -1, mr)); } else { out_columns.emplace_back(make_column(out_buffers[i], nullptr, stream, mr)); } } } else { // Create empty columns for (size_t i = 0; i < column_types.size(); ++i) { 
out_columns.emplace_back(make_empty_column(column_types[i])); } // Handle empty metadata for (int col = 0; col < num_actual_columns; ++col) { if (column_flags[col] & column_parse::enabled) { metadata.column_names.emplace_back(column_names[col]); } } } return {std::make_unique<table>(std::move(out_columns)), std::move(metadata)}; } /** * @brief Create a serialized trie for N/A value matching, based on the options. */ cudf::detail::trie create_na_trie(char quotechar, csv_reader_options const& reader_opts, rmm::cuda_stream_view stream) { // Default values to recognize as null values static std::vector<std::string> const default_na_values{"", "#N/A", "#N/A N/A", "#NA", "-1.#IND", "-1.#QNAN", "-NaN", "-nan", "1.#IND", "1.#QNAN", "<NA>", "N/A", "NA", "NULL", "NaN", "n/a", "nan", "null"}; if (!reader_opts.is_enabled_na_filter()) { return cudf::detail::trie(0, stream); } std::vector<std::string> na_values = reader_opts.get_na_values(); if (reader_opts.is_enabled_keep_default_na()) { na_values.insert(na_values.end(), default_na_values.begin(), default_na_values.end()); } // Pandas treats empty strings as N/A if empty fields are treated as N/A if (std::find(na_values.begin(), na_values.end(), "") != na_values.end()) { na_values.push_back(std::string(2, quotechar)); } return cudf::detail::create_serialized_trie(na_values, stream); } parse_options make_parse_options(csv_reader_options const& reader_opts, rmm::cuda_stream_view stream) { auto parse_opts = parse_options{}; if (reader_opts.is_enabled_delim_whitespace()) { parse_opts.delimiter = ' '; parse_opts.multi_delimiter = true; } else { parse_opts.delimiter = reader_opts.get_delimiter(); parse_opts.multi_delimiter = false; } parse_opts.terminator = reader_opts.get_lineterminator(); if (reader_opts.get_quotechar() != '\0' && reader_opts.get_quoting() != quote_style::NONE) { parse_opts.quotechar = reader_opts.get_quotechar(); parse_opts.keepquotes = false; parse_opts.doublequote = reader_opts.is_enabled_doublequote(); } else { 
    // Quoting disabled (no quote char, or quote_style::NONE): treat quote
    // characters as ordinary field data.
    parse_opts.quotechar = '\0';
    parse_opts.keepquotes = true;
    parse_opts.doublequote = false;
  }
  parse_opts.skipblanklines = reader_opts.is_enabled_skip_blank_lines();
  parse_opts.comment = reader_opts.get_comment();
  parse_opts.dayfirst = reader_opts.is_enabled_dayfirst();
  parse_opts.decimal = reader_opts.get_decimal();
  parse_opts.thousands = reader_opts.get_thousands();

  // The decimal point and thousands separator are parsed as part of numeric
  // fields, so neither may collide with the field delimiter.
  CUDF_EXPECTS(parse_opts.decimal != parse_opts.delimiter,
               "Decimal point cannot be the same as the delimiter");
  CUDF_EXPECTS(parse_opts.thousands != parse_opts.delimiter,
               "Thousands separator cannot be the same as the delimiter");

  // Handle user-defined true values, whereby field data is substituted with a
  // boolean true or numeric `1` value
  if (reader_opts.get_true_values().size() != 0) {
    parse_opts.trie_true =
      cudf::detail::create_serialized_trie(reader_opts.get_true_values(), stream);
  }

  // Handle user-defined false values, whereby field data is substituted with a
  // boolean false or numeric `0` value
  if (reader_opts.get_false_values().size() != 0) {
    parse_opts.trie_false =
      cudf::detail::create_serialized_trie(reader_opts.get_false_values(), stream);
  }

  // Handle user-defined N/A values, whereby field data is treated as null
  parse_opts.trie_na = create_na_trie(parse_opts.quotechar, reader_opts, stream);

  return parse_opts;
}

}  // namespace

/**
 * @brief Public CSV reader entry point.
 *
 * Builds the low-level parsing options from the user-facing reader options,
 * then delegates to the internal read_csv implementation above.
 */
table_with_metadata read_csv(std::unique_ptr<cudf::io::datasource>&& source,
                             csv_reader_options const& options,
                             rmm::cuda_stream_view stream,
                             rmm::mr::device_memory_resource* mr)
{
  auto parse_options = make_parse_options(options, stream);

  return read_csv(source.get(), options, parse_options, stream, mr);
}

}  // namespace csv
}  // namespace detail
}  // namespace io
}  // namespace cudf
the_stack
namespace faiss { namespace gpu { template < typename T, int Dim, bool InnerContig, typename IndexT, template <typename U> class PtrTraits> __host__ __device__ Tensor<T, Dim, InnerContig, IndexT, PtrTraits>::Tensor() : data_(nullptr) { static_assert(Dim > 0, "must have > 0 dimensions"); for (int i = 0; i < Dim; ++i) { size_[i] = 0; stride_[i] = (IndexT)1; } } template < typename T, int Dim, bool InnerContig, typename IndexT, template <typename U> class PtrTraits> __host__ __device__ Tensor<T, Dim, InnerContig, IndexT, PtrTraits>::Tensor( Tensor<T, Dim, InnerContig, IndexT, PtrTraits>& t) { this->operator=(t); } template < typename T, int Dim, bool InnerContig, typename IndexT, template <typename U> class PtrTraits> __host__ __device__ Tensor<T, Dim, InnerContig, IndexT, PtrTraits>::Tensor( Tensor<T, Dim, InnerContig, IndexT, PtrTraits>&& t) { this->operator=(std::move(t)); } template < typename T, int Dim, bool InnerContig, typename IndexT, template <typename U> class PtrTraits> __host__ __device__ Tensor<T, Dim, InnerContig, IndexT, PtrTraits>& Tensor< T, Dim, InnerContig, IndexT, PtrTraits>::operator=(Tensor<T, Dim, InnerContig, IndexT, PtrTraits>& t) { data_ = t.data_; for (int i = 0; i < Dim; ++i) { size_[i] = t.size_[i]; stride_[i] = t.stride_[i]; } return *this; } template < typename T, int Dim, bool InnerContig, typename IndexT, template <typename U> class PtrTraits> __host__ __device__ Tensor<T, Dim, InnerContig, IndexT, PtrTraits>& Tensor< T, Dim, InnerContig, IndexT, PtrTraits>::operator=(Tensor<T, Dim, InnerContig, IndexT, PtrTraits>&& t) { data_ = t.data_; t.data_ = nullptr; for (int i = 0; i < Dim; ++i) { stride_[i] = t.stride_[i]; t.stride_[i] = 0; size_[i] = t.size_[i]; t.size_[i] = 0; } return *this; } template < typename T, int Dim, bool InnerContig, typename IndexT, template <typename U> class PtrTraits> __host__ __device__ Tensor<T, Dim, InnerContig, IndexT, PtrTraits>::Tensor( DataPtrType data, const IndexT sizes[Dim]) : data_(data) { 
static_assert(Dim > 0, "must have > 0 dimensions"); for (int i = 0; i < Dim; ++i) { size_[i] = sizes[i]; } stride_[Dim - 1] = (IndexT)1; for (int i = Dim - 2; i >= 0; --i) { stride_[i] = stride_[i + 1] * sizes[i + 1]; } } template < typename T, int Dim, bool InnerContig, typename IndexT, template <typename U> class PtrTraits> __host__ __device__ Tensor<T, Dim, InnerContig, IndexT, PtrTraits>::Tensor( DataPtrType data, std::initializer_list<IndexT> sizes) : data_(data) { GPU_FAISS_ASSERT(sizes.size() == Dim); static_assert(Dim > 0, "must have > 0 dimensions"); int i = 0; for (auto s : sizes) { size_[i++] = s; } stride_[Dim - 1] = (IndexT)1; for (int j = Dim - 2; j >= 0; --j) { stride_[j] = stride_[j + 1] * size_[j + 1]; } } template < typename T, int Dim, bool InnerContig, typename IndexT, template <typename U> class PtrTraits> __host__ __device__ Tensor<T, Dim, InnerContig, IndexT, PtrTraits>::Tensor( DataPtrType data, const IndexT sizes[Dim], const IndexT strides[Dim]) : data_(data) { static_assert(Dim > 0, "must have > 0 dimensions"); for (int i = 0; i < Dim; ++i) { size_[i] = sizes[i]; stride_[i] = strides[i]; } } template < typename T, int Dim, bool InnerContig, typename IndexT, template <typename U> class PtrTraits> __host__ void Tensor<T, Dim, InnerContig, IndexT, PtrTraits>::copyFrom( const Tensor<T, Dim, InnerContig, IndexT, PtrTraits>& t, cudaStream_t stream) { // The tensor must be fully contiguous GPU_FAISS_ASSERT(this->isContiguous()); // Size must be the same (since dimensions are checked and // continuity is assumed, we need only check total number of // elements GPU_FAISS_ASSERT(this->numElements() == t.numElements()); if (t.numElements() > 0) { GPU_FAISS_ASSERT(this->data_); GPU_FAISS_ASSERT(t.data()); int ourDev = getDeviceForAddress(this->data_); int tDev = getDeviceForAddress(t.data()); if (tDev == -1) { CUDA_VERIFY(cudaMemcpyAsync( this->data_, t.data(), this->getSizeInBytes(), ourDev == -1 ? 
cudaMemcpyHostToHost : cudaMemcpyHostToDevice, stream)); } else { CUDA_VERIFY(cudaMemcpyAsync( this->data_, t.data(), this->getSizeInBytes(), ourDev == -1 ? cudaMemcpyDeviceToHost : cudaMemcpyDeviceToDevice, stream)); } } } template < typename T, int Dim, bool InnerContig, typename IndexT, template <typename U> class PtrTraits> __host__ void Tensor<T, Dim, InnerContig, IndexT, PtrTraits>::copyTo( Tensor<T, Dim, InnerContig, IndexT, PtrTraits>& t, cudaStream_t stream) { // The tensor must be fully contiguous GPU_FAISS_ASSERT(this->isContiguous()); // Size must be the same (since dimensions are checked and // continuity is assumed, we need only check total number of // elements GPU_FAISS_ASSERT(this->numElements() == t.numElements()); if (t.numElements() > 0) { GPU_FAISS_ASSERT(this->data_); GPU_FAISS_ASSERT(t.data()); int ourDev = getDeviceForAddress(this->data_); int tDev = getDeviceForAddress(t.data()); if (tDev == -1) { CUDA_VERIFY(cudaMemcpyAsync( t.data(), this->data_, this->getSizeInBytes(), ourDev == -1 ? cudaMemcpyHostToHost : cudaMemcpyDeviceToHost, stream)); } else { CUDA_VERIFY(cudaMemcpyAsync( t.data(), this->data_, this->getSizeInBytes(), ourDev == -1 ? cudaMemcpyHostToDevice : cudaMemcpyDeviceToDevice, stream)); } } } template < typename T, int Dim, bool InnerContig, typename IndexT, template <typename U> class PtrTraits> __host__ void Tensor<T, Dim, InnerContig, IndexT, PtrTraits>::copyFrom( const std::vector<T>& v, cudaStream_t stream) { // The tensor must be fully contiguous GPU_FAISS_ASSERT(this->isContiguous()); // Size must be the same GPU_FAISS_ASSERT(this->numElements() == v.size()); if (v.size() > 0) { GPU_FAISS_ASSERT(this->data_); int ourDev = getDeviceForAddress(this->data_); CUDA_VERIFY(cudaMemcpyAsync( this->data_, v.data(), this->getSizeInBytes(), ourDev == -1 ? 
cudaMemcpyHostToHost : cudaMemcpyHostToDevice, stream)); } } template < typename T, int Dim, bool InnerContig, typename IndexT, template <typename U> class PtrTraits> __host__ std::vector<T> Tensor<T, Dim, InnerContig, IndexT, PtrTraits>:: copyToVector(cudaStream_t stream) { // The tensor must be fully contiguous GPU_FAISS_ASSERT(this->isContiguous()); std::vector<T> out(this->numElements()); if (!out.empty()) { int ourDev = getDeviceForAddress(this->data_); if (ourDev == -1) { std::memcpy( out.data(), this->data_, this->numElements() * sizeof(T)); } else { CUDA_VERIFY(cudaMemcpyAsync( out.data(), this->data_, this->numElements() * sizeof(T), cudaMemcpyDeviceToHost, stream)); } } return out; } template < typename T, int Dim, bool InnerContig, typename IndexT, template <typename U> class PtrTraits> template <typename OtherT, int OtherDim> __host__ __device__ bool Tensor<T, Dim, InnerContig, IndexT, PtrTraits>::isSame( const Tensor<OtherT, OtherDim, InnerContig, IndexT, PtrTraits>& rhs) const { if (Dim != OtherDim) { return false; } for (int i = 0; i < Dim; ++i) { if (this->getSize(i) != rhs.getSize(i)) { return false; } if (this->getStride(i) != rhs.getStride(i)) { return false; } } return true; } template < typename T, int Dim, bool InnerContig, typename IndexT, template <typename U> class PtrTraits> template <typename OtherT, int OtherDim> __host__ __device__ bool Tensor<T, Dim, InnerContig, IndexT, PtrTraits>:: isSameSize( const Tensor<OtherT, OtherDim, InnerContig, IndexT, PtrTraits>& rhs) const { if (Dim != OtherDim) { return false; } for (int i = 0; i < Dim; ++i) { if (this->getSize(i) != rhs.getSize(i)) { return false; } } return true; } template < typename T, int Dim, bool InnerContig, typename IndexT, template <typename U> class PtrTraits> template <typename U> __host__ __device__ Tensor<U, Dim, InnerContig, IndexT, PtrTraits> Tensor< T, Dim, InnerContig, IndexT, PtrTraits>::cast() { static_assert(sizeof(U) == sizeof(T), "cast must be to same size object"); 
return Tensor<U, Dim, InnerContig, IndexT, PtrTraits>( reinterpret_cast<U*>(data_), size_, stride_); } template < typename T, int Dim, bool InnerContig, typename IndexT, template <typename U> class PtrTraits> template <typename U> __host__ __device__ const Tensor<U, Dim, InnerContig, IndexT, PtrTraits> Tensor< T, Dim, InnerContig, IndexT, PtrTraits>::cast() const { static_assert(sizeof(U) == sizeof(T), "cast must be to same size object"); return Tensor<U, Dim, InnerContig, IndexT, PtrTraits>( reinterpret_cast<U*>(data_), size_, stride_); } template < typename T, int Dim, bool InnerContig, typename IndexT, template <typename U> class PtrTraits> template <typename U> __host__ __device__ Tensor<U, Dim, InnerContig, IndexT, PtrTraits> Tensor< T, Dim, InnerContig, IndexT, PtrTraits>::castResize() { static_assert(sizeof(U) >= sizeof(T), "only handles greater sizes"); constexpr int kMultiple = sizeof(U) / sizeof(T); GPU_FAISS_ASSERT(canCastResize<U>()); IndexT newSize[Dim]; IndexT newStride[Dim]; for (int i = 0; i < Dim - 1; ++i) { newSize[i] = size_[i]; newStride[i] = stride_[i] / kMultiple; } newStride[Dim - 1] = 1; // this is the same as the old stride newSize[Dim - 1] = size_[Dim - 1] / kMultiple; return Tensor<U, Dim, InnerContig, IndexT, PtrTraits>( reinterpret_cast<U*>(data_), newSize, newStride); } template < typename T, int Dim, bool InnerContig, typename IndexT, template <typename U> class PtrTraits> template <typename U> __host__ __device__ const Tensor<U, Dim, InnerContig, IndexT, PtrTraits> Tensor< T, Dim, InnerContig, IndexT, PtrTraits>::castResize() const { return const_cast<Tensor<T, Dim, InnerContig, IndexT, PtrTraits>*>(this) ->castResize<U>(); } template < typename T, int Dim, bool InnerContig, typename IndexT, template <typename U> class PtrTraits> template <typename U> __host__ __device__ bool Tensor<T, Dim, InnerContig, IndexT, PtrTraits>:: canCastResize() const { static_assert(sizeof(U) >= sizeof(T), "only handles greater sizes"); constexpr int 
kMultiple = sizeof(U) / sizeof(T); // Ensure that the base pointer is sizeof(U) aligned if (((uintptr_t)data_) % sizeof(U) != 0) { return false; } // Check all outer strides for (int i = 0; i < Dim - 1; ++i) { if (stride_[i] % kMultiple != 0) { return false; } } // Check inner size if (size_[Dim - 1] % kMultiple != 0) { return false; } if (stride_[Dim - 1] != 1) { return false; } return true; } template < typename T, int Dim, bool InnerContig, typename IndexT, template <typename U> class PtrTraits> template <typename NewIndexT> __host__ Tensor<T, Dim, InnerContig, NewIndexT, PtrTraits> Tensor< T, Dim, InnerContig, IndexT, PtrTraits>::castIndexType() const { if (sizeof(NewIndexT) < sizeof(IndexT)) { GPU_FAISS_ASSERT(this->canUseIndexType<NewIndexT>()); } NewIndexT newSize[Dim]; NewIndexT newStride[Dim]; for (int i = 0; i < Dim; ++i) { newSize[i] = (NewIndexT)size_[i]; newStride[i] = (NewIndexT)stride_[i]; } return Tensor<T, Dim, InnerContig, NewIndexT, PtrTraits>( data_, newSize, newStride); } template < typename T, int Dim, bool InnerContig, typename IndexT, template <typename U> class PtrTraits> template <typename NewIndexT> __host__ bool Tensor<T, Dim, InnerContig, IndexT, PtrTraits>::canUseIndexType() const { static_assert(sizeof(size_t) >= sizeof(IndexT), "index size too large"); static_assert( sizeof(size_t) >= sizeof(NewIndexT), "new index size too large"); // Find maximum offset that can be calculated // FIXME: maybe also consider offset in bytes? multiply by sizeof(T)? 
size_t maxOffset = 0; for (int i = 0; i < Dim; ++i) { size_t curMaxOffset = (size_t)size_[i] * (size_t)stride_[i]; if (curMaxOffset > maxOffset) { maxOffset = curMaxOffset; } } if (maxOffset > (size_t)std::numeric_limits<NewIndexT>::max()) { return false; } return true; } template < typename T, int Dim, bool InnerContig, typename IndexT, template <typename U> class PtrTraits> __host__ __device__ size_t Tensor<T, Dim, InnerContig, IndexT, PtrTraits>::numElements() const { size_t size = (size_t)getSize(0); for (int i = 1; i < Dim; ++i) { size *= (size_t)getSize(i); } return size; } template < typename T, int Dim, bool InnerContig, typename IndexT, template <typename U> class PtrTraits> __host__ __device__ bool Tensor<T, Dim, InnerContig, IndexT, PtrTraits>:: isContiguous() const { long prevSize = 1; for (int i = Dim - 1; i >= 0; --i) { if (getSize(i) != (IndexT)1) { if (getStride(i) == prevSize) { prevSize *= getSize(i); } else { return false; } } } return true; } template < typename T, int Dim, bool InnerContig, typename IndexT, template <typename U> class PtrTraits> __host__ __device__ bool Tensor<T, Dim, InnerContig, IndexT, PtrTraits>:: isConsistentlySized(int i) const { if (i == 0 && getStride(i) > 0 && getSize(i) > 0) { return true; } else if ( (i > 0) && (i < Dim) && (getStride(i) > 0) && ((getStride(i - 1) / getStride(i)) >= getSize(i))) { return true; } return false; } template < typename T, int Dim, bool InnerContig, typename IndexT, template <typename U> class PtrTraits> __host__ __device__ bool Tensor<T, Dim, InnerContig, IndexT, PtrTraits>:: isConsistentlySized() const { for (int i = 0; i < Dim; ++i) { if (!isConsistentlySized(i)) { return false; } } return true; } template < typename T, int Dim, bool InnerContig, typename IndexT, template <typename U> class PtrTraits> __host__ __device__ bool Tensor<T, Dim, InnerContig, IndexT, PtrTraits>:: isContiguousDim(int i) const { return (i == Dim - 1) || // just in case ((i < Dim - 1) && ((getStride(i) / 
getStride(i + 1)) == getSize(i + 1))); } template < typename T, int Dim, bool InnerContig, typename IndexT, template <typename U> class PtrTraits> __host__ __device__ Tensor<T, Dim, InnerContig, IndexT, PtrTraits> Tensor< T, Dim, InnerContig, IndexT, PtrTraits>::transpose(int dim1, int dim2) const { GPU_FAISS_ASSERT(dim1 >= 0 && dim1 < Dim); GPU_FAISS_ASSERT(dim2 >= 0 && dim2 < Dim); // If a tensor is innermost contiguous, one cannot transpose the innermost // dimension if (InnerContig) { GPU_FAISS_ASSERT(dim1 != Dim - 1 && dim2 != Dim - 1); } IndexT newSize[Dim]; IndexT newStride[Dim]; for (int i = 0; i < Dim; ++i) { newSize[i] = size_[i]; newStride[i] = stride_[i]; } IndexT tmp = newSize[dim1]; newSize[dim1] = newSize[dim2]; newSize[dim2] = tmp; tmp = newStride[dim1]; newStride[dim1] = newStride[dim2]; newStride[dim2] = tmp; return Tensor<T, Dim, true, IndexT, PtrTraits>(data_, newSize, newStride); } template < typename T, int Dim, bool InnerContig, typename IndexT, template <typename U> class PtrTraits> __host__ __device__ Tensor<T, Dim, false, IndexT, PtrTraits> Tensor< T, Dim, InnerContig, IndexT, PtrTraits>::transposeInnermost(int dim1) const { GPU_FAISS_ASSERT(dim1 >= 0 && dim1 < Dim); // We are exchanging with the innermost dimension int dim2 = 1; IndexT newSize[Dim]; IndexT newStride[Dim]; for (int i = 0; i < Dim; ++i) { newSize[i] = size_[i]; newStride[i] = stride_[i]; } IndexT tmp = newSize[dim1]; newSize[dim1] = newSize[dim2]; newSize[dim2] = tmp; tmp = newStride[dim1]; newStride[dim1] = newStride[dim2]; newStride[dim2] = tmp; return Tensor<T, Dim, false, IndexT, PtrTraits>(data_, newSize, newStride); } template < typename T, int Dim, bool InnerContig, typename IndexT, template <typename U> class PtrTraits> template <int NewDim> __host__ __device__ Tensor<T, NewDim, InnerContig, IndexT, PtrTraits> Tensor< T, Dim, InnerContig, IndexT, PtrTraits>::upcastOuter() { // Can only create tensors of greater dimension static_assert(NewDim > Dim, "Can only upcast 
to greater dim"); IndexT newSize[NewDim]; IndexT newStride[NewDim]; int shift = NewDim - Dim; for (int i = 0; i < NewDim; ++i) { if (i < shift) { // These are the extended dimensions newSize[i] = (IndexT)1; newStride[i] = size_[0] * stride_[0]; } else { // Shift the remaining dimensions newSize[i] = size_[i - shift]; newStride[i] = stride_[i - shift]; } } return Tensor<T, NewDim, InnerContig, IndexT, PtrTraits>( data_, newSize, newStride); } template < typename T, int Dim, bool InnerContig, typename IndexT, template <typename U> class PtrTraits> template <int NewDim> __host__ __device__ Tensor<T, NewDim, InnerContig, IndexT, PtrTraits> Tensor< T, Dim, InnerContig, IndexT, PtrTraits>::upcastInner() { // Can only create tensors of greater dimension static_assert(NewDim > Dim, "Can only upcast to greater dim"); IndexT newSize[NewDim]; IndexT newStride[NewDim]; for (int i = 0; i < NewDim; ++i) { if (i < Dim) { // Existing dimensions get copied over newSize[i] = size_[i]; newStride[i] = stride_[i]; } else { // Extended dimensions newSize[i] = (IndexT)1; newStride[i] = (IndexT)1; } } return Tensor<T, NewDim, InnerContig, IndexT, PtrTraits>( data_, newSize, newStride); } template < typename T, int Dim, bool InnerContig, typename IndexT, template <typename U> class PtrTraits> template <int NewDim> __host__ __device__ Tensor<T, NewDim, InnerContig, IndexT, PtrTraits> Tensor< T, Dim, InnerContig, IndexT, PtrTraits>::downcastOuter() { // Can only create tensors of lesser dimension static_assert(NewDim < Dim, "Can only downcast to lesser dim"); // We can't downcast non-contiguous tensors, since it leaves // garbage data in the tensor. The tensor needs to be contiguous // in all of the dimensions we are collapsing (no padding in // them). 
    // Every dimension being collapsed must be contiguous with its inner
    // neighbor, otherwise the flattened view would cover padding bytes.
    for (int i = 0; i < Dim - NewDim; ++i) {
        bool cont = isContiguousDim(i);
        GPU_FAISS_ASSERT(cont);
    }

    IndexT newSize[NewDim];
    IndexT newStride[NewDim];

    int ignoredDims = Dim - NewDim;
    IndexT collapsedSize = 1;

    for (int i = 0; i < Dim; ++i) {
        if (i < ignoredDims) {
            // Collapse these dimensions
            collapsedSize *= getSize(i);
        } else {
            // Non-collapsed dimensions
            if (i == ignoredDims) {
                // This is the first non-collapsed dimension
                newSize[i - ignoredDims] = collapsedSize * getSize(i);
            } else {
                // Subsequent non-collapsed dimensions
                newSize[i - ignoredDims] = getSize(i);
            }

            newStride[i - ignoredDims] = getStride(i);
        }
    }

    return Tensor<T, NewDim, InnerContig, IndexT, PtrTraits>(
            data_, newSize, newStride);
}

// Collapses the innermost Dim - NewDim dimensions into the last kept one
// (counterpart of downcastOuter, operating at the inner end).
template <
        typename T,
        int Dim,
        bool InnerContig,
        typename IndexT,
        template <typename U> class PtrTraits>
template <int NewDim>
__host__ __device__ Tensor<T, NewDim, InnerContig, IndexT, PtrTraits> Tensor<
        T, Dim, InnerContig, IndexT, PtrTraits>::downcastInner() {
    // Can only create tensors of lesser dimension
    static_assert(NewDim < Dim, "Can only downcast to lesser dim");

    // We can't downcast non-contiguous tensors, since it leaves
    // garbage data in the tensor. The tensor needs to be contiguous
    // in all of the dimensions we are collapsing (no padding in
    // them).
    // All dimensions being collapsed (NewDim .. Dim-1) must be contiguous
    // with their inner neighbor.
    for (int i = NewDim; i < Dim; ++i) {
        GPU_FAISS_ASSERT(isContiguousDim(i));
    }

    IndexT newSize[NewDim];
    IndexT newStride[NewDim];

    IndexT collapsedSize = 1;

    for (int i = Dim - 1; i >= 0; --i) {
        if (i >= NewDim) {
            // Collapse these dimensions
            collapsedSize *= getSize(i);
        } else {
            // Non-collapsed dimensions
            if (i == NewDim - 1) {
                // This is the first non-collapsed dimension
                newSize[i] = collapsedSize * getSize(i);
                newStride[i] = getStride(Dim - 1);
            } else {
                // Subsequent non-collapsed dimensions
                newSize[i] = getSize(i);
                newStride[i] = getStride(i);
            }
        }
    }

    return Tensor<T, NewDim, InnerContig, IndexT, PtrTraits>(
            data_, newSize, newStride);
}

// Returns a SubDim-dimensional view rooted at pointer `at`, adopting this
// tensor's innermost SubDim sizes and strides.
template <
        typename T,
        int Dim,
        bool InnerContig,
        typename IndexT,
        template <typename U> class PtrTraits>
template <int SubDim>
__host__ __device__ Tensor<T, SubDim, InnerContig, IndexT, PtrTraits> Tensor<
        T, Dim, InnerContig, IndexT, PtrTraits>::view(DataPtrType at) {
    static_assert(
            SubDim >= 1 && SubDim < Dim, "can only create view of lesser dim");

    IndexT viewSizes[SubDim];
    IndexT viewStrides[SubDim];

    for (int i = 0; i < SubDim; ++i) {
        viewSizes[i] = size_[Dim - SubDim + i];
        viewStrides[i] = stride_[Dim - SubDim + i];
    }

    return Tensor<T, SubDim, InnerContig, IndexT, PtrTraits>(
            at, viewSizes, viewStrides);
}

// Convenience overload: view anchored at this tensor's own base pointer.
template <
        typename T,
        int Dim,
        bool InnerContig,
        typename IndexT,
        template <typename U> class PtrTraits>
template <int SubDim>
__host__ __device__ Tensor<T, SubDim, InnerContig, IndexT, PtrTraits> Tensor<
        T, Dim, InnerContig, IndexT, PtrTraits>::view() {
    return view<SubDim>(data_);
}

// Restricts the outermost dimension (dim 0) to [start, start + size).
template <
        typename T,
        int Dim,
        bool InnerContig,
        typename IndexT,
        template <typename U> class PtrTraits>
__host__ __device__ Tensor<T, Dim, InnerContig, IndexT, PtrTraits> Tensor<
        T, Dim, InnerContig, IndexT, PtrTraits>::narrowOutermost(
        IndexT start,
        IndexT size) {
    return this->narrow(0, start, size);
}

// Returns a view restricted to [start, start + size) along dimension `dim`;
// the base pointer advances by start * stride_[dim], strides are unchanged.
template <
        typename T,
        int Dim,
        bool InnerContig,
        typename IndexT,
        template <typename U> class PtrTraits>
__host__ __device__ Tensor<T, Dim, InnerContig, IndexT, PtrTraits> Tensor<
        T, Dim, InnerContig, IndexT, PtrTraits>::narrow(
        int dim,
        IndexT start,
        IndexT size) {
    DataPtrType newData = data_;

    GPU_FAISS_ASSERT(
            start >= 0 && start < size_[dim] && (start + size) <= size_[dim]);

    if (start > 0) {
        newData += (size_t)start * stride_[dim];
    }

    IndexT newSize[Dim];
    for (int i = 0; i < Dim; ++i) {
        if (i == dim) {
            // (redundant with the range assert above; kept as-is)
            GPU_FAISS_ASSERT(start + size <= size_[dim]);
            newSize[i] = size;
        } else {
            newSize[i] = size_[i];
        }
    }

    // If we were innermost contiguous before, we are still innermost contiguous
    return Tensor<T, Dim, InnerContig, IndexT, PtrTraits>(
            newData, newSize, stride_);
}

// Reshapes a fully contiguous tensor into a NewDim-dimensional view; the
// total element count must be preserved.
template <
        typename T,
        int Dim,
        bool InnerContig,
        typename IndexT,
        template <typename U> class PtrTraits>
template <int NewDim>
__host__ __device__ Tensor<T, NewDim, InnerContig, IndexT, PtrTraits> Tensor<
        T, Dim, InnerContig, IndexT, PtrTraits>::view(
        std::initializer_list<IndexT> sizes) {
    GPU_FAISS_ASSERT(this->isContiguous());

    GPU_FAISS_ASSERT(sizes.size() == NewDim);

    // The total size of the new view must be the same as the total size
    // of the old view
    size_t curSize = numElements();
    size_t newSize = 1;

    for (auto s : sizes) {
        newSize *= s;
    }

    GPU_FAISS_ASSERT(curSize == newSize);

    return Tensor<T, NewDim, true, IndexT, PtrTraits>(data(), sizes);
}

} // namespace gpu
} // namespace faiss
the_stack
// Expands `data_im` (a mini-batch of `parallel_imgs` images) into the column
// buffer `data_col`, sampling input positions through the learned offset
// field `data_offset` (deformable im2col). Geometry parameters (kernel size,
// padding, stride, dilation) follow the standard convolution definitions;
// `deformable_group` partitions the channels into groups that each share one
// offset field. Launches asynchronously on `stream`.
void deformable_im2col(DArrayLite data_im, DArrayLite data_offset,
                       const int channels, const int height, const int width,
                       const int ksize_h, const int ksize_w, const int pad_h,
                       const int pad_w, const int stride_h, const int stride_w,
                       const int dilation_h, const int dilation_w,
                       const int parallel_imgs, const int deformable_group,
                       DArrayLite data_col, cudaStream_t stream) {
  // num_axes should be smaller than block size
  // todo: check parallel_imgs is correctly passed in
  int height_col =
      (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
  int width_col =
      (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
  // One CUDA thread per (channel, output position, image) triple.
  int num_kernels = channels * height_col * width_col * parallel_imgs;
  int channel_per_deformable_group = channels / deformable_group;

  PARROTS_DISPATCH_FLOATING_TYPES_AND_HALF(
      data_im.elemType().prim(), ([&] {
        deformable_im2col_gpu_kernel<scalar_t>
            <<<GET_BLOCKS(num_kernels), THREADS_PER_BLOCK, 0, stream>>>(
                num_kernels, data_im.ptr<scalar_t>(),
                data_offset.ptr<scalar_t>(), height, width, ksize_h, ksize_w,
                pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
                channel_per_deformable_group, parallel_imgs, channels,
                deformable_group, height_col, width_col,
                data_col.ptr<scalar_t>());
      }));

  // Kernel launches do not report errors directly; surface launch failures.
  PARROTS_CUDA_CHECK(cudaGetLastError());
}

// Scatters gradients from the column buffer `data_col` back onto the input
// image gradient `grad_im` (deformable col2im), using the same offset field
// that was used in the forward im2col. Launches asynchronously on `stream`.
void deformable_col2im(DArrayLite data_col, DArrayLite data_offset,
                       const int channels, const int height, const int width,
                       const int ksize_h, const int ksize_w, const int pad_h,
                       const int pad_w, const int stride_h, const int stride_w,
                       const int dilation_h, const int dilation_w,
                       const int parallel_imgs, const int deformable_group,
                       DArrayLite grad_im, cudaStream_t stream) {
  int height_col =
      (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
  int width_col =
      (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
  // One thread per column-buffer element: (channel, kh, kw, out position, img).
  int num_kernels =
      channels * ksize_h * ksize_w * height_col * width_col * parallel_imgs;
  int channel_per_deformable_group = channels / deformable_group;

  PARROTS_DISPATCH_FLOATING_TYPES_AND_HALF(
      data_col.elemType().prim(), ([&] {
        deformable_col2im_gpu_kernel<<<GET_BLOCKS(num_kernels),
                                       THREADS_PER_BLOCK, 0, stream>>>(
            num_kernels, data_col.ptr<scalar_t>(), data_offset.ptr<scalar_t>(),
            channels, height, width, ksize_h, ksize_w, pad_h, pad_w, stride_h,
            stride_w, dilation_h, dilation_w, channel_per_deformable_group,
            parallel_imgs, deformable_group, height_col, width_col,
            grad_im.ptr<scalar_t>());
      }));

  PARROTS_CUDA_CHECK(cudaGetLastError());
}

// Computes the gradient with respect to the offset field: accumulates the
// contribution of every column-buffer element into `grad_offset`. The offset
// field has 2 * ksize_h * ksize_w channels (x and y displacement per tap) per
// deformable group. Launches asynchronously on `stream`.
void deformable_col2im_coord(
    DArrayLite data_col, DArrayLite data_im, DArrayLite data_offset,
    const int channels, const int height, const int width, const int ksize_h,
    const int ksize_w, const int pad_h, const int pad_w, const int stride_h,
    const int stride_w, const int dilation_h, const int dilation_w,
    const int parallel_imgs, const int deformable_group, DArrayLite grad_offset,
    cudaStream_t stream) {
  int height_col =
      (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
  int width_col =
      (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
  // One thread per offset-gradient element (2 coords per kernel tap).
  int num_kernels = height_col * width_col * 2 * ksize_h * ksize_w *
                    deformable_group * parallel_imgs;
  int channel_per_deformable_group =
      channels * ksize_h * ksize_w / deformable_group;

  PARROTS_DISPATCH_FLOATING_TYPES_AND_HALF(
      data_col.elemType().prim(), ([&] {
        deformable_col2im_coord_gpu_kernel<<<GET_BLOCKS(num_kernels),
                                             THREADS_PER_BLOCK, 0, stream>>>(
            num_kernels, data_col.ptr<scalar_t>(), data_im.ptr<scalar_t>(),
            data_offset.ptr<scalar_t>(), channels, height, width, ksize_h,
            ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
            channel_per_deformable_group, parallel_imgs,
            2 * ksize_h * ksize_w * deformable_group, deformable_group,
            height_col, width_col, grad_offset.ptr<scalar_t>());
      }));

  PARROTS_CUDA_CHECK(cudaGetLastError());
}

// Validates the shapes and hyper-parameters of a deformable convolution.
// `input` may be 3-D (C, H, W) or 4-D (N, C, H, W); `gradOutput` is optional
// (pass NULL in the forward pass). Raises via PARROTS_CHECKARGS on failure.
void deform_conv_shape_check(DArrayLite input, DArrayLite offset,
                             DArrayLite* gradOutput, DArrayLite weight, int kH,
                             int kW, int dH, int dW, int padH, int padW,
                             int dilationH, int dilationW, int group,
                             int deformable_group) {
  PARROTS_CHECKARGS(weight.ndims() == 4)
      << "4D weight tensor (nOutputPlane,nInputPlane,kH,kW) expected, but got: "
      << weight.ndims();

  PARROTS_CHECKARGS(weight.isContiguous())
      << "weight tensor has to be contiguous";

  PARROTS_CHECKARGS(kW > 0 && kH > 0)
      << "kernel size should be greater than zero, but got kH: " << kH
      << " kW: " << kW;

  PARROTS_CHECKARGS(weight.dim(2) == kH && weight.dim(3) == kW)
      << "kernel size should be consistent with weight, but got kH: " << kH
      << " kW: " << kW << " weight.dim(2): " << weight.dim(2)
      << ", weight.dim(3): " << weight.dim(3);

  PARROTS_CHECKARGS(dW > 0 && dH > 0)
      << "stride should be greater than zero, but got dH: " << dH
      << " dW: " << dW;

  PARROTS_CHECKARGS(dilationW > 0 && dilationH > 0)
      << "dilation should be greater than 0, but got dilationH: " << dilationH
      << " dilationW: " << dilationW;

  int ndim = input.ndims();
  // Axis indices for (channels, height, width); shifted by one when the
  // input carries a leading batch dimension.
  int dimf = 0;
  int dimh = 1;
  int dimw = 2;

  if (ndim == 4) {
    dimf++;
    dimh++;
    dimw++;
  }

  PARROTS_CHECKARGS(ndim == 3 || ndim == 4)
      << "3D or 4D input tensor expected but got: " << ndim;

  size_t nInputPlane = weight.dim(1) * group;
  size_t inputHeight = input.dim(dimh);
  size_t inputWidth = input.dim(dimw);
  size_t nOutputPlane = weight.dim(0);
  size_t outputHeight =
      (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1;
  size_t outputWidth =
      (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1;

  PARROTS_CHECKARGS(nInputPlane % deformable_group == 0)
      << "input channels must divide deformable group size";

  // Fixed: both output dimensions must be valid (was `||`, which accepted a
  // degenerate output as long as a single dimension was >= 1).
  PARROTS_CHECKARGS(outputWidth >= 1 && outputHeight >= 1)
      << "Given input size: (" << nInputPlane << " x " << inputHeight << " x "
      << inputWidth << "). Calculated output size: (" << nOutputPlane << " x "
      << outputHeight << " x " << outputWidth << "). Output size is too small";

  // Fixed: use dimf/dimh/dimw so the checks address the correct axes for 3-D
  // (unbatched) inputs as well; identical to the old hard-coded indices for
  // 4-D inputs. This mirrors the gradOutput checks below.
  PARROTS_CHECKARGS(input.dim(dimf) == nInputPlane)
      << "invalid number of input planes, expected: " << nInputPlane
      << ", but got: " << input.dim(dimf);

  PARROTS_CHECKARGS(inputHeight >= kH && inputWidth >= kW)
      << "input image is smaller than kernel";

  PARROTS_CHECKARGS(offset.dim(dimh) == outputHeight &&
                    offset.dim(dimw) == outputWidth)
      << "invalid spatial dim of offset, expected height: " << outputHeight
      << " width: " << outputWidth << ", but got height: " << offset.dim(dimh)
      << " width: " << offset.dim(dimw);

  PARROTS_CHECKARGS(offset.dim(dimf) == deformable_group * 2 * kH * kW)
      << "invalid number of channels of offset";

  if (gradOutput != NULL) {
    PARROTS_CHECKARGS(gradOutput->dim(dimf) == nOutputPlane)
        << "invalid number of gradOutput planes, expected: " << nOutputPlane
        << ", but got: " << gradOutput->dim(dimf);

    PARROTS_CHECKARGS(gradOutput->dim(dimh) == outputHeight &&
                      gradOutput->dim(dimw) == outputWidth)
        << "invalid dim of gradOutput, expected height: " << outputHeight
        << " width: " << outputWidth
        << " , but got height: " << gradOutput->dim(dimh)
        << " width: " << gradOutput->dim(dimw);
  }
}

// Forward pass of deformable convolution. Processes the batch in chunks of
// `im2col_step` images: each chunk is expanded into `columns` via
// deformable_im2col, multiplied with the (grouped) weight via gemm into a
// scratch buffer, and finally transposed/copied into `output`.
void DeformConvForwardCUDAKernelLauncher(
    DArrayLite input, DArrayLite weight, DArrayLite offset, DArrayLite output,
    DArrayLite columns, DArrayLite ones, int kW, int kH, int dW, int dH,
    int padW, int padH, int dilationW, int dilationH, int group,
    int deformable_group, int im2col_step, CudaContext& ctx,
    cudaStream_t stream) {
  // todo: resize columns to include im2col: done
  // todo: add im2col_step as input
  // todo: add new output buffer and transpose it to output (or directly
  // transpose output) todo: possibly change data indexing because of
  // parallel_imgs
  deform_conv_shape_check(input, offset, NULL, weight, kH, kW, dH, dW, padH,
                          padW, dilationH, dilationW, group, deformable_group);

  int batch = 1;
  if (input.ndims() == 3) {
    // Force batch: promote unbatched (C, H, W) input to (1, C, H, W).
    batch = 0;
    input = input.view({1, input.dim(0), input.dim(1), input.dim(2)});
    offset = offset.view({1, offset.dim(0), offset.dim(1), offset.dim(2)});
  }

  // todo: assert batchsize dividable by im2col_step

  size_t batchSize = input.dim(0);
  size_t nInputPlane = input.dim(1);
  size_t inputHeight = input.dim(2);
  size_t inputWidth = input.dim(3);

  size_t nOutputPlane = weight.dim(0);

  size_t outputWidth =
      (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1;
  size_t outputHeight =
      (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1;

  PARROTS_CHECKARGS(offset.dim(0) == batchSize)
      << "invalid batch size of offset";

  output = output.view({batchSize / im2col_step, im2col_step, nOutputPlane,
                        outputHeight, outputWidth});

  columns = ctx.createDArrayLite(
      input.elemType(), DArrayShape(nInputPlane * kW * kH,
                                    im2col_step * outputHeight * outputWidth));
  columns.setZeros(ctx.getStream());

  if (ones.ndims() != 2 ||
      ones.dim(0) * ones.dim(1) < outputHeight * outputWidth) {
    ones = ctx.createDArrayLite(input.elemType(),
                                DArrayShape(outputHeight, outputWidth));
    fill(ctx, ones, *toScalar(1));
  }

  input = input.view({batchSize / im2col_step, im2col_step, nInputPlane,
                      inputHeight, inputWidth});
  offset = offset.view({batchSize / im2col_step, im2col_step,
                        deformable_group * 2 * kH * kW, outputHeight,
                        outputWidth});

  auto output_buffer = ctx.createDArrayLite(
      input.elemType(), DArrayShape(batchSize / im2col_step, nOutputPlane,
                                    im2col_step * outputHeight, outputWidth));
  output_buffer.setZeros(ctx.getStream());
  output_buffer = output_buffer.view(
      {output_buffer.dim(0), group, output_buffer.dim(1) / group,
       output_buffer.dim(2) * output_buffer.dim(3)});

  for (size_t elt = 0; elt < batchSize / im2col_step; elt++) {
    deformable_im2col(input[elt], offset[elt], nInputPlane, inputHeight,
                      inputWidth, kH, kW, padH, padW, dH, dW, dilationH,
                      dilationW, im2col_step, deformable_group, columns,
                      stream);

    // Split into `group` blocks for the grouped matrix multiply.
    columns = columns.view({group, columns.dim(0) / group, columns.dim(1)});
    weight = weight.view(
        {group, nOutputPlane / group, nInputPlane / group * kH * kW});

    for (size_t g = 0; g < group; g++) {
      auto output_g = output_buffer[elt][g];
      auto weight_g = weight[g];
      auto columns_g = columns[g];
      gemm(ctx, 1, false, weight_g, false, columns_g, 1, output_g);
    }

    // Fixed: restore columns/weight to their pre-loop shapes before the next
    // iteration; otherwise the grouped re-view above would operate on
    // already-grouped dims from the second iteration on (matches the handling
    // in the backward launchers).
    columns = columns.view({columns.dim(0) * columns.dim(1), columns.dim(2)});
    weight = weight.view(
        {weight.dim(0) * weight.dim(1), nInputPlane / group, kH, kW});
  }

  output_buffer = output_buffer.view(
      {output_buffer.dim(0), output_buffer.dim(1) * output_buffer.dim(2),
       output_buffer.dim(3)});

  // Reorder (n_chunk, C_out, step, H, W) -> (n_chunk, step, C_out, H, W) so
  // the result matches the caller's (N, C_out, H, W) layout.
  output_buffer = output_buffer.view({batchSize / im2col_step, nOutputPlane,
                                      im2col_step, outputHeight, outputWidth});
  output_buffer = transpose(ctx, output_buffer, 1, 2);
  if (!output_buffer.isContiguous()) {
    output_buffer = ctx.cloneDArrayLite(output_buffer);
  }
  copy(ctx, output, output_buffer);

  output = output.view({batchSize, nOutputPlane, outputHeight, outputWidth});

  input = input.view({batchSize, nInputPlane, inputHeight, inputWidth});
  offset = offset.view(
      {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth});

  if (batch == 0) {
    // Drop the artificial batch dimension added above.
    output = output.view({nOutputPlane, outputHeight, outputWidth});
    input = input.view({nInputPlane, inputHeight, inputWidth});
    offset = offset.view({offset.dim(1), offset.dim(2), offset.dim(3)});
  }
}

// Backward pass w.r.t. the input and the offset field. For each im2col chunk,
// back-projects gradOutput through the (grouped) weight into `columns`, then
// scatters `columns` into gradOffset (deformable_col2im_coord) and gradInput
// (deformable_col2im).
void DeformConvBackwardInputCUDAKernelLauncher(
    DArrayLite input, DArrayLite offset, DArrayLite gradOutput,
    DArrayLite gradInput, DArrayLite gradOffset, DArrayLite weight,
    DArrayLite columns, int kW, int kH, int dW, int dH, int padW, int padH,
    int dilationW, int dilationH, int group, int deformable_group,
    int im2col_step, CudaContext& ctx, cudaStream_t stream) {
  deform_conv_shape_check(input, offset, &gradOutput, weight, kH, kW, dH, dW,
                          padH, padW, dilationH, dilationW, group,
                          deformable_group);

  int batch = 1;
  if (input.ndims() == 3) {
    // Force batch: promote unbatched tensors to 4-D.
    batch = 0;
    input = input.view({1, input.dim(0), input.dim(1), input.dim(2)});
    offset = offset.view({1, offset.dim(0), offset.dim(1), offset.dim(2)});
    gradOutput = gradOutput.view(
        {1, gradOutput.dim(0), gradOutput.dim(1), gradOutput.dim(2)});
  }

  size_t batchSize = input.dim(0);
  size_t nInputPlane = input.dim(1);
  size_t inputHeight = input.dim(2);
  size_t inputWidth = input.dim(3);

  size_t nOutputPlane = weight.dim(0);

  size_t outputWidth =
      (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1;
  size_t outputHeight =
      (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1;

  PARROTS_CHECKARGS(offset.dim(0) == batchSize)
      << "invalid batch size of offset";

  gradInput = gradInput.view({batchSize, nInputPlane, inputHeight, inputWidth});
  columns = ctx.createDArrayLite(
      input.elemType(), DArrayShape(nInputPlane * kW * kH,
                                    im2col_step * outputHeight * outputWidth));
  columns.setZeros(ctx.getStream());

  // change order of grad output: (N, C_out, H, W) -> chunked
  // (n_chunk, C_out, step, H, W) so each chunk is contiguous per channel.
  gradOutput = gradOutput.view({batchSize / im2col_step, im2col_step,
                                nOutputPlane, outputHeight, outputWidth});
  gradOutput = transpose(ctx, gradOutput, 1, 2);
  if (!gradOutput.isContiguous()) {
    gradOutput = ctx.cloneDArrayLite(gradOutput);
  }

  gradInput = gradInput.view({batchSize / im2col_step, im2col_step, nInputPlane,
                              inputHeight, inputWidth});
  input = input.view({batchSize / im2col_step, im2col_step, nInputPlane,
                      inputHeight, inputWidth});
  gradOffset = gradOffset.view({batchSize / im2col_step, im2col_step,
                                deformable_group * 2 * kH * kW, outputHeight,
                                outputWidth});
  offset = offset.view({batchSize / im2col_step, im2col_step,
                        deformable_group * 2 * kH * kW, outputHeight,
                        outputWidth});

  for (size_t elt = 0; elt < batchSize / im2col_step; elt++) {
    // divide into groups
    columns = columns.view({group, columns.dim(0) / group, columns.dim(1)});
    weight = weight.view({group, weight.dim(0) / group,
                          weight.dim(1) * weight.dim(2) * weight.dim(3)});
    gradOutput = gradOutput.view(
        {gradOutput.dim(0), group, gradOutput.dim(1) / group,
         gradOutput.dim(2) * gradOutput.dim(3) * gradOutput.dim(4)});

    for (size_t g = 0; g < group; g++) {
      auto columns_g = columns[g];
      gemm(ctx, 1, true, weight[g], false, gradOutput[elt][g], 0, columns_g);
    }

    columns = columns.view({columns.dim(0) * columns.dim(1), columns.dim(2)});
    gradOutput = gradOutput.view({gradOutput.dim(0),
                                  gradOutput.dim(1) * gradOutput.dim(2),
                                  im2col_step, outputHeight, outputWidth});
    // Fixed: restore weight to its original 4-D shape; without this the 3-D
    // re-view above indexes a non-existent dim(3) on the second iteration
    // (mirrors the gradWeight restore in the parameters launcher).
    weight = weight.view(
        {weight.dim(0) * weight.dim(1), nInputPlane / group, kH, kW});

    deformable_col2im_coord(columns, input[elt], offset[elt], nInputPlane,
                            inputHeight, inputWidth, kH, kW, padH, padW, dH, dW,
                            dilationH, dilationW, im2col_step, deformable_group,
                            gradOffset[elt], stream);

    deformable_col2im(columns, offset[elt], nInputPlane, inputHeight,
                      inputWidth, kH, kW, padH, padW, dH, dW, dilationH,
                      dilationW, im2col_step, deformable_group, gradInput[elt],
                      stream);
  }

  // Undo the chunked transpose so gradOutput is (N, C_out, H, W) again.
  gradOutput = transpose(ctx, gradOutput, 1, 2);
  if (!gradOutput.isContiguous()) {
    gradOutput = ctx.cloneDArrayLite(gradOutput);
  }
  gradOutput =
      gradOutput.view({batchSize, nOutputPlane, outputHeight, outputWidth});

  gradInput = gradInput.view({batchSize, nInputPlane, inputHeight, inputWidth});
  input = input.view({batchSize, nInputPlane, inputHeight, inputWidth});
  gradOffset = gradOffset.view(
      {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth});
  offset = offset.view(
      {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth});

  if (batch == 0) {
    gradOutput = gradOutput.view({nOutputPlane, outputHeight, outputWidth});
    input = input.view({nInputPlane, inputHeight, inputWidth});
    gradInput = gradInput.view({nInputPlane, inputHeight, inputWidth});
    // Fixed: use gradOffset's own dims — the original read offset.dim(3)
    // after offset had already been collapsed to 3-D above.
    gradOffset =
        gradOffset.view({gradOffset.dim(1), gradOffset.dim(2),
                         gradOffset.dim(3)});
    offset = offset.view({offset.dim(1), offset.dim(2), offset.dim(3)});
  }
}

// Backward pass w.r.t. the weight. For each im2col chunk, re-runs the
// deformable im2col expansion and accumulates gradOutput x columns^T into
// gradWeight (scaled by `scale`) per group.
void DeformConvBackwardParametersCUDAKernelLauncher(
    DArrayLite input, DArrayLite offset, DArrayLite gradOutput,
    DArrayLite gradWeight, DArrayLite columns, DArrayLite ones, int kW, int kH,
    int dW, int dH, int padW, int padH, int dilationW, int dilationH, int group,
    int deformable_group, float scale, int im2col_step, CudaContext& ctx,
    cudaStream_t stream) {
  // todo: transpose and reshape outGrad
  // todo: reshape columns
  // todo: add im2col_step as input

  deform_conv_shape_check(input, offset, &gradOutput, gradWeight, kH, kW, dH,
                          dW, padH, padW, dilationH, dilationW, group,
                          deformable_group);

  int batch = 1;
  if (input.ndims() == 3) {
    // Force batch: promote unbatched tensors to 4-D.
    batch = 0;
    input = input.view({1, input.dim(0), input.dim(1), input.dim(2)});
    gradOutput = gradOutput.view(
        {1, gradOutput.dim(0), gradOutput.dim(1), gradOutput.dim(2)});
  }

  size_t batchSize = input.dim(0);
  size_t nInputPlane = input.dim(1);
  size_t inputHeight = input.dim(2);
  size_t inputWidth = input.dim(3);

  size_t nOutputPlane = gradWeight.dim(0);

  size_t outputWidth =
      (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1;
  size_t outputHeight =
      (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1;

  PARROTS_CHECKARGS(offset.dim(0) == batchSize)
      << "invalid batch size of offset";

  columns = ctx.createDArrayLite(
      input.elemType(), DArrayShape(nInputPlane * kW * kH,
                                    im2col_step * outputHeight * outputWidth));
  columns.setZeros(ctx.getStream());

  // Chunk gradOutput as (n_chunk, C_out, step, H, W) for the grouped gemm.
  gradOutput = gradOutput.view({batchSize / im2col_step, im2col_step,
                                nOutputPlane, outputHeight, outputWidth});
  gradOutput = transpose(ctx, gradOutput, 1, 2);
  if (!gradOutput.isContiguous()) {
    gradOutput = ctx.cloneDArrayLite(gradOutput);
  }

  auto gradOutputBuffer = ctx.cloneDArrayLite(gradOutput);
  gradOutputBuffer =
      gradOutputBuffer.view({batchSize / im2col_step, nOutputPlane,
                             im2col_step * outputHeight, outputWidth});

  // Restore gradOutput itself to the caller's layout; the buffer above is
  // what the accumulation loop consumes.
  gradOutput = transpose(ctx, gradOutput, 1, 2);
  if (!gradOutput.isContiguous()) {
    gradOutput = ctx.cloneDArrayLite(gradOutput);
  }
  gradOutput =
      gradOutput.view({batchSize, nOutputPlane, outputHeight, outputWidth});

  input = input.view({batchSize / im2col_step, im2col_step, nInputPlane,
                      inputHeight, inputWidth});
  offset = offset.view({batchSize / im2col_step, im2col_step,
                        deformable_group * 2 * kH * kW, outputHeight,
                        outputWidth});

  for (size_t elt = 0; elt < batchSize / im2col_step; elt++) {
    deformable_im2col(input[elt], offset[elt], nInputPlane, inputHeight,
                      inputWidth, kH, kW, padH, padW, dH, dW, dilationH,
                      dilationW, im2col_step, deformable_group, columns,
                      stream);

    // divide into group
    gradOutputBuffer = gradOutputBuffer.view(
        {gradOutputBuffer.dim(0), group, gradOutputBuffer.dim(1) / group,
         gradOutputBuffer.dim(2) * gradOutputBuffer.dim(3)});
    columns = columns.view({group, columns.dim(0) / group, columns.dim(1)});
    gradWeight = gradWeight.view({group, gradWeight.dim(0) / group,
                                  gradWeight.dim(1) * gradWeight.dim(2) *
                                      gradWeight.dim(3)});

    for (int g = 0; g < group; g++) {
      auto gradWeight_g = gradWeight[g];
      gemm(ctx, scale, false, gradOutputBuffer[elt][g], true, columns[g], 1,
           gradWeight_g);
    }

    // Restore pre-loop shapes for the next iteration.
    gradOutputBuffer = gradOutputBuffer.view(
        {gradOutputBuffer.dim(0),
         gradOutputBuffer.dim(1) * gradOutputBuffer.dim(2),
         im2col_step * outputHeight, outputWidth});
    columns = columns.view({columns.dim(0) * columns.dim(1), columns.dim(2)});
    gradWeight = gradWeight.view({gradWeight.dim(0) * gradWeight.dim(1),
                                  nInputPlane / group, kH, kW});
  }

  input = input.view({batchSize, nInputPlane, inputHeight, inputWidth});
  offset = offset.view(
      {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth});

  if (batch == 0) {
    gradOutput = gradOutput.view({nOutputPlane, outputHeight, outputWidth});
    input = input.view({nInputPlane, inputHeight, inputWidth});
  }
}
the_stack
******************************************************************************/ #pragma once #include "radixsort_reduction_kernel.cu" #include "radixsort_spine_kernel.cu" #include "radixsort_scanscatter_kernel.cu" namespace b40c { /****************************************************************************** * Single-grid (SG) kernel for LSB radix sorting ******************************************************************************/ // Target threadblock occupancy for bulk scan/scatter kernel #define B40C_SM20_SG_OCCUPANCY() (6) // 8 threadblocks on GF100 #define B40C_SM12_SG_OCCUPANCY() (1) // 8 threadblocks on GT200 #define B40C_SM10_SG_OCCUPANCY() (1) // 8 threadblocks on G80 #define B40C_RADIXSORT_SG_OCCUPANCY(version) ((version >= 200) ? B40C_SM20_SG_OCCUPANCY() : \ (version >= 120) ? B40C_SM12_SG_OCCUPANCY() : \ B40C_SM10_SG_OCCUPANCY()) // Number of 256-element loads to rake per raking cycle #define B40C_SM20_SG_LOG_LOADS_PER_CYCLE(K, V) (1) // 2 loads on GF100 #define B40C_SM12_SG_LOG_LOADS_PER_CYCLE(K, V) (1) // 2 loads on GT200 #define B40C_SM10_SG_LOG_LOADS_PER_CYCLE(K, V) (1) // 2 loads on G80 #define B40C_RADIXSORT_SG_LOG_LOADS_PER_CYCLE(version, K, V) ((version >= 200) ? B40C_SM20_SG_LOG_LOADS_PER_CYCLE(K, V) : \ (version >= 120) ? B40C_SM12_SG_LOG_LOADS_PER_CYCLE(K, V) : \ B40C_SM10_SG_LOG_LOADS_PER_CYCLE(K, V)) // Number of raking cycles per tile #define B40C_SM20_SG_LOG_CYCLES_PER_TILE(K, V) (0) // 1 cycle on GF100 #define B40C_SM12_SG_LOG_CYCLES_PER_TILE(K, V) (0) // 1 cycle on GT200 #define B40C_SM10_SG_LOG_CYCLES_PER_TILE(K, V) (0) // 1 cycle on G80 #define B40C_RADIXSORT_SG_LOG_CYCLES_PER_TILE(version, K, V) ((version >= 200) ? B40C_SM20_SG_LOG_CYCLES_PER_TILE(K, V) : \ (version >= 120) ? 
B40C_SM12_SG_LOG_CYCLES_PER_TILE(K, V) : \ B40C_SM10_SG_LOG_CYCLES_PER_TILE(K, V)) // Number of raking threads per raking cycle #define B40C_SM20_SG_LOG_RAKING_THREADS() (B40C_LOG_WARP_THREADS + 2) // 2 raking warps on GF100 #define B40C_SM12_SG_LOG_RAKING_THREADS() (B40C_LOG_WARP_THREADS + 2) // 1 raking warp on GT200 #define B40C_SM10_SG_LOG_RAKING_THREADS() (B40C_LOG_WARP_THREADS + 2) // 4 raking warps on G80 #define B40C_RADIXSORT_SG_LOG_RAKING_THREADS(version) ((version >= 200) ? B40C_SM20_SG_LOG_RAKING_THREADS() : \ (version >= 120) ? B40C_SM12_SG_LOG_RAKING_THREADS() : \ B40C_SM10_SG_LOG_RAKING_THREADS()) // Number of elements per tile #define B40C_RADIXSORT_SG_LOG_TILE_ELEMENTS(version, K, V) (B40C_RADIXSORT_SG_LOG_LOADS_PER_CYCLE(version, K, V) + B40C_RADIXSORT_SG_LOG_CYCLES_PER_TILE(version, K, V) + B40C_RADIXSORT_LOG_THREADS + 1) #define B40C_RADIXSORT_SG_TILE_ELEMENTS(version, K, V) (1 << B40C_RADIXSORT_SG_LOG_TILE_ELEMENTS(version, K, V)) __device__ __forceinline__ int LoadCG(int* d_ptr) { int retval; GlobalLoad<int, CG>::Ld(retval, d_ptr, 0); return retval; } /** * Implements a global, lock-free software barrier between CTAs */ __device__ __forceinline__ void GlobalBarrier(int* d_sync) { // Threadfence and syncthreads to make sure global writes are visible before // thread-0 reports in with its sync counter __threadfence(); __syncthreads(); if (blockIdx.x == 0) { // Report in ourselves if (threadIdx.x == 0) { d_sync[blockIdx.x] = 1; } __syncthreads(); // Wait for everyone else to report in for (int peer_block = threadIdx.x; peer_block < gridDim.x; peer_block += B40C_RADIXSORT_THREADS) { while (LoadCG(d_sync + peer_block) == 0) { __threadfence_block(); } } __syncthreads(); // Let everyone know it's safe to read their prefix sums for (int peer_block = threadIdx.x; peer_block < gridDim.x; peer_block += B40C_RADIXSORT_THREADS) { d_sync[peer_block] = 0; } } else { if (threadIdx.x == 0) { // Report in d_sync[blockIdx.x] = 1; // Wait for acknowledgement 
// Tail of the preceding global-barrier routine; its opening brace is above this
// excerpt, so only the spin-wait/release epilogue is visible here.
		while (LoadCG(d_sync + blockIdx.x) == 1) {
			__threadfence_block();
		}
	}
	__syncthreads();
}
}


/**
 * One digit-place sorting pass: block-local reduction into d_spine, a software
 * global barrier across the whole grid while block 0 scans the spine, then a
 * scan/scatter of keys (and values) from the "in" buffers to the "out" buffers.
 *
 * Requires a cooperatively-sized launch: every block must be resident, since
 * blocks busy-wait on each other through d_sync.
 */
template <
	typename K, typename V, int BIT, int RADIX_BITS, int RADIX_DIGITS,
	int TILE_ELEMENTS, typename PreprocessFunctor, typename PostprocessFunctor,
	int REDUCTION_LANES, int LOG_REDUCTION_PARTIALS_PER_LANE,
	int REDUCTION_PARTIALS_PER_LANE, int SPINE_PARTIALS_PER_SEG,
	int SCAN_LANES_PER_LOAD, int LOADS_PER_CYCLE, int CYCLES_PER_TILE,
	int SCAN_LANES_PER_CYCLE, int RAKING_THREADS,
	int LOG_RAKING_THREADS_PER_LANE, int RAKING_THREADS_PER_LANE,
	int PARTIALS_PER_SEG, int PARTIALS_PER_ROW, int ROWS_PER_LANE>
__device__ __forceinline__ void DistributionSortingPass(
	int* d_sync,
	int* d_spine,
	K* d_in_keys,
	K* d_out_keys,
	V* d_in_values,
	V* d_out_values,
	int block_offset,
	int block_elements,
	const int &out_of_bounds,
	const int &extra_elements,
	int spine_elements,
	int *base_partial,
	int *raking_partial,
	int *spine_raking_partial,
	int *encoded_reduction_col,
	int *smem_pool,
	int warpscan[SCAN_LANES_PER_CYCLE][3][RAKING_THREADS_PER_LANE],
	int digit_carry[RADIX_DIGITS],
	int digit_scan[2][RADIX_DIGITS],
	int digit_counts[CYCLES_PER_TILE][LOADS_PER_CYCLE][RADIX_DIGITS],
	int spine_scan[2][B40C_WARP_THREADS])
{
	//-------------------------------------------------------------------------
	// Reduction: count digit occurrences for this block's range into d_spine
	//-------------------------------------------------------------------------

	ReductionPass<K, CG, BIT, RADIX_BITS, RADIX_DIGITS, REDUCTION_LANES,
		LOG_REDUCTION_PARTIALS_PER_LANE, REDUCTION_PARTIALS_PER_LANE,
		PreprocessFunctor, false>(
			d_in_keys, d_spine, block_offset, encoded_reduction_col, smem_pool,
			out_of_bounds + extra_elements);

	//-------------------------------------------------------------------------
	// Global Barrier + Scan spine
	//-------------------------------------------------------------------------

	// Threadfence and syncthreads to make sure global writes are visible
	// before thread-0 reports in with its sync counter
	__threadfence();
	__syncthreads();

	if (blockIdx.x == 0) {

		// Report in ourselves
		if (threadIdx.x == 0) {
			d_sync[blockIdx.x] = 1;
		}

		// Wait for everyone else to report in
		for (int peer_block = threadIdx.x;
			peer_block < gridDim.x;
			peer_block += B40C_RADIXSORT_THREADS)
		{
			while (LoadCG(d_sync + peer_block) == 0) {
				__threadfence_block();
			}
		}
		__syncthreads();

		// Scan the spine in blocks of tile_elements
		int spine_carry = 0;
		int spine_offset = 0;
		while (spine_offset < spine_elements) {
			SrtsScanTile<CG, SPINE_PARTIALS_PER_SEG>(
				base_partial, spine_raking_partial, spine_scan,
				reinterpret_cast<int4 *>(&d_spine[spine_offset]),
				reinterpret_cast<int4 *>(&d_spine[spine_offset]),
				spine_carry);
			spine_offset += B40C_RADIXSORT_SPINE_TILE_ELEMENTS;
		}

		// Threadfence and syncthreads to make sure global writes are visible
		// before everyone reports back with their sync counters
		__threadfence();
		__syncthreads();

		// Let everyone know it's safe to read their prefix sums
		for (int peer_block = threadIdx.x;
			peer_block < gridDim.x;
			peer_block += B40C_RADIXSORT_THREADS)
		{
			d_sync[peer_block] = 0;
		}

	} else {

		if (threadIdx.x == 0) {
			// Report in
			d_sync[blockIdx.x] = 1;
			// Wait for acknowledgement (block 0 resets our flag to 0)
			while (LoadCG(d_sync + blockIdx.x) == 1) {
				__threadfence_block();
			}
		}
		__syncthreads();
	}

	//-------------------------------------------------------------------------
	// Scan/Scatter: compute local ranks and scatter keys/values to out buffers
	//-------------------------------------------------------------------------

	ScanScatterDigitPass<K, V, CG, BIT, RADIX_BITS, RADIX_DIGITS,
		SCAN_LANES_PER_LOAD, LOADS_PER_CYCLE, CYCLES_PER_TILE,
		SCAN_LANES_PER_CYCLE, RAKING_THREADS, LOG_RAKING_THREADS_PER_LANE,
		RAKING_THREADS_PER_LANE, PARTIALS_PER_SEG, PARTIALS_PER_ROW,
		ROWS_PER_LANE, TILE_ELEMENTS, PreprocessFunctor, PostprocessFunctor>(
			d_spine, d_in_keys, d_in_values, d_out_keys, d_out_values,
			smem_pool, warpscan, digit_carry, digit_scan, digit_counts,
			base_partial, raking_partial, block_offset, out_of_bounds,
			extra_elements);

	//-------------------------------------------------------------------------
	// Global barrier (scatter of this pass must finish before the next pass
	// reduces over the scattered data)
	//-------------------------------------------------------------------------

	GlobalBarrier(d_sync);
}


/**
 * Pass dispatch by parity: computes the digit-place offset BIT for this pass
 * and ping-pongs the key/value buffers (odd passes read keys1/write keys0).
 */
template <
	int PASS, typename K, typename V, int RADIX_BITS, int RADIX_DIGITS,
	int TILE_ELEMENTS, typename PreprocessFunctor, typename PostprocessFunctor,
	int REDUCTION_LANES, int LOG_REDUCTION_PARTIALS_PER_LANE,
	int REDUCTION_PARTIALS_PER_LANE, int SPINE_PARTIALS_PER_SEG,
	int SCAN_LANES_PER_LOAD, int LOADS_PER_CYCLE, int CYCLES_PER_TILE,
	int SCAN_LANES_PER_CYCLE, int RAKING_THREADS,
	int LOG_RAKING_THREADS_PER_LANE, int RAKING_THREADS_PER_LANE,
	int PARTIALS_PER_SEG, int PARTIALS_PER_ROW, int ROWS_PER_LANE>
__device__ __forceinline__ void DistributionSortingPass(
	int* d_sync,
	int* d_spine,
	K* d_keys0,
	K* d_keys1,
	V* d_values0,
	V* d_values1,
	int block_offset,
	int block_elements,
	const int &out_of_bounds,
	const int &extra_elements,
	int spine_elements,
	int *base_partial,
	int *raking_partial,
	int *spine_raking_partial,
	int *encoded_reduction_col,
	int *smem_pool,
	int warpscan[SCAN_LANES_PER_CYCLE][3][RAKING_THREADS_PER_LANE],
	int digit_carry[RADIX_DIGITS],
	int digit_scan[2][RADIX_DIGITS],
	int digit_counts[CYCLES_PER_TILE][LOADS_PER_CYCLE][RADIX_DIGITS],
	int spine_scan[2][B40C_WARP_THREADS])
{
	// Each pass sorts the next RADIX_BITS-wide digit place
	const int BIT = PASS * RADIX_BITS;
	SuppressUnusedConstantWarning(BIT);

	if (PASS & 0x1) {

		// Odd pass (flip keys0/keys1)
		DistributionSortingPass<K, V, BIT, RADIX_BITS, RADIX_DIGITS,
			TILE_ELEMENTS, PreprocessFunctor, PostprocessFunctor,
			REDUCTION_LANES, LOG_REDUCTION_PARTIALS_PER_LANE,
			REDUCTION_PARTIALS_PER_LANE, SPINE_PARTIALS_PER_SEG,
			SCAN_LANES_PER_LOAD, LOADS_PER_CYCLE, CYCLES_PER_TILE,
			SCAN_LANES_PER_CYCLE, RAKING_THREADS,
			LOG_RAKING_THREADS_PER_LANE, RAKING_THREADS_PER_LANE,
			PARTIALS_PER_SEG, PARTIALS_PER_ROW, ROWS_PER_LANE>(
				d_sync, d_spine, d_keys1, d_keys0, d_values1, d_values0,
				block_offset, block_elements, out_of_bounds, extra_elements,
				spine_elements, base_partial, raking_partial,
				spine_raking_partial, encoded_reduction_col, smem_pool,
				warpscan, digit_carry, digit_scan, digit_counts, spine_scan);

	} else {

		// Even pass
		DistributionSortingPass<K, V, BIT, RADIX_BITS, RADIX_DIGITS,
			TILE_ELEMENTS, PreprocessFunctor, PostprocessFunctor,
			REDUCTION_LANES, LOG_REDUCTION_PARTIALS_PER_LANE,
			REDUCTION_PARTIALS_PER_LANE, SPINE_PARTIALS_PER_SEG,
			SCAN_LANES_PER_LOAD, LOADS_PER_CYCLE, CYCLES_PER_TILE,
			SCAN_LANES_PER_CYCLE, RAKING_THREADS,
			LOG_RAKING_THREADS_PER_LANE, RAKING_THREADS_PER_LANE,
			PARTIALS_PER_SEG, PARTIALS_PER_ROW, ROWS_PER_LANE>(
				d_sync, d_spine, d_keys0, d_keys1, d_values0, d_values1,
				block_offset, block_elements, out_of_bounds, extra_elements,
				spine_elements, base_partial, raking_partial,
				spine_raking_partial, encoded_reduction_col, smem_pool,
				warpscan, digit_carry, digit_scan, digit_counts, spine_scan);
	}
}


/**
 * Pass dispatch by position: selects which key pre-/post-processing functors
 * apply on this pass (pre-processing only on the first pass, post-processing
 * only on the last, both when there is a single pass, neither in the middle).
 */
template <
	int PASSES, int PASS, typename K, typename V, int RADIX_BITS,
	int RADIX_DIGITS, int TILE_ELEMENTS,
	typename PreprocessFunctor, typename PostprocessFunctor,
	int REDUCTION_LANES, int LOG_REDUCTION_PARTIALS_PER_LANE,
	int REDUCTION_PARTIALS_PER_LANE, int SPINE_PARTIALS_PER_SEG,
	int SCAN_LANES_PER_LOAD, int LOADS_PER_CYCLE, int CYCLES_PER_TILE,
	int SCAN_LANES_PER_CYCLE, int RAKING_THREADS,
	int LOG_RAKING_THREADS_PER_LANE, int RAKING_THREADS_PER_LANE,
	int PARTIALS_PER_SEG, int PARTIALS_PER_ROW, int ROWS_PER_LANE>
__device__ __forceinline__ void DistributionSortingPass(
	int* d_sync,
	int* d_spine,
	K* d_keys0,
	K* d_keys1,
	V* d_values0,
	V* d_values1,
	int block_offset,
	int block_elements,
	const int &out_of_bounds,
	const int &extra_elements,
	int spine_elements,
	int *base_partial,
	int *raking_partial,
	int *spine_raking_partial,
	int *encoded_reduction_col,
	int *smem_pool,
	int warpscan[SCAN_LANES_PER_CYCLE][3][RAKING_THREADS_PER_LANE],
	int digit_carry[RADIX_DIGITS],
	int digit_scan[2][RADIX_DIGITS],
	int digit_counts[CYCLES_PER_TILE][LOADS_PER_CYCLE][RADIX_DIGITS],
	int spine_scan[2][B40C_WARP_THREADS])
{
	if (PASSES == 1) {

		// Only one pass: use both key pre- and post-processors in the same pass
		DistributionSortingPass<PASS, K, V, RADIX_BITS, RADIX_DIGITS,
			TILE_ELEMENTS, PreprocessFunctor, PostprocessFunctor,
			REDUCTION_LANES, LOG_REDUCTION_PARTIALS_PER_LANE,
			REDUCTION_PARTIALS_PER_LANE, SPINE_PARTIALS_PER_SEG,
			SCAN_LANES_PER_LOAD, LOADS_PER_CYCLE, CYCLES_PER_TILE,
			SCAN_LANES_PER_CYCLE, RAKING_THREADS,
			LOG_RAKING_THREADS_PER_LANE, RAKING_THREADS_PER_LANE,
			PARTIALS_PER_SEG, PARTIALS_PER_ROW, ROWS_PER_LANE>(
				d_sync, d_spine, d_keys0, d_keys1, d_values0, d_values1,
				block_offset, block_elements, out_of_bounds, extra_elements,
				spine_elements, base_partial, raking_partial,
				spine_raking_partial, encoded_reduction_col, smem_pool,
				warpscan, digit_carry, digit_scan, digit_counts, spine_scan);

	} else if (PASS == 0) {

		// First pass: use key pre-processor in this pass
		DistributionSortingPass<PASS, K, V, RADIX_BITS, RADIX_DIGITS,
			TILE_ELEMENTS, PreprocessFunctor, NopFunctor<K>,
			REDUCTION_LANES, LOG_REDUCTION_PARTIALS_PER_LANE,
			REDUCTION_PARTIALS_PER_LANE, SPINE_PARTIALS_PER_SEG,
			SCAN_LANES_PER_LOAD, LOADS_PER_CYCLE, CYCLES_PER_TILE,
			SCAN_LANES_PER_CYCLE, RAKING_THREADS,
			LOG_RAKING_THREADS_PER_LANE, RAKING_THREADS_PER_LANE,
			PARTIALS_PER_SEG, PARTIALS_PER_ROW, ROWS_PER_LANE>(
				d_sync, d_spine, d_keys0, d_keys1, d_values0, d_values1,
				block_offset, block_elements, out_of_bounds, extra_elements,
				spine_elements, base_partial, raking_partial,
				spine_raking_partial, encoded_reduction_col, smem_pool,
				warpscan, digit_carry, digit_scan, digit_counts, spine_scan);

	} else if (PASS == PASSES - 1) {

		// Last pass: use key post-processor in this pass
		DistributionSortingPass<PASS, K, V, RADIX_BITS, RADIX_DIGITS,
			TILE_ELEMENTS, NopFunctor<K>, PostprocessFunctor,
			REDUCTION_LANES, LOG_REDUCTION_PARTIALS_PER_LANE,
			REDUCTION_PARTIALS_PER_LANE, SPINE_PARTIALS_PER_SEG,
			SCAN_LANES_PER_LOAD, LOADS_PER_CYCLE, CYCLES_PER_TILE,
			SCAN_LANES_PER_CYCLE, RAKING_THREADS,
			LOG_RAKING_THREADS_PER_LANE, RAKING_THREADS_PER_LANE,
			PARTIALS_PER_SEG, PARTIALS_PER_ROW, ROWS_PER_LANE>(
				d_sync, d_spine, d_keys0, d_keys1, d_values0, d_values1,
				block_offset, block_elements, out_of_bounds, extra_elements,
				spine_elements, base_partial, raking_partial,
				spine_raking_partial, encoded_reduction_col, smem_pool,
				warpscan, digit_carry, digit_scan, digit_counts, spine_scan);

	} else {

		// Middle pass: use nop-functors for keys this pass
		DistributionSortingPass<PASS, K, V, RADIX_BITS, RADIX_DIGITS,
			TILE_ELEMENTS, NopFunctor<K>, NopFunctor<K>,
			REDUCTION_LANES, LOG_REDUCTION_PARTIALS_PER_LANE,
			REDUCTION_PARTIALS_PER_LANE, SPINE_PARTIALS_PER_SEG,
			SCAN_LANES_PER_LOAD, LOADS_PER_CYCLE, CYCLES_PER_TILE,
			SCAN_LANES_PER_CYCLE, RAKING_THREADS,
			LOG_RAKING_THREADS_PER_LANE, RAKING_THREADS_PER_LANE,
			PARTIALS_PER_SEG, PARTIALS_PER_ROW, ROWS_PER_LANE>(
				d_sync, d_spine, d_keys0, d_keys1, d_values0, d_values1,
				block_offset, block_elements, out_of_bounds, extra_elements,
				spine_elements, base_partial, raking_partial,
				spine_raking_partial, encoded_reduction_col, smem_pool,
				warpscan, digit_carry, digit_scan, digit_counts, spine_scan);
	}
}


/**
 * Single-grid sorting kernel.  Performs up to 8 passes.
 *
 * Launch shape: 1-D grid, B40C_RADIXSORT_THREADS threads per block.  All
 * blocks must be co-resident (the passes synchronize the whole grid through
 * d_sync spin flags), hence the __launch_bounds__ occupancy constraint.
 */
template <
	typename K, typename V, int RADIX_BITS, int PASSES, int STARTING_PASS,
	typename PreprocessFunctor, typename PostprocessFunctor>
__launch_bounds__ (B40C_RADIXSORT_THREADS, B40C_RADIXSORT_SG_OCCUPANCY(__CUDA_ARCH__))
__global__ void LsbSingleGridSortingKernel(
	int* d_sync,
	int* d_spine,
	K* d_keys0,
	K* d_keys1,
	V* d_values0,
	V* d_values1,
	CtaDecomposition work_decomposition,
	int spine_elements)
{
	const int RADIX_DIGITS 				= 1 << RADIX_BITS;
	const int TILE_ELEMENTS				= B40C_RADIXSORT_SG_TILE_ELEMENTS(__CUDA_ARCH__, K, V);

	const int LOG_SCAN_LANES_PER_LOAD	= (RADIX_BITS > 2) ? RADIX_BITS - 2 : 0;	// Always at least one lane per load
	const int SCAN_LANES_PER_LOAD		= 1 << LOG_SCAN_LANES_PER_LOAD;

	const int LOG_LOADS_PER_CYCLE		= B40C_RADIXSORT_SG_LOG_LOADS_PER_CYCLE(__CUDA_ARCH__, K, V);
	const int LOADS_PER_CYCLE			= 1 << LOG_LOADS_PER_CYCLE;

	const int LOG_CYCLES_PER_TILE		= B40C_RADIXSORT_SG_LOG_CYCLES_PER_TILE(__CUDA_ARCH__, K, V);
	const int CYCLES_PER_TILE			= 1 << LOG_CYCLES_PER_TILE;

	const int LOG_SCAN_LANES_PER_CYCLE	= LOG_LOADS_PER_CYCLE + LOG_SCAN_LANES_PER_LOAD;
	const int SCAN_LANES_PER_CYCLE		= 1 << LOG_SCAN_LANES_PER_CYCLE;

	const int LOG_PARTIALS_PER_LANE 	= B40C_RADIXSORT_LOG_THREADS;

	const int LOG_PARTIALS_PER_CYCLE	= LOG_SCAN_LANES_PER_CYCLE + LOG_PARTIALS_PER_LANE;

	const int LOG_RAKING_THREADS 		= B40C_RADIXSORT_SG_LOG_RAKING_THREADS(__CUDA_ARCH__);
	const int RAKING_THREADS			= 1 << LOG_RAKING_THREADS;

	const int LOG_RAKING_THREADS_PER_LANE 	= LOG_RAKING_THREADS - LOG_SCAN_LANES_PER_CYCLE;
	const int RAKING_THREADS_PER_LANE 		= 1 << LOG_RAKING_THREADS_PER_LANE;

	const int LOG_PARTIALS_PER_SEG 		= LOG_PARTIALS_PER_LANE - LOG_RAKING_THREADS_PER_LANE;
	const int PARTIALS_PER_SEG 			= 1 << LOG_PARTIALS_PER_SEG;

	const int LOG_PARTIALS_PER_ROW		= (LOG_PARTIALS_PER_SEG < B40C_LOG_MEM_BANKS(__CUDA_ARCH__)) ?
			B40C_LOG_MEM_BANKS(__CUDA_ARCH__) :
			LOG_PARTIALS_PER_SEG;		// floor of MEM_BANKS partials per row
	const int PARTIALS_PER_ROW			= 1 << LOG_PARTIALS_PER_ROW;
	const int PADDED_PARTIALS_PER_ROW 	= PARTIALS_PER_ROW + 1;		// +1 padding avoids smem bank conflicts

	const int LOG_SEGS_PER_ROW 			= LOG_PARTIALS_PER_ROW - LOG_PARTIALS_PER_SEG;
	const int SEGS_PER_ROW				= 1 << LOG_SEGS_PER_ROW;

	const int LOG_ROWS_PER_LOAD 		= LOG_PARTIALS_PER_CYCLE - LOG_PARTIALS_PER_ROW;

	const int LOG_ROWS_PER_LANE 		= LOG_PARTIALS_PER_LANE - LOG_PARTIALS_PER_ROW;
	const int ROWS_PER_LANE 			= 1 << LOG_ROWS_PER_LANE;

	const int LOG_ROWS_PER_CYCLE 		= LOG_SCAN_LANES_PER_CYCLE + LOG_ROWS_PER_LANE;
	const int ROWS_PER_CYCLE 			= 1 << LOG_ROWS_PER_CYCLE;

	const int REDUCTION_LANES				= SCAN_LANES_PER_LOAD;
	const int LOG_REDUCTION_PARTIALS_PER_LANE	= B40C_RADIXSORT_LOG_THREADS;
	// FIX: derive from its own LOG constant rather than LOG_PARTIALS_PER_LANE
	// (the two happen to be equal today, but the old form breaks silently if
	// either definition changes)
	const int REDUCTION_PARTIALS_PER_LANE 	= 1 << LOG_REDUCTION_PARTIALS_PER_LANE;

	const int PADDED_REDUCTION_PARTIALS		= B40C_WARP_THREADS + 1;

	// Shared-memory pool must be large enough for the biggest of its four uses
	const int SCAN_LANE_BYTES		= ROWS_PER_CYCLE * PADDED_PARTIALS_PER_ROW * sizeof(int);
	const int REDUCTION_LANE_BYTES	= REDUCTION_LANES * REDUCTION_PARTIALS_PER_LANE * sizeof(int);
	// FIX: was missing "* sizeof(int)" — all sibling *_BYTES constants are in
	// bytes, and this one was being compared against them in B40C_MAX below,
	// under-sizing the pool 4x whenever the raking grid was the largest use
	const int REDUCTION_RAKING_BYTES	= RADIX_DIGITS * PADDED_REDUCTION_PARTIALS * sizeof(int);
	const int MAX_EXCHANGE_BYTES	= B40C_MAX(TILE_ELEMENTS * sizeof(K), TILE_ELEMENTS * sizeof(V));
	const int SHARED_BYTES 			= B40C_MAX(REDUCTION_RAKING_BYTES,
										B40C_MAX(SCAN_LANE_BYTES,
										B40C_MAX(REDUCTION_LANE_BYTES, MAX_EXCHANGE_BYTES)));
	const int SHARED_INT4S			= (SHARED_BYTES + sizeof(int4) - 1) / sizeof(int4);

	const int LOG_SPINE_RAKING_THREADS 		= B40C_LOG_WARP_THREADS;
	const int SPINE_RAKING_THREADS			= 1 << LOG_SPINE_RAKING_THREADS;

	const int LOG_SPINE_PARTIALS			= B40C_RADIXSORT_LOG_THREADS;
	const int SPINE_PARTIALS				= 1 << LOG_SPINE_PARTIALS;

	const int LOG_SPINE_PARTIALS_PER_SEG 	= LOG_SPINE_PARTIALS - LOG_SPINE_RAKING_THREADS;
	const int SPINE_PARTIALS_PER_SEG 		= 1 << LOG_SPINE_PARTIALS_PER_SEG;

	const int LOG_SPINE_SEGS_PER_ROW 		= LOG_PARTIALS_PER_ROW - LOG_SPINE_PARTIALS_PER_SEG;
	const int SPINE_SEGS_PER_ROW			= 1 << LOG_SPINE_SEGS_PER_ROW;

	// N.B.: We use the following voodoo incantations to elide the compiler's
	// miserable "declared but never referenced" warnings for these (which are
	// actually used for template instantiation)
	SuppressUnusedConstantWarning(SCAN_LANES_PER_LOAD);
	SuppressUnusedConstantWarning(PARTIALS_PER_SEG);
	SuppressUnusedConstantWarning(LOG_ROWS_PER_LOAD);
	SuppressUnusedConstantWarning(ROWS_PER_LANE);
	SuppressUnusedConstantWarning(LOG_REDUCTION_PARTIALS_PER_LANE);
	SuppressUnusedConstantWarning(SPINE_RAKING_THREADS);
	SuppressUnusedConstantWarning(SPINE_PARTIALS);
	SuppressUnusedConstantWarning(SPINE_PARTIALS_PER_SEG);

	// aligned_smem_pool is a int4[] to avoid alignment issues when casting to
	// (K *) and/or (V *)
	__shared__ int4		aligned_smem_pool[SHARED_INT4S];
	__shared__ int 		warpscan[SCAN_LANES_PER_CYCLE][3][RAKING_THREADS_PER_LANE];		// One warpscan per fours-group
	__shared__ int 		digit_carry[RADIX_DIGITS];
	__shared__ int 		digit_scan[2][RADIX_DIGITS];
	__shared__ int 		digit_counts[CYCLES_PER_TILE][LOADS_PER_CYCLE][RADIX_DIGITS];
	__shared__ int 		spine_scan[2][B40C_WARP_THREADS];
	__shared__ int 		extra_elements;
	__shared__ int		out_of_bounds;

	int* smem_pool = reinterpret_cast<int*>(aligned_smem_pool);

	// Calculate our threadblock's range (big blocks take one extra tile)
	int block_elements, block_offset;
	if (blockIdx.x < work_decomposition.num_big_blocks) {
		block_offset = work_decomposition.big_block_elements * blockIdx.x;
		block_elements = work_decomposition.big_block_elements;
	} else {
		block_offset = (work_decomposition.normal_block_elements * blockIdx.x)
				+ (work_decomposition.num_big_blocks * TILE_ELEMENTS);
		block_elements = work_decomposition.normal_block_elements;
	}
	// All threads store the same value into these shared cells, so the
	// unsynchronized writes are benign
	extra_elements = 0;
	if (blockIdx.x == gridDim.x - 1) {
		extra_elements = work_decomposition.extra_elements_last_block;
		if (extra_elements) {
			// Last (partial) tile is processed via the extra_elements path
			block_elements -= TILE_ELEMENTS;
		}
	}
	out_of_bounds = block_offset + block_elements;

	// Column for encoding reduction counts (first element of column)
	int *encoded_reduction_col = smem_pool + threadIdx.x;

	// Location for placing 2-element partial reductions in the first lane of a cycle
	int row = threadIdx.x >> LOG_PARTIALS_PER_ROW;
	int col = threadIdx.x & (PARTIALS_PER_ROW - 1);
	int *base_partial = smem_pool + (row * PADDED_PARTIALS_PER_ROW) + col;

	int *spine_raking_partial = 0;
	int *raking_partial = 0;

	if (threadIdx.x < RAKING_THREADS) {

		if (threadIdx.x < B40C_WARP_THREADS) {

			// Location for spine raking
			row = threadIdx.x >> LOG_SPINE_SEGS_PER_ROW;
			col = (threadIdx.x & (SPINE_SEGS_PER_ROW - 1)) << LOG_SPINE_PARTIALS_PER_SEG;
			spine_raking_partial = smem_pool + (row * PADDED_PARTIALS_PER_ROW) + col;

			// Initialize warpscan for spine_scan
			spine_scan[0][threadIdx.x] = 0;
		}

		// Location for scan/scatter-raking across all loads within a cycle
		row = threadIdx.x >> LOG_SEGS_PER_ROW;
		col = (threadIdx.x & (SEGS_PER_ROW - 1)) << LOG_PARTIALS_PER_SEG;
		raking_partial = smem_pool + (row * PADDED_PARTIALS_PER_ROW) + col;

		// Initialize lane warpscans
		if (threadIdx.x < RAKING_THREADS_PER_LANE) {
			#pragma unroll
			for (int SCAN_LANE = 0; SCAN_LANE < (int) SCAN_LANES_PER_CYCLE; SCAN_LANE++) {
				warpscan[SCAN_LANE][0][threadIdx.x] = 0;
			}
		}

		// Initialize digit_scan
		if (threadIdx.x < RADIX_DIGITS) {
			digit_scan[0][threadIdx.x] = 0;
		}
	}

	// NOTE(review): there is no barrier between the shared-memory init above
	// and the first pass; presumably ReductionPass synchronizes before any
	// thread reads another thread's initialized cells — confirm.

	// Up to 8 sorting passes.  The PASS index must be a compile-time constant
	// (it selects template specializations), hence the unrolled dispatch below
	// rather than a runtime loop.  The macro factors out the 8 identical
	// invocations that were previously copy-pasted.
	#define B40C_SG_SORTING_PASS(i)												\
		if (PASSES > (i)) {														\
			DistributionSortingPass<PASSES, STARTING_PASS + (i), K, V,			\
				RADIX_BITS, RADIX_DIGITS, TILE_ELEMENTS,						\
				PreprocessFunctor, PostprocessFunctor,							\
				REDUCTION_LANES, LOG_REDUCTION_PARTIALS_PER_LANE,				\
				REDUCTION_PARTIALS_PER_LANE, SPINE_PARTIALS_PER_SEG,			\
				SCAN_LANES_PER_LOAD, LOADS_PER_CYCLE, CYCLES_PER_TILE,			\
				SCAN_LANES_PER_CYCLE, RAKING_THREADS,							\
				LOG_RAKING_THREADS_PER_LANE, RAKING_THREADS_PER_LANE,			\
				PARTIALS_PER_SEG, PARTIALS_PER_ROW, ROWS_PER_LANE>(				\
					d_sync, d_spine, d_keys0, d_keys1, d_values0, d_values1,	\
					block_offset, block_elements, out_of_bounds,				\
					extra_elements, spine_elements, base_partial,				\
					raking_partial, spine_raking_partial,						\
					encoded_reduction_col, smem_pool, warpscan, digit_carry,	\
					digit_scan, digit_counts, spine_scan);						\
		}

	B40C_SG_SORTING_PASS(0)
	B40C_SG_SORTING_PASS(1)
	B40C_SG_SORTING_PASS(2)
	B40C_SG_SORTING_PASS(3)
	B40C_SG_SORTING_PASS(4)
	B40C_SG_SORTING_PASS(5)
	B40C_SG_SORTING_PASS(6)
	B40C_SG_SORTING_PASS(7)

	#undef B40C_SG_SORTING_PASS
}

} // namespace b40c
the_stack
#include "correlation_cuda_kernel.cuh"

#define CUDA_NUM_THREADS 1024
#define THREADS_PER_BLOCK 32

#include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/Dispatch.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>

using at::Half;

/**
 * Repacks an NCHW input into zero-padded NHWC layout (rinput).
 *
 * Launch shape: grid = (batch, height, width), block = THREADS_PER_BLOCK;
 * each block copies one (n, y, x) pixel's channel vector, threads striding
 * over channels.  rinput must be pre-allocated (and zeroed, so the padding
 * border stays zero) with shape [n, height + 2*pad, width + 2*pad, channels].
 */
template <typename scalar_t>
__global__ void channels_first(const scalar_t* __restrict__ input,
                               scalar_t* rinput,
                               int channels,
                               int height,
                               int width,
                               int pad_size)
{
    // n (batch size), c (num of channels), y (height), x (width)
    int n = blockIdx.x;
    int y = blockIdx.y;
    int x = blockIdx.z;
    int ch_off = threadIdx.x;

    // Strides in the source (NCHW) layout
    int dimcyx = channels * height * width;
    int dimyx = height * width;

    // Strides in the padded destination (NHWC) layout
    int p_dimx = (width + 2 * pad_size);
    int p_dimy = (height + 2 * pad_size);
    int p_dimyxc = channels * p_dimy * p_dimx;
    int p_dimxc = p_dimx * channels;

    for (int c = ch_off; c < channels; c += THREADS_PER_BLOCK) {
        scalar_t value = input[n * dimcyx + c * dimyx + y * width + x];
        rinput[n * p_dimyxc + (y + pad_size) * p_dimxc + (x + pad_size) * channels + c] = value;
    }
}

/**
 * Correlation forward: for each output pixel and each displacement (tj, ti),
 * averages the element-wise product of the two padded NHWC inputs over a
 * kernel_size x kernel_size window and all channels.
 *
 * Launch shape: grid = (batch, outputHeight, outputWidth),
 * block = THREADS_PER_BLOCK (must equal blockDim.x; threads cooperate on the
 * channel reduction through the prod_sum shared buffer).
 */
template <typename scalar_t>
__global__ void correlation_forward(scalar_t* output,
                                    int nOutputChannels,
                                    int outputHeight,
                                    int outputWidth,
                                    const scalar_t* __restrict__ rInput1,
                                    int nInputChannels,
                                    int inputHeight,
                                    int inputWidth,
                                    const scalar_t* __restrict__ rInput2,
                                    int pad_size,
                                    int kernel_size,
                                    int max_displacement,
                                    int stride1,
                                    int stride2)
{
    // n (batch size), c (num of channels), y (height), x (width)
    int pInputWidth = inputWidth + 2 * pad_size;
    int pInputHeight = inputHeight + 2 * pad_size;

    int kernel_rad = (kernel_size - 1) / 2;
    int displacement_rad = max_displacement / stride2;
    int displacement_size = 2 * displacement_rad + 1;

    int n = blockIdx.x;
    int y1 = blockIdx.y * stride1 + max_displacement;
    int x1 = blockIdx.z * stride1 + max_displacement;
    int c = threadIdx.x;

    // Strides in the padded NHWC inputs
    int pdimyxc = pInputHeight * pInputWidth * nInputChannels;
    int pdimxc = pInputWidth * nInputChannels;
    int pdimc = nInputChannels;

    // Strides in the NCHW output
    int tdimcyx = nOutputChannels * outputHeight * outputWidth;
    int tdimyx = outputHeight * outputWidth;
    int tdimx = outputWidth;

    scalar_t nelems = kernel_size * kernel_size * pdimc;

    __shared__ scalar_t prod_sum[THREADS_PER_BLOCK];

    // no significant speed-up in using chip memory for input1 sub-data,
    // not enough chip memory size to accomodate memory per block for input2 sub-data
    // instead i've used device memory for both

    // element-wise product along channel axis
    for (int tj = -displacement_rad; tj <= displacement_rad; ++tj) {
        for (int ti = -displacement_rad; ti <= displacement_rad; ++ti) {
            prod_sum[c] = 0;
            int x2 = x1 + ti * stride2;
            int y2 = y1 + tj * stride2;

            for (int j = -kernel_rad; j <= kernel_rad; ++j) {
                for (int i = -kernel_rad; i <= kernel_rad; ++i) {
                    for (int ch = c; ch < pdimc; ch += THREADS_PER_BLOCK) {
                        int indx1 = n * pdimyxc + (y1 + j) * pdimxc + (x1 + i) * pdimc + ch;
                        int indx2 = n * pdimyxc + (y2 + j) * pdimxc + (x2 + i) * pdimc + ch;
                        prod_sum[c] += rInput1[indx1] * rInput2[indx2];
                    }
                }
            }

            // accumulate
            __syncthreads();

            if (c == 0) {
                scalar_t reduce_sum = 0;
                for (int index = 0; index < THREADS_PER_BLOCK; ++index) {
                    reduce_sum += prod_sum[index];
                }
                int tc = (tj + displacement_rad) * displacement_size + (ti + displacement_rad);
                const int tindx = n * tdimcyx + tc * tdimyx + blockIdx.y * tdimx + blockIdx.z;
                output[tindx] = reduce_sum / nelems;
            }

            // FIX: barrier before the next (tj, ti) iteration resets prod_sum.
            // Without it, threads 1..31 race ahead and overwrite prod_sum[c]
            // while thread 0 is still reading it above, corrupting the output
            // nondeterministically.
            __syncthreads();
        }
    }
}

/**
 * Gradient w.r.t. input1 for one batch item.
 *
 * Launch shape: grid = (inputHeight, inputWidth, nInputChannels),
 * block = THREADS_PER_BLOCK; threads stride over output channels (tc) and
 * reduce their partial sums through prod_sum.  Assumes gradInput1 is
 * pre-allocated and zero-filled (early-outs rely on that).
 */
template <typename scalar_t>
__global__ void correlation_backward_input1(int item,
                                            scalar_t* gradInput1,
                                            int nInputChannels,
                                            int inputHeight,
                                            int inputWidth,
                                            const scalar_t* __restrict__ gradOutput,
                                            int nOutputChannels,
                                            int outputHeight,
                                            int outputWidth,
                                            const scalar_t* __restrict__ rInput2,
                                            int pad_size,
                                            int kernel_size,
                                            int max_displacement,
                                            int stride1,
                                            int stride2)
{
    // n (batch size), c (num of channels), y (height), x (width)
    int n = item;
    int y = blockIdx.x * stride1 + pad_size;
    int x = blockIdx.y * stride1 + pad_size;
    int c = blockIdx.z;
    int tch_off = threadIdx.x;

    int kernel_rad = (kernel_size - 1) / 2;
    int displacement_rad = max_displacement / stride2;
    int displacement_size = 2 * displacement_rad + 1;

    // Range of output positions whose receptive field covers (y, x)
    int xmin = (x - kernel_rad - max_displacement) / stride1;
    int ymin = (y - kernel_rad - max_displacement) / stride1;
    int xmax = (x + kernel_rad - max_displacement) / stride1;
    int ymax = (y + kernel_rad - max_displacement) / stride1;

    if (xmax < 0 || ymax < 0 || xmin >= outputWidth || ymin >= outputHeight) {
        // assumes gradInput1 is pre-allocated and zero filled
        return;
    }

    if (xmin > xmax || ymin > ymax) {
        // assumes gradInput1 is pre-allocated and zero filled
        return;
    }

    xmin = max(0, xmin);
    xmax = min(outputWidth - 1, xmax);
    ymin = max(0, ymin);
    ymax = min(outputHeight - 1, ymax);

    int pInputWidth = inputWidth + 2 * pad_size;
    int pInputHeight = inputHeight + 2 * pad_size;

    // Strides: padded NHWC input, NCHW gradOutput, NCHW gradInput
    int pdimyxc = pInputHeight * pInputWidth * nInputChannels;
    int pdimxc = pInputWidth * nInputChannels;
    int pdimc = nInputChannels;

    int tdimcyx = nOutputChannels * outputHeight * outputWidth;
    int tdimyx = outputHeight * outputWidth;
    int tdimx = outputWidth;

    int odimcyx = nInputChannels * inputHeight * inputWidth;
    int odimyx = inputHeight * inputWidth;
    int odimx = inputWidth;

    scalar_t nelems = kernel_size * kernel_size * nInputChannels;

    __shared__ scalar_t prod_sum[THREADS_PER_BLOCK];
    prod_sum[tch_off] = 0;

    for (int tc = tch_off; tc < nOutputChannels; tc += THREADS_PER_BLOCK) {
        // Decode the displacement encoded in this output channel
        int i2 = (tc % displacement_size - displacement_rad) * stride2;
        int j2 = (tc / displacement_size - displacement_rad) * stride2;

        int indx2 = n * pdimyxc + (y + j2) * pdimxc + (x + i2) * pdimc + c;
        scalar_t val2 = rInput2[indx2];

        for (int j = ymin; j <= ymax; ++j) {
            for (int i = xmin; i <= xmax; ++i) {
                int tindx = n * tdimcyx + tc * tdimyx + j * tdimx + i;
                prod_sum[tch_off] += gradOutput[tindx] * val2;
            }
        }
    }
    __syncthreads();

    if (tch_off == 0) {
        scalar_t reduce_sum = 0;
        for (int idx = 0; idx < THREADS_PER_BLOCK; idx++) {
            reduce_sum += prod_sum[idx];
        }
        const int indx1 = n * odimcyx + c * odimyx + (y - pad_size) * odimx + (x - pad_size);
        gradInput1[indx1] = reduce_sum / nelems;
    }
}

/**
 * Gradient w.r.t. input2 for one batch item.  Mirrors
 * correlation_backward_input1, except the valid output range depends on each
 * displacement (i2, j2), so the bounds test happens inside the tc loop.
 *
 * Launch shape: grid = (inputHeight, inputWidth, nInputChannels),
 * block = THREADS_PER_BLOCK.  Assumes gradInput2 is pre-allocated and
 * zero-filled.
 */
template <typename scalar_t>
__global__ void correlation_backward_input2(int item,
                                            scalar_t* gradInput2,
                                            int nInputChannels,
                                            int inputHeight,
                                            int inputWidth,
                                            const scalar_t* __restrict__ gradOutput,
                                            int nOutputChannels,
                                            int outputHeight,
                                            int outputWidth,
                                            const scalar_t* __restrict__ rInput1,
                                            int pad_size,
                                            int kernel_size,
                                            int max_displacement,
                                            int stride1,
                                            int stride2)
{
    // n (batch size), c (num of channels), y (height), x (width)
    int n = item;
    int y = blockIdx.x * stride1 + pad_size;
    int x = blockIdx.y * stride1 + pad_size;
    int c = blockIdx.z;
    int tch_off = threadIdx.x;

    int kernel_rad = (kernel_size - 1) / 2;
    int displacement_rad = max_displacement / stride2;
    int displacement_size = 2 * displacement_rad + 1;

    int pInputWidth = inputWidth + 2 * pad_size;
    int pInputHeight = inputHeight + 2 * pad_size;

    int pdimyxc = pInputHeight * pInputWidth * nInputChannels;
    int pdimxc = pInputWidth * nInputChannels;
    int pdimc = nInputChannels;

    int tdimcyx = nOutputChannels * outputHeight * outputWidth;
    int tdimyx = outputHeight * outputWidth;
    int tdimx = outputWidth;

    int odimcyx = nInputChannels * inputHeight * inputWidth;
    int odimyx = inputHeight * inputWidth;
    int odimx = inputWidth;

    scalar_t nelems = kernel_size * kernel_size * nInputChannels;

    __shared__ scalar_t prod_sum[THREADS_PER_BLOCK];
    prod_sum[tch_off] = 0;

    for (int tc = tch_off; tc < nOutputChannels; tc += THREADS_PER_BLOCK) {
        // Decode the displacement encoded in this output channel
        int i2 = (tc % displacement_size - displacement_rad) * stride2;
        int j2 = (tc / displacement_size - displacement_rad) * stride2;

        // Output positions whose window (shifted by this displacement) covers (y, x)
        int xmin = (x - kernel_rad - max_displacement - i2) / stride1;
        int ymin = (y - kernel_rad - max_displacement - j2) / stride1;
        int xmax = (x + kernel_rad - max_displacement - i2) / stride1;
        int ymax = (y + kernel_rad - max_displacement - j2) / stride1;

        if (xmax < 0 || ymax < 0 || xmin >= outputWidth || ymin >= outputHeight) {
            // assumes gradInput2 is pre-allocated and zero filled
            continue;
        }

        if (xmin > xmax || ymin > ymax) {
            // assumes gradInput2 is pre-allocated and zero filled
            continue;
        }

        xmin = max(0, xmin);
        xmax = min(outputWidth - 1, xmax);
        ymin = max(0, ymin);
        ymax = min(outputHeight - 1, ymax);

        int indx1 = n * pdimyxc + (y - j2) * pdimxc + (x - i2) * pdimc + c;
        scalar_t val1 = rInput1[indx1];

        for (int j = ymin; j <= ymax; ++j) {
            for (int i = xmin; i <= xmax; ++i) {
                int tindx = n * tdimcyx + tc * tdimyx + j * tdimx + i;
                prod_sum[tch_off] += gradOutput[tindx] * val1;
            }
        }
    }
    __syncthreads();

    if (tch_off == 0) {
        scalar_t reduce_sum = 0;
        for (int idx = 0; idx < THREADS_PER_BLOCK; idx++) {
            reduce_sum += prod_sum[idx];
        }
        const int indx2 = n * odimcyx + c * odimyx + (y - pad_size) * odimx + (x - pad_size);
        gradInput2[indx2] = reduce_sum / nelems;
    }
}

/**
 * Host launcher for the forward pass: repacks both inputs to padded NHWC,
 * then runs correlation_forward on the given stream.
 *
 * rInput1/rInput2 are caller-provided scratch tensors (padded NHWC, zeroed).
 * Many of the stride parameters (osb.., isb.., gs..) and corr_type_multiply
 * are unused here but kept for interface compatibility.
 *
 * Returns 1 on success, 0 if a CUDA error was recorded.
 */
int correlation_forward_cuda_kernel(at::Tensor& output,
                                    int ob, int oc, int oh, int ow,
                                    int osb, int osc, int osh, int osw,
                                    at::Tensor& input1,
                                    int ic, int ih, int iw,
                                    int isb, int isc, int ish, int isw,
                                    at::Tensor& input2,
                                    int gc,
                                    int gsb, int gsc, int gsh, int gsw,
                                    at::Tensor& rInput1,
                                    at::Tensor& rInput2,
                                    int pad_size,
                                    int kernel_size,
                                    int max_displacement,
                                    int stride1,
                                    int stride2,
                                    int corr_type_multiply,
                                    cudaStream_t stream)
{
    int batchSize = ob;

    int nInputChannels = ic;
    int inputWidth = iw;
    int inputHeight = ih;

    int nOutputChannels = oc;
    int outputWidth = ow;
    int outputHeight = oh;

    dim3 blocks_grid(batchSize, inputHeight, inputWidth);
    dim3 threads_block(THREADS_PER_BLOCK);

    AT_DISPATCH_FLOATING_TYPES_AND_HALF(input1.type(), "channels_first_fwd_1", ([&] {
        channels_first<scalar_t> << <blocks_grid, threads_block, 0, stream >> >(
            input1.data<scalar_t>(), rInput1.data<scalar_t>(),
            nInputChannels, inputHeight, inputWidth, pad_size);
    }));

    AT_DISPATCH_FLOATING_TYPES_AND_HALF(input2.type(), "channels_first_fwd_2", ([&] {
        channels_first<scalar_t> << <blocks_grid, threads_block, 0, stream >> > (
            input2.data<scalar_t>(), rInput2.data<scalar_t>(),
            nInputChannels, inputHeight, inputWidth, pad_size);
    }));

    dim3 threadsPerBlock(THREADS_PER_BLOCK);
    dim3 totalBlocksCorr(batchSize, outputHeight, outputWidth);

    AT_DISPATCH_FLOATING_TYPES_AND_HALF(input1.type(), "correlation_forward", ([&] {
        correlation_forward<scalar_t> << <totalBlocksCorr, threadsPerBlock, 0, stream >> >
            (output.data<scalar_t>(), nOutputChannels, outputHeight, outputWidth,
             rInput1.data<scalar_t>(), nInputChannels, inputHeight, inputWidth,
             rInput2.data<scalar_t>(),
             pad_size, kernel_size, max_displacement, stride1, stride2);
    }));

    // check for errors
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        printf("error in correlation_forward_cuda_kernel: %s\n", cudaGetErrorString(err));
        return 0;
    }

    return 1;
}

/**
 * Host launcher for the backward pass: repacks both inputs to padded NHWC,
 * then runs the two input-gradient kernels once per batch item on the given
 * stream.  gradInput1/gradInput2 must be pre-zeroed by the caller.
 *
 * Returns 1 on success, 0 if a CUDA error was recorded.
 */
int correlation_backward_cuda_kernel(at::Tensor& gradOutput,
                                     int gob, int goc, int goh, int gow,
                                     int gosb, int gosc, int gosh, int gosw,
                                     at::Tensor& input1,
                                     int ic, int ih, int iw,
                                     int isb, int isc, int ish, int isw,
                                     at::Tensor& input2,
                                     int gsb, int gsc, int gsh, int gsw,
                                     at::Tensor& gradInput1,
                                     int gisb, int gisc, int gish, int gisw,
                                     at::Tensor& gradInput2,
                                     int ggc,
                                     int ggsb, int ggsc, int ggsh, int ggsw,
                                     at::Tensor& rInput1,
                                     at::Tensor& rInput2,
                                     int pad_size,
                                     int kernel_size,
                                     int max_displacement,
                                     int stride1,
                                     int stride2,
                                     int corr_type_multiply,
                                     cudaStream_t stream)
{
    int batchSize = gob;
    int num = batchSize;

    int nInputChannels = ic;
    int inputWidth = iw;
    int inputHeight = ih;

    int nOutputChannels = goc;
    int outputWidth = gow;
    int outputHeight = goh;

    dim3 blocks_grid(batchSize, inputHeight, inputWidth);
    dim3 threads_block(THREADS_PER_BLOCK);

    AT_DISPATCH_FLOATING_TYPES_AND_HALF(input1.type(), "lltm_forward_cuda", ([&] {
        channels_first<scalar_t> << <blocks_grid, threads_block, 0, stream >> >(
            input1.data<scalar_t>(), rInput1.data<scalar_t>(),
            nInputChannels, inputHeight, inputWidth, pad_size
        );
    }));

    AT_DISPATCH_FLOATING_TYPES_AND_HALF(input2.type(), "lltm_forward_cuda", ([&] {
        channels_first<scalar_t> << <blocks_grid, threads_block, 0, stream >> >(
            input2.data<scalar_t>(), rInput2.data<scalar_t>(),
            nInputChannels, inputHeight, inputWidth, pad_size
        );
    }));

    dim3 threadsPerBlock(THREADS_PER_BLOCK);
    dim3 totalBlocksCorr(inputHeight, inputWidth, nInputChannels);

    // One launch per batch item (batch index is passed as a kernel argument
    // because all three grid dimensions are already in use)
    for (int n = 0; n < num; ++n) {
        AT_DISPATCH_FLOATING_TYPES_AND_HALF(input2.type(), "lltm_forward_cuda", ([&] {
            correlation_backward_input1<scalar_t> << <totalBlocksCorr, threadsPerBlock, 0, stream >> > (
                n, gradInput1.data<scalar_t>(), nInputChannels, inputHeight, inputWidth,
                gradOutput.data<scalar_t>(), nOutputChannels, outputHeight, outputWidth,
                rInput2.data<scalar_t>(),
                pad_size, kernel_size, max_displacement, stride1, stride2);
        }));
    }

    for (int n = 0; n < batchSize; n++) {
        AT_DISPATCH_FLOATING_TYPES_AND_HALF(rInput1.type(), "lltm_forward_cuda", ([&] {
            correlation_backward_input2<scalar_t> << <totalBlocksCorr, threadsPerBlock, 0, stream >> >(
                n, gradInput2.data<scalar_t>(), nInputChannels, inputHeight, inputWidth,
                gradOutput.data<scalar_t>(), nOutputChannels, outputHeight, outputWidth,
                rInput1.data<scalar_t>(),
                pad_size, kernel_size, max_displacement, stride1, stride2);
        }));
    }

    // check for errors
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        printf("error in correlation_backward_cuda_kernel: %s\n", cudaGetErrorString(err));
        return 0;
    }

    return 1;
}
the_stack
#include <raft/cudart_utils.h>
#include <raft/cuda_utils.cuh>
#include <raft/linalg/binary_op.cuh>
#include <raft/linalg/unary_op.cuh>
#include <raft/random/rng.hpp>
#include <algorithm>
#include <numeric>
#include <random>
#include <stack>
#include <device_launch_parameters.h>
#include <rmm/device_uvector.hpp>

namespace cuml {
namespace genetic {

/**
 * @brief Simultaneously execute tournaments for all programs.
 * The fitness values being compared are adjusted for bloat (program length),
 * using the given parsimony coefficient.
 *
 * One thread services one tournament; `tour_size` contenders are drawn
 * uniformly at random (with replacement) via a per-tournament Philox stream.
 *
 * @param progs       Device pointer to programs
 * @param win_indices Winning indices for every tournament
 * @param seeds       Init seeds for choice selection
 * @param n_progs     Number of programs
 * @param n_tours     No of tournaments to be conducted
 * @param tour_size   No of programs considered per tournament (<= n_progs)
 * @param criterion   Selection criterion for choices (0 = smaller fitness wins,
 *                    1 = larger fitness wins; see param::criterion())
 * @param parsimony   Parsimony coefficient to account for bloat
 */
__global__ void batched_tournament_kernel(const program_t progs,
                                          int* win_indices,
                                          const int* seeds,
                                          const int n_progs,
                                          const int n_tours,
                                          const int tour_size,
                                          const int criterion,
                                          const float parsimony)
{
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= n_tours) return;

  // Independent RNG stream per tournament: seeded by host, subsequence = idx.
  raft::random::detail::PhiloxGenerator rng(seeds[idx], idx, 0);

  int r;
  rng.next(r);

  // Define optima values.
  // Penalty sign trick: (2*criterion - 1) is +1 when maximizing and -1 when
  // minimizing, so subtracting the penalty always handicaps longer programs.
  int opt           = r % n_progs;
  float opt_penalty = parsimony * progs[opt].len * (2 * criterion - 1);
  float opt_score   = progs[opt].raw_fitness_ - opt_penalty;

  for (int s = 1; s < tour_size; ++s) {
    rng.next(r);
    int curr           = r % n_progs;
    float curr_penalty = parsimony * progs[curr].len * (2 * criterion - 1);
    float curr_score   = progs[curr].raw_fitness_ - curr_penalty;

    // Eliminate thread divergence - b takes values in {0,1}.
    // All threads share `criterion` but mostly have different 'b', so the
    // select is done with arithmetic blends instead of data-dependent branches.
    int b = (opt_score < curr_score);
    if (criterion) {
      // Maximizing: keep the larger adjusted score.
      opt         = (1 - b) * opt + b * curr;
      opt_penalty = (1 - b) * opt_penalty + b * curr_penalty;
      opt_score   = (1 - b) * opt_score + b * curr_score;
    } else {
      // Minimizing: keep the smaller adjusted score.
      opt         = b * opt + (1 - b) * curr;
      opt_penalty = b * opt_penalty + (1 - b) * curr_penalty;
      opt_score   = b * opt_score + (1 - b) * curr_score;
    }
  }

  // Set win index
  win_indices[idx] = opt;
}

/**
 * @brief Driver function for evolving a generation of programs
 *
 * Generation 1 builds random programs; later generations pick each child's
 * mutation type from the configured probabilities, run device tournaments to
 * select parents, and apply the mutations on the host. Finally the new
 * programs' node arrays are copied to the device and their fitness evaluated.
 *
 * @param h              cuML handle
 * @param h_oldprogs     previous generation host programs
 * @param d_oldprogs     previous generation device programs
 * @param h_nextprogs    next generation host programs
 * @param d_nextprogs    next generation device programs
 * @param n_samples      No of samples in input dataset
 * @param data           Device pointer to input dataset
 * @param y              Device pointer to input predictions
 * @param sample_weights Device pointer to input weights
 * @param params         Training hyperparameters
 * @param generation     Current generation id (1-based)
 * @param seed           Random seed for generators
 */
void parallel_evolve(const raft::handle_t& h,
                     const std::vector<program>& h_oldprogs,
                     const program_t& d_oldprogs,
                     std::vector<program>& h_nextprogs,
                     program_t& d_nextprogs,
                     const int n_samples,
                     const float* data,
                     const float* y,
                     const float* sample_weights,
                     const param& params,
                     const int generation,
                     const int seed)
{
  cudaStream_t stream = h.get_stream();
  auto n_progs        = params.population_size;
  auto tour_size      = params.tournament_size;
  auto n_tours        = n_progs;  // at least num_progs tournaments

  // Seed engines
  std::mt19937 h_gen(seed);       // CPU rng
  raft::random::Rng d_gen(seed);  // GPU rng

  std::uniform_real_distribution<float> dist_U(0.0f, 1.0f);

  // Build, Mutate and Run Tournaments
  if (generation == 1) {
    // Build random programs for the first generation
    for (auto i = 0; i < n_progs; ++i) {
      build_program(h_nextprogs[i], params, h_gen);
    }
  } else {
    // Set mutation type.
    // partial_sum turns the individual probabilities into CDF thresholds so a
    // single uniform draw selects the mutation type.
    float mut_probs[4];
    mut_probs[0] = params.p_crossover;
    mut_probs[1] = params.p_subtree_mutation;
    mut_probs[2] = params.p_hoist_mutation;
    mut_probs[3] = params.p_point_mutation;
    std::partial_sum(mut_probs, mut_probs + 4, mut_probs);

    for (auto i = 0; i < n_progs; ++i) {
      float prob = dist_U(h_gen);

      if (prob < mut_probs[0]) {
        h_nextprogs[i].mut_type = mutation_t::crossover;
        // Crossover needs a second (donor) parent -> one extra tournament.
        n_tours++;
      } else if (prob < mut_probs[1]) {
        h_nextprogs[i].mut_type = mutation_t::subtree;
      } else if (prob < mut_probs[2]) {
        h_nextprogs[i].mut_type = mutation_t::hoist;
      } else if (prob < mut_probs[3]) {
        h_nextprogs[i].mut_type = mutation_t::point;
      } else {
        h_nextprogs[i].mut_type = mutation_t::reproduce;
      }
    }

    // Run tournaments
    rmm::device_uvector<int> tour_seeds(n_tours, stream);
    rmm::device_uvector<int> d_win_indices(n_tours, stream);
    d_gen.uniformInt(tour_seeds.data(), n_tours, 1, INT_MAX, stream);
    auto criterion = params.criterion();
    dim3 nblks(raft::ceildiv(n_tours, GENE_TPB), 1, 1);
    batched_tournament_kernel<<<nblks, GENE_TPB, 0, stream>>>(d_oldprogs,
                                                              d_win_indices.data(),
                                                              tour_seeds.data(),
                                                              n_progs,
                                                              n_tours,
                                                              tour_size,
                                                              criterion,
                                                              params.parsimony_coefficient);
    CUDA_CHECK(cudaPeekAtLastError());

    // Make sure tournaments have finished running before copying win indices
    CUDA_CHECK(cudaStreamSynchronize(stream));

    // Perform host mutations.
    // Winners [0, n_progs) are primary parents; winners [n_progs, n_tours) are
    // crossover donors, consumed in order via donor_pos.
    auto donor_pos = n_progs;
    for (auto pos = 0; pos < n_progs; ++pos) {
      auto parent_index = d_win_indices.element(pos, stream);

      if (h_nextprogs[pos].mut_type == mutation_t::crossover) {
        // Get secondary index
        auto donor_index = d_win_indices.element(donor_pos, stream);
        donor_pos++;
        crossover(
          h_oldprogs[parent_index], h_oldprogs[donor_index], h_nextprogs[pos], params, h_gen);
      } else if (h_nextprogs[pos].mut_type == mutation_t::subtree) {
        subtree_mutation(h_oldprogs[parent_index], h_nextprogs[pos], params, h_gen);
      } else if (h_nextprogs[pos].mut_type == mutation_t::hoist) {
        hoist_mutation(h_oldprogs[parent_index], h_nextprogs[pos], params, h_gen);
      } else if (h_nextprogs[pos].mut_type == mutation_t::point) {
        point_mutation(h_oldprogs[parent_index], h_nextprogs[pos], params, h_gen);
      } else if (h_nextprogs[pos].mut_type == mutation_t::reproduce) {
        h_nextprogs[pos] = h_oldprogs[parent_index];
      } else {
        // Should not come here
      }
    }
  }

  /* Memcpy individual host nodes to device
     and destroy previous generation device nodes
     TODO: Find a better way to do this.
     NOTE(review): this relies on program's copy ctor deep-copying `nodes` on
     the host (otherwise the delete[] below would free h_nextprogs[i].nodes) —
     confirm against program's definition. The deallocate of the previous
     generation uses h_nextprogs[i].len, which presumably matches the old
     program's allocation size only by convention — verify. */
  for (auto i = 0; i < n_progs; ++i) {
    program tmp(h_nextprogs[i]);
    delete[] tmp.nodes;

    // Set current generation device nodes
    tmp.nodes = (node*)rmm::mr::get_current_device_resource()->allocate(
      h_nextprogs[i].len * sizeof(node), stream);
    raft::copy(tmp.nodes, h_nextprogs[i].nodes, h_nextprogs[i].len, stream);
    // Copy the program struct (with its device node pointer) into the slot.
    raft::copy(d_nextprogs + i, &tmp, 1, stream);

    if (generation > 1) {
      // Free device memory allocated to program nodes in previous generation
      raft::copy(&tmp, d_oldprogs + i, 1, stream);
      rmm::mr::get_current_device_resource()->deallocate(
        tmp.nodes, h_nextprogs[i].len * sizeof(node), stream);
    }

    // tmp must not own the device pointer when its destructor runs.
    tmp.nodes = nullptr;
  }

  // Make sure all copying is done
  CUDA_CHECK(cudaStreamSynchronize(stream));

  // Update raw fitness for all programs
  set_batched_fitness(
    h, n_progs, d_nextprogs, h_nextprogs, params, n_samples, data, y, sample_weights);
}

// Probability left over for plain reproduction, clamped into [0, 1].
float param::p_reproduce() const
{
  auto sum = this->p_crossover + this->p_subtree_mutation + this->p_hoist_mutation +
             this->p_point_mutation;
  auto ret = 1.f - sum;
  return fmaxf(0.f, fminf(ret, 1.f));
}

int param::max_programs() const
{
  // in the worst case every generation's top program ends up reproducing,
  // thereby adding another program into the population
  return this->population_size + this->generations;
}

int param::criterion() const
{
  // Returns 0 if a smaller value is preferred and 1 for the opposite
  // (-1 for an unrecognized metric).
  switch (this->metric) {
    case metric_t::mse: return 0;
    case metric_t::logloss: return 0;
    case metric_t::mae: return 0;
    case metric_t::rmse: return 0;
    case metric_t::pearson: return 1;
    case metric_t::spearman: return 1;
    default: return -1;
  }
}

/**
 * @brief Render a program's prefix-ordered node list as a readable,
 *        parenthesized function-call string, e.g. "( add( X0, 1.0 ) )".
 *
 * ar_stack tracks the number of still-unfilled operands of each open
 * operator; when a count reaches 0 the corresponding ")" is emitted.
 */
std::string stringify(const program& prog)
{
  std::string eqn   = "( ";
  std::string delim = "";
  std::stack<int> ar_stack;
  ar_stack.push(0);

  for (int i = 0; i < prog.len; ++i) {
    if (prog.nodes[i].is_terminal()) {
      eqn += delim;
      if (prog.nodes[i].t == node::type::variable) {
        // variable
        eqn += "X";
        eqn += std::to_string(prog.nodes[i].u.fid);
      } else {
        // const
        eqn += std::to_string(prog.nodes[i].u.val);
      }

      // One operand of the innermost open operator has been consumed.
      int end_elem = ar_stack.top();
      ar_stack.pop();
      ar_stack.push(end_elem - 1);
      // Close every operator whose operand count just hit zero.
      while (ar_stack.top() == 0) {
        ar_stack.pop();
        eqn += ") ";
        if (ar_stack.empty()) { break; }
        end_elem = ar_stack.top();
        ar_stack.pop();
        ar_stack.push(end_elem - 1);
      }
      delim = ", ";
    } else {
      ar_stack.push(prog.nodes[i].arity());
      eqn += delim;
      switch (prog.nodes[i].t) {
        // binary operators
        case node::type::add: eqn += "add("; break;
        case node::type::atan2: eqn += "atan2("; break;
        case node::type::div: eqn += "div("; break;
        case node::type::fdim: eqn += "fdim("; break;
        case node::type::max: eqn += "max("; break;
        case node::type::min: eqn += "min("; break;
        case node::type::mul: eqn += "mult("; break;
        case node::type::pow: eqn += "pow("; break;
        case node::type::sub: eqn += "sub("; break;
        // unary operators
        case node::type::abs: eqn += "abs("; break;
        case node::type::acos: eqn += "acos("; break;
        case node::type::acosh: eqn += "acosh("; break;
        case node::type::asin: eqn += "asin("; break;
        case node::type::asinh: eqn += "asinh("; break;
        case node::type::atan: eqn += "atan("; break;
        case node::type::atanh: eqn += "atanh("; break;
        case node::type::cbrt: eqn += "cbrt("; break;
        case node::type::cos: eqn += "cos("; break;
        case node::type::cosh: eqn += "cosh("; break;
        case node::type::cube: eqn += "cube("; break;
        case node::type::exp: eqn += "exp("; break;
        case node::type::inv: eqn += "inv("; break;
        case node::type::log: eqn += "log("; break;
        case node::type::neg: eqn += "neg("; break;
        case node::type::rcbrt: eqn += "rcbrt("; break;
        case node::type::rsqrt: eqn += "rsqrt("; break;
        case node::type::sin: eqn += "sin("; break;
        case node::type::sinh: eqn += "sinh("; break;
        case node::type::sq: eqn += "sq("; break;
        case node::type::sqrt: eqn += "sqrt("; break;
        case node::type::tan: eqn += "tan("; break;
        case node::type::tanh: eqn += "tanh("; break;
        default: break;
      }
      eqn += " ";
      delim = "";
    }
  }

  eqn += ")";
  return eqn;
}

/**
 * @brief Train a symbolic-regression population.
 *
 * Evolves params.generations generations (or fewer on early stopping),
 * double-buffering the device program arrays between generations. On exit,
 * final_progs points to the last generation's device programs and history
 * holds the per-generation host copies (only the last one when
 * params.low_memory is set).
 */
void symFit(const raft::handle_t& handle,
            const float* input,
            const float* labels,
            const float* sample_weights,
            const int n_rows,
            const int n_cols,
            param& params,
            program_t& final_progs,
            std::vector<std::vector<program>>& history)
{
  cudaStream_t stream = handle.get_stream();

  // Update arity map in params - Need to do this only here, as all operations will call Fit atleast
  // once
  for (auto f : params.function_set) {
    int ar = 1;
    if (node::type::binary_begin <= f && f <= node::type::binary_end) { ar = 2; }

    if (params.arity_set.find(ar) == params.arity_set.end()) {
      // Create map entry for current arity
      std::vector<node::type> vec_f(1, f);
      params.arity_set.insert(std::make_pair(ar, vec_f));
    } else {
      // Insert into map, avoiding duplicates
      std::vector<node::type> vec_f = params.arity_set.at(ar);
      if (std::find(vec_f.begin(), vec_f.end(), f) == vec_f.end()) {
        params.arity_set.at(ar).push_back(f);
      }
    }
  }

  // Check terminalRatio to dynamically set it
  bool growAuto = (params.terminalRatio == 0.0f);
  if (growAuto) {
    params.terminalRatio =
      1.0f * params.num_features / (params.num_features + params.function_set.size());
  }

  /* Initializations */
  std::vector<program> h_currprogs(params.population_size);
  std::vector<program> h_nextprogs(params.population_size);
  std::vector<float> h_fitness(params.population_size, 0.0f);

  program_t d_currprogs;  // pointer to current programs
  d_currprogs = (program_t)rmm::mr::get_current_device_resource()->allocate(
    params.population_size * sizeof(program), stream);
  program_t d_nextprogs = final_progs;  // Reuse memory already allocated for final_progs
  final_progs           = nullptr;

  std::mt19937_64 h_gen_engine(params.random_state);
  std::uniform_int_distribution<int> seed_dist;

  /* Begin training */
  auto gen          = 0;
  params.num_epochs = 0;
  while (gen < params.generations) {
    // Generate an init seed
    auto init_seed = seed_dist(h_gen_engine);

    // Evolve current generation
    parallel_evolve(handle,
                    h_currprogs,
                    d_currprogs,
                    h_nextprogs,
                    d_nextprogs,
                    n_rows,
                    input,
                    labels,
                    sample_weights,
                    params,
                    (gen + 1),
                    init_seed);

    // Update epochs
    ++params.num_epochs;

    // Update h_currprogs (deepcopy)
    h_currprogs = h_nextprogs;

    // Update evolution history, depending on the low memory flag
    if (!params.low_memory || gen == 0) {
      history.push_back(h_currprogs);
    } else {
      history.back() = h_currprogs;
    }

    // Swap d_currprogs (to preserve device memory)
    program_t d_tmp = d_currprogs;
    d_currprogs     = d_nextprogs;
    d_nextprogs     = d_tmp;

    // Update fitness array [host] and compute stopping criterion
    auto crit    = params.criterion();
    h_fitness[0] = h_currprogs[0].raw_fitness_;
    auto opt_fit = h_fitness[0];
    for (auto i = 1; i < params.population_size; ++i) {
      h_fitness[i] = h_currprogs[i].raw_fitness_;
      if (crit == 0) {
        opt_fit = std::min(opt_fit, h_fitness[i]);
      } else {
        opt_fit = std::max(opt_fit, h_fitness[i]);
      }
    }

    // Check for stop criterion
    if ((crit == 0 && opt_fit <= params.stopping_criteria) ||
        (crit == 1 && opt_fit >= params.stopping_criteria)) {
      CUML_LOG_DEBUG(
        "Early stopping criterion reached in Generation #%d, fitness=%f", (gen + 1), opt_fit);
      break;
    }

    // Update generation
    ++gen;
  }

  // Set final generation programs
  final_progs = d_currprogs;

  // Reset automatic growth parameter
  if (growAuto) { params.terminalRatio = 0.0f; }

  // Deallocate the previous generation device memory
  rmm::mr::get_current_device_resource()->deallocate(
    d_nextprogs, params.population_size * sizeof(program), stream);
  d_currprogs = nullptr;
  d_nextprogs = nullptr;
}

// Run the best program on `input` ([n_rows x 1] output).
void symRegPredict(const raft::handle_t& handle,
                   const float* input,
                   const int n_rows,
                   const program_t& best_prog,
                   float* output)
{
  // Assume best_prog is on device
  execute(handle, best_prog, n_rows, 1, input, output);
}

// Binary-classification probabilities: raw program output is transformed
// into P(class 1) (second column) and P(class 0) (first column).
void symClfPredictProbs(const raft::handle_t& handle,
                        const float* input,
                        const int n_rows,
                        const param& params,
                        const program_t& best_prog,
                        float* output)
{
  cudaStream_t stream = handle.get_stream();

  // Assume output is of shape [n_rows, 2] in colMajor format
  execute(handle, best_prog, n_rows, 1, input, output);

  // Apply 2 map operations to get probabilities!
  // TODO: Modification needed for n_classes
  if (params.transformer == transformer_t::sigmoid) {
    // Column 1 <- sigmoid(raw score)
    raft::linalg::unaryOp(
      output + n_rows,
      output,
      n_rows,
      [] __device__(float in) { return 1.0f / (1.0f + expf(-in)); },
      stream);
    // Column 0 <- 1 - column 1
    raft::linalg::unaryOp(
      output, output + n_rows, n_rows, [] __device__(float in) { return 1.0f - in; }, stream);
  } else {
    // Only sigmoid supported for now
  }
}

// Binary-classification labels via per-row argmax over the two probabilities.
void symClfPredict(const raft::handle_t& handle,
                   const float* input,
                   const int n_rows,
                   const param& params,
                   const program_t& best_prog,
                   float* output)
{
  cudaStream_t stream = handle.get_stream();

  // Memory for probabilities
  rmm::device_uvector<float> probs(2 * n_rows, stream);
  symClfPredictProbs(handle, input, n_rows, params, best_prog, probs.data());

  // Take argmax along columns
  // TODO: Further modification needed for n_classes
  raft::linalg::binaryOp(
    output,
    probs.data(),
    probs.data() + n_rows,
    n_rows,
    [] __device__(float p0, float p1) { return 1.0f * (p0 <= p1); },
    stream);
}

// Feature transform: evaluate the hall-of-fame programs on `input`,
// producing [n_rows x params.n_components] in `output`.
void symTransform(const raft::handle_t& handle,
                  const float* input,
                  const param& params,
                  const program_t& final_progs,
                  const int n_rows,
                  const int n_cols,
                  float* output)
{
  cudaStream_t stream = handle.get_stream();
  // Execute final_progs (ordered by fitness) on input
  // output of size [n_rows, hall_of_fame]
  execute(handle, final_progs, n_rows, params.n_components, input, output);
}

}  // namespace genetic
}  // namespace cuml
the_stack
#include "IPsecAES_kernel.hh"
#include <openssl/aes.h>
#include <openssl/md5.h>

/* The index is given by the order in get_used_datablocks(). */
#define dbid_enc_payloads_d (0)
#define dbid_flow_ids_d (1)
#define dbid_iv_d (2)
#define dbid_aes_block_info_d (3)

#ifndef __AES_CORE__ /*same constants are defined in ssl/aes/aes_core.h */
#define __AES_CORE__

/* AES key-schedule round constants (Rcon), stored in the high byte of each
 * 32-bit word, placed in constant memory for broadcast reads. */
__constant__ __device__ uint32_t rcon[10] = {
  0x01000000, 0x02000000, 0x04000000, 0x08000000, 0x10000000,
  0x20000000, 0x40000000, 0x80000000, 0x1B000000, 0x36000000,
  /* for 128-bit blocks, Rijndael never uses more than 10 rcon values */
};

/* AES encryption T-table #0 (combined SubBytes + MixColumns lookup, one of
 * four byte-rotated variants). Read-only; constant memory broadcasts when all
 * lanes of a warp read the same entry. Values must match ssl/aes/aes_core.h. */
__constant__ __device__ uint32_t Te0_ConstMem[256] = {
  0xc66363a5U, 0xf87c7c84U, 0xee777799U, 0xf67b7b8dU,
  0xfff2f20dU, 0xd66b6bbdU, 0xde6f6fb1U, 0x91c5c554U,
  0x60303050U, 0x02010103U, 0xce6767a9U, 0x562b2b7dU,
  0xe7fefe19U, 0xb5d7d762U, 0x4dababe6U, 0xec76769aU,
  0x8fcaca45U, 0x1f82829dU, 0x89c9c940U, 0xfa7d7d87U,
  0xeffafa15U, 0xb25959ebU, 0x8e4747c9U, 0xfbf0f00bU,
  0x41adadecU, 0xb3d4d467U, 0x5fa2a2fdU, 0x45afafeaU,
  0x239c9cbfU, 0x53a4a4f7U, 0xe4727296U, 0x9bc0c05bU,
  0x75b7b7c2U, 0xe1fdfd1cU, 0x3d9393aeU, 0x4c26266aU,
  0x6c36365aU, 0x7e3f3f41U, 0xf5f7f702U, 0x83cccc4fU,
  0x6834345cU, 0x51a5a5f4U, 0xd1e5e534U, 0xf9f1f108U,
  0xe2717193U, 0xabd8d873U, 0x62313153U, 0x2a15153fU,
  0x0804040cU, 0x95c7c752U, 0x46232365U, 0x9dc3c35eU,
  0x30181828U, 0x379696a1U, 0x0a05050fU, 0x2f9a9ab5U,
  0x0e070709U, 0x24121236U, 0x1b80809bU, 0xdfe2e23dU,
  0xcdebeb26U, 0x4e272769U, 0x7fb2b2cdU, 0xea75759fU,
  0x1209091bU, 0x1d83839eU, 0x582c2c74U, 0x341a1a2eU,
  0x361b1b2dU, 0xdc6e6eb2U, 0xb45a5aeeU, 0x5ba0a0fbU,
  0xa45252f6U, 0x763b3b4dU, 0xb7d6d661U, 0x7db3b3ceU,
  0x5229297bU, 0xdde3e33eU, 0x5e2f2f71U, 0x13848497U,
  0xa65353f5U, 0xb9d1d168U, 0x00000000U, 0xc1eded2cU,
  0x40202060U, 0xe3fcfc1fU, 0x79b1b1c8U, 0xb65b5bedU,
  0xd46a6abeU, 0x8dcbcb46U, 0x67bebed9U, 0x7239394bU,
  0x944a4adeU, 0x984c4cd4U, 0xb05858e8U, 0x85cfcf4aU,
  0xbbd0d06bU, 0xc5efef2aU, 0x4faaaae5U, 0xedfbfb16U,
  0x864343c5U, 0x9a4d4dd7U, 0x66333355U, 0x11858594U,
  0x8a4545cfU, 0xe9f9f910U, 0x04020206U, 0xfe7f7f81U,
  0xa05050f0U, 0x783c3c44U, 0x259f9fbaU, 0x4ba8a8e3U,
  0xa25151f3U, 0x5da3a3feU, 0x804040c0U, 0x058f8f8aU,
  0x3f9292adU, 0x219d9dbcU, 0x70383848U, 0xf1f5f504U,
  0x63bcbcdfU, 0x77b6b6c1U, 0xafdada75U, 0x42212163U,
  0x20101030U, 0xe5ffff1aU, 0xfdf3f30eU, 0xbfd2d26dU,
  0x81cdcd4cU, 0x180c0c14U, 0x26131335U, 0xc3ecec2fU,
  0xbe5f5fe1U, 0x359797a2U, 0x884444ccU, 0x2e171739U,
  0x93c4c457U, 0x55a7a7f2U, 0xfc7e7e82U, 0x7a3d3d47U,
  0xc86464acU, 0xba5d5de7U, 0x3219192bU, 0xe6737395U,
  0xc06060a0U, 0x19818198U, 0x9e4f4fd1U, 0xa3dcdc7fU,
  0x44222266U, 0x542a2a7eU, 0x3b9090abU, 0x0b888883U,
  0x8c4646caU, 0xc7eeee29U, 0x6bb8b8d3U, 0x2814143cU,
  0xa7dede79U, 0xbc5e5ee2U, 0x160b0b1dU, 0xaddbdb76U,
  0xdbe0e03bU, 0x64323256U, 0x743a3a4eU, 0x140a0a1eU,
  0x924949dbU, 0x0c06060aU, 0x4824246cU, 0xb85c5ce4U,
  0x9fc2c25dU, 0xbdd3d36eU, 0x43acacefU, 0xc46262a6U,
  0x399191a8U, 0x319595a4U, 0xd3e4e437U, 0xf279798bU,
  0xd5e7e732U, 0x8bc8c843U, 0x6e373759U, 0xda6d6db7U,
  0x018d8d8cU, 0xb1d5d564U, 0x9c4e4ed2U, 0x49a9a9e0U,
  0xd86c6cb4U, 0xac5656faU, 0xf3f4f407U, 0xcfeaea25U,
  0xca6565afU, 0xf47a7a8eU, 0x47aeaee9U, 0x10080818U,
  0x6fbabad5U, 0xf0787888U, 0x4a25256fU, 0x5c2e2e72U,
  0x381c1c24U, 0x57a6a6f1U, 0x73b4b4c7U, 0x97c6c651U,
  0xcbe8e823U, 0xa1dddd7cU, 0xe874749cU, 0x3e1f1f21U,
  0x964b4bddU, 0x61bdbddcU, 0x0d8b8b86U, 0x0f8a8a85U,
  0xe0707090U, 0x7c3e3e42U, 0x71b5b5c4U, 0xcc6666aaU,
  0x904848d8U, 0x06030305U, 0xf7f6f601U, 0x1c0e0e12U,
  0xc26161a3U, 0x6a35355fU, 0xae5757f9U, 0x69b9b9d0U,
  0x17868691U, 0x99c1c158U, 0x3a1d1d27U, 0x279e9eb9U,
  0xd9e1e138U, 0xebf8f813U, 0x2b9898b3U, 0x22111133U,
  0xd26969bbU, 0xa9d9d970U, 0x078e8e89U, 0x339494a7U,
  0x2d9b9bb6U, 0x3c1e1e22U, 0x15878792U, 0xc9e9e920U,
  0x87cece49U, 0xaa5555ffU, 0x50282878U, 0xa5dfdf7aU,
  0x038c8c8fU, 0x59a1a1f8U, 0x09898980U, 0x1a0d0d17U,
  0x65bfbfdaU, 0xd7e6e631U, 0x844242c6U, 0xd06868b8U,
  0x824141c3U, 0x299999b0U, 0x5a2d2d77U, 0x1e0f0f11U,
  0x7bb0b0cbU, 0xa85454fcU, 0x6dbbbbd6U, 0x2c16163aU,
};
__constant__ __device__ uint32_t Te1_ConstMem[256] = { 0xa5c66363U, 0x84f87c7cU, 0x99ee7777U, 0x8df67b7bU, 0x0dfff2f2U, 0xbdd66b6bU, 0xb1de6f6fU, 0x5491c5c5U, 0x50603030U, 0x03020101U, 0xa9ce6767U, 0x7d562b2bU, 0x19e7fefeU, 0x62b5d7d7U, 0xe64dababU, 0x9aec7676U, 0x458fcacaU, 0x9d1f8282U, 0x4089c9c9U, 0x87fa7d7dU, 0x15effafaU, 0xebb25959U, 0xc98e4747U, 0x0bfbf0f0U, 0xec41adadU, 0x67b3d4d4U, 0xfd5fa2a2U, 0xea45afafU, 0xbf239c9cU, 0xf753a4a4U, 0x96e47272U, 0x5b9bc0c0U, 0xc275b7b7U, 0x1ce1fdfdU, 0xae3d9393U, 0x6a4c2626U, 0x5a6c3636U, 0x417e3f3fU, 0x02f5f7f7U, 0x4f83ccccU, 0x5c683434U, 0xf451a5a5U, 0x34d1e5e5U, 0x08f9f1f1U, 0x93e27171U, 0x73abd8d8U, 0x53623131U, 0x3f2a1515U, 0x0c080404U, 0x5295c7c7U, 0x65462323U, 0x5e9dc3c3U, 0x28301818U, 0xa1379696U, 0x0f0a0505U, 0xb52f9a9aU, 0x090e0707U, 0x36241212U, 0x9b1b8080U, 0x3ddfe2e2U, 0x26cdebebU, 0x694e2727U, 0xcd7fb2b2U, 0x9fea7575U, 0x1b120909U, 0x9e1d8383U, 0x74582c2cU, 0x2e341a1aU, 0x2d361b1bU, 0xb2dc6e6eU, 0xeeb45a5aU, 0xfb5ba0a0U, 0xf6a45252U, 0x4d763b3bU, 0x61b7d6d6U, 0xce7db3b3U, 0x7b522929U, 0x3edde3e3U, 0x715e2f2fU, 0x97138484U, 0xf5a65353U, 0x68b9d1d1U, 0x00000000U, 0x2cc1ededU, 0x60402020U, 0x1fe3fcfcU, 0xc879b1b1U, 0xedb65b5bU, 0xbed46a6aU, 0x468dcbcbU, 0xd967bebeU, 0x4b723939U, 0xde944a4aU, 0xd4984c4cU, 0xe8b05858U, 0x4a85cfcfU, 0x6bbbd0d0U, 0x2ac5efefU, 0xe54faaaaU, 0x16edfbfbU, 0xc5864343U, 0xd79a4d4dU, 0x55663333U, 0x94118585U, 0xcf8a4545U, 0x10e9f9f9U, 0x06040202U, 0x81fe7f7fU, 0xf0a05050U, 0x44783c3cU, 0xba259f9fU, 0xe34ba8a8U, 0xf3a25151U, 0xfe5da3a3U, 0xc0804040U, 0x8a058f8fU, 0xad3f9292U, 0xbc219d9dU, 0x48703838U, 0x04f1f5f5U, 0xdf63bcbcU, 0xc177b6b6U, 0x75afdadaU, 0x63422121U, 0x30201010U, 0x1ae5ffffU, 0x0efdf3f3U, 0x6dbfd2d2U, 0x4c81cdcdU, 0x14180c0cU, 0x35261313U, 0x2fc3ececU, 0xe1be5f5fU, 0xa2359797U, 0xcc884444U, 0x392e1717U, 0x5793c4c4U, 0xf255a7a7U, 0x82fc7e7eU, 0x477a3d3dU, 0xacc86464U, 0xe7ba5d5dU, 0x2b321919U, 0x95e67373U, 0xa0c06060U, 0x98198181U, 0xd19e4f4fU, 0x7fa3dcdcU, 0x66442222U, 
0x7e542a2aU, 0xab3b9090U, 0x830b8888U, 0xca8c4646U, 0x29c7eeeeU, 0xd36bb8b8U, 0x3c281414U, 0x79a7dedeU, 0xe2bc5e5eU, 0x1d160b0bU, 0x76addbdbU, 0x3bdbe0e0U, 0x56643232U, 0x4e743a3aU, 0x1e140a0aU, 0xdb924949U, 0x0a0c0606U, 0x6c482424U, 0xe4b85c5cU, 0x5d9fc2c2U, 0x6ebdd3d3U, 0xef43acacU, 0xa6c46262U, 0xa8399191U, 0xa4319595U, 0x37d3e4e4U, 0x8bf27979U, 0x32d5e7e7U, 0x438bc8c8U, 0x596e3737U, 0xb7da6d6dU, 0x8c018d8dU, 0x64b1d5d5U, 0xd29c4e4eU, 0xe049a9a9U, 0xb4d86c6cU, 0xfaac5656U, 0x07f3f4f4U, 0x25cfeaeaU, 0xafca6565U, 0x8ef47a7aU, 0xe947aeaeU, 0x18100808U, 0xd56fbabaU, 0x88f07878U, 0x6f4a2525U, 0x725c2e2eU, 0x24381c1cU, 0xf157a6a6U, 0xc773b4b4U, 0x5197c6c6U, 0x23cbe8e8U, 0x7ca1ddddU, 0x9ce87474U, 0x213e1f1fU, 0xdd964b4bU, 0xdc61bdbdU, 0x860d8b8bU, 0x850f8a8aU, 0x90e07070U, 0x427c3e3eU, 0xc471b5b5U, 0xaacc6666U, 0xd8904848U, 0x05060303U, 0x01f7f6f6U, 0x121c0e0eU, 0xa3c26161U, 0x5f6a3535U, 0xf9ae5757U, 0xd069b9b9U, 0x91178686U, 0x5899c1c1U, 0x273a1d1dU, 0xb9279e9eU, 0x38d9e1e1U, 0x13ebf8f8U, 0xb32b9898U, 0x33221111U, 0xbbd26969U, 0x70a9d9d9U, 0x89078e8eU, 0xa7339494U, 0xb62d9b9bU, 0x223c1e1eU, 0x92158787U, 0x20c9e9e9U, 0x4987ceceU, 0xffaa5555U, 0x78502828U, 0x7aa5dfdfU, 0x8f038c8cU, 0xf859a1a1U, 0x80098989U, 0x171a0d0dU, 0xda65bfbfU, 0x31d7e6e6U, 0xc6844242U, 0xb8d06868U, 0xc3824141U, 0xb0299999U, 0x775a2d2dU, 0x111e0f0fU, 0xcb7bb0b0U, 0xfca85454U, 0xd66dbbbbU, 0x3a2c1616U, }; __constant__ __device__ uint32_t Te2_ConstMem[256] = { 0x63a5c663U, 0x7c84f87cU, 0x7799ee77U, 0x7b8df67bU, 0xf20dfff2U, 0x6bbdd66bU, 0x6fb1de6fU, 0xc55491c5U, 0x30506030U, 0x01030201U, 0x67a9ce67U, 0x2b7d562bU, 0xfe19e7feU, 0xd762b5d7U, 0xabe64dabU, 0x769aec76U, 0xca458fcaU, 0x829d1f82U, 0xc94089c9U, 0x7d87fa7dU, 0xfa15effaU, 0x59ebb259U, 0x47c98e47U, 0xf00bfbf0U, 0xadec41adU, 0xd467b3d4U, 0xa2fd5fa2U, 0xafea45afU, 0x9cbf239cU, 0xa4f753a4U, 0x7296e472U, 0xc05b9bc0U, 0xb7c275b7U, 0xfd1ce1fdU, 0x93ae3d93U, 0x266a4c26U, 0x365a6c36U, 0x3f417e3fU, 0xf702f5f7U, 0xcc4f83ccU, 0x345c6834U, 0xa5f451a5U, 
0xe534d1e5U, 0xf108f9f1U, 0x7193e271U, 0xd873abd8U, 0x31536231U, 0x153f2a15U, 0x040c0804U, 0xc75295c7U, 0x23654623U, 0xc35e9dc3U, 0x18283018U, 0x96a13796U, 0x050f0a05U, 0x9ab52f9aU, 0x07090e07U, 0x12362412U, 0x809b1b80U, 0xe23ddfe2U, 0xeb26cdebU, 0x27694e27U, 0xb2cd7fb2U, 0x759fea75U, 0x091b1209U, 0x839e1d83U, 0x2c74582cU, 0x1a2e341aU, 0x1b2d361bU, 0x6eb2dc6eU, 0x5aeeb45aU, 0xa0fb5ba0U, 0x52f6a452U, 0x3b4d763bU, 0xd661b7d6U, 0xb3ce7db3U, 0x297b5229U, 0xe33edde3U, 0x2f715e2fU, 0x84971384U, 0x53f5a653U, 0xd168b9d1U, 0x00000000U, 0xed2cc1edU, 0x20604020U, 0xfc1fe3fcU, 0xb1c879b1U, 0x5bedb65bU, 0x6abed46aU, 0xcb468dcbU, 0xbed967beU, 0x394b7239U, 0x4ade944aU, 0x4cd4984cU, 0x58e8b058U, 0xcf4a85cfU, 0xd06bbbd0U, 0xef2ac5efU, 0xaae54faaU, 0xfb16edfbU, 0x43c58643U, 0x4dd79a4dU, 0x33556633U, 0x85941185U, 0x45cf8a45U, 0xf910e9f9U, 0x02060402U, 0x7f81fe7fU, 0x50f0a050U, 0x3c44783cU, 0x9fba259fU, 0xa8e34ba8U, 0x51f3a251U, 0xa3fe5da3U, 0x40c08040U, 0x8f8a058fU, 0x92ad3f92U, 0x9dbc219dU, 0x38487038U, 0xf504f1f5U, 0xbcdf63bcU, 0xb6c177b6U, 0xda75afdaU, 0x21634221U, 0x10302010U, 0xff1ae5ffU, 0xf30efdf3U, 0xd26dbfd2U, 0xcd4c81cdU, 0x0c14180cU, 0x13352613U, 0xec2fc3ecU, 0x5fe1be5fU, 0x97a23597U, 0x44cc8844U, 0x17392e17U, 0xc45793c4U, 0xa7f255a7U, 0x7e82fc7eU, 0x3d477a3dU, 0x64acc864U, 0x5de7ba5dU, 0x192b3219U, 0x7395e673U, 0x60a0c060U, 0x81981981U, 0x4fd19e4fU, 0xdc7fa3dcU, 0x22664422U, 0x2a7e542aU, 0x90ab3b90U, 0x88830b88U, 0x46ca8c46U, 0xee29c7eeU, 0xb8d36bb8U, 0x143c2814U, 0xde79a7deU, 0x5ee2bc5eU, 0x0b1d160bU, 0xdb76addbU, 0xe03bdbe0U, 0x32566432U, 0x3a4e743aU, 0x0a1e140aU, 0x49db9249U, 0x060a0c06U, 0x246c4824U, 0x5ce4b85cU, 0xc25d9fc2U, 0xd36ebdd3U, 0xacef43acU, 0x62a6c462U, 0x91a83991U, 0x95a43195U, 0xe437d3e4U, 0x798bf279U, 0xe732d5e7U, 0xc8438bc8U, 0x37596e37U, 0x6db7da6dU, 0x8d8c018dU, 0xd564b1d5U, 0x4ed29c4eU, 0xa9e049a9U, 0x6cb4d86cU, 0x56faac56U, 0xf407f3f4U, 0xea25cfeaU, 0x65afca65U, 0x7a8ef47aU, 0xaee947aeU, 0x08181008U, 0xbad56fbaU, 0x7888f078U, 0x256f4a25U, 
0x2e725c2eU, 0x1c24381cU, 0xa6f157a6U, 0xb4c773b4U, 0xc65197c6U, 0xe823cbe8U, 0xdd7ca1ddU, 0x749ce874U, 0x1f213e1fU, 0x4bdd964bU, 0xbddc61bdU, 0x8b860d8bU, 0x8a850f8aU, 0x7090e070U, 0x3e427c3eU, 0xb5c471b5U, 0x66aacc66U, 0x48d89048U, 0x03050603U, 0xf601f7f6U, 0x0e121c0eU, 0x61a3c261U, 0x355f6a35U, 0x57f9ae57U, 0xb9d069b9U, 0x86911786U, 0xc15899c1U, 0x1d273a1dU, 0x9eb9279eU, 0xe138d9e1U, 0xf813ebf8U, 0x98b32b98U, 0x11332211U, 0x69bbd269U, 0xd970a9d9U, 0x8e89078eU, 0x94a73394U, 0x9bb62d9bU, 0x1e223c1eU, 0x87921587U, 0xe920c9e9U, 0xce4987ceU, 0x55ffaa55U, 0x28785028U, 0xdf7aa5dfU, 0x8c8f038cU, 0xa1f859a1U, 0x89800989U, 0x0d171a0dU, 0xbfda65bfU, 0xe631d7e6U, 0x42c68442U, 0x68b8d068U, 0x41c38241U, 0x99b02999U, 0x2d775a2dU, 0x0f111e0fU, 0xb0cb7bb0U, 0x54fca854U, 0xbbd66dbbU, 0x163a2c16U, }; __constant__ __device__ uint32_t Te3_ConstMem[256] = { 0x6363a5c6U, 0x7c7c84f8U, 0x777799eeU, 0x7b7b8df6U, 0xf2f20dffU, 0x6b6bbdd6U, 0x6f6fb1deU, 0xc5c55491U, 0x30305060U, 0x01010302U, 0x6767a9ceU, 0x2b2b7d56U, 0xfefe19e7U, 0xd7d762b5U, 0xababe64dU, 0x76769aecU, 0xcaca458fU, 0x82829d1fU, 0xc9c94089U, 0x7d7d87faU, 0xfafa15efU, 0x5959ebb2U, 0x4747c98eU, 0xf0f00bfbU, 0xadadec41U, 0xd4d467b3U, 0xa2a2fd5fU, 0xafafea45U, 0x9c9cbf23U, 0xa4a4f753U, 0x727296e4U, 0xc0c05b9bU, 0xb7b7c275U, 0xfdfd1ce1U, 0x9393ae3dU, 0x26266a4cU, 0x36365a6cU, 0x3f3f417eU, 0xf7f702f5U, 0xcccc4f83U, 0x34345c68U, 0xa5a5f451U, 0xe5e534d1U, 0xf1f108f9U, 0x717193e2U, 0xd8d873abU, 0x31315362U, 0x15153f2aU, 0x04040c08U, 0xc7c75295U, 0x23236546U, 0xc3c35e9dU, 0x18182830U, 0x9696a137U, 0x05050f0aU, 0x9a9ab52fU, 0x0707090eU, 0x12123624U, 0x80809b1bU, 0xe2e23ddfU, 0xebeb26cdU, 0x2727694eU, 0xb2b2cd7fU, 0x75759feaU, 0x09091b12U, 0x83839e1dU, 0x2c2c7458U, 0x1a1a2e34U, 0x1b1b2d36U, 0x6e6eb2dcU, 0x5a5aeeb4U, 0xa0a0fb5bU, 0x5252f6a4U, 0x3b3b4d76U, 0xd6d661b7U, 0xb3b3ce7dU, 0x29297b52U, 0xe3e33eddU, 0x2f2f715eU, 0x84849713U, 0x5353f5a6U, 0xd1d168b9U, 0x00000000U, 0xeded2cc1U, 0x20206040U, 0xfcfc1fe3U, 0xb1b1c879U, 0x5b5bedb6U, 
0x6a6abed4U, 0xcbcb468dU, 0xbebed967U, 0x39394b72U, 0x4a4ade94U, 0x4c4cd498U, 0x5858e8b0U, 0xcfcf4a85U, 0xd0d06bbbU, 0xefef2ac5U, 0xaaaae54fU, 0xfbfb16edU, 0x4343c586U, 0x4d4dd79aU, 0x33335566U, 0x85859411U, 0x4545cf8aU, 0xf9f910e9U, 0x02020604U, 0x7f7f81feU, 0x5050f0a0U, 0x3c3c4478U, 0x9f9fba25U, 0xa8a8e34bU, 0x5151f3a2U, 0xa3a3fe5dU, 0x4040c080U, 0x8f8f8a05U, 0x9292ad3fU, 0x9d9dbc21U, 0x38384870U, 0xf5f504f1U, 0xbcbcdf63U, 0xb6b6c177U, 0xdada75afU, 0x21216342U, 0x10103020U, 0xffff1ae5U, 0xf3f30efdU, 0xd2d26dbfU, 0xcdcd4c81U, 0x0c0c1418U, 0x13133526U, 0xecec2fc3U, 0x5f5fe1beU, 0x9797a235U, 0x4444cc88U, 0x1717392eU, 0xc4c45793U, 0xa7a7f255U, 0x7e7e82fcU, 0x3d3d477aU, 0x6464acc8U, 0x5d5de7baU, 0x19192b32U, 0x737395e6U, 0x6060a0c0U, 0x81819819U, 0x4f4fd19eU, 0xdcdc7fa3U, 0x22226644U, 0x2a2a7e54U, 0x9090ab3bU, 0x8888830bU, 0x4646ca8cU, 0xeeee29c7U, 0xb8b8d36bU, 0x14143c28U, 0xdede79a7U, 0x5e5ee2bcU, 0x0b0b1d16U, 0xdbdb76adU, 0xe0e03bdbU, 0x32325664U, 0x3a3a4e74U, 0x0a0a1e14U, 0x4949db92U, 0x06060a0cU, 0x24246c48U, 0x5c5ce4b8U, 0xc2c25d9fU, 0xd3d36ebdU, 0xacacef43U, 0x6262a6c4U, 0x9191a839U, 0x9595a431U, 0xe4e437d3U, 0x79798bf2U, 0xe7e732d5U, 0xc8c8438bU, 0x3737596eU, 0x6d6db7daU, 0x8d8d8c01U, 0xd5d564b1U, 0x4e4ed29cU, 0xa9a9e049U, 0x6c6cb4d8U, 0x5656faacU, 0xf4f407f3U, 0xeaea25cfU, 0x6565afcaU, 0x7a7a8ef4U, 0xaeaee947U, 0x08081810U, 0xbabad56fU, 0x787888f0U, 0x25256f4aU, 0x2e2e725cU, 0x1c1c2438U, 0xa6a6f157U, 0xb4b4c773U, 0xc6c65197U, 0xe8e823cbU, 0xdddd7ca1U, 0x74749ce8U, 0x1f1f213eU, 0x4b4bdd96U, 0xbdbddc61U, 0x8b8b860dU, 0x8a8a850fU, 0x707090e0U, 0x3e3e427cU, 0xb5b5c471U, 0x6666aaccU, 0x4848d890U, 0x03030506U, 0xf6f601f7U, 0x0e0e121cU, 0x6161a3c2U, 0x35355f6aU, 0x5757f9aeU, 0xb9b9d069U, 0x86869117U, 0xc1c15899U, 0x1d1d273aU, 0x9e9eb927U, 0xe1e138d9U, 0xf8f813ebU, 0x9898b32bU, 0x11113322U, 0x6969bbd2U, 0xd9d970a9U, 0x8e8e8907U, 0x9494a733U, 0x9b9bb62dU, 0x1e1e223cU, 0x87879215U, 0xe9e920c9U, 0xcece4987U, 0x5555ffaaU, 0x28287850U, 0xdfdf7aa5U, 0x8c8c8f03U, 
0xa1a1f859U, 0x89898009U, 0x0d0d171aU, 0xbfbfda65U, 0xe6e631d7U, 0x4242c684U, 0x6868b8d0U, 0x4141c382U, 0x9999b029U, 0x2d2d775aU, 0x0f0f111eU, 0xb0b0cb7bU, 0x5454fca8U, 0xbbbbd66dU, 0x16163a2cU, }; __constant__ __device__ uint32_t Td0_ConstMem[256] = { 0x51f4a750U, 0x7e416553U, 0x1a17a4c3U, 0x3a275e96U, 0x3bab6bcbU, 0x1f9d45f1U, 0xacfa58abU, 0x4be30393U, 0x2030fa55U, 0xad766df6U, 0x88cc7691U, 0xf5024c25U, 0x4fe5d7fcU, 0xc52acbd7U, 0x26354480U, 0xb562a38fU, 0xdeb15a49U, 0x25ba1b67U, 0x45ea0e98U, 0x5dfec0e1U, 0xc32f7502U, 0x814cf012U, 0x8d4697a3U, 0x6bd3f9c6U, 0x038f5fe7U, 0x15929c95U, 0xbf6d7aebU, 0x955259daU, 0xd4be832dU, 0x587421d3U, 0x49e06929U, 0x8ec9c844U, 0x75c2896aU, 0xf48e7978U, 0x99583e6bU, 0x27b971ddU, 0xbee14fb6U, 0xf088ad17U, 0xc920ac66U, 0x7dce3ab4U, 0x63df4a18U, 0xe51a3182U, 0x97513360U, 0x62537f45U, 0xb16477e0U, 0xbb6bae84U, 0xfe81a01cU, 0xf9082b94U, 0x70486858U, 0x8f45fd19U, 0x94de6c87U, 0x527bf8b7U, 0xab73d323U, 0x724b02e2U, 0xe31f8f57U, 0x6655ab2aU, 0xb2eb2807U, 0x2fb5c203U, 0x86c57b9aU, 0xd33708a5U, 0x302887f2U, 0x23bfa5b2U, 0x02036abaU, 0xed16825cU, 0x8acf1c2bU, 0xa779b492U, 0xf307f2f0U, 0x4e69e2a1U, 0x65daf4cdU, 0x0605bed5U, 0xd134621fU, 0xc4a6fe8aU, 0x342e539dU, 0xa2f355a0U, 0x058ae132U, 0xa4f6eb75U, 0x0b83ec39U, 0x4060efaaU, 0x5e719f06U, 0xbd6e1051U, 0x3e218af9U, 0x96dd063dU, 0xdd3e05aeU, 0x4de6bd46U, 0x91548db5U, 0x71c45d05U, 0x0406d46fU, 0x605015ffU, 0x1998fb24U, 0xd6bde997U, 0x894043ccU, 0x67d99e77U, 0xb0e842bdU, 0x07898b88U, 0xe7195b38U, 0x79c8eedbU, 0xa17c0a47U, 0x7c420fe9U, 0xf8841ec9U, 0x00000000U, 0x09808683U, 0x322bed48U, 0x1e1170acU, 0x6c5a724eU, 0xfd0efffbU, 0x0f853856U, 0x3daed51eU, 0x362d3927U, 0x0a0fd964U, 0x685ca621U, 0x9b5b54d1U, 0x24362e3aU, 0x0c0a67b1U, 0x9357e70fU, 0xb4ee96d2U, 0x1b9b919eU, 0x80c0c54fU, 0x61dc20a2U, 0x5a774b69U, 0x1c121a16U, 0xe293ba0aU, 0xc0a02ae5U, 0x3c22e043U, 0x121b171dU, 0x0e090d0bU, 0xf28bc7adU, 0x2db6a8b9U, 0x141ea9c8U, 0x57f11985U, 0xaf75074cU, 0xee99ddbbU, 0xa37f60fdU, 0xf701269fU, 0x5c72f5bcU, 
0x44663bc5U, 0x5bfb7e34U, 0x8b432976U, 0xcb23c6dcU, 0xb6edfc68U, 0xb8e4f163U, 0xd731dccaU, 0x42638510U, 0x13972240U, 0x84c61120U, 0x854a247dU, 0xd2bb3df8U, 0xaef93211U, 0xc729a16dU, 0x1d9e2f4bU, 0xdcb230f3U, 0x0d8652ecU, 0x77c1e3d0U, 0x2bb3166cU, 0xa970b999U, 0x119448faU, 0x47e96422U, 0xa8fc8cc4U, 0xa0f03f1aU, 0x567d2cd8U, 0x223390efU, 0x87494ec7U, 0xd938d1c1U, 0x8ccaa2feU, 0x98d40b36U, 0xa6f581cfU, 0xa57ade28U, 0xdab78e26U, 0x3fadbfa4U, 0x2c3a9de4U, 0x5078920dU, 0x6a5fcc9bU, 0x547e4662U, 0xf68d13c2U, 0x90d8b8e8U, 0x2e39f75eU, 0x82c3aff5U, 0x9f5d80beU, 0x69d0937cU, 0x6fd52da9U, 0xcf2512b3U, 0xc8ac993bU, 0x10187da7U, 0xe89c636eU, 0xdb3bbb7bU, 0xcd267809U, 0x6e5918f4U, 0xec9ab701U, 0x834f9aa8U, 0xe6956e65U, 0xaaffe67eU, 0x21bccf08U, 0xef15e8e6U, 0xbae79bd9U, 0x4a6f36ceU, 0xea9f09d4U, 0x29b07cd6U, 0x31a4b2afU, 0x2a3f2331U, 0xc6a59430U, 0x35a266c0U, 0x744ebc37U, 0xfc82caa6U, 0xe090d0b0U, 0x33a7d815U, 0xf104984aU, 0x41ecdaf7U, 0x7fcd500eU, 0x1791f62fU, 0x764dd68dU, 0x43efb04dU, 0xccaa4d54U, 0xe49604dfU, 0x9ed1b5e3U, 0x4c6a881bU, 0xc12c1fb8U, 0x4665517fU, 0x9d5eea04U, 0x018c355dU, 0xfa877473U, 0xfb0b412eU, 0xb3671d5aU, 0x92dbd252U, 0xe9105633U, 0x6dd64713U, 0x9ad7618cU, 0x37a10c7aU, 0x59f8148eU, 0xeb133c89U, 0xcea927eeU, 0xb761c935U, 0xe11ce5edU, 0x7a47b13cU, 0x9cd2df59U, 0x55f2733fU, 0x1814ce79U, 0x73c737bfU, 0x53f7cdeaU, 0x5ffdaa5bU, 0xdf3d6f14U, 0x7844db86U, 0xcaaff381U, 0xb968c43eU, 0x3824342cU, 0xc2a3405fU, 0x161dc372U, 0xbce2250cU, 0x283c498bU, 0xff0d9541U, 0x39a80171U, 0x080cb3deU, 0xd8b4e49cU, 0x6456c190U, 0x7bcb8461U, 0xd532b670U, 0x486c5c74U, 0xd0b85742U, }; __constant__ __device__ uint32_t Td1_ConstMem[256] = { 0x5051f4a7U, 0x537e4165U, 0xc31a17a4U, 0x963a275eU, 0xcb3bab6bU, 0xf11f9d45U, 0xabacfa58U, 0x934be303U, 0x552030faU, 0xf6ad766dU, 0x9188cc76U, 0x25f5024cU, 0xfc4fe5d7U, 0xd7c52acbU, 0x80263544U, 0x8fb562a3U, 0x49deb15aU, 0x6725ba1bU, 0x9845ea0eU, 0xe15dfec0U, 0x02c32f75U, 0x12814cf0U, 0xa38d4697U, 0xc66bd3f9U, 0xe7038f5fU, 0x9515929cU, 0xebbf6d7aU, 
0xda955259U, 0x2dd4be83U, 0xd3587421U, 0x2949e069U, 0x448ec9c8U, 0x6a75c289U, 0x78f48e79U, 0x6b99583eU, 0xdd27b971U, 0xb6bee14fU, 0x17f088adU, 0x66c920acU, 0xb47dce3aU, 0x1863df4aU, 0x82e51a31U, 0x60975133U, 0x4562537fU, 0xe0b16477U, 0x84bb6baeU, 0x1cfe81a0U, 0x94f9082bU, 0x58704868U, 0x198f45fdU, 0x8794de6cU, 0xb7527bf8U, 0x23ab73d3U, 0xe2724b02U, 0x57e31f8fU, 0x2a6655abU, 0x07b2eb28U, 0x032fb5c2U, 0x9a86c57bU, 0xa5d33708U, 0xf2302887U, 0xb223bfa5U, 0xba02036aU, 0x5ced1682U, 0x2b8acf1cU, 0x92a779b4U, 0xf0f307f2U, 0xa14e69e2U, 0xcd65daf4U, 0xd50605beU, 0x1fd13462U, 0x8ac4a6feU, 0x9d342e53U, 0xa0a2f355U, 0x32058ae1U, 0x75a4f6ebU, 0x390b83ecU, 0xaa4060efU, 0x065e719fU, 0x51bd6e10U, 0xf93e218aU, 0x3d96dd06U, 0xaedd3e05U, 0x464de6bdU, 0xb591548dU, 0x0571c45dU, 0x6f0406d4U, 0xff605015U, 0x241998fbU, 0x97d6bde9U, 0xcc894043U, 0x7767d99eU, 0xbdb0e842U, 0x8807898bU, 0x38e7195bU, 0xdb79c8eeU, 0x47a17c0aU, 0xe97c420fU, 0xc9f8841eU, 0x00000000U, 0x83098086U, 0x48322bedU, 0xac1e1170U, 0x4e6c5a72U, 0xfbfd0effU, 0x560f8538U, 0x1e3daed5U, 0x27362d39U, 0x640a0fd9U, 0x21685ca6U, 0xd19b5b54U, 0x3a24362eU, 0xb10c0a67U, 0x0f9357e7U, 0xd2b4ee96U, 0x9e1b9b91U, 0x4f80c0c5U, 0xa261dc20U, 0x695a774bU, 0x161c121aU, 0x0ae293baU, 0xe5c0a02aU, 0x433c22e0U, 0x1d121b17U, 0x0b0e090dU, 0xadf28bc7U, 0xb92db6a8U, 0xc8141ea9U, 0x8557f119U, 0x4caf7507U, 0xbbee99ddU, 0xfda37f60U, 0x9ff70126U, 0xbc5c72f5U, 0xc544663bU, 0x345bfb7eU, 0x768b4329U, 0xdccb23c6U, 0x68b6edfcU, 0x63b8e4f1U, 0xcad731dcU, 0x10426385U, 0x40139722U, 0x2084c611U, 0x7d854a24U, 0xf8d2bb3dU, 0x11aef932U, 0x6dc729a1U, 0x4b1d9e2fU, 0xf3dcb230U, 0xec0d8652U, 0xd077c1e3U, 0x6c2bb316U, 0x99a970b9U, 0xfa119448U, 0x2247e964U, 0xc4a8fc8cU, 0x1aa0f03fU, 0xd8567d2cU, 0xef223390U, 0xc787494eU, 0xc1d938d1U, 0xfe8ccaa2U, 0x3698d40bU, 0xcfa6f581U, 0x28a57adeU, 0x26dab78eU, 0xa43fadbfU, 0xe42c3a9dU, 0x0d507892U, 0x9b6a5fccU, 0x62547e46U, 0xc2f68d13U, 0xe890d8b8U, 0x5e2e39f7U, 0xf582c3afU, 0xbe9f5d80U, 0x7c69d093U, 0xa96fd52dU, 0xb3cf2512U, 
0x3bc8ac99U, 0xa710187dU, 0x6ee89c63U, 0x7bdb3bbbU, 0x09cd2678U, 0xf46e5918U, 0x01ec9ab7U, 0xa8834f9aU, 0x65e6956eU, 0x7eaaffe6U, 0x0821bccfU, 0xe6ef15e8U, 0xd9bae79bU, 0xce4a6f36U, 0xd4ea9f09U, 0xd629b07cU, 0xaf31a4b2U, 0x312a3f23U, 0x30c6a594U, 0xc035a266U, 0x37744ebcU, 0xa6fc82caU, 0xb0e090d0U, 0x1533a7d8U, 0x4af10498U, 0xf741ecdaU, 0x0e7fcd50U, 0x2f1791f6U, 0x8d764dd6U, 0x4d43efb0U, 0x54ccaa4dU, 0xdfe49604U, 0xe39ed1b5U, 0x1b4c6a88U, 0xb8c12c1fU, 0x7f466551U, 0x049d5eeaU, 0x5d018c35U, 0x73fa8774U, 0x2efb0b41U, 0x5ab3671dU, 0x5292dbd2U, 0x33e91056U, 0x136dd647U, 0x8c9ad761U, 0x7a37a10cU, 0x8e59f814U, 0x89eb133cU, 0xeecea927U, 0x35b761c9U, 0xede11ce5U, 0x3c7a47b1U, 0x599cd2dfU, 0x3f55f273U, 0x791814ceU, 0xbf73c737U, 0xea53f7cdU, 0x5b5ffdaaU, 0x14df3d6fU, 0x867844dbU, 0x81caaff3U, 0x3eb968c4U, 0x2c382434U, 0x5fc2a340U, 0x72161dc3U, 0x0cbce225U, 0x8b283c49U, 0x41ff0d95U, 0x7139a801U, 0xde080cb3U, 0x9cd8b4e4U, 0x906456c1U, 0x617bcb84U, 0x70d532b6U, 0x74486c5cU, 0x42d0b857U, }; __constant__ __device__ uint32_t Td2_ConstMem[256] = { 0xa75051f4U, 0x65537e41U, 0xa4c31a17U, 0x5e963a27U, 0x6bcb3babU, 0x45f11f9dU, 0x58abacfaU, 0x03934be3U, 0xfa552030U, 0x6df6ad76U, 0x769188ccU, 0x4c25f502U, 0xd7fc4fe5U, 0xcbd7c52aU, 0x44802635U, 0xa38fb562U, 0x5a49deb1U, 0x1b6725baU, 0x0e9845eaU, 0xc0e15dfeU, 0x7502c32fU, 0xf012814cU, 0x97a38d46U, 0xf9c66bd3U, 0x5fe7038fU, 0x9c951592U, 0x7aebbf6dU, 0x59da9552U, 0x832dd4beU, 0x21d35874U, 0x692949e0U, 0xc8448ec9U, 0x896a75c2U, 0x7978f48eU, 0x3e6b9958U, 0x71dd27b9U, 0x4fb6bee1U, 0xad17f088U, 0xac66c920U, 0x3ab47dceU, 0x4a1863dfU, 0x3182e51aU, 0x33609751U, 0x7f456253U, 0x77e0b164U, 0xae84bb6bU, 0xa01cfe81U, 0x2b94f908U, 0x68587048U, 0xfd198f45U, 0x6c8794deU, 0xf8b7527bU, 0xd323ab73U, 0x02e2724bU, 0x8f57e31fU, 0xab2a6655U, 0x2807b2ebU, 0xc2032fb5U, 0x7b9a86c5U, 0x08a5d337U, 0x87f23028U, 0xa5b223bfU, 0x6aba0203U, 0x825ced16U, 0x1c2b8acfU, 0xb492a779U, 0xf2f0f307U, 0xe2a14e69U, 0xf4cd65daU, 0xbed50605U, 0x621fd134U, 0xfe8ac4a6U, 0x539d342eU, 
0x55a0a2f3U, 0xe132058aU, 0xeb75a4f6U, 0xec390b83U, 0xefaa4060U, 0x9f065e71U, 0x1051bd6eU, 0x8af93e21U, 0x063d96ddU, 0x05aedd3eU, 0xbd464de6U, 0x8db59154U, 0x5d0571c4U, 0xd46f0406U, 0x15ff6050U, 0xfb241998U, 0xe997d6bdU, 0x43cc8940U, 0x9e7767d9U, 0x42bdb0e8U, 0x8b880789U, 0x5b38e719U, 0xeedb79c8U, 0x0a47a17cU, 0x0fe97c42U, 0x1ec9f884U, 0x00000000U, 0x86830980U, 0xed48322bU, 0x70ac1e11U, 0x724e6c5aU, 0xfffbfd0eU, 0x38560f85U, 0xd51e3daeU, 0x3927362dU, 0xd9640a0fU, 0xa621685cU, 0x54d19b5bU, 0x2e3a2436U, 0x67b10c0aU, 0xe70f9357U, 0x96d2b4eeU, 0x919e1b9bU, 0xc54f80c0U, 0x20a261dcU, 0x4b695a77U, 0x1a161c12U, 0xba0ae293U, 0x2ae5c0a0U, 0xe0433c22U, 0x171d121bU, 0x0d0b0e09U, 0xc7adf28bU, 0xa8b92db6U, 0xa9c8141eU, 0x198557f1U, 0x074caf75U, 0xddbbee99U, 0x60fda37fU, 0x269ff701U, 0xf5bc5c72U, 0x3bc54466U, 0x7e345bfbU, 0x29768b43U, 0xc6dccb23U, 0xfc68b6edU, 0xf163b8e4U, 0xdccad731U, 0x85104263U, 0x22401397U, 0x112084c6U, 0x247d854aU, 0x3df8d2bbU, 0x3211aef9U, 0xa16dc729U, 0x2f4b1d9eU, 0x30f3dcb2U, 0x52ec0d86U, 0xe3d077c1U, 0x166c2bb3U, 0xb999a970U, 0x48fa1194U, 0x642247e9U, 0x8cc4a8fcU, 0x3f1aa0f0U, 0x2cd8567dU, 0x90ef2233U, 0x4ec78749U, 0xd1c1d938U, 0xa2fe8ccaU, 0x0b3698d4U, 0x81cfa6f5U, 0xde28a57aU, 0x8e26dab7U, 0xbfa43fadU, 0x9de42c3aU, 0x920d5078U, 0xcc9b6a5fU, 0x4662547eU, 0x13c2f68dU, 0xb8e890d8U, 0xf75e2e39U, 0xaff582c3U, 0x80be9f5dU, 0x937c69d0U, 0x2da96fd5U, 0x12b3cf25U, 0x993bc8acU, 0x7da71018U, 0x636ee89cU, 0xbb7bdb3bU, 0x7809cd26U, 0x18f46e59U, 0xb701ec9aU, 0x9aa8834fU, 0x6e65e695U, 0xe67eaaffU, 0xcf0821bcU, 0xe8e6ef15U, 0x9bd9bae7U, 0x36ce4a6fU, 0x09d4ea9fU, 0x7cd629b0U, 0xb2af31a4U, 0x23312a3fU, 0x9430c6a5U, 0x66c035a2U, 0xbc37744eU, 0xcaa6fc82U, 0xd0b0e090U, 0xd81533a7U, 0x984af104U, 0xdaf741ecU, 0x500e7fcdU, 0xf62f1791U, 0xd68d764dU, 0xb04d43efU, 0x4d54ccaaU, 0x04dfe496U, 0xb5e39ed1U, 0x881b4c6aU, 0x1fb8c12cU, 0x517f4665U, 0xea049d5eU, 0x355d018cU, 0x7473fa87U, 0x412efb0bU, 0x1d5ab367U, 0xd25292dbU, 0x5633e910U, 0x47136dd6U, 0x618c9ad7U, 0x0c7a37a1U, 
0x148e59f8U, 0x3c89eb13U, 0x27eecea9U, 0xc935b761U, 0xe5ede11cU, 0xb13c7a47U, 0xdf599cd2U, 0x733f55f2U, 0xce791814U, 0x37bf73c7U, 0xcdea53f7U, 0xaa5b5ffdU, 0x6f14df3dU, 0xdb867844U, 0xf381caafU, 0xc43eb968U, 0x342c3824U, 0x405fc2a3U, 0xc372161dU, 0x250cbce2U, 0x498b283cU, 0x9541ff0dU, 0x017139a8U, 0xb3de080cU, 0xe49cd8b4U, 0xc1906456U, 0x84617bcbU, 0xb670d532U, 0x5c74486cU, 0x5742d0b8U, }; __constant__ __device__ uint32_t Td3_ConstMem[256] = { 0xf4a75051U, 0x4165537eU, 0x17a4c31aU, 0x275e963aU, 0xab6bcb3bU, 0x9d45f11fU, 0xfa58abacU, 0xe303934bU, 0x30fa5520U, 0x766df6adU, 0xcc769188U, 0x024c25f5U, 0xe5d7fc4fU, 0x2acbd7c5U, 0x35448026U, 0x62a38fb5U, 0xb15a49deU, 0xba1b6725U, 0xea0e9845U, 0xfec0e15dU, 0x2f7502c3U, 0x4cf01281U, 0x4697a38dU, 0xd3f9c66bU, 0x8f5fe703U, 0x929c9515U, 0x6d7aebbfU, 0x5259da95U, 0xbe832dd4U, 0x7421d358U, 0xe0692949U, 0xc9c8448eU, 0xc2896a75U, 0x8e7978f4U, 0x583e6b99U, 0xb971dd27U, 0xe14fb6beU, 0x88ad17f0U, 0x20ac66c9U, 0xce3ab47dU, 0xdf4a1863U, 0x1a3182e5U, 0x51336097U, 0x537f4562U, 0x6477e0b1U, 0x6bae84bbU, 0x81a01cfeU, 0x082b94f9U, 0x48685870U, 0x45fd198fU, 0xde6c8794U, 0x7bf8b752U, 0x73d323abU, 0x4b02e272U, 0x1f8f57e3U, 0x55ab2a66U, 0xeb2807b2U, 0xb5c2032fU, 0xc57b9a86U, 0x3708a5d3U, 0x2887f230U, 0xbfa5b223U, 0x036aba02U, 0x16825cedU, 0xcf1c2b8aU, 0x79b492a7U, 0x07f2f0f3U, 0x69e2a14eU, 0xdaf4cd65U, 0x05bed506U, 0x34621fd1U, 0xa6fe8ac4U, 0x2e539d34U, 0xf355a0a2U, 0x8ae13205U, 0xf6eb75a4U, 0x83ec390bU, 0x60efaa40U, 0x719f065eU, 0x6e1051bdU, 0x218af93eU, 0xdd063d96U, 0x3e05aeddU, 0xe6bd464dU, 0x548db591U, 0xc45d0571U, 0x06d46f04U, 0x5015ff60U, 0x98fb2419U, 0xbde997d6U, 0x4043cc89U, 0xd99e7767U, 0xe842bdb0U, 0x898b8807U, 0x195b38e7U, 0xc8eedb79U, 0x7c0a47a1U, 0x420fe97cU, 0x841ec9f8U, 0x00000000U, 0x80868309U, 0x2bed4832U, 0x1170ac1eU, 0x5a724e6cU, 0x0efffbfdU, 0x8538560fU, 0xaed51e3dU, 0x2d392736U, 0x0fd9640aU, 0x5ca62168U, 0x5b54d19bU, 0x362e3a24U, 0x0a67b10cU, 0x57e70f93U, 0xee96d2b4U, 0x9b919e1bU, 0xc0c54f80U, 0xdc20a261U, 0x774b695aU, 
0x121a161cU, 0x93ba0ae2U, 0xa02ae5c0U, 0x22e0433cU, 0x1b171d12U, 0x090d0b0eU, 0x8bc7adf2U, 0xb6a8b92dU, 0x1ea9c814U, 0xf1198557U, 0x75074cafU, 0x99ddbbeeU, 0x7f60fda3U, 0x01269ff7U, 0x72f5bc5cU, 0x663bc544U, 0xfb7e345bU, 0x4329768bU, 0x23c6dccbU, 0xedfc68b6U, 0xe4f163b8U, 0x31dccad7U, 0x63851042U, 0x97224013U, 0xc6112084U, 0x4a247d85U, 0xbb3df8d2U, 0xf93211aeU, 0x29a16dc7U, 0x9e2f4b1dU, 0xb230f3dcU, 0x8652ec0dU, 0xc1e3d077U, 0xb3166c2bU, 0x70b999a9U, 0x9448fa11U, 0xe9642247U, 0xfc8cc4a8U, 0xf03f1aa0U, 0x7d2cd856U, 0x3390ef22U, 0x494ec787U, 0x38d1c1d9U, 0xcaa2fe8cU, 0xd40b3698U, 0xf581cfa6U, 0x7ade28a5U, 0xb78e26daU, 0xadbfa43fU, 0x3a9de42cU, 0x78920d50U, 0x5fcc9b6aU, 0x7e466254U, 0x8d13c2f6U, 0xd8b8e890U, 0x39f75e2eU, 0xc3aff582U, 0x5d80be9fU, 0xd0937c69U, 0xd52da96fU, 0x2512b3cfU, 0xac993bc8U, 0x187da710U, 0x9c636ee8U, 0x3bbb7bdbU, 0x267809cdU, 0x5918f46eU, 0x9ab701ecU, 0x4f9aa883U, 0x956e65e6U, 0xffe67eaaU, 0xbccf0821U, 0x15e8e6efU, 0xe79bd9baU, 0x6f36ce4aU, 0x9f09d4eaU, 0xb07cd629U, 0xa4b2af31U, 0x3f23312aU, 0xa59430c6U, 0xa266c035U, 0x4ebc3774U, 0x82caa6fcU, 0x90d0b0e0U, 0xa7d81533U, 0x04984af1U, 0xecdaf741U, 0xcd500e7fU, 0x91f62f17U, 0x4dd68d76U, 0xefb04d43U, 0xaa4d54ccU, 0x9604dfe4U, 0xd1b5e39eU, 0x6a881b4cU, 0x2c1fb8c1U, 0x65517f46U, 0x5eea049dU, 0x8c355d01U, 0x877473faU, 0x0b412efbU, 0x671d5ab3U, 0xdbd25292U, 0x105633e9U, 0xd647136dU, 0xd7618c9aU, 0xa10c7a37U, 0xf8148e59U, 0x133c89ebU, 0xa927eeceU, 0x61c935b7U, 0x1ce5ede1U, 0x47b13c7aU, 0xd2df599cU, 0xf2733f55U, 0x14ce7918U, 0xc737bf73U, 0xf7cdea53U, 0xfdaa5b5fU, 0x3d6f14dfU, 0x44db8678U, 0xaff381caU, 0x68c43eb9U, 0x24342c38U, 0xa3405fc2U, 0x1dc37216U, 0xe2250cbcU, 0x3c498b28U, 0x0d9541ffU, 0xa8017139U, 0x0cb3de08U, 0xb4e49cd8U, 0x56c19064U, 0xcb84617bU, 0x32b670d5U, 0x6c5c7448U, 0xb85742d0U, }; __constant__ __device__ uint8_t Td4_ConstMem[256] = { 0x52U, 0x09U, 0x6aU, 0xd5U, 0x30U, 0x36U, 0xa5U, 0x38U, 0xbfU, 0x40U, 0xa3U, 0x9eU, 0x81U, 0xf3U, 0xd7U, 0xfbU, 0x7cU, 0xe3U, 0x39U, 0x82U, 0x9bU, 0x2fU, 0xffU, 
0x87U, 0x34U, 0x8eU, 0x43U, 0x44U, 0xc4U, 0xdeU, 0xe9U, 0xcbU, 0x54U, 0x7bU, 0x94U, 0x32U, 0xa6U, 0xc2U, 0x23U, 0x3dU, 0xeeU, 0x4cU, 0x95U, 0x0bU, 0x42U, 0xfaU, 0xc3U, 0x4eU, 0x08U, 0x2eU, 0xa1U, 0x66U, 0x28U, 0xd9U, 0x24U, 0xb2U, 0x76U, 0x5bU, 0xa2U, 0x49U, 0x6dU, 0x8bU, 0xd1U, 0x25U, 0x72U, 0xf8U, 0xf6U, 0x64U, 0x86U, 0x68U, 0x98U, 0x16U, 0xd4U, 0xa4U, 0x5cU, 0xccU, 0x5dU, 0x65U, 0xb6U, 0x92U, 0x6cU, 0x70U, 0x48U, 0x50U, 0xfdU, 0xedU, 0xb9U, 0xdaU, 0x5eU, 0x15U, 0x46U, 0x57U, 0xa7U, 0x8dU, 0x9dU, 0x84U, 0x90U, 0xd8U, 0xabU, 0x00U, 0x8cU, 0xbcU, 0xd3U, 0x0aU, 0xf7U, 0xe4U, 0x58U, 0x05U, 0xb8U, 0xb3U, 0x45U, 0x06U, 0xd0U, 0x2cU, 0x1eU, 0x8fU, 0xcaU, 0x3fU, 0x0fU, 0x02U, 0xc1U, 0xafU, 0xbdU, 0x03U, 0x01U, 0x13U, 0x8aU, 0x6bU, 0x3aU, 0x91U, 0x11U, 0x41U, 0x4fU, 0x67U, 0xdcU, 0xeaU, 0x97U, 0xf2U, 0xcfU, 0xceU, 0xf0U, 0xb4U, 0xe6U, 0x73U, 0x96U, 0xacU, 0x74U, 0x22U, 0xe7U, 0xadU, 0x35U, 0x85U, 0xe2U, 0xf9U, 0x37U, 0xe8U, 0x1cU, 0x75U, 0xdfU, 0x6eU, 0x47U, 0xf1U, 0x1aU, 0x71U, 0x1dU, 0x29U, 0xc5U, 0x89U, 0x6fU, 0xb7U, 0x62U, 0x0eU, 0xaaU, 0x18U, 0xbeU, 0x1bU, 0xfcU, 0x56U, 0x3eU, 0x4bU, 0xc6U, 0xd2U, 0x79U, 0x20U, 0x9aU, 0xdbU, 0xc0U, 0xfeU, 0x78U, 0xcdU, 0x5aU, 0xf4U, 0x1fU, 0xddU, 0xa8U, 0x33U, 0x88U, 0x07U, 0xc7U, 0x31U, 0xb1U, 0x12U, 0x10U, 0x59U, 0x27U, 0x80U, 0xecU, 0x5fU, 0x60U, 0x51U, 0x7fU, 0xa9U, 0x19U, 0xb5U, 0x4aU, 0x0dU, 0x2dU, 0xe5U, 0x7aU, 0x9fU, 0x93U, 0xc9U, 0x9cU, 0xefU, 0xa0U, 0xe0U, 0x3bU, 0x4dU, 0xaeU, 0x2aU, 0xf5U, 0xb0U, 0xc8U, 0xebU, 0xbbU, 0x3cU, 0x83U, 0x53U, 0x99U, 0x61U, 0x17U, 0x2bU, 0x04U, 0x7eU, 0xbaU, 0x77U, 0xd6U, 0x26U, 0xe1U, 0x69U, 0x14U, 0x63U, 0x55U, 0x21U, 0x0cU, 0x7dU, }; #endif /* __AES_CORE__ */ // TODO: optimize these two routines. Try to use longer data type, instead of byte access. 
//# define _GETU32(pt) (((uint32_t)(pt)[0] << 24) ^ ((uint32_t)(pt)[1] << 16) ^ ((uint32_t)(pt)[2] << 8) ^ ((uint32_t)(pt)[3]))
//# define _PUTU32(ct, st) { (ct)[0] = (uint8_t)((st) >> 24); (ct)[1] = (uint8_t)((st) >> 16); (ct)[2] = (uint8_t)((st) >> 8); (ct)[3] = (uint8_t)(st); }
//#define _GETU32(pt) ((( (*((uint32_t*)(pt))) &0x000000ffU)<<24) ^ (((*((uint32_t*)(pt)))&0x0000ff00U)<<8) ^ (((*((uint32_t*)(pt)))&0x00ff0000U)>>8) ^ (((*((uint32_t*)(pt)))&0xff000000U)>>24))

/* Load a 32-bit big-endian word from pt via a single word load followed by a
 * byte swap.  NOTE(review): this dereferences pt as uint32_t*, so it assumes
 * pt is 4-byte aligned (the commented-out byte-access macro above had no such
 * requirement) — confirm all call sites pass aligned pointers. */
__device__ static uint32_t _GETU32(const uint8_t *pt)
{
    uint32_t i = *((uint32_t*) pt);
    return ((i & 0x000000ffU) << 24) ^ ((i & 0x0000ff00U) << 8) ^
           ((i & 0x00ff0000U) >> 8) ^ ((i & 0xff000000U) >> 24);
}

/* Store the 32-bit value st at ct in big-endian byte order; the expression is
 * a byte swap of st written with shifts.  Same 4-byte alignment assumption on
 * ct as _GETU32 above (single uint32_t store). */
__device__ static void _PUTU32(uint8_t *ct, uint32_t st)
{
    *((uint32_t*) ct) = ((st >> 24) ^ (((st << 8) >> 24) << 8) ^
                         (((st << 16) >> 24) << 16) ^ (st << 24));
}

/* Advance the four-word round-key state rk[0..3] in place to the next round
 * key (on-the-fly AES-128 key schedule).  The Te* encryption tables are used
 * only to extract S-box bytes via the masks; rcon[round] supplies the round
 * constant for rounds 0..9. */
__device__ static void _next_rk(uint32_t* rk, const int round,
                                const uint32_t Te0[], const uint32_t Te1[],
                                const uint32_t Te2[], const uint32_t Te3[],
                                const uint32_t rcon[])
{
    uint32_t temp;
    temp = rk[3];
    /* SubWord(RotWord(rk[3])) ^ Rcon — each Te table lookup, masked, yields
     * one S-box byte in the right output position. */
    rk[0] = rk[0] ^
            (Te2[(temp >> 16) & 0xff] & 0xff000000) ^
            (Te3[(temp >> 8) & 0xff] & 0x00ff0000) ^
            (Te0[(temp) & 0xff] & 0x0000ff00) ^
            (Te1[(temp >> 24)] & 0x000000ff) ^
            rcon[round];
    rk[1] = rk[1] ^ rk[0];
    rk[2] = rk[2] ^ rk[1];
    rk[3] = rk[3] ^ rk[2];
}

/*----------------------------------------------------------------------*/
/* increment counter (128-bit int) by the given amount                   */
/*----------------------------------------------------------------------*/
__device__ static void AES_ctr128_inc(unsigned char *counter, int inc)
{
    /* convert counter to big endian format */
    uint32_t c[4];
    c[0] = _GETU32(counter + 12);
    c[1] = _GETU32(counter + 8);
    c[2] = _GETU32(counter + 4);
    c[3] = _GETU32(counter + 0);

    /* increase the counter: the cast treats c[0],c[1] (the low 64 bits of the
     * big-endian counter) as one 64-bit integer, so the add carries between
     * them in hardware.  NOTE(review): a carry out of the low 64 bits into
     * c[2]/c[3] is silently dropped, and the uint32_t->uint64_t cast relies on
     * little-endian lane order and type punning — verify this is acceptable
     * for the counter ranges used by callers. */
    uint64_t *d = (uint64_t*) c;
    d[0] += inc;

    /* store the counter back */
    _PUTU32(counter + 12, c[0]);
    _PUTU32(counter + 8, c[1]);
    _PUTU32(counter + 4, c[2]);
    _PUTU32(counter + 0, c[3]);
}

__device__ static void
/* Encrypt one 16-byte block (in -> out) with AES-128 using the T-table
 * formulation.  The round keys are derived on the fly from the 16-byte `key`
 * via _next_rk, so no expanded key schedule is stored.  Te0..Te3 and rcon are
 * expected to point at the (shared- or constant-memory) lookup tables; all
 * ten rounds are fully unrolled below.  `in`/`out`/`key` must satisfy the
 * 4-byte alignment assumption of _GETU32/_PUTU32. */
AES_encrypt_cu_optimized(const uint8_t *in, uint8_t *out,
                         const uint8_t * __restrict__ key,
                         const uint32_t Te0[], const uint32_t Te1[],
                         const uint32_t Te2[], const uint32_t Te3[],
                         const uint32_t rcon[])
{
    //const uint32_t *rk;
    uint32_t s0, s1, s2, s3, t0, t1, t2, t3;
    //rk = key->rd_key;
    uint32_t rk[4];
    rk[0] = _GETU32(key);
    rk[1] = _GETU32(key + 4);
    rk[2] = _GETU32(key + 8);
    rk[3] = _GETU32(key + 12);
    /*
     * map byte array block to cipher state
     * and add initial round key:
     */
    s0 = _GETU32(in) ^ rk[0];
    s1 = _GETU32(in + 4) ^ rk[1];
    s2 = _GETU32(in + 8) ^ rk[2];
    s3 = _GETU32(in + 12) ^ rk[3];
    /* round 1: */
    _next_rk(rk, 0, Te0, Te1, Te2, Te3, rcon);
    t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[0];
    t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[1];
    t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[2];
    t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[3];
    /* round 2: */
    _next_rk(rk, 1, Te0, Te1, Te2, Te3, rcon);
    s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[0];
    s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[1];
    s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[2];
    s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[3];
    /* round 3: */
    _next_rk(rk, 2, Te0, Te1, Te2, Te3, rcon);
    t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[0];
    t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[1];
    t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[2];
    t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[3];
    /* round 4: */
    _next_rk(rk, 3, Te0, Te1, Te2, Te3, rcon);
    s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[0];
    s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[1];
    s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[2];
    s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[3];
    /* round 5: */
    _next_rk(rk, 4, Te0, Te1, Te2, Te3, rcon);
    t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[0];
    t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[1];
    t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[2];
    t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[3];
    /* round 6: */
    _next_rk(rk, 5, Te0, Te1, Te2, Te3, rcon);
    s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[0];
    s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[1];
    s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[2];
    s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[3];
    /* round 7: */
    _next_rk(rk, 6, Te0, Te1, Te2, Te3, rcon);
    t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[0];
    t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[1];
    t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[2];
    t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[3];
    /* round 8: */
    _next_rk(rk, 7, Te0, Te1, Te2, Te3, rcon);
    s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[0];
    s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[1];
    s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[2];
    s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[3];
    /* round 9: */
    _next_rk(rk, 8, Te0, Te1, Te2, Te3, rcon);
    t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[0];
    t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[1];
    t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[2];
    t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[3];
    /*
     * apply last round and
     * map cipher state to byte array block:
     * (the final round has no MixColumns, hence the masked single-byte
     * S-box extraction from the Te tables instead of full-word lookups)
     */
    _next_rk(rk, 9, Te0, Te1, Te2, Te3, rcon);
    s0 = (Te2[(t0 >> 24)] & 0xff000000) ^ (Te3[(t1 >> 16) & 0xff] & 0x00ff0000) ^
         (Te0[(t2 >> 8) & 0xff] & 0x0000ff00) ^ (Te1[(t3) & 0xff] & 0x000000ff) ^ rk[0];
    _PUTU32(out, s0);
    s1 = (Te2[(t1 >> 24)] & 0xff000000) ^ (Te3[(t2 >> 16) & 0xff] & 0x00ff0000) ^
         (Te0[(t3 >> 8) & 0xff] & 0x0000ff00) ^ (Te1[(t0) & 0xff] & 0x000000ff) ^ rk[1];
    _PUTU32(out + 4, s1);
    s2 = (Te2[(t2 >> 24)] & 0xff000000) ^ (Te3[(t3 >> 16) & 0xff] & 0x00ff0000) ^
         (Te0[(t0 >> 8) & 0xff] & 0x0000ff00) ^ (Te1[(t1) & 0xff] & 0x000000ff) ^ rk[2];
    _PUTU32(out + 8, s2);
    s3 = (Te2[(t3 >> 24)] & 0xff000000) ^ (Te3[(t0 >> 16) & 0xff] & 0x00ff0000) ^
         (Te0[(t1 >> 8) & 0xff] & 0x0000ff00) ^ (Te1[(t2) & 0xff] & 0x000000ff) ^ rk[3];
    _PUTU32(out + 12, s3);
}

/* Kernel: AES-CTR encryption, one 16-byte cipher block per thread.  The Te
 * tables and Rcon are staged from constant memory into shared memory before
 * use.  `count` is the total number of AES blocks across all batches;
 * `checkbits_d[blockIdx.x]` is set to 1 when the thread block finishes. */
__global__ void AES_ctr_encrypt_chunk_SharedMem_5(
        struct datablock_kernel_arg **datablocks,
        uint32_t count,
        uint32_t *item_counts,
        uint32_t num_batches,
        uint8_t *checkbits_d,
        struct aes_sa_entry* flows)
{
    __shared__ uint32_t shared_Te0[256];
    __shared__ uint32_t shared_Te1[256];
    __shared__ uint32_t shared_Te2[256];
    __shared__ uint32_t shared_Te3[256];
    __shared__ uint32_t shared_Rcon[10];

    uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x;

    if (idx < count && count != 0) {
        /* Map the flat thread index onto (batch, item-within-batch). */
        uint32_t batch_idx, item_idx;
        nba::error_t err;
        err = nba::get_accum_idx(item_counts, num_batches, idx, batch_idx, item_idx);
        assert(err == nba::NBA_SUCCESS);
        const struct
        datablock_kernel_arg *db_enc_payloads = datablocks[dbid_enc_payloads_d];
        const struct datablock_kernel_arg *const db_flow_ids = datablocks[dbid_flow_ids_d];
        const struct datablock_kernel_arg *const db_iv = datablocks[dbid_iv_d];
        const struct datablock_kernel_arg *const db_aes_block_info = datablocks[dbid_aes_block_info_d];

        assert(item_idx < db_aes_block_info->batches[batch_idx].item_count);

        /* 65536 doubles as the "no flow / skip this item" sentinel below. */
        uint64_t flow_id = 65536;
        const struct aes_block_info &cur_block_info =
                ((struct aes_block_info *) db_aes_block_info->batches[batch_idx].buffer_bases)[item_idx];
        const int pkt_idx = cur_block_info.pkt_idx;
        const int block_idx_local = cur_block_info.block_idx;
        const uintptr_t offset = (uintptr_t)
                db_enc_payloads->batches[batch_idx].item_offsets[pkt_idx].as_value<uintptr_t>();
        const uintptr_t length = (uintptr_t)
                db_enc_payloads->batches[batch_idx].item_sizes[pkt_idx];

        /* Only look up the flow for items with a valid magic and non-empty
         * payload; otherwise flow_id keeps the sentinel and the encryption
         * step below is skipped. */
        if (cur_block_info.magic == 85739 && pkt_idx < 64 && length != 0) {
            flow_id = ((uint64_t *) db_flow_ids->batches[batch_idx].buffer_bases)[pkt_idx];
            if (flow_id != 65536)
                assert(flow_id < 1024);
        }

        /* Step 2. (marginal) — stage the AES tables into shared memory.
         * NOTE(review): this load loop and the __syncthreads() below sit
         * inside `if (idx < count)`.  In a tail block where some threads fail
         * that predicate, those threads neither load their share of the
         * tables nor reach the barrier — confirm launch configuration
         * guarantees whole blocks are in range, otherwise this is a race /
         * barrier-divergence hazard. */
        for (int i = 0; i * blockDim.x < 256; i++) {
            int index = threadIdx.x + blockDim.x * i;
            if (index < 256) {
                shared_Te0[index] = Te0_ConstMem[index];
                shared_Te1[index] = Te1_ConstMem[index];
                shared_Te2[index] = Te2_ConstMem[index];
                shared_Te3[index] = Te3_ConstMem[index];
            }
        }
        for (int i = 0; i * blockDim.x < 10; i++) {
            int index = threadIdx.x + blockDim.x * i;
            if (index < 10) {
                shared_Rcon[index] = rcon[index];
            }
        }
        __syncthreads();

        if (flow_id != 65536 && length != 0) {
            assert(flow_id < 1024);
            assert(pkt_idx < 64);
            const uint8_t *const aes_key = flows[flow_id].aes_key;
            uint8_t *iv = ((uint8_t *) db_iv->batches[batch_idx].buffer_bases
                           + (uintptr_t) (16 * pkt_idx));
            const uint8_t *enc_payload =
                    ((uint8_t *) db_enc_payloads->batches[batch_idx].buffer_bases) + offset;
            uint4 ecounter = {0,0,0,0};
            assert(enc_payload != NULL);

            /* Step 3: Update the IV counters. */
            AES_ctr128_inc(iv, block_idx_local);

            /* Step 4: Encrypt the counter (this is the bottleneck) */
            AES_encrypt_cu_optimized(iv, (uint8_t *) &ecounter, aes_key,
                                     shared_Te0, shared_Te1, shared_Te2,
                                     shared_Te3, shared_Rcon);
            //AES_encrypt_cu_optimized(iv, (uint8_t *) &ecounter,
            //        aes_key, Te0_ConstMem, Te1_ConstMem, Te2_ConstMem,
            //        Te3_ConstMem, rcon);

            /* Step 5: XOR the plain text (in-place).  The cast drops the
             * const on enc_payload; this writes the ciphertext back over the
             * plaintext 16 bytes at a time. */
            uint4 *in_blk = (uint4 *) &enc_payload[block_idx_local * AES_BLOCK_SIZE];
            assert((uint8_t*)in_blk + AES_BLOCK_SIZE <= enc_payload + length);
            (*in_blk).x = ecounter.x ^ (*in_blk).x;
            (*in_blk).y = ecounter.y ^ (*in_blk).y;
            (*in_blk).z = ecounter.z ^ (*in_blk).z;
            (*in_blk).w = ecounter.w ^ (*in_blk).w;
        }
        __syncthreads();

        /* Signal host-side completion polling for this thread block. */
        if (threadIdx.x == 0 && checkbits_d != NULL)
            checkbits_d[blockIdx.x] = 1;
    } // endif(valid-idx)
}

/* Host-side accessor: returns the kernel entry point as an opaque pointer so
 * the framework can launch it via its generic offload path. */
void *nba::ipsec_aes_encryption_get_cuda_kernel()
{
    return reinterpret_cast<void *> (AES_ctr_encrypt_chunk_SharedMem_5);
}

// vim: ts=8 sts=4 sw=4 et tw=150
the_stack
#include "SegmentedScan.h"

#include <cmath>
#include <iostream>
#include <stdio.h>
using namespace std;

// Macro: SEG_SCAN_PACK
// Number of elements scanned serially per pack inside the kernel.
#define SEG_SCAN_PACK 16

// Macro: SEG_SCAN_PACK_NUM
// Number of packs processed per thread block.
#define SEG_SCAN_PACK_NUM 64

// Macro: SEG_SCAN_BLOCKSIZE
// Thread block size (= pack size * pack count).
#define SEG_SCAN_BLOCKSIZE (SEG_SCAN_PACK * SEG_SCAN_PACK_NUM)

// Macro: SEG_DEBUG_CPU_PRINT (CPU-version debug print switch)
// When defined, the CPU reference implementation prints tracing information;
// leave it commented out for faster runs.
// #define SEG_DEBUG_CPU_PRINT

// Kernel: _segmentedScanMatrixKer (matrix-method segmented scan)
// Matrix-method SegmentedScan: SEG_SCAN_PACK_NUM threads each scan one pack
// of SEG_SCAN_PACK consecutive elements serially, then the per-pack results
// are combined and swept back over the packs.
__global__ void                  // kernel, no return value
_segmentedScanMatrixKer(
        float *inarray,          // input array
        int *label,              // per-element segment label
        int *index,              // per-element position index (may be NULL)
        int n,                   // number of elements to process
        float *maxdist,          // output: per-element running segment maximum
        int *maxdistidx,         // output: index of that maximum
        float *blockmaxdist,     // intermediate: last-position maximum per block
        int *blocklabel,         // intermediate: last-position label per block
        int *blockmaxdistidx     // intermediate: last-position max index per block
);

// Kernel: _segmentedScanBackKer (add intermediate results back)
// Second pass: combines the small per-block intermediate arrays back into the
// output arrays with a segmented back-propagation.
__global__ void                  // kernel, no return value
_segmentedScanBackKer(
        float *maxdist,          // output segment-maximum array to update
        int *maxdistidx,         // output max-position index array to update
        int *label,              // per-element segment label
        float *blockmaxdist,     // intermediate: last-position maximum per block
        int *blocklabel,         // intermediate: last-position label per block
        int *blockmaxdistidx,    // intermediate: last-position max index per block
        int numelements          // length of the scanned array
);

// Kernel: _segmentedScanMatrixKer (matrix-method segmented scan)
__global__ void _segmentedScanMatrixKer(float *inarray, int *label,
                                        int *index, int n, float *maxdist,
                                        int *maxdistidx, float *blockmaxdist,
                                        int *blocklabel, int *blockmaxdistidx)
{
    // Dynamic shared memory layout: the shd* arrays hold per-thread element
    // state (blockDim.x entries each); the shdcol* arrays hold the last
    // element of each pack (SEG_SCAN_PACK_NUM entries each).
    extern __shared__ float shdmem[];
    float *shdmaxdist = shdmem;
    float *shdcolmaxdist = &shdmaxdist[blockDim.x];
    int *shdlabel = (int*)&shdcolmaxdist[SEG_SCAN_PACK_NUM];
    int *shdcollabel = &shdlabel[blockDim.x];
    int *shdindex = &shdcollabel[SEG_SCAN_PACK_NUM];
    int *shdcolindex = &shdindex[blockDim.x];

    // Base index: first element handled by this block.
    int baseidx = blockIdx.x * blockDim.x;
    // Global element index for this thread.
    int idx = threadIdx.x + baseidx;
    // First element of the pack scanned by this thread (matrix method).
    int packidx = SEG_SCAN_PACK * threadIdx.x;

    // Running state while scanning: best distance seen so far, its segment
    // label and its element index.
    float curmaxdist;
    int curlabel;
    int curindex;

    // Sentinel values for out-of-range or not-yet-processed positions.
    float pmaxdist = -100;
    int plabel = -1;
    int pindex = -1;

    // Load this thread's element into shared memory.
    if (idx < n) {
        shdmaxdist[threadIdx.x] = inarray[idx];
        shdlabel[threadIdx.x] = label[idx];
        // When no index array was supplied, use the element's own position.
        if (index == NULL)
            shdindex[threadIdx.x] = idx;
        else
            shdindex[threadIdx.x] = index[idx];
    // Positions past the end of the array get the sentinels.
    } else {
        shdmaxdist[threadIdx.x] = pmaxdist;
        shdlabel[threadIdx.x] = plabel;
        shdindex[threadIdx.x] = pindex;
    }
    __syncthreads();

    // Phase 1: SEG_SCAN_PACK_NUM threads each scan one pack of
    // SEG_SCAN_PACK elements serially.
    if (threadIdx.x < SEG_SCAN_PACK_NUM) {
        // Seed the running state with the first element of the pack.
        curmaxdist = shdmaxdist[packidx];
        curlabel = shdlabel[packidx];
        curindex = shdindex[packidx];
        for (int i = packidx + 1; i < packidx + SEG_SCAN_PACK; i++) {
            // A new segment starts, or a larger distance is found within the
            // same segment: restart/update the running maximum from here.
            if (shdlabel[i] != curlabel || shdmaxdist[i] > curmaxdist) {
                curmaxdist = shdmaxdist[i];
                curlabel = shdlabel[i];
                curindex = shdindex[i];
            // Otherwise propagate the running maximum into this slot.
            } else {
                shdindex[i] = curindex;
                shdmaxdist[i] = curmaxdist;
            }
        }
        // Record the pack's final running state into the column arrays.
        shdcolmaxdist[threadIdx.x] = curmaxdist;
        shdcollabel[threadIdx.x] = curlabel;
        shdcolindex[threadIdx.x] = curindex;
    }
    __syncthreads();

    // Phase 2: thread 0 serially performs a segmented scan over the
    // SEG_SCAN_PACK_NUM column entries.
    if (threadIdx.x == 0) {
        for(int i = 1; i < SEG_SCAN_PACK_NUM; i++) {
            // Same segment and a smaller maximum than the previous column:
            // carry the previous column's record forward.
            if (shdcollabel[i] == shdcollabel[i - 1] &&
                shdcolmaxdist[i] < shdcolmaxdist[i - 1]) {
                shdcolmaxdist[i] = shdcolmaxdist[i - 1];
                shdcollabel[i] = shdcollabel[i - 1];
                shdcolindex[i] = shdcolindex[i - 1];
            }
        }
    }
    __syncthreads();

    // Phase 3: sweep the scanned column results back into each pack.
    if (threadIdx.x < SEG_SCAN_PACK_NUM) {
        // The first pack has no predecessor: seed with the sentinels.
        if (threadIdx.x == 0) {
            curmaxdist = pmaxdist;
            curlabel = plabel;
            curindex = pindex;
        // Every other pack is seeded with the previous pack's scanned column
        // record.
        } else {
            curmaxdist = shdcolmaxdist[threadIdx.x - 1];
            curlabel = shdcollabel[threadIdx.x - 1];
            curindex = shdcolindex[threadIdx.x - 1];
        }
        // Propagate into the pack until a segment boundary or a larger value
        // stops the carry.
        for (int i = packidx; i < packidx + SEG_SCAN_PACK; i++) {
            if (curlabel == shdlabel[i] && shdmaxdist[i] < curmaxdist) {
                shdmaxdist[i] = curmaxdist;
                shdindex[i] = curindex;
            } else {
                break;
            }
        }
    }
    __syncthreads();

    // Positions past the end of the array write nothing.
    if (idx >= n)
        return;

    // Write the per-element results: segment maximum and its index.
    maxdist[idx] = shdmaxdist[threadIdx.x];
    maxdistidx[idx] = shdindex[threadIdx.x];

    // Single-block runs need no intermediate arrays.
    if (blockmaxdist == NULL)
        return;

    // Thread 0 of every block except the last records the block's final
    // column entry into the intermediate arrays for the back pass.
    if (blockIdx.x < gridDim.x - 1 && threadIdx.x == 0) {
        blockmaxdist[blockIdx.x] = shdcolmaxdist[SEG_SCAN_PACK_NUM - 1];
        blocklabel[blockIdx.x] = shdcollabel[SEG_SCAN_PACK_NUM - 1];
        blockmaxdistidx[blockIdx.x] = shdcolindex[SEG_SCAN_PACK_NUM - 1];
    }
}

// Kernel: _segmentedScanBackKer (add intermediate results back)
__global__ void _segmentedScanBackKer(float *maxdist, int *maxdistidx,
                                      int *label, float *blockmaxdist,
                                      int *blocklabel, int *blockmaxdistidx,
                                      int numelements)
{
    // Shared copies of the previous block's final record (one element each).
    __shared__ float shdcurmaxdist[1];
    __shared__ int shdcurlabel[1];
    __shared__ int shdcurmaxdistindex[1];
    //
状态位,用来标记上一块的最后一个元素的标签值是否和本段第一个元素的标签值 // 相同。 __shared__ int state[1]; // 计算需要进行块间累加位置索引(块外的数组索引)。 int idx = (blockIdx.x + 1) * blockDim.x + threadIdx.x; // 用每块的第一个线程来读取每块前一块的最后一个元素,从中间结果数组中读取。 if (threadIdx.x == 0) { shdcurmaxdist[0] = blockmaxdist[blockIdx.x]; shdcurlabel[0] = blocklabel[blockIdx.x]; shdcurmaxdistindex[0] = blockmaxdistidx[blockIdx.x]; // 用 state 来记录上一块的最后一个元素的标签值是否和本段第一个元素的 // 标签值相同,相同则为 1,不同则为 0。 state[0] = (label[idx] == shdcurlabel[0]); } // 块内同步。 __syncthreads(); // 如果状态位为 0,说明上一块和本块无关,不在一个区域内,直接返回。 if (state[0] == 0) return; // 如果数组索引大于数组长度,直接返回。 if (idx >= numelements) return; // 如果当前位置处的标签值和目前已知的最大垂距的标签值相同,并且垂距小于目前 // 已知的最大垂距,那么更新当前位置处的最大垂距记录和最大垂距位置的索引。 if (label[idx] == shdcurlabel[0] && maxdist[idx] < shdcurmaxdist[0]) { maxdist[idx] = shdcurmaxdist[0]; maxdistidx[idx] = shdcurmaxdistindex[0]; } } // Host 成员方法:segmentedScanBack(将中间结果进行返回分段扫描) __host__ int SegmentedScan::segmentedScanBack(float *maxdist, int *maxdistidx, int *label, float *blockmaxdist, int *blocklabel, int *blockmaxdistidx, int numelements) { // 检查输入和输出是否为 NULL,如果为 NULL 直接报错返回。 if (maxdist == NULL || maxdistidx == NULL || label == NULL || blockmaxdist == NULL || blocklabel == NULL || blockmaxdistidx == NULL) return NULL_POINTER; // 检查处理的数组长度,如果小于 0 出错。 if (numelements < 0) return INVALID_DATA; // 计算线程块大小。 int gridsize = max(1, (numelements + SEG_SCAN_BLOCKSIZE - 1) / SEG_SCAN_BLOCKSIZE); // 判断 gridsize 大小,如果小于 1,则不用进行加回操作。返回正确。 if (gridsize < 1) return NO_ERROR; // 调用 _segmentedScanBackKer 核函数,将中间结果数组加回到原扫描数组。 _segmentedScanBackKer<<<gridsize, SEG_SCAN_BLOCKSIZE>>>( maxdist, maxdistidx, label, blockmaxdist, blocklabel, blockmaxdistidx, numelements); // 判断是否出错。 if (cudaGetLastError() != cudaSuccess) return CUDA_ERROR; // 处理完毕退出。 return NO_ERROR; } // Host 成员方法:segmentedScan(数组分段扫描) // 输入输出均在 CPU 端,不做 GPU 端考虑。以后可考虑添加输入输出在 device 端情况 // 不涉及中间变量和加回过程,串行代码。 __host__ int SegmentedScan::segmentedScanCpu(float *inarray, int *label, int *index, float *maxdist, int *maxdistidx, int 
numelements, bool hostinarray, bool hostlabel, bool hostindex,
        bool hostmaxdist, bool hostmaxdistidx)
{
    // Reject NULL inputs/outputs.
    if (inarray == NULL || label == NULL || maxdist == NULL ||
        maxdistidx == NULL)
        return NULL_POINTER;

    // Reject negative array lengths.
    if (numelements < 0)
        return INVALID_DATA;

    // FIX: an empty array previously read inarray[0] and wrote maxdist[0]
    // out of bounds; nothing to scan, so report success immediately.
    if (numelements == 0)
        return NO_ERROR;

    // Scan state: position, best distance so far, its label and its index.
    int idx;
    float curmaxdist;
    int curlabel;
    int curindex;

    // FIX: track whether this function allocated the index array, so the
    // cleanup below no longer frees a caller-supplied buffer.
    bool ownindex = false;

    // When no index array is supplied, build one holding each element's own
    // position.
    if (index == NULL) {
        index = new int[numelements];
        if (index == NULL) {
            return OUT_OF_MEM;
        }
        ownindex = true;
        for (idx = 0; idx < numelements; idx++) {
            index[idx] = idx;
#ifdef SEG_DEBUG_CPU_PRINT
            cout << "[Cpu]index " << idx << " is " << index[idx] << endl;
#endif
        }
    }

    // Initialize the running state and the first output slot.
    curmaxdist = inarray[0];
    curlabel = label[0];
    curindex = index[0];
    maxdist[0] = curmaxdist;
    maxdistidx[0] = curindex;
#ifdef SEG_DEBUG_CPU_PRINT
    cout << "[CPU]0 maxdist is " << maxdist[0] << endl;
    cout << "[CPU]0 maxdistidx is " << maxdistidx[0] << endl;
#endif

    // Serial segmented scan over the remaining elements.
    for (idx = 1; idx < numelements; idx++) {
        // A new segment starts, or a larger distance appears within the same
        // segment: restart/update the running maximum from this element.
        if (label[idx] != curlabel || inarray[idx] > curmaxdist) {
            curmaxdist = inarray[idx];
            curlabel = label[idx];
            curindex = index[idx];
        }
        // Emit the running maximum and its index.
        maxdist[idx] = curmaxdist;
        maxdistidx[idx] = curindex;
#ifdef SEG_DEBUG_CPU_PRINT
        cout << idx << "[CPU]maxdist is " << maxdist[idx] << endl;
        cout << idx << "[CPU]maxdistidx is " << maxdistidx[idx] << endl;
#endif
    }

    // FIX: was `delete index;` — scalar delete on new[] memory is undefined
    // behavior, and it also freed the caller's array when one was passed in.
    if (ownindex)
        delete [] index;

    // Done.
    return NO_ERROR;
}

// Macro: FAIL_SEGMENTED_SCAN_FREE
// On error, release every device buffer allocated so far by segmentedScan.
#define FAIL_SEGMENTED_SCAN_FREE do {                  \
        if (gridsize > 1) {                            \
            if(blockmaxdistDev != NULL)                \
                cudaFree(blockmaxdistDev);             \
            if(blocklabelDev != NULL)                  \
                cudaFree(blocklabelDev);               \
            if(blockmaxdistidxDev != NULL)             \
                cudaFree(blockmaxdistidxDev);          \
        }                                              \
        if (hostinarray && inarrayDev != NULL)         \
            cudaFree(inarrayDev);                      \
        if (hostlabel && labelDev != NULL)             \
            cudaFree(labelDev);                        \
        if (hostindex && indexDev != NULL)             \
            cudaFree(indexDev);                        \
        if (hostmaxdist && \
maxdistDev != NULL) \ cudaFree(maxdistDev); \ if (hostmaxdistidx && maxdistidxDev != NULL) \ cudaFree(maxdistidxDev); \ } while (0) // Host 成员方法:segmentedScan(数组分段扫描) __host__ int SegmentedScan::segmentedScan(float *inarray, int *label, int *index, float *maxdist, int *maxdistidx, int numelements, bool hostinarray, bool hostlabel, bool hostindex, bool hostmaxdist, bool hostmaxdistidx) { // 检查输入和输出是否为 NULL,如果为 NULL 直接报错返回。 if (inarray == NULL || label == NULL || maxdist == NULL || maxdistidx == NULL) return NULL_POINTER; // 本程序实现的方法可处理的数组长度,加以判断控制。 if (numelements < 0) return INVALID_DATA; // 局部变量,错误码。 cudaError_t cuerrcode; int errcode; // 计算共享内存的长度。 unsigned int sharedmemsize = 0; // 定义设备端的输入输出数组指针,当输入输出指针在 Host 端时,在设备端申请对 // 应大小的数组。 float *inarrayDev = NULL; int *labelDev = NULL; int *indexDev = NULL; float *maxdistDev = NULL; int *maxdistidxDev = NULL; // 线程块的大小尺寸。 int gridsize = 0; int blocksize; // 局部变量,中间结果存放数组。长度会根据线程块大小来确定。 float *blockmaxdistDev = NULL; int *blocklabelDev = NULL; int *blockmaxdistidxDev = NULL; // 中间结果数组的长度。 int blocksumsize; // 判断当前 inarray 数组是否存储在 Host 端。若是,则需要在 Device 端为数组 // 申请一段空间;若该数组是在 Device 端,则直接使用。 if (hostinarray) { // 为输入数组在设备端申请内存。 cuerrcode = cudaMalloc((void **)&inarrayDev, sizeof (float) * numelements); if (cuerrcode != cudaSuccess) { // 释放之前申请的内存。 FAIL_SEGMENTED_SCAN_FREE; return cuerrcode; } // 将输入数组拷贝到设备端内存。 cuerrcode = cudaMemcpy(inarrayDev, inarray, sizeof (float) * numelements, cudaMemcpyHostToDevice); if (cuerrcode != cudaSuccess) { // 释放之前申请的内存。 FAIL_SEGMENTED_SCAN_FREE; return cuerrcode; } } else { // 如果在设备端,则将指针传给对应的设备端统一指针。 inarrayDev = inarray; } // 判断当前 label 数组是否存储在 Host 端。若是,则需要在 Device 端为数组 // 申请一段空间;若该数组是在 Device 端,则直接使用。 if (hostlabel) { // 为输入数组在设备端申请内存。 cuerrcode = cudaMalloc((void **)&labelDev, sizeof (int) * numelements); if (cuerrcode != cudaSuccess) { // 释放之前申请的内存。 FAIL_SEGMENTED_SCAN_FREE; return cuerrcode; } // 将输入数组拷贝到设备端内存。 cuerrcode = cudaMemcpy(labelDev, label, sizeof (int) * numelements, 
cudaMemcpyHostToDevice); if (cuerrcode != cudaSuccess) { // 释放之前申请的内存。 FAIL_SEGMENTED_SCAN_FREE; return cuerrcode; } } else { // 如果在设备端,则将指针传给对应的设备端统一指针。 labelDev = label; } // 判断当前 label 数组是否存储在 Host 端。若在设备端,则直接使用。若在 Host // 端,则不处理。 if (!hostindex) { // 如果在设备端,则将指针传给对应的设备端统一指针。 indexDev = index; } // 判断当前 maxdist 数组是否存储在 Host 端。若是,则需要在 Device 端为数组 // 申请一段空间;若该数组是在 Device 端,则直接使用。 if (hostmaxdist) { // 为输出数组在设备端申请内存。 cuerrcode = cudaMalloc((void **)&maxdistDev, sizeof (float) * numelements); if (cuerrcode != cudaSuccess) { // 释放之前申请的内存。 FAIL_SEGMENTED_SCAN_FREE; return cuerrcode; } } else { // 如果在设备端,则将指针传给对应的设备端统一指针。 maxdistDev = maxdist; } // 判断当前 maxdistidx 数组是否存储在 Host 端。若是,则需要在 Device 端为数 // 组申请一段空间;若该数组是在 Device 端,则直接使用。 if (hostmaxdistidx) { // 为输出数组在设备端申请内存。 cuerrcode = cudaMalloc((void **)&maxdistidxDev, sizeof (int) * numelements); if (cuerrcode != cudaSuccess) { // 释放之前申请的内存。 FAIL_SEGMENTED_SCAN_FREE; return cuerrcode; } } else { // 如果在设备端,则将指针传给对应的设备端统一指针。 maxdistidxDev = maxdistidx; } // 针对不同的实现类型,选择不同的路径进行处理。 switch(segmentedScanType) { // 使用矩阵方法的 segmentedscan 实现。 case MATRIX_SEGMENTED_SCAN: // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 // 矩阵方法分段扫描版本线程块大小。 blocksize = SEG_SCAN_BLOCKSIZE; // 计算线程块大小和共享内存长度。 gridsize = max(1, (numelements + blocksize - 1) / blocksize); sharedmemsize = (sizeof (float) + 2 * sizeof (int)) * (blocksize + SEG_SCAN_PACK_NUM); // 如果扫描所需要的线程的 grid 尺寸大于 1,就需要进行加回操作,就需要申 // 请存放中间结果的数组。 if (gridsize > 1) { // 需要将每块处理的最后一个元素取出,最后一个线程块不进行处理。 blocksumsize = gridsize - 1; // 为存放中间结果的 3 个数组在设备端申请内存。 cuerrcode = cudaMalloc((void **)&blockmaxdistDev, blocksumsize * sizeof(float)); // 出错则释放申请的内存。 if (cuerrcode != cudaSuccess) { // 释放之前申请的内存。 FAIL_SEGMENTED_SCAN_FREE; return cuerrcode; } // 为存放区域标签的中间数组在设备端申请内存。 cuerrcode = cudaMalloc((void **)&blocklabelDev, blocksumsize * sizeof(int)); if (cuerrcode != cudaSuccess) { // 释放之前申请的内存。 FAIL_SEGMENTED_SCAN_FREE; return cuerrcode; } // 为存放最大垂距位置索引的中间结果数组在设备端申请内存。 cuerrcode = cudaMalloc((void 
**)&blockmaxdistidxDev, blocksumsize * sizeof(int)); if (cuerrcode != cudaSuccess) { // 释放之前申请的内存。 FAIL_SEGMENTED_SCAN_FREE; return cuerrcode; } } // 调用 Kernel 函数,完成实际的分段数组扫描。 _segmentedScanMatrixKer<<<gridsize, blocksize, sharedmemsize>>>( inarrayDev, labelDev, indexDev, numelements, maxdistDev, maxdistidxDev, blockmaxdistDev, blocklabelDev, blockmaxdistidxDev); // 判断核函数是否出错。 if (cudaGetLastError() != cudaSuccess) { // 释放之前申请的内存。 FAIL_SEGMENTED_SCAN_FREE; return CUDA_ERROR; } break; // 其他方式情况下,直接返回非法数据错误。 default: // 释放之前申请的内存。 FAIL_SEGMENTED_SCAN_FREE; return INVALID_DATA; } if (gridsize > 1) { // 递归调用分段扫描函数。此时输入输出数组皆为中间结果数组。 // 这里的递归调用不会调用多次,数组的规模是指数倍减小的。 errcode = segmentedScan(blockmaxdistDev, blocklabelDev, blockmaxdistidxDev, blockmaxdistDev, blockmaxdistidxDev, blocksumsize, false, false, false, false, false); if (errcode != NO_ERROR) { // 释放之前申请的内存。 FAIL_SEGMENTED_SCAN_FREE; return errcode; } // 调用加回函数,将各块的扫描中间结果加回到输出数组。 errcode = segmentedScanBack(maxdistDev, maxdistidxDev, labelDev, blockmaxdistDev, blocklabelDev, blockmaxdistidxDev, numelements); if (errcode != NO_ERROR) { // 释放之前申请的内存。 FAIL_SEGMENTED_SCAN_FREE; return errcode; } } // 如果 maxdist 数组在 Host 端,将结果拷贝到输出。 if (hostmaxdist) { // 将结果从设备端内存拷贝到输出数组。 cuerrcode = cudaMemcpy(maxdist, maxdistDev, sizeof (float) * numelements, cudaMemcpyDeviceToHost); // 出错则释放之前申请的内存。 if (cuerrcode != cudaSuccess) { // 释放之前申请的内存。 FAIL_SEGMENTED_SCAN_FREE; return cuerrcode; } } // 如果 maxdistidx 数组在 Host 端,将结果拷贝到输出。 if (hostmaxdistidx) { // 将结果从设备端内存拷贝到输出数组。 cuerrcode = cudaMemcpy(maxdistidx, maxdistidxDev, sizeof (int) * numelements, cudaMemcpyDeviceToHost); // 出错则释放之前申请的内存。 if (cuerrcode != cudaSuccess) { // 释放之前申请的内存。 FAIL_SEGMENTED_SCAN_FREE; return cuerrcode; } } // 释放 Device 内存。需要判断输入输出参数是否在 host 端。 FAIL_SEGMENTED_SCAN_FREE; // 处理完毕退出。 return NO_ERROR; } // 取消前面的宏定义。 #undef FAIL_SEGMENTED_SCAN_FREE
the_stack
// Modifications: CUDA implementation of CPU version
// Copyright 2020 Netease Fuxi AI LAB
// SPDX-License-Identifier: Apache-2.0
#include "rasterize_triangles_cuda_impl.h"

namespace pytorch_mesh_renderer
{
    // Rounds down the smallest of {a, b, c} and clamps the result into the
    // integer range [low, high].
    template <typename scalar_t>
    __device__ __forceinline__
    int32_t ClampedIntegerMin(scalar_t a, scalar_t b, scalar_t c,
                              int32_t low, int32_t high)
    {
        const scalar_t smallest = fmin(fmin(a, b), c);
        const scalar_t clamped =
            fmin(fmax(floor(smallest), (scalar_t)low), (scalar_t)high);
        return (int32_t)clamped;
    }

    // Rounds up the largest of {a, b, c} and clamps the result into the
    // integer range [low, high].
    template <typename scalar_t>
    __device__ __forceinline__
    int32_t ClampedIntegerMax(scalar_t a, scalar_t b, scalar_t c,
                              int32_t low, int32_t high)
    {
        const scalar_t largest = fmax(fmax(a, b), c);
        const scalar_t clamped =
            fmin(fmax(ceil(largest), (scalar_t)low), (scalar_t)high);
        return (int32_t)clamped;
    }

    // Computes a 3x3 matrix inverse without dividing by the determinant.
    // Instead, makes an unnormalized matrix inverse with the correct sign
    // by flipping the sign of the matrix if the determinant is negative.
    // By leaving out determinant division, the rows of M^-1 only depend on
    // two out of three of the columns of M; i.e., the first row of M^-1 only
    // depends on the second and third columns of M, the second only depends
    // on the first and third, etc. This means we can compute edge functions
    // for two neighboring triangles independently and produce exactly the
    // same numerical result up to the sign. This in turn means we can avoid
    // cracks in rasterization without using fixed-point arithmetic.
    // See http://mathworld.wolfram.com/MatrixInverse.html
    template <typename scalar_t>
    __device__ void ComputeUnnormalizedMatrixInverse(
        const scalar_t a11, const scalar_t a12, const scalar_t a13,
        const scalar_t a21, const scalar_t a22, const scalar_t a23,
        const scalar_t a31, const scalar_t a32, const scalar_t a33,
        scalar_t m_inv[9])
    {
        // Cofactor (adjugate) entries, row by row.
        m_inv[0] = a22 * a33 - a32 * a23;
        m_inv[1] = a13 * a32 - a33 * a12;
        m_inv[2] = a12 * a23 - a22 * a13;
        m_inv[3] = a23 * a31 - a33 * a21;
        m_inv[4] = a11 * a33 - a31 * a13;
        m_inv[5] = a13 * a21 - a23 * a11;
        m_inv[6] = a21 * a32 - a31 * a22;
        m_inv[7] = a12 * a31 - a32 * a11;
        m_inv[8] = a11 * a22 - a21 * a12;

        // The first column of the unnormalized M^-1 contains intermediate
        // values for det(M); expand along the first row.
        const scalar_t det = a11 * m_inv[0] + a12 * m_inv[3] + a13 * m_inv[6];

        // Transfer the sign of the determinant onto the whole matrix.
        if (det < 0.0f)
        {
            for (int32_t i = 0; i < 9; ++i)
            {
                m_inv[i] = -m_inv[i];
            }
        }
    }

    // Computes the edge functions from M^-1 as described by Olano and Greer,
    // "Triangle Scan Conversion using 2D Homogeneous Coordinates."
    //
    // This function combines equations (3) and (4). It first computes
    // [a b c] = u_i * M^-1, where u_0 = [1 0 0], u_1 = [0 1 0], etc.,
    // then computes edge_i = aX + bY + c
    template <typename scalar_t>
    __device__ void ComputeEdgeFunctions(
        const scalar_t px, const scalar_t py,
        const scalar_t m_inv[9], scalar_t values[3])
    {
        for (int32_t i = 0; i < 3; ++i)
        {
            // Row i of M^-1 holds the coefficients (a, b, c) of edge i.
            const scalar_t* row = m_inv + 3 * i;
            values[i] = row[0] * px + row[1] * py + row[2];
        }
    }

    // Determines whether the point p lies inside a front-facing triangle.
    // Counts pixels exactly on an edge as inside the triangle, as long as the
    // triangle is not degenerate. Degenerate (zero-area) triangles always
    // fail the inside test.
template <typename scalar_t> __device__ __forceinline__ bool PixelIsInsideTriangle(const scalar_t edge_values[3]) { // Check that the edge values are all non-negative and that at least one is // positive (triangle is non-degenerate). return (edge_values[0] >= 0 && edge_values[1] >= 0 && edge_values[2] >= 0) && (edge_values[0] > 0 || edge_values[1] > 0 || edge_values[2] > 0); } template <typename scalar_t> __global__ void RasterizeTrianglesForwardCudaKernel( const scalar_t* vertices, const int64_t* triangles, int64_t triangle_count, int32_t image_width, int32_t image_height, scalar_t* barycentric_coordinates, int64_t* triangle_ids, scalar_t* z_buffer, int32_t* locks) { const int64_t triangle_id = blockIdx.x * blockDim.x + threadIdx.x; if (triangle_id >= triangle_count) { return; } const scalar_t half_image_width = 0.5f * image_width; const scalar_t half_image_height = 0.5f * image_height; double unnormalized_matrix_inverse[9]; double b_over_w[3]; const int64_t v0_x_id = 4 * triangles[3 * triangle_id]; const int64_t v1_x_id = 4 * triangles[3 * triangle_id + 1]; const int64_t v2_x_id = 4 * triangles[3 * triangle_id + 2]; const scalar_t v0w = vertices[v0_x_id + 3]; const scalar_t v1w = vertices[v1_x_id + 3]; const scalar_t v2w = vertices[v2_x_id + 3]; // Early exit: if all w < 0, triangle is entirely behind the eye. if (v0w < 0 && v1w < 0 && v2w < 0) { return; } const scalar_t v0x = vertices[v0_x_id]; const scalar_t v0y = vertices[v0_x_id + 1]; const scalar_t v1x = vertices[v1_x_id]; const scalar_t v1y = vertices[v1_x_id + 1]; const scalar_t v2x = vertices[v2_x_id]; const scalar_t v2y = vertices[v2_x_id + 1]; // The nondeterminacy of GPU device in single precision may lead some pixel // to be missing when a pixel is on the boundary of two triangles, so we use // double precision to check the location of a pixel. 
ComputeUnnormalizedMatrixInverse((double)v0x, (double)v1x, (double)v2x, (double)v0y, (double)v1y, (double)v2y, (double)v0w, (double)v1w, (double)v2w, unnormalized_matrix_inverse); // Initialize the bounding box to the entire screen. int32_t left = 0, right = image_width, bottom = 0, top = image_height; // If the triangle is entirely inside the screen, project the vertices to // pixel coordinates and find the triangle bounding box enlarged to the // nearest integer and clamped to the image boundaries. if (v0w > 0 && v1w > 0 && v2w > 0) { const scalar_t p0x = (v0x / v0w + 1.0f) * half_image_width; const scalar_t p1x = (v1x / v1w + 1.0f) * half_image_width; const scalar_t p2x = (v2x / v2w + 1.0f) * half_image_width; const scalar_t p0y = (v0y / v0w + 1.0f) * half_image_height; const scalar_t p1y = (v1y / v1w + 1.0f) * half_image_height; const scalar_t p2y = (v2y / v2w + 1.0f) * half_image_height; left = ClampedIntegerMin(p0x, p1x, p2x, 0, image_width); right = ClampedIntegerMax(p0x, p1x, p2x, 0, image_width); bottom = ClampedIntegerMin(p0y, p1y, p2y, 0, image_height); top = ClampedIntegerMax(p0y, p1y, p2y, 0, image_height); } // Iterate over each pixel in the bounding box. 
for (int32_t iy = bottom; iy < top; ++iy) { for (int32_t ix = left; ix < right; ++ix) { const scalar_t px = ((ix + 0.5f) / half_image_width) - 1.0f; const scalar_t py = ((iy + 0.5f) / half_image_height) - 1.0f; const int32_t pixel_idx = iy * image_width + ix; ComputeEdgeFunctions((double)px, (double)py, unnormalized_matrix_inverse, b_over_w); if (!PixelIsInsideTriangle(b_over_w)) { continue; } const scalar_t one_over_w = scalar_t(b_over_w[0] + b_over_w[1] + b_over_w[2]); const scalar_t b0 = scalar_t(b_over_w[0] / one_over_w); const scalar_t b1 = scalar_t(b_over_w[1] / one_over_w); const scalar_t b2 = scalar_t(b_over_w[2] / one_over_w); const scalar_t v0z = vertices[v0_x_id + 2]; const scalar_t v1z = vertices[v1_x_id + 2]; const scalar_t v2z = vertices[v2_x_id + 2]; // Since we computed an unnormalized w above, we need to recompute // a properly scaled clip-space w value and then divide clip-space z // by that. const scalar_t clip_z = b0 * v0z + b1 * v1z + b2 * v2z; const scalar_t clip_w = b0 * v0w + b1 * v1w + b2 * v2w; const scalar_t z = clip_z / clip_w; // Skip the pixel if it is farther than the current z-buffer pixel or // beyond the near or far clipping plane. 
if (z < -1.0 || z > 1.0) // || z > z_buffer[pixel_idx] { continue; } // write z_buffer, triangle_ids and barycentric_coordinates by using cuda threads lock // reference: https://stackoverflow.com/questions/21341495/cuda-mutex-and-atomiccas int32_t* mutex = locks + pixel_idx; bool isSet = false; do { if (isSet = atomicCAS(mutex, 0, 1) == 0) { if (z <= z_buffer[pixel_idx]) { z_buffer[pixel_idx] = z; triangle_ids[pixel_idx] = triangle_id; barycentric_coordinates[3 * pixel_idx + 0] = b0; barycentric_coordinates[3 * pixel_idx + 1] = b1; barycentric_coordinates[3 * pixel_idx + 2] = b2; } } if (isSet) { atomicExch(mutex, 0); __threadfence(); } } while (!isSet); /* original if (z < z_buffer[pixel_idx]) { z_buffer[pixel_idx] = z; triangle_ids[pixel_idx] = triangle_id; barycentric_coordinates[3 * pixel_idx + 0] = b0; barycentric_coordinates[3 * pixel_idx + 1] = b1; barycentric_coordinates[3 * pixel_idx + 2] = b2; } */ } } } void RasterizeTrianglesForwardCuda( at::Tensor vertices, at::Tensor triangles, int32_t image_width, int32_t image_height, torch::Tensor barycentric, torch::Tensor triangle_ids, torch::Tensor z_buffer) { const int64_t triangle_count = triangles.size(0); const int threads = 512; const dim3 blocks((triangle_count - 1) / threads + 1); int32_t* locks = NULL; // pixel locks cudaMalloc((void**)&locks, image_width * image_height * sizeof(int32_t)); cudaMemset(locks, 0, image_width * image_height * sizeof(int32_t)); AT_DISPATCH_FLOATING_TYPES(vertices.type(), "RasterizeTrianglesForwardCuda", ([&] { RasterizeTrianglesForwardCudaKernel<scalar_t> << <blocks, threads >> > ( vertices.data<scalar_t>(), triangles.data<int64_t>(), triangle_count, image_width, image_height, barycentric.data<scalar_t>(), triangle_ids.data<int64_t>(), z_buffer.data<scalar_t>(), locks); })); cudaFree(locks); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) printf("Error in RasterizeTrianglesForwardCuda: %s\n", cudaGetErrorString(err)); } template <typename scalar_t> __global__ 
    void RasterizeTrianglesBackwardCudaKernel(
        const scalar_t* vertices,
        const int64_t* triangles,
        const scalar_t* barycentric_coordinates,
        const int64_t* triangle_ids,
        const scalar_t* df_dbarycentric_coordinates,
        int32_t image_width,
        int32_t image_height,
        scalar_t* df_dvertices)
    {
        // One thread per output pixel; guard the grid tail.
        const int32_t pixel_id = blockIdx.x * blockDim.x + threadIdx.x;
        if (pixel_id >= image_width * image_height)
        {
            return;
        }

        // We first loop over each pixel in the output image, and compute
        // dbarycentric_coordinate[0,1,2]/dvertex[0x, 0y, 1x, 1y, 2x, 2y].
        // Next we compute each value above's contribution to
        // df/dvertices, building up that matrix as the output of this
        // iteration.

        // b0, b1, and b2 are the three barycentric coordinate values
        // rendered at pixel pixel_id.
        const scalar_t b0 = barycentric_coordinates[3 * pixel_id];
        const scalar_t b1 = barycentric_coordinates[3 * pixel_id + 1];
        const scalar_t b2 = barycentric_coordinates[3 * pixel_id + 2];

        // Pixels the forward pass did not cover carry (near-)zero
        // barycentrics; they contribute no gradient.
        if (b0 + b1 + b2 < kDegenerateBarycentricCoordinatesCutoff)
        {
            return;
        }

        // Incoming gradient of the loss w.r.t. each barycentric coordinate.
        const scalar_t df_db0 = df_dbarycentric_coordinates[3 * pixel_id];
        const scalar_t df_db1 = df_dbarycentric_coordinates[3 * pixel_id + 1];
        const scalar_t df_db2 = df_dbarycentric_coordinates[3 * pixel_id + 2];

        // Triangle rendered at this pixel (written by the forward pass).
        const int64_t triangle_at_current_pixel = triangle_ids[pixel_id];
        const int64_t* vertices_at_current_pixel =
            &triangles[3 * triangle_at_current_pixel];

        // Extract vertex indices for the current triangle.
        // Each vertex occupies 4 entries (x, y, z, w) in `vertices`.
        const int64_t v0_id = 4 * vertices_at_current_pixel[0];
        const int64_t v1_id = 4 * vertices_at_current_pixel[1];
        const int64_t v2_id = 4 * vertices_at_current_pixel[2];

        // Extract x,y,w components of the vertices' clip space coordinates.
        const scalar_t x0 = vertices[v0_id];
        const scalar_t y0 = vertices[v0_id + 1];
        const scalar_t w0 = vertices[v0_id + 3];
        const scalar_t x1 = vertices[v1_id];
        const scalar_t y1 = vertices[v1_id + 1];
        const scalar_t w1 = vertices[v1_id + 3];
        const scalar_t x2 = vertices[v2_id];
        const scalar_t y2 = vertices[v2_id + 1];
        const scalar_t w2 = vertices[v2_id + 3];

        // Compute pixel's NDC-s (pixel-center convention, matching the
        // forward kernel).
        const int32_t ix = pixel_id % image_width;
        const int32_t iy = pixel_id / image_width;
        const scalar_t px = 2 * (ix + 0.5f) / image_width - 1.0f;
        const scalar_t py = 2 * (iy + 0.5f) / image_height - 1.0f;

        // Barycentric gradients wrt each vertex coordinate share a common
        // factor.
        const scalar_t db0_dx = py * (w1 - w2) - (y1 - y2);
        const scalar_t db1_dx = py * (w2 - w0) - (y2 - y0);
        // The three barycentrics sum to 1, so their gradients sum to 0.
        const scalar_t db2_dx = -(db0_dx + db1_dx);
        const scalar_t db0_dy = (x1 - x2) - px * (w1 - w2);
        const scalar_t db1_dy = (x2 - x0) - px * (w2 - w0);
        const scalar_t db2_dy = -(db0_dy + db1_dy);
        const scalar_t db0_dw = px * (y1 - y2) - py * (x1 - x2);
        const scalar_t db1_dw = px * (y2 - y0) - py * (x2 - x0);
        const scalar_t db2_dw = -(db0_dw + db1_dw);

        // Combine them with chain rule.
        const scalar_t df_dx =
            df_db0 * db0_dx + df_db1 * db1_dx + df_db2 * db2_dx;
        const scalar_t df_dy =
            df_db0 * db0_dy + df_db1 * db1_dy + df_db2 * db2_dy;
        const scalar_t df_dw =
            df_db0 * db0_dw + df_db1 * db1_dw + df_db2 * db2_dw;

        // Values of edge equations and inverse w at the current pixel.
        // NOTE(review): edge0/edge1 are evaluated with vertex 2 and edge2
        // with vertex 1 — each edge equation is evaluated at a vertex that
        // is not on that edge; this matches the CPU reference
        // implementation. Confirm against the upstream derivation if
        // modifying.
        const scalar_t edge0_over_w = x2 * db0_dx + y2 * db0_dy + w2 * db0_dw;
        const scalar_t edge1_over_w = x2 * db1_dx + y2 * db1_dy + w2 * db1_dw;
        const scalar_t edge2_over_w = x1 * db2_dx + y1 * db2_dy + w1 * db2_dw;
        const scalar_t w_inv = edge0_over_w + edge1_over_w + edge2_over_w;

        // All gradients share a common denominator.
        const scalar_t w_sqr = 1 / (w_inv * w_inv);

        // Gradients wrt each vertex share a common factor.
        const scalar_t edge0 = w_sqr * edge0_over_w;
        const scalar_t edge1 = w_sqr * edge1_over_w;
        const scalar_t edge2 = w_sqr * edge2_over_w;

        // Many pixels can touch the same vertex, so the accumulation into
        // df_dvertices must be atomic. Only x, y and w receive gradients;
        // z does not influence the barycentric coordinates.
        atomicAdd(&df_dvertices[v0_id + 0], edge0 * df_dx);
        atomicAdd(&df_dvertices[v0_id + 1], edge0 * df_dy);
        atomicAdd(&df_dvertices[v0_id + 3], edge0 * df_dw);
        atomicAdd(&df_dvertices[v1_id + 0], edge1 * df_dx);
        atomicAdd(&df_dvertices[v1_id + 1], edge1 * df_dy);
        atomicAdd(&df_dvertices[v1_id + 3], edge1 * df_dw);
        atomicAdd(&df_dvertices[v2_id + 0], edge2 * df_dx);
        atomicAdd(&df_dvertices[v2_id + 1], edge2 * df_dy);
        atomicAdd(&df_dvertices[v2_id + 3], edge2 * df_dw);
    }

    // Host launcher for the backward pass: accumulates df/dvertices from the
    // per-pixel barycentric gradients. All tensors must live on the CUDA
    // device; df_dvertices should be zero-initialized by the caller
    // (gradients are accumulated with atomicAdd).
    void RasterizeTrianglesBackwardCuda(
        at::Tensor vertices,                     // FloatTensor
        at::Tensor triangles,                    // LongTensor
        at::Tensor barycentric_coordinates,      // FloatTensor
        at::Tensor triangle_ids,                 // LongTensor
        at::Tensor df_dbarycentric_coordinates,  // FloatTensor
        int32_t image_width, int32_t image_height,
        at::Tensor df_dvertices)
    {
        // One thread per pixel.
        const int64_t pixel_count = image_width * image_height;
        const int threads = 512;
        const dim3 blocks((pixel_count - 1) / threads + 1);

        AT_DISPATCH_FLOATING_TYPES(
            vertices.type(), "RasterizeTrianglesBackwardCuda", ([&] {
                RasterizeTrianglesBackwardCudaKernel<scalar_t>
                    << <blocks, threads >> > (
                        vertices.data<scalar_t>(),
                        triangles.data<int64_t>(),
                        barycentric_coordinates.data<scalar_t>(),
                        triangle_ids.data<int64_t>(),
                        df_dbarycentric_coordinates.data<scalar_t>(),
                        image_width, image_height,
                        df_dvertices.data<scalar_t>());
            }));

        // Surface launch/runtime errors.
        cudaError_t err = cudaGetLastError();
        if (err != cudaSuccess)
            printf("Error in RasterizeTrianglesBackwardCuda: %s\n",
                   cudaGetErrorString(err));
    }
}// namespace pytorch_mesh_renderer
the_stack
#pragma once

#include "cuda/Complex.cuh"
#include "cuda/ComputeCapabilities.cuh"
#include "cuda/CudaUtils.cuh"
#include "cuda/DeviceTensor.cuh"
#include "cuda/fbfft/FBFFTCommon.cuh"
#include "cuda/fbfft/FBFFTParameters.h"
#include "cuda/fbfft/FFT2D32.cuh"
#include "cuda/util/CachedDeviceProperties.h"

#include <cuda_runtime.h>

#include <glog/logging.h>

using namespace facebook::cuda;

namespace facebook { namespace cuda { namespace fbfft {

namespace detail {

// Loads one complex coefficient per thread from a Hermitian-packed 2-D
// frequency tensor (only FFTSize/2+1 rows are stored); rows beyond the
// stored half are reconstructed via the conjugate symmetry
// X[r][c] = conj(X[N-r][(N-c) mod N]).
template <typename T, int FFTSize, int Dim>
__device__ __forceinline__ void load2D(
    const DeviceTensor<T, Dim>& complex,
    FFT1DCoeffs<FFTSize>& coeffs,
    const int batch,
    const int offsetRow,
    const int indexRow,
    const int indexCol) {
  // NOTE(review): LogFFTSize is computed but never used in this function.
  int LogFFTSize = getMSB(FFTSize);
  // adjustedThreadIdxX<FFTSize>() crams multiple < WARP_SIZE FFTs in a warp
  int col = adjustedThreadIdxX<FFTSize>() + indexCol * blockDim.x;
  // adjustedThreadIdxX<FFTSize>() crams multiple < WARP_SIZE FFTs in a warp
  int row = offsetRow + adjustedThreadIdxY<FFTSize>() + indexRow * blockDim.y;

  assert(FFTSize / 2 + 1 == complex.getSize(1));
  assert(FFTSize == complex.getSize(2));
  assert(col < FFTSize);
  assert(row < FFTSize);

  // IFFT, no need to pad by construction
  if (row < FFTSize / 2 + 1) {
    // Row is stored explicitly: direct read.
    coeffs[indexCol] =
      ldg(complex[batch][row][col].template dataAs<Complex>());
  } else {
    // Row is implicit: reconstruct from the conjugate-symmetric entry.
    int rrow = FFTSize - row;
    int ccol = (FFTSize - col) % FFTSize;
    assert(rrow >= 0);
    assert(rrow < FFTSize / 2 + 1);
    assert(ccol >= 0);
    assert(ccol < FFTSize);
    coeffs[indexCol] =
      ldg(complex[batch][rrow][ccol].template dataAs<Complex>()).conjugate();
  }
}

// NOTE(review): identical body to load2D above; kept as a separate entry
// point, presumably for pairing with load2D2b below — confirm before
// consolidating.
template <typename T, int FFTSize, int Dim>
__device__ __forceinline__ void load2D2a(
    const DeviceTensor<T, Dim>& complex,
    FFT1DCoeffs<FFTSize>& coeffs,
    const int batch,
    const int offsetRow,
    const int indexRow,
    const int indexCol) {
  // NOTE(review): LogFFTSize is computed but never used in this function.
  int LogFFTSize = getMSB(FFTSize);
  // adjustedThreadIdxX<FFTSize>() crams multiple < WARP_SIZE FFTs in a warp
  int col = adjustedThreadIdxX<FFTSize>() + indexCol * blockDim.x;
  // adjustedThreadIdxX<FFTSize>() crams multiple < WARP_SIZE FFTs in a warp
  int row = offsetRow + adjustedThreadIdxY<FFTSize>() + indexRow * blockDim.y;

  assert(FFTSize / 2 + 1 == complex.getSize(1));
  assert(FFTSize == complex.getSize(2));
  assert(col < FFTSize);
  assert(row < FFTSize);

  // IFFT, no need to pad by construction
  if (row < FFTSize / 2 + 1) {
    coeffs[indexCol] =
      ldg(complex[batch][row][col].template dataAs<Complex>());
  } else {
    int rrow = FFTSize - row;
    int ccol = (FFTSize - col) % FFTSize;
    assert(rrow >= 0);
    assert(rrow < FFTSize / 2 + 1);
    assert(ccol >= 0);
    assert(ccol < FFTSize);
    coeffs[indexCol] =
      ldg(complex[batch][rrow][ccol].template dataAs<Complex>()).conjugate();
  }
}

// Companion to load2D2a: accumulates a second batch's coefficient,
// multiplied by i (tmp -> (-tmp.im, tmp.re)), into the same register. This
// packs two real IFFTs into one complex IFFT. No-op when `batch` is past
// the end of the tensor.
template <typename T, int FFTSize, int Dim>
__device__ __forceinline__ void load2D2b(
    const DeviceTensor<T, Dim>& complex,
    FFT1DCoeffs<FFTSize>& coeffs,
    const int batch,
    const int offsetRow,
    const int indexRow,
    const int indexCol) {
  if (batch < complex.getSize(0)) {
    // NOTE(review): LogFFTSize is computed but never used in this function.
    int LogFFTSize = getMSB(FFTSize);
    // adjustedThreadIdxX<FFTSize>() crams multiple < WARP_SIZE FFTs in a warp
    int col = adjustedThreadIdxX<FFTSize>() + indexCol * blockDim.x;
    // adjustedThreadIdxX<FFTSize>() crams multiple < WARP_SIZE FFTs in a warp
    int row =
      offsetRow + adjustedThreadIdxY<FFTSize>() + indexRow * blockDim.y;

    assert(FFTSize / 2 + 1 == complex.getSize(1));
    assert(FFTSize == complex.getSize(2));
    assert(col < FFTSize);
    assert(row < FFTSize);

    // IFFT, no need to pad by construction
    if (row < FFTSize / 2 + 1) {
      Complex tmp(ldg(complex[batch][row][col].template dataAs<Complex>()));
      // Accumulate i * tmp.
      coeffs[indexCol] += Complex(-tmp.im(), tmp.re());
    } else {
      int rrow = FFTSize - row;
      int ccol = (FFTSize - col) % FFTSize;
      assert(rrow >= 0);
      assert(rrow < FFTSize / 2 + 1);
      assert(ccol >= 0);
      assert(ccol < FFTSize);
      Complex tmp(
        ldg(complex[batch][rrow][ccol].template dataAs<Complex>())
          .conjugate());
      // Accumulate i * tmp.
      coeffs[indexCol] += Complex(-tmp.im(), tmp.re());
    }
  }
}

// Stores the real part of one coefficient per thread into the padded real
// output tensor; out-of-bounds (padding) positions are skipped.
template <int FFTSize>
__device__ __forceinline__ void store2D(
    DeviceTensor<float, 3>& real,
    const FFT1DCoeffs<FFTSize>& coeffs,
    const int batch,
    const int indexCol,
    const int indexRow,
    const int padL,
    const int padU) {
  // adjustedThreadIdxX<FFTSize>() crams multiple < WARP_SIZE FFTs in a warp
  int col = adjustedThreadIdxX<FFTSize>() + indexCol * blockDim.x;
  // adjustedThreadIdxX<FFTSize>() crams multiple < WARP_SIZE FFTs in a warp
  int row = adjustedThreadIdxY<FFTSize>() + indexRow * blockDim.y;
  if (inBounds(row, col, padU, padL, real)) {
    // TODO: try to do something with float4 and shuffles
    real[batch][row - padU][col - padL] = coeffs[indexCol].re();
  }
}

// Unpacks the two real IFFTs that were packed into one complex IFFT (see
// load2D2a/load2D2b): the real part goes to `batch`, the imaginary part to
// the paired batch further along dimension 0.
template <int FFTSize, int FFTPerWarp>
__device__ __forceinline__ void store2D2(
    DeviceTensor<float, 3>& real,
    const FFT1DCoeffs<FFTSize>& coeffs,
    const int batch,
    const int indexCol,
    const int indexRow,
    const int padL,
    const int padU) {
  // adjustedThreadIdxX<FFTSize>() crams multiple < WARP_SIZE FFTs in a warp
  int col = adjustedThreadIdxX<FFTSize>() + indexCol * blockDim.x;
  // adjustedThreadIdxX<FFTSize>() crams multiple < WARP_SIZE FFTs in a warp
  int row = adjustedThreadIdxY<FFTSize>() + indexRow * blockDim.y;
  if (inBounds(row, col, padU, padL, real)) {
    // TODO: try to do something with float4 and shuffles
    real[batch][row - padU][col - padL] = coeffs[indexCol].re();
    // NOTE(review): the guard tests batch + gridDim.x but the store indexes
    // batch + FFTPerWarp * gridDim.x; these differ when FFTPerWarp > 1 —
    // confirm the intended batch layout against the callers.
    if (batch + gridDim.x < real.getSize(0)) {
      real[batch + FFTPerWarp * gridDim.x][row - padU][col - padL] =
        coeffs[indexCol].im();
    }
  }
}

// Warp-level 2-D inverse Hermitian FFT for FFTSize <= WARP_SIZE: two 1-D
// decimation-in-frequency passes with a shared-memory transpose in between.
// Two real transforms per thread are packed into one complex transform.
template <int FFTSize, int FFTPerWarp, bool BitReverse>
__global__ void decimateInFrequencyInverseHermitian2DWarpKernel(
    DeviceTensor<Complex, 3> src,
    DeviceTensor<float, 3> dst,
    const int padL,
    const int padU) {
  assert(src.getStride(2) == 1);
  assert(dst.getStride(2) == 1);
  cuda_static_assert(!(FFTPerWarp & (FFTPerWarp - 1)));
  cuda_static_assert(FFTPerWarp * FFTSize <= WARP_SIZE);
  // Only let FFTs <= 8 have multiple per warp, 16 and 32 perform better with
  // 1 per warp.
  cuda_static_assert(FFTSize <= 16 || FFTPerWarp == 1);
  assert(FFTPerWarp * FFTSize == blockDim.x);
  assert(src.getSize(0) % FFTPerWarp == 0);

  // Enforce that the number of FFTs we perform is divisible by the number of
  // FFTs per warp, otherwise weird divergence will occur and possibly bugs.
  const int batch = adjustedBatch<FFTSize, FFTPerWarp>();
  assert (batch < src.getSize(0) / 2 + 1);

  FFT1DCoeffs<FFTSize> coeffs;
  __shared__ Complex buffer[WARP_SIZE][WARP_SIZE + 1];
  cuda_static_assert(FFTSize <= WARP_SIZE);

  // Twiddles is the same as for 1D but fully data parallel across threadIdx.y
  FFT1DRoots<FFTSize> roots;
  roots.template twiddles<false>();

  // Pack two real IFFTs (batch and its pair) into one complex IFFT.
  load2D2a<Complex, FFTSize, 3>(src, coeffs, batch, 0, 0, 0);
  load2D2b<Complex, FFTSize, 3>(
    src, coeffs, batch + FFTPerWarp * gridDim.x, 0, 0, 0);

  // First 1-D pass (over rows).
  decimateInFrequency1DWarp<FFTSize>(coeffs[0], roots[0]);

  FFT1DBitReversal<FFTSize> bits;
  if (BitReverse) {
    bits.computeBitReversal(0);
    bitReverse1DWarp<FFTSize, FFTPerWarp>(coeffs, bits, 0);
  }

  // Transpose through shared memory so the second pass runs over columns.
  if (FFTPerWarp > 1) {
    transpose2DMultiple<FFTSize, WARP_SIZE, 1, FFTPerWarp>(
      coeffs, (Complex(*)[WARP_SIZE][WARP_SIZE + 1])buffer);
  } else {
    transpose2D<FFTSize, WARP_SIZE, 1>(
      coeffs, (Complex(*)[WARP_SIZE][WARP_SIZE + 1])buffer);
  }

  // Second 1-D pass (over columns).
  decimateInFrequency1DWarp<FFTSize>(coeffs[0], roots[0]);
  if (BitReverse) {
    // Bit reversal is the same as for 1D but fully data parallel across
    // threadIdx.y
    bitReverse1DWarp<FFTSize, FFTPerWarp>(coeffs, bits, 0);
  }

  // If needed, could reintroduce the "untranspose" feature but this is
  // expensive for sizes > 32
  store2D2<FFTSize, FFTPerWarp>(dst, coeffs, batch, 0, 0, padL, padU);
}

// First half of the inverse 2-D transform for >= 64.
// Not called for <= 32
// 2-D IFFT after untransposed 2-D FFT
template <int FFTSize, int RowsPerKernel, int BlockDimY, bool BitReverse>
__device__ __forceinline__ void decimateInFrequencyInverse2DKernel(
    DeviceTensor<float, 4> src, DeviceTensor<float, 4> dst) {
  assert(src.getStride(2) == 2);
  assert(dst.getStride(2) == 2);
  assert(blockDim.x == WARP_SIZE);
  assert(blockDim.y == BlockDimY);
  assert(src.getSize(0) == dst.getSize(0));
  // This version does not deal with a whole N x N FFT within a single block.
  // It *cannot* update in place transposed -> ensure we have the same
  // dimensions to update one row at a time.
  assert(src.getSize(1) == FFTSize / 2 + 1);
  assert(src.getSize(2) == FFTSize);
  assert(dst.getSize(1) == FFTSize);
  assert(dst.getSize(2) == FFTSize);

  // NOTE(review): LogFFTSize is computed but never used in this function.
  int LogFFTSize = getMSB(FFTSize);
  // Enforce that the number of FFTs we perform is divisible by the number of
  // FFTs per warp, otherwise weird divergence will occur and possibly bugs
  const int batch = adjustedBatch<FFTSize, 1>();
  assert (batch < src.getSize(0) / 2 + 1);

  // Process RowsPerKernel * BlockDimY rows per iteration until the full
  // FFTSize rows are covered.
  for (int yiter = 0; yiter < FFTSize; yiter += RowsPerKernel * BlockDimY) {
    FFT1DCoeffs<FFTSize> coeffsArray[RowsPerKernel];
    const int ColumnsPerWarp = coeffsArray[0].ColumnsPerWarp;
    __shared__ Complex buffer[BlockDimY][FFTSize];

    // At this point, the buffer contains data in the form:
    //   e^-iy (FFTSize of them)
    //   e^-ix /---------------------------
    //   (FFTSize / 2 + 1) |
    //                     |
    //
    // With Hermitian symmetry 2-D Hermitian symmetry (X_{r,c} = X*_{N-r,N-c})
    // where N-r and N-c are taken modulo N
#pragma unroll
    for (int row = 0; row < RowsPerKernel; ++row) {
#pragma unroll
      for (int reg = 0; reg < ColumnsPerWarp; ++reg) {
        // Pack two real IFFTs (batch and batch + gridDim.x) into one
        // complex IFFT.
        load2D2a<float, FFTSize, 4>(
          src, coeffsArray[row], batch, yiter, row, reg);
        load2D2b<float, FFTSize, 4>(
          src, coeffsArray[row], batch + gridDim.x, yiter, row, reg);
      }
    }

    // At this point, the data is loaded in the form:
    //   e^-iy (FFTSize of them)
    //   e^-ix /---------------------------
    //   (FFTSize of them) |
    //                     |
    {
      // Twiddles is the same as for 1D but fully data parallel on threadIdx.y
      FFT1DRoots<FFTSize> roots;
      roots.template twiddles<false>();
      decimateInFrequency1D<FFTSize, 1, RowsPerKernel, 0, RowsPerKernel>(
        coeffsArray, roots, batch);
    }

    if (BitReverse) {
      FFT1DBitReversal<FFTSize> bits;
#pragma unroll
      for (int reg = 0; reg < ColumnsPerWarp; ++reg) {
        bits.computeBitReversal(reg);
      }
      Complex (*buffer2) [FFTSize] = (Complex(*)[FFTSize])buffer;
      // bitReverse all
#pragma unroll
      for (int row = 0; row < RowsPerKernel; ++row) {
#pragma unroll
        for (int reg = 0; reg < ColumnsPerWarp; ++reg) {
          int y = getLaneId() + reg * WARP_SIZE;
          buffer2[threadIdx.y][y] = coeffsArray[row][reg];
        }
#pragma unroll
        for (int reg = 0; reg < ColumnsPerWarp; ++reg) {
          coeffsArray[row][reg] = buffer2[threadIdx.y][bits[reg]];
        }
      }
      // No need to sync up here, no following kernel
      // NOTE(review): the write/read of buffer2 above is only race-free
      // within a warp's own buffer row; this relies on warp-synchronous
      // execution (pre-Volta assumption) — confirm on independent-thread-
      // scheduling architectures.
    }

    // Store as:
    //   1^y (FFTSize of them)
    //   e^-ix /---------------------------
    //   (FFTSize) |
    //             |
    // Here we store the whole thing, no symmetry
#pragma unroll
    for (int row = 0; row < RowsPerKernel; ++row) {
      int y = yiter + threadIdx.y + row * blockDim.y;
#pragma unroll
      for (int reg = 0; reg < ColumnsPerWarp; ++reg) {
        int x = threadIdx.x + reg * blockDim.x;
        if (y < dst.getSize(1) && x < dst.getSize(2)) {
          dst[batch][y][x][0].as<Complex>() = coeffsArray[row][reg];
        }
      }
    }
  }
}

// Second half of the 2-D transform for >= 64
// Only transform to be called for <= 32
template <int FFTSize, int RowsPerKernel, int BlockDimY, bool BitReverse>
__device__ __forceinline__ void decimateInFrequencyInverse2DKernel(
    const DeviceTensor<Complex, 3> src,
    DeviceTensor<float, 3> real,
    const int padL,
    const int padU) {
  assert(src.getStride(2) == 1);
  assert(real.getStride(2) == 1);
  assert(blockDim.x == WARP_SIZE);
  assert(blockDim.y == BlockDimY);
  assert(src.getSize(0) == real.getSize(0));
  // This version does not deal with a whole N x N FFT within a single block.
  // It *cannot* update in place transposed -> ensure we are writing to 2
  // different storage areas
  assert(src.dataAs<float>() != real.data());

  // NOTE(review): LogFFTSize is computed but never used in this function.
  int LogFFTSize = getMSB(FFTSize);
  // Enforce that the number of FFTs we perform is divisible by the number of
  // FFTs per warp, otherwise weird divergence will occur and possibly bugs
  const int batch = adjustedBatch<FFTSize, 1>();
  if (batch >= src.getSize(0)) {
    return;
  }

  const int UpperBound = FFTSize;
#pragma unroll
  for (int yiter = 0; yiter < UpperBound;
       yiter += RowsPerKernel * BlockDimY) {
    // Split into lower and upper half, upper half will be cut by symmetry
    FFT1DCoeffs<FFTSize> coeffsArray[RowsPerKernel];
    const int ColumnsPerWarp = coeffsArray[0].ColumnsPerWarp;
    __shared__ Complex buffer[BlockDimY][FFTSize];

    // At this point, the buffer contains data in the form
    //   1^y (FFTSize of them)
    //   e^-ix /---------------------------
    //   (FFTSize) |
    //             |
    // We store the whole thing, no symmetry
#pragma unroll
    for (int row = 0; row < RowsPerKernel; ++row) {
      int x = yiter + threadIdx.y + row * blockDim.y;
#pragma unroll
      for (int reg = 0; reg < ColumnsPerWarp; ++reg) {
        int y = threadIdx.x + reg * blockDim.x;
        // This is the key: uncoalesced, transposed reads using ldg work
        // really well and remove the need for doing an actual transpose.
        coeffsArray[row][reg] = ldg(src[batch][y][x].dataAs<Complex>());
      }
    }

    {
      // Twiddles is the same as for 1D but fully data parallel wrt threadIdx.y
      FFT1DRoots<FFTSize> roots;
      roots.template twiddles<false>();
      decimateInFrequency1D<FFTSize, 1, RowsPerKernel, 0, RowsPerKernel>(
        coeffsArray, roots, batch);
    }

    if (BitReverse) {
      {
        FFT1DBitReversal<FFTSize> bits;
#pragma unroll
        for (int reg = 0; reg < ColumnsPerWarp; ++reg) {
          bits.computeBitReversal(reg);
        }
        Complex (*buffer2) [FFTSize] = (Complex(*)[FFTSize])buffer;
        // bitReverse all
#pragma unroll
        for (int row = 0; row < RowsPerKernel; ++row) {
#pragma unroll
          for (int reg = 0; reg < ColumnsPerWarp; ++reg) {
            int x = getLaneId() + reg * WARP_SIZE;
            buffer2[threadIdx.y][x] = coeffsArray[row][reg];
          }
#pragma unroll
          for (int reg = 0; reg < ColumnsPerWarp; ++reg) {
            coeffsArray[row][reg] = buffer2[threadIdx.y][bits[reg]];
          }
        }
        // No need to sync up here, no following smem access
      }
    }

    // FFT is untransposed, this is untransposed -> back to correct order
    // Eventually store the final result
    //   1^x (FFTSize of them)
    //   1^y /---------------------------
    //   (FFTSize) |
    //             |
#pragma unroll
    for (int row = 0; row < RowsPerKernel; ++row) {
      int rrow = yiter + threadIdx.y + row * blockDim.y;
#pragma unroll
      for (int reg = 0; reg < ColumnsPerWarp; ++reg) {
        int ccol = threadIdx.x + reg * blockDim.x;
        if (inBounds(rrow, ccol, padU, padL, real)) {
          // Unpack the two packed real IFFTs: real part to `batch`,
          // imaginary part to the paired batch.
          real[batch][rrow - padU][ccol - padL] = coeffsArray[row][reg].re();
          if (batch + gridDim.x < real.getSize(0)) {
            real[batch + gridDim.x][rrow - padU][ccol - padL] =
              coeffsArray[row][reg].im();
          }
        }
      }
    }
  }
}

// __global__ entry point wrapping the first-half device function above.
template <int FFTSize, int RowsPerKernel, int BlockDimY, bool BitReverse>
__launch_bounds__(32 * 32, 2)
__global__ void decimateInFrequencyInverse2DKernel64(
    DeviceTensor<float, 4> src, DeviceTensor<float, 4> dst) {
  decimateInFrequencyInverse2DKernel<
    FFTSize, RowsPerKernel, BlockDimY, BitReverse>(src, dst);
}

template <int FFTSize, int RowsPerKernel, int BlockDimY, bool BitReverse>
__launch_bounds__(32 * 32,
1) __global__ void decimateInFrequencyInverse2DKernel128( DeviceTensor<float, 4> src, DeviceTensor<float, 4> dst) { decimateInFrequencyInverse2DKernel<FFTSize, RowsPerKernel, BlockDimY, BitReverse>(src, dst); } template <int FFTSize, int RowsPerKernel, int BlockDimY, bool BitReverse> __launch_bounds__(32 * 32, 2) __global__ void decimateInFrequencyInverse2DKernel64( const DeviceTensor<Complex, 3> src, DeviceTensor<float, 3> real, const int padL, const int padU) { decimateInFrequencyInverse2DKernel<FFTSize, RowsPerKernel, BlockDimY, BitReverse>(src, real, padL, padU); } template <int FFTSize, int RowsPerKernel, int BlockDimY, bool BitReverse> __global__ void decimateInFrequencyInverse2DKernel128( const DeviceTensor<Complex, 3> src, DeviceTensor<float, 3> real, const int padL, const int padU) { decimateInFrequencyInverse2DKernel<FFTSize, RowsPerKernel, BlockDimY, BitReverse>(src, real, padL, padU); } } // namespace // First half of the inverse 2-D transform for >= 64. // Not called for <= 32 // 2-D IFFT after untransposed 2-D FFT template <int BatchDims> FBFFTParameters::ErrorCode fbifft2D( DeviceTensor<float, BatchDims + 3>& srcComplexAsFloat, DeviceTensor<float, BatchDims + 3>& dstComplexAsFloat, cudaStream_t s) { initTwiddles(); if (srcComplexAsFloat.getSize(1) != numHermitian(srcComplexAsFloat.getSize(2)) || srcComplexAsFloat.getSize(2) > 128 || srcComplexAsFloat.getSize(2) < 32) { return FBFFTParameters::UnsupportedSize; } #define SELECT_FBFFT_2D_DIF_SINGLE( \ FFTSize, ROWS_PER_KERNEL, BLOCKDIMY, BIT_REVERSE) \ if (srcComplexAsFloat.getSize(2) == FFTSize) { \ dim3 blocks(ceil(srcComplexAsFloat.getSize(0), 2)); \ dim3 threads(32, BLOCKDIMY); \ detail::decimateInFrequencyInverse2DKernel##FFTSize< \ FFTSize, ROWS_PER_KERNEL, BLOCKDIMY, BIT_REVERSE> \ <<<blocks, threads, 0, s>>>(srcComplexAsFloat, dstComplexAsFloat);\ if (cudaSuccess != cudaPeekAtLastError()) { \ return FBFFTParameters::CudaError; \ } \ return FBFFTParameters::Success; \ } 
SELECT_FBFFT_2D_DIF_SINGLE(64, 2, 16, true); SELECT_FBFFT_2D_DIF_SINGLE(128, 2, 16, true); #undef SELECT_FBFFT_2D_DIF_SINGLE return FBFFTParameters::UnsupportedSize; } // Second half of the 2-D transform for >= 64 // Only transform to be called for <= 32 template <int BatchDims> FBFFTParameters::ErrorCode fbifft2D( DeviceTensor<Complex, BatchDims + 2>& srcComplex, DeviceTensor<float, BatchDims + 2>& realDst, const int padL, const int padU, cudaStream_t s) { initTwiddles(); bool inputProperlySizedLE32 = srcComplex.getSize(BatchDims + 1) > 32 || (srcComplex.getSize(BatchDims + 1) <= 32 && srcComplex.getSize(BatchDims) != numHermitian(srcComplex.getSize(BatchDims + 1))); bool inputProperlySizedGT32 = srcComplex.getSize(BatchDims + 1) <= 32 || (srcComplex.getSize(BatchDims + 1) > 32 && srcComplex.getSize(BatchDims) != srcComplex.getSize(BatchDims + 1)); if ((!inputProperlySizedLE32 && !inputProperlySizedGT32) || srcComplex.getSize(BatchDims + 1) > 128) { return FBFFTParameters::UnsupportedSize; } { constexpr int FFTSize = 32; constexpr int BatchesPerBlock = 2; if (srcComplex.getSize(BatchDims + 1) == FFTSize) { CHECK_EQ(1, BatchDims); int maxBlocks = facebook::cuda::getCurrentDeviceProperties().maxGridSize[0]; int blx = 1; int bly = 1; if (realDst.getSize(0) / (BatchesPerBlock) > maxBlocks) { blx = maxBlocks; bly = ceil(realDst.getSize(0), maxBlocks * BatchesPerBlock); } else { blx = ceil(realDst.getSize(0), BatchesPerBlock); } CHECK_LE(1, blx); CHECK_LE(1, bly); CHECK_LE(blx, maxBlocks); CHECK_LE(bly, maxBlocks); CHECK_LE(realDst.getSize(0), blx * bly * BatchesPerBlock); dim3 blocks(blx, bly); dim3 threads(FFTSize, BatchesPerBlock); detail::fbifft2DVertical_32<BatchDims, BatchesPerBlock> <<<blocks, threads, 0, s>>>(srcComplex, realDst, padL, padU); if (cudaSuccess != cudaPeekAtLastError()) { return FBFFTParameters::CudaError; } return FBFFTParameters::Success; } } { constexpr int FFTSize = 16; constexpr int BatchesPerBlock = 4; if (srcComplex.getSize(BatchDims + 1) == 
FFTSize) { CHECK_EQ(1, BatchDims); int maxBlocks = facebook::cuda::getCurrentDeviceProperties().maxGridSize[0]; int blx = 1; int bly = 1; if (realDst.getSize(0) / (BatchesPerBlock) > maxBlocks) { blx = maxBlocks; bly = ceil(realDst.getSize(0), maxBlocks * BatchesPerBlock); } else { blx = ceil(realDst.getSize(0), BatchesPerBlock); } CHECK_LE(1, blx); CHECK_LE(1, bly); CHECK_LE(blx, maxBlocks); CHECK_LE(bly, maxBlocks); CHECK_LE(realDst.getSize(0), blx * bly * BatchesPerBlock); dim3 blocks(blx, bly); dim3 threads(FFTSize, BatchesPerBlock); detail::fbifft2DVertical_16<BatchDims, BatchesPerBlock> <<<blocks, threads, 0, s>>>(srcComplex, realDst, padL, padU); if (cudaSuccess != cudaPeekAtLastError()) { return FBFFTParameters::CudaError; } return FBFFTParameters::Success; } } { constexpr int FFTSize = 8; constexpr int BatchesPerBlock = 16; if (srcComplex.getSize(BatchDims + 1) == FFTSize) { CHECK_EQ(1, BatchDims); int maxBlocks = facebook::cuda::getCurrentDeviceProperties().maxGridSize[0]; int blx = 1; int bly = 1; if (realDst.getSize(0) / (BatchesPerBlock) > maxBlocks) { blx = maxBlocks; bly = ceil(realDst.getSize(0), maxBlocks * BatchesPerBlock); } else { blx = ceil(realDst.getSize(0), BatchesPerBlock); } CHECK_LE(1, blx); CHECK_LE(1, bly); CHECK_LE(blx, maxBlocks); CHECK_LE(bly, maxBlocks); CHECK_LE(realDst.getSize(0), blx * bly * BatchesPerBlock); dim3 blocks(blx, bly); dim3 threads(FFTSize, BatchesPerBlock); detail::fbifft2DVertical_8<BatchDims, BatchesPerBlock> <<<blocks, threads, 0, s>>>(srcComplex, realDst, padL, padU); if (cudaSuccess != cudaPeekAtLastError()) { return FBFFTParameters::CudaError; } return FBFFTParameters::Success; } } // TODO: this drops to 1 FFT per warp if batch size is not an even multiple // of 2 * FFTS_PER_WARP -> implement kernel and epilogue to handle most // cases efficiently. 
#define SELECT_FBFFT_2D_DIF_WARP_SINGLE( \ FFTSize, FFTS_PER_WARP, BIT_REVERSE) \ if (srcComplex.getSize(BatchDims + 1) == FFTSize) { \ if (srcComplex.getSize(0) % (2 * FFTS_PER_WARP) == 0) { \ dim3 blocks(ceil(srcComplex.getSize(0), 2 * FFTS_PER_WARP)); \ /* The factor 2 is already included as Hermitian symmetry */ \ /* in the implementation -> just multiply by FFTS_PER_WARP */ \ dim3 threads(FFTSize * FFTS_PER_WARP, \ FFTSize); \ detail::decimateInFrequencyInverseHermitian2DWarpKernel< \ FFTSize, FFTS_PER_WARP, BIT_REVERSE> \ <<<blocks, threads, 0, s>>>(srcComplex, realDst, padL, padU); \ } else { \ dim3 blocks(ceil(srcComplex.getSize(0), 2)); \ dim3 threads(FFTSize, \ FFTSize); \ detail::decimateInFrequencyInverseHermitian2DWarpKernel< \ FFTSize, 1, BIT_REVERSE> \ <<<blocks, threads, 0, s>>>(srcComplex, realDst, padL, padU); \ } \ if (cudaSuccess != cudaPeekAtLastError()) { \ return FBFFTParameters::CudaError; \ } \ return FBFFTParameters::Success; \ } #define SELECT_FBFFT_2D_DIF_SINGLE( \ FFTSize, ROWS_PER_KERNEL, BLOCKDIMY, BIT_REVERSE) \ if (srcComplex.getSize(BatchDims + 1) == FFTSize) { \ dim3 blocks(ceil(srcComplex.getSize(0), 2)); \ dim3 threads(32, BLOCKDIMY); \ detail::decimateInFrequencyInverse2DKernel##FFTSize< \ FFTSize, ROWS_PER_KERNEL, BLOCKDIMY, BIT_REVERSE> \ <<<blocks, threads, 0, s>>>(srcComplex, realDst, padL, padU); \ if (cudaSuccess != cudaPeekAtLastError()) { \ return FBFFTParameters::CudaError; \ } \ return FBFFTParameters::Success; \ } SELECT_FBFFT_2D_DIF_WARP_SINGLE(2, 16, true); SELECT_FBFFT_2D_DIF_WARP_SINGLE(4, 8, true); SELECT_FBFFT_2D_DIF_WARP_SINGLE(8, 4, true); SELECT_FBFFT_2D_DIF_WARP_SINGLE(16, 2, true); SELECT_FBFFT_2D_DIF_WARP_SINGLE(32, 1, true); // force 32 registers and unroll outer loop gives best perf SELECT_FBFFT_2D_DIF_SINGLE(64, 1, 16, true); SELECT_FBFFT_2D_DIF_SINGLE(128, 1, 8, true); #undef SELECT_FBFFT_2D_DIF_WARP_SINGLE #undef SELECT_FBFFT_2D_DIF_SINGLE LOG(INFO) << "FBIFFT unsupported size: " << srcComplex << \ " 
with batchdims = " << BatchDims; return FBFFTParameters::UnsupportedSize; } } } } // namespace
the_stack
#include "dragon/core/context_cuda.h"
#include "dragon/utils/conversions.h"
#include "dragon/utils/op_kernels.h"

namespace dragon {

namespace kernels {

namespace {

/*
 * (Leaky) ReLU forward: y[i] = x[i] if x[i] > 0 else alpha * x[i].
 * All kernels iterate with a grid-stride CUDA_1D_KERNEL_LOOP over N.
 */
template <typename T>
__global__ void _Relu(const int N, const float alpha, const T* x, T* y) {
  CUDA_1D_KERNEL_LOOP(i, N) {
    y[i] = __ldg(x + i) > 0 ? __ldg(x + i) : __ldg(x + i) * alpha;
  }
}

// fp16 specialization: compare in fp32, keep the stored half when positive.
template <>
__global__ void
_Relu<half>(const int N, const float alpha, const half* x, half* y) {
  CUDA_1D_KERNEL_LOOP(i, N) {
    const float val = __half2float(__ldg(x + i));
    y[i] = val > 0.f ? __ldg(x + i) : __float2half(val * alpha);
  }
}

// Packed fp16x2 specialization: two lanes per element, math in fp32.
template <>
__global__ void
_Relu<half2>(const int N, const float alpha, const half2* x, half2* y) {
  CUDA_1D_KERNEL_LOOP(i, N) {
    const float2 val = __half22float2(x[i]);
    y[i] = __floats2half2_rn(
        val.x > 0.f ? val.x : val.x * alpha,
        val.y > 0.f ? val.y : val.y * alpha);
  }
}

/*
 * Clipped ReLU forward: y[i] = min(max(x[i], 0), max_value).
 */
template <typename T>
__global__ void _ReluN(const int N, const T max_value, const T* x, T* y) {
  CUDA_1D_KERNEL_LOOP(i, N) {
    y[i] = __ldg(x + i) > 0
        ? (__ldg(x + i) < max_value ? __ldg(x + i) : max_value)
        : T(0);
  }
}

// fp16 specialization: native half comparisons on SM53+, fp32 fallback below.
template <>
__global__ void
_ReluN<half>(const int N, const half max_value, const half* x, half* y) {
  const half kZero = __float2half(0.f);
#if __CUDA_ARCH__ >= 530
  CUDA_1D_KERNEL_LOOP(i, N) {
    y[i] = __hgt(__ldg(x + i), kZero)
        ? (__hlt(__ldg(x + i), max_value) ? __ldg(x + i) : max_value)
        : kZero;
  }
#else
  const float kMax = __half2float(max_value);
  CUDA_1D_KERNEL_LOOP(i, N) {
    const float val = __half2float(__ldg(x + i));
    y[i] = val > 0.f ? ((val < kMax) ? __ldg(x + i) : max_value) : kZero;
  }
#endif
}

// Packed fp16x2 clipped ReLU.
// FIX: the pre-SM53 fallback previously opened a second CUDA_1D_KERNEL_LOOP
// *inside* the first one — shadowing the loop index, recomputing kMax every
// iteration, and re-scanning the whole range per outer iteration. Each
// architecture branch now owns a single loop, matching _ReluN<half> above.
__global__ void
_ReluNHalf2(const int N, const half max_value, const half2* x, half2* y) {
  const half kZero = __float2half(0.f);
#if __CUDA_ARCH__ >= 530
  CUDA_1D_KERNEL_LOOP(i, N) {
    y[i] = __halves2half2(
        __hgt(__low2half(__ldg(x + i)), kZero)
            ? (__hlt(__low2half(__ldg(x + i)), max_value)
                   ? __low2half(__ldg(x + i))
                   : max_value)
            : kZero,
        __hgt(__high2half(__ldg(x + i)), kZero)
            ? (__hlt(__high2half(__ldg(x + i)), max_value)
                   ? __high2half(__ldg(x + i))
                   : max_value)
            : kZero);
  }
#else
  const float kMax = __half2float(max_value);
  CUDA_1D_KERNEL_LOOP(i, N) {
    const float2 val = __half22float2(__ldg(x + i));
    y[i] = __halves2half2(
        val.x > 0.f ? ((val.x < kMax) ? __low2half(__ldg(x + i)) : max_value)
                    : kZero,
        val.y > 0.f ? ((val.y < kMax) ? __high2half(__ldg(x + i)) : max_value)
                    : kZero);
  }
#endif
}

/*
 * (Leaky) ReLU backward: dx = dy * (1 if y > 0 else alpha).
 * Uses the forward *output* y to recover the activation region.
 */
template <typename T>
__global__ void
_ReluGrad(const int N, const float alpha, const T* dy, const T* y, T* dx) {
  CUDA_1D_KERNEL_LOOP(i, N) {
    dx[i] = dy[i] * ((__ldg(y + i) > 0) + alpha * (__ldg(y + i) <= 0));
  }
}

template <>
__global__ void _ReluGrad<half>(
    const int N,
    const float alpha,
    const half* dy,
    const half* y,
    half* dx) {
  CUDA_1D_KERNEL_LOOP(i, N) {
    const float val = __half2float(y[i]);
    dx[i] = __float2half(
        __half2float(dy[i]) * ((val > 0.f) + alpha * (val <= 0.f)));
  }
} // ReluGrad

template <>
__global__ void _ReluGrad<half2>(
    const int N,
    const float alpha,
    const half2* dy,
    const half2* y,
    half2* dx) {
  CUDA_1D_KERNEL_LOOP(i, N) {
    const float2 val = __half22float2(y[i]);
    const float2 grad = __half22float2(dy[i]);
    dx[i] = __floats2half2_rn(
        grad.x * ((val.x > 0.f) + alpha * (val.x <= 0.f)),
        grad.y * ((val.y > 0.f) + alpha * (val.y <= 0.f)));
  }
} // ReluGrad

/*
 * Clipped ReLU backward: pass dy through only where 0 < y < max_value.
 */
template <typename T>
__global__ void
_ReluNGrad(const int N, const T max_value, const T* dy, const T* y, T* dx) {
  CUDA_1D_KERNEL_LOOP(i, N) {
    dx[i] = (__ldg(y + i) > 0 && __ldg(y + i) < max_value) ? dy[i] : T(0);
  }
}

template <>
__global__ void _ReluNGrad<half>(
    const int N,
    const half max_value,
    const half* dy,
    const half* y,
    half* dx) {
  const half kZero = __float2half(0.f);
#if __CUDA_ARCH__ >= 530
  CUDA_1D_KERNEL_LOOP(i, N) {
    dx[i] = (__hgt(__ldg(y + i), kZero) && __hlt(__ldg(y + i), max_value))
        ? dy[i]
        : kZero;
  }
#else
  const float kMax = __half2float(max_value);
  CUDA_1D_KERNEL_LOOP(i, N) {
    const float val = __half2float(y[i]);
    dx[i] = (val > 0.f && val < kMax) ? dy[i] : kZero;
  }
#endif
}

template <>
__global__ void _ReluNGrad<half2>(
    const int N,
    const half2 max_value,
    const half2* dy,
    const half2* y,
    half2* dx) {
#if __CUDA_ARCH__ >= 530
  const half2 kZero = __float2half2_rn(0.f);
  CUDA_1D_KERNEL_LOOP(i, N) {
    // __hgt2/__hlt2 return 1.0/0.0 per lane -> mask dy by multiplication.
    dx[i] = __hmul2(
        __hmul2(__hgt2(__ldg(y + i), kZero), __hlt2(__ldg(y + i), max_value)),
        dy[i]);
  }
#else
  const half kZero = __float2half(0.f);
  // Both lanes of max_value hold the same scalar (see the launcher below).
  const float kMax = __half2float(__low2half(max_value));
  CUDA_1D_KERNEL_LOOP(i, N) {
    const float2 val = __half22float2(y[i]);
    dx[i] = __halves2half2(
        (val.x > 0.f && val.x < kMax) ? __low2half(__ldg(dy + i)) : kZero,
        (val.y > 0.f && val.y < kMax) ? __high2half(__ldg(dy + i)) : kZero);
  }
#endif
}

} // namespace

/* ------------------- Launcher Separator ------------------- */

// fp16 launchers: use the packed half2 kernels when N is even.

template <>
void Relu<float16, CUDAContext>(
    const int N,
    const float alpha,
    const float16* x,
    float16* y,
    CUDAContext* ctx) {
  if ((N & 1) == 0) {
    _Relu<<<CUDA_BLOCKS(N >> 1), CUDA_THREADS, 0, ctx->cuda_stream()>>>(
        N >> 1,
        alpha,
        reinterpret_cast<const half2*>(x),
        reinterpret_cast<half2*>(y));
  } else {
    _Relu<<<CUDA_BLOCKS(N), CUDA_THREADS, 0, ctx->cuda_stream()>>>(
        N,
        alpha,
        reinterpret_cast<const half*>(x),
        reinterpret_cast<half*>(y));
  }
}

template <>
void ReluN<float16, CUDAContext>(
    const int N,
    const float max_value,
    const float16* x,
    float16* y,
    CUDAContext* ctx) {
  if ((N & 1) == 0) {
    _ReluNHalf2<<<CUDA_BLOCKS(N >> 1), CUDA_THREADS, 0, ctx->cuda_stream()>>>(
        N >> 1,
        convert::To<half>(max_value),
        reinterpret_cast<const half2*>(x),
        reinterpret_cast<half2*>(y));
  } else {
    _ReluN<<<CUDA_BLOCKS(N), CUDA_THREADS, 0, ctx->cuda_stream()>>>(
        N,
        convert::To<half>(max_value),
        reinterpret_cast<const half*>(x),
        reinterpret_cast<half*>(y));
  }
}

template <>
void ReluGrad<float16, CUDAContext>(
    const int N,
    const float alpha,
    const float16* dy,
    const float16* y,
    float16* dx,
    CUDAContext* ctx) {
  if ((N & 1) == 0) {
    _ReluGrad<<<CUDA_BLOCKS(N >> 1), CUDA_THREADS, 0, ctx->cuda_stream()>>>(
        N >> 1,
        alpha,
        reinterpret_cast<const half2*>(dy),
        reinterpret_cast<const half2*>(y),
        reinterpret_cast<half2*>(dx));
  } else {
    _ReluGrad<<<CUDA_BLOCKS(N), CUDA_THREADS, 0, ctx->cuda_stream()>>>(
        N,
        alpha,
        reinterpret_cast<const half*>(dy),
        reinterpret_cast<const half*>(y),
        reinterpret_cast<half*>(dx));
  }
} // ReluGrad

template <>
void ReluNGrad<float16, CUDAContext>(
    const int N,
    const float max_value,
    const float16* dy,
    const float16* y,
    float16* dx,
    CUDAContext* ctx) {
  if ((N & 1) == 0) {
    _ReluNGrad<<<CUDA_BLOCKS(N >> 1), CUDA_THREADS, 0, ctx->cuda_stream()>>>(
        N >> 1,
        convert::To<half2>(max_value),
        reinterpret_cast<const half2*>(dy),
        reinterpret_cast<const half2*>(y),
        reinterpret_cast<half2*>(dx));
  } else {
    _ReluNGrad<<<CUDA_BLOCKS(N), CUDA_THREADS, 0, ctx->cuda_stream()>>>(
        N,
        convert::To<half>(max_value),
        reinterpret_cast<const half*>(dy),
        reinterpret_cast<const half*>(y),
        reinterpret_cast<half*>(dx));
  }
} // ReluNGrad

#define DEFINE_KERNEL_LAUNCHER(T)                                            \
  template <>                                                                \
  void Relu<T, CUDAContext>(                                                 \
      const int N, const float alpha, const T* x, T* y, CUDAContext* ctx) {  \
    _Relu<<<CUDA_BLOCKS(N), CUDA_THREADS, 0, ctx->cuda_stream()>>>(          \
        N, convert::To<T>(alpha), x, y);                                     \
  }                                                                          \
  template <>                                                                \
  void ReluN<T, CUDAContext>(                                                \
      const int N,                                                           \
      const float max_value,                                                 \
      const T* x,                                                            \
      T* y,                                                                  \
      CUDAContext* ctx) {                                                    \
    _ReluN<<<CUDA_BLOCKS(N), CUDA_THREADS, 0, ctx->cuda_stream()>>>(         \
        N, convert::To<T>(max_value), x, y);                                 \
  }

#define DEFINE_GRAD_KERNEL_LAUNCHER(T)                                       \
  template <>                                                                \
  void ReluGrad<T, CUDAContext>(                                             \
      const int N,                                                           \
      const float alpha,                                                     \
      const T* dy,                                                           \
      const T* y,                                                            \
      T* dx,                                                                 \
      CUDAContext* ctx) {                                                    \
    _ReluGrad<<<CUDA_BLOCKS(N), CUDA_THREADS, 0, ctx->cuda_stream()>>>(      \
        N, convert::To<T>(alpha), dy, y, dx);                                \
  }                                                                          \
  template <>                                                                \
  void ReluNGrad<T, CUDAContext>(                                            \
      const int N,                                                           \
      const float max_value,                                                 \
      const T* dy,                                                           \
      const T* y,                                                            \
      T* dx,                                                                 \
      CUDAContext* ctx) {                                                    \
    _ReluNGrad<<<CUDA_BLOCKS(N), CUDA_THREADS, 0, ctx->cuda_stream()>>>(     \
        N, convert::To<T>(max_value), dy, y, dx);                            \
  }

DEFINE_KERNEL_LAUNCHER(float);
DEFINE_KERNEL_LAUNCHER(double);
DEFINE_GRAD_KERNEL_LAUNCHER(float);
DEFINE_GRAD_KERNEL_LAUNCHER(double);
#undef DEFINE_KERNEL_LAUNCHER
#undef DEFINE_GRAD_KERNEL_LAUNCHER

} // namespace kernels

} // namespace dragon

#endif // USE_CUDA
the_stack
// CUDA Global Memory variables

// Debug counters for some sanity checks
#ifdef _DEBUG
__device__ size_t debug_d_n_voxels_marked = 0;
__device__ size_t debug_d_n_triangles = 0;
__device__ size_t debug_d_n_voxels_tested = 0;
#endif

// Possible optimization: buffer bitsets (for now: Disabled because too much overhead)
//struct bufferedBitSetter{
//	unsigned int* voxel_table;
//	size_t current_int_location;
//	unsigned int current_mask;
//
//	__device__ __inline__ bufferedBitSetter(unsigned int* voxel_table, size_t index) :
//		voxel_table(voxel_table), current_mask(0) {
//		current_int_location = int(index / 32.0f);
//	}
//
//	__device__ __inline__ void setBit(size_t index){
//		size_t new_int_location = int(index / 32.0f);
//		if (current_int_location != new_int_location){
//			flush();
//			current_int_location = new_int_location;
//		}
//		unsigned int bit_pos = 31 - (unsigned int)(int(index) % 32);
//		current_mask = current_mask | (1 << bit_pos);
//	}
//
//	__device__ __inline__ void flush(){
//		if (current_mask != 0){
//			atomicOr(&(voxel_table[current_int_location]), current_mask);
//		}
//	}
//};

// Possible optimization: check bit before you set it - don't need to do atomic operation if it's already set to 1
// For now: overhead, so it seems
//__device__ __inline__ bool checkBit(unsigned int* voxel_table, size_t index){
//	size_t int_location = index / size_t(32);
//	unsigned int bit_pos = size_t(31) - (index % size_t(32)); // we count bit positions RtL, but array indices LtR
//	return ((voxel_table[int_location]) & (1 << bit_pos));
//}

// Set a bit in the giant voxel table. This involves doing an atomic operation on a 32-bit word in memory.
// Blocking other threads writing to it for a very short time
__device__ __inline__ void setBit(unsigned int* voxel_table, size_t index){
	size_t int_location = index / size_t(32);
	unsigned int bit_pos = size_t(31) - (index % size_t(32)); // we count bit positions RtL, but array indices LtR
	unsigned int mask = 1 << bit_pos;
	atomicOr(&(voxel_table[int_location]), mask);
}

// Main triangle voxelization method.
// One thread per triangle, grid-stride over info.n_triangles. Implements a
// separating-axis triangle/box overlap test (plane test + three 2-D edge
// projection tests per axis-aligned plane) over each triangle's grid bbox,
// and sets the corresponding bit in voxel_table for every overlapped voxel.
__global__ void voxelize_triangle(voxinfo info, float* triangle_data, unsigned int* voxel_table, bool morton_order){
	size_t thread_id = threadIdx.x + blockIdx.x * blockDim.x;
	size_t stride = blockDim.x * gridDim.x;

	// Common variables used in the voxelization process
	glm::vec3 delta_p(info.unit.x, info.unit.y, info.unit.z);
	glm::vec3 grid_max(info.gridsize.x - 1, info.gridsize.y - 1, info.gridsize.z - 1); // grid max (grid runs from 0 to gridsize-1)

	while (thread_id < info.n_triangles){ // every thread works on specific triangles in its stride
		size_t t = thread_id * 9; // triangle contains 9 vertices

		// COMPUTE COMMON TRIANGLE PROPERTIES
		// Move vertices to origin using bbox
		glm::vec3 v0 = glm::vec3(triangle_data[t], triangle_data[t + 1], triangle_data[t + 2]) - info.bbox.min;
		glm::vec3 v1 = glm::vec3(triangle_data[t + 3], triangle_data[t + 4], triangle_data[t + 5]) - info.bbox.min;
		glm::vec3 v2 = glm::vec3(triangle_data[t + 6], triangle_data[t + 7], triangle_data[t + 8]) - info.bbox.min;
		// Edge vectors
		glm::vec3 e0 = v1 - v0;
		glm::vec3 e1 = v2 - v1;
		glm::vec3 e2 = v0 - v2;
		// Normal vector pointing up from the triangle
		glm::vec3 n = glm::normalize(glm::cross(e0, e1));

		// COMPUTE TRIANGLE BBOX IN GRID
		// Triangle bounding box in world coordinates is min(v0,v1,v2) and max(v0,v1,v2)
		AABox<glm::vec3> t_bbox_world(glm::min(v0, glm::min(v1, v2)), glm::max(v0, glm::max(v1, v2)));
		// Triangle bounding box in voxel grid coordinates is the world bounding box divided by the grid unit vector
		// (implicit float -> int truncation when assigning into the ivec3 box)
		AABox<glm::ivec3> t_bbox_grid;
		t_bbox_grid.min = glm::clamp(t_bbox_world.min / info.unit, glm::vec3(0.0f, 0.0f, 0.0f), grid_max);
		t_bbox_grid.max = glm::clamp(t_bbox_world.max / info.unit, glm::vec3(0.0f, 0.0f, 0.0f), grid_max);

		// PREPARE PLANE TEST PROPERTIES
		// c is the "critical point": the voxel corner most aligned with the normal
		glm::vec3 c(0.0f, 0.0f, 0.0f);
		if (n.x > 0.0f) { c.x = info.unit.x; }
		if (n.y > 0.0f) { c.y = info.unit.y; }
		if (n.z > 0.0f) { c.z = info.unit.z; }
		float d1 = glm::dot(n, (c - v0));
		float d2 = glm::dot(n, ((delta_p - c) - v0));

		// PREPARE PROJECTION TEST PROPERTIES
		// XY plane
		glm::vec2 n_xy_e0(-1.0f*e0.y, e0.x);
		glm::vec2 n_xy_e1(-1.0f*e1.y, e1.x);
		glm::vec2 n_xy_e2(-1.0f*e2.y, e2.x);
		if (n.z < 0.0f) {
			n_xy_e0 = -n_xy_e0;
			n_xy_e1 = -n_xy_e1;
			n_xy_e2 = -n_xy_e2;
		}
		float d_xy_e0 = (-1.0f * glm::dot(n_xy_e0, glm::vec2(v0.x, v0.y))) + glm::max(0.0f, info.unit.x*n_xy_e0[0]) + glm::max(0.0f, info.unit.y*n_xy_e0[1]);
		float d_xy_e1 = (-1.0f * glm::dot(n_xy_e1, glm::vec2(v1.x, v1.y))) + glm::max(0.0f, info.unit.x*n_xy_e1[0]) + glm::max(0.0f, info.unit.y*n_xy_e1[1]);
		float d_xy_e2 = (-1.0f * glm::dot(n_xy_e2, glm::vec2(v2.x, v2.y))) + glm::max(0.0f, info.unit.x*n_xy_e2[0]) + glm::max(0.0f, info.unit.y*n_xy_e2[1]);
		// YZ plane
		glm::vec2 n_yz_e0(-1.0f*e0.z, e0.y);
		glm::vec2 n_yz_e1(-1.0f*e1.z, e1.y);
		glm::vec2 n_yz_e2(-1.0f*e2.z, e2.y);
		if (n.x < 0.0f) {
			n_yz_e0 = -n_yz_e0;
			n_yz_e1 = -n_yz_e1;
			n_yz_e2 = -n_yz_e2;
		}
		float d_yz_e0 = (-1.0f * glm::dot(n_yz_e0, glm::vec2(v0.y, v0.z))) + glm::max(0.0f, info.unit.y*n_yz_e0[0]) + glm::max(0.0f, info.unit.z*n_yz_e0[1]);
		float d_yz_e1 = (-1.0f * glm::dot(n_yz_e1, glm::vec2(v1.y, v1.z))) + glm::max(0.0f, info.unit.y*n_yz_e1[0]) + glm::max(0.0f, info.unit.z*n_yz_e1[1]);
		float d_yz_e2 = (-1.0f * glm::dot(n_yz_e2, glm::vec2(v2.y, v2.z))) + glm::max(0.0f, info.unit.y*n_yz_e2[0]) + glm::max(0.0f, info.unit.z*n_yz_e2[1]);
		// ZX plane
		glm::vec2 n_zx_e0(-1.0f*e0.x, e0.z);
		glm::vec2 n_zx_e1(-1.0f*e1.x, e1.z);
		glm::vec2 n_zx_e2(-1.0f*e2.x, e2.z);
		if (n.y < 0.0f) {
			n_zx_e0 = -n_zx_e0;
			n_zx_e1 = -n_zx_e1;
			n_zx_e2 = -n_zx_e2;
		}
		float d_xz_e0 = (-1.0f * glm::dot(n_zx_e0, glm::vec2(v0.z, v0.x))) + glm::max(0.0f, info.unit.x*n_zx_e0[0]) + glm::max(0.0f, info.unit.z*n_zx_e0[1]);
		float d_xz_e1 = (-1.0f * glm::dot(n_zx_e1, glm::vec2(v1.z, v1.x))) + glm::max(0.0f, info.unit.x*n_zx_e1[0]) + glm::max(0.0f, info.unit.z*n_zx_e1[1]);
		float d_xz_e2 = (-1.0f * glm::dot(n_zx_e2, glm::vec2(v2.z, v2.x))) + glm::max(0.0f, info.unit.x*n_zx_e2[0]) + glm::max(0.0f, info.unit.z*n_zx_e2[1]);

		// test possible grid boxes for overlap
		for (int z = t_bbox_grid.min.z; z <= t_bbox_grid.max.z; z++){
			for (int y = t_bbox_grid.min.y; y <= t_bbox_grid.max.y; y++){
				for (int x = t_bbox_grid.min.x; x <= t_bbox_grid.max.x; x++){
					// size_t location = x + (y*info.gridsize) + (z*info.gridsize*info.gridsize);
					// if (checkBit(voxel_table, location)){ continue; }
#ifdef _DEBUG
					atomicAdd(&debug_d_n_voxels_tested, 1);
#endif
					// TRIANGLE PLANE THROUGH BOX TEST
					glm::vec3 p(x*info.unit.x, y*info.unit.y, z*info.unit.z);
					float nDOTp = glm::dot(n, p);
					// opposite signs <=> the triangle plane crosses the voxel
					if ((nDOTp + d1) * (nDOTp + d2) > 0.0f) { continue; }

					// PROJECTION TESTS
					// XY
					glm::vec2 p_xy(p.x, p.y);
					if ((glm::dot(n_xy_e0, p_xy) + d_xy_e0) < 0.0f){ continue; }
					if ((glm::dot(n_xy_e1, p_xy) + d_xy_e1) < 0.0f){ continue; }
					if ((glm::dot(n_xy_e2, p_xy) + d_xy_e2) < 0.0f){ continue; }

					// YZ
					glm::vec2 p_yz(p.y, p.z);
					if ((glm::dot(n_yz_e0, p_yz) + d_yz_e0) < 0.0f){ continue; }
					if ((glm::dot(n_yz_e1, p_yz) + d_yz_e1) < 0.0f){ continue; }
					if ((glm::dot(n_yz_e2, p_yz) + d_yz_e2) < 0.0f){ continue; }

					// XZ
					glm::vec2 p_zx(p.z, p.x);
					if ((glm::dot(n_zx_e0, p_zx) + d_xz_e0) < 0.0f){ continue; }
					if ((glm::dot(n_zx_e1, p_zx) + d_xz_e1) < 0.0f){ continue; }
					if ((glm::dot(n_zx_e2, p_zx) + d_xz_e2) < 0.0f){ continue; }
#ifdef _DEBUG
					atomicAdd(&debug_d_n_voxels_marked, 1);
#endif
					if (morton_order){
						size_t location = mortonEncode_LUT(x, y, z);
						setBit(voxel_table, location);
					} else {
						// NOTE(review): for an x + y*X + z*X*Y layout one would expect
						// gridsize.x and gridsize.x*gridsize.y as the multipliers; the
						// use of gridsize.y and gridsize.y*gridsize.z here is only
						// equivalent for cubic grids — confirm against the host-side
						// reader of this table before changing.
						size_t location =
							static_cast<size_t>(x) +
							(static_cast<size_t>(y)* static_cast<size_t>(info.gridsize.y)) +
							(static_cast<size_t>(z)* static_cast<size_t>(info.gridsize.y)* static_cast<size_t>(info.gridsize.z));
						setBit(voxel_table, location);
					}
					continue;
				}
			}
		}
#ifdef _DEBUG
		atomicAdd(&debug_d_n_triangles, 1);
#endif
		thread_id += stride;
	}
}

// Host-side launcher for voxelize_triangle.
// triangle_data and vtable must already be device-accessible (device or
// unified memory). When useThrustPath is true a separate device vtable is
// allocated, filled, copied back into `vtable`, and freed; otherwise the
// kernel writes into `vtable` directly (unified memory path).
// NOTE(review): the kernel launch result is never checked — there is no
// cudaGetLastError() call, and the cudaDeviceSynchronize() return value is
// ignored; launch failures would go unnoticed here.
void voxelize(const voxinfo& v, float* triangle_data, unsigned int* vtable, bool useThrustPath, bool morton_code) {
	float elapsedTime;

	// These are only used when we're not using UNIFIED memory
	unsigned int* dev_vtable; // DEVICE pointer to voxel_data
	size_t vtable_size; // vtable size

	// Create timers, set start time
	cudaEvent_t start_vox, stop_vox;
	checkCudaErrors(cudaEventCreate(&start_vox));
	checkCudaErrors(cudaEventCreate(&stop_vox));

	// Copy morton LUT if we're encoding to morton
	if (morton_code){
		checkCudaErrors(cudaMemcpyToSymbol(morton256_x, host_morton256_x, 256 * sizeof(uint32_t)));
		checkCudaErrors(cudaMemcpyToSymbol(morton256_y, host_morton256_y, 256 * sizeof(uint32_t)));
		checkCudaErrors(cudaMemcpyToSymbol(morton256_z, host_morton256_z, 256 * sizeof(uint32_t)));
	}

	// Estimate best block and grid size using CUDA Occupancy Calculator
	int blockSize; // The launch configurator returned block size
	int minGridSize; // The minimum grid size needed to achieve the maximum occupancy for a full device launch
	int gridSize; // The actual grid size needed, based on input size
	cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, voxelize_triangle, 0, 0);
	// Round up according to array size
	gridSize = (v.n_triangles + blockSize - 1) / blockSize;

	if (useThrustPath) { // We're not using UNIFIED memory
		// one bit per voxel, hence the division by 8
		vtable_size = ((size_t)v.gridsize.x * v.gridsize.y * v.gridsize.z) / (size_t) 8.0;
		fprintf(stdout, "[Voxel Grid] Allocating %llu kB of DEVICE memory for Voxel Grid\n", size_t(vtable_size / 1024.0f));
		checkCudaErrors(cudaMalloc(&dev_vtable, vtable_size));
		checkCudaErrors(cudaMemset(dev_vtable, 0, vtable_size));
		// Start voxelization
		checkCudaErrors(cudaEventRecord(start_vox, 0));
		voxelize_triangle << <gridSize, blockSize >> > (v, triangle_data, dev_vtable, morton_code);
	}
	else { // UNIFIED MEMORY
		checkCudaErrors(cudaEventRecord(start_vox, 0));
		voxelize_triangle << <gridSize, blockSize >> > (v, triangle_data, vtable, morton_code);
	}

	cudaDeviceSynchronize();
	checkCudaErrors(cudaEventRecord(stop_vox, 0));
	checkCudaErrors(cudaEventSynchronize(stop_vox));
	checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start_vox, stop_vox));
	printf("[Perf] Voxelization GPU time: %.1f ms\n", elapsedTime);

	// If we're not using UNIFIED memory, copy the voxel table back and free all
	if (useThrustPath){
		fprintf(stdout, "[Voxel Grid] Copying %llu kB to page-locked HOST memory\n", size_t(vtable_size / 1024.0f));
		checkCudaErrors(cudaMemcpy((void*)vtable, dev_vtable, vtable_size, cudaMemcpyDefault));
		fprintf(stdout, "[Voxel Grid] Freeing %llu kB of DEVICE memory\n", size_t(vtable_size / 1024.0f));
		checkCudaErrors(cudaFree(dev_vtable));
	}

	// SANITY CHECKS
#ifdef _DEBUG
	size_t debug_n_triangles, debug_n_voxels_marked, debug_n_voxels_tested;
	checkCudaErrors(cudaMemcpyFromSymbol((void*)&(debug_n_triangles),debug_d_n_triangles, sizeof(debug_d_n_triangles), 0, cudaMemcpyDeviceToHost));
	checkCudaErrors(cudaMemcpyFromSymbol((void*)&(debug_n_voxels_marked), debug_d_n_voxels_marked, sizeof(debug_d_n_voxels_marked), 0, cudaMemcpyDeviceToHost));
	checkCudaErrors(cudaMemcpyFromSymbol((void*) & (debug_n_voxels_tested), debug_d_n_voxels_tested, sizeof(debug_d_n_voxels_tested), 0, cudaMemcpyDeviceToHost));
	printf("[Debug] Processed %llu triangles on the GPU \n", debug_n_triangles);
	printf("[Debug] Tested %llu voxels for overlap on GPU \n", debug_n_voxels_tested);
	printf("[Debug] Marked %llu voxels as filled (includes duplicates!) \n", debug_n_voxels_marked);
#endif

	// Destroy timers
	checkCudaErrors(cudaEventDestroy(start_vox));
	checkCudaErrors(cudaEventDestroy(stop_vox));
}
the_stack
#include "caffe/filler.hpp"
#include "caffe/layers/inner_distance_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

// Sign of x in {-1, 0, +1}, as a Dtype.
// BUGFIX: the macro body must be fully parenthesized. The previous form
//   (Dtype(0) < (x)) - ((x) < Dtype(0))
// broke under operator precedence whenever used inside a product:
//   a * sign(b)  expanded to  (a * (0 < b)) - (b < 0)
// which silently corrupted every L1 gradient below.
#define sign(x) (((Dtype(0) < (x)) - ((x) < Dtype(0))))

// channel_dot[i] = <data_1[i, :], data_2[i, :]> for each of `num` rows of
// length `dim` (used below to compute squared row norms when data_1==data_2).
template <typename Dtype>
__global__ void kernel_channel_dot(const int num, const int dim,
                                   const Dtype* data_1, const Dtype* data_2,
                                   Dtype* channel_dot) {
  CUDA_KERNEL_LOOP(index, num) {
    Dtype dot = 0;
    for (int d = 0; d < dim; ++d) {
      dot += data_1[index * dim + d] * data_2[index * dim + d];
    }
    channel_dot[index] = dot;
  }
}

// Scales each row n of input_output_data (num x dim, in place) by norm_data[n].
template <typename Dtype>
__global__ void kernel_channel_scal(const int num, const int dim,
                                    const Dtype* norm_data,
                                    Dtype* input_output_data) {
  CUDA_KERNEL_LOOP(index, num * dim) {
    int n = index / dim;
    input_output_data[index] *= norm_data[n];
  }
}

// top[m, n] = squared L2 distance between bottom row m and weight row n.
template <typename Dtype>
__global__ void inner_distance_forward_L2(const int M_, const int N_,
                                          const int K_,
                                          const Dtype* bottom_data,
                                          const Dtype* weight,
                                          Dtype* top_data) {
  CUDA_KERNEL_LOOP(index, M_ * N_) {
    int m = index / N_;
    int n = index % N_;
    Dtype sum = Dtype(0);
    for (int k = 0; k < K_; ++k) {
      sum += (bottom_data[m * K_ + k] - weight[n * K_ + k]) *
             (bottom_data[m * K_ + k] - weight[n * K_ + k]);
    }
    top_data[index] = sum;
  }
}

// top[m, n] = L1 (Manhattan) distance between bottom row m and weight row n.
template <typename Dtype>
__global__ void inner_distance_forward_L1(const int M_, const int N_,
                                          const int K_,
                                          const Dtype* bottom_data,
                                          const Dtype* weight,
                                          Dtype* top_data) {
  CUDA_KERNEL_LOOP(index, M_ * N_) {
    int m = index / N_;
    int n = index % N_;
    Dtype sum = Dtype(0);
    for (int k = 0; k < K_; ++k) {
      sum += abs(bottom_data[m * K_ + k] - weight[n * K_ + k]);
    }
    top_data[index] = sum;
  }
}

template <typename Dtype>
void InnerDistanceLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
                                            const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  // Weights come from bottom[1] when supplied, otherwise from the layer blob.
  const Dtype* weight = bottom.size() >= 2 ?
      bottom[1]->gpu_data() : this->blobs_[0]->gpu_data();
  if (normalize_ && bottom.size() == 1) {
    // L2-normalize each weight row in place (only for internally-owned
    // weights): row *= 1 / ||row||.
    Dtype* mutable_weight = this->blobs_[0]->mutable_gpu_data();
    Dtype* weight_norm_data = weight_norm_.mutable_gpu_data();
    // NOLINT_NEXT_LINE(whitespace/operators)
    kernel_channel_dot<Dtype> << <CAFFE_GET_BLOCKS(N_), CAFFE_CUDA_NUM_THREADS >> > (
        N_, K_, weight, weight, weight_norm_data);
    caffe_gpu_powx(N_, weight_norm_data, Dtype(-0.5), weight_norm_data);
    // NOLINT_NEXT_LINE(whitespace/operators)
    kernel_channel_scal<Dtype> << <CAFFE_GET_BLOCKS(N_ * K_), CAFFE_CUDA_NUM_THREADS >> > (
        N_, K_, weight_norm_data, mutable_weight);
  }

  if (distance_type_ == "L2") {
    // NOLINT_NEXT_LINE(whitespace/operators)
    inner_distance_forward_L2<Dtype> <<<CAFFE_GET_BLOCKS(M_ * N_), CAFFE_CUDA_NUM_THREADS >>> (
        M_, N_, K_, bottom_data, weight, top_data);
  }
  else if (distance_type_ == "L1") {
    // NOLINT_NEXT_LINE(whitespace/operators)
    inner_distance_forward_L1<Dtype> <<<CAFFE_GET_BLOCKS(M_ * N_), CAFFE_CUDA_NUM_THREADS >>> (
        M_, N_, K_, bottom_data, weight, top_data);
  }
  else {
    NOT_IMPLEMENTED;
  }
}

// d top[m,n] / d bottom[m,k] = 2 * (bottom[m,k] - weight[n,k]); accumulate
// over n into bottom_diff (caller zeroes bottom_diff first).
template <typename Dtype>
__global__ void inner_distance_backward_L2(const int M_, const int N_,
                                           const int K_,
                                           const Dtype* bottom_data,
                                           const Dtype* weight,
                                           const Dtype* top_diff,
                                           Dtype* bottom_diff) {
  CUDA_KERNEL_LOOP(index, M_ * K_) {
    int m = index / K_;
    int k = index % K_;
    for (int n = 0; n < N_; ++n) {
      bottom_diff[index] += top_diff[m * N_ + n] *
          (bottom_data[m * K_ + k] - weight[n * K_ + k]) * Dtype(2);
    }
  }
}

// L1 analogue: d top[m,n] / d bottom[m,k] = sign(bottom[m,k] - weight[n,k]).
template <typename Dtype>
__global__ void inner_distance_backward_L1(const int M_, const int N_,
                                           const int K_,
                                           const Dtype* bottom_data,
                                           const Dtype* weight,
                                           const Dtype* top_diff,
                                           Dtype* bottom_diff) {
  CUDA_KERNEL_LOOP(index, M_ * K_) {
    int m = index / K_;
    int k = index % K_;
    for (int n = 0; n < N_; ++n) {
      bottom_diff[index] += top_diff[m * N_ + n] *
          sign(bottom_data[m * K_ + k] - weight[n * K_ + k]);
    }
  }
}

// Weight gradient, L2 distance: accumulate over the batch dimension m.
template <typename Dtype>
__global__ void inner_distance_weight_backward_L2(const int M_, const int N_,
                                                  const int K_,
                                                  const Dtype* bottom_data,
                                                  const Dtype* weight,
                                                  const Dtype* top_diff,
                                                  Dtype* weight_diff) {
  CUDA_KERNEL_LOOP(index, N_ * K_) {
    int n = index / K_;
    int k = index % K_;
    for (int m = 0; m < M_; ++m) {
      weight_diff[index] += top_diff[m * N_ + n] *
          (weight[index] - bottom_data[m * K_ + k]) * Dtype(2);
    }
  }
}

// As above, but each sample only updates the center (weight row) given by its
// label; one thread per feature k, serial over the batch to avoid races on
// weight_diff rows shared between samples.
template <typename Dtype>
__global__ void inner_distance_weight_backward_L2_center_only(
    const int M_, const int N_, const int K_,
    const Dtype* bottom_data, const Dtype* weight, const Dtype* label_data,
    const Dtype* top_diff, Dtype* weight_diff) {
  CUDA_KERNEL_LOOP(index, K_) {
    int k = index;
    for (int m = 0; m < M_; ++m) {
      int n = static_cast<int>(label_data[m]);
      weight_diff[n * K_ + k] += top_diff[m * N_ + n] *
          (weight[n * K_ + k] - bottom_data[m * K_ + k]) * Dtype(2);
    }
  }
}

// Weight gradient, L1 distance.
template <typename Dtype>
__global__ void inner_distance_weight_backward_L1(const int M_, const int N_,
                                                  const int K_,
                                                  const Dtype* bottom_data,
                                                  const Dtype* weight,
                                                  const Dtype* top_diff,
                                                  Dtype* weight_diff) {
  CUDA_KERNEL_LOOP(index, N_ * K_) {
    int n = index / K_;
    int k = index % K_;
    for (int m = 0; m < M_; ++m) {
      weight_diff[index] += top_diff[m * N_ + n] *
          sign(weight[index] - bottom_data[m * K_ + k]);
    }
  }
}

// L1 center-only weight gradient (see L2 center-only variant above).
template <typename Dtype>
__global__ void inner_distance_weight_backward_L1_center_only(
    const int M_, const int N_, const int K_,
    const Dtype* bottom_data, const Dtype* weight, const Dtype* label_data,
    const Dtype* top_diff, Dtype* weight_diff) {
  CUDA_KERNEL_LOOP(index, K_) {
    int k = index;
    for (int m = 0; m < M_; ++m) {
      int n = static_cast<int>(label_data[m]);
      weight_diff[n * K_ + k] += top_diff[m * N_ + n] *
          sign(weight[n * K_ + k] - bottom_data[m * K_ + k]);
    }
  }
}

template <typename Dtype>
void InnerDistanceLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
                                             const vector<bool>& propagate_down,
                                             const vector<Blob<Dtype>*>& bottom) {
  const Dtype* top_diff = top[0]->gpu_diff();
  const Dtype* bottom_data = bottom[0]->gpu_data();
  const Dtype* weight = bottom.size() >= 2 ?
      bottom[1]->gpu_data() : this->blobs_[0]->gpu_data();
  if ((bottom.size() == 1 && this->param_propagate_down_[0]) ||
      (bottom.size() >= 2 && propagate_down[1])) {
    Dtype* weight_diff = bottom.size() >= 2 ?
        bottom[1]->mutable_gpu_diff() : this->blobs_[0]->mutable_gpu_diff();
    // External weight blobs are zeroed here; internal blobs accumulate into
    // the solver-managed diff (standard Caffe convention).
    if (bottom.size() >= 2) {
      caffe_gpu_set(bottom[1]->count(), Dtype(0), weight_diff);
    }
    const Dtype* label_data = NULL;
    if (update_center_only_) {
      // Labels ride on the last bottom blob in center-only mode.
      label_data = bottom[bottom.size() - 1]->gpu_data();
    }
    // Gradient with respect to weight
    if (distance_type_ == "L2") {
      if (update_center_only_) {
        // NOLINT_NEXT_LINE(whitespace/operators)
        inner_distance_weight_backward_L2_center_only<Dtype> << <CAFFE_GET_BLOCKS(K_), CAFFE_CUDA_NUM_THREADS >> > (
            M_, N_, K_, bottom_data, weight, label_data, top_diff, weight_diff);
      }
      else {
        // NOLINT_NEXT_LINE(whitespace/operators)
        inner_distance_weight_backward_L2<Dtype> << <CAFFE_GET_BLOCKS(N_ * K_), CAFFE_CUDA_NUM_THREADS >> > (
            M_, N_, K_, bottom_data, weight, top_diff, weight_diff);
      }
    }
    else if (distance_type_ == "L1") {
      if (update_center_only_) {
        // NOLINT_NEXT_LINE(whitespace/operators)
        inner_distance_weight_backward_L1_center_only<Dtype> << <CAFFE_GET_BLOCKS(K_), CAFFE_CUDA_NUM_THREADS >> > (
            M_, N_, K_, bottom_data, weight, label_data, top_diff, weight_diff);
      }
      else {
        // NOLINT_NEXT_LINE(whitespace/operators)
        inner_distance_weight_backward_L1<Dtype> << <CAFFE_GET_BLOCKS(N_ * K_), CAFFE_CUDA_NUM_THREADS >> > (
            M_, N_, K_, bottom_data, weight, top_diff, weight_diff);
      }
    }
    else {
      NOT_IMPLEMENTED;
    }
  }
  if (propagate_down[0]) {
    // Gradient with respect to bottom data.
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    caffe_gpu_set<Dtype>(M_ * K_, 0, bottom_diff);
    if (distance_type_ == "L2") {
      // NOLINT_NEXT_LINE(whitespace/operators)
      inner_distance_backward_L2<Dtype> << <CAFFE_GET_BLOCKS(M_ * K_), CAFFE_CUDA_NUM_THREADS >> > (
          M_, N_, K_, bottom_data, weight, top_diff, bottom_diff);
    }
    else if (distance_type_ == "L1") {
      // NOLINT_NEXT_LINE(whitespace/operators)
      inner_distance_backward_L1<Dtype> << <CAFFE_GET_BLOCKS(M_ * K_), CAFFE_CUDA_NUM_THREADS >> > (
          M_, N_, K_, bottom_data, weight, top_diff, bottom_diff);
    }
    else {
      NOT_IMPLEMENTED;
    }
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(InnerDistanceLayer);

}  // namespace caffe
the_stack
#pragma once

#include <gunrock/app/enactor_base.cuh>
#include <gunrock/app/enactor_iteration.cuh>
#include <gunrock/app/enactor_loop.cuh>
#include <gunrock/oprtr/oprtr.cuh>
#include <gunrock/app/knn/knn_problem.cuh>
#include <gunrock/app/knn/knn_helpers.cuh>
#include <gunrock/util/scan_device.cuh>
#include <gunrock/util/sort_device.cuh>
#include <gunrock/oprtr/1D_oprtr/for.cuh>
#include <gunrock/oprtr/oprtr.cuh>

#include <cstdio>
#include <cub/cub.cuh>
#include <cub/block/block_load.cuh>
#include <cub/block/block_store.cuh>
#include <cub/block/block_radix_sort.cuh>

// do not remove debug
//#define KNN_ENACTOR_DEBUG
#ifdef KNN_ENACTOR_DEBUG
#define debug(a...) printf(a)
#else
#define debug(a...)
#endif
// BUGFIX: a stray unconditional `#define debug(a...)` used to follow this
// block, silently disabling the KNN_ENACTOR_DEBUG toggle above (and
// redefining the macro when the toggle was on). It has been removed; with the
// toggle commented out, behavior is unchanged.

namespace gunrock {
namespace app {
namespace knn {

/**
 * @brief Speciflying parameters for knn Enactor
 * @param parameters The util::Parameter<...> structure holding all parameter
 * info \return cudaError_t error message(s), if any
 */
cudaError_t UseParameters_enactor(util::Parameters &parameters) {
  cudaError_t retval = cudaSuccess;
  GUARD_CU(app::UseParameters_enactor(parameters));
  return retval;
}

/**
 * @brief defination of knn iteration loop
 * @tparam EnactorT Type of enactor
 */
template <typename EnactorT>
struct knnIterationLoop : public IterationLoopBase<EnactorT, Use_FullQ | Push> {
  typedef typename EnactorT::VertexT VertexT;
  typedef typename EnactorT::SizeT SizeT;
  typedef typename EnactorT::ValueT ValueT;
  typedef typename EnactorT::Problem::GraphT::CsrT CsrT;
  typedef typename EnactorT::Problem::GraphT::GpT GpT;
  typedef IterationLoopBase<EnactorT, Use_FullQ | Push> BaseIterationLoop;

  knnIterationLoop() : BaseIterationLoop() {}

  /**
   * @brief Core computation of knn, one iteration
   * @param[in] peer_ Which GPU peers to work on, 0 means local
   * \return cudaError_t error message(s), if any
   */
  cudaError_t Core(int peer_ = 0) {
    // --
    // Alias variables
    auto &data_slice = this->enactor->problem->data_slices[this->gpu_num][0];
    auto &enactor_slice =
        this->enactor
            ->enactor_slices[this->gpu_num * this->enactor->num_gpus + peer_];
    auto &enactor_stats = enactor_slice.enactor_stats;
    auto &oprtr_parameters = enactor_slice.oprtr_parameters;
    auto &retval = enactor_stats.retval;

    // K-Nearest Neighbors output: keys and distances
    auto &keys_out = data_slice.knns;
    auto &distance_out = data_slice.distance_out;
    // Number of KNNs
    auto k = data_slice.k;
    // Number of points
    auto num_points = data_slice.num_points;
    // Dimension of labels
    auto dim = data_slice.dim;
    // List of points
    auto &points = data_slice.points;
    // Per-row device semaphores guarding concurrent top-k updates
    auto &sem = data_slice.sem;
    bool transpose = this->enactor->problem->transpose;
    cudaStream_t stream = oprtr_parameters.stream;
    auto target = util::DEVICE;

    if (transpose)
      debug("euclidean_distance will be use transpose version\n");
    else
      debug("euclidean distance wont be use transpose version\n");

    auto USE_SHARED_MEM = this->enactor->problem->use_shared_mem;
    int block_size = this->enactor->problem->block_size;
    int grid_size = this->enactor->problem->grid_size;
    int data_size = this->enactor->problem->data_size;
    int points_size = this->enactor->problem->points_size;
    int dist_size = this->enactor->problem->dist_size;
    int keys_size = this->enactor->problem->keys_size;
    //int shared_point_size = this->enactor->problem->shared_point_size;
    int shared_mem_size = this->enactor->problem->shared_mem_size;

    /* Operators */

    // Baseline operator (no semaphores): each thread owns row `src` and
    // performs an insertion into that row's sorted top-k distance array.
    auto knn_general_op = [num_points, k, dim, points, keys_out, transpose]
        __device__ (ValueT* d, const SizeT &src, char* shared) {
      ValueT* new_dist = (ValueT*)shared;
      int* dist_key = (int*)(shared + (blockDim.x * 8));
      dist_key[threadIdx.x] = src;
      for (SizeT i = 0; i < num_points; ++i) {
        new_dist[threadIdx.x] = (ValueT)0;
        if (src == i || src >= num_points) {
          new_dist[threadIdx.x] = util::PreDefinedValues<ValueT>::MaxValue;
        } else {
          new_dist[threadIdx.x] = euclidean_distance(
              dim, num_points, points.GetPointer(util::DEVICE), src, i,
              transpose);
        }
        if (src < num_points && new_dist[threadIdx.x] < d[src * k + k - 1]) {
          // new element is smaller than the largest in the distance array
          // for the "src" row: shift and insert.
          SizeT current = k - 1;
#pragma unroll
          for (; current > 0; --current) {
            SizeT one_before = current - 1;
            if (new_dist[threadIdx.x] >= d[src * k + one_before]) {
              d[src * k + current] = new_dist[threadIdx.x];
              keys_out[src * k + current] = i;
              break;
            } else {
              d[src * k + current] = d[src * k + one_before];
              keys_out[src * k + current] = keys_out[src * k + one_before];
            }
          }
          if (current == (SizeT)0) {
            d[src * k] = new_dist[threadIdx.x];
            keys_out[src * k] = i;
          }
        }
      }
    };

    // Semaphore-guarded operator: rows may be updated by several blocks, so
    // every read/modify of d/keys_out goes through volatile accesses inside
    // an acquire/release pair; batches are merged via bitonic sort.
    auto knn_half_op = [num_points, k, dim, points, keys_out, transpose, sem]
        __device__ (ValueT* d, const SizeT &src, char* shared) {
      ValueT* new_dist = (ValueT*)shared;
      SizeT* new_keys = (SizeT*)(shared + (blockDim.x * 8));
      int offset = (src / (blockDim.x * gridDim.x)) * blockDim.x * gridDim.x;
      for (SizeT i0 = offset; i0 < num_points; ++i0) {
        // Rotate the visiting order by blockIdx.x to reduce semaphore
        // contention between blocks.
        SizeT i = offset + ((i0 + blockIdx.x) % (num_points - offset));
        if (i != src && src < num_points) {
          new_dist[threadIdx.x] = euclidean_distance(
              dim, num_points, points.GetPointer(util::DEVICE), src, i,
              transpose);
          new_keys[threadIdx.x] = src;
        } else {
          new_dist[threadIdx.x] = util::PreDefinedValues<ValueT>::MaxValue;
          new_keys[threadIdx.x] = util::PreDefinedValues<SizeT>::InvalidValue;
        }
        acquire_semaphore(sem.GetPointer(util::DEVICE), src);
        if (src < num_points &&
            new_dist[threadIdx.x] < *((volatile ValueT*)(&d[src * k + k - 1]))) {
          SizeT current = k - 1;
#pragma unroll
          for (; current > 0; --current) {
            SizeT one_before = current - 1;
            if (new_dist[threadIdx.x] >=
                *((volatile ValueT*)(&d[src * k + one_before]))) {
              *((volatile ValueT*)(&d[src * k + current])) = new_dist[threadIdx.x];
              *((volatile int*)(&keys_out[src * k + current])) = i;
              break;
            } else {
              *((volatile ValueT*)(&d[src * k + current])) =
                  *((volatile ValueT*)(&d[src * k + one_before]));
              *((volatile int*)(&keys_out[src * k + current])) =
                  *((volatile int*)(&keys_out[src * k + one_before]));
            }
          }
          if (current == (SizeT)0) {
            *((volatile ValueT*)(&d[src * k])) = new_dist[threadIdx.x];
            *((volatile int*)(&keys_out[src * k])) = i;
          }
        }
        release_semaphore(sem.GetPointer(util::DEVICE), src);
        __syncthreads();
        if (i >= offset + (blockDim.x * gridDim.x) && i < num_points) {
          __syncthreads();
          // Bitonic sort on new_dist array:
          bitonic_sort(new_dist, new_keys, blockDim.x);
          __syncthreads();
          // Close semaphore for i row
          if (threadIdx.x == 0) {
            acquire_semaphore(sem.GetPointer(util::DEVICE), i);
          }
          // Find k smallest elements and merge them together to one array.
          if (threadIdx.x == 0) {
            int y = 0;
#pragma unroll
            for (int x = 0; x + y < k;) {
              if (new_dist[y] <= *((volatile ValueT*)(&d[i * k + x]))) {
                ++y;
              } else {
                ++x;
              }
            }
#pragma unroll
            for (int j = 0; y + j < k; ++j) {
              new_dist[y + j] = *((volatile ValueT*)(&d[i * k + j]));
              new_keys[y + j] = *((volatile int*)(&keys_out[i * k + j]));
            }
          }
          __syncthreads();
          // Bitonic sort on new_dist array:
          bitonic_sort(new_dist, new_keys, blockDim.x);
          __syncthreads();
#pragma unroll
          for (int j = threadIdx.x; j < k; j += blockDim.x) {
            *((volatile ValueT*)(&d[i * k + j])) = new_dist[j];
            *((volatile int*)(&keys_out[i * k + j])) = new_keys[j];
          }
          __syncthreads();
          if (threadIdx.x == 0) {
            release_semaphore(sem.GetPointer(util::DEVICE), i);
          }
        }
      }
    };

    // Shared-memory operator for row-major (non-transposed) point storage:
    // stages the block's base points and the current candidate point in
    // shared memory, with a +1 padded leading dimension to dodge bank
    // conflicts.
    auto knn_shared_not_transpose_op =
        [num_points, k, dim, points, keys_out, data_size, points_size,
         dist_size, keys_size]
        __device__ (ValueT* d, const SizeT &src, char* shared_mem) {
      // Get pointers to shared memory arrays
      ValueT* dist = (ValueT*)(shared_mem);
      ValueT* b_sh_points = (ValueT*)(shared_mem + dist_size);
      int* keys = (int*)(shared_mem + dist_size + points_size);
      ValueT* sh_point = (ValueT*)(shared_mem + dist_size + points_size + keys_size);
      __shared__ SizeT firstPoint;
      if (threadIdx.x == 0) {
        firstPoint = src;
      }
      __syncthreads();
      // Copying to shared memory (vectorized 2-wide loads when dim is even)
      if (dim % 2 == 0) {
#pragma unroll
        for (SizeT j = threadIdx.x; j < (blockDim.x * dim) / 2; j += blockDim.x) {
          if constexpr (sizeof(ValueT) == 8) {  // ValueT == double
            reinterpret_cast<double2*>(b_sh_points)[j] =
                reinterpret_cast<double2*>(points + firstPoint * dim)[j];
          } else {  // ValueT == float
            reinterpret_cast<float2*>(b_sh_points)[j] =
                reinterpret_cast<float2*>(points + firstPoint * dim)[j];
          }
        }
      } else {
#pragma unroll
        for (SizeT j = threadIdx.x; j < blockDim.x * dim; j += blockDim.x) {
          b_sh_points[j] = points[firstPoint * dim + j];
        }
      }
      __syncthreads();
      // Initializations of basic points
      ValueT array[100];
      // Copying shared memory to registers
#pragma unroll
      for (SizeT j = 0; j < dim; ++j) {
        array[j] = b_sh_points[threadIdx.x * dim + j];
      }
      __syncthreads();
      // Transpose to shared memory (padded stride blockDim.x+1)
#pragma unroll
      for (SizeT j = 0; j < dim; ++j) {
        b_sh_points[j * (blockDim.x + 1) + threadIdx.x] = array[j];
      }
      // Initializations of dist and keys
#pragma unroll
      for (int i = 0; i < k; ++i) {
        int idx = i * (blockDim.x + 1) + threadIdx.x;
        dist[idx] = util::PreDefinedValues<ValueT>::MaxValue;
        //keys[idx] = util::PreDefinedValues<int>::InvalidValue;
      }
      __syncthreads();
      for (SizeT i = 0; i < num_points; ++i) {
        // Stage candidate point i in sh_point
        if (dim % 2 == 0) {
#pragma unroll
          for (SizeT j = threadIdx.x; j < dim / 2; j += blockDim.x) {
            if constexpr (sizeof(ValueT) == 8) {  // ValueT == double
              reinterpret_cast<double2*>(sh_point)[j] =
                  reinterpret_cast<double2*>(points + (i * dim))[j];
            } else {  // ValueT == float
              reinterpret_cast<float2*>(sh_point)[j] =
                  reinterpret_cast<float2*>(points + (i * dim))[j];
            }
          }
        } else {
#pragma unroll
          for (SizeT j = threadIdx.x; j < dim; j += blockDim.x) {
            sh_point[j] = points[i * dim + j];
          }
        }
        __syncthreads();
        ValueT new_dist = 0;
        if (src == i || src >= num_points) {
          new_dist = util::PreDefinedValues<ValueT>::MaxValue;
        } else {
          new_dist = euclidean_distance(dim, b_sh_points, (int)threadIdx.x, sh_point);
        }
        if (new_dist < dist[((k - 1) * (blockDim.x + 1)) + threadIdx.x]) {
          // Insert into this thread's column of the padded top-k arrays.
          SizeT current = k - 1;
#pragma unroll
          for (; current > 0; --current) {
            SizeT one_before = current - 1;
            if (new_dist >= dist[(one_before * (blockDim.x + 1)) + threadIdx.x]) {
              dist[(current * (blockDim.x + 1)) + threadIdx.x] = new_dist;
              keys[(current * (blockDim.x + 1)) + threadIdx.x] = i;
              break;
            } else {
              dist[(current * (blockDim.x + 1)) + threadIdx.x] =
                  dist[(one_before * (blockDim.x + 1)) + threadIdx.x];
              keys[(current * (blockDim.x + 1)) + threadIdx.x] =
                  keys[(one_before * (blockDim.x + 1)) + threadIdx.x];
            }
          }
          if (current == (SizeT)0) {
            dist[threadIdx.x] = new_dist;
            keys[threadIdx.x] = i;
          }
        }
        __syncthreads();
      }
      // Un-pad keys into row-major order, then flush to global memory.
#pragma unroll
      for (int i = 0; i < k; ++i) {
        array[i] = keys[i * (blockDim.x + 1) + threadIdx.x];
      }
#pragma unroll
      for (int i = 0; i < k; ++i) {
        keys[threadIdx.x * k + i] = array[i];
      }
      __syncthreads();
      if (k % 2 == 0) {
#pragma unroll
        for (SizeT i = threadIdx.x; i < (blockDim.x * k) / 2; i += blockDim.x) {
          reinterpret_cast<int2*>(keys_out + firstPoint * k)[i] =
              reinterpret_cast<int2*>(keys)[i];
        }
      } else {
#pragma unroll
        for (SizeT i = threadIdx.x; i < blockDim.x * k; i += blockDim.x) {
          keys_out[firstPoint * k + i] = keys[i];
        }
      }
      __syncthreads();
    };

    // Shared-memory operator for column-major (transposed) point storage.
    auto knn_shared_transpose_op =
        [num_points, k, dim, points, keys_out, data_size, points_size,
         dist_size, keys_size]
        __device__ (ValueT* d, const SizeT &src, char* shared_mem) {
      ValueT* dist = (ValueT*)shared_mem;
      ValueT* b_sh_points = (ValueT*)(shared_mem + dist_size);
      int* keys = (int*)(shared_mem + dist_size + points_size);
      ValueT* sh_point = (ValueT*)(shared_mem + dist_size + points_size + keys_size);
      __shared__ int firstPoint;
      if (threadIdx.x == 0) {
        firstPoint = src;
      }
      __syncthreads();
      // Software-pipelined load of the block's base points, one coordinate
      // row (stride num_points) at a time.
      ValueT* ptr = points + firstPoint;
      int idx = threadIdx.x;
      if (firstPoint + threadIdx.x < num_points) {
        b_sh_points[idx] = ptr[threadIdx.x];
      }
      ptr += num_points;
      ValueT value = util::PreDefinedValues<ValueT>::InvalidValue;
      if (firstPoint + threadIdx.x < num_points) {
        value = ptr[threadIdx.x];
      }
      // Initializations of basic points
#pragma unroll
      for (SizeT i = 1; i < dim; ++i) {
        ptr += num_points;
        int idx = i * (blockDim.x + 1) + threadIdx.x;
        b_sh_points[idx] = value;
        value = util::PreDefinedValues<int>::InvalidValue;
        if (firstPoint + threadIdx.x < num_points) {
          value = ptr[threadIdx.x];
        }
      }
      // Initializations of dist and keys
#pragma unroll
      for (int i = 0; i < k; ++i) {
        int idx = i * (blockDim.x + 1) + threadIdx.x;
        dist[idx] = util::PreDefinedValues<ValueT>::MaxValue;
        keys[idx] = util::PreDefinedValues<int>::InvalidValue;
      }
#pragma unroll
      for (SizeT i = 0; i < num_points; ++i) {
        // Stage candidate point i (column i of the transposed layout).
#pragma unroll
        for (SizeT j = threadIdx.x; j < dim; j += blockDim.x) {
          sh_point[j] = points[j * num_points + i];
        }
        __syncthreads();
        ValueT new_dist = 0;
        if (src == i || src >= num_points) {
          new_dist = util::PreDefinedValues<ValueT>::MaxValue;
        } else {
          new_dist = euclidean_distance(dim, b_sh_points, (int)threadIdx.x, sh_point);
        }
        if (new_dist < dist[((k - 1) * (blockDim.x + 1)) + threadIdx.x]) {
          SizeT current = k - 1;
#pragma unroll
          for (; current > 0; --current) {
            SizeT one_before = current - 1;
            if (new_dist >= dist[(one_before * (blockDim.x + 1)) + threadIdx.x]) {
              dist[(current * (blockDim.x + 1)) + threadIdx.x] = new_dist;
              keys[(current * (blockDim.x + 1)) + threadIdx.x] = i;
              break;
            } else {
              dist[(current * (blockDim.x + 1)) + threadIdx.x] =
                  dist[(one_before * (blockDim.x + 1)) + threadIdx.x];
              keys[(current * (blockDim.x + 1)) + threadIdx.x] =
                  keys[(one_before * (blockDim.x + 1)) + threadIdx.x];
            }
          }
          if (current == (SizeT)0) {
            dist[threadIdx.x] = new_dist;
            keys[threadIdx.x] = i;
          }
        }
      }
      __syncthreads();
#pragma unroll
      for (int i = 0; i < blockDim.x; ++i) {
        if (threadIdx.x < k) {
          keys_out[(firstPoint + i) * k + threadIdx.x] =
              (ValueT)keys[threadIdx.x * (blockDim.x + 1) + i];
        }
      }
      __syncthreads();
    };

    if (!USE_SHARED_MEM) {
      debug("Used block size %d, grid size %d\n", block_size, grid_size);
      // Calculating theoretical occupancy
      int maxActiveBlocks;
      cudaOccupancyMaxActiveBlocksPerMultiprocessor(
          &maxActiveBlocks,
          oprtr::SharedForAll_Kernel<decltype(distance_out), SizeT,
                                     decltype(knn_half_op)>,
          block_size, (block_size * 12));
      debug("occupancy of SM is %d\n", maxActiveBlocks);
      // Checking rest of n-k points to choose k nearest.
      // Insertion n-k elements into sorted list.
      // (knn_general_op is the simpler, semaphore-free alternative.)
      GUARD_CU(distance_out.SharedForAll(
          knn_half_op, num_points, target, stream,
          block_size * (sizeof(ValueT) + sizeof(SizeT)), grid_size,
          block_size));
    } else {
      debug("Used threads %d, single data_size %d, shared memory %u, %d\n",
            block_size, data_size, shared_mem_size, sizeof(ValueT));
      // BUGFIX: this debug line used to also print shared_point_size, whose
      // declaration is commented out above; that reference broke compilation
      // whenever KNN_ENACTOR_DEBUG was enabled.
      debug("points size = %d, dist_size = %d, keys_size = %d\n",
            points_size, dist_size, keys_size);
      if (transpose) {
        // Points is transposed:
        //      I1  I2  .. IN
        //  DA L1A L2A .. LNA
        //  DB L1B L2B .. LNB
        //  ..  ..  ..  ..  ..
        //  DM L1M L2M .. LNM
        GUARD_CU2(distance_out.SharedForAll(
                      knn_shared_transpose_op, num_points, target, stream,
                      shared_mem_size, dim3(grid_size, 1, 1),
                      dim3(block_size, 1, 1)),
                  "shared for all failed");
      } else {
        // Points is not transposed:
        //      DA  DB  .. DM
        //  I1 L1A L1B .. L1M
        //  I2 L2A L2B .. L2M
        //  ..  ..  ..  ..  ..
        //  IN LNA LNB .. LNM
        // Calculating theoretical occupancy
        int maxActiveBlocks;
        cudaOccupancyMaxActiveBlocksPerMultiprocessor(
            &maxActiveBlocks,
            oprtr::SharedForAll_Kernel<decltype(distance_out), SizeT,
                                       decltype(knn_shared_not_transpose_op)>,
            block_size, shared_mem_size);
        debug("occupancy of SM is %d\n", maxActiveBlocks);
        GUARD_CU2(distance_out.SharedForAll(
                      knn_shared_not_transpose_op, num_points, target, stream,
                      shared_mem_size, grid_size, block_size),
                  "shared for all failed");
      }
    }

    return retval;
  }

  /**
   * @brief Routine to combine received data and local data
   * @tparam NUM_VERTEX_ASSOCIATES Number of data associated with each
   * transmition item, typed VertexT
   * @tparam NUM_VALUE__ASSOCIATES Number of data associated with each
   * transmition item, typed ValueT
   * @param received_length The numver of transmition items received
   * @param[in] peer_ which peer GPU the data came from
   * \return cudaError_t error message(s), if any
   */
  template <int NUM_VERTEX_ASSOCIATES, int NUM_VALUE__ASSOCIATES>
  cudaError_t ExpandIncoming(SizeT &received_length, int peer_) {
    // ================ INCOMPLETE TEMPLATE - MULTIGPU ====================
    auto &data_slice = this->enactor->problem->data_slices[this->gpu_num][0];
    auto &enactor_slice =
        this->enactor
            ->enactor_slices[this->gpu_num * this->enactor->num_gpus + peer_];
    // TODO: add problem specific data alias here, e.g.:
    // auto &distance = data_slice.distance;

    auto expand_op = [
        // TODO: pass data used by the lambda, e.g.: distance
    ] __host__ __device__(VertexT & key, const SizeT &in_pos,
                          VertexT *vertex_associate_ins,
                          ValueT *value__associate_ins) -> bool {
      // TODO: fill in the lambda to combine received and local data, e.g.:
      // ValueT in_val = value__associate_ins[in_pos];
      // ValueT old_val = atomicMin(distance + key, in_val);
      // if (old_val <= in_val) return false;
      return true;
    };

    cudaError_t retval =
        BaseIterationLoop::template ExpandIncomingBase<NUM_VERTEX_ASSOCIATES,
                                                       NUM_VALUE__ASSOCIATES>(
            received_length, peer_, expand_op);
    return retval;
  }

  // KNN completes in a single iteration.
  bool Stop_Condition(int gpu_num = 0) {
    auto it = this->enactor->enactor_slices[0].enactor_stats.iteration;
    if (it > 0)
      return true;
    else
      return false;
  }
};  // end of knnIterationLoop

/**
 * @brief knn enactor class.
 * @tparam _Problem Problem type we process on
 * @tparam ARRAY_FLAG Flags for util::Array1D used in the enactor
 * @tparam cudaHostRegisterFlag Flags for util::Array1D used in the enactor
 */
template <typename _Problem, util::ArrayFlag ARRAY_FLAG = util::ARRAY_NONE,
          unsigned int cudaHostRegisterFlag = cudaHostRegisterDefault>
class Enactor
    : public EnactorBase<typename _Problem::GraphT,
                         typename _Problem::GraphT::VertexT,
                         typename _Problem::GraphT::ValueT, ARRAY_FLAG,
                         cudaHostRegisterFlag> {
 public:
  typedef _Problem Problem;
  typedef typename Problem::SizeT SizeT;
  typedef typename Problem::VertexT VertexT;
  typedef typename Problem::GraphT GraphT;
  typedef typename GraphT::VertexT LabelT;
  typedef typename GraphT::ValueT ValueT;
  typedef EnactorBase<GraphT, LabelT, ValueT, ARRAY_FLAG, cudaHostRegisterFlag>
      BaseEnactor;
  typedef Enactor<Problem, ARRAY_FLAG, cudaHostRegisterFlag> EnactorT;
  typedef knnIterationLoop<EnactorT> IterationT;

  Problem *problem;
  IterationT *iterations;

  /**
   * @brief knn constructor
   */
  Enactor() : BaseEnactor("KNN"), problem(NULL) {
    this->max_num_vertex_associates = 0;
    this->max_num_value__associates = 1;
  }

  /**
   * @brief knn destructor
   */
  virtual ~Enactor() { /*Release();*/ }

  /*
   * @brief Releasing allocated memory space
   * @param target The location to release memory from
   * \return cudaError_t error message(s), if any
   */
  cudaError_t Release(util::Location target = util::LOCATION_ALL) {
    cudaError_t retval = cudaSuccess;
    GUARD_CU(BaseEnactor::Release(target));
    delete[] iterations;
    iterations = NULL;
    problem = NULL;
    return retval;
  }

  /**
   * @brief Initialize the problem.
   * @param[in] problem The problem object.
   * @param[in] target Target location of data
   * \return cudaError_t error message(s), if any
   */
  cudaError_t Init(Problem &problem, util::Location target = util::DEVICE) {
    cudaError_t retval = cudaSuccess;
    this->problem = &problem;

    // Lazy initialization
    GUARD_CU(BaseEnactor::Init(problem, Enactor_None, 2, NULL, target, false));

    for (int gpu = 0; gpu < this->num_gpus; gpu++) {
      GUARD_CU(util::SetDevice(this->gpu_idx[gpu]));
      auto &enactor_slice = this->enactor_slices[gpu * this->num_gpus + 0];
      auto &graph = problem.sub_graphs[gpu];
      GUARD_CU(enactor_slice.frontier.Allocate(1, 1, this->queue_factors));
    }

    iterations = new IterationT[this->num_gpus];
    for (int gpu = 0; gpu < this->num_gpus; gpu++) {
      GUARD_CU(iterations[gpu].Init(this, gpu));
    }

    GUARD_CU(this->Init_Threads(
        this, (CUT_THREADROUTINE) & (GunrockThread<EnactorT>)));
    return retval;
  }

  /**
   * @brief one run of knn, to be called within GunrockThread
   * @param thread_data Data for the CPU thread
   * \return cudaError_t error message(s), if any
   */
  cudaError_t Run(ThreadSlice &thread_data) {
    gunrock::app::Iteration_Loop<
        // change to how many {VertexT, ValueT} data need to communicate
        // per element in the inter-GPU sub-frontiers
        0, 1, IterationT>(thread_data, iterations[thread_data.thread_num]);
    return cudaSuccess;
  }

  /**
   * @brief Reset enactor
   * @param[in] target Target location of data
   * \return cudaError_t error message(s), if any
   */
  cudaError_t Reset(SizeT n, SizeT k, util::Location target = util::DEVICE) {
    typedef typename GraphT::GpT GpT;
    typedef typename GraphT::VertexT VertexT;
    cudaError_t retval = cudaSuccess;
    GUARD_CU(BaseEnactor::Reset(target));

    for (int gpu = 0; gpu < this->num_gpus; gpu++) {
      if (this->num_gpus == 1) {
        this->thread_slices[gpu].init_size = 1;
        for (int peer_ = 0; peer_ < this->num_gpus; peer_++) {
          auto &frontier =
              this->enactor_slices[gpu * this->num_gpus + peer_].frontier;
          frontier.queue_length = (peer_ == 0) ? 1 : 0;
          if (peer_ == 0) {
            GUARD_CU(frontier.V_Q()->ForEach(
                [] __host__ __device__(VertexT & v) { v = 0; }, 1, target, 0));
          }
        }
      } else {
        // MULTIGPU INCOMPLETE
      }
    }

    GUARD_CU(BaseEnactor::Sync());
    return retval;
  }

  /**
   * @brief Enacts a knn computing on the specified graph.
   * \return cudaError_t error message(s), if any
   */
  cudaError_t Enact() {
    cudaError_t retval = cudaSuccess;
    GUARD_CU(this->Run_Threads(this));
    util::PrintMsg("GPU KNN Done.", this->flag & Debug);
    return retval;
  }
};

}  // namespace knn
}  // namespace app
}  // namespace gunrock

// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
the_stack
// TODO: Run some/all tests for half-precision floating-point values, e.g __half from:
// #include <cuda_fp16.h>
// TODO: Also test behavior with warps with some inactive/exited lanes
#include <kat/detail/execution_space_specifiers.hpp>

using std::vector;
using std::uniform_int_distribution;

// Comparison tolerance for a result accumulated over `num_operations` inexact
// operations. The generic version (exact/integer types) allows no slack.
template <typename F>
inline F test_epsilon(std::size_t num_operations) { return 0; }

template <>
inline float test_epsilon<float>(std::size_t num_operations) { return 1e-5f * num_operations; }

template <>
inline double test_epsilon<double>(std::size_t num_operations) { return 1e-10 * num_operations; }

// A fixed-length sequence of increment/decrement choices for one thread:
// `true` at position i means "increment on the i'th operation", `false` means
// "decrement".
struct seq_type {
	enum { ops_per_thread = 4 };
	bool elements[ops_per_thread];
	KAT_HD bool do_increment(int i) { return elements[i]; }
	KAT_HD bool do_decrement(int i) { return not elements[i]; }
	KAT_HD bool& operator[](int i) { return elements[i]; }
	KAT_HD bool operator[](int i) const { return elements[i]; }
};

// A "large but safe" magnitude for type T, used for scaling random test inputs.
template <typename T>
inline std::enable_if_t<std::is_floating_point<T>::value, std::size_t> nice_big_value()
{
	// This is very hacky :-( — note the value is converted to T and then back
	// to std::size_t by the return.
	return T(10e6);
}

template <typename T>
inline std::enable_if_t<std::is_integral<T>::value, std::size_t> nice_big_value()
{
	return std::is_signed<T>::value ?
		std::numeric_limits<T>::max() :
		std::numeric_limits<T>::max() / 2;
}

// Midpoint of T's representable range: 0 for floating-point types, the average
// of min and max for integer types.
template <typename T>
inline T middle_of_domain()
{
	// TODO: Check T is linearly ordered
	static_assert(std::is_arithmetic<T>::value, "Invalid type");
	if (std::is_floating_point<T>::value) { return T(0); }
	else {
		return T(std::numeric_limits<T>::max() / 2.0 + std::numeric_limits<T>::min() / 2.0);
	}
}

namespace kernels {

// Each thread atomically adds (up to) ElementsPerThread consecutive elements
// of `data` into the single accumulator `*result`.
template <typename T, unsigned ElementsPerThread>
__global__ void test_add(
	T*       __restrict__ result,
	const T* __restrict__ data,
	std::size_t           data_size)
{
	// Notes:
	// * The access pattern used here is sub-optimal. But it doesn't matter since we're
	//   not trying to optimize speed.
	// * We could have used something like kat::collaborative::grid::linear::at_grid_stride
	//   but we want to minimize dependencies here.
	auto global_thread_index = threadIdx.x + blockIdx.x * blockDim.x;
	std::size_t pos = global_thread_index * ElementsPerThread;
	for(int i = 0; (i < ElementsPerThread) and (pos < data_size); i++) {
		kat::atomic::add(result, data[pos]);
		pos++;
	}
}

// Each thread atomically subtracts (up to) ElementsPerThread consecutive
// elements of `data` from the single accumulator `*result`.
// Access-pattern notes: same as for test_add above.
template <typename T, unsigned ElementsPerThread>
__global__ void test_subtract(
	T*       __restrict__ result,
	const T* __restrict__ data,
	std::size_t           data_size)
{
	auto global_thread_index = threadIdx.x + blockIdx.x * blockDim.x;
	std::size_t pos = global_thread_index * ElementsPerThread;
	for(int i = 0; (i < ElementsPerThread) and (pos < data_size); i++) {
		kat::atomic::subtract(result, data[pos]);
		pos++;
	}
}

// "Musical chairs": each thread repeatedly swaps its current data element with
// the single *extra_datum slot, writing whatever was there back into `data`.
// The multiset of values held in data[] plus *extra_datum is thus preserved.
template <typename T, unsigned ElementsPerThread>
__global__ void test_exchange(
	T* __restrict__ extra_datum,
	T* __restrict__ data,
	std::size_t     data_size)
{
	auto global_thread_index = threadIdx.x + blockIdx.x * blockDim.x;
	std::size_t pos = global_thread_index * ElementsPerThread;
	for(int i = 0; (i < ElementsPerThread) and (pos < data_size); i++) {
		auto datum_to_use = data[pos];
		auto previous_extra_datum = kat::atomic::exchange(extra_datum, datum_to_use);
		data[pos] = previous_extra_datum;
		pos++;
	}
}

// Each thread applies its own pre-sampled sequence of atomic increments and
// decrements to the shared *aggregate.
template <typename T, typename SeqType>
__global__ void test_inc_dec_sequences(
	T*       __restrict__ aggregate,
	SeqType* __restrict__ inc_dec_sequences)
{
	auto global_thread_index = threadIdx.x + blockIdx.x * blockDim.x;
	auto seq = inc_dec_sequences[global_thread_index];
	for(int i = 0; i < SeqType::ops_per_thread; i++) {
		if (seq[i]) { kat::atomic::increment(aggregate); }
		else        { kat::atomic::decrement(aggregate); }
	}
}

// Trampoline kernel: invokes an arbitrary testcase device function with the
// result buffer and any input arrays.
template <typename F, typename T, typename... Is>
__global__ void execute_testcase(
	F                          testcase_device_function,
	size_t                     num_values_to_populate,
	T*        __restrict__     values_to_populate,
	const Is* __restrict__ ... inputs
	)
{
	testcase_device_function(num_values_to_populate, values_to_populate, inputs...);
}

} // namespace kernels

template <typename T> struct tag {};

/**
 * @brief Executes a testcase intended to make certain checks using a GPU kernel
 * which produces the values to check for.
 *
 * @note The actual checks are eventually conducted on the host side, since doctest
 * code can't actually do anything useful on the GPU. So on the GPU side we "merely"
 * compute the values to check and let the test logic peform the actual comparison later
 * on.
 */
template <typename F, typename K, typename T, typename... Is, size_t... Indices>
auto execute_testcase_on_gpu(
	std::index_sequence<Indices...>,
	K                            testcase_kernel,
	F                            testcase_device_function,
	cuda::launch_configuration_t launch_config,
	size_t                       num_values_to_populate,
	T                            result_initial_fill_value,
	Is* __restrict__ ...         inputs)
{
	cuda::device_t device { cuda::device::current::get() };
	auto device_side_results { cuda::memory::device::make_unique<T[]>(device, num_values_to_populate) };
	// Pre-fill the result buffer so testcases may rely on its initial contents.
	cuda::launch(
		kernels::fill<T, kat::size_t>,
		make_busy_config(device),
		device_side_results.get(),
		result_initial_fill_value,
		num_values_to_populate);
	auto host_side_results { vector<T>(num_values_to_populate) };

	auto make_device_side_input = [&device](auto input, size_t n) {
		using input_type = std::remove_reference_t<decltype(*input)>;
		auto device_side_input = cuda::memory::device::make_unique<input_type[]>(device, n);
		// Fixed: copy `n` elements — the amount actually allocated. The lambda
		// previously ignored its `n` parameter and unconditionally copied
		// num_values_to_populate elements.
		cuda::memory::copy(device_side_input.get(), input, n * sizeof(input_type));
		return std::move(device_side_input);
	};
	auto device_side_inputs = std::make_tuple(
		make_device_side_input(inputs, num_values_to_populate)...
	);
	ignore(device_side_inputs); // for the case of no inputs

	cuda::launch(
		testcase_kernel,
		launch_config,
		testcase_device_function,
		num_values_to_populate,
		device_side_results.get(),
		std::get<Indices>(device_side_inputs).get()...
	);

	cuda::memory::copy(host_side_results.data(), device_side_results.get(), sizeof(T) * num_values_to_populate);
	return host_side_results;
}

// Convenience wrapper: builds the launch configuration and an index sequence
// over the inputs, then defers to execute_testcase_on_gpu.
template <typename F, typename T, typename... Is>
auto execute_non_uniform_testcase_on_gpu(
	F                              testcase_device_function,
	size_t                         num_values_to_populate,
	T                              result_initial_fill_value,
	cuda::grid::dimensions_t       grid_dimensions,
	cuda::grid::block_dimensions_t block_dimensions,
	Is* __restrict__ ...           inputs)
{
	auto launch_config { cuda::make_launch_config(grid_dimensions, block_dimensions) };

	return execute_testcase_on_gpu(
		typename std::make_index_sequence<sizeof...(Is)> {},
		kernels::execute_testcase<F, T, Is...>,
		testcase_device_function,
		launch_config,
		num_values_to_populate,
		result_initial_fill_value,
		inputs...
	);
}

// Total number of threads across all blocks of the grid.
size_t num_threads_in_grid(const cuda::launch_configuration_t& launch_config)
{
	return static_cast<size_t>(launch_config.grid_dimensions.volume())
		* launch_config.block_dimensions.volume();
}

enum { largest_type_size = 8 };

TEST_SUITE("atomics") {

/*
 * Test description:
 *
 * - Generate I.I.D. data using random sampling
 * - Add up all the elements using atomic add
 * - Make sure the final value is correct
 *
 */
TEST_CASE_TEMPLATE("add", T, INTEGER_TYPES, FLOAT_TYPES )
{
	cuda::device_t device { cuda::device::current::get() };
	// TODO: Test shuffles with non-full warps.
	const auto num_grid_blocks { 3 };
	const auto block_size { kat::warp_size * 3 };
	auto launch_config { cuda::make_launch_config(num_grid_blocks, block_size) };
	constexpr const auto elements_per_thread { 5 };
	constexpr auto data_size { num_grid_blocks * block_size * elements_per_thread };
	// Round the data allocation up to a whole multiple of largest_type_size.
	constexpr auto data_size_plus_alignment { data_size + (data_size % largest_type_size == 0 ?
		0 : (largest_type_size - data_size % largest_type_size)) };

	struct {
		decltype(cuda::memory::device::make_unique<T[]>(device, data_size_plus_alignment)) data;
		decltype(cuda::memory::device::make_unique<T[]>(device, largest_type_size)) result;
			// Using largest_type_size so we can safely CAS into the result
	} device_side;
	device_side.data = cuda::memory::device::make_unique<T[]>(device, data_size_plus_alignment);
	device_side.result = cuda::memory::device::make_unique<T[]>(device, largest_type_size);
	struct {
		vector<T> data;
		T result;
	} host_side;

	auto mean { 1 };
	auto standard_deviation { 11 };
	std::normal_distribution<double> distribution(mean, standard_deviation);
	host_side.data.reserve(data_size);
	util::random::insertion_generate_n(std::back_inserter(host_side.data), data_size, distribution);
		// Note that we could easily have overflow/underflow occurring with smaller types

	T expected_result { std::accumulate(host_side.data.begin(), host_side.data.end(), T{0}) };

	cuda::memory::copy(device_side.data.get(), host_side.data.data(), host_side.data.size() * sizeof(T) );
	host_side.result = T{0};
	cuda::memory::copy(device_side.result.get(), &host_side.result, sizeof(T));

	cuda::launch(
		::kernels::test_add<T, elements_per_thread>,
		launch_config,
		device_side.result.get(), device_side.data.get(), data_size);

	cuda::outstanding_error::ensure_none();

	cuda::memory::copy(&host_side.result, device_side.result.get(), sizeof(T));
	// (A dead debug read of the raw 8 result bytes into an unused int64_t was
	// removed here.)

	bool print_results { false };

	if (std::is_floating_point<T>::value) {
		CHECK(host_side.result == doctest::Approx(expected_result).epsilon(test_epsilon<T>(data_size)));
	}
	else {
		CHECK(host_side.result == expected_result);
	}
	if (print_results) {
		std::cout << "Results match for type " << util::type_name<T>() << ".\n";
	}
}

/*
 * Test description:
 *
 * - Generate I.I.D.
data using random sampling
 * - Initialize a device-memory value to the sum of all data, plus an arbitrary
 *   extra value (you'll see why in a moment)
 * - Use atomic subtract to subtract all data from the sum - reaching, not zero,
 *   but the arbitrary value
 */
TEST_CASE_TEMPLATE("subtract", T, INTEGER_TYPES, FLOAT_TYPES )
{
	cuda::device_t device { cuda::device::current::get() };
	// TODO: Test shuffles with non-full warps.
	const auto num_grid_blocks { 3 };
	const auto block_size { kat::warp_size * 3 };
	constexpr const auto elements_per_thread { 5 };
	auto launch_config { cuda::make_launch_config(num_grid_blocks, block_size) };
	constexpr auto data_size { num_grid_blocks * block_size * elements_per_thread };
	// Round the data allocation up to a whole multiple of largest_type_size.
	constexpr auto data_size_plus_alignment { data_size + (data_size % largest_type_size == 0 ?
		0 : (largest_type_size - data_size % largest_type_size)) };

	struct {
		decltype(cuda::memory::device::make_unique<T[]>(device, data_size_plus_alignment)) data;
		decltype(cuda::memory::device::make_unique<T[]>(device, largest_type_size)) result;
			// Using largest_type_size so we can safely CAS into the result
	} device_side;
	device_side.data = cuda::memory::device::make_unique<T[]>(device, data_size_plus_alignment);
	device_side.result = cuda::memory::device::make_unique<T[]>(device, largest_type_size);
	struct {
		vector<T> data;
		T result;
	} host_side;

	auto mean { 1 };
	auto standard_deviation { 11 };
	host_side.data.reserve(data_size);
	std::normal_distribution<double> distribution(mean, standard_deviation);
	util::random::insertion_generate_n(std::back_inserter(host_side.data), data_size, distribution);
		// Note that we could easily have overflow/underflow occurring with smaller types

	T sum_of_data { std::accumulate(host_side.data.begin(), host_side.data.end(), T{0}) };
	T arbitrary_extra_value = T{12};
	T expected_result = arbitrary_extra_value;

	cuda::memory::copy(device_side.data.get(), host_side.data.data(), host_side.data.size() * sizeof(T) );
	host_side.result = sum_of_data + arbitrary_extra_value;
		// We're initializing the result value to something high so we can't subtract without underflow
	cuda::memory::copy(device_side.result.get(), &host_side.result, sizeof(T));

	cuda::launch(
		::kernels::test_subtract<T, elements_per_thread>,
		launch_config,
		device_side.result.get(), device_side.data.get(), data_size);

	cuda::outstanding_error::ensure_none();

	cuda::memory::copy(&host_side.result, device_side.result.get(), sizeof(T));

	bool print_results { false };

	if (std::is_floating_point<T>::value) {
		CHECK(host_side.result == doctest::Approx(expected_result).epsilon(test_epsilon<T>(data_size)));
	}
	else {
		CHECK(host_side.result == expected_result);
	}
	if (print_results) {
		std::cout << "Results match for type " << util::type_name<T>() << ".\n";
	}
}

/*
 * Test description:
 *
 * - Generate I.I.D. data using random sampling
 * - Set some arbitrary value in global device memory
 * - Play "musical chairs" between the data and the global memory value using atomic exchange
 * - Finally, make sure we still have all the original data, and the original global-mem single value,
 *   when we consider the entire array + what's in the single-value location (but of course, it
 *   doesn't have to end up in the same positions as we started with)
 */
TEST_CASE_TEMPLATE("exchange", T, INTEGER_TYPES, FLOAT_TYPES )
{
	cuda::device_t device { cuda::device::current::get() };
	// TODO: Test shuffles with non-full warps.
	const auto num_grid_blocks { 3 };
	const auto block_size { kat::warp_size * 5 };
	auto launch_config { cuda::make_launch_config(num_grid_blocks, block_size) };
	constexpr const auto elements_per_thread { 5 };
	constexpr auto data_size { num_grid_blocks * block_size * elements_per_thread };
	// Round the data allocation up to a whole multiple of largest_type_size.
	constexpr auto data_size_plus_alignment { data_size + (data_size % largest_type_size == 0 ?
		0 : (largest_type_size - data_size % largest_type_size)) };

	struct {
		decltype(cuda::memory::device::make_unique<T[]>(device, data_size_plus_alignment)) data;
		decltype(cuda::memory::device::make_unique<T[]>(device, largest_type_size)) extra_datum;
			// Using largest_type_size so we can safely CAS into the result
	} device_side;
	device_side.data = cuda::memory::device::make_unique<T[]>(device, data_size_plus_alignment);
	device_side.extra_datum = cuda::memory::device::make_unique<T[]>(device, largest_type_size);
	struct {
		vector<T> input_data;
		vector<T> output_data;
		T extra_datum;
	} host_side;
	host_side.input_data.reserve(data_size + 1); // we're not using the +1 for now.

	auto base_value = static_cast<T>( M_PI / 4 );
		// This will be 0 for integers and around 0.7854 for floating-point types
	T delta { 1 };
		// NOTE(review): the original comment here was a copy-paste of the one
		// above; delta is simply 1 for every element type.
	for(size_t i = 0; i < data_size; i++) {
		host_side.input_data.push_back(base_value + i * delta);
		// Note that this may overflow and roll over to negative values, so don't assume
		// the input data is sorted. In fact, it might be better to just sample randomly here...
	}
	std::shuffle(host_side.input_data.begin(), host_side.input_data.end(), util::random::engine);
	host_side.extra_datum = base_value + data_size * delta;

	cuda::memory::copy(device_side.data.get(), host_side.input_data.data(), host_side.input_data.size() * sizeof(T) );
	cuda::memory::copy(device_side.extra_datum.get(), &host_side.extra_datum, sizeof(T));

	cuda::launch(
		::kernels::test_exchange<T, elements_per_thread>,
		launch_config,
		device_side.extra_datum.get(), device_side.data.get(), data_size);

	cuda::outstanding_error::ensure_none();

	host_side.output_data.resize(data_size + 1);
	cuda::memory::copy(host_side.output_data.data(), device_side.data.get(), data_size * sizeof(T) );
	cuda::memory::copy(host_side.output_data.data() + data_size, device_side.extra_datum.get(), sizeof(T));

	bool print_results { false };

	// Compare the two multisets (all input values + the original extra datum
	// vs. all output values + the final extra datum) by sorting both.
	host_side.input_data.push_back(host_side.extra_datum);
	std::sort(host_side.output_data.begin(), host_side.output_data.end());
	std::sort(host_side.input_data.begin(), host_side.input_data.end());
	auto mismatch_pair = std::mismatch(host_side.input_data.begin(), host_side.input_data.end(), host_side.output_data.begin());
	CHECK(mismatch_pair.first == host_side.input_data.end());
	if (mismatch_pair.first != host_side.input_data.end()) {
		std::cerr
			<< "Input data element #" << std::distance(host_side.input_data.begin(), mismatch_pair.first)
			<< "(in sorted order) is " << promote_for_streaming(*(mismatch_pair.first))
			<< " while output data element #" << std::distance(host_side.output_data.begin(), mismatch_pair.second)
			<< "(in sorted order) is " << promote_for_streaming(*(mismatch_pair.second))
			<< std::endl;
	}
	if (print_results) {
		std::cout << "Results as expected for type " << util::type_name<T>() << ".\n";
	}
}

/* kind of test:
   Sample I.I.D. short sequence sequence of increments and decrements
   (sample a modulus perhaps? not right now)
   (perhaps have each warp wait for some random amount of time?
   not right now)
   Initialize an arbitrary global value
   each thread applies increments and decrements according to one sampled sequence
   ensure the overall result is what it should be

   increment (T* address, T modulus) + decrement (T* address, T modulus);
 */
TEST_CASE_TEMPLATE("increment and decrement", T,
	short, int, long, long long,
	unsigned short, unsigned int, unsigned long, unsigned long long,
	float, double
)
{
	cuda::device_t device { cuda::device::current::get() };
	// TODO: Test shuffles with non-full warps.
	const auto num_grid_blocks { 3 };
	const auto block_size { kat::warp_size * 5 };
	auto launch_config { cuda::make_launch_config(num_grid_blocks, block_size) };
	auto num_grid_threads = num_threads_in_grid(launch_config);

	struct {
		decltype(cuda::memory::device::make_unique<seq_type[]>(device, num_grid_threads)) inc_dec_sequences;
		decltype(cuda::memory::device::make_unique<T>(device)) aggregate;
			// NOTE(review): the aggregate is a plain single T here (no
			// largest_type_size padding); the stale "CAS" comment copied from
			// the earlier testcases was removed.
	} device_side;
	device_side.inc_dec_sequences = cuda::memory::device::make_unique<seq_type[]>(device, num_grid_threads);
	device_side.aggregate = cuda::memory::device::make_unique<T>(device);
	struct {
		vector<seq_type> inc_dec_sequences;
		T aggregate;
		T expected_aggregate;
	} host_side;
	host_side.inc_dec_sequences.reserve(num_grid_threads);

	auto base_value = static_cast<T>(std::numeric_limits<T>::max() / 2);
		// Want to make sure we don't underflow or overflow with many decrements
		// or increments. That's still a danger with 8-bit T's though.
	if (std::is_floating_point<T>::value) {
		base_value += T(1/3.0); // exercise a non-integral starting value for FP types
	}
	std::normal_distribution<float> distribution(0, 1);
	auto& engine = util::random::engine;
	host_side.aggregate = base_value + 5 * std::sqrt(num_grid_threads);
		// NOTE(review): for integral T the double-valued sqrt addend is
		// implicitly truncated back to T here — presumably intentional slack;
		// confirm.
	host_side.expected_aggregate = host_side.aggregate;
	for(size_t i = 0; i < num_grid_threads; i++) {
		seq_type seq;
		for(auto j = 0; j < seq_type::ops_per_thread; j++) {
			// Coin flip: a positive normal sample means "increment".
			bool do_inc = (util::random::sample_from(distribution, engine) > 0);
			seq[j] = do_inc;
			host_side.expected_aggregate += do_inc ? 1 : -1;
		}
		host_side.inc_dec_sequences.push_back(seq);
	}

	cuda::memory::copy(device_side.inc_dec_sequences.get(), host_side.inc_dec_sequences.data(), host_side.inc_dec_sequences.size() * sizeof(seq_type) );
	cuda::memory::copy_single(device_side.aggregate.get(), &host_side.aggregate);

	cuda::launch(
		::kernels::test_inc_dec_sequences<T, seq_type>,
		launch_config,
		device_side.aggregate.get(), device_side.inc_dec_sequences.get());

	cuda::outstanding_error::ensure_none();

	cuda::memory::copy_single(&host_side.aggregate, device_side.aggregate.get());

	if (std::is_floating_point<T>::value) {
		// For some strange reason, this fails:
		//
		// const T tolerance = T(10^-5);
		// CHECK(host_side.aggregate == doctest::Approx(host_side.expected_aggregate).epsilon(tolerance));
		//
		// while this succeeds:
		CHECK(host_side.aggregate == host_side.expected_aggregate);
		// ... isn't that weird?
		// NOTE(review): it isn't — `10^-5` is bitwise XOR (10 xor -5 == -15),
		// not the exponent 1e-5, so the commented-out tolerance was garbage.
	}
	else {
		CHECK(host_side.aggregate == host_side.expected_aggregate);
	}
}

// Note: Testcases before this point in the file were written before the infrastructure
// in later test suite files was available (e.g.
// block_collaboration, sequence_ops) - // even though it now appears this file
// From here on we'll try using that

TEST_CASE_TEMPLATE("min - random values from host", T, INTEGER_TYPES, FLOAT_TYPES)
{
	auto device = cuda::device::current::get();
	cuda::grid::dimension_t num_grid_blocks = device.properties().multiProcessorCount * 2;
	cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 5 };
	auto input_length = num_grid_blocks * num_threads_per_block;

	vector<T> input_data;
	auto mean { middle_of_domain<T>() };
	auto standard_deviation { nice_big_value<T>() / 16.0 };
	std::normal_distribution<double> distribution(mean, standard_deviation);
	input_data.reserve(input_length);
	util::random::insertion_generate_n(std::back_inserter(input_data), input_length, distribution);
		// Note that we could easily have overflow/underflow occurring with smaller types

	T expected_result { *std::min_element(input_data.begin(), input_data.end()) };

	auto testcase_device_function =
		[=] KAT_DEV (
			size_t,
			T*       __restrict aggregate,
			const T* __restrict input_data)
		{
			namespace gi = kat::linear_grid::grid_info;
			auto thread_element = input_data[gi::thread::global_index()];
			// TODO: Should I sleep here? Use a block::barrier()?
			kat::atomic::min(aggregate, thread_element);
		};

	const auto num_values_to_populate { input_length };
		// Note: The result array will also have this many values - but we won't be using them.
	// max() is the identity element for min.
	constexpr const T fill_value { std::numeric_limits<T>::max() };

	auto result_container = execute_non_uniform_testcase_on_gpu(
		testcase_device_function,
		num_values_to_populate,
		fill_value,
		num_grid_blocks,
		num_threads_per_block,
		input_data.data()
	);
	T result { result_container[0] };

	// Note: We check for exact equality here even for floating-point types,
	// since there is no arithmetic performed
	CHECK(result == expected_result);
}

TEST_CASE_TEMPLATE("max - random values from host", T, INTEGER_TYPES, FLOAT_TYPES)
{
	auto device = cuda::device::current::get();
	cuda::grid::dimension_t num_grid_blocks = device.properties().multiProcessorCount * 2;
	cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 5 };
	auto input_length = num_grid_blocks * num_threads_per_block;

	vector<T> input_data;
	auto mean { middle_of_domain<T>() };
	auto standard_deviation { nice_big_value<T>() / 16.0 };
	std::normal_distribution<double> distribution(mean, standard_deviation);
	input_data.reserve(input_length);
	util::random::insertion_generate_n(std::back_inserter(input_data), input_length, distribution);
		// Note that we could easily have overflow/underflow occurring with smaller types

	T expected_result { *std::max_element(input_data.begin(), input_data.end()) };

	auto testcase_device_function =
		[=] KAT_DEV (
			size_t,
			T*       __restrict aggregate,
			const T* __restrict input_data)
		{
			namespace gi = kat::linear_grid::grid_info;
			auto thread_element = input_data[gi::thread::global_index()];
			// TODO: Should I sleep here? Use a block::barrier()?
			kat::atomic::max(aggregate, thread_element);
		};

	const auto num_values_to_populate { input_length };
		// Note: The result array will also have this many values - but we won't be using them.
	// Fixed: was std::numeric_limits<T>::min(), which for floating-point types
	// is the smallest *positive* value — not a correct identity element for
	// max. lowest() is correct for all arithmetic types (identical to min()
	// for integers).
	constexpr const T fill_value { std::numeric_limits<T>::lowest() };

	auto result_container = execute_non_uniform_testcase_on_gpu(
		testcase_device_function,
		num_values_to_populate,
		fill_value,
		num_grid_blocks,
		num_threads_per_block,
		input_data.data()
	);
	T result { result_container[0] };

	// Note: We check for exact equality here even for floating-point types,
	// since there is no arithmetic performed
	CHECK(result == expected_result);
}

TEST_CASE_TEMPLATE("min - single outlier", T, INTEGER_TYPES, FLOAT_TYPES)
{
	auto device = cuda::device::current::get();
	cuda::grid::dimension_t num_grid_blocks = device.properties().multiProcessorCount * 2;
	cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 5 };
	auto input_length = num_grid_blocks * num_threads_per_block;

	auto uniform_value { middle_of_domain<T>() };
	vector<T> input_data(input_length, uniform_value);
	uniform_int_distribution<std::size_t> distribution(0, input_length - 1);
	auto outlier_pos = util::random::sample_from(distribution);
	input_data[outlier_pos]--; // this is the minimum!
	auto expected_result { T(uniform_value - T(1)) };

	auto testcase_device_function =
		[=] KAT_DEV (
			size_t,
			T*       __restrict aggregate,
			const T* __restrict input_data)
		{
			namespace gi = kat::linear_grid::grid_info;
			auto thread_element = input_data[gi::thread::global_index()];
			// TODO: Should I sleep here? Use a block::barrier()?
			kat::atomic::min(aggregate, thread_element);
		};

	const auto num_values_to_populate { input_length };
		// Note: The result array will also have this many values - but we won't be using them.
	constexpr const T fill_value { std::numeric_limits<T>::max() };

	auto result_container = execute_non_uniform_testcase_on_gpu(
		testcase_device_function,
		num_values_to_populate,
		fill_value,
		num_grid_blocks,
		num_threads_per_block,
		input_data.data()
	);
	T result { result_container[0] };

	// Note: We check for exact equality here even for floating-point types,
	// since there is no arithmetic performed
	CHECK(result == expected_result);
}

TEST_CASE_TEMPLATE("max - single outlier", T, INTEGER_TYPES, FLOAT_TYPES)
{
	auto device = cuda::device::current::get();
	cuda::grid::dimension_t num_grid_blocks = device.properties().multiProcessorCount * 2;
	cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 5 };
	auto input_length = // elements_per_thread *
		num_grid_blocks * num_threads_per_block;

	auto uniform_value { middle_of_domain<T>() };
	vector<T> input_data(input_length, uniform_value);
	uniform_int_distribution<std::size_t> distribution(0, input_length - 1);
	auto outlier_pos = util::random::sample_from(distribution);
	input_data[outlier_pos]++; // this is the maximum!
	auto expected_result { T(uniform_value + T(1)) };

	auto testcase_device_function =
		[=] KAT_DEV (
			size_t,
			T*       __restrict aggregate,
			const T* __restrict input_data)
		{
			namespace gi = kat::linear_grid::grid_info;
			auto thread_element = input_data[gi::thread::global_index()];
			// TODO: Should I sleep here? Use a block::barrier()?
			kat::atomic::max(aggregate, thread_element);
		};

	const auto num_values_to_populate { input_length };
		// Note: The result array will also have this many values - but we won't be using them.
	// Fixed: lowest(), not min() — see "max - random values from host" above.
	constexpr const T fill_value { std::numeric_limits<T>::lowest() };

	auto result_container = execute_non_uniform_testcase_on_gpu(
		testcase_device_function,
		num_values_to_populate,
		fill_value,
		num_grid_blocks,
		num_threads_per_block,
		input_data.data()
	);
	T result { result_container[0] };

	// Note: We check for exact equality here even for floating-point types,
	// since there is no arithmetic performed
	CHECK(result == expected_result);
}

TEST_CASE_TEMPLATE("logical_and - single outlier", T, INTEGER_TYPES)
{
	auto device = cuda::device::current::get();
	cuda::grid::dimension_t num_grid_blocks = device.properties().multiProcessorCount * 2;
	cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 5 };
	auto input_length = num_grid_blocks * num_threads_per_block;

	T uniform_value(true);
	vector<T> input_data(input_length, uniform_value);
	uniform_int_distribution<std::size_t> distribution(0, input_length - 1);
	auto outlier_pos = util::random::sample_from(distribution);
	input_data[outlier_pos] = false; // this is the conjunction value!
	T expected_result(false);

	auto testcase_device_function =
		[=] KAT_DEV (
			size_t,
			T*       __restrict aggregate,
			const T* __restrict input_data)
		{
			namespace gi = kat::linear_grid::grid_info;
			auto thread_element = input_data[gi::thread::global_index()];
			// TODO: Should I sleep here? Use a block::barrier()?
			kat::atomic::logical_and(aggregate, thread_element);
		};

	const auto num_values_to_populate { input_length };
		// Note: The result array will also have this many values - but we won't be using them.
	const T fill_value { uniform_value };

	auto result_container = execute_non_uniform_testcase_on_gpu(
		testcase_device_function,
		num_values_to_populate,
		fill_value,
		num_grid_blocks,
		num_threads_per_block,
		input_data.data()
	);
	T result { result_container[0] };

	CHECK(result == expected_result);
}

TEST_CASE_TEMPLATE("logical_or - single outlier", T, INTEGER_TYPES)
{
	auto device = cuda::device::current::get();
	cuda::grid::dimension_t num_grid_blocks = device.properties().multiProcessorCount * 2;
	cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 5 };
	auto input_length = num_grid_blocks * num_threads_per_block;

	T uniform_value(false);
	vector<T> input_data(input_length, uniform_value);
	uniform_int_distribution<std::size_t> distribution(0, input_length - 1);
	auto outlier_pos = util::random::sample_from(distribution);
	input_data[outlier_pos] = true; // this is the disjunction value!
	T expected_result(true);

	auto testcase_device_function =
		[=] KAT_DEV (
			size_t,
			T*       __restrict aggregate,
			const T* __restrict input_data)
		{
			namespace gi = kat::linear_grid::grid_info;
			auto thread_element = input_data[gi::thread::global_index()];
			// TODO: Should I sleep here? Use a block::barrier()?
			kat::atomic::logical_or(aggregate, thread_element);
		};

	const auto num_values_to_populate { input_length };
		// Note: The result array will also have this many values - but we won't be using them.
	const T fill_value { uniform_value };

	auto result_container = execute_non_uniform_testcase_on_gpu(
		testcase_device_function,
		num_values_to_populate,
		fill_value,
		num_grid_blocks,
		num_threads_per_block,
		input_data.data()
	);
	T result { result_container[0] };

	CHECK(result == expected_result);
}

TEST_CASE_TEMPLATE("logical_xor - single outlier 0", T, INTEGER_TYPES)
{
	auto device = cuda::device::current::get();
	cuda::grid::dimension_t num_grid_blocks = device.properties().multiProcessorCount * 2;
	cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 5 };
	auto input_length = // elements_per_thread *
		num_grid_blocks * num_threads_per_block;

	T uniform_value(1);
	vector<T> input_data(input_length, uniform_value);
	uniform_int_distribution<std::size_t> distribution(0, input_length - 1);
	auto outlier_pos = util::random::sample_from(distribution);
	input_data[outlier_pos] = 0;
	const T fill_value { 0 };
	// All but one thread XORs a truthy value in, so parity decides the result.
	T expected_result( (input_length-1) % 2 );

	auto testcase_device_function =
		[=] KAT_DEV (
			size_t,
			T*       __restrict aggregate,
			const T* __restrict input_data)
		{
			namespace gi = kat::linear_grid::grid_info;
			auto thread_element = input_data[gi::thread::global_index()];
			// TODO: Should I sleep here? Use a block::barrier()?
			kat::atomic::logical_xor(aggregate, thread_element);
		};

	const auto num_values_to_populate { input_length };
		// Note: The result array will also have this many values - but we won't be using them.

	auto result_container = execute_non_uniform_testcase_on_gpu(
		testcase_device_function,
		num_values_to_populate,
		fill_value,
		num_grid_blocks,
		num_threads_per_block,
		input_data.data()
	);
	T result { result_container[0] };

	CHECK(result == expected_result);
}

TEST_CASE_TEMPLATE("logical_xor - single outlier 1", T, INTEGER_TYPES)
{
	auto device = cuda::device::current::get();
	cuda::grid::dimension_t num_grid_blocks = device.properties().multiProcessorCount * 2;
	cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 5 };
	auto input_length = // elements_per_thread *
		num_grid_blocks * num_threads_per_block;

	T uniform_value(0);
	vector<T> input_data(input_length, uniform_value);
	uniform_int_distribution<std::size_t> distribution(0, input_length - 1);
	auto outlier_pos = util::random::sample_from(distribution);
	input_data[outlier_pos] = 1;
	// Exactly one truthy element gets XORed into a zero-filled slot.
	T expected_result( 1 );
	const T fill_value { 0 };

	auto testcase_device_function =
		[=] KAT_DEV (
			size_t,
			T*       __restrict aggregate,
			const T* __restrict input_data)
		{
			namespace gi = kat::linear_grid::grid_info;
			auto thread_element = input_data[gi::thread::global_index()];
			// TODO: Should I sleep here? Use a block::barrier()?
			kat::atomic::logical_xor(aggregate, thread_element);
		};

	const auto num_values_to_populate { input_length };
		// Note: The result array will also have this many values - but we won't be using them.

	auto result_container = execute_non_uniform_testcase_on_gpu(
		testcase_device_function,
		num_values_to_populate,
		fill_value,
		num_grid_blocks,
		num_threads_per_block,
		input_data.data()
	);
	T result { result_container[0] };

	CHECK(result == expected_result);
}

TEST_CASE_TEMPLATE("logical_not - single non-negator", T, INTEGER_TYPES)
{
	auto device = cuda::device::current::get();
	cuda::grid::dimension_t num_grid_blocks = device.properties().multiProcessorCount * 2;
	cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 5 };
	auto input_length = // elements_per_thread *
		num_grid_blocks * num_threads_per_block;

	uniform_int_distribution<std::size_t> distribution(0, input_length - 1);
	auto outlier_pos = util::random::sample_from(distribution);
	const T fill_value { 0 };
	// input_length - 1 threads each negate once, so parity decides the result.
	T expected_result( (input_length-1) % 2 );

	auto testcase_device_function =
		[=] KAT_DEV (
			size_t,
			T* __restrict aggregate)
		{
			namespace gi = kat::linear_grid::grid_info;
			// TODO: Should I sleep here? Use a block::barrier()?
			if (not (outlier_pos == gi::thread::global_id()) ) {
				kat::atomic::logical_not(aggregate);
			}
		};

	const auto num_values_to_populate { input_length };
		// Note: The result array will also have this many values - but we won't be using them.

	auto result_container = execute_non_uniform_testcase_on_gpu(
		testcase_device_function,
		num_values_to_populate,
		fill_value,
		num_grid_blocks,
		num_threads_per_block
	);
	T result { result_container[0] };

	CHECK(result == expected_result);
}

TEST_CASE_TEMPLATE("logical_not - single negater", T, INTEGER_TYPES)
{
	auto device = cuda::device::current::get();
	cuda::grid::dimension_t num_grid_blocks = device.properties().multiProcessorCount * 2;
	cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 5 };
	auto input_length = // elements_per_thread *
		num_grid_blocks * num_threads_per_block;

	uniform_int_distribution<std::size_t> distribution(0, input_length - 1);
	auto outlier_pos = util::random::sample_from(distribution);
	// A single negation of the zero-filled slot yields 1.
	T expected_result( 1 );
	const T fill_value { 0 };

	auto testcase_device_function =
		[=] KAT_DEV (
			size_t,
			T* __restrict aggregate)
		{
			namespace gi = kat::linear_grid::grid_info;
			if (outlier_pos == gi::thread::global_id() ) {
				kat::atomic::logical_not(aggregate);
			}
		};

	const auto num_values_to_populate { input_length };
		// Note: The result array will also have this many values - but we won't be using them.

	auto result_container = execute_non_uniform_testcase_on_gpu(
		testcase_device_function,
		num_values_to_populate,
		fill_value,
		num_grid_blocks,
		num_threads_per_block);
	T result { result_container[0] };

	CHECK(result == expected_result);
}

TEST_CASE_TEMPLATE("logical_not - by random threads", T, INTEGER_TYPES)
{
	auto device = cuda::device::current::get();
	cuda::grid::dimension_t num_grid_blocks = device.properties().multiProcessorCount * 2;
	cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 5 };
	auto input_length = // elements_per_thread *
		num_grid_blocks * num_threads_per_block;

	vector<fake_bool> perform_op_indicators;
	uniform_int_distribution<fake_bool> distribution(0, 1);
		// NOTE(review): uniform_int_distribution requires one of the standard
		// (at-least-short) integer types; confirm fake_bool satisfies that.
	perform_op_indicators.reserve(input_length);
	util::random::insertion_generate_n(std::back_inserter(perform_op_indicators), input_length, distribution);
		// Note that we could easily have overflow/underflow occurring with smaller types

	constexpr const T fill_value (0);
	// Replay the sampled negations on the host to get the expected parity.
	T expected_result (fill_value);
	std::for_each(
		std::cbegin(perform_op_indicators), std::cend(perform_op_indicators),
		[&](bool b){ if(b) expected_result = not expected_result; });

	auto testcase_device_function =
		[=] KAT_DEV (
			size_t,
			T*               __restrict target,
			const fake_bool* __restrict perform_op_indicators)
		{
			namespace gi = kat::linear_grid::grid_info;
			bool perform_op = perform_op_indicators[gi::thread::global_index()];
			// TODO: Should I sleep here? Use a block::barrier()?
			if (perform_op) {
				kat::atomic::logical_not(target);
			}
		};

	const auto num_values_to_populate { input_length };
		// Note: The result array will also have this many values - but we won't be using them.

	auto result_container = execute_non_uniform_testcase_on_gpu(
		testcase_device_function,
		num_values_to_populate,
		fill_value,
		num_grid_blocks,
		num_threads_per_block,
		perform_op_indicators.data()
	);
	T result { result_container[0] };

	// Note: We check for exact equality here even for floating-point types,
	// since there is no arithmetic performed
	CHECK(result == expected_result);
}

TEST_CASE_TEMPLATE("bitwise_and - single outliers", T, INTEGER_TYPES)
{
	auto device = cuda::device::current::get();
	cuda::grid::dimension_t num_grid_blocks = device.properties().multiProcessorCount * 2;
	cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 5 };
	auto input_length = // elements_per_thread *
		num_grid_blocks * num_threads_per_block;

	T uniform_value(~T(0));
	vector<T> input_data(input_length, uniform_value);
	uniform_int_distribution<std::size_t> distribution(0, input_length - 1);
	constexpr const auto no_outlier_at_this_bit_index = kat::size_in_bits<T>();
	vector<decltype(kat::size_in_bits<T>())> outlier_positions(kat::size_in_bits<T>(), no_outlier_at_this_bit_index);
	T expected_result(~T(0));
	// An outlier for every second bit, starting from the LSB
	for(auto bit_index = 1; bit_index < kat::size_in_bits<T>(); bit_index += 2) {
		outlier_positions[bit_index] = util::random::sample_from(distribution);
		input_data[outlier_positions[bit_index]] &= ~(T(1) << bit_index);
		expected_result &= ~(T(1) << bit_index);
	}

	auto testcase_device_function =
		[=] KAT_DEV (
			size_t,
			T*       __restrict aggregate,
			const T* __restrict input_data)
		{
			namespace gi = kat::linear_grid::grid_info;
			auto thread_element = input_data[gi::thread::global_index()];
			// TODO: Should I sleep here? Use a block::barrier()?
			auto prev = kat::atomic::bitwise_and(aggregate, thread_element);
		};

	const auto num_values_to_populate { input_length };
		// Note: The result array will also have this many values - but we won't be using them.
const T fill_value { uniform_value }; auto result_container = execute_non_uniform_testcase_on_gpu( testcase_device_function, num_values_to_populate, fill_value, num_grid_blocks, num_threads_per_block, input_data.data() ); T result { result_container[0] }; CHECK(result == expected_result); } TEST_CASE_TEMPLATE("bitwise_or - single outliers", T, INTEGER_TYPES) { auto device = cuda::device::current::get(); cuda::grid::dimension_t num_grid_blocks = device.properties().multiProcessorCount * 2; cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 5 }; auto input_length = // elements_per_thread * num_grid_blocks * num_threads_per_block; T uniform_value(0); vector<T> input_data(input_length, uniform_value); uniform_int_distribution<std::size_t> distribution(0, input_length - 1); constexpr const auto no_outlier_at_this_bit_index = kat::size_in_bits<T>(); vector<decltype(kat::size_in_bits<T>())> outlier_positions(kat::size_in_bits<T>(), no_outlier_at_this_bit_index); T expected_result(0); // An outlier for every second bit, starting from the LSB for(auto bit_index = 1; bit_index < kat::size_in_bits<T>(); bit_index += 2) { outlier_positions[bit_index] = util::random::sample_from(distribution); input_data[outlier_positions[bit_index]] |= (T(1) << bit_index); expected_result |= (T(1) << bit_index); } auto testcase_device_function = [=] KAT_DEV ( size_t, T* __restrict aggregate, const T* __restrict input_data) { namespace gi = kat::linear_grid::grid_info; auto thread_element = input_data[gi::thread::global_index()]; // TODO: Should I sleep here? Use a block::barrier()? auto prev = kat::atomic::bitwise_or(aggregate, thread_element); }; const auto num_values_to_populate { input_length }; // Note: The result array will also have this many values - but we won't be using them. 
const T fill_value { uniform_value }; auto result_container = execute_non_uniform_testcase_on_gpu( testcase_device_function, num_values_to_populate, fill_value, num_grid_blocks, num_threads_per_block, input_data.data() ); T result { result_container[0] }; CHECK(result == expected_result); } TEST_CASE_TEMPLATE("bitwise_xor - random values from host", T, INTEGER_TYPES) { auto device = cuda::device::current::get(); cuda::grid::dimension_t num_grid_blocks = device.properties().multiProcessorCount * 2; cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 5 }; // constexpr const unsigned elements_per_thread { 3 }; auto input_length = // elements_per_thread * num_grid_blocks * num_threads_per_block; vector<T> input_data; uniform_int_distribution<T> distribution(0, std::numeric_limits<T>::max()); input_data.reserve(input_length); util::random::insertion_generate_n(std::back_inserter(input_data), input_length, distribution); // Note that we could easily have overflow/underflow occurring with smaller types T expected_result (0); std::for_each(std::cbegin(input_data), std::cend(input_data), [&](T x){ expected_result ^= x; }); auto testcase_device_function = [=] KAT_DEV ( size_t, T* __restrict target, const T* __restrict input_data) { namespace gi = kat::linear_grid::grid_info; auto thread_element = input_data[gi::thread::global_index()]; // TODO: Should I sleep here? Use a block::barrier()? auto prev = kat::atomic::bitwise_xor(target, thread_element); }; const auto num_values_to_populate { input_length }; // Note: The result array will also have this many values - but we won't be using them. 
constexpr const T fill_value (0); auto result_container = execute_non_uniform_testcase_on_gpu( testcase_device_function, num_values_to_populate, fill_value, num_grid_blocks, num_threads_per_block, input_data.data() ); T result { result_container[0] }; CHECK(result == expected_result); } TEST_CASE_TEMPLATE("bitwise_not - by random threads", T, INTEGER_TYPES) { auto device = cuda::device::current::get(); cuda::grid::dimension_t num_grid_blocks = device.properties().multiProcessorCount * 2; cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 5 }; auto input_length = // elements_per_thread * num_grid_blocks * num_threads_per_block; vector<fake_bool> perform_op_indicators; uniform_int_distribution<fake_bool> distribution(0, 1); perform_op_indicators.reserve(input_length); util::random::insertion_generate_n(std::back_inserter(perform_op_indicators), input_length, distribution); // Note that we could easily have overflow/underflow occurring with smaller types constexpr const T fill_value (T(0xDEADBEEFCAFEBABEllu)); T expected_result (fill_value); std::for_each( std::cbegin(perform_op_indicators), std::cend(perform_op_indicators), [&](bool b){ if(b) expected_result = ~expected_result; }); auto testcase_device_function = [=] KAT_DEV ( size_t, T* __restrict target, const fake_bool* __restrict perform_op_indicators) { namespace gi = kat::linear_grid::grid_info; bool perform_op = perform_op_indicators[gi::thread::global_index()]; // TODO: Should I sleep here? Use a block::barrier()? if (perform_op) { kat::atomic::bitwise_not(target); } }; const auto num_values_to_populate { input_length }; // Note: The result array will also have this many values - but we won't be using them. 
auto result_container = execute_non_uniform_testcase_on_gpu( testcase_device_function, num_values_to_populate, fill_value, num_grid_blocks, num_threads_per_block, perform_op_indicators.data() ); T result { result_container[0] }; CHECK(result == expected_result); } TEST_CASE_TEMPLATE("set_bit - few outliers", T, long int) { // INTEGER_TYPES) { using bit_index_type = unsigned; auto device = cuda::device::current::get(); cuda::grid::dimension_t num_grid_blocks = device.properties().multiProcessorCount * 2; cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 5 }; size_t input_length = num_grid_blocks * num_threads_per_block; const size_t num_outliers = size_in_bits<T>() / 3; vector<bit_index_type> half_bit_indices; uniform_int_distribution<bit_index_type> half_bit_index_distribution(0, size_in_bits<T>() / 2 - 1); util::random::insertion_generate_n(std::back_inserter(half_bit_indices), input_length , half_bit_index_distribution); vector<bit_index_type> bit_indices; std::transform(std::cbegin(half_bit_indices), std::cend(half_bit_indices), std::back_inserter(bit_indices), [](auto half_bit_index) { return half_bit_index * 2 + 1; } ); auto outlier_positions = util::random::sample_index_subset(input_length, num_outliers); auto outlier_half_bit_indices = util::random::sample_index_subset(size_in_bits<T>() / 2, num_outliers); auto outlier_positions_iter = std::cbegin(outlier_positions); auto outlier_half_bit_indices_iter = std::cbegin(outlier_half_bit_indices); for(size_t i = 0; i < num_outliers; i++) { auto outlier_position = *(outlier_positions_iter++); auto half_bit_index = *(outlier_half_bit_indices_iter++); bit_indices[outlier_position] -= 1; // the outliers set even bits instead of odd ones } constexpr const T fill_value = T(0xDEADBEEFCAFED00Dlu); T expected_result (fill_value); for(auto bit_index : bit_indices) { expected_result |= (T(1) << bit_index); } auto testcase_device_function = [=] KAT_DEV ( size_t, T* __restrict target, const bit_index_type* 
__restrict bit_indices ) { namespace gi = kat::linear_grid::grid_info; auto bit_index = bit_indices[gi::thread::global_index()]; // TODO: Should I sleep here? Use a block::barrier()? kat::atomic::set_bit(target, bit_index); }; const auto num_values_to_populate { input_length }; // Note: The result array will also have this many values - but we won't be using them. auto result_container = execute_non_uniform_testcase_on_gpu( testcase_device_function, num_values_to_populate, fill_value, num_grid_blocks, num_threads_per_block, bit_indices.data() ); T result { result_container[0] }; CHECK(result == expected_result); } TEST_CASE_TEMPLATE("unset_bit - few outliers", T, long int) { // INTEGER_TYPES) { using bit_index_type = unsigned; auto device = cuda::device::current::get(); cuda::grid::dimension_t num_grid_blocks = device.properties().multiProcessorCount * 2; cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 5 }; size_t input_length = num_grid_blocks * num_threads_per_block; const size_t num_outliers = size_in_bits<T>() / 3; vector<bit_index_type> half_bit_indices; uniform_int_distribution<bit_index_type> half_bit_index_distribution(0, size_in_bits<T>() / 2 - 1); util::random::insertion_generate_n(std::back_inserter(half_bit_indices), input_length , half_bit_index_distribution); vector<bit_index_type> bit_indices; std::transform(std::cbegin(half_bit_indices), std::cend(half_bit_indices), std::back_inserter(bit_indices), [](auto half_bit_index) { return half_bit_index * 2 + 1; } ); auto outlier_positions = util::random::sample_index_subset(input_length, num_outliers); auto outlier_half_bit_indices = util::random::sample_index_subset(size_in_bits<T>() / 2, num_outliers); auto outlier_positions_iter = std::cbegin(outlier_positions); auto outlier_half_bit_indices_iter = std::cbegin(outlier_half_bit_indices); for(size_t i = 0; i < num_outliers; i++) { auto outlier_position = *(outlier_positions_iter++); auto half_bit_index = 
*(outlier_half_bit_indices_iter++); bit_indices[outlier_position] -= 1; // the outliers set even bits instead of odd ones } constexpr const T fill_value = T(0xDEADBEEFCAFED00Dlu); T expected_result (fill_value); for(auto bit_index : bit_indices) { expected_result &= ~(T(1) << bit_index); } auto testcase_device_function = [=] KAT_DEV ( size_t, T* __restrict target, const bit_index_type* __restrict bit_indices ) { namespace gi = kat::linear_grid::grid_info; auto bit_index = bit_indices[gi::thread::global_index()]; // TODO: Should I sleep here? Use a block::barrier()? kat::atomic::unset_bit(target, bit_index); }; const auto num_values_to_populate { input_length }; // Note: The result array will also have this many values - but we won't be using them. auto result_container = execute_non_uniform_testcase_on_gpu( testcase_device_function, num_values_to_populate, fill_value, num_grid_blocks, num_threads_per_block, bit_indices.data() ); T result { result_container[0] }; CHECK(result == expected_result); } // Note: Not testing apply_atomically, since half the tests here actually test just that - // functions that are implemented using apply_atomically(). } // TEST_SUITE("atomics")
the_stack
#include "_reg_common_gpu.h"

// ---------------------------------------------------------------------------
// Constant-memory parameters, populated from the host before kernel launches.
// ---------------------------------------------------------------------------
__device__ __constant__ int c_UseBSpline;   // non-zero: use the cubic B-spline basis; zero: the alternative spline basis (see kernels below)
__device__ __constant__ int c_VoxelNumber;
__device__ __constant__ int c_ControlPointNumber;
__device__ __constant__ int3 c_ReferenceImageDim;
__device__ __constant__ int3 c_ControlPointImageDim;
__device__ __constant__ float3 c_ControlPointVoxelSpacing;  // control-point grid spacing expressed in reference-image voxels
__device__ __constant__ float3 c_ControlPointSpacing;
__device__ __constant__ float3 c_ReferenceSpacing;
__device__ __constant__ float c_Weight;
__device__ __constant__ float3 c_Weight3;
__device__ __constant__ int c_ActiveVoxelNumber;
__device__ __constant__ bool c_Type;
// Affine matrix rows, three variants (plain float3 rows, and two float4 sets).
__device__ __constant__ float3 c_AffineMatrix0;
__device__ __constant__ float3 c_AffineMatrix1;
__device__ __constant__ float3 c_AffineMatrix2;
__device__ __constant__ float4 c_AffineMatrix0b;
__device__ __constant__ float4 c_AffineMatrix1b;
__device__ __constant__ float4 c_AffineMatrix2b;
__device__ __constant__ float4 c_AffineMatrix0c;
__device__ __constant__ float4 c_AffineMatrix1c;
__device__ __constant__ float4 c_AffineMatrix2c;
/* *************************************************************** */
/* *************************************************************** */
// Legacy texture references; bound on the host side before each launch.
texture<float4, 1, cudaReadModeElementType> controlPointTexture;
texture<float4, 1, cudaReadModeElementType> secondDerivativesTexture;
texture<float4,1, cudaReadModeElementType> voxelDeformationTexture;
texture<int, 1, cudaReadModeElementType> maskTexture;
texture<float,1, cudaReadModeElementType> jacobianDeterminantTexture;
texture<float,1, cudaReadModeElementType> jacobianMatricesTexture;
/* *************************************************************** */
/* *************************************************************** */
// Component-wise arithmetic helpers for the CUDA vector types.
// NOTE(review): the float4 overloads of scalar*, + and - below set the result
// w component to 0 instead of combining a.w/b.w - presumably w is unused by
// the callers in this file; confirm before reusing these operators elsewhere.
__device__ float2 operator*(float a, float2 b){
	return make_float2(a*b.x, a*b.y);
}
__device__ float3 operator*(float a, float3 b){
	return make_float3(a*b.x, a*b.y, a*b.z);
}
__device__ float3 operator*(float3 a, float3 b){
	return make_float3(a.x*b.x, a.y*b.y, a.z*b.z);
}
__device__ float4 operator*(float4 a, float4 b){
	return make_float4(a.x*b.x, a.y*b.y, a.z*b.z, a.w*b.w);
}
__device__ float4 operator*(float a, float4 b){
	return make_float4(a*b.x, a*b.y, a*b.z, 0.0f); // w dropped (see note above)
}
/* *************************************************************** */
__device__ float2 operator/(float2 a, float2 b){
	return make_float2(a.x/b.x, a.y/b.y);
}
__device__ float3 operator/(float3 a, float b){
	return make_float3(a.x/b, a.y/b, a.z/b);
}
__device__ float3 operator/(float3 a, float3 b){
	return make_float3(a.x/b.x, a.y/b.y, a.z/b.z);
}
/* *************************************************************** */
__device__ float2 operator+(float2 a, float2 b){
	return make_float2(a.x+b.x, a.y+b.y);
}
__device__ float4 operator+(float4 a, float4 b){
	return make_float4(a.x+b.x, a.y+b.y, a.z+b.z, 0.0f); // w dropped (see note above)
}
__device__ float3 operator+(float3 a, float3 b){
	return make_float3(a.x+b.x, a.y+b.y, a.z+b.z);
}
/* *************************************************************** */
__device__ float3 operator-(float3 a, float3 b){
	return make_float3(a.x-b.x, a.y-b.y, a.z-b.z);
}
__device__ float4 operator-(float4 a, float4 b){
	return make_float4(a.x-b.x, a.y-b.y, a.z-b.z, 0.f); // w dropped (see note above)
}
/* *************************************************************** */
/* *************************************************************** */
// Cubic B-spline basis weights at parameter 'basis' in [0,1]:
// values[0..3] weight the four neighbouring control points.
__device__ void GetBasisBSplineValues(float basis, float *values)
{
	float FF= basis*basis;
	float FFF= FF*basis;
	float MF=1.f-basis;
	values[0] = (MF)*(MF)*(MF)/(6.f);
	values[1] = (3.f*FFF - 6.f*FF + 4.f)/6.f;
	values[2] = (-3.f*FFF + 3.f*FF + 3.f*basis + 1.f)/6.f;
	values[3] = (FFF/6.f);
}
/* *************************************************************** */
// B-spline basis weights and their first derivatives at 'basis'.
__device__ void GetFirstBSplineValues(float basis, float *values, float *first)
{
	GetBasisBSplineValues(basis, values);
	first[3]= basis * basis / 2.f;
	first[0]= basis - 0.5f - first[3];
	first[2]= 1.f + first[0] - 2.f*first[3];
	first[1]= - first[0] - first[2] - first[3];
}
/*
*************************************************************** */ /* *************************************************************** */ __device__ void GetBasisSplineValues(float basis, float *values) { float FF= basis*basis; values[0] = (basis * ((2.f-basis)*basis - 1.f))/2.f; values[1] = (FF * (3.f*basis-5.f) + 2.f)/2.f; values[2] = (basis * ((4.f-3.f*basis)*basis + 1.f))/2.f; values[3] = (basis-1.f) * FF/2.f; } /* *************************************************************** */ __device__ void GetBasisSplineValuesX(float basis, float4 *values) { float FF= basis*basis; values->x = (basis * ((2.f-basis)*basis - 1.f))/2.f; values->y = (FF * (3.f*basis-5.f) + 2.f)/2.f; values->z = (basis * ((4.f-3.f*basis)*basis + 1.f))/2.f; values->w = (basis-1.f) * FF/2.f; } /* *************************************************************** */ __device__ void getBSplineBasisValue(float basis, int index, float *value, float *first) { switch(index){ case 0: *value = (1.f-basis)*(1.f-basis)*(1.f-basis)/6.f; *first = (2.f*basis - basis*basis - 1.f)/2.f; break; case 1: *value = (3.f*basis*basis*basis - 6.f*basis*basis + 4.f)/6.f; *first = (3.f*basis*basis - 4.f*basis)/2.f; break; case 2: *value = (3.f*basis*basis - 3.f*basis*basis*basis + 3.f*basis + 1.f)/6.f; *first = (2.f*basis - 3.f*basis*basis + 1.f)/2.f; break; case 3: *value = basis*basis*basis/6.f; *first = basis*basis/2.f; break; default: *value = 0.f; *first = 0.f; break; } } /* *************************************************************** */ __device__ void GetFirstDerivativeBasisValues2D(int index, float *xBasis, float *yBasis){ switch(index){ case 0: xBasis[0]=-0.0833333f;yBasis[0]=-0.0833333f;break; case 1: xBasis[1]=0.f;yBasis[1]=-0.333333f;break; case 2: xBasis[2]=0.0833333f;yBasis[2]=-0.0833333f;break; case 3: xBasis[3]=-0.333333f;yBasis[3]=0.f;break; case 4: xBasis[4]=0.f;yBasis[4]=0.f;break; case 5: xBasis[5]=0.333333f;yBasis[5]=0.f;break; case 6: xBasis[6]=-0.0833333f;yBasis[6]=0.0833333f;break; case 7: 
xBasis[7]=0.f;yBasis[7]=0.333333f;break; case 8: xBasis[8]=0.0833333f;yBasis[8]=0.0833333f;break; } } /* *************************************************************** */ __device__ void GetFirstDerivativeBasisValues3D(int index, float *xBasis, float *yBasis, float *zBasis){ switch(index){ case 0: xBasis[0]=-0.013889f;yBasis[0]=-0.013889f;zBasis[0]=-0.013889f;break; case 1: xBasis[1]=0.000000f;yBasis[1]=-0.055556f;zBasis[1]=-0.055556f;break; case 2: xBasis[2]=0.013889f;yBasis[2]=-0.013889f;zBasis[2]=-0.013889f;break; case 3: xBasis[3]=-0.055556f;yBasis[3]=0.000000f;zBasis[3]=-0.055556f;break; case 4: xBasis[4]=0.000000f;yBasis[4]=0.000000f;zBasis[4]=-0.222222f;break; case 5: xBasis[5]=0.055556f;yBasis[5]=0.000000f;zBasis[5]=-0.055556f;break; case 6: xBasis[6]=-0.013889f;yBasis[6]=0.013889f;zBasis[6]=-0.013889f;break; case 7: xBasis[7]=0.000000f;yBasis[7]=0.055556f;zBasis[7]=-0.055556f;break; case 8: xBasis[8]=0.013889f;yBasis[8]=0.013889f;zBasis[8]=-0.013889f;break; case 9: xBasis[9]=-0.055556f;yBasis[9]=-0.055556f;zBasis[9]=0.000000f;break; case 10: xBasis[10]=0.000000f;yBasis[10]=-0.222222f;zBasis[10]=0.000000f;break; case 11: xBasis[11]=0.055556f;yBasis[11]=-0.055556f;zBasis[11]=0.000000f;break; case 12: xBasis[12]=-0.222222f;yBasis[12]=0.000000f;zBasis[12]=0.000000f;break; case 13: xBasis[13]=0.000000f;yBasis[13]=0.000000f;zBasis[13]=0.000000f;break; case 14: xBasis[14]=0.222222f;yBasis[14]=0.000000f;zBasis[14]=0.000000f;break; case 15: xBasis[15]=-0.055556f;yBasis[15]=0.055556f;zBasis[15]=0.000000f;break; case 16: xBasis[16]=0.000000f;yBasis[16]=0.222222f;zBasis[16]=0.000000f;break; case 17: xBasis[17]=0.055556f;yBasis[17]=0.055556f;zBasis[17]=0.000000f;break; case 18: xBasis[18]=-0.013889f;yBasis[18]=-0.013889f;zBasis[18]=0.013889f;break; case 19: xBasis[19]=0.000000f;yBasis[19]=-0.055556f;zBasis[19]=0.055556f;break; case 20: xBasis[20]=0.013889f;yBasis[20]=-0.013889f;zBasis[20]=0.013889f;break; case 21: 
xBasis[21]=-0.055556f;yBasis[21]=0.000000f;zBasis[21]=0.055556f;break; case 22: xBasis[22]=0.000000f;yBasis[22]=0.000000f;zBasis[22]=0.222222f;break; case 23: xBasis[23]=0.055556f;yBasis[23]=0.000000f;zBasis[23]=0.055556f;break; case 24: xBasis[24]=-0.013889f;yBasis[24]=0.013889f;zBasis[24]=0.013889f;break; case 25: xBasis[25]=0.000000f;yBasis[25]=0.055556f;zBasis[25]=0.055556f;break; case 26: xBasis[26]=0.013889f;yBasis[26]=0.013889f;zBasis[26]=0.013889f;break; } } /* *************************************************************** */ __device__ void GetSecondDerivativeBasisValues2D(int index, float *xxBasis, float *yyBasis, float *xyBasis){ switch(index){ case 0: xxBasis[0]=0.166667f;yyBasis[0]=0.166667f;xyBasis[0]=0.25f; break; case 1: xxBasis[1]=-0.333333f;yyBasis[1]=0.666667f;xyBasis[1]=-0.f; break; case 2: xxBasis[2]=0.166667f;yyBasis[2]=0.166667f;xyBasis[2]=-0.25f; break; case 3: xxBasis[3]=0.666667f;yyBasis[3]=-0.333333f;xyBasis[3]=-0.f; break; case 4: xxBasis[4]=-1.33333f;yyBasis[4]=-1.33333f;xyBasis[4]=0.f; break; case 5: xxBasis[5]=0.666667f;yyBasis[5]=-0.333333f;xyBasis[5]=0.f; break; case 6: xxBasis[6]=0.166667f;yyBasis[6]=0.166667f;xyBasis[6]=-0.25f; break; case 7: xxBasis[7]=-0.333333f;yyBasis[7]=0.666667f;xyBasis[7]=0.f; break; case 8: xxBasis[8]=0.166667f;yyBasis[8]=0.166667f;xyBasis[8]=0.25f; break; } } /* *************************************************************** */ __device__ void GetSecondDerivativeBasisValues3D(int index, float *xxBasis, float *yyBasis, float *zzBasis, float *xyBasis, float *yzBasis, float *xzBasis){ switch(index){ case 0: xxBasis[0]=0.027778f;yyBasis[0]=0.027778f;zzBasis[0]=0.027778f; xyBasis[0]=0.041667f;yzBasis[0]=0.041667f;xzBasis[0]=0.041667f; break; case 1: xxBasis[1]=-0.055556f;yyBasis[1]=0.111111f;zzBasis[1]=0.111111f; xyBasis[1]=-0.000000f;yzBasis[1]=0.166667f;xzBasis[1]=-0.000000f; break; case 2: xxBasis[2]=0.027778f;yyBasis[2]=0.027778f;zzBasis[2]=0.027778f; 
xyBasis[2]=-0.041667f;yzBasis[2]=0.041667f;xzBasis[2]=-0.041667f; break; case 3: xxBasis[3]=0.111111f;yyBasis[3]=-0.055556f;zzBasis[3]=0.111111f; xyBasis[3]=-0.000000f;yzBasis[3]=-0.000000f;xzBasis[3]=0.166667f; break; case 4: xxBasis[4]=-0.222222f;yyBasis[4]=-0.222222f;zzBasis[4]=0.444444f; xyBasis[4]=0.000000f;yzBasis[4]=-0.000000f;xzBasis[4]=-0.000000f; break; case 5: xxBasis[5]=0.111111f;yyBasis[5]=-0.055556f;zzBasis[5]=0.111111f; xyBasis[5]=0.000000f;yzBasis[5]=-0.000000f;xzBasis[5]=-0.166667f; break; case 6: xxBasis[6]=0.027778f;yyBasis[6]=0.027778f;zzBasis[6]=0.027778f; xyBasis[6]=-0.041667f;yzBasis[6]=-0.041667f;xzBasis[6]=0.041667f; break; case 7: xxBasis[7]=-0.055556f;yyBasis[7]=0.111111f;zzBasis[7]=0.111111f; xyBasis[7]=0.000000f;yzBasis[7]=-0.166667f;xzBasis[7]=-0.000000f; break; case 8: xxBasis[8]=0.027778f;yyBasis[8]=0.027778f;zzBasis[8]=0.027778f; xyBasis[8]=0.041667f;yzBasis[8]=-0.041667f;xzBasis[8]=-0.041667f; break; case 9: xxBasis[9]=0.111111f;yyBasis[9]=0.111111f;zzBasis[9]=-0.055556f; xyBasis[9]=0.166667f;yzBasis[9]=-0.000000f;xzBasis[9]=-0.000000f; break; case 10: xxBasis[10]=-0.222222f;yyBasis[10]=0.444444f;zzBasis[10]=-0.222222f; xyBasis[10]=-0.000000f;yzBasis[10]=-0.000000f;xzBasis[10]=0.000000f; break; case 11: xxBasis[11]=0.111111f;yyBasis[11]=0.111111f;zzBasis[11]=-0.055556f; xyBasis[11]=-0.166667f;yzBasis[11]=-0.000000f;xzBasis[11]=0.000000f; break; case 12: xxBasis[12]=0.444444f;yyBasis[12]=-0.222222f;zzBasis[12]=-0.222222f; xyBasis[12]=-0.000000f;yzBasis[12]=0.000000f;xzBasis[12]=-0.000000f; break; case 13: xxBasis[13]=-0.888889f;yyBasis[13]=-0.888889f;zzBasis[13]=-0.888889f; xyBasis[13]=0.000000f;yzBasis[13]=0.000000f;xzBasis[13]=0.000000f; break; case 14: xxBasis[14]=0.444444f;yyBasis[14]=-0.222222f;zzBasis[14]=-0.222222f; xyBasis[14]=0.000000f;yzBasis[14]=0.000000f;xzBasis[14]=0.000000f; break; case 15: xxBasis[15]=0.111111f;yyBasis[15]=0.111111f;zzBasis[15]=-0.055556f; 
xyBasis[15]=-0.166667f;yzBasis[15]=0.000000f;xzBasis[15]=-0.000000f; break; case 16: xxBasis[16]=-0.222222f;yyBasis[16]=0.444444f;zzBasis[16]=-0.222222f; xyBasis[16]=0.000000f;yzBasis[16]=0.000000f;xzBasis[16]=0.000000f; break; case 17: xxBasis[17]=0.111111f;yyBasis[17]=0.111111f;zzBasis[17]=-0.055556f; xyBasis[17]=0.166667f;yzBasis[17]=0.000000f;xzBasis[17]=0.000000f; break; case 18: xxBasis[18]=0.027778f;yyBasis[18]=0.027778f;zzBasis[18]=0.027778f; xyBasis[18]=0.041667f;yzBasis[18]=-0.041667f;xzBasis[18]=-0.041667f; break; case 19: xxBasis[19]=-0.055556f;yyBasis[19]=0.111111f;zzBasis[19]=0.111111f; xyBasis[19]=-0.000000f;yzBasis[19]=-0.166667f;xzBasis[19]=0.000000f; break; case 20: xxBasis[20]=0.027778f;yyBasis[20]=0.027778f;zzBasis[20]=0.027778f; xyBasis[20]=-0.041667f;yzBasis[20]=-0.041667f;xzBasis[20]=0.041667f; break; case 21: xxBasis[21]=0.111111f;yyBasis[21]=-0.055556f;zzBasis[21]=0.111111f; xyBasis[21]=-0.000000f;yzBasis[21]=0.000000f;xzBasis[21]=-0.166667f; break; case 22: xxBasis[22]=-0.222222f;yyBasis[22]=-0.222222f;zzBasis[22]=0.444444f; xyBasis[22]=0.000000f;yzBasis[22]=0.000000f;xzBasis[22]=0.000000f; break; case 23: xxBasis[23]=0.111111f;yyBasis[23]=-0.055556f;zzBasis[23]=0.111111f; xyBasis[23]=0.000000f;yzBasis[23]=0.000000f;xzBasis[23]=0.166667f; break; case 24: xxBasis[24]=0.027778f;yyBasis[24]=0.027778f;zzBasis[24]=0.027778f; xyBasis[24]=-0.041667f;yzBasis[24]=0.041667f;xzBasis[24]=-0.041667f; break; case 25: xxBasis[25]=-0.055556f;yyBasis[25]=0.111111f;zzBasis[25]=0.111111f; xyBasis[25]=0.000000f;yzBasis[25]=0.166667f;xzBasis[25]=0.000000f; break; case 26: xxBasis[26]=0.027778f;yyBasis[26]=0.027778f;zzBasis[26]=0.027778f; xyBasis[26]=0.041667f;yzBasis[26]=0.041667f;xzBasis[26]=0.041667f; break; } } /* *************************************************************** */ /* *************************************************************** */ __device__ float4 get_SlidedValues_gpu(int x, int y) { int newX=x; int newY=y; if(x<0){ newX=0; } else 
if(x>=c_ReferenceImageDim.x){ newX=c_ReferenceImageDim.x-1; } if(y<0){ newY=0; } else if(y>=c_ReferenceImageDim.y){ newY=c_ReferenceImageDim.y-1; } x=x-newX; y=y-newY; float4 slidedValues = make_float4( x * c_AffineMatrix0c.x + y * c_AffineMatrix0c.y, x * c_AffineMatrix1c.x + y * c_AffineMatrix1c.y, 0.f, 0.f); slidedValues = slidedValues + tex1Dfetch(voxelDeformationTexture, newY*c_ReferenceImageDim.x+newX); return slidedValues; } /* *************************************************************** */ /* *************************************************************** */ __device__ float4 get_SlidedValues_gpu(int x, int y, int z) { int newX=x; int newY=y; int newZ=z; if(x<0){ newX=0; } else if(x>=c_ReferenceImageDim.x){ newX=c_ReferenceImageDim.x-1; } if(y<0){ newY=0; } else if(y>=c_ReferenceImageDim.y){ newY=c_ReferenceImageDim.y-1; } if(z<0){ newZ=0; } else if(z>=c_ReferenceImageDim.z){ newZ=c_ReferenceImageDim.z-1; } x=x-newX; y=y-newY; z=z-newZ; float4 slidedValues = make_float4( x * c_AffineMatrix0c.x + y * c_AffineMatrix0c.y + z * c_AffineMatrix0c.z, x * c_AffineMatrix1c.x + y * c_AffineMatrix1c.y + z * c_AffineMatrix1c.z, x * c_AffineMatrix2c.x + y * c_AffineMatrix2c.y + z * c_AffineMatrix2c.z, 0.f); slidedValues = slidedValues + tex1Dfetch(voxelDeformationTexture, (newZ*c_ReferenceImageDim.y+newY)*c_ReferenceImageDim.x+newX); return slidedValues; } /* *************************************************************** */ /* *************************************************************** */ __global__ void reg_spline_getDeformationField3D(float4 *positionField) { const unsigned int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(tid<c_ActiveVoxelNumber){ // Allocate the shared memory extern __shared__ float yBasis[]; // Compute the shared memory offset which correspond to four times the number of thread per block float *zBasis=&yBasis[4*blockDim.x*blockDim.y*blockDim.z]; int3 imageSize = c_ReferenceImageDim; unsigned int 
tempIndex=tex1Dfetch(maskTexture,tid); const int z = tempIndex/(imageSize.x*imageSize.y); tempIndex -= z*imageSize.x*imageSize.y; const int y = tempIndex/imageSize.x; const int x = tempIndex - y*imageSize.x; // the "nearest previous" node is determined [0,0,0] int3 nodeAnte; float3 gridVoxelSpacing = c_ControlPointVoxelSpacing; nodeAnte.x = (int)floorf((float)x/gridVoxelSpacing.x); nodeAnte.y = (int)floorf((float)y/gridVoxelSpacing.y); nodeAnte.z = (int)floorf((float)z/gridVoxelSpacing.z); const int shareMemIndex = 4*threadIdx.x; // Z basis values float relative = fabsf((float)z/gridVoxelSpacing.z-(float)nodeAnte.z); relative=relative>0?relative:0.f; if(c_UseBSpline) GetBasisBSplineValues(relative, &zBasis[shareMemIndex]); else GetBasisSplineValues(relative, &zBasis[shareMemIndex]); // Y basis values relative = fabsf((float)y/gridVoxelSpacing.y-(float)nodeAnte.y); relative=relative>0?relative:0.f; if(c_UseBSpline) GetBasisBSplineValues(relative, &yBasis[shareMemIndex]); else GetBasisSplineValues(relative, &yBasis[shareMemIndex]); // X basis values float xBasis[4]; relative = fabsf((float)x/gridVoxelSpacing.x-(float)nodeAnte.x); relative=relative>0?relative:0.f; if(c_UseBSpline) GetBasisBSplineValues(relative, xBasis); else GetBasisSplineValues(relative, xBasis); int3 controlPointImageDim = c_ControlPointImageDim; float4 displacement=make_float4(0.0f,0.0f,0.0f,0.0f); float basis; float3 tempDisplacement; for(int c=0; c<4; c++){ tempDisplacement=make_float3(0.0f,0.0f,0.0f); int indexYZ= ( (nodeAnte.z + c) * controlPointImageDim.y + nodeAnte.y) * controlPointImageDim.x; for(int b=0; b<4; b++){ int indexXYZ = indexYZ + nodeAnte.x; float4 nodeCoefficientA = tex1Dfetch(controlPointTexture,indexXYZ++); float4 nodeCoefficientB = tex1Dfetch(controlPointTexture,indexXYZ++); float4 nodeCoefficientC = tex1Dfetch(controlPointTexture,indexXYZ++); float4 nodeCoefficientD = tex1Dfetch(controlPointTexture,indexXYZ); basis=yBasis[shareMemIndex+b]; tempDisplacement.x += ( 
nodeCoefficientA.x * xBasis[0] + nodeCoefficientB.x * xBasis[1] + nodeCoefficientC.x * xBasis[2] + nodeCoefficientD.x * xBasis[3] ) * basis; tempDisplacement.y += ( nodeCoefficientA.y * xBasis[0] + nodeCoefficientB.y * xBasis[1] + nodeCoefficientC.y * xBasis[2] + nodeCoefficientD.y * xBasis[3] ) * basis; tempDisplacement.z += ( nodeCoefficientA.z * xBasis[0] + nodeCoefficientB.z * xBasis[1] + nodeCoefficientC.z * xBasis[2] + nodeCoefficientD.z * xBasis[3] ) * basis; indexYZ += controlPointImageDim.x; } basis = zBasis[shareMemIndex+c]; displacement.x += tempDisplacement.x * basis; displacement.y += tempDisplacement.y * basis; displacement.z += tempDisplacement.z * basis; } positionField[tid] = displacement; } return; } /* *************************************************************** */ /* *************************************************************** */ __global__ void reg_spline_getDeformationField2D(float4 *positionField) { const unsigned int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(tid<c_ActiveVoxelNumber){ // Allocate the shared memory extern __shared__ float yBasis[]; int3 imageSize = c_ReferenceImageDim; unsigned int tempIndex=tex1Dfetch(maskTexture,tid); const int y = tempIndex/imageSize.x; const int x = tempIndex - y*imageSize.x; // the "nearest previous" node is determined [0,0,0] int2 nodeAnte; float2 gridVoxelSpacing = make_float2(c_ControlPointVoxelSpacing.x, c_ControlPointVoxelSpacing.y); nodeAnte.x = (int)floorf((float)x/gridVoxelSpacing.x); nodeAnte.y = (int)floorf((float)y/gridVoxelSpacing.y); const int shareMemIndex = 4*threadIdx.x; // Y basis values float relative = fabsf((float)y/gridVoxelSpacing.y-(float)nodeAnte.y); if(c_UseBSpline) GetBasisBSplineValues(relative, &yBasis[shareMemIndex]); else GetBasisSplineValues(relative, &yBasis[shareMemIndex]); // X basis values float xBasis[4]; relative = fabsf((float)x/gridVoxelSpacing.x-(float)nodeAnte.x); if(c_UseBSpline) GetBasisBSplineValues(relative, xBasis); else 
GetBasisSplineValues(relative, xBasis);
        int2 controlPointImageDim = make_int2(c_ControlPointImageDim.x, c_ControlPointImageDim.y);
        float4 displacement=make_float4(0.0f,0.0f,0.0f,0.0f);
        float basis;
        // Separable accumulation over the 4x4 neighbourhood: inner sum over X
        // weighted by xBasis, outer weight by the per-thread yBasis values
        for(int b=0; b<4; b++){
            int index = (nodeAnte.y + b) * controlPointImageDim.x + nodeAnte.x;
            float4 nodeCoefficientA = tex1Dfetch(controlPointTexture,index++);
            float4 nodeCoefficientB = tex1Dfetch(controlPointTexture,index++);
            float4 nodeCoefficientC = tex1Dfetch(controlPointTexture,index++);
            float4 nodeCoefficientD = tex1Dfetch(controlPointTexture,index);
            basis=yBasis[shareMemIndex+b];
            displacement.x += basis * (
                    nodeCoefficientA.x * xBasis[0]
                    + nodeCoefficientB.x * xBasis[1]
                    + nodeCoefficientC.x * xBasis[2]
                    + nodeCoefficientD.x * xBasis[3]);
            displacement.y += basis * (
                    nodeCoefficientA.y * xBasis[0]
                    + nodeCoefficientB.y * xBasis[1]
                    + nodeCoefficientC.y * xBasis[2]
                    + nodeCoefficientD.y * xBasis[3]);
        }
        positionField[tid] = displacement;
    }
    return;
}
/* *************************************************************** */
/* *************************************************************** */
// Approximated second derivatives of a 2D spline grid: one thread per
// control point; stores XX, YY and XY values (3 float4 per node) into
// secondDerivativeValues. Border nodes keep the zero initialisation.
__global__ void reg_spline_getApproxSecondDerivatives2D(float4 *secondDerivativeValues)
{
    __shared__ float xxbasis[9];
    __shared__ float yybasis[9];
    __shared__ float xybasis[9];
    // The 3x3 second-derivative basis values are computed once per block
    if(threadIdx.x<9) GetSecondDerivativeBasisValues2D(threadIdx.x, xxbasis, yybasis, xybasis);
    __syncthreads();
    const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
    if(tid<c_ControlPointNumber){
        int3 gridSize = c_ControlPointImageDim;
        const int y =(int)(tid/gridSize.x);
        const int x = int(tid - y*gridSize.x);
        float4 XX = make_float4(0.0f,0.0f,0.0f,0.0f);
        float4 YY = make_float4(0.0f,0.0f,0.0f,0.0f);
        float4 XY = make_float4(0.0f,0.0f,0.0f,0.0f);
        int tempIndex;
        // Only interior nodes have a full 3x3 neighbourhood
        if(0<x && x<gridSize.x-1 && 0<y && y<gridSize.y-1){
            tempIndex=0;
            for(int b=y-1; b<y+2; ++b){
                for(int a=x-1; a<x+2; ++a){
                    int indexXY = b*gridSize.x+a;
                    float4 controlPointValues = tex1Dfetch(controlPointTexture,indexXY);
                    XX.x = XX.x +
xxbasis[tempIndex] * controlPointValues.x;
                    XX.y = XX.y + xxbasis[tempIndex] * controlPointValues.y;
                    YY.x = YY.x + yybasis[tempIndex] * controlPointValues.x;
                    YY.y = YY.y + yybasis[tempIndex] * controlPointValues.y;
                    XY.x = XY.x + xybasis[tempIndex] * controlPointValues.x;
                    XY.y = XY.y + xybasis[tempIndex] * controlPointValues.y;
                    ++tempIndex;
                }
            }
        }
        // Three float4 values are stored per control point: XX, YY, XY
        tempIndex=3*tid;
        secondDerivativeValues[tempIndex++]=XX;
        secondDerivativeValues[tempIndex++]=YY;
        secondDerivativeValues[tempIndex] = XY;
    }
    return;
}
/* *************************************************************** */
/* *************************************************************** */
// Approximated second derivatives of a 3D spline grid: one thread per
// control point; stores XX, YY, ZZ, XY, YZ, XZ (6 float4 per node) into
// secondDerivativeValues. Border nodes keep the zero initialisation.
__global__ void reg_spline_getApproxSecondDerivatives3D(float4 *secondDerivativeValues)
{
    __shared__ float xxbasis[27];
    __shared__ float yybasis[27];
    __shared__ float zzbasis[27];
    __shared__ float xybasis[27];
    __shared__ float yzbasis[27];
    __shared__ float xzbasis[27];
    // The 3x3x3 second-derivative basis values are computed once per block
    if(threadIdx.x<27) GetSecondDerivativeBasisValues3D(threadIdx.x, xxbasis, yybasis, zzbasis, xybasis, yzbasis, xzbasis);
    __syncthreads();
    const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
    if(tid<c_ControlPointNumber){
        int3 gridSize = c_ControlPointImageDim;
        // Convert the linear node index into (x,y,z) grid coordinates
        int tempIndex=tid;
        const int z =(int)(tempIndex/(gridSize.x*gridSize.y));
        tempIndex -= int(z*gridSize.x*gridSize.y);
        const int y =(int)(tempIndex/gridSize.x);
        const int x = int(tempIndex - y*gridSize.x);
        float4 XX = make_float4(0.0f,0.0f,0.0f,0.0f);
        float4 YY = make_float4(0.0f,0.0f,0.0f,0.0f);
        float4 ZZ = make_float4(0.0f,0.0f,0.0f,0.0f);
        float4 XY = make_float4(0.0f,0.0f,0.0f,0.0f);
        float4 YZ = make_float4(0.0f,0.0f,0.0f,0.0f);
        float4 XZ = make_float4(0.0f,0.0f,0.0f,0.0f);
        // Only interior nodes have a full 3x3x3 neighbourhood
        if(0<x && x<gridSize.x-1 && 0<y && y<gridSize.y-1 && 0<z && z<gridSize.z-1){
            tempIndex=0;
            for(int c=z-1; c<z+2; ++c){
                for(int b=y-1; b<y+2; ++b){
                    for(int a=x-1; a<x+2; ++a){
                        int indexXYZ = (c*gridSize.y+b)*gridSize.x+a;
                        float4 controlPointValues = tex1Dfetch(controlPointTexture,indexXYZ);
                        XX = XX + xxbasis[tempIndex] *
controlPointValues;
                        YY = YY + yybasis[tempIndex] * controlPointValues;
                        ZZ = ZZ + zzbasis[tempIndex] * controlPointValues;
                        XY = XY + xybasis[tempIndex] * controlPointValues;
                        YZ = YZ + yzbasis[tempIndex] * controlPointValues;
                        XZ = XZ + xzbasis[tempIndex] * controlPointValues;
                        tempIndex++;
                    }
                }
            }
        }
        // Six float4 values are stored per control point
        tempIndex=6*tid;
        secondDerivativeValues[tempIndex++]=XX;
        secondDerivativeValues[tempIndex++]=YY;
        secondDerivativeValues[tempIndex++]=ZZ;
        secondDerivativeValues[tempIndex++]=XY;
        secondDerivativeValues[tempIndex++]=YZ;
        secondDerivativeValues[tempIndex] = XZ;
    }
}
/* *************************************************************** */
// 2D bending-energy value per control point, from the stored second
// derivatives (3 float4 per node); the mixed XY term is doubled.
__global__ void reg_spline_getApproxBendingEnergy2D_kernel(float *penaltyTerm)
{
    const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
    if(tid<c_ControlPointNumber){
        int index=tid*3;
        float4 XX = tex1Dfetch(secondDerivativesTexture,index++);XX=XX*XX;
        float4 YY = tex1Dfetch(secondDerivativesTexture,index++);YY=YY*YY;
        float4 XY = tex1Dfetch(secondDerivativesTexture,index++);XY=XY*XY;
        penaltyTerm[tid]= XX.x + XX.y + YY.x + YY.y + 2.f*(XY.x + XY.y);
    }
    return;
}
/* *************************************************************** */
// 3D bending-energy value per control point (6 float4 per node);
// the three mixed terms are doubled.
__global__ void reg_spline_getApproxBendingEnergy3D_kernel(float *penaltyTerm)
{
    const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
    if(tid<c_ControlPointNumber){
        int index=tid*6;
        float4 XX = tex1Dfetch(secondDerivativesTexture,index++);XX=XX*XX;
        float4 YY = tex1Dfetch(secondDerivativesTexture,index++);YY=YY*YY;
        float4 ZZ = tex1Dfetch(secondDerivativesTexture,index++);ZZ=ZZ*ZZ;
        float4 XY = tex1Dfetch(secondDerivativesTexture,index++);XY=XY*XY;
        float4 YZ = tex1Dfetch(secondDerivativesTexture,index++);YZ=YZ*YZ;
        float4 XZ = tex1Dfetch(secondDerivativesTexture,index);XZ=XZ*XZ;
        penaltyTerm[tid]= XX.x + XX.y + XX.z
                        + YY.x + YY.y + YY.z
                        + ZZ.x + ZZ.y + ZZ.z
                        + 2.f*(XY.x + XY.y + XY.z
                             + YZ.x + YZ.y + YZ.z
                             + XZ.x + XZ.y + XZ.z);
    }
    return;
}
/*
*************************************************************** */
// 2D bending-energy gradient: adds the c_Weight-scaled derivative of the
// bending-energy term to nodeGradientArray for every control point, using
// the stored second derivatives of the 3x3 neighbourhood.
__global__ void reg_spline_getApproxBendingEnergyGradient2D_kernel(float4 *nodeGradientArray)
{
    __shared__ float xxbasis[9];
    __shared__ float yybasis[9];
    __shared__ float xybasis[9];
    if(threadIdx.x<9) GetSecondDerivativeBasisValues2D(threadIdx.x, xxbasis, yybasis, xybasis);
    __syncthreads();
    const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
    if(tid<c_ControlPointNumber){
        int3 gridSize = c_ControlPointImageDim;
        const int y = tid/gridSize.x;
        const int x = tid - y*gridSize.x;
        float2 gradientValue=make_float2(0.0f,0.0f);
        float4 secondDerivativeValues;
        int coord=0;
        // Gather the 3x3 neighbourhood of stored second derivatives
        for(int b=y-1; b<y+2; ++b){
            for(int a=x-1; a<x+2; ++a){
                if(-1<a && -1<b && a<gridSize.x && b<gridSize.y){
                    int indexXY = 3*(b*gridSize.x+a);
                    secondDerivativeValues = tex1Dfetch(secondDerivativesTexture,indexXY++); // XX
                    gradientValue.x += secondDerivativeValues.x * xxbasis[coord];
                    gradientValue.y += secondDerivativeValues.y * xxbasis[coord];
                    secondDerivativeValues = tex1Dfetch(secondDerivativesTexture,indexXY++); // YY
                    gradientValue.x += secondDerivativeValues.x * yybasis[coord];
                    gradientValue.y += secondDerivativeValues.y * yybasis[coord];
                    // The mixed term is doubled
                    secondDerivativeValues = 2.f*tex1Dfetch(secondDerivativesTexture,indexXY); // XY
                    gradientValue.x += secondDerivativeValues.x * xybasis[coord];
                    gradientValue.y += secondDerivativeValues.y * xybasis[coord];
                }
                coord++;
            }
        }
        nodeGradientArray[tid].x += c_Weight*gradientValue.x;
        nodeGradientArray[tid].y += c_Weight*gradientValue.y;
    }
}
/* *************************************************************** */
// 3D bending-energy gradient: same scheme as the 2D kernel with a 3x3x3
// neighbourhood and six stored second-derivative values per node.
__global__ void reg_spline_getApproxBendingEnergyGradient3D_kernel(float4 *nodeGradientArray)
{
    __shared__ float xxbasis[27];
    __shared__ float yybasis[27];
    __shared__ float zzbasis[27];
    __shared__ float xybasis[27];
    __shared__ float yzbasis[27];
    __shared__ float xzbasis[27];
    if(threadIdx.x<27) GetSecondDerivativeBasisValues3D(threadIdx.x, xxbasis, yybasis, zzbasis, xybasis, yzbasis, xzbasis);
__syncthreads();
    const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
    if(tid<c_ControlPointNumber){
        int3 gridSize = c_ControlPointImageDim;
        // Convert the linear node index into (x,y,z) grid coordinates
        int tempIndex=tid;
        const int z = tempIndex/(gridSize.x*gridSize.y);
        tempIndex -= z*gridSize.x*gridSize.y;
        const int y = tempIndex/gridSize.x;
        const int x = tempIndex - y*gridSize.x;
        float3 gradientValue=make_float3(0.0f,0.0f,0.0f);
        float4 secondDerivativeValues;
        int coord=0;
        // Gather the 3x3x3 neighbourhood of stored second derivatives
        for(int c=z-1; c<z+2; ++c){
            for(int b=y-1; b<y+2; ++b){
                for(int a=x-1; a<x+2; ++a){
                    if(-1<a && -1<b && -1<c && a<gridSize.x && b<gridSize.y && c<gridSize.z){
                        unsigned int indexXYZ = 6*((c*gridSize.y+b)*gridSize.x+a);
                        secondDerivativeValues = tex1Dfetch(secondDerivativesTexture,indexXYZ++); // XX
                        gradientValue.x += secondDerivativeValues.x * xxbasis[coord];
                        gradientValue.y += secondDerivativeValues.y * xxbasis[coord];
                        gradientValue.z += secondDerivativeValues.z * xxbasis[coord];
                        secondDerivativeValues = tex1Dfetch(secondDerivativesTexture,indexXYZ++); // YY
                        gradientValue.x += secondDerivativeValues.x * yybasis[coord];
                        gradientValue.y += secondDerivativeValues.y * yybasis[coord];
                        gradientValue.z += secondDerivativeValues.z * yybasis[coord];
                        secondDerivativeValues = tex1Dfetch(secondDerivativesTexture,indexXYZ++); //ZZ
                        gradientValue.x += secondDerivativeValues.x * zzbasis[coord];
                        gradientValue.y += secondDerivativeValues.y * zzbasis[coord];
                        gradientValue.z += secondDerivativeValues.z * zzbasis[coord];
                        // The mixed terms are doubled
                        secondDerivativeValues = 2.f*tex1Dfetch(secondDerivativesTexture,indexXYZ++); // XY
                        gradientValue.x += secondDerivativeValues.x * xybasis[coord];
                        gradientValue.y += secondDerivativeValues.y * xybasis[coord];
                        gradientValue.z += secondDerivativeValues.z * xybasis[coord];
                        secondDerivativeValues = 2.f*tex1Dfetch(secondDerivativesTexture,indexXYZ++); // YZ
                        gradientValue.x += secondDerivativeValues.x * yzbasis[coord];
                        gradientValue.y += secondDerivativeValues.y * yzbasis[coord];
                        gradientValue.z += secondDerivativeValues.z * yzbasis[coord];
secondDerivativeValues = 2.f*tex1Dfetch(secondDerivativesTexture,indexXYZ); //XZ
                        gradientValue.x += secondDerivativeValues.x * xzbasis[coord];
                        gradientValue.y += secondDerivativeValues.y * xzbasis[coord];
                        gradientValue.z += secondDerivativeValues.z * xzbasis[coord];
                    }
                    coord++;
                }
            }
        }
        gradientValue = c_Weight * gradientValue;
        // Add the weighted regularisation gradient to the metric gradient
        float4 metricGradientValue;
        metricGradientValue = nodeGradientArray[tid];
        metricGradientValue.x += gradientValue.x;
        metricGradientValue.y += gradientValue.y;
        metricGradientValue.z += gradientValue.z;
        nodeGradientArray[tid]=metricGradientValue;
    }
}
/* *************************************************************** */
/* *************************************************************** */
// Approximated 2D Jacobian matrices and determinants, evaluated at the
// control-point positions. Boundary nodes get the identity matrix with a
// unit determinant. Matrices are reoriented with the affine matrix rows.
__global__ void reg_spline_getApproxJacobianValues2D_kernel(float *jacobianMatrices,
                                                            float *jacobianDet)
{
    __shared__ float xbasis[9];
    __shared__ float ybasis[9];
    if(threadIdx.x<9) GetFirstDerivativeBasisValues2D(threadIdx.x, xbasis, ybasis);
    __syncthreads();
    const unsigned int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
    if(tid<c_ControlPointNumber){
        int3 gridSize = c_ControlPointImageDim;
        int tempIndex=tid;
        const int y =tempIndex/gridSize.x;
        const int x = tempIndex - y*gridSize.x;
        if(0<x && x<gridSize.x-1 && 0<y && y<gridSize.y-1){
            float Tx_x=0, Tx_y=0;
            float Ty_x=0, Ty_y=0;
            tempIndex=0;
            for(int b=y-1; b<y+2; ++b){
                for(int a=x-1; a<x+2; ++a){
                    int indexXY = b * gridSize.x + a;
                    float4 controlPointValues = tex1Dfetch(controlPointTexture,indexXY);
                    Tx_x += xbasis[tempIndex]*controlPointValues.x;
                    Tx_y += ybasis[tempIndex]*controlPointValues.x;
                    Ty_x += xbasis[tempIndex]*controlPointValues.y;
                    Ty_y += ybasis[tempIndex]*controlPointValues.y;
                    tempIndex++;
                }
            }
            // The jacobian matrix is reoriented
            float Tx_x2=c_AffineMatrix0.x*Tx_x + c_AffineMatrix0.y*Ty_x;
            float Tx_y2=c_AffineMatrix0.x*Tx_y + c_AffineMatrix0.y*Ty_y;
            float Ty_x2=c_AffineMatrix1.x*Tx_x + c_AffineMatrix1.y*Ty_x;
            float Ty_y2=c_AffineMatrix1.x*Tx_y + c_AffineMatrix1.y*Ty_y;
            // The Jacobian matrix is stored
            tempIndex=tid*4;
            jacobianMatrices[tempIndex++]=Tx_x2;
            jacobianMatrices[tempIndex++]=Tx_y2;
            jacobianMatrices[tempIndex++]=Ty_x2;
            jacobianMatrices[tempIndex] = Ty_y2;
            // The Jacobian determinant is computed and stored
            jacobianDet[tid]= Tx_x2 * Ty_y2 - Tx_y2 * Ty_x2;
        }
        else{
            // Boundary node: identity Jacobian
            tempIndex=tid*4;
            jacobianMatrices[tempIndex++]=1.f;
            jacobianMatrices[tempIndex++]=0.f;
            jacobianMatrices[tempIndex++]=0.f;
            jacobianMatrices[tempIndex]=1.f;
            jacobianDet[tid]= 1.0f;
        }
    }
    return;
}
/* *************************************************************** */
/* *************************************************************** */
// Approximated 3D Jacobian matrices and determinants at control-point
// positions; boundary nodes get the identity matrix.
__global__ void reg_spline_getApproxJacobianValues3D_kernel(float *jacobianMatrices,
                                                            float *jacobianDet)
{
    __shared__ float xbasis[27];
    __shared__ float ybasis[27];
    __shared__ float zbasis[27];
    if(threadIdx.x<27) GetFirstDerivativeBasisValues3D(threadIdx.x, xbasis, ybasis, zbasis);
    __syncthreads();
    const unsigned int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
    if(tid<c_ControlPointNumber){
        int3 gridSize = c_ControlPointImageDim;
        int tempIndex=tid;
        const int z =tempIndex/(gridSize.x*gridSize.y);
        tempIndex -= z*gridSize.x*gridSize.y;
        const int y =tempIndex/gridSize.x;
        const int x = tempIndex - y*gridSize.x;
        if(0<x && x<gridSize.x-1 && 0<y && y<gridSize.y-1 && 0<z && z<gridSize.z-1){
            float Tx_x=0, Tx_y=0, Tx_z=0;
            float Ty_x=0, Ty_y=0, Ty_z=0;
            float Tz_x=0, Tz_y=0, Tz_z=0;
            tempIndex=0;
            for(int c=z-1; c<z+2; ++c){
                for(int b=y-1; b<y+2; ++b){
                    for(int a=x-1; a<x+2; ++a){
                        int indexXYZ = (c*gridSize.y+b)*gridSize.x+a;
                        float4 controlPointValues = tex1Dfetch(controlPointTexture,indexXYZ);
                        Tx_x += xbasis[tempIndex]*controlPointValues.x;
                        Tx_y += ybasis[tempIndex]*controlPointValues.x;
                        Tx_z += zbasis[tempIndex]*controlPointValues.x;
                        Ty_x += xbasis[tempIndex]*controlPointValues.y;
                        Ty_y += ybasis[tempIndex]*controlPointValues.y;
                        Ty_z += zbasis[tempIndex]*controlPointValues.y;
                        Tz_x += xbasis[tempIndex]*controlPointValues.z;
                        Tz_y +=
ybasis[tempIndex]*controlPointValues.z;
                        Tz_z += zbasis[tempIndex]*controlPointValues.z;
                        tempIndex++;
                    }
                }
            }
            // The jacobian matrix is reoriented
            float Tx_x2=c_AffineMatrix0.x*Tx_x + c_AffineMatrix0.y*Ty_x + c_AffineMatrix0.z*Tz_x;
            float Tx_y2=c_AffineMatrix0.x*Tx_y + c_AffineMatrix0.y*Ty_y + c_AffineMatrix0.z*Tz_y;
            float Tx_z2=c_AffineMatrix0.x*Tx_z + c_AffineMatrix0.y*Ty_z + c_AffineMatrix0.z*Tz_z;
            float Ty_x2=c_AffineMatrix1.x*Tx_x + c_AffineMatrix1.y*Ty_x + c_AffineMatrix1.z*Tz_x;
            float Ty_y2=c_AffineMatrix1.x*Tx_y + c_AffineMatrix1.y*Ty_y + c_AffineMatrix1.z*Tz_y;
            float Ty_z2=c_AffineMatrix1.x*Tx_z + c_AffineMatrix1.y*Ty_z + c_AffineMatrix1.z*Tz_z;
            float Tz_x2=c_AffineMatrix2.x*Tx_x + c_AffineMatrix2.y*Ty_x + c_AffineMatrix2.z*Tz_x;
            float Tz_y2=c_AffineMatrix2.x*Tx_y + c_AffineMatrix2.y*Ty_y + c_AffineMatrix2.z*Tz_y;
            float Tz_z2=c_AffineMatrix2.x*Tx_z + c_AffineMatrix2.y*Ty_z + c_AffineMatrix2.z*Tz_z;
            // The Jacobian matrix is stored
            tempIndex=tid*9;
            jacobianMatrices[tempIndex++]=Tx_x2;
            jacobianMatrices[tempIndex++]=Tx_y2;
            jacobianMatrices[tempIndex++]=Tx_z2;
            jacobianMatrices[tempIndex++]=Ty_x2;
            jacobianMatrices[tempIndex++]=Ty_y2;
            jacobianMatrices[tempIndex++]=Ty_z2;
            jacobianMatrices[tempIndex++]=Tz_x2;
            jacobianMatrices[tempIndex++]=Tz_y2;
            jacobianMatrices[tempIndex] = Tz_z2;
            // The Jacobian determinant is computed and stored
            jacobianDet[tid]= Tx_x2*Ty_y2*Tz_z2
                            + Tx_y2*Ty_z2*Tz_x2
                            + Tx_z2*Ty_x2*Tz_y2
                            - Tx_x2*Ty_z2*Tz_y2
                            - Tx_y2*Ty_x2*Tz_z2
                            - Tx_z2*Ty_y2*Tz_x2;
        }
        else{
            // Boundary node: identity Jacobian
            tempIndex=tid*9;
            jacobianMatrices[tempIndex++]=1.f;
            jacobianMatrices[tempIndex++]=0.f;
            jacobianMatrices[tempIndex++]=0.f;
            jacobianMatrices[tempIndex++]=0.f;
            jacobianMatrices[tempIndex++]=1.f;
            jacobianMatrices[tempIndex++]=0.f;
            jacobianMatrices[tempIndex++]=0.f;
            jacobianMatrices[tempIndex++]=0.f;
            jacobianMatrices[tempIndex]=1.f;
            jacobianDet[tid]= 1.0f;
        }
    }
    return;
}
/* *************************************************************** */
// Dense 2D Jacobian matrices and determinants: one thread per voxel of
// the reference image.
__global__ void reg_spline_getJacobianValues2D_kernel(float
*jacobianMatrices,
                                                      float *jacobianDet)
{
    const unsigned int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
    if(tid<c_VoxelNumber){
        int2 imageSize = make_int2(c_ReferenceImageDim.x,c_ReferenceImageDim.y);
        unsigned int tempIndex=tid;
        const int y = tempIndex/imageSize.x;
        const int x = tempIndex - y*imageSize.x;
        // the "nearest previous" node is determined [0,0,0]
        int2 nodeAnte;
        float2 gridVoxelSpacing = make_float2(c_ControlPointVoxelSpacing.x,c_ControlPointVoxelSpacing.y);
        nodeAnte.x = (int)floorf((float)x/gridVoxelSpacing.x);
        nodeAnte.y = (int)floorf((float)y/gridVoxelSpacing.y);
        // B-spline basis values and first derivatives along each axis
        float xBasis[4], yBasis[4], xFirst[4], yFirst[4], relative;
        relative = fabsf((float)x/gridVoxelSpacing.x-(float)nodeAnte.x);
        GetFirstBSplineValues(relative, xBasis, xFirst);
        relative = fabsf((float)y/gridVoxelSpacing.y-(float)nodeAnte.y);
        GetFirstBSplineValues(relative, yBasis, yFirst);
        int2 controlPointImageDim = make_int2(c_ControlPointImageDim.x,c_ControlPointImageDim.y);
        float2 Tx=make_float2(0.f,0.f);
        float2 Ty=make_float2(0.f,0.f);
        // Accumulate spatial derivatives over the 4x4 neighbourhood;
        // tempBasis holds (d/dx, d/dy) weights for each fetched node
        for(int b=0; b<4; ++b){
            int indexXY= ( nodeAnte.y + b) * controlPointImageDim.x + nodeAnte.x;
            float4 nodeCoefficient = tex1Dfetch(controlPointTexture,indexXY++);
            float2 tempBasis = make_float2(xFirst[0]*yBasis[b], xBasis[0]*yFirst[b]);
            Tx = Tx + nodeCoefficient.x * tempBasis;
            Ty = Ty + nodeCoefficient.y * tempBasis;
            nodeCoefficient = tex1Dfetch(controlPointTexture,indexXY++);
            tempBasis = make_float2(xFirst[1]*yBasis[b], xBasis[1]*yFirst[b]);
            Tx = Tx + nodeCoefficient.x * tempBasis;
            Ty = Ty + nodeCoefficient.y * tempBasis;
            nodeCoefficient = tex1Dfetch(controlPointTexture,indexXY++);
            tempBasis = make_float2(xFirst[2]*yBasis[b], xBasis[2]*yFirst[b]);
            Tx = Tx + nodeCoefficient.x * tempBasis;
            Ty = Ty + nodeCoefficient.y * tempBasis;
            nodeCoefficient = tex1Dfetch(controlPointTexture,indexXY);
            tempBasis = make_float2(xFirst[3]*yBasis[b], xBasis[3]*yFirst[b]);
            Tx = Tx + nodeCoefficient.x * tempBasis;
            Ty = Ty + nodeCoefficient.y * tempBasis;
        }
        // The jacobian matrix is reoriented
        float Tx_x2=c_AffineMatrix0.x*Tx.x + c_AffineMatrix0.y*Ty.x;
        float Tx_y2=c_AffineMatrix0.x*Tx.y + c_AffineMatrix0.y*Ty.y;
        float Ty_x2=c_AffineMatrix1.x*Tx.x + c_AffineMatrix1.y*Ty.x;
        float Ty_y2=c_AffineMatrix1.x*Tx.y + c_AffineMatrix1.y*Ty.y;
        // The Jacobian matrix is stored
        tempIndex=tid*4;
        jacobianMatrices[tempIndex++]=Tx_x2;
        jacobianMatrices[tempIndex++]=Tx_y2;
        jacobianMatrices[tempIndex++]=Ty_x2;
        jacobianMatrices[tempIndex] = Ty_y2;
        // The Jacobian determinant is computed and stored
        jacobianDet[tid]= Tx_x2 * Ty_y2 - Tx_y2 * Ty_x2;
    }
}
/* *************************************************************** */
// Dense 3D Jacobian matrices and determinants: one thread per voxel.
// Dynamic shared memory holds 4 yFirst and 4 zFirst values per thread
// (two consecutive halves of the same buffer).
__global__ void reg_spline_getJacobianValues3D_kernel(float *jacobianMatrices,
                                                      float *jacobianDet)
{
    const unsigned int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
    if(tid<c_VoxelNumber){
        int3 imageSize = c_ReferenceImageDim;
        unsigned int tempIndex=tid;
        const int z = tempIndex/(imageSize.x*imageSize.y);
        tempIndex -= z*imageSize.x*imageSize.y;
        const int y = tempIndex/imageSize.x;
        const int x = tempIndex - y*imageSize.x;
        // the "nearest previous" node is determined [0,0,0]
        int3 nodeAnte;
        float3 gridVoxelSpacing = c_ControlPointVoxelSpacing;
        nodeAnte.x = (int)floorf((float)x/gridVoxelSpacing.x);
        nodeAnte.y = (int)floorf((float)y/gridVoxelSpacing.y);
        nodeAnte.z = (int)floorf((float)z/gridVoxelSpacing.z);
        // First half of the dynamic shared buffer stores yFirst,
        // the second half stores zFirst (4 floats per thread each)
        extern __shared__ float yFirst[];
        float *zFirst=&yFirst[4*blockDim.x*blockDim.y*blockDim.z];
        float xBasis[4], yBasis[4], zBasis[4], xFirst[4], relative;
        const int shareMemIndex = 4*threadIdx.x;
        relative = fabsf((float)x/gridVoxelSpacing.x-(float)nodeAnte.x);
        GetFirstBSplineValues(relative, xBasis, xFirst);
        relative = fabsf((float)y/gridVoxelSpacing.y-(float)nodeAnte.y);
        GetFirstBSplineValues(relative, yBasis, &yFirst[shareMemIndex]);
        relative = fabsf((float)z/gridVoxelSpacing.z-(float)nodeAnte.z);
        GetFirstBSplineValues(relative, zBasis, &zFirst[shareMemIndex]);
        int3 controlPointImageDim = c_ControlPointImageDim;
float3 Tx=make_float3(0.f,0.f,0.f);
        float3 Ty=make_float3(0.f,0.f,0.f);
        float3 Tz=make_float3(0.f,0.f,0.f);
        for(int c=0; c<4; ++c){
            for(int b=0; b<4; ++b){
                int indexXYZ= ( (nodeAnte.z + c) * controlPointImageDim.y + nodeAnte.y + b) * controlPointImageDim.x + nodeAnte.x;
                // Basis products (value/derivative mixes) shared by the four X fetches
                float3 tempBasisXY=make_float3(yBasis[b]*zBasis[c],
                                               yFirst[shareMemIndex+b]*zBasis[c],
                                               yBasis[b]*zFirst[shareMemIndex+c]);
                float4 nodeCoefficient = tex1Dfetch(controlPointTexture,indexXYZ++);
                float3 tempBasis = make_float3(xFirst[0],xBasis[0],xBasis[0])*tempBasisXY;
                Tx = Tx + nodeCoefficient.x * tempBasis;
                Ty = Ty + nodeCoefficient.y * tempBasis;
                Tz = Tz + nodeCoefficient.z * tempBasis;
                nodeCoefficient = tex1Dfetch(controlPointTexture,indexXYZ++);
                tempBasis = make_float3(xFirst[1],xBasis[1],xBasis[1])*tempBasisXY;
                Tx = Tx + nodeCoefficient.x * tempBasis;
                Ty = Ty + nodeCoefficient.y * tempBasis;
                Tz = Tz + nodeCoefficient.z * tempBasis;
                nodeCoefficient = tex1Dfetch(controlPointTexture,indexXYZ++);
                tempBasis = make_float3(xFirst[2],xBasis[2],xBasis[2])*tempBasisXY;
                Tx = Tx + nodeCoefficient.x * tempBasis;
                Ty = Ty + nodeCoefficient.y * tempBasis;
                Tz = Tz + nodeCoefficient.z * tempBasis;
                nodeCoefficient = tex1Dfetch(controlPointTexture,indexXYZ);
                tempBasis = make_float3(xFirst[3],xBasis[3],xBasis[3])*tempBasisXY;
                Tx = Tx + nodeCoefficient.x * tempBasis;
                Ty = Ty + nodeCoefficient.y * tempBasis;
                Tz = Tz + nodeCoefficient.z * tempBasis;
            }
        }
        // The jacobian matrix is reoriented
        float Tx_x2=c_AffineMatrix0.x*Tx.x + c_AffineMatrix0.y*Ty.x + c_AffineMatrix0.z*Tz.x;
        float Tx_y2=c_AffineMatrix0.x*Tx.y + c_AffineMatrix0.y*Ty.y + c_AffineMatrix0.z*Tz.y;
        float Tx_z2=c_AffineMatrix0.x*Tx.z + c_AffineMatrix0.y*Ty.z + c_AffineMatrix0.z*Tz.z;
        float Ty_x2=c_AffineMatrix1.x*Tx.x + c_AffineMatrix1.y*Ty.x + c_AffineMatrix1.z*Tz.x;
        float Ty_y2=c_AffineMatrix1.x*Tx.y + c_AffineMatrix1.y*Ty.y + c_AffineMatrix1.z*Tz.y;
        float Ty_z2=c_AffineMatrix1.x*Tx.z + c_AffineMatrix1.y*Ty.z + c_AffineMatrix1.z*Tz.z;
        float
Tz_x2=c_AffineMatrix2.x*Tx.x + c_AffineMatrix2.y*Ty.x + c_AffineMatrix2.z*Tz.x;
        float Tz_y2=c_AffineMatrix2.x*Tx.y + c_AffineMatrix2.y*Ty.y + c_AffineMatrix2.z*Tz.y;
        float Tz_z2=c_AffineMatrix2.x*Tx.z + c_AffineMatrix2.y*Ty.z + c_AffineMatrix2.z*Tz.z;
        // The Jacobian matrix is stored
        tempIndex=tid*9;
        jacobianMatrices[tempIndex++]=Tx_x2;
        jacobianMatrices[tempIndex++]=Tx_y2;
        jacobianMatrices[tempIndex++]=Tx_z2;
        jacobianMatrices[tempIndex++]=Ty_x2;
        jacobianMatrices[tempIndex++]=Ty_y2;
        jacobianMatrices[tempIndex++]=Ty_z2;
        jacobianMatrices[tempIndex++]=Tz_x2;
        jacobianMatrices[tempIndex++]=Tz_y2;
        jacobianMatrices[tempIndex] = Tz_z2;
        // The Jacobian determinant is computed and stored
        jacobianDet[tid]= Tx_x2*Ty_y2*Tz_z2
                        + Tx_y2*Ty_z2*Tz_x2
                        + Tx_z2*Ty_x2*Tz_y2
                        - Tx_x2*Ty_z2*Tz_y2
                        - Tx_y2*Ty_x2*Tz_z2
                        - Tx_z2*Ty_y2*Tz_x2;
    }
}
/* *************************************************************** */
// Replaces every determinant by its squared logarithm (penalty value).
// NOTE(review): logf of a non-positive determinant yields NaN/-inf here;
// callers presumably filter or correct folded configurations first.
__global__ void reg_spline_logSquaredValues_kernel(float *det)
{
    const unsigned int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
    if(tid<c_VoxelNumber){
        float val = logf(det[tid]);
        det[tid]=val*val;
    }
}
/* *************************************************************** */
// Accumulates into jacobianConstraint the basis-weighted combination of
// the 2x2 Jacobian cofactors, scaled by detJac.
__device__ void getJacobianGradientValues2D(float *jacobianMatrix,
                                            float detJac,
                                            float basisX,
                                            float basisY,
                                            float2 *jacobianConstraint)
{
    jacobianConstraint->x += detJac * (
            basisX * jacobianMatrix[3] -
            basisY * jacobianMatrix[2] );
    jacobianConstraint->y += detJac * (
            basisY * jacobianMatrix[0] -
            basisX * jacobianMatrix[1] );
}
/* *************************************************************** */
// 3D counterpart: uses the cofactors of the 3x3 Jacobian matrix.
__device__ void getJacobianGradientValues3D(float *jacobianMatrix,
                                            float detJac,
                                            float basisX,
                                            float basisY,
                                            float basisZ,
                                            float3 *jacobianConstraint)
{
    jacobianConstraint->x += detJac * (
            basisX * (jacobianMatrix[4]*jacobianMatrix[8] - jacobianMatrix[5]*jacobianMatrix[7])
            + basisY * (jacobianMatrix[5]*jacobianMatrix[6] - jacobianMatrix[3]*jacobianMatrix[8])
            + basisZ * (jacobianMatrix[3]*jacobianMatrix[7] -
jacobianMatrix[4]*jacobianMatrix[6]) );
    jacobianConstraint->y += detJac * (
            basisX * (jacobianMatrix[2]*jacobianMatrix[7] - jacobianMatrix[1]*jacobianMatrix[8])
            + basisY * (jacobianMatrix[0]*jacobianMatrix[8] - jacobianMatrix[2]*jacobianMatrix[6])
            + basisZ * (jacobianMatrix[1]*jacobianMatrix[6] - jacobianMatrix[0]*jacobianMatrix[7]) );
    jacobianConstraint->z += detJac * (
            basisX * (jacobianMatrix[1]*jacobianMatrix[5] - jacobianMatrix[2]*jacobianMatrix[4])
            + basisY * (jacobianMatrix[2]*jacobianMatrix[3] - jacobianMatrix[0]*jacobianMatrix[5])
            + basisZ * (jacobianMatrix[0]*jacobianMatrix[4] - jacobianMatrix[1]*jacobianMatrix[3]) );
}
/* *************************************************************** */
// Gradient of the approximated log-Jacobian penalty (2D), evaluated at
// control-point positions; contributions come from the 3x3 interior
// neighbourhood of each node.
__global__ void reg_spline_computeApproxJacGradient2D_kernel(float4 *gradient)
{
    __shared__ float xbasis[9];
    __shared__ float ybasis[9];
    if(threadIdx.x<9) GetFirstDerivativeBasisValues2D(threadIdx.x, xbasis, ybasis);
    __syncthreads();
    const unsigned int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
    if(tid<c_ControlPointNumber){
        int3 gridSize = c_ControlPointImageDim;
        unsigned int tempIndex=tid;
        const int y =(int)(tempIndex/(gridSize.x));
        const int x = tempIndex - y*(gridSize.x);
        float2 jacobianGradient=make_float2(0.f,0.f);
        // tempIndex walks the 9 basis values backwards; skipped rows
        // decrement it in bulk below to stay aligned
        tempIndex=8;
        for(int pixelY=(int)(y-1); pixelY<(int)(y+2); ++pixelY){
            if(pixelY>0 && pixelY<gridSize.y-1){
                int jacIndex = pixelY*gridSize.x+x-1;
                for(int pixelX=(int)(x-1); pixelX<(int)(x+2); ++pixelX){
                    if(pixelX>0 && pixelX<gridSize.x-1){
                        float detJac = tex1Dfetch(jacobianDeterminantTexture,jacIndex);
                        // Only positive determinants contribute to the log penalty
                        if(detJac>0.f){
                            detJac = 2.f*logf(detJac) / detJac;
                            float jacobianMatrix[4];
                            jacobianMatrix[0] = tex1Dfetch(jacobianMatricesTexture,jacIndex*4);
                            jacobianMatrix[1] = tex1Dfetch(jacobianMatricesTexture,jacIndex*4+1);
                            jacobianMatrix[2] = tex1Dfetch(jacobianMatricesTexture,jacIndex*4+2);
                            jacobianMatrix[3] = tex1Dfetch(jacobianMatricesTexture,jacIndex*4+3);
                            getJacobianGradientValues2D(jacobianMatrix, detJac, xbasis[tempIndex],
ybasis[tempIndex], &jacobianGradient);
                        }
                    }
                    jacIndex++;
                    tempIndex--;
                }
            }
            else tempIndex-=3;
        }
        // Reorient with the affine matrix rows and add the weighted gradient
        gradient[tid] = gradient[tid] + make_float4(c_Weight3.x * (c_AffineMatrix0.x * jacobianGradient.x
                                                                   + c_AffineMatrix0.y * jacobianGradient.y),
                                                    c_Weight3.y * (c_AffineMatrix1.x * jacobianGradient.x
                                                                   + c_AffineMatrix1.y * jacobianGradient.y),
                                                    0.f,
                                                    0.f);
    }
}
/* *************************************************************** */
// Gradient of the approximated log-Jacobian penalty (3D), using the
// 3x3x3 interior neighbourhood of each control point.
__global__ void reg_spline_computeApproxJacGradient3D_kernel(float4 *gradient)
{
    __shared__ float xbasis[27];
    __shared__ float ybasis[27];
    __shared__ float zbasis[27];
    if(threadIdx.x<27) GetFirstDerivativeBasisValues3D(threadIdx.x, xbasis, ybasis, zbasis);
    __syncthreads();
    const unsigned int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
    if(tid<c_ControlPointNumber){
        int3 gridSize = c_ControlPointImageDim;
        unsigned int tempIndex=tid;
        const int z =(int)(tempIndex/(gridSize.x*gridSize.y));
        tempIndex -= z*(gridSize.x)*(gridSize.y);
        const int y =(int)(tempIndex/(gridSize.x));
        const int x = tempIndex - y*(gridSize.x);
        float3 jacobianGradient=make_float3(0.f,0.f,0.f);
        // tempIndex walks the 27 basis values backwards; skipped slices/rows
        // decrement it in bulk below to stay aligned
        tempIndex=26;
        for(int pixelZ=(int)(z-1); pixelZ<(int)(z+2); ++pixelZ){
            if(pixelZ>0 && pixelZ<gridSize.z-1){
                for(int pixelY=(int)(y-1); pixelY<(int)(y+2); ++pixelY){
                    if(pixelY>0 && pixelY<gridSize.y-1){
                        int jacIndex = (pixelZ*gridSize.y+pixelY)*gridSize.x+x-1;
                        for(int pixelX=(int)(x-1); pixelX<(int)(x+2); ++pixelX){
                            if(pixelX>0 && pixelX<gridSize.x-1){
                                float detJac = tex1Dfetch(jacobianDeterminantTexture,jacIndex);
                                if(detJac>0.f){
                                    detJac = 2.f*logf(detJac) / detJac;
                                    float jacobianMatrix[9];
                                    jacobianMatrix[0] = tex1Dfetch(jacobianMatricesTexture,jacIndex*9);
                                    jacobianMatrix[1] = tex1Dfetch(jacobianMatricesTexture,jacIndex*9+1);
                                    jacobianMatrix[2] = tex1Dfetch(jacobianMatricesTexture,jacIndex*9+2);
                                    jacobianMatrix[3] = tex1Dfetch(jacobianMatricesTexture,jacIndex*9+3);
                                    jacobianMatrix[4] = tex1Dfetch(jacobianMatricesTexture,jacIndex*9+4);
                                    jacobianMatrix[5] =
tex1Dfetch(jacobianMatricesTexture,jacIndex*9+5);
                                    jacobianMatrix[6] = tex1Dfetch(jacobianMatricesTexture,jacIndex*9+6);
                                    jacobianMatrix[7] = tex1Dfetch(jacobianMatricesTexture,jacIndex*9+7);
                                    jacobianMatrix[8] = tex1Dfetch(jacobianMatricesTexture,jacIndex*9+8);
                                    getJacobianGradientValues3D(jacobianMatrix, detJac, xbasis[tempIndex], ybasis[tempIndex], zbasis[tempIndex], &jacobianGradient);
                                }
                            }
                            jacIndex++;
                            tempIndex--;
                        }
                    }
                    else tempIndex-=3;
                }
            }
            else tempIndex-=9;
        }
        // Reorient with the affine matrix rows and add the weighted gradient
        gradient[tid] = gradient[tid] + make_float4(c_Weight3.x * (c_AffineMatrix0.x * jacobianGradient.x
                                                                   + c_AffineMatrix0.y * jacobianGradient.y
                                                                   + c_AffineMatrix0.z * jacobianGradient.z),
                                                    c_Weight3.y * (c_AffineMatrix1.x * jacobianGradient.x
                                                                   + c_AffineMatrix1.y * jacobianGradient.y
                                                                   + c_AffineMatrix1.z * jacobianGradient.z),
                                                    c_Weight3.z * (c_AffineMatrix2.x * jacobianGradient.x
                                                                   + c_AffineMatrix2.y * jacobianGradient.y
                                                                   + c_AffineMatrix2.z * jacobianGradient.z),
                                                    0.f);
    }
}
/* *************************************************************** */
// Gradient of the dense log-Jacobian penalty (2D): each control point
// collects contributions from every voxel inside its spline support
// region in the reference image.
__global__ void reg_spline_computeJacGradient2D_kernel(float4 *gradient)
{
    const unsigned int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
    if(tid<c_ControlPointNumber){
        int3 gridSize = c_ControlPointImageDim;
        int tempIndex=tid;
        const int y = tempIndex/gridSize.x;
        const int x = tempIndex - y*gridSize.x;
        float2 jacobianGradient=make_float2(0.f,0.f);
        float3 spacingVoxel = c_ControlPointVoxelSpacing;
        // Iterate over the voxels covered by this node's basis support
        for(int pixelY=(int)ceilf((y-3)*spacingVoxel.y); pixelY<=(int)ceilf((y+1)*spacingVoxel.y); ++pixelY){
            if(pixelY>-1 && pixelY<c_ReferenceImageDim.y){
                int yPre = (int)((float)pixelY/spacingVoxel.y);
                float basis = (float)pixelY/spacingVoxel.y - (float)yPre;
                float yBasis, yFirst;
                getBSplineBasisValue(basis,y-yPre,&yBasis,&yFirst);
                for(int pixelX=(int)ceilf((x-3)*spacingVoxel.x); pixelX<=(int)ceilf((x+1)*spacingVoxel.x); ++pixelX){
                    if(pixelX>-1 && pixelX<c_ReferenceImageDim.x && (yFirst!=0.f || yBasis!=0.f)){
                        int xPre = (int)((float)pixelX/spacingVoxel.x);
                        basis =
(float)pixelX/spacingVoxel.x - (float)xPre;
                        float xBasis, xFirst;
                        getBSplineBasisValue(basis,x-xPre,&xBasis,&xFirst);
                        int jacIndex = pixelY*c_ReferenceImageDim.x + pixelX;
                        float detJac = tex1Dfetch(jacobianDeterminantTexture,jacIndex);
                        // Only positive determinants with a non-zero basis contribute
                        if(detJac>0.f && (xFirst!=0.f || xBasis!=0.f)){
                            detJac = 2.f*logf(detJac) / detJac;
                            float jacobianMatrix[4];
                            jacIndex *= 4;
                            jacobianMatrix[0] = tex1Dfetch(jacobianMatricesTexture,jacIndex++);
                            jacobianMatrix[1] = tex1Dfetch(jacobianMatricesTexture,jacIndex++);
                            jacobianMatrix[2] = tex1Dfetch(jacobianMatricesTexture,jacIndex++);
                            jacobianMatrix[3] = tex1Dfetch(jacobianMatricesTexture,jacIndex);
                            float2 basisValues = make_float2( xFirst*yBasis, xBasis*yFirst);
                            getJacobianGradientValues2D(jacobianMatrix, detJac, basisValues.x, basisValues.y, &jacobianGradient);
                        }
                    }
                }
            }
        }
        // Reorient with the affine matrix rows and add the weighted gradient
        gradient[tid] = gradient[tid] + make_float4( c_Weight3.x * (c_AffineMatrix0.x * jacobianGradient.x
                                                                    + c_AffineMatrix0.y * jacobianGradient.y),
                                                     c_Weight3.y * (c_AffineMatrix1.x * jacobianGradient.x
                                                                    + c_AffineMatrix1.y * jacobianGradient.y),
                                                     0.f,
                                                     0.f);
    }
}
/* *************************************************************** */
// Gradient of the dense log-Jacobian penalty (3D).
__global__ void reg_spline_computeJacGradient3D_kernel(float4 *gradient)
{
    const unsigned int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
    if(tid<c_ControlPointNumber){
        int3 gridSize = c_ControlPointImageDim;
        int tempIndex=tid;
        const int z = tempIndex/(gridSize.x*gridSize.y);
        tempIndex -= z*gridSize.x*gridSize.y;
        const int y = tempIndex/gridSize.x;
        const int x = tempIndex - y*gridSize.x;
        float3 jacobianGradient=make_float3(0.f,0.f,0.f);
        float3 spacingVoxel = c_ControlPointVoxelSpacing;
        // Iterate over the voxels covered by this node's basis support
        for(int pixelZ=(int)ceilf((z-3)*spacingVoxel.z); pixelZ<=(int)ceilf((z+1)*spacingVoxel.z); ++pixelZ){
            if(pixelZ>-1 && pixelZ<c_ReferenceImageDim.z){
                int zPre = (int)((float)pixelZ/spacingVoxel.z);
                float basis = (float)pixelZ/spacingVoxel.z - (float)zPre;
                float zBasis, zFirst;
                getBSplineBasisValue(basis,z-zPre,&zBasis,&zFirst);
                for(int
pixelY=(int)ceilf((y-3)*spacingVoxel.y); pixelY<=(int)ceilf((y+1)*spacingVoxel.y); ++pixelY){
                    if(pixelY>-1 && pixelY<c_ReferenceImageDim.y && (zFirst!=0.f || zBasis!=0.f)){
                        int yPre = (int)((float)pixelY/spacingVoxel.y);
                        basis = (float)pixelY/spacingVoxel.y - (float)yPre;
                        float yBasis, yFirst;
                        getBSplineBasisValue(basis,y-yPre,&yBasis,&yFirst);
                        for(int pixelX=(int)ceilf((x-3)*spacingVoxel.x); pixelX<=(int)ceilf((x+1)*spacingVoxel.x); ++pixelX){
                            if(pixelX>-1 && pixelX<c_ReferenceImageDim.x && (yFirst!=0.f || yBasis!=0.f)){
                                int xPre = (int)((float)pixelX/spacingVoxel.x);
                                basis = (float)pixelX/spacingVoxel.x - (float)xPre;
                                float xBasis, xFirst;
                                getBSplineBasisValue(basis,x-xPre,&xBasis,&xFirst);
                                int jacIndex = (pixelZ*c_ReferenceImageDim.y+pixelY)*c_ReferenceImageDim.x + pixelX;
                                float detJac = tex1Dfetch(jacobianDeterminantTexture,jacIndex);
                                // Only positive determinants with a non-zero basis contribute
                                if(detJac>0.f && (xFirst!=0.f || xBasis!=0.f)){
                                    detJac = 2.f*logf(detJac) / detJac;
                                    float jacobianMatrix[9];
                                    jacIndex *= 9;
                                    jacobianMatrix[0] = tex1Dfetch(jacobianMatricesTexture,jacIndex++);
                                    jacobianMatrix[1] = tex1Dfetch(jacobianMatricesTexture,jacIndex++);
                                    jacobianMatrix[2] = tex1Dfetch(jacobianMatricesTexture,jacIndex++);
                                    jacobianMatrix[3] = tex1Dfetch(jacobianMatricesTexture,jacIndex++);
                                    jacobianMatrix[4] = tex1Dfetch(jacobianMatricesTexture,jacIndex++);
                                    jacobianMatrix[5] = tex1Dfetch(jacobianMatricesTexture,jacIndex++);
                                    jacobianMatrix[6] = tex1Dfetch(jacobianMatricesTexture,jacIndex++);
                                    jacobianMatrix[7] = tex1Dfetch(jacobianMatricesTexture,jacIndex++);
                                    jacobianMatrix[8] = tex1Dfetch(jacobianMatricesTexture,jacIndex);
                                    float3 basisValues = make_float3( xFirst*yBasis*zBasis, xBasis*yFirst*zBasis, xBasis*yBasis*zFirst);
                                    getJacobianGradientValues3D(jacobianMatrix, detJac, basisValues.x, basisValues.y, basisValues.z, &jacobianGradient);
                                }
                            }
                        }
                    }
                }
            }
        }
        // Reorient with the affine matrix rows and add the weighted gradient
        gradient[tid] = gradient[tid] + make_float4( c_Weight3.x * (c_AffineMatrix0.x * jacobianGradient.x
                                                                    + c_AffineMatrix0.y * jacobianGradient.y
                                                                    + c_AffineMatrix0.z * jacobianGradient.z),
c_Weight3.y * (c_AffineMatrix1.x * jacobianGradient.x + c_AffineMatrix1.y * jacobianGradient.y + c_AffineMatrix1.z * jacobianGradient.z), c_Weight3.z * (c_AffineMatrix2.x * jacobianGradient.x + c_AffineMatrix2.y * jacobianGradient.y + c_AffineMatrix2.z * jacobianGradient.z), 0.f); } } /* *************************************************************** */ __global__ void reg_spline_approxCorrectFolding3D_kernel(float4 *controlPointGrid_d) { const unsigned int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(tid<c_ControlPointNumber){ int3 gridSize = c_ControlPointImageDim; unsigned int tempIndex=tid; const int z = tempIndex/(gridSize.x*gridSize.y); tempIndex -= z*gridSize.x*gridSize.y; const int y = tempIndex/gridSize.x; const int x = tempIndex - y*gridSize.x; float3 foldingCorrection=make_float3(0.f,0.f,0.f); for(int pixelZ=(int)(z-1); pixelZ<(int)(z+2); ++pixelZ){ if(pixelZ>0 && pixelZ<gridSize.z-1){ for(int pixelY=(int)(y-1); pixelY<(int)(y+2); ++pixelY){ if(pixelY>0 && pixelY<gridSize.y-1){ for(int pixelX=(int)(x-1); pixelX<(int)(x+2); ++pixelX){ if(pixelX>0 && pixelX<gridSize.x-1){ int jacIndex = (pixelZ*gridSize.y+pixelY)*gridSize.x+pixelX; float detJac = tex1Dfetch(jacobianDeterminantTexture,jacIndex); if(detJac<=0.f){ float jacobianMatrix[9]; jacIndex*=9; jacobianMatrix[0] = tex1Dfetch(jacobianMatricesTexture,jacIndex++); jacobianMatrix[1] = tex1Dfetch(jacobianMatricesTexture,jacIndex++); jacobianMatrix[2] = tex1Dfetch(jacobianMatricesTexture,jacIndex++); jacobianMatrix[3] = tex1Dfetch(jacobianMatricesTexture,jacIndex++); jacobianMatrix[4] = tex1Dfetch(jacobianMatricesTexture,jacIndex++); jacobianMatrix[5] = tex1Dfetch(jacobianMatricesTexture,jacIndex++); jacobianMatrix[6] = tex1Dfetch(jacobianMatricesTexture,jacIndex++); jacobianMatrix[7] = tex1Dfetch(jacobianMatricesTexture,jacIndex++); jacobianMatrix[8] = tex1Dfetch(jacobianMatricesTexture,jacIndex); float xBasis, xFirst, yBasis, yFirst, zBasis, zFirst; 
getBSplineBasisValue(0.f,x-pixelX+1,&xBasis,&xFirst); getBSplineBasisValue(0.f,y-pixelY+1,&yBasis,&yFirst); getBSplineBasisValue(0.f,z-pixelZ+1,&zBasis,&zFirst); float3 basisValue = make_float3( xFirst*yBasis*zBasis, xBasis*yFirst*zBasis, xBasis*yBasis*zFirst); getJacobianGradientValues3D(jacobianMatrix, 1.f, basisValue.x, basisValue.y, basisValue.z, &foldingCorrection); } } } } } } } if(foldingCorrection.x!=0.f && foldingCorrection.y!=0.f && foldingCorrection.z!=0.f){ float3 gradient = make_float3( c_AffineMatrix0.x * foldingCorrection.x + c_AffineMatrix0.y * foldingCorrection.y + c_AffineMatrix0.z * foldingCorrection.z, c_AffineMatrix1.x * foldingCorrection.x + c_AffineMatrix1.y * foldingCorrection.y + c_AffineMatrix1.z * foldingCorrection.z, c_AffineMatrix2.x * foldingCorrection.x + c_AffineMatrix2.y * foldingCorrection.y + c_AffineMatrix2.z * foldingCorrection.z); float norm = 5.f * sqrtf(gradient.x*gradient.x + gradient.y*gradient.y + gradient.z*gradient.z); controlPointGrid_d[tid] = controlPointGrid_d[tid] + make_float4(gradient.x*c_ControlPointSpacing.x/norm, gradient.y*c_ControlPointSpacing.y/norm, gradient.z*c_ControlPointSpacing.z/norm, 0.f); } } } /* *************************************************************** */ __global__ void reg_spline_correctFolding3D_kernel(float4 *controlPointGrid_d) { const unsigned int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(tid<c_ControlPointNumber){ int3 gridSize = c_ControlPointImageDim; unsigned int tempIndex=tid; const int z = tempIndex/(gridSize.x*gridSize.y); tempIndex -= z*gridSize.x*gridSize.y; const int y = tempIndex/gridSize.x; const int x = tempIndex - y*gridSize.x; float3 spacingVoxel = c_ControlPointVoxelSpacing; float3 foldingCorrection=make_float3(0.f,0.f,0.f); for(int pixelZ=(int)ceilf((z-3)*spacingVoxel.z); pixelZ<(int)ceilf((z+1)*spacingVoxel.z); ++pixelZ){ if(pixelZ>-1 && pixelZ<c_ReferenceImageDim.z){ for(int pixelY=(int)ceilf((y-3)*spacingVoxel.y); 
pixelY<(int)ceilf((y+1)*spacingVoxel.y); ++pixelY){ if(pixelY>-1 && pixelY<c_ReferenceImageDim.y){ for(int pixelX=(int)ceilf((x-3)*spacingVoxel.x); pixelX<(int)ceilf((x+1)*spacingVoxel.x); ++pixelX){ if(pixelX>-1 && pixelX<c_ReferenceImageDim.x){ int jacIndex = (pixelZ*c_ReferenceImageDim.y+pixelY)*c_ReferenceImageDim.x+pixelX; float detJac = tex1Dfetch(jacobianDeterminantTexture,jacIndex); if(detJac<=0.f){ float jacobianMatrix[9]; jacIndex*=9; jacobianMatrix[0] = tex1Dfetch(jacobianMatricesTexture,jacIndex++); jacobianMatrix[1] = tex1Dfetch(jacobianMatricesTexture,jacIndex++); jacobianMatrix[2] = tex1Dfetch(jacobianMatricesTexture,jacIndex++); jacobianMatrix[3] = tex1Dfetch(jacobianMatricesTexture,jacIndex++); jacobianMatrix[4] = tex1Dfetch(jacobianMatricesTexture,jacIndex++); jacobianMatrix[5] = tex1Dfetch(jacobianMatricesTexture,jacIndex++); jacobianMatrix[6] = tex1Dfetch(jacobianMatricesTexture,jacIndex++); jacobianMatrix[7] = tex1Dfetch(jacobianMatricesTexture,jacIndex++); jacobianMatrix[8] = tex1Dfetch(jacobianMatricesTexture,jacIndex); float xBasis, xFirst, yBasis, yFirst, zBasis, zFirst; int pre=(int)((float)pixelX/spacingVoxel.x); float basis=(float)pixelX/spacingVoxel.x-(float)pre; getBSplineBasisValue(basis,x-pre,&xBasis,&xFirst); pre=(int)((float)pixelY/spacingVoxel.y); basis=(float)pixelY/spacingVoxel.y-(float)pre; getBSplineBasisValue(basis,y-pre,&yBasis,&yFirst); pre=(int)((float)pixelZ/spacingVoxel.z); basis=(float)pixelZ/spacingVoxel.z-(float)pre; getBSplineBasisValue(basis,z-pre,&zBasis,&zFirst); float3 basisValue = make_float3( xFirst*yBasis*zBasis, xBasis*yFirst*zBasis, xBasis*yBasis*zFirst); getJacobianGradientValues3D(jacobianMatrix, 1.f, basisValue.x, basisValue.y, basisValue.z, &foldingCorrection); } } } } } } } if(foldingCorrection.x!=0.f && foldingCorrection.y!=0.f && foldingCorrection.z!=0.f){ float3 gradient = make_float3( c_AffineMatrix0.x * foldingCorrection.x + c_AffineMatrix0.y * foldingCorrection.y + c_AffineMatrix0.z * 
foldingCorrection.z, c_AffineMatrix1.x * foldingCorrection.x + c_AffineMatrix1.y * foldingCorrection.y + c_AffineMatrix1.z * foldingCorrection.z, c_AffineMatrix2.x * foldingCorrection.x + c_AffineMatrix2.y * foldingCorrection.y + c_AffineMatrix2.z * foldingCorrection.z); float norm = 5.f * sqrtf(gradient.x*gradient.x + gradient.y*gradient.y + gradient.z*gradient.z); controlPointGrid_d[tid] = controlPointGrid_d[tid] + make_float4(gradient.x*c_ControlPointSpacing.x/norm, gradient.y*c_ControlPointSpacing.y/norm, gradient.z*c_ControlPointSpacing.z/norm, 0.f); } } } /* *************************************************************** */ __global__ void reg_getDeformationFromDisplacement3D_kernel(float4 *imageArray_d) { const unsigned int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(tid<c_VoxelNumber){ int3 imageSize = c_ReferenceImageDim; unsigned int tempIndex=tid; const int z = tempIndex/(imageSize.x*imageSize.y); tempIndex -= z*imageSize.x*imageSize.y; const int y = tempIndex/imageSize.x; const int x = tempIndex - y*imageSize.x; float4 initialPosition; initialPosition.x=x*c_AffineMatrix0b.x + y*c_AffineMatrix0b.y + z*c_AffineMatrix0b.z + c_AffineMatrix0b.w; initialPosition.y=x*c_AffineMatrix1b.x + y*c_AffineMatrix1b.y + z*c_AffineMatrix1b.z + c_AffineMatrix1b.w; initialPosition.z=x*c_AffineMatrix2b.x + y*c_AffineMatrix2b.y + z*c_AffineMatrix2b.z + c_AffineMatrix2b.w; initialPosition.w=0.f; imageArray_d[tid] = imageArray_d[tid] + initialPosition; } } /* *************************************************************** */ __global__ void reg_getDisplacementFromDeformation3D_kernel(float4 *imageArray_d) { const unsigned int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(tid<c_VoxelNumber){ int3 imageSize = c_ReferenceImageDim; unsigned int tempIndex=tid; const int z = tempIndex/(imageSize.x*imageSize.y); tempIndex -= z*imageSize.x*imageSize.y; const int y = tempIndex/imageSize.x; const int x = tempIndex - y*imageSize.x; float4 
initialPosition; initialPosition.x=x*c_AffineMatrix0b.x + y*c_AffineMatrix0b.y + z*c_AffineMatrix0b.z + c_AffineMatrix0b.w; initialPosition.y=x*c_AffineMatrix1b.x + y*c_AffineMatrix1b.y + z*c_AffineMatrix1b.z + c_AffineMatrix1b.w; initialPosition.z=x*c_AffineMatrix2b.x + y*c_AffineMatrix2b.y + z*c_AffineMatrix2b.z + c_AffineMatrix2b.w; initialPosition.w=0.f; imageArray_d[tid] = imageArray_d[tid] - initialPosition; } } /* *************************************************************** */ __global__ void reg_defField_compose2D_kernel(float4 *outDef) { const unsigned int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(tid<c_VoxelNumber){ // Extract the original voxel position float4 position=outDef[tid]; // Conversion from real position to voxel coordinate float4 voxelPosition; voxelPosition.x= position.x*c_AffineMatrix0b.x + position.y*c_AffineMatrix0b.y + c_AffineMatrix0b.w; voxelPosition.y= position.x*c_AffineMatrix1b.x + position.y*c_AffineMatrix1b.y + c_AffineMatrix1b.w; voxelPosition.z=0.f; voxelPosition.w=0.f; // linear interpolation int2 ante=make_int2(floorf(voxelPosition.x), floorf(voxelPosition.y)); float relX[2], relY[2]; relX[1]=voxelPosition.x-(float)ante.x;relX[0]=1.f-relX[1]; relY[1]=voxelPosition.y-(float)ante.y;relY[0]=1.f-relY[1]; position=make_float4(0.f,0.f,0.f,0.f); for(int b=0;b<2;++b){ for(int a=0;a<2;++a){ unsigned int index=(ante.y+b)*c_ReferenceImageDim.x+ante.x+a; float4 deformation; if((ante.x+a)>-1 && (ante.y+b)>-1 && (ante.x+a)<c_ReferenceImageDim.x && (ante.y+b)<c_ReferenceImageDim.y){ deformation=tex1Dfetch(voxelDeformationTexture,index); } else{ deformation = get_SlidedValues_gpu((ante.x+a), (ante.y+b)); } float basis=relX[a]*relY[b]; position=position+basis*deformation; } } outDef[tid]=position; } } /* *************************************************************** */ __global__ void reg_defField_compose3D_kernel(float4 *outDef) { const unsigned int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; 
if(tid<c_VoxelNumber){ // Extract the original voxel position float4 position=outDef[tid]; // Conversion from real position to voxel coordinate float4 voxelPosition; voxelPosition.x= position.x*c_AffineMatrix0b.x + position.y*c_AffineMatrix0b.y + position.z*c_AffineMatrix0b.z + c_AffineMatrix0b.w; voxelPosition.y= position.x*c_AffineMatrix1b.x + position.y*c_AffineMatrix1b.y + position.z*c_AffineMatrix1b.z + c_AffineMatrix1b.w; voxelPosition.z= position.x*c_AffineMatrix2b.x + position.y*c_AffineMatrix2b.y + position.z*c_AffineMatrix2b.z + c_AffineMatrix2b.w; voxelPosition.w=0.f; // linear interpolation int3 ante=make_int3(floorf(voxelPosition.x), floorf(voxelPosition.y), floorf(voxelPosition.z)); float relX[2], relY[2], relZ[2]; relX[1]=voxelPosition.x-(float)ante.x;relX[0]=1.f-relX[1]; relY[1]=voxelPosition.y-(float)ante.y;relY[0]=1.f-relY[1]; relZ[1]=voxelPosition.z-(float)ante.z;relZ[0]=1.f-relZ[1]; position=make_float4(0.f,0.f,0.f,0.f); for(int c=0;c<2;++c){ for(int b=0;b<2;++b){ for(int a=0;a<2;++a){ unsigned int index=((ante.z+c)*c_ReferenceImageDim.y+ante.y+b)*c_ReferenceImageDim.x+ante.x+a; float4 deformation; if((ante.x+a)>-1 && (ante.y+b)>-1 && (ante.z+c)>-1 && (ante.x+a)<c_ReferenceImageDim.x && (ante.y+b)<c_ReferenceImageDim.y && (ante.z+c)<c_ReferenceImageDim.z){ deformation=tex1Dfetch(voxelDeformationTexture,index); } else{ deformation = get_SlidedValues_gpu((ante.x+a), (ante.y+b), (ante.z+c)); } float basis=relX[a]*relY[b]*relZ[c]; position=position+basis*deformation; } } } outDef[tid]=position; } } /* *************************************************************** */ __global__ void reg_defField_getJacobianMatrix3D_kernel(float *jacobianMatrices) { const unsigned int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(tid<c_VoxelNumber){ int3 imageSize = c_ReferenceImageDim; unsigned int tempIndex=tid; const int z = tempIndex/(imageSize.x*imageSize.y); tempIndex -= z*imageSize.x*imageSize.y; const int y = tempIndex/imageSize.x; const 
int x = tempIndex - y*imageSize.x; if(x==imageSize.x-1 || y==imageSize.y-1 || z==imageSize.z-1 ){ int index=tid*9; jacobianMatrices[index++]=1.0; jacobianMatrices[index++]=0.0; jacobianMatrices[index++]=0.0; jacobianMatrices[index++]=0.0; jacobianMatrices[index++]=1.0; jacobianMatrices[index++]=0.0; jacobianMatrices[index++]=0.0; jacobianMatrices[index++]=0.0; jacobianMatrices[index]=1.0; return; } float matrix[9]; int index=(z*imageSize.y+y)*imageSize.x+x; float4 deformation = tex1Dfetch(voxelDeformationTexture,index); matrix[0] = deformation.x * -1.f; matrix[1] = deformation.x * -1.f; matrix[2] = deformation.x * -1.f; matrix[3] = deformation.y * -1.f; matrix[4] = deformation.y * -1.f; matrix[5] = deformation.y * -1.f; matrix[6] = deformation.z * -1.f; matrix[7] = deformation.z * -1.f; matrix[8] = deformation.z * -1.f; deformation = tex1Dfetch(voxelDeformationTexture,index+1); matrix[0] += deformation.x * 1.f; matrix[3] += deformation.y * 1.f; matrix[6] += deformation.z * 1.f; index=(z*imageSize.y+y+1)*imageSize.x+x; deformation = tex1Dfetch(voxelDeformationTexture,index); matrix[1] += deformation.x * 1.f; matrix[4] += deformation.y * 1.f; matrix[7] += deformation.z * 1.f; index=((z+1)*imageSize.y+y)*imageSize.x+x; deformation = tex1Dfetch(voxelDeformationTexture,index); matrix[2] += deformation.x * 1.f; matrix[5] += deformation.y * 1.f; matrix[8] += deformation.z * 1.f; index=tid*9; jacobianMatrices[index++]=c_AffineMatrix0.x*matrix[0] + c_AffineMatrix0.y*matrix[3] + c_AffineMatrix0.z*matrix[6]; jacobianMatrices[index++]=c_AffineMatrix0.x*matrix[1] + c_AffineMatrix0.y*matrix[4] + c_AffineMatrix0.z*matrix[7]; jacobianMatrices[index++]=c_AffineMatrix0.x*matrix[2] + c_AffineMatrix0.y*matrix[5] + c_AffineMatrix0.z*matrix[8]; jacobianMatrices[index++]=c_AffineMatrix1.x*matrix[0] + c_AffineMatrix1.y*matrix[3] + c_AffineMatrix1.z*matrix[6]; jacobianMatrices[index++]=c_AffineMatrix1.x*matrix[1] + c_AffineMatrix1.y*matrix[4] + c_AffineMatrix1.z*matrix[7]; 
jacobianMatrices[index++]=c_AffineMatrix1.x*matrix[2] + c_AffineMatrix1.y*matrix[5] + c_AffineMatrix1.z*matrix[8]; jacobianMatrices[index++]=c_AffineMatrix2.x*matrix[0] + c_AffineMatrix2.y*matrix[3] + c_AffineMatrix2.z*matrix[6]; jacobianMatrices[index++]=c_AffineMatrix2.x*matrix[1] + c_AffineMatrix2.y*matrix[4] + c_AffineMatrix2.z*matrix[7]; jacobianMatrices[index] = c_AffineMatrix2.x*matrix[2] + c_AffineMatrix2.y*matrix[5] + c_AffineMatrix2.z*matrix[8]; } } /* *************************************************************** */ /* *************************************************************** */ /* *************************************************************** */ #endif
/* NOTE(review): stray "the_stack" token — file-concatenation artifact; two unrelated source files are joined at this point. */
DnnHandle init_cudnn(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, HighLevelRuntime *runtime) { assert(regions.size() == 0); assert(task->arglen == sizeof(size_t)); size_t workSpaceSize = *(const size_t*) task->args; DnnHandle handle; handle.workSpaceSize = workSpaceSize; printf("workSpaceSize = %zu\n", workSpaceSize); #ifndef DISABLE_COMPUTATION checkCUDA(cublasCreate(&handle.blas)); checkCUDNN(cudnnCreate(&handle.dnn)); #endif checkCUDA(cudaMalloc(&handle.workSpace, workSpaceSize)); return handle; } const SharedVariable SharedVariable::NO_VARIABLE = SharedVariable(); RnnOp::RnnOp(Tensor input, ParallelConfig pc, SharedVariable _params) : paraConfig(pc), params(_params) { inputs[0] = input; } RnnOp::RnnOp(Tensor t1, Tensor t2, Tensor t3, ParallelConfig pc, SharedVariable _params) : paraConfig(pc), params(_params) { inputs[0] = t1; inputs[1] = t2; inputs[2] = t3; } RnnOp::RnnOp(int n, Tensor *_inputs) { for (int i = 0; i < n; i++) { inputs[i] = _inputs[i]; } } RnnModel::RnnModel(int batch_size, int numLayers, int seqLength, int hidden_size, int embed_size, int vocab_size, int num_parts, int num_nodes, int num_gpus_per_node, GlobalConfig global, Context ctx, Runtime *runtime) { config.lg_ctx = ctx; config.lg_hlr = runtime; config.batchSize = batch_size; config.hiddenSize = hidden_size; config.embedSize = embed_size; config.vocabSize = vocab_size; config.numLayers = numLayers; config.seqLength = seqLength; config.numParts = num_parts; config.numNodes = num_nodes; config.workersPerNode = num_gpus_per_node; config.field_space = runtime->create_field_space(ctx); { FieldAllocator allocator = runtime->create_field_allocator(ctx, config.field_space); allocator.allocate_field(sizeof(float), FID_DATA); } Rect<1> part_rect(Point<1>(0), Point<1>(num_parts-1)); part_is = runtime->create_index_space(ctx, part_rect); assert(seqLength <= MAX_SEQ_LENGTH); assert(numLayers <= MAX_NUM_LAYERS); int nodes_per_layer = seqLength / LSTM_PER_NODE_LENGTH; // 
Create srcs/dsts tensors { Rect<2> word_rect(Point<2>(0, 0), Point<2>(batch_size-1, LSTM_PER_NODE_LENGTH-1)); IndexSpaceT<2> word_is = runtime->create_index_space(ctx, word_rect); int extent_n = batch_size / num_parts; Rect<2, coord_t> extent(Point<2>(0, 0), Point<2>(extent_n-1, LSTM_PER_NODE_LENGTH-1)); Transform<2, 1, coord_t> trans; trans[0][0] = extent_n; trans[1][0] = 0; IndexPartition word_ip = runtime->create_partition_by_restriction(ctx, word_is, part_is, trans, extent); assert(runtime->is_index_partition_disjoint(ctx, word_ip)); assert(runtime->is_index_partition_complete(ctx, word_ip)); assert(seqLength % LSTM_PER_NODE_LENGTH == 0); for (int i = 0; i < nodes_per_layer; i++) { srcs[i].numDim = 2; srcs[i].adim[0] = batch_size; srcs[i].adim[1] = LSTM_PER_NODE_LENGTH; srcs[i].pdim[0] = extent_n; srcs[i].pdim[1] = LSTM_PER_NODE_LENGTH; srcs[i].region = runtime->create_logical_region(ctx, word_is, config.field_space); srcs[i].partition = runtime->get_logical_partition(ctx, srcs[i].region, word_ip); srcs[i].region_grad = runtime->create_logical_region(ctx, word_is, config.field_space); srcs[i].partition_grad = runtime->get_logical_partition(ctx, srcs[i].region_grad, word_ip); dsts[i] = srcs[i]; dsts[i].region = runtime->create_logical_region(ctx, word_is, config.field_space); dsts[i].partition = runtime->get_logical_partition(ctx, dsts[i].region, word_ip); dsts[i].region_grad = runtime->create_logical_region(ctx, word_is, config.field_space); dsts[i].partition_grad = runtime->get_logical_partition(ctx, dsts[i].region_grad, word_ip); } } // Create zeroed tensors { Rect<2> hx_rect(Point<2>(0, 0), Point<2>(hidden_size-1, batch_size-1)); IndexSpaceT<2> hx_is = runtime->create_index_space(ctx, hx_rect); int extent_c = hidden_size; int extent_n = batch_size / num_parts; Rect<2> hx_ext(Point<2>(0, 0), Point<2>(extent_c-1, extent_n-1)); Transform<2, 1, coord_t> hx_trans; hx_trans[0][0] = 0; hx_trans[1][0] = extent_n; IndexPartition hx_ip = 
runtime->create_partition_by_restriction(ctx, hx_is, part_is, hx_trans, hx_ext); assert(runtime->is_index_partition_disjoint(ctx, hx_ip)); assert(runtime->is_index_partition_complete(ctx, hx_ip)); for (int i = 0; i < numLayers; i++) { for (int j = 0; j < 2; j++) { Tensor t; t.numDim = 2; t.adim[0] = hidden_size; t.adim[1] = batch_size; t.pdim[0] = extent_c; t.pdim[1] = extent_n; t.region = runtime->create_logical_region(ctx, hx_is, config.field_space); t.partition = runtime->get_logical_partition(ctx, t.region, hx_ip); t.region_grad = runtime->create_logical_region(ctx, hx_is, config.field_space); t.partition_grad = runtime->get_logical_partition(ctx, t.region_grad, hx_ip); if (j == 0) zero[i].hx = t; else zero[i].cx = t; } } } // Embedding SharedVariable srcEmbed, dstEmbed; { int numParams = config.vocabSize * config.embedSize; Rect<1> params_rect(Point<1>(0), Point<1>(numParams-1)); IndexSpaceT<1> params_is = runtime->create_index_space(ctx, params_rect); srcEmbed.region = runtime->create_logical_region(ctx, params_is, config.field_space); dstEmbed.region = runtime->create_logical_region(ctx, params_is, config.field_space); for (int i = 0; i < 2*nodes_per_layer; i++) { ParallelConfig pc = global.embed[i]; assert(pc.nDims == 1); for (int j = 0; j < pc.dim[0]; j++) { int gpuId = pc.gpu[j]; if (i < nodes_per_layer) { if (srcEmbed.gradients[gpuId] == LogicalRegion::NO_REGION) srcEmbed.gradients[gpuId] = runtime->create_logical_region(ctx, params_is, config.field_space); } else { if (dstEmbed.gradients[gpuId] == LogicalRegion::NO_REGION) dstEmbed.gradients[gpuId] = runtime->create_logical_region(ctx, params_is, config.field_space); } } } // Collect masterOnNode for srcEmbed/dstEmbed for (int i = 0; i < config.numNodes; i++) for (int j = config.workersPerNode-1; j >= 0; j--) { int gpuId = i * config.workersPerNode + j; if (srcEmbed.gradients[gpuId] != LogicalRegion::NO_REGION) srcEmbed.masterOnNode[i] = gpuId; if (dstEmbed.gradients[gpuId] != LogicalRegion::NO_REGION) 
dstEmbed.masterOnNode[i] = gpuId; } } // Encoders/decoders SharedVariable encoders[MAX_NUM_LAYERS], decoders[MAX_NUM_LAYERS]; for (int i = 0; i < numLayers; i++) { int input_size = (i==0) ? embed_size : hidden_size; int output_size = hidden_size; int numParams = (input_size + 1 + output_size + 1) * output_size * 4; Rect<1> params_rect(Point<1>(0), Point<1>(numParams-1)); IndexSpaceT<1> params_is = runtime->create_index_space(ctx, params_rect); encoders[i].region = runtime->create_logical_region(ctx, params_is, config.field_space); decoders[i].region = runtime->create_logical_region(ctx, params_is, config.field_space); for (int j = 0; j < 2*nodes_per_layer; j++) { ParallelConfig pc = global.lstm[i][j]; assert(pc.nDims == 1); for (int k = 0; k < pc.dim[0]; k++) { int gpuId = pc.gpu[k]; if (j < nodes_per_layer) { if (encoders[i].gradients[gpuId] == LogicalRegion::NO_REGION) encoders[i].gradients[gpuId] = runtime->create_logical_region(ctx, params_is, config.field_space); } else { if (decoders[i].gradients[gpuId] == LogicalRegion::NO_REGION) decoders[i].gradients[gpuId] = runtime->create_logical_region(ctx, params_is, config.field_space); } } } // Collect masterOnNode for encoders[i]/decoders[i] for (int j = 0; j < config.numNodes; j++) for (int k = config.workersPerNode-1; k >= 0; k--) { int gpuId = j * config.workersPerNode + k; if (encoders[i].gradients[gpuId] != LogicalRegion::NO_REGION) encoders[i].masterOnNode[j] = gpuId; if (decoders[i].gradients[gpuId] != LogicalRegion::NO_REGION) decoders[i].masterOnNode[j] = gpuId; } } SharedVariable linear; { int numParams = (hidden_size + 1) * vocab_size; Rect<1> params_rect(Point<1>(0), Point<1>(numParams-1)); IndexSpaceT<1> params_is = runtime->create_index_space(ctx, params_rect); linear.region = runtime->create_logical_region(ctx, params_is, config.field_space); linear.subregions[1] = linear.region; // Create subregions for the shared variable linear for (int parts = 2; parts <= MAX_NUM_PARTS; parts *= 2) { Rect<1> 
rect(Point<1>(0), Point<1>(parts-1)); IndexSpaceT<1> is = runtime->create_index_space(ctx, rect); IndexPartition ip = runtime->create_equal_partition(ctx, params_is, is); LogicalPartition lp = runtime->get_logical_partition(ctx, linear.region, ip); int idx = 0; for (PointInRectIterator<1> it(rect); it(); it++, idx++) { DomainPoint dp(*it); linear.subregions[parts+idx] = runtime->get_logical_subregion_by_color(ctx, lp, dp); } } // Compute bboxes for the shared variable linear // Also compute masterOnNode which is the largest gradients on each node std::map<int, Rect<1> > bboxes; for (int i = 0; i < nodes_per_layer; i++) { ParallelConfig pc = global.linear[i]; assert(pc.nDims == 2); for (int j = 0; j < pc.dim[1]; j++) for (int k = 0; k < pc.dim[0]; k++) { int gpuIdx = pc.gpu[j * pc.dim[0] +k]; Rect<1> rect = runtime->get_index_space_domain(ctx, linear.subregions[pc.dim[0]+k].get_index_space()); if (bboxes.find(gpuIdx) == bboxes.end()) bboxes[gpuIdx] = rect; else bboxes[gpuIdx] = bboxes[gpuIdx].union_bbox(rect); int nodeIdx = gpuIdx / config.workersPerNode; if (linear.masterOnNode[nodeIdx] == MASTER_NOT_ASSIGNED) linear.masterOnNode[nodeIdx] = gpuIdx; else { int masterIdx = linear.masterOnNode[nodeIdx]; if (bboxes[gpuIdx].volume() > bboxes[masterIdx].volume()) linear.masterOnNode[nodeIdx] = gpuIdx; } } } // The first bbox on each node is a superset of all bboxes on that node for (int n = 0; n < config.numNodes; n++) if (linear.masterOnNode[n] != MASTER_NOT_ASSIGNED) { for (int j = 0; j < config.workersPerNode; j++) if (bboxes.find(n * config.workersPerNode + j) != bboxes.end()) { Rect<1> rect = bboxes[n * config.workersPerNode + j]; bboxes[linear.masterOnNode[n]] = bboxes[linear.masterOnNode[n]].union_bbox(rect); } } for (int i = 0; i < config.numNodes * config.workersPerNode; i++) if (bboxes.find(i) != bboxes.end()) { IndexSpaceT<1> params_is = runtime->create_index_space(ctx, bboxes[i]); linear.gradients[i] = runtime->create_logical_region(ctx, params_is, 
config.field_space); } else linear.gradients[i] = LogicalRegion::NO_REGION; } Tensor embed[2*MAX_SEQ_LENGTH]; for (int i = 0; i < 2*nodes_per_layer; i++) { embed[i] = add_embed_node(i < nodes_per_layer ? srcs[i] : dsts[i-nodes_per_layer], config.vocabSize, config.embedSize, global.embed[i], i < nodes_per_layer ? srcEmbed : dstEmbed); } for (int i = 0; i < numLayers; i++) { // Add encoder lstm nodes for (int j = 0; j < nodes_per_layer; j++) { Tensor x = (i==0) ? embed[j] : lstm[i-1][j].x; Tensor hx = (j==0) ? zero[i].hx : lstm[i][j-1].hx; Tensor cx = (j==0) ? zero[i].cx : lstm[i][j-1].cx; lstm[i][j] = add_lstm_node(x, hx, cx, global.lstm[i][j], encoders[i]); } // Add decoder lstm nodes for (int j = nodes_per_layer; j < 2*nodes_per_layer; j++) { Tensor x = (i==0) ? embed[j] : lstm[i-1][j].x; Tensor hx = lstm[i][j-1].hx; Tensor cx = lstm[i][j-1].cx; lstm[i][j] = add_lstm_node(x, hx, cx, global.lstm[i][j], decoders[i]); } } // Add linear nodes for (int j = nodes_per_layer; j < 2*nodes_per_layer; j++) { Tensor logit = add_linear_node(lstm[numLayers-1][j].x, vocab_size, global.linear[j-nodes_per_layer], linear); add_softmaxDP_node(logit, dsts[j-nodes_per_layer], global.softmax[j-nodes_per_layer]); } // Add shared variables sharedVariables.push_back(srcEmbed); sharedVariables.push_back(dstEmbed); for (int i = 0; i < config.numLayers; i++) { sharedVariables.push_back(encoders[i]); sharedVariables.push_back(decoders[i]); } sharedVariables.push_back(linear); } void RnnModel::word_init_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { Rect<2> rect0 = runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space()); int *host_ptr; bool same = *((bool*) task->args); checkCUDA(cudaHostAlloc(&host_ptr, sizeof(int) * rect0.volume(), cudaHostAllocPortable | cudaHostAllocMapped)); for (int i = 0; i < rect0.volume(); i++) host_ptr[i] = same ? 
1 : i % 16; for (int i = 0; i < regions.size(); i++) { const AccessorWO<int, 2> acc(regions[i], FID_DATA); Rect<2> rect = runtime->get_index_space_domain(ctx, task->regions[i].region.get_index_space()); assert(acc.accessor.is_dense_arbitrary(rect)); assert(rect == rect0); int *ptr = acc.ptr(rect.lo); checkCUDA(cudaMemcpy(ptr, host_ptr, sizeof(int) * rect0.volume(), cudaMemcpyHostToDevice)); } checkCUDA(cudaFreeHost(host_ptr)); } void RnnModel::init() { Context ctx = config.lg_ctx; Runtime* runtime = config.lg_hlr; // Init words Rect<1> part_rect = runtime->get_index_space_domain(ctx, part_is); for (PointInRectIterator<1> it(part_rect); it(); it++) { int idx = 0; bool same = false; TaskLauncher launcher(WORD_INIT_TASK_ID, TaskArgument(&same, sizeof(same)), Predicate::TRUE_PRED, 0/*MapperID*/, RnnMapper::assign_to_gpu(0)); DomainPoint dp(*it); for (int i = 0; i * LSTM_PER_NODE_LENGTH < config.seqLength; i++) { LogicalRegion x = runtime->get_logical_subregion_by_color(srcs[i].partition, dp); launcher.add_region_requirement( RegionRequirement(x, WRITE_ONLY, EXCLUSIVE, srcs[i].region)); launcher.add_field(idx++, FID_DATA); } for (int i = 0; i * LSTM_PER_NODE_LENGTH < config.seqLength; i++) { LogicalRegion x = runtime->get_logical_subregion_by_color(dsts[i].partition, dp); launcher.add_region_requirement( RegionRequirement(x, WRITE_ONLY, EXCLUSIVE, dsts[i].region)); launcher.add_field(idx++, FID_DATA); } Future f = runtime->execute_task(ctx, launcher); f.get_void_result(); } // Init zero tensors for (PointInRectIterator<1> it(part_rect); it(); it++) { int idx = 0; TaskLauncher launcher(ZERO_2D_INIT_TASK_ID, TaskArgument(NULL, 0), Predicate::TRUE_PRED, 0, RnnMapper::assign_to_gpu(0)); DomainPoint dp(*it); for (int i = 0; i < config.numLayers; i++) { LogicalRegion hx = runtime->get_logical_subregion_by_color(zero[i].hx.partition, dp); launcher.add_region_requirement( RegionRequirement(hx, WRITE_ONLY, EXCLUSIVE, zero[i].hx.region)); launcher.add_field(idx++, FID_DATA); } 
for (int i = 0; i < config.numLayers; i++) { LogicalRegion cx = runtime->get_logical_subregion_by_color(zero[i].cx.partition, dp); launcher.add_region_requirement( RegionRequirement(cx, WRITE_ONLY, EXCLUSIVE, zero[i].cx.region)); launcher.add_field(idx++, FID_DATA); } Future f = runtime->execute_task(ctx, launcher); f.get_void_result(); } // Init hx_grad/cx_grad for the last LSTM node on each layer int nodes_per_layer = config.seqLength / LSTM_PER_NODE_LENGTH; for (PointInRectIterator<1> it(part_rect); it(); it++) { int idx = 0; TaskLauncher launcher(ZERO_2D_INIT_TASK_ID, TaskArgument(NULL, 0), Predicate::TRUE_PRED, 0, RnnMapper::assign_to_gpu(0)); DomainPoint dp(*it); for (int i = 0; i < config.numLayers; i++) { LSTMTensors last_lstm = lstm[i][2 * nodes_per_layer - 1]; // hx LogicalRegion hx_grad = runtime->get_logical_subregion_by_color(last_lstm.hx.partition_grad, dp); launcher.add_region_requirement( RegionRequirement(hx_grad, WRITE_ONLY, EXCLUSIVE, last_lstm.hx.region_grad)); launcher.add_field(idx++, FID_DATA); // cx LogicalRegion cx_grad = runtime->get_logical_subregion_by_color(last_lstm.cx.partition_grad, dp); launcher.add_region_requirement( RegionRequirement(cx_grad, WRITE_ONLY, EXCLUSIVE, last_lstm.cx.region_grad)); launcher.add_field(idx++, FID_DATA); } Future f = runtime->execute_task(ctx, launcher); f.get_void_result(); } // TODO: to be removed when we have attention layers // Init y_grad for the decoder lstm nodes for (PointInRectIterator<1> it(part_rect); it(); it++) { int idx = 0; TaskLauncher launcher(ZERO_3D_INIT_TASK_ID, TaskArgument(NULL, 0), Predicate::TRUE_PRED, 0, RnnMapper::assign_to_gpu(0)); DomainPoint dp(*it); for (int i = 0; i < nodes_per_layer; i++) { LSTMTensors top_lstm = lstm[config.numLayers - 1][i]; LogicalRegion y_grad = runtime->get_logical_subregion_by_color(top_lstm.x.partition_grad, dp); launcher.add_region_requirement( RegionRequirement(y_grad, WRITE_ONLY, EXCLUSIVE, top_lstm.x.region_grad)); launcher.add_field(idx++, 
FID_DATA);
  }
  Future f = runtime->execute_task(ctx, launcher);
  f.get_void_result();
  }
  // Init shared variables
  for (int i = 0; i < sharedVariables.size(); i++)
    init_shared_variable(sharedVariables[i]);
  for (size_t i = 0; i < layers.size(); i++)
    layers[i]->init(*this);
}

// Task body: zero-fill every 3D float region attached to this task.
// One region requirement per tensor; the fill kernel is launched on the
// default stream.
void RnnModel::zero_3d_init_task(const Task *task,
                                 const std::vector<PhysicalRegion> &regions,
                                 Context ctx, Runtime *runtime)
{
  // fix: size_t avoids the signed/unsigned comparison with regions.size()
  for (size_t i = 0; i < task->regions.size(); i++) {
    const AccessorWO<float, 3> acc_w(regions[i], FID_DATA);
    Rect<3> rect_w = runtime->get_index_space_domain(
        ctx, task->regions[i].region.get_index_space());
    assert(acc_w.accessor.is_dense_arbitrary(rect_w));
    float *w_ptr = acc_w.ptr(rect_w.lo);
    assign_kernel<<<GET_BLOCKS(rect_w.volume()), CUDA_NUM_THREADS>>>(
        w_ptr, rect_w.volume(), 0.0f);
  }
}

// Task body: zero-fill every 2D float region attached to this task.
void RnnModel::zero_2d_init_task(const Task *task,
                                 const std::vector<PhysicalRegion> &regions,
                                 Context ctx, Runtime *runtime)
{
  for (size_t i = 0; i < task->regions.size(); i++) {
    const AccessorWO<float, 2> acc_w(regions[i], FID_DATA);
    Rect<2> rect_w = runtime->get_index_space_domain(
        ctx, task->regions[i].region.get_index_space());
    assert(acc_w.accessor.is_dense_arbitrary(rect_w));
    float *w_ptr = acc_w.ptr(rect_w.lo);
    assign_kernel<<<GET_BLOCKS(rect_w.volume()), CUDA_NUM_THREADS>>>(
        w_ptr, rect_w.volume(), 0.0f);
  }
}

// Task body: zero-fill every 1D float region attached to this task.
void RnnModel::zero_1d_init_task(const Task *task,
                                 const std::vector<PhysicalRegion> &regions,
                                 Context ctx, Runtime *runtime)
{
  for (size_t i = 0; i < task->regions.size(); i++) {
    const AccessorWO<float, 1> acc_w(regions[i], FID_DATA);
    Rect<1> rect_w = runtime->get_index_space_domain(
        ctx, task->regions[i].region.get_index_space());
    assert(acc_w.accessor.is_dense_arbitrary(rect_w));
    float *w_ptr = acc_w.ptr(rect_w.lo);
    assign_kernel<<<GET_BLOCKS(rect_w.volume()), CUDA_NUM_THREADS>>>(
        w_ptr, rect_w.volume(), 0.0f);
  }
}

// No-op task; used only to force the mapper to prefetch / place a region
// on a particular GPU (see RnnModel::forward step 1).
void RnnModel::dummy_task(const Task *task,
                          const std::vector<PhysicalRegion> &regions,
                          Context ctx, Runtime *runtime)
{}

// One forward pass over the whole model:
//   1. dummy tasks prefetch each shared variable onto its master GPU,
//   2. per-GPU gradient buffers are zeroed,
//   3. each layer's forward tasks are launched in order.
void RnnModel::forward()
{
  config.iterator++;
  Context ctx = config.lg_ctx;
  Runtime* runtime = config.lg_hlr;
  // Step 1: launch dummy tasks to prefetch shared variables
  for (size_t i = 0; i < sharedVariables.size(); i++) {
    for (int n = 0; n < config.numNodes; n++)
      if (sharedVariables[i].masterOnNode[n] != MASTER_NOT_ASSIGNED) {
        int gpuId = sharedVariables[i].masterOnNode[n];
        TaskLauncher launcher(DUMMY_TASK_ID, TaskArgument(NULL, 0),
                              Predicate::TRUE_PRED, 0,
                              RnnMapper::assign_to_gpu(gpuId));
        launcher.add_region_requirement(
            RegionRequirement(sharedVariables[i].region, READ_ONLY,
                              EXCLUSIVE, sharedVariables[i].region));
        launcher.add_field(0, FID_DATA);
        runtime->execute_task(ctx, launcher);
      }
  }
  runtime->issue_mapping_fence(ctx);
  // Step 2: zero gradients
  for (size_t i = 0; i < sharedVariables.size(); i++)
    for (int j = 0; j < config.workersPerNode * config.numNodes; j++)
      if (sharedVariables[i].gradients[j] != LogicalRegion::NO_REGION) {
        TaskLauncher launcher(ZERO_1D_INIT_TASK_ID, TaskArgument(NULL, 0),
                              Predicate::TRUE_PRED, 0,
                              RnnMapper::assign_to_gpu(j));
        LogicalRegion gradient = sharedVariables[i].gradients[j];
        launcher.add_region_requirement(
            RegionRequirement(gradient, WRITE_ONLY, EXCLUSIVE, gradient));
        launcher.add_field(0, FID_DATA);
        runtime->execute_task(ctx, launcher);
      }
  // Step 3: launch forward tasks
  for (size_t i = 0; i < layers.size(); i++) {
    layers[i]->forward(*this);
  }
}

// Backward pass: layers are visited in reverse order.
void RnnModel::backward()
{
  for (int i = layers.size() - 1; i >= 0; i--) {
    layers[i]->backward(*this);
  }
}

// Apply accumulated gradients to every shared variable.
void RnnModel::update()
{
  for (int i = sharedVariables.size() - 1; i >= 0; i--)
    update_shared_variable(sharedVariables[i]);
}

/*
 * regions[0](O): w
 * Fills the weight region with uniform random values in [-value, value),
 * where value arrives through task->args.
 */
void RnnModel::params_init_task(const Task *task,
                                const std::vector<PhysicalRegion> &regions,
                                Context ctx, Runtime *runtime)
{
  assert(regions.size() == 1);
  assert(task->regions.size() == 1);
  float value = *((float*) task->args);
  const AccessorWO<float, 1> acc_w(regions[0], FID_DATA);
  Rect<1> rect_w = runtime->get_index_space_domain(
      ctx, task->regions[0].region.get_index_space());
  assert(acc_w.accessor.is_dense_arbitrary(rect_w));
  float *w_ptr = acc_w.ptr(rect_w.lo);
  cudaStream_t stream;
  checkCUDA(cudaStreamCreate(&stream));
  curandGenerator_t genGPU;
  curandCreateGenerator(&genGPU, CURAND_RNG_PSEUDO_DEFAULT);
  curandSetStream(genGPU, stream);
  curandSetPseudoRandomGeneratorSeed(genGPU, 1234LL);
  curandGenerateUniform(genGPU, w_ptr, rect_w.volume());
  checkCUDA(cudaDeviceSynchronize());
  // Map the uniform [0,1) samples onto [-value, value)
  scale_kernel<<<GET_BLOCKS(rect_w.volume()), CUDA_NUM_THREADS>>>(
      w_ptr, rect_w.volume(), -value, value);
  //assign_kernel<<<GET_BLOCKS(rect_w.volume()), CUDA_NUM_THREADS>>>(
  //    w_ptr, rect_w.volume(), value);
  // fix: the generator and the stream were leaked on every invocation of
  // this task; release both before returning
  curandDestroyGenerator(genGPU);
  checkCUDA(cudaStreamDestroy(stream));
}

// Launch params_init_task on the GPU that masters this variable on node 0
// and block until initialization finishes.
void RnnModel::init_shared_variable(SharedVariable params)
{
  Context ctx = config.lg_ctx;
  Runtime* runtime = config.lg_hlr;
  float value = 0.1f;
  TaskLauncher launcher(PARAMS_INIT_TASK_ID,
                        TaskArgument(&value, sizeof(value)),
                        Predicate::TRUE_PRED, 0/*MapperID*/,
                        RnnMapper::assign_to_gpu(params.masterOnNode[0]));
  launcher.add_region_requirement(
      RegionRequirement(params.region, WRITE_ONLY, EXCLUSIVE, params.region));
  launcher.add_field(0, FID_DATA);
  Future f = runtime->execute_task(ctx, launcher);
  f.get_void_result();
}

/*
 * regions[0]: (I/O): w
 * regions[1..]: (O): w_grad
 * Accumulates every gradient region into the sub-range of w it covers:
 * w += rate * w_grad.
 */
void RnnModel::params_update_task(const Task *task,
                                  const std::vector<PhysicalRegion> &regions,
                                  Context ctx, Runtime *runtime)
{
  assert(regions.size() == task->regions.size());
  float rate = *((float*)task->args);
  const AccessorRW<float, 1> acc_w(regions[0], FID_DATA);
  Rect<1> rect_w = runtime->get_index_space_domain(
      ctx, task->regions[0].region.get_index_space());
  assert(acc_w.accessor.is_dense_arbitrary(rect_w));
  // fix: size_t avoids the signed/unsigned comparison with regions.size()
  for (size_t i = 1; i < regions.size(); i++) {
    const AccessorRO<float, 1> acc_w_grad(regions[i], FID_DATA);
    Rect<1> rect_w_grad = runtime->get_index_space_domain(
        ctx, task->regions[i].region.get_index_space());
    assert(rect_w.contains(rect_w_grad));
    assert(acc_w_grad.accessor.is_dense_arbitrary(rect_w_grad));
    // Each gradient may cover only a sub-rectangle of w; offset into w
    // by the gradient's own lower bound.
    float *w_ptr = acc_w.ptr(rect_w_grad.lo);
    const float *w_grad_ptr = acc_w_grad.ptr(rect_w_grad.lo);
    apply_add_with_scale<<<GET_BLOCKS(rect_w_grad.volume()), CUDA_NUM_THREADS>>>(
        w_ptr, w_grad_ptr, rect_w_grad.volume(), rate);
#ifdef PRINT_INTERMEDIATE_RESULT
    print_tensor<1, float>(w_grad_ptr, rect_w_grad, "partial_w");
#endif
  }
#ifdef PRINT_INTERMEDIATE_RESULT
  float *w_ptr = acc_w.ptr(rect_w.lo);
  print_tensor<1, float>(w_ptr, rect_w, "final_w");
#endif
}

// Two-stage gradient reduction for one shared variable:
//   Stage 1 (rate = 1): on each node, fold the node's worker gradients
//   into the node-master gradient region.
//   Stage 2 (rate = -0.1): fold all node-master gradients into the
//   parameter region itself (gradient-descent step).
void RnnModel::update_shared_variable(SharedVariable params)
{
  Context ctx = config.lg_ctx;
  Runtime* runtime = config.lg_hlr;
  //for (int i = 0; i < config.workersPerNode; i++)
  //  if (params.gradients[i] != LogicalRegion::NO_REGION) {
  //    Rect<1> rect =
  //      runtime->get_index_space_domain(ctx, params.gradients[i].get_index_space());
  //    printf("rect[%d]: lo(%d) hi(%d)\n", i, rect.lo[0], rect.hi[0]);
  //  }
  float rate = 1.0f;
  for (int node = 0; node < config.numNodes; node++)
    if (params.masterOnNode[node] != MASTER_NOT_ASSIGNED) {
      TaskLauncher launcher(PARAMS_UPD_TASK_ID,
                            TaskArgument(&rate, sizeof(rate)),
                            Predicate::TRUE_PRED, 0/*MapperID*/,
                            RnnMapper::assign_to_gpu(params.masterOnNode[node]));
      LogicalRegion masterGrad = params.gradients[params.masterOnNode[node]];
      assert(masterGrad != LogicalRegion::NO_REGION);
      launcher.add_region_requirement(
          RegionRequirement(masterGrad, READ_WRITE, EXCLUSIVE, masterGrad));
      launcher.add_field(0, FID_DATA);
      int cnt = 1;
      for (int idx = 0; idx < config.workersPerNode; idx++) {
        int gpuIdx = node * config.workersPerNode + idx;
        if (gpuIdx == params.masterOnNode[node]) continue;
        LogicalRegion grad = params.gradients[gpuIdx];
        if (grad == LogicalRegion::NO_REGION) continue;
        launcher.add_region_requirement(
            RegionRequirement(grad, READ_ONLY, EXCLUSIVE, grad));
        launcher.add_field(cnt++, FID_DATA);
      }
      //printf("Step 1: cnt = %d\n", cnt);
      runtime->execute_task(ctx, launcher);
    }
  rate = -0.1f;
  TaskLauncher launcher(PARAMS_UPD_TASK_ID,
                        TaskArgument(&rate, sizeof(rate)),
                        Predicate::TRUE_PRED, 0/*MapperID*/,
                        RnnMapper::assign_to_gpu(params.masterOnNode[0]));
  launcher.add_region_requirement(
      RegionRequirement(params.region, READ_WRITE, EXCLUSIVE, params.region));
  launcher.add_field(0, FID_DATA);
  int cnt = 1;
  for (int node = 0; node < config.numNodes; node++)
    if (params.masterOnNode[node] != MASTER_NOT_ASSIGNED) {
      int gpuIdx = params.masterOnNode[node];
      LogicalRegion grad = params.gradients[gpuIdx];
      assert(grad != LogicalRegion::NO_REGION);
      launcher.add_region_requirement(
          RegionRequirement(grad, READ_ONLY, EXCLUSIVE, grad));
      launcher.add_field(cnt++, FID_DATA);
    }
  //printf("Step 2: cnt = %d\n", cnt);
  runtime->execute_task(ctx, launcher);
}
the_stack
// The key generator: produces keys_per_set unique keys per set, placed
// into the set chosen by set_hasher so the cache's set mapping is exercised
// deterministically.
template<typename T, typename set_hasher = MurmurHash3_32<T>>
class KeyGenerator {
public:
  KeyGenerator(): gen_(rd_()) {}
  KeyGenerator(T min, T max): gen_(rd_()), distribution_(min, max) {}

  // Fill data with keys_per_set * num_of_set unique keys, grouped by the
  // set each key hashes to. empty_value is never emitted.
  void fill_unique(T* data, size_t keys_per_set, size_t num_of_set, T empty_value) {
    if (keys_per_set == 0 || num_of_set == 0) {
      return;
    }
    assert(distribution_.max() - distribution_.min() >= keys_per_set * num_of_set);
    std::unordered_set<T> set;
    std::vector<size_t> set_sz(num_of_set, 0);
    size_t sz = 0;
    while (sz < keys_per_set * num_of_set) {
      T x = distribution_(gen_);
      if (x == empty_value) {
        continue;
      }
      auto res = set.insert(x);
      if (res.second) {
        // Route the key to its home set; discard it if that set is full.
        size_t src_set = set_hasher::hash(x) % num_of_set;
        if (set_sz[src_set] < keys_per_set) {
          data[src_set * keys_per_set + set_sz[src_set]] = x;
          set_sz[src_set]++;
          sz++;
        }
      }
    }
    assert(sz == keys_per_set * num_of_set);
    for (size_t i = 0; i < num_of_set; i++) {
      assert(set_sz[i] == keys_per_set);
    }
  }

private:
  std::random_device rd_;
  std::mt19937 gen_;
  std::uniform_int_distribution<T> distribution_;
};

// The random number generator: uniform, unique, never equal to empty_value.
template<typename T>
class IntGenerator {
public:
  IntGenerator(): gen_(rd_()) {}
  IntGenerator(T min, T max): gen_(rd_()), distribution_(min, max) {}

  void fill_unique(T* data, size_t len, T empty_value) {
    if (len == 0) {
      return;
    }
    assert(distribution_.max() - distribution_.min() >= len);
    std::unordered_set<T> set;
    size_t sz = 0;
    while (sz < len) {
      T x = distribution_(gen_);
      if (x == empty_value) {
        continue;
      }
      auto res = set.insert(x);
      if (res.second) {
        data[sz++] = x;
      }
    }
    assert(sz == set.size());
    assert(sz == len);
  }

private:
  std::random_device rd_;
  std::mt19937 gen_;
  std::uniform_int_distribution<T> distribution_;
};

// Normal-distribution generator of unique integer values clamped to [min, max].
template<typename T>
class IntGenerator_normal {
public:
  IntGenerator_normal(): gen_(rd_()) {}
  IntGenerator_normal(double mean, double dev): gen_(rd_()), distribution_(mean, dev) {}

  void fill_unique(T* data, size_t len, T min, T max) {
    if (len == 0) {
      return;
    }
    std::unordered_set<T> set;
    size_t sz = 0;
    while (sz < len) {
      // fix: the original applied unqualified abs() to the double sample,
      // which resolves to the integer overload when std::abs is not in
      // scope and truncates before taking the absolute value. Fold the
      // sign explicitly instead.
      double v = distribution_(gen_);
      if (v < 0.0) {
        v = -v;
      }
      T x = (T)v;
      if (x < min || x > max) {
        continue;
      }
      auto res = set.insert(x);
      if (res.second) {
        data[sz++] = x;
      }
    }
    assert(sz == set.size());
    assert(sz == len);
  }

private:
  std::random_device rd_;
  std::mt19937 gen_;
  std::normal_distribution<double> distribution_;
};

// Utility to fill len embedding vectors: every component of vector i is
// ratio * keys[i], so results can be verified from the key alone.
template<typename KeyType>
void fill_vec(const KeyType* keys, float* vals, size_t embedding_vec_size,
              size_t len, float ratio) {
  for (size_t i = 0; i < len; ++i) {
    for (size_t j = 0; j < embedding_vec_size; ++j) {
      vals[i * embedding_vec_size + j] = (float)(ratio * keys[i]);
    }
  }
}

// Floating-point compare function: relative tolerance of 1e-6 against the
// smaller operand.
template<typename T>
bool is_near(T a, T b) {
  // fix: the original called unqualified abs() on the difference, which
  // resolves to the integer overload when std::abs/fabs is not in scope
  // and silently truncates the fractional part. Compute |a - b| directly.
  double diff = (a > b) ? (double)(a - b) : (double)(b - a);
  bool ret = diff <= std::min(a, b) * 1e-6;
  if (!ret) {
    std::cerr << "error: " << a << " != " << b << "; diff = " << diff << std::endl;
  }
  return ret;
}

// Check correctness of result: vals[i][*] must equal ratio * keys[i].
template<typename KeyType>
void check_result(const KeyType* keys, const float* vals,
                  size_t embedding_vec_size, size_t len, float ratio) {
  for (size_t i = 0; i < len; ++i) {
    for (size_t j = 0; j < embedding_vec_size; ++j) {
      assert(is_near(vals[i * embedding_vec_size + j], (float)(ratio * keys[i])));
    }
  }
}

// Compare two sequences of keys and check whether they are the same
// (but with different order).
template<typename KeyType>
void compare_key(const KeyType* sequence_a, const KeyType* sequence_b, size_t len) {
  // Temp buffers for sorting
  KeyType* sequence_a_copy = (KeyType*)malloc(len * sizeof(KeyType));
  KeyType* sequence_b_copy = (KeyType*)malloc(len * sizeof(KeyType));
  // Copy data to temp buffers
  memcpy(sequence_a_copy, sequence_a, len * sizeof(KeyType));
  memcpy(sequence_b_copy, sequence_b, len * sizeof(KeyType));
  // Sort both arrays
  std::sort(sequence_a_copy, sequence_a_copy + len);
  std::sort(sequence_b_copy, sequence_b_copy + len);
  // Linearly compare elements
  for (size_t i = 0; i < len; i++) {
    assert(sequence_a_copy[i] == sequence_b_copy[i]);
  }
  // Free temp buffers
  free(sequence_a_copy);
  free(sequence_b_copy);
}

/* Timing funtion: wall-clock seconds since the epoch. */
double W_time() {
  timeval marker;
  gettimeofday(&marker, NULL);
  return ((double)(marker.tv_sec) * 1e6 + (double)(marker.tv_usec)) * 1e-6;
}

using key_type = long long;
using ref_counter_type = uint64_t;

// Test driver: (1) multi-threaded Query/Replace stress test with a
// normal-distribution access pattern, (2) single-threaded Update/Dump
// correctness test.
int main(int argc, char **argv) {
  if (argc != 7) {
    std::cerr << "usage: " << argv[0]
              << " embedding_table_size cache_capacity_in_set embedding_vec_size query_length iter_round num_threads"
              << std::endl;
    return -1;
  }
  // Arguments
  const size_t emb_size = atoi(argv[1]);
  const size_t cache_capacity_in_set = atoi(argv[2]);
  const size_t embedding_vec_size = atoi(argv[3]);
  const size_t query_length = atoi(argv[4]);
  const size_t iter_round = atoi(argv[5]);
  const size_t num_threads = atoi(argv[6]);

  // Since cache is designed for single-gpu, all threads just use GPU 0
  CUDA_CHECK(cudaSetDevice(0));

  // Host side buffers shared between threads
  key_type* h_keys;  // Buffer holding all keys in embedding table
  float* h_vals;     // Buffer holding all values in embedding table
  // host-only buffers placed in normal host memory
  h_keys = (key_type*)malloc(emb_size * sizeof(key_type));
  h_vals = (float*)malloc(emb_size * embedding_vec_size * sizeof(float));
  // The empty key to be used
  const key_type empty_key = std::numeric_limits<key_type>::max();

  // The cache to be used, by default the set hasher is based on MurMurHash
  // and slab hasher is based on Mod.
  using Cache_ = gpu_cache::gpu_cache<key_type, ref_counter_type, empty_key,
                                      SET_ASSOCIATIVITY, SLAB_SIZE>;
  // Create GPU embedding cache
  auto cache = new Cache_(cache_capacity_in_set, embedding_vec_size);
  // For random unique keys generation
  IntGenerator<key_type> gen_key;
  float increase = 0.1f;

  // Start 1st test
  std::cout << "****************************************" << std::endl;
  std::cout << "****************************************" << std::endl;
  std::cout << "Start Single-GPU Thread-safe Query and Replace API test " << std::endl;
  // Timimg variables
  double time_a;
  double time_b;
  time_a = W_time();
  std::cout << "Filling data" << std::endl;
  // Generating random unique keys
  gen_key.fill_unique(h_keys, emb_size, empty_key);
  // Filling values vector according to the keys
  fill_vec(h_keys, h_vals, embedding_vec_size, emb_size, increase);
  // Elapsed wall time
  time_b = W_time() - time_a;
  std::cout << "The Elapsed time for filling data is: " << time_b << "sec." << std::endl;

  // Insert <k,v> pairs to embedding table (CPU hashtable)
  time_a = W_time();
  std::cout << "Filling embedding table" << std::endl;
  std::unordered_map<key_type, std::vector<float>> h_emb_table;
  for (size_t i = 0; i < emb_size; i++) {
    std::vector<float> emb_vec(embedding_vec_size);
    for (size_t j = 0; j < embedding_vec_size; j++) {
      emb_vec[j] = h_vals[i * embedding_vec_size + j];
    }
    h_emb_table.emplace(h_keys[i], emb_vec);
  }
  // Elapsed wall time
  time_b = W_time() - time_a;
  std::cout << "The Elapsed time for filling embedding table is: " << time_b << "sec."
            << std::endl;
  // Free value buffer
  free(h_vals);

#pragma omp parallel default(none) shared(h_keys, cache, h_emb_table, increase, embedding_vec_size, query_length, emb_size, iter_round) num_threads(num_threads)
  {
    // The thread ID for this thread
    int thread_id = omp_get_thread_num();
    printf("Worker %d starts testing cache.\n", thread_id);
    // Since cache is designed for single-gpu, all threads just use GPU 0
    CUDA_CHECK(cudaSetDevice(0));

    // Thread-private host side buffers
    size_t* h_query_keys_index;  // Buffer holding index for keys to be queried
    key_type* h_query_keys;      // Buffer holding keys to be queried
    float* h_vals_retrieved;     // Buffer holding values retrieved from query
    key_type* h_missing_keys;    // Buffer holding missing keys from query
    float* h_missing_vals;       // Buffer holding values for missing keys
    uint64_t* h_missing_index;   // Buffers holding missing index from query
    size_t h_missing_len;        // missing length
    // Thread-private device side buffers
    key_type* d_query_keys;      // Buffer holding keys to be queried
    float* d_vals_retrieved;     // Buffer holding values retrieved from query
    key_type* d_missing_keys;    // Buffer holding missing keys from query
    float* d_missing_vals;       // Buffer holding values for missing keys
    uint64_t* d_missing_index;   // Buffers holding missing index from query
    size_t* d_missing_len;       // missing length

    // host-only buffers placed in normal host memory
    h_query_keys_index = (size_t*)malloc(query_length * sizeof(size_t));
    // host-device interactive buffers placed in pinned memory
    CUDA_CHECK(cudaHostAlloc((void**)&h_query_keys,
                             query_length * sizeof(key_type),
                             cudaHostAllocPortable));
    CUDA_CHECK(cudaHostAlloc((void**)&h_vals_retrieved,
                             query_length * embedding_vec_size * sizeof(float),
                             cudaHostAllocPortable));
    CUDA_CHECK(cudaHostAlloc((void**)&h_missing_keys,
                             query_length * sizeof(key_type),
                             cudaHostAllocPortable));
    CUDA_CHECK(cudaHostAlloc((void**)&h_missing_vals,
                             query_length * embedding_vec_size * sizeof(float),
                             cudaHostAllocPortable));
    CUDA_CHECK(cudaHostAlloc((void**)&h_missing_index,
                             query_length * sizeof(uint64_t),
                             cudaHostAllocPortable));
    // Allocate device side buffers
    CUDA_CHECK(cudaMalloc((void**)&d_query_keys, query_length * sizeof(key_type)));
    CUDA_CHECK(cudaMalloc((void**)&d_vals_retrieved,
                          query_length * embedding_vec_size * sizeof(float)));
    CUDA_CHECK(cudaMalloc((void**)&d_missing_keys, query_length * sizeof(key_type)));
    CUDA_CHECK(cudaMalloc((void**)&d_missing_vals,
                          query_length * embedding_vec_size * sizeof(float)));
    CUDA_CHECK(cudaMalloc((void**)&d_missing_index, query_length * sizeof(uint64_t)));
    CUDA_CHECK(cudaMalloc((void**)&d_missing_len, sizeof(size_t)));

    // Thread-private CUDA stream, all threads just use the #0 device
    cudaStream_t stream;
    CUDA_CHECK(cudaStreamCreate(&stream));
    // Timimg variables
    double time_1;
    double time_2;

    /******************************************************************************
    *******************************************************************************
    ********************************Test start*************************************
    *******************************************************************************
    *******************************************************************************/
    // Normal-distribution random number generator
    size_t foot_print = emb_size - 1;  // Memory footprint for access the keys within the key buffer
    double mean = (double)(emb_size / 2);    // mean for normal distribution
    double dev = (double)(2 * query_length); // dev for normal distribution
    // IntGenerator<size_t> uni_gen(0, foot_print);
    // Normal-distribution random number generator
    IntGenerator_normal<size_t> normal_gen(mean, dev);

    // Start normal distribution cache test
    printf("Worker %d : normal distribution test start.\n", thread_id);
    for (size_t i = 0; i < iter_round; i++) {
      // Generate random index with normal-distribution
      normal_gen.fill_unique(h_query_keys_index, query_length, 0, foot_print);
      // Select keys from total keys buffer with the index
      for (size_t j = 0; j < query_length; j++) {
        h_query_keys[j] = h_keys[h_query_keys_index[j]];
      }
      // Copy the keys to GPU memory
      CUDA_CHECK(cudaMemcpyAsync(d_query_keys, h_query_keys,
                                 query_length * sizeof(key_type),
                                 cudaMemcpyHostToDevice, stream));
      // Wait for stream to complete
      CUDA_CHECK(cudaStreamSynchronize(stream));

      // Record time
      time_1 = W_time();
      // Get pairs from hashtable
      cache->Query(d_query_keys, query_length, d_vals_retrieved,
                   d_missing_index, d_missing_keys, d_missing_len, stream);
      // Wait for stream to complete
      CUDA_CHECK(cudaStreamSynchronize(stream));
      // Elapsed wall time
      time_2 = W_time() - time_1;
      printf("Worker %d : The Elapsed time for %zu round normal-distribution query is: %f sec.\n",
             thread_id, i, time_2);

      // Copy the data back to host
      CUDA_CHECK(cudaMemcpyAsync(h_vals_retrieved, d_vals_retrieved,
                                 query_length * embedding_vec_size * sizeof(float),
                                 cudaMemcpyDeviceToHost, stream));
      CUDA_CHECK(cudaMemcpyAsync(h_missing_index, d_missing_index,
                                 query_length * sizeof(uint64_t),
                                 cudaMemcpyDeviceToHost, stream));
      CUDA_CHECK(cudaMemcpyAsync(h_missing_keys, d_missing_keys,
                                 query_length * sizeof(key_type),
                                 cudaMemcpyDeviceToHost, stream));
      CUDA_CHECK(cudaMemcpyAsync(&h_missing_len, d_missing_len, sizeof(size_t),
                                 cudaMemcpyDeviceToHost, stream));
      CUDA_CHECK(cudaStreamSynchronize(stream));
      printf("Worker %d : %zu round : Missing key: %zu. Hit rate: %f %%.\n",
             thread_id, i, h_missing_len,
             100.0f - (((float)h_missing_len / (float)query_length) * 100.0f));

      // Get missing key from embedding table
      // Insert missing values into the retrieved value buffer
      // Record time
      time_1 = W_time();
      for (size_t missing_idx = 0; missing_idx < h_missing_len; missing_idx++) {
        auto result = h_emb_table.find(h_missing_keys[missing_idx]);
        for (size_t emb_vec_idx = 0; emb_vec_idx < embedding_vec_size; emb_vec_idx++) {
          h_missing_vals[missing_idx * embedding_vec_size + emb_vec_idx] =
              (result->second)[emb_vec_idx];
          h_vals_retrieved[h_missing_index[missing_idx] * embedding_vec_size + emb_vec_idx] =
              (result->second)[emb_vec_idx];
        }
      }
      // Elapsed wall time
      time_2 = W_time() - time_1;
      printf("Worker %d : The Elapsed time for %zu round normal-distribution fetching missing data is: %f sec.\n",
             thread_id, i, time_2);

      // Copy the missing value to device
      // fix: only h_missing_len vectors are valid in h_missing_vals; the
      // original copied query_length vectors, transferring (and reading)
      // uninitialized tail data
      CUDA_CHECK(cudaMemcpyAsync(d_missing_vals, h_missing_vals,
                                 h_missing_len * embedding_vec_size * sizeof(float),
                                 cudaMemcpyHostToDevice, stream));
      CUDA_CHECK(cudaStreamSynchronize(stream));

      // Record time
      time_1 = W_time();
      // Replace the missing <k,v> pairs into the cache
      cache->Replace(d_missing_keys, h_missing_len, d_missing_vals, stream);
      // Wait for stream to complete
      CUDA_CHECK(cudaStreamSynchronize(stream));
      // Elapsed wall time
      time_2 = W_time() - time_1;
      printf("Worker %d : The Elapsed time for %zu round normal-distribution replace is: %f sec.\n",
             thread_id, i, time_2);

      // Verification: Check for correctness for retrieved pairs
      check_result(h_query_keys, h_vals_retrieved, embedding_vec_size,
                   query_length, increase);
      printf("Worker %d : Result check for %zu round normal-distribution query+replace successfully!\n",
             thread_id, i);
    }
    printf("Worker %d : All Finished!\n", thread_id);

    // Clean-up
    // fix: check the stream destruction like every other CUDA call here
    CUDA_CHECK(cudaStreamDestroy(stream));
    free(h_query_keys_index);
    CUDA_CHECK(cudaFreeHost(h_query_keys));
    CUDA_CHECK(cudaFreeHost(h_vals_retrieved));
    CUDA_CHECK(cudaFreeHost(h_missing_keys));
    CUDA_CHECK(cudaFreeHost(h_missing_vals));
    CUDA_CHECK(cudaFreeHost(h_missing_index));
    CUDA_CHECK(cudaFree(d_query_keys));
    CUDA_CHECK(cudaFree(d_vals_retrieved));
    CUDA_CHECK(cudaFree(d_missing_keys));
    CUDA_CHECK(cudaFree(d_missing_vals));
    CUDA_CHECK(cudaFree(d_missing_index));
    CUDA_CHECK(cudaFree(d_missing_len));
  }

  // 1st test Clean-up
  free(h_keys);
  delete cache;

  // Start 2nd test
  std::cout << "****************************************" << std::endl;
  std::cout << "****************************************" << std::endl;
  std::cout << "Start Single-GPU Thread-safe Update and Dump API test " << std::endl;
  // The key and value buffer that contains all the keys and values to be
  // inserted into the cache
  h_keys = (key_type*)malloc(SLAB_SIZE * SET_ASSOCIATIVITY * cache_capacity_in_set * sizeof(key_type));
  h_vals = (float*)malloc(SLAB_SIZE * SET_ASSOCIATIVITY * cache_capacity_in_set * embedding_vec_size * sizeof(float));
  float* h_new_vals = (float*)malloc(SLAB_SIZE * SET_ASSOCIATIVITY * cache_capacity_in_set * embedding_vec_size * sizeof(float));
  // The cache object to be tested
  cache = new Cache_(cache_capacity_in_set, embedding_vec_size);
  // Key generator
  KeyGenerator<key_type> cache_key_gen;
  // Generating random unique keys
  cache_key_gen.fill_unique(h_keys, SLAB_SIZE * SET_ASSOCIATIVITY,
                            cache_capacity_in_set, empty_key);
  // Filling values vector according to the keys
  fill_vec(h_keys, h_vals, embedding_vec_size,
           SLAB_SIZE * SET_ASSOCIATIVITY * cache_capacity_in_set, increase);
  // Set new value
  increase = 1.0f;
  // Filling values vector according to the keys
  fill_vec(h_keys, h_new_vals, embedding_vec_size,
           SLAB_SIZE * SET_ASSOCIATIVITY * cache_capacity_in_set, increase);

  // Host-side buffers
  // Buffers holding keys and values to be inserted, each time insert 1 slab
  // to every slabset
  key_type* h_insert_keys;
  float* h_insert_vals;
  // Buffers holding keys and values dumped and retrieved from the cache
  key_type* h_dump_keys;
  float* h_vals_retrieved;
  size_t h_dump_counter;
  size_t h_missing_len;
  key_type* h_acc_keys;
  // Device-side buffers
  key_type* d_keys;
  float* d_vals;
  // Buffers holding keys and values to be inserted, each time insert 1 slab
  // to every slabset
  key_type* d_insert_keys;
  float* d_insert_vals;
  // Buffers holding keys and values dumped and retrieved from the cache
  key_type* d_dump_keys;
  float* d_vals_retrieved;
  size_t* d_dump_counter;
  uint64_t* d_missing_index;
  key_type* d_missing_keys;
  size_t* d_missing_len;

  CUDA_CHECK(cudaHostAlloc((void**)&h_insert_keys,
                           SLAB_SIZE * cache_capacity_in_set * sizeof(key_type),
                           cudaHostAllocPortable));
  CUDA_CHECK(cudaHostAlloc((void**)&h_insert_vals,
                           SLAB_SIZE * cache_capacity_in_set * embedding_vec_size * sizeof(float),
                           cudaHostAllocPortable));
  CUDA_CHECK(cudaHostAlloc((void**)&h_dump_keys,
                           SLAB_SIZE * SET_ASSOCIATIVITY * cache_capacity_in_set * sizeof(key_type),
                           cudaHostAllocPortable));
  CUDA_CHECK(cudaHostAlloc((void**)&h_vals_retrieved,
                           SLAB_SIZE * SET_ASSOCIATIVITY * cache_capacity_in_set * embedding_vec_size * sizeof(float),
                           cudaHostAllocPortable));
  CUDA_CHECK(cudaHostAlloc((void**)&h_acc_keys,
                           SLAB_SIZE * SET_ASSOCIATIVITY * cache_capacity_in_set * sizeof(key_type),
                           cudaHostAllocPortable));
  CUDA_CHECK(cudaMalloc((void**)&d_keys,
                        SLAB_SIZE * SET_ASSOCIATIVITY * cache_capacity_in_set * sizeof(key_type)));
  CUDA_CHECK(cudaMalloc((void**)&d_vals,
                        SLAB_SIZE * SET_ASSOCIATIVITY * cache_capacity_in_set * embedding_vec_size * sizeof(float)));
  CUDA_CHECK(cudaMalloc((void**)&d_insert_keys,
                        SLAB_SIZE * cache_capacity_in_set * sizeof(key_type)));
  CUDA_CHECK(cudaMalloc((void**)&d_insert_vals,
                        SLAB_SIZE * cache_capacity_in_set * embedding_vec_size * sizeof(float)));
  CUDA_CHECK(cudaMalloc((void**)&d_dump_keys,
                        SLAB_SIZE * SET_ASSOCIATIVITY * cache_capacity_in_set * sizeof(key_type)));
  CUDA_CHECK(cudaMalloc((void**)&d_vals_retrieved,
                        SLAB_SIZE * SET_ASSOCIATIVITY * cache_capacity_in_set * embedding_vec_size * sizeof(float)));
  CUDA_CHECK(cudaMalloc((void**)&d_dump_counter, sizeof(size_t)));
  CUDA_CHECK(cudaMalloc((void**)&d_missing_index,
                        SLAB_SIZE * SET_ASSOCIATIVITY * cache_capacity_in_set * sizeof(uint64_t)));
  CUDA_CHECK(cudaMalloc((void**)&d_missing_keys,
                        SLAB_SIZE * SET_ASSOCIATIVITY * cache_capacity_in_set * sizeof(key_type)));
  CUDA_CHECK(cudaMalloc((void**)&d_missing_len, sizeof(size_t)));

  // CUDA stream
  cudaStream_t stream;
  CUDA_CHECK(cudaStreamCreate(&stream));
  // Copy all keys and values from host to device
  CUDA_CHECK(cudaMemcpyAsync(d_keys, h_keys,
                             SLAB_SIZE * SET_ASSOCIATIVITY * cache_capacity_in_set * sizeof(key_type),
                             cudaMemcpyHostToDevice, stream));
  CUDA_CHECK(cudaMemcpyAsync(d_vals, h_new_vals,
                             SLAB_SIZE * SET_ASSOCIATIVITY * cache_capacity_in_set * embedding_vec_size * sizeof(float),
                             cudaMemcpyHostToDevice, stream));
  // Wait for stream to complete
  CUDA_CHECK(cudaStreamSynchronize(stream));

  // Each time insert 1 slab per slabset into the cache and check result
  for (size_t i = 0; i < SET_ASSOCIATIVITY; i++) {
    // Prepare the keys and values to be inserted
    for (size_t j = 0; j < cache_capacity_in_set; j++) {
      memcpy(h_insert_keys + j * SLAB_SIZE,
             h_keys + j * SLAB_SIZE * SET_ASSOCIATIVITY + i * SLAB_SIZE,
             SLAB_SIZE * sizeof(key_type));
      memcpy(h_insert_vals + j * SLAB_SIZE * embedding_vec_size,
             h_vals + (j * SLAB_SIZE * SET_ASSOCIATIVITY + i * SLAB_SIZE) * embedding_vec_size,
             SLAB_SIZE * embedding_vec_size * sizeof(float));
    }
    // Copy the selected keys to accumulate buffer
    memcpy(h_acc_keys + SLAB_SIZE * cache_capacity_in_set * i, h_insert_keys,
           SLAB_SIZE * cache_capacity_in_set * sizeof(key_type));
    // Copy the <k,v> pairs from host to device
    CUDA_CHECK(cudaMemcpyAsync(d_insert_keys, h_insert_keys,
                               SLAB_SIZE * cache_capacity_in_set * sizeof(key_type),
                               cudaMemcpyHostToDevice, stream));
    CUDA_CHECK(cudaMemcpyAsync(d_insert_vals, h_insert_vals,
                               SLAB_SIZE * cache_capacity_in_set * embedding_vec_size * sizeof(float),
                               cudaMemcpyHostToDevice, stream));
    // Insert the <k,v> pairs into the cache
    cache->Replace(d_insert_keys, SLAB_SIZE * cache_capacity_in_set,
                   d_insert_vals, stream);
    // Wait for stream to complete
    CUDA_CHECK(cudaStreamSynchronize(stream));

    // Record time
    time_a = W_time();
    // Update the new values to the cache(including missing keys)
    cache->Update(d_keys, SLAB_SIZE * SET_ASSOCIATIVITY * cache_capacity_in_set,
                  d_vals, stream, SLAB_SIZE);
    // Wait for stream to complete
    CUDA_CHECK(cudaStreamSynchronize(stream));
    // Elapsed wall time
    time_b = W_time() - time_a;
    printf("The Elapsed time for %zu round update is: %f sec.\n", i, time_b);

    // Record time
    time_a = W_time();
    // Dump the keys from the cache
    cache->Dump(d_dump_keys, d_dump_counter, 0, cache_capacity_in_set, stream);
    // Wait for stream to complete
    CUDA_CHECK(cudaStreamSynchronize(stream));
    // Elapsed wall time
    time_b = W_time() - time_a;
    printf("The Elapsed time for %zu round dump is: %f sec.\n", i, time_b);

    // Copy the dump counter from device to host
    CUDA_CHECK(cudaMemcpyAsync(&h_dump_counter, d_dump_counter, sizeof(size_t),
                               cudaMemcpyDeviceToHost, stream));
    // Wait for stream to complete
    CUDA_CHECK(cudaStreamSynchronize(stream));
    // Check the dump counter
    assert(h_dump_counter == SLAB_SIZE * cache_capacity_in_set * (i + 1));

    // Query all the dumped keys from the cache
    cache->Query(d_dump_keys, h_dump_counter, d_vals_retrieved,
                 d_missing_index, d_missing_keys, d_missing_len, stream);
    // Copy result from device to host
    CUDA_CHECK(cudaMemcpyAsync(h_dump_keys, d_dump_keys,
                               h_dump_counter * sizeof(key_type),
                               cudaMemcpyDeviceToHost, stream));
    CUDA_CHECK(cudaMemcpyAsync(h_vals_retrieved, d_vals_retrieved,
                               h_dump_counter * embedding_vec_size * sizeof(float),
                               cudaMemcpyDeviceToHost, stream));
    CUDA_CHECK(cudaMemcpyAsync(&h_missing_len, d_missing_len, sizeof(size_t),
                               cudaMemcpyDeviceToHost, stream));
    // Wait for stream to complete
    CUDA_CHECK(cudaStreamSynchronize(stream));
    // Check result
    assert(h_missing_len == 0);
    compare_key(h_dump_keys, h_acc_keys, h_dump_counter);
    check_result(h_dump_keys, h_vals_retrieved, embedding_vec_size,
                 h_dump_counter, increase);
  }
  printf("Update and Dump API test all finished!\n");

  // 2nd test clean-up
  CUDA_CHECK(cudaStreamDestroy(stream));
  free(h_keys);
  free(h_vals);
  free(h_new_vals);
  CUDA_CHECK(cudaFreeHost(h_insert_keys));
  CUDA_CHECK(cudaFreeHost(h_insert_vals));
  CUDA_CHECK(cudaFreeHost(h_dump_keys));
  CUDA_CHECK(cudaFreeHost(h_vals_retrieved));
  CUDA_CHECK(cudaFreeHost(h_acc_keys));
  CUDA_CHECK(cudaFree(d_keys));
  CUDA_CHECK(cudaFree(d_vals));
  CUDA_CHECK(cudaFree(d_insert_keys));
  CUDA_CHECK(cudaFree(d_insert_vals));
  CUDA_CHECK(cudaFree(d_dump_keys));
  CUDA_CHECK(cudaFree(d_vals_retrieved));
  CUDA_CHECK(cudaFree(d_dump_counter));
  CUDA_CHECK(cudaFree(d_missing_index));
  CUDA_CHECK(cudaFree(d_missing_keys));
  CUDA_CHECK(cudaFree(d_missing_len));
  delete cache;
  return 0;
}
the_stack
#include "common.h"
#include "polish_E.cu"
//#include "polydet.cu"
//#include "sturm.cu"
//#include "polyquotient.cu"
//#include "cheirality.cu"
#include "kernel_functions.cu" // all CUDA definitions must be compiled in the same logical compilation unit
// Never compile FooDevice.cu - import it into main.cu
//#include <Utilities_CC/Timer.h>
#include <chrono>
#include <vector>
//#include "Utilities_CC/utilities_CC.h"

/*
 * CUDA macros, constants and functions
 */
#define CudaErrorCheck(ans) {__CudaErrorCheck((ans), __FILE__, __LINE__);}

// Print the CUDA error (with source location) and abort on failure.
void __CudaErrorCheck(cudaError_t code, const char* file, int line) {
  if (code != cudaSuccess) {
    std::cout << "CUDA Error (" << file << ":" << line << "): "
              << cudaGetErrorString(code) << std::endl;
    exit(code);
  }
}

// (28) Multiprocessors, (128) CUDA Cores/MP
// Maximum number of threads per multiprocessor: 2048
// Maximum number of threads per block: 1024
// Maximum number of threads: 57344
const int nBlocks = 256;
const int nThreadsPerBlock = 256;
const int nProcesses = nBlocks * nThreadsPerBlock;

// Two absolute value functions, in case not defined
//inline int rhAbs(int a) {return (a > 0 ? a : -a);}
//inline double rhAbs(double a) {return (a > 0.0 ? a : -a);}

// Function Declarations:
void GenerateMatches(const int nPoints, double (*qs)[2], double (*qps)[2]);
void GenerateMatches(const int nPoints, double (*Xs)[3], double R[3][3],
                     double t[3], double (*qs)[2], double (*qps)[2]);
void PolluteData(double (*qs)[2], double (*qps)[2], const int nPoints,
                 const double noise_std, const double outlier_frac);
void ComputeErrorP(Pmatrix P, double R_gt[3][3], double t_gt[3],
                   double &rotation_error, double &translation_error);
void ComputeErrorE(Ematrix E1, Ematrix E2, double &error);
void ConvertRt2E(double R[3][3], double t[3], Ematrix E);

// Driver: generates synthetic matches, runs the RANSAC essential-matrix
// kernel on the GPU, picks the estimate with the most inliers and compares
// it with the ground truth.
int main(int argc, char *argv[]) {
  const int subset_size = 5;   // Number of points in each subset of the data (>= 5)
                               // other points used for cheirality test only
  const int nPoints = 78600;   // Number of total point correspondences
  const int num_test_points = 10;
  const int ransac_points = 1000;  // Number of points used for RANSAC
  const int ransac_iter = 1;
  const double ransac_threshold = 0.05;

  // Generate two sets of points, all matching, to test
  double (*qs)[2] = new double[nPoints][2];
  double (*qps)[2] = new double[nPoints][2];
  double (*Xs)[3] = new double[nPoints][3];
  // double (*Xs)[3];
  double R_gt[3][3];
  double t_gt[3];
  // GenerateMatches(nPoints, qs, qps);
  GenerateMatches(nPoints, Xs, R_gt, t_gt, qs, qps);
  double noise_std = 0.01;
  double outlier_frac = 0.1;
  PolluteData(qs, qps, nPoints, noise_std, outlier_frac);

  // CUDA Setup
  // fix: `qs` is a plain host array and has no get_device() member, so the
  // original `cudaSetDevice(qs.get_device())` does not compile; select
  // device 0 explicitly as the commented-out code above intended.
  CudaErrorCheck(cudaSetDevice(0));

  int *num_inliers;
  double *qs_ptr;
  double *qps_ptr;
  double (*essential_matrices)[3][3];
  curandState* state;
  CudaErrorCheck(cudaMallocManaged((void **) &qs_ptr, 2 * nPoints * sizeof(double)));
  CudaErrorCheck(cudaMallocManaged((void **) &qps_ptr, 2 * nPoints * sizeof(double)));
  CudaErrorCheck(cudaMallocManaged((void **) &state, nProcesses * sizeof(curandState)));
  CudaErrorCheck(cudaMallocManaged((void **) &num_inliers, nProcesses * sizeof(int)));
  CudaErrorCheck(cudaMallocManaged((void **) &essential_matrices,
                                   nProcesses * 3 * 3 * sizeof(double)));

  // Flatten the 2D match arrays into the managed buffers
  for (int i = 0; i < nPoints; ++i) {
    for (int j = 0; j < 2; ++j) {
      qs_ptr[2 * i + j] = qs[i][j];
      qps_ptr[2 * i + j] = qps[i][j];
    }
  }

  // Copy constants to device constant memory
  CudaErrorCheck(cudaMemcpyToSymbol(c_num_points, &nPoints, sizeof(int)));
  CudaErrorCheck(cudaMemcpyToSymbol(c_num_test_points, &num_test_points, sizeof(int)));
  CudaErrorCheck(cudaMemcpyToSymbol(c_ransac_num_test_points, &ransac_points, sizeof(int)));
  CudaErrorCheck(cudaMemcpyToSymbol(c_ransac_num_iterations, &ransac_iter, sizeof(int)));
  CudaErrorCheck(cudaMemcpyToSymbol(c_inlier_threshold, &ransac_threshold, sizeof(double)));

  // Declare device versions of curandState and generate states, one for each process
  unsigned long long seed = 1234;
  SetupRandomState<<<nBlocks, nThreadsPerBlock>>>(seed, state);
  EstimateEssentialMatrix<subset_size><<<nBlocks, nThreadsPerBlock>>>(
      qs_ptr,               // Two sets of matching points
      qps_ptr,              // (flattened 2D arrays)
      state,                // Random number generator state
      num_inliers,          // Number of inliers per thread
      essential_matrices);  // Essential matrices per thread
  CudaErrorCheck(cudaPeekAtLastError());    // Check for kernel launch error
  CudaErrorCheck(cudaDeviceSynchronize());  // Check for kernel execution error

  // Print number of E and P matrices
  // for (int i = 0; i < nProcesses; i++) {
  //   printf("Number of Ematrices: %d\n", nEMatrices[i]);
  // }

  /***** Testing Polish_E *****/
  // auto clock_begin = std::chrono::steady_clock::now();
  int ind_max = distance(num_inliers,
                         max_element(num_inliers, num_inliers + nProcesses));
  cout << "The largest element is " << ind_max << '\n';
  cout << "The number of inliers: " << num_inliers[ind_max] << '\n';
  Ematrix E_best;
  memcpy(E_best, &essential_matrices[ind_max], sizeof(E_best));
  Ematrix E_before;
  memcpy(E_before, E_best, sizeof(E_before));
  // const int MaxReps = 10;
  //Matches_n<500> q_test, qp_test;
  //memcpy(q_test, qs, 3 * 500 * sizeof(double));
  //memcpy(qp_test, qps, 3 * 500 * sizeof(double));
  //polish_E<50000>(E_best, qs, qps, MaxReps);
  // polish_E<50000>(E_best, qs, qps, MaxReps);
  // auto duration = std::chrono::duration<double>(std::chrono::steady_clock::now() - clock_begin).count();
  // printf("Duration: %0.9fs \n", duration);

  Ematrix E_gt;
  ConvertRt2E(R_gt, t_gt, E_gt);
  printf("Before optimization\n");
  for (int j = 0; j < 3; j++) {
    printf("%f %f %f \n", E_before[j][0], E_before[j][1], E_before[j][2]);
  }
  printf("\n");
  printf("After optimization\n");
  for (int j = 0; j < 3; j++) {
    printf("%f %f %f \n", E_best[j][0], E_best[j][1], E_best[j][2]);
  }
  printf("\n");
  printf("Ground truth E\n");
  for (int j = 0; j < 3; j++) {
    printf("%f %f %f \n", E_gt[j][0], E_gt[j][1], E_gt[j][2]);
  }
  printf("\n");
  double error1;
  ComputeErrorE(E_gt, E_before, error1);
  double error2;
  ComputeErrorE(E_gt, E_best, error2);
  printf("Error between E before optimisation and GT: %f \n", error1);
  printf("Error between E after optimisation and GT: %f \n", error2);

  // Free Host Memory
  // fix: these buffers were allocated with new[]; releasing them with
  // free() is undefined behavior
  delete[] qs;
  delete[] qps;
  delete[] Xs;
  // Free Device Memory
  CudaErrorCheck(cudaFree(qs_ptr));
  CudaErrorCheck(cudaFree(qps_ptr));
  CudaErrorCheck(cudaFree(essential_matrices));
  CudaErrorCheck(cudaFree(num_inliers));
  CudaErrorCheck(cudaFree(state));
  return 0;
}

// Build an essential matrix from R and t.
// NOTE(review): the comment says E = [t]x R but the multiplication below
// computes R * [t]x — confirm which convention the rest of the pipeline
// expects before changing it.
void ConvertRt2E(double R[3][3], double t[3], Ematrix E) {
  // E =[t]xR
  double t_skew[3][3] = {{0, -t[2], t[1]},
                         {t[2], 0, -t[0]},
                         {-t[1], t[0], 0}};
  for (int i = 0; i < 3; i++) {
    for (int j = 0; j < 3; j++) {
      double sum = 0;
      for (int k = 0; k < 3; k++) {
        sum += R[i][k] * t_skew[k][j];
      }
      E[i][j] = sum;
    }
  }
}

// Frobenius norm of the difference between two essential matrices, after
// normalizing each by its (2,2) entry to remove the scale ambiguity.
void ComputeErrorE(Ematrix E1, Ematrix E2, double &error) {
  // Compute Frobenius norm between two E matrices
  error = 0.0;
  for (int i = 0; i < 3; i++)
    for (int j = 0; j < 3; j++)
      error += (E1[i][j] / E1[2][2] - E2[i][j] / E2[2][2]) *
               (E1[i][j] / E1[2][2] - E2[i][j] / E2[2][2]);
  error = sqrt(error);
}

// Rotation (degrees) and translation (up-to-scale) error of a camera
// matrix P against the ground-truth R_gt, t_gt.
void ComputeErrorP(Pmatrix P, double R_gt[3][3], double t_gt[3],
                   double &rotation_error, double &translation_error) {
  // fix: translation_error is an out-parameter that was accumulated into
  // without being zeroed first, so the result depended on the caller's
  // uninitialized value
  translation_error = 0.0;
  // Compute translation error (up to scale)
  double t[3] = {0.0, 0.0, 0.0};
  for (int i = 0; i < 3; ++i)
    for (int j = 0; j < 3; ++j)
      t[i] += -P[j][i] * P[j][3];  // t = -R' * P4
      //t[i] = P[j][3];
  for (int i = 0; i < 3; ++i)
    translation_error += (t[i] / t[2] - t_gt[i] / t_gt[2]) *
                         (t[i] / t[2] - t_gt[i] / t_gt[2]);
  // Normalize and compute squared error
  translation_error = sqrt(translation_error);

  // Compute rotation error
  // angle = arccos((trace(R'*R_gt) - 1) / 2)
  double cos_angle = 0.0;
  for (int i = 0; i < 3; ++i)
    for (int j = 0; j < 3; ++j)
      cos_angle += P[i][j] * R_gt[i][j];
  cos_angle = (cos_angle - 1.0) / 2.0;
  // fix: clamp below as well — numerical noise can push cos_angle under
  // -1.0, which would make acos() return NaN
  if (cos_angle >= 1.0)
    rotation_error = 0.0;
  else if (cos_angle <= -1.0)
    rotation_error = 180.0;
  else
    rotation_error = acos(cos_angle) * 180.0 / M_PI;  // in degrees
}

// Generate nPoints independent uniform-random 2D point pairs (no geometric
// consistency; useful only as a smoke test).
void GenerateMatches(const int nPoints, double (*qs)[2], double (*qps)[2]) {
  for (int i = 0; i < nPoints; i++) {
    qs[i][0] = urandom();
    qs[i][1] = urandom();
  }
  for (int i = 0; i < nPoints; i++) {
    qps[i][0] = urandom();
    qps[i][1] = urandom();
  }
}

/*
 * Generate matched points.
 * First generate points in 3D. Then project points to two reference cameras.
 * Points are in front of two cameras.
 */
void GenerateMatches(const int nPoints, double (*Xs)[3], double R[3][3],
                     double t[3], double (*qs)[2], double (*qps)[2]) {
  // 1. Generate random rotation and translation: R, t
  // Rotation matrix
  // First fill matrix with random entries
  for (int i = 0; i < 3; i++) {
    for (int j = 0; j < 3; j++) {
      R[i][j] = urandom();
    }
  }
  // Do QR factorization, using Gram-Schmidt.
Q is rotation matrix for (int row = 0; row < 3; row++) { // Normalize the row double sum = 0.0; for (int j = 0; j < 3; j++) sum += R[row][j] * R[row][j]; double fac = 1.0 / sqrt(sum); for (int j = 0; j < 3; j++) R[row][j] *= fac; // Use to sweep out the subsequent rows for (int i = row + 1; i < 3; i++) { // Inner product of row i and row j double prod = 0.0; for (int j = 0; j < 3; j++) prod += R[row][j] * R[i][j]; // Inner product for (int j = 0; j < 3; j++) R[i][j] -= prod * R[row][j]; } } printf("Print R and t\n"); printf("Rotation\n"); for (int i = 0; i < 3; i++) { printf("%f %f %f\n", R[i][0], R[i][1], R[i][2]); } // //code verifying rotation matrix, R^{T} = R^{-1} // for (int i = 0; i < 3; i++) { // for (int j = 0; j < 3; j++) { // double row_sum = 0.0; // for (int k = 0; k < 3; k++) { // row_sum += R[i][k] * R[j][k]; // } // printf ("%f\n", row_sum); // } // } // // Verify determinant == 1 // double determinant = R[0][0] * R[1][1] * R[2][2] // + R[0][1] * R[1][2] * R[2][0] // + R[0][2] * R[1][0] * R[2][1] // - R[0][2] * R[1][1] * R[2][0] // - R[0][1] * R[1][0] * R[2][2] // - R[0][0] * R[1][2] * R[2][1]; // printf ("determinant: %f\n", determinant); // translation vector for (int i = 0; i < 3; i++) { t[i] = urandom(); // t[i] = 0.0; } printf("Translation: %f %f %f\n", t[0], t[1], t[2]); // 2. Generate random 3D points in [-1,1]^3: Xs // 3. Transform 3D points Xps = R*(Xs - t) // 4. 
Force all points to be in front of both cameras double PXs[nPoints][3]; for (int i = 0; i < nPoints; i++) { double depth = -1.0; while (depth <= 0) { // Loop until both points are in front of the camera Xs[i][0] = urandom(); Xs[i][1] = urandom(); Xs[i][2] = urandom() + 1.05; // Force points to be in front of the first camera // Transform 3D points Xps = R*(Xs - t) // Xs - t double X_sub_t[3]; for (int j = 0; j < 3; j++) { X_sub_t[j] = Xs[i][j] - t[j]; } // R*(Xs - t) for (int j = 0; j < 3; j++) { double sum = 0.0; for (int k = 0; k < 3; k++) { sum += R[j][k] * X_sub_t[k]; } PXs[i][j] = sum; } // Transform 3D points Xps = R*Xs + t // for (int j = 0; j < 3; j++) { // double sum = 0.0; // for (int k = 0; k < 3; k++) { // sum += R[j][k] * Xs[i][k]; // } // PXs[i][j] = sum + t[j]; // } depth = PXs[i][2]; } } // 5. Project 3D points to 2D: qs, qps for (int i = 0; i < nPoints; i++) { qs[i][0] = Xs[i][0] / Xs[i][2]; qs[i][1] = Xs[i][1] / Xs[i][2]; qps[i][0] = PXs[i][0] / PXs[i][2]; qps[i][1] = PXs[i][1] / PXs[i][2]; } } void PolluteData(double (*qs)[2], double (*qps)[2], const int nPoints, const double noise_std, const double outlier_frac) { // Add noise to Data std::default_random_engine generator; std::normal_distribution<double> dist(0.0, noise_std); for (int i = 0; i < nPoints; i++) { qps[i][0] = qps[i][0] + dist(generator); qps[i][1] = qps[i][1] + dist(generator); } // Create outliers in data int outlier_begin = (int) nPoints * (1 - outlier_frac); std::random_shuffle(&qps[outlier_begin], &qps[nPoints]); }
the_stack
////////////////////////////////////////////////////////////////////////////////
// Compute the camera view frustum of `curr_frame` expressed in the coordinate
// frame of `base_frame`, then clamp its axis-aligned bounding box to the voxel
// volume.
// Outputs:
//   camera_relative_pose - 4x4 row-major transform: inv(pose_base) * pose_curr
//   view_bounds          - 6 floats {xmin,xmax, ymin,ymax, zmin,zmax}, voxel units
void get_frustum_bounds(float* K, std::vector<std::vector<float>> &extrinsic_poses,
                        int base_frame, int curr_frame,
                        float* camera_relative_pose, float* view_bounds,
                        float vox_unit, int* vox_size, float* vox_range_cam) {

  // Use two extrinsic matrices to compute the relative rotation/translation
  // between the current frame and the base frame.
  std::vector<float> ex_pose1 = extrinsic_poses[base_frame];
  std::vector<float> ex_pose2 = extrinsic_poses[curr_frame];
  float * ex_mat1 = &ex_pose1[0];
  float * ex_mat2 = &ex_pose2[0];
  float ex_mat1_inv[16] = {0};
  invert_matrix(ex_mat1, ex_mat1_inv);
  multiply_matrix(ex_mat1_inv, ex_mat2, camera_relative_pose);

  // Five frustum corners (camera center + four far-plane corners) for a
  // 640x480 image, stored as [5 x-coords, 5 y-coords, 5 z-coords].
  // NOTE(review): the vertical extent divides by K[0] (fx), not K[4] (fy) —
  // presumably fx == fy for this sensor; confirm.
  float max_depth = 0.8f;
  float cam_view_frustum[15] = {
    0, -320 * max_depth / K[0], -320 * max_depth / K[0],  320 * max_depth / K[0],  320 * max_depth / K[0],
    0, -240 * max_depth / K[0],  240 * max_depth / K[0],  240 * max_depth / K[0], -240 * max_depth / K[0],
    0,  max_depth,               max_depth,               max_depth,               max_depth
  };

  // Rotate and translate the frustum corners by the relative pose.
  for (int i = 0; i < 5; i++) {
    float tmp_arr[3] = {0};
    tmp_arr[0] = camera_relative_pose[0 * 4 + 0] * cam_view_frustum[0 + i] +
                 camera_relative_pose[0 * 4 + 1] * cam_view_frustum[5 + i] +
                 camera_relative_pose[0 * 4 + 2] * cam_view_frustum[2 * 5 + i];
    tmp_arr[1] = camera_relative_pose[1 * 4 + 0] * cam_view_frustum[0 + i] +
                 camera_relative_pose[1 * 4 + 1] * cam_view_frustum[5 + i] +
                 camera_relative_pose[1 * 4 + 2] * cam_view_frustum[2 * 5 + i];
    tmp_arr[2] = camera_relative_pose[2 * 4 + 0] * cam_view_frustum[0 + i] +
                 camera_relative_pose[2 * 4 + 1] * cam_view_frustum[5 + i] +
                 camera_relative_pose[2 * 4 + 2] * cam_view_frustum[2 * 5 + i];
    cam_view_frustum[0 * 5 + i] = tmp_arr[0] + camera_relative_pose[3];
    cam_view_frustum[1 * 5 + i] = tmp_arr[1] + camera_relative_pose[7];
    cam_view_frustum[2 * 5 + i] = tmp_arr[2] + camera_relative_pose[11];
  }

  // Axis-aligned metric extent of the transformed frustum.
  float range2test[3][2] = {0};
  for (int i = 0; i < 3; i++) {
    range2test[i][0] = *std::min_element(&cam_view_frustum[i * 5], &cam_view_frustum[i * 5] + 5);
    range2test[i][1] = *std::max_element(&cam_view_frustum[i * 5], &cam_view_frustum[i * 5] + 5);
  }

  // Convert the metric extent to voxel indices, clamped to the volume.
  for (int i = 0; i < 3; i++) {
    view_bounds[i * 2 + 0] = std::max(0.0f, std::floor((range2test[i][0] - vox_range_cam[i * 2 + 0]) / vox_unit));
    view_bounds[i * 2 + 1] = std::min((float)(vox_size[i]), std::ceil((range2test[i][1] - vox_range_cam[i * 2 + 0]) / vox_unit + 1));
  }
}

////////////////////////////////////////////////////////////////////////////////
// Save all voxels with |tsdf| < 0.2 and weight > 1 as a binary-little-endian
// PLY point cloud (one float x/y/z triple per voxel, in voxel-index units).
void save_volume_to_ply(const std::string &file_name, int* vox_size,
                        float* vox_tsdf, float* vox_weight) {
  float tsdf_threshold = 0.2f;
  float weight_threshold = 1.0f;

  // First pass: count points so the header vertex count is exact.
  int num_points = 0;
  for (int i = 0; i < vox_size[0] * vox_size[1] * vox_size[2]; i++)
    if (std::abs(vox_tsdf[i]) < tsdf_threshold && vox_weight[i] > weight_threshold)
      num_points++;

  // FIX: open in binary mode ("wb") — the vertex payload is raw little-endian
  // floats and text-mode newline translation would corrupt it.
  FILE *fp = fopen(file_name.c_str(), "wb");
  if (fp == NULL) {
    std::cerr << "save_volume_to_ply: cannot open " << file_name << std::endl;
    return;
  }
  fprintf(fp, "ply\n");
  fprintf(fp, "format binary_little_endian 1.0\n");
  fprintf(fp, "element vertex %d\n", num_points);
  fprintf(fp, "property float x\n");
  fprintf(fp, "property float y\n");
  fprintf(fp, "property float z\n");
  fprintf(fp, "end_header\n");

  // Second pass: write the coordinates of every voxel that passed the filter.
  for (int i = 0; i < vox_size[0] * vox_size[1] * vox_size[2]; i++) {
    if (std::abs(vox_tsdf[i]) < tsdf_threshold && vox_weight[i] > weight_threshold) {
      // Recover (x, y, z) voxel indices from the flat index (z-major layout).
      int z = floor(i / (vox_size[0] * vox_size[1]));
      int y = floor((i - (z * vox_size[0] * vox_size[1])) / vox_size[0]);
      int x = i - (z * vox_size[0] * vox_size[1]) - (y * vox_size[0]);
      float float_x = (float) x;
      float float_y = (float) y;
      float float_z = (float) z;
      fwrite(&float_x, sizeof(float), 1, fp);
      fwrite(&float_y, sizeof(float), 1, fp);
      fwrite(&float_z, sizeof(float), 1, fp);
    }
  }
  fclose(fp);
}

////////////////////////////////////////////////////////////////////////////////
// Read a 640x480 16-bit grayscale PNG depth image into `data` (row-major,
// depth in the sensor's native units; divided by 1000 later to get meters).
bool read_depth_data(const std::string &file_name, unsigned short * data) {
  png::image< png::gray_pixel_16 > img(file_name.c_str(),
                                       png::require_color_space< png::gray_pixel_16 >());
  int index = 0;
  for (int i = 0; i < 480; ++i) {
    for (int j = 0; j < 640; ++j) {
      unsigned short s = img.get_pixel(j, i);
      *(data + index) = s;
      ++index;
    }
  }
  return true;
}

////////////////////////////////////////////////////////////////////////////////
// TSDF integration kernel.
// Launch layout: gridDim.x = vox_size[2] (one block per z-slice),
// blockDim.x = vox_size[1] (one thread per y-row); each thread loops over x.
// Voxels outside `view_bounds` are skipped.
__global__ void integrate(float* K, unsigned short* depth_data, float* view_bounds,
                          float* camera_relative_pose,
                          float vox_unit, float vox_mu, int* vox_size,
                          float* vox_range_cam, float* vox_tsdf, float* vox_weight) {
  int z = blockIdx.x;
  int y = threadIdx.x;

  if (z < (int)view_bounds[2 * 2 + 0] || z >= (int)view_bounds[2 * 2 + 1])
    return;
  if (y < (int)view_bounds[1 * 2 + 0] || y >= (int)view_bounds[1 * 2 + 1])
    return;

  for (int x = view_bounds[0 * 2 + 0]; x < view_bounds[0 * 2 + 1]; x++) {

    // Voxel grid indices -> world coordinates (base-frame camera coords).
    float tmp_pos[3] = {0};
    tmp_pos[0] = (x + 1) * vox_unit + vox_range_cam[0 * 2 + 0];
    tmp_pos[1] = (y + 1) * vox_unit + vox_range_cam[1 * 2 + 0];
    tmp_pos[2] = (z + 1) * vox_unit + vox_range_cam[2 * 2 + 0];

    // Transform into the current camera frame: p' = R^T * (p - t)
    // (camera_relative_pose maps current -> base, so apply its inverse).
    float tmp_arr[3] = {0};
    tmp_arr[0] = tmp_pos[0] - camera_relative_pose[3];
    tmp_arr[1] = tmp_pos[1] - camera_relative_pose[7];
    tmp_arr[2] = tmp_pos[2] - camera_relative_pose[11];
    tmp_pos[0] = camera_relative_pose[0 * 4 + 0] * tmp_arr[0] +
                 camera_relative_pose[1 * 4 + 0] * tmp_arr[1] +
                 camera_relative_pose[2 * 4 + 0] * tmp_arr[2];
    tmp_pos[1] = camera_relative_pose[0 * 4 + 1] * tmp_arr[0] +
                 camera_relative_pose[1 * 4 + 1] * tmp_arr[1] +
                 camera_relative_pose[2 * 4 + 1] * tmp_arr[2];
    tmp_pos[2] = camera_relative_pose[0 * 4 + 2] * tmp_arr[0] +
                 camera_relative_pose[1 * 4 + 2] * tmp_arr[1] +
                 camera_relative_pose[2 * 4 + 2] * tmp_arr[2];

    // Behind the camera: nothing to integrate.
    if (tmp_pos[2] <= 0)
      continue;

    // Project into the 640x480 image (1-based pixel coordinates).
    int px = roundf(K[0] * (tmp_pos[0] / tmp_pos[2]) + K[2]);
    int py = roundf(K[4] * (tmp_pos[1] / tmp_pos[2]) + K[5]);
    if (px < 1 || px > 640 || py < 1 || py > 480)
      continue;

    // Depth in meters; trust only the [0.2, 0.8] m working range.
    float p_depth = *(depth_data + (py - 1) * 640 + (px - 1)) / 1000.f;
    if (p_depth < 0.2f || p_depth > 0.8f)
      continue;
    if (roundf(p_depth * 1000.0f) == 0)
      continue;

    // Signed distance along the viewing ray (scaled from z-distance).
    float eta = (p_depth - tmp_pos[2]) *
                sqrtf(1 + powf((tmp_pos[0] / tmp_pos[2]), 2) +
                          powf((tmp_pos[1] / tmp_pos[2]), 2));
    if (eta <= -vox_mu)
      continue;

    // Running-average TSDF update.
    int volumeIDX = z * vox_size[0] * vox_size[1] + y * vox_size[0] + x;
    float sdf = fmin(1.0f, eta / vox_mu);
    float w_old = vox_weight[volumeIDX];
    float w_new = w_old + 1.0f;
    vox_weight[volumeIDX] = w_new;
    vox_tsdf[volumeIDX] = (vox_tsdf[volumeIDX] * w_old + sdf) / w_new;
  }
}

// Placeholder for dumping the TSDF volume to a binary file (currently unused).
void vol2bin() {
  // Write data to binary file
  // std::string volume_filename = "volume.tsdf.bin";
  // std::ofstream out_file(volume_filename, std::ios::binary | std::ios::out);
  // for (int i = 0; i < vox_size[0] * vox_size[1] * vox_size[2]; i++)
  //   out_file.write((char*)&vox_tsdf[i], sizeof(float));
  // out_file.close();
}

// Print a fatal-error message (optionally with a line number), reset the
// device, and terminate the process.
void FatalError(const int lineNumber = 0) {
  std::cerr << "FatalError";
  if (lineNumber != 0)
    std::cerr << " at LINE " << lineNumber;
  std::cerr << ". Program Terminated." << std::endl;
  cudaDeviceReset();
  exit(EXIT_FAILURE);
}

// Abort on any non-success CUDA status; call as checkCUDA(__LINE__, expr).
void checkCUDA(const int lineNumber, cudaError_t status) {
  if (status != cudaSuccess) {
    // FIX: report the human-readable error string instead of the raw enum value.
    std::cerr << "CUDA failure at LINE " << lineNumber << ": "
              << cudaGetErrorString(status) << std::endl;
    FatalError();
  }
}

////////////////////////////////////////////////////////////////////////////////
// Fuse a sequence of depth frames (data/sample/*.depth.png, with per-frame
// poses and a shared intrinsics file) into a 512^3 TSDF volume on the GPU,
// then export the surface voxels of the final volume as a PLY point cloud.
int main(int argc, char **argv) {

  std::string data_directory = "data";
  std::string sequence_directory = data_directory + "/sample";

  // Enumerate sequence files by suffix and sort so frame order matches indices.
  std::vector<std::string> file_list_color;
  std::string color_regex = ".color.png";
  get_files_in_directory(sequence_directory, file_list_color, color_regex);
  std::sort(file_list_color.begin(), file_list_color.end());

  std::vector<std::string> file_list_depth;
  std::string depth_regex = ".depth.png";
  get_files_in_directory(sequence_directory, file_list_depth, depth_regex);
  std::sort(file_list_depth.begin(), file_list_depth.end());

  std::vector<std::string> file_list_intrinsics;
  std::string intrinsics_regex = ".K.txt";
  get_files_in_directory(sequence_directory, file_list_intrinsics, intrinsics_regex);
  std::sort(file_list_intrinsics.begin(), file_list_intrinsics.end());

  std::vector<std::string> file_list_extrinsics;
  std::string extrinsics_regex = ".pose.txt";
  get_files_in_directory(sequence_directory, file_list_extrinsics, extrinsics_regex);
  std::sort(file_list_extrinsics.begin(), file_list_extrinsics.end());

  // Load intrinsics (3x3 matrix, row-major).
  std::string intrinsic_filename = sequence_directory + "/intrinsics.K.txt";
  std::vector<float> K_vec = load_matrix_from_file(intrinsic_filename, 3, 3);
  float K[9];
  for (int i = 0; i < 9; i++)
    K[i] = K_vec[i];

  // Load extrinsics (one 4x4 matrix per frame).
  std::vector<std::vector<float>> extrinsics;
  for (std::string &curr_filename : file_list_extrinsics) {
    std::string curr_extrinsic_filename = sequence_directory + "/" + curr_filename;
    std::vector<float> curr_extrinsic = load_matrix_from_file(curr_extrinsic_filename, 4, 4);
    extrinsics.push_back(curr_extrinsic);
  }

  // Voxel volume parameters: 512^3 voxels of 1 mm, centered in x/y, offset in z.
  float vox_unit = 0.001f;            // voxel edge length in meters
  float vox_mu_grid = 5.0f;           // truncation band in voxels
  float vox_mu = vox_unit * vox_mu_grid;
  int vox_size[3];
  float vox_range_cam[6];
  float * vox_tsdf;
  float * vox_weight;
  vox_size[0] = 512;
  vox_size[1] = 512;
  vox_size[2] = 512;
  vox_range_cam[0 * 2 + 0] = -(float)(vox_size[0]) * vox_unit / 2;
  vox_range_cam[0 * 2 + 1] = vox_range_cam[0 * 2 + 0] + (float)(vox_size[0]) * vox_unit;
  vox_range_cam[1 * 2 + 0] = -(float)(vox_size[1]) * vox_unit / 2;
  vox_range_cam[1 * 2 + 1] = vox_range_cam[1 * 2 + 0] + (float)(vox_size[1]) * vox_unit;
  vox_range_cam[2 * 2 + 0] = -50.0f * vox_unit;
  vox_range_cam[2 * 2 + 1] = vox_range_cam[2 * 2 + 0] + (float)(vox_size[2]) * vox_unit;

  const size_t num_voxels = (size_t)vox_size[0] * vox_size[1] * vox_size[2];
  vox_tsdf = new float[num_voxels];
  vox_weight = new float[num_voxels];
  memset(vox_weight, 0, sizeof(float) * num_voxels);
  for (size_t i = 0; i < num_voxels; i++)
    vox_tsdf[i] = 1.0f;                       // 1.0 == "far / unobserved"

  // Copy voxel volume to GPU.
  int * d_vox_size;
  float * d_vox_tsdf;
  float * d_vox_weight;
  // FIX: d_vox_size holds 3 ints — allocate/copy with sizeof(int), not sizeof(float).
  checkCUDA(__LINE__, cudaMalloc(&d_vox_size, 3 * sizeof(int)));
  checkCUDA(__LINE__, cudaMalloc(&d_vox_tsdf, num_voxels * sizeof(float)));
  checkCUDA(__LINE__, cudaMalloc(&d_vox_weight, num_voxels * sizeof(float)));
  checkCUDA(__LINE__, cudaMemcpy(d_vox_size, vox_size, 3 * sizeof(int), cudaMemcpyHostToDevice));
  checkCUDA(__LINE__, cudaMemcpy(d_vox_tsdf, vox_tsdf, num_voxels * sizeof(float), cudaMemcpyHostToDevice));
  checkCUDA(__LINE__, cudaMemcpy(d_vox_weight, vox_weight, num_voxels * sizeof(float), cudaMemcpyHostToDevice));

  // Allocate device buffers for per-frame fusion parameters.
  float * d_K;
  unsigned short * d_depth_data;
  float * d_view_bounds;
  float * d_camera_relative_pose;
  float * d_vox_range_cam;
  checkCUDA(__LINE__, cudaMalloc(&d_K, 9 * sizeof(float)));
  checkCUDA(__LINE__, cudaMalloc(&d_depth_data, 480 * 640 * sizeof(unsigned short)));
  checkCUDA(__LINE__, cudaMalloc(&d_view_bounds, 6 * sizeof(float)));
  checkCUDA(__LINE__, cudaMalloc(&d_camera_relative_pose, 16 * sizeof(float)));
  checkCUDA(__LINE__, cudaMalloc(&d_vox_range_cam, 6 * sizeof(float)));

  // The first frame of the sequence defines the base coordinate frame.
  int base_frame = 0;

  // Fuse frames one at a time.
  for (int curr_frame = 0; curr_frame < (int)file_list_depth.size(); curr_frame++) {
    std::cerr << "Fusing frame " << curr_frame << "...";

    // Load depth data for the current frame.
    unsigned short * depth_data = (unsigned short *) malloc(480 * 640 * sizeof(unsigned short));
    std::string curr_filename = sequence_directory + "/" + file_list_depth[curr_frame];
    read_depth_data(curr_filename, depth_data);

    // Relative pose (current -> base) and the camera frustum's voxel bounds.
    float camera_relative_pose[16] = {0};
    float view_bounds[6] = {0};
    get_frustum_bounds(K, extrinsics, base_frame, curr_frame, camera_relative_pose,
                       view_bounds, vox_unit, vox_size, vox_range_cam);

    // Copy fusion parameters to the GPU.
    checkCUDA(__LINE__, cudaMemcpy(d_K, K, 9 * sizeof(float), cudaMemcpyHostToDevice));
    checkCUDA(__LINE__, cudaMemcpy(d_depth_data, depth_data, 480 * 640 * sizeof(unsigned short), cudaMemcpyHostToDevice));
    checkCUDA(__LINE__, cudaMemcpy(d_view_bounds, view_bounds, 6 * sizeof(float), cudaMemcpyHostToDevice));
    checkCUDA(__LINE__, cudaMemcpy(d_camera_relative_pose, camera_relative_pose, 16 * sizeof(float), cudaMemcpyHostToDevice));
    checkCUDA(__LINE__, cudaMemcpy(d_vox_range_cam, vox_range_cam, 6 * sizeof(float), cudaMemcpyHostToDevice));

    // Integrate: one block per z-slice, one thread per y-row (see kernel docs).
    int CUDA_NUM_BLOCKS = vox_size[2];
    int CUDA_NUM_THREADS = vox_size[1];
    integrate <<< CUDA_NUM_BLOCKS, CUDA_NUM_THREADS >>>(d_K, d_depth_data, d_view_bounds,
                                                        d_camera_relative_pose,
                                                        vox_unit, vox_mu, d_vox_size,
                                                        d_vox_range_cam, d_vox_tsdf, d_vox_weight);
    checkCUDA(__LINE__, cudaGetLastError());

    free(depth_data);
    std::cerr << " done!" << std::endl;

    // After the last frame, save the TSDF surface as a point cloud.
    std::string scene_ply_name = "volume.pointcloud.ply";
    if (curr_frame == (int)file_list_depth.size() - 1) {
      // Blocking cudaMemcpy also synchronizes with the preceding kernel.
      checkCUDA(__LINE__, cudaMemcpy(vox_tsdf, d_vox_tsdf, num_voxels * sizeof(float), cudaMemcpyDeviceToHost));
      checkCUDA(__LINE__, cudaMemcpy(vox_weight, d_vox_weight, num_voxels * sizeof(float), cudaMemcpyDeviceToHost));
      save_volume_to_ply(scene_ply_name, vox_size, vox_tsdf, vox_weight);
    }
  }

  // FIX: release device and host memory (previously leaked).
  checkCUDA(__LINE__, cudaFree(d_vox_size));
  checkCUDA(__LINE__, cudaFree(d_vox_tsdf));
  checkCUDA(__LINE__, cudaFree(d_vox_weight));
  checkCUDA(__LINE__, cudaFree(d_K));
  checkCUDA(__LINE__, cudaFree(d_depth_data));
  checkCUDA(__LINE__, cudaFree(d_view_bounds));
  checkCUDA(__LINE__, cudaFree(d_camera_relative_pose));
  checkCUDA(__LINE__, cudaFree(d_vox_range_cam));
  delete[] vox_tsdf;
  delete[] vox_weight;

  return 0;
}
the_stack
//////////////////////////////////////////////////////////////////////// Full Volume Scan6
// Thread-block geometry and the per-thread cap on surface points generated in
// one z-step of the scan.
enum
{
  CTA_SIZE_X = 32, CTA_SIZE_Y = 6, CTA_SIZE = CTA_SIZE_X * CTA_SIZE_Y, MAX_LOCAL_POINTS = 3
};

// Device-global counters shared by all blocks of the extraction kernel:
//   global_count - running total of points appended to `output`
//   output_count - final clamped count, published by the last block to finish
//   blocks_done  - number of blocks that completed (last-block-done pattern)
__device__ int global_count = 0;
__device__ int output_count;
__device__ unsigned int blocks_done = 0;

// Per-warp staging buffers used to compact each warp's scattered points before
// the coalesced write-out to global memory.
// NOTE(review): declared at file scope rather than inside the kernel —
// presumably relies on nvcc accepting file-scope __shared__; confirm this
// matches the intended compilation mode.
__shared__ float storage_X[CTA_SIZE * MAX_LOCAL_POINTS];
__shared__ float storage_Y[CTA_SIZE * MAX_LOCAL_POINTS];
__shared__ float storage_Z[CTA_SIZE * MAX_LOCAL_POINTS];
__shared__ float storage_W[CTA_SIZE * MAX_LOCAL_POINTS];

//////////////////////////////////////////////////////////////////////// Full Volume Scan6 Slice
// Functor run by extractKernelSlice: scans a sub-volume [minX,maxX) x
// [minY,maxY) x [minZ,maxZ) of a wrapped (rolling-buffer) TSDF volume and
// emits one colored point per zero crossing found along +x, +y and +z.
struct FullScan6Slice
{
  PtrStep<short> volume;            // packed TSDF volume
  float3 cell_size;                 // metric size of one voxel
  int subsample;                    // only every `subsample`-th voxel is scanned
  int3 voxelWrap;                   // rolling-buffer offset applied before the modulo lookup
  int3 realVoxelWrap;               // accumulated shift used to place output points in world space
  int minX; int maxX;
  int minY; int maxY;
  int minZ; int maxZ;
  int xOffset; int yOffset;         // grid origin offsets when only a thin slice is launched
  mutable PtrSz<PointXYZRGB> output; // destination point buffer (capacity = output.size)
  PtrStep<uchar4> color_volume;     // per-voxel RGB + weight (w channel)

  // Read the TSDF value at (x, y, z) after applying the rolling-buffer wrap;
  // also returns the voxel's weight (taken from the color volume's w channel).
  __device__ __forceinline__ float
  fetch (int x, int y, int z, int& weight) const
  {
    const short * pos = &volume.ptr(0)[((x + voxelWrap.x) % VOLUME_X) +
                                       ((y + voxelWrap.y) % VOLUME_Y) * VOLUME_X +
                                       ((z + voxelWrap.z) % VOLUME_Z) * VOLUME_X * VOLUME_Y];
    float tsdf = unpack_tsdf(*pos);
    const uchar4 * ptrColor = &color_volume.ptr(0)[((x + voxelWrap.x) % VOLUME_X) +
                                                   ((y + voxelWrap.y) % VOLUME_Y) * VOLUME_X +
                                                   ((z + voxelWrap.z) % VOLUME_Z) * VOLUME_X * VOLUME_Y];
    weight = ptrColor->w;
    return tsdf;
  }

  // Read the RGB channels of the (wrapped) voxel at (x, y, z).
  __device__ __forceinline__ void
  fetchColor (int x, int y, int z, int & r, int & g, int & b) const
  {
    const uchar4 * ptrColor = &color_volume.ptr(0)[((x + voxelWrap.x) % VOLUME_X) +
                                                   ((y + voxelWrap.y) % VOLUME_Y) * VOLUME_X +
                                                   ((z + voxelWrap.z) % VOLUME_Z) * VOLUME_X * VOLUME_Y];
    r = ptrColor->x;
    g = ptrColor->y;
    b = ptrColor->z;
  }

  // Main scan: each thread walks one (x, y) column in z, finds sign changes of
  // the TSDF against the +x / +y / +z neighbors, interpolates the crossing
  // position, and warp-compacts the resulting points into `output`.
  // NOTE(review): uses legacy mask-less warp intrinsics (__all, __ballot) —
  // pre-Volta only; would need *_sync variants on newer architectures.
  __device__ __forceinline__ void
  operator () () const
  {
    int x = (threadIdx.x + blockIdx.x * blockDim.x) + xOffset;
    int y = (threadIdx.y + blockIdx.y * blockDim.y) + yOffset;

    // Whole warp out of range -> nothing to do (kept warp-uniform so the
    // warp-level compaction below sees all lanes).
    if (__all (x >= VOLUME_X) || __all (y >= VOLUME_Y))
      return;

    // Metric position of the voxel center for this column.
    float3 V;
    V.x = (x + 0.5f) * cell_size.x;
    V.y = (y + 0.5f) * cell_size.y;

    // Flat thread index within the block (selects the warp's staging slot).
    int ftid = threadIdx.z * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;

    for (int z = minZ; z < maxZ; z += subsample)
    {
      // Up to one point per axis direction for this voxel.
      float4 points[MAX_LOCAL_POINTS];
      int local_count = 0;

      if (x < VOLUME_X && y < VOLUME_Y && x >= minX && x < maxX && y >= minY && y < maxY && x % subsample == 0 && y % subsample == 0)
      {
        int W;
        float F = fetch (x, y, z, W);

        // Skip unobserved voxels (zero weight or TSDF still at its init value).
        if (W != 0 && F != 1.f)
        {
          V.z = (z + 0.5f) * cell_size.z;

          //process dx
          if (x + 1 < VOLUME_X)
          {
            int Wn;
            float Fn = fetch (x + 1, y, z, Wn);

            if (Wn != 0 && Fn != 1.f)
              if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
              {
                // Linear interpolation of the zero crossing along x;
                // RGB + weight are packed into the point's w as an int.
                float4 p;
                p.y = V.y;
                p.z = V.z;

                float Vnx = V.x + cell_size.x;

                float d_inv = 1.f / (fabs (F) + fabs (Fn));
                p.x = (V.x * fabs (Fn) + Vnx * fabs (F)) * d_inv;

                int3 colorsPacked;
                fetchColor(x + 1, y, z, colorsPacked.x, colorsPacked.y, colorsPacked.z);

                int rgb = ((int)colorsPacked.x << 24 | (int)colorsPacked.y << 16 | (int)colorsPacked.z << 8 | (int)W);

                p.w = *reinterpret_cast<float*>(&rgb);

                points[local_count++] = p;
              }
          } /* if (x + 1 < VOLUME_X) */

          //process dy
          if (y + 1 < VOLUME_Y)
          {
            int Wn;
            float Fn = fetch (x, y + 1, z, Wn);

            if (Wn != 0 && Fn != 1.f)
              if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
              {
                float4 p;
                p.x = V.x;
                p.z = V.z;

                float Vny = V.y + cell_size.y;

                float d_inv = 1.f / (fabs (F) + fabs (Fn));
                p.y = (V.y * fabs (Fn) + Vny * fabs (F)) * d_inv;

                int3 colorsPacked;
                fetchColor(x, y + 1, z, colorsPacked.x, colorsPacked.y, colorsPacked.z);

                int rgb = ((int)colorsPacked.x << 24 | (int)colorsPacked.y << 16 | (int)colorsPacked.z << 8 | (int)W);

                p.w = *reinterpret_cast<float*>(&rgb);

                points[local_count++] = p;
              }
          } /* if (y + 1 < VOLUME_Y) */

          //process dz
          //if (z + 1 < VOLUME_Z) // guaranteed by loop
          {
            int Wn;
            float Fn = fetch (x, y, z + 1, Wn);

            if (Wn != 0 && Fn != 1.f)
              if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
              {
                float4 p;
                p.x = V.x;
                p.y = V.y;

                float Vnz = V.z + cell_size.z;

                float d_inv = 1.f / (fabs (F) + fabs (Fn));
                p.z = (V.z * fabs (Fn) + Vnz * fabs (F)) * d_inv;

                int3 colorsPacked;
                fetchColor(x, y, z + 1, colorsPacked.x, colorsPacked.y, colorsPacked.z);

                int rgb = ((int)colorsPacked.x << 24 | (int)colorsPacked.y << 16 | (int)colorsPacked.z << 8 | (int)W);

                p.w = *reinterpret_cast<float*>(&rgb);

                points[local_count++] = p;
              }
          } /* if (z + 1 < VOLUME_Z) */
        } /* if (W != 0 && F != 1.f) */
      } /* if (x < VOLUME_X && y < VOLUME_Y) */

      ///not we fulfilled points array at current iteration
      // Count points produced by the whole warp (0..3 per lane) via ballots.
      int total_warp = __popc (__ballot (local_count > 0)) + __popc (__ballot (local_count > 1)) + __popc (__ballot (local_count > 2));

      if (total_warp > 0)
      {
        int lane = Warp::laneId ();
        // Each warp owns a contiguous slice of the shared staging arrays.
        int storage_index = (ftid >> Warp::LOG_WARP_SIZE) * Warp::WARP_SIZE * MAX_LOCAL_POINTS;

        // Reuse storage_X's slice as an int scratch buffer for the warp scan.
        volatile int* cta_buffer = (int*)(storage_X + storage_index);

        cta_buffer[lane] = local_count;
        // Exclusive prefix sum -> each lane's write offset within the warp.
        int offset = scan_warp<exclusive>(cta_buffer, lane);

        // Lane 0 reserves the warp's span in the global output buffer.
        if (lane == 0)
        {
          int old_global_count = atomicAdd (&global_count, total_warp);
          cta_buffer[0] = old_global_count;
        }
        int old_global_count = cta_buffer[0];

        // Stage this lane's points at its scanned offset (compaction).
        for (int l = 0; l < local_count; ++l)
        {
          storage_X[storage_index + offset + l] = points[l].x;
          storage_Y[storage_index + offset + l] = points[l].y;
          storage_Z[storage_index + offset + l] = points[l].z;
          storage_W[storage_index + offset + l] = points[l].w;
        }

        // Warp-cooperative copy of the compacted points to global memory,
        // unpacking RGB + weight from the float-punned w component.
        PointXYZRGB *pos = output.data + old_global_count + lane;
        for (int idx = lane; idx < total_warp; idx += Warp::STRIDE, pos += Warp::STRIDE)
        {
          float x = storage_X[storage_index + idx];
          float y = storage_Y[storage_index + idx];
          float z = storage_Z[storage_index + idx];
          float w = storage_W[storage_index + idx];
          int rgb = *reinterpret_cast<int*>(&w);
          int r = (rgb >> 24) & 0x0000ff;
          int g = (rgb >> 16) & 0x0000ff;
          int b = (rgb >> 8) & 0x0000ff;
          int weight = (rgb) & 0x0000ff;
          store_point_type (x, y, z, r, g, b, weight, pos);
        }

        // Stop scanning once the output buffer is full.
        bool full = (old_global_count + total_warp) >= output.size;

        if (full)
          break;
      }
    } /* for(int z = 0; z < VOLUME_Z - 1; ++z) */

    //////  prepare for future scans
    // Last-block-done pattern: the final block to finish publishes the clamped
    // point count and resets the counters for the next kernel invocation.
    if (ftid == 0)
    {
      unsigned int total_blocks = gridDim.x * gridDim.y * gridDim.z;
      unsigned int value = atomicInc (&blocks_done, total_blocks);

      //last block
      if (value == total_blocks - 1)
      {
        output_count = min ((int)output.size, global_count);
        blocks_done = 0;
        global_count = 0;
      }
    }
  } /* operator() */

  // Write one output point: shift by the accumulated volume wrap, re-center
  // around the volume midpoint, and store color/weight.
  // NOTE(review): r and b are swapped on store (ptr->r = b, ptr->b = r) —
  // presumably compensating for an RGB/BGR mismatch upstream; confirm.
  __device__ __forceinline__ void
  store_point_type (float x, float y, float z, int r, int g, int b, int w, PointXYZRGB * ptr) const
  {
    ptr->x = x + realVoxelWrap.x * cell_size.x - ((cell_size.x * VOLUME_X) / 2);
    ptr->y = y + realVoxelWrap.y * cell_size.y - ((cell_size.y * VOLUME_Y) / 2);
    ptr->z = z + realVoxelWrap.z * cell_size.z - ((cell_size.z * VOLUME_Z) / 2);
    ptr->r = b;
    ptr->g = g;
    ptr->b = r;
    ptr->a = w;
  }
};

// Kernel entry point: run the functor (passed by value to the device).
__global__ void
extractKernelSlice (const FullScan6Slice fs)
{
  fs ();
}

// Host wrapper: configure a FullScan6Slice for the requested sub-volume,
// launch the extraction kernel (full-volume grid, or a slice-sized grid padded
// to a multiple of 16 along the thinnest axis), and return the number of
// points written to `output` (read back from the device symbol output_count).
size_t
extractCloudSlice(const PtrStep<short>& volume, const float3& volume_size,
                  PtrSz<PointXYZRGB> output, int3 voxelWrap,
                  PtrStep<uchar4> & color_volume,
                  int minX, int maxX, int minY, int maxY, int minZ, int maxZ,
                  int subsample, int3 realVoxelWrap)
{
  FullScan6Slice fs;
  fs.volume = volume;
  fs.cell_size.x = (volume_size.x / VOLUME_X);
  fs.cell_size.y = (volume_size.y / VOLUME_Y);
  fs.cell_size.z = (volume_size.z / VOLUME_Z);
  fs.output = output;
  fs.subsample = subsample;
  fs.voxelWrap = voxelWrap;
  fs.realVoxelWrap = realVoxelWrap;
  fs.minX = minX;
  fs.maxX = maxX;
  fs.minY = minY;
  fs.maxY = maxY;
  fs.minZ = minZ;
  fs.maxZ = maxZ;
  fs.xOffset = 0;
  fs.yOffset = 0;
  fs.color_volume = color_volume;

  int amountX = maxX - minX;
  int amountY = maxY - minY;
  int amountZ = maxZ - minZ;

  dim3 block (CTA_SIZE_X, CTA_SIZE_Y);

  bool fullCloud = amountX == VOLUME_X && amountY == VOLUME_Y && amountZ == VOLUME_Z;
  if(fullCloud)
  {
    // Full volume: one thread per (x, y) column over the whole extent.
    dim3 grid (divUp (VOLUME_X, block.x), divUp (VOLUME_Y, block.y));
    extractKernelSlice<<<grid, block>>>(fs);
  }
  else
  {
    // Thin slice: shrink the grid along the thinnest axis (x or y) and start
    // it at that axis's lower bound; z slicing is handled inside the kernel.
    int amount = amountZ;
    bool x = false, y = false;
    if(amountX < amountY && amountX < amountZ)
    {
      fs.xOffset = minX;
      amount = amountX;
      x = true;
    }
    else if(amountY < amountX && amountY < amountZ)
    {
      fs.yOffset = minY;
      amount = amountY;
      y = true;
    }
    // Round the slice extent up to a multiple of 16 for the launch grid.
    int remainder = amount % 16;
    if(remainder != 0)
    {
      remainder = amount + 16 - remainder;
    }
    else
    {
      remainder = amount;
    }
    dim3 grid (x ? divUp(remainder, block.x) : divUp(VOLUME_X, block.x),
               y ? divUp(remainder, block.y) : divUp(VOLUME_Y, block.y));
    extractKernelSlice<<<grid, block>>>(fs);
  }
  cudaSafeCall(cudaGetLastError());
  cudaSafeCall(cudaDeviceSynchronize());

  // Read back the point count published by the last finishing block.
  int size;
  cudaSafeCall(cudaMemcpyFromSymbol(&size, output_count, sizeof(size)));
  return (size_t)(size);
}
the_stack
#include "fast_lsh_cumulation_cuda.h"
#include "common_cuda_device.h"
#include "common_cuda.h"
#include "common.h"
#include <stdio.h>

//////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////

// In-place fast (Walsh-)Hadamard transform over one vector held in shared memory.
// One thread per element (dim_idx indexes the element this thread owns).
// Large strides (> half warp) exchange values through shared memory with
// __syncthreads() barriers; once the butterfly fits inside a warp, it switches
// to register exchange via __shfl_xor_sync.
// Assumes vector_dim is a power of two and one full block covers the vector —
// TODO(review): confirm with the host-side launcher.
inline __device__ void fast_hadamard_transform(float *vector_buffer, int vector_dim, int dim_idx) {
  int stride = vector_dim / 2;
  while (stride > (WARP_SIZE / 2)) {
    __syncthreads();
    // sign is +1 for the "upper" partner and -1 for the "lower" one.
    int sign = 1 - ((dim_idx / stride) % 2) * 2;
    float val1 = vector_buffer[dim_idx];
    float val2 = vector_buffer[dim_idx + sign * stride];
    __syncthreads();
    vector_buffer[dim_idx] = float(sign) * val1 + val2;
    stride = stride / 2;
  }
  // Intra-warp butterfly stages: no shared memory or block sync needed.
  float val = vector_buffer[dim_idx];
  #pragma unroll
  for (stride = (WARP_SIZE / 2); stride > 0; stride = stride / 2) {
    int sign = 1 - ((dim_idx / stride) % 2) * 2;
    val = float(sign) * val + __shfl_xor_sync(FULL_MASK, val, stride);
  }
  vector_buffer[dim_idx] = val;
}

// Computes LSH hash codes for every unmasked vector.
// Pipeline per vector: three rounds of (random sign flip from Dmat) followed by
// a fast Hadamard transform, then sign bits are packed into hash_code_len-bit codes.
// Launch layout (from the indexing below): blockIdx = (part, vector, batch),
// threadIdx.x = element index within the vector; dynamic shared memory holds one vector.
__global__ void fast_hash_ver1_cuda_kernel(
  int *mask,       // [batch_size, num_vector]
  float *vector,   // [batch_size, num_vector, vector_dim]
  int *Dmat,       // [batch_size, 3, num_part, vector_dim]
  int *hash_code,  // [batch_size, num_vector, num_hash_f]
  int batch_size,
  int num_vector,
  int vector_dim,
  int num_part,
  int num_hash_f,
  int hash_code_len
) {

  int batch_idx = blockIdx.z;
  int vector_idx = blockIdx.y;
  int part_idx = blockIdx.x;

  int dim_idx = threadIdx.x;

  int batch_idx__vector_idx = batch_idx * num_vector + vector_idx;
  // Masked-out vectors get no hash codes.
  if (mask[batch_idx__vector_idx] == 0) {
    return;
  }

  extern __shared__ float buffer[];
  float *vector_buffer = buffer;

  vector_buffer[dim_idx] = vector[batch_idx__vector_idx * vector_dim + dim_idx];

  vector_buffer[dim_idx] = vector_buffer[dim_idx] * (float)Dmat[((batch_idx * 3 + 0) * num_part + part_idx) * vector_dim + dim_idx];
  fast_hadamard_transform(vector_buffer, vector_dim, dim_idx);
  vector_buffer[dim_idx] = vector_buffer[dim_idx] * (float)Dmat[((batch_idx * 3 + 1) * num_part + part_idx) * vector_dim + dim_idx];
  fast_hadamard_transform(vector_buffer, vector_dim, dim_idx);
  vector_buffer[dim_idx] = vector_buffer[dim_idx] * (float)Dmat[((batch_idx * 3 + 2) * num_part + part_idx) * vector_dim + dim_idx];
  fast_hadamard_transform(vector_buffer, vector_dim, dim_idx);

  int num_hash_per_part = vector_dim / hash_code_len;
  if (hash_code_len == 8 || hash_code_len == 16) {
    // Code length divides the warp size: pack sign bits with an intra-warp
    // XOR-shuffle reduction (each lane contributes its own bit).
    int code = select(vector_buffer[dim_idx] > 0, 1 << (dim_idx % hash_code_len), 0);
    for (int offset = 1; offset < hash_code_len; offset = offset * 2) {
      code += __shfl_xor_sync(FULL_MASK, code, offset);
    }
    if (dim_idx % hash_code_len == 0) {
      int hash_f_idx = part_idx * num_hash_per_part + dim_idx / hash_code_len;
      if (hash_f_idx < num_hash_f) {
        hash_code[batch_idx__vector_idx * num_hash_f + hash_f_idx] = code;
      }
    }
  } else {
    // General case: stage per-element bits in shared memory, then one thread
    // per hash function sums its hash_code_len bits sequentially.
    vector_buffer[dim_idx] = select(vector_buffer[dim_idx] > 0, 1 << (dim_idx % hash_code_len), 0);
    __syncthreads();
    if (dim_idx < num_hash_per_part) {
      int code = 0;
      for (int i = 0; i < hash_code_len; i++) {
        code += vector_buffer[dim_idx * hash_code_len + i];
      }
      int hash_f_idx = part_idx * num_hash_per_part + dim_idx;
      if (hash_f_idx < num_hash_f) {
        hash_code[batch_idx__vector_idx * num_hash_f + hash_f_idx] = code;
      }
    }
  }
}

// Scatter step of unweighted LSH cumulation: each unmasked key atomically adds
// a WARP_SIZE-wide slice of its value vector (columns [offset_warp, offset_warp+32))
// into every hash-table bucket it hashes to.
// Launch layout: one warp per key (threadIdx.x = lane / value column,
// threadIdx.y = key within block, blockIdx.y = batch).
__global__ void lsh_cumulation_ver1_step1_cuda_kernel(
  int *key_mask,           // [batch_size, num_key]
  int *key_hash_code,      // [batch_size, num_key, num_hash_f]
  float *value,            // [batch_size, num_key, value_dim]
  float *hashtable_value,  // [batch_size, num_hash_f, hashtable_capacity, WARP_SIZE]
  int batch_size,
  int num_hash_f,
  int hashtable_capacity,
  int num_key,
  int value_dim,
  int offset_warp
) {

  int warp_thread_idx = threadIdx.x;

  int batch_idx = blockIdx.y;
  int key_idx = blockIdx.x * blockDim.y + threadIdx.y;

  int batch_idx__key_idx = batch_idx * num_key + key_idx;
  if (key_mask[batch_idx__key_idx] == 0) {
    return;
  }

  if (num_hash_f > WARP_SIZE) {
    // More hash functions than lanes: load 32 hash codes at a time, then
    // broadcast each one across the warp with __shfl_sync so all lanes
    // update the same bucket row together.
    float warp_value = value[batch_idx__key_idx * value_dim + offset_warp + warp_thread_idx];
    for (int hash_f_start = 0; hash_f_start < num_hash_f; hash_f_start = hash_f_start + WARP_SIZE) {
      int warp_hashcode = key_hash_code[batch_idx__key_idx * num_hash_f + hash_f_start + warp_thread_idx];
      #pragma unroll
      for (int hash_f_offset = 0; hash_f_offset < WARP_SIZE; hash_f_offset++) {
        int current_hashcode = warp_hashcode;
        current_hashcode = __shfl_sync(FULL_MASK, current_hashcode, hash_f_offset);
        int hashtable_idx = (batch_idx * num_hash_f + (hash_f_start + hash_f_offset)) * hashtable_capacity + current_hashcode;
        atomicAdd(&hashtable_value[hashtable_idx * WARP_SIZE + warp_thread_idx], warp_value);
      }
    }
  } else {
    // Few hash functions: each lane holds at most one hash code and they
    // are broadcast one by one.
    float warp_value = value[batch_idx__key_idx * value_dim + offset_warp + warp_thread_idx];
    int warp_hashcode = 0;
    if (warp_thread_idx < num_hash_f) {
      warp_hashcode = key_hash_code[batch_idx__key_idx * num_hash_f + warp_thread_idx];
    }
    for (int hash_f_idx = 0; hash_f_idx < num_hash_f; hash_f_idx++) {
      int current_hashcode = warp_hashcode;
      current_hashcode = __shfl_sync(FULL_MASK, current_hashcode, hash_f_idx);
      int hashtable_idx = (batch_idx * num_hash_f + hash_f_idx) * hashtable_capacity + current_hashcode;
      atomicAdd(&hashtable_value[hashtable_idx * WARP_SIZE + warp_thread_idx], warp_value);
    }
  }
}

// Gather step of unweighted LSH cumulation: each unmasked query sums the bucket
// rows it hashes to and writes the average (over num_hash_f) into its slice of
// cumulation_value. Mirrors the lane/warp layout of step 1.
__global__ void lsh_cumulation_ver1_step2_cuda_kernel(
  int *query_mask,         // [batch_size, num_query]
  int *query_hash_code,    // [batch_size, num_query, num_hash_f]
  float *hashtable_value,  // [batch_size, num_hash_f, hashtable_capacity, WARP_SIZE]
  float *cumulation_value, // [batch_size, num_query, value_dim]
  int batch_size,
  int num_hash_f,
  int hashtable_capacity,
  int num_query,
  int value_dim,
  int offset_warp
) {

  int warp_thread_idx = threadIdx.x;

  int batch_idx = blockIdx.y;
  int query_idx = blockIdx.x * blockDim.y + threadIdx.y;

  int batch_idx__query_idx = batch_idx * num_query + query_idx;
  if (query_mask[batch_idx__query_idx] == 0) {
    return;
  }

  if (num_hash_f > WARP_SIZE) {
    float warp_value = 0;
    for (int hash_f_start = 0; hash_f_start < num_hash_f; hash_f_start = hash_f_start + WARP_SIZE) {
      int warp_hashcode = query_hash_code[batch_idx__query_idx * num_hash_f + hash_f_start + warp_thread_idx];
      #pragma unroll
      for (int hash_f_offset = 0; hash_f_offset < WARP_SIZE; hash_f_offset++) {
        int current_hashcode = warp_hashcode;
        current_hashcode = __shfl_sync(FULL_MASK, current_hashcode, hash_f_offset);
        int hashtable_idx = (batch_idx * num_hash_f + (hash_f_start + hash_f_offset)) * hashtable_capacity + current_hashcode;
        warp_value = warp_value + hashtable_value[hashtable_idx * WARP_SIZE + warp_thread_idx];
      }
    }
    // Average over hash functions.
    cumulation_value[batch_idx__query_idx * value_dim + offset_warp + warp_thread_idx] = warp_value / float(num_hash_f);
  } else {
    float warp_value = 0;
    int warp_hashcode = 0;
    if (warp_thread_idx < num_hash_f) {
      warp_hashcode = query_hash_code[batch_idx__query_idx * num_hash_f + warp_thread_idx];
    }
    for (int hash_f_idx = 0; hash_f_idx < num_hash_f; hash_f_idx++) {
      int current_hashcode = warp_hashcode;
      current_hashcode = __shfl_sync(FULL_MASK, current_hashcode, hash_f_idx);
      int hashtable_idx = (batch_idx * num_hash_f + hash_f_idx) * hashtable_capacity + current_hashcode;
      warp_value = warp_value + hashtable_value[hashtable_idx * WARP_SIZE + warp_thread_idx];
    }
    cumulation_value[batch_idx__query_idx * value_dim + offset_warp + warp_thread_idx] = warp_value / float(num_hash_f);
  }
}

// Weighted variant of the scatter step: identical bucket traversal to
// lsh_cumulation_ver1_step1, but each key's value slice is pre-scaled by one
// scalar key weight (column weight_idx of key_weight).
__global__ void lsh_weighted_cumulation_ver1_step1_cuda_kernel(
  int *key_mask,            // [batch_size, num_key]
  int *key_hash_code,       // [batch_size, num_key, num_hash_f]
  float *key_weight,        // [batch_size, num_key, weight_dim]
  float *value,             // [batch_size, num_key, value_dim]
  float *hashtable_value,   // [batch_size, num_hash_f, hashtable_capacity, WARP_SIZE]
  int batch_size,
  int num_hash_f,
  int hashtable_capacity,
  int num_key,
  int value_dim,
  int weight_dim,
  int offset_warp,
  int weight_idx
) {

  int warp_thread_idx = threadIdx.x;

  int batch_idx = blockIdx.y;
  int key_idx = blockIdx.x * blockDim.y + threadIdx.y;

  int batch_idx__key_idx = batch_idx * num_key + key_idx;
  if (key_mask[batch_idx__key_idx] == 0) {
    return;
  }

  if (num_hash_f > WARP_SIZE) {
    float warp_value = key_weight[batch_idx__key_idx * weight_dim + weight_idx] * value[batch_idx__key_idx * value_dim + offset_warp + warp_thread_idx];
    for (int hash_f_start = 0; hash_f_start < num_hash_f; hash_f_start = hash_f_start + WARP_SIZE) {
      int warp_hashcode = key_hash_code[batch_idx__key_idx * num_hash_f + hash_f_start + warp_thread_idx];
      #pragma unroll
      for (int hash_f_offset = 0; hash_f_offset < WARP_SIZE; hash_f_offset++) {
        int current_hashcode = warp_hashcode;
        current_hashcode = __shfl_sync(FULL_MASK, current_hashcode, hash_f_offset);
        int hashtable_idx = (batch_idx * num_hash_f + (hash_f_start + hash_f_offset)) * hashtable_capacity + current_hashcode;
        atomicAdd(&hashtable_value[hashtable_idx * WARP_SIZE + warp_thread_idx], warp_value);
      }
    }
  } else {
    float warp_value = key_weight[batch_idx__key_idx * weight_dim + weight_idx] * value[batch_idx__key_idx * value_dim + offset_warp + warp_thread_idx];
    int warp_hashcode = 0;
    if (warp_thread_idx < num_hash_f) {
      warp_hashcode = key_hash_code[batch_idx__key_idx * num_hash_f + warp_thread_idx];
    }
    for (int hash_f_idx = 0; hash_f_idx < num_hash_f; hash_f_idx++) {
      int current_hashcode = warp_hashcode;
      current_hashcode = __shfl_sync(FULL_MASK, current_hashcode, hash_f_idx);
      int hashtable_idx = (batch_idx * num_hash_f + hash_f_idx) * hashtable_capacity + current_hashcode;
      atomicAdd(&hashtable_value[hashtable_idx * WARP_SIZE + warp_thread_idx], warp_value);
    }
  }
}

// Weighted variant of the gather step: like lsh_cumulation_ver1_step2 but the
// averaged bucket sum is scaled by one scalar query weight and ACCUMULATED
// (+=) into cumulation_value rather than overwriting it — presumably so the
// host can loop over weight_idx; confirm with the caller.
__global__ void lsh_weighted_cumulation_ver1_step2_cuda_kernel(
  int *query_mask,         // [batch_size, num_query]
  int *query_hash_code,    // [batch_size, num_query, num_hash_f]
  float *query_weight,     // [batch_size, num_query, weight_dim]
  float *hashtable_value,  // [batch_size, num_hash_f, hashtable_capacity, WARP_SIZE]
  float *cumulation_value, // [batch_size, num_query, value_dim]
  int batch_size,
  int num_hash_f,
  int hashtable_capacity,
  int num_query,
  int value_dim,
  int weight_dim,
  int offset_warp,
  int weight_idx
) {

  int warp_thread_idx = threadIdx.x;

  int batch_idx = blockIdx.y;
  int query_idx = blockIdx.x * blockDim.y + threadIdx.y;

  int batch_idx__query_idx = batch_idx * num_query + query_idx;
  if (query_mask[batch_idx__query_idx] == 0) {
    return;
  }

  if (num_hash_f > WARP_SIZE) {
    float warp_value = 0;
    for (int hash_f_start = 0; hash_f_start < num_hash_f; hash_f_start = hash_f_start + WARP_SIZE) {
      int warp_hashcode = query_hash_code[batch_idx__query_idx * num_hash_f + hash_f_start + warp_thread_idx];
      #pragma unroll
      for (int hash_f_offset = 0; hash_f_offset < WARP_SIZE; hash_f_offset++) {
        int current_hashcode = warp_hashcode;
        current_hashcode = __shfl_sync(FULL_MASK, current_hashcode, hash_f_offset);
        int hashtable_idx = (batch_idx * num_hash_f + (hash_f_start + hash_f_offset)) * hashtable_capacity + current_hashcode;
        warp_value = warp_value + hashtable_value[hashtable_idx * WARP_SIZE + warp_thread_idx];
      }
    }
    float warp_weight = query_weight[batch_idx__query_idx * weight_dim + weight_idx];
    cumulation_value[batch_idx__query_idx * value_dim + offset_warp + warp_thread_idx] += warp_weight * warp_value / float(num_hash_f);
  } else {
    float warp_value = 0;
    int warp_hashcode = 0;
    if (warp_thread_idx < num_hash_f) {
      warp_hashcode = query_hash_code[batch_idx__query_idx * num_hash_f + warp_thread_idx];
    }
    for (int hash_f_idx = 0; hash_f_idx < num_hash_f; hash_f_idx++) {
      int current_hashcode = warp_hashcode;
      current_hashcode = __shfl_sync(FULL_MASK, current_hashcode, hash_f_idx);
      int hashtable_idx = (batch_idx * num_hash_f + hash_f_idx) * hashtable_capacity + current_hashcode;
      warp_value = warp_value + hashtable_value[hashtable_idx * WARP_SIZE + warp_thread_idx];
    }
    float warp_weight = query_weight[batch_idx__query_idx * weight_dim + weight_idx];
    cumulation_value[batch_idx__query_idx * value_dim + offset_warp + warp_thread_idx] += warp_weight * warp_value / float(num_hash_f);
  }
}

// Counting-sort phase 1: histogram. Each unmasked key atomically bumps the
// bucket counter for its hash code under every hash function
// (threadIdx.x = hash function, threadIdx.y = key within block).
// count_sort_table is expected to be zeroed beforehand — TODO(review): confirm host zeroes it.
__global__ void count_sort_step1_cuda_kernel(
  int *key_mask,         // [batch_size, num_key]
  int *key_hash_code,    // [batch_size, num_key, num_hash_f]
  int *count_sort_table, // [batch_size, num_hash_f, hashtable_capacity]
  int batch_size,
  int num_hash_f,
  int hashtable_capacity,
  int num_key
) {

  int batch_idx = blockIdx.y;
  int key_idx = blockIdx.x * blockDim.y + threadIdx.y;
  int hash_f_idx = threadIdx.x;

  int batch_idx__key_idx = batch_idx * num_key + key_idx;
  if (key_mask[batch_idx__key_idx] == 0) {
    return;
  }

  int hash_code = key_hash_code[batch_idx__key_idx * num_hash_f + hash_f_idx];
  atomicAdd(&count_sort_table[(batch_idx * num_hash_f + hash_f_idx) * hashtable_capacity + hash_code], 1);
}

// Counting-sort phase 2: turn each (batch, hash function) histogram into an
// exclusive prefix sum, in place. One block per histogram: the table is staged
// into shared memory shifted right by one slot (table_buffer[0] = 0), scanned
// per warp with __shfl_up_sync, then warp partials are combined serially.
__global__ void count_sort_step2_cuda_kernel(
  int *count_sort_table,  // [batch_size, num_hash_f, hashtable_capacity]
  int batch_size,
  int num_hash_f,
  int hashtable_capacity
) {

  int batch_idx = blockIdx.y;
  int hash_f_idx = blockIdx.x;

  int num_threads = blockDim.x;
  int thread_id = threadIdx.x;

  int batch_idx__hash_f_idx = batch_idx * num_hash_f + hash_f_idx;

  extern __shared__ float buffer[];
  int *table_buffer = (int*)buffer;

  // Shift by one so the scan result is an exclusive prefix sum.
  if (thread_id == 0) {
    table_buffer[0] = 0;
  }
  copy_data<int>(&count_sort_table[batch_idx__hash_f_idx * hashtable_capacity], &table_buffer[1], hashtable_capacity - 1, num_threads, thread_id);

  // Hillis-Steele inclusive scan within each warp-sized segment.
  for (int table_idx_start = 0; table_idx_start < hashtable_capacity; table_idx_start = table_idx_start + num_threads) {
    int thread_value = table_buffer[table_idx_start + thread_id];
    int next_thread_value = 0;
    for (int offset = 1; offset < WARP_SIZE; offset = offset << 1) {
      next_thread_value = __shfl_up_sync(FULL_MASK, thread_value, offset);
      if (thread_id % WARP_SIZE >= offset) {
        thread_value = thread_value + next_thread_value;
      }
    }
    table_buffer[table_idx_start + thread_id] = thread_value;
  }
  __syncthreads();

  // Propagate each preceding segment's total into the following segments.
  if (hashtable_capacity > WARP_SIZE) {
    if (thread_id < WARP_SIZE) {
      for (int table_idx_start = WARP_SIZE; table_idx_start < hashtable_capacity; table_idx_start = table_idx_start + WARP_SIZE) {
        table_buffer[table_idx_start + thread_id] += table_buffer[table_idx_start - 1];
      }
    }
  }

  copy_data<int>(table_buffer, &count_sort_table[batch_idx__hash_f_idx * hashtable_capacity], hashtable_capacity, num_threads, thread_id);
}

// Counting-sort phase 3: scatter. Using the exclusive offsets from step 2,
// each unmasked key atomically claims the next slot of its bucket and records
// its index, yielding key indices grouped by hash code per hash function.
// Note: this advances count_sort_table in place (it becomes inclusive offsets).
__global__ void count_sort_step3_cuda_kernel(
  int *key_mask,          // [batch_size, num_key]
  int *key_hash_code,     // [batch_size, num_key, num_hash_f]
  int *count_sort_table,  // [batch_size, num_hash_f, hashtable_capacity]
  int *key_sorted_idxes,  // [batch_size, num_hash_f, num_key]
  int batch_size,
  int num_hash_f,
  int hashtable_capacity,
  int num_key
) {

  int batch_idx = blockIdx.y;
  int key_idx = blockIdx.x * blockDim.y + threadIdx.y;
  int hash_f_idx = threadIdx.x;

  int batch_idx__key_idx = batch_idx * num_key + key_idx;
  if (key_mask[batch_idx__key_idx] == 0) {
    return;
  }

  int batch_idx__hash_f_idx = batch_idx * num_hash_f + hash_f_idx;

  int hash_code = key_hash_code[batch_idx__key_idx * num_hash_f + hash_f_idx];
  int sort_idx = atomicAdd(&count_sort_table[batch_idx__hash_f_idx * hashtable_capacity + hash_code], 1);
  key_sorted_idxes[batch_idx__hash_f_idx * num_key + sort_idx] = key_idx;
}

// For each unmasked query and hash function, look up the bucket matching the
// query's hash code and record (offset, count) of the colliding keys inside
// key_sorted_idxes. Bucket 0's offset is 0; otherwise it is the previous
// bucket's end taken from the scanned count_sort_table.
__global__ void extract_query_info_cuda_kernel(
  int *query_mask,        // [batch_size, num_query]
  int *query_hash_code,   // [batch_size, num_query, num_hash_f]
  int *count_sort_table,  // [batch_size, num_hash_f, hashtable_capacity]
  int *query_info,        // [batch_size, num_query, 2, num_hash_f]
  int batch_size,
  int num_hash_f,
  int hashtable_capacity,
  int num_query
) {

  int batch_idx = blockIdx.y;
  int query_idx = blockIdx.x * blockDim.y + threadIdx.y;
  int hash_f_idx = threadIdx.x;

  int batch_idx__query_idx = batch_idx * num_query + query_idx;
  if (query_mask[batch_idx__query_idx] == 0) {
    return;
  }

  int hash_code = query_hash_code[batch_idx__query_idx * num_hash_f + hash_f_idx];
  int batch_idx__hash_f_idx__hash_code = (batch_idx * num_hash_f + hash_f_idx) * hashtable_capacity + hash_code;

  int key_offset = select(hash_code == 0, 0, count_sort_table[batch_idx__hash_f_idx__hash_code - 1]);
  int key_count = count_sort_table[batch_idx__hash_f_idx__hash_code] - key_offset;

  query_info[batch_idx__query_idx * 2 * num_hash_f + hash_f_idx] = key_offset;
  query_info[(batch_idx__query_idx * 2 + 1) * num_hash_f + hash_f_idx] = key_count;
}

// Ver2 accumulation, query-major: one block per (query, hash function, batch).
// For each key colliding with the query under this hash function, the block
// computes the query.key weight dot product (warp XOR-shuffle reduction) and
// atomically adds weight/num_hash_f * value[key] into the query's output row.
// Dynamic shared memory: weight_dim floats (query weights) + WARP_SIZE ints (key indices).
__global__ void lsh_weighted_cumulation_ver2_step2_cuda_kernel(
  int *query_mask,         // [batch_size, num_query]
  int *query_info,         // [batch_size, num_query, 2, num_hash_f]
  int *key_sorted_idxes,   // [batch_size, num_hash_f, num_key]
  float *query_weight,     // [batch_size, num_query, weight_dim]
  float *key_weight,       // [batch_size, num_key, weight_dim]
  float *value,            // [batch_size, num_key, value_dim]
  float *cumulation_value, // [batch_size, num_query, value_dim]
  int batch_size,
  int num_hash_f,
  int num_query,
  int num_key,
  int value_dim,
  int weight_dim
) {

  int batch_idx = blockIdx.z;
  int hash_f_idx = blockIdx.y;
  int query_idx = blockIdx.x;

  int num_threads = blockDim.y * blockDim.x;
  int thread_id = threadIdx.y * blockDim.x + threadIdx.x;

  int num_warps = blockDim.y;
  int warp_idx = threadIdx.y;
  int warp_thread_idx = threadIdx.x;

  int batch_idx__query_idx = batch_idx * num_query + query_idx;
  if (query_mask[batch_idx__query_idx] == 0) {
    return;
  }

  int key_offset = query_info[batch_idx__query_idx * 2 * num_hash_f + hash_f_idx];
  int key_count = query_info[(batch_idx__query_idx * 2 + 1) * num_hash_f + hash_f_idx];

  if (key_count == 0) {
    return;
  }

  extern __shared__ float buffer[];

  if (key_count == 1) {
    // Single colliding key: warp 0 handles it directly from global memory.
    if (warp_idx == 0) {
      int key_idx = key_sorted_idxes[(batch_idx * num_hash_f + hash_f_idx) * num_key + key_offset];
      int batch_idx__key_idx = batch_idx * num_key + key_idx;
      float weight = 0;
      for (int weight_offset = 0; weight_offset < weight_dim; weight_offset = weight_offset + WARP_SIZE) {
        int weight_dim_idx = weight_offset + warp_thread_idx;
        float val = query_weight[batch_idx__query_idx * weight_dim + weight_dim_idx] * key_weight[batch_idx__key_idx * weight_dim + weight_dim_idx];
        #pragma unroll
        for (int offset = 1; offset < WARP_SIZE; offset = offset << 1) {
          val += __shfl_xor_sync(FULL_MASK, val, offset);
        }
        weight = weight + val;
      }
      weight = weight / float(num_hash_f);
      for (int value_offset = 0; value_offset < value_dim; value_offset = value_offset + WARP_SIZE) {
        int value_dim_idx = value_offset + warp_thread_idx;
        float val = value[batch_idx__key_idx * value_dim + value_dim_idx];
        atomicAdd(&cumulation_value[batch_idx__query_idx * value_dim + value_dim_idx], weight * val);
      }
    }
  } else {
    // Multiple keys: cache the query's weight vector once, then process the
    // colliding keys in warp-sized batches, one key per warp.
    float *weight_buffer = buffer;
    int *key_idxes_buffer = (int*)&buffer[weight_dim];

    copy_data_nonblocking<float>(&query_weight[batch_idx__query_idx * weight_dim], weight_buffer, weight_dim, num_threads, thread_id);

    while (key_count > 0) {
      int work_size = min(WARP_SIZE, key_count);
      copy_data_nonblocking<int>(&key_sorted_idxes[(batch_idx * num_hash_f + hash_f_idx) * num_key + key_offset], key_idxes_buffer, work_size, num_threads, thread_id);
      __syncthreads();
      for (int work_offset = 0; work_offset < WARP_SIZE; work_offset = work_offset + num_warps) {
        int work_idx = work_offset + warp_idx;
        if (work_idx < key_count) {
          int key_idx = key_idxes_buffer[work_idx];
          int batch_idx__key_idx = batch_idx * num_key + key_idx;
          float weight = 0;
          for (int weight_offset = 0; weight_offset < weight_dim; weight_offset = weight_offset + WARP_SIZE) {
            int weight_dim_idx = weight_offset + warp_thread_idx;
            float val = weight_buffer[weight_dim_idx] * key_weight[batch_idx__key_idx * weight_dim + weight_dim_idx];
            #pragma unroll
            for (int offset = 1; offset < WARP_SIZE; offset = offset << 1) {
              val += __shfl_xor_sync(FULL_MASK, val, offset);
            }
            weight = weight + val;
          }
          weight = weight / float(num_hash_f);
          for (int value_offset = 0; value_offset < value_dim; value_offset = value_offset + WARP_SIZE) {
            int value_dim_idx = value_offset + warp_thread_idx;
            float val = value[batch_idx__key_idx * value_dim + value_dim_idx];
            atomicAdd(&cumulation_value[batch_idx__query_idx * value_dim + value_dim_idx], weight * val);
          }
        }
      }
      key_count = key_count - work_size;
      key_offset = key_offset + work_size;
    }
  }
}

// Ver3 accumulation, key-major mirror of ver2: one block per (key, hash
// function, batch), iterating over the queries colliding with the key.
// Additionally caches the key's value vector in shared memory, since it is
// reused for every colliding query.
// Dynamic shared memory: weight_dim + value_dim floats + WARP_SIZE ints.
__global__ void lsh_weighted_cumulation_ver3_step2_cuda_kernel(
  int *query_sorted_idxes,  // [batch_size, num_hash_f, num_query]
  int *key_mask,            // [batch_size, num_key]
  int *key_info,            // [batch_size, num_key, 2, num_hash_f]
  float *query_weight,      // [batch_size, num_query, weight_dim]
  float *key_weight,        // [batch_size, num_key, weight_dim]
  float *value,             // [batch_size, num_key, value_dim]
  float *cumulation_value,  // [batch_size, num_query, value_dim]
  int batch_size,
  int num_hash_f,
  int num_query,
  int num_key,
  int value_dim,
  int weight_dim
) {

  int batch_idx = blockIdx.z;
  int hash_f_idx = blockIdx.y;
  int key_idx = blockIdx.x;

  int num_threads = blockDim.y * blockDim.x;
  int thread_id = threadIdx.y * blockDim.x + threadIdx.x;

  int num_warps = blockDim.y;
  int warp_idx = threadIdx.y;
  int warp_thread_idx = threadIdx.x;

  int batch_idx__key_idx = batch_idx * num_key + key_idx;
  if (key_mask[batch_idx__key_idx] == 0) {
    return;
  }

  int query_offset = key_info[batch_idx__key_idx * 2 * num_hash_f + hash_f_idx];
  int query_count = key_info[(batch_idx__key_idx * 2 + 1) * num_hash_f + hash_f_idx];

  if (query_count == 0) {
    return;
  }

  extern __shared__ float buffer[];

  if (query_count == 1) {
    // Single colliding query: warp 0 handles it directly from global memory.
    if (warp_idx == 0) {
      int query_idx = query_sorted_idxes[(batch_idx * num_hash_f + hash_f_idx) * num_query + query_offset];
      int batch_idx__query_idx = batch_idx * num_query + query_idx;
      float weight = 0;
      for (int weight_offset = 0; weight_offset < weight_dim; weight_offset = weight_offset + WARP_SIZE) {
        int weight_dim_idx = weight_offset + warp_thread_idx;
        float val = key_weight[batch_idx__key_idx * weight_dim + weight_dim_idx] * query_weight[batch_idx__query_idx * weight_dim + weight_dim_idx];
        #pragma unroll
        for (int offset = 1; offset < WARP_SIZE; offset = offset << 1) {
          val += __shfl_xor_sync(FULL_MASK, val, offset);
        }
        weight = weight + val;
      }
      weight = weight / float(num_hash_f);
      for (int value_offset = 0; value_offset < value_dim; value_offset = value_offset + WARP_SIZE) {
        int value_dim_idx = value_offset + warp_thread_idx;
        float val = value[batch_idx__key_idx * value_dim + value_dim_idx];
        atomicAdd(&cumulation_value[batch_idx__query_idx * value_dim + value_dim_idx], weight * val);
      }
    }
  } else {
    // Multiple queries: cache this key's weight and value vectors, then
    // process colliding queries in warp-sized batches, one query per warp.
    float *weight_buffer = buffer;
    float *value_buffer = &buffer[weight_dim];
    int *query_idxes_buffer = (int*)&buffer[weight_dim + value_dim];

    copy_data_nonblocking<float>(&key_weight[batch_idx__key_idx * weight_dim], weight_buffer, weight_dim, num_threads, thread_id);
    copy_data_nonblocking<float>(&value[batch_idx__key_idx * value_dim], value_buffer, value_dim, num_threads, thread_id);

    while (query_count > 0) {
      int work_size = min(WARP_SIZE, query_count);
      copy_data_nonblocking<int>(&query_sorted_idxes[(batch_idx * num_hash_f + hash_f_idx) * num_query + query_offset], query_idxes_buffer, work_size, num_threads, thread_id);
      __syncthreads();
      for (int work_offset = 0; work_offset < WARP_SIZE; work_offset = work_offset + num_warps) {
        int work_idx = work_offset + warp_idx;
        if (work_idx < query_count) {
          int query_idx = query_idxes_buffer[work_idx];
          int batch_idx__query_idx = batch_idx * num_query + query_idx;
          float weight = 0;
          for (int weight_offset = 0; weight_offset < weight_dim; weight_offset = weight_offset + WARP_SIZE) {
            int weight_dim_idx = weight_offset + warp_thread_idx;
            float val = weight_buffer[weight_dim_idx] * query_weight[batch_idx__query_idx * weight_dim + weight_dim_idx];
            #pragma unroll
            for (int offset = 1; offset < WARP_SIZE; offset = offset << 1) {
              val += __shfl_xor_sync(FULL_MASK, val, offset);
            }
            weight = weight + val;
          }
          weight = weight / float(num_hash_f);
          for (int value_offset = 0; value_offset < value_dim; value_offset = value_offset + WARP_SIZE) {
            int value_dim_idx = value_offset + warp_thread_idx;
            float val = value_buffer[value_dim_idx];
            atomicAdd(&cumulation_value[batch_idx__query_idx * value_dim + value_dim_idx], weight * val);
          }
        }
      }
      query_count = query_count - work_size;
      query_offset = query_offset + work_size;
    }
  }
}

// Ver4 accumulation: one block per (key, batch), covering ALL hash functions.
// Queries colliding with the key under any hash function are deduplicated via
// a shared-memory hash set (set_insert/set_lookup), counting duplicates so a
// query matched by d hash functions contributes d * weight / num_hash_f once.
// The set has fixed capacity; when nearly full, accumulation is flushed for the
// collected queries, buffers are reset, and collection resumes (outer while loop).
// Dynamic shared memory: weight_dim + value_dim floats + 2*num_hash_f ints.
__global__ void lsh_weighted_cumulation_ver4_step2_cuda_kernel(
  int *query_sorted_idxes,  // [batch_size, num_hash_f, num_query]
  int *key_mask,            // [batch_size, num_key]
  int *key_info,            // [batch_size, num_key, 2, num_hash_f]
  float *query_weight,      // [batch_size, num_query, weight_dim]
  float *key_weight,        // [batch_size, num_key, weight_dim]
  float *value,             // [batch_size, num_key, value_dim]
  float *cumulation_value,  // [batch_size, num_query, value_dim]
  int batch_size,
  int num_hash_f,
  int num_query,
  int num_key,
  int value_dim,
  int weight_dim
) {

  int batch_idx = blockIdx.y;
  int key_idx = blockIdx.x;

  int num_threads = blockDim.y * blockDim.x;
  int thread_id = threadIdx.y * blockDim.x + threadIdx.x;

  int num_warps = blockDim.y;
  int warp_idx = threadIdx.y;
  int warp_thread_idx = threadIdx.x;

  int batch_idx__key_idx = batch_idx * num_key + key_idx;
  if (key_mask[batch_idx__key_idx] == 0) {
    return;
  }

  extern __shared__ float buffer[];
  float *weight_buffer = buffer;
  float *value_buffer = &buffer[weight_dim];
  int *key_info_buffer = (int*)&buffer[weight_dim + value_dim];

  copy_data_nonblocking<float>(&key_weight[batch_idx__key_idx * weight_dim], weight_buffer, weight_dim, num_threads, thread_id);
  copy_data_nonblocking<float>(&value[batch_idx__key_idx * value_dim], value_buffer, value_dim, num_threads, thread_id);
  copy_data_nonblocking<int>(&key_info[batch_idx__key_idx * 2 * num_hash_f], key_info_buffer, 2 * num_hash_f, num_threads, thread_id);

  // key_info layout: first num_hash_f ints are offsets, next num_hash_f are counts.
  int *query_offset_buffer = key_info_buffer;
  int *query_count_buffer = &key_info_buffer[num_hash_f];

  const int hashtable_size = 1024 + OPTIMAL_THREADS_PER_BLOCK;
  __shared__ int hashtable_query[hashtable_size];
  __shared__ int hashtable_count[hashtable_size];
  __shared__ int inserted_query[hashtable_size];
  __shared__ int query_counter[1];

  int hash_f_idx_base = 0;

  while (true) {

    init_buffer_nonblocking<int>(EMPTY_VALUE, hashtable_query, hashtable_size, num_threads, thread_id);
    init_buffer_nonblocking<int>(0, hashtable_count, hashtable_size, num_threads, thread_id);
    init_buffer_nonblocking<int>(EMPTY_VALUE, inserted_query, hashtable_size, num_threads, thread_id);
    init_buffer_nonblocking<int>(0, query_counter, 1, num_threads, thread_id);
    __syncthreads();

    // Collection phase: each warp walks one hash function's colliding-query
    // list, inserting query indices into the shared dedup set.
    while (hash_f_idx_base < num_hash_f) {

      int hash_f_idx = hash_f_idx_base + warp_idx;
      int batch_idx__hash_f_idx = batch_idx * num_hash_f + hash_f_idx;

      int stop_flag = 0;

      int query_offset = query_offset_buffer[hash_f_idx];
      int query_count = query_count_buffer[hash_f_idx];

      while (query_count > 0) {

        int work_size = min(query_count, WARP_SIZE);

        // try inserting query to set and check whether the query is new
        int found_new_query = 0;
        int query_idx = -1;
        if (warp_thread_idx < work_size) {
          query_idx = query_sorted_idxes[batch_idx__hash_f_idx * num_query + query_offset + warp_thread_idx];
          int slot = set_insert<int>(hashtable_query, hashtable_size, query_idx);
          if (slot >= 0) {
            found_new_query = atomicAdd(&hashtable_count[slot], 1) == 0;
          }
        }

        // compute cumulative offset (warp-level inclusive scan of new-query flags)
        int position_offset = found_new_query;
        int next_position_offset = 0;
        #pragma unroll
        for (int offset = 1; offset < WARP_SIZE; offset = offset << 1) {
          next_position_offset = __shfl_up_sync(FULL_MASK, position_offset, offset);
          if (thread_id % WARP_SIZE >= offset) {
            position_offset = position_offset + next_position_offset;
          }
        }

        // get the inserted query list end index (last lane holds the warp total)
        int inserted_query_base = 0;
        if (thread_id % WARP_SIZE == WARP_SIZE - 1) {
          inserted_query_base = atomicAdd(query_counter, position_offset);
        }
        inserted_query_base = __shfl_sync(FULL_MASK, inserted_query_base, WARP_SIZE - 1);

        // insert new queries to list
        int insert_idx = inserted_query_base + position_offset - 1;
        if (found_new_query) {
          inserted_query[insert_idx] = query_idx;
        }

        // remove inserted queries from list
        query_offset_buffer[hash_f_idx] += work_size;
        query_count_buffer[hash_f_idx] -= work_size;
        query_offset += work_size;
        query_count -= work_size;

        // if list is almost full, stop inserting
        if (inserted_query_base + OPTIMAL_THREADS_PER_BLOCK > hashtable_size) {
          stop_flag = 1;
          break;
        }

      }

      if (stop_flag) {
        break;
      }

      hash_f_idx_base = hash_f_idx_base + num_warps;

    }

    __syncthreads();

    int num_distint_query = query_counter[0];

    if (num_distint_query > 0) {
      // Accumulation phase: one warp per collected distinct query; the dot
      // product is scaled by the query's duplicate count before averaging.
      for (int idx_base = 0; idx_base < num_distint_query; idx_base = idx_base + num_warps) {
        int idx = idx_base + warp_idx;
        if (idx < num_distint_query) {
          int query_idx = inserted_query[idx];
          int batch_idx__query_idx = batch_idx * num_query + query_idx;

          int slot = set_lookup<int>(hashtable_query, hashtable_size, query_idx);
          int duplicate_count = hashtable_count[slot];

          float weight = 0;
          for (int weight_idx_base = 0; weight_idx_base < weight_dim; weight_idx_base = weight_idx_base + WARP_SIZE) {
            int weight_dim_idx = weight_idx_base + warp_thread_idx;
            float val = weight_buffer[weight_dim_idx] * query_weight[batch_idx__query_idx * weight_dim + weight_dim_idx];
            #pragma unroll
            for (int offset = 1; offset < WARP_SIZE; offset = offset << 1) {
              val += __shfl_xor_sync(FULL_MASK, val, offset);
            }
            weight = weight + val;
          }
          weight = (float)duplicate_count * weight / float(num_hash_f);

          for (int value_idx_base = 0; value_idx_base < value_dim; value_idx_base = value_idx_base + WARP_SIZE) {
            int value_dim_idx = value_idx_base + warp_thread_idx;
            float val = value_buffer[value_dim_idx];
            atomicAdd(&cumulation_value[batch_idx__query_idx * value_dim + value_dim_idx], weight * val);
          }
        }
      }
    } else {

      // all computation is completed if num_distint_query == 0
      break;

    }

    __syncthreads();

  }

}
the_stack
// CUDA implementation of the Fugue-512 hash over a fixed 64-byte input, used as
// one step of the x13 chain. The four 1-KB mixing tables are uploaded once per
// GPU as 1D textures (x13_fugue512_cpu_init) and staged into dynamic shared
// memory at the top of each kernel launch.

// store allocated textures device addresses
// (indexed [gpu thread id][table 0..3]; freed in x13_fugue512_cpu_free)
static unsigned int* d_textures[MAX_GPUS][4];

// Accessors into the per-block shared-memory copy of the four mix tables:
// table t lives at word offset t*256 inside `mixtabs`.
#define mixtab0(x) (*((uint32_t*)mixtabs + (     (x))))
#define mixtab1(x) (*((uint32_t*)mixtabs + (256+(x))))
#define mixtab2(x) (*((uint32_t*)mixtabs + (512+(x))))
#define mixtab3(x) (*((uint32_t*)mixtabs + (768+(x))))

static texture<unsigned int, 1, cudaReadModeElementType> mixTab0Tex;
static texture<unsigned int, 1, cudaReadModeElementType> mixTab1Tex;
static texture<unsigned int, 1, cudaReadModeElementType> mixTab2Tex;
static texture<unsigned int, 1, cudaReadModeElementType> mixTab3Tex;

// Fugue mix table 0 (256 entries). Tables 1-3 below hold the same entries
// byte-rotated, so the four table lookups in SMIX cover all four byte lanes.
static const uint32_t mixtab0_cpu[] = {
	SPH_C32(0x63633297), SPH_C32(0x7c7c6feb), SPH_C32(0x77775ec7), SPH_C32(0x7b7b7af7),
	SPH_C32(0xf2f2e8e5), SPH_C32(0x6b6b0ab7), SPH_C32(0x6f6f16a7), SPH_C32(0xc5c56d39),
	SPH_C32(0x303090c0), SPH_C32(0x01010704), SPH_C32(0x67672e87), SPH_C32(0x2b2bd1ac),
	SPH_C32(0xfefeccd5), SPH_C32(0xd7d71371), SPH_C32(0xabab7c9a), SPH_C32(0x767659c3),
	SPH_C32(0xcaca4005), SPH_C32(0x8282a33e), SPH_C32(0xc9c94909), SPH_C32(0x7d7d68ef),
	SPH_C32(0xfafad0c5), SPH_C32(0x5959947f), SPH_C32(0x4747ce07), SPH_C32(0xf0f0e6ed),
	SPH_C32(0xadad6e82), SPH_C32(0xd4d41a7d), SPH_C32(0xa2a243be), SPH_C32(0xafaf608a),
	SPH_C32(0x9c9cf946), SPH_C32(0xa4a451a6), SPH_C32(0x727245d3), SPH_C32(0xc0c0762d),
	SPH_C32(0xb7b728ea), SPH_C32(0xfdfdc5d9), SPH_C32(0x9393d47a), SPH_C32(0x2626f298),
	SPH_C32(0x363682d8), SPH_C32(0x3f3fbdfc), SPH_C32(0xf7f7f3f1), SPH_C32(0xcccc521d),
	SPH_C32(0x34348cd0), SPH_C32(0xa5a556a2), SPH_C32(0xe5e58db9), SPH_C32(0xf1f1e1e9),
	SPH_C32(0x71714cdf), SPH_C32(0xd8d83e4d), SPH_C32(0x313197c4), SPH_C32(0x15156b54),
	SPH_C32(0x04041c10), SPH_C32(0xc7c76331), SPH_C32(0x2323e98c), SPH_C32(0xc3c37f21),
	SPH_C32(0x18184860), SPH_C32(0x9696cf6e), SPH_C32(0x05051b14), SPH_C32(0x9a9aeb5e),
	SPH_C32(0x0707151c), SPH_C32(0x12127e48), SPH_C32(0x8080ad36), SPH_C32(0xe2e298a5),
	SPH_C32(0xebeba781), SPH_C32(0x2727f59c), SPH_C32(0xb2b233fe), SPH_C32(0x757550cf),
	SPH_C32(0x09093f24), SPH_C32(0x8383a43a), SPH_C32(0x2c2cc4b0), SPH_C32(0x1a1a4668),
	SPH_C32(0x1b1b416c), SPH_C32(0x6e6e11a3), SPH_C32(0x5a5a9d73), SPH_C32(0xa0a04db6),
	SPH_C32(0x5252a553), SPH_C32(0x3b3ba1ec), SPH_C32(0xd6d61475), SPH_C32(0xb3b334fa),
	SPH_C32(0x2929dfa4), SPH_C32(0xe3e39fa1), SPH_C32(0x2f2fcdbc), SPH_C32(0x8484b126),
	SPH_C32(0x5353a257), SPH_C32(0xd1d10169), SPH_C32(0x00000000), SPH_C32(0xededb599),
	SPH_C32(0x2020e080), SPH_C32(0xfcfcc2dd), SPH_C32(0xb1b13af2), SPH_C32(0x5b5b9a77),
	SPH_C32(0x6a6a0db3), SPH_C32(0xcbcb4701), SPH_C32(0xbebe17ce), SPH_C32(0x3939afe4),
	SPH_C32(0x4a4aed33), SPH_C32(0x4c4cff2b), SPH_C32(0x5858937b), SPH_C32(0xcfcf5b11),
	SPH_C32(0xd0d0066d), SPH_C32(0xefefbb91), SPH_C32(0xaaaa7b9e), SPH_C32(0xfbfbd7c1),
	SPH_C32(0x4343d217), SPH_C32(0x4d4df82f), SPH_C32(0x333399cc), SPH_C32(0x8585b622),
	SPH_C32(0x4545c00f), SPH_C32(0xf9f9d9c9), SPH_C32(0x02020e08), SPH_C32(0x7f7f66e7),
	SPH_C32(0x5050ab5b), SPH_C32(0x3c3cb4f0), SPH_C32(0x9f9ff04a), SPH_C32(0xa8a87596),
	SPH_C32(0x5151ac5f), SPH_C32(0xa3a344ba), SPH_C32(0x4040db1b), SPH_C32(0x8f8f800a),
	SPH_C32(0x9292d37e), SPH_C32(0x9d9dfe42), SPH_C32(0x3838a8e0), SPH_C32(0xf5f5fdf9),
	SPH_C32(0xbcbc19c6), SPH_C32(0xb6b62fee), SPH_C32(0xdada3045), SPH_C32(0x2121e784),
	SPH_C32(0x10107040), SPH_C32(0xffffcbd1), SPH_C32(0xf3f3efe1), SPH_C32(0xd2d20865),
	SPH_C32(0xcdcd5519), SPH_C32(0x0c0c2430), SPH_C32(0x1313794c), SPH_C32(0xececb29d),
	SPH_C32(0x5f5f8667), SPH_C32(0x9797c86a), SPH_C32(0x4444c70b), SPH_C32(0x1717655c),
	SPH_C32(0xc4c46a3d), SPH_C32(0xa7a758aa), SPH_C32(0x7e7e61e3), SPH_C32(0x3d3db3f4),
	SPH_C32(0x6464278b), SPH_C32(0x5d5d886f), SPH_C32(0x19194f64), SPH_C32(0x737342d7),
	SPH_C32(0x60603b9b), SPH_C32(0x8181aa32), SPH_C32(0x4f4ff627), SPH_C32(0xdcdc225d),
	SPH_C32(0x2222ee88), SPH_C32(0x2a2ad6a8), SPH_C32(0x9090dd76), SPH_C32(0x88889516),
	SPH_C32(0x4646c903), SPH_C32(0xeeeebc95), SPH_C32(0xb8b805d6), SPH_C32(0x14146c50),
	SPH_C32(0xdede2c55), SPH_C32(0x5e5e8163), SPH_C32(0x0b0b312c), SPH_C32(0xdbdb3741),
	SPH_C32(0xe0e096ad), SPH_C32(0x32329ec8), SPH_C32(0x3a3aa6e8), SPH_C32(0x0a0a3628),
	SPH_C32(0x4949e43f), SPH_C32(0x06061218), SPH_C32(0x2424fc90), SPH_C32(0x5c5c8f6b),
	SPH_C32(0xc2c27825), SPH_C32(0xd3d30f61), SPH_C32(0xacac6986), SPH_C32(0x62623593),
	SPH_C32(0x9191da72), SPH_C32(0x9595c662), SPH_C32(0xe4e48abd), SPH_C32(0x797974ff),
	SPH_C32(0xe7e783b1), SPH_C32(0xc8c84e0d), SPH_C32(0x373785dc), SPH_C32(0x6d6d18af),
	SPH_C32(0x8d8d8e02), SPH_C32(0xd5d51d79), SPH_C32(0x4e4ef123), SPH_C32(0xa9a97292),
	SPH_C32(0x6c6c1fab), SPH_C32(0x5656b943), SPH_C32(0xf4f4fafd), SPH_C32(0xeaeaa085),
	SPH_C32(0x6565208f), SPH_C32(0x7a7a7df3), SPH_C32(0xaeae678e), SPH_C32(0x08083820),
	SPH_C32(0xbaba0bde), SPH_C32(0x787873fb), SPH_C32(0x2525fb94), SPH_C32(0x2e2ecab8),
	SPH_C32(0x1c1c5470), SPH_C32(0xa6a65fae), SPH_C32(0xb4b421e6), SPH_C32(0xc6c66435),
	SPH_C32(0xe8e8ae8d), SPH_C32(0xdddd2559), SPH_C32(0x747457cb), SPH_C32(0x1f1f5d7c),
	SPH_C32(0x4b4bea37), SPH_C32(0xbdbd1ec2), SPH_C32(0x8b8b9c1a), SPH_C32(0x8a8a9b1e),
	SPH_C32(0x70704bdb), SPH_C32(0x3e3ebaf8), SPH_C32(0xb5b526e2), SPH_C32(0x66662983),
	SPH_C32(0x4848e33b), SPH_C32(0x0303090c), SPH_C32(0xf6f6f4f5), SPH_C32(0x0e0e2a38),
	SPH_C32(0x61613c9f), SPH_C32(0x35358bd4), SPH_C32(0x5757be47), SPH_C32(0xb9b902d2),
	SPH_C32(0x8686bf2e), SPH_C32(0xc1c17129), SPH_C32(0x1d1d5374), SPH_C32(0x9e9ef74e),
	SPH_C32(0xe1e191a9), SPH_C32(0xf8f8decd), SPH_C32(0x9898e556), SPH_C32(0x11117744),
	SPH_C32(0x696904bf), SPH_C32(0xd9d93949), SPH_C32(0x8e8e870e), SPH_C32(0x9494c166),
	SPH_C32(0x9b9bec5a), SPH_C32(0x1e1e5a78), SPH_C32(0x8787b82a), SPH_C32(0xe9e9a989),
	SPH_C32(0xcece5c15), SPH_C32(0x5555b04f), SPH_C32(0x2828d8a0), SPH_C32(0xdfdf2b51),
	SPH_C32(0x8c8c8906), SPH_C32(0xa1a14ab2), SPH_C32(0x89899212), SPH_C32(0x0d0d2334),
	SPH_C32(0xbfbf10ca), SPH_C32(0xe6e684b5), SPH_C32(0x4242d513), SPH_C32(0x686803bb),
	SPH_C32(0x4141dc1f), SPH_C32(0x9999e252), SPH_C32(0x2d2dc3b4), SPH_C32(0x0f0f2d3c),
	SPH_C32(0xb0b03df6), SPH_C32(0x5454b74b), SPH_C32(0xbbbb0cda), SPH_C32(0x16166258)
};

// Mix table 1: mixtab0 entries rotated right by 8 bits (serves byte lane 1).
static const uint32_t mixtab1_cpu[] = {
	SPH_C32(0x97636332), SPH_C32(0xeb7c7c6f), SPH_C32(0xc777775e), SPH_C32(0xf77b7b7a),
	SPH_C32(0xe5f2f2e8), SPH_C32(0xb76b6b0a), SPH_C32(0xa76f6f16), SPH_C32(0x39c5c56d),
	SPH_C32(0xc0303090), SPH_C32(0x04010107), SPH_C32(0x8767672e), SPH_C32(0xac2b2bd1),
	SPH_C32(0xd5fefecc), SPH_C32(0x71d7d713), SPH_C32(0x9aabab7c), SPH_C32(0xc3767659),
	SPH_C32(0x05caca40), SPH_C32(0x3e8282a3), SPH_C32(0x09c9c949), SPH_C32(0xef7d7d68),
	SPH_C32(0xc5fafad0), SPH_C32(0x7f595994), SPH_C32(0x074747ce), SPH_C32(0xedf0f0e6),
	SPH_C32(0x82adad6e), SPH_C32(0x7dd4d41a), SPH_C32(0xbea2a243), SPH_C32(0x8aafaf60),
	SPH_C32(0x469c9cf9), SPH_C32(0xa6a4a451), SPH_C32(0xd3727245), SPH_C32(0x2dc0c076),
	SPH_C32(0xeab7b728), SPH_C32(0xd9fdfdc5), SPH_C32(0x7a9393d4), SPH_C32(0x982626f2),
	SPH_C32(0xd8363682), SPH_C32(0xfc3f3fbd), SPH_C32(0xf1f7f7f3), SPH_C32(0x1dcccc52),
	SPH_C32(0xd034348c), SPH_C32(0xa2a5a556), SPH_C32(0xb9e5e58d), SPH_C32(0xe9f1f1e1),
	SPH_C32(0xdf71714c), SPH_C32(0x4dd8d83e), SPH_C32(0xc4313197), SPH_C32(0x5415156b),
	SPH_C32(0x1004041c), SPH_C32(0x31c7c763), SPH_C32(0x8c2323e9), SPH_C32(0x21c3c37f),
	SPH_C32(0x60181848), SPH_C32(0x6e9696cf), SPH_C32(0x1405051b), SPH_C32(0x5e9a9aeb),
	SPH_C32(0x1c070715), SPH_C32(0x4812127e), SPH_C32(0x368080ad), SPH_C32(0xa5e2e298),
	SPH_C32(0x81ebeba7), SPH_C32(0x9c2727f5), SPH_C32(0xfeb2b233), SPH_C32(0xcf757550),
	SPH_C32(0x2409093f), SPH_C32(0x3a8383a4), SPH_C32(0xb02c2cc4), SPH_C32(0x681a1a46),
	SPH_C32(0x6c1b1b41), SPH_C32(0xa36e6e11), SPH_C32(0x735a5a9d), SPH_C32(0xb6a0a04d),
	SPH_C32(0x535252a5), SPH_C32(0xec3b3ba1), SPH_C32(0x75d6d614), SPH_C32(0xfab3b334),
	SPH_C32(0xa42929df), SPH_C32(0xa1e3e39f), SPH_C32(0xbc2f2fcd), SPH_C32(0x268484b1),
	SPH_C32(0x575353a2), SPH_C32(0x69d1d101), SPH_C32(0x00000000), SPH_C32(0x99ededb5),
	SPH_C32(0x802020e0), SPH_C32(0xddfcfcc2), SPH_C32(0xf2b1b13a), SPH_C32(0x775b5b9a),
	SPH_C32(0xb36a6a0d), SPH_C32(0x01cbcb47), SPH_C32(0xcebebe17), SPH_C32(0xe43939af),
	SPH_C32(0x334a4aed), SPH_C32(0x2b4c4cff), SPH_C32(0x7b585893), SPH_C32(0x11cfcf5b),
	SPH_C32(0x6dd0d006), SPH_C32(0x91efefbb), SPH_C32(0x9eaaaa7b), SPH_C32(0xc1fbfbd7),
	SPH_C32(0x174343d2), SPH_C32(0x2f4d4df8), SPH_C32(0xcc333399), SPH_C32(0x228585b6),
	SPH_C32(0x0f4545c0), SPH_C32(0xc9f9f9d9), SPH_C32(0x0802020e), SPH_C32(0xe77f7f66),
	SPH_C32(0x5b5050ab), SPH_C32(0xf03c3cb4), SPH_C32(0x4a9f9ff0), SPH_C32(0x96a8a875),
	SPH_C32(0x5f5151ac), SPH_C32(0xbaa3a344), SPH_C32(0x1b4040db), SPH_C32(0x0a8f8f80),
	SPH_C32(0x7e9292d3), SPH_C32(0x429d9dfe), SPH_C32(0xe03838a8), SPH_C32(0xf9f5f5fd),
	SPH_C32(0xc6bcbc19), SPH_C32(0xeeb6b62f), SPH_C32(0x45dada30), SPH_C32(0x842121e7),
	SPH_C32(0x40101070), SPH_C32(0xd1ffffcb), SPH_C32(0xe1f3f3ef), SPH_C32(0x65d2d208),
	SPH_C32(0x19cdcd55), SPH_C32(0x300c0c24), SPH_C32(0x4c131379), SPH_C32(0x9dececb2),
	SPH_C32(0x675f5f86), SPH_C32(0x6a9797c8), SPH_C32(0x0b4444c7), SPH_C32(0x5c171765),
	SPH_C32(0x3dc4c46a), SPH_C32(0xaaa7a758), SPH_C32(0xe37e7e61), SPH_C32(0xf43d3db3),
	SPH_C32(0x8b646427), SPH_C32(0x6f5d5d88), SPH_C32(0x6419194f), SPH_C32(0xd7737342),
	SPH_C32(0x9b60603b), SPH_C32(0x328181aa), SPH_C32(0x274f4ff6), SPH_C32(0x5ddcdc22),
	SPH_C32(0x882222ee), SPH_C32(0xa82a2ad6), SPH_C32(0x769090dd), SPH_C32(0x16888895),
	SPH_C32(0x034646c9), SPH_C32(0x95eeeebc), SPH_C32(0xd6b8b805), SPH_C32(0x5014146c),
	SPH_C32(0x55dede2c), SPH_C32(0x635e5e81), SPH_C32(0x2c0b0b31), SPH_C32(0x41dbdb37),
	SPH_C32(0xade0e096), SPH_C32(0xc832329e), SPH_C32(0xe83a3aa6), SPH_C32(0x280a0a36),
	SPH_C32(0x3f4949e4), SPH_C32(0x18060612), SPH_C32(0x902424fc), SPH_C32(0x6b5c5c8f),
	SPH_C32(0x25c2c278), SPH_C32(0x61d3d30f), SPH_C32(0x86acac69), SPH_C32(0x93626235),
	SPH_C32(0x729191da), SPH_C32(0x629595c6), SPH_C32(0xbde4e48a), SPH_C32(0xff797974),
	SPH_C32(0xb1e7e783), SPH_C32(0x0dc8c84e), SPH_C32(0xdc373785), SPH_C32(0xaf6d6d18),
	SPH_C32(0x028d8d8e), SPH_C32(0x79d5d51d), SPH_C32(0x234e4ef1), SPH_C32(0x92a9a972),
	SPH_C32(0xab6c6c1f), SPH_C32(0x435656b9), SPH_C32(0xfdf4f4fa), SPH_C32(0x85eaeaa0),
	SPH_C32(0x8f656520), SPH_C32(0xf37a7a7d), SPH_C32(0x8eaeae67), SPH_C32(0x20080838),
	SPH_C32(0xdebaba0b), SPH_C32(0xfb787873), SPH_C32(0x942525fb), SPH_C32(0xb82e2eca),
	SPH_C32(0x701c1c54), SPH_C32(0xaea6a65f), SPH_C32(0xe6b4b421), SPH_C32(0x35c6c664),
	SPH_C32(0x8de8e8ae), SPH_C32(0x59dddd25), SPH_C32(0xcb747457), SPH_C32(0x7c1f1f5d),
	SPH_C32(0x374b4bea), SPH_C32(0xc2bdbd1e), SPH_C32(0x1a8b8b9c), SPH_C32(0x1e8a8a9b),
	SPH_C32(0xdb70704b), SPH_C32(0xf83e3eba), SPH_C32(0xe2b5b526), SPH_C32(0x83666629),
	SPH_C32(0x3b4848e3), SPH_C32(0x0c030309), SPH_C32(0xf5f6f6f4), SPH_C32(0x380e0e2a),
	SPH_C32(0x9f61613c), SPH_C32(0xd435358b), SPH_C32(0x475757be), SPH_C32(0xd2b9b902),
	SPH_C32(0x2e8686bf), SPH_C32(0x29c1c171), SPH_C32(0x741d1d53), SPH_C32(0x4e9e9ef7),
	SPH_C32(0xa9e1e191), SPH_C32(0xcdf8f8de), SPH_C32(0x569898e5), SPH_C32(0x44111177),
	SPH_C32(0xbf696904), SPH_C32(0x49d9d939), SPH_C32(0x0e8e8e87), SPH_C32(0x669494c1),
	SPH_C32(0x5a9b9bec), SPH_C32(0x781e1e5a), SPH_C32(0x2a8787b8), SPH_C32(0x89e9e9a9),
	SPH_C32(0x15cece5c), SPH_C32(0x4f5555b0), SPH_C32(0xa02828d8), SPH_C32(0x51dfdf2b),
	SPH_C32(0x068c8c89), SPH_C32(0xb2a1a14a), SPH_C32(0x12898992), SPH_C32(0x340d0d23),
	SPH_C32(0xcabfbf10), SPH_C32(0xb5e6e684), SPH_C32(0x134242d5), SPH_C32(0xbb686803),
	SPH_C32(0x1f4141dc), SPH_C32(0x529999e2), SPH_C32(0xb42d2dc3), SPH_C32(0x3c0f0f2d),
	SPH_C32(0xf6b0b03d), SPH_C32(0x4b5454b7), SPH_C32(0xdabbbb0c), SPH_C32(0x58161662)
};

// Mix table 2: mixtab0 entries rotated right by 16 bits (serves byte lane 2).
static const uint32_t mixtab2_cpu[] = {
	SPH_C32(0x32976363), SPH_C32(0x6feb7c7c), SPH_C32(0x5ec77777), SPH_C32(0x7af77b7b),
	SPH_C32(0xe8e5f2f2), SPH_C32(0x0ab76b6b), SPH_C32(0x16a76f6f), SPH_C32(0x6d39c5c5),
	SPH_C32(0x90c03030), SPH_C32(0x07040101), SPH_C32(0x2e876767), SPH_C32(0xd1ac2b2b),
	SPH_C32(0xccd5fefe), SPH_C32(0x1371d7d7), SPH_C32(0x7c9aabab), SPH_C32(0x59c37676),
	SPH_C32(0x4005caca), SPH_C32(0xa33e8282), SPH_C32(0x4909c9c9), SPH_C32(0x68ef7d7d),
	SPH_C32(0xd0c5fafa), SPH_C32(0x947f5959), SPH_C32(0xce074747), SPH_C32(0xe6edf0f0),
	SPH_C32(0x6e82adad), SPH_C32(0x1a7dd4d4), SPH_C32(0x43bea2a2), SPH_C32(0x608aafaf),
	SPH_C32(0xf9469c9c), SPH_C32(0x51a6a4a4), SPH_C32(0x45d37272), SPH_C32(0x762dc0c0),
	SPH_C32(0x28eab7b7), SPH_C32(0xc5d9fdfd), SPH_C32(0xd47a9393), SPH_C32(0xf2982626),
	SPH_C32(0x82d83636), SPH_C32(0xbdfc3f3f), SPH_C32(0xf3f1f7f7), SPH_C32(0x521dcccc),
	SPH_C32(0x8cd03434), SPH_C32(0x56a2a5a5), SPH_C32(0x8db9e5e5), SPH_C32(0xe1e9f1f1),
	SPH_C32(0x4cdf7171), SPH_C32(0x3e4dd8d8), SPH_C32(0x97c43131), SPH_C32(0x6b541515),
	SPH_C32(0x1c100404), SPH_C32(0x6331c7c7), SPH_C32(0xe98c2323), SPH_C32(0x7f21c3c3),
	SPH_C32(0x48601818), SPH_C32(0xcf6e9696), SPH_C32(0x1b140505), SPH_C32(0xeb5e9a9a),
	SPH_C32(0x151c0707), SPH_C32(0x7e481212), SPH_C32(0xad368080), SPH_C32(0x98a5e2e2),
	SPH_C32(0xa781ebeb), SPH_C32(0xf59c2727), SPH_C32(0x33feb2b2), SPH_C32(0x50cf7575),
	SPH_C32(0x3f240909), SPH_C32(0xa43a8383), SPH_C32(0xc4b02c2c), SPH_C32(0x46681a1a),
	SPH_C32(0x416c1b1b), SPH_C32(0x11a36e6e), SPH_C32(0x9d735a5a), SPH_C32(0x4db6a0a0),
	SPH_C32(0xa5535252), SPH_C32(0xa1ec3b3b), SPH_C32(0x1475d6d6), SPH_C32(0x34fab3b3),
	SPH_C32(0xdfa42929), SPH_C32(0x9fa1e3e3), SPH_C32(0xcdbc2f2f), SPH_C32(0xb1268484),
	SPH_C32(0xa2575353), SPH_C32(0x0169d1d1), SPH_C32(0x00000000), SPH_C32(0xb599eded),
	SPH_C32(0xe0802020), SPH_C32(0xc2ddfcfc), SPH_C32(0x3af2b1b1), SPH_C32(0x9a775b5b),
	SPH_C32(0x0db36a6a), SPH_C32(0x4701cbcb), SPH_C32(0x17cebebe), SPH_C32(0xafe43939),
	SPH_C32(0xed334a4a), SPH_C32(0xff2b4c4c), SPH_C32(0x937b5858), SPH_C32(0x5b11cfcf),
	SPH_C32(0x066dd0d0), SPH_C32(0xbb91efef), SPH_C32(0x7b9eaaaa), SPH_C32(0xd7c1fbfb),
	SPH_C32(0xd2174343), SPH_C32(0xf82f4d4d), SPH_C32(0x99cc3333), SPH_C32(0xb6228585),
	SPH_C32(0xc00f4545), SPH_C32(0xd9c9f9f9), SPH_C32(0x0e080202), SPH_C32(0x66e77f7f),
	SPH_C32(0xab5b5050), SPH_C32(0xb4f03c3c), SPH_C32(0xf04a9f9f), SPH_C32(0x7596a8a8),
	SPH_C32(0xac5f5151), SPH_C32(0x44baa3a3), SPH_C32(0xdb1b4040), SPH_C32(0x800a8f8f),
	SPH_C32(0xd37e9292), SPH_C32(0xfe429d9d), SPH_C32(0xa8e03838), SPH_C32(0xfdf9f5f5),
	SPH_C32(0x19c6bcbc), SPH_C32(0x2feeb6b6), SPH_C32(0x3045dada), SPH_C32(0xe7842121),
	SPH_C32(0x70401010), SPH_C32(0xcbd1ffff), SPH_C32(0xefe1f3f3), SPH_C32(0x0865d2d2),
	SPH_C32(0x5519cdcd), SPH_C32(0x24300c0c), SPH_C32(0x794c1313), SPH_C32(0xb29decec),
	SPH_C32(0x86675f5f), SPH_C32(0xc86a9797), SPH_C32(0xc70b4444), SPH_C32(0x655c1717),
	SPH_C32(0x6a3dc4c4), SPH_C32(0x58aaa7a7), SPH_C32(0x61e37e7e), SPH_C32(0xb3f43d3d),
	SPH_C32(0x278b6464), SPH_C32(0x886f5d5d), SPH_C32(0x4f641919), SPH_C32(0x42d77373),
	SPH_C32(0x3b9b6060), SPH_C32(0xaa328181), SPH_C32(0xf6274f4f), SPH_C32(0x225ddcdc),
	SPH_C32(0xee882222), SPH_C32(0xd6a82a2a), SPH_C32(0xdd769090), SPH_C32(0x95168888),
	SPH_C32(0xc9034646), SPH_C32(0xbc95eeee), SPH_C32(0x05d6b8b8), SPH_C32(0x6c501414),
	SPH_C32(0x2c55dede), SPH_C32(0x81635e5e), SPH_C32(0x312c0b0b), SPH_C32(0x3741dbdb),
	SPH_C32(0x96ade0e0), SPH_C32(0x9ec83232), SPH_C32(0xa6e83a3a), SPH_C32(0x36280a0a),
	SPH_C32(0xe43f4949), SPH_C32(0x12180606), SPH_C32(0xfc902424), SPH_C32(0x8f6b5c5c),
	SPH_C32(0x7825c2c2), SPH_C32(0x0f61d3d3), SPH_C32(0x6986acac), SPH_C32(0x35936262),
	SPH_C32(0xda729191), SPH_C32(0xc6629595), SPH_C32(0x8abde4e4), SPH_C32(0x74ff7979),
	SPH_C32(0x83b1e7e7), SPH_C32(0x4e0dc8c8), SPH_C32(0x85dc3737), SPH_C32(0x18af6d6d),
	SPH_C32(0x8e028d8d), SPH_C32(0x1d79d5d5), SPH_C32(0xf1234e4e), SPH_C32(0x7292a9a9),
	SPH_C32(0x1fab6c6c), SPH_C32(0xb9435656), SPH_C32(0xfafdf4f4), SPH_C32(0xa085eaea),
	SPH_C32(0x208f6565), SPH_C32(0x7df37a7a), SPH_C32(0x678eaeae), SPH_C32(0x38200808),
	SPH_C32(0x0bdebaba), SPH_C32(0x73fb7878), SPH_C32(0xfb942525), SPH_C32(0xcab82e2e),
	SPH_C32(0x54701c1c), SPH_C32(0x5faea6a6), SPH_C32(0x21e6b4b4), SPH_C32(0x6435c6c6),
	SPH_C32(0xae8de8e8), SPH_C32(0x2559dddd), SPH_C32(0x57cb7474), SPH_C32(0x5d7c1f1f),
	SPH_C32(0xea374b4b), SPH_C32(0x1ec2bdbd), SPH_C32(0x9c1a8b8b), SPH_C32(0x9b1e8a8a),
	SPH_C32(0x4bdb7070), SPH_C32(0xbaf83e3e), SPH_C32(0x26e2b5b5), SPH_C32(0x29836666),
	SPH_C32(0xe33b4848), SPH_C32(0x090c0303), SPH_C32(0xf4f5f6f6), SPH_C32(0x2a380e0e),
	SPH_C32(0x3c9f6161), SPH_C32(0x8bd43535), SPH_C32(0xbe475757), SPH_C32(0x02d2b9b9),
	SPH_C32(0xbf2e8686), SPH_C32(0x7129c1c1), SPH_C32(0x53741d1d), SPH_C32(0xf74e9e9e),
	SPH_C32(0x91a9e1e1), SPH_C32(0xdecdf8f8), SPH_C32(0xe5569898), SPH_C32(0x77441111),
	SPH_C32(0x04bf6969), SPH_C32(0x3949d9d9), SPH_C32(0x870e8e8e), SPH_C32(0xc1669494),
	SPH_C32(0xec5a9b9b), SPH_C32(0x5a781e1e), SPH_C32(0xb82a8787), SPH_C32(0xa989e9e9),
	SPH_C32(0x5c15cece), SPH_C32(0xb04f5555), SPH_C32(0xd8a02828), SPH_C32(0x2b51dfdf),
	SPH_C32(0x89068c8c), SPH_C32(0x4ab2a1a1), SPH_C32(0x92128989), SPH_C32(0x23340d0d),
	SPH_C32(0x10cabfbf), SPH_C32(0x84b5e6e6), SPH_C32(0xd5134242), SPH_C32(0x03bb6868),
	SPH_C32(0xdc1f4141), SPH_C32(0xe2529999), SPH_C32(0xc3b42d2d), SPH_C32(0x2d3c0f0f),
	SPH_C32(0x3df6b0b0), SPH_C32(0xb74b5454), SPH_C32(0x0cdabbbb), SPH_C32(0x62581616)
};

// Mix table 3: mixtab0 entries rotated right by 24 bits (serves byte lane 3).
static const uint32_t mixtab3_cpu[] = {
	SPH_C32(0x63329763), SPH_C32(0x7c6feb7c), SPH_C32(0x775ec777), SPH_C32(0x7b7af77b),
	SPH_C32(0xf2e8e5f2), SPH_C32(0x6b0ab76b), SPH_C32(0x6f16a76f), SPH_C32(0xc56d39c5),
	SPH_C32(0x3090c030), SPH_C32(0x01070401), SPH_C32(0x672e8767), SPH_C32(0x2bd1ac2b),
	SPH_C32(0xfeccd5fe), SPH_C32(0xd71371d7), SPH_C32(0xab7c9aab), SPH_C32(0x7659c376),
	SPH_C32(0xca4005ca), SPH_C32(0x82a33e82), SPH_C32(0xc94909c9), SPH_C32(0x7d68ef7d),
	SPH_C32(0xfad0c5fa), SPH_C32(0x59947f59), SPH_C32(0x47ce0747), SPH_C32(0xf0e6edf0),
	SPH_C32(0xad6e82ad), SPH_C32(0xd41a7dd4), SPH_C32(0xa243bea2), SPH_C32(0xaf608aaf),
	SPH_C32(0x9cf9469c), SPH_C32(0xa451a6a4), SPH_C32(0x7245d372), SPH_C32(0xc0762dc0),
	SPH_C32(0xb728eab7), SPH_C32(0xfdc5d9fd), SPH_C32(0x93d47a93), SPH_C32(0x26f29826),
	SPH_C32(0x3682d836), SPH_C32(0x3fbdfc3f), SPH_C32(0xf7f3f1f7), SPH_C32(0xcc521dcc),
	SPH_C32(0x348cd034), SPH_C32(0xa556a2a5), SPH_C32(0xe58db9e5), SPH_C32(0xf1e1e9f1),
	SPH_C32(0x714cdf71), SPH_C32(0xd83e4dd8), SPH_C32(0x3197c431), SPH_C32(0x156b5415),
	SPH_C32(0x041c1004), SPH_C32(0xc76331c7), SPH_C32(0x23e98c23), SPH_C32(0xc37f21c3),
	SPH_C32(0x18486018), SPH_C32(0x96cf6e96), SPH_C32(0x051b1405), SPH_C32(0x9aeb5e9a),
	SPH_C32(0x07151c07), SPH_C32(0x127e4812), SPH_C32(0x80ad3680), SPH_C32(0xe298a5e2),
	SPH_C32(0xeba781eb), SPH_C32(0x27f59c27), SPH_C32(0xb233feb2), SPH_C32(0x7550cf75),
	SPH_C32(0x093f2409), SPH_C32(0x83a43a83), SPH_C32(0x2cc4b02c), SPH_C32(0x1a46681a),
	SPH_C32(0x1b416c1b), SPH_C32(0x6e11a36e), SPH_C32(0x5a9d735a), SPH_C32(0xa04db6a0),
	SPH_C32(0x52a55352), SPH_C32(0x3ba1ec3b), SPH_C32(0xd61475d6), SPH_C32(0xb334fab3),
	SPH_C32(0x29dfa429), SPH_C32(0xe39fa1e3), SPH_C32(0x2fcdbc2f), SPH_C32(0x84b12684),
	SPH_C32(0x53a25753), SPH_C32(0xd10169d1), SPH_C32(0x00000000), SPH_C32(0xedb599ed),
	SPH_C32(0x20e08020), SPH_C32(0xfcc2ddfc), SPH_C32(0xb13af2b1), SPH_C32(0x5b9a775b),
	SPH_C32(0x6a0db36a), SPH_C32(0xcb4701cb), SPH_C32(0xbe17cebe), SPH_C32(0x39afe439),
	SPH_C32(0x4aed334a), SPH_C32(0x4cff2b4c), SPH_C32(0x58937b58), SPH_C32(0xcf5b11cf),
	SPH_C32(0xd0066dd0), SPH_C32(0xefbb91ef), SPH_C32(0xaa7b9eaa), SPH_C32(0xfbd7c1fb),
	SPH_C32(0x43d21743), SPH_C32(0x4df82f4d), SPH_C32(0x3399cc33), SPH_C32(0x85b62285),
	SPH_C32(0x45c00f45), SPH_C32(0xf9d9c9f9), SPH_C32(0x020e0802), SPH_C32(0x7f66e77f),
	SPH_C32(0x50ab5b50), SPH_C32(0x3cb4f03c), SPH_C32(0x9ff04a9f), SPH_C32(0xa87596a8),
	SPH_C32(0x51ac5f51), SPH_C32(0xa344baa3), SPH_C32(0x40db1b40), SPH_C32(0x8f800a8f),
	SPH_C32(0x92d37e92), SPH_C32(0x9dfe429d), SPH_C32(0x38a8e038), SPH_C32(0xf5fdf9f5),
	SPH_C32(0xbc19c6bc), SPH_C32(0xb62feeb6), SPH_C32(0xda3045da), SPH_C32(0x21e78421),
	SPH_C32(0x10704010), SPH_C32(0xffcbd1ff), SPH_C32(0xf3efe1f3), SPH_C32(0xd20865d2),
	SPH_C32(0xcd5519cd), SPH_C32(0x0c24300c), SPH_C32(0x13794c13), SPH_C32(0xecb29dec),
	SPH_C32(0x5f86675f), SPH_C32(0x97c86a97), SPH_C32(0x44c70b44), SPH_C32(0x17655c17),
	SPH_C32(0xc46a3dc4), SPH_C32(0xa758aaa7), SPH_C32(0x7e61e37e), SPH_C32(0x3db3f43d),
	SPH_C32(0x64278b64), SPH_C32(0x5d886f5d), SPH_C32(0x194f6419), SPH_C32(0x7342d773),
	SPH_C32(0x603b9b60), SPH_C32(0x81aa3281), SPH_C32(0x4ff6274f), SPH_C32(0xdc225ddc),
	SPH_C32(0x22ee8822), SPH_C32(0x2ad6a82a), SPH_C32(0x90dd7690), SPH_C32(0x88951688),
	SPH_C32(0x46c90346), SPH_C32(0xeebc95ee), SPH_C32(0xb805d6b8), SPH_C32(0x146c5014),
	SPH_C32(0xde2c55de), SPH_C32(0x5e81635e), SPH_C32(0x0b312c0b), SPH_C32(0xdb3741db),
	SPH_C32(0xe096ade0), SPH_C32(0x329ec832), SPH_C32(0x3aa6e83a), SPH_C32(0x0a36280a),
	SPH_C32(0x49e43f49), SPH_C32(0x06121806), SPH_C32(0x24fc9024), SPH_C32(0x5c8f6b5c),
	SPH_C32(0xc27825c2), SPH_C32(0xd30f61d3), SPH_C32(0xac6986ac), SPH_C32(0x62359362),
	SPH_C32(0x91da7291), SPH_C32(0x95c66295), SPH_C32(0xe48abde4), SPH_C32(0x7974ff79),
	SPH_C32(0xe783b1e7), SPH_C32(0xc84e0dc8), SPH_C32(0x3785dc37), SPH_C32(0x6d18af6d),
	SPH_C32(0x8d8e028d), SPH_C32(0xd51d79d5), SPH_C32(0x4ef1234e), SPH_C32(0xa97292a9),
	SPH_C32(0x6c1fab6c), SPH_C32(0x56b94356), SPH_C32(0xf4fafdf4), SPH_C32(0xeaa085ea),
	SPH_C32(0x65208f65), SPH_C32(0x7a7df37a), SPH_C32(0xae678eae), SPH_C32(0x08382008),
	SPH_C32(0xba0bdeba), SPH_C32(0x7873fb78), SPH_C32(0x25fb9425), SPH_C32(0x2ecab82e),
	SPH_C32(0x1c54701c), SPH_C32(0xa65faea6), SPH_C32(0xb421e6b4), SPH_C32(0xc66435c6),
	SPH_C32(0xe8ae8de8), SPH_C32(0xdd2559dd), SPH_C32(0x7457cb74), SPH_C32(0x1f5d7c1f),
	SPH_C32(0x4bea374b), SPH_C32(0xbd1ec2bd), SPH_C32(0x8b9c1a8b), SPH_C32(0x8a9b1e8a),
	SPH_C32(0x704bdb70), SPH_C32(0x3ebaf83e), SPH_C32(0xb526e2b5), SPH_C32(0x66298366),
	SPH_C32(0x48e33b48), SPH_C32(0x03090c03), SPH_C32(0xf6f4f5f6), SPH_C32(0x0e2a380e),
	SPH_C32(0x613c9f61), SPH_C32(0x358bd435), SPH_C32(0x57be4757), SPH_C32(0xb902d2b9),
	SPH_C32(0x86bf2e86), SPH_C32(0xc17129c1), SPH_C32(0x1d53741d), SPH_C32(0x9ef74e9e),
	SPH_C32(0xe191a9e1), SPH_C32(0xf8decdf8), SPH_C32(0x98e55698), SPH_C32(0x11774411),
	SPH_C32(0x6904bf69), SPH_C32(0xd93949d9), SPH_C32(0x8e870e8e), SPH_C32(0x94c16694),
	SPH_C32(0x9bec5a9b), SPH_C32(0x1e5a781e), SPH_C32(0x87b82a87), SPH_C32(0xe9a989e9),
	SPH_C32(0xce5c15ce), SPH_C32(0x55b04f55), SPH_C32(0x28d8a028), SPH_C32(0xdf2b51df),
	SPH_C32(0x8c89068c), SPH_C32(0xa14ab2a1), SPH_C32(0x89921289), SPH_C32(0x0d23340d),
	SPH_C32(0xbf10cabf), SPH_C32(0xe684b5e6), SPH_C32(0x42d51342), SPH_C32(0x6803bb68),
	SPH_C32(0x41dc1f41), SPH_C32(0x99e25299), SPH_C32(0x2dc3b42d), SPH_C32(0x0f2d3c0f),
	SPH_C32(0xb03df6b0), SPH_C32(0x54b74b54), SPH_C32(0xbb0cdabb), SPH_C32(0x16625816)
};

// TIX4: Fugue "TIX" input step — injects one 32-bit message word `q` into
// state word x00 while XOR-folding neighbouring state words.
#define TIX4(q, x00, x01, x04, x07, x08, x22, x24, x27, x30) { \
	x22 ^= x00; \
	x00 = (q); \
	x08 ^= x00; \
	x01 ^= x24; \
	x04 ^= x27; \
	x07 ^= x30; \
}

// CMIX36: Fugue column-mix — XORs three state words into two groups of three.
#define CMIX36(x00, x01, x02, x04, x05, x06, x18, x19, x20) { \
	x00 ^= x04; \
	x01 ^= x05; \
	x02 ^= x06; \
	x18 ^= x04; \
	x19 ^= x05; \
	x20 ^= x06; \
}

// SMIX: Fugue super-mix on four state words. Each input byte selects an entry
// from one of the four (rotated) mix tables; column (c*) and row (r*)
// accumulators are then recombined byte-lane by byte-lane. The tables are read
// through the mixtabN() shared-memory accessors.
#define SMIX(x0, x1, x2, x3) { \
	uint32_t c0 = 0; \
	uint32_t c1 = 0; \
	uint32_t c2 = 0; \
	uint32_t c3 = 0; \
	uint32_t r0 = 0; \
	uint32_t r1 = 0; \
	uint32_t r2 = 0; \
	uint32_t r3 = 0; \
	uint32_t tmp; \
	tmp = mixtab0(x0 >> 24); \
	c0 ^= tmp; \
	tmp = mixtab1((x0 >> 16) & 0xFF); \
	c0 ^= tmp; \
	r1 ^= tmp; \
	tmp = mixtab2((x0 >> 8) & 0xFF); \
	c0 ^= tmp; \
	r2 ^= tmp; \
	tmp = mixtab3(x0 & 0xFF); \
	c0 ^= tmp; \
	r3 ^= tmp; \
	tmp = mixtab0(x1 >> 24); \
	c1 ^= tmp; \
	r0 ^= tmp; \
	tmp = mixtab1((x1 >> 16) & 0xFF); \
	c1 ^= tmp; \
	tmp = mixtab2((x1 >> 8) & 0xFF); \
	c1 ^= tmp; \
	r2 ^= tmp; \
	tmp = mixtab3(x1 & 0xFF); \
	c1 ^= tmp; \
	r3 ^= tmp; \
	tmp = mixtab0(x2 >> 24); \
	c2 ^= tmp; \
	r0 ^= tmp; \
	tmp = mixtab1((x2 >> 16) & 0xFF); \
	c2 ^= tmp; \
	r1 ^= tmp; \
	tmp = mixtab2((x2 >> 8) & 0xFF); \
	c2 ^= tmp; \
	tmp = mixtab3(x2 & 0xFF); \
	c2 ^= tmp; \
	r3 ^= tmp; \
	tmp = mixtab0(x3 >> 24); \
	c3 ^= tmp; \
	r0 ^= tmp; \
	tmp = mixtab1((x3 >> 16) & 0xFF); \
	c3 ^= tmp; \
	r1 ^= tmp; \
	tmp = mixtab2((x3 >> 8) & 0xFF); \
	c3 ^= tmp; \
	r2 ^= tmp; \
	tmp = mixtab3(x3 & 0xFF); \
	c3 ^= tmp; \
	x0 = ((c0 ^ r0) & SPH_C32(0xFF000000)) \
	   | ((c1 ^ r1) & SPH_C32(0x00FF0000)) \
	   | ((c2 ^ r2) & SPH_C32(0x0000FF00)) \
	   | ((c3 ^ r3) & SPH_C32(0x000000FF)); \
	x1 = ((c1 ^ (r0 << 8)) & SPH_C32(0xFF000000)) \
	   | ((c2 ^ (r1 << 8)) & SPH_C32(0x00FF0000)) \
	   | ((c3 ^ (r2 << 8)) & SPH_C32(0x0000FF00)) \
	   | ((c0 ^ (r3 >> 24)) & SPH_C32(0x000000FF)); \
	x2 = ((c2 ^ (r0 << 16)) & SPH_C32(0xFF000000)) \
	   | ((c3 ^ (r1 << 16)) & SPH_C32(0x00FF0000)) \
	   | ((c0 ^ (r2 >> 16)) & SPH_C32(0x0000FF00)) \
	   | ((c1 ^ (r3 >> 16)) & SPH_C32(0x000000FF)); \
	x3 = ((c3 ^ (r0 << 24)) & SPH_C32(0xFF000000)) \
	   | ((c0 ^ (r1 >> 8)) & SPH_C32(0x00FF0000)) \
	   | ((c1 ^ (r2 >> 8)) & SPH_C32(0x0000FF00)) \
	   | ((c2 ^ (r3 >> 8)) & SPH_C32(0x000000FF)); \
}

// ROR3 / ROR8 / ROR9: rotate the 36-word Fugue state (S00..S35) right by
// 3 / 8 / 9 positions, using B* temporaries for the wrapped words.
#define ROR3 { \
	B33 = S33, B34 = S34, B35 = S35; \
	S35 = S32; S34 = S31; S33 = S30; S32 = S29; S31 = S28; S30 = S27; S29 = S26; S28 = S25; S27 = S24; \
	S26 = S23; S25 = S22; S24 = S21; S23 = S20; S22 = S19; S21 = S18; S20 = S17; S19 = S16; S18 = S15; \
	S17 = S14; S16 = S13; S15 = S12; S14 = S11; S13 = S10; S12 = S09; S11 = S08; S10 = S07; S09 = S06; \
	S08 = S05; S07 = S04; S06 = S03; S05 = S02; S04 = S01; S03 = S00; S02 = B35; S01 = B34; S00 = B33; \
}

#define ROR8 { \
	B28 = S28, B29 = S29, B30 = S30, B31 = S31, B32 = S32, B33 = S33, B34 = S34, B35 = S35; \
	S35 = S27; S34 = S26; S33 = S25; S32 = S24; S31 = S23; S30 = S22; S29 = S21; S28 = S20; S27 = S19; \
	S26 = S18; S25 = S17; S24 = S16; S23 = S15; S22 = S14; S21 = S13; S20 = S12; S19 = S11; S18 = S10; \
	S17 = S09; S16 = S08; S15 = S07; S14 = S06; S13 = S05; S12 = S04; S11 = S03; S10 = S02; S09 = S01; \
	S08 = S00; S07 = B35; S06 = B34; S05 = B33; S04 = B32; S03 = B31; S02 = B30; S01 = B29; S00 = B28; \
}

#define ROR9 { \
	B27 = S27, B28 = S28, B29 = S29, B30 = S30, B31 = S31, B32 = S32, B33 = S33, B34 = S34, B35 = S35; \
	S35 = S26; S34 = S25; S33 = S24; S32 = S23; S31 = S22; S30 = S21; S29 = S20; S28 = S19; S27 = S18; \
	S26 = S17; S25 = S16; S24 = S15; S23 = S14; S22 = S13; S21 = S12; S20 = S11; S19 = S10; S18 = S09; \
	S17 = S08; S16 = S07; S15 = S06; S14 = S05; S13 = S04; S12 = S03; S11 = S02; S10 = S01; S09 = S00; \
	S08 = B35; S07 = B34; S06 = B33; S05 = B32; S04 = B31; S03 = B30; S02 = B29; S01 = B28; S00 = B27; \
}

// FUGUE512_3: absorb three 32-bit message words (x, y, z), each followed by
// its round of CMIX36/SMIX pairs, per the Fugue-512 round structure.
#define FUGUE512_3(x, y, z) {  \
	TIX4(x, S00, S01, S04, S07, S08, S22, S24, S27, S30); \
	CMIX36(S33, S34, S35, S01, S02, S03, S15, S16, S17); \
	SMIX(S33, S34, S35, S00); \
	CMIX36(S30, S31, S32, S34, S35, S00, S12, S13, S14); \
	SMIX(S30, S31, S32, S33); \
	CMIX36(S27, S28, S29, S31, S32, S33, S09, S10, S11); \
	SMIX(S27, S28, S29, S30); \
	CMIX36(S24, S25, S26, S28, S29, S30, S06, S07, S08); \
	SMIX(S24, S25, S26, S27); \
	\
	TIX4(y, S24, S25, S28, S31, S32, S10, S12, S15, S18); \
	CMIX36(S21, S22, S23, S25, S26, S27, S03, S04, S05); \
	SMIX(S21, S22, S23, S24); \
	CMIX36(S18, S19, S20, S22, S23, S24, S00, S01, S02); \
	SMIX(S18, S19, S20, S21); \
	CMIX36(S15, S16, S17, S19, S20, S21, S33, S34, S35); \
	SMIX(S15, S16, S17, S18); \
	CMIX36(S12, S13, S14, S16, S17, S18, S30, S31, S32); \
	SMIX(S12, S13, S14, S15); \
	\
	TIX4(z, S12, S13, S16, S19, S20, S34, S00, S03, S06); \
	CMIX36(S09, S10, S11, S13, S14, S15, S27, S28, S29); \
	SMIX(S09, S10, S11, S12); \
	CMIX36(S06, S07, S08, S10, S11, S12, S24, S25, S26); \
	SMIX(S06, S07, S08, S09); \
	CMIX36(S03, S04, S05, S07, S08, S09, S21, S22, S23); \
	SMIX(S03, S04, S05, S06); \
	CMIX36(S00, S01, S02, S04, S05, S06, S18, S19, S20); \
	SMIX(S00, S01, S02, S03); \
}

/***************************************************/
// The hash function (kernel).
// One thread hashes one 64-byte entry of g_hash in place (Fugue-512 over the
// previous algorithm's output). Launch requirements (see
// x13_fugue512_cpu_hash_64): blockDim.x == 256 so that the 4*256 shared table
// words are fully populated, and 4*256*sizeof(uint32_t) bytes of dynamic
// shared memory.
__global__ void x13_fugue512_gpu_hash_64(uint32_t threads, uint32_t startNounce, uint64_t *g_hash, uint32_t *g_nonceVector)
{
	// Stage the four mix tables from texture into shared memory,
	// one word per table per thread (requires 256 threads per block).
	extern __shared__ char mixtabs[];
	*((uint32_t*)mixtabs + (     threadIdx.x)) = tex1Dfetch(mixTab0Tex, threadIdx.x);
	*((uint32_t*)mixtabs + (256+threadIdx.x)) = tex1Dfetch(mixTab1Tex, threadIdx.x);
	*((uint32_t*)mixtabs + (512+threadIdx.x)) = tex1Dfetch(mixTab2Tex, threadIdx.x);
	*((uint32_t*)mixtabs + (768+threadIdx.x)) = tex1Dfetch(mixTab3Tex, threadIdx.x);
	__syncthreads();

	int i;
	uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x);
	if (thread < threads)
	{
		// Resolve this thread's nonce (either from the nonce vector or linear).
		uint32_t nounce = (g_nonceVector != NULL) ? g_nonceVector[thread] : (startNounce + thread);

		int hashPosition = nounce - startNounce;
		uint32_t *Hash = (uint32_t*)&g_hash[hashPosition<<3];

		// Fugue consumes big-endian words; byte-swap the 16 input words.
		#pragma unroll 16
		for( i = 0; i < 16; i++ )
			Hash[i] = cuda_swab32(Hash[i]);

		// 36-word Fugue state plus rotation temporaries.
		uint32_t S00, S01, S02, S03, S04, S05, S06, S07, S08, S09;
		uint32_t S10, S11, S12, S13, S14, S15, S16, S17, S18, S19;
		uint32_t S20, S21, S22, S23, S24, S25, S26, S27, S28, S29;
		uint32_t S30, S31, S32, S33, S34, S35;
		uint32_t B27, B28, B29, B30, B31, B32, B33, B34, B35;

		// Message length in bits (64 bytes), split for the final padding block.
		uint64_t bc = (uint64_t) 64 << 3;
		uint32_t bclo = (uint32_t)(bc & 0xFFFFFFFFULL);
		uint32_t bchi = (uint32_t)(bc >> 32);

		// Fugue-512 IV.
		S00 = S01 = S02 = S03 = S04 = S05 = S06 = S07 = S08 = S09 = S10 = S11 = S12 = S13 = S14 = S15 = S16 = S17 = S18 = S19 = 0;
		S20 = SPH_C32(0x8807a57e); S21 = SPH_C32(0xe616af75); S22 = SPH_C32(0xc5d3e4db); S23 = SPH_C32(0xac9ab027);
		S24 = SPH_C32(0xd915f117); S25 = SPH_C32(0xb6eecc54); S26 = SPH_C32(0x06e8020b); S27 = SPH_C32(0x4a92efd1);
		S28 = SPH_C32(0xaac6e2c9); S29 = SPH_C32(0xddb21398); S30 = SPH_C32(0xcae65838); S31 = SPH_C32(0x437f203f);
		S32 = SPH_C32(0x25ea78e7); S33 = SPH_C32(0x951fddd6); S34 = SPH_C32(0xda6ed11d); S35 = SPH_C32(0xe13e3567);

		// Absorb the 16 message words, then the bit-length words (bchi, bclo).
		FUGUE512_3((Hash[0x0]), (Hash[0x1]), (Hash[0x2]));
		FUGUE512_3((Hash[0x3]), (Hash[0x4]), (Hash[0x5]));
		FUGUE512_3((Hash[0x6]), (Hash[0x7]), (Hash[0x8]));
		FUGUE512_3((Hash[0x9]), (Hash[0xA]), (Hash[0xB]));
		FUGUE512_3((Hash[0xC]), (Hash[0xD]), (Hash[0xE]));
		FUGUE512_3((Hash[0xF]), bchi, bclo);

		// Final rounds, part 1: 32 iterations of rotate + column/super mix.
		#pragma unroll 32
		for (i = 0; i < 32; i ++) {
			ROR3;
			CMIX36(S00, S01, S02, S04, S05, S06, S18, S19, S20);
			SMIX(S00, S01, S02, S03);
		}
		// Final rounds, part 2: 13 iterations of the 4-step fold.
		#pragma unroll 13
		for (i = 0; i < 13; i ++) {
			S04 ^= S00;
			S09 ^= S00;
			S18 ^= S00;
			S27 ^= S00;
			ROR9;
			SMIX(S00, S01, S02, S03);
			S04 ^= S00;
			S10 ^= S00;
			S18 ^= S00;
			S27 ^= S00;
			ROR9;
			SMIX(S00, S01, S02, S03);
			S04 ^= S00;
			S10 ^= S00;
			S19 ^= S00;
			S27 ^= S00;
			ROR9;
			SMIX(S00, S01, S02, S03);
			S04 ^= S00;
			S10 ^= S00;
			S19 ^= S00;
			S28 ^= S00;
			ROR8;
			SMIX(S00, S01, S02, S03);
		}
		S04 ^= S00;
		S09 ^= S00;
		S18 ^= S00;
		S27 ^= S00;

		// Extract the 512-bit digest from the selected state words,
		// byte-swapped back to the buffer's little-endian layout.
		Hash[0] = cuda_swab32(S01);
		Hash[1] = cuda_swab32(S02);
		Hash[2] = cuda_swab32(S03);
		Hash[3] = cuda_swab32(S04);
		Hash[4] = cuda_swab32(S09);
		Hash[5] = cuda_swab32(S10);
		Hash[6] = cuda_swab32(S11);
		Hash[7] = cuda_swab32(S12);
		Hash[8] = cuda_swab32(S18);
		Hash[9] = cuda_swab32(S19);
		Hash[10] = cuda_swab32(S20);
		Hash[11] = cuda_swab32(S21);
		Hash[12] = cuda_swab32(S27);
		Hash[13] = cuda_swab32(S28);
		Hash[14] = cuda_swab32(S29);
		Hash[15] = cuda_swab32(S30);
	}
}

// texDef: allocate device memory for one mix table, copy it from the host,
// record the allocation in d_textures for later cudaFree, and bind the legacy
// texture reference to it. NOTE(review): the cudaMalloc/cudaMemcpy/
// cudaBindTexture return codes are not checked.
#define texDef(id, texname, texmem, texsource, texsize) { \
	unsigned int *texmem; \
	cudaMalloc(&texmem, texsize); \
	d_textures[thr_id][id] = texmem; \
	cudaMemcpy(texmem, texsource, texsize, cudaMemcpyHostToDevice); \
	texname.normalized = 0; \
	texname.filterMode = cudaFilterModePoint; \
	texname.addressMode[0] = cudaAddressModeClamp; \
	{ cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<unsigned int>(); \
	  cudaBindTexture(NULL, &texname, texmem, &channelDesc, texsize ); \
	} \
}

// One-time per-GPU setup: upload the four 1-KB mix tables and bind textures.
__host__
void x13_fugue512_cpu_init(int thr_id, uint32_t threads)
{
	texDef(0, mixTab0Tex, mixTab0m, mixtab0_cpu, sizeof(uint32_t)*256);
	texDef(1, mixTab1Tex, mixTab1m, mixtab1_cpu, sizeof(uint32_t)*256);
	texDef(2, mixTab2Tex, mixTab2m, mixtab2_cpu, sizeof(uint32_t)*256);
	texDef(3, mixTab3Tex, mixTab3m, mixtab3_cpu, sizeof(uint32_t)*256);
}

// Release the device-side table allocations made by x13_fugue512_cpu_init.
__host__
void x13_fugue512_cpu_free(int thr_id)
{
	for (int i=0; i<4; i++)
		cudaFree(d_textures[thr_id][i]);
}

// Host-side launcher: hashes `threads` 64-byte entries of d_hash in place.
__host__
void x13_fugue512_cpu_hash_64(int thr_id, uint32_t threads, uint32_t startNounce, uint32_t *d_nonceVector, uint32_t *d_hash, int order)
{
	const uint32_t threadsperblock = 256;

	// compute how many thread blocks we need
	dim3 grid((threads + threadsperblock-1)/threadsperblock);
	dim3 block(threadsperblock);

	// size of the dynamic shared memory region (4 tables x 256 words)
	size_t shared_size = 4 * 256 * sizeof(uint32_t);

//	fprintf(stderr, "threads=%d, %d blocks, %d threads per block, %d bytes shared\n", threads, grid.x, block.x, shared_size);

	x13_fugue512_gpu_hash_64<<<grid, block, shared_size>>>(threads, startNounce, (uint64_t*)d_hash, d_nonceVector);
	MyStreamSynchronize(NULL, order, thr_id);
}
the_stack
namespace nvbio {
namespace bowtie2 {
namespace cuda {

// Two-pass sizing/allocation helper for device vectors: always adds
// size*sizeof(T) to the running byte counter 'bytes'; only resizes the vector
// and returns its raw device pointer when 'do_alloc' is true. Returns NULL in
// the sizing-only pass.
template <typename T>
T* resize(bool do_alloc, thrust::device_vector<T>& vec, const uint32 size, uint64& bytes)
{
    bytes += size * sizeof(T);
    if (do_alloc)
    {
        vec.resize( size );
        return thrust::raw_pointer_cast(&vec.front());
    }
    return NULL;
}

// Host-vector overload of the two-pass sizing/allocation helper (see above).
// NOTE(review): the byte count is accumulated into the same counter the
// caller passes in — callers are responsible for passing the host counter.
template <typename T>
T* resize(bool do_alloc, thrust::host_vector<T>& vec, const uint32 size, uint64& bytes)
{
    bytes += size * sizeof(T);
    if (do_alloc)
    {
        vec.resize( size );
        return thrust::raw_pointer_cast(&vec.front());
    }
    return NULL;
}

// std::vector overload of the two-pass sizing/allocation helper (see above).
template <typename T>
T* resize(bool do_alloc, std::vector<T>& vec, const uint32 size, uint64& bytes)
{
    bytes += size * sizeof(T);
    if (do_alloc)
    {
        vec.resize( size );
        return &vec[0];
    }
    return NULL;
}

// Size (do_alloc == false) or actually allocate (do_alloc == true) all of the
// aligner's working buffers for a batch of BATCH_SIZE reads. The same code
// path is run twice so the first pass can estimate memory usage before any
// allocation happens; totals are reported through 'mem_stats'
// (first = host bytes, second = device bytes). Returns false when the sizing
// pass predicts insufficient free device memory.
bool Aligner::init_alloc(const uint32 BATCH_SIZE, const Params& params, const EndType type, bool do_alloc, std::pair<uint64,uint64>* mem_stats)
{
    size_t free, total;
    cudaMemGetInfo(&free, &total);

    //const uint32 band_len = (type == kPairedEnds) ? MAXIMUM_BAND_LENGTH : band_length( params.max_dist );

    uint64 d_allocated_bytes = 0;
    uint64 h_allocated_bytes = 0;

    // alloc the seeding queues
    d_allocated_bytes += seed_queues.resize_arena( BATCH_SIZE, do_alloc );

    // alloc the hit deques
    d_allocated_bytes += hit_deques.resize( BATCH_SIZE, params.max_hits, do_alloc );

    if (params.randomized)
        rseeds_dptr = resize( do_alloc, rseeds_dvec, BATCH_SIZE, d_allocated_bytes );

    // alloc the scoring queues
    d_allocated_bytes += scoring_queues.resize( BATCH_SIZE, BATCH_SIZE, do_alloc ); // TODO: pass read-end 'type' field

    idx_queue_dptr = resize( do_alloc, idx_queue_dvec, BATCH_SIZE*2, d_allocated_bytes ); // 2x for ping-pong buffers
    if (params.mode != AllMapping)
        resize( do_alloc, sorting_queue_dvec, BATCH_SIZE*2, d_allocated_bytes ); // 2x for ping-pong buffers
    else
        resize( do_alloc, sorting_queue_dvec, BATCH_SIZE*2*4, d_allocated_bytes ); // 4x for 64-bit sorting

    nvbio::cuda::check_error("allocating queues");

    mapq_dptr = resize( do_alloc, mapq_dvec, BATCH_SIZE, d_allocated_bytes );

    if (params.mode != AllMapping)
    {
        // best-mapping mode: per-read retry state and best-alignment storage
        reseed_dptr = resize( do_alloc, reseed_dvec, BATCH_SIZE, d_allocated_bytes );
        trys_dptr = resize( do_alloc, trys_dvec, BATCH_SIZE, d_allocated_bytes );
        best_data_dptr = resize( do_alloc, best_data_dvec, BATCH_SIZE*2, d_allocated_bytes );

        if (type == kPairedEnds)
            best_data_dptr_o = resize( do_alloc, best_data_dvec_o, BATCH_SIZE*2, d_allocated_bytes );
    }
    else
    {
        // in all-mapping mode we store the temporary output in a double-buffered ring-buffer
        buffer_alignments_dptr = resize( do_alloc, buffer_alignments_dvec, BATCH_SIZE*2, d_allocated_bytes );
        buffer_read_info_dptr = resize( do_alloc, buffer_read_info_dvec, BATCH_SIZE*2, d_allocated_bytes );

        output_alignments_dptr = resize( do_alloc, output_alignments_dvec, BATCH_SIZE, d_allocated_bytes );
        output_read_info_dptr = resize( do_alloc, output_read_info_dvec, BATCH_SIZE, d_allocated_bytes );

        flags_dptr = resize( do_alloc, flags_dvec, BATCH_SIZE, d_allocated_bytes );
    }

    nvbio::cuda::check_error("allocating output buffers");

    // stats counters (128 slots, device + host mirror)
    hits_stats_dptr = resize( do_alloc, hits_stats_dvec, 128, d_allocated_bytes );
    resize( do_alloc, hits_stats_hvec, 128, d_allocated_bytes );

    if (params.mode == AllMapping)
    {
        hits_count_scan_dptr = resize( do_alloc, hits_count_scan_dvec, BATCH_SIZE+1, d_allocated_bytes );
        hits_range_scan_dptr = resize( do_alloc, hits_range_scan_dvec, params.max_hits * BATCH_SIZE+1, d_allocated_bytes );
    }

    //const uint32 n_cigar_entries = BATCH_SIZE*(MAXIMUM_BAND_LEN_MULT*band_len+1);
    //const uint32 n_mds_entries = BATCH_SIZE*MAX_READ_LEN;
    const uint32 n_cigar_entries = (128 * BATCH_SIZE)/sizeof(io::Cigar); // 256MB
    const uint32 n_mds_entries = (256 * BATCH_SIZE)/sizeof(uint8); // 256MB
    if (do_alloc)
    {
        log_verbose(stderr, "[%u] allocating %u MB of string storage\n[%u] CIGARs : %u MB\n[%u] MDs : %u MB\n",
            ID,
            uint32(n_cigar_entries * sizeof(io::Cigar) + n_mds_entries)/(1024*1024),
            ID,
            uint32(n_cigar_entries * sizeof(io::Cigar))/(1024*1024),
            ID,
            n_mds_entries/(1024*1024) );
    }

    // allocate CIGARs & MDs
    d_allocated_bytes += cigar.resize( BATCH_SIZE, n_cigar_entries, do_alloc );
    d_allocated_bytes += mds.resize( BATCH_SIZE, n_mds_entries, do_alloc );

    // allocate CIGAR coords
    cigar_coords_dptr = resize( do_alloc, cigar_coords_dvec, BATCH_SIZE, d_allocated_bytes );
    nvbio::cuda::check_error("allocating CIGARs");

    if (type == kPairedEnds)
    {
        // allocate the device queue
        opposite_queue_dptr = resize( do_alloc, opposite_queue_dvec, BATCH_SIZE, d_allocated_bytes );
    }

    // allocate DP storage
    uint32 dp_storage = 0;

    {
        //
        // allocate two thirds of available device memory for scoring / traceback
        //

        const uint32 bytes_per_read = uint32( float( params.avg_read_length ) * 1.5f );
        const uint64 read_mem = bytes_per_read * BATCH_SIZE * (type == kPairedEnds ? 2u : 1u); // assume 250B per read
        const uint64 guard_band = 512*1024*1024 + // we want to leave 512MB free,
                                  read_mem; // needed for kernels using lmem
        const uint64 min_dp_storage = 64*1024*1024; // minimum amount of DP storage

        // in the allocation pass, re-query free memory (earlier allocations
        // above have changed it); in the sizing pass, predict what would be
        // left and bail out early if it is not enough
        if (do_alloc)
            cudaMemGetInfo(&free, &total);
        else if (free >= d_allocated_bytes + guard_band + min_dp_storage)
            free -= d_allocated_bytes;
        else
            return false;

        const uint32 free_words = uint32( free / 4u );
        const uint32 min_free_words = uint32( guard_band / 4u );

        uint32 target_words = (free_words * 2u) / 3u;
        target_words = nvbio::min( target_words, free_words - min_free_words );

        const uint32 buffer_words = target_words;
        if (do_alloc)
            log_verbose(stderr, "[%u] allocating %u MB of DP storage\n", ID, (buffer_words*4)/(1024*1024) );

        dp_storage = buffer_words * sizeof(uint32);
    }

    // allocate a large temporary buffer to for scoring and traceback
    dp_buffer_dptr = resize( do_alloc, dp_buffer_dvec, dp_storage, d_allocated_bytes );

    nvbio::cuda::check_error("allocating alignment buffers");

    if (mem_stats)
    {
        mem_stats->first = h_allocated_bytes;
        mem_stats->second = d_allocated_bytes;
    }
    return true;
}

// Initialize the aligner for batches of 'batch_size' reads: runs init_alloc
// twice — first a sizing pass to log the estimated footprint, then the real
// allocation pass. Returns false if allocation throws.
bool Aligner::init(const uint32 id, const uint32 batch_size, const Params& params, const EndType type)
{
    ID = id;
    BATCH_SIZE = batch_size;

    // initialize the batch number
    batch_number = 0;

    try
    {
        std::pair<uint64,uint64> mem_stats;

        init_alloc( batch_size, params, type, false, &mem_stats );

        log_stats(stderr, "[%u] allocating alignment buffers... started\n[%u] estimated: HOST %lu MB, DEVICE %lu MB)\n",
            ID,
            ID,
            mem_stats.first / (1024*1024),
            mem_stats.second / (1024*1024) );

        init_alloc( batch_size, params, type, true, &mem_stats );

        log_stats(stderr, "[%u] allocating alignment buffers... done\n[%u] allocated: HOST %lu MB, DEVICE %lu MB)\n",
            ID,
            ID,
            mem_stats.first / (1024*1024),
            mem_stats.second / (1024*1024) );
    }
    catch (...)
    {
        log_error(stderr, "[%u] allocating alignment buffers failed!\n", ID);
        return false;
    }
    return true;
}

// Compute the total number of matches found.
// One thread per read: accumulates per-read hit counts into the global
// 'hit_stats' array with atomics, and bins counts by log2 into histograms.
// NOTE(review): out-of-range threads return before the __syncthreads() below,
// so the barrier is not reached by every thread of the last block — this
// relies on implementation-defined behavior and would be flagged by
// compute-sanitizer's synccheck.
__global__
void hits_stats_kernel(
    const uint32 batch_size,
    const SeedHit* hit_data,
    const uint32* hit_counts,
    uint64* hit_stats)
{
    const uint32 read_id = threadIdx.x + BLOCKDIM*blockIdx.x;
    if (read_id >= batch_size) return;

    const uint32 hit_ranges = hit_counts[ read_id ];

    // hits for one read are strided across the batch (SoA layout)
    strided_iterator<const SeedHit*> hits( hit_data+read_id, batch_size );

    typedef vector_view< strided_iterator<const SeedHit*> > Storage;
    typedef priority_deque< SeedHit, Storage, hit_compare > HitQueue;
    Storage qStore( hit_ranges, hits );
    HitQueue hitheap( qStore, HitQueue::CONSTRUCTED );

    // block-wide running maxima, reduced in shared memory before one
    // thread publishes them globally
    __shared__ uint32 shared_max_range;
    __shared__ uint32 shared_max_hits;
    __shared__ uint32 shared_top_max_hits;
    shared_max_range = 0;
    shared_max_hits = 0;
    shared_top_max_hits = 0;
    __syncthreads();

    uint32 hits_cnt = 0;
    uint32 max_range = 0;
    for (uint32 i = 0; i < hit_ranges; ++i)
    {
        hits_cnt += hits[i].get_range_size();
        max_range = nvbio::max( max_range, hits[i].get_range_size() );
    }
    const SeedHit top = hit_ranges ? hitheap.top() : SeedHit();
    const uint32 top_cnt = hit_ranges ? top.get_range().y - top.get_range().x : 0u;

    // update the number of ranges and number of total hits
    atomicAdd( hit_stats + HIT_STATS_RANGES, uint64(hit_ranges) );
    atomicAdd( hit_stats + HIT_STATS_TOTAL, uint64(hits_cnt) );
    atomicAdd( hit_stats + HIT_STATS_TOP, uint64(top_cnt) );

    // bin the number of hits, and update the bin counter
    const uint32 log_hits = hits_cnt == 0 ? 0u : nvbio::log2( hits_cnt )+1u;
    atomicAdd( hit_stats + HIT_STATS_BINS + log_hits, uint64(1u) );

    // bin the number of top hits, and update the bin counter
    const uint32 log_top_hits = top_cnt == 0 ? 0u : nvbio::log2( top_cnt )+1u;
    atomicAdd( hit_stats + HIT_STATS_TOP_BINS + log_top_hits, uint64(1u) );

    // update the maximum
    if (shared_max_range < max_range)
        atomicMax( &shared_max_range, max_range );

    // update the maximum
    if (shared_max_hits < hits_cnt)
        atomicMax( &shared_max_hits, hits_cnt );

    // update the maximum
    if (shared_top_max_hits < top_cnt)
        atomicMax( &shared_top_max_hits, top_cnt );

    __syncthreads();

    // thread 0 publishes the block maxima to the global counters;
    // the pre-check is a cheap filter, the atomicMax makes it safe.
    // NOTE(review): the uint64 slots are reinterpreted as uint32* for
    // atomicMax — only valid on little-endian layouts with values < 2^32.
    if (threadIdx.x == 0)
    {
        if (hit_stats[ HIT_STATS_MAX_RANGE ] < shared_max_range)
            atomicMax( (uint32*)(hit_stats + HIT_STATS_MAX_RANGE), shared_max_range );
        if (hit_stats[ HIT_STATS_MAX ] < shared_max_hits)
            atomicMax( (uint32*)(hit_stats + HIT_STATS_MAX), shared_max_hits );
        if (hit_stats[ HIT_STATS_TOP_MAX ] < shared_top_max_hits)
            atomicMax( (uint32*)(hit_stats + HIT_STATS_TOP_MAX), shared_top_max_hits );
    }
}

// Launch the hit-statistics kernel over 'count' reads, copy the counters back
// to the host and fold them into the cumulative 'stats' record.
// NOTE(review): cudaThreadSynchronize() is deprecated in favor of
// cudaDeviceSynchronize().
void Aligner::keep_stats(const uint32 count, Stats& stats)
{
    thrust::fill( hits_stats_dvec.begin(), hits_stats_dvec.end(), 0u );
    hits_stats( count, nvbio::device_view( hit_deques.hits() ), nvbio::device_view( hit_deques.counts() ), hits_stats_dptr );
    cudaThreadSynchronize();
    nvbio::cuda::check_error("hit stats kernel");

    nvbio::cuda::thrust_copy_vector(hits_stats_hvec, hits_stats_dvec);

    // poll until previous stats have been consumed
    //while (output_thread.stats.stats_ready) {}

    stats.hits_ranges += hits_stats_hvec[ HIT_STATS_RANGES ];
    stats.hits_total += hits_stats_hvec[ HIT_STATS_TOTAL ];
    stats.hits_max = std::max( stats.hits_max, uint32( hits_stats_hvec[ HIT_STATS_MAX ] ) );
    stats.hits_max_range = std::max( stats.hits_max_range, uint32( hits_stats_hvec[ HIT_STATS_MAX_RANGE ] ) );
    stats.hits_top_total += hits_stats_hvec[ HIT_STATS_TOP ];
    stats.hits_top_max = std::max( stats.hits_top_max, uint32( hits_stats_hvec[ HIT_STATS_TOP_MAX ] ) );
    for (uint32 i = 0; i < 28; ++i)
    {
        stats.hits_bins[i] += hits_stats_hvec[ HIT_STATS_BINS + i ];
        stats.hits_top_bins[i] += hits_stats_hvec[ HIT_STATS_TOP_BINS + i ];
    }
    stats.hits_stats++;

    // mark stats as ready to be consumed
    //output_thread.stats.stats_ready = true;
}

// Compute the total number of matches found: host-side wrapper that launches
// hits_stats_kernel with a ceil-div grid of BLOCKDIM-sized blocks.
void hits_stats(
    const uint32 batch_size,
    const SeedHit* hit_data,
    const uint32* hit_counts,
    uint64* hit_stats)
{
    const uint32 blocks = (batch_size + BLOCKDIM-1) / BLOCKDIM;

    hits_stats_kernel<<<blocks, BLOCKDIM>>>( batch_size, hit_data, hit_counts, hit_stats );
}

// copy the contents of a section of a ring buffer into a plain array
struct ring_buffer_to_plain_array_functor
{
    // constructor
    ring_buffer_to_plain_array_functor(
        const uint32* _buffer,
        const uint32 _buffer_size,
        const uint32 _begin,
        const uint32 _end,
        uint32* _output) :
        buffer( _buffer ),
        buffer_size( _buffer_size ),
        begin( _begin ),
        end( _end ),
        output( _output ) {}

    // functor operator: element i of the output is the (begin+i)-th ring
    // entry, taken modulo the buffer size to handle wrap-around
    NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
    void operator() (const uint32 i) const
    {
        if (begin + i < end)
            output[i] = buffer[ (begin + i) % buffer_size ];
    }

    const uint32* buffer;      // ring-buffer storage
    const uint32 buffer_size;  // ring-buffer capacity
    const uint32 begin;        // first (unwrapped) index to copy
    const uint32 end;          // one-past-last (unwrapped) index to copy
    uint32* output;            // destination plain array
};

// Copy the [begin, end) section of a device ring buffer into a plain array,
// one element per parallel work item.
void ring_buffer_to_plain_array(
    const uint32* buffer,
    const uint32 buffer_size,
    const uint32 begin,
    const uint32 end,
    uint32* output)
{
    nvbio::for_each<device_tag>(
        end - begin,
        thrust::make_counting_iterator<uint32>(0),
        ring_buffer_to_plain_array_functor( buffer, buffer_size, begin, end, output ) );
}

// mark the active reads whose best alignment is still unaligned
// (one thread per active read; writes 1 into 'reseed' for unaligned reads,
// leaves the entry untouched otherwise)
__global__
void mark_unaligned_kernel(
    const uint32 n_active_reads,
    const uint32* active_reads,
    const io::Alignment* best_data,
    uint8* reseed)
{
    const uint32 thread_id = threadIdx.x + BLOCKDIM*blockIdx.x;
    if (thread_id >= n_active_reads) return;

    // fetch the read
    const uint32 read_id = active_reads[ thread_id ];

    // mark if unaligned
    if (!best_data[ read_id ].is_aligned())
        reseed[ thread_id ] = 1u;
}

// mark unaligned reads that need reseeding
//
void mark_unaligned(
    const uint32 n_active_reads,
    const uint32* active_reads,
    const io::Alignment* best_data,
    uint8* reseed)
{
    const int blocks = (n_active_reads + BLOCKDIM-1) / BLOCKDIM;

    mark_unaligned_kernel<<<blocks, BLOCKDIM>>>(
        n_active_reads,
        active_reads,
        best_data,
        reseed );
}

// mark unique, non-concordant but fully aligned read pairs as discordant
// (one thread per read; the [read_id + stride] slot holds the second-best
// alignment, whose being unaligned marks the first as unique)
__global__
void mark_discordant_kernel(
    const uint32 n_reads,
    io::Alignment* anchor_data,
    io::Alignment* opposite_data,
    const uint32 stride)
{
    const uint32 read_id = threadIdx.x + BLOCKDIM*blockIdx.x;
    if (read_id >= n_reads) return;

    // A pair of alignments is considered "discordant" if they are unique and
    // they are not concordant.
    if (anchor_data[ read_id ].is_concordant() == false && // anchor is : unpaired
        anchor_data[ read_id ].is_aligned() == true && // aligned
        anchor_data[ read_id + stride ].is_aligned() == false && // unique
        opposite_data[ read_id ].is_aligned() == true && // opposite is : aligned
        opposite_data[ read_id + stride ].is_aligned() == false) // unique
    {
        // mark as paired and discordant
        anchor_data[ read_id ].m_paired = 1u;
        anchor_data[ read_id ].m_discordant = 1u;
        opposite_data[ read_id ].m_paired = 1u;
        opposite_data[ read_id ].m_discordant = 1u;
    }
}

// mark unique unaligned read pairs as discordant
//
void mark_discordant(
    const uint32 n_reads,
    io::Alignment* anchor_data,
    io::Alignment* opposite_data,
    const uint32 stride)
{
    const int blocks = (n_reads + BLOCKDIM-1) / BLOCKDIM;

    mark_discordant_kernel<<<blocks, BLOCKDIM>>>(
        n_reads,
        anchor_data,
        opposite_data,
        stride );
}

} // namespace cuda
} // namespace bowtie2
} // namespace nvbio
the_stack
 * An implementation of segmented reduction using a load-balanced parallelization
 * strategy based on the MergePath decision path.
 ******************************************************************************/

// Ensure printing of CUDA runtime errors to console
#define CUB_STDERR

#include <iterator>
#include <vector>
#include <string>
#include <algorithm>
#include <stdio.h>

#include <cub/cub.cuh>

#include "test_util.h"

using namespace cub;
using namespace std;


/******************************************************************************
 * Globals, constants, and typedefs
 ******************************************************************************/

bool g_verbose = false;          // verbose output flag
int g_timing_iterations = 1;     // number of timed kernel iterations
CachingDeviceAllocator g_allocator(true); // caching allocator for device memory


/******************************************************************************
 * Utility routines
 ******************************************************************************/

/**
 * A pair of index offsets (a position on the 2D merge-path grid:
 * a_idx indexes the segment-offset list A, b_idx the value list B)
 */
template <typename OffsetT>
struct IndexPair
{
    OffsetT a_idx;
    OffsetT b_idx;
};


/**
 * Computes the begin offsets into A and B for the specified
 * location (diagonal) along the merge decision path.
 *
 * Cooperative (block-wide) variant: each iteration all BLOCK_THREADS threads
 * probe evenly spaced pivots and __syncthreads_count tallies how many of them
 * vote to move the split up A; must therefore be called by every thread of
 * the block.
 * NOTE(review): the update "a_split_max = CUB_MIN(num_up * a_slice, ...)" is
 * not offset by the previous a_split_min — verify against the binary-search
 * variant below before reusing this routine.
 */
template <
    int BLOCK_THREADS,
    typename IteratorA,
    typename IteratorB,
    typename OffsetT>
__device__ __forceinline__ void ParallelMergePathSearch(
    OffsetT diagonal,
    IteratorA a,
    IteratorB b,
    IndexPair<OffsetT> begin, // Begin offsets into a and b
    IndexPair<OffsetT> end, // End offsets into a and b
    IndexPair<OffsetT> &intersection) // [out] Intersection offsets into a and b
{
    OffsetT a_split_min = CUB_MAX(diagonal - end.b_idx, begin.a_idx);
    OffsetT a_split_max = CUB_MIN(diagonal, end.a_idx);

    while (a_split_min < a_split_max)
    {
        OffsetT a_distance = a_split_max - a_split_min;
        OffsetT a_slice = (a_distance + BLOCK_THREADS - 1) >> Log2<BLOCK_THREADS>::VALUE;
        OffsetT a_split_pivot = CUB_MIN(a_split_min + (threadIdx.x * a_slice), end.a_idx - 1);

        int move_up = (a[a_split_pivot] <= b[diagonal - a_split_pivot - 1]);
        int num_up = __syncthreads_count(move_up);
/*
        _CubLog("a_split_min(%d), a_split_max(%d) a_distance(%d), a_slice(%d), a_split_pivot(%d), move_up(%d), num_up(%d), a_begin(%d), a_end(%d)\n",
            a_split_min, a_split_max, a_distance, a_slice, a_split_pivot, move_up, num_up, a_begin, a_end);
*/
        a_split_max = CUB_MIN(num_up * a_slice, end.a_idx);
        a_split_min = CUB_MAX(a_split_max - a_slice, begin.a_idx) + 1;
    }

    intersection.a_idx = CUB_MIN(a_split_min, end.a_idx);
    intersection.b_idx = CUB_MIN(diagonal - a_split_min, end.b_idx);
}

/**
 * Computes the begin offsets into A and B for the specified
 * location (diagonal) along the merge decision path.
 *
 * Sequential per-thread variant: plain binary search over the diagonal's
 * candidate split positions.
 */
template <
    typename IteratorA,
    typename IteratorB,
    typename OffsetT>
__device__ __forceinline__ void MergePathSearch(
    OffsetT diagonal,
    IteratorA a,
    IteratorB b,
    IndexPair<OffsetT> begin, // Begin offsets into a and b
    IndexPair<OffsetT> end, // End offsets into a and b
    IndexPair<OffsetT> &intersection) // [out] Intersection offsets into a and b
{
    OffsetT split_min = CUB_MAX(diagonal - end.b_idx, begin.a_idx);
    OffsetT split_max = CUB_MIN(diagonal, end.a_idx);

    while (split_min < split_max)
    {
        OffsetT split_pivot = (split_min + split_max) >> 1;
        if (a[split_pivot] <= b[diagonal - split_pivot - 1])
        {
            // Move candidate split range up A, down B
            split_min = split_pivot + 1;
        }
        else
        {
            // Move candidate split range up B, down A
            split_max = split_pivot;
        }
    }

    intersection.a_idx = CUB_MIN(split_min, end.a_idx);
    intersection.b_idx = CUB_MIN(diagonal - split_min, end.b_idx);
}

/******************************************************************************
 * Tuning policy types
 ******************************************************************************/

/**
 * Parameterizable tuning policy type for BlockSegReduceRegion.
 * Pure compile-time parameter pack: template parameters are re-exposed as
 * enum constants / static members so kernels can read them from one type.
 */
template <
    int _BLOCK_THREADS, ///< Threads per thread block
    int _ITEMS_PER_THREAD, ///< Items per thread (per tile of input)
    bool _USE_SMEM_SEGMENT_CACHE, ///< Whether or not to cache incoming segment offsets in shared memory before reducing each tile
    bool _USE_SMEM_VALUE_CACHE, ///< Whether or not to cache incoming values in shared memory before reducing each tile
    CacheLoadModifier _LOAD_MODIFIER_SEGMENTS, ///< Cache load modifier for reading segment offsets
    CacheLoadModifier _LOAD_MODIFIER_VALUES, ///< Cache load modifier for reading values
    BlockReduceAlgorithm _REDUCE_ALGORITHM, ///< The BlockReduce algorithm to use
    BlockScanAlgorithm _SCAN_ALGORITHM> ///< The BlockScan algorithm to use
struct BlockSegReduceRegionPolicy
{
    enum
    {
        BLOCK_THREADS = _BLOCK_THREADS, ///< Threads per thread block
        ITEMS_PER_THREAD = _ITEMS_PER_THREAD, ///< Items per thread (per tile of input)
        USE_SMEM_SEGMENT_CACHE = _USE_SMEM_SEGMENT_CACHE, ///< Whether or not to cache incoming segment offsets in shared memory before reducing each tile
        USE_SMEM_VALUE_CACHE = _USE_SMEM_VALUE_CACHE, ///< Whether or not to cache incoming upcoming values in shared memory before reducing each tile
    };

    static const CacheLoadModifier LOAD_MODIFIER_SEGMENTS = _LOAD_MODIFIER_SEGMENTS; ///< Cache load modifier for reading segment offsets
    static const CacheLoadModifier LOAD_MODIFIER_VALUES = _LOAD_MODIFIER_VALUES; ///< Cache load modifier for reading values
    static const BlockReduceAlgorithm REDUCE_ALGORITHM = _REDUCE_ALGORITHM; ///< The BlockReduce algorithm to use
    static const BlockScanAlgorithm SCAN_ALGORITHM = _SCAN_ALGORITHM; ///< The BlockScan algorithm to use
};


/******************************************************************************
 * Persistent thread block types
 ******************************************************************************/

/**
 * \brief BlockSegReduceTiles implements a stateful abstraction of CUDA thread blocks for participating in device-wide segmented reduction.
*/ template < typename BlockSegReduceRegionPolicy, ///< Parameterized BlockSegReduceRegionPolicy tuning policy typename SegmentOffsetIterator, ///< Random-access input iterator type for reading segment end-offsets typename ValueIterator, ///< Random-access input iterator type for reading values typename OutputIteratorT, ///< Random-access output iterator type for writing segment reductions typename ReductionOp, ///< Binary reduction operator type having member <tt>T operator()(const T &a, const T &b)</tt> typename OffsetT> ///< Signed integer type for global offsets struct BlockSegReduceRegion { //--------------------------------------------------------------------- // Types and constants //--------------------------------------------------------------------- // Constants enum { BLOCK_THREADS = BlockSegReduceRegionPolicy::BLOCK_THREADS, ITEMS_PER_THREAD = BlockSegReduceRegionPolicy::ITEMS_PER_THREAD, TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD, /// Number of work items to be processed per tile USE_SMEM_SEGMENT_CACHE = BlockSegReduceRegionPolicy::USE_SMEM_SEGMENT_CACHE, ///< Whether or not to cache incoming segment offsets in shared memory before reducing each tile USE_SMEM_VALUE_CACHE = BlockSegReduceRegionPolicy::USE_SMEM_VALUE_CACHE, ///< Whether or not to cache incoming upcoming values in shared memory before reducing each tile SMEM_SEGMENT_CACHE_ITEMS = USE_SMEM_SEGMENT_CACHE ? TILE_ITEMS : 1, SMEM_VALUE_CACHE_ITEMS = USE_SMEM_VALUE_CACHE ? 
TILE_ITEMS : 1, }; // Segment offset type typedef typename std::iterator_traits<SegmentOffsetIterator>::value_type SegmentOffset; // Value type typedef typename std::iterator_traits<ValueIterator>::value_type Value; // Counting iterator type typedef CountingInputIterator<SegmentOffsetT, OffsetT> CountingIterator; // Segment offsets iterator wrapper type typedef typename If<(IsPointer<SegmentOffsetIterator>::VALUE), CacheModifiedInputIterator<BlockSegReduceRegionPolicy::LOAD_MODIFIER_SEGMENTS, SegmentOffsetT, OffsetT>, // Wrap the native input pointer with CacheModifiedInputIterator SegmentOffsetIterator>::Type // Directly use the supplied input iterator type WrappedSegmentOffsetIterator; // Values iterator wrapper type typedef typename If<(IsPointer<ValueIterator>::VALUE), CacheModifiedInputIterator<BlockSegReduceRegionPolicy::LOAD_MODIFIER_VALUES, Value, OffsetT>, // Wrap the native input pointer with CacheModifiedInputIterator ValueIterator>::Type // Directly use the supplied input iterator type WrappedValueIterator; // Tail flag type for marking segment discontinuities typedef int TailFlag; // Reduce-by-key data type tuple (segment-ID, value) typedef KeyValuePair<OffsetT, Value> KeyValuePair; // Index pair data type typedef IndexPair<OffsetT> IndexPair; // BlockScan scan operator for reduction-by-segment typedef ReduceByKeyOp<ReductionOp> ReduceByKeyOp; // Stateful BlockScan prefix callback type for managing a running total while scanning consecutive tiles typedef RunningBlockPrefixCallbackOp< KeyValuePair, ReduceByKeyOp> RunningPrefixCallbackOp; // Parameterized BlockShift type for exchanging index pairs typedef BlockShift< IndexPair, BLOCK_THREADS> BlockShift; // Parameterized BlockReduce type for block-wide reduction typedef BlockReduce< Value, BLOCK_THREADS, BlockSegReduceRegionPolicy::REDUCE_ALGORITHM> BlockReduce; // Parameterized BlockScan type for block-wide reduce-value-by-key typedef BlockScan< KeyValuePair, BLOCK_THREADS, 
BlockSegReduceRegionPolicy::SCAN_ALGORITHM> BlockScan; // Shared memory type for this threadblock struct _TempStorage { union { // Smem needed for BlockScan typename BlockScan::TempStorage scan; // Smem needed for BlockReduce typename BlockReduce::TempStorage reduce; struct { // Smem needed for communicating start/end indices between threads for a given work tile typename BlockShift::TempStorage shift; // Smem needed for caching segment end-offsets SegmentOffset cached_segment_end_offsets[SMEM_SEGMENT_CACHE_ITEMS + 1]; }; // Smem needed for caching values Value cached_values[SMEM_VALUE_CACHE_ITEMS]; }; IndexPair block_region_idx[2]; // The starting [0] and ending [1] pairs of segment and value indices for the threadblock's region // The first partial reduction tuple scattered by this thread block KeyValuePair first_tuple; }; // Alias wrapper allowing storage to be unioned struct TempStorage : Uninitialized<_TempStorage> {}; //--------------------------------------------------------------------- // Thread fields //--------------------------------------------------------------------- _TempStorage &temp_storage; ///< Reference to shared storage WrappedSegmentOffsetIterator d_segment_end_offsets; ///< A sequence of \p num_segments segment end-offsets WrappedValueIterator d_values; ///< A sequence of \p num_values data to reduce OutputIteratorT d_output; ///< A sequence of \p num_segments segment totals CountingIterator d_value_offsets; ///< A sequence of \p num_values value-offsets IndexPair *d_block_idx; OffsetT num_values; ///< Total number of values to reduce OffsetT num_segments; ///< Number of segments being reduced Value identity; ///< Identity value (for zero-length segments) ReductionOp reduction_op; ///< Reduction operator ReduceByKeyOp scan_op; ///< Reduce-by-key scan operator RunningPrefixCallbackOp prefix_op; ///< Stateful running total for block-wide prefix scan of partial reduction tuples 
//--------------------------------------------------------------------- // Operations //--------------------------------------------------------------------- /** * Constructor */ __device__ __forceinline__ BlockSegReduceRegion( TempStorage &temp_storage, ///< Reference to shared storage SegmentOffsetIterator d_segment_end_offsets, ///< A sequence of \p num_segments segment end-offsets ValueIterator d_values, ///< A sequence of \p num_values values OutputIteratorT d_output, ///< A sequence of \p num_segments segment totals IndexPair *d_block_idx, OffsetT num_values, ///< Number of values to reduce OffsetT num_segments, ///< Number of segments being reduced Value identity, ///< Identity value (for zero-length segments) ReductionOp reduction_op) ///< Reduction operator : temp_storage(temp_storage.Alias()), d_segment_end_offsets(d_segment_end_offsets), d_values(d_values), d_value_offsets(0), d_output(d_output), d_block_idx(d_block_idx), num_values(num_values), num_segments(num_segments), identity(identity), reduction_op(reduction_op), scan_op(reduction_op), prefix_op(scan_op) {} /** * Fast-path single-segment tile reduction. Perform a * simple block-wide reduction and accumulate the result into * the running total. 
*/ __device__ __forceinline__ void SingleSegmentTile( IndexPair next_tile_idx, IndexPair block_idx) { OffsetT tile_values = next_tile_idx.b_idx - block_idx.b_idx; // Load a tile's worth of values (using identity for out-of-bounds items) Value values[ITEMS_PER_THREAD]; LoadDirectStriped<BLOCK_THREADS>(threadIdx.x, d_values + block_idx.b_idx, values, tile_values, identity); // Barrier for smem reuse __syncthreads(); // Reduce the tile of values and update the running total in thread-0 KeyValuePair tile_aggregate; tile_aggregate.key = block_idx.a_idx; tile_aggregate.value = BlockReduce(temp_storage.reduce).Reduce(values, reduction_op); if (threadIdx.x == 0) { prefix_op.running_total = scan_op(prefix_op.running_total, tile_aggregate); } } /** * Fast-path empty-segment tile reduction. Write out a tile of identity * values to output. */ __device__ __forceinline__ void EmptySegmentsTile( IndexPair next_tile_idx, IndexPair block_idx) { Value segment_reductions[ITEMS_PER_THREAD]; if (threadIdx.x == 0) { // The first segment gets the running segment total segment_reductions[0] = prefix_op.running_total.value; // Update the running prefix prefix_op.running_total.value = identity; prefix_op.running_total.key = next_tile_idx.a_idx; } else { // Remainder of segments in this tile get identity segment_reductions[0] = identity; } // Remainder of segments in this tile get identity #pragma unroll for (int ITEM = 1; ITEM < ITEMS_PER_THREAD; ++ITEM) segment_reductions[ITEM] = identity; // Store reductions OffsetT tile_segments = next_tile_idx.a_idx - block_idx.a_idx; StoreDirectStriped<BLOCK_THREADS>(threadIdx.x, d_output + block_idx.a_idx, segment_reductions, tile_segments); } /** * Multi-segment tile reduction. 
*/ template <bool FULL_TILE> __device__ __forceinline__ void MultiSegmentTile( IndexPair block_idx, IndexPair thread_idx, IndexPair next_thread_idx, IndexPair next_tile_idx) { IndexPair local_thread_idx; local_thread_idx.a_idx = thread_idx.a_idx - block_idx.a_idx; local_thread_idx.b_idx = thread_idx.b_idx - block_idx.b_idx; // Check if first segment end-offset is in range bool valid_segment = FULL_TILE || (thread_idx.a_idx < next_thread_idx.a_idx); // Check if first value offset is in range bool valid_value = FULL_TILE || (thread_idx.b_idx < next_thread_idx.b_idx); // Load first segment end-offset OffsetT segment_end_offset = (valid_segment) ? (USE_SMEM_SEGMENT_CACHE)? temp_storage.cached_segment_end_offsets[local_thread_idx.a_idx] : d_segment_end_offsets[thread_idx.a_idx] : -1; OffsetT segment_ids[ITEMS_PER_THREAD]; OffsetT value_offsets[ITEMS_PER_THREAD]; KeyValuePair first_partial; first_partial.key = thread_idx.a_idx; first_partial.value = identity; // Get segment IDs and gather-offsets for values #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) { segment_ids[ITEM] = -1; value_offsets[ITEM] = -1; // Whether or not we slide (a) right along the segment path or (b) down the value path if (valid_segment && (!valid_value || (segment_end_offset <= thread_idx.b_idx))) { // Consume this segment index segment_ids[ITEM] = thread_idx.a_idx; thread_idx.a_idx++; local_thread_idx.a_idx++; valid_segment = FULL_TILE || (thread_idx.a_idx < next_thread_idx.a_idx); // Read next segment end-offset (if valid) if (valid_segment) { if (USE_SMEM_SEGMENT_CACHE) segment_end_offset = temp_storage.cached_segment_end_offsets[local_thread_idx.a_idx]; else segment_end_offset = d_segment_end_offsets[thread_idx.a_idx]; } } else if (valid_value) { // Consume this value index value_offsets[ITEM] = thread_idx.b_idx; thread_idx.b_idx++; local_thread_idx.b_idx++; valid_value = FULL_TILE || (thread_idx.b_idx < next_thread_idx.b_idx); } } // Load values Value 
values[ITEMS_PER_THREAD]; if (USE_SMEM_VALUE_CACHE) { // Barrier for smem reuse __syncthreads(); OffsetT tile_values = next_tile_idx.b_idx - block_idx.b_idx; // Load a tile's worth of values (using identity for out-of-bounds items) LoadDirectStriped<BLOCK_THREADS>(threadIdx.x, d_values + block_idx.b_idx, values, tile_values, identity); // Store to shared StoreDirectStriped<BLOCK_THREADS>(threadIdx.x, temp_storage.cached_values, values, tile_values); // Barrier for smem reuse __syncthreads(); #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) { values[ITEM] = (value_offsets[ITEM] == -1) ? identity : temp_storage.cached_values[value_offsets[ITEM] - block_idx.b_idx]; } } else { #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) { values[ITEM] = (value_offsets[ITEM] == -1) ? identity : d_values[value_offsets[ITEM]]; } } // Reduce within thread segments KeyValuePair running_total = first_partial; #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) { if (segment_ids[ITEM] != -1) { // Consume this segment index d_output[segment_ids[ITEM]] = running_total.value; // _CubLog("Updating segment %d with value %lld\n", segment_ids[ITEM], running_total.value) if (first_partial.key == segment_ids[ITEM]) first_partial.value = running_total.value; running_total.key = segment_ids[ITEM]; running_total.value = identity; } running_total.value = reduction_op(running_total.value, values[ITEM]); } /* // Barrier for smem reuse __syncthreads(); // Use prefix scan to reduce values by segment-id. The segment-reductions end up in items flagged as segment-tails. 
KeyValuePair block_aggregate; BlockScan(temp_storage.scan).InclusiveScan( pairs, // Scan input pairs, // Scan output scan_op, // Scan operator block_aggregate, // Block-wide total (unused) prefix_op); // Prefix operator for seeding the block-wide scan with the running total */ /* // Check if first segment end-offset is in range bool valid_segment = (thread_idx.a_idx < next_thread_idx.a_idx); // Check if first value offset is in range bool valid_value = (thread_idx.b_idx < next_thread_idx.b_idx); // Load first segment end-offset OffsetT segment_end_offset = (valid_segment) ? d_segment_end_offsets[thread_idx.a_idx] : num_values; // Out of range (the last segment end-offset is one-past the last value offset) // Load first value offset OffsetT value_offset = (valid_value) ? d_value_offsets[thread_idx.b_idx] : num_values; // Out of range (one-past the last value offset) // Assemble segment-demarcating tail flags and partial reduction tuples TailFlag tail_flags[ITEMS_PER_THREAD]; KeyValuePair partial_reductions[ITEMS_PER_THREAD]; #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) { // Default tuple and flag values partial_reductions[ITEM].key = thread_idx.a_idx; partial_reductions[ITEM].value = identity; tail_flags[ITEM] = 0; // Whether or not we slide (a) right along the segment path or (b) down the value path if (valid_segment && (!valid_value || (segment_end_offset <= value_offset))) { // Consume this segment index // Set tail flag noting the end of the segment tail_flags[ITEM] = 1; // Increment segment index thread_idx.a_idx++; // Read next segment end-offset (if valid) if ((valid_segment = (thread_idx.a_idx < next_thread_idx.a_idx))) segment_end_offset = d_segment_end_offsets[thread_idx.a_idx]; } else if (valid_value) { // Consume this value index // Update the tuple's value with the value at this index. 
partial_reductions[ITEM].value = d_values[value_offset]; // Increment value index thread_idx.b_idx++; // Read next value offset (if valid) if ((valid_value = (thread_idx.b_idx < next_thread_idx.b_idx))) value_offset = d_value_offsets[thread_idx.b_idx]; } } // Use prefix scan to reduce values by segment-id. The segment-reductions end up in items flagged as segment-tails. KeyValuePair block_aggregate; BlockScan(temp_storage.scan).InclusiveScan( partial_reductions, // Scan input partial_reductions, // Scan output scan_op, // Scan operator block_aggregate, // Block-wide total (unused) prefix_op); // Prefix operator for seeding the block-wide scan with the running total // The first segment index for this region (hoist?) OffsetT first_segment_idx = temp_storage.block_idx.a_idx[0]; // Scatter an accumulated reduction if it is the head of a valid segment #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { if (tail_flags[ITEM]) { OffsetT segment_idx = partial_reductions[ITEM].key; Value value = partial_reductions[ITEM].value; // Write value reduction to corresponding segment id d_output[segment_idx] = value; // Save off the first value product that this thread block will scatter if (segment_idx == first_segment_idx) { temp_storage.first_tuple.value = value; } } } */ } /** * Have the thread block process the specified region of the MergePath decision path */ __device__ __forceinline__ void ProcessRegion( OffsetT block_diagonal, OffsetT next_block_diagonal, KeyValuePair &first_tuple, // [Out] Valid in thread-0 KeyValuePair &last_tuple) // [Out] Valid in thread-0 { // Thread block initialization if (threadIdx.x < 2) { // Retrieve block starting and ending indices IndexPair block_idx = {0, 0}; if (gridDim.x > 1) { block_idx = d_block_idx[blockIdx.x + threadIdx.x]; } else if (threadIdx.x > 0) { block_idx.a_idx = num_segments; block_idx.b_idx = num_values; } // Share block starting and ending indices temp_storage.block_region_idx[threadIdx.x] = block_idx; // 
Initialize the block's running prefix if (threadIdx.x == 0) { prefix_op.running_total.key = block_idx.a_idx; prefix_op.running_total.value = identity; // Initialize the "first scattered partial reduction tuple" to the prefix tuple (in case we don't actually scatter one) temp_storage.first_tuple = prefix_op.running_total; } } // Ensure coherence of region indices __syncthreads(); // Read block's starting indices IndexPair block_idx = temp_storage.block_region_idx[0]; // Have the thread block iterate over the region #pragma unroll 1 while (block_diagonal < next_block_diagonal) { // Read block's ending indices (hoist?) IndexPair next_block_idx = temp_storage.block_region_idx[1]; // Clamp the per-thread search range to within one work-tile of block's current indices IndexPair next_tile_idx; next_tile_idx.a_idx = CUB_MIN(next_block_idx.a_idx, block_idx.a_idx + TILE_ITEMS); next_tile_idx.b_idx = CUB_MIN(next_block_idx.b_idx, block_idx.b_idx + TILE_ITEMS); // Have each thread search for the end-indices of its subranges within the segment and value inputs IndexPair next_thread_idx; if (USE_SMEM_SEGMENT_CACHE) { // Search in smem cache OffsetT num_segments = next_tile_idx.a_idx - block_idx.a_idx; // Load global SegmentOffset segment_offsets[ITEMS_PER_THREAD]; LoadDirectStriped<BLOCK_THREADS>(threadIdx.x, d_segment_end_offsets + block_idx.a_idx, segment_offsets, num_segments, num_values); // Store to shared StoreDirectStriped<BLOCK_THREADS>(threadIdx.x, temp_storage.cached_segment_end_offsets, segment_offsets); __syncthreads(); OffsetT next_thread_diagonal = block_diagonal + ((threadIdx.x + 1) * ITEMS_PER_THREAD); MergePathSearch( next_thread_diagonal, // Next thread diagonal temp_storage.cached_segment_end_offsets - block_idx.a_idx, // A (segment end-offsets) d_value_offsets, // B (value offsets) block_idx, // Start indices into A and B next_tile_idx, // End indices into A and B next_thread_idx); // [out] diagonal intersection indices into A and B } else { // Search in 
global OffsetT next_thread_diagonal = block_diagonal + ((threadIdx.x + 1) * ITEMS_PER_THREAD); MergePathSearch( next_thread_diagonal, // Next thread diagonal d_segment_end_offsets, // A (segment end-offsets) d_value_offsets, // B (value offsets) block_idx, // Start indices into A and B next_tile_idx, // End indices into A and B next_thread_idx); // [out] diagonal intersection indices into A and B } // Share thread end-indices to get thread begin-indices and tile end-indices IndexPair thread_idx; BlockShift(temp_storage.shift).Up( next_thread_idx, // Input item thread_idx, // [out] Output item block_idx, // Prefix item to be provided to <em>thread</em><sub>0</sub> next_tile_idx); // [out] Suffix item shifted out by the <em>thread</em><sub><tt>BLOCK_THREADS-1</tt></sub> to be provided to all threads // if (block_idx.a_idx == next_tile_idx.a_idx) // { // // There are no segment end-offsets in this tile. Perform a // // simple block-wide reduction and accumulate the result into // // the running total. // SingleSegmentTile(next_tile_idx, block_idx); // } // else if (block_idx.b_idx == next_tile_idx.b_idx) // { // // There are no values in this tile (only empty segments). 
//            EmptySegmentsTile(next_tile_idx.a_idx, block_idx.a_idx);
//            }
//            else
            if ((next_tile_idx.a_idx < num_segments) && (next_tile_idx.b_idx < num_values))
            {
                // Merge the tile's segment and value indices (full tile)
                MultiSegmentTile<true>(block_idx, thread_idx, next_thread_idx, next_tile_idx);
            }
            else
            {
                // Merge the tile's segment and value indices (partially full tile)
                MultiSegmentTile<false>(block_idx, thread_idx, next_thread_idx, next_tile_idx);
            }

            // Advance the block's indices in preparation for the next tile
            block_idx = next_tile_idx;

            // Advance to the next region in the decision path
            block_diagonal += TILE_ITEMS;

            // Barrier for smem reuse
            __syncthreads();
        }

        // Get first and last tuples for the region (valid in thread-0 only)
        if (threadIdx.x == 0)
        {
            first_tuple = temp_storage.first_tuple;
            last_tuple = prefix_op.running_total;
        }
    }
};


/******************************************************************************
 * Tuning policy types
 ******************************************************************************/

/**
 * Parameterizable tuning policy type for BlockSegReduceRegionByKey
 */
template <
    int                         _BLOCK_THREADS,             ///< Threads per thread block
    int                         _ITEMS_PER_THREAD,          ///< Items per thread (per tile of input)
    BlockLoadAlgorithm          _LOAD_ALGORITHM,            ///< The BlockLoad algorithm to use
    bool                        _LOAD_WARP_TIME_SLICING,    ///< Whether or not only one warp's worth of shared memory should be allocated and time-sliced among block-warps during any load-related data transpositions (versus each warp having its own storage)
    CacheLoadModifier           _LOAD_MODIFIER,             ///< Cache load modifier for reading input elements
    BlockScanAlgorithm          _SCAN_ALGORITHM>            ///< The BlockScan algorithm to use
struct BlockSegReduceRegionByKeyPolicy
{
    enum
    {
        BLOCK_THREADS           = _BLOCK_THREADS,           ///< Threads per thread block
        ITEMS_PER_THREAD        = _ITEMS_PER_THREAD,        ///< Items per thread (per tile of input)
        LOAD_WARP_TIME_SLICING  = _LOAD_WARP_TIME_SLICING,  ///< Whether or not only one warp's worth of shared memory should be allocated and time-sliced among block-warps during any load-related data transpositions (versus each warp having its own storage)
    };
    // FIX: an extra "};" previously followed the enum's closing brace, closing the
    // struct before the static members below.  That left them at namespace scope
    // referencing the (no longer visible) template parameters, plus a stray "};".
    // The enum closes once; the statics stay inside the struct.

    static const BlockLoadAlgorithm LOAD_ALGORITHM  = _LOAD_ALGORITHM;      ///< The BlockLoad algorithm to use
    static const CacheLoadModifier  LOAD_MODIFIER   = _LOAD_MODIFIER;       ///< Cache load modifier for reading input elements
    static const BlockScanAlgorithm SCAN_ALGORITHM  = _SCAN_ALGORITHM;      ///< The BlockScan algorithm to use
};


/******************************************************************************
 * Persistent thread block types
 ******************************************************************************/

/**
 * \brief BlockSegReduceRegionByKey implements a stateful abstraction of CUDA thread blocks for participating in device-wide reduce-value-by-key.
 */
template <
    typename    BlockSegReduceRegionByKeyPolicy,    ///< Parameterized BlockSegReduceRegionByKeyPolicy tuning policy
    typename    InputIteratorT,                     ///< Random-access iterator referencing key-value input tuples
    typename    OutputIteratorT,                    ///< Random-access iterator referencing segment output totals
    typename    ReductionOp>                        ///< Binary reduction operator type having member <tt>T operator()(const T &a, const T &b)</tt>
struct BlockSegReduceRegionByKey
{
    //---------------------------------------------------------------------
    // Types and constants
    //---------------------------------------------------------------------

    // Constants
    enum
    {
        BLOCK_THREADS       = BlockSegReduceRegionByKeyPolicy::BLOCK_THREADS,
        ITEMS_PER_THREAD    = BlockSegReduceRegionByKeyPolicy::ITEMS_PER_THREAD,
        TILE_ITEMS          = BLOCK_THREADS * ITEMS_PER_THREAD,
    };

    // KeyValuePair input type
    typedef typename std::iterator_traits<InputIteratorT>::value_type KeyValuePair;

    // Signed integer type for global offsets
    typedef typename KeyValuePair::Key OffsetT;

    // Value type
    typedef typename KeyValuePair::Value Value;

    // Head flag type
    typedef int HeadFlag;

    // Input iterator wrapper type for loading KeyValuePair elements through cache
    typedef CacheModifiedInputIterator<
        BlockSegReduceRegionByKeyPolicy::LOAD_MODIFIER,
        KeyValuePair,
        OffsetT>
    WrappedInputIteratorT;

    // Parameterized BlockLoad type
    typedef BlockLoad<
            WrappedInputIteratorT,
            BLOCK_THREADS,
            ITEMS_PER_THREAD,
            BlockSegReduceRegionByKeyPolicy::LOAD_ALGORITHM,
            BlockSegReduceRegionByKeyPolicy::LOAD_WARP_TIME_SLICING>
        BlockLoad;

    // BlockScan scan operator for reduction-by-segment
    typedef ReduceByKeyOp<ReductionOp> ReduceByKeyOp;

    // Stateful BlockScan prefix callback type for managing a running total while scanning consecutive tiles
    typedef RunningBlockPrefixCallbackOp<
            KeyValuePair,
            ReduceByKeyOp>
        RunningPrefixCallbackOp;

    // Parameterized BlockScan type for block-wide reduce-value-by-key
    typedef BlockScan<
            KeyValuePair,
            BLOCK_THREADS,
            BlockSegReduceRegionByKeyPolicy::SCAN_ALGORITHM>
        BlockScan;

    // Parameterized BlockDiscontinuity type for identifying key discontinuities
    typedef BlockDiscontinuity<
            OffsetT,
            BLOCK_THREADS>
        BlockDiscontinuity;

    // Operator for detecting discontinuities in a list of segment identifiers.
    // NOTE(review): FlagHeads invokes this with the argument order (current, predecessor);
    // the parameter names (b, a) reflect that — confirm against the BlockDiscontinuity API.
    struct NewSegmentOp
    {
        /// Returns true if row_b is the start of a new row
        __device__ __forceinline__ bool operator()(const OffsetT& b, const OffsetT& a)
        {
            return (a != b);
        }
    };

    // Shared memory type for this threadblock.  The load stage and the
    // scan/discontinuity stages are separated by __syncthreads() barriers,
    // so their storage may alias each other in a union.
    struct _TempStorage
    {
        union
        {
            typename BlockLoad::TempStorage load;                   // Smem needed for tile loading
            struct
            {
                typename BlockScan::TempStorage scan;               // Smem needed for reduce-value-by-segment scan
                typename BlockDiscontinuity::TempStorage discontinuity; // Smem needed for head-flagging
            };
        };
    };

    // Alias wrapper allowing storage to be unioned
    struct TempStorage : Uninitialized<_TempStorage> {};

    //---------------------------------------------------------------------
    // Thread fields
    //---------------------------------------------------------------------

    _TempStorage                &temp_storage;      ///< Reference to shared storage
    WrappedInputIteratorT       d_tuple_partials;   ///< A sequence of partial reduction tuples to scan
    OutputIteratorT             d_output;           ///< A sequence of segment totals
    Value                       identity;           ///< Identity value (for zero-length segments)
    ReduceByKeyOp               scan_op;            ///< Reduce-by-key scan operator
    RunningPrefixCallbackOp     prefix_op;          ///< Stateful running total for block-wide prefix scan of partial reduction tuples

    //---------------------------------------------------------------------
    // Operations
    //---------------------------------------------------------------------

    /**
     * Constructor
     */
    __device__ __forceinline__
    BlockSegReduceRegionByKey(
        TempStorage     &temp_storage,      ///< Reference to shared storage
        InputIteratorT  d_tuple_partials,   ///< A sequence of partial reduction tuples to scan
        OutputIteratorT d_output,           ///< A sequence of segment totals
        Value           identity,           ///< Identity value (for zero-length segments)
        ReductionOp     reduction_op)       ///< Reduction operator
    :
        temp_storage(temp_storage.Alias()),
        d_tuple_partials(d_tuple_partials),
        d_output(d_output),
        identity(identity),
        scan_op(reduction_op),
        prefix_op(scan_op)
    {}


    /**
     * Processes a reduce-value-by-key input tile, outputting
reductions for each segment
     */
    template <bool FULL_TILE>
    __device__ __forceinline__ void ProcessTile(
        OffsetT block_offset,               // Tile's starting offset into d_tuple_partials
        OffsetT first_segment_idx,          // First segment ID covered by this block's region (unused here; prefix seeded in ProcessRegion)
        OffsetT last_segment_idx,           // One-past-last segment ID; used as the out-of-bounds key
        int     guarded_items = TILE_ITEMS) // Number of valid items when FULL_TILE == false
    {
        KeyValuePair    partial_reductions[ITEMS_PER_THREAD];
        OffsetT         segment_ids[ITEMS_PER_THREAD];
        HeadFlag        head_flags[ITEMS_PER_THREAD];

        // Load a tile of block partials from previous kernel
        if (FULL_TILE)
        {
            // Full tile
            BlockLoad(temp_storage.load).Load(d_tuple_partials + block_offset, partial_reductions);
        }
        else
        {
            KeyValuePair oob_default;
            oob_default.key = last_segment_idx;       // The last segment ID to be reduced
            oob_default.value = identity;

            // Partially-full tile
            BlockLoad(temp_storage.load).Load(d_tuple_partials + block_offset, partial_reductions, guarded_items, oob_default);
        }

        // Barrier for shared memory reuse (load storage aliases scan/discontinuity storage)
        __syncthreads();

        // Copy the segment IDs for head-flagging
        #pragma unroll
        for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
        {
            segment_ids[ITEM] = partial_reductions[ITEM].key;
        }

        // FlagT segment heads by looking for discontinuities
        BlockDiscontinuity(temp_storage.discontinuity).FlagHeads(
            head_flags,                     // [out] Head flags
            segment_ids,                    // Segment ids
            NewSegmentOp(),                 // Functor for detecting start of new rows
            prefix_op.running_total.key);   // Last segment ID from previous tile to compare with first segment ID in this tile

        // Reduce-value-by-segment across partial_reductions using exclusive prefix scan
        KeyValuePair block_aggregate;
        BlockScan(temp_storage.scan).ExclusiveScan(
            partial_reductions,             // Scan input
            partial_reductions,             // Scan output
            scan_op,                        // Scan operator
            block_aggregate,                // Block-wide total (unused)
            prefix_op);                     // Prefix operator for seeding the block-wide scan with the running total

        // Scatter an accumulated reduction if it is the head of a valid segment
        #pragma unroll
        for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
        {
            if (head_flags[ITEM])
            {
                d_output[partial_reductions[ITEM].key] = partial_reductions[ITEM].value;
            }
        }
    }


    /**
     * Iterate over
input tiles belonging to this thread block
     */
    __device__ __forceinline__ void ProcessRegion(
        OffsetT block_offset,           // Region start (offset into d_tuple_partials)
        OffsetT block_end,              // Region end (one-past-last offset)
        OffsetT first_segment_idx,      // First segment ID (seeds the running prefix)
        OffsetT last_segment_idx)       // One-past-last segment ID (out-of-bounds default key)
    {
        if (threadIdx.x == 0)
        {
            // Initialize running prefix to the first segment index paired with identity
            prefix_op.running_total.key    = first_segment_idx;
            prefix_op.running_total.value  = identity;
        }

        // Process full tiles
        while (block_offset + TILE_ITEMS <= block_end)
        {
            ProcessTile<true>(block_offset, first_segment_idx, last_segment_idx);
            __syncthreads();

            block_offset += TILE_ITEMS;
        }

        // Process final value tile (if present)
        int guarded_items = block_end - block_offset;
        if (guarded_items)
        {
            ProcessTile<false>(block_offset, first_segment_idx, last_segment_idx, guarded_items);
        }
    }
};


/******************************************************************************
 * Kernel entrypoints
 ******************************************************************************/

/**
 * Partition-planning kernel entry point (multi-block): samples the block-level
 * merge-path split points used by the segmented reduce region kernel.
*/ template < typename SegmentOffsetIterator, ///< Random-access input iterator type for reading segment end-offsets typename OffsetT> ///< Signed integer type for global offsets __global__ void SegReducePartitionKernel( SegmentOffsetIterator d_segment_end_offsets, ///< [in] A sequence of \p num_segments segment end-offsets IndexPair<OffsetT> *d_block_idx, int num_partition_samples, OffsetT num_values, ///< [in] Number of values to reduce OffsetT num_segments, ///< [in] Number of segments being reduced GridEvenShare<OffsetT> even_share) ///< [in] Even-share descriptor for mapping an equal number of tiles onto each thread block { // Segment offset type typedef typename std::iterator_traits<SegmentOffsetIterator>::value_type SegmentOffset; // Counting iterator type typedef CountingInputIterator<SegmentOffsetT, OffsetT> CountingIterator; // Cache-modified iterator for segment end-offsets CacheModifiedInputIterator<LOAD_LDG, SegmentOffsetT, OffsetT> d_wrapped_segment_end_offsets(d_segment_end_offsets); // Counting iterator for value offsets CountingIterator d_value_offsets(0); // Initialize even-share to tell us where to start and stop our tile-processing int partition_id = (blockDim.x * blockIdx.x) + threadIdx.x; even_share.Init(partition_id); // Search for block starting and ending indices IndexPair<OffsetT> start_idx = {0, 0}; IndexPair<OffsetT> end_idx = {num_segments, num_values}; IndexPair<OffsetT> block_idx; MergePathSearch( even_share.block_offset, // Next thread diagonal d_wrapped_segment_end_offsets, // A (segment end-offsets) d_value_offsets, // B (value offsets) start_idx, // Start indices into A and B end_idx, // End indices into A and B block_idx); // [out] diagonal intersection indices into A and B // Write output if (partition_id < num_partition_samples) { d_block_idx[partition_id] = block_idx; } } /** * Segmented reduce region kernel entry point (multi-block). 
 */
template <
    typename BlockSegReduceRegionPolicy,    ///< Parameterized BlockSegReduceRegionPolicy tuning policy
    typename SegmentOffsetIterator,         ///< Random-access input iterator type for reading segment end-offsets
    typename ValueIterator,                 ///< Random-access input iterator type for reading values
    typename OutputIteratorT,               ///< Random-access output iterator type for writing segment reductions
    typename ReductionOp,                   ///< Binary reduction operator type having member <tt>T operator()(const T &a, const T &b)</tt>
    typename OffsetT,                       ///< Signed integer type for global offsets
    typename Value>                         ///< Value type
__launch_bounds__ (BlockSegReduceRegionPolicy::BLOCK_THREADS)
__global__ void SegReduceRegionKernel(
    SegmentOffsetIterator       d_segment_end_offsets,  ///< [in] A sequence of \p num_segments segment end-offsets
    ValueIterator               d_values,               ///< [in] A sequence of \p num_values values
    OutputIteratorT             d_output,               ///< [out] A sequence of \p num_segments segment totals
    KeyValuePair<OffsetT, Value> *d_tuple_partials,     ///< [out] A sequence of (gridDim.x * 2) partial reduction tuples
    IndexPair<OffsetT>          *d_block_idx,
    OffsetT                     num_values,             ///< [in] Number of values to reduce
    OffsetT                     num_segments,           ///< [in] Number of segments being reduced
    Value                       identity,               ///< [in] Identity value (for zero-length segments)
    ReductionOp                 reduction_op,           ///< [in] Reduction operator
    GridEvenShare<OffsetT>      even_share)             ///< [in] Even-share descriptor for mapping an equal number of tiles onto each thread block
{
    typedef KeyValuePair<OffsetT, Value> KeyValuePair;

    // Specialize threadblock abstraction type for reducing a range of segmented values
    typedef BlockSegReduceRegion<
            BlockSegReduceRegionPolicy,
            SegmentOffsetIterator,
            ValueIterator,
            OutputIteratorT,
            ReductionOp,
            OffsetT>
        BlockSegReduceRegion;

    // Shared memory allocation
    __shared__ typename BlockSegReduceRegion::TempStorage temp_storage;

    // Initialize threadblock even-share to tell us where to start and stop our tile-processing
    even_share.BlockInit();

    // Construct
persistent thread block
    BlockSegReduceRegion thread_block(
        temp_storage,
        d_segment_end_offsets,
        d_values,
        d_output,
        d_block_idx,
        num_values,
        num_segments,
        identity,
        reduction_op);

    // First and last partial reduction tuples within the range (valid in thread-0)
    KeyValuePair first_tuple, last_tuple;

    // Consume block's region of work
    thread_block.ProcessRegion(
        even_share.block_offset,
        even_share.block_end,
        first_tuple,
        last_tuple);

    if (threadIdx.x == 0)
    {
        if (gridDim.x > 1)
        {
            // Special case where the first segment written and the carry-out are for the same segment
            if (first_tuple.key == last_tuple.key)
            {
                first_tuple.value = identity;
            }

            // Write the first and last partial products from this thread block so
            // that they can be subsequently "fixed up" in the next kernel.
            d_tuple_partials[blockIdx.x * 2]        = first_tuple;
            d_tuple_partials[(blockIdx.x * 2) + 1]  = last_tuple;
        }
    }
}


/**
 * Segmented reduce region kernel entry point (single-block): scans the per-block
 * partial tuples produced by SegReduceRegionKernel to fix up segments that span
 * multiple blocks.
 */
template <
    typename BlockSegReduceRegionByKeyPolicy,   ///< Parameterized BlockSegReduceRegionByKeyPolicy tuning policy
    typename InputIteratorT,                    ///< Random-access iterator referencing key-value input tuples
    typename OutputIteratorT,                   ///< Random-access iterator referencing segment output totals
    typename ReductionOp,                       ///< Binary reduction operator type having member <tt>T operator()(const T &a, const T &b)</tt>
    typename OffsetT,                           ///< Signed integer type for global offsets
    typename Value>                             ///< Value type
__launch_bounds__ (BlockSegReduceRegionByKeyPolicy::BLOCK_THREADS, 1)
__global__ void SegReduceRegionByKeyKernel(
    InputIteratorT          d_tuple_partials,   ///< [in] A sequence of partial reduction tuples
    OutputIteratorT         d_output,           ///< [out] A sequence of \p num_segments segment totals
    OffsetT                 num_segments,       ///< [in] Number of segments in the \p d_output sequence
    int                     num_tuple_partials, ///< [in] Number of partial reduction tuples being reduced
    Value                   identity,           ///< [in] Identity value (for zero-length segments)
    ReductionOp             reduction_op)       ///<
[in] Reduction operator
{
    // Specialize threadblock abstraction type for reducing a range of values by key
    typedef BlockSegReduceRegionByKey<
            BlockSegReduceRegionByKeyPolicy,
            InputIteratorT,
            OutputIteratorT,
            ReductionOp>
        BlockSegReduceRegionByKey;

    // Shared memory allocation
    __shared__ typename BlockSegReduceRegionByKey::TempStorage temp_storage;

    // Construct persistent thread block
    BlockSegReduceRegionByKey thread_block(
        temp_storage,
        d_tuple_partials,
        d_output,
        identity,
        reduction_op);

    // Process input tiles
    thread_block.ProcessRegion(
        0,                          // Region start
        num_tuple_partials,         // Region end
        0,                          // First segment ID
        num_segments);              // Last segment ID (one-past)
}


/******************************************************************************
 * Dispatch
 ******************************************************************************/

/**
 * Utility class for dispatching the appropriately-tuned kernels for DeviceReduce
 */
template <
    typename ValueIterator,             ///< Random-access input iterator type for reading values
    typename SegmentOffsetIterator,     ///< Random-access input iterator type for reading segment end-offsets
    typename OutputIteratorT,           ///< Random-access output iterator type for writing segment reductions
    typename ReductionOp,               ///< Binary reduction operator type having member <tt>T operator()(const T &a, const T &b)</tt>
    typename OffsetT>                   ///< Signed integer type for global offsets
struct DeviceSegReduceDispatch
{
    // Value type
    typedef typename std::iterator_traits<ValueIterator>::value_type Value;

    // Reduce-by-key data type tuple (segment-ID, value)
    typedef KeyValuePair<OffsetT, Value> KeyValuePair;

    // Index pair data type
    typedef IndexPair<OffsetT>IndexPair;


    /******************************************************************************
     * Tuning policies
     ******************************************************************************/

    /// SM35
    struct Policy350
    {
        // ReduceRegionPolicy
        typedef BlockSegReduceRegionPolicy<
                128,                            ///< Threads per thread block
                6,                              ///< Items per
// thread (per tile of input)
                true,                           ///< Whether or not to cache incoming segment offsets in shared memory before reducing each tile
                false,                          ///< Whether or not to cache incoming values in shared memory before reducing each tile
                LOAD_DEFAULT,                   ///< Cache load modifier for reading segment offsets
                LOAD_LDG,                       ///< Cache load modifier for reading values
                BLOCK_REDUCE_RAKING,            ///< The BlockReduce algorithm to use
                BLOCK_SCAN_WARP_SCANS>          ///< The BlockScan algorithm to use
            SegReduceRegionPolicy;

        // ReduceRegionByKeyPolicy
        typedef BlockSegReduceRegionByKeyPolicy<
                256,                            ///< Threads per thread block
                9,                              ///< Items per thread (per tile of input)
                BLOCK_LOAD_DIRECT,              ///< The BlockLoad algorithm to use
                false,                          ///< Whether or not only one warp's worth of shared memory should be allocated and time-sliced among block-warps during any load-related data transpositions (versus each warp having its own storage)
                LOAD_LDG,                       ///< Cache load modifier for reading input elements
                BLOCK_SCAN_WARP_SCANS>          ///< The BlockScan algorithm to use
            SegReduceRegionByKeyPolicy;
    };


    /// SM10
    struct Policy100
    {
        // ReduceRegionPolicy
        typedef BlockSegReduceRegionPolicy<
                128,                            ///< Threads per thread block
                3,                              ///< Items per thread (per tile of input)
                false,                          ///< Whether or not to cache incoming segment offsets in shared memory before reducing each tile
                false,                          ///< Whether or not to cache incoming values in shared memory before reducing each tile
                LOAD_DEFAULT,                   ///< Cache load modifier for reading segment offsets
                LOAD_DEFAULT,                   ///< Cache load modifier for reading values
                BLOCK_REDUCE_RAKING,            ///< The BlockReduce algorithm to use
                BLOCK_SCAN_RAKING>              ///< The BlockScan algorithm to use
            SegReduceRegionPolicy;

        // ReduceRegionByKeyPolicy
        typedef BlockSegReduceRegionByKeyPolicy<
                128,                            ///< Threads per thread block
                3,                              ///< Items per thread (per tile of input)
                BLOCK_LOAD_WARP_TRANSPOSE,      ///< The BlockLoad algorithm to use
                false,                          ///< Whether or not only one warp's worth of shared memory should be allocated and time-sliced among
block-warps during any load-related data transpositions (versus each warp having its own storage)
                LOAD_DEFAULT,                   ///< Cache load modifier for reading input elements
                BLOCK_SCAN_WARP_SCANS>          ///< The BlockScan algorithm to use
            SegReduceRegionByKeyPolicy;
    };


    /******************************************************************************
     * Tuning policies of current PTX compiler pass
     ******************************************************************************/

#if (CUB_PTX_ARCH >= 350)
    typedef Policy350 PtxPolicy;
/*
#elif (CUB_PTX_ARCH >= 300)
    typedef Policy300 PtxPolicy;
#elif (CUB_PTX_ARCH >= 200)
    typedef Policy200 PtxPolicy;
#elif (CUB_PTX_ARCH >= 130)
    typedef Policy130 PtxPolicy;
*/
#else
    typedef Policy100 PtxPolicy;
#endif

    // "Opaque" policies (whose parameterizations aren't reflected in the type signature)
    struct PtxSegReduceRegionPolicy         : PtxPolicy::SegReduceRegionPolicy {};
    struct PtxSegReduceRegionByKeyPolicy    : PtxPolicy::SegReduceRegionByKeyPolicy {};


    /******************************************************************************
     * Utilities
     ******************************************************************************/

    /**
     * Initialize kernel dispatch configurations with the policies corresponding to the PTX assembly we will use
     */
    template <
        typename SegReduceKernelConfig,
        typename SegReduceByKeyKernelConfig>
    __host__ __device__ __forceinline__
    static void InitConfigs(
        int                         ptx_version,
        SegReduceKernelConfig       &seg_reduce_region_config,
        SegReduceByKeyKernelConfig  &seg_reduce_region_by_key_config)
    {
    #if (CUB_PTX_ARCH > 0)

        // We're on the device, so initialize the kernel dispatch configurations with the current PTX policy
        seg_reduce_region_config.Init<PtxSegReduceRegionPolicy>();
        seg_reduce_region_by_key_config.Init<PtxSegReduceRegionByKeyPolicy>();

    #else

        // We're on the host, so lookup and initialize the kernel dispatch configurations with the policies that match the device's PTX version
        if (ptx_version >= 350)
        {
            seg_reduce_region_config.template           Init<typename
Policy350::SegReduceRegionPolicy>();
            seg_reduce_region_by_key_config.template    Init<typename Policy350::SegReduceRegionByKeyPolicy>();
        }
/*
        else if (ptx_version >= 300)
        {
            seg_reduce_region_config.template           Init<typename Policy300::SegReduceRegionPolicy>();
            seg_reduce_region_by_key_config.template    Init<typename Policy300::SegReduceRegionByKeyPolicy>();
        }
        else if (ptx_version >= 200)
        {
            seg_reduce_region_config.template           Init<typename Policy200::SegReduceRegionPolicy>();
            seg_reduce_region_by_key_config.template    Init<typename Policy200::SegReduceRegionByKeyPolicy>();
        }
        else if (ptx_version >= 130)
        {
            seg_reduce_region_config.template           Init<typename Policy130::SegReduceRegionPolicy>();
            seg_reduce_region_by_key_config.template    Init<typename Policy130::SegReduceRegionByKeyPolicy>();
        }
*/
        else
        {
            seg_reduce_region_config.template           Init<typename Policy100::SegReduceRegionPolicy>();
            seg_reduce_region_by_key_config.template    Init<typename Policy100::SegReduceRegionByKeyPolicy>();
        }

    #endif
    }


    /**
     * SegReduceRegionKernel kernel dispatch configuration
     */
    struct SegReduceKernelConfig
    {
        int                     block_threads;
        int                     items_per_thread;
        bool                    use_smem_segment_cache;
        bool                    use_smem_value_cache;
        CacheLoadModifier       load_modifier_segments;
        CacheLoadModifier       load_modifier_values;
        BlockReduceAlgorithm    reduce_algorithm;
        BlockScanAlgorithm      scan_algorithm;

        template <typename SegReduceRegionPolicy>
        __host__ __device__ __forceinline__
        void Init()
        {
            block_threads               = SegReduceRegionPolicy::BLOCK_THREADS;
            items_per_thread            = SegReduceRegionPolicy::ITEMS_PER_THREAD;
            use_smem_segment_cache      = SegReduceRegionPolicy::USE_SMEM_SEGMENT_CACHE;
            use_smem_value_cache        = SegReduceRegionPolicy::USE_SMEM_VALUE_CACHE;
            load_modifier_segments      = SegReduceRegionPolicy::LOAD_MODIFIER_SEGMENTS;
            load_modifier_values        = SegReduceRegionPolicy::LOAD_MODIFIER_VALUES;
            reduce_algorithm            = SegReduceRegionPolicy::REDUCE_ALGORITHM;
            scan_algorithm              = SegReduceRegionPolicy::SCAN_ALGORITHM;
        }
    };

    /**
     * SegReduceRegionByKeyKernel kernel dispatch
configuration
     */
    struct SegReduceByKeyKernelConfig
    {
        int                     block_threads;
        int                     items_per_thread;
        BlockLoadAlgorithm      load_algorithm;
        bool                    load_warp_time_slicing;
        CacheLoadModifier       load_modifier;
        BlockScanAlgorithm      scan_algorithm;

        template <typename SegReduceRegionByKeyPolicy>
        __host__ __device__ __forceinline__
        void Init()
        {
            block_threads               = SegReduceRegionByKeyPolicy::BLOCK_THREADS;
            items_per_thread            = SegReduceRegionByKeyPolicy::ITEMS_PER_THREAD;
            load_algorithm              = SegReduceRegionByKeyPolicy::LOAD_ALGORITHM;
            load_warp_time_slicing      = SegReduceRegionByKeyPolicy::LOAD_WARP_TIME_SLICING;
            load_modifier               = SegReduceRegionByKeyPolicy::LOAD_MODIFIER;
            scan_algorithm              = SegReduceRegionByKeyPolicy::SCAN_ALGORITHM;
        }
    };


    /******************************************************************************
     * Dispatch entrypoints
     ******************************************************************************/

    /**
     * Internal dispatch routine for computing a device-wide segmented reduction.
     */
    template <
        typename                        SegReducePartitionKernelPtr,
        typename                        SegReduceRegionKernelPtr,           ///< Function type of cub::SegReduceRegionKernel
        typename                        SegReduceRegionByKeyKernelPtr>      ///< Function type of cub::SegReduceRegionByKeyKernel
    __host__ __device__ __forceinline__
    static cudaError_t Dispatch(
        void*                           d_temp_storage,                     ///< [in] %Device allocation of temporary storage.  When NULL, the required allocation size is returned in \p temp_storage_bytes and no work is done.
        size_t                          &temp_storage_bytes,                ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation.
ValueIterator d_values, ///< [in] A sequence of \p num_values data to reduce SegmentOffsetIterator d_segment_offsets, ///< [in] A sequence of (\p num_segments + 1) segment offsets OutputIteratorT d_output, ///< [out] A sequence of \p num_segments segment totals OffsetT num_values, ///< [in] Total number of values to reduce OffsetT num_segments, ///< [in] Number of segments being reduced Value identity, ///< [in] Identity value (for zero-length segments) ReductionOp reduction_op, ///< [in] Reduction operator cudaStream_t stream, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous, ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. int sm_version, ///< [in] SM version of target device to use when computing SM occupancy SegReducePartitionKernelPtr seg_reduce_partition_kernel, ///< [in] Kernel function pointer to parameterization of cub::SegReduceRegionKernel SegReduceRegionKernelPtr seg_reduce_region_kernel, ///< [in] Kernel function pointer to parameterization of cub::SegReduceRegionKernel SegReduceRegionByKeyKernelPtr seg_reduce_region_by_key_kernel, ///< [in] Kernel function pointer to parameterization of cub::SegReduceRegionByKeyKernel SegReduceKernelConfig &seg_reduce_region_config, ///< [in] Dispatch parameters that match the policy that \p seg_reduce_region_kernel was compiled for SegReduceByKeyKernelConfig &seg_reduce_region_by_key_config) ///< [in] Dispatch parameters that match the policy that \p seg_reduce_region_by_key_kernel was compiled for { #ifndef CUB_RUNTIME_ENABLED // Kernel launch not supported from this device return CubDebug(cudaErrorNotSupported ); #else cudaError error = cudaSuccess; do { // Dispatch two kernels: (1) a multi-block segmented reduction // to reduce regions by block, and (2) a single-block reduce-by-key kernel // to 
"fix up" segments spanning more than one region. // Tile size of seg_reduce_region_kernel int tile_size = seg_reduce_region_config.block_threads * seg_reduce_region_config.items_per_thread; // Get device ordinal int device_ordinal; if (CubDebug(error = cudaGetDevice(&device_ordinal))) break; // Get SM count int sm_count; if (CubDebug(error = cudaDeviceGetAttribute (&sm_count, cudaDevAttrMultiProcessorCount, device_ordinal))) break; // Get SM occupancy for histogram_region_kernel int seg_reduce_region_sm_occupancy; if (CubDebug(error = MaxSmOccupancy( seg_reduce_region_sm_occupancy, sm_version, seg_reduce_region_kernel, seg_reduce_region_config.block_threads))) break; // Get device occupancy for histogram_region_kernel int seg_reduce_region_occupancy = seg_reduce_region_sm_occupancy * sm_count; // Even-share work distribution int num_diagonals = num_values + num_segments; // Total number of work items int subscription_factor = seg_reduce_region_sm_occupancy; // Amount of CTAs to oversubscribe the device beyond actively-resident (heuristic) int max_grid_size = seg_reduce_region_occupancy * subscription_factor; GridEvenShare<OffsetT>even_share( num_diagonals, max_grid_size, tile_size); // Get grid size for seg_reduce_region_kernel int seg_reduce_region_grid_size = even_share.grid_size; // Number of "fix-up" reduce-by-key tuples (2 per thread block) int num_tuple_partials = seg_reduce_region_grid_size * 2; int num_partition_samples = seg_reduce_region_grid_size + 1; // Temporary storage allocation requirements void* allocations[2]; size_t allocation_sizes[2] = { num_tuple_partials * sizeof(KeyValuePair), // bytes needed for "fix-up" reduce-by-key tuples num_partition_samples * sizeof(IndexPair), // bytes needed block indices }; // Alias the temporary allocations from the single storage blob (or set the necessary size of the blob) if (CubDebug(error = AliasTemporaries(d_temp_storage, temp_storage_bytes, allocations, allocation_sizes))) break; if (d_temp_storage == NULL) 
{ // Return if the caller is simply requesting the size of the storage allocation return cudaSuccess; } // Alias the allocations KeyValuePair *d_tuple_partials = (KeyValuePair*) allocations[0]; // "fix-up" tuples IndexPair *d_block_idx = (IndexPair *) allocations[1]; // block starting/ending indices // Array of segment end-offsets SegmentOffsetIterator d_segment_end_offsets = d_segment_offsets + 1; // Grid launch params for seg_reduce_partition_kernel int partition_block_size = 32; int partition_grid_size = (num_partition_samples + partition_block_size - 1) / partition_block_size; // Partition work among multiple thread blocks if necessary if (seg_reduce_region_grid_size > 1) { // Log seg_reduce_partition_kernel configuration if (debug_synchronous) _CubLog("Invoking seg_reduce_partition_kernel<<<%d, %d, 0, %lld>>>()\n", partition_grid_size, partition_block_size, (long long) stream); // Invoke seg_reduce_partition_kernel seg_reduce_partition_kernel<<<partition_grid_size, partition_block_size, 0, stream>>>( d_segment_end_offsets, ///< [in] A sequence of \p num_segments segment end-offsets d_block_idx, num_partition_samples, num_values, ///< [in] Number of values to reduce num_segments, ///< [in] Number of segments being reduced even_share); ///< [in] Even-share descriptor for mapping an equal number of tiles onto each thread block // Sync the stream if specified if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break; } // Log seg_reduce_region_kernel configuration if (debug_synchronous) _CubLog("Invoking seg_reduce_region_kernel<<<%d, %d, 0, %lld>>>(), %d items per thread, %d SM occupancy\n", seg_reduce_region_grid_size, seg_reduce_region_config.block_threads, (long long) stream, seg_reduce_region_config.items_per_thread, seg_reduce_region_sm_occupancy); // Mooch if (CubDebug(error = cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte))) break; // Invoke seg_reduce_region_kernel seg_reduce_region_kernel<<<seg_reduce_region_grid_size, 
seg_reduce_region_config.block_threads, 0, stream>>>( d_segment_end_offsets, d_values, d_output, d_tuple_partials, d_block_idx, num_values, num_segments, identity, reduction_op, even_share); // Sync the stream if specified if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break; /* // Perform "fix-up" of region partial reductions if grid size is greater than one thread block if (seg_reduce_region_grid_size > 1) { // Log seg_reduce_region_by_key_kernel configuration if (debug_synchronous) _CubLog("Invoking seg_reduce_region_by_key_kernel<<<%d, %d, 0, %lld>>>(), %d items per thread\n", 1, seg_reduce_region_by_key_config.block_threads, (long long) stream, seg_reduce_region_by_key_config.items_per_thread); // Invoke seg_reduce_region_by_key_kernel seg_reduce_region_by_key_kernel<<<1, seg_reduce_region_by_key_config.block_threads, 0, stream>>>( d_tuple_partials, d_output, num_segments, num_tuple_partials, identity, reduction_op); // Sync the stream if specified if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break; } */ } while (0); return error; #endif // CUB_RUNTIME_ENABLED } /** * Internal dispatch routine for computing a device-wide segmented reduction. */ __host__ __device__ __forceinline__ static cudaError_t Dispatch( void* d_temp_storage, ///< [in] %Device allocation of temporary storage. When NULL, the required allocation size is returned in \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation. 
ValueIterator d_values, ///< [in] A sequence of \p num_values data to reduce SegmentOffsetIterator d_segment_offsets, ///< [in] A sequence of (\p num_segments + 1) segment offsets OutputIteratorT d_output, ///< [out] A sequence of \p num_segments segment totals OffsetT num_values, ///< [in] Total number of values to reduce OffsetT num_segments, ///< [in] Number of segments being reduced Value identity, ///< [in] Identity value (for zero-length segments) ReductionOp reduction_op, ///< [in] Reduction operator cudaStream_t stream, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. { cudaError error = cudaSuccess; do { // Get PTX version int ptx_version; #if (CUB_PTX_ARCH == 0) if (CubDebug(error = PtxVersion(ptx_version))) break; #else ptx_version = CUB_PTX_ARCH; #endif // Get kernel kernel dispatch configurations SegReduceKernelConfig seg_reduce_region_config; SegReduceByKeyKernelConfig seg_reduce_region_by_key_config; InitConfigs(ptx_version, seg_reduce_region_config, seg_reduce_region_by_key_config); // Dispatch if (CubDebug(error = Dispatch( d_temp_storage, temp_storage_bytes, d_values, d_segment_offsets, d_output, num_values, num_segments, identity, reduction_op, stream, debug_synchronous, ptx_version, // Use PTX version instead of SM version because, as a statically known quantity, this improves device-side launch dramatically but at the risk of imprecise occupancy calculation for mismatches SegReducePartitionKernel<SegmentOffsetIterator, OffsetT>, SegReduceRegionKernel<PtxSegReduceRegionPolicy, SegmentOffsetIterator, ValueIterator, OutputIteratorT, ReductionOp, OffsetT, Value>, SegReduceRegionByKeyKernel<PtxSegReduceRegionByKeyPolicy, KeyValuePair*, OutputIteratorT, ReductionOp, OffsetT, Value>, 
seg_reduce_region_config, seg_reduce_region_by_key_config))) break; } while (0); return error; } }; /****************************************************************************** * DeviceSegReduce *****************************************************************************/ /** * \brief DeviceSegReduce provides operations for computing a device-wide, parallel segmented reduction across a sequence of data items residing within global memory. * \ingroup DeviceModule * * \par Overview * A <a href="http://en.wikipedia.org/wiki/Reduce_(higher-order_function)"><em>reduction</em></a> (or <em>fold</em>) * uses a binary combining operator to compute a single aggregate from a list of input elements. * * \par Usage Considerations * \cdp_class{DeviceReduce} * */ struct DeviceSegReduce { /** * \brief Computes a device-wide segmented reduction using the specified binary \p reduction_op functor. * * \par * Does not support non-commutative reduction operators. * * \devicestorage * * \cdp * * \iterator * * \tparam ValueIterator <b>[inferred]</b> Random-access input iterator type for reading values * \tparam SegmentOffsetIterator <b>[inferred]</b> Random-access input iterator type for reading segment end-offsets * \tparam OutputIteratorT <b>[inferred]</b> Random-access output iterator type for writing segment reductions * \tparam Value <b>[inferred]</b> Value type * \tparam ReductionOp <b>[inferred]</b> Binary reduction operator type having member <tt>T operator()(const T &a, const T &b)</tt> */ template < typename ValueIterator, typename SegmentOffsetIterator, typename OutputIteratorT, typename Value, typename ReductionOp> __host__ __device__ __forceinline__ static cudaError_t Reduce( void* d_temp_storage, ///< [in] %Device allocation of temporary storage. When NULL, the required allocation size is returned in \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation. 
ValueIterator d_values, ///< [in] A sequence of \p num_values data to reduce SegmentOffsetIterator d_segment_offsets, ///< [in] A sequence of (\p num_segments + 1) segment offsets OutputIteratorT d_output, ///< [out] A sequence of \p num_segments segment totals int num_values, ///< [in] Total number of values to reduce int num_segments, ///< [in] Number of segments being reduced Value identity, ///< [in] Identity value (for zero-length segments) ReductionOp reduction_op, ///< [in] Reduction operator cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. { // Signed integer type for global offsets typedef int OffsetT; typedef DeviceSegReduceDispatch< ValueIterator, SegmentOffsetIterator, OutputIteratorT, ReductionOp, OffsetT> DeviceSegReduceDispatch; return DeviceSegReduceDispatch::Dispatch( d_temp_storage, temp_storage_bytes, d_values, d_segment_offsets, d_output, num_values, num_segments, identity, reduction_op, stream, debug_synchronous); } /** * \brief Computes a device-wide segmented sum using the addition ('+') operator. * * \par * Does not support non-commutative summation. * * \devicestorage * * \cdp * * \iterator * * \tparam ValueIterator <b>[inferred]</b> Random-access input iterator type for reading values * \tparam SegmentOffsetIterator <b>[inferred]</b> Random-access input iterator type for reading segment end-offsets * \tparam OutputIteratorT <b>[inferred]</b> Random-access output iterator type for writing segment reductions */ template < typename ValueIterator, typename SegmentOffsetIterator, typename OutputIteratorT> __host__ __device__ __forceinline__ static cudaError_t Sum( void* d_temp_storage, ///< [in] %Device allocation of temporary storage. 
When NULL, the required allocation size is returned in \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation. ValueIterator d_values, ///< [in] A sequence of \p num_values data to reduce SegmentOffsetIterator d_segment_offsets, ///< [in] A sequence of (\p num_segments + 1) segment offsets OutputIteratorT d_output, ///< [out] A sequence of \p num_segments segment totals int num_values, ///< [in] Total number of values to reduce int num_segments, ///< [in] Number of segments being reduced cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. { // Signed integer type for global offsets typedef int OffsetT; // Value type typedef typename std::iterator_traits<ValueIterator>::value_type Value; Value identity = Value(); cub::Sum reduction_op; typedef DeviceSegReduceDispatch< ValueIterator, SegmentOffsetIterator, OutputIteratorT, cub::Sum, OffsetT> DeviceSegReduceDispatch; return DeviceSegReduceDispatch::Dispatch( d_temp_storage, temp_storage_bytes, d_values, d_segment_offsets, d_output, num_values, num_segments, identity, reduction_op, stream, debug_synchronous); } }; //--------------------------------------------------------------------- // Test generation //--------------------------------------------------------------------- /** * Initialize problem */ template <typename OffsetT, typename Value> void Initialize( GenMode gen_mode, Value *h_values, vector<OffsetT> &segment_offsets, int num_values, int avg_segment_size) { // Initialize values // if (g_verbose) printf("Values: "); for (int i = 0; i < num_values; ++i) { InitValue(gen_mode, h_values[i], i); // if (g_verbose) std::cout << 
h_values[i] << ", "; } // if (g_verbose) printf("\n\n"); // Initialize segment lengths const unsigned int MAX_INTEGER = -1u; const unsigned int MAX_SEGMENT_LENGTH = avg_segment_size * 2; const double SCALE_FACTOR = double(MAX_SEGMENT_LENGTH) / double(MAX_INTEGER); segment_offsets.push_back(0); OffsetT consumed = 0; OffsetT remaining = num_values; while (remaining > 0) { // Randomly sample a 32-bit unsigned int unsigned int segment_length; RandomBits(segment_length); // Scale to maximum segment length segment_length = (unsigned int) (double(segment_length) * SCALE_FACTOR); segment_length = CUB_MIN(segment_length, remaining); consumed += segment_length; remaining -= segment_length; segment_offsets.push_back(consumed); } } /** * Compute reference answer */ template <typename OffsetT, typename Value> void ComputeReference( Value *h_values, OffsetT *h_segment_offsets, Value *h_reference, int num_segments, Value identity) { if (g_verbose) printf("%d segment reductions: ", num_segments); for (int segment = 0; segment < num_segments; ++segment) { h_reference[segment] = identity; for (int i = h_segment_offsets[segment]; i < h_segment_offsets[segment + 1]; ++i) { h_reference[segment] += h_values[i]; } if (g_verbose) std::cout << h_reference[segment] << ", "; } if (g_verbose) printf("\n\n"); } /** * Simple test of device */ template < bool CDP, typename OffsetT, typename Value, typename ReductionOp> void Test( OffsetT num_values, int avg_segment_size, ReductionOp reduction_op, Value identity, char* type_string) { Value *h_values = NULL; Value *h_reference = NULL; OffsetT *h_segment_offsets = NULL; printf("%d\n", num_values); // Initialize problem on host h_values = new Value[num_values]; vector<OffsetT> segment_offsets; Initialize(UNIFORM, h_values, segment_offsets, num_values, avg_segment_size); // Allocate simple offsets array and copy STL vector into it h_segment_offsets = new OffsetT[segment_offsets.size()]; for (int i = 0; i < segment_offsets.size(); ++i) 
h_segment_offsets[i] = segment_offsets[i]; OffsetT num_segments = segment_offsets.size() - 1; if (g_verbose) { printf("%d segment offsets: ", num_segments); for (int i = 0; i < num_segments; ++i) std::cout << h_segment_offsets[i] << "(" << h_segment_offsets[i + 1] - h_segment_offsets[i] << "), "; if (g_verbose) std::cout << std::endl << std::endl; } // Solve problem on host h_reference = new Value[num_segments]; ComputeReference(h_values, h_segment_offsets, h_reference, num_segments, identity); printf("\n\n%s cub::DeviceSegReduce::%s %d items (%d-byte %s), %d segments (%d-byte offset indices)\n", (CDP) ? "CDP device invoked" : "Host-invoked", (Equals<ReductionOp, Sum>::VALUE) ? "Sum" : "Reduce", num_values, (int) sizeof(Value), type_string, num_segments, (int) sizeof(OffsetT)); fflush(stdout); // Allocate and initialize problem on device Value *d_values = NULL; OffsetT *d_segment_offsets = NULL; Value *d_output = NULL; CubDebugExit(g_allocator.DeviceAllocate((void**)&d_values, sizeof(Value) * num_values)); CubDebugExit(g_allocator.DeviceAllocate((void**)&d_segment_offsets, sizeof(OffsetT) * (num_segments + 1))); CubDebugExit(g_allocator.DeviceAllocate((void**)&d_output, sizeof(Value) * num_segments)); CubDebugExit(cudaMemcpy(d_values, h_values, sizeof(Value) * num_values, cudaMemcpyHostToDevice)); CubDebugExit(cudaMemcpy(d_segment_offsets, h_segment_offsets, sizeof(OffsetT) * (num_segments + 1), cudaMemcpyHostToDevice)); // Request and allocate temporary storage void *d_temp_storage = NULL; size_t temp_storage_bytes = 0; CubDebugExit(DeviceSegReduce::Sum(d_temp_storage, temp_storage_bytes, d_values, d_segment_offsets, d_output, num_values, num_segments, 0, false)); CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes)); // Clear device output CubDebugExit(cudaMemset(d_output, 0, sizeof(Value) * num_segments)); // Run warmup/correctness iteration CubDebugExit(DeviceSegReduce::Sum(d_temp_storage, temp_storage_bytes, d_values, d_segment_offsets, 
d_output, num_values, num_segments, 0, true)); // Check for correctness (and display results, if specified) int compare = CompareDeviceResults(h_reference, d_output, num_segments, true, g_verbose); printf("\t%s", compare ? "FAIL" : "PASS"); // Flush any stdout/stderr fflush(stdout); fflush(stderr); // Performance GpuTimer gpu_timer; gpu_timer.Start(); for (int i = 0; i < g_timing_iterations; ++i) { CubDebugExit(DeviceSegReduce::Sum(d_temp_storage, temp_storage_bytes, d_values, d_segment_offsets, d_output, num_values, num_segments, 0, false)); } gpu_timer.Stop(); float elapsed_millis = gpu_timer.ElapsedMillis(); // Display performance if (g_timing_iterations > 0) { float avg_millis = elapsed_millis / g_timing_iterations; float giga_rate = float(num_values) / avg_millis / 1000.0 / 1000.0; float giga_bandwidth = giga_rate * printf(", %.3f avg ms, %.3f billion items/s, %.3f logical GB/s", avg_millis, giga_rate, giga_bandwidth); } // Device cleanup if (d_values) CubDebugExit(g_allocator.DeviceFree(d_values)); if (d_segment_offsets) CubDebugExit(g_allocator.DeviceFree(d_segment_offsets)); if (d_output) CubDebugExit(g_allocator.DeviceFree(d_output)); if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage)); // Host cleanup if (h_values) delete[] h_values; if (h_segment_offsets) delete[] h_segment_offsets; if (h_reference) delete[] h_reference; } /** * Main */ int main(int argc, char** argv) { int num_values = 32 * 1024 * 1024; int avg_segment_size = 500; // Initialize command line CommandLineArgs args(argc, argv); g_verbose = args.CheckCmdLineFlag("v"); args.GetCmdLineArgument("n", num_values); args.GetCmdLineArgument("ss", avg_segment_size); args.GetCmdLineArgument("i", g_timing_iterations); // Print usage if (args.CheckCmdLineFlag("help")) { printf("%s " "[--device=<device-id>] " "[--v] " "[--i=<timing iterations>] " "[--n=<input samples>]\n" "[--ss=<average segment size>]\n" "\n", argv[0]); exit(0); } // Initialize device 
CubDebugExit(args.DeviceInit()); Test<false>((int) num_values, avg_segment_size, Sum(), (long long) 0, CUB_TYPE_STRING(long long)); return 0; }
the_stack
#include "utility.hpp" using namespace ppl::common; namespace ppl { namespace cv { namespace cuda { #define MAX_BLOCKS 128 __global__ void unmaskedCalcHistKernel0(const uchar* src, int size, int* histogram) { __shared__ int local_histogram[256]; int element_x = (blockIdx.x << 8) + threadIdx.x; int index_x = element_x << 2; int grid_offset = gridDim.x * 1024; int index = threadIdx.y * blockDim.x + threadIdx.x; local_histogram[index] = 0; __syncthreads(); uint* input = (uint*)src; for (; index_x < size; index_x += grid_offset) { if (index_x < size) { uint value = input[element_x]; if (index_x < size - 3) { atomicAdd(&local_histogram[(value >> 0) & 0xFFU], 1); atomicAdd(&local_histogram[(value >> 8) & 0xFFU], 1); atomicAdd(&local_histogram[(value >> 16) & 0xFFU], 1); atomicAdd(&local_histogram[(value >> 24) & 0xFFU], 1); } else { atomicAdd(&local_histogram[(value >> 0) & 0xFFU], 1); if (index_x < size - 1) { atomicAdd(&local_histogram[(value >> 8) & 0xFFU], 1); } if (index_x < size - 2) { atomicAdd(&local_histogram[(value >> 16) & 0xFFU], 1); } if (index_x < size - 3) { atomicAdd(&local_histogram[(value >> 24) & 0xFFU], 1); } } } input += (grid_offset >> 2); } __syncthreads(); int count = local_histogram[index]; if (count > 0) { atomicAdd(histogram + index, count); } } __global__ void unmaskedCalcHistKernel1(const uchar* src, int rows, int cols, int src_stride, int* histogram) { __shared__ int local_histogram[256]; int element_x = (blockIdx.x << kBlockShiftX1) + threadIdx.x; int element_y = (blockIdx.y << kBlockShiftY1) + threadIdx.y; int index_x = element_x << 2; int index = threadIdx.y * blockDim.x + threadIdx.x; local_histogram[index] = 0; __syncthreads(); uint* input; for (; element_y < rows; element_y += gridDim.y * blockDim.y) { if (index_x < cols) { input = (uint*)((uchar*)src + element_y * src_stride); uint value = input[element_x]; if (index_x < cols - 3) { atomicAdd(&local_histogram[(value >> 0) & 0xFFU], 1); atomicAdd(&local_histogram[(value >> 8) & 0xFFU], 
1); atomicAdd(&local_histogram[(value >> 16) & 0xFFU], 1); atomicAdd(&local_histogram[(value >> 24) & 0xFFU], 1); } else { atomicAdd(&local_histogram[(value >> 0) & 0xFFU], 1); if (index_x < cols - 1) { atomicAdd(&local_histogram[(value >> 8) & 0xFFU], 1); } if (index_x < cols - 2) { atomicAdd(&local_histogram[(value >> 16) & 0xFFU], 1); } if (index_x < cols - 3) { atomicAdd(&local_histogram[(value >> 24) & 0xFFU], 1); } } } } __syncthreads(); int count = local_histogram[index]; if (count > 0) { atomicAdd(histogram + index, count); } } __global__ void unmaskedCalcHistKernel2(const uchar* src, int rows, int cols, int src_stride, int* histogram) { __shared__ int local_histogram[256]; int element_x = ((blockIdx.x << kBlockShiftX1) + threadIdx.x) << 2; int element_y = (blockIdx.y << kBlockShiftY1) + threadIdx.y; int index = threadIdx.y * blockDim.x + threadIdx.x; local_histogram[index] = 0; __syncthreads(); uchar* input; uchar value0, value1, value2, value3; for (; element_y < rows; element_y += gridDim.y * blockDim.y) { if (element_x < cols) { input = (uchar*)src + element_y * src_stride; if (element_x < cols - 3) { value0 = input[element_x]; value1 = input[element_x + 1]; value2 = input[element_x + 2]; value3 = input[element_x + 3]; atomicAdd(&local_histogram[value0], 1); atomicAdd(&local_histogram[value1], 1); atomicAdd(&local_histogram[value2], 1); atomicAdd(&local_histogram[value3], 1); } else { value0 = input[element_x]; if (element_x < cols - 1) { value1 = input[element_x + 1]; } if (element_x < cols - 2) { value2 = input[element_x + 2]; } atomicAdd(&local_histogram[value0], 1); if (element_x < cols - 1) { atomicAdd(&local_histogram[value1], 1); } if (element_x < cols - 2) { atomicAdd(&local_histogram[value2], 1); } } } } __syncthreads(); int count = local_histogram[index]; if (count > 0) { atomicAdd(histogram + index, count); } } __global__ void maskedCalcHistKernel0(const uchar* src, int size, const uchar* mask, int* histogram) { __shared__ int 
local_histogram[256]; int element_x = (blockIdx.x << 8) + threadIdx.x; int index_x = element_x << 2; int grid_offset = gridDim.x * 1024; int index = threadIdx.y * blockDim.x + threadIdx.x; local_histogram[index] = 0; __syncthreads(); uint* input = (uint*)src; uint* mask_start = (uint*)mask; for (; index_x < size; index_x += grid_offset) { if (index_x < size) { uint src_value = input[element_x]; uint mask_value = mask_start[element_x]; if (index_x < size - 3) { if ((mask_value >> 0) & 0xFFU) { atomicAdd(&local_histogram[(src_value >> 0) & 0xFFU], 1); } if ((mask_value >> 8) & 0xFFU) { atomicAdd(&local_histogram[(src_value >> 8) & 0xFFU], 1); } if ((mask_value >> 16) & 0xFFU) { atomicAdd(&local_histogram[(src_value >> 16) & 0xFFU], 1); } if ((mask_value >> 24) & 0xFFU) { atomicAdd(&local_histogram[(src_value >> 24) & 0xFFU], 1); } } else { if ((mask_value >> 0) & 0xFFU) { atomicAdd(&local_histogram[(src_value >> 0) & 0xFFU], 1); } if ((mask_value >> 8) & 0xFFU && index_x < size - 1) { atomicAdd(&local_histogram[(src_value >> 8) & 0xFFU], 1); } if ((mask_value >> 16) & 0xFFU && index_x < size - 2) { atomicAdd(&local_histogram[(src_value >> 16) & 0xFFU], 1); } if ((mask_value >> 24) & 0xFFU && index_x < size - 3) { atomicAdd(&local_histogram[(src_value >> 24) & 0xFFU], 1); } } } input += (grid_offset >> 2); mask_start += (grid_offset >> 2); } __syncthreads(); int count = local_histogram[index]; if (count > 0) { atomicAdd(histogram + index, count); } } __global__ void maskedCalcHistKernel1(const uchar* src, int rows, int cols, int src_stride, const uchar* mask, int mask_stride, int* histogram) { __shared__ int local_histogram[256]; int element_x = (blockIdx.x << kBlockShiftX1) + threadIdx.x; int element_y = (blockIdx.y << kBlockShiftY1) + threadIdx.y; int index_x = element_x << 2; int index = threadIdx.y * blockDim.x + threadIdx.x; local_histogram[index] = 0; __syncthreads(); uint* input; uint* mask_start; for (; element_y < rows; element_y += gridDim.y * blockDim.y) { 
if (index_x < cols) { input = (uint*)((uchar*)src + element_y * src_stride); mask_start = (uint*)((uchar*)mask + element_y * mask_stride); uint src_value = input[element_x]; uint mask_value = mask_start[element_x]; if (index_x < cols - 3) { if ((mask_value >> 0) & 0xFFU) { atomicAdd(&local_histogram[(src_value >> 0) & 0xFFU], 1); } if ((mask_value >> 8) & 0xFFU) { atomicAdd(&local_histogram[(src_value >> 8) & 0xFFU], 1); } if ((mask_value >> 16) & 0xFFU) { atomicAdd(&local_histogram[(src_value >> 16) & 0xFFU], 1); } if ((mask_value >> 24) & 0xFFU) { atomicAdd(&local_histogram[(src_value >> 24) & 0xFFU], 1); } } else { if ((mask_value >> 0) & 0xFFU) { atomicAdd(&local_histogram[(src_value >> 0) & 0xFFU], 1); } if ((mask_value >> 8) & 0xFFU && index_x < cols - 1) { atomicAdd(&local_histogram[(src_value >> 8) & 0xFFU], 1); } if ((mask_value >> 16) & 0xFFU && index_x < cols - 2) { atomicAdd(&local_histogram[(src_value >> 16) & 0xFFU], 1); } if ((mask_value >> 24) & 0xFFU && index_x < cols - 3) { atomicAdd(&local_histogram[(src_value >> 24) & 0xFFU], 1); } } } } __syncthreads(); int count = local_histogram[index]; if (count > 0) { atomicAdd(histogram + index, count); } } __global__ void maskedCalcHistKernel2(const uchar* src, int rows, int cols, int src_stride, const uchar* mask, int mask_stride, int* histogram) { __shared__ int local_histogram[256]; int element_x = ((blockIdx.x << kBlockShiftX1) + threadIdx.x) << 2; int element_y = (blockIdx.y << kBlockShiftY1) + threadIdx.y; int index = threadIdx.y * blockDim.x + threadIdx.x; local_histogram[index] = 0; __syncthreads(); uchar* input; uchar* mask_start; uchar value0, value1, value2, value3; uchar mask_value0, mask_value1, mask_value2, mask_value3; for (; element_y < rows; element_y += gridDim.y * blockDim.y) { if (element_x < cols) { input = (uchar*)src + element_y * src_stride; mask_start = (uchar*)mask + element_y * mask_stride; if (element_x < cols - 3) { value0 = input[element_x]; value1 = input[element_x + 1]; 
value2 = input[element_x + 2]; value3 = input[element_x + 3]; mask_value0 = mask_start[element_x]; mask_value1 = mask_start[element_x + 1]; mask_value2 = mask_start[element_x + 2]; mask_value3 = mask_start[element_x + 3]; if (mask_value0 > 0) { atomicAdd(&local_histogram[value0], 1); } if (mask_value1 > 0) { atomicAdd(&local_histogram[value1], 1); } if (mask_value2 > 0) { atomicAdd(&local_histogram[value2], 1); } if (mask_value3 > 0) { atomicAdd(&local_histogram[value3], 1); } } else { value0 = input[element_x]; if (element_x < cols - 1) { value1 = input[element_x + 1]; } if (element_x < cols - 2) { value2 = input[element_x + 2]; } mask_value0 = mask_start[element_x]; if (element_x < cols - 1) { mask_value1 = mask_start[element_x + 1]; } if (element_x < cols - 2) { mask_value2 = mask_start[element_x + 2]; } if (mask_value0) { atomicAdd(&local_histogram[value0], 1); } if (element_x < cols - 1 && mask_value1 > 0) { atomicAdd(&local_histogram[value1], 1); } if (element_x < cols - 2 && mask_value2 > 0) { atomicAdd(&local_histogram[value2], 1); } } } } __syncthreads(); int count = local_histogram[index]; if (count > 0) { atomicAdd(histogram + index, count); } } RetCode calcHist(const uchar* src, int rows, int cols, int src_stride, int* histogram, const uchar* mask, int mask_stride, cudaStream_t stream) { PPL_ASSERT(src != nullptr); PPL_ASSERT(histogram != nullptr); PPL_ASSERT(rows >= 1 && cols >= 1); PPL_ASSERT(src_stride >= cols * (int)sizeof(uchar)); if (mask != nullptr) { PPL_ASSERT(mask_stride >= cols * (int)sizeof(uchar)); } dim3 block, grid; if (src_stride == cols) { block.x = 256; block.y = 1; grid.x = divideUp(cols * rows, 256, 8); if (grid.x > MAX_BLOCKS) { grid.x = MAX_BLOCKS; } grid.y = 1; } else { int columns = divideUp(cols, 4, 2); block.x = kBlockDimX1; block.y = kBlockDimY1; grid.x = divideUp(columns, kBlockDimX1, kBlockShiftX1); grid.y = divideUp(rows, kBlockDimY1, kBlockShiftY1); uint grid_y = MAX_BLOCKS / grid.x; grid.y = (grid_y < grid.y) ? 
grid_y : grid.y; } if (mask == nullptr) { if (src_stride == cols) { unmaskedCalcHistKernel0<<<grid, block, 0, stream>>>(src, rows * cols, histogram); } else if ((src_stride & 3) == 0) { unmaskedCalcHistKernel1<<<grid, block, 0, stream>>>(src, rows, cols, src_stride, histogram); } else { unmaskedCalcHistKernel2<<<grid, block, 0, stream>>>(src, rows, cols, src_stride, histogram); } } else { if (src_stride == cols && mask_stride == cols) { maskedCalcHistKernel0<<<grid, block, 0, stream>>>(src, rows * cols, mask, histogram); } else if ((src_stride & 3) == 0 && (mask_stride & 3) == 0) { maskedCalcHistKernel1<<<grid, block, 0, stream>>>(src, rows, cols, src_stride, mask, mask_stride, histogram); } else { maskedCalcHistKernel2<<<grid, block, 0, stream>>>(src, rows, cols, src_stride, mask, mask_stride, histogram); } } cudaError_t code = cudaGetLastError(); if (code != cudaSuccess) { LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code); return RC_DEVICE_RUNTIME_ERROR; } return RC_SUCCESS; } template <> RetCode CalcHist<uchar>(cudaStream_t stream, int height, int width, int inWidthStride, const uchar* inData, int* outHist, int maskWidthStride, const uchar* mask) { RetCode code = calcHist(inData, height, width, inWidthStride, outHist, mask, maskWidthStride, stream); return code; } } // cuda } // cv } // ppl
the_stack
* \file * Operations for writing linear segments of data from the CUDA thread block */ #pragma once #include <iterator> #include "block_exchange.cuh" #include "../util_ptx.cuh" #include "../util_macro.cuh" #include "../util_type.cuh" #include "../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /** * \addtogroup UtilIo * @{ */ /******************************************************************//** * \name Blocked arrangement I/O (direct) *********************************************************************/ //@{ /** * \brief Store a blocked arrangement of items across a thread block into a linear segment of items. * * \blocked * * \tparam T <b>[inferred]</b> The data type to store. * \tparam ITEMS_PER_THREAD <b>[inferred]</b> The number of consecutive items partitioned onto each thread. * \tparam OutputIteratorT <b>[inferred]</b> The random-access iterator type for output \iterator. */ template < typename T, int ITEMS_PER_THREAD, typename OutputIteratorT> __device__ __forceinline__ void StoreDirectBlocked( int linear_tid, ///< [in] A suitable 1D thread-identifier for the calling thread (e.g., <tt>(threadIdx.y * blockDim.x) + linear_tid</tt> for 2D thread blocks) OutputIteratorT block_itr, ///< [in] The thread block's base output iterator for storing to T (&items)[ITEMS_PER_THREAD]) ///< [in] Data to store { OutputIteratorT thread_itr = block_itr + (linear_tid * ITEMS_PER_THREAD); // Store directly in thread-blocked order #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { thread_itr[ITEM] = items[ITEM]; } } /** * \brief Store a blocked arrangement of items across a thread block into a linear segment of items, guarded by range * * \blocked * * \tparam T <b>[inferred]</b> The data type to store. * \tparam ITEMS_PER_THREAD <b>[inferred]</b> The number of consecutive items partitioned onto each thread. * \tparam OutputIteratorT <b>[inferred]</b> The random-access iterator type for output \iterator. 
*/ template < typename T, int ITEMS_PER_THREAD, typename OutputIteratorT> __device__ __forceinline__ void StoreDirectBlocked( int linear_tid, ///< [in] A suitable 1D thread-identifier for the calling thread (e.g., <tt>(threadIdx.y * blockDim.x) + linear_tid</tt> for 2D thread blocks) OutputIteratorT block_itr, ///< [in] The thread block's base output iterator for storing to T (&items)[ITEMS_PER_THREAD], ///< [in] Data to store int valid_items) ///< [in] Number of valid items to write { OutputIteratorT thread_itr = block_itr + (linear_tid * ITEMS_PER_THREAD); // Store directly in thread-blocked order #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { if (ITEM + (linear_tid * ITEMS_PER_THREAD) < valid_items) { thread_itr[ITEM] = items[ITEM]; } } } /** * \brief Store a blocked arrangement of items across a thread block into a linear segment of items. * * \blocked * * The output offset (\p block_ptr + \p block_offset) must be quad-item aligned, * which is the default starting offset returned by \p cudaMalloc() * * \par * The following conditions will prevent vectorization and storing will fall back to cub::BLOCK_STORE_DIRECT: * - \p ITEMS_PER_THREAD is odd * - The data type \p T is not a built-in primitive or CUDA vector type (e.g., \p short, \p int2, \p double, \p float2, etc.) * * \tparam T <b>[inferred]</b> The data type to store. * \tparam ITEMS_PER_THREAD <b>[inferred]</b> The number of consecutive items partitioned onto each thread. 
* */ template < typename T, int ITEMS_PER_THREAD> __device__ __forceinline__ void StoreDirectBlockedVectorized( int linear_tid, ///< [in] A suitable 1D thread-identifier for the calling thread (e.g., <tt>(threadIdx.y * blockDim.x) + linear_tid</tt> for 2D thread blocks) T *block_ptr, ///< [in] Input pointer for storing from T (&items)[ITEMS_PER_THREAD]) ///< [in] Data to store { enum { // Maximum CUDA vector size is 4 elements MAX_VEC_SIZE = CUB_MIN(4, ITEMS_PER_THREAD), // Vector size must be a power of two and an even divisor of the items per thread VEC_SIZE = ((((MAX_VEC_SIZE - 1) & MAX_VEC_SIZE) == 0) && ((ITEMS_PER_THREAD % MAX_VEC_SIZE) == 0)) ? MAX_VEC_SIZE : 1, VECTORS_PER_THREAD = ITEMS_PER_THREAD / VEC_SIZE, }; // Vector type typedef typename CubVector<T, VEC_SIZE>::Type Vector; // Alias global pointer Vector *block_ptr_vectors = reinterpret_cast<Vector*>(const_cast<T*>(block_ptr)); // Alias pointers (use "raw" array here which should get optimized away to prevent conservative PTXAS lmem spilling) Vector raw_vector[VECTORS_PER_THREAD]; T *raw_items = reinterpret_cast<T*>(raw_vector); // Copy #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { raw_items[ITEM] = items[ITEM]; } // Direct-store using vector types StoreDirectBlocked(linear_tid, block_ptr_vectors, raw_vector); } //@} end member group /******************************************************************//** * \name Striped arrangement I/O (direct) *********************************************************************/ //@{ /** * \brief Store a striped arrangement of data across the thread block into a linear segment of items. * * \striped * * \tparam BLOCK_THREADS The thread block size in threads * \tparam T <b>[inferred]</b> The data type to store. * \tparam ITEMS_PER_THREAD <b>[inferred]</b> The number of consecutive items partitioned onto each thread. * \tparam OutputIteratorT <b>[inferred]</b> The random-access iterator type for output \iterator. 
*/ template < int BLOCK_THREADS, typename T, int ITEMS_PER_THREAD, typename OutputIteratorT> __device__ __forceinline__ void StoreDirectStriped( int linear_tid, ///< [in] A suitable 1D thread-identifier for the calling thread (e.g., <tt>(threadIdx.y * blockDim.x) + linear_tid</tt> for 2D thread blocks) OutputIteratorT block_itr, ///< [in] The thread block's base output iterator for storing to T (&items)[ITEMS_PER_THREAD]) ///< [in] Data to store { OutputIteratorT thread_itr = block_itr + linear_tid; // Store directly in striped order #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { thread_itr[(ITEM * BLOCK_THREADS)] = items[ITEM]; } } /** * \brief Store a striped arrangement of data across the thread block into a linear segment of items, guarded by range * * \striped * * \tparam BLOCK_THREADS The thread block size in threads * \tparam T <b>[inferred]</b> The data type to store. * \tparam ITEMS_PER_THREAD <b>[inferred]</b> The number of consecutive items partitioned onto each thread. * \tparam OutputIteratorT <b>[inferred]</b> The random-access iterator type for output \iterator. 
*/ template < int BLOCK_THREADS, typename T, int ITEMS_PER_THREAD, typename OutputIteratorT> __device__ __forceinline__ void StoreDirectStriped( int linear_tid, ///< [in] A suitable 1D thread-identifier for the calling thread (e.g., <tt>(threadIdx.y * blockDim.x) + linear_tid</tt> for 2D thread blocks) OutputIteratorT block_itr, ///< [in] The thread block's base output iterator for storing to T (&items)[ITEMS_PER_THREAD], ///< [in] Data to store int valid_items) ///< [in] Number of valid items to write { OutputIteratorT thread_itr = block_itr + linear_tid; // Store directly in striped order #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { if ((ITEM * BLOCK_THREADS) + linear_tid < valid_items) { thread_itr[(ITEM * BLOCK_THREADS)] = items[ITEM]; } } } //@} end member group /******************************************************************//** * \name Warp-striped arrangement I/O (direct) *********************************************************************/ //@{ /** * \brief Store a warp-striped arrangement of data across the thread block into a linear segment of items. * * \warpstriped * * \par Usage Considerations * The number of threads in the thread block must be a multiple of the architecture's warp size. * * \tparam T <b>[inferred]</b> The data type to store. * \tparam ITEMS_PER_THREAD <b>[inferred]</b> The number of consecutive items partitioned onto each thread. * \tparam OutputIteratorT <b>[inferred]</b> The random-access iterator type for output \iterator. 
 */
template <
    typename            T,
    int                 ITEMS_PER_THREAD,
    typename            OutputIteratorT>
__device__ __forceinline__ void StoreDirectWarpStriped(
    int                 linear_tid,                 ///< [in] A suitable 1D thread-identifier for the calling thread (e.g., <tt>(threadIdx.y * blockDim.x) + linear_tid</tt> for 2D thread blocks)
    OutputIteratorT     block_itr,                  ///< [in] The thread block's base output iterator for storing to
    T                   (&items)[ITEMS_PER_THREAD]) ///< [in] Data to store
{
    // Lane id, warp id, and this warp's base offset in the output segment
    int tid         = linear_tid & (CUB_PTX_WARP_THREADS - 1);
    int wid         = linear_tid >> CUB_PTX_LOG_WARP_THREADS;
    int warp_offset = wid * CUB_PTX_WARP_THREADS * ITEMS_PER_THREAD;

    OutputIteratorT thread_itr = block_itr + warp_offset + tid;

    // Store directly in warp-striped order: item i of each lane lands at
    // warp_offset + tid + i * warp-size
    #pragma unroll
    for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
    {
        thread_itr[(ITEM * CUB_PTX_WARP_THREADS)] = items[ITEM];
    }
}


/**
 * \brief Store a warp-striped arrangement of data across the thread block into a linear segment of items, guarded by range
 *
 * \warpstriped
 *
 * \par Usage Considerations
 * The number of threads in the thread block must be a multiple of the architecture's warp size.
 *
 * \tparam T                    <b>[inferred]</b> The data type to store.
 * \tparam ITEMS_PER_THREAD     <b>[inferred]</b> The number of consecutive items partitioned onto each thread.
 * \tparam OutputIteratorT      <b>[inferred]</b> The random-access iterator type for output \iterator.
 */
template <
    typename            T,
    int                 ITEMS_PER_THREAD,
    typename            OutputIteratorT>
__device__ __forceinline__ void StoreDirectWarpStriped(
    int                 linear_tid,                 ///< [in] A suitable 1D thread-identifier for the calling thread (e.g., <tt>(threadIdx.y * blockDim.x) + linear_tid</tt> for 2D thread blocks)
    OutputIteratorT     block_itr,                  ///< [in] The thread block's base output iterator for storing to
    T                   (&items)[ITEMS_PER_THREAD], ///< [in] Data to store
    int                 valid_items)                ///< [in] Number of valid items to write
{
    // Lane id, warp id, and this warp's base offset in the output segment
    int tid         = linear_tid & (CUB_PTX_WARP_THREADS - 1);
    int wid         = linear_tid >> CUB_PTX_LOG_WARP_THREADS;
    int warp_offset = wid * CUB_PTX_WARP_THREADS * ITEMS_PER_THREAD;

    OutputIteratorT thread_itr = block_itr + warp_offset + tid;

    // Store directly in warp-striped order, masking off out-of-range items
    #pragma unroll
    for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
    {
        if (warp_offset + tid + (ITEM * CUB_PTX_WARP_THREADS) < valid_items)
        {
            thread_itr[(ITEM * CUB_PTX_WARP_THREADS)] = items[ITEM];
        }
    }
}


//@}  end member group
/** @} */  // end group UtilIo


//-----------------------------------------------------------------------------
// Generic BlockStore abstraction
//-----------------------------------------------------------------------------

/**
 * \brief cub::BlockStoreAlgorithm enumerates alternative algorithms for cub::BlockStore to write a blocked arrangement of items across a CUDA thread block to a linear segment of memory.
 */
enum BlockStoreAlgorithm
{
    /**
     * \par Overview
     *
     * A [<em>blocked arrangement</em>](index.html#sec5sec3) of data is written
     * directly to memory.
     *
     * \par Performance Considerations
     * - The utilization of memory transactions (coalescing) decreases as the
     *   access stride between threads increases (i.e., the number items per thread).
     */
    BLOCK_STORE_DIRECT,

    /**
     * \par Overview
     *
     * A [<em>blocked arrangement</em>](index.html#sec5sec3) of data is written directly
     * to memory using CUDA's built-in vectorized stores as a coalescing optimization.
     * For example, <tt>st.global.v4.s32</tt> instructions will be generated
     * when \p T = \p int and \p ITEMS_PER_THREAD % 4 == 0.
     *
     * \par Performance Considerations
     * - The utilization of memory transactions (coalescing) remains high until the
     *   access stride between threads (i.e., the number items per thread) exceeds the
     *   maximum vector store width (typically 4 items or 64B, whichever is lower).
     * - The following conditions will prevent vectorization and writing will fall back to cub::BLOCK_STORE_DIRECT:
     *   - \p ITEMS_PER_THREAD is odd
     *   - The \p OutputIteratorT is not a simple pointer type
     *   - The block output offset is not quadword-aligned
     *   - The data type \p T is not a built-in primitive or CUDA vector type (e.g., \p short, \p int2, \p double, \p float2, etc.)
     */
    BLOCK_STORE_VECTORIZE,

    /**
     * \par Overview
     * A [<em>blocked arrangement</em>](index.html#sec5sec3) is locally
     * transposed and then efficiently written to memory as a [<em>striped arrangement</em>](index.html#sec5sec3).
     *
     * \par Performance Considerations
     * - The utilization of memory transactions (coalescing) remains high regardless
     *   of items written per thread.
     * - The local reordering incurs slightly longer latencies and throughput than the
     *   direct cub::BLOCK_STORE_DIRECT and cub::BLOCK_STORE_VECTORIZE alternatives.
     */
    BLOCK_STORE_TRANSPOSE,

    /**
     * \par Overview
     * A [<em>blocked arrangement</em>](index.html#sec5sec3) is locally
     * transposed and then efficiently written to memory as a
     * [<em>warp-striped arrangement</em>](index.html#sec5sec3)
     *
     * \par Usage Considerations
     * - BLOCK_THREADS must be a multiple of WARP_THREADS
     *
     * \par Performance Considerations
     * - The utilization of memory transactions (coalescing) remains high regardless
     *   of items written per thread.
     * - The local reordering incurs slightly longer latencies and throughput than the
     *   direct cub::BLOCK_STORE_DIRECT and cub::BLOCK_STORE_VECTORIZE alternatives.
     */
    BLOCK_STORE_WARP_TRANSPOSE,

    /**
     * \par Overview
     * A [<em>blocked arrangement</em>](index.html#sec5sec3) is locally
     * transposed and then efficiently written to memory as a
     * [<em>warp-striped arrangement</em>](index.html#sec5sec3)
     * To reduce the shared memory requirement, only one warp's worth of shared
     * memory is provisioned and is subsequently time-sliced among warps.
     *
     * \par Usage Considerations
     * - BLOCK_THREADS must be a multiple of WARP_THREADS
     *
     * \par Performance Considerations
     * - The utilization of memory transactions (coalescing) remains high regardless
     *   of items written per thread.
     * - Provisions less shared memory temporary storage, but incurs larger
     *   latencies than the BLOCK_STORE_WARP_TRANSPOSE alternative.
     */
    BLOCK_STORE_WARP_TRANSPOSE_TIMESLICED,
};


/**
 * \brief The BlockStore class provides [<em>collective</em>](index.html#sec0) data movement methods for writing a [<em>blocked arrangement</em>](index.html#sec5sec3) of items partitioned across a CUDA thread block to a linear segment of memory.  ![](block_store_logo.png)
 * \ingroup BlockModule
 * \ingroup UtilIo
 *
 * \tparam T                    The type of data to be written.
 * \tparam BLOCK_DIM_X          The thread block length in threads along the X dimension
 * \tparam ITEMS_PER_THREAD     The number of consecutive items partitioned onto each thread.
 * \tparam ALGORITHM            <b>[optional]</b> cub::BlockStoreAlgorithm tuning policy enumeration.  default: cub::BLOCK_STORE_DIRECT.
 * \tparam WARP_TIME_SLICING    <b>[optional]</b> Whether or not only one warp's worth of shared memory should be allocated and time-sliced among block-warps during any load-related data transpositions (versus each warp having its own storage).
(default: false) * \tparam BLOCK_DIM_Y <b>[optional]</b> The thread block length in threads along the Y dimension (default: 1) * \tparam BLOCK_DIM_Z <b>[optional]</b> The thread block length in threads along the Z dimension (default: 1) * \tparam PTX_ARCH <b>[optional]</b> \ptxversion * * \par Overview * - The BlockStore class provides a single data movement abstraction that can be specialized * to implement different cub::BlockStoreAlgorithm strategies. This facilitates different * performance policies for different architectures, data types, granularity sizes, etc. * - BlockStore can be optionally specialized by different data movement strategies: * -# <b>cub::BLOCK_STORE_DIRECT</b>. A [<em>blocked arrangement</em>](index.html#sec5sec3) of data is written * directly to memory. [More...](\ref cub::BlockStoreAlgorithm) * -# <b>cub::BLOCK_STORE_VECTORIZE</b>. A [<em>blocked arrangement</em>](index.html#sec5sec3) * of data is written directly to memory using CUDA's built-in vectorized stores as a * coalescing optimization. [More...](\ref cub::BlockStoreAlgorithm) * -# <b>cub::BLOCK_STORE_TRANSPOSE</b>. A [<em>blocked arrangement</em>](index.html#sec5sec3) * is locally transposed into a [<em>striped arrangement</em>](index.html#sec5sec3) which is * then written to memory. [More...](\ref cub::BlockStoreAlgorithm) * -# <b>cub::BLOCK_STORE_WARP_TRANSPOSE</b>. A [<em>blocked arrangement</em>](index.html#sec5sec3) * is locally transposed into a [<em>warp-striped arrangement</em>](index.html#sec5sec3) which is * then written to memory. [More...](\ref cub::BlockStoreAlgorithm) * - \rowmajor * * \par A Simple Example * \blockcollective{BlockStore} * \par * The code snippet below illustrates the storing of a "blocked" arrangement * of 512 integers across 128 threads (where each thread owns 4 consecutive items) * into a linear segment of memory. 
The store is specialized for \p BLOCK_STORE_WARP_TRANSPOSE,
 * meaning items are locally reordered among threads so that memory references will be
 * efficiently coalesced using a warp-striped access pattern.
 * \par
 * \code
 * #include <cub/cub.cuh>   // or equivalently <cub/block/block_store.cuh>
 *
 * __global__ void ExampleKernel(int *d_data, ...)
 * {
 *     // Specialize BlockStore for a 1D block of 128 threads owning 4 integer items each
 *     typedef cub::BlockStore<int, 128, 4, BLOCK_STORE_WARP_TRANSPOSE> BlockStore;
 *
 *     // Allocate shared memory for BlockStore
 *     __shared__ typename BlockStore::TempStorage temp_storage;
 *
 *     // Obtain a segment of consecutive items that are blocked across threads
 *     int thread_data[4];
 *     ...
 *
 *     // Store items to linear memory
 *     BlockStore(temp_storage).Store(d_data, thread_data);
 *
 * \endcode
 * \par
 * Suppose the set of \p thread_data across the block of threads is
 * <tt>{ [0,1,2,3], [4,5,6,7], ..., [508,509,510,511] }</tt>.
 * The output \p d_data will be <tt>0, 1, 2, 3, 4, 5, ...</tt>.
 *
 */
template <
    typename                T,
    int                     BLOCK_DIM_X,
    int                     ITEMS_PER_THREAD,
    BlockStoreAlgorithm     ALGORITHM           = BLOCK_STORE_DIRECT,
    int                     BLOCK_DIM_Y         = 1,
    int                     BLOCK_DIM_Z         = 1,
    int                     PTX_ARCH            = CUB_PTX_ARCH>
class BlockStore
{
private:
    /******************************************************************************
     * Constants and typed definitions
     ******************************************************************************/

    /// Constants
    enum
    {
        /// The thread block size in threads
        BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z,
    };


    /******************************************************************************
     * Algorithmic variants
     ******************************************************************************/

    /// Store helper (specialized per BlockStoreAlgorithm below)
    template <BlockStoreAlgorithm _POLICY, int DUMMY>
    struct StoreInternal;


    /**
     * BLOCK_STORE_DIRECT specialization of store helper
     */
    template <int DUMMY>
    struct StoreInternal<BLOCK_STORE_DIRECT, DUMMY>
    {
        /// Shared memory storage layout type (none needed for direct stores)
        typedef NullType TempStorage;

        /// Linear thread-id
        int linear_tid;

        /// Constructor
        __device__ __forceinline__ StoreInternal(
            TempStorage &/*temp_storage*/,
            int linear_tid)
        :
            linear_tid(linear_tid)
        {}

        /// Store items into a linear segment of memory
        template <typename OutputIteratorT>
        __device__ __forceinline__ void Store(
            OutputIteratorT     block_itr,                  ///< [in] The thread block's base output iterator for storing to
            T                   (&items)[ITEMS_PER_THREAD]) ///< [in] Data to store
        {
            StoreDirectBlocked(linear_tid, block_itr, items);
        }

        /// Store items into a linear segment of memory, guarded by range
        template <typename OutputIteratorT>
        __device__ __forceinline__ void Store(
            OutputIteratorT     block_itr,                  ///< [in] The thread block's base output iterator for storing to
            T                   (&items)[ITEMS_PER_THREAD], ///< [in] Data to store
            int                 valid_items)                ///< [in] Number of valid items to write
        {
            StoreDirectBlocked(linear_tid, block_itr, items, valid_items);
        }
    };


    /**
     * BLOCK_STORE_VECTORIZE specialization of store helper
     */
    template <int DUMMY>
    struct StoreInternal<BLOCK_STORE_VECTORIZE, DUMMY>
    {
        /// Shared memory storage layout type (none needed)
        typedef NullType TempStorage;

        /// Linear thread-id
        int linear_tid;

        /// Constructor
        __device__ __forceinline__ StoreInternal(
            TempStorage &/*temp_storage*/,
            int linear_tid)
        :
            linear_tid(linear_tid)
        {}

        /// Store items into a linear segment of memory, specialized for native pointer types (attempts vectorization)
        __device__ __forceinline__ void Store(
            T                   *block_ptr,                 ///< [in] The thread block's base output iterator for storing to
            T                   (&items)[ITEMS_PER_THREAD]) ///< [in] Data to store
        {
            StoreDirectBlockedVectorized(linear_tid, block_ptr, items);
        }

        /// Store items into a linear segment of memory, specialized for opaque input iterators (skips vectorization)
        template <typename OutputIteratorT>
        __device__ __forceinline__ void Store(
            OutputIteratorT     block_itr,                  ///< [in] The thread block's base output iterator for storing to
            T                   (&items)[ITEMS_PER_THREAD]) ///< [in] Data to store
        {
            StoreDirectBlocked(linear_tid, block_itr, items);
        }

        /// Store items into a linear segment of memory, guarded by range
        template <typename OutputIteratorT>
        __device__ __forceinline__ void Store(
            OutputIteratorT     block_itr,                  ///< [in] The thread block's base output iterator for storing to
            T                   (&items)[ITEMS_PER_THREAD], ///< [in] Data to store
            int                 valid_items)                ///< [in] Number of valid items to write
        {
            StoreDirectBlocked(linear_tid, block_itr, items, valid_items);
        }
    };


    /**
     * BLOCK_STORE_TRANSPOSE specialization of store helper
     */
    template <int DUMMY>
    struct StoreInternal<BLOCK_STORE_TRANSPOSE, DUMMY>
    {
        // BlockExchange utility type for keys
        typedef BlockExchange<T, BLOCK_DIM_X, ITEMS_PER_THREAD, false, BLOCK_DIM_Y, BLOCK_DIM_Z, PTX_ARCH> BlockExchange;

        /// Shared memory storage layout type
        struct _TempStorage : BlockExchange::TempStorage
        {
            /// Temporary storage for partially-full block guard
            volatile int valid_items;
        };

        /// Alias wrapper allowing storage to be unioned
        struct TempStorage : Uninitialized<_TempStorage> {};

        /// Thread reference to shared storage
        _TempStorage &temp_storage;

        /// Linear thread-id
        int linear_tid;

        /// Constructor
        __device__ __forceinline__ StoreInternal(
            TempStorage &temp_storage,
            int linear_tid)
        :
            temp_storage(temp_storage.Alias()),
            linear_tid(linear_tid)
        {}

        /// Store items into a linear segment of memory
        template <typename OutputIteratorT>
        __device__ __forceinline__ void Store(
            OutputIteratorT     block_itr,                  ///< [in] The thread block's base output iterator for storing to
            T                   (&items)[ITEMS_PER_THREAD]) ///< [in] Data to store
        {
            BlockExchange(temp_storage).BlockedToStriped(items);
            StoreDirectStriped<BLOCK_THREADS>(linear_tid, block_itr, items);
        }

        /// Store items into a linear segment of memory, guarded by range
        template <typename OutputIteratorT>
        __device__ __forceinline__ void Store(
            OutputIteratorT     block_itr,                  ///< [in] The thread block's base output iterator for storing to
            T                   (&items)[ITEMS_PER_THREAD], ///< [in] Data to store
            int                 valid_items)                ///< [in] Number of valid items to write
        {
            BlockExchange(temp_storage).BlockedToStriped(items);
            if (linear_tid == 0)
                temp_storage.valid_items = valid_items;     // Move through volatile smem as a workaround to prevent RF spilling on subsequent loads
            CTA_SYNC();
            StoreDirectStriped<BLOCK_THREADS>(linear_tid, block_itr, items, temp_storage.valid_items);
        }
    };


    /**
     * BLOCK_STORE_WARP_TRANSPOSE specialization of store helper
     */
    template <int DUMMY>
    struct StoreInternal<BLOCK_STORE_WARP_TRANSPOSE, DUMMY>
    {
        enum
        {
            WARP_THREADS = CUB_WARP_THREADS(PTX_ARCH)
        };

        // Assert BLOCK_THREADS must be a multiple of WARP_THREADS
        CUB_STATIC_ASSERT((BLOCK_THREADS % WARP_THREADS == 0), "BLOCK_THREADS must be a multiple of WARP_THREADS");

        // BlockExchange utility type for keys
        typedef BlockExchange<T, BLOCK_DIM_X, ITEMS_PER_THREAD, false, BLOCK_DIM_Y, BLOCK_DIM_Z, PTX_ARCH> BlockExchange;

        /// Shared memory storage layout type
        struct _TempStorage : BlockExchange::TempStorage
        {
            /// Temporary storage for partially-full block guard
            volatile int valid_items;
        };

        /// Alias wrapper allowing storage to be unioned
        struct TempStorage : Uninitialized<_TempStorage> {};

        /// Thread reference to shared storage
        _TempStorage &temp_storage;

        /// Linear thread-id
        int linear_tid;

        /// Constructor
        __device__ __forceinline__ StoreInternal(
            TempStorage &temp_storage,
            int linear_tid)
        :
            temp_storage(temp_storage.Alias()),
            linear_tid(linear_tid)
        {}

        /// Store items into a linear segment of memory
        template <typename OutputIteratorT>
        __device__ __forceinline__ void Store(
            OutputIteratorT     block_itr,                  ///< [in] The thread block's base output iterator for storing to
            T                   (&items)[ITEMS_PER_THREAD]) ///< [in] Data to store
        {
            BlockExchange(temp_storage).BlockedToWarpStriped(items);
            StoreDirectWarpStriped(linear_tid, block_itr, items);
        }

        /// Store items into a linear segment of memory, guarded by range
        template <typename OutputIteratorT>
        __device__ __forceinline__ void Store(
            OutputIteratorT     block_itr,                  ///< [in] The thread block's base output iterator for storing to
            T                   (&items)[ITEMS_PER_THREAD], ///< [in] Data to store
            int                 valid_items)                ///< [in] Number of valid items to write
        {
            BlockExchange(temp_storage).BlockedToWarpStriped(items);
            if (linear_tid == 0)
                temp_storage.valid_items = valid_items;     // Move through volatile smem as a workaround to prevent RF spilling on subsequent loads
            CTA_SYNC();
            StoreDirectWarpStriped(linear_tid, block_itr, items, temp_storage.valid_items);
        }
    };


    /**
     * BLOCK_STORE_WARP_TRANSPOSE_TIMESLICED specialization of store helper
     */
    template <int DUMMY>
    struct StoreInternal<BLOCK_STORE_WARP_TRANSPOSE_TIMESLICED, DUMMY>
    {
        enum
        {
            WARP_THREADS = CUB_WARP_THREADS(PTX_ARCH)
        };

        // Assert BLOCK_THREADS must be a multiple of WARP_THREADS
        CUB_STATIC_ASSERT((BLOCK_THREADS % WARP_THREADS == 0), "BLOCK_THREADS must be a multiple of WARP_THREADS");

        // BlockExchange utility type for keys (time-sliced variant: WARP_TIME_SLICING = true)
        typedef BlockExchange<T, BLOCK_DIM_X, ITEMS_PER_THREAD, true, BLOCK_DIM_Y, BLOCK_DIM_Z, PTX_ARCH> BlockExchange;

        /// Shared memory storage layout type
        struct _TempStorage : BlockExchange::TempStorage
        {
            /// Temporary storage for partially-full block guard
            volatile int valid_items;
        };

        /// Alias wrapper allowing storage to be unioned
        struct TempStorage : Uninitialized<_TempStorage> {};

        /// Thread reference to shared storage
        _TempStorage &temp_storage;

        /// Linear thread-id
        int linear_tid;

        /// Constructor
        __device__ __forceinline__ StoreInternal(
            TempStorage &temp_storage,
            int linear_tid)
        :
            temp_storage(temp_storage.Alias()),
            linear_tid(linear_tid)
        {}

        /// Store items into a linear segment of memory
        template <typename OutputIteratorT>
        __device__ __forceinline__ void Store(
            OutputIteratorT     block_itr,                  ///< [in] The thread block's base output iterator for storing to
            T                   (&items)[ITEMS_PER_THREAD]) ///< [in] Data to store
        {
            BlockExchange(temp_storage).BlockedToWarpStriped(items);
            StoreDirectWarpStriped(linear_tid, block_itr, items);
        }

        /// Store items into a linear segment of memory, guarded by range
        template <typename OutputIteratorT>
        __device__ __forceinline__ void Store(
            OutputIteratorT     block_itr,                  ///< [in] The thread block's base output iterator for storing to
            T                   (&items)[ITEMS_PER_THREAD], ///< [in] Data to store
            int                 valid_items)                ///< [in] Number of valid items to write
        {
            BlockExchange(temp_storage).BlockedToWarpStriped(items);
            if (linear_tid == 0)
                temp_storage.valid_items = valid_items;     // Move through volatile smem as a workaround to prevent RF spilling on subsequent loads
            CTA_SYNC();
            StoreDirectWarpStriped(linear_tid, block_itr, items, temp_storage.valid_items);
        }
    };


    /******************************************************************************
     * Type definitions
     ******************************************************************************/

    /// Internal store implementation to use (note: original comment said "load" -- copy-paste slip)
    typedef StoreInternal<ALGORITHM, 0> InternalStore;


    /// Shared memory storage layout type
    typedef typename InternalStore::TempStorage _TempStorage;


    /******************************************************************************
     * Utility methods
     ******************************************************************************/

    /// Internal storage allocator
    __device__ __forceinline__ _TempStorage& PrivateStorage()
    {
        __shared__ _TempStorage private_storage;
        return private_storage;
    }


    /******************************************************************************
     * Thread fields
     ******************************************************************************/

    /// Thread reference to shared storage
    _TempStorage &temp_storage;

    /// Linear thread-id
    int linear_tid;

public:

    /// \smemstorage{BlockStore}
    struct TempStorage : Uninitialized<_TempStorage> {};


    /******************************************************************//**
     * \name Collective constructors
     *********************************************************************/
    //@{

    /**
     * \brief Collective constructor using a private static allocation of shared memory as temporary storage.
     */
    __device__ __forceinline__ BlockStore()
    :
        temp_storage(PrivateStorage()),
        linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z))
    {}


    /**
     * \brief Collective constructor using the specified memory allocation as temporary storage.
     */
    __device__ __forceinline__ BlockStore(
        TempStorage &temp_storage)             ///< [in] Reference to memory allocation having layout type TempStorage
    :
        temp_storage(temp_storage.Alias()),
        linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z))
    {}


    //@}  end member group
    /******************************************************************//**
     * \name Data movement
     *********************************************************************/
    //@{

    /**
     * \brief Store items into a linear segment of memory.
     *
     * \par
     * - \blocked
     * - \smemreuse
     *
     * \par Snippet
     * The code snippet below illustrates the storing of a "blocked" arrangement
     * of 512 integers across 128 threads (where each thread owns 4 consecutive items)
     * into a linear segment of memory.  The store is specialized for \p BLOCK_STORE_WARP_TRANSPOSE,
     * meaning items are locally reordered among threads so that memory references will be
     * efficiently coalesced using a warp-striped access pattern.
     * \par
     * \code
     * #include <cub/cub.cuh>   // or equivalently <cub/block/block_store.cuh>
     *
     * __global__ void ExampleKernel(int *d_data, ...)
     * {
     *     // Specialize BlockStore for a 1D block of 128 threads owning 4 integer items each
     *     typedef cub::BlockStore<int, 128, 4, BLOCK_STORE_WARP_TRANSPOSE> BlockStore;
     *
     *     // Allocate shared memory for BlockStore
     *     __shared__ typename BlockStore::TempStorage temp_storage;
     *
     *     // Obtain a segment of consecutive items that are blocked across threads
     *     int thread_data[4];
     *     ...
     *
     *     // Store items to linear memory
     *     BlockStore(temp_storage).Store(d_data, thread_data);
     *
     * \endcode
     * \par
     * Suppose the set of \p thread_data across the block of threads is
     * <tt>{ [0,1,2,3], [4,5,6,7], ..., [508,509,510,511] }</tt>.
     * The output \p d_data will be <tt>0, 1, 2, 3, 4, 5, ...</tt>.
     *
     */
    template <typename OutputIteratorT>
    __device__ __forceinline__ void Store(
        OutputIteratorT     block_itr,                  ///< [in] The thread block's base output iterator for storing to
        T                   (&items)[ITEMS_PER_THREAD]) ///< [in] Data to store
    {
        InternalStore(temp_storage, linear_tid).Store(block_itr, items);
    }

    /**
     * \brief Store items into a linear segment of memory, guarded by range.
     *
     * \par
     * - \blocked
     * - \smemreuse
     *
     * \par Snippet
     * The code snippet below illustrates the guarded storing of a "blocked" arrangement
     * of 512 integers across 128 threads (where each thread owns 4 consecutive items)
     * into a linear segment of memory.  The store is specialized for \p BLOCK_STORE_WARP_TRANSPOSE,
     * meaning items are locally reordered among threads so that memory references will be
     * efficiently coalesced using a warp-striped access pattern.
     * \par
     * \code
     * #include <cub/cub.cuh>   // or equivalently <cub/block/block_store.cuh>
     *
     * __global__ void ExampleKernel(int *d_data, int valid_items, ...)
     * {
     *     // Specialize BlockStore for a 1D block of 128 threads owning 4 integer items each
     *     typedef cub::BlockStore<int, 128, 4, BLOCK_STORE_WARP_TRANSPOSE> BlockStore;
     *
     *     // Allocate shared memory for BlockStore
     *     __shared__ typename BlockStore::TempStorage temp_storage;
     *
     *     // Obtain a segment of consecutive items that are blocked across threads
     *     int thread_data[4];
     *     ...
     *
     *     // Store items to linear memory
     *     BlockStore(temp_storage).Store(d_data, thread_data, valid_items);
     *
     * \endcode
     * \par
     * Suppose the set of \p thread_data across the block of threads is
     * <tt>{ [0,1,2,3], [4,5,6,7], ..., [508,509,510,511] }</tt> and \p valid_items is \p 5.
     * The output \p d_data will be <tt>0, 1, 2, 3, 4, ?, ?, ?, ...</tt>, with
     * only the first two threads being unmasked to store portions of valid data.
     *
     */
    template <typename OutputIteratorT>
    __device__ __forceinline__ void Store(
        OutputIteratorT     block_itr,                  ///< [in] The thread block's base output iterator for storing to
        T                   (&items)[ITEMS_PER_THREAD], ///< [in] Data to store
        int                 valid_items)                ///< [in] Number of valid items to write
    {
        InternalStore(temp_storage, linear_tid).Store(block_itr, items, valid_items);
    }
};


}               // CUB namespace
CUB_NS_POSTFIX  // Optional outer namespace(s)
the_stack
namespace caffe {

#ifdef TODO_REFACTOR
#ifdef USE_OPENCL
  // NOTE(review): the declarations below appear to be InnerProductLayer class
  // members extracted WITHOUT their enclosing class definition -- the class
  // header is not visible in this fragment.  Verify against the original
  // layer header before relying on this file.

  // Cached OpenCL image holding the packed weights, plus bookkeeping used to
  // detect when the cached image is stale.
  cl_mem weight_image_;
  const SyncedMemory * copied_weight_data_;
  bool test_only_;
  uint64_t weight_image_seq_;
  gemm_type_t innerprod_type_;
  bool tuned_;
  stringstream cache_path_;
  string key_;
#endif

#ifdef USE_OPENCL
  virtual void generate_key();
  virtual void tune_innerprod_type(const int_tp ctx_id,
                                   const CBLAS_TRANSPOSE trans_b,
                                   const cl_mem a, const cl_mem b,
                                   const cl_mem B_image,
                                   const size_t max_image_size);
  virtual bool load_cache();
#endif

#ifdef USE_OPENCL
  // Release the cached weight image when the layer is destroyed.
  ~InnerProductLayer() {
    if (weight_image_)
      clReleaseMemObject(weight_image_);
    weight_image_ = NULL;
  }
#endif

  // Supported GEMM implementation strategies.
  enum gemm_type_t {
    GEMM_TYPE_DEFAULT = 0,
    GEMM_TYPE_FAST_IMAGE_32_1,
    GEMM_TYPE_FAST_IMAGE_32_2,
    GEMM_TYPE_FAST_IMAGE_B_IMAGE,
    GEMM_TYPE_FAST_BUFFER
  };

  // Resources to release once an asynchronous GEMM completes: the events and
  // temporary images recorded while enqueuing the operation.
  struct gemm_callback_arg {
    vector<cl_event> evs;
    vector<cl_mem> imgs;
  };

  // OpenCL event callback: releases every event and image recorded in
  // user_data, then frees the argument struct itself.
  static void CL_CALLBACK gemm_callback(cl_event event,
                                        cl_int event_command_exec_status,
                                        void *user_data) {
    struct gemm_callback_arg *arg = (struct gemm_callback_arg *) user_data;
    for (int i = 0; i < arg->evs.size(); i++) {
      clReleaseEvent(arg->evs[i]);
    }
    for (int i = 0; i < arg->imgs.size(); i++) {
      clReleaseMemObject(arg->imgs[i]);
    }
    delete arg;
  }

  // Create and copy buffer to image for GEMM's matrix a and b.
  // Will return image to caller if the input image is NULL. Otherwise,
  // will use the image directly. It's caller's responsibility to
  // release the created image.
template<typename Dtype, typename MItype, typename MOtype> static void greentea_gpu_gemm_copy_buffer_to_image(int_tp ctx_id, cl_mem *image, cl_mem buffer, int offset, bool is_matrix_a, bool transpose, bool padding, int padded_height, int padded_width, int height, int width, int ld, int wait_list_size, cl_event *wait_list, cl_event *event) { viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); viennacl::ocl::program &program = (Caffe::Get().GetDevice(ctx_id, false)) ->program(); cl_image_desc desc; cl_image_format format; bool halfPrecisionMode = !std::is_same<Dtype, float>::value; memset(&desc, 0, sizeof(desc)); int src_offset = sizeof(Dtype) * offset; if (!is_matrix_a && transpose) { // For matrix b with transpose, we need to handle them differently. // As we can't use the sub group block read to get a row easily, // we have to use CL_FLOAT type with read_imagef to get the row. cl_int err; if (halfPrecisionMode) { format.image_channel_data_type = CL_HALF_FLOAT; } else { format.image_channel_data_type = CL_FLOAT; } desc.image_type = CL_MEM_OBJECT_IMAGE2D; desc.image_width = width; format.image_channel_order = CL_R; desc.image_height = height; if (*image == NULL) { *image = clCreateImage( ctx.handle().get(), CL_MEM_READ_WRITE, &format, &desc, NULL, &err); OCL_CHECK(err); } if (ld == width) { size_t origin[] = {0, 0, 0}; size_t region[] = {(size_t)desc.image_width, (size_t)desc.image_height, 1}; OCL_CHECK(clEnqueueCopyBufferToImage(ctx.get_queue().handle().get(), buffer, *image, src_offset, origin, region, wait_list_size, wait_list, event)); } else { viennacl::ocl::kernel &oclk_gemm_copy = program.get_kernel( CL_KERNEL_SELECT("gemm_buffer_copy_image_transpose")); size_t global_copy[2]; global_copy[0] = width; global_copy[1] = height; oclk_gemm_copy.arg(0, WrapHandle(buffer, &ctx)); oclk_gemm_copy.arg(1, WrapHandle(*image, &ctx)); oclk_gemm_copy.arg(2, offset); oclk_gemm_copy.arg(3, width); oclk_gemm_copy.arg(4, height); oclk_gemm_copy.arg(5, ld); 
OCL_CHECK(clEnqueueNDRangeKernel(ctx.get_queue().handle().get(), oclk_gemm_copy.handle().get(), 2, NULL, global_copy, NULL, wait_list_size, wait_list, event)); } } else { if (*image == NULL) { desc.image_type = CL_MEM_OBJECT_IMAGE2D; if (halfPrecisionMode) { format.image_channel_data_type = CL_HALF_FLOAT; format.image_channel_order = CL_R; } else { format.image_channel_data_type = CL_UNSIGNED_INT8; format.image_channel_order = CL_RGBA; } if (!padding) { desc.image_width = width; desc.image_height = height; } else { desc.image_width = padded_width; desc.image_height = padded_height; } cl_int err; *image = clCreateImage(ctx.handle().get(), desc.buffer ? CL_MEM_READ_ONLY : CL_MEM_READ_WRITE, &format, &desc, NULL, &err); OCL_CHECK(err); } if (!padding && desc.buffer != NULL) return; if (!padding && desc.buffer == NULL) { // copy without padding. size_t origin[] = {0, 0, 0}; size_t region[] = {(size_t)width, (size_t)height, 1}; OCL_CHECK(clEnqueueCopyBufferToImage(ctx.get_queue().handle().get(), buffer, *image, src_offset, origin, region, wait_list_size, wait_list, event)); } else { viennacl::ocl::kernel &oclk_gemm_copy = program.get_kernel( CL_KERNEL_SELECT("gemm_buffer_copy_image_no_transpose")); size_t global_copy[2]; global_copy[0] = padding ? padded_width : width; global_copy[1] = padding ? 
padded_height : height; oclk_gemm_copy.arg(0, WrapHandle(buffer, &ctx)); oclk_gemm_copy.arg(1, WrapHandle(*image, &ctx)); oclk_gemm_copy.arg(2, offset); oclk_gemm_copy.arg(3, width); oclk_gemm_copy.arg(4, height); oclk_gemm_copy.arg(5, ld); OCL_CHECK(clEnqueueNDRangeKernel(ctx.get_queue().handle().get(), oclk_gemm_copy.handle().get(), 2, NULL, global_copy, NULL, wait_list_size, wait_list, event)); } } } template<typename Dtype, typename MItype, typename MOtype> static void greentea_gpu_fast_image_gemm(const int_tp ctx_id, const CBLAS_TRANSPOSE trans_a, const CBLAS_TRANSPOSE trans_b, const int_tp m, const int_tp n, const int_tp k, const Dtype alpha, const cl_mem a, const int_tp offA, const cl_mem b, const int_tp offB, const Dtype beta, cl_mem c, const int_tp offC, bool is_image_a, bool is_image_b, enum gemm_type_t gemm_type, const size_t max_image_size) { CHECK_EQ(gemm_type == GEMM_TYPE_FAST_IMAGE_32_1 || gemm_type == GEMM_TYPE_FAST_IMAGE_32_2 || gemm_type == GEMM_TYPE_FAST_IMAGE_B_IMAGE, true) << "Invalid fast image gemm type." << std::endl; if (is_image_a) CHECK_EQ(offA, 0) << "Invalid input image offset." << std::endl; if (is_image_b) CHECK_EQ(offB, 0) << "Invalid input image offset." << std::endl; bool halfPrecisionMode = !std::is_same<Dtype, float>::value; int widthA = (trans_a == CblasNoTrans) ? k : m; int heightA = (trans_a == CblasNoTrans) ? m : k; int widthB = (trans_b == CblasNoTrans) ? n : k; int heightB = (trans_b == CblasNoTrans) ? k : n; int ldA = widthA; int ldB = widthB; int ldC = n; int A_start_x = 0, A_start_y = 0, B_start_x = 0; int B_start_y = 0, C_start_x = 0, C_start_y = 0; int blocksize = 1024; if (gemm_type == GEMM_TYPE_FAST_IMAGE_B_IMAGE) blocksize = max_image_size; int blockA_width = blocksize; int blockA_height = blocksize; int blockB_width = blocksize; int blockB_height = blocksize; int blockC_width = blocksize; int blockC_height = blocksize; int use_buffer_indicator = halfPrecisionMode ? 
16 : 8; // To fix the edge problem casued by the sub group block read. // we have to pad the image if it's not multiple of tile. // just padding one line is enough as the sub group block read // will clamp to edge according to the spec. viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); viennacl::ocl::program &program = (Caffe::Get().GetDevice(ctx_id, false)) ->program(); cl_mem ImA = NULL; cl_mem ImB = NULL; viennacl::ocl::kernel *oclk_gemm_float; string kernel_name("gemm_"); if (gemm_type == GEMM_TYPE_FAST_IMAGE_32_1 || gemm_type == GEMM_TYPE_FAST_IMAGE_B_IMAGE) kernel_name += "32_1_"; else kernel_name += "32_2_"; if (trans_a == CblasNoTrans) kernel_name += "n"; else kernel_name += "T"; if (trans_b == CblasNoTrans) { kernel_name += "N_"; } else { kernel_name += "T_"; if (is_image_b || (k % use_buffer_indicator != 0)) { kernel_name += "SCALAR_"; } else { kernel_name += "BUFFER_"; } } if (alpha == 1) kernel_name += "1_"; else kernel_name += "0_"; if (beta == 0) kernel_name += "0"; else kernel_name += "1"; if (halfPrecisionMode) { kernel_name += "_half"; } else { kernel_name += "_float"; } oclk_gemm_float = &program.get_kernel(kernel_name); while (C_start_y < m) { blockC_width = std::min(static_cast<int>(n) - C_start_x, blocksize); blockC_height = std::min(static_cast<int>(m) - C_start_y, blocksize); int isFirstColBlock = 1; for (int k = 0; k < k; k += blocksize) { cl_event ev[5]; cl_uint ev_idx = 0; memset(ev, 0, sizeof(cl_event) * 5); struct gemm_callback_arg * arg = new gemm_callback_arg; blockA_width = std::min(widthA - A_start_x, blocksize); blockA_height = std::min(heightA - A_start_y, blocksize); blockB_width = std::min(widthB - B_start_x, blocksize); blockB_height = std::min(heightB - B_start_y, blocksize); int block_Ksize = std::min(static_cast<int>(k) - k, blocksize); int padded_k = block_Ksize + ((block_Ksize & 7) ? (8 - (block_Ksize & 7)) : 0); int imageA_w = (trans_a == CblasNoTrans) ? 
padded_k : blockA_width; int imageA_h = (trans_a == CblasNoTrans) ? blockA_height : padded_k; int imageB_w = (trans_b == CblasNoTrans) ? blockB_width : padded_k; int imageB_h = (trans_b == CblasNoTrans) ? padded_k : blockB_height; int blockA_offset = offA + A_start_y * ldA + A_start_x; int blockB_offset = offB + B_start_y * ldB + B_start_x; int blockC_offset = offC + C_start_y * ldC + C_start_x; if (trans_b == CblasNoTrans) { bool padding_A = false; bool padding_B = false; if (halfPrecisionMode && is_image_b) { padding_A = true; } if (!is_image_a && !is_image_b) { if (m * k < n * k) padding_B = true; else padding_A = true; } if (!is_image_a) { greentea_gpu_gemm_copy_buffer_to_image<Dtype>(ctx_id, &ImA, a, blockA_offset, true, trans_a != CblasNoTrans, padding_A, imageA_h, imageA_w, blockA_height, blockA_width, ldA, 0, NULL, &ev[ev_idx]); if (ev[ev_idx] != NULL) ev_idx++; } if (!is_image_b) { greentea_gpu_gemm_copy_buffer_to_image<Dtype>(ctx_id, &ImB, b, blockB_offset, false, false, padding_B, imageB_h, imageB_w, blockB_height, blockB_width, ldB, 0, NULL, &ev[ev_idx]); if (ev[ev_idx] != NULL) ev_idx++; } } else { // We will use normal read_imagef to read image b when b has transpose. // thus we don't need to pad image a at all. 
if (!is_image_a) { bool padding; padding = !is_image_b || halfPrecisionMode; greentea_gpu_gemm_copy_buffer_to_image<Dtype>(ctx_id, &ImA, a, blockA_offset, true, trans_a != CblasNoTrans, padding, imageA_h, imageA_w, blockA_height, blockA_width, ldA, 0, NULL, &ev[ev_idx]); if (ev[ev_idx] != NULL) ev_idx++; } if (!is_image_b && (k % use_buffer_indicator != 0)) { greentea_gpu_gemm_copy_buffer_to_image<Dtype>(ctx_id, &ImB, b, blockB_offset, false, true, false, imageB_h, imageB_w, blockB_height, blockB_width, ldB, 0, NULL, &ev[ev_idx]); if (ev[ev_idx] != NULL) ev_idx++; } } if (is_image_a) ImA = a; if (is_image_b) ImB = b; size_t global[2]; if (gemm_type == GEMM_TYPE_FAST_IMAGE_32_1 || gemm_type == GEMM_TYPE_FAST_IMAGE_B_IMAGE ) { if (halfPrecisionMode) { global[0] = (size_t)( blockC_width + 15 ) & ~15; } else { global[0] = (size_t)( blockC_width + 7 ) & ~7; } } else { if (halfPrecisionMode) { global[0] = (size_t)( (blockC_width / 2 ) + 15 ) ^ ~15; } else { global[0] = (size_t)( (blockC_width / 2 ) + 7 ) ^ ~7; } } global[1] = (size_t)(blockC_height + 31) / 32; size_t local[2]; if (halfPrecisionMode) { local[0] = 16; } else { local[0] = 8; } local[1] = 1; cl_uint arg_idx = 0; oclk_gemm_float->arg(arg_idx++, WrapHandle(ImA, &ctx)); if (trans_b == CblasNoTrans || is_image_b || (k % use_buffer_indicator != 0)) { oclk_gemm_float->arg(arg_idx++, WrapHandle(ImB, &ctx)); } else { oclk_gemm_float->arg(arg_idx++, WrapHandle(b, &ctx)); oclk_gemm_float->arg(arg_idx++, blockB_offset); oclk_gemm_float->arg(arg_idx++, ldB); } oclk_gemm_float->arg(arg_idx++, WrapHandle(c, &ctx)); oclk_gemm_float->arg(arg_idx++, blockC_offset); oclk_gemm_float->arg(arg_idx++, blockC_height); oclk_gemm_float->arg(arg_idx++, blockC_width); oclk_gemm_float->arg(arg_idx++, ldC); oclk_gemm_float->arg(arg_idx++, fixup_arg_type(alpha)); oclk_gemm_float->arg(arg_idx++, fixup_arg_type(beta)); oclk_gemm_float->arg(arg_idx++, padded_k); if (trans_b != CblasNoTrans) oclk_gemm_float->arg(arg_idx++, block_Ksize); 
oclk_gemm_float->arg(arg_idx++, isFirstColBlock); cl_event *wait_list = NULL; if (ev_idx != 0) wait_list = &ev[0]; OCL_CHECK(clEnqueueNDRangeKernel(ctx.get_queue().handle().get(), oclk_gemm_float->handle().get(), 2, NULL, global, local, ev_idx, wait_list, &ev[ev_idx])); if (trans_a == CblasNoTrans) A_start_x += blockA_width; else A_start_y += blockA_height; if (trans_b == CblasNoTrans) B_start_y += blockB_height; else B_start_x += blockB_width; isFirstColBlock = 0; arg->evs.assign(ev, ev + ev_idx + 1); clSetEventCallback(ev[ev_idx], CL_COMPLETE, &gemm_callback, static_cast<void*>(arg)); } C_start_x += blockC_width; if (trans_a == CblasNoTrans) A_start_x = 0; else A_start_y = 0; if (trans_b == CblasNoTrans) { B_start_x += blockB_width; B_start_y = 0; } else { B_start_y += blockB_height; B_start_x = 0; } if (C_start_x >= n) { C_start_x = 0; B_start_x = 0; B_start_y = 0; C_start_y += blockC_height; if (trans_a == CblasNoTrans) A_start_y += blockA_height; else A_start_x += blockA_width; } } if (ImA && !is_image_a) clReleaseMemObject(ImA); if (ImB && !is_image_b) clReleaseMemObject(ImB); } template<typename Dtype, typename MItype, typename MOtype> static void greentea_gpu_fast_buffer_gemm(const int_tp ctx_id, const CBLAS_TRANSPOSE trans_a, const CBLAS_TRANSPOSE trans_b, const int_tp m, const int_tp n, const int_tp k, const Dtype alpha, const cl_mem a, const int_tp offA, const cl_mem b, const int_tp offB, const Dtype beta, cl_mem c, const int_tp offC, enum gemm_type_t gemm_type) { CHECK_EQ(gemm_type == GEMM_TYPE_FAST_BUFFER, true) << "Invalid fast buffer gemm type." 
<< std::endl; cl_event ev; viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); viennacl::ocl::program &program = (Caffe::Get().GetDevice(ctx_id, false)) ->program(); bool halfPrecisionMode = !std::is_same<Dtype, float>::value; size_t sub_group_size = 8; bool is_small_batch = (m == 2 || m == 4 || m == 8); viennacl::ocl::kernel *oclk_gemm_float; string kernel_name("gemm_buffer_"); if (trans_a == CblasNoTrans && trans_b == CblasNoTrans) { kernel_name += "NN"; if (halfPrecisionMode) { sub_group_size = 16; } } else if (trans_a == CblasNoTrans && trans_b != CblasNoTrans) { if (m == 2) kernel_name +="NT_M_2"; else if (m == 4) kernel_name +="NT_M_4"; else if (m == 8) kernel_name +="NT_M_8"; else kernel_name += "NT"; } else if (trans_a != CblasNoTrans && trans_b == CblasNoTrans) { kernel_name += "TN"; if (halfPrecisionMode) { sub_group_size = 16; } } else { kernel_name += "TT"; } if (halfPrecisionMode) { kernel_name += "_half"; } else { kernel_name += "_float"; } oclk_gemm_float = &program.get_kernel(kernel_name); size_t local[2] = {}; size_t global[2] = {}; if (trans_a == CblasNoTrans && trans_b != CblasNoTrans && is_small_batch) { if (m == 8) local[0] = 16; else if (m == 4) local[0] = 32; else local[0] = 64; local[1] = 1; if (m == 8) global[0] = n * local[0]; else global[0] = (n + 3) / 4 * local[0]; global[1] = 1; } else { size_t lx = sub_group_size; size_t ly = (trans_b != CblasNoTrans && trans_a == CblasNoTrans && halfPrecisionMode) ? 2 : 4; int dx = (trans_b != CblasNoTrans && trans_a == CblasNoTrans) ? 
1 : 4; int dy = 8; size_t gx = (size_t)(n + dx - 1) / dx; size_t gy = (size_t)(m + dy - 1) / dy; global[0] = (gx + lx - 1) / lx * lx; global[1] = (gy + ly - 1) / ly * ly; local[0] = lx; local[1] = ly; } cl_uint arg_idx = 0; oclk_gemm_float->arg(arg_idx++, WrapHandle(a, &ctx)); oclk_gemm_float->arg(arg_idx++, offA); oclk_gemm_float->arg(arg_idx++, WrapHandle(b, &ctx)); oclk_gemm_float->arg(arg_idx++, offB); oclk_gemm_float->arg(arg_idx++, WrapHandle(c, &ctx)); oclk_gemm_float->arg(arg_idx++, offC); oclk_gemm_float->arg(arg_idx++, m); oclk_gemm_float->arg(arg_idx++, n); oclk_gemm_float->arg(arg_idx++, k); oclk_gemm_float->arg(arg_idx++, fixup_arg_type(alpha)); oclk_gemm_float->arg(arg_idx++, fixup_arg_type(beta)); if (trans_b == CblasNoTrans || trans_a != CblasNoTrans) { int stride = 256; for (int start_index = 0; start_index < k; start_index += stride) { oclk_gemm_float->arg(arg_idx, start_index); OCL_CHECK(clEnqueueNDRangeKernel(ctx.get_queue().handle().get(), oclk_gemm_float->handle().get(), 2, NULL, global, local, 0, NULL, &ev)); } } else { OCL_CHECK(clEnqueueNDRangeKernel(ctx.get_queue().handle().get(), oclk_gemm_float->handle().get(), 2, NULL, global, local, 0, NULL, &ev)); } clReleaseEvent(ev); } template<typename Dtype, typename MItype, typename MOtype> static void innerprod_common(const int_tp ctx_id, const CBLAS_TRANSPOSE trans_b, const int_tp m, const int_tp n, const int_tp k, const cl_mem a, const cl_mem b, const cl_mem B_image, cl_mem c, gemm_type_t gemm_type, const size_t max_image_size) { if (gemm_type == GEMM_TYPE_FAST_IMAGE_32_1 || gemm_type == GEMM_TYPE_FAST_IMAGE_32_2) { greentea_gpu_fast_image_gemm<Dtype>(ctx_id, CblasNoTrans, trans_b, m, n, k, (Dtype)1., a, 0, b, 0, (Dtype)0., c, 0, false, false, gemm_type, max_image_size); } else if (gemm_type == GEMM_TYPE_FAST_IMAGE_B_IMAGE) { greentea_gpu_fast_image_gemm<Dtype>(ctx_id, CblasNoTrans, trans_b, m, n, k, (Dtype)1., a, 0, B_image, 0, (Dtype)0., c, 0, false, true, GEMM_TYPE_FAST_IMAGE_B_IMAGE, 
max_image_size); } else if (gemm_type == GEMM_TYPE_FAST_BUFFER) { greentea_gpu_fast_buffer_gemm<Dtype>(ctx_id, CblasNoTrans, trans_b, m, n, k, 1.f, a, 0, b, 0, 0.f, c, 0, gemm_type); } else { greentea_gpu_gemm<Dtype>(ctx_id, CblasNoTrans, trans_b, m, n, k, (Dtype)1., a, 0, b, 0, (Dtype)0., c, 0); } } template<typename Dtype, typename MItype, typename MOtype> void InnerProductLayer<Dtype, MItype, MOtype>::generate_key() { stringstream keyBuilder; keyBuilder << M_ << "_" << N_ << "_" << K_ << "_" << transpose_; viennacl::ocl::context &ctx = viennacl::ocl::get_context (this->device_->id()); string prefix = ctx.current_device().name() + ctx.current_device().vendor() + ctx.current_device().driver_version() + std::to_string(ctx.current_device().max_compute_units()); key_ = viennacl::tools::sha1(prefix + keyBuilder.str()); // short_key_ = keyBuilder.str(); } #ifdef HAS_HALF_SUPPORT template void InnerProductLayer<half>::generate_key(); #endif template void InnerProductLayer<float>::generate_key(); template void InnerProductLayer<double>::generate_key(); template<typename Dtype, typename MItype, typename MOtype> bool InnerProductLayer<Dtype, MItype, MOtype>::load_cache() { if (tuned_) { return true; } else { generate_key(); // Find cached kernel configuration string outputFile; outputFile = cache_path_.str() + key_; std::ifstream cachedKernel(outputFile.c_str()); if (cachedKernel) { int cache_config; cachedKernel >> cache_config; innerprod_type_ = (gemm_type_t)cache_config; tuned_ = true; return true; } else { return false; } } } #ifdef HAS_HALF_SUPPORT template bool InnerProductLayer<half>::load_cache(); #endif template bool InnerProductLayer<float>::load_cache(); template bool InnerProductLayer<double>::load_cache(); template<typename Dtype, typename MItype, typename MOtype> void InnerProductLayer<Dtype, MItype, MOtype>::tune_innerprod_type(const int_tp ctx_id, const CBLAS_TRANSPOSE trans_b, const cl_mem a, const cl_mem b, const cl_mem B_image, const size_t 
max_image_size) { if (std::is_same<Dtype, double>::value) { innerprod_type_ = GEMM_TYPE_DEFAULT; return; } else { // 1. load cache if (load_cache()) { return; } else { // 2. if not cached generate tuning uint element_size = 0; bool halfPrecisionMode = !std::is_same<Dtype, float>::value; if (halfPrecisionMode) { element_size = sizeof(uint16_t); } else { element_size = sizeof(float); } viennacl::ocl::context &ctx = viennacl::ocl::get_context(ctx_id); cl_int err; cl_mem c = clCreateBuffer(ctx.handle().get(), CL_MEM_ALLOC_HOST_PTR, M_ * N_ * element_size, NULL, &err); OCL_CHECK(err); vector<gemm_type_t> gemm_tests; gemm_tests.push_back(GEMM_TYPE_FAST_IMAGE_32_1); if (B_image != NULL) gemm_tests.push_back(GEMM_TYPE_FAST_IMAGE_B_IMAGE); gemm_tests.push_back(GEMM_TYPE_FAST_BUFFER); if (!halfPrecisionMode) gemm_tests.push_back(GEMM_TYPE_DEFAULT); // warm up. for ( int i = 0; i < gemm_tests.size(); i++ ) { innerprod_common<Dtype>(ctx_id, trans_b, M_, N_, K_, a, b, B_image, c, gemm_tests[i], max_image_size); } float fastest_time = 1e10; int fastest_index = -1; clFinish(ctx.get_queue().handle().get()); for ( int i = 0; i < gemm_tests.size(); i++ ) { Timer timer; timer.initted(); timer.Start(); innerprod_common<Dtype>(ctx_id, trans_b, M_, N_, K_, a, b, B_image, c, gemm_tests[i], max_image_size); timer.Stop(); float elapsedTime = timer.MilliSeconds(); // #define INNERPROD_PROFILING #ifdef INNERPROD_PROFILING std::cout << "innerprod type: " << gemm_tests[i] <<" eclipsed time: " << elapsedTime << "ms." << std::endl; #endif if (elapsedTime < fastest_time) { fastest_time = elapsedTime; fastest_index = i; } } clReleaseMemObject(c); if (fastest_index >= 0) { innerprod_type_ = gemm_tests[fastest_index]; } // 3. store cache. 
string outputFile; outputFile = cache_path_.str() + key_; std::ofstream outputKernel; outputKernel.open(outputFile.c_str()); outputKernel << innerprod_type_; outputKernel.close(); tuned_ = true; return; } } return; } #ifdef HAS_HALF_SUPPORT template void InnerProductLayer<half>::tune_innerprod_type(const int_tp ctx_id, const CBLAS_TRANSPOSE trans_b, const cl_mem a, const cl_mem b, const cl_mem B_image, const size_t max_image_size); #endif template void InnerProductLayer<float>::tune_innerprod_type(const int_tp ctx_id, const CBLAS_TRANSPOSE trans_b, const cl_mem a, const cl_mem b, const cl_mem B_image, const size_t max_image_size); template void InnerProductLayer<double>::tune_innerprod_type( const int_tp ctx_id, const CBLAS_TRANSPOSE trans_b, const cl_mem a, const cl_mem b, const cl_mem B_image, const size_t max_image_size); template<typename Dtype, typename MItype, typename MOtype> void InnerProductLayer<Dtype, MItype, MOtype>::Forward_gpu( const vector<Blob<MItype>*>& bottom, const vector<Blob<MOtype>*>& top) { vptr<const Dtype> bottom_data = bottom[0]->gpu_data(); vptr<Dtype> top_data = top[0]->mutable_gpu_data(); vptr<const Dtype> weight = this->blobs_[0]->gpu_data(); if (M_ == 1) { this->device_->template gemv<Dtype>(CblasNoTrans, N_, K_, (Dtype) 1., weight, bottom_data, (Dtype) 0., top_data); if (bias_term_) this->device_->template axpy<Dtype>(N_, bias_multiplier_.cpu_data()[0], this->blobs_[1]->gpu_data(), top_data); } else { this->device_->template gemm<Dtype>(CblasNoTrans, transpose_ ? 
CblasNoTrans : CblasTrans, M_, N_, K_, (Dtype) 1., bottom_data, weight, (Dtype) 0., top_data); if (bias_term_) this->device_->template gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, 1, (Dtype) 1., bias_multiplier_.gpu_data(), this->blobs_[1]->gpu_data(), (Dtype) 1., top_data); } } template<typename Dtype, typename MItype, typename MOtype> void InnerProductLayer<Dtype, MItype, MOtype>::Backward_gpu( const vector<Blob<MOtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<MItype>*>& bottom) { if (this->param_propagate_down_[0]) { vptr<const Dtype> top_diff = top[0]->gpu_diff(); vptr<const Dtype> bottom_data = bottom[0]->gpu_data(); // Gradient with respect to weight if (transpose_) { this->device_->template gemm<Dtype>(CblasTrans, CblasNoTrans, K_, N_, M_, (Dtype) 1., bottom_data, top_diff, (Dtype) 1., this->blobs_[0]->mutable_gpu_diff()); } else { this->device_->template gemm<Dtype>(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype) 1., top_diff, bottom_data, (Dtype) 1., this->blobs_[0]->mutable_gpu_diff()); } } if (bias_term_ && this->param_propagate_down_[1]) { vptr<const Dtype> top_diff = top[0]->gpu_diff(); // Gradient with respect to bias this->device_->template gemv<Dtype>(CblasTrans, M_, N_, (Dtype) 1., top_diff, bias_multiplier_.gpu_data(), (Dtype) 1., this->blobs_[1]->mutable_gpu_diff()); } if (propagate_down[0]) { vptr<const Dtype> top_diff = top[0]->gpu_diff(); // Gradient with respect to bottom data if (transpose_) { this->device_->template gemm<Dtype>(CblasNoTrans, CblasTrans, M_, K_, N_, (Dtype) 1., top_diff, this->blobs_[0]->gpu_data(), (Dtype) 0., bottom[0]->mutable_gpu_diff()); } else { this->device_->template gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, K_, N_, (Dtype) 1., top_diff, this->blobs_[0]->gpu_data(), (Dtype) 0., bottom[0]->mutable_gpu_diff()); } } } INSTANTIATE_CLASST_FUNC_3T_GUARDED(InnerProductLayer); #endif // TODO_REFACTOR } // namespace caffe
the_stack
#include <cusp/ell_matrix.h> #include <cusp/multiply.h> template <typename MemorySpace> void TestEllMatrixView(void) { typedef int IndexType; typedef float ValueType; typedef typename cusp::ell_matrix<IndexType,ValueType,MemorySpace> Matrix; typedef typename cusp::array1d<IndexType,MemorySpace>::iterator IndexIterator; typedef typename cusp::array1d<ValueType,MemorySpace>::iterator ValueIterator; typedef typename cusp::array1d_view<IndexIterator> IndexView1d; typedef typename cusp::array1d_view<ValueIterator> ValueView1d; typedef typename cusp::array2d_view<IndexView1d,cusp::column_major> IndexView2d; typedef typename cusp::array2d_view<ValueView1d,cusp::column_major> ValueView2d; typedef typename cusp::ell_matrix_view<IndexView2d,ValueView2d> View; Matrix M(3, 2, 6, 2); View V(3, 2, 6, cusp::make_array2d_view(M.column_indices), cusp::make_array2d_view(M.values)); ASSERT_EQUAL(V.num_rows, 3); ASSERT_EQUAL(V.num_cols, 2); ASSERT_EQUAL(V.num_entries, 6); ASSERT_EQUAL_QUIET(V.column_indices.num_rows, M.column_indices.num_rows); ASSERT_EQUAL_QUIET(V.column_indices.num_cols, M.column_indices.num_cols); ASSERT_EQUAL_QUIET(V.column_indices.num_entries, M.column_indices.num_entries); ASSERT_EQUAL_QUIET(V.column_indices.pitch, M.column_indices.pitch); ASSERT_EQUAL_QUIET(V.column_indices.values.begin(), M.column_indices.values.begin()); ASSERT_EQUAL_QUIET(V.column_indices.values.end(), M.column_indices.values.end()); ASSERT_EQUAL_QUIET(V.values.num_rows, M.values.num_rows); ASSERT_EQUAL_QUIET(V.values.num_cols, M.values.num_cols); ASSERT_EQUAL_QUIET(V.values.num_entries, M.values.num_entries); ASSERT_EQUAL_QUIET(V.values.pitch, M.values.pitch); ASSERT_EQUAL_QUIET(V.values.values.begin(), M.values.values.begin()); ASSERT_EQUAL_QUIET(V.values.values.end(), M.values.values.end()); View W(M); ASSERT_EQUAL(W.num_rows, 3); ASSERT_EQUAL(W.num_cols, 2); ASSERT_EQUAL(W.num_entries, 6); ASSERT_EQUAL_QUIET(W.column_indices.num_rows, M.column_indices.num_rows); 
ASSERT_EQUAL_QUIET(W.column_indices.num_cols, M.column_indices.num_cols); ASSERT_EQUAL_QUIET(W.column_indices.num_entries, M.column_indices.num_entries); ASSERT_EQUAL_QUIET(W.column_indices.pitch, M.column_indices.pitch); ASSERT_EQUAL_QUIET(W.column_indices.values.begin(), M.column_indices.values.begin()); ASSERT_EQUAL_QUIET(W.column_indices.values.end(), M.column_indices.values.end()); ASSERT_EQUAL_QUIET(W.values.num_rows, M.values.num_rows); ASSERT_EQUAL_QUIET(W.values.num_cols, M.values.num_cols); ASSERT_EQUAL_QUIET(W.values.num_entries, M.values.num_entries); ASSERT_EQUAL_QUIET(W.values.pitch, M.values.pitch); ASSERT_EQUAL_QUIET(W.values.values.begin(), M.values.values.begin()); ASSERT_EQUAL_QUIET(W.values.values.end(), M.values.values.end()); } DECLARE_HOST_DEVICE_UNITTEST(TestEllMatrixView); template <typename MemorySpace> void TestEllMatrixViewAssignment(void) { typedef int IndexType; typedef float ValueType; typedef typename cusp::ell_matrix<IndexType,ValueType,MemorySpace> Matrix; typedef typename cusp::array1d<IndexType,MemorySpace>::iterator IndexIterator; typedef typename cusp::array1d<ValueType,MemorySpace>::iterator ValueIterator; typedef typename cusp::array1d_view<IndexIterator> IndexView1d; typedef typename cusp::array1d_view<ValueIterator> ValueView1d; typedef typename cusp::array2d_view<IndexView1d,cusp::column_major> IndexView2d; typedef typename cusp::array2d_view<ValueView1d,cusp::column_major> ValueView2d; typedef typename cusp::ell_matrix_view<IndexView2d,ValueView2d> View; Matrix M(3, 2, 6, 2); View V = M; ASSERT_EQUAL(V.num_rows, 3); ASSERT_EQUAL(V.num_cols, 2); ASSERT_EQUAL(V.num_entries, 6); ASSERT_EQUAL_QUIET(V.column_indices.num_rows, M.column_indices.num_rows); ASSERT_EQUAL_QUIET(V.column_indices.num_cols, M.column_indices.num_cols); ASSERT_EQUAL_QUIET(V.column_indices.num_entries, M.column_indices.num_entries); ASSERT_EQUAL_QUIET(V.column_indices.pitch, M.column_indices.pitch); ASSERT_EQUAL_QUIET(V.column_indices.values.begin(), 
M.column_indices.values.begin()); ASSERT_EQUAL_QUIET(V.column_indices.values.end(), M.column_indices.values.end()); ASSERT_EQUAL_QUIET(V.values.num_rows, M.values.num_rows); ASSERT_EQUAL_QUIET(V.values.num_cols, M.values.num_cols); ASSERT_EQUAL_QUIET(V.values.num_entries, M.values.num_entries); ASSERT_EQUAL_QUIET(V.values.pitch, M.values.pitch); ASSERT_EQUAL_QUIET(V.values.values.begin(), M.values.values.begin()); ASSERT_EQUAL_QUIET(V.values.values.end(), M.values.values.end()); View W = V; ASSERT_EQUAL(W.num_rows, 3); ASSERT_EQUAL(W.num_cols, 2); ASSERT_EQUAL(W.num_entries, 6); ASSERT_EQUAL_QUIET(W.column_indices.num_rows, M.column_indices.num_rows); ASSERT_EQUAL_QUIET(W.column_indices.num_cols, M.column_indices.num_cols); ASSERT_EQUAL_QUIET(W.column_indices.num_entries, M.column_indices.num_entries); ASSERT_EQUAL_QUIET(W.column_indices.pitch, M.column_indices.pitch); ASSERT_EQUAL_QUIET(W.column_indices.values.begin(), M.column_indices.values.begin()); ASSERT_EQUAL_QUIET(W.column_indices.values.end(), M.column_indices.values.end()); ASSERT_EQUAL_QUIET(W.values.num_rows, M.values.num_rows); ASSERT_EQUAL_QUIET(W.values.num_cols, M.values.num_cols); ASSERT_EQUAL_QUIET(W.values.num_entries, M.values.num_entries); ASSERT_EQUAL_QUIET(W.values.pitch, M.values.pitch); ASSERT_EQUAL_QUIET(W.values.values.begin(), M.values.values.begin()); ASSERT_EQUAL_QUIET(W.values.values.end(), M.values.values.end()); } DECLARE_HOST_DEVICE_UNITTEST(TestEllMatrixViewAssignment); template <typename MemorySpace> void TestMakeEllMatrixView(void) { typedef int IndexType; typedef float ValueType; typedef typename cusp::ell_matrix<IndexType,ValueType,MemorySpace> Matrix; typedef typename cusp::array1d<IndexType,MemorySpace>::iterator IndexIterator; typedef typename cusp::array1d<ValueType,MemorySpace>::iterator ValueIterator; typedef typename cusp::array1d_view<IndexIterator> IndexView1d; typedef typename cusp::array1d_view<ValueIterator> ValueView1d; typedef typename 
cusp::array2d_view<IndexView1d,cusp::column_major> IndexView2d; typedef typename cusp::array2d_view<ValueView1d,cusp::column_major> ValueView2d; typedef typename cusp::ell_matrix_view<IndexView2d,ValueView2d> View; typedef typename cusp::array1d<IndexType,MemorySpace>::const_iterator ConstIndexIterator; typedef typename cusp::array1d<ValueType,MemorySpace>::const_iterator ConstValueIterator; typedef typename cusp::array1d_view<ConstIndexIterator> ConstIndexView1d; typedef typename cusp::array1d_view<ConstValueIterator> ConstValueView1d; typedef typename cusp::array2d_view<ConstIndexView1d,cusp::column_major> ConstIndexView2d; typedef typename cusp::array2d_view<ConstValueView1d,cusp::column_major> ConstValueView2d; typedef typename cusp::ell_matrix_view<ConstIndexView2d,ConstValueView2d> ConstView; // construct view from parts { Matrix M(3, 2, 6, 2); View V = cusp::make_ell_matrix_view(3, 2, 6, cusp::make_array2d_view(M.column_indices), cusp::make_array2d_view(M.values)); ASSERT_EQUAL(V.num_rows, 3); ASSERT_EQUAL(V.num_cols, 2); ASSERT_EQUAL(V.num_entries, 6); V.column_indices(0,0) = 10; V.values(0,0) = 20; ASSERT_EQUAL_QUIET(V.column_indices, M.column_indices); ASSERT_EQUAL_QUIET(V.values, M.values); } // construct view from matrix { Matrix M(3, 2, 6, 2); View V = cusp::make_ell_matrix_view(M); ASSERT_EQUAL(V.num_rows, 3); ASSERT_EQUAL(V.num_cols, 2); ASSERT_EQUAL(V.num_entries, 6); V.column_indices(0,0) = 10; V.values(0,0) = 20; ASSERT_EQUAL_QUIET(V.column_indices, M.column_indices); ASSERT_EQUAL_QUIET(V.values, M.values); } // construct view from view { Matrix M(3, 2, 6, 2); View X = cusp::make_ell_matrix_view(M); View V = cusp::make_ell_matrix_view(X); ASSERT_EQUAL(V.num_rows, 3); ASSERT_EQUAL(V.num_cols, 2); ASSERT_EQUAL(V.num_entries, 6); V.column_indices(0,0) = 10; V.values(0,0) = 20; ASSERT_EQUAL_QUIET(V.column_indices, M.column_indices); ASSERT_EQUAL_QUIET(V.values, M.values); } // construct view from const matrix { const Matrix M(3, 2, 6, 2); ConstView V 
= cusp::make_ell_matrix_view(M); ASSERT_EQUAL(cusp::make_ell_matrix_view(M).num_rows, 3); ASSERT_EQUAL(cusp::make_ell_matrix_view(M).num_cols, 2); ASSERT_EQUAL(cusp::make_ell_matrix_view(M).num_entries, 6); ASSERT_EQUAL_QUIET(V.column_indices, M.column_indices); ASSERT_EQUAL_QUIET(V.values, M.values); } } DECLARE_HOST_DEVICE_UNITTEST(TestMakeEllMatrixView); template <typename MemorySpace> void TestEllToCooMatrixView(void) { typedef int IndexType; typedef float ValueType; typedef cusp::ell_matrix<IndexType,ValueType,MemorySpace> TestMatrix; typedef typename TestMatrix::coo_view_type View; cusp::coo_matrix<IndexType,ValueType,cusp::host_memory> A(3, 2, 6); A.row_indices[0] = 0; A.column_indices[0] = 0; A.values[0] = 1; A.row_indices[1] = 0; A.column_indices[1] = 1; A.values[1] = 2; A.row_indices[2] = 1; A.column_indices[2] = 0; A.values[2] = 3; A.row_indices[3] = 1; A.column_indices[3] = 1; A.values[3] = 4; A.row_indices[4] = 2; A.column_indices[4] = 0; A.values[4] = 5; A.row_indices[5] = 2; A.column_indices[5] = 1; A.values[5] = 6; TestMatrix M(A); View V(M); V.column_indices[0] = -1; V.values[0] = -1; ASSERT_EQUAL(M.column_indices(0,0), -1); ASSERT_EQUAL(M.values(0,0), -1); } DECLARE_HOST_DEVICE_UNITTEST(TestEllToCooMatrixView);
the_stack
#include <types.h>
#include <cutil.h>
#include <vector>

/* Smoothed-aggregation AMG level: builds the permuted/partitioned system
 * matrix, the smoothed prolongator P = (I - lambda*D^-1*S) * T, and the
 * Galerkin coarse operator for the next level. */

template <class Matrix, class Vector>
SmoothedMG_AMG_Level<Matrix, Vector>::SmoothedMG_AMG_Level(AMG<Matrix, Vector> *amg)
   : AMG_Level<Matrix, Vector>(amg)
{
   // Instantiate the aggregation strategy chosen on the AMG driver object.
   aggregator = Aggregator<Matrix, Vector>::allocate(amg->aggregatorType_); // DHL
}

template <class Matrix, class Vector>
SmoothedMG_AMG_Level<Matrix, Vector>::~SmoothedMG_AMG_Level()
{
}

/* Packs the (row, col) pair of each COO entry into a single int, both indices
 * taken relative to the start of the entry's partition:
 *   row_indices[e] = (localrow << 16) | localcol
 * NOTE(review): assumes local indices fit in 16 bits (partition size < 65536)
 * — not checked here. */
__global__ void packcoo_kernel(int num_entries,
                               int* row_indices,
                               int* column_indices,
                               int* aggridx,
                               int* partidx,
                               int* partlabel)
{
   int entryidx = blockIdx.x * blockDim.x + threadIdx.x;
   if(entryidx < num_entries)
   {
      int row = row_indices[entryidx];
      int col = column_indices[entryidx];
      int l = partlabel[row];
      int partstart = aggridx[partidx[l]];
      unsigned int newindex = row - partstart;
      newindex <<= 16;
      newindex += col - partstart;
      row_indices[entryidx] = newindex;
   }
}

/* Applies the permutation to every COO entry and labels it for the later
 * sort/segmentation:
 *   - strictly-upper entry inside a partition -> rowpartition
 *   - lower/diagonal entry inside a partition -> INT_MAX (dropped as the
 *     symmetric duplicate)
 *   - entry straddling two partitions         -> rowpartition + np
 */
__global__ void matrixpermute_kernel(int np, int num_entries,
                                     int* row_indices,
                                     int* column_indices,
                                     AMGType* values,
                                     int* entrypartlabel,
                                     int* permutation,
                                     int* partitionlabel)
{
   int entryidx = blockIdx.x * blockDim.x + threadIdx.x;
   if(entryidx < num_entries)
   {
      int oldrow = row_indices[entryidx];
      int oldcol = column_indices[entryidx];
      int newrow = permutation[oldrow];
      int newcol = permutation[oldcol];
      row_indices[entryidx] = newrow;
      column_indices[entryidx] = newcol;
      int rowpartition = partitionlabel[newrow];
      int colpartition = partitionlabel[newcol];
      if(rowpartition == colpartition) //inside point
      {
         if(newcol > newrow)
         {
            entrypartlabel[entryidx] = rowpartition;
         }
         else
            entrypartlabel[entryidx] = INT_MAX;
      }
      else
      {
         entrypartlabel[entryidx] = rowpartition + np;
      }
   }
}

/* Same as matrixpermute_kernel, but keeps every inside entry (no INT_MAX
 * culling) — used for the CSR data-structure path. */
__global__ void matrixpermute_csr_kernel(int np, int num_entries,
                                         int* row_indices,
                                         int* column_indices,
                                         AMGType* values,
                                         int* entrypartlabel,
                                         int* permutation,
                                         int* partitionlabel)
{
   int entryidx = blockIdx.x * blockDim.x + threadIdx.x;
   if(entryidx < num_entries)
   {
      int oldrow = row_indices[entryidx];
      int oldcol = column_indices[entryidx];
      int newrow = permutation[oldrow];
      int newcol = permutation[oldcol];
      row_indices[entryidx] = newrow;
      column_indices[entryidx] = newcol;
      int rowpartition = partitionlabel[newrow];
      int colpartition = partitionlabel[newcol];
      if(rowpartition == colpartition) //inside point
      {
         entrypartlabel[entryidx] = rowpartition;
      }
      else
      {
         entrypartlabel[entryidx] = rowpartition + np;
      }
   }
}

/* Builds the partition-local CSR representation (AinCSR_d) plus the
 * cross-partition COO remainder (Aout_d), along with per-partition entry
 * offsets AinBlockIdx_d / AoutBlockIdx_d. */
template <>
void SmoothedMG_AMG_Level<Matrix_h, Vector_h>::generateMatrixCsr(IdxVector_d &permutation,
                                                                 IdxVector_d &aggregateIdx,
                                                                 IdxVector_d &partitionIdx,
                                                                 IdxVector_d &partitionlabel)
{
   int numpart = partitionIdx.size() - 1;
   int numentries = A_d.num_entries;
   Acoo_d = A_d;
   cusp::array1d<int, cusp::device_memory> entrypartlabel(numentries, -1);
   size_t blocksize = this->amg->blockSize_;
   size_t blocknum = ceil((float)numentries / (float)blocksize);
   if(blocknum > 65535) printf("too many blocks!!\n");
   matrixpermute_csr_kernel << <blocknum, blocksize >> >(numpart, A_d.num_entries,
                                                         thrust::raw_pointer_cast(&Acoo_d.row_indices[0]),
                                                         thrust::raw_pointer_cast(&Acoo_d.column_indices[0]),
                                                         thrust::raw_pointer_cast(&Acoo_d.values[0]),
                                                         thrust::raw_pointer_cast(&entrypartlabel[0]),
                                                         thrust::raw_pointer_cast(&permutation[0]),
                                                         thrust::raw_pointer_cast(&partitionlabel[0]));
   cudaCheckError();

   typedef IdxVector_d::iterator IntIterator;
   typedef Vector_d::iterator FloatIterator;
   typedef thrust::tuple<IntIterator, IntIterator, FloatIterator> IteratorTuple;
   typedef thrust::zip_iterator<IteratorTuple> ZipIterator;

   // Sort all entries by their partition label so inside entries
   // (labels 0..numpart-1) come first, then cross-partition entries.
   ZipIterator iter(thrust::make_tuple(Acoo_d.row_indices.begin(),
                                       Acoo_d.column_indices.begin(),
                                       Acoo_d.values.begin()));
   thrust::sort_by_key(entrypartlabel.begin(), entrypartlabel.end(), iter);

   IdxVector_d redvalue(numentries, 1);
   IdxVector_d redoutputkey(2 * numpart);
   IdxVector_d redoutputvalue(2 * numpart);
   thrust::reduce_by_key(entrypartlabel.begin(), entrypartlabel.end(),
                         redvalue.begin(), redoutputkey.begin(), redoutputvalue.begin());
   int innum = thrust::reduce(redoutputvalue.begin(), redoutputvalue.begin() + numpart);
   int outnum = numentries - innum;
   IntIterator res = thrust::max_element(redoutputvalue.begin(), redoutputvalue.begin() + numpart);
   largest_num_entries = *res;
   printf("CSR largest_num_entries is %d\n", largest_num_entries);
   printf("CSR inside number is %d, outside number is %d\n", innum, outnum);

   // Exclusive scan of per-partition counts -> entry offsets; the scan
   // overwrites slot [numpart], so the totals are re-written afterwards.
   IdxVector_d AinBlockIdx(numpart + 1);
   thrust::copy(redoutputvalue.begin(), redoutputvalue.begin() + numpart, AinBlockIdx.begin());
   thrust::exclusive_scan(AinBlockIdx.begin(), AinBlockIdx.end(), AinBlockIdx.begin());
   AinBlockIdx[numpart] = innum;
   IdxVector_d AoutBlockIdx(numpart + 1);
   thrust::copy(redoutputvalue.begin() + numpart, redoutputvalue.begin() + 2 * numpart, AoutBlockIdx.begin());
   thrust::exclusive_scan(AoutBlockIdx.begin(), AoutBlockIdx.end(), AoutBlockIdx.begin());
   AoutBlockIdx[numpart] = outnum;
   AinBlockIdx_d = AinBlockIdx;
   AoutBlockIdx_d = AoutBlockIdx;

   AinCSR_d = Matrix_d(A_d.num_rows, A_d.num_cols, innum);
   Aout_d = Matrix_coo_d(A_d.num_rows, A_d.num_cols, outnum);

   // Inside entries -> temporary COO, sorted, then converted to CSR.
   Matrix_coo_d AinCOO_tmp(A_d.num_rows, A_d.num_cols, innum);
   thrust::copy_n(Acoo_d.row_indices.begin(), innum, AinCOO_tmp.row_indices.begin());
   thrust::copy_n(Acoo_d.column_indices.begin(), innum, AinCOO_tmp.column_indices.begin());
   thrust::copy_n(Acoo_d.values.begin(), innum, AinCOO_tmp.values.begin());
   AinCOO_tmp.sort_by_row_and_column();
   AinCSR_d.column_indices = AinCOO_tmp.column_indices;
   AinCSR_d.values = AinCOO_tmp.values;
   cusp::detail::indices_to_offsets(AinCOO_tmp.row_indices, AinCSR_d.row_offsets);

   // BUGFIX: this reduction is keyed by row index (up to num_rows groups) but
   // previously reused redoutputkey/redoutputvalue of size 2*numpart, and
   // max_element scanned num_rows entries — out-of-bounds whenever
   // num_rows > 2*numpart. Use correctly sized vectors and bound the scan by
   // the iterator pair returned by reduce_by_key.
   IdxVector_d rowredkey(AinCOO_tmp.num_rows);
   IdxVector_d rowredvalue(AinCOO_tmp.num_rows);
   thrust::pair<IntIterator, IntIterator> rowredend =
      thrust::reduce_by_key(AinCOO_tmp.row_indices.begin(), AinCOO_tmp.row_indices.end(),
                            redvalue.begin(), rowredkey.begin(), rowredvalue.begin());
   IntIterator res2 = thrust::max_element(rowredvalue.begin(), rowredend.second);
   largest_num_per_row = *res2;

   // Cross-partition entries -> Aout_d.
   thrust::copy_n(Acoo_d.row_indices.begin() + innum, outnum, Aout_d.row_indices.begin());
   thrust::copy_n(Acoo_d.column_indices.begin() + innum, outnum, Aout_d.column_indices.begin());
   thrust::copy_n(Acoo_d.values.begin() + innum, outnum, Aout_d.values.begin());
   Aout_d.sort_by_row_and_column();

   Acoo_d.sort_by_row_and_column();
   A_d = Acoo_d;
   printf("Finished generateMatrixCSR_d!!\n");
}

/* Builds the symmetric (upper-triangle-only inside blocks) partitioned
 * representation AinSysCoo_d / AoutSys_d with per-partition offsets, and
 * packs inside entries' (row, col) into a single int via packcoo_kernel. */
template <>
void SmoothedMG_AMG_Level<Matrix_h, Vector_h>::generateMatrixSymmetric_d(IdxVector_d &permutation,
                                                                         IdxVector_d &aggregateIdx,
                                                                         IdxVector_d &partitionIdx,
                                                                         IdxVector_d &partitionlabel,
                                                                         bool verbose)
{
   int numpart = partitionIdx.size() - 1;
   int numentries = A_d.num_entries;
   Acoo_d = A_d;
   cusp::array1d<int, cusp::device_memory> entrypartlabel(numentries, -1);
   size_t blocksize = this->amg->blockSize_;
   size_t blocknum = ceil((float)numentries / (float)blocksize);
   if(blocknum > 65535) printf("too many blocks!!\n");
   matrixpermute_kernel << <blocknum, blocksize >> >(numpart, A_d.num_entries,
                                                     thrust::raw_pointer_cast(&Acoo_d.row_indices[0]),
                                                     thrust::raw_pointer_cast(&Acoo_d.column_indices[0]),
                                                     thrust::raw_pointer_cast(&Acoo_d.values[0]),
                                                     thrust::raw_pointer_cast(&entrypartlabel[0]),
                                                     thrust::raw_pointer_cast(&permutation[0]),
                                                     thrust::raw_pointer_cast(&partitionlabel[0]));
   cudaCheckError();  // added: the CSR path checks its launch, this one did not

   // NOTE(review): dead debug code that copied entrypartlabel and the
   // reduction outputs back to host std::vectors on every call was removed;
   // the vectors were never read.
   typedef IdxVector_d::iterator IntIterator;
   typedef Vector_d::iterator FloatIterator;
   typedef thrust::tuple<IntIterator, IntIterator, FloatIterator> IteratorTuple;
   typedef thrust::zip_iterator<IteratorTuple> ZipIterator;
   ZipIterator iter(thrust::make_tuple(Acoo_d.row_indices.begin(),
                                       Acoo_d.column_indices.begin(),
                                       Acoo_d.values.begin()));
   thrust::sort_by_key(entrypartlabel.begin(), entrypartlabel.end(), iter);

   if (verbose)
      printf("partition number is %d\n", numpart);

   // One extra slot: the INT_MAX label (culled lower-triangle inside entries)
   // forms a (2*numpart + 1)-th group.
   IdxVector_d redvalue(numentries, 1);
   IdxVector_d redoutputkey(2 * numpart + 1);
   IdxVector_d redoutputvalue(2 * numpart + 1);
   thrust::reduce_by_key(entrypartlabel.begin(), entrypartlabel.end(),
                         redvalue.begin(), redoutputkey.begin(), redoutputvalue.begin());
   int innum = thrust::reduce(redoutputvalue.begin(), redoutputvalue.begin() + numpart);
   // outside count excludes both the inside entries and the INT_MAX group
   int outnum = numentries - innum - redoutputvalue[numpart * 2];
   IntIterator res = thrust::max_element(redoutputvalue.begin(), redoutputvalue.begin() + numpart);
   largest_num_entries = *res;
   if (verbose)
   {
      printf("largest_num_entries is %d\n", largest_num_entries);
      printf("inside number is %d, outside number is %d\n", innum, outnum);
   }

   IdxVector_d AinBlockIdx(numpart + 1);
   thrust::copy(redoutputvalue.begin(), redoutputvalue.begin() + numpart, AinBlockIdx.begin());
   thrust::exclusive_scan(AinBlockIdx.begin(), AinBlockIdx.end(), AinBlockIdx.begin());
   AinBlockIdx[numpart] = innum;
   IdxVector_d AoutBlockIdx(numpart + 1);
   thrust::copy(redoutputvalue.begin() + numpart, redoutputvalue.begin() + 2 * numpart, AoutBlockIdx.begin());
   thrust::exclusive_scan(AoutBlockIdx.begin(), AoutBlockIdx.end(), AoutBlockIdx.begin());
   AoutBlockIdx[numpart] = outnum;
   AinBlockIdx_d = AinBlockIdx;
   AoutBlockIdx_d = AoutBlockIdx;

   AinSysCoo_d = Matrix_coo_d(A_d.num_rows, A_d.num_cols, innum);
   AoutSys_d = Matrix_coo_d(A_d.num_rows, A_d.num_cols, outnum);
   thrust::copy_n(Acoo_d.row_indices.begin(), innum, AinSysCoo_d.row_indices.begin());
   thrust::copy_n(Acoo_d.column_indices.begin(), innum, AinSysCoo_d.column_indices.begin());
   thrust::copy_n(Acoo_d.values.begin(), innum, AinSysCoo_d.values.begin());
   AinSysCoo_d.sort_by_row_and_column();
   thrust::copy_n(Acoo_d.row_indices.begin() + innum, outnum, AoutSys_d.row_indices.begin());
   thrust::copy_n(Acoo_d.column_indices.begin() + innum, outnum, AoutSys_d.column_indices.begin());
   thrust::copy_n(Acoo_d.values.begin() + innum, outnum, AoutSys_d.values.begin());
   AoutSys_d.sort_by_row_and_column();

   //pack column of Ain to row
   blocknum = ceil((float)innum / (float)blocksize);
   if(blocknum > 65535) printf("too many blocks!!\n");
   packcoo_kernel << <blocknum, blocksize >> >(innum,
                                               thrust::raw_pointer_cast(&AinSysCoo_d.row_indices[0]),
                                               thrust::raw_pointer_cast(&AinSysCoo_d.column_indices[0]),
                                               thrust::raw_pointer_cast(&aggregateIdx[0]),
                                               thrust::raw_pointer_cast(&partitionIdx[0]),
                                               thrust::raw_pointer_cast(&partitionlabel[0]));
   cudaCheckError();  // added: launch was previously unchecked

   Acoo_d.sort_by_row_and_column();
   A_d = Acoo_d;
}

// Binary functor: (x, y) -> lambda * x * y, used to form -lambda * S * T.
template <typename T>
struct scaled_multiply
{
   const T lambda;

   scaled_multiply(const T lambda) : lambda(lambda) {}

   __host__ __device__
   T operator()(const T& x, const T& y) const
   {
      return lambda * x * y;
   }
};

/* Builds the smoothed prolongator P = (I - lambda * D^-1 * S) * T, where T is
 * the tentative (piecewise-constant) prolongator defined by the aggregates,
 * then R = P^T. Results are kept in P_d/R_d and mirrored in
 * prolongatorFull_d/restrictorFull_d. */
template <>
void SmoothedMG_AMG_Level<Matrix_h, Vector_h>::generateProlongatorFull_d(IdxVector_d &aggregateIdx,
                                                                         IdxVector_d &partitionIdx)
{
   int num_aggregates = this->nnout;
   Matrix_coo_d &S = Acoo_d;

   // Tentative prolongator T: one unit entry per row, column = aggregate id.
   Matrix_coo_d T(A_d.num_rows, num_aggregates, A_d.num_rows);
   thrust::sequence(T.row_indices.begin(), T.row_indices.end());
   cusp::detail::offsets_to_indices(aggregateIdx, T.column_indices);
   thrust::fill(T.values.begin(), T.values.end(), 1);

   const AMGType lambda = this->amg->proOmega_;

   // temp <- -lambda * S(i,j) * T(j,k)
   Matrix_coo_d temp(S.num_rows, T.num_cols, S.num_entries + T.num_entries);
   thrust::copy(S.row_indices.begin(), S.row_indices.end(), temp.row_indices.begin());
   thrust::gather(S.column_indices.begin(), S.column_indices.end(),
                  T.column_indices.begin(), temp.column_indices.begin());
   thrust::transform(S.values.begin(), S.values.end(),
                     thrust::make_permutation_iterator(T.values.begin(), S.column_indices.begin()),
                     temp.values.begin(),
                     scaled_multiply<AMGType > (-lambda));

   // temp <- D^-1 * temp  (scale each smoothed entry by its row's diagonal)
   {
      Vector_d D(S.num_rows);
      cusp::detail::extract_diagonal(S, D);
      thrust::transform(temp.values.begin(), temp.values.begin() + S.num_entries,
                        thrust::make_permutation_iterator(D.begin(), S.row_indices.begin()),
                        temp.values.begin(),
                        thrust::divides<AMGType > ());
   }

   // temp <- temp + T (appended, then merged by the sort/reduce below)
   thrust::copy(T.row_indices.begin(), T.row_indices.end(), temp.row_indices.begin() + S.num_entries);
   thrust::copy(T.column_indices.begin(), T.column_indices.end(), temp.column_indices.begin() + S.num_entries);
   thrust::copy(T.values.begin(), T.values.end(), temp.values.begin() + S.num_entries);

   // sort by (I,J)
   cusp::detail::sort_by_row_and_column(temp.row_indices, temp.column_indices, temp.values);

   // Count distinct (i,j) pairs: number of adjacent-unequal boundaries + 1.
   IndexType NNZ =
      thrust::inner_product(thrust::make_zip_iterator(thrust::make_tuple(temp.row_indices.begin(), temp.column_indices.begin())),
                            thrust::make_zip_iterator(thrust::make_tuple(temp.row_indices.end(), temp.column_indices.end())) - 1,
                            thrust::make_zip_iterator(thrust::make_tuple(temp.row_indices.begin(), temp.column_indices.begin())) + 1,
                            IndexType(0),
                            thrust::plus<IndexType > (),
                            thrust::not_equal_to< thrust::tuple<IndexType, IndexType> >()) + 1;

   // allocate space for output
   P_d.resize(temp.num_rows, temp.num_cols, NNZ);

   // sum values with the same (i,j)
   thrust::reduce_by_key(thrust::make_zip_iterator(thrust::make_tuple(temp.row_indices.begin(), temp.column_indices.begin())),
                         thrust::make_zip_iterator(thrust::make_tuple(temp.row_indices.end(), temp.column_indices.end())),
                         temp.values.begin(),
                         thrust::make_zip_iterator(thrust::make_tuple(P_d.row_indices.begin(), P_d.column_indices.begin())),
                         P_d.values.begin(),
                         thrust::equal_to< thrust::tuple<IndexType, IndexType> >(),
                         thrust::plus<ValueType > ());

   cusp::transpose(P_d, R_d);
   prolongatorFull_d = P_d;
   restrictorFull_d = R_d;
}

/* Galerkin product for the next level: A_next = R * A * P. */
template <>
void SmoothedMG_AMG_Level<Matrix_h, Vector_h>::generateNextLevelMatrixFull_d(bool verbose)
{
   Matrix_coo_d AP;
   Matrix_coo_d Atmp = A_d;
   cusp::multiply(Atmp, P_d, AP);
   Matrix_d& Anextlevel2 = this->next->getA_d();
   Matrix_coo_d tmpmtx;
   cusp::multiply(R_d, AP, tmpmtx);
   Anextlevel2 = tmpmtx;
   if (verbose)
      // BUGFIX: format was "%lud" (prints a stray 'd'); casts keep %lu valid
      // regardless of the library's size type.
      printf("Anextlevel num_rows=%lu, num_cols=%lu, num_entries=%lu\n",
             (unsigned long)Anextlevel2.num_rows,
             (unsigned long)Anextlevel2.num_cols,
             (unsigned long)Anextlevel2.num_entries);
}

/* Drives coarsening for this level: computes the aggregation/permutation,
 * builds the partitioned matrix representation (per amg->dsType_), the
 * prolongator, and the next level's Galerkin operator. */
template <>
void SmoothedMG_AMG_Level<Matrix_h, Vector_h>::createNextLevel(bool verbose)
{
   // NOTE(review): cudaThreadSetCacheConfig is deprecated in favor of
   // cudaDeviceSetCacheConfig; kept as-is for the toolkit version this
   // project targets.
   cudaThreadSetCacheConfig(cudaFuncCachePreferShared);
   permutation_d = IdxVector_d(this->nn, -1);
   ipermutation_d = IdxVector_d(this->nn, -1);
   IdxVector_d partitionlabel_d(this->nn);
   IdxVector_h partitionlabelpermuted(this->nn);
   IdxVector_h partitionlabel(this->nn);
   if (verbose) std::cout << "Initialized IdxVector host & device vars." << std::endl;

   //compute permutation
   if(this->level_id == 0)
   {
      // Finest level: aggregate directly from the input mesh.
      if(this->amg->triMesh_ != NULL)
      {
         if (verbose) std::cout << "calling computePermutation_d with tri mesh." << std::endl;
         aggregator->computePermutation_d(this->m_meshPtr, permutation_d, ipermutation_d,
                                          aggregateIdx_d, partitionIdx_d, partitionlabel_d,
                                          m_xadjout_d, m_adjncyout_d,
                                          this->amg->aggregatorType_,
                                          this->amg->randMisParameters_,
                                          this->amg->partitionMaxSize_, verbose); // DHL
         if (verbose) std::cout << "computePermutation_d called with tri mesh." << std::endl;
      }
      else
      {
         if (verbose) std::cout << "calling computePermutation_d with tet mesh." << std::endl;
         aggregator->computePermutation_d(this->m_tetmeshPtr, permutation_d, ipermutation_d,
                                          aggregateIdx_d, partitionIdx_d, partitionlabel_d,
                                          m_xadjout_d, m_adjncyout_d,
                                          this->amg->aggregatorType_,
                                          this->amg->randMisParameters_,
                                          this->amg->partitionMaxSize_, verbose); // DHL
         if (verbose) std::cout << "computePermutation_d called with tet mesh." << std::endl;
      }
   }
   else
   {
      // Coarser levels: aggregate from the CSR adjacency of this level.
      if (verbose) std::cout << "calling computePermutation_d with level_id != 0." << std::endl;
      aggregator->computePermutation_d(m_xadj_d, m_adjncy_d, permutation_d, ipermutation_d,
                                       aggregateIdx_d, partitionIdx_d, partitionlabel_d,
                                       m_xadjout_d, m_adjncyout_d,
                                       this->amg->aggregatorType_,
                                       this->amg->randMisParameters_,
                                       this->amg->partitionMaxSize_, verbose); // DHL
      if (verbose) std::cout << "computePermutation_d called with level_id != 0." << std::endl;
   }
   if (verbose) std::cout << "size: " << partitionIdx_d.size() - 1 << std::endl;
   this->nnout = aggregateIdx_d.size() - 1;

   // Per-partition vertex counts (assumes partitionlabel_d is segment-sorted,
   // which the permutation above establishes).
   IdxVector_d ones(partitionlabel_d.size(), 1);
   IdxVector_d outputkeys(partitionIdx_d.size() - 1);
   IdxVector_d blocksizes(partitionIdx_d.size() - 1);
   thrust::reduce_by_key(partitionlabel_d.begin(), partitionlabel_d.end(),
                         ones.begin(), outputkeys.begin(), blocksizes.begin());
   largestblocksize = thrust::reduce(blocksizes.begin(), blocksizes.end(), -1, thrust::maximum<int>());
   if (verbose) std::cout << "The largest block size is " << largestblocksize << std::endl;

   //generate matrix
   int num_per_thread;
   switch (amg->dsType_)
   {
   case 0:
      generateMatrixSymmetric_d(permutation_d, aggregateIdx_d, partitionIdx_d, partitionlabel_d, verbose);
      num_per_thread = ceil((double)largest_num_entries / largestblocksize);
      if (verbose) std::cout << "The largest num of entries per thread is " << num_per_thread << std::endl;
      break;
   case 1:
      break;
   case 2:
      generateMatrixCsr(permutation_d, aggregateIdx_d, partitionIdx_d, partitionlabel_d);
      break;
   default:
      std::cout << "Wrong DStype 2!" << std::endl;
      exit(1); // BUGFIX: was exit(0), which reported success on this error path
   }
   //generate prolongator
   generateProlongatorFull_d(aggregateIdx_d, partitionIdx_d);
   //generate matrix for next level
   generateNextLevelMatrixFull_d(verbose);
}

// Device-matrix specialization is intentionally a no-op (host path drives
// the device work above).
template <>
void SmoothedMG_AMG_Level<Matrix_d, Vector_d>::createNextLevel(bool verbose)
{
}

template <class Matrix, class Vector>
void SmoothedMG_AMG_Level<Matrix, Vector>::computeProlongationOperator()
{
   // NOTE(review): tic without a matching toc in this translation unit —
   // presumably closed elsewhere; verify against the profiler's contract.
   this->Profile.tic("computeP");
}

#include <cusp/multiply.h>
#include <cusp/transpose.h>
#include <cusp/blas.h>

/**********************************************
 * computes R=P^T
 **********************************************/
template <class Matrix, class Vector>
void SmoothedMG_AMG_Level<Matrix, Vector>::computeRestrictionOperator()
{
}

/**********************************************
 * computes the Galerkin product: A_c=R*A*P
 **********************************************/
template <class Matrix, class Vector>
void SmoothedMG_AMG_Level<Matrix, Vector>::computeAOperator()
{
}

/**********************************************
 * computes the restriction: rr=R*r
 **********************************************/
template <class Matrix, class Vector>
void SmoothedMG_AMG_Level<Matrix, Vector>::restrictResidual(const Vector &r, Vector & rr)
{
}

/**********************************************
 * prolongates the error: x+=P*e
 **********************************************/
template <class Matrix, class Vector>
void SmoothedMG_AMG_Level<Matrix, Vector>::prolongateAndApplyCorrection(const Vector &e, Vector &x, Vector & tmp)
{
}

/****************************************
 * Explict instantiations
 ***************************************/
template class SmoothedMG_AMG_Level<Matrix_h, Vector_h>;
template class SmoothedMG_AMG_Level<Matrix_d, Vector_d>;
the_stack
namespace sqint = sqaod_internal;
using namespace sqaod_cuda;

#define SQAODC_VECTORIZE_JQ

#ifdef SQAODC_VECTORIZE_JQ
template<class real>
using DotJq = DeviceDotJqVec4<real, real*>;
#else
template<class real>
using DotJq = DeviceDotJq<real, real*>;
#endif

template<class real>
using DotSpins = DeviceDotSpins<real, char>;

/* CUDA dense-graph annealer: simulated quantum annealing (SQA, with Trotter
 * slices) or naive simulated annealing (SA) on a dense Ising model. */

template<class real>
CUDADenseGraphAnnealer<real>::CUDADenseGraphAnnealer() {
    devStream_ = NULL;
    m_ = -1;
    d_reachCount_ = NULL;
    /* BUGFIX: dotJq_/dotSpins_ were left uninitialized here; the destructor's
     * NULL checks then read indeterminate pointers when no device was ever
     * assigned. */
    dotJq_ = NULL;
    dotSpins_ = NULL;
    selectAlgorithm(sq::algoDefault);
    annealMethod_ = &CUDADenseGraphAnnealer::annealOneStepSQA;
}

template<class real>
CUDADenseGraphAnnealer<real>::CUDADenseGraphAnnealer(Device &device) {
    devStream_ = NULL;
    m_ = -1;
    d_reachCount_ = NULL;
    dotJq_ = NULL;      /* see BUGFIX note in the default constructor */
    dotSpins_ = NULL;
    annealMethod_ = &CUDADenseGraphAnnealer::annealOneStepSQA; /* consistency with default ctor */
    assignDevice(device);
    selectAlgorithm(sq::algoDefault);
}

template<class real>
CUDADenseGraphAnnealer<real>::~CUDADenseGraphAnnealer() {
    /* BUGFIX: guard against destruction without a device ever assigned;
     * deallocate() dereferences devAlloc_, which assignDevice() sets. */
    if (devStream_ != NULL)
        deallocate();
    d_random_.deallocate();
    if (d_reachCount_ != NULL)
        devAlloc_->deallocate(d_reachCount_);
    d_reachCount_ = NULL;
    if (dotJq_ != NULL) {
        delete dotJq_;
        dotJq_ = NULL;
        delete dotSpins_;
        dotSpins_ = NULL;
    }
}

template<class real>
void CUDADenseGraphAnnealer<real>::deallocate() {
    deallocateProblem();
    deallocateInternalObjects();
}

/* Releases the problem definition (h, J, c). */
template<class real>
void CUDADenseGraphAnnealer<real>::deallocateProblem() {
    devAlloc_->deallocate(d_J_);
    devAlloc_->deallocate(d_h_);
    devAlloc_->deallocate(d_c_);
    clearState(solProblemSet);
}

/* Releases per-run working buffers (spin matrix, Jq, host mirrors, RNG pools). */
template<class real>
void CUDADenseGraphAnnealer<real>::deallocateInternalObjects() {
    devAlloc_->deallocate(d_matq_);
    devAlloc_->deallocate(d_Jq_);

    HostObjectAllocator halloc;
    halloc.deallocate(h_E_);
    halloc.deallocate(h_q_);
    E_ = HostVector();
    halloc.deallocate(h_spinDotSum_);

    flipPosBuffer_.deallocate();
    realNumBuffer_.deallocate();

    clearState(solPrepared);
    clearState(solQSet);
}

template<class real>
void CUDADenseGraphAnnealer<real>::assignDevice(sqaod::cuda::Device &device) {
    assignDevice(static_cast<Device&>(device));
}

/* Binds this annealer to a device: stream, allocators, helper objects, and
 * the inter-block sync counter. Must be called exactly once. */
template<class real>
void CUDADenseGraphAnnealer<real>::assignDevice(Device &device) {
    throwErrorIf(devStream_ != NULL, "Device assigned more than once.");
    devStream_ = device.defaultStream();
    devAlloc_ = device.objectAllocator();
    devFormulas_.assignDevice(device, devStream_);
    devCopy_.assignDevice(device, devStream_);
    d_random_.assignDevice(device, devStream_);
    flipPosBuffer_.assignDevice(device, devStream_);
    realNumBuffer_.assignDevice(device, devStream_);
    d_reachCount_ = (uint2*)devAlloc_->allocate(sizeof(uint2));

    /* initialize sumJq */
    dotJq_ = new DotJq<real>(device, devStream_);
    dotSpins_ = new DotSpins<real>(device, devStream_);
}

template<class real>
sq::Algorithm CUDADenseGraphAnnealer<real>::selectAlgorithm(sq::Algorithm algo) {
    switch (algo) {
    case sq::algoColoring:
    case sq::algoSANaive:
        algo_ = algo;
        break;
    default:
        selectDefaultAlgorithm(algo, sq::algoColoring, sq::algoSANaive);
        break;
    }
    return algo_;
}

template<class real>
void CUDADenseGraphAnnealer<real>::seed(unsigned long long seed) {
    throwErrorIf(devStream_ == NULL, "Device not set.");
    d_random_.seed(seed);
    setState(solRandSeedGiven);
}

/* Sets the problem from a QUBO matrix W; derives the Ising Hamiltonian.
 * The number of Trotter slices defaults to N/4. */
template<class real>
void CUDADenseGraphAnnealer<real>::setQUBO(const HostMatrix &W, sq::OptimizeMethod om) {
    sqint::matrixCheckIfSymmetric(W, __func__);
    throwErrorIf(devStream_ == NULL, "Device not set.");

    deallocate();

    N_ = W.rows;
    m_ = N_ / 4;
    om_ = om;

    devAlloc_->allocate(&d_J_, N_, N_);
    DeviceMatrix *dW = devStream_->tempDeviceMatrix<real>(W.dim(), __func__);
    devCopy_(dW, W);
    if (om == sq::optMaximize)
        devFormulas_.devMath.scale(dW, -1., *dW); /* maximize == minimize(-W) */
    devFormulas_.calculateHamiltonian(&d_h_, &d_J_, &d_c_, *dW);
    devCopy_.clearPadding(&d_J_);
    devStream_->synchronize();
    setState(solProblemSet);
}

/* Sets the problem directly as an Ising Hamiltonian (h, J, c). */
template<class real>
void CUDADenseGraphAnnealer<real>::setHamiltonian(const HostVector &h, const HostMatrix &J, real c) {
    sqint::isingModelShapeCheck(h, J, c, __func__);
    sqint::matrixCheckIfSymmetric(J, __func__);
    throwErrorIf(devStream_ == NULL, "Device not set.");

    deallocate();

    N_ = J.rows;
    m_ = N_ / 4;
    om_ = sq::optMinimize;
    devCopy_(&d_h_, h);
    devCopy_(&d_J_, J);
    devCopy_.clearPadding(&d_J_);
    devCopy_(&d_c_, c);
    devStream_->synchronize();
    setState(solProblemSet);
}

template<class real>
sq::Preferences CUDADenseGraphAnnealer<real>::getPreferences() const {
    sq::Preferences prefs = Base::getPreferences();
    prefs.pushBack(sq::Preference(sq::pnDevice, "cuda"));
    return prefs;
}

template<class real>
const sq::VectorType<real> &CUDADenseGraphAnnealer<real>::get_E() const {
    if (!isEAvailable())
        const_cast<This*>(this)->calculate_E();
    /* add a flag to tell if kernel synchronized. */
    devStream_->synchronize();
    return E_;
}

template<class real>
const sq::BitSetArray &CUDADenseGraphAnnealer<real>::get_x() const {
    if (!isSolutionAvailable())
        const_cast<This*>(this)->makeSolution();
    return xlist_;
}

/* Broadcasts one spin vector q to all m_ Trotter slices. */
template<class real>
void CUDADenseGraphAnnealer<real>::set_q(const BitSet &q) {
    sqint::isingModelSolutionShapeCheck(N_, q, __func__);
    throwErrorIfNotPrepared();
    DeviceBitSet *d_q = devStream_->tempDeviceVector<char>(q.size);
    devCopy_(d_q, q);
    devCopy_.broadcastToRows(&d_matq_, *d_q);
    devCopy_.clearPadding(&d_matq_);
    devStream_->synchronize();
    setState(solQSet);
}

/* Sets one spin vector per Trotter slice; resizes m_ to match and re-prepares. */
template<class real>
void CUDADenseGraphAnnealer<real>::set_qset(const BitSetArray &q) {
    sqint::isingModelSolutionShapeCheck(N_, q, __func__);
    m_ = q.size();
    prepare();
    /* FIXME: apply pinned memory */
    sq::BitMatrix qMat(m_, N_);
    for (int iRow = 0; iRow < m_; ++iRow)
        memcpy(&qMat(iRow, 0), q[iRow].data, sizeof(char) * N_);
    /* NOTE(review): removed an unused temp device matrix allocation here —
     * qMat is copied to d_matq_ directly. */
    devCopy_(&d_matq_, qMat);
    devCopy_.clearPadding(&d_matq_);
    devCopy_.synchronize();
    setState(solQSet);
}

template<class real>
void CUDADenseGraphAnnealer<real>::getHamiltonian(HostVector *h, HostMatrix *J, real *c) const {
    throwErrorIfProblemNotSet();
    devCopy_(h, d_h_);
    devCopy_(J, d_J_);
    devCopy_(c, d_c_);
    devCopy_.synchronize();
}

template<class real>
const sq::BitSetArray &CUDADenseGraphAnnealer<real>::get_q() const {
    if (!isSolutionAvailable())
        const_cast<This*>(this)->makeSolution();
    return qlist_;
}

template<class real>
void CUDADenseGraphAnnealer<real>::randomizeSpin() {
    throwErrorIfNotPrepared();
    ::randomizeSpin(&d_matq_, d_random_, devStream_->getCudaStream());
    devCopy_.clearPadding(&d_matq_);
    setState(solQSet);
}

/* Computes per-slice energies into h_E_ (sign-flipped for maximization). */
template<class real>
void CUDADenseGraphAnnealer<real>::calculate_E() {
    throwErrorIfQNotSet();
    DeviceVector *d_E = devStream_->tempDeviceVector<real>(m_);
    DeviceMatrix *d_realMatQ = devStream_->tempDeviceMatrix<real>(d_matq_.dim());
    devCopy_.cast(d_realMatQ, d_matq_);
    devFormulas_.calculate_E(d_E, d_h_, d_J_, d_c_, *d_realMatQ);
    real sign = (om_ == sq::optMaximize) ? real(-1.) : real(1.);
    devFormulas_.devMath.scale(&h_E_, sign, *d_E);
    /* FIXME: due to asynchronous execution, here is not a good place to set
     * solEAvailable. */
    setState(solEAvailable);
}

/* Allocates working buffers, picks the anneal method for algo_, sizes the RNG
 * pools and configures the dot-product helpers. Must be called after the
 * problem is set and before annealing. */
template<class real>
void CUDADenseGraphAnnealer<real>::prepare() {
    throwErrorIfProblemNotSet();
    /* Guards co-residency of all flip threads, which the SQA kernel's
     * software grid barrier relies on. */
    throwErrorIf(devStream_->getNumThreadsToFillDevice() < (m_ + 1) / 2,
                 "nTrotters too large for this device.");

    if (!isRandSeedGiven())
        d_random_.seed();
    setState(solRandSeedGiven);

    deallocateInternalObjects();

    if (m_ == 1)
        selectDefaultSAAlgorithm(algo_, sq::algoSANaive);
    switch (algo_) {
    case sq::algoColoring:
        annealMethod_ = &CUDADenseGraphAnnealer::annealOneStepSQA;
        break;
    case sq::algoSANaive:
        annealMethod_ = &CUDADenseGraphAnnealer::annealOneStepSA;
        break;
    default:
        abort_("Must not reach here.");
    }

    HostObjectAllocator halloc;
    devAlloc_->allocate(&d_matq_, m_, N_);
    devAlloc_->allocate(&d_Jq_, m_);
    halloc.allocate(&h_E_, m_);
    E_.map(h_E_.d_data, h_E_.size);
    halloc.allocate(&h_q_, sq::Dim(m_, N_));
    halloc.allocate(&h_spinDotSum_);
    xlist_.reserve(m_);
    qlist_.reserve(m_);

    /* estimate # rand nums required per one anneal. */
    nRunsPerRandGen_ = maxRandBufCapacity / (m_ * N_ * sizeof(real));
    nRunsPerRandGen_ = std::max(2, std::min(nRunsPerRandGen_, (sq::SizeType)maxNRunsPerRandGen));
    sq::SizeType requiredSize = nRunsPerRandGen_ * m_ * N_ * sizeof(real) / sizeof(float);
    d_random_.setRequiredSize(requiredSize);

    throwOnError(cudaMemsetAsync(d_reachCount_, 0, sizeof(uint2), devStream_->getCudaStream()));

    DotJq<real> &dotJq = static_cast<DotJq<real>&>(*dotJq_);
    dotJq.configure(N_, m_, false);
    DotSpins<real> &dotSpins = static_cast<DotSpins<real>&>(*dotSpins_);
    dotSpins.configure(N_, m_, false);

    setState(solPrepared);
}

template<class real>
void CUDADenseGraphAnnealer<real>::makeSolution() {
    throwErrorIfQNotSet();
    syncBits();
    setState(solSolutionAvailable);
    calculate_E();
    // devStream_->synchronize();
}

/* Copies the device spin matrix to host and rebuilds qlist_/xlist_. */
template<class real>
void CUDADenseGraphAnnealer<real>::syncBits() {
    xlist_.clear();
    qlist_.clear();

    devCopy_(&h_q_, d_matq_);
    devStream_->synchronize();
    for (int idx = 0; idx < sq::IdxType(m_); ++idx) {
        BitSet q(h_q_.row(idx), N_);
        qlist_.pushBack(q);
        /* NOTE(review): x is constructed with size qlist_.size() and then
         * overwritten by x_from_q(q) — presumably the assignment resizes;
         * verify against BitSet's operator=. */
        BitSet x(qlist_.size());
        x = x_from_q(q);
        xlist_.pushBack(x);
    }
}

/* Returns the (classical or trans-verse-field-corrected) system energy,
 * averaged over Trotter slices. */
template<class real>
real CUDADenseGraphAnnealer<real>::getSystemE(real G, real beta) const {
    auto _this = const_cast<CUDADenseGraphAnnealer<real>*>(this);
    _this->calculate_E(); /* asynchronous */

    if (isSQAAlgorithm(algo_)) {
        DeviceVector *d_spinDot = devStream_->tempDeviceVector<real>(m_);
        DotSpins<real> &dotSpins = static_cast<DotSpins<real>&>(*dotSpins_);
        dotSpins(d_matq_, d_spinDot);
        _this->devFormulas_.devMath.sum(&_this->h_spinDotSum_, real(1.), *d_spinDot);
    }
    devStream_->synchronize();

    real E = E_.sum() / m_;
    if (isSQAAlgorithm(algo_)) {
        /* inter-slice coupling term of the SQA effective Hamiltonian */
        real coef = real(0.5) / beta * std::log(std::tanh(G * beta / m_));
        E -= *_this->h_spinDotSum_.d_data * coef;
    }
    if (om_ == sq::optMaximize)
        E *= real(-1.);
    return E;
}

#if 0
/* equivalent code — illustrative host pseudo-code for one SQA step; not
 * compiled (contains known typos). */
template<class real>
void annealOneStep(real G, real beta) {
    real twoDivM = real(2.) / real(m_);
    real coef = std::log(std::tanh(G * beta / m_)) / beta;
    for (int outer = 0; outer < IdxType(N_); ++outer) {
        int x[m]; /* carried out in DeviceRandomBuffer. */
        for (int y = 0; y < IdxType(m_); ++y) {
            /* first plane */
            int fraction = y % 2;
            /* second plane */
            int fraction = 1 - y % 2;
            x[innder] = (random_random() * 2 + fraction) % N;
        }
        /* calculate_Jq() */
        real d_Jq[m];
        for (int y = 0; y < IdxType(m_); ++y)
            d_Jq[y] = J_.row(x[y]).dot(matQ_.row(y));
        /* flip each bit, */
        for (int inner = 0; inner < IdxType(m_); ++inner) {
            /* flip one bit */
            real qyx = matQ_(y, x[m]);
            real dE = twoDivM * qyx * (real(2.) * d_Jq[x[y] + h_(x[y])];
            int neibour0 = (m_ + y - 1) % m_, neibour1 = (y + 1) % m_;
            dE -= qyx * (matQ_(neibour0, x) + matQ_(neibour1, x)) * coef;
            real threshold = (dE < real(0.)) ? real(1.) : std::exp(-dE * beta);
            if (threshold > random_.random<real>())
                matQ_(y, x) = - qyx;
        }
    }
}
#endif

/* Jq[y] = J.row(flipPos[y]) . q.row(y), via the vectorized dot helper. */
template<class real>
void CUDADenseGraphAnnealer<real>::calculate_Jq(DeviceVector *d_Jq,
                                                const DeviceMatrix &d_J, const DeviceBitMatrix &d_matq,
                                                const int *d_flipPos) {
    DotJq<real> &dotJq = static_cast<DotJq<real>&>(*dotJq_);
    dotJq(d_J, d_matq, d_flipPos, d_Jq->d_data);
}

/* One SQA Metropolis sweep over the Trotter slices, two slice "planes" per
 * launch. Uses a software grid-wide barrier (reachCount spin-wait) between
 * planes, so ALL blocks of this launch must be co-resident on the device
 * (guarded in prepare()). */
template<bool mIsOdd, class real>
__global__ static void tryFlipSQAKernel(char *d_q, sq::SizeType qStride,
                                        const real *d_Jq, const real *d_h,
                                        const int *d_x, const real *d_random,
                                        sq::SizeType m,
                                        const real twoDivM, const real coef, const real beta,
                                        uint2 *reachCount) {
    int gid = blockDim.x * blockIdx.x + threadIdx.x;
#pragma unroll
    for (int offset = 0; offset < 2; ++offset) {
        if (gid < m / 2) {
            int y = 2 * gid + offset;
            int x = d_x[y]; /* N */
            char qyx = d_q[qStride * y + x];
            int neibour0 = (y == 0) ? m - 1 : y - 1;
            int neibour1 = (y == m - 1) ? 0 : y + 1;
            real dE = twoDivM * (real)qyx * (real(2.) * d_Jq[y] + d_h[x]);
            dE -= (real)qyx * (d_q[qStride * neibour0 + x] + d_q[qStride * neibour1 + x]) * coef;
            real threshold = (dE < real(0.)) ? real(1.) : exp(-dE * beta);
            if (threshold > d_random[y])
                d_q[qStride * y + x] = - qyx;
        }
        if (offset == 0) {
            /* odd m: the last slice belongs to neither plane pair; handled
             * once by thread 0 between the two planes. */
            if ((gid == 0) && mIsOdd) {
                int y = m - 1;
                int x = d_x[y]; /* N */
                char qyx = d_q[qStride * y + x];
                int neibour0 = m - 2, neibour1 = 0;
                real dE = twoDivM * (real)qyx * (real(2.) * d_Jq[y] + d_h[x]);
                dE -= (real)qyx * (d_q[qStride * neibour0 + x] + d_q[qStride * neibour1 + x]) * coef;
                real threshold = (dE < real(0.)) ? real(1.) : exp(-dE * beta);
                if (threshold > d_random[y])
                    d_q[qStride * y + x] = - qyx;
            }
            /* software grid barrier between the two planes */
            __syncthreads();
            if (threadIdx.x == 0) {
                int count = atomicAdd(&reachCount->x, 1) + 1;
                while (count != gridDim.x)
                    count = *(volatile unsigned int*)(&reachCount->x);
            }
            __syncthreads();
        }
    }
    /* last block through resets the counter for the next launch */
    if (threadIdx.x == 0) {
        int count = atomicAdd(&reachCount->y, 1) + 1;
        if (count == gridDim.x)
            *reachCount = make_uint2(0, 0);
    }
}

/* Launches one SQA sweep. */
template<class real> void CUDADenseGraphAnnealer<real>::
annealOneStepSQA(DeviceBitMatrix *d_matq, const DeviceVector &d_Jq, const int *d_x, const real *d_random,
                 const DeviceVector &d_h, const DeviceMatrix &d_J, real G, real beta) {
    real twoDivM = real(2.) / real(m_);
    real coef = std::log(std::tanh(G * beta / m_)) / beta;
    dim3 blockDim(128);

    int nThreadsToFlipBits = m_ / 2;
    dim3 gridDim(std::max(divru(nThreadsToFlipBits, blockDim.x), 1));
    /* BUGFIX: was std::min((unsigned int)1, gridDim.x), which clamped every
     * launch to a single block; the in-code intent ("for the case of m == 1")
     * is a lower bound, already satisfied by the std::max above. */
    gridDim.x = std::max((unsigned int)1, gridDim.x);
    cudaStream_t stream = devStream_->getCudaStream();
    bool mIsOdd = (m_ & 1) != 0;
#if 0
    if (mIsOdd) {
        tryFlipSQAKernel<true, real><<<gridDim, blockDim, 0, stream>>>(d_matq->d_data, d_matq->stride,
                                                                       d_Jq.d_data, d_h.d_data, d_x, d_random,
                                                                       m_, twoDivM, coef, beta, d_reachCount_);
    }
    else {
        tryFlipSQAKernel<false, real><<<gridDim, blockDim, 0, stream>>>(d_matq->d_data, d_matq->stride,
                                                                        d_Jq.d_data, d_h.d_data, d_x, d_random,
                                                                        m_, twoDivM, coef, beta, d_reachCount_);
    }
#else
    void *args[] = {(void*)&d_matq->d_data, (void*)&d_matq->stride,
                    (void*)&d_Jq.d_data, (void*)&d_h.d_data, (void*)&d_x, (void*)&d_random,
                    (void*)&m_, (void*)&twoDivM, (void*)&coef, (void*)&beta,
                    (void*)&d_reachCount_, NULL};
    if (mIsOdd)
        cudaLaunchKernel((void*)tryFlipSQAKernel<true, real>, gridDim, blockDim, args, 0, stream);
    else
        cudaLaunchKernel((void*)tryFlipSQAKernel<false, real>, gridDim, blockDim, args, 0, stream);
#endif
    DEBUG_SYNC;
}

/* Public SQA entry: N_ sweeps, refreshing flip positions and random numbers
 * from the pooled device RNG buffers. */
template<class real>
void CUDADenseGraphAnnealer<real>::annealOneStepSQA(real G, real beta) {
    throwErrorIfQNotSet();
    clearState(solSolutionAvailable);

    if (!flipPosBuffer_.available(m_ * N_))
        flipPosBuffer_.generateFlipPositions(d_random_, N_, m_, nRunsPerRandGen_);
    if (!realNumBuffer_.available(m_ * N_))
        realNumBuffer_.generate<real>(d_random_, N_ * m_ * nRunsPerRandGen_);

    for (int idx = 0; idx < N_; ++idx) {
        const int *d_flipPos = flipPosBuffer_.acquire<int>(m_);
        const real *d_random = realNumBuffer_.acquire<real>(m_);
        calculate_Jq(&d_Jq_, d_J_, d_matq_, d_flipPos);
        annealOneStepSQA(&d_matq_, d_Jq_, d_flipPos, d_random, d_h_, d_J_, G, beta);
    }
}

/* Simulated annealing */

/* One naive-SA Metropolis trial per slice (no inter-slice coupling). */
template<class real>
__global__ static void tryFlipSAKernel(char *d_q, sq::SizeType qStride,
                                       const real *d_Jq, const real *d_h,
                                       const int *d_x, const real *d_random,
                                       sq::SizeType m, const real invKT) {
    int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid < m) {
        int y = gid;
        int x = d_x[y]; /* N */
        char qyx = d_q[qStride * y + x];
        real dE = real(2.) * (real)qyx * (real(2.) * d_Jq[y] + d_h[x]);
        real threshold = (dE < real(0.)) ? real(1.) : exp(-dE * invKT);
        if (threshold > d_random[y])
            d_q[qStride * y + x] = - qyx;
    }
}

/* Launches one SA sweep. */
template<class real> void CUDADenseGraphAnnealer<real>::
annealOneStepSA(DeviceBitMatrix *d_matq, const DeviceVector &d_Jq, const int *d_x, const real *d_random,
                const DeviceVector &d_h, const DeviceMatrix &d_J, real invKT) {
    dim3 blockDim(128);
    dim3 gridDim(std::max(divru(m_, blockDim.x), 1));
    cudaStream_t stream = devStream_->getCudaStream();
#if 0
    tryFlipSAKernel<real><<<gridDim, blockDim, 0, stream>>>(d_matq->d_data, d_matq->stride,
                                                            d_Jq.d_data, d_h.d_data, d_x, d_random,
                                                            m_, invKT);
#else
    void *args[] = {(void*)&d_matq->d_data, (void*)&d_matq->stride,
                    (void*)&d_Jq.d_data, (void*)&d_h.d_data, (void*)&d_x, (void*)&d_random,
                    (void*)&m_, (void*)&invKT, NULL};
    cudaLaunchKernel((void*)tryFlipSAKernel<real>, gridDim, blockDim, args, 0, stream);
#endif
    DEBUG_SYNC;
}

/* Public SA entry; signature matches annealMethod_ (second parameter is
 * unused by the naive-SA path). */
template<class real>
void CUDADenseGraphAnnealer<real>::annealOneStepSA(real kT, real beta) {
    throwErrorIfQNotSet();
    clearState(solSolutionAvailable);

    if (!flipPosBuffer_.available(m_ * N_))
        flipPosBuffer_.generateFlipPositions(d_random_, N_, m_, nRunsPerRandGen_);
    if (!realNumBuffer_.available(m_ * N_))
        realNumBuffer_.generate<real>(d_random_, N_ * m_ * nRunsPerRandGen_);

    real invKT = real(1.) / kT;
    for (int idx = 0; idx < N_; ++idx) {
        const int *d_flipPos = flipPosBuffer_.acquire<int>(m_);
        const real *d_random = realNumBuffer_.acquire<real>(m_);
        calculate_Jq(&d_Jq_, d_J_, d_matq_, d_flipPos);
        annealOneStepSA(&d_matq_, d_Jq_, d_flipPos, d_random, d_h_, d_J_, invKT);
    }
}

template class CUDADenseGraphAnnealer<double>;
template class CUDADenseGraphAnnealer<float>;
the_stack
//#include <rendercheck_gl.h>
#if defined(__APPLE__) || defined(MACOSX)
#include <GLUT/glut.h>
#else
#include <GL/glut.h>
#endif
#include "fluidsGL_kernels.cuh"

#define MAX_EPSILON_ERROR 1.0f

// Define the files that are to be saved and the reference images for validation
const char *sOriginal[] = { "fluidsGL.ppm", NULL };
const char *sReference[] = { "ref_fluidsGL.ppm", NULL };

// CUDA example code that implements the frequency space version of
// Jos Stam's paper 'Stable Fluids' in 2D. This application uses the
// CUDA FFT library (CUFFT) to perform velocity diffusion and to
// force non-divergence in the velocity field at each time step. It uses
// CUDA-OpenGL interoperability to update the particle field directly
// instead of doing a copy to system memory before drawing. Texture is
// used for automatic bilinear interpolation at the velocity advection step.

#if 1
#define DIM 64 // Square size of solver domain
#else
#define DIM 512 // Square size of solver domain
#endif
#define DS (DIM*DIM) // Total domain size
#define CPADW (DIM/2+1) // Padded width for real->complex in-place FFT
#define RPADW (2*(DIM/2+1)) // Padded width for real->complex in-place FFT
#define PDS (DIM*CPADW) // Padded total domain size
#define DT 0.09f // Delta T for iterative solver
#define VIS 0.0025f // Viscosity constant
#define FORCE (5.8f*DIM) // Force scale factor
#define FR 4 // Force update radius
#define TILEX 64 // Tile width
#define TILEY 64 // Tile height
#define TIDSX 64 // Tids in X
#define TIDSY 4 // Tids in Y

void cleanup(void);

// CUFFT plan handles (forward real->complex and inverse complex->real)
static cufftHandle planr2c;
static cufftHandle planc2r;
// Complex (frequency-space) velocity components, device memory
static cData *vxfield = NULL;
static cData *vyfield = NULL;
cData *hvfield = NULL;   // velocity field, host copy
cData *dvfield = NULL;   // velocity field, device copy (pitched)
static int wWidth = max(512,DIM);   // window width
static int wHeight = max(512,DIM);  // window height
static int clicked = 0;     // mouse-drag state toggle
static int fpsCount = 0;
static int fpsLimit = 1;
unsigned int timer;         // cutil timer handle

// Particle data
GLuint vbo = 0; // OpenGL vertex buffer object
static cData *particles = NULL; // particle positions in host memory
static int lastx = 0, lasty = 0;  // last mouse position (domain-force anchor)

// Texture pitch
size_t tPitch = 0; // Now this is compatible with gcc in 64-bit

bool g_bQAReadback = false;
bool g_bQAAddTestForce = true;
int g_iFrameToCompare = 4;
int frame = 0;
int g_TotalErrors = 0;

// CheckFBO/BackBuffer class objects
//CheckRender *g_CheckRender = NULL;

void autoTest();

// Splat an external force of radius r into the velocity field at (spx, spy).
// One block of (2r+1)^2 threads covers the stamp.
void addForces(cData *v, int dx, int dy, int spx, int spy, float fx, float fy, int r) {
    dim3 tids(2*r+1, 2*r+1);
    addForces_k<<<1, tids>>>(v, dx, dy, spx, spy, fx, fy, r, tPitch);
    cutilCheckMsg("addForces_k failed.");
}

// Semi-Lagrangian advection of the velocity field; the field is first bound
// to a texture (updateTexture) so the kernel gets bilinear interpolation.
void advectVelocity(cData *v, float *vx, float *vy, int dx, int pdx, int dy, float dt) {
    dim3 grid((dx/TILEX)+(!(dx%TILEX)?0:1), (dy/TILEY)+(!(dy%TILEY)?0:1));  // ceil-div per tile
    dim3 tids(TIDSX, TIDSY);
    updateTexture(v, DIM*sizeof(cData), DIM, tPitch);
    advectVelocity_k<<<grid, tids>>>(v, vx, vy, dx, pdx, dy, dt, TILEY/TIDSY);
    cutilCheckMsg("advectVelocity_k failed.");
}

// Diffuse the velocity and project it to a divergence-free field, all in
// frequency space: forward FFT -> diffuseProject_k -> inverse FFT.
void diffuseProject(cData *vx, cData *vy, int dx, int dy, float dt, float visc) {
    // Forward FFT
    cufftExecR2C(planr2c, (cufftReal*)vx, (cufftComplex*)vx);
    cufftExecR2C(planr2c, (cufftReal*)vy, (cufftComplex*)vy);
    uint3 grid = make_uint3((dx/TILEX)+(!(dx%TILEX)?0:1), (dy/TILEY)+(!(dy%TILEY)?0:1), 1);
    uint3 tids = make_uint3(TIDSX, TIDSY, 1);
    diffuseProject_k<<<grid, tids>>>(vx, vy, dx, dy, dt, visc, TILEY/TIDSY);
    cutilCheckMsg("diffuseProject_k failed.");
    // Inverse FFT
    cufftExecC2R(planc2r, (cufftComplex*)vx, (cufftReal*)vx);
    cufftExecC2R(planc2r, (cufftComplex*)vy, (cufftReal*)vy);
}

// Copy the (unnormalized) FFT results back into the pitched velocity field.
void updateVelocity(cData *v, float *vx, float *vy, int dx, int pdx, int dy) {
    dim3 grid((dx/TILEX)+(!(dx%TILEX)?0:1), (dy/TILEY)+(!(dy%TILEY)?0:1));
    dim3 tids(TIDSX, TIDSY);
    updateVelocity_k<<<grid, tids>>>(v, vx, vy, dx, pdx, dy, TILEY/TIDSY, tPitch);
    cutilCheckMsg("updateVelocity_k failed.");
}

// Move the rendered particles along the velocity field. The particle VBO is
// mapped into CUDA address space, updated in place, then unmapped for GL.
// NOTE(review): cudaGLMapBufferObject/cudaGLUnmapBufferObject are the legacy
// (pre-cudaGraphicsResource) interop API.
void advectParticles(GLuint buffer, cData *v, int dx, int dy, float dt) {
    dim3 grid((dx/TILEX)+(!(dx%TILEX)?0:1), (dy/TILEY)+(!(dy%TILEY)?0:1));
    dim3 tids(TIDSX, TIDSY);
    cData *p;
    cudaGLMapBufferObject((void**)&p, buffer);
    cutilCheckMsg("cudaGLMapBufferObject failed");
    advectParticles_k<<<grid, tids>>>(p, v, dx, dy, dt, TILEY/TIDSY, tPitch);
    cutilCheckMsg("advectParticles_k failed.");
    cudaGLUnmapBufferObject(buffer);
    cutilCheckMsg("cudaGLUnmapBufferObject failed");
}

// GLUT display callback: advance the simulation one step, draw the particle
// VBO as points, maintain an FPS title, and exit after g_iFrameToCompare
// frames (auto-test harness behavior).
void display(void) {
    cutilCheckError(cutStartTimer(timer));

    // simulate fluid
    advectVelocity(dvfield, (float*)vxfield, (float*)vyfield, DIM, RPADW, DIM, DT);
    diffuseProject(vxfield, vyfield, CPADW, DIM, DT, VIS);
    updateVelocity(dvfield, (float*)vxfield, (float*)vyfield, DIM, RPADW, DIM);
    advectParticles(vbo, dvfield, DIM, DIM, DT);

    // render points from vertex buffer
    glClear(GL_COLOR_BUFFER_BIT);
    glColor4f(0,1,0,0.5f);
    glPointSize(1);
    glEnable(GL_POINT_SMOOTH);
    glEnable(GL_BLEND);
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
    glEnableClientState(GL_VERTEX_ARRAY);
    glDisable(GL_DEPTH_TEST);
    glDisable(GL_CULL_FACE);
    glBindBufferARB(GL_ARRAY_BUFFER_ARB, vbo);
    glVertexPointer(2, GL_FLOAT, 0, NULL);
    glDrawArrays(GL_POINTS, 0, DS);
    glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
    glDisableClientState(GL_VERTEX_ARRAY);
    glDisableClientState(GL_TEXTURE_COORD_ARRAY);
    glDisable(GL_TEXTURE_2D);

    // Finish timing before swap buffers to avoid refresh sync
    cutilCheckError(cutStopTimer(timer));
    glutSwapBuffers();

    fpsCount++;
    if (fpsCount == fpsLimit) {
        char fps[256];
        float ifps = 1.f / (cutGetAverageTimerValue(timer) / 1000.f);
        sprintf(fps, "Cuda/GL Stable Fluids (%d x %d): %3.1f fps", DIM, DIM, ifps);
        glutSetWindowTitle(fps);
        fpsCount = 0;
        fpsLimit = (int)max(ifps, 1.f);
        cutilCheckError(cutResetTimer(timer));
    }

    glutPostRedisplay();
    if( frame < g_iFrameToCompare ) {
        ++frame;
    } else {
        printf( "TEST PASSED\n" );
        exit (0);
    }
}

// Automated test driver: inject a force each frame and render; the reference
// image comparison below is disabled.
void autoTest() {
    for(int count=0;count<g_iFrameToCompare;count++) {
        // add in a little force so the automated testing is interesting.
        if(g_bQAReadback && g_bQAAddTestForce) {
            int x = wWidth/(count+1); int y = wHeight/(count+1);
            float fx = (x / (float)wWidth);
            float fy = (y / (float)wHeight);
            int nx = (int)(fx * DIM);
            int ny = (int)(fy * DIM);
            int ddx = 35;
            int ddy = 35;
            fx = ddx / (float)wWidth;
            fy = ddy / (float)wHeight;
            int spy = ny-FR;
            int spx = nx-FR;
            addForces(dvfield, DIM, DIM, spx, spy, FORCE * DT * fx, FORCE * DT * fy, FR);
            lastx = x; lasty = y;
            //g_bQAAddTestForce = false; // only add it once
        }
        display();
    }

    // compare to official reference image, printing PASS or FAIL.
    /* printf("> (Frame %d) Readback BackBuffer\n", 100); g_CheckRender->readback( wWidth, wHeight, NULL ); g_CheckRender->savePPM(sOriginal[0], true, NULL); if (!g_CheckRender->PPMvsPPM(sOriginal[0], sReference[0], MAX_EPSILON_ERROR)) { g_TotalErrors++; } */
}

// GLUT idle callback: keep redrawing.
void idle(void) {
    glutPostRedisplay();
}

// Seed particles on a jittered regular grid in [0,1)^2.
void initParticles(cData *p, int dx, int dy) {
    int i, j;
    for (i = 0; i < dy; i++) {
        for (j = 0; j < dx; j++) {
            p[i*dx+j].x = ((j+0.5)/dx) + (rand() / (float)RAND_MAX - 0.5f) / dx;
            p[i*dx+j].y = ((i+0.5)/dy) + (rand() / (float)RAND_MAX - 0.5f) / dy;
        }
    }
}

// GLUT keyboard callback: ESC quits, 'r' resets the velocity field and
// reseeds the particle VBO (unregister -> refill -> re-register for CUDA).
void keyboard( unsigned char key, int x, int y) {
    switch( key) {
        case 27:
            exit (0);
        case 'r':
            memset(hvfield, 0, sizeof(cData) * DS);
            cudaMemcpy(dvfield, hvfield, sizeof(cData) * DS, cudaMemcpyHostToDevice);
            initParticles(particles, DIM, DIM);
            cudaGLUnregisterBufferObject(vbo);
            cutilCheckMsg("cudaGLUnregisterBufferObject failed");
            glBindBufferARB(GL_ARRAY_BUFFER_ARB, vbo);
            glBufferDataARB(GL_ARRAY_BUFFER_ARB, sizeof(cData) * DS, particles, GL_DYNAMIC_DRAW_ARB);
            glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
            cudaGLRegisterBufferObject(vbo);
            cutilCheckMsg("cudaGLRegisterBufferObject failed");
            break;
        default: break;
    }
}

// GLUT mouse-button callback: remember the press position and toggle dragging.
void click(int button, int updown, int x, int y) {
    lastx = x; lasty = y;
    clicked = !clicked;
}

// GLUT mouse-motion callback: while dragging, convert the motion into a force
// splat at the current domain cell (kept FR cells away from the borders).
void motion (int x, int y) {
    // Convert motion coordinates to domain
    float fx = (lastx / (float)wWidth);
    float fy = (lasty / (float)wHeight);
    int nx = (int)(fx * DIM);
    int ny = (int)(fy * DIM);
    if (clicked && nx < DIM-FR && nx > FR-1 && ny < DIM-FR && ny > FR-1) {
        int ddx = x - lastx;
        int ddy = y - lasty;
        fx = ddx / (float)wWidth;
        fy = ddy / (float)wHeight;
        int spy = ny-FR;
        int spx = nx-FR;
        addForces(dvfield, DIM, DIM, spx, spy, FORCE * DT * fx, FORCE * DT * fy, FR);
        lastx = x; lasty = y;
    }
    glutPostRedisplay();
}

// GLUT reshape callback: keep an orthographic [0,1]x[0,1] projection.
void reshape(int x, int y) {
    wWidth = x; wHeight = y;
    glViewport(0, 0, x, y);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glOrtho(0, 1, 1, 0, 0, 1);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    glutPostRedisplay();
}

// atexit handler: release CUDA/GL interop, textures, host/device buffers,
// FFT plans, the VBO and the cutil timer.
void cleanup(void) {
    cudaGLUnregisterBufferObject(vbo);
    cutilCheckMsg("cudaGLUnregisterBufferObject failed");

    unbindTexture();
    deleteTexture();

    // Free all host and device resources
    free(hvfield);
    free(particles);
    cudaFree(dvfield);
    cudaFree(vxfield);
    cudaFree(vyfield);
    cufftDestroy(planr2c);
    cufftDestroy(planc2r);

    glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
    glDeleteBuffersARB(1, &vbo);

    cutilCheckError(cutDeleteTimer(timer));
}

// Create the GLUT window, register callbacks and initialize GLEW.
// Returns CUTTrue on success, CUTFalse if VBO support is missing.
int initGL(int argc, char **argv) {
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE);
    glutInitWindowSize(wWidth, wHeight);
    glutCreateWindow("Compute Stable Fluids");
    glutDisplayFunc(display);
    glutKeyboardFunc(keyboard);
    glutMouseFunc(click);
    glutMotionFunc(motion);
    glutReshapeFunc(reshape);
    glutIdleFunc(idle);
    glewInit();
    if (! glewIsSupported( "GL_ARB_vertex_buffer_object" )) {
        fprintf( stderr, "ERROR: Support for necessary OpenGL extensions missing.");
        fflush( stderr);
        return CUTFalse;
    }
    return CUTTrue;
}

// Program entry point: GL first (required for GL/CUDA interop device
// selection), then CUDA device choice, buffer allocation, FFT plans, particle
// VBO setup, and finally the GLUT main loop (or the disabled QA path).
int main(int argc, char** argv) {
    // First initialize OpenGL context, so we can properly set the GL for CUDA.
    // This is necessary in order to achieve optimal performance with OpenGL/CUDA interop.
    if (CUTFalse == initGL(argc, argv)) {
        return CUTFalse;
    }

    // use command-line specified CUDA device, otherwise use device with highest Gflops/s
    if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") )
        cutilGLDeviceInit(argc, argv);
    else {
        cudaGLSetGLDevice( cutGetMaxGflopsDeviceId() );
    }

    // automated build testing harness
    if (cutCheckCmdLineFlag(argc, (const char **)argv, "qatest") ||
        cutCheckCmdLineFlag(argc, (const char **)argv, "noprompt")) {
        g_bQAReadback = true;
    }

    // Allocate and initialize host data
    GLint bsize;

    cutilCheckError(cutCreateTimer(&timer));
    cutilCheckError(cutResetTimer(timer));

    hvfield = (cData*)malloc(sizeof(cData) * DS);
    memset(hvfield, 0, sizeof(cData) * DS);

    // Allocate and initialize device data
    cudaMallocPitch((void**)&dvfield, &tPitch, sizeof(cData)*DIM, DIM);
    cudaMemcpy(dvfield, hvfield, sizeof(cData) * DS, cudaMemcpyHostToDevice);
    // Temporary complex velocity field data
    cudaMalloc((void**)&vxfield, sizeof(cData) * PDS);
    cudaMalloc((void**)&vyfield, sizeof(cData) * PDS);

    setupTexture(DIM, DIM);
    bindTexture();

    // Create particle array
    particles = (cData*)malloc(sizeof(cData) * DS);
    memset(particles, 0, sizeof(cData) * DS);
    initParticles(particles, DIM, DIM);

    // Create CUFFT transform plan configuration
    cufftPlan2d(&planr2c, DIM, DIM, CUFFT_R2C);
    cufftPlan2d(&planc2r, DIM, DIM, CUFFT_C2R);

#if 0
    // Disabled: duplicate of the GL setup already done in initGL().
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE);
    glutInitWindowSize(wWidth, wHeight);
    glutCreateWindow("Compute Stable Fluids");
    glutDisplayFunc(display);
    glutKeyboardFunc(keyboard);
    glutMouseFunc(click);
    glutMotionFunc(motion);
    glutReshapeFunc(reshape);
    glutIdleFunc(idle);
    glewInit();
    if (! glewIsSupported( "GL_ARB_vertex_buffer_object" )) {
        fprintf( stderr, "ERROR: Support for necessary OpenGL extensions missing.");
        fflush( stderr);
        return CUTFalse;
    }
#endif

    glGenBuffersARB(1, &vbo);
    glBindBufferARB(GL_ARRAY_BUFFER_ARB, vbo);
    glBufferDataARB(GL_ARRAY_BUFFER_ARB, sizeof(cData) * DS, particles, GL_DYNAMIC_DRAW_ARB);
    glGetBufferParameterivARB(GL_ARRAY_BUFFER_ARB, GL_BUFFER_SIZE_ARB, &bsize);
    if (bsize != (sizeof(cData) * DS))
        goto EXTERR;
    glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);

    cudaGLRegisterBufferObject(vbo);
    cutilCheckMsg("cudaGLRegisterBufferObject failed");

    if (g_bQAReadback) {
        /* g_CheckRender = new CheckBackBuffer(wWidth, wHeight, 4); g_CheckRender->setPixelFormat(GL_RGBA); g_CheckRender->setExecPath(argv[0]); g_CheckRender->EnableQAReadback(true); autoTest(); */
    } else {
        atexit(cleanup);
        glutMainLoop();
    }

    cudaThreadExit();
    return 0;

EXTERR:
    printf("Failed to initialize GL extensions.\n");
    cudaThreadExit();
    return 1;
}
the_stack
#pragma once

namespace gunrock {
namespace app {

/**
 * @brief Base data slice structure which contains common data structural needed
 * for primitives.
 *
 * @tparam VertexT Type of signed integer to use as vertex id (e.g., uint32)
 * @tparam SizeT   Type of unsigned integer to use for array indexing
 * @tparam ValueT  Type to use as vertex / edge associated values
 * @tparam ARRAY_FLAG            Allocation flags forwarded to util::Array1D
 * @tparam cudaHostRegisterFlag  Host-registration flags for pinned arrays
 */
template <typename VertexT, typename SizeT, typename ValueT,
          util::ArrayFlag ARRAY_FLAG = util::ARRAY_NONE,
          unsigned int cudaHostRegisterFlag = cudaHostRegisterDefault>
// int MAX_NUM_VERTEX_ASSOCIATES, // int MAX_NUM_VALUE__ASSOCIATES>
struct MgpuSlice {
  int num_gpus;      // Number of GPUs
  int gpu_idx;       // GPU index
  int wait_counter;  // Wait counter for iteration loop control
  int max_num_vertex_associates;  // max # of VertexT associate values per vertex
  int max_num_value__associates;  // max # of ValueT associate values per vertex
  // int gpu_mallocing ; // Whether GPU is in malloc
  // int num_vertex_associate; // Number of associate values in VertexId type for each vertex
  // int num_value__associate; // Number of associate values in Value type for each vertex
  int num_stages;  // Number of stages
  // SizeT nodes ; // Number of vertices
  // SizeT edges ; // Number of edges
  // bool use_double_buffer ;
  // typedef unsigned char MaskT;

  // Incoming VertexId type associate values (double-buffered, one array per peer GPU)
  util::Array1D<SizeT, VertexT, ARRAY_FLAG, cudaHostRegisterFlag>
      *vertex_associate_in[2];
  // Device pointers to incoming VertexId type associate values
  // util::Array1D<SizeT, VertexT*, ARRAY_FLAG, cudaHostRegisterFlag>
  // *vertex_associate_ins [2];
  // Outgoing VertexId type associate values (one array per peer GPU)
  util::Array1D<SizeT, VertexT, ARRAY_FLAG, cudaHostRegisterFlag>
      *vertex_associate_out;
  // Device pointers to outgoing VertexId type associate values (mapped/pinned)
  util::Array1D<SizeT, VertexT *, ARRAY_FLAG,
                cudaHostRegisterFlag | cudaHostAllocMapped |
                    cudaHostAllocPortable>
      vertex_associate_outs;
  // Device pointers to device points to outgoing VertexId type associate values
  // util::Array1D<SizeT, VertexT**, ARRAY_FLAG, cudaHostRegisterFlag>
  // vertex_associate_outss ;
  // Device pointers to original VertexId type associate values
  util::Array1D<SizeT, VertexT *, ARRAY_FLAG, cudaHostRegisterFlag>
      vertex_associate_orgs;

  // Incoming Value type associate values (double-buffered, one array per peer GPU)
  util::Array1D<SizeT, ValueT, ARRAY_FLAG, cudaHostRegisterFlag>
      *value__associate_in[2];
  // Device pointers to incoming Value type associate values
  // util::Array1D<SizeT, ValueT* >
  // *value__associate_ins [2];
  // Outgoing Value type associate values (one array per peer GPU)
  util::Array1D<SizeT, ValueT, ARRAY_FLAG, cudaHostRegisterFlag>
      *value__associate_out;
  // Device pointers to outgoing Value type associate values (mapped/pinned)
  util::Array1D<SizeT, ValueT *, ARRAY_FLAG,
                cudaHostRegisterFlag | cudaHostAllocMapped |
                    cudaHostAllocPortable>
      value__associate_outs;
  // Device pointers to device pointers to outgoing Value type associate values
  // util::Array1D<SizeT, ValueT** >
  // value__associate_outss ;
  // Device pointers to original Value type associate values
  util::Array1D<SizeT, ValueT *, ARRAY_FLAG, cudaHostRegisterFlag>
      value__associate_orgs;

  // Number of outgoing vertices to peers (mapped/pinned)
  util::Array1D<SizeT, SizeT, ARRAY_FLAG,
                cudaHostRegisterFlag | cudaHostAllocMapped |
                    cudaHostAllocPortable>
      out_length;
  // Number of incoming vertices from peers (double-buffered)
  util::Array1D<SizeT, SizeT, ARRAY_FLAG, cudaHostRegisterFlag> in_length[2];
  util::Array1D<SizeT, SizeT, ARRAY_FLAG,
                cudaHostRegisterFlag | cudaHostAllocMapped |
                    cudaHostAllocPortable>
      in_length_out;
  // Incoming iteration numbers
  util::Array1D<SizeT, VertexT, ARRAY_FLAG, cudaHostRegisterFlag>
      in_iteration[2];
  // Incoming vertices (one array per peer GPU, double-buffered)
  util::Array1D<SizeT, VertexT, ARRAY_FLAG, cudaHostRegisterFlag> *keys_in[2];
  // Outgoing vertices — per-peer device pointers (mapped/pinned)
  util::Array1D<SizeT, VertexT *, ARRAY_FLAG,
                cudaHostRegisterFlag | cudaHostAllocMapped |
                    cudaHostAllocPortable>
      keys_outs;
  // Device pointers to outgoing vertices (one array per peer GPU)
  util::Array1D<SizeT, VertexT, ARRAY_FLAG, cudaHostRegisterFlag> *keys_out;
  // Markers to separate vertices to peer GPUs
  // util::Array1D<SizeT, SizeT , ARRAY_FLAG, cudaHostRegisterFlag>
  // *keys_marker ;
  // Device pointer to the markers
  // util::Array1D<SizeT, SizeT* , ARRAY_FLAG, cudaHostRegisterFlag>
  // keys_markers ;
  // Vertex lookup array
  // util::Array1D<SizeT, SizeT , ARRAY_FLAG, cudaHostRegisterFlag>
  // *visit_lookup ;
  // Vertex valid in
  // util::Array1D<SizeT, VertexT , ARRAY_FLAG, cudaHostRegisterFlag>
  // *valid_in ;
  // Vertex valid out
  // util::Array1D<SizeT, VertexT , ARRAY_FLAG, cudaHostRegisterFlag>
  // *valid_out ;

  // GPU stream events arrays (4 event classes x (num_gpus*2) x num_stages)
  util::Array1D<SizeT, cudaEvent_t *, ARRAY_FLAG, cudaHostRegisterFlag>
      events[4];
  // Whether the GPU stream events are set
  util::Array1D<SizeT, bool *, ARRAY_FLAG, cudaHostRegisterFlag> events_set[4];
  // Per-stream wait markers for iteration loop control
  util::Array1D<SizeT, int, ARRAY_FLAG, cudaHostRegisterFlag> wait_marker;
  // GPU streams
  // util::Array1D<SizeT, cudaStream_t, ARRAY_FLAG, cudaHostRegisterFlag>
  // streams ;
  // current stages of each streams
  util::Array1D<SizeT, int, ARRAY_FLAG, cudaHostRegisterFlag> stages;
  // whether to show debug information for the streams
  util::Array1D<SizeT, bool, ARRAY_FLAG, cudaHostRegisterFlag> to_show;
  // compressed data structure for make_out kernel
  // util::Array1D<SizeT, char , ARRAY_FLAG, cudaHostRegisterFlag>
  // make_out_array ;
  // compressed data structure for expand_incoming kernel
  // util::Array1D<SizeT, char , ARRAY_FLAG, cudaHostRegisterFlag>
  // *expand_incoming_array ;
  // predecessors of vertices
  // util::Array1D<SizeT, VertexT , ARRAY_FLAG, cudaHostRegisterFlag>
  // preds ;
  // temporary storages for predecessors
  // util::Array1D<SizeT, VertexT , ARRAY_FLAG, cudaHostRegisterFlag>
  // temp_preds ;
  // Used for source distance
  // util::Array1D<SizeT, VertexT , ARRAY_FLAG, cudaHostRegisterFlag>
  // labels ;
  // util::Array1D<SizeT, MaskT , ARRAY_FLAG, cudaHostRegisterFlag>
  // visited_mask;
  // Random data used to emulate communication latency (see Init)
  util::Array1D<SizeT, int, ARRAY_FLAG, cudaHostRegisterFlag> latency_data;

  // arrays used to track data race, containing info about previous assignment
  util::Array1D<SizeT, int, ARRAY_FLAG, cudaHostRegisterFlag>
      org_checkpoint;  // checkpoint number
  util::Array1D<SizeT, VertexT *, ARRAY_FLAG, cudaHostRegisterFlag>
      org_d_out;  // d_out address
  util::Array1D<SizeT, SizeT, ARRAY_FLAG, cudaHostRegisterFlag>
      org_offset1;  // offset1
  util::Array1D<SizeT, SizeT, ARRAY_FLAG, cudaHostRegisterFlag>
      org_offset2;  // offset2
  util::Array1D<SizeT, VertexT, ARRAY_FLAG, cudaHostRegisterFlag>
      org_queue_idx;  // queue index
  util::Array1D<SizeT, int, ARRAY_FLAG, cudaHostRegisterFlag>
      org_block_idx;  // blockIdx.x
  util::Array1D<SizeT, int, ARRAY_FLAG, cudaHostRegisterFlag>
      org_thread_idx;  // threadIdx.x

  /**
   * @brief DataSliceBase default constructor.
   * Sets default stage count, nulls the per-peer array pointers and assigns
   * debug names to all owned arrays. No device memory is touched here.
   */
  MgpuSlice() {
    // Assign default values
    num_stages = 4;
    // num_vertex_associate = 0;
    // num_value__associate = 0;
    // gpu_idx = 0;
    // gpu_mallocing = 0;
    // use_double_buffer = false;

    // Assign NULLs to pointers
    keys_out = NULL;
    keys_in[0] = NULL;
    keys_in[1] = NULL;
    vertex_associate_in[0] = NULL;
    vertex_associate_in[1] = NULL;
    vertex_associate_out = NULL;
    value__associate_in[0] = NULL;
    value__associate_in[1] = NULL;
    value__associate_out = NULL;

    // Assign names to arrays
    keys_outs.SetName("keys_outs");
    vertex_associate_outs.SetName("vertex_associate_outs");
    value__associate_outs.SetName("value__associate_outs");
    vertex_associate_orgs.SetName("vertex_associate_orgs");
    value__associate_orgs.SetName("value__associate_orgs");
    out_length.SetName("out_length");
    in_length[0].SetName("in_length[0]");
    in_length[1].SetName("in_length[1]");
    in_length_out.SetName("in_length_out");
    in_iteration[0].SetName("in_iteration[0]");
    // NOTE(review): name string below says "in_iteration[0]" for index 1 —
    // looks like a copy-paste slip in the debug label (runtime string, left
    // unchanged here).
    in_iteration[1].SetName("in_iteration[0]");
    wait_marker.SetName("wait_marker");
    stages.SetName("stages");
    to_show.SetName("to_show");
    org_checkpoint.SetName("org_checkpoint");
    org_d_out.SetName("org_d_out");
    org_offset1.SetName("org_offset1");
    org_offset2.SetName("org_offset2");
    org_queue_idx.SetName("org_queue_idx");
    org_block_idx.SetName("org_block_idx");
    org_thread_idx.SetName("org_thread_idx");
    latency_data.SetName("latency_data");
    for (int i = 0; i < 4; i++) {
      events[i].SetName("events[]");
      events_set[i].SetName("events_set[]");
    }
  }  // end DataSliceBase()

  /**
   * @brief DataSliceBase default destructor to release host / device memory
   */
  /*virtual ~MgpuSlice() { Release(); }*/

  /**
   * @brief Releases all memory owned by this slice on the given target
   * (device and/or host): per-peer in/out buffers, CUDA events, and every
   * tracking array. Pointer arrays are deleted only when HOST is targeted.
   * \return cudaError_t of the first failing CUDA call, else cudaSuccess.
   */
  cudaError_t Release(util::Location target = util::DEVICE) {
    cudaError_t retval = cudaSuccess;
    // Set device by index
    GUARD_CU(util::SetDevice(gpu_idx));

    // Release VertexId type incoming associate values and related pointers
    if (vertex_associate_in[0] != NULL) {
      for (int gpu = 0; gpu < num_gpus; gpu++) {
        GUARD_CU(vertex_associate_in[0][gpu].Release(target));
        GUARD_CU(vertex_associate_in[1][gpu].Release(target));
      }
      if (target & util::HOST) {
        delete[] vertex_associate_in[0];
        delete[] vertex_associate_in[1];
        vertex_associate_in[0] = NULL;
        vertex_associate_in[1] = NULL;
      }
    }

    // Release Value type incoming associate values and related pointers
    if (value__associate_in[0] != NULL) {
      for (int gpu = 0; gpu < num_gpus; gpu++) {
        GUARD_CU(value__associate_in[0][gpu].Release(target));
        GUARD_CU(value__associate_in[1][gpu].Release(target));
      }
      if (target & util::HOST) {
        delete[] value__associate_in[0];
        delete[] value__associate_in[1];
        value__associate_in[0] = NULL;
        value__associate_in[1] = NULL;
      }
    }

    // Release incoming keys and related pointers
    if (keys_in[0] != NULL) {
      for (int gpu = 0; gpu < num_gpus; gpu++) {
        GUARD_CU(keys_in[0][gpu].Release(target));
        GUARD_CU(keys_in[1][gpu].Release(target));
      }
      if (target & util::HOST) {
        delete[] keys_in[0];
        delete[] keys_in[1];
        keys_in[0] = NULL;
        keys_in[1] = NULL;
      }
    }

    // Release VertexId type outgoing associate values and pointers
    if (vertex_associate_out != NULL) {
      for (int gpu = 0; gpu < num_gpus; gpu++) {
        if (target & util::HOST) vertex_associate_outs[gpu] = NULL;
        GUARD_CU(vertex_associate_out[gpu].Release(target));
      }
      if (target & util::HOST) {
        delete[] vertex_associate_out;
        vertex_associate_out = NULL;
      }
      GUARD_CU(vertex_associate_outs.Release(target));
    }

    // Release Value type outgoing associate values and pointers
    if (value__associate_out != NULL) {
      for (int gpu = 0; gpu < num_gpus; gpu++) {
        if (target & util::HOST) value__associate_outs[gpu] = NULL;
        GUARD_CU(value__associate_out[gpu].Release(target));
      }
      if (target & util::HOST) {
        delete[] value__associate_out;
        value__associate_out = NULL;
      }
      GUARD_CU(value__associate_outs.Release(target));
    }

    // Release events and markers
    if (target & util::HOST)
      for (int i = 0; i < 4; i++) {
        if (events[i].GetPointer() != NULL)
          for (int gpu = 0; gpu < num_gpus * 2; gpu++) {
            for (int stage = 0; stage < num_stages; stage++)
              GUARD_CU2(cudaEventDestroy(events[i][gpu][stage]),
                        "cudaEventDestroy failed");
            delete[] events[i][gpu];
            events[i][gpu] = NULL;
            delete[] events_set[i][gpu];
            events_set[i][gpu] = NULL;
          }
        GUARD_CU(events[i].Release(target));
        GUARD_CU(events_set[i].Release(target));
      }

    // Release frontiers
    /*if (frontier_queues != NULL) { for (int gpu = 0; gpu <= num_gpus; gpu++) { for (int i = 0; i < 2; ++i) { GUARD_CU(frontier_queues[gpu].keys [i].Release()); GUARD_CU(frontier_queues[gpu].values[i].Release()); } } delete[] frontier_queues; frontier_queues = NULL; }

    // Release scanned_edges
    if (scanned_edges != NULL) { for (int gpu = 0; gpu <= num_gpus; gpu++) GUARD_CU(scanned_edges [gpu].Release()); delete[] scanned_edges; scanned_edges = NULL; }*/

    // Release all other arrays
    GUARD_CU(keys_outs.Release(target));
    GUARD_CU(in_length[0].Release(target));
    GUARD_CU(in_length[1].Release(target));
    GUARD_CU(in_length_out.Release(target));
    GUARD_CU(in_iteration[0].Release(target));
    GUARD_CU(in_iteration[1].Release(target));
    GUARD_CU(wait_marker.Release(target));
    GUARD_CU(out_length.Release(target));
    GUARD_CU(vertex_associate_orgs.Release(target));
    GUARD_CU(value__associate_orgs.Release(target));
    GUARD_CU(stages.Release(target));
    GUARD_CU(to_show.Release(target));
    GUARD_CU(latency_data.Release(target));
    GUARD_CU(org_checkpoint.Release(target));
    GUARD_CU(org_d_out.Release(target));
    GUARD_CU(org_offset1.Release(target));
    GUARD_CU(org_offset2.Release(target));
    GUARD_CU(org_queue_idx.Release(target));
    GUARD_CU(org_block_idx.Release(target));
    GUARD_CU(org_thread_idx.Release(target));
    return retval;
  }  // end Release()

  /**
   * @brief Initiate DataSliceBase
   *
   * @param[in] num_gpus          Number of GPUs
   * @param[in] gpu_idx           GPU index
   * @param[in] num_nodes         Number of vertices on this GPU
   * @param[in] max_queue_length  Capacity for the data-race tracking arrays
   * @param[in] num_in_nodes      Number of incoming vertices from peers
   * @param[in] num_out_nodes     Number of outgoing vertices to peers
   * @param[in] trans_factor      Preallocation factor for incoming / outgoing
   *                              vertices
   * @param[in] skip_makeout_selection  When true, peers >= 2 alias peer 1's
   *                              outgoing buffers instead of allocating
   * \return Error occurred if any, otherwise cudaSuccess
   */
  cudaError_t Init(int num_gpus, int gpu_idx,
                   // bool use_double_buffer ,
                   // Csr<VertexId, SizeT, Value> *graph ,
                   SizeT num_nodes, SizeT max_queue_length, SizeT *num_in_nodes,
                   SizeT *num_out_nodes, double trans_factor = 1.0,
                   bool skip_makeout_selection = false) {
    cudaError_t retval = cudaSuccess;
    // Copy input values
    this->num_gpus = num_gpus;
    this->gpu_idx = gpu_idx;
    // this->use_double_buffer = use_double_buffer;
    // this->nodes = graph->nodes;
    // this->edges = graph->edges;
    // this->num_vertex_associate = num_vertex_associate;
    // this->num_value__associate = num_value__associate;

    // Set device by index
    GUARD_CU(util::SetDevice(gpu_idx));
    GUARD_CU(in_length[0].Allocate(num_gpus, util::HOST));
    GUARD_CU(in_length[1].Allocate(num_gpus, util::HOST));
    // GUARD_CU(in_length_out.Init(num_gpus, util::HOST | util::DEVICE,
    // true, cudaHostAllocMapped | cudaHostAllocPortable));
    GUARD_CU(in_length_out.Allocate(num_gpus, util::HOST | util::DEVICE));
    GUARD_CU(in_iteration[0].Allocate(num_gpus, util::HOST));
    GUARD_CU(in_iteration[1].Allocate(num_gpus, util::HOST));
    // GUARD_CU(out_length .Init(num_gpus, util::HOST | util::DEVICE,
    // true, cudaHostAllocMapped | cudaHostAllocPortable));
    GUARD_CU(out_length.Allocate(num_gpus, util::HOST | util::DEVICE));
    GUARD_CU(vertex_associate_orgs.Allocate(max_num_vertex_associates,
                                            util::HOST | util::DEVICE));
    GUARD_CU(value__associate_orgs.Allocate(max_num_value__associates,
                                            util::HOST | util::DEVICE));
    // Fill latency emulation buffer with random host data and push to device
    GUARD_CU(latency_data.Allocate(120 * 1024, util::HOST | util::DEVICE));
    for (SizeT i = 0; i < 120 * 1024; i++) latency_data[i] = rand();
    GUARD_CU(latency_data.Move(util::HOST, util::DEVICE));

    // Allocate / create event related variables
    GUARD_CU(wait_marker.Allocate(num_gpus * 2, util::HOST));
    GUARD_CU(stages.Allocate(num_gpus * 2, util::HOST));
    GUARD_CU(to_show.Allocate(num_gpus * 2, util::HOST));
    // NOTE(review): only the first num_gpus markers are zeroed here although
    // num_gpus*2 were allocated; Reset() zeroes all num_gpus*2 — confirm
    // whether this partial initialization is intentional.
    for (int gpu = 0; gpu < num_gpus; gpu++) {
      wait_marker[gpu] = 0;
    }
    for (int i = 0; i < 4; i++) {
      GUARD_CU(events[i].Allocate(num_gpus * 2, util::HOST));
      GUARD_CU(events_set[i].Allocate(num_gpus * 2, util::HOST));
      for (int gpu = 0; gpu < num_gpus * 2; gpu++) {
        events[i][gpu] = new cudaEvent_t[num_stages];
        events_set[i][gpu] = new bool[num_stages];
        for (int stage = 0; stage < num_stages; stage++) {
          GUARD_CU2(cudaEventCreateWithFlags(&(events[i][gpu][stage]),
                                             cudaEventDisableTiming),
                    "cudaEventCreate failed.");
          events_set[i][gpu][stage] = false;
        }
      }
    }
    for (int gpu = 0; gpu < num_gpus; gpu++) {
      for (int i = 0; i < 2; i++) {
        in_length[i][gpu] = 0;
        in_iteration[i][gpu] = 0;
      }
    }
    // Single-GPU run needs no peer communication buffers
    if (num_gpus == 1) return retval;

    // Create incoming buffer on device
    keys_in[0] = new util::Array1D<SizeT, VertexT>[num_gpus];
    keys_in[1] = new util::Array1D<SizeT, VertexT>[num_gpus];
    vertex_associate_in[0] = new util::Array1D<SizeT, VertexT>[num_gpus];
    vertex_associate_in[1] = new util::Array1D<SizeT, VertexT>[num_gpus];
    value__associate_in[0] = new util::Array1D<SizeT, ValueT>[num_gpus];
    value__associate_in[1] = new util::Array1D<SizeT, ValueT>[num_gpus];
    for (int gpu = 0; gpu < num_gpus; gpu++) {
      for (int t = 0; t < 2; t++) {
        SizeT num_in_node = num_in_nodes[gpu] * trans_factor;
        vertex_associate_in[t][gpu].SetName("vertex_associate_in[][]");
        GUARD_CU(vertex_associate_in[t][gpu].Allocate(
            num_in_node * max_num_vertex_associates, util::DEVICE));
        // NOTE(review): the debug name below says "vertex_associate_in[][]"
        // for the value_ array — likely a copy-paste slip in the label
        // (runtime string, left unchanged here).
        value__associate_in[t][gpu].SetName("vertex_associate_in[][]");
        GUARD_CU(value__associate_in[t][gpu].Allocate(
            num_in_node * max_num_value__associates, util::DEVICE));
        keys_in[t][gpu].SetName("keys_in");
        if (gpu != 0) {  // no keys buffer needed for the local GPU (index 0)
          GUARD_CU(keys_in[t][gpu].Allocate(num_in_node, util::DEVICE));
        }
      }
    }

    // Allocate outgoing buffer on device
    vertex_associate_out = new util::Array1D<SizeT, VertexT>[num_gpus];
    value__associate_out = new util::Array1D<SizeT, ValueT>[num_gpus];
    keys_out = new util::Array1D<SizeT, VertexT>[num_gpus];
    // GUARD_CU(vertex_associate_outs. Init(num_gpus, util::HOST | util::DEVICE,
    // true, cudaHostAllocMapped | cudaHostAllocPortable));
    GUARD_CU(
        vertex_associate_outs.Allocate(num_gpus, util::HOST | util::DEVICE));
    // GUARD_CU(value__associate_outs. Init(num_gpus, util::HOST | util::DEVICE,
    // true, cudaHostAllocMapped | cudaHostAllocPortable));
    GUARD_CU(
        value__associate_outs.Allocate(num_gpus, util::HOST | util::DEVICE));
    // GUARD_CU(keys_outs . Init(num_gpus, util::HOST | util::DEVICE,
    // true, cudaHostAllocMapped | cudaHostAllocPortable));
    GUARD_CU(keys_outs.Allocate(num_gpus, util::HOST | util::DEVICE));
    for (int gpu = 0; gpu < num_gpus; gpu++) {
      SizeT num_out_node = num_nodes * trans_factor;
      keys_out[gpu].SetName("keys_out[]");
      if (gpu != 0) {
        GUARD_CU(keys_out[gpu].Allocate(num_out_node, util::DEVICE));
        keys_outs[gpu] = keys_out[gpu].GetPointer(util::DEVICE);
      }
      vertex_associate_out[gpu].SetName("vertex_associate_outs[]");
      if (gpu != 0) {
        GUARD_CU(vertex_associate_out[gpu].Allocate(
            num_out_node * max_num_vertex_associates, util::DEVICE));
        vertex_associate_outs[gpu] =
            vertex_associate_out[gpu].GetPointer(util::DEVICE);
      }
      value__associate_out[gpu].SetName("value__associate_outs[]");
      if (gpu != 0) {
        GUARD_CU(value__associate_out[gpu].Allocate(
            num_out_node * max_num_value__associates, util::DEVICE));
        value__associate_outs[gpu] =
            value__associate_out[gpu].GetPointer(util::DEVICE);
      }
      // When skipping make-out selection, only peer 1's buffers are real;
      // peers >= 2 alias them below.
      if (skip_makeout_selection && gpu == 1) break;
    }
    if (skip_makeout_selection) {
      for (int gpu = 2; gpu < num_gpus; gpu++) {
        keys_out[gpu].SetPointer(keys_out[1].GetPointer(util::DEVICE),
                                 keys_out[1].GetSize(), util::DEVICE);
        keys_outs[gpu] = keys_out[gpu].GetPointer(util::DEVICE);
        vertex_associate_out[gpu].SetPointer(
            vertex_associate_out[1].GetPointer(util::DEVICE),
            vertex_associate_out[1].GetSize(), util::DEVICE);
        vertex_associate_outs[gpu] =
            vertex_associate_out[gpu].GetPointer(util::DEVICE);
        value__associate_out[gpu].SetPointer(
            value__associate_out[1].GetPointer(util::DEVICE),
            value__associate_out[1].GetSize(), util::DEVICE);
        value__associate_outs[gpu] =
            value__associate_out[gpu].GetPointer(util::DEVICE);
      }
    }
    GUARD_CU(keys_outs.Move(util::HOST, util::DEVICE));
    GUARD_CU(vertex_associate_outs.Move(util::HOST, util::DEVICE));
    GUARD_CU(value__associate_outs.Move(util::HOST, util::DEVICE));

    if (false)  //(TO_TRACK)
    {
      GUARD_CU(org_checkpoint.Allocate(max_queue_length, util::DEVICE));
      GUARD_CU(org_d_out.Allocate(max_queue_length, util::DEVICE));
      GUARD_CU(org_offset1.Allocate(max_queue_length, util::DEVICE));
      GUARD_CU(org_offset2.Allocate(max_queue_length, util::DEVICE));
      GUARD_CU(org_queue_idx.Allocate(max_queue_length, util::DEVICE));
      GUARD_CU(org_block_idx.Allocate(max_queue_length, util::DEVICE));
      GUARD_CU(org_thread_idx.Allocate(max_queue_length, util::DEVICE));
    }
    return retval;
  }  // end Init(..)

  /**
   * @brief Performs reset work needed for mgpu slice. Must be called prior to
   * each search \return cudaError_t object which indicates the success of all
   * CUDA function calls.
   */
  cudaError_t Reset(util::Location target = util::DEVICE) {
    cudaError_t retval = cudaSuccess;
    // if (retval = util::SetDevice(gpu_idx)) return retval;
    for (int gpu = 0; gpu < num_gpus * 2; gpu++) wait_marker[gpu] = 0;
    for (int i = 0; i < 4; i++)
      for (int gpu = 0; gpu < num_gpus * 2; gpu++)
        for (int stage = 0; stage < num_stages; stage++)
          events_set[i][gpu][stage] = false;
    for (int gpu = 0; gpu < num_gpus; gpu++)
      for (int i = 0; i < 2; i++) in_length[i][gpu] = 0;
    // NOTE(review): the out_length reset loop appears twice — the second loop
    // is redundant (harmless but dead work).
    for (int peer = 0; peer < num_gpus; peer++) out_length[peer] = 1;
    for (int peer = 0; peer < num_gpus; peer++) out_length[peer] = 1;
    return retval;
  }  // end Reset(...)
};   // end DataSliceBase

}  // namespace app
}  // namespace gunrock

// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
the_stack
//#include "util.h"
#include "util.cuh"
#include "util_type.h"
#include "util_type_internal.h"
#include "util_func.h"
#include "update_ops_cuda.h"
#include "update_ops_cuda_device_functions.h"

// Hadamard gate on `target_qubit_index`: H = (1/sqrt(2)) [[1, 1], [1, -1]].
// One thread updates one pair of amplitudes (basis0, basis1) that differ only
// in the target bit, so the launch covers dim/2 threads in total.
__global__ void H_gate_gpu(unsigned int target_qubit_index, GTYPE *state_gpu, ITYPE dim) {
	ITYPE j = blockIdx.x * blockDim.x + threadIdx.x;
	ITYPE basis0, basis1;
	GTYPE tmp;
	double inv_sqrt=1.0/sqrt(2.0);
	if (j < (dim >> 1)){
		//basis0 = ((j & ~((ONE<< i)-1)) << 1) + (j & ((ONE<< i)-1));
		//basis1 = basis0 + (ONE<< i);
		// Insert a 0 bit at position `target_qubit_index` of j: basis0 is the
		// basis index with the target bit cleared, basis1 the partner with it set.
		basis0 = (j >> target_qubit_index);
		basis0 = basis0 << (target_qubit_index + 1);
		basis0 += j & ((1ULL << target_qubit_index) - 1);
		basis1 = basis0 ^ (1ULL << target_qubit_index);
		// a0' = (a0 + a1)/sqrt(2), a1' = (a0 - a1)/sqrt(2).
		// `tmp` keeps the old a0; a1 is read before it is overwritten.
		tmp = state_gpu[basis0];
		state_gpu[basis0] = cuCadd(tmp, state_gpu[basis1]);
		state_gpu[basis1] = cuCadd(tmp, make_cuDoubleComplex(-1*state_gpu[basis1].x, -1*state_gpu[basis1].y));
		state_gpu[basis0] = make_cuDoubleComplex(state_gpu[basis0].x * inv_sqrt, state_gpu[basis0].y * inv_sqrt);
		state_gpu[basis1] = make_cuDoubleComplex(state_gpu[basis1].x * inv_sqrt, state_gpu[basis1].y * inv_sqrt);
	}
}

// Host wrapper for H_gate_gpu: switches to `device_number` if needed and
// launches on the given stream, then synchronizes and checks for errors.
// NOTE(review): the previous device is never restored after cudaSetDevice,
// and `grid = half_dim / block` only covers all pairs when half_dim is a
// multiple of block (true when dim is a power of two) -- confirm callers
// guarantee this.
__host__ void H_gate_host(unsigned int target_qubit_index, void* state, ITYPE dim, void* stream, unsigned int device_number) {
	int current_device = get_current_device();
	if (device_number != current_device) cudaSetDevice((int)device_number);
	GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state);
	cudaStream_t* cuda_stream = reinterpret_cast<cudaStream_t*>(stream);
	cudaError cudaStatus;
	ITYPE half_dim = dim >> 1;
	unsigned int block = half_dim <= 1024 ?
half_dim : 1024;
	unsigned int grid = half_dim / block;
	H_gate_gpu << <grid, block, 0, *cuda_stream >> > (target_qubit_index, state_gpu, dim);
	checkCudaErrors(cudaStreamSynchronize(*cuda_stream), __FILE__, __LINE__);
	cudaStatus = cudaGetLastError();
	checkCudaErrors(cudaStatus, __FILE__, __LINE__);
	state = reinterpret_cast<void*>(state_gpu); // no-op: only reassigns the local parameter copy
}

// Pauli-X (NOT) gate: swaps the amplitudes of each pair of basis states that
// differ only in the target bit.
__global__ void X_gate_gpu(unsigned int target_qubit_index, GTYPE *state_gpu, ITYPE dim) {
	ITYPE j = blockIdx.x * blockDim.x + threadIdx.x;
	ITYPE basis0, basis1;
	GTYPE tmp;
	if (j < (dim>>1)){
		//basis0 = ((j & ~((ONE<< i)-1)) << 1) + (j & ((ONE<< i)-1));
		//basis1 = basis0 + (ONE<< i);
		// Same pair-index construction as H_gate_gpu.
		basis0 = (j >> target_qubit_index);
		basis0 = basis0 << (target_qubit_index + 1);
		basis0 += j & ((1ULL << target_qubit_index) - 1);
		basis1 = basis0 ^ (1ULL << target_qubit_index);
		tmp = state_gpu[basis0];
		state_gpu[basis0] = state_gpu[basis1];
		state_gpu[basis1] = tmp;
	}
}

// Host wrapper for X_gate_gpu; same launch pattern and caveats as H_gate_host.
__host__ void X_gate_host(unsigned int target_qubit_index, void* state, ITYPE dim, void* stream, unsigned int device_number) {
	int current_device = get_current_device();
	if (device_number != current_device) cudaSetDevice(device_number);
	GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state);
	cudaStream_t* cuda_stream = reinterpret_cast<cudaStream_t*>(stream);
	cudaError cudaStatus;
	ITYPE half_dim = dim >> 1;
	unsigned int block = half_dim <= 1024 ?
half_dim : 1024;
	unsigned int grid = half_dim / block;
	X_gate_gpu << <grid, block, 0, *cuda_stream >> > (target_qubit_index, state_gpu, dim);
	checkCudaErrors(cudaStreamSynchronize(*cuda_stream), __FILE__, __LINE__);
	cudaStatus = cudaGetLastError();
	checkCudaErrors(cudaStatus, __FILE__, __LINE__);
	state = reinterpret_cast<void*>(state_gpu); // no-op: only reassigns the local parameter copy
}

// Pauli-Y gate: a0' = -i*a1 = (Im a1, -Re a1); a1' = i*a0 = (-Im a0, Re a0).
__global__ void Y_gate_gpu(unsigned int target_qubit_index, GTYPE *state_gpu, ITYPE dim) {
	ITYPE j = blockIdx.x * blockDim.x + threadIdx.x;
	ITYPE basis0, basis1;
	GTYPE tmp;
	if (j < (dim>>1)){
		// Same pair-index construction as H_gate_gpu.
		basis0 = (j >> target_qubit_index);
		basis0 = basis0 << (target_qubit_index + 1);
		basis0 += j & ((1ULL << target_qubit_index) - 1);
		basis1 = basis0 ^ (1ULL << target_qubit_index);
		tmp = state_gpu[basis0];
		state_gpu[basis0] = make_cuDoubleComplex(cuCimag(state_gpu[basis1]), -cuCreal(state_gpu[basis1]));
		state_gpu[basis1] = make_cuDoubleComplex(-cuCimag(tmp), cuCreal(tmp));
	}
}

// Host wrapper for Y_gate_gpu; same launch pattern and caveats as H_gate_host.
__host__ void Y_gate_host(unsigned int target_qubit_index, void* state, ITYPE dim, void* stream, unsigned int device_number) {
	int current_device = get_current_device();
	if (device_number != current_device) cudaSetDevice(device_number);
	GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state);
	cudaStream_t* cuda_stream = reinterpret_cast<cudaStream_t*>(stream);
	cudaError cudaStatus;
	ITYPE half_dim = dim >> 1;
	unsigned int block = half_dim <= 1024 ?
half_dim : 1024;
	unsigned int grid = half_dim / block;
	Y_gate_gpu << <grid, block, 0, *cuda_stream >> > (target_qubit_index, state_gpu, dim);
	checkCudaErrors(cudaStreamSynchronize(*cuda_stream), __FILE__, __LINE__);
	cudaStatus = cudaGetLastError();
	checkCudaErrors(cudaStatus, __FILE__, __LINE__);
	state = reinterpret_cast<void*>(state_gpu); // no-op: only reassigns the local parameter copy
}

// Pauli-Z gate: negates the amplitude of every basis state whose target bit
// is 1; amplitudes with target bit 0 are untouched.
__global__ void Z_gate_gpu(unsigned int target_qubit_index, GTYPE *state_gpu, ITYPE DIM) {
	ITYPE j = blockIdx.x * blockDim.x + threadIdx.x;
	ITYPE basis0, basis1;
	if (j < (DIM>>1)){
		basis0 = insert_zero_to_basis_index_device(j, target_qubit_index);
		basis1 = basis0^(1ULL<<target_qubit_index);
		state_gpu[basis1] = make_cuDoubleComplex(-cuCreal(state_gpu[basis1]), -cuCimag(state_gpu[basis1]));
	}
}

// Host wrapper for Z_gate_gpu; same launch pattern and caveats as H_gate_host.
__host__ void Z_gate_host(unsigned int target_qubit_index, void* state, ITYPE dim, void* stream, unsigned int device_number) {
	int current_device = get_current_device();
	if (device_number != current_device) cudaSetDevice(device_number);
	GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state);
	cudaStream_t* cuda_stream = reinterpret_cast<cudaStream_t*>(stream);
	cudaError cudaStatus;
	ITYPE half_dim = dim >> 1;
	unsigned int block = half_dim <= 1024 ?
half_dim : 1024;
	unsigned int grid = half_dim / block;
	Z_gate_gpu << <grid, block, 0, *cuda_stream >> > (target_qubit_index, state_gpu, dim);
	checkCudaErrors(cudaStreamSynchronize(*cuda_stream), __FILE__, __LINE__);
	cudaStatus = cudaGetLastError();
	checkCudaErrors(cudaStatus, __FILE__, __LINE__);
	state = reinterpret_cast<void*>(state_gpu); // no-op: only reassigns the local parameter copy
}

// Controlled-Z gate: negates the amplitude of every basis state in which BOTH
// qubits are 1. CZ is symmetric in its qubits, so the kernel only needs the
// two indices sorted (large_index > small_index). One thread handles one of
// the dim/4 basis states of the |..1..1..> subspace.
__global__ void CZ_gate_gpu(unsigned int large_index, unsigned int small_index, GTYPE *state_gpu, ITYPE DIM) {
	ITYPE head, body, tail;
	ITYPE basis11;
	ITYPE quarter_DIM = DIM >> 2;
	ITYPE j = blockIdx.x * blockDim.x + threadIdx.x;
	if (j < quarter_DIM){
		// Split j into three bit fields and re-assemble with a 1 inserted at
		// both qubit positions, yielding the |..1..1..> basis index.
		head = j >> (large_index - 1);
		body = (j & ((1ULL << (large_index- 1)) - 1)) >> small_index; // (j % 2^(large-1)) >> small
		tail = j & ((1ULL << small_index) - 1); // j%(2^small)
		basis11 = (head << (large_index + 1)) + (body << (small_index + 1)) + (1ULL << large_index) + (1ULL << small_index) + tail;
		state_gpu[basis11] = make_cuDoubleComplex(-cuCreal(state_gpu[basis11]), -cuCimag(state_gpu[basis11]));
	}
}

// Host wrapper for CZ_gate_gpu: sorts control/target into (large, small)
// before launching, since the kernel requires large_index > small_index.
__host__ void CZ_gate_host(unsigned int control_qubit_index, unsigned int target_qubit_index, void* state, ITYPE dim, void* stream, unsigned int device_number) {
	int current_device = get_current_device();
	if (device_number != current_device) cudaSetDevice(device_number);
	GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state);
	cudaStream_t* cuda_stream = reinterpret_cast<cudaStream_t*>(stream);
	cudaError cudaStatus;
	ITYPE quad_dim = dim >> 2;
	ITYPE large_index, small_index;
	if (control_qubit_index > target_qubit_index) {
		large_index = control_qubit_index;
		small_index = target_qubit_index;
	} else {
		large_index = target_qubit_index;
		small_index = control_qubit_index;
	}
	unsigned int block = quad_dim <= 1024 ?
quad_dim : 1024;
	unsigned int grid = quad_dim / block;
	CZ_gate_gpu << <grid, block, 0, *cuda_stream >> > (large_index, small_index, state_gpu, dim);
	checkCudaErrors(cudaStreamSynchronize(*cuda_stream), __FILE__, __LINE__);
	cudaStatus = cudaGetLastError();
	checkCudaErrors(cudaStatus, __FILE__, __LINE__);
	state = reinterpret_cast<void*>(state_gpu); // no-op: only reassigns the local parameter copy
}

// CNOT gate: for every basis state with the control bit set, swaps the
// amplitudes of target=0 and target=1. `left`/`right` sort the two qubit
// positions so the bit-field splitting below is valid for either ordering.
__global__ void CNOT_gate_gpu(unsigned int control_qubit_index, unsigned int target_qubit_index, GTYPE *psi_gpu, ITYPE dim) {
	unsigned int left, right;
	ITYPE head, body, tail;
	ITYPE basis10, basis11;
	GTYPE tmp_psi;
	ITYPE quarter_dim = dim >>2 ;
	ITYPE j = blockIdx.x * blockDim.x + threadIdx.x;
	if (target_qubit_index > control_qubit_index){
		left = target_qubit_index;
		right = control_qubit_index;
	} else {
		left = control_qubit_index;
		right = target_qubit_index;
	}
	if (j < quarter_dim){
		head = j >> (left - 1);
		body = (j & ((1ULL << (left - 1)) - 1)) >> right; // (j % 2^(k-1)) >> i
		tail = j & ((1ULL << right) - 1); // j%(2^i)
		// basis10: control=1, target=0;  basis11: control=1, target=1.
		// ONE<<control
		basis10 = (head << (left + 1)) + (body << (right + 1)) + (1ULL << control_qubit_index) + tail;
		// ONE<<target, ONE<<control
		basis11 = (head << (left + 1)) + (body << (right + 1)) + (1ULL << target_qubit_index) + (1ULL << control_qubit_index) + tail;
		tmp_psi = psi_gpu[basis11];
		psi_gpu[basis11] = psi_gpu[basis10];
		psi_gpu[basis10] = tmp_psi;
	}
}

// Host wrapper for CNOT_gate_gpu; the kernel itself handles either ordering
// of control/target, so no sorting is needed here.
__host__ void CNOT_gate_host(unsigned int control_qubit_index, unsigned int target_qubit_index, void* state, ITYPE dim, void* stream, unsigned int device_number) {
	int current_device = get_current_device();
	if (device_number != current_device) cudaSetDevice(device_number);
	GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state);
	cudaStream_t* cuda_stream = reinterpret_cast<cudaStream_t*>(stream);
	cudaError cudaStatus;
	ITYPE quad_dim = dim >> 2;
	unsigned int block = quad_dim <= 1024 ?
quad_dim : 1024;
	unsigned int grid = quad_dim / block;
	CNOT_gate_gpu << <grid, block, 0, *cuda_stream >> > (control_qubit_index, target_qubit_index, state_gpu, dim);
	checkCudaErrors(cudaStreamSynchronize(*cuda_stream), __FILE__, __LINE__);
	cudaStatus = cudaGetLastError();
	checkCudaErrors(cudaStatus, __FILE__, __LINE__);
	state = reinterpret_cast<void*>(state_gpu); // no-op: only reassigns the local parameter copy
}

// SWAP gate: exchanges the amplitudes of |..0..1..> and |..1..0..> for the
// two target qubits. The bit-field math assumes
// target_qubit_index1 > target_qubit_index0 (the host wrapper guarantees it).
__global__ void SWAP_gate_gpu(unsigned int target_qubit_index0, unsigned int target_qubit_index1, GTYPE *state_gpu, ITYPE dim) {
	ITYPE head, body, tail;
	ITYPE basis01, basis10;
	GTYPE tmp;
	ITYPE quarter_dim = dim >> 2;
	ITYPE j = blockIdx.x * blockDim.x + threadIdx.x;
	if (j < quarter_dim){
		head = j >> (target_qubit_index1 - 1);
		body = (j & ((1ULL << (target_qubit_index1 - 1)) - 1)) >> target_qubit_index0; // (j % 2^(k-1)) >> i
		tail = j & ((1ULL << target_qubit_index0) - 1); // j%(2^i)
		basis01 = (head << (target_qubit_index1 + 1)) + (body << (target_qubit_index0 + 1)) + (1ULL << target_qubit_index0) + tail;
		basis10 = (head << (target_qubit_index1 + 1)) + (body << (target_qubit_index0 + 1)) + (1ULL << target_qubit_index1) + tail;
		tmp = state_gpu[basis01];
		state_gpu[basis01] = state_gpu[basis10];
		state_gpu[basis10] = tmp;
	}
}

// Host wrapper for SWAP_gate_gpu: sorts the two indices and passes them as
// (small, large) to satisfy the kernel's ordering assumption.
__host__ void SWAP_gate_host(unsigned int target_qubit_index0, unsigned int target_qubit_index1, void* state, ITYPE dim, void* stream, unsigned int device_number) {
	int current_device = get_current_device();
	if (device_number != current_device) cudaSetDevice(device_number);
	GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state);
	cudaStream_t* cuda_stream = reinterpret_cast<cudaStream_t*>(stream);
	cudaError cudaStatus;
	unsigned int large_index, small_index;
	ITYPE quad_dim = dim >> 2;
	unsigned int block = quad_dim <= 1024 ?
quad_dim : 1024;
	unsigned int grid = quad_dim / block;
	if (target_qubit_index1 > target_qubit_index0) {
		large_index = target_qubit_index1;
		small_index = target_qubit_index0;
	} else {
		large_index = target_qubit_index0;
		small_index = target_qubit_index1;
	}
	SWAP_gate_gpu << <grid, block, 0, *cuda_stream >> > (small_index, large_index, state_gpu, dim);
	checkCudaErrors(cudaStreamSynchronize(*cuda_stream), __FILE__, __LINE__);
	cudaStatus = cudaGetLastError();
	checkCudaErrors(cudaStatus, __FILE__, __LINE__);
	state = reinterpret_cast<void*>(state_gpu); // no-op: only reassigns the local parameter copy
}

// P0 projector (unnormalized): zeroes every amplitude whose target bit is 1,
// leaving the target=0 amplitudes unchanged.
__global__ void P0_gate_gpu(UINT target_qubit_index, GTYPE *state_gpu, ITYPE dim){
	ITYPE state_index = blockIdx.x * blockDim.x + threadIdx.x;
	const ITYPE loop_dim = dim>>1;
	ITYPE mask = (1ULL << target_qubit_index);
	if(state_index<loop_dim){
		// insert_zero gives the target=0 index; XOR with mask selects target=1.
		ITYPE tmp_index = insert_zero_to_basis_index_device(state_index, target_qubit_index) ^ mask;
		state_gpu[tmp_index] = make_cuDoubleComplex(0.0, 0.0);
	}
}

// Host wrapper for P0_gate_gpu; same launch pattern and caveats as H_gate_host.
__host__ void P0_gate_host(UINT target_qubit_index, void* state, ITYPE dim, void* stream, unsigned int device_number) {
	int current_device = get_current_device();
	if (device_number != current_device) cudaSetDevice(device_number);
	GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state);
	cudaStream_t* cuda_stream = reinterpret_cast<cudaStream_t*>(stream);
	cudaError cudaStatus;
	const ITYPE loop_dim = dim >> 1;
	unsigned int block = loop_dim <= 1024 ?
loop_dim : 1024;
	unsigned int grid = loop_dim / block;
	P0_gate_gpu << <grid, block, 0, *cuda_stream >> > (target_qubit_index, state_gpu, dim);
	checkCudaErrors(cudaStreamSynchronize(*cuda_stream), __FILE__, __LINE__);
	cudaStatus = cudaGetLastError();
	checkCudaErrors(cudaStatus, __FILE__, __LINE__);
	state = reinterpret_cast<void*>(state_gpu); // no-op: only reassigns the local parameter copy
}

// P1 projector (unnormalized): zeroes every amplitude whose target bit is 0,
// leaving the target=1 amplitudes unchanged.
__global__ void P1_gate_gpu(UINT target_qubit_index, GTYPE *state_gpu, ITYPE dim){
	ITYPE state_index = blockIdx.x * blockDim.x + threadIdx.x;
	const ITYPE loop_dim = dim>>1;
	if(state_index<loop_dim){
		ITYPE tmp_index = insert_zero_to_basis_index_device(state_index, target_qubit_index);
		state_gpu[tmp_index] = make_cuDoubleComplex(0.0, 0.0);
	}
}

// Host wrapper for P1_gate_gpu; same launch pattern and caveats as H_gate_host.
__host__ void P1_gate_host(UINT target_qubit_index, void* state, ITYPE dim, void* stream, unsigned int device_number) {
	int current_device = get_current_device();
	if (device_number != current_device) cudaSetDevice(device_number);
	GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state);
	cudaStream_t* cuda_stream = reinterpret_cast<cudaStream_t*>(stream);
	cudaError cudaStatus;
	const ITYPE loop_dim = dim >> 1;
	unsigned int block = loop_dim <= 1024 ?
loop_dim : 1024;
	unsigned int grid = loop_dim / block;
	P1_gate_gpu << <grid, block, 0, *cuda_stream >> > (target_qubit_index, state_gpu, dim);
	checkCudaErrors(cudaStreamSynchronize(*cuda_stream), __FILE__, __LINE__);
	cudaStatus = cudaGetLastError();
	checkCudaErrors(cudaStatus, __FILE__, __LINE__);
	state = reinterpret_cast<void*>(state_gpu); // no-op: only reassigns the local parameter copy
}

// Multiplies every amplitude by the real scalar `normalize_factor`.
// One thread per amplitude (dim threads in total).
__global__ void normalize_gpu(const double normalize_factor, GTYPE *state_gpu, ITYPE dim){
	ITYPE state_index = blockIdx.x * blockDim.x + threadIdx.x;
	const ITYPE loop_dim = dim;
	if(state_index<loop_dim){
		state_gpu[state_index] = make_cuDoubleComplex(
			normalize_factor * cuCreal(state_gpu[state_index]),
			normalize_factor * cuCimag(state_gpu[state_index])
		);
	}
}

// Scales the state by sqrt(1/squared_norm) so that the resulting vector has
// unit norm (assuming `squared_norm` is the current squared norm).
__host__ void normalize_host(double squared_norm, void* state, ITYPE dim, void* stream, unsigned int device_number) {
	int current_device = get_current_device();
	if (device_number != current_device) cudaSetDevice(device_number);
	GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state);
	cudaStream_t* cuda_stream = reinterpret_cast<cudaStream_t*>(stream);
	cudaError cudaStatus;
	const ITYPE loop_dim = dim;
	const double normalize_factor = sqrt(1./squared_norm);
	//const double normalize_factor = 1. / norm;
	unsigned int block = loop_dim <= 1024 ?
loop_dim : 1024;
	unsigned int grid = loop_dim / block;
	normalize_gpu << <grid, block, 0, *cuda_stream >> > (normalize_factor, state_gpu, dim);
	checkCudaErrors(cudaStreamSynchronize(*cuda_stream), __FILE__, __LINE__);
	cudaStatus = cudaGetLastError();
	checkCudaErrors(cudaStatus, __FILE__, __LINE__);
	state = reinterpret_cast<void*>(state_gpu); // no-op: only reassigns the local parameter copy
}

// Rotation gates: delegate to the generic single-qubit Pauli rotation with
// Pauli id 1 = X, 2 = Y, 3 = Z.
__host__ void RX_gate_host(UINT target_qubit_index, double angle, void* state, ITYPE dim, void* stream, unsigned int device_number){
	single_qubit_Pauli_rotation_gate_host(target_qubit_index, 1, angle, state, dim, stream, device_number);
}

__host__ void RY_gate_host(UINT target_qubit_index, double angle, void* state, ITYPE dim, void* stream, unsigned int device_number){
	single_qubit_Pauli_rotation_gate_host(target_qubit_index, 2, angle, state, dim, stream, device_number);
}

__host__ void RZ_gate_host(UINT target_qubit_index, double angle, void* state, ITYPE dim, void* stream, unsigned int device_number){
	single_qubit_Pauli_rotation_gate_host(target_qubit_index, 3, angle, state, dim, stream, device_number);
}

// S gate: phase gate [[1,0],[0,i]].
__host__ void S_gate_host(UINT target_qubit_index, void* state, ITYPE dim, void* stream, unsigned int device_number) {
	CPPCTYPE diagonal_matrix[2];
	diagonal_matrix[0] = CPPCTYPE(1.0, 0.0);
	diagonal_matrix[1] = CPPCTYPE(0.0, 1.0);
	single_qubit_diagonal_matrix_gate_host(target_qubit_index, diagonal_matrix, state, dim, stream, device_number);
}

// S-dagger gate: [[1,0],[0,-i]].
__host__ void Sdag_gate_host(UINT target_qubit_index, void* state, ITYPE dim, void* stream, unsigned int device_number) {
	CPPCTYPE diagonal_matrix[2];
	diagonal_matrix[0] = CPPCTYPE(1.0, 0.0);
	diagonal_matrix[1] = CPPCTYPE(0.0, -1.0);
	single_qubit_diagonal_matrix_gate_host(target_qubit_index, diagonal_matrix, state, dim, stream, device_number);
}

// T gate: [[1,0],[0,exp(i*pi/4)]] with exp(i*pi/4) = (1+i)/sqrt(2).
__host__ void T_gate_host(UINT target_qubit_index, void* state, ITYPE dim, void* stream, unsigned int device_number) {
	CPPCTYPE diagonal_matrix[2];
	diagonal_matrix[0] = CPPCTYPE(1.0, 0.0);
	diagonal_matrix[1] = CPPCTYPE(1.0 / sqrt(2), 1.0 / sqrt(2));
	single_qubit_diagonal_matrix_gate_host(target_qubit_index, diagonal_matrix, state, dim, stream, device_number);
}

// T-dagger gate: [[1,0],[0,exp(-i*pi/4)]] with exp(-i*pi/4) = (1-i)/sqrt(2).
// (The original comment said "-exp(i*pi/4)", which did not match the matrix.)
__host__ void Tdag_gate_host(UINT target_qubit_index, void* state, ITYPE dim, void* stream, unsigned int device_number) {
	CPPCTYPE diagonal_matrix[2];
	diagonal_matrix[0] = CPPCTYPE(1.0, 0.0);
	diagonal_matrix[1] = CPPCTYPE(1.0 / sqrt(2), -1.0 / sqrt(2));
	single_qubit_diagonal_matrix_gate_host(target_qubit_index, diagonal_matrix, state, dim, stream, device_number);
}

// sqrt(X) gate: 0.5 * [[1+i, 1-i], [1-i, 1+i]]; squaring it gives Pauli-X.
__host__ void sqrtX_gate_host(UINT target_qubit_index, void* state, ITYPE dim, void* stream, unsigned int device_number) {
	CPPCTYPE SQRT_X_GATE_MATRIX[4] = {
		std::complex<double>(0.5, 0.5), std::complex<double>(0.5, -0.5),
		std::complex<double>(0.5, -0.5), std::complex<double>(0.5, 0.5)
	};
	single_qubit_dense_matrix_gate_host(target_qubit_index, SQRT_X_GATE_MATRIX, state, dim, stream, device_number);
}

// sqrt(X)-dagger gate: conjugate transpose of sqrt(X).
__host__ void sqrtXdag_gate_host(UINT target_qubit_index, void* state, ITYPE dim, void* stream, unsigned int device_number) {
	CPPCTYPE SQRT_X_DAG_GATE_MATRIX[4] = {
		std::complex<double>(0.5, -0.5), std::complex<double>(0.5, 0.5),
		std::complex<double>(0.5, 0.5), std::complex<double>(0.5, -0.5)
	};
	single_qubit_dense_matrix_gate_host(target_qubit_index, SQRT_X_DAG_GATE_MATRIX, state, dim, stream, device_number);
}

// sqrt(Y) gate: 0.5 * [[1+i, -1-i], [1+i, 1+i]]; squaring it gives Pauli-Y.
__host__ void sqrtY_gate_host(UINT target_qubit_index, void* state, ITYPE dim, void* stream, unsigned int device_number) {
	CPPCTYPE SQRT_Y_GATE_MATRIX[4] = {
		std::complex<double>(0.5, 0.5), std::complex<double>(-0.5, -0.5),
		std::complex<double>(0.5, 0.5), std::complex<double>(0.5, 0.5)
	};
	single_qubit_dense_matrix_gate_host(target_qubit_index, SQRT_Y_GATE_MATRIX, state, dim, stream, device_number);
}

// sqrt(Y)-dagger gate: conjugate transpose of sqrt(Y).
__host__ void sqrtYdag_gate_host(UINT target_qubit_index, void* state, ITYPE dim, void* stream, unsigned int device_number) {
	CPPCTYPE SQRT_Y_DAG_GATE_MATRIX[4] = {
		std::complex<double>(0.5, -0.5), std::complex<double>(0.5, -0.5),
		std::complex<double>(-0.5, 0.5), std::complex<double>(0.5, -0.5)
	};
	single_qubit_dense_matrix_gate_host(target_qubit_index, SQRT_Y_DAG_GATE_MATRIX, state, dim, stream, device_number);
}
the_stack
// includes, system
#include <stdio.h>
#include <stdlib.h>

// includes, project
#include <sdkHelper.h>  // helper for shared that are common to CUDA SDK samples
#include <shrQATest.h>  // This is for automated testing output (--qatest)
#include <shrUtils.h>

// includes CUDA
#include <cuda_runtime.h>
#include <cuda.h>

////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions

// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err)  __checkCudaErrors (err, __FILE__, __LINE__)

// Prints file:line and the CUDA error string, then terminates the process.
inline void __checkCudaErrors(cudaError err, const char *file, const int line )
{
    if(cudaSuccess != err) {
        fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
        exit(-1);
    }
}

// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg)  __getLastCudaError (msg, __FILE__, __LINE__)

// Checks cudaGetLastError (e.g. after a kernel launch) and exits on failure.
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
    cudaError_t err = cudaGetLastError();
    if (cudaSuccess != err) {
        fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",
        file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
        exit(-1);
    }
}

// General GPU Device CUDA Initialization:
// validates devID (clamping negatives to 0), requires compute capability >= 1,
// selects the device with cudaSetDevice, and returns the chosen device id
// (or -devID when devID is out of range).
int gpuDeviceInit(int devID)
{
    int deviceCount;
    checkCudaErrors(cudaGetDeviceCount(&deviceCount));
    if (deviceCount == 0) {
        fprintf(stderr, "gpuDeviceInit() CUDA error: no devices supporting CUDA.\n");
        exit(-1);
    }
    if (devID < 0)
        devID = 0;
    if (devID > deviceCount-1) {
        fprintf(stderr, "\n");
        fprintf(stderr, ">> %d CUDA capable GPU device(s) detected. <<\n", deviceCount);
        fprintf(stderr, ">> gpuDeviceInit (-device=%d) is not a valid GPU device. <<\n", devID);
        fprintf(stderr, "\n");
        return -devID;
    }
    cudaDeviceProp deviceProp;
    checkCudaErrors( cudaGetDeviceProperties(&deviceProp, devID) );
    if (deviceProp.major < 1) {
        fprintf(stderr, "gpuDeviceInit(): GPU device does not support CUDA.\n");
        exit(-1);
    }
    checkCudaErrors( cudaSetDevice(devID) );
    printf("gpuDeviceInit() CUDA Device [%d]: \"%s\n", devID, deviceProp.name);
    return devID;
}

// This function returns the best GPU (with maximum GFLOPS), estimated as
// multiprocessor count * cores-per-SM * clock rate. When a device with
// SM major version > 2 exists, only devices of the best major version
// are considered.
int gpuGetMaxGflopsDeviceId()
{
    int current_device   = 0, sm_per_multiproc = 0;
    int max_compute_perf = 0, max_perf_device  = 0;
    int device_count     = 0, best_SM_arch     = 0;
    cudaDeviceProp deviceProp;

    cudaGetDeviceCount( &device_count );
    // Find the best major SM Architecture GPU device
    while ( current_device < device_count ) {
        cudaGetDeviceProperties( &deviceProp, current_device );
        if (deviceProp.major > 0 && deviceProp.major < 9999) {
            best_SM_arch = MAX(best_SM_arch, deviceProp.major);
        }
        current_device++;
    }

    // Find the best CUDA capable GPU device
    current_device = 0;
    while( current_device < device_count ) {
        cudaGetDeviceProperties( &deviceProp, current_device );
        if (deviceProp.major == 9999 && deviceProp.minor == 9999) {
            // major/minor 9999 marks a device emulation target
            sm_per_multiproc = 1;
        } else {
            sm_per_multiproc = _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor);
        }
        int compute_perf  = deviceProp.multiProcessorCount * sm_per_multiproc * deviceProp.clockRate;
        if( compute_perf  > max_compute_perf ) {
            // If we find GPU with SM major > 2, search only these
            if ( best_SM_arch > 2 ) {
                // If our device==dest_SM_arch, choose this, or else pass
                if (deviceProp.major == best_SM_arch) {
                    max_compute_perf  = compute_perf;
                    max_perf_device   = current_device;
                }
            } else {
                max_compute_perf  = compute_perf;
                max_perf_device   = current_device;
            }
        }
        ++current_device;
    }
    return max_perf_device;
}

// Initialization code to find the best CUDA Device:
// honors a --device=N command-line flag, otherwise picks the highest-GFLOPS
// device; returns the selected device id.
int findCudaDevice(int argc, const char **argv)
{
    cudaDeviceProp deviceProp;
    int devID = 0;
    // If the command-line has a device number specified, use it
    if (checkCmdLineFlag(argc, argv, "device")) {
        devID = getCmdLineArgumentInt(argc, argv, "device=");
        if (devID < 0) {
            printf("Invalid command line parameter\n ");
            exit(-1);
        } else {
            devID = gpuDeviceInit(devID);
            if (devID < 0) {
                printf("exiting...\n");
                shrQAFinishExit(argc, (const char **)argv, QA_FAILED);
                exit(-1);
            }
        }
    } else {
        // Otherwise pick the device with highest Gflops/s
        devID = gpuGetMaxGflopsDeviceId();
        checkCudaErrors( cudaSetDevice( devID ) );
        checkCudaErrors( cudaGetDeviceProperties(&deviceProp, devID) );
        printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
    }
    return devID;
}
// end of CUDA Helper Functions

/* Add two vectors on the GPU: c[i] = a[i] + b[i] for i < N. */
__global__ void vectorAddGPU(float *a, float *b, float *c, int N)
{
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    if (idx < N)
        c[idx] = a[idx] + b[idx];
}

// Allocate generic memory with malloc() and pin it later instead of using cudaHostAlloc()
bool bPinGenericMemory = false;

// Macro to aligned up to the memory size in question
#define MEMORY_ALIGNMENT  4096
#define ALIGN_UP(x,size) ( ((size_t)x+(size-1))&(~(size-1)) )

int main(int argc, char **argv)
{
    int n, nelem, deviceCount;
    int idev = 0; // use default device 0
    char *device = NULL;
    unsigned int flags;
    size_t bytes;
    float *a, *b, *c; // Pinned memory allocated on the CPU
    float *a_UA, *b_UA, *c_UA; // Non-4K Aligned Pinned memory on the CPU
    float *d_a, *d_b, *d_c; // Device pointers for mapped memory
    float errorNorm, refNorm, ref, diff;
    cudaDeviceProp deviceProp;

    shrQAStart(argc, argv);

    if(checkCmdLineFlag(argc, (const char **)argv, "help")) {
        printf("Usage: simpleZeroCopy [OPTION]\n\n");
        printf("Options:\n");
        printf(" --device=[device #] Specify the device to be used\n");
        printf(" --use_generic_memory (optional) use generic page-aligned for system memory\n");
        shrQAFinishExit(argc, (const char **)argv, QA_WAIVED);
    }

    /* Get the device selected by the user or default to 0, and then set it.
*/
    // Parse --device=N; fall back to device 0 on an out-of-range value.
    if(getCmdLineArgumentString(argc, (const char**)argv, "device", &device)) {
        cudaGetDeviceCount(&deviceCount);
        idev = atoi(device);
        if(idev >= deviceCount || idev < 0) {
            fprintf(stderr, "Device number %d is invalid, will use default CUDA device 0.\n", idev);
            idev = 0;
        }
    }

    // --use_generic_memory: pin malloc'd pages with cudaHostRegister instead
    // of allocating with cudaHostAlloc.
    if( checkCmdLineFlag( argc, (const char **)argv, "use_generic_memory") ) {
#if defined(__APPLE__) || defined(MACOSX)
        bPinGenericMemory = false; // Generic Pinning of System Paged memory is not currently supported on Mac OSX
#else
        bPinGenericMemory = true;
#endif
    }
    if (bPinGenericMemory) {
        printf("> Using Generic System Paged Memory (malloc)\n");
    } else {
        printf("> Using CUDA Host Allocated (cudaHostAlloc)\n");
    }

    checkCudaErrors(cudaSetDevice(idev));

    /* Verify the selected device supports mapped memory and set the device
       flags for mapping host memory. */
    checkCudaErrors(cudaGetDeviceProperties(&deviceProp, idev));

#if CUDART_VERSION >= 2020
    if(!deviceProp.canMapHostMemory) {
        fprintf(stderr, "Device %d does not support mapping CPU host memory!\n", idev);
        cudaDeviceReset();
        shrQAFinishExit(argc, (const char **)argv, QA_PASSED);
    }
    checkCudaErrors(cudaSetDeviceFlags(cudaDeviceMapHost));
#else
    // FIX: removed a stray extra comma after the format string; the original
    // `fprintf(stderr, "...", , CUDART_VERSION/1000, ...)` was a syntax error
    // whenever this #else branch was compiled (CUDART_VERSION < 2020).
    fprintf(stderr, "CUDART version %d.%d does not support <cudaDeviceProp.canMapHostMemory> field\n", CUDART_VERSION/1000, (CUDART_VERSION%100)/10);
    cudaDeviceReset();
    shrQAFinishExit(argc, (const char **)argv, QA_PASSED);
#endif

#if CUDART_VERSION < 4000
    // Generic-memory pinning needs cudaHostRegister, added in CUDA 4.0.
    if (bPinGenericMemory) {
        fprintf(stderr, "CUDART version %d.%d does not support <cudaHostRegister> function\n", CUDART_VERSION/1000, (CUDART_VERSION%100)/10);
        cudaDeviceReset();
        shrQAFinishExit(argc, (const char **)argv, QA_PASSED);
    }
#endif

    /* Allocate mapped CPU memory.
*/
    nelem = 1048576;
    bytes = nelem*sizeof(float);

    if (bPinGenericMemory) {
#if CUDART_VERSION >= 4000
        // Over-allocate so we can hand cudaHostRegister a 4K-aligned pointer.
        a_UA = (float *) malloc( bytes + MEMORY_ALIGNMENT );
        b_UA = (float *) malloc( bytes + MEMORY_ALIGNMENT );
        c_UA = (float *) malloc( bytes + MEMORY_ALIGNMENT );

        // We need to ensure memory is aligned to 4K (so we will need to padd memory accordingly)
        a = (float *) ALIGN_UP( a_UA, MEMORY_ALIGNMENT );
        b = (float *) ALIGN_UP( b_UA, MEMORY_ALIGNMENT );
        c = (float *) ALIGN_UP( c_UA, MEMORY_ALIGNMENT );

        // FIX: use the CUDA *runtime* flag cudaHostRegisterMapped here instead
        // of the driver-API flag CU_MEMHOSTALLOC_DEVICEMAP the original passed.
        // Both have value 0x02, so behavior is unchanged, but mixing driver-API
        // constants into runtime-API calls is misleading and fragile.
        checkCudaErrors(cudaHostRegister(a, bytes, cudaHostRegisterMapped));
        checkCudaErrors(cudaHostRegister(b, bytes, cudaHostRegisterMapped));
        checkCudaErrors(cudaHostRegister(c, bytes, cudaHostRegisterMapped));
#endif
    } else {
#if CUDART_VERSION >= 2020
        flags = cudaHostAllocMapped;
        checkCudaErrors(cudaHostAlloc((void **)&a, bytes, flags));
        checkCudaErrors(cudaHostAlloc((void **)&b, bytes, flags));
        checkCudaErrors(cudaHostAlloc((void **)&c, bytes, flags));
#endif
    }

    /* Initialize the vectors. */
    for(n = 0; n < nelem; n++) {
        a[n] = rand() / (float)RAND_MAX;
        b[n] = rand() / (float)RAND_MAX;
    }

    /* Get the device pointers for the pinned CPU memory mapped into the GPU
       memory space. */
#if CUDART_VERSION >= 2020
    checkCudaErrors(cudaHostGetDevicePointer((void **)&d_a, (void *)a, 0));
    checkCudaErrors(cudaHostGetDevicePointer((void **)&d_b, (void *)b, 0));
    checkCudaErrors(cudaHostGetDevicePointer((void **)&d_c, (void *)c, 0));
#endif

    /* Call the GPU kernel using the CPU pointers residing in CPU mapped memory. */
    printf("> vectorAddGPU kernel will add vectors using mapped CPU memory...\n");
    dim3 block(256);
    dim3 grid((unsigned int)ceil(nelem/(float)block.x));
    vectorAddGPU<<<grid, block>>>(d_a, d_b, d_c, nelem);
    checkCudaErrors(cudaDeviceSynchronize());
    getLastCudaError("vectorAddGPU() execution failed");

    /* Compare the results */
    printf("> Checking the results from vectorAddGPU() ...\n");
    errorNorm = 0.f;
    refNorm = 0.f;
    for(n = 0; n < nelem; n++) {
        ref = a[n] + b[n];
        diff = c[n] - ref;
        errorNorm += diff*diff;
        refNorm += ref*ref;
    }
    errorNorm = (float)sqrt((double)errorNorm);
    refNorm = (float)sqrt((double)refNorm);

    /* Memory clean up */
    printf("> Releasing CPU memory...\n");
    if (bPinGenericMemory) {
#if CUDART_VERSION >= 4000
        checkCudaErrors(cudaHostUnregister(a));
        checkCudaErrors(cudaHostUnregister(b));
        checkCudaErrors(cudaHostUnregister(c));
        free(a_UA);
        free(b_UA);
        free(c_UA);
#endif
    } else {
#if CUDART_VERSION >= 2020
        checkCudaErrors(cudaFreeHost(a));
        checkCudaErrors(cudaFreeHost(b));
        checkCudaErrors(cudaFreeHost(c));
#endif
    }

    cudaDeviceReset();
    // Pass when the relative error norm is below 1e-6.
    shrQAFinishExit(argc, (const char **)argv, (errorNorm/refNorm < 1.e-6f) ? QA_PASSED : QA_FAILED);
}
the_stack
Parallel reduction kernels */

#ifndef _REDUCE_KERNEL_H_
#define _REDUCE_KERNEL_H_

// EMUSYNC separates the steps of the warp-level reduction tail. With the
// `#if 1` branch taken it is a full __syncthreads() barrier, which makes the
// tail safe without declaring sdata volatile.
#if 1
#define EMUSYNC __syncthreads()
#else
#define EMUSYNC
#endif

#include <device_functions.h>

/*
    Parallel sum reduction using shared memory
    - takes log(n) steps for n input elements
    - uses n/2 threads
    - only works for power-of-2 arrays

    This version adds multiple elements per thread sequentially.  This reduces the overall
    cost of the algorithm while keeping the work complexity O(n) and the step complexity O(log n).
    (Brent's Theorem optimization)

    See the CUDA SDK "reduction" sample for more information.
*/

// Tree-reduces sdata[0..blockSize) into sdata[0]. Callers must have filled
// sdata and issued a __syncthreads() before calling. All threads of the block
// must call this (the guards are per-step, the barriers are block-wide).
template <unsigned int blockSize>
__device__ void
reduceBlock(float *sdata, const unsigned int tid)
{
    // do reduction in shared mem
    if (blockSize >= 512) { if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads(); }
    if (blockSize >= 256) { if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads(); }
    if (blockSize >= 128) { if (tid <  64) { sdata[tid] += sdata[tid +  64]; } __syncthreads(); }

    // Warp-width tail; each step is followed by an EMUSYNC barrier placed
    // outside the guard so every thread reaches it.
    if (tid < 32 && blockSize >= 64) { sdata[tid] += sdata[tid + 32]; } EMUSYNC;
    if (tid < 16 && blockSize >= 32) { sdata[tid] += sdata[tid + 16]; } EMUSYNC;
    if (tid <  8 && blockSize >= 16) { sdata[tid] += sdata[tid +  8]; } EMUSYNC;
    if (tid <  4 && blockSize >=  8) { sdata[tid] += sdata[tid +  4]; } EMUSYNC;
    if (tid <  2 && blockSize >=  4) { sdata[tid] += sdata[tid +  2]; } EMUSYNC;
    if (tid <  1 && blockSize >=  2) { sdata[tid] += sdata[tid +  1]; } EMUSYNC;
}

// Grid-stride accumulation of g_idata[0..n) into per-thread partials in
// dynamic shared memory, then a block reduction; writes one partial sum per
// block to g_odata[blockIdx.x]. Requires blockSize*sizeof(float) dynamic
// shared memory.
template <unsigned int blockSize, bool nIsPow2>
__device__ void
reduceBlocks(const float *g_idata, float *g_odata, unsigned int n)
{
    extern __shared__ float sdata[];

    // perform first level of reduction,
    // reading from global memory, writing to shared memory
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*(blockSize*2) + threadIdx.x;
    unsigned int gridSize = blockSize*2*gridDim.x;
    sdata[tid] = 0;

    // we reduce multiple elements per thread.  The number is determined by the
    // number of active thread blocks (via gridDim).  More blocks will result
    // in a larger gridSize and therefore fewer elements per thread
    while (i < n)
    {
        sdata[tid] += g_idata[i];
        // ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays
        if (nIsPow2 || i + blockSize < n)
            sdata[tid] += g_idata[i+blockSize];
        i += gridSize;
    }
    __syncthreads();

    // do reduction in shared mem
    reduceBlock<blockSize>(sdata, tid);

    // write result for this block to global mem
    if (tid == 0)
        g_odata[blockIdx.x] = sdata[0];
}

// Multi-pass entry point: produces gridDim.x partial sums in g_odata; the
// caller is expected to reduce those in a further pass.
template <unsigned int blockSize, bool nIsPow2>
__global__ void
reduceMultiPass(const float *g_idata, float *g_odata, unsigned int n)
{
    reduceBlocks<blockSize, nIsPow2>(g_idata, g_odata, n);
}

// Global variable used by reduceSinglePass to count how many blocks have finished
__device__ unsigned int retirementCount = 0;

// This reduction kernel reduces an arbitrary size array in a single kernel invocation
// It does so by keeping track of how many blocks have finished.  After each thread
// block completes the reduction of its own block of data, it "takes a ticket" by
// atomically incrementing a global counter.  If the ticket value is equal to the number
// of thread blocks, then the block holding the ticket knows that it is the last block
// to finish.  This last block is responsible for summing the results of all the other
// blocks.
//
// In order for this to work, we must be sure that before a block takes a ticket, all
// of its memory transactions have completed.  This is what __threadfence() does -- it
// blocks until the results of all outstanding memory transactions within the
// calling thread are visible to all other threads.
//
// For more details on the reduction algorithm (notably the multi-pass approach), see
// the "reduction" sample in the CUDA SDK.
template <unsigned int blockSize, bool nIsPow2> __global__ void reduceSinglePass(const float *g_idata, float *g_odata, unsigned int n) { // // PHASE 1: Process all inputs assigned to this block // reduceBlocks<blockSize, nIsPow2>(g_idata, g_odata, n); // // PHASE 2: Last block finished will process all partial sums // if (gridDim.x > 1) { const unsigned int tid = threadIdx.x; __shared__ bool amLast; extern float __shared__ smem[]; // wait until all outstanding memory instructions in this thread are finished __threadfence(); // Thread 0 takes a ticket if( tid==0 ) { unsigned int ticket = atomicInc(&retirementCount, gridDim.x); // If the ticket ID is equal to the number of blocks, we are the last block! amLast = (ticket == gridDim.x-1); } __syncthreads(); // The last block sums the results of all other blocks if( amLast ) { // load block results back into shared memory smem[tid] = (tid < gridDim.x) ? g_odata[tid] : 0; __syncthreads(); reduceBlock<blockSize>(smem, tid); if( tid==0 ) { g_odata[0] = smem[0]; // reset retirement count so that next run succeeds retirementCount = 0; } } } } bool isPow2(unsigned int x) { return ((x&(x-1))==0); } //////////////////////////////////////////////////////////////////////////////// // Wrapper function for kernel launch //////////////////////////////////////////////////////////////////////////////// extern "C" void reduce(int size, int threads, int blocks, float *d_idata, float *d_odata) { dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); int smemSize = threads * sizeof(float); // choose which of the optimized versions of reduction to launch if (isPow2(size)) { switch (threads) { case 512: reduceMultiPass<512, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 256: reduceMultiPass<256, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 128: reduceMultiPass<128, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 64: reduceMultiPass< 64, true><<< 
dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 32: reduceMultiPass< 32, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 16: reduceMultiPass< 16, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 8: reduceMultiPass< 8, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 4: reduceMultiPass< 4, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 2: reduceMultiPass< 2, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 1: reduceMultiPass< 1, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; } } else { switch (threads) { case 512: reduceMultiPass<512, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 256: reduceMultiPass<256, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 128: reduceMultiPass<128, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 64: reduceMultiPass< 64, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 32: reduceMultiPass< 32, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 16: reduceMultiPass< 16, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 8: reduceMultiPass< 8, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 4: reduceMultiPass< 4, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 2: reduceMultiPass< 2, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 1: reduceMultiPass< 1, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; } } } extern "C" void reduceSinglePass(int size, int threads, int blocks, float *d_idata, float *d_odata) { dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); int smemSize = threads * sizeof(float); // choose which of the optimized versions of reduction 
to launch if (isPow2(size)) { switch (threads) { case 512: reduceSinglePass<512, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 256: reduceSinglePass<256, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 128: reduceSinglePass<128, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 64: reduceSinglePass< 64, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 32: reduceSinglePass< 32, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 16: reduceSinglePass< 16, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 8: reduceSinglePass< 8, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 4: reduceSinglePass< 4, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 2: reduceSinglePass< 2, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 1: reduceSinglePass< 1, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; } } else { switch (threads) { case 512: reduceSinglePass<512, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 256: reduceSinglePass<256, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 128: reduceSinglePass<128, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 64: reduceSinglePass< 64, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 32: reduceSinglePass< 32, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 16: reduceSinglePass< 16, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 8: reduceSinglePass< 8, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 4: reduceSinglePass< 4, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 2: reduceSinglePass< 2, false><<< 
dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 1: reduceSinglePass< 1, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; } } } #endif // #ifndef _REDUCE_KERNEL_H_
the_stack
static const uint64_t old1_T0[256] = { 0x78D8C07818281818, 0xAF2605AF23652323, 0xF9B87EF9C657C6C6, 0x6FFB136FE825E8E8, 0xA1CB4CA187948787, 0x6211A962B8D5B8B8, 0x0509080501030101, 0x6E0D426E4FD14F4F, 0xEE9BADEE365A3636, 0x04FF5904A6F7A6A6, 0xBD0CDEBDD26BD2D2, 0x060EFB06F502F5F5, 0x8096EF80798B7979, 0xCE305FCE6FB16F6F, 0xEF6DFCEF91AE9191, 0x07F8AA0752F65252, 0xFD4727FD60A06060, 0x76358976BCD9BCBC, 0xCD37ACCD9BB09B9B, 0x8C8A048C8E8F8E8E, 0x15D27115A3F8A3A3, 0x3C6C603C0C140C0C, 0x8A84FF8A7B8D7B7B, 0xE180B5E1355F3535, 0x69F5E8691D271D1D, 0x47B35347E03DE0E0, 0xAC21F6ACD764D7D7, 0xED9C5EEDC25BC2C2, 0x96436D962E722E2E, 0x7A29627A4BDD4B4B, 0x215DA321FE1FFEFE, 0x16D5821657F95757, 0x41BDA841153F1515, 0xB6E89FB677997777, 0xEB92A5EB37593737, 0x569E7B56E532E5E5, 0xD9138CD99FBC9F9F, 0x1723D317F00DF0F0, 0x7F206A7F4ADE4A4A, 0x95449E95DA73DADA, 0x25A2FA2558E85858, 0xCACF06CAC946C9C9, 0x8D7C558D297B2929, 0x225A50220A1E0A0A, 0x4F50E14FB1CEB1B1, 0x1AC9691AA0FDA0A0, 0xDA147FDA6BBD6B6B, 0xABD95CAB85928585, 0x733C8173BDDABDBD, 0x348FD2345DE75D5D, 0x5090805010301010, 0x0307F303F401F4F4, 0xC0DD16C0CB40CBCB, 0xC6D3EDC63E423E3E, 0x112D2811050F0505, 0xE6781FE667A96767, 0x53977353E431E4E4, 0xBB0225BB27692727, 0x5873325841C34141, 0x9DA72C9D8B808B8B, 0x01F65101A7F4A7A7, 0x94B2CF947D877D7D, 0xFB49DCFB95A29595, 0x9F568E9FD875D8D8, 0x30708B30FB10FBFB, 0x71CD2371EE2FEEEE, 0x91BBC7917C847C7C, 0xE37117E366AA6666, 0x8E7BA68EDD7ADDDD, 0x4BAFB84B17391717, 0x4645024647C94747, 0xDC1A84DC9EBF9E9E, 0xC5D41EC5CA43CACA, 0x995875992D772D2D, 0x792E9179BFDCBFBF, 0x1B3F381B07090707, 0x23AC0123ADEAADAD, 0x2FB0EA2F5AEE5A5A, 0xB5EF6CB583988383, 0xFFB685FF33553333, 0xF25C3FF263A56363, 0x0A12100A02060202, 0x38933938AAE3AAAA, 0xA8DEAFA871937171, 0xCFC60ECFC845C8C8, 0x7DD1C87D192B1919, 0x703B727049DB4949, 0x9A5F869AD976D9D9, 0x1D31C31DF20BF2F2, 0x48A84B48E338E3E3, 0x2AB9E22A5BED5B5B, 0x92BC349288858888, 0xC83EA4C89AB39A9A, 0xBE0B2DBE266A2626, 0xFABF8DFA32563232, 0x4A59E94AB0CDB0B0, 0x6AF21B6AE926E9E9, 0x337778330F110F0F, 
0xA633E6A6D562D5D5, 0xBAF474BA809D8080, 0x7C27997CBEDFBEBE, 0xDEEB26DECD4ACDCD, 0xE489BDE4345C3434, 0x75327A7548D84848, 0x2454AB24FF1CFFFF, 0x8F8DF78F7A8E7A7A, 0xEA64F4EA90AD9090, 0x3E9DC23E5FE15F5F, 0xA03D1DA020602020, 0xD50F67D568B86868, 0x72CAD0721A2E1A1A, 0x2CB7192CAEEFAEAE, 0x5E7DC95EB4C1B4B4, 0x19CE9A1954FC5454, 0xE57FECE593A89393, 0xAA2F0DAA22662222, 0xE96307E964AC6464, 0x122ADB12F10EF1F1, 0xA2CCBFA273957373, 0x5A82905A12361212, 0x5D7A3A5D40C04040, 0x2848402808180808, 0xE89556E8C358C3C3, 0x7BDF337BEC29ECEC, 0x904D9690DB70DBDB, 0x1FC0611FA1FEA1A1, 0x83911C838D8A8D8D, 0xC9C8F5C93D473D3D, 0xF15BCCF197A49797, 0x0000000000000000, 0xD4F936D4CF4CCFCF, 0x876E45872B7D2B2B, 0xB3E197B3769A7676, 0xB0E664B0829B8282, 0xA928FEA9D667D6D6, 0x77C3D8771B2D1B1B, 0x5B74C15BB5C2B5B5, 0x29BE1129AFECAFAF, 0xDF1D77DF6ABE6A6A, 0x0DEABA0D50F05050, 0x4C57124C45CF4545, 0x1838CB18F308F3F3, 0xF0AD9DF030503030, 0x74C42B74EF2CEFEF, 0xC3DAE5C33F413F3F, 0x1CC7921C55FF5555, 0x10DB7910A2FBA2A2, 0x65E90365EA23EAEA, 0xEC6A0FEC65AF6565, 0x6803B968BAD3BABA, 0x934A65932F712F2F, 0xE78E4EE7C05DC0C0, 0x8160BE81DE7FDEDE, 0x6CFCE06C1C241C1C, 0x2E46BB2EFD1AFDFD, 0x641F52644DD74D4D, 0xE076E4E092AB9292, 0xBCFA8FBC759F7575, 0x1E36301E060A0606, 0x98AE24988A838A8A, 0x404BF940B2CBB2B2, 0x59856359E637E6E6, 0x367E70360E120E0E, 0x63E7F8631F211F1F, 0xF75537F762A66262, 0xA33AEEA3D461D4D4, 0x32812932A8E5A8A8, 0xF452C4F496A79696, 0x3A629B3AF916F9F9, 0xF6A366F6C552C5C5, 0xB11035B1256F2525, 0x20ABF22059EB5959, 0xAED054AE84918484, 0xA7C5B7A772967272, 0xDDECD5DD394B3939, 0x61165A614CD44C4C, 0x3B94CA3B5EE25E5E, 0x859FE78578887878, 0xD8E5DDD838483838, 0x869814868C898C8C, 0xB217C6B2D16ED1D1, 0x0BE4410BA5F2A5A5, 0x4DA1434DE23BE2E2, 0xF84E2FF861A36161, 0x4542F145B3C8B3B3, 0xA53415A521632121, 0xD60894D69CB99C9C, 0x66EEF0661E221E1E, 0x5261225243C54343, 0xFCB176FCC754C7C7, 0x2B4FB32BFC19FCFC, 0x14242014040C0404, 0x08E3B20851F35151, 0xC725BCC799B69999, 0xC4224FC46DB76D6D, 0x396568390D170D0D, 0x35798335FA13FAFA, 0x8469B684DF7CDFDF, 
0x9BA9D79B7E827E7E, 0xB4193DB4246C2424, 0xD7FEC5D73B4D3B3B, 0x3D9A313DABE0ABAB, 0xD1F03ED1CE4FCECE, 0x5599885511331111, 0x89830C898F8C8F8F, 0x6B044A6B4ED24E4E, 0x5166D151B7C4B7B7, 0x60E00B60EB20EBEB, 0xCCC1FDCC3C443C3C, 0xBFFD7CBF819E8181, 0xFE40D4FE94A19494, 0x0C1CEB0CF704F7F7, 0x6718A167B9D6B9B9, 0x5F8B985F13351313, 0x9C517D9C2C742C2C, 0xB805D6B8D368D3D3, 0x5C8C6B5CE734E7E7, 0xCB3957CB6EB26E6E, 0xF3AA6EF3C451C4C4, 0x0F1B180F03050303, 0x13DC8A1356FA5656, 0x495E1A4944CC4444, 0x9EA0DF9E7F817F7F, 0x37882137A9E6A9A9, 0x82674D822A7E2A2A, 0x6D0AB16DBBD0BBBB, 0xE28746E2C15EC1C1, 0x02F1A20253F55353, 0x8B72AE8BDC79DCDC, 0x275358270B1D0B0B, 0xD3019CD39DBA9D9D, 0xC12B47C16CB46C6C, 0xF5A495F531533131, 0xB9F387B9749C7474, 0x0915E309F607F6F6, 0x434C0A4346CA4646, 0x26A50926ACE9ACAC, 0x97B53C9789868989, 0x44B4A044143C1414, 0x42BA5B42E13EE1E1, 0x4EA6B04E163A1616, 0xD2F7CDD23A4E3A3A, 0xD0066FD069BB6969, 0x2D41482D091B0909, 0xADD7A7AD70907070, 0x546FD954B6C7B6B6, 0xB71ECEB7D06DD0D0, 0x7ED63B7EED2AEDED, 0xDBE22EDBCC49CCCC, 0x57682A5742C64242, 0xC22CB4C298B59898, 0x0EED490EA4F1A4A4, 0x88755D8828782828, 0x3186DA315CE45C5C, 0x3F6B933FF815F8F8, 0xA4C244A486978686 }; static const uint64_t old1_RC[10] = { 0x4F01B887E8C62318, 0x52916F79F5D2A636, 0x357B0CA38E9BBC60, 0x57FE4B2EC2D7E01D, 0xDA4AF09FE5377715, 0x856BA0B10A29C958, 0x67053ECBF4105DBD, 0xD8957DA78B4127E4, 0x9E4717DD667CEEFB, 0x33835AAD07BF2DCA }; static const uint64_t old1_precomputed_round_key_64[72] = { 0xECE7FCC7F808AB3B, 0x44E9CB45024545CB, 0xB289A43CA4A489FE, 0xC5E1F3E1E1C5A9A0, 0xFCD4FCFCAC5C48AC, 0x418F8F0E90F70E8F, 0x7979078514077946, 0xF8B87868B8F8D8F8, 0xE4B6012A17B9C1EF, 0xAFF6AD2866D6C68D, 0xB0C745991504968F, 0x74E93F120FE2E675, 0xC1D216198D2A495B, 0x35B6A53DF6716FD7, 0xD6983228CCDC354F, 0xC3B8BFCB242AB159, 0x4AC0340260B548D4, 0x3B3582BB4F9BB769, 0xD8683C4AF17C46F8, 0x9EE05220D8214E61, 0xFED2431F5098E0E5, 0x387AF50F0776E24F, 0x7FFD300A74DE5AE1, 0xB753981921AEB24B, 0x296F5919978BA0BD, 0x52CC9DD7031E59AC, 
0xE5E646C9A5165AE3, 0x4F848BDF70532817, 0xA01ADD556B693C51, 0xC7268B83DDB75E97, 0xCB908F9316E2C076, 0x8FC39EE0D946E9C5, 0x5B256B0FE7937D86, 0xF77C51751A22932C, 0x2582A83CD34108C4, 0xE218CDE28A9C790D, 0xC93A572E592E3594, 0xEA70CF71BC4E55AC, 0xC135C8214155BC85, 0x50989643A6B8456E, 0x60E970DFD74C71E6, 0xC0B5A520AB7C88D8, 0x3A66AA761D5B1400, 0x0502AB3087300DE6, 0xC06203EDED483DEA, 0xC2620EDF55C1CB74, 0xF36A22CF9AA452F1, 0xB3502DC83BA2660F, 0xCF249B6DB82273C3, 0xC74DADAB026388F2, 0xB8B77B3275AFCDE8, 0xC9947382C6D6A3C0, 0x84938F2258A6BC21, 0x1E51E15A3B99CDF7, 0xC812F9AC41F5CC05, 0x21BFEC61E9B9393E, 0xF576066160243540, 0x3A62D1CB6404180D, 0x8807A55C2AC7AFE2, 0x804237B54859503E, 0x1619B3612106744B, 0xC1ECB5643D81C76F, 0xBA7CBB8C13214C6C, 0xD241AEAD7622701E, 0xDD900A1B66BF748C, 0xCACCF665EC2391FE, 0xF9BED90100B89447, 0x4CF5D284E56B7A0F, 0x003EB289B6993F96, 0xE9DB01146199245D, 0x97701270F3F41CCB, 0x9C8CA117E01E4B49 }; //---------------------------------------------------------------------------------------------------------- static const uint64_t plain_T0[256] = { 0xD83078C018601818, 0x2646AF05238C2323, 0xB891F97EC63FC6C6, 0xFBCD6F13E887E8E8, 0xCB13A14C87268787, 0x116D62A9B8DAB8B8, 0x0902050801040101, 0x0D9E6E424F214F4F, 0x9B6CEEAD36D83636, 0xFF510459A6A2A6A6, 0x0CB9BDDED26FD2D2, 0x0EF706FBF5F3F5F5, 0x96F280EF79F97979, 0x30DECE5F6FA16F6F, 0x6D3FEFFC917E9191, 0xF8A407AA52555252, 0x47C0FD27609D6060, 0x35657689BCCABCBC, 0x372BCDAC9B569B9B, 0x8A018C048E028E8E, 0xD25B1571A3B6A3A3, 0x6C183C600C300C0C, 0x84F68AFF7BF17B7B, 0x806AE1B535D43535, 0xF53A69E81D741D1D, 0xB3DD4753E0A7E0E0, 0x21B3ACF6D77BD7D7, 0x9C99ED5EC22FC2C2, 0x435C966D2EB82E2E, 0x29967A624B314B4B, 0x5DE121A3FEDFFEFE, 0xD5AE168257415757, 0xBD2A41A815541515, 0xE8EEB69F77C17777, 0x926EEBA537DC3737, 0x9ED7567BE5B3E5E5, 0x1323D98C9F469F9F, 0x23FD17D3F0E7F0F0, 0x20947F6A4A354A4A, 0x44A9959EDA4FDADA, 0xA2B025FA587D5858, 0xCF8FCA06C903C9C9, 0x7C528D5529A42929, 0x5A1422500A280A0A, 0x507F4FE1B1FEB1B1, 0xC95D1A69A0BAA0A0, 
0x14D6DA7F6BB16B6B, 0xD917AB5C852E8585, 0x3C677381BDCEBDBD, 0x8FBA34D25D695D5D, 0x9020508010401010, 0x07F503F3F4F7F4F4, 0xDD8BC016CB0BCBCB, 0xD37CC6ED3EF83E3E, 0x2D0A112805140505, 0x78CEE61F67816767, 0x97D55373E4B7E4E4, 0x024EBB25279C2727, 0x7382583241194141, 0xA70B9D2C8B168B8B, 0xF6530151A7A6A7A7, 0xB2FA94CF7DE97D7D, 0x4937FBDC956E9595, 0x56AD9F8ED847D8D8, 0x70EB308BFBCBFBFB, 0xCDC17123EE9FEEEE, 0xBBF891C77CED7C7C, 0x71CCE31766856666, 0x7BA78EA6DD53DDDD, 0xAF2E4BB8175C1717, 0x458E460247014747, 0x1A21DC849E429E9E, 0xD489C51ECA0FCACA, 0x585A99752DB42D2D, 0x2E637991BFC6BFBF, 0x3F0E1B38071C0707, 0xAC472301AD8EADAD, 0xB0B42FEA5A755A5A, 0xEF1BB56C83368383, 0xB666FF8533CC3333, 0x5CC6F23F63916363, 0x12040A1002080202, 0x93493839AA92AAAA, 0xDEE2A8AF71D97171, 0xC68DCF0EC807C8C8, 0xD1327DC819641919, 0x3B92707249394949, 0x5FAF9A86D943D9D9, 0x31F91DC3F2EFF2F2, 0xA8DB484BE3ABE3E3, 0xB9B62AE25B715B5B, 0xBC0D9234881A8888, 0x3E29C8A49A529A9A, 0x0B4CBE2D26982626, 0xBF64FA8D32C83232, 0x597D4AE9B0FAB0B0, 0xF2CF6A1BE983E9E9, 0x771E33780F3C0F0F, 0x33B7A6E6D573D5D5, 0xF41DBA74803A8080, 0x27617C99BEC2BEBE, 0xEB87DE26CD13CDCD, 0x8968E4BD34D03434, 0x3290757A483D4848, 0x54E324ABFFDBFFFF, 0x8DF48FF77AF57A7A, 0x643DEAF4907A9090, 0x9DBE3EC25F615F5F, 0x3D40A01D20802020, 0x0FD0D56768BD6868, 0xCA3472D01A681A1A, 0xB7412C19AE82AEAE, 0x7D755EC9B4EAB4B4, 0xCEA8199A544D5454, 0x7F3BE5EC93769393, 0x2F44AA0D22882222, 0x63C8E907648D6464, 0x2AFF12DBF1E3F1F1, 0xCCE6A2BF73D17373, 0x82245A9012481212, 0x7A805D3A401D4040, 0x4810284008200808, 0x959BE856C32BC3C3, 0xDFC57B33EC97ECEC, 0x4DAB9096DB4BDBDB, 0xC05F1F61A1BEA1A1, 0x9107831C8D0E8D8D, 0xC87AC9F53DF43D3D, 0x5B33F1CC97669797, 0x0000000000000000, 0xF983D436CF1BCFCF, 0x6E5687452BAC2B2B, 0xE1ECB39776C57676, 0xE619B06482328282, 0x28B1A9FED67FD6D6, 0xC33677D81B6C1B1B, 0x74775BC1B5EEB5B5, 0xBE432911AF86AFAF, 0x1DD4DF776AB56A6A, 0xEAA00DBA505D5050, 0x578A4C1245094545, 0x38FB18CBF3EBF3F3, 0xAD60F09D30C03030, 0xC4C3742BEF9BEFEF, 0xDA7EC3E53FFC3F3F, 0xC7AA1C9255495555, 
0xDB591079A2B2A2A2, 0xE9C96503EA8FEAEA, 0x6ACAEC0F65896565, 0x036968B9BAD2BABA, 0x4A5E93652FBC2F2F, 0x8E9DE74EC027C0C0, 0x60A181BEDE5FDEDE, 0xFC386CE01C701C1C, 0x46E72EBBFDD3FDFD, 0x1F9A64524D294D4D, 0x7639E0E492729292, 0xFAEABC8F75C97575, 0x360C1E3006180606, 0xAE0998248A128A8A, 0x4B7940F9B2F2B2B2, 0x85D15963E6BFE6E6, 0x7E1C36700E380E0E, 0xE73E63F81F7C1F1F, 0x55C4F73762956262, 0x3AB5A3EED477D4D4, 0x814D3229A89AA8A8, 0x5231F4C496629696, 0x62EF3A9BF9C3F9F9, 0xA397F666C533C5C5, 0x104AB13525942525, 0xABB220F259795959, 0xD015AE54842A8484, 0xC5E4A7B772D57272, 0xEC72DDD539E43939, 0x1698615A4C2D4C4C, 0x94BC3BCA5E655E5E, 0x9FF085E778FD7878, 0xE570D8DD38E03838, 0x980586148C0A8C8C, 0x17BFB2C6D163D1D1, 0xE4570B41A5AEA5A5, 0xA1D94D43E2AFE2E2, 0x4EC2F82F61996161, 0x427B45F1B3F6B3B3, 0x3442A51521842121, 0x0825D6949C4A9C9C, 0xEE3C66F01E781E1E, 0x6186522243114343, 0xB193FC76C73BC7C7, 0x4FE52BB3FCD7FCFC, 0x2408142004100404, 0xE3A208B251595151, 0x252FC7BC995E9999, 0x22DAC44F6DA96D6D, 0x651A39680D340D0D, 0x79E93583FACFFAFA, 0x69A384B6DF5BDFDF, 0xA9FC9BD77EE57E7E, 0x1948B43D24902424, 0xFE76D7C53BEC3B3B, 0x9A4B3D31AB96ABAB, 0xF081D13ECE1FCECE, 0x9922558811441111, 0x8303890C8F068F8F, 0x049C6B4A4E254E4E, 0x667351D1B7E6B7B7, 0xE0CB600BEB8BEBEB, 0xC178CCFD3CF03C3C, 0xFD1FBF7C813E8181, 0x4035FED4946A9494, 0x1CF30CEBF7FBF7F7, 0x186F67A1B9DEB9B9, 0x8B265F98134C1313, 0x51589C7D2CB02C2C, 0x05BBB8D6D36BD3D3, 0x8CD35C6BE7BBE7E7, 0x39DCCB576EA56E6E, 0xAA95F36EC437C4C4, 0x1B060F18030C0303, 0xDCAC138A56455656, 0x5E88491A440D4444, 0xA0FE9EDF7FE17F7F, 0x884F3721A99EA9A9, 0x6754824D2AA82A2A, 0x0A6B6DB1BBD6BBBB, 0x879FE246C123C1C1, 0xF1A602A253515353, 0x72A58BAEDC57DCDC, 0x531627580B2C0B0B, 0x0127D39C9D4E9D9D, 0x2BD8C1476CAD6C6C, 0xA462F59531C43131, 0xF3E8B98774CD7474, 0x15F109E3F6FFF6F6, 0x4C8C430A46054646, 0xA5452609AC8AACAC, 0xB50F973C891E8989, 0xB42844A014501414, 0xBADF425BE1A3E1E1, 0xA62C4EB016581616, 0xF774D2CD3AE83A3A, 0x06D2D06F69B96969, 0x41122D4809240909, 0xD7E0ADA770DD7070, 0x6F7154D9B6E2B6B6, 
0x1EBDB7CED067D0D0, 0xD6C77E3BED93EDED, 0xE285DB2ECC17CCCC, 0x6884572A42154242, 0x2C2DC2B4985A9898, 0xED550E49A4AAA4A4, 0x7550885D28A02828, 0x86B831DA5C6D5C5C, 0x6BED3F93F8C7F8F8, 0xC211A44486228686 }; static const uint64_t plain_RC[10] = { 0x4F01B887E8C62318, 0x52916F79F5D2A636, 0x357B0CA38E9BBC60, 0x57FE4B2EC2D7E01D, 0xDA4AF09FE5377715, 0x856BA0B10A29C958, 0x67053ECBF4105DBD, 0xD8957DA78B4127E4, 0x9E4717DD667CEEFB, 0x33835AAD07BF2DCA }; static const uint64_t plain_precomputed_round_key_64[72] = { 0x24AED1EAF889AB3B, 0xAFCBE94566454544, 0x89B2A4C5A4A4FE70, 0xA0E1CCE1E1A9FAC5, 0xFCB8FCFC5CC0AC48, 0x698F8F90260EF78F, 0x797985D707147996, 0xF878C8B868F8A8F8, 0x58704630DBBF19D3, 0xDB37CFAFD1235B29, 0x98AC958BC28A2C01, 0xA706B2C0B19E6381, 0xDB09B2B07A605E44, 0x71BC8CBCCF2C5B73, 0xD3DDEDEF240967DC, 0x197D3BD7F03B8D7B, 0x866511DEC1AABE38, 0x7F33874AD0F37C68, 0x57F0AD98DBFA37F3, 0xBC8D35EE5842E2C5, 0x7E246E99E8F00911, 0x0134B010EDD6C501, 0xD3EC287BF152C9FB, 0x4027F1C70CDC5632, 0x14CF9B9420A525AF, 0x4D53C4E3A92636C1, 0xE1F94077867D0FE6, 0x29066AE2BBE65D91, 0x8D5EFE4CCC545A96, 0xA63A3262CB31E9BE, 0x476A849618597BB1, 0x31AF592736C9F0D4, 0xB00B3725C0B5F9E2, 0xA5948416A2CB2B39, 0x148C34FACEF88A60, 0x19928C416437A57A, 0x893F83FAA146F3B3, 0x7CCF0278483F4997, 0x238F001EBAE8ADDC, 0x3D32B0ED494F7792, 0x2FFF4D7782634175, 0x00460355D038FAFF, 0x61F3983E49027DBF, 0x0BCEE59AC260A8F4, 0x279D5DEE445ADFC8, 0xA4007504555AF423, 0x8CE2F902121016B0, 0x1D33336829CD30AC, 0x89AD846882F16B03, 0x637146D862C64099, 0x10C2194B173E434C, 0xC586FF4CD3CF9CE2, 0x5326DF42A011FF21, 0x134BE46CCB008E1B, 0xCEB747A3F73B12A6, 0xCA33283B0E9018D9, 0xF92C9A0A7A671CD0, 0xB2B6634A532F942A, 0xB4A8ACFE46224288, 0x5935583DC75C4A47, 0xA16F5CA55D92A674, 0x395C73C48CE61777, 0xC61AEC530B3B2A08, 0x62E74D81EB58F62A, 0x3ABCEE01B6489548, 0x818EED6BC66B0DA5, 0x755A2688CF3DCEE0, 0xE99CF6C0DB4A8CC2, 0x1385717FD59CB754, 0x7B0B7D978A4B4143, 0x7A15F6DBBB351963, 0x27820137F64E7A6A };
the_stack
#include "decode.hpp" using namespace std; #define B_ELEM_PT 16 // each thread process 16 var. firstly #define B_ELEM_BITSHIFT 4 // log2(B_ELEM_PT) #define B_THREADS_PER_BLOCK 64 #define B_BLOCK_VAR_NUMS (B_ELEM_PT*B_THREADS_PER_BLOCK) template <typename T> __forceinline__ __device__ void compAndSwapIndices(T* data, size_t* indices, const size_t i, const size_t j, const bool dir) { if (dir == (data[i] > data[j])) { T tmp = data[i]; data[i] = data[j]; data[j] = tmp; size_t idx = indices[i]; indices[i] = indices[j]; indices[j] = idx; } } template <typename T1, typename T2> __forceinline__ __device__ void set_data(T1& , T2); template <> __forceinline__ __device__ void set_data(float& data, double value) { data = static_cast<float>(value); } template <> __forceinline__ __device__ void set_data(double& data, double value) { data = static_cast<double>(value); } template <> __forceinline__ __device__ void set_data(int& data, double value) { data = static_cast<int>(value); } template <> __forceinline__ __device__ void set_data(Pair<float, size_t>& data, double value) { data.k = static_cast<float>(value); data.v = INT_MAX; } #define B2GI(x, k, i) {compAndSwapIndices(x, k, i, i+1, ascending);} #define B4GI(x, k, i) { for(int j = 0; j < 2; ++j) { compAndSwapIndices(x, k, i+j, j+i+2, ascending); } \ B2GI(x, k, i) B2GI(x, k, i+2) } #define B8GI(x,k, i) { for(int j = 0; j < 4; ++j) { compAndSwapIndices(x, k, i+j, i+j+4, ascending); } \ B4GI(x, k, i) B4GI(x, k, i+4) } #define B16GI(x,k, i) { for(int j = 0; j < 8; ++j) { compAndSwapIndices(x, k, i+j, i+j+8, ascending); } \ B8GI(x, k, i) B8GI(x, k, i+8) } #define B32GI(x,k, i) { for(int j = 0; j < 16; ++j) { compAndSwapIndices(x, k, i+j, i+j+16, ascending); } \ B16GI(x, k, i) B16GI(x, k, i+16) } #define B64GI(x, k, i) { for(int j = 0; j < 32; ++j) { compAndSwapIndices(x, k, i+j, i+j+32, ascending);} \ B32GI(x, k, i) B32GI(x, k, i+32) } #define B128GI(x, k, i) { for(int j = 0; j < 64; ++j) compAndSwapIndices(x, k, i+j, i+j+64, 
ascending);\ B64GI(x, k, i) B64GI(x, k, i+64) } #define B256GI(x, k, i) { for(int j = 0; j < 128; ++j) compAndSwapIndices(x, k, i+j, i+j+128, ascending);\ B128GI(x, k, i) B128GI(x, k, i+128) } template <typename T> __global__ void bitonicLocalSortIndices(T* data, size_t* indices, const int batch_num, const size_t slice_len, const size_t padding_len, const int K) { const int tid = threadIdx.x; const int gid = threadIdx.x + blockIdx.x * blockDim.x; __shared__ T smem[B_BLOCK_VAR_NUMS]; __shared__ size_t sind[B_BLOCK_VAR_NUMS]; int t_addr = tid << B_ELEM_BITSHIFT; int g_addr = gid << B_ELEM_BITSHIFT; bool ascending; // we firt read the global mem. to the buffer. T rx[B_ELEM_PT]; size_t ri[B_ELEM_PT]; for(int i = 0; i < B_ELEM_PT; ++i) { size_t index = (g_addr+i) % padding_len; if (index < slice_len ) { rx[i] = data[g_addr + i]; ri[i] = index; } else { rx[i] = INT_MIN*1.0; ri[i] = INT_MAX; } } ascending = false; for(int i = 0; i < B_ELEM_PT; i += 2) { B2GI(rx, ri, i); ascending ^= 1; } for(int i = 0; i < B_ELEM_PT; i += 4) { B4GI(rx, ri, i); ascending ^= 1; } for(int i = 0; i < B_ELEM_PT; i += 8) { B8GI(rx, ri, i); ascending ^= 1; } // write to the shared memory for(int i = 0; i < B_ELEM_PT; ++i) { smem[t_addr + i] = rx[i]; sind[t_addr + i] = ri[i]; } __syncthreads(); T* sdata = smem + t_addr; size_t * sidx = sind + t_addr; ascending = tid & 1; B16GI(sdata, sidx, 0); __syncthreads(); if (tid % 2 == 0 ) { ascending = (tid >> 1) & 1; B32GI(sdata, sidx, 0); } __syncthreads(); if (tid % 4 == 0 ) { ascending = (tid >> 2) & 1; B64GI(sdata, sidx, 0); } __syncthreads(); if (tid % 8 == 0 ) { ascending = (tid >> 3) & 1; B128GI(sdata, sidx, 0); } __syncthreads(); /// merge down-up-down-up-down-up-down-up /// down-up-down-up /// down-up /// down int seg_num = blockDim.x * B_ELEM_PT / K; size_t lo, hi; for( ; seg_num > 1; ) { if (tid < seg_num>> 1) { lo = K * tid; hi = (seg_num - 1 - tid) * K; for(int j = 0; j < K; ++j) compAndSwapIndices(smem, sind, lo + j, hi + j, false); 
ascending = tid & 1; B128GI(smem, sind, lo); } seg_num >>= 1; __syncthreads(); } for(int i = 0; i < B_ELEM_PT; ++i) { data[gid*B_ELEM_PT + i] = sdata[i]; indices[gid*B_ELEM_PT+i] = sidx[i]; } } template <typename T> __device__ void topk_merge_two_blocks(T* left, size_t* i_left, T* right, size_t* i_right, const int K) { int i, j; T tmp; size_t i_tmp; if (left[K-1] > right[0]) return; if (left[0] < right[K-1]) { for(i = 0; i < K; ++i) { left[i] = right[i]; i_left[i] = i_right[i]; } return; } for( i = 0; i < K; ++i ) { if (left[i] > right[0]) continue; tmp = left[i]; i_tmp = i_left[i]; left[i] = right[0]; i_left[i] = i_right[0]; for(j = 1; j < K; ++j) { if (tmp < right[j]) { right[j-1] = right[j]; i_right[j-1] = i_right[j]; } else { right[j-1] = tmp; i_right[j-1] = i_tmp; break; } } } } template <typename T> __global__ void topk_reduce_blocks(T* data, size_t* indices, const size_t num_per_block, const int blocks_per_batch, const int padding_blocks_per_batch, const int batch_num, const int K, const int power_k) { const size_t tid = threadIdx.x; const size_t gid = threadIdx.x + blockIdx.x * blockDim.x * 2; const size_t batch_id = gid / padding_blocks_per_batch; const size_t local_id = gid % padding_blocks_per_batch; if(local_id >= blocks_per_batch) return; size_t l_shift = batch_id * blocks_per_batch + local_id; size_t r_shift; int half, threads = blockDim.x * 2; const bool ascending = false; /// merge within a block while (threads > 1 && tid < (threads >> 1)) { half = (threads>>1) ; if (tid + half < blocks_per_batch) { // valid mem. 
access r_shift = l_shift + half; /// topk_merge_two_blocks<T>(data + num_per_block * l_shift, indices + num_per_block * l_shift, \ data + num_per_block * r_shift, indices + num_per_block * r_shift, K); /// like bitonic sort, this is faster than topk_merge_two_blocks for(int i = 0; i < power_k; ++i) { if(data[num_per_block*l_shift + i] < data[power_k + num_per_block*r_shift - 1 - i]) { data[num_per_block*l_shift+i] = data[power_k + num_per_block*r_shift - 1 -i]; indices[num_per_block*l_shift+i] = indices[power_k + num_per_block*r_shift-1-i]; } } B128GI(data + num_per_block * l_shift, indices + num_per_block*l_shift, 0) } threads >>= 1; __syncthreads(); } } template <typename T1, typename T2> __inline__ T1 divUp(T1 a, T2 b) { return (a + b - 1) / b; } __inline__ int get_threads(int num) { if (num > 512) return 256; if (num > 256) return 128; if (num > 128) return 64; return 32; } template <typename T> void merge_batch_topk(T* idata, size_t* indices, const int batch_num, const size_t padding_len, const int K, const int slice_blocks_num, const int block_var_num, cudaStream_t stream) { assert(K <= 128); size_t blocks_per_batch = slice_blocks_num; //int threads_per_block = slice_blocks_num > 128 ? 128 : 64; int threads_per_block = get_threads(slice_blocks_num); size_t padding_blocks_per_batch = divUp(blocks_per_batch, threads_per_block*2) * threads_per_block * 2; size_t num_blocks = padding_blocks_per_batch / threads_per_block / 2 * batch_num; size_t num_per_block = block_var_num; int log_k = log2_32(K); int power_k = 2 << (log_k -1); if (power_k != K) { power_k = 2 << log_k; } /// merge within each block while (blocks_per_batch > 1) { topk_reduce_blocks<T><<<num_blocks, threads_per_block, 0, stream>>>(idata, indices, num_per_block, \ blocks_per_batch, padding_blocks_per_batch, batch_num, K, power_k); num_per_block *= threads_per_block; num_per_block <<= 1; // threads_per_block = blocks_per_batch > 128 ? 
128 : 64; threads_per_block = get_threads(blocks_per_batch); blocks_per_batch = num_blocks / batch_num; padding_blocks_per_batch = divUp(blocks_per_batch, threads_per_block*2) * threads_per_block; padding_blocks_per_batch <<= 1; num_blocks = padding_blocks_per_batch / threads_per_block * batch_num; num_blocks >>= 1; } } template <typename T> void bitonicBatchTopK(T* data, size_t* indices, const int batch_num, const size_t slice_len, const int K, cudaStream_t stream) { size_t padding_len = (slice_len + B_BLOCK_VAR_NUMS - 1) / B_BLOCK_VAR_NUMS * B_BLOCK_VAR_NUMS; int num_blocks = padding_len / B_BLOCK_VAR_NUMS * batch_num; int log_k = log2_32(K); int power_k = 2 << (log_k -1); if (power_k != K) { power_k = 2 << log_k; } /// local sort bitonicLocalSortIndices<T><<<num_blocks, B_THREADS_PER_BLOCK, 0, stream>>>(data, indices, batch_num, slice_len, padding_len, power_k); /// merge merge_batch_topk<T>(data, indices, batch_num, padding_len, K, num_blocks / batch_num, B_BLOCK_VAR_NUMS, stream); } __global__ void ctdet_decode_kernel( float* det, float* scores, size_t* indices, float* wh, float* reg, float* trans, const int batch_num, const int num_classes, const size_t slice_blocks_num, const size_t block_var_num, const int K, const int height, const int width, const bool reg_exist, const float thresh ) { const size_t tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid >= batch_num * K) return; // if (scores[tid] < thresh) return; //no nms, directly using threshold const size_t area = height * width; const size_t batch_id = tid / K; const size_t local_id = tid % K; float score = scores[batch_id * area * num_classes + local_id]; if (score < thresh) return; // no nms const size_t iid = slice_blocks_num * block_var_num * batch_id + local_id; const int batch_len = 1 + 6 * K; size_t class_id = indices[iid] / area; indices[iid] %= area; atomicAdd(&det[batch_id * batch_len], 1.0); float xs, ys; ys = static_cast<size_t>(indices[iid] / width) * 1.0; xs = 
// NOTE(review): the first statements below are the tail of ctdet_decode_kernel,
// whose head lies before this chunk; code here is unchanged, comments only.
static_cast<size_t>(indices[iid] % width) * 1.0;
if (reg_exist) {
    // reg: Nx2xHxW -> Nx2xK; add the predicted sub-pixel offset to the peak cell.
    xs += reg[batch_id*2*area + indices[iid]];
    ys += reg[batch_id*2*area + area + indices[iid]];
} else {
    // No regression head: use the cell center.
    xs += 0.5;
    ys += 0.5;
}
// Half width / half height of the predicted box at this peak.
float wh1 = wh[batch_id*2*area + indices[iid]] / 2.0;
float wh2 = wh[batch_id*2*area + area + indices[iid]] / 2.0;
float t0, t1, t2, t3;
float tt0, tt1, tt2, tt3;
// Corner form (x1, y1, x2, y2) in feature-map coordinates.
t0 = xs - wh1;
t1 = ys - wh2;
t2 = xs + wh1;
t3 = ys + wh2;
/// inverse-warpAffine: map both corners back to input-image coordinates
/// using the 2x3 affine matrix stored row-major in trans[0..5].
tt0 = trans[0] * t0 + trans[1] * t1 + trans[2];
tt1 = trans[3] * t0 + trans[4] * t1 + trans[5];
tt2 = trans[0] * t2 + trans[1] * t3 + trans[2];
tt3 = trans[3] * t2 + trans[4] * t3 + trans[5];
//printf("id:%d, score:%.4f, cls:%d, box:(%.1f, %.1f, %.1f, %.1f)\n", tid, scores[tid], class_id, tt0, tt1, tt2, tt3);
/// det layout: N x (1 + 6*K); slot 0 of each batch row is the detection count,
/// then K records of [class_id, score, x1, y1, x2, y2].
det[batch_id * batch_len + local_id * 6 + 0 + 1] = class_id;
det[batch_id * batch_len + local_id * 6 + 1 + 1] = score;
det[batch_id * batch_len + local_id * 6 + 2 + 1] = tt0;
det[batch_id * batch_len + local_id * 6 + 3 + 1] = tt1;
det[batch_id * batch_len + local_id * 6 + 4 + 1] = tt2;
det[batch_id * batch_len + local_id * 6 + 5 + 1] = tt3;
}

/**
 * Decodes CenterNet multi-pose outputs (one thread per candidate detection,
 * batch_num * K threads total) into packed detection records.
 *
 * det layout: N x (1 + res_num*K) where res_num = 2 + 4 + num_joints*2; slot 0
 * of each batch row counts detections (incremented via atomicAdd), then each
 * record is [class_id, score, x1, y1, x2, y2, joint0_x, joint0_y, ...].
 *
 * trans is a 2x3 inverse affine matrix (row-major, 6 floats) mapping
 * feature-map coordinates back to input-image coordinates.
 *
 * NOTE(review): scores/indices are assumed to hold the merged top-K produced
 * by bitonicLocalSortIndices + merge_batch_topk (see the host wrappers below);
 * the indexing `scores[batch_id * area + local_id]` relies on that layout —
 * confirm against merge_batch_topk.
 */
__global__ void pose_decode_kernel(
    float* det, float* scores, size_t* indices, float* wh, float* reg,
    float* hps, float* hm_hp, size_t* hm_ind, float* hp_offset, float* trans,
    const int batch_num, const size_t slice_blocks_num,
    const size_t block_var_num, const int K, const int height, const int width,
    const bool reg_exist, const bool hm_hp_exist, const int num_joints,
    const float thresh
) {
    const size_t tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid >= batch_num * K) return;
    // if (scores[tid] < thresh) return; //! no nms, directly using threshold
    const int res_num = 2 + 4 + num_joints * 2;
    const int batch_len = (1 + res_num * K);
    const size_t area = height * width;
    const size_t batch_id = tid / K;
    const size_t local_id = tid % K;
    float score = scores[batch_id * area + local_id];
    if (score < thresh) return; // no nms: score threshold is the only filter
    atomicAdd(&det[batch_id * batch_len], 1.0); //! number of det
    // Flat index of this candidate inside the block-padded top-K buffer.
    const size_t iid = slice_blocks_num * block_var_num * batch_id + local_id;
    // indices encode (class * area + cell); split into class and in-plane cell.
    size_t class_id = indices[iid] / area;
    indices[iid] %= area;
    float xs, ys;
    float p0, p1;
    float t0, t1, t2, t3;
    float tt0, tt1, tt2, tt3;
    float bias[2];
    // Cell coordinates of the heat-map peak.
    ys = static_cast<size_t>(indices[iid] / width) * 1.0;
    xs = static_cast<size_t>(indices[iid] % width) * 1.0;
    if (reg_exist) {
        // reg: Nx2xHxW -> Nx2xK sub-pixel offset
        bias[0] = reg[batch_id*2*area + indices[iid]];
        bias[1] = reg[batch_id*2*area + area + indices[iid]];
    } else {
        bias[0] = 0.5;
        bias[1] = 0.5;
    }
    // Half width / half height of the box.
    float wh1 = wh[batch_id*2*area + indices[iid]] / 2.0;
    float wh2 = wh[batch_id*2*area + area + indices[iid]] / 2.0;
    t0 = xs - wh1 + bias[0];
    t1 = ys - wh2 + bias[1];
    t2 = xs + wh1 + bias[0];
    t3 = ys + wh2 + bias[1];
    /// inverse-warpAffine on the two box corners
    tt0 = trans[0] * t0 + trans[1] * t1 + trans[2];
    tt1 = trans[3] * t0 + trans[4] * t1 + trans[5];
    tt2 = trans[0] * t2 + trans[1] * t3 + trans[2];
    tt3 = trans[3] * t2 + trans[4] * t3 + trans[5];
    det[batch_id * batch_len + local_id * res_num + 0 + 1] = class_id;
    det[batch_id * batch_len + local_id * res_num + 1 + 1] = score;
    det[batch_id * batch_len + local_id * res_num + 2 + 1] = tt0;
    det[batch_id * batch_len + local_id * res_num + 3 + 1] = tt1;
    det[batch_id * batch_len + local_id * res_num + 4 + 1] = tt2;
    det[batch_id * batch_len + local_id * res_num + 5 + 1] = tt3;
    /// key points: regressed joint positions, optionally snapped to the
    /// nearest joint-heat-map peak when one lies inside the box.
    for(int i = 0; i < num_joints; ++i) {
        // hps: N x (num_joints*2) x H x W, offsets relative to the box center.
        p0 = hps[batch_id*num_joints*2*area + i*2*area + indices[iid]] + xs;
        p1 = hps[batch_id*num_joints*2*area + (i*2+1)*area + indices[iid]] + ys;
        /// find the most closed point with a confidence > 0.1
        if (hm_hp_exist) {
            float min_ds = static_cast<float>(INT_MAX);
            float near_xs = min_ds, near_ys = min_ds;
            // hm_hp: N x 17 x 128 x 128 (per original comment)
            float hm_hp_score, diff = min_ds;
            float hm_xs, hm_ys;
            size_t ind_tmp;
            for(int j = 0; j < K; ++j) {
                hm_hp_score = hm_hp[batch_id * num_joints * area + i * area + j];
                if (hm_hp_score < 0.1) continue;
                ind_tmp = hm_ind[batch_id*num_joints*area + i * area + j] % area;
                // Peak cell plus the hp_offset sub-pixel correction.
                hm_ys = static_cast<size_t>(ind_tmp / width) * 1.0 + hp_offset[batch_id*2*area + area + j];
                hm_xs = static_cast<size_t>(ind_tmp % width) * 1.0 + hp_offset[batch_id*2*area + j];
                // L1 distance between regressed joint and heat-map peak.
                diff = fabs(p0 - hm_xs) + fabs(p1 - hm_ys);
                if (diff < min_ds) {
                    min_ds = diff;
                    near_xs = hm_xs;
                    near_ys = hm_ys;
                }
            }
            // Snap only if the peak lies inside the box and is close enough.
            // NOTE(review): this tests `diff` (last candidate examined), not
            // `min_ds` (the winning candidate) — confirm this is intentional.
            if (near_xs > t0 && near_xs < t2 && near_ys > t1 && near_ys < t3 &&
                diff < max(t2-t0, t3-t1) * 0.5) {
                p0 = near_xs;
                p1 = near_ys;
            }
        }
        // Map the joint back to input-image coordinates.
        tt0 = trans[0] * p0 + trans[1] * p1 + trans[2];
        tt1 = trans[3] * p0 + trans[4] * p1 + trans[5];
        det[batch_id * batch_len + local_id * res_num + i*2 + 7] = tt0;
        det[batch_id * batch_len + local_id * res_num + i*2+1 + 7] = tt1;
    }
}

/**
 * Host wrapper for CenterNet object-detection decoding: in-place block-wise
 * bitonic top-K over the heat map, merge of per-block results, then the decode
 * kernel producing det = N x (1 + 6*K) packed records.
 *
 * All kernels run on `stream`; no synchronization is performed here.
 * NOTE(review): cat_spec_wh is accepted but unused in this function.
 */
void ctdet_decode(
    float* det, float* wh, float* reg, float* heat, size_t* indices,
    float* inv_trans, const int batch_num, const int num_classes,
    const int height, const int width, const int K, const float threshold,
    const bool reg_exist, const bool cat_spec_wh, cudaStream_t stream) {
    /// inplace sort with blocks; pad each slice to a multiple of B_BLOCK_VAR_NUMS
    const size_t slice_len = height * width * num_classes;
    const size_t padding_len = (slice_len + B_BLOCK_VAR_NUMS - 1) / B_BLOCK_VAR_NUMS * B_BLOCK_VAR_NUMS;
    int num_blocks = padding_len / B_BLOCK_VAR_NUMS * batch_num;
    // Round K up to the next power of two for the bitonic network.
    int log_k = log2_32(K);
    int power_k = 2 << (log_k -1);
    if (power_k != K) {
        power_k = 2 << log_k;
    }
    bitonicLocalSortIndices<float><<<num_blocks, B_THREADS_PER_BLOCK, 0, stream>>>(
        heat, indices, batch_num, slice_len, padding_len, power_k);
    CHECK_LAST_ERR("ctdet_bitonic_batch_topk_kernel");
    /// merge the per-block top-K candidates into a global top-K per batch
    merge_batch_topk<float>(heat, indices, batch_num, padding_len, K,
                            num_blocks / batch_num, B_BLOCK_VAR_NUMS, stream);
    CHECK_LAST_ERR("ctdet_merge_batch_topk_kernel");
    /// decode the merged candidates into detection records
    ctdet_decode_kernel<<<divUp(K * batch_num, 128), 128, 0, stream>>>(
        det, heat, indices, wh, reg, inv_trans, batch_num, num_classes,
        num_blocks/batch_num, B_BLOCK_VAR_NUMS, K, height, width, reg_exist,
        threshold);
    CHECK_LAST_ERR("ctdet_decode_kernel");
}

/**
 * Host wrapper for CenterNet multi-pose decoding: top-K over the object heat
 * map, optional per-joint top-K over the joint heat map (hm_hp), then the
 * pose decode kernel producing det = N x (1 + (6 + 2*num_joints)*K).
 *
 * All kernels run on `stream`; no synchronization is performed here.
 */
void multi_pose_decode(
    float* det, float* heat, float* wh, float* reg, float* hps, float* hm_hp,
    float* hp_offset, size_t* heat_ind, size_t* hm_ind, float* inv_trans,
    const int batch_num, const int num_joints, const int height,
    const int width, const int K, const float threshold, const bool reg_exist,
    const bool hm_hp_exist, cudaStream_t stream) {
    const size_t area = height * width;
    // Object heat map: one class channel per slice.
    const size_t heat_slice_len = area * 1;
    const size_t heat_padding_len = (heat_slice_len + B_BLOCK_VAR_NUMS - 1) / B_BLOCK_VAR_NUMS * B_BLOCK_VAR_NUMS;
    const int heat_num_blocks = heat_padding_len / B_BLOCK_VAR_NUMS * batch_num;
    // Joint heat map: treated as batch_num*num_joints independent slices.
    const size_t hm_slice_len = area;
    const size_t hm_padding_len = (area + B_BLOCK_VAR_NUMS-1)/B_BLOCK_VAR_NUMS * B_BLOCK_VAR_NUMS;
    const int hm_batch_num = batch_num * num_joints;
    const int hm_num_blocks = hm_padding_len / B_BLOCK_VAR_NUMS * batch_num * num_joints;
    // Round K up to the next power of two for the bitonic network.
    int log_k = log2_32(K);
    int power_k = 2 << (log_k -1);
    if (power_k != K) {
        power_k = 2 << log_k;
    }
    /// get the Top-K of the heat map
    bitonicLocalSortIndices<float><<<heat_num_blocks, B_THREADS_PER_BLOCK, 0, stream>>>(
        heat, heat_ind, batch_num, heat_slice_len, heat_padding_len, power_k);
    CHECK_LAST_ERR("pose_bitonic_topk_kernel");
    merge_batch_topk<float>(heat, heat_ind, batch_num, heat_padding_len, K,
                            heat_num_blocks / batch_num, B_BLOCK_VAR_NUMS, stream);
    CHECK_LAST_ERR("pose_merge_topk_kernel");
    /// get the channel Top-K of hm_hp
    if (hm_hp_exist) {
        bitonicLocalSortIndices<float><<<hm_num_blocks, B_THREADS_PER_BLOCK, 0, stream>>>(
            hm_hp, hm_ind, hm_batch_num, hm_slice_len, hm_padding_len, power_k);
        CHECK_LAST_ERR("pose_bitonic_topk_kernel");
        merge_batch_topk<float>(hm_hp, hm_ind, hm_batch_num, hm_padding_len, K,
                                hm_num_blocks / hm_batch_num, B_BLOCK_VAR_NUMS, stream);
        CHECK_LAST_ERR("pose_merge_topk_kernel");
    }
    /// decode
    pose_decode_kernel<<<divUp(K * batch_num, 128), 128, 0, stream>>>(
        det, heat, heat_ind, wh, reg, hps, hm_hp, hm_ind, hp_offset, inv_trans,
        batch_num, heat_num_blocks/batch_num, B_BLOCK_VAR_NUMS, K, height,
        width, reg_exist, hm_hp_exist, num_joints, threshold);
    CHECK_LAST_ERR("pose_decode_kernel");
}

// Explicit instantiations of the batched top-K entry point.
template void bitonicBatchTopK<int>(int*, size_t*, const int, const size_t, const int, cudaStream_t);
template void bitonicBatchTopK<float>(float*, size_t*, const int, const size_t, const int, cudaStream_t);
template void bitonicBatchTopK<double>(double*, size_t*, const int, const size_t, const int, cudaStream_t);
the_stack
#include <cub/cub.cuh>
#include <cublas_v2.h>
#include <cuda_fp16.h>
#include <cuda_runtime.h>
#include <math_constants.h>
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/cuda_common.h"
#include "longformer_attention_softmax.h"
#include "attention_impl.h"

using namespace onnxruntime::cuda;
using namespace cub;

// Bail out of the enclosing function with `false` on any cuBLAS failure.
#define CHECK(expr)         \
  if (!CUBLAS_CALL(expr)) { \
    return false;           \
  }

namespace onnxruntime {
namespace contrib {
namespace cuda {

/**
 * Longformer attention softmax over raw QK scores.
 *
 * Launch layout: one block per attention row (gridDim.x = B*N*S), blockSize
 * threads cooperating on one row of `sequence_length` scores. Uses
 * cub::BlockReduce in shared memory for the row max and row sum.
 *
 * Local rows normalize only the window [row-attention_window,
 * row+attention_window] plus the global-token columns; global rows normalize
 * the full row. Fully masked rows (mask < 0) are zeroed.
 */
template <typename T, int blockSize>
__launch_bounds__(blockSize)
__global__ void LongformerSoftmaxSimpleKernel(const int* global_attention,
                                              const int* global_index,
                                              const int* batch_global_num,
                                              const T* input,
                                              const T* attention_mask,
                                              T* output,
                                              float scaler,
                                              int dim0,
                                              int sequence_length,
                                              int attention_window) {
  typedef cub::BlockReduce<float, blockSize> BlockReduce;
  __shared__ typename BlockReduce::TempStorage block_reduce_temp;
  __shared__ float max_shared;
  __shared__ float sum_shared;

  // Each block owns one row of the (B*N*S) x S score matrix.
  const T* input_block = input + sequence_length * blockIdx.x;
  T* output_block = output + sequence_length * blockIdx.x;
  const int batch_index = blockIdx.x / dim0;
  const int row_index = blockIdx.x % sequence_length;
  const int global_num = batch_global_num[batch_index];

  // To be consistent with Huggingface Longformer, rows of masked words are set to zero.
  if ((float)attention_mask[batch_index * sequence_length + row_index] < 0.0f) {
    for (int i = threadIdx.x; i < sequence_length; i += blockSize) {
      output_block[i] = (T)(0);
    }
    return;
  }

  // local attention token: restrict the column range to the sliding window.
  int col_start = 0;
  int col_end = sequence_length;
  bool is_local_row = (global_attention[batch_index * sequence_length + row_index] == (int)0);
  if (is_local_row) {
    col_start = row_index - attention_window;
    if (col_start < 0) {
      col_start = 0;
    }
    col_end = row_index + attention_window + 1;
    if (col_end > sequence_length) {
      col_end = sequence_length;
    }
  }

  const T* mask_block = attention_mask + sequence_length * batch_index;
  int tid = threadIdx.x;

  // Pass 1: per-thread max of scaled+masked scores over the window...
  float max_input = -CUDART_INF_F;
  // #pragma unroll 16
  for (int i = tid + col_start; i < col_end; i += blockSize) {
    float x = input_block[i];
    x = x * scaler + (float)mask_block[i];
    if (max_input < x) {
      max_input = x;
    }
  }

  // ...and over global-token columns outside the window (local rows only).
  if (is_local_row) {
    for (int g = tid; g < global_num; g += blockSize) {
      int i = global_index[g];
      if (i < col_start || i >= col_end) {
        float x = input_block[i];
        x = x * scaler + (float)mask_block[i];
        if (max_input < x) {
          max_input = x;
        }
      }
    }
  }

  // Block-wide max, broadcast through shared memory.
  float max_block = BlockReduce(block_reduce_temp).Reduce(max_input, cub::Max());
  if (tid == 0) {
    max_shared = max_block;
  }
  __syncthreads();

  // Pass 2: sum of exp(x - max) over the same column set.
  float sum_input = 0.f;
  // #pragma unroll 16
  for (int i = tid + col_start; i < col_end; i += blockSize) {
    float x = input_block[i];
    x = expf((x)*scaler + (float)mask_block[i] - max_shared);
    sum_input += x;
  }

  if (is_local_row) {
    for (int g = tid; g < global_num; g += blockSize) {
      int i = global_index[g];
      if (i < col_start || i >= col_end) {
        float x = input_block[i];
        x = expf((x)*scaler + (float)mask_block[i] - max_shared);
        sum_input += x;
      }
    }
  }

  float sum_block = BlockReduce(block_reduce_temp).Reduce(sum_input, cub::Sum());
  if (tid == 0) {
    sum_shared = sum_block;
  }
  __syncthreads();
  float recip_sum = 1.f / sum_shared;

  if (is_local_row) {
    // We only need to fill in zeros for blocks that will be used in the matrix multiplication
    // following the Softmax.
    //
    // For now zero-out only [row_index - 2*attention_window, row_index + 2*attention_window],
    // we can even be more aggressive and reduce the zeroing-out window size since
    // each row has entries in 3 blocks (3*attention_window size instead of 4*attention_window)
    int zero_start = row_index - 2 * attention_window;
    if (zero_start < 0) {
      zero_start = 0;
    }
    int zero_end = row_index + 2 * attention_window;
    if (zero_end > sequence_length) {
      zero_end = sequence_length;
    }
    for (int i = tid + zero_start; i < zero_end; i += blockSize) {
      if (i < col_start || i >= col_end) {
        output_block[i] = (T)(0.);
      }
    }
  }
  __syncthreads();

  // Pass 3: write normalized probabilities for global columns (local rows)...
  if (is_local_row) {
    for (int g = tid; g < global_num; g += blockSize) {
      int i = global_index[g];
      if (i < col_start || i >= col_end) {
        float x = input_block[i];
        x = expf((x)*scaler + (float)mask_block[i] - max_shared);
        output_block[i] = (T)(recip_sum * x);
      }
    }
  }

  // ...and for the window columns.
  // #pragma unroll 16
  for (int i = tid + col_start; i < col_end; i += blockSize) {
    float x = input_block[i];
    x = expf((x)*scaler + (float)mask_block[i] - max_shared);
    output_block[i] = (T)(recip_sum * x);
  }
}

// Launch the softmax kernel for non-compact memory.
//
// Pipeline on `stream`: (1) QK^T into scratch1 via banded/strided cuBLAS GEMMs,
// (2) Longformer softmax scratch1 -> scratch2, (3) softmax * V into `output`.
// Returns false as soon as any cuBLAS call fails (via CHECK).
// NOTE(review): cuBLAS is column-major, so A/B roles are swapped relative to
// the row-major math described in the comments.
bool LaunchLongformerSoftmaxSimpleKernel(
    cudaStream_t stream,
    cublasHandle_t cublas,
    void* workspace,             // softmax space
    const void* q,               // transposed Q with shape (B, N, S, H)
    const void* k,               // transposed K with shape (B, N, S, H)
    const void* v,               // transposed V with shape (B, N, S, H)
    const void* attention_mask,  // attention mask with shape (B, S), with value 0.0 not masked, and -10000.0 masked.
    const void* global_q,        // Q for global tokens with shape (B, N, S, H)
    const void* global_k,        // K for global tokens with shape (B, N, S, H)
    const void* global_v,        // V for global tokens with shape (B, N, S, H)
    const int* global_attention, // global attention flags with shape (B, S), with value 0 for local and 1 for global.
    const int* global_index,     // Global index with shape (B, S)
    const int* batch_global_num, // Number of global tokens per batch with shape (B, 1)
    void* pinned_buffer,         // Pinned memory in CPU. Number of global tokens per batch with shape (B, 1)
    void* output,                // output with shape (B, N, S, H)
    float scaler,                // scalar
    int batch_size,              // batch size
    int sequence_length,         // sequence length
    int num_heads,               // number of heads
    int head_size,               // hidden size per head
    int attention_window,        // one sided window size
    size_t element_size) {       // size of element: 2 for half, and 4 for float
  bool is_fp16 = (element_size == 2);
  // workspace is split into scratch1 (QK scores) and scratch2 (softmax output).
  void* scratch1 = reinterpret_cast<char*>(workspace);
  size_t scratch1_size = GetAttentionScratchSize(element_size, batch_size, num_heads, sequence_length, sequence_length);
  void* scratch2 = reinterpret_cast<char*>(scratch1) + scratch1_size;

  // setup shared parameters for two strided batched matrix multiplies
  cudaDataType_t Atype;
  cudaDataType_t Btype;
  cudaDataType_t Ctype;
  cudaDataType_t resultType;
  cublasGemmAlgo_t algo = CUBLAS_GEMM_DEFAULT;

  __half one_fp16, zero_fp16;
  float one_fp32, zero_fp32;
  void *alpha, *beta_0, *beta_1;

  if (is_fp16) {
    one_fp16 = __float2half(1.f);
    zero_fp16 = __float2half(0.f);
    alpha = static_cast<void*>(&one_fp16);
    beta_0 = static_cast<void*>(&zero_fp16);
    beta_1 = static_cast<void*>(&one_fp16);
    Atype = CUDA_R_16F;
    Btype = CUDA_R_16F;
    Ctype = CUDA_R_16F;
    resultType = CUDA_R_16F;
    algo = CUBLAS_GEMM_DEFAULT_TENSOR_OP;
  } else {
    one_fp32 = 1.f;
    zero_fp32 = 0.f;
    alpha = static_cast<void*>(&one_fp32);
    beta_0 = static_cast<void*>(&zero_fp32);
    beta_1 = static_cast<void*>(&one_fp32);
    Atype = CUDA_R_32F;
    Btype = CUDA_R_32F;
    Ctype = CUDA_R_32F;
    resultType = CUDA_R_32F;
  }

  // Strided batch matrix multiply
  //    qk = q * k^T
  // Shapes: q and k = B x N x S x H, qk = B x N x S x S
  // Convert col-major to row-major by swapping q and k in Gemm
  //
  // Local attention part
  // S x S is calculated using sliding block WxW (W is one sided window size) like the following:
  //   [W][W]
  //   [W][W][W]
  //      [W][W][W]
  //         [W][W]
  // The first and last rows have 2 blocks, and the remaining has 3 blocks per row.
  // The calculation is split into 3 parts: fill the middle rows, then the first row and finally the last row.
  // The results are stored in scratch1.
  int w = attention_window;
  int x_offset = num_heads * sequence_length * head_size;
  int y_offset = num_heads * sequence_length * sequence_length;
  int last_block = (sequence_length / w) - 1;
  int strideA = sequence_length * head_size;
  int strideB = sequence_length * head_size;
  int strideC = sequence_length * sequence_length;

  // When S == 2W, there are no middle rows of blocks:
  //   [W][W]
  //   [W][W]
  // We can use normal matrix multiplication in this case.
  if (sequence_length == 2 * w) {
    CHECK(cublasGemmStridedBatchedEx(cublas, CUBLAS_OP_T, CUBLAS_OP_N,
                                     sequence_length, sequence_length, head_size,
                                     alpha, k, Atype, head_size, sequence_length * head_size,
                                     q, Btype, head_size, sequence_length * head_size,
                                     beta_0, scratch1, Ctype, sequence_length, sequence_length * sequence_length,
                                     batch_size * num_heads, resultType, algo));
  } else {  // sequence_length > 2 * w
    // Middle rows: each (i, j) head does a batch of `count` 3W x W tiles.
    for (int i = 0; i < batch_size; ++i) {
      for (int j = 0; j < num_heads; ++j) {
        const void* q_head = reinterpret_cast<const char*>(q) +
                             (i * x_offset + j * sequence_length * head_size + w * head_size) * element_size;
        const void* k_head = reinterpret_cast<const char*>(k) +
                             (i * x_offset + j * sequence_length * head_size) * element_size;
        void* qk_head = reinterpret_cast<char*>(scratch1) +
                        (i * y_offset + j * sequence_length * sequence_length + w * sequence_length) * element_size;
        int count = (sequence_length - 2 * w) / w;
        CHECK(cublasGemmStridedBatchedEx(cublas, CUBLAS_OP_T, CUBLAS_OP_N,
                                         3 * w,                    // m
                                         w,                        // n
                                         head_size,                // k
                                         alpha,                    // alpha
                                         k_head,                   // A
                                         Atype,                    // A type
                                         head_size,                // lda
                                         w * head_size,            // strideA
                                         q_head,                   // B
                                         Btype,                    // B type
                                         head_size,                // ldb
                                         w * head_size,            // strideB
                                         beta_0,                   // beta
                                         qk_head,                  // C
                                         Ctype,                    // C type
                                         sequence_length,          // ldc
                                         sequence_length * w + w,  // strideC
                                         count,                    // batch count
                                         resultType, algo));
      }
    }
    // First row of blocks (2W x W) for every head at once.
    CHECK(cublasGemmStridedBatchedEx(cublas, CUBLAS_OP_T, CUBLAS_OP_N,
                                     2 * w,            // m
                                     w,                // n
                                     head_size,        // k
                                     alpha,            // alpha
                                     k,                // A
                                     Atype,            // A type
                                     head_size,        // lda
                                     strideA,          // strideA
                                     q,                // B
                                     Btype,            // B type
                                     head_size,        // ldb
                                     strideB,          // strideB
                                     beta_0,           // beta
                                     scratch1,         // C
                                     Ctype,            // C type
                                     sequence_length,  // ldc
                                     strideC,          // strideC
                                     batch_size * num_heads,  // batch count
                                     resultType, algo));
    // Last row of blocks (2W x W) for every head at once.
    const void* q_head = reinterpret_cast<const char*>(q) + (last_block * w * head_size) * element_size;
    const void* k_head = reinterpret_cast<const char*>(k) + ((last_block - 1) * w * head_size) * element_size;
    void* qk_head = reinterpret_cast<char*>(scratch1) +
                    (last_block * w * sequence_length + (last_block - 1) * w) * element_size;
    CHECK(cublasGemmStridedBatchedEx(cublas, CUBLAS_OP_T, CUBLAS_OP_N,
                                     2 * w, w, head_size,
                                     alpha, k_head, Atype, head_size, strideA,
                                     q_head, Btype, head_size, strideB,
                                     beta_0, qk_head, Ctype, sequence_length, strideC,
                                     batch_size * num_heads, resultType, algo));
  }

  // Per-batch global token counts, already staged in pinned host memory.
  const int* batch_global_count = reinterpret_cast<const int*>(pinned_buffer);
  // Global attention part
  for (int i = 0; i < batch_size; ++i) {
    if (batch_global_count[i] > 0) {
      const void* q_batch = reinterpret_cast<const char*>(q) + (i * x_offset) * element_size;
      const void* k_batch = reinterpret_cast<const char*>(k) + (i * x_offset) * element_size;
      void* qk_batch = reinterpret_cast<char*>(scratch1) + (i * y_offset) * element_size;
      // Local tokens attending global tokens
      CHECK(cublasGemmStridedBatchedEx(cublas, CUBLAS_OP_T, CUBLAS_OP_N,
                                       batch_global_count[i], sequence_length, head_size,
                                       alpha, k_batch, Atype, head_size, strideA,
                                       q_batch, Btype, head_size, strideB,
                                       beta_0, qk_batch, Ctype, sequence_length, strideC,
                                       num_heads, resultType, algo));
      const void* global_q_batch = reinterpret_cast<const char*>(global_q) +
                                   (i * num_heads * sequence_length * head_size) * element_size;
      const void* global_k_batch = reinterpret_cast<const char*>(global_k) + (i * x_offset) * element_size;
      int strideB_global = sequence_length * head_size;
      // Global tokens attending everything
      // These GEMMs need to be last to make sure all global token entries are re-written.
      CHECK(cublasGemmStridedBatchedEx(cublas, CUBLAS_OP_T, CUBLAS_OP_N,
                                       sequence_length, batch_global_count[i], head_size,
                                       alpha, global_k_batch, Atype, head_size, strideA,
                                       global_q_batch, Btype, head_size, strideB_global,
                                       beta_0, qk_batch, Ctype, sequence_length, strideC,
                                       num_heads, resultType, algo));
    }
  }

  int dim0 = sequence_length * num_heads;
  int dim1 = sequence_length;
  void* softmax_out = scratch2;

  // One block per attention row; see kernel header for the layout contract.
  const int blockSize = 64;
  const int gridSize = batch_size * num_heads * sequence_length;
  if (is_fp16) {
    LongformerSoftmaxSimpleKernel<__half, blockSize><<<gridSize, blockSize, 0, stream>>>(
        global_attention, global_index, batch_global_num,
        static_cast<const __half*>(scratch1), static_cast<const __half*>(attention_mask),
        static_cast<__half*>(softmax_out), scaler, dim0, dim1, attention_window);
  } else {
    LongformerSoftmaxSimpleKernel<float, blockSize><<<gridSize, blockSize, 0, stream>>>(
        global_attention, global_index, batch_global_num,
        static_cast<const float*>(scratch1), static_cast<const float*>(attention_mask),
        static_cast<float*>(softmax_out), scaler, dim0, dim1, attention_window);
  }

  // Run the matrix multiply: output = softmax_out * v
  //   softmax_out: B x N x S x S
  //   v: B x N x S x H
  //   attn_out: B x N x S x H
  // Calculation uses full Gemm (S == 2W) or sliding blocks (S > 2W) in a way similar to the local attention part.
  if (sequence_length == 2 * w) {
    // convert col-major to row-major by swapping softmax_out and v
    CHECK(cublasGemmStridedBatchedEx(cublas, CUBLAS_OP_N, CUBLAS_OP_N,
                                     head_size, sequence_length, sequence_length,
                                     alpha, v, Atype, head_size, sequence_length * head_size,
                                     softmax_out, Btype, sequence_length, sequence_length * sequence_length,
                                     beta_0, output, Ctype, head_size, sequence_length * head_size,
                                     batch_size * num_heads, resultType, algo));
  } else {  // sequence_length > 2 * w
    // Middle rows of probability blocks times the matching V band.
    for (int i = 0; i < batch_size; ++i) {
      for (int j = 0; j < num_heads; ++j) {
        const void* v_head = reinterpret_cast<const char*>(v) +
                             (i * x_offset + j * head_size * sequence_length) * element_size;
        const void* prob_head = reinterpret_cast<const char*>(softmax_out) +
                                (i * y_offset + j * sequence_length * sequence_length + w * sequence_length) * element_size;
        void* out_head = reinterpret_cast<char*>(output) +
                         (i * x_offset + j * head_size * sequence_length + w * head_size) * element_size;
        int count = (sequence_length - 2 * w) / w;
        CHECK(cublasGemmStridedBatchedEx(cublas, CUBLAS_OP_N, CUBLAS_OP_N,
                                         head_size, w, 3 * w,
                                         alpha, v_head, Atype, head_size, w * head_size,
                                         prob_head, Btype, sequence_length, sequence_length * w + w,
                                         beta_0, out_head, Ctype, head_size, w * head_size,
                                         count, resultType, algo));
      }
    }
    // First row of blocks.
    CHECK(cublasGemmStridedBatchedEx(cublas, CUBLAS_OP_N, CUBLAS_OP_N,
                                     head_size, w, 2 * w,
                                     alpha, v, Atype, head_size, sequence_length * head_size,
                                     softmax_out, Btype, sequence_length, sequence_length * sequence_length,
                                     beta_0, output, Ctype, head_size, sequence_length * head_size,
                                     batch_size * num_heads, resultType, algo));
    // Last row of blocks.
    const void* v_head = reinterpret_cast<const char*>(v) + (last_block - 1) * w * head_size * element_size;
    const void* prob_head = reinterpret_cast<const char*>(softmax_out) +
                            (sequence_length * last_block * w + (last_block - 1) * w) * element_size;
    void* out_head = reinterpret_cast<char*>(output) + last_block * w * head_size * element_size;
    CHECK(cublasGemmStridedBatchedEx(cublas, CUBLAS_OP_N, CUBLAS_OP_N,
                                     head_size, w, 2 * w,
                                     alpha, v_head, Atype, head_size, sequence_length * head_size,
                                     prob_head, Btype, sequence_length, sequence_length * sequence_length,
                                     beta_0, out_head, Ctype, head_size, sequence_length * head_size,
                                     batch_size * num_heads, resultType, algo));
  }

  // Global attention contributions to the output.
  for (int i = 0; i < batch_size; ++i) {
    if (batch_global_count[i] > 0) {
      int glob_longdim_mm = (last_block - 1) * w;
      const void* v_head = reinterpret_cast<const char*>(v) + (i * x_offset) * element_size;
      const void* prob_head = reinterpret_cast<const char*>(softmax_out) +
                              (i * y_offset + 2 * w * sequence_length) * element_size;
      void* out_head = reinterpret_cast<char*>(output) + (i * x_offset + 2 * w * head_size) * element_size;
      // Accumulate (beta_1) global-column probabilities into local rows.
      CHECK(cublasGemmStridedBatchedEx(cublas, CUBLAS_OP_N, CUBLAS_OP_N,
                                       head_size, glob_longdim_mm, batch_global_count[i],
                                       alpha, v_head, Atype, head_size, sequence_length * head_size,
                                       prob_head, Btype, sequence_length, sequence_length * sequence_length,
                                       beta_1, out_head, Ctype, head_size, sequence_length * head_size,
                                       num_heads, resultType, algo));
      // Global tokens
      v_head = reinterpret_cast<const char*>(global_v) + (i * x_offset) * element_size;
      prob_head = reinterpret_cast<const char*>(softmax_out) + (i * y_offset) * element_size;
      out_head = reinterpret_cast<char*>(output) + (i * x_offset) * element_size;
      CHECK(cublasGemmStridedBatchedEx(cublas, CUBLAS_OP_N, CUBLAS_OP_N,
                                       head_size, batch_global_count[i],
                                       sequence_length,  // Re-write entries completely
                                       alpha, v_head, Atype, head_size, sequence_length * head_size,
                                       prob_head, Btype, sequence_length, sequence_length * sequence_length,
                                       beta_0,    // Use beta=0 to overwrite
                                       out_head,  // Here assumes global tokens are at the beginning of sequence.
                                       Ctype, head_size, sequence_length * head_size,
                                       num_heads, resultType, algo));
    }
  }
  return true;
}

}  // namespace cuda
}  // namespace contrib
}  // namespace onnxruntime
the_stack
#include <utility>

#include "paddle/fluid/framework/custom_tensor_utils.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/platform/complex.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/float16.h"
#include "paddle/fluid/platform/transform.h"

namespace paddle {

// Element-wise cast functor usable on both host and device (HOSTDEVICE).
template <typename InType, typename OutType>
struct CastDataTypeFunctor {
  HOSTDEVICE inline OutType operator()(InType in) const {
    return static_cast<OutType>(in);
  }
};

// Visitor used with framework::VisitDataType: casts `in_` (elements of
// InType) into `out_` with the OutType selected by the visitor dispatch.
// Runs on CPU via platform::Transform, or on GPU when compiled with
// NVCC/HIPCC (the GPU path synchronizes via context->Wait()).
template <typename InType>
struct CastDataType {
  CastDataType(const framework::Tensor &in, framework::Tensor *out,
               const platform::DeviceContext *ctx)
      : in_(in), out_(out), ctx_(ctx) {}
  const framework::Tensor in_;
  framework::Tensor *out_;
  const platform::DeviceContext *ctx_;

  template <typename OutType>
  void apply() {
    auto *in_begin = in_.data<InType>();
    auto *in_end = in_begin + in_.numel();
    auto *out_begin = out_->mutable_data<OutType>(in_.place());

    if (platform::is_cpu_place(in_.place())) {
      platform::Transform<platform::CPUDeviceContext> trans;
      auto *context = static_cast<const platform::CPUDeviceContext *>(ctx_);
      trans(*context, in_begin, in_end, out_begin,
            CastDataTypeFunctor<InType, OutType>());
#if defined(__NVCC__) || defined(__HIPCC__)
    } else if (platform::is_gpu_place(in_.place())) {
      platform::Transform<platform::CUDADeviceContext> trans;
      auto *context = static_cast<const platform::CUDADeviceContext *>(ctx_);
      trans(*context, in_begin, in_end, out_begin,
            CastDataTypeFunctor<InType, OutType>());
      // Block until the device-side transform finishes.
      context->Wait();
#endif
    } else {
      PADDLE_THROW(platform::errors::Unimplemented(
          "Place type is not supported when casting data type."));
    }
  }
};

// Copies `ele_size` bytes between host/device buffers for any GPU-involved
// direction (D2H, D2D, H2D) on the current device's stream, then blocks with
// a stream synchronize. Compiled to a no-op without CUDA/HIP support.
// NOTE(review): despite the name, `ele_size` is a byte count (callers pass
// numel * sizeof(T)).
template <typename T>
void GpuCopy(T *src, T *dst, PlaceType src_plc, PlaceType dst_plc,
             int64_t ele_size) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
  int device_num = paddle::platform::GetCurrentDeviceId();
  platform::CUDAPlace gpu_place(device_num);
  auto *dev_ctx =
      static_cast<const platform::CUDADeviceContext *>(pool.Get(gpu_place));
  if ((src_plc == PlaceType::kGPU) && (dst_plc == PlaceType::kCPU)) {
    memory::Copy(platform::CPUPlace(), static_cast<void *>(dst), gpu_place, src,
                 ele_size, dev_ctx->stream());
  } else if ((src_plc == PlaceType::kGPU) && (dst_plc == PlaceType::kGPU)) {
    memory::Copy(gpu_place, static_cast<void *>(dst), gpu_place, src, ele_size,
                 dev_ctx->stream());
  } else if ((src_plc == PlaceType::kCPU) && (dst_plc == PlaceType::kGPU)) {
    memory::Copy(gpu_place, static_cast<void *>(dst), platform::CPUPlace(), src,
                 ele_size, dev_ctx->stream());
  } else {
    PADDLE_THROW(platform::errors::Unavailable(
        "Only GPU related Copy can reach this func."));
  }
#ifdef PADDLE_WITH_HIP
  hipStreamSynchronize(dev_ctx->stream());
#else
  cudaStreamSynchronize(dev_ctx->stream());
#endif
#endif
}

// Lazily creates the wrapped LoDTensor and exposes it as local `tensor`.
#define GET_CASTED_TENSOR                               \
  if (!tensor_) {                                       \
    tensor_ = std::make_shared<framework::LoDTensor>(); \
  }                                                     \
  auto *tensor = static_cast<framework::LoDTensor *>(tensor_.get());

// Translates the public PlaceType enum into a framework `place` local.
#define GET_INNER_PLACE                                \
  platform::Place place;                               \
  switch (place_) {                                    \
    case PlaceType::kCPU:                              \
      place = platform::CPUPlace();                    \
      break;                                           \
    case PlaceType::kGPU:                              \
      place = platform::CUDAPlace();                   \
      break;                                           \
    default:                                           \
      PADDLE_THROW(platform::errors::Unavailable(      \
          "Custom operator unsupported place id(%d)",  \
          static_cast<int>(place_)));                  \
  }

// Resizes the tensor's dims; does not allocate memory (see mutable_data).
void Tensor::reshape(const std::vector<int64_t> &shape) {
  GET_CASTED_TENSOR
  auto new_dim = framework::make_ddim(shape);
  tensor->Resize(new_dim);
}

Tensor::Tensor(const PlaceType &place)
    : tensor_(std::make_shared<framework::LoDTensor>()),
      place_(place),
      stream_(StreamWrapper()) {}

Tensor::Tensor(const PlaceType &place, const std::vector<int64_t> &shape)
    : tensor_(std::make_shared<framework::LoDTensor>()),
      place_(place),
      stream_(StreamWrapper()) {
  GET_CASTED_TENSOR
  tensor->Resize(framework::make_ddim(shape));
}

// Sets the target place and allocates; shape must already be set via reshape.
template <typename T>
T *Tensor::mutable_data(const PlaceType &place) {
  place_ = place;
  return mutable_data<T>();
}

// Allocates (if needed) and returns a mutable typed pointer on the tensor's
// current place. Requires a positive numel, i.e. reshape() was called first.
template <typename T>
T *Tensor::mutable_data() {
  GET_CASTED_TENSOR
  PADDLE_ENFORCE_GT(
      tensor->numel(), 0,
      platform::errors::PreconditionNotMet(
          "You should call Tensor::Reshape(const std::vector<int> "
          "&shape)"
          "function before retrieving mutable_data from input tensor."));
  switch (static_cast<int>(place_)) {
    case static_cast<int>(PlaceType::kCPU): {
      return tensor->mutable_data<T>(platform::CPUPlace());
    }
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    case static_cast<int>(PlaceType::kGPU): {
      // Allocate on the currently selected device.
      int device_num = platform::GetCurrentDeviceId();
      return tensor->mutable_data<T>(platform::CUDAPlace(device_num));
    }
#endif
    default:
      PADDLE_THROW(platform::errors::Unavailable(
          "Custom operator unsupported place id(%d)",
          static_cast<int>(place_)));
  }
}

// Read-only typed data pointer (const-ness is by convention, not enforced).
template <typename T>
T *Tensor::data() const {
  GET_CASTED_TENSOR;
  auto *res = tensor->data<T>();
  return res;
}

// Maps the internal framework dtype to the public DataType enum.
// Falls back to FLOAT32 for unmapped dtypes (see TODO).
DataType Tensor::type() const {
  GET_CASTED_TENSOR;
  auto type = tensor->type();
  if (type == framework::proto::VarType::FP32) {
    return DataType::FLOAT32;
  } else if (type == framework::proto::VarType::INT64) {
    return DataType::INT64;
  } else if (type == framework::proto::VarType::INT32) {
    return DataType::INT32;
  } else if (type == framework::proto::VarType::INT16) {
    return DataType::INT16;
  } else if (type == framework::proto::VarType::INT8) {
    return DataType::INT8;
  } else if (type == framework::proto::VarType::UINT8) {
    return DataType::UINT8;
  } else if (type == framework::proto::VarType::FP64) {
    return DataType::FLOAT64;
  } else if (type == framework::proto::VarType::BOOL) {
    return DataType::BOOL;
  } else if (type == framework::proto::VarType::COMPLEX64) {
    return DataType::COMPLEX64;
  } else if (type == framework::proto::VarType::COMPLEX128) {
    return DataType::COMPLEX128;
  } else if (type == framework::proto::VarType::FP16) {
    return DataType::FLOAT16;
  }
  // TODO(JiabinYang) Support more dtype here
  return DataType::FLOAT32;
}

// Deep-copies this tensor to `target_place`, dispatching to memcpy for
// CPU->CPU and GpuCopy for any GPU-involved direction. Blocking.
template <typename T>
Tensor Tensor::copy_to(const PlaceType &target_place) const {
  GET_CASTED_TENSOR;
  PADDLE_ENFORCE_GE(tensor->numel(), 0,
                    platform::errors::PreconditionNotMet(
                        "You should call Tensor::Reshape(const "
                        "std::vector<int> &shape)"
                        "function before copying data from cpu."));
  size_t ele_size = tensor->numel() * sizeof(T);  // byte count
  auto *p_src_data = tensor->data<T>();
  auto src_place = place();
  Tensor target = Tensor(target_place);
  target.reshape(shape());
  auto *p_target_data = target.template mutable_data<T>();

  if ((src_place == PlaceType::kCPU) && (target_place == PlaceType::kCPU)) {
    std::memcpy(static_cast<void *>(p_target_data), p_src_data, ele_size);
  } else if ((src_place == PlaceType::kGPU) &&
             (target_place == PlaceType::kCPU)) {
    GpuCopy<T>(p_src_data, p_target_data, src_place, target_place, ele_size);
  } else if ((src_place == PlaceType::kCPU) &&
             (target_place == PlaceType::kGPU)) {
    GpuCopy<T>(p_src_data, p_target_data, src_place, target_place, ele_size);
  } else if ((src_place == PlaceType::kGPU) &&
             (target_place == PlaceType::kGPU)) {
    GpuCopy<T>(p_src_data, p_target_data, src_place, target_place, ele_size);
  } else {
    PADDLE_THROW(platform::errors::Unavailable(
        "Not supported place transform of place: %d to place: %d",
        static_cast<int>(src_place), static_cast<int>(target_place)));
  }
  return target;
}

// Returns a tensor sharing storage with rows [begin_idx, end_idx) of this
// tensor (no copy; lifetime tied to the underlying allocation).
Tensor Tensor::slice(const int64_t begin_idx, const int64_t end_idx) const {
  GET_CASTED_TENSOR
  GET_INNER_PLACE
  framework::Tensor intermediate = tensor->Slice(begin_idx, end_idx);
  Tensor target = Tensor(place_);
  framework::CustomTensorUtils::ShareDataFrom(
      static_cast<const void *>(&intermediate), target);
  return target;
}

// Explicit instantiations of copy_to for every supported element type.
template PD_DLL_DECL Tensor
Tensor::copy_to<float>(const PlaceType &target_place) const;
template PD_DLL_DECL Tensor
Tensor::copy_to<double>(const PlaceType &target_place) const;
template PD_DLL_DECL Tensor
Tensor::copy_to<int64_t>(const PlaceType &target_place) const;
template PD_DLL_DECL Tensor
Tensor::copy_to<int32_t>(const PlaceType &target_place) const;
template PD_DLL_DECL Tensor
Tensor::copy_to<uint8_t>(const PlaceType &target_place) const;
template PD_DLL_DECL Tensor
Tensor::copy_to<int8_t>(const PlaceType &target_place) const;
template PD_DLL_DECL Tensor
Tensor::copy_to<int16_t>(const PlaceType &target_place) const;
template PD_DLL_DECL Tensor
Tensor::copy_to<bool>(const PlaceType &target_place) const;
template PD_DLL_DECL Tensor Tensor::copy_to<paddle::platform::complex<float>>(
    const PlaceType &target_place) const;
template PD_DLL_DECL Tensor Tensor::copy_to<paddle::platform::complex<double>>(
    const PlaceType &target_place) const;
template PD_DLL_DECL Tensor
Tensor::copy_to<paddle::platform::float16>(const PlaceType &target_place) const;

// Explicit instantiations of data().
template PD_DLL_DECL float *Tensor::data<float>() const;
template PD_DLL_DECL double *Tensor::data<double>() const;
template PD_DLL_DECL int64_t *Tensor::data<int64_t>() const;
template PD_DLL_DECL int32_t *Tensor::data<int32_t>() const;
template PD_DLL_DECL uint8_t *Tensor::data<uint8_t>() const;
template PD_DLL_DECL int8_t *Tensor::data<int8_t>() const;
template PD_DLL_DECL int16_t *Tensor::data<int16_t>() const;
template PD_DLL_DECL bool *Tensor::data<bool>() const;
template PD_DLL_DECL paddle::platform::complex<float>
    *Tensor::data<paddle::platform::complex<float>>() const;
template PD_DLL_DECL paddle::platform::complex<double>
    *Tensor::data<paddle::platform::complex<double>>() const;
template PD_DLL_DECL paddle::platform::float16 *
Tensor::data<paddle::platform::float16>() const;

// Explicit instantiations of mutable_data().
template PD_DLL_DECL float *Tensor::mutable_data<float>();
template PD_DLL_DECL double *Tensor::mutable_data<double>();
template PD_DLL_DECL int64_t *Tensor::mutable_data<int64_t>();
template PD_DLL_DECL int32_t *Tensor::mutable_data<int32_t>();
template PD_DLL_DECL uint8_t *Tensor::mutable_data<uint8_t>();
template PD_DLL_DECL int8_t *Tensor::mutable_data<int8_t>();
template PD_DLL_DECL int16_t *Tensor::mutable_data<int16_t>();
template PD_DLL_DECL bool *Tensor::mutable_data<bool>();
template PD_DLL_DECL paddle::platform::complex<float>
    *Tensor::mutable_data<paddle::platform::complex<float>>();
template PD_DLL_DECL paddle::platform::complex<double>
    *Tensor::mutable_data<paddle::platform::complex<double>>();
template PD_DLL_DECL paddle::platform::float16 *
Tensor::mutable_data<paddle::platform::float16>();

// Explicit instantiations of mutable_data(place).
template PD_DLL_DECL float *Tensor::mutable_data<float>(const PlaceType &place);
template PD_DLL_DECL double *Tensor::mutable_data<double>(
    const PlaceType &place);
template PD_DLL_DECL int64_t *Tensor::mutable_data<int64_t>(
    const PlaceType &place);
template PD_DLL_DECL int32_t *Tensor::mutable_data<int32_t>(
    const PlaceType &place);
template PD_DLL_DECL uint8_t *Tensor::mutable_data<uint8_t>(
    const PlaceType &place);
template PD_DLL_DECL int8_t *Tensor::mutable_data<int8_t>(
    const PlaceType &place);
template PD_DLL_DECL int16_t *Tensor::mutable_data<int16_t>(
    const PlaceType &place);
template PD_DLL_DECL bool *Tensor::mutable_data<bool>(const PlaceType &place);
template PD_DLL_DECL paddle::platform::complex<float> *
Tensor::mutable_data<paddle::platform::complex<float>>(const PlaceType &place);
template PD_DLL_DECL paddle::platform::complex<double> *
Tensor::mutable_data<paddle::platform::complex<double>>(const PlaceType &place);
template PD_DLL_DECL paddle::platform::float16 *
Tensor::mutable_data<paddle::platform::float16>(const PlaceType &place);

std::vector<int64_t> Tensor::shape() const {
  GET_CASTED_TENSOR
  return framework::vectorize<int64_t>(tensor->dims());
}

// Refreshes and returns the cached place_ from the underlying tensor's
// actual allocation place; throws for unsupported places.
const PlaceType &Tensor::place() const {
  GET_CASTED_TENSOR;
  if (platform::is_cpu_place(tensor->place())) {
    place_ = PlaceType::kCPU;
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  } else if (platform::is_gpu_place(tensor->place())) {
    place_ = PlaceType::kGPU;
#endif
  } else {
    PADDLE_THROW(platform::errors::Unimplemented(
        "Current Tensor hold unsupported Place Type, Please Init it"
        "using Tensor::mutable_data<T>(PaddlePlace) with T among:"
        "Place::kCPU or Place::kGPU"));
  }
  return place_;
}
// Returns a new Tensor on the same place, with this tensor's shape, whose
// elements are this tensor's elements converted to `target_type`.
// Dispatch is two-level: the switch selects the *source* dtype statically,
// and framework::VisitDataType selects the *destination* dtype at runtime.
// Throws platform::errors::Unimplemented for source dtypes not listed below.
Tensor Tensor::cast(const DataType &target_type) const {
  GET_CASTED_TENSOR;
  Tensor rlt = Tensor(place());
  rlt.reshape(this->shape());
  // Raw pointer into the result's underlying LoDTensor; `rlt` stays the owner.
  auto rlt_tensor_ = static_cast<framework::LoDTensor *>(rlt.tensor_.get());
  platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
  auto ctx = pool.Get(tensor->place());
  auto src_type = tensor->type();
  auto dst_type =
      framework::CustomTensorUtils::ConvertEnumDTypeToInnerDType(target_type);
  switch (src_type) {
    case framework::proto::VarType::FP32:
      framework::VisitDataType(dst_type,
                               CastDataType<float>(*tensor, rlt_tensor_, ctx));
      break;
    case framework::proto::VarType::FP64:
      framework::VisitDataType(dst_type,
                               CastDataType<double>(*tensor, rlt_tensor_, ctx));
      break;
    case framework::proto::VarType::INT32:
      framework::VisitDataType(dst_type,
                               CastDataType<int>(*tensor, rlt_tensor_, ctx));
      break;
    case framework::proto::VarType::INT64:
      framework::VisitDataType(
          dst_type, CastDataType<int64_t>(*tensor, rlt_tensor_, ctx));
      break;
    case framework::proto::VarType::BOOL:
      framework::VisitDataType(dst_type,
                               CastDataType<bool>(*tensor, rlt_tensor_, ctx));
      break;
    case framework::proto::VarType::INT16:
      framework::VisitDataType(
          dst_type, CastDataType<int16_t>(*tensor, rlt_tensor_, ctx));
      break;
    case framework::proto::VarType::UINT8:
      framework::VisitDataType(
          dst_type, CastDataType<uint8_t>(*tensor, rlt_tensor_, ctx));
      break;
    case framework::proto::VarType::COMPLEX64:
      framework::VisitDataType(dst_type,
                               CastDataType<paddle::platform::complex<float>>(
                                   *tensor, rlt_tensor_, ctx));
      break;
    case framework::proto::VarType::COMPLEX128:
      framework::VisitDataType(dst_type,
                               CastDataType<paddle::platform::complex<double>>(
                                   *tensor, rlt_tensor_, ctx));
      break;
    case framework::proto::VarType::FP16:
      framework::VisitDataType(
          dst_type,
          CastDataType<paddle::platform::float16>(*tensor, rlt_tensor_, ctx));
      break;
    // TODO(JiabinYang) Support more dtype here
    default:
      PADDLE_THROW(platform::errors::Unimplemented(
          "Data type (%s) is not supported when casting data type.",
          framework::DataTypeToString(src_type)));
  }
  return rlt;
}

// Total number of elements (product of all dims) of the underlying tensor.
int64_t Tensor::size() const {
  GET_CASTED_TENSOR;
  return tensor->numel();
}

// True iff the underlying LoDTensor holds allocated memory.
bool Tensor::is_initialized() const {
  GET_CASTED_TENSOR;
  if (tensor->IsInitialized()) {
    return true;
  } else {
    return false;
  }
}

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
// Returns the GPU stream attached to this tensor by the framework.
// NOTE(review): per the error text below, only framework-provided input
// tensors carry a stream; calling this on other tensors throws.
gpuStream_t Tensor::stream() const {
  if (!stream_.IsStreamSet()) {
    PADDLE_THROW(platform::errors::PreconditionNotMet(
        "Stream is not Set, only input tensor will have "
        "stream which is set by framework "));
  } else {
    return reinterpret_cast<gpuStream_t>(stream_.GetStream());
  }
}
#endif

namespace framework {

// Makes *dst share the buffer of src's underlying LoDTensor (no copy).
// `dst` must point at a framework::LoDTensor — the void* hides the type
// from the public header.
void CustomTensorUtils::ShareDataTo(const paddle::Tensor &src, void *dst) {
  static_cast<framework::LoDTensor *>(dst)->ShareDataWith(
      *static_cast<framework::LoDTensor *>(src.tensor_.get()));
}

// Makes dst's underlying LoDTensor share src's buffer (no copy), allocating
// the wrapper LoDTensor first if dst was empty. `src` must point at a
// framework::LoDTensor.
void CustomTensorUtils::ShareDataFrom(const void *src,
                                      const paddle::Tensor &dst) {
  if (!dst.tensor_) {
    dst.tensor_ = std::make_shared<framework::LoDTensor>();
  }
  auto *tensor = static_cast<framework::LoDTensor *>(dst.tensor_.get());
  tensor->ShareDataWith(*static_cast<const framework::LoDTensor *>(src));
}

}  // namespace framework
}  // namespace paddle
the_stack
#include "pcl/gpu/features/device/rodrigues.hpp"
#include "pcl/gpu/features/device/pair_features.hpp"

namespace pcl
{
    namespace device
    {
        // One thread per (index, point) pair; computes a PPFSignature for each
        // pair. output must hold points.size * indices.size elements.
        struct PpfImpl
        {
            enum
            {
                CTA_SIZE = 256
            };

            PtrSz<PointType> points;
            const NormalType *normals;
            PtrSz<int> indices;

            mutable PPFSignature* output;

            __device__ __forceinline__ void operator()() const
            {
                int total = points.size * indices.size;
                int idx = blockIdx.x * CTA_SIZE + threadIdx.x;
                // BUGFIX: was `idx > total`, which let the thread with
                // idx == total write one element past the end of `output`
                // whenever total is not a multiple of the block size.
                if (idx >= total)
                    return;

                int index_i = idx / points.size; // position in `indices`
                int j       = idx % points.size; // position in `points`
                int i = indices.data[index_i];

                PPFSignature out;
                if (i != j)
                {
                    float3 pi = fetch(points.data, i);
                    float3 ni = fetch(normals, i);
                    float3 pj = fetch(points.data, j);
                    float3 nj = fetch(normals, j);

                    //if (computePPFPairFeature(pi, ni, pj, nj, out.f1, out.f2, out.f3, out.f4))
                    if (computePairFeatures(pi, ni, pj, nj, out.f1, out.f2, out.f3, out.f4))
                        computeAlfaM(pi, ni, pj, out.alpha_m);
                    else
                        out.f1 = out.f2 = out.f3 = out.f4 = out.alpha_m = 0.f;
                }
                else
                    // Self-pair: defined as an all-zero signature.
                    out.f1 = out.f2 = out.f3 = out.f4 = out.alpha_m = 0.f;
                output[idx] = out;
            }

            // Loads a point/normal as float3 via a full-struct read (the
            // commented-out reinterpret-cast variant is kept for reference).
            template<class T>
            __forceinline__ __device__ float3 fetch(const T* data, int index) const
            {
                //return *(float3*)&data[index];
                T t = data[index];
                return make_float3(t.x, t.y, t.z);
            }
        };

        __global__ void estimatePpfKernel(const PpfImpl ppf) { ppf(); }

        // RGB variant: one thread per (index, point) pair, emitting
        // PPFRGBSignature (adds r/g/b ratios). Same layout contract as PpfImpl.
        struct PpfRgbImpl
        {
            enum
            {
                CTA_SIZE = 256
            };

            PtrSz<PointXYZRGB> points;
            const NormalType *normals;
            PtrSz<int> indices;

            mutable PPFRGBSignature* output;

            __device__ __forceinline__ void operator()() const
            {
                int total = points.size * indices.size;
                int idx = blockIdx.x * CTA_SIZE + threadIdx.x;
                // BUGFIX: was `idx > total` — same one-past-the-end write as in
                // PpfImpl.
                if (idx >= total)
                    return;

                int index_i = idx / points.size; // position in `indices`
                int j       = idx % points.size; // position in `points`
                int i = indices.data[index_i];

                PPFRGBSignature out;
                if (i != j)
                {
                    int ci;
                    float3 pi = fetchXYZRGB(points.data, i, ci);
                    float3 ni = fetch(normals, i);
                    int cj;
                    float3 pj = fetchXYZRGB(points.data, j, cj);
                    float3 nj = fetch(normals, j);

                    if (computeRGBPairFeatures(pi, ni, ci, pj, nj, cj, out.f1, out.f2, out.f3, out.f4, out.r_ratio, out.g_ratio, out.b_ratio))
                    //if (computePairFeatures(pi, ni, pj, nj, out.f1, out.f2, out.f3, out.f4))
                    {
                        computeAlfaM(pi, ni, pj, out.alpha_m);
                        //computeRGBPairFeatures_RGBOnly(ci, cj, out.r_ratio, out.g_ratio, out.b_ratio);
                    }
                    else
                        out.f1 = out.f2 = out.f3 = out.f4 = out.r_ratio = out.g_ratio = out.b_ratio = out.alpha_m = 0.f;
                }
                else
                    out.f1 = out.f2 = out.f3 = out.f4 = out.r_ratio = out.g_ratio = out.b_ratio = out.alpha_m = 0.f;
                output[idx] = out;
            }

            template<class T>
            __forceinline__ __device__ float3 fetch(const T* data, int index) const
            {
                //return *(float3*)&data[index];
                T t = data[index];
                return make_float3(t.x, t.y, t.z);
            }

            // Packed XYZRGB read: xyz in .x/.y/.z, the RGB word bit-cast into
            // the float .w field.
            __forceinline__ __device__ float3 fetchXYZRGB(const PointXYZRGB* data, int index, int& color) const
            {
                float4 xyzrgb = data[index];
                color = __float_as_int(xyzrgb.w);
                return make_float3(xyzrgb.x, xyzrgb.y, xyzrgb.z);
            }
        };

        __global__ void estimatePpfRgbKernel(const PpfRgbImpl ppfrgb) { ppfrgb(); }
    }
}

// Host launcher: computes all PPF signatures for the cross product of
// `indices` and `input` into `output` (resized to that total).
void pcl::device::computePPF(const PointCloud& input, const Normals& normals, const Indices& indices, DeviceArray<PPFSignature>& output)
{
    int total = (int)input.size() * (int)indices.size();
    output.create(total);

    PpfImpl ppf;
    ppf.points = input;
    ppf.normals = normals;
    ppf.indices = indices;
    ppf.output = output;

    int block = PpfImpl::CTA_SIZE;
    int grid = divUp(total, block);
    estimatePpfKernel<<<grid, block>>>(ppf);
    cudaSafeCall( cudaGetLastError() );
    cudaSafeCall( cudaDeviceSynchronize() );
    //printFuncAttrib(estimatePpfKernel);
}

// Host launcher for the RGB variant; same contract as computePPF.
void pcl::device::computePPFRGB(const PointXYZRGBCloud& input, const Normals& normals, const Indices& indices, DeviceArray<PPFRGBSignature>& output)
{
    int total = (int)input.size() * (int)indices.size();
    output.create(total);

    PpfRgbImpl ppfrgb;
    ppfrgb.points = input;
    ppfrgb.normals = normals;
    ppfrgb.indices = indices;
    ppfrgb.output = output;

    int block = PpfRgbImpl::CTA_SIZE;
    int grid = divUp(total, block);
    estimatePpfRgbKernel<<<grid, block>>>(ppfrgb);
    cudaSafeCall( cudaGetLastError() );
    cudaSafeCall( cudaDeviceSynchronize() );
    //printFuncAttrib(estimatePpfRgbKernel);
}

namespace pcl
{
    namespace device
    {
        // Region variant: one warp per query index. Each warp averages the
        // pair features between its query point and that point's neighborhood
        // (gindices row), writing one PPFRGBSignature per query.
        struct PpfRgbRegionImpl
        {
            enum
            {
                CTA_SIZE = 256,
                WARPS = CTA_SIZE / Warp::WARP_SIZE,

                FSize = sizeof(PPFRGBSignature)/sizeof(float),
                // alpha_m is the last float of the signature; it is not
                // averaged and ends up written as 0 via `val`'s default below.
                FSizeWithoutAlfaM = FSize - 1
            };

            struct plus
            {
                __forceinline__ __device__ float operator()(const float &lhs, const volatile float& rhs) const { return lhs + rhs; }
            };

            const PointXYZRGB* points;
            const NormalType* normals;
            PtrSz<int> indices;
            PtrStep<int> gindices;

            const int *sizes;

            mutable PPFRGBSignature* output;

            __device__ __forceinline__ void operator()() const
            {
                int tid = threadIdx.x;
                int warpid = Warp::id();
                int index_i = blockIdx.x * WARPS + warpid;

                if (index_i >= indices.size)
                    return;

                int i = indices[index_i];
                int size = sizes[index_i];
                const int* ginds = gindices.ptr(index_i);

                int lane = Warp::laneId();

                // Query point/normal/color cached once per warp by lane 0.
                // NOTE(review): read below by all lanes without an explicit
                // barrier — relies on intra-warp execution; confirm for the
                // targeted architectures.
                __shared__ float3 points_buf[WARPS];
                __shared__ float3 normals_buf[WARPS];
                __shared__ int colors_buf[WARPS];

                if (lane == 0)
                {
                    points_buf[warpid] = fetchXYZRGB(points, i, colors_buf[warpid]);
                    normals_buf[warpid] = fetch(normals, i);
                }

                // Per-thread partial sums for the 7 averaged feature fields;
                // reduced warp-wide below.
                __shared__ float cta_buf[7][CTA_SIZE + 1];
                cta_buf[0][tid] = cta_buf[1][tid] = cta_buf[2][tid] = cta_buf[3][tid] = 0.f;
                cta_buf[4][tid] = cta_buf[5][tid] = cta_buf[6][tid] = 0.f;

                for(int c = lane; c < size; c+= Warp::STRIDE)
                {
                    int j = ginds[c];
                    if (i != j)
                    {
                        int cj;
                        float3 pj = fetchXYZRGB(points, j, cj);
                        float3 nj = fetch(normals, j);

                        float f1, f2, f3, f4, r_ratio, g_ratio, b_ratio;
                        if (computeRGBPairFeatures(points_buf[warpid], normals_buf[warpid], colors_buf[warpid], pj, nj, cj, f1, f2, f3, f4, r_ratio, g_ratio, b_ratio))
                        //computeRGBPairFeatures(points_buf[warpid], normals_buf[warpid], colors_buf[warpid], pj, nj, cj, f1, f2, f3, f4, r_ratio, g_ratio, b_ratio);
                        {
                            cta_buf[0][tid] += f1;
                            cta_buf[1][tid] += f2;
                            cta_buf[2][tid] += f3;
                            cta_buf[3][tid] += f4;

                            cta_buf[4][tid] += r_ratio;
                            cta_buf[5][tid] += g_ratio;
                            cta_buf[6][tid] += b_ratio;
                        }
                    }
                }

                // Warp-level sum of each field; result lands at lane 0's slot
                // (tid - lane).
                Warp::reduce(&cta_buf[0][tid - lane], plus());
                Warp::reduce(&cta_buf[1][tid - lane], plus());
                Warp::reduce(&cta_buf[2][tid - lane], plus());
                Warp::reduce(&cta_buf[3][tid - lane], plus());

                Warp::reduce(&cta_buf[4][tid - lane], plus());
                Warp::reduce(&cta_buf[5][tid - lane], plus());
                Warp::reduce(&cta_buf[6][tid - lane], plus());

                // Lanes 0..6 carry the averaged fields; lane FSize-1 (alpha_m)
                // keeps val == 0.
                float val = 0.f;
                if (lane < FSizeWithoutAlfaM)
                    val = cta_buf[lane][tid - lane]/size;

                float *ptr = (float*)&output[index_i];
                if (lane < FSize)
                    ptr[lane] = val;
            }

            __forceinline__ __device__ float3 fetchXYZRGB(const PointXYZRGB* data, int index, int& color) const
            {
                float4 xyzrgb = data[index];
                color = __float_as_int(xyzrgb.w);
                return make_float3(xyzrgb.x, xyzrgb.y, xyzrgb.z);
            }

            template<class T>
            __forceinline__ __device__ float3 fetch(const T* data, int index) const
            {
                //return *(float3*)&data[index];
                T t = data[index];
                return make_float3(t.x, t.y, t.z);
            }
        };

        // Renamed from the original misspelling `estiamtePpfRgbRegionKernel`
        // (definition and sole call site are both in this translation unit).
        __global__ void estimatePpfRgbRegionKernel(const PpfRgbRegionImpl impl) { impl(); }
    }
}

// Host launcher: one output signature per entry of `indices`, averaged over
// that point's neighborhood given by nn_indices.
void pcl::device::computePPFRGBRegion(const PointXYZRGBCloud& cloud, const Normals& normals, const Indices& indices, const NeighborIndices& nn_indices, DeviceArray<PPFRGBSignature>& output)
{
    output.create(nn_indices.sizes.size());

    PpfRgbRegionImpl impl;
    impl.points = cloud;
    impl.normals = normals;
    impl.indices = indices;
    impl.gindices = nn_indices;
    impl.sizes = nn_indices.sizes;
    impl.output = output;

    int block = PpfRgbRegionImpl::CTA_SIZE;
    int grid = divUp((int)impl.indices.size, PpfRgbRegionImpl::WARPS);

    estimatePpfRgbRegionKernel<<<grid, block>>>(impl);
    cudaSafeCall( cudaGetLastError() );
    cudaSafeCall(cudaDeviceSynchronize());
    //printFuncAttrib(estimatePpfRgbRegionKernel);
}
the_stack
// BUGFIX: the file ends with `#endif // _reg_optimiser_GPU_CU` but had no
// matching conditional — add the include guard the trailing #endif refers to.
#ifndef _REG_OPTIMISER_GPU_CU
#define _REG_OPTIMISER_GPU_CU

#include "_reg_optimiser_gpu.h"
#include "_reg_optimiser_kernels.cu"

/* \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ */
/* \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ */
// Default-constructs the GPU optimiser; all device pointers start NULL and
// are wired up in Initialise().
reg_optimiser_gpu::reg_optimiser_gpu()
    :reg_optimiser<float>::reg_optimiser()
{
    this->currentDOF_gpu=NULL;
    this->bestDOF_gpu=NULL;
    this->gradient_gpu=NULL;

#ifndef NDEBUG
    printf("[NiftyReg DEBUG] reg_optimiser_gpu::reg_optimiser_gpu() called\n");
#endif
}
/* \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ */
/* \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ */
// Only bestDOF_gpu is owned by this class; currentDOF_gpu and gradient_gpu
// alias caller-provided buffers (see Initialise) and are not freed here.
reg_optimiser_gpu::~reg_optimiser_gpu()
{
    if(this->bestDOF_gpu!=NULL)
        cudaCommon_free<float4>(&this->bestDOF_gpu);  // fixed stray ';;'
    this->bestDOF_gpu=NULL;
#ifndef NDEBUG
    printf("[NiftyReg DEBUG] reg_optimiser_gpu::~reg_optimiser_gpu() called\n");
#endif
}
/* \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ */
/* \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ */
// Binds the optimiser to caller-owned device buffers (reinterpreted from
// float* to float4*), allocates the best-DOF backup array on the GPU, and
// snapshots the starting DOF and objective value.
// Parameters a, b, c are unused here (kept for interface compatibility with
// the base class / other optimisers).
void reg_optimiser_gpu::Initialise(size_t nvox,
                                   int dim,
                                   bool optX,
                                   bool optY,
                                   bool optZ,
                                   size_t maxit,
                                   size_t start,
                                   InterfaceOptimiser *obj,
                                   float *cppData,
                                   float *gradData,
                                   size_t a,
                                   float *b,
                                   float *c
                                   )
{
    this->dofNumber=nvox;
    this->ndim=dim;
    this->optimiseX=optX;
    this->optimiseY=optY;
    this->optimiseZ=optZ;
    this->maxIterationNumber=maxit;
    this->currentIterationNumber=start;

    // Arrays are converted from float to float4
    this->currentDOF_gpu=reinterpret_cast<float4 *>(cppData);

    if(gradData!=NULL)
        this->gradient_gpu=reinterpret_cast<float4 *>(gradData);

    if(this->bestDOF_gpu!=NULL)
        cudaCommon_free<float4>(&this->bestDOF_gpu);
    if(cudaCommon_allocateArrayToDevice(&this->bestDOF_gpu,
                                        (int)(this->GetVoxNumber()))){
        printf("[NiftyReg ERROR] Error when allocating the best control point array on the GPU.\n");
        reg_exit(1);
    }

    this->StoreCurrentDOF();

    this->objFunc=obj;
    this->bestObjFunctionValue = this->currentObjFunctionValue =
            this->objFunc->GetObjectiveFunctionValue();

#ifndef NDEBUG
    printf("[NiftyReg DEBUG] reg_optimiser_gpu::Initialise() called\n");
#endif
}
/* \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ */
/* \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ */
// Copies the saved best transformation back into the working buffer (D2D).
void reg_optimiser_gpu::RestoreBestDOF()
{
    // restore forward transformation
    NR_CUDA_SAFE_CALL(
                cudaMemcpy(this->currentDOF_gpu,
                           this->bestDOF_gpu,
                           this->GetVoxNumber()*sizeof(float4),
                           cudaMemcpyDeviceToDevice))
}
/* \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ */
/* \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ */
// Snapshots the working transformation into the best-DOF backup buffer (D2D).
void reg_optimiser_gpu::StoreCurrentDOF()
{
    // Store forward transformation
    NR_CUDA_SAFE_CALL(
                cudaMemcpy(this->bestDOF_gpu,
                           this->currentDOF_gpu,
                           this->GetVoxNumber()*sizeof(float4),
                           cudaMemcpyDeviceToDevice))
}
/* \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ */
/* \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ */
// Not implemented on the GPU path (intentional no-op upstream).
void reg_optimiser_gpu::Perturbation(float length)
{
    /// @todo
}
/* \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ */
/* \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ */
reg_conjugateGradient_gpu::reg_conjugateGradient_gpu()
    :reg_optimiser_gpu::reg_optimiser_gpu()
{
    this->array1=NULL;
    this->array2=NULL;

#ifndef NDEBUG
    printf("[NiftyReg DEBUG] reg_conjugateGradient_gpu::reg_conjugateGradient_gpu() called\n");
#endif
}
/* \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ */
/* \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ */
// Frees the two conjugate-direction work arrays owned by this subclass.
reg_conjugateGradient_gpu::~reg_conjugateGradient_gpu()
{
    if(this->array1!=NULL)
        cudaCommon_free<float4>(&this->array1);
    this->array1=NULL;

    if(this->array2!=NULL)
        cudaCommon_free<float4>(&this->array2);
    this->array2=NULL;

#ifndef NDEBUG
    printf("[NiftyReg DEBUG] reg_conjugateGradient_gpu::~reg_conjugateGradient_gpu() called\n");
#endif
}
/* \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ */
/* \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ */
// Base initialisation plus allocation of the two conjugate-gradient work
// arrays (G and H). Parameters a, b, c are unused, as in the base class.
void reg_conjugateGradient_gpu::Initialise(size_t nvox,
                                           int dim,
                                           bool optX,
                                           bool optY,
                                           bool optZ,
                                           size_t maxit,
                                           size_t start,
                                           InterfaceOptimiser *obj,
                                           float *cppData,
                                           float *gradData,
                                           size_t a,
                                           float *b,
                                           float *c)
{
    reg_optimiser_gpu::Initialise(nvox,
                                  dim,
                                  optX,
                                  optY,
                                  optZ,
                                  maxit,
                                  start,
                                  obj,
                                  cppData,
                                  gradData
                                  );
    this->firstcall=true;
    if(cudaCommon_allocateArrayToDevice<float4>(&this->array1,
                                                (int)(this->GetVoxNumber()))){
        printf("[NiftyReg ERROR] Error when allocating the first conjugate gradient_gpu array on the GPU.\n");
        reg_exit(1);
    }
    if(cudaCommon_allocateArrayToDevice<float4>(&this->array2,
                                                (int)(this->GetVoxNumber()))){
        printf("[NiftyReg ERROR] Error when allocating the second conjugate gradient_gpu array on the GPU.\n");
        reg_exit(1);
    }

#ifndef NDEBUG
    printf("[NiftyReg DEBUG] reg_conjugateGradient_gpu::Initialise() called\n");
#endif
}
/* \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ */
/* \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ */
// First call seeds G and H from the raw gradient; subsequent calls apply the
// conjugate-gradient update (Fletcher–Reeves-style, computed on device).
void reg_conjugateGradient_gpu::UpdateGradientValues()
{
    if(this->firstcall==true){
        reg_initialiseConjugateGradient_gpu(&(this->gradient_gpu),
                                            &(this->array1),
                                            &(this->array2),
                                            (int)(this->GetVoxNumber()));
        this->firstcall=false;
    }
    else{
        reg_GetConjugateGradient_gpu(&this->gradient_gpu,
                                     &this->array1,
                                     &this->array2,
                                     (int)(this->GetVoxNumber()));
    }
    return;
}
/* \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ */
/* \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ */
// Refreshes the search direction, then delegates the line search to the
// (CPU-side) base-class implementation.
void reg_conjugateGradient_gpu::Optimise(float maxLength,
                                         float smallLength,
                                         float &startLength)
{
    this->UpdateGradientValues();
    reg_optimiser::Optimise(maxLength,
                            smallLength,
                            startLength);
}
/* \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ */
/* \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ */
// A perturbation invalidates the conjugate history, so restart CG.
void reg_conjugateGradient_gpu::Perturbation(float length)
{
    reg_optimiser_gpu::Perturbation(length);
    this->firstcall=true;
}
/* \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ */
/* \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ */
void reg_conjugateGradient_gpu::reg_test_optimiser()
{
    this->UpdateGradientValues();
    reg_optimiser_gpu::reg_test_optimiser();
}
/* \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ */
/* \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ */
// Seeds the conjugate arrays: G = H = gradient (legacy texture-binding path).
void reg_initialiseConjugateGradient_gpu(float4 **gradientArray_d,
                                         float4 **conjugateG_d,
                                         float4 **conjugateH_d,
                                         int nodeNumber)
{
    // Get the BlockSize - The values have been set in _reg_common_gpu.h - cudaCommon_setCUDACard
    NiftyReg_CudaBlock100 *NR_BLOCK = NiftyReg_CudaBlock::getInstance(0);

    NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_NodeNumber,&nodeNumber,sizeof(int)))
    NR_CUDA_SAFE_CALL(cudaBindTexture(0, gradientImageTexture, *gradientArray_d, nodeNumber*sizeof(float4)))

    const unsigned int Grid_reg_initialiseConjugateGradient =
            (unsigned int)reg_ceil(sqrtf((float)nodeNumber/(float)NR_BLOCK->Block_reg_initialiseConjugateGradient));
    dim3 G1(Grid_reg_initialiseConjugateGradient,Grid_reg_initialiseConjugateGradient,1);
    dim3 B1(NR_BLOCK->Block_reg_initialiseConjugateGradient,1,1);

    reg_initialiseConjugateGradient_kernel <<< G1, B1 >>> (*conjugateG_d);
    NR_CUDA_CHECK_KERNEL(G1,B1)
    NR_CUDA_SAFE_CALL(cudaUnbindTexture(gradientImageTexture))
    NR_CUDA_SAFE_CALL(cudaMemcpy(*conjugateH_d, *conjugateG_d, nodeNumber*sizeof(float4), cudaMemcpyDeviceToDevice))
}
/* \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ */
/* \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ */
// CG direction update: computes gam = sum((grad+g)*grad)/sum(HxG) via a
// per-node partial-sum kernel reduced on the host, then updates G/H/grad.
void reg_GetConjugateGradient_gpu(float4 **gradientArray_d,
                                  float4 **conjugateG_d,
                                  float4 **conjugateH_d,
                                  int nodeNumber)
{
    // Get the BlockSize - The values have been set in _reg_common_gpu.h - cudaCommon_setCUDACard
    NiftyReg_CudaBlock100 *NR_BLOCK = NiftyReg_CudaBlock::getInstance(0);

    NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_NodeNumber,&nodeNumber,sizeof(int)))
    NR_CUDA_SAFE_CALL(cudaBindTexture(0, conjugateGTexture, *conjugateG_d, nodeNumber*sizeof(float4)))
    NR_CUDA_SAFE_CALL(cudaBindTexture(0, conjugateHTexture, *conjugateH_d, nodeNumber*sizeof(float4)))
    NR_CUDA_SAFE_CALL(cudaBindTexture(0, gradientImageTexture, *gradientArray_d, nodeNumber*sizeof(float4)))

    // gam = sum((grad+g)*grad)/sum(HxG);
    const unsigned int Grid_reg_GetConjugateGradient1 =
            (unsigned int)reg_ceil(sqrtf((float)nodeNumber/(float)NR_BLOCK->Block_reg_GetConjugateGradient1));
    dim3 B1(NR_BLOCK->Block_reg_GetConjugateGradient1,1,1);
    dim3 G1(Grid_reg_GetConjugateGradient1,Grid_reg_GetConjugateGradient1,1);

    float2 *sum_d;
    NR_CUDA_SAFE_CALL(cudaMalloc(&sum_d, nodeNumber*sizeof(float2)))
    reg_GetConjugateGradient1_kernel <<< G1, B1 >>> (sum_d);
    NR_CUDA_CHECK_KERNEL(G1,B1)
    float2 *sum_h;
    NR_CUDA_SAFE_CALL(cudaMallocHost(&sum_h, nodeNumber*sizeof(float2)))
    NR_CUDA_SAFE_CALL(cudaMemcpy(sum_h,sum_d, nodeNumber*sizeof(float2),cudaMemcpyDeviceToHost))
    NR_CUDA_SAFE_CALL(cudaFree(sum_d))
    double dgg = 0.0;
    double gg = 0.0;
    for(int i=0; i<nodeNumber; i++){
        dgg += sum_h[i].x;
        gg += sum_h[i].y;
    }
    // NOTE(review): gg == 0 (zero gradient) would produce a non-finite gam;
    // upstream relies on this never happening mid-optimisation.
    float gam = (float)(dgg / gg);
    NR_CUDA_SAFE_CALL(cudaFreeHost((void *)sum_h))

    NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ScalingFactor,&gam,sizeof(float)))
    const unsigned int Grid_reg_GetConjugateGradient2 =
            (unsigned int)reg_ceil(sqrtf((float)nodeNumber/(float)NR_BLOCK->Block_reg_GetConjugateGradient2));
    dim3 B2(NR_BLOCK->Block_reg_GetConjugateGradient2,1,1);
    dim3 G2(Grid_reg_GetConjugateGradient2,Grid_reg_GetConjugateGradient2,1);
    reg_GetConjugateGradient2_kernel <<< G2, B2 >>> (*gradientArray_d, *conjugateG_d, *conjugateH_d);
    // BUGFIX: the second launch was checked with the first launch's dims
    // (NR_CUDA_CHECK_KERNEL(G1,B1)); report the dims actually used.
    NR_CUDA_CHECK_KERNEL(G2,B2)

    NR_CUDA_SAFE_CALL(cudaUnbindTexture(conjugateGTexture))
    NR_CUDA_SAFE_CALL(cudaUnbindTexture(conjugateHTexture))
    NR_CUDA_SAFE_CALL(cudaUnbindTexture(gradientImageTexture))
}
/* \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ */
/* \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ */
// Returns the largest per-node Euclidean norm of the gradient field,
// computed on device and reduced with reg_maxReduction_gpu.
float reg_getMaximalLength_gpu(float4 **gradientArray_d,
                               int nodeNumber)
{
    // Get the BlockSize - The values have been set in _reg_common_gpu.h - cudaCommon_setCUDACard
    NiftyReg_CudaBlock100 *NR_BLOCK = NiftyReg_CudaBlock::getInstance(0);

    // Copy constant memory value and bind texture
    NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_NodeNumber,&nodeNumber,sizeof(int)))
    NR_CUDA_SAFE_CALL(cudaBindTexture(0, gradientImageTexture, *gradientArray_d, nodeNumber*sizeof(float4)))
    float *dist_d=NULL;
    NR_CUDA_SAFE_CALL(cudaMalloc(&dist_d,nodeNumber*sizeof(float)))

    const unsigned int Grid_reg_getEuclideanDistance =
            (unsigned int)reg_ceil(sqrtf((float)nodeNumber/(float)NR_BLOCK->Block_reg_getEuclideanDistance));
    dim3 B1(NR_BLOCK->Block_reg_getEuclideanDistance,1,1);
    dim3 G1(Grid_reg_getEuclideanDistance,Grid_reg_getEuclideanDistance,1);

    reg_getEuclideanDistance_kernel <<< G1, B1 >>> (dist_d);
    NR_CUDA_CHECK_KERNEL(G1,B1)

    // Unbind the textures
    NR_CUDA_SAFE_CALL(cudaUnbindTexture(gradientImageTexture))

    float maxDistance = reg_maxReduction_gpu(dist_d,nodeNumber);
    NR_CUDA_SAFE_CALL(cudaFree(dist_d))

    return maxDistance;
}
/* \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ */
/* \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ */
// currentDOF = bestDOF + currentLength * gradient (element-wise, on device).
void reg_updateControlPointPosition_gpu(nifti_image *controlPointImage,
                                        float4 **controlPointImageArray_d,
                                        float4 **bestControlPointPosition_d,
                                        float4 **gradientArray_d,
                                        float currentLength)
{
    // Get the BlockSize - The values have been set in _reg_common_gpu.h - cudaCommon_setCUDACard
    NiftyReg_CudaBlock100 *NR_BLOCK = NiftyReg_CudaBlock::getInstance(0);

    const int nodeNumber = controlPointImage->nx * controlPointImage->ny * controlPointImage->nz;

    NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_NodeNumber,&nodeNumber,sizeof(int)))
    NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ScalingFactor,&currentLength,sizeof(float)))
    NR_CUDA_SAFE_CALL(cudaBindTexture(0, controlPointTexture, *bestControlPointPosition_d, nodeNumber*sizeof(float4)))
    NR_CUDA_SAFE_CALL(cudaBindTexture(0, gradientImageTexture, *gradientArray_d, nodeNumber*sizeof(float4)))

    const unsigned int Grid_reg_updateControlPointPosition =
            (unsigned int)reg_ceil(sqrtf((float)nodeNumber/(float)NR_BLOCK->Block_reg_updateControlPointPosition));
    dim3 B1(NR_BLOCK->Block_reg_updateControlPointPosition,1,1);
    dim3 G1(Grid_reg_updateControlPointPosition,Grid_reg_updateControlPointPosition,1);

    reg_updateControlPointPosition_kernel <<< G1, B1 >>> (*controlPointImageArray_d);
    NR_CUDA_CHECK_KERNEL(G1,B1)

    // Unbind the textures
    NR_CUDA_SAFE_CALL(cudaUnbindTexture(controlPointTexture))
    NR_CUDA_SAFE_CALL(cudaUnbindTexture(gradientImageTexture))
#ifndef NDEBUG
    printf("[NiftyReg DEBUG] reg_updateControlPointPosition_gpu() called\n");
#endif
}
/* \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ */
/* \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ */
#endif // _reg_optimiser_GPU_CU
the_stack
// Lyra2REv2 second-stage hash (8×8 sponge matrix, Nrow=Ncol=4, BLAKE2b-based
// G function). Stage 2 runs 4 cooperating lanes (blockDim.x == 4) per hash,
// with the sponge columns rotated across lanes via legacy mask-less __shfl
// — NOTE(review): pre-Volta idiom; would need __shfl_sync on CC 7.0+.
#define TPB1 32
#define TPB2 128
#define Nrow 4
#define Ncol 4
#define u64type uint4
#define vectype uint48
#define memshift 3

// Load one uint2 word of the per-hash sponge matrix from shared memory.
// Layout interleaves (y,x) thread coordinates so each hash owns a strided slice.
__device__ __forceinline__ uint2 LD4S(const int index, const uint2* shared_mem)
{
    return shared_mem[(index * blockDim.y + threadIdx.y) * blockDim.x + threadIdx.x];
}

// Store counterpart of LD4S.
__device__ __forceinline__ void ST4S(const int index, const uint2 data, uint2* shared_mem)
{
    shared_mem[(index * blockDim.y + threadIdx.y) * blockDim.x + threadIdx.x] = data;
}

// BLAKE2b G function on 64-bit words represented as uint2 (lo,hi).
__device__ __forceinline__ void Gfunc_v35(uint2 &a, uint2 &b, uint2 &c, uint2 &d)
{
    a += b; d = SWAPUINT2(d^a);
    c += d; b = ROR24(b^c);
    a += b; d = ROR16(d^a);
    c += d; b = ROR2(b^c, 63);
}

// Full Lyra2 round on a 4x4 state of 64-bit words (uint2x4 per row):
// column step then diagonal step.
__device__ __forceinline__ void round_lyra_v35(uint2x4 s[4])
{
    Gfunc_v35(s[0].x, s[1].x, s[2].x, s[3].x);
    Gfunc_v35(s[0].y, s[1].y, s[2].y, s[3].y);
    Gfunc_v35(s[0].z, s[1].z, s[2].z, s[3].z);
    Gfunc_v35(s[0].w, s[1].w, s[2].w, s[3].w);

    Gfunc_v35(s[0].x, s[1].y, s[2].z, s[3].w);
    Gfunc_v35(s[0].y, s[1].z, s[2].w, s[3].x);
    Gfunc_v35(s[0].z, s[1].w, s[2].x, s[3].y);
    Gfunc_v35(s[0].w, s[1].x, s[2].y, s[3].z);
}

// uint2 overload of the (legacy, mask-less) warp shuffle: shuffles both
// 32-bit halves.
__device__ __forceinline__ uint2 __shfl(uint2 a, uint32_t b, uint32_t c)
{
    return make_uint2(__shfl(a.x, b, c), __shfl(a.y, b, c));
}

// Lyra2 round in the 4-lane layout: each lane holds one state column; the
// diagonal step is realised by rotating s[1..3] across the 4-lane group,
// then rotating back.
__device__ __forceinline__ void round_lyra_v35(uint2 s[4])
{
    Gfunc_v35(s[0], s[1], s[2], s[3]);
    s[1] = __shfl(s[1], threadIdx.x + 1, 4);
    s[2] = __shfl(s[2], threadIdx.x + 2, 4);
    s[3] = __shfl(s[3], threadIdx.x + 3, 4);
    Gfunc_v35(s[0], s[1], s[2], s[3]);
    s[1] = __shfl(s[1], threadIdx.x + 3, 4);
    s[2] = __shfl(s[2], threadIdx.x + 2, 4);
    s[3] = __shfl(s[3], threadIdx.x + 1, 4);
}

// Setup phase: fills the 4-row sponge matrix (rows 0..3) in shared memory
// while absorbing, per the Lyra2 reduced-duplex schedule.
__device__ __forceinline__ void reduceDuplexRowSetupV2(uint2 state[4], uint2* shared)
{
    uint2 state1[Ncol][3], state0[Ncol][3], state2[3];

#pragma unroll 4
    for (int i = 0; i < Ncol; i++)
    {
#pragma unroll 3
        for (int j = 0; j < 3; j++)
            state0[Ncol - i - 1][j] = state[j];
        round_lyra_v35(state);
    }

#pragma unroll 4
    for (int i = 0; i < Ncol; i++)
    {
#pragma unroll
        for (int j = 0; j < 3; j++)
            state[j] ^= state0[i][j];
        round_lyra_v35(state);

#pragma unroll
        for (int j = 0; j < 3; j++)
            state1[Ncol - i - 1][j] = state0[i][j];

#pragma unroll
        for (int j = 0; j < 3; j++)
            state1[Ncol - i - 1][j] ^= state[j];
    }

    for (int i = 0; i < Ncol; i++)
    {
        const uint32_t s0 = memshift * Ncol * 0 + i * memshift;
        const uint32_t s2 = memshift * Ncol * 2 + memshift * (Ncol - 1) - i*memshift;

#pragma unroll
        for (int j = 0; j < 3; j++)
            state[j] ^= state1[i][j] + state0[i][j];
        round_lyra_v35(state);

#pragma unroll
        for (int j = 0; j < 3; j++)
            state2[j] = state1[i][j];
#pragma unroll
        for (int j = 0; j < 3; j++)
            state2[j] ^= state[j];
#pragma unroll
        for (int j = 0; j < 3; j++)
            ST4S(s2 + j, state2[j], shared);

        // Fetch data from the previous lane (this simultaneously sends our
        // data to the next lane).  [translated from Japanese]
        uint2 Data0 = __shfl(state[0], threadIdx.x - 1, 4);
        uint2 Data1 = __shfl(state[1], threadIdx.x - 1, 4);
        uint2 Data2 = __shfl(state[2], threadIdx.x - 1, 4);
        // Lane 0 wraps around the lane group, so the word order rotates.
        if (threadIdx.x == 0)
        {
            state0[i][0] ^= Data2;
            state0[i][1] ^= Data0;
            state0[i][2] ^= Data1;
        }
        else
        {
            state0[i][0] ^= Data0;
            state0[i][1] ^= Data1;
            state0[i][2] ^= Data2;
        }

#pragma unroll
        for (int j = 0; j < 3; j++)
            ST4S(s0 + j, state0[i][j], shared);

#pragma unroll
        for (int j = 0; j < 3; j++)
            state0[i][j] = state2[j];
    }

    // #pragma nounroll
    for (int i = 0; i < Ncol; i++)
    {
        const uint32_t s1 = memshift * Ncol * 1 + i*memshift;
        const uint32_t s3 = memshift * Ncol * 3 + memshift * (Ncol - 1) - i*memshift;

#pragma unroll 3
        for (int j = 0; j < 3; j++)
            state[j] ^= state1[i][j] + state0[Ncol - i - 1][j];
        round_lyra_v35(state);

#pragma unroll 3
        for (int j = 0; j < 3; j++)
            state0[Ncol - i - 1][j] ^= state[j];
#pragma unroll 3
        for (int j = 0; j < 3; j++)
            ST4S(s3 + j, state0[Ncol - i - 1][j], shared);

        // Fetch data from the previous lane (this simultaneously sends our
        // data to the next lane).  [translated from Japanese]
        uint2 Data0 = __shfl(state[0], threadIdx.x - 1, 4);
        uint2 Data1 = __shfl(state[1], threadIdx.x - 1, 4);
        uint2 Data2 = __shfl(state[2], threadIdx.x - 1, 4);
        if (threadIdx.x == 0)
        {
            state1[i][0] ^= Data2;
            state1[i][1] ^= Data0;
            state1[i][2] ^= Data1;
        }
        else
        {
            state1[i][0] ^= Data0;
            state1[i][1] ^= Data1;
            state1[i][2] ^= Data2;
        }

#pragma unroll 3
        for (int j = 0; j < 3; j++)
            ST4S(s1 + j, state1[i][j], shared);
    }
}

// Wandering phase step: duplex rowIn with rowInOut, writing into rowOut.
__device__ void reduceDuplexRowtV2(const int rowIn, const int rowInOut, const int rowOut, uint2 state[4], uint2 *shared)
{
    uint2 state1[3];
    uint32_t ps1 = memshift * Ncol * rowIn;
    uint32_t ps2 = memshift * Ncol * rowInOut;
    uint32_t ps3 = memshift * Ncol * rowOut;

#pragma unroll 1
    for (int i = 0; i < Ncol; i++)
    {
        uint32_t s1 = ps1 + i*memshift;
        uint32_t s2 = ps2 + i*memshift;
        uint32_t s3 = ps3 + i*memshift;

#pragma unroll 3
        for (int j = 0; j < 3; j++)
            state1[j] = LD4S(s2 + j, shared);
#pragma unroll 3
        for (int j = 0; j < 3; j++)
        {
            state[j] = state[j] ^ (state1[j] + LD4S(s1 + j, shared));
        }

        round_lyra_v35(state);

        // Fetch data from the previous lane (this simultaneously sends our
        // data to the next lane).  [translated from mojibake comment]
        uint2 Data0 = __shfl(state[0], threadIdx.x - 1, 4);
        uint2 Data1 = __shfl(state[1], threadIdx.x - 1, 4);
        uint2 Data2 = __shfl(state[2], threadIdx.x - 1, 4);
        if (threadIdx.x != 0)
        {
            state1[0] ^= Data0;
            state1[1] ^= Data1;
            state1[2] ^= Data2;
        }
        else
        {
            state1[0] ^= Data2;
            state1[1] ^= Data0;
            state1[2] ^= Data1;
        }

#pragma unroll 3
        for (int j = 0; j < 3; j++)
            ST4S(s2 + j, state1[j], shared);

#pragma unroll 3
        for (int j = 0; j < 3; j++)
            state1[j] = LD4S(s3 + j, shared);

#pragma unroll 3
        for (int j = 0; j < 3; j++)
            ST4S(s3 + j, state1[j] ^ state[j], shared);
    }
}

// Final wandering step against row 2: absorbs only; the first column's
// rowInOut contribution is folded into the state at the end.
__device__ void reduceDuplexRowtV2_4(const int rowInOut, uint2 state[4], uint2* shared)
{
    int i, j;
    uint2 last[3], last2[3];

    const uint32_t ps1 = memshift * Ncol * 2;
    const uint32_t ps2 = memshift * Ncol * rowInOut;

#pragma unroll
    for (int j = 0; j < 3; j++)
        last[j] = LD4S(ps2 + j, shared);

#pragma unroll
    for (int j = 0; j < 3; j++)
        state[j] ^= LD4S(ps1 + j, shared) + last[j];

    round_lyra_v35(state);

    // Fetch data from the previous lane (this simultaneously sends our
    // data to the next lane).  [translated from mojibake comment]
    uint2 Data0 = __shfl(state[0], threadIdx.x - 1, 4);
    uint2 Data1 = __shfl(state[1], threadIdx.x - 1, 4);
    uint2 Data2 = __shfl(state[2], threadIdx.x - 1, 4);
    if (threadIdx.x == 0)
    {
        last[0] ^= Data2;
        last[1] ^= Data0;
        last[2] ^= Data1;
    }
    else
    {
        last[0] ^= Data0;
        last[1] ^= Data1;
        last[2] ^= Data2;
    }

    if (rowInOut == 3)
    {
#pragma unroll
        for (j = 0; j < 3; j++)
            last[j] ^= state[j];
    }

#pragma unroll
    for (i = 1; i < Ncol; i++)
    {
        const uint32_t s1 = ps1 + i*memshift;
        const uint32_t s2 = ps2 + i*memshift;

#pragma unroll
        for (j = 0; j < 3; j++)
            last2[j] = LD4S(s2 + j, shared);

#pragma unroll
        for (j = 0; j < 3; j++)
            state[j] ^= LD4S(s1 + j, shared) + last2[j];

        round_lyra_v35(state);
    }

#pragma unroll
    for (int j = 0; j < 3; j++)
        state[j] ^= last[j];
}

// BLAKE2b IV constants, stored as (lo,hi) uint2 pairs.
__constant__ const uint2x4 blake2b_IV[2] = {
    0xf3bcc908lu, 0x6a09e667lu,
    0x84caa73blu, 0xbb67ae85lu,
    0xfe94f82blu, 0x3c6ef372lu,
    0x5f1d36f1lu, 0xa54ff53alu,
    0xade682d1lu, 0x510e527flu,
    0x2b3e6c1flu, 0x9b05688clu,
    0xfb41bd6blu, 0x1f83d9ablu,
    0x137e2179lu, 0x5be0cd19lu
};

// Lyra2 parameter block (padding/length words) XOR-ed into the state after
// the initial absorption.
__constant__ const uint2x4 Mask[2] = {
    0x00000020lu, 0x00000000lu,
    0x00000020lu, 0x00000000lu,
    0x00000020lu, 0x00000000lu,
    0x00000001lu, 0x00000000lu,
    0x00000004lu, 0x00000000lu,
    0x00000004lu, 0x00000000lu,
    0x00000080lu, 0x00000000lu,
    0x00000000lu, 0x01000000lu
};

// Stage 1: absorb the 256-bit input hash into the sponge (24 rounds total)
// and park the full 16-word state in global memory (DMatrix).
__global__ __launch_bounds__(TPB2)
void lyra2v2_gpu_hash_32_1(uint32_t threads, uint2* DMatrix, const uint2* __restrict__ outputHash)
{
    const uint32_t thread = blockDim.x * blockIdx.x + threadIdx.x;

    uint2x4 state[4];
    uint2x4 *DState = (uint2x4*)DMatrix;

    if (thread < threads)
    {
        state[0].x = state[1].x = __ldg(&outputHash[thread + threads * 0]);
        state[0].y = state[1].y = __ldg(&outputHash[thread + threads * 1]);
        state[0].z = state[1].z = __ldg(&outputHash[thread + threads * 2]);
        state[0].w = state[1].w = __ldg(&outputHash[thread + threads * 3]);

        state[2] = blake2b_IV[0];
        state[3] = blake2b_IV[1];

#pragma unroll 1
        for (int i = 0; i<12; i++)
            round_lyra_v35(state);

        state[0] ^= Mask[0];
        state[1] ^= Mask[1];

        for (int i = 0; i<12; i++)
            round_lyra_v35(state);

        DState[blockDim.x * gridDim.x * 0 + thread] = state[0];
        DState[blockDim.x * gridDim.x * 1 + thread] = state[1];
        DState[blockDim.x * gridDim.x * 2 + thread] = state[2];
        DState[blockDim.x * gridDim.x * 3 + thread] = state[3];
    } //thread
}

// Stage 2: the memory-hard part. Launched with blockDim = (4, TPB1/4): four
// lanes cooperate per hash, the sponge matrix lives in shared memory.
__global__ __launch_bounds__(TPB1)
void lyra2v2_gpu_hash_32_2(uint32_t threads, uint2* DState)
{
    const uint32_t thread = blockDim.y * blockIdx.x + threadIdx.y;

    // 48 uint2 words of matrix per hash (Nrow*Ncol*memshift = 48).
    __shared__ uint2 shared[48 * TPB1];

    if (thread < threads)
    {
        uint2 state[4];
        state[0] = ((uint2*)DState)[(0 * gridDim.x * blockDim.y + thread) * blockDim.x + threadIdx.x];
        state[1] = ((uint2*)DState)[(1 * gridDim.x * blockDim.y + thread) * blockDim.x + threadIdx.x];
        state[2] = ((uint2*)DState)[(2 * gridDim.x * blockDim.y + thread) * blockDim.x + threadIdx.x];
        state[3] = ((uint2*)DState)[(3 * gridDim.x * blockDim.y + thread) * blockDim.x + threadIdx.x];

        reduceDuplexRowSetupV2(state, shared);

        uint32_t rowa;
        int prev = 3;

        // Data-dependent row selection (rowa) is what makes Lyra2 memory-hard.
#pragma unroll 2
        for (int i = 0; i < 3; i++)
        {
            rowa = __shfl(state[0].x, 0, 4) & 3;
            reduceDuplexRowtV2(prev, rowa, i, state, shared);
            prev = i;
        }

        rowa = __shfl(state[0].x, 0, 4) & 3;
        reduceDuplexRowtV2_4(rowa, state, shared);

        ((uint2*)DState)[(0 * gridDim.x * blockDim.y + thread) * blockDim.x + threadIdx.x] = state[0];
        ((uint2*)DState)[(1 * gridDim.x * blockDim.y + thread) * blockDim.x + threadIdx.x] = state[1];
        ((uint2*)DState)[(2 * gridDim.x * blockDim.y + thread) * blockDim.x + threadIdx.x] = state[2];
        ((uint2*)DState)[(3 * gridDim.x * blockDim.y + thread) * blockDim.x + threadIdx.x] = state[3];
    } //thread
}

// Stage 3: 12 squeeze rounds, then emit the first state row as the 256-bit
// output hash.
__global__ __launch_bounds__(TPB2)
void lyra2v2_gpu_hash_32_3(uint32_t threads, const uint2* __restrict__ DMatrix, uint2 *outputHash)
{
    const uint32_t thread = blockDim.x * blockIdx.x + threadIdx.x;

    uint2x4 state[4];
    uint2x4 *DState = (uint2x4*)DMatrix;

    if (thread < threads)
    {
        state[0] = __ldg4(&DState[blockDim.x * gridDim.x * 0 + thread]);
        state[1] = __ldg4(&DState[blockDim.x * gridDim.x * 1 + thread]);
        state[2] = __ldg4(&DState[blockDim.x * gridDim.x * 2 + thread]);
        state[3] = __ldg4(&DState[blockDim.x * gridDim.x * 3 + thread]);

#pragma unroll 1
        for (int i = 0; i < 12; i++)
            round_lyra_v35(state);

        outputHash[thread + threads * 0] = state[0].x;
        outputHash[thread + threads * 1] = state[0].y;
        outputHash[thread + threads * 2] = state[0].z;
        outputHash[thread + threads * 3] = state[0].w;
    } //thread
}

// Host entry point: runs the three stages in sequence on the default stream.
// d_outputHash is both input (stage 1) and output (stage 3); DMatrix is the
// scratch state buffer.
__host__
void lyra2v2_cpu_hash_32(int thr_id, uint32_t threads, uint2* DMatrix, uint2 *d_outputHash)
{
    dim3 grid1((threads * 4 + TPB1 - 1) / TPB1);
    dim3 block1(4, TPB1>>2);

    dim3 grid2((threads + TPB2 - 1) / TPB2);
    dim3 block2(TPB2);

    lyra2v2_gpu_hash_32_1 <<<grid2, block2 >>>(threads, DMatrix, d_outputHash);
    lyra2v2_gpu_hash_32_2 <<<grid1, block1 >>>(threads, DMatrix);
    lyra2v2_gpu_hash_32_3 <<<grid2, block2 >>>(threads, DMatrix, d_outputHash);
}
the_stack
#include <functional>
#include <algorithm>

THRUST_DISABLE_MSVC_POSSIBLE_LOSS_OF_DATA_WARNING_BEGIN

// Number of random samples fed to each generated functional test.
const size_t NUM_SAMPLES = 10000;

// Runs Operator() through thrust::transform on the vector under test and
// ReferenceOperator() on a host copy, then requires element-wise equality.
template <class InputVector, class OutputVector, class Operator, class ReferenceOperator>
void TestUnaryFunctional(void)
{
    typedef typename InputVector::value_type   InputType;
    typedef typename OutputVector::value_type  OutputType;

    thrust::host_vector<InputType>  std_input = unittest::random_samples<InputType>(NUM_SAMPLES);
    thrust::host_vector<OutputType> std_output(NUM_SAMPLES);

    InputVector  input = std_input;
    OutputVector output(NUM_SAMPLES);

    thrust::transform(    input.begin(),     input.end(),     output.begin(),          Operator());
    thrust::transform(std_input.begin(), std_input.end(), std_output.begin(), ReferenceOperator());

    ASSERT_EQUAL(output, std_output);
}

// Binary analogue of TestUnaryFunctional; zeros in the second operand are
// replaced so division-based operators cannot divide by zero.
template <class InputVector, class OutputVector, class Operator, class ReferenceOperator>
void TestBinaryFunctional(void)
{
    typedef typename InputVector::value_type   InputType;
    typedef typename OutputVector::value_type  OutputType;

    thrust::host_vector<InputType>  std_input1 = unittest::random_samples<InputType>(NUM_SAMPLES);
    thrust::host_vector<InputType>  std_input2 = unittest::random_samples<InputType>(NUM_SAMPLES);
    thrust::host_vector<OutputType> std_output(NUM_SAMPLES);

    // Replace zeros to avoid divide by zero exceptions
    std::replace(std_input2.begin(), std_input2.end(), (InputType) 0, (InputType) 1);

    InputVector input1 = std_input1;
    InputVector input2 = std_input2;
    OutputVector output(NUM_SAMPLES);

    thrust::transform(    input1.begin(),     input1.end(),     input2.begin(),     output.begin(),          Operator());
    thrust::transform(std_input1.begin(), std_input1.end(), std_input2.begin(), std_output.begin(), ReferenceOperator());

    // Note: FP division is not bit-equal, even when nvcc is invoked with --prec-div
    ASSERT_ALMOST_EQUAL(output, std_output);
}

// XXX add bool to list
// Instantiate a macro for all integer-like data types
#define INSTANTIATE_INTEGER_TYPES(Macro, vector_type, operator_name) \
Macro(vector_type, operator_name, unittest::int8_t  ) \
Macro(vector_type, operator_name, unittest::uint8_t ) \
Macro(vector_type, operator_name, unittest::int16_t ) \
Macro(vector_type, operator_name, unittest::uint16_t) \
Macro(vector_type, operator_name, unittest::int32_t ) \
Macro(vector_type, operator_name, unittest::uint32_t) \
Macro(vector_type, operator_name, unittest::int64_t ) \
Macro(vector_type, operator_name, unittest::uint64_t)

// Instantiate a macro for all integer and floating point data types
#define INSTANTIATE_ALL_TYPES(Macro, vector_type, operator_name) \
INSTANTIATE_INTEGER_TYPES(Macro, vector_type, operator_name) \
Macro(vector_type, operator_name, float)

// op(T) -> T
#define INSTANTIATE_UNARY_ARITHMETIC_FUNCTIONAL_TEST(vector_type, operator_name, data_type) \
TestUnaryFunctional< thrust::vector_type<data_type>, \
                     thrust::vector_type<data_type>, \
                     thrust::operator_name<data_type>, \
                     std::operator_name<data_type> >();

// XXX revert OutputVector<T> back to bool
// op(T) -> bool
#define INSTANTIATE_UNARY_LOGICAL_FUNCTIONAL_TEST(vector_type, operator_name, data_type) \
TestUnaryFunctional< thrust::vector_type<data_type>, \
                     thrust::vector_type<data_type>, \
                     thrust::operator_name<data_type>, \
                     std::operator_name<data_type> >();

// op(T,T) -> T
#define INSTANTIATE_BINARY_ARITHMETIC_FUNCTIONAL_TEST(vector_type, operator_name, data_type) \
TestBinaryFunctional< thrust::vector_type<data_type>, \
                      thrust::vector_type<data_type>, \
                      thrust::operator_name<data_type>, \
                      std::operator_name<data_type> >();

// XXX revert OutputVector<T> back to bool
// op(T,T) -> bool
#define INSTANTIATE_BINARY_LOGICAL_FUNCTIONAL_TEST(vector_type, operator_name, data_type) \
TestBinaryFunctional< thrust::vector_type<data_type>, \
                      thrust::vector_type<data_type>, \
                      thrust::operator_name<data_type>, \
                      std::operator_name<data_type> >();

// Each DECLARE_* macro below expands to a Host and a Device test function
// and registers both with the unit-test framework via DECLARE_UNITTEST.

// op(T) -> T
#define DECLARE_UNARY_ARITHMETIC_FUNCTIONAL_UNITTEST(operator_name, OperatorName) \
void Test##OperatorName##FunctionalHost(void) \
{ \
    INSTANTIATE_ALL_TYPES( INSTANTIATE_UNARY_ARITHMETIC_FUNCTIONAL_TEST, host_vector, operator_name); \
} \
DECLARE_UNITTEST(Test##OperatorName##FunctionalHost); \
void Test##OperatorName##FunctionalDevice(void) \
{ \
    INSTANTIATE_ALL_TYPES( INSTANTIATE_UNARY_ARITHMETIC_FUNCTIONAL_TEST, device_vector, operator_name); \
} \
DECLARE_UNITTEST(Test##OperatorName##FunctionalDevice);

// op(T) -> bool
#define DECLARE_UNARY_LOGICAL_FUNCTIONAL_UNITTEST(operator_name, OperatorName) \
void Test##OperatorName##FunctionalHost(void) \
{ \
    INSTANTIATE_ALL_TYPES( INSTANTIATE_UNARY_LOGICAL_FUNCTIONAL_TEST, host_vector, operator_name); \
} \
DECLARE_UNITTEST(Test##OperatorName##FunctionalHost); \
void Test##OperatorName##FunctionalDevice(void) \
{ \
    INSTANTIATE_ALL_TYPES( INSTANTIATE_UNARY_LOGICAL_FUNCTIONAL_TEST, device_vector, operator_name); \
} \
DECLARE_UNITTEST(Test##OperatorName##FunctionalDevice);

// op(T,T) -> T
#define DECLARE_BINARY_ARITHMETIC_FUNCTIONAL_UNITTEST(operator_name, OperatorName) \
void Test##OperatorName##FunctionalHost(void) \
{ \
    INSTANTIATE_ALL_TYPES( INSTANTIATE_BINARY_ARITHMETIC_FUNCTIONAL_TEST, host_vector, operator_name); \
} \
DECLARE_UNITTEST(Test##OperatorName##FunctionalHost); \
void Test##OperatorName##FunctionalDevice(void) \
{ \
    INSTANTIATE_ALL_TYPES( INSTANTIATE_BINARY_ARITHMETIC_FUNCTIONAL_TEST, device_vector, operator_name); \
} \
DECLARE_UNITTEST(Test##OperatorName##FunctionalDevice);

// op(T,T) -> T (for integer T only)
#define DECLARE_BINARY_INTEGER_ARITHMETIC_FUNCTIONAL_UNITTEST(operator_name, OperatorName) \
void Test##OperatorName##FunctionalHost(void) \
{ \
    INSTANTIATE_INTEGER_TYPES( INSTANTIATE_BINARY_ARITHMETIC_FUNCTIONAL_TEST, host_vector, operator_name); \
} \
DECLARE_UNITTEST(Test##OperatorName##FunctionalHost); \
void Test##OperatorName##FunctionalDevice(void) \
{ \
    INSTANTIATE_INTEGER_TYPES( INSTANTIATE_BINARY_ARITHMETIC_FUNCTIONAL_TEST, device_vector, operator_name); \
} \
DECLARE_UNITTEST(Test##OperatorName##FunctionalDevice);

// op(T,T) -> bool
#define DECLARE_BINARY_LOGICAL_FUNCTIONAL_UNITTEST(operator_name, OperatorName) \
void Test##OperatorName##FunctionalHost(void) \
{ \
    INSTANTIATE_ALL_TYPES( INSTANTIATE_BINARY_LOGICAL_FUNCTIONAL_TEST, host_vector, operator_name); \
} \
DECLARE_UNITTEST(Test##OperatorName##FunctionalHost); \
void Test##OperatorName##FunctionalDevice(void) \
{ \
    INSTANTIATE_ALL_TYPES( INSTANTIATE_BINARY_LOGICAL_FUNCTIONAL_TEST, device_vector, operator_name); \
} \
DECLARE_UNITTEST(Test##OperatorName##FunctionalDevice);

// Create the unit tests
DECLARE_UNARY_ARITHMETIC_FUNCTIONAL_UNITTEST(negate, Negate);
DECLARE_UNARY_LOGICAL_FUNCTIONAL_UNITTEST(logical_not, LogicalNot);

// Ad-hoc testing for other functionals

// thrust::identity must copy its input unchanged.
template <class Vector>
void TestIdentityFunctional(void)
{
    typedef typename Vector::value_type T;

    Vector input(3);
    input[0] = 0; input[1] = 1; input[2] = 2;

    Vector output(3);

    thrust::transform(input.begin(), input.end(), output.begin(), thrust::identity<T>());

    ASSERT_EQUAL(input, output);
}
DECLARE_VECTOR_UNITTEST(TestIdentityFunctional);

// thrust::project1st must return its first argument.
template <class Vector>
void TestProject1stFunctional(void)
{
    typedef typename Vector::value_type T;

    Vector lhs(3);
    Vector rhs(3);
    lhs[0] = 0;  rhs[0] = 3;
    lhs[1] = 1;  rhs[1] = 4;
    lhs[2] = 2;  rhs[2] = 5;

    Vector output(3);

    thrust::transform(lhs.begin(), lhs.end(), rhs.begin(), output.begin(), thrust::project1st<T,T>());

    ASSERT_EQUAL(output, lhs);
}
DECLARE_VECTOR_UNITTEST(TestProject1stFunctional);

// thrust::project2nd must return its second argument.
template <class Vector>
void TestProject2ndFunctional(void)
{
    typedef typename Vector::value_type T;

    Vector lhs(3);
    Vector rhs(3);
    lhs[0] = 0;  rhs[0] = 3;
    lhs[1] = 1;  rhs[1] = 4;
    lhs[2] = 2;  rhs[2] = 5;

    Vector output(3);

    thrust::transform(lhs.begin(), lhs.end(), rhs.begin(), output.begin(), thrust::project2nd<T,T>());

    ASSERT_EQUAL(output, rhs);
}
DECLARE_VECTOR_UNITTEST(TestProject2ndFunctional);

// thrust::maximum must pick the element-wise larger value.
template <class Vector>
void TestMaximumFunctional(void)
{
    typedef typename Vector::value_type T;

    Vector input1(3);
    Vector input2(3);
    input1[0] = 8;  input1[1] = 3;  input1[2] = 7;
    input2[0] = 5;  input2[1] = 6;  input2[2] = 9;

    Vector output(3);

    thrust::transform(input1.begin(), input1.end(), input2.begin(), output.begin(), thrust::maximum<T>());

    ASSERT_EQUAL(output[0], 8);
    ASSERT_EQUAL(output[1], 6);
    ASSERT_EQUAL(output[2], 9);
}
DECLARE_VECTOR_UNITTEST(TestMaximumFunctional);

// thrust::minimum must pick the element-wise smaller value.
template <class Vector>
void TestMinimumFunctional(void)
{
    typedef typename Vector::value_type T;

    Vector input1(3);
    Vector input2(3);
    input1[0] = 8;  input1[1] = 3;  input1[2] = 7;
    input2[0] = 5;  input2[1] = 6;  input2[2] = 9;

    Vector output(3);

    thrust::transform(input1.begin(), input1.end(), input2.begin(), output.begin(), thrust::minimum<T>());

    ASSERT_EQUAL(output[0], 5);
    ASSERT_EQUAL(output[1], 3);
    ASSERT_EQUAL(output[2], 7);
}
DECLARE_VECTOR_UNITTEST(TestMinimumFunctional);

// thrust::not1 must logically negate the wrapped unary predicate.
template <class Vector>
void TestNot1(void)
{
    typedef typename Vector::value_type T;

    Vector input(5);
    input[0] = 1;  input[1] = 0;  input[2] = 1;  input[3] = 1;  input[4] = 0;

    Vector output(5);

    thrust::transform(input.begin(), input.end(), output.begin(), thrust::not1(thrust::identity<T>()));

    ASSERT_EQUAL(output[0], 0);
    ASSERT_EQUAL(output[1], 1);
    ASSERT_EQUAL(output[2], 0);
    ASSERT_EQUAL(output[3], 0);
    ASSERT_EQUAL(output[4], 1);
}
DECLARE_INTEGRAL_VECTOR_UNITTEST(TestNot1);

// GCC 11 fails to build this test case with a spurious error in a
// very specific scenario:
// - GCC 11
// - CPP system for both host and device
// - C++11 dialect
#if !(defined(THRUST_GCC_VERSION) && \
      THRUST_GCC_VERSION >= 110000 && \
      THRUST_GCC_VERSION < 120000 && \
      THRUST_HOST_SYSTEM == THRUST_HOST_SYSTEM_CPP && \
      THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CPP && \
      THRUST_CPP_DIALECT == 2011)
// thrust::not2 must logically negate the wrapped binary predicate.
template <class Vector>
void TestNot2(void)
{
    typedef typename Vector::value_type T;

    Vector input1(5);
    Vector input2(5);
    input1[0] = 1;  input1[1] = 0;  input1[2] = 1;  input1[3] = 1;  input1[4] = 0;
    input2[0] = 1;  input2[1] = 1;  input2[2] = 0;  input2[3] = 1;  input2[4] = 1;

    Vector output(5);

    thrust::transform(input1.begin(), input1.end(), input2.begin(), output.begin(), thrust::not2(thrust::equal_to<T>()));

    ASSERT_EQUAL(output[0], 0);
    ASSERT_EQUAL(output[1], 1);
    ASSERT_EQUAL(output[2], 1);
    ASSERT_EQUAL(output[3], 0);
    ASSERT_EQUAL(output[4], 1);
}
DECLARE_VECTOR_UNITTEST(TestNot2);
#endif // Weird GCC11 failure case

THRUST_DISABLE_MSVC_POSSIBLE_LOSS_OF_DATA_WARNING_END
the_stack
#include <cuda.h>
#include <limits.h>
#include "THCHalf.h"

/// Class for numeric limits of the particular data type, which
/// includes support for `half`.
/// Unfortunately since `half` does not have a constructor, these have
/// to be expressed as functions (either that or non-const statics).
template <typename T>
struct THCNumerics {
};

template <>
struct THCNumerics<unsigned char> {
  static inline __host__ __device__ unsigned char min() { return 0; }
  static inline __host__ __device__ unsigned char max() { return UCHAR_MAX; }

  static inline __host__ __device__ bool lt(unsigned char a, unsigned char b) { return a < b; }
  static inline __host__ __device__ bool le(unsigned char a, unsigned char b) { return a <= b; }
  static inline __host__ __device__ bool gt(unsigned char a, unsigned char b) { return a > b; }
  static inline __host__ __device__ bool ge(unsigned char a, unsigned char b) { return a >= b; }
  static inline __host__ __device__ bool eq(unsigned char a, unsigned char b) { return a == b; }
  static inline __host__ __device__ bool ne(unsigned char a, unsigned char b) { return a != b; }

  static inline __host__ __device__ unsigned char add(unsigned char a, unsigned char b) { return a + b; }
  static inline __host__ __device__ unsigned char mul(unsigned char a, unsigned char b) { return a * b; }
  static inline __host__ __device__ unsigned char sub(unsigned char a, unsigned char b) { return a - b; }
  static inline __host__ __device__ unsigned char div(unsigned char a, unsigned char b) { return a / b; }
  // BUGFIX: the original body was `return abs(a);`, where unqualified lookup
  // finds this member itself (not ::abs), producing infinite recursion.
  // An unsigned value is already non-negative, so abs is the identity.
  static inline __host__ __device__ unsigned char abs(unsigned char a) { return a; }
};

template <>
struct THCNumerics<char> {
  static inline __host__ __device__ char min() { return CHAR_MIN; }
  static inline __host__ __device__ char max() { return CHAR_MAX; }

  static inline __host__ __device__ bool lt(char a, char b) { return
a > b; }
  static inline __host__ __device__ bool ge(char a, char b) { return a >= b; }
  static inline __host__ __device__ bool eq(char a, char b) { return a == b; }
  static inline __host__ __device__ bool ne(char a, char b) { return a != b; }

  static inline __host__ __device__ char neg(char a) { return -a; }
  static inline __host__ __device__ char add(char a, char b) { return a + b; }
  static inline __host__ __device__ char mul(char a, char b) { return a * b; }
  static inline __host__ __device__ char sub(char a, char b) { return a - b; }
  static inline __host__ __device__ char div(char a, char b) { return a / b; }
  // Widen to int so ::abs (and not this member) is selected.
  static inline __host__ __device__ char abs(char a) { return ::abs((int)a); }
};

template <>
struct THCNumerics<short> {
  static inline __host__ __device__ short min() { return SHRT_MIN; }
  static inline __host__ __device__ short max() { return SHRT_MAX; }

  static inline __host__ __device__ bool lt(short a, short b) { return a < b; }
  static inline __host__ __device__ bool le(short a, short b) { return a <= b; }
  static inline __host__ __device__ bool gt(short a, short b) { return a > b; }
  static inline __host__ __device__ bool ge(short a, short b) { return a >= b; }
  static inline __host__ __device__ bool eq(short a, short b) { return a == b; }
  static inline __host__ __device__ bool ne(short a, short b) { return a != b; }

  static inline __host__ __device__ short neg(short a) { return -a; }
  static inline __host__ __device__ short add(short a, short b) { return a + b; }
  static inline __host__ __device__ short mul(short a, short b) { return a * b; }
  static inline __host__ __device__ short sub(short a, short b) { return a - b; }
  static inline __host__ __device__ short div(short a, short b) { return a / b; }
  static inline __host__ __device__ short abs(short a) { return ::abs((int)a); }
};

template <>
struct THCNumerics<int> {
  static inline __host__ __device__ int min() { return INT_MIN; }
  static inline __host__ __device__ int max() { return INT_MAX; }

  static inline __host__ __device__ bool lt(int a, int b) { return a < b; }
  static inline __host__ __device__ bool le(int a, int b) { return a <= b; }
  static inline __host__ __device__ bool gt(int a, int b) { return a > b; }
  static inline __host__ __device__ bool ge(int a, int b) { return a >= b; }
  static inline __host__ __device__ bool eq(int a, int b) { return a == b; }
  static inline __host__ __device__ bool ne(int a, int b) { return a != b; }

  static inline __host__ __device__ int neg(int a) { return -a; }
  static inline __host__ __device__ int add(int a, int b) { return a + b; }
  static inline __host__ __device__ int mul(int a, int b) { return a * b; }
  static inline __host__ __device__ int sub(int a, int b) { return a - b; }
  static inline __host__ __device__ int div(int a, int b) { return a / b; }
  static inline __host__ __device__ int abs(int a) { return ::abs(a); }
};

template <>
struct THCNumerics<long> {
  static inline __host__ __device__ long min() { return LONG_MIN; }
  static inline __host__ __device__ long max() { return LONG_MAX; }

  static inline __host__ __device__ bool lt(long a, long b) { return a < b; }
  static inline __host__ __device__ bool le(long a, long b) { return a <= b; }
  static inline __host__ __device__ bool gt(long a, long b) { return a > b; }
  static inline __host__ __device__ bool ge(long a, long b) { return a >= b; }
  static inline __host__ __device__ bool eq(long a, long b) { return a == b; }
  static inline __host__ __device__ bool ne(long a, long b) { return a != b; }

  static inline __host__ __device__ long neg(long a) { return -a; }
  static inline __host__ __device__ long add(long a, long b) { return a + b; }
  static inline __host__ __device__ long mul(long a, long b) { return a * b; }
  static inline __host__ __device__ long sub(long a, long b) { return a - b; }
  static inline __host__ __device__ long div(long a, long b) { return a / b; };
  static inline __host__ __device__ long abs(long a) { return labs(a); }
};

#ifdef CUDA_HALF_TENSOR
// `half` specialization.  Every op has three code paths:
//  - device with native fp16 instructions (CUDA_HALF_INSTRUCTIONS),
//  - device via float round-trip (__half2float / __float2half),
//  - host via THC_half2float / THC_float2half.
template <>
struct THCNumerics<half>
{
#if CUDA_VERSION < 9000
  // Bit patterns 0xfbff/0x7bff are the most negative / most positive
  // finite half values.
  static inline __host__ __device__ half min() { half h; h.x = 0xfbff; return h; }
  static inline __host__ __device__ half max() { half h; h.x = 0x7bff; return h; }
#else
  static inline __host__ __device__ half min() { __half_raw h; h.x = 0xfbff; return h; }
  static inline __host__ __device__ half max() { __half_raw h; h.x = 0x7bff; return h; }
#endif

  static inline __host__ __device__ bool lt(half a, half b) {
#ifdef __CUDA_ARCH__
#ifdef CUDA_HALF_INSTRUCTIONS
    return __hlt(a, b);
#else
    float fa = __half2float(a);
    float fb = __half2float(b);
    return fa < fb;
#endif
#else // __CUDA_ARCH__
    return THC_half2float(a) < THC_half2float(b);
#endif
  }

  static inline __host__ __device__ bool le(half a, half b) {
#ifdef __CUDA_ARCH__
#ifdef CUDA_HALF_INSTRUCTIONS
    return __hle(a, b);
#else
    float fa = __half2float(a);
    float fb = __half2float(b);
    return fa <= fb;
#endif
#else // __CUDA_ARCH__
    return THC_half2float(a) <= THC_half2float(b);
#endif
  }

  static inline __host__ __device__ bool gt(half a, half b) {
#ifdef __CUDA_ARCH__
#ifdef CUDA_HALF_INSTRUCTIONS
    return __hgt(a, b);
#else
    float fa = __half2float(a);
    float fb = __half2float(b);
    return fa > fb;
#endif
#else // __CUDA_ARCH__
    return THC_half2float(a) > THC_half2float(b);
#endif
  }

  static inline __host__ __device__ bool ge(half a, half b) {
#ifdef __CUDA_ARCH__
#ifdef CUDA_HALF_INSTRUCTIONS
    return __hge(a, b);
#else
    float fa = __half2float(a);
    float fb = __half2float(b);
    return fa >= fb;
#endif
#else // __CUDA_ARCH__
    return THC_half2float(a) >= THC_half2float(b);
#endif
  }

  static inline __host__ __device__ bool eq(half a, half b) {
#ifdef __CUDA_ARCH__
#ifdef CUDA_HALF_INSTRUCTIONS
    return __heq(a, b);
#else
    float fa = __half2float(a);
    float fb = __half2float(b);
    return fa == fb;
#endif
#else // __CUDA_ARCH__
    return THC_half2float(a) == THC_half2float(b);
#endif
  }

  static inline __host__ __device__ bool ne(half a, half b) {
#ifdef __CUDA_ARCH__
#ifdef CUDA_HALF_INSTRUCTIONS
    return __hne(a, b);
#else
    float fa = __half2float(a);
    float fb = __half2float(b);
    return fa != fb;
#endif
#else // __CUDA_ARCH__
    return THC_half2float(a) != THC_half2float(b);
#endif
  }

  static inline __host__ __device__ half exp(half a) {
#ifdef __CUDA_ARCH__
#ifdef CUDA_HALF_INSTRUCTIONS
    return hexp(a);
#else
    float fa = __half2float(a);
    return __float2half(expf(fa));
#endif
#else // __CUDA_ARCH__
    return THC_float2half(expf(THC_half2float(a)));
#endif
  }

  static inline __host__ __device__ half exp10(half a) {
#ifdef __CUDA_ARCH__
#ifdef CUDA_HALF_INSTRUCTIONS
    return hexp10(a);
#else
    float fa = __half2float(a);
    return __float2half(exp10f(fa));
#endif
#else // __CUDA_ARCH__
    return THC_float2half(exp10f(THC_half2float(a)));
#endif
  }

  static inline __host__ __device__ half log(half a) {
#ifdef __CUDA_ARCH__
#ifdef CUDA_HALF_INSTRUCTIONS
    return hlog(a);
#else
    float fa = __half2float(a);
    return __float2half(logf(fa));
#endif
#else // __CUDA_ARCH__
    return THC_float2half(logf(THC_half2float(a)));
#endif
  }

  // No native fp16 log1p instruction: always goes through float.
  static inline __host__ __device__ half log1p(half a) {
#ifdef __CUDA_ARCH__
    float fa = __half2float(a);
    return __float2half(log1pf(fa));
#else // __CUDA_ARCH__
    return THC_float2half(log1pf(THC_half2float(a)));
#endif
  }

  static inline __host__ __device__ half lgamma(half a) {
#ifdef __CUDA_ARCH__
    float fa = __half2float(a);
    return __float2half(lgammaf(fa));
#else // __CUDA_ARCH__
    return THC_float2half(lgammaf(THC_half2float(a)));
#endif
  }

  static inline __host__ __device__ half cos(half a) {
#ifdef __CUDA_ARCH__
#ifdef CUDA_HALF_INSTRUCTIONS
    return hcos(a);
#else
    float fa = __half2float(a);
    return __float2half(cosf(fa));
#endif
#else // __CUDA_ARCH__
    return THC_float2half(cosf(THC_half2float(a)));
#endif
  }

  static inline __host__ __device__ half sin(half a) {
#ifdef __CUDA_ARCH__
#ifdef CUDA_HALF_INSTRUCTIONS
    return hsin(a);
#else
    float fa = __half2float(a);
    return __float2half(sinf(fa));
#endif
#else // __CUDA_ARCH__
    return THC_float2half(sinf(THC_half2float(a)));
#endif
  }

  static inline __host__ __device__ half sqrt(half a) {
#ifdef __CUDA_ARCH__
#ifdef CUDA_HALF_INSTRUCTIONS
    return hsqrt(a);
#else
    float fa = __half2float(a);
    return __float2half(sqrtf(fa));
#endif
#else // __CUDA_ARCH__
    return THC_float2half(sqrtf(THC_half2float(a)));
#endif
  }

  static inline __host__ __device__ half rsqrt(half a) {
#ifdef __CUDA_ARCH__
#ifdef CUDA_HALF_INSTRUCTIONS
    return hrsqrt(a);
#else
    float fa = __half2float(a);
    return __float2half(rsqrtf(fa));
#endif
#else // __CUDA_ARCH__
    return THC_float2half(rsqrtf(THC_half2float(a)));
#endif
  }

  static inline __host__ __device__ half ceil(half a) {
#ifdef __CUDA_ARCH__
#ifdef CUDA_HALF_INSTRUCTIONS
    return hceil(a);
#else
    float fa = __half2float(a);
    return __float2half(ceilf(fa));
#endif
#else // __CUDA_ARCH__
    return THC_float2half(ceilf(THC_half2float(a)));
#endif
  }

  static inline __host__ __device__ half floor(half a) {
#ifdef __CUDA_ARCH__
#ifdef CUDA_HALF_INSTRUCTIONS
    return hfloor(a);
#else
    float fa = __half2float(a);
    return __float2half(floorf(fa));
#endif
#else // __CUDA_ARCH__
    return THC_float2half(floorf(THC_half2float(a)));
#endif
  }

  static inline __host__ __device__ half trunc(half a) {
#ifdef __CUDA_ARCH__
#ifdef CUDA_HALF_INSTRUCTIONS
    return htrunc(a);
#else
    float fa = __half2float(a);
    return __float2half(truncf(fa));
#endif
#else // __CUDA_ARCH__
    return THC_float2half(truncf(THC_half2float(a)));
#endif
  }

  static inline __host__ __device__ half neg(half a) {
#ifdef __CUDA_ARCH__
#ifdef CUDA_HALF_INSTRUCTIONS
    return __hneg(a);
#else
    float fa = __half2float(a);
    return __float2half(-fa);
#endif
#else // __CUDA_ARCH__
    return THC_float2half(-(THC_half2float(a)));
#endif
  }

  static inline __host__ __device__ half acos(half a) {
#ifdef __CUDA_ARCH__
    float fa = __half2float(a);
    return __float2half(acosf(fa));
#else // __CUDA_ARCH__
    return THC_float2half(acosf(THC_half2float(a)));
#endif
  }

  static inline __host__ __device__ half cosh(half a) {
#ifdef __CUDA_ARCH__
    float fa = __half2float(a);
    return __float2half(coshf(fa));
#else // __CUDA_ARCH__
    return
THC_float2half(coshf(THC_half2float(a)));
#endif
  }

  static inline __host__ __device__ half asin(half a) {
#ifdef __CUDA_ARCH__
    float fa = __half2float(a);
    return __float2half(asinf(fa));
#else // __CUDA_ARCH__
    return THC_float2half(asinf(THC_half2float(a)));
#endif
  }

  static inline __host__ __device__ half sinh(half a) {
#ifdef __CUDA_ARCH__
    float fa = __half2float(a);
    return __float2half(sinhf(fa));
#else // __CUDA_ARCH__
    return THC_float2half(sinhf(THC_half2float(a)));
#endif
  }

  static inline __host__ __device__ half tan(half a) {
#ifdef __CUDA_ARCH__
    float fa = __half2float(a);
    return __float2half(tanf(fa));
#else // __CUDA_ARCH__
    return THC_float2half(tanf(THC_half2float(a)));
#endif
  }

  static inline __host__ __device__ half atan(half a) {
#ifdef __CUDA_ARCH__
    float fa = __half2float(a);
    return __float2half(atanf(fa));
#else // __CUDA_ARCH__
    return THC_float2half(atanf(THC_half2float(a)));
#endif
  }

  static inline __host__ __device__ half tanh(half a) {
#ifdef __CUDA_ARCH__
    float fa = __half2float(a);
    return __float2half(tanhf(fa));
#else // __CUDA_ARCH__
    return THC_float2half(tanhf(THC_half2float(a)));
#endif
  }

  static inline __host__ __device__ half abs(half a) {
#ifdef __CUDA_ARCH__
    float fa = __half2float(a);
    return __float2half(fabs(fa));
#else // __CUDA_ARCH__
    return THC_float2half(fabs(THC_half2float(a)));
#endif
  }

  static inline __host__ __device__ half round(half a) {
#ifdef __CUDA_ARCH__
    float fa = __half2float(a);
    return __float2half(roundf(fa));
#else // __CUDA_ARCH__
    return THC_float2half(roundf(THC_half2float(a)));
#endif
  }

  // Fractional part: a - trunc(a).
  // BUGFIX: the host path originally used floorf, which disagrees with the
  // device path above and with the float/double specializations
  // (a - truncf(a) / a - ::trunc(a)) for negative inputs.
  static inline __host__ __device__ half frac(half a) {
#ifdef __CUDA_ARCH__
    float fa = __half2float(a);
    return __float2half(fa - truncf(fa));
#else // __CUDA_ARCH__
    float fa = THC_half2float(a);
    return THC_float2half(fa - truncf(fa));
#endif
  }

  // Reciprocal (1/a).
  static inline __host__ __device__ half cinv(half a) {
#ifdef __CUDA_ARCH__
    float fa = __half2float(a);
    return __float2half(1.0f / fa);
#else // __CUDA_ARCH__
    return THC_float2half(1.0f /
THC_half2float(a));
#endif
  }

  static inline __host__ __device__ half add(half a, half b) {
#ifdef __CUDA_ARCH__
#ifdef CUDA_HALF_INSTRUCTIONS
    return __hadd(a, b);
#else
    float fa = __half2float(a);
    float fb = __half2float(b);
    return __float2half( fa + fb );
#endif
#else // __CUDA_ARCH__
    return THC_float2half(THC_half2float(a) + THC_half2float(b));
#endif
  }

  // No native fp16 divide instruction: always goes through float.
  static inline __host__ __device__ half div(half a, half b) {
#ifdef __CUDA_ARCH__
    float fa = __half2float(a);
    float fb = __half2float(b);
    return __float2half( fa / fb );
#else // __CUDA_ARCH__
    return THC_float2half(THC_half2float(a) / THC_half2float(b));
#endif
  }

  static inline __host__ __device__ half mul(half a, half b) {
#ifdef __CUDA_ARCH__
#ifdef CUDA_HALF_INSTRUCTIONS
    return __hmul(a, b);
#else
    float fa = __half2float(a);
    float fb = __half2float(b);
    return __float2half( fa * fb );
#endif
#else // __CUDA_ARCH__
    return THC_float2half(THC_half2float(a) * THC_half2float(b));
#endif
  }

  static inline __host__ __device__ half sub(half a, half b) {
#ifdef __CUDA_ARCH__
#ifdef CUDA_HALF_INSTRUCTIONS
    return __hsub(a, b);
#else
    float fa = __half2float(a);
    float fb = __half2float(b);
    return __float2half( fa - fb );
#endif
#else // __CUDA_ARCH__
    return THC_float2half(THC_half2float(a) - THC_half2float(b));
#endif
  }

  static inline __host__ __device__ half pow(half a, half b) {
#ifdef __CUDA_ARCH__
    float fa = __half2float(a);
    float fb = __half2float(b);
    return __float2half(powf(fa, fb));
#else // __CUDA_ARCH__
    return THC_float2half(powf(THC_half2float(a), THC_half2float(b)));
#endif
  }
};
#endif

/// float specialization: thin wrappers over the single-precision math
/// library (`*f` variants), usable from both host and device.
template <>
struct THCNumerics<float> {
  static inline __host__ __device__ float min() { return -FLT_MAX; }
  static inline __host__ __device__ float max() { return FLT_MAX; }

  static inline __host__ __device__ bool lt(float a, float b) { return a < b; }
  static inline __host__ __device__ bool le(float a, float b) { return a <= b; }
  static inline __host__ __device__ bool gt(float a, float b) { return a > b; }
  static inline __host__ __device__ bool ge(float a, float b) { return a >= b; }
  static inline __host__ __device__ bool eq(float a, float b) { return a == b; }
  static inline __host__ __device__ bool ne(float a, float b) { return a != b; }

  static inline __host__ __device__ float lgamma(float a) { return lgammaf(a);}
  static inline __host__ __device__ float exp  (float a) { return   expf(a); }
  static inline __host__ __device__ float exp10(float a) { return exp10f(a); }
  static inline __host__ __device__ float log  (float a) { return   logf(a); }
  static inline __host__ __device__ float log1p(float a) { return log1pf(a); }
  static inline __host__ __device__ float cos  (float a) { return   cosf(a); }
  static inline __host__ __device__ float sin  (float a) { return   sinf(a); }
  static inline __host__ __device__ float sqrt (float a) { return  sqrtf(a); }
  static inline __host__ __device__ float rsqrt(float a) { return rsqrtf(a); }
  static inline __host__ __device__ float ceil (float a) { return  ceilf(a); }
  static inline __host__ __device__ float floor(float a) { return floorf(a); }
  static inline __host__ __device__ float trunc(float a) { return truncf(a); }
  static inline __host__ __device__ float neg  (float a) { return        -a; }
  static inline __host__ __device__ float acos (float a) { return  acosf(a); }
  static inline __host__ __device__ float cosh (float a) { return  coshf(a); }
  static inline __host__ __device__ float acosh(float a) { return acoshf(a); }
  static inline __host__ __device__ float asin (float a) { return  asinf(a); }
  static inline __host__ __device__ float sinh (float a) { return  sinhf(a); }
  static inline __host__ __device__ float asinh(float a) { return asinhf(a); }
  static inline __host__ __device__ float tan  (float a) { return   tanf(a); }
  static inline __host__ __device__ float atan (float a) { return  atanf(a); }
  static inline __host__ __device__ float tanh (float a) { return  tanhf(a); }
  static inline __host__ __device__ float abs  (float a) { return   fabs(a); }
  static inline __host__ __device__ float round(float a) { return roundf(a); }
  static inline __host__ __device__ float frac (float a) { return a - truncf(a); }
  static inline __host__ __device__ float cinv (float a) { return 1.0f / a; }

  static inline __host__ __device__ float add  (float a, float b) { return a + b; }
  static inline __host__ __device__ float div  (float a, float b) { return a / b; }
  static inline __host__ __device__ float mul  (float a, float b) { return a * b; }
  static inline __host__ __device__ float sub  (float a, float b) { return a - b; }
  static inline __host__ __device__ float pow  (float a, float b) { return powf(a, b); }
};

/// double specialization: mirrors the float specialization with the
/// double-precision math functions.
template <>
struct THCNumerics<double> {
  static inline __host__ __device__ double min() { return -DBL_MAX; }
  static inline __host__ __device__ double max() { return DBL_MAX; }

  static inline __host__ __device__ bool lt(double a, double b) { return a < b; }
  static inline __host__ __device__ bool le(double a, double b) { return a <= b; }
  static inline __host__ __device__ bool gt(double a, double b) { return a > b; }
  static inline __host__ __device__ bool ge(double a, double b) { return a >= b; }
  static inline __host__ __device__ bool eq(double a, double b) { return a == b; }
  static inline __host__ __device__ bool ne(double a, double b) { return a != b; }

  static inline __host__ __device__ double lgamma(double a) { return ::lgamma(a);}
  static inline __host__ __device__ double exp  (double a) { return   ::exp(a); }
  static inline __host__ __device__ double exp10(double a) { return ::exp10(a); }
  static inline __host__ __device__ double log  (double a) { return   ::log(a); }
  static inline __host__ __device__ double log1p(double a) { return ::log1p(a); }
  static inline __host__ __device__ double cos  (double a) { return   ::cos(a); }
  static inline __host__ __device__ double sin  (double a) { return   ::sin(a); }
  static inline __host__ __device__ double sqrt (double a) { return  ::sqrt(a); }
  static inline __host__ __device__ double rsqrt(double a) { return ::rsqrt(a); }
  static inline __host__ __device__ double ceil (double a) { return  ::ceil(a); }
  static inline __host__ __device__ double floor(double a) { return ::floor(a); }
  static inline __host__ __device__ double trunc(double a) { return ::trunc(a); }
  static inline __host__ __device__ double neg  (double a) { return         -a; }
  static inline __host__ __device__ double acos (double a) { return  ::acos(a); }
  static inline __host__ __device__ double cosh (double a) { return  ::cosh(a); }
  static inline __host__ __device__ double acosh(double a) { return ::acosh(a); }
  static inline __host__ __device__ double asin (double a) { return  ::asin(a); }
  static inline __host__ __device__ double sinh (double a) { return  ::sinh(a); }
  static inline __host__ __device__ double asinh(double a) { return ::asinh(a); }
  static inline __host__ __device__ double tan  (double a) { return   ::tan(a); }
  static inline __host__ __device__ double atan (double a) { return  ::atan(a); }
  static inline __host__ __device__ double tanh (double a) { return  ::tanh(a); }
  static inline __host__ __device__ double abs  (double a) { return   ::abs(a); }
  static inline __host__ __device__ double round(double a) { return ::round(a); }
  static inline __host__ __device__ double frac (double a) { return a - ::trunc(a); }
  static inline __host__ __device__ double cinv (double a) { return 1.0 / a; }

  static inline __host__ __device__ double add  (double a, double b) { return a + b; }
  static inline __host__ __device__ double div  (double a, double b) { return a / b; }
  static inline __host__ __device__ double mul  (double a, double b) { return a * b; }
  static inline __host__ __device__ double sub  (double a, double b) { return a - b; }
  static inline __host__ __device__ double pow  (double a, double b) { return ::pow(a, b); }
};

/// `half` has some type conversion issues associated with it, since it
/// is a struct without a constructor/implicit conversion constructor.
/// We use this to convert scalar values to the given type that the
/// tensor expects.
template <typename In, typename Out>
struct ScalarConvert {
  // Generic case: a plain C-style cast suffices.
  static __host__ __device__ Out to(const In v) { return (Out) v; }
};

#ifdef CUDA_HALF_TENSOR
// half -> Out: convert through float first, using the device or host
// conversion routine as appropriate.
template <typename Out>
struct ScalarConvert<half, Out> {
  static __host__ __device__ Out to(const half v) {
#ifdef __CUDA_ARCH__
    return (Out) __half2float(v);
#else
    return (Out) THC_half2float(v);
#endif
  }
};

// In -> half: cast to float, then pack to half.
template <typename In>
struct ScalarConvert<In, half> {
  static __host__ __device__ half to(const In v) {
#ifdef __CUDA_ARCH__
    return __float2half((float) v);
#else
    return THC_float2half((float) v);
#endif
  }
};

// half -> half: identity (disambiguates the two partial specializations).
template <>
struct ScalarConvert<half, half> {
  static __host__ __device__ half to(const half v) { return v; }
};

#endif
#endif // THC_NUMERICS_INC
the_stack
#include "flowfilter/gpu/util.h"
#include "flowfilter/gpu/error.h"
#include "flowfilter/gpu/update.h"
#include "flowfilter/gpu/device/update_k.h"

namespace flowfilter {
namespace gpu {

//###############################################
// FlowUpdate
//###############################################

/**
 * Default constructor.  Creates an unconfigured stage; inputs must be set
 * and configure() called before compute() may run.
 */
FlowUpdate::FlowUpdate() :
    Stage() {

    __configured = false;
    __inputFlowSet = false;
    __inputImageSet = false;
    __inputImageGradientSet = false;
    __gamma = 1.0;
    __maxflow = 1.0;
}

/**
 * Fully-initializing constructor: validates and wires all inputs and
 * parameters, then configures the stage, ready for compute().
 */
FlowUpdate::FlowUpdate(GPUImage inputFlow,
                       GPUImage inputImage,
                       GPUImage inputImageGradient,
                       const float gamma,
                       const float maxflow) :
    Stage() {

    __configured = false;
    __inputFlowSet = false;
    __inputImageSet = false;
    __inputImageGradientSet = false;
    setGamma(gamma);
    setMaxFlow(maxflow);
    setInputFlow(inputFlow);
    setInputImage(inputImage);
    setInputImageGradient(inputImageGradient);
    configure();
}

FlowUpdate::~FlowUpdate() {
    // nothing to do...
}

/**
 * Verifies all inputs are set and dimensionally consistent, allocates the
 * output buffers, and computes the kernel launch configuration.
 * Throws std::exception on any validation failure.
 */
void FlowUpdate::configure() {

    if(!__inputFlowSet) {
        std::cerr << "ERROR: FlowUpdate::configure(): input flow not set" << std::endl;
        throw std::exception();
    }

    if(!__inputImageSet) {
        std::cerr << "ERROR: FlowUpdate::configure(): input image not set" << std::endl;
        throw std::exception();
    }

    if(!__inputImageGradientSet) {
        std::cerr << "ERROR: FlowUpdate::configure(): input image gradient not set" << std::endl;
        throw std::exception();
    }

    int height = __inputFlow.height();
    int width = __inputFlow.width();

    // verify that height and width of inputs are all the same
    if(height != __inputImage.height() || height != __inputImageGradient.height()
        || width != __inputImage.width() || width != __inputImageGradient.width()) {

        std::cerr << "ERROR: FlowUpdate::configure(): input buffers do not match height and width" << std::endl;
        throw std::exception();
    }

    // output buffers: 2-channel float flow and 1-channel float image
    __flowUpdated = GPUImage(height, width, 2, sizeof(float));
    __imageUpdated = GPUImage(height, width, 1, sizeof(float));

    // configure block and grid sizes
    __block = dim3(32, 32, 1);
    configureKernelGrid(height, width, __block, __grid);

    __configured = true;
}

/**
 * Launches the flow-update kernel on the stage's stream.
 * Exits the process if the stage has not been configured.
 */
void FlowUpdate::compute() {

    startTiming();

    if(!__configured) {
        std::cerr << "ERROR: FlowUpdate::compute() stage not configured." << std::endl;
        exit(-1);
    }

    // NOTE(review): __imageUpdated is passed both as the previous image
    // estimate (3rd argument) and as the updated-image output (5th argument),
    // i.e. the kernel updates it in place across calls — verify against the
    // flowUpdate_k parameter order in update_k.h.
    flowUpdate_k<<<__grid, __block, 0, __stream>>>(
        __inputImage.wrap<float>(),
        __inputImageGradient.wrap<float2>(),
        __imageUpdated.wrap<float>(),
        __inputFlow.wrap<float2>(),
        __imageUpdated.wrap<float>(),
        __flowUpdated.wrap<float2>(),
        __gamma, __maxflow);

    stopTiming();
}

float FlowUpdate::getGamma() const {
    return __gamma;
}

/** Sets temporal regularization gain; must be strictly positive. */
void FlowUpdate::setGamma(const float gamma) {

    if(gamma <= 0) {
        std::cerr << "ERROR: FlowUpdate::setGamma(): gamma should be greater than zero: " << gamma << std::endl;
        throw std::exception();
    }

    __gamma = gamma;
}

float FlowUpdate::getMaxFlow() const {
    return __maxflow;
}

void FlowUpdate::setMaxFlow(const float maxflow) {
    __maxflow = maxflow;
}

/** Validates and stores the input optical-flow buffer (2 channels, float). */
void FlowUpdate::setInputFlow(GPUImage inputFlow) {

    if(inputFlow.depth() != 2) {
        std::cerr << "ERROR: FlowUpdate::setInputFlow(): input flow should have depth 2: " << inputFlow.depth() << std::endl;
        throw std::exception();
    }

    if(inputFlow.itemSize() != 4) {
        std::cerr << "ERROR: FlowUpdate::setInputFlow(): input flow should have item size 4: " << inputFlow.itemSize() << std::endl;
        throw std::exception();
    }

    __inputFlow = inputFlow;
    __inputFlowSet = true;
}

/** Validates and stores the input brightness image (1 channel, float). */
void FlowUpdate::setInputImage(GPUImage image) {

    if(image.depth() != 1) {
        std::cerr << "ERROR: FlowUpdate::setInputImage(): input image should have depth 1: " << image.depth() << std::endl;
        throw std::exception();
    }

    if(image.itemSize() != sizeof(float)) {
        std::cerr << "ERROR: FlowUpdate::setInputImage(): input image should have item size 4: " << image.itemSize() << std::endl;
        throw std::exception();
    }

    __inputImage = image;
    __inputImageSet = true;
}

/** Validates and stores the input image gradient (2 channels, float). */
void FlowUpdate::setInputImageGradient(GPUImage imageGradient) {

    if(imageGradient.depth() != 2) {
        std::cerr << "ERROR: FlowUpdate::setInputImageGradient(): input image gradient should have depth 2: " << imageGradient.depth() << std::endl;
        throw std::exception();
    }

    if(imageGradient.itemSize() != sizeof(float)) {
        std::cerr << "ERROR: FlowUpdate::setInputImageGradient(): input image gradient should have item size 4: " << imageGradient.itemSize() << std::endl;
        throw std::exception();
    }

    __inputImageGradient = imageGradient;
    __inputImageGradientSet = true;
}

GPUImage FlowUpdate::getUpdatedFlow() {
    return __flowUpdated;
}

GPUImage FlowUpdate::getUpdatedImage() {
    return __imageUpdated;
}


//###############################################
// DeltaFlowUpdate
//###############################################

/** Default constructor.  Creates an unconfigured stage. */
DeltaFlowUpdate::DeltaFlowUpdate() :
    Stage() {

    __gamma = 1.0;
    __maxflow = 1.0;
    __configured = false;
    __inputDeltaFlowSet = false;
    __inputImageOldSet = false;
    __inputFlowSet = false;
    __inputImageSet = false;
    __inputImageGradientSet = false;
}

/**
 * Fully-initializing constructor: validates and wires all inputs and
 * parameters, then configures the stage.
 */
DeltaFlowUpdate::DeltaFlowUpdate(GPUImage inputFlow,
                                 GPUImage inputDeltaFlow,
                                 GPUImage inputImageOld,
                                 GPUImage inputImage,
                                 GPUImage inputImageGradient,
                                 const float gamma,
                                 const float maxflow) :
    Stage() {

    __configured = false;
    __inputDeltaFlowSet = false;
    __inputImageOldSet = false;
    __inputFlowSet = false;
    __inputImageSet = false;
    __inputImageGradientSet = false;
    setGamma(gamma);
    setMaxFlow(maxflow);
    setInputDeltaFlow(inputDeltaFlow);
    setInputFlow(inputFlow);
    setInputImageOld(inputImageOld);
    setInputImage(inputImage);
    setInputImageGradient(inputImageGradient);
    configure();
}

DeltaFlowUpdate::~DeltaFlowUpdate() {
    // nothing to do
}

/**
 * Verifies all five inputs are set and dimensionally consistent, builds the
 * texture used to sample the upsampled flow, allocates output buffers, and
 * computes the kernel launch configuration.
 */
void DeltaFlowUpdate::configure() {

    if(!__inputFlowSet) {
        std::cerr << "ERROR: DeltaFlowUpdate::configure(): input flow not set" << std::endl;
        throw std::exception();
    }

    if(!__inputDeltaFlowSet) {
        std::cerr << "ERROR: DeltaFlowUpdate::configure(): input delta flow not set" << std::endl;
        throw std::exception();
    }

    if(!__inputImageOldSet) {
        std::cerr << "ERROR: DeltaFlowUpdate::configure(): input image prior not set" << std::endl;
        throw std::exception();
    }

    if(!__inputImageSet) {
        std::cerr << "ERROR: DeltaFlowUpdate::configure(): input image not set" << std::endl;
        throw std::exception();
    }

    if(!__inputImageGradientSet) {
        std::cerr << "ERROR: DeltaFlowUpdate::configure(): input image gradient not set" << std::endl;
        throw std::exception();
    }

    int height = __inputDeltaFlow.height();
    int width = __inputDeltaFlow.width();

    // verify that height and width of inputs are all the same
    if( height != __inputImage.height() || width != __inputImage.width()
        || height != __inputImageGradient.height() || width != __inputImageGradient.width()
        || height != __inputImageOld.height() || width != __inputImageOld.width()) {

        std::cerr << "ERROR: DeltaFlowUpdate::configure(): input buffers do not match height and width" << std::endl;
        throw std::exception();
    }

    // configure texture for reading inputFlow, using normalized texture
    // coordinates with point (nearest-neighbor) filtering
    __inputFlowTexture = GPUTexture(__inputFlow,
        cudaChannelFormatKindFloat,
        cudaAddressModeClamp,
        cudaFilterModePoint,
        cudaReadModeElementType,
        true);

    // outputs
    __flowUpdated = GPUImage(height, width, 2, sizeof(float));
    __deltaFlowUpdated = GPUImage(height, width, 2, sizeof(float));
    __imageUpdated = GPUImage(height, width, 1, sizeof(float));

    // configure block and grid sizes
    __block = dim3(32, 32, 1);
    configureKernelGrid(height, width, __block, __grid);

    __configured = true;
}

/**
 * Launches the delta-flow-update kernel on the stage's stream.
 * Exits the process if the stage has not been configured.
 */
void DeltaFlowUpdate::compute() {

    startTiming();

    if(!__configured) {
        std::cerr << "ERROR: DeltaFlowUpdate::compute() stage not configured." << std::endl;
        exit(-1);
    }

    deltaFlowUpdate_k<<<__grid, __block, 0, __stream>>> (
        __inputImage.wrap<float>(),
        __inputImageGradient.wrap<float2>(),
        __inputImageOld.wrap<float>(),
        __inputDeltaFlow.wrap<float2>(),
        __inputFlowTexture.getTextureObject(),
        __imageUpdated.wrap<float>(),
        __deltaFlowUpdated.wrap<float2>(),
        __flowUpdated.wrap<float2>(),
        __gamma, __maxflow);

    stopTiming();
}

float DeltaFlowUpdate::getGamma() const {
    return __gamma;
}

/** Sets temporal regularization gain; must be strictly positive. */
void DeltaFlowUpdate::setGamma(const float gamma) {

    if(gamma <= 0) {
        std::cerr << "ERROR: DeltaFlowUpdate::setGamma(): gamma should be greater than zero: " << gamma << std::endl;
        throw std::exception();
    }

    __gamma = gamma;
}

float DeltaFlowUpdate::getMaxFlow() const {
    return __maxflow;
}

void DeltaFlowUpdate::setMaxFlow(const float maxflow) {
    __maxflow = maxflow;
}

/** Validates and stores the input optical-flow buffer (2 channels, float). */
void DeltaFlowUpdate::setInputFlow(GPUImage inputFlow) {

    if(inputFlow.depth() != 2) {
        std::cerr << "ERROR: DeltaFlowUpdate::setInputFlow(): input flow should have depth 2: " << inputFlow.depth() << std::endl;
        throw std::exception();
    }

    if(inputFlow.itemSize() != 4) {
        std::cerr << "ERROR: DeltaFlowUpdate::setInputFlow(): input flow should have item size 4: " << inputFlow.itemSize() << std::endl;
        throw std::exception();
    }

    __inputFlow = inputFlow;
    __inputFlowSet = true;
}

/** Validates and stores the input delta-flow buffer (2 channels, float). */
void DeltaFlowUpdate::setInputDeltaFlow(GPUImage inputDeltaFlow) {

    if(inputDeltaFlow.depth() != 2) {
        std::cerr << "ERROR: DeltaFlowUpdate::setInputDeltaFlow(): input delta flow should have depth 2: " << inputDeltaFlow.depth() << std::endl;
        throw std::exception();
    }

    if(inputDeltaFlow.itemSize() != 4) {
        // FIX: message previously said "setInputFlow(): input flow", a
        // copy-paste error that misattributed the failure to the wrong setter.
        std::cerr << "ERROR: DeltaFlowUpdate::setInputDeltaFlow(): input delta flow should have item size 4: " << inputDeltaFlow.itemSize() << std::endl;
        throw std::exception();
    }

    __inputDeltaFlow = inputDeltaFlow;
    __inputDeltaFlowSet = true;
}

/** Validates and stores the prior brightness image (1 channel, float). */
void DeltaFlowUpdate::setInputImageOld(GPUImage image) {

    if(image.depth() != 1) {
        std::cerr << "ERROR: DeltaFlowUpdate::setInputImageOld(): input image should have depth 1: " << image.depth() << std::endl;
        throw std::exception();
    }

    if(image.itemSize() != sizeof(float)) {
        std::cerr << "ERROR: DeltaFlowUpdate::setInputImageOld(): input image should have item size 4: " << image.itemSize() << std::endl;
        throw std::exception();
    }

    __inputImageOld = image;
    __inputImageOldSet = true;
}

/** Validates and stores the input brightness image (1 channel, float). */
void DeltaFlowUpdate::setInputImage(GPUImage image) {

    if(image.depth() != 1) {
        std::cerr << "ERROR: DeltaFlowUpdate::setInputImage(): input image should have depth 1: " << image.depth() << std::endl;
        throw std::exception();
    }

    if(image.itemSize() != sizeof(float)) {
        std::cerr << "ERROR: DeltaFlowUpdate::setInputImage(): input image should have item size 4: " << image.itemSize() << std::endl;
        throw std::exception();
    }

    __inputImage = image;
    __inputImageSet = true;
}

/** Validates and stores the input image gradient (2 channels, float). */
void DeltaFlowUpdate::setInputImageGradient(GPUImage imageGradient) {

    if(imageGradient.depth() != 2) {
        std::cerr << "ERROR: DeltaFlowUpdate::setInputImageGradient(): input image gradient should have depth 2: " << imageGradient.depth() << std::endl;
        throw std::exception();
    }

    if(imageGradient.itemSize() != sizeof(float)) {
        std::cerr << "ERROR: DeltaFlowUpdate::setInputImageGradient(): input image gradient should have item size 4: " << imageGradient.itemSize() << std::endl;
        throw std::exception();
    }

    __inputImageGradient = imageGradient;
    __inputImageGradientSet = true;
}

GPUImage DeltaFlowUpdate::getUpdatedFlow() {
    return __flowUpdated;
}

GPUImage DeltaFlowUpdate::getUpdatedDeltaFlow() {
    return __deltaFlowUpdated;
}

GPUImage DeltaFlowUpdate::getUpdatedImage() {
    return __imageUpdated;
}

}; // namespace gpu
}; // namespace flowfilter
the_stack
* \file * cub::DeviceReduce provides device-wide, parallel operations for computing a reduction across a sequence of data items residing within global memory. */ #pragma once #include <stdio.h> #include <iterator> #include "dispatch/device_reduce_dispatch.cuh" #include "../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /** * \brief DeviceReduce provides device-wide, parallel operations for computing a reduction across a sequence of data items residing within global memory. ![](reduce_logo.png) * \ingroup DeviceModule * * \par Overview * A <a href="http://en.wikipedia.org/wiki/Reduce_(higher-order_function)"><em>reduction</em></a> (or <em>fold</em>) * uses a binary combining operator to compute a single aggregate from a sequence of input elements. * * \par Usage Considerations * \cdp_class{DeviceReduce} * * \par Performance * \linear_performance{reduction, reduce-by-key, and run-length encode} * * \par * The following chart illustrates DeviceReduce::Sum * performance across different CUDA architectures for \p int32 keys. * * \image html reduce_int32.png * * \par * The following chart illustrates DeviceReduce::ReduceByKey (summation) * performance across different CUDA architectures for \p fp32 * values. Segments are identified by \p int32 keys, and have lengths uniformly sampled from [1,1000]. * * \image html reduce_by_key_fp32_len_500.png * * \par * The following chart illustrates DeviceReduce::RunLengthEncode performance across * different CUDA architectures for \p int32 items. * Segments have lengths uniformly sampled from [1,1000]. * * \image html rle_int32_len_500.png * * \par * \plots_below * * */ struct DeviceReduce { /** * \brief Computes a device-wide reduction using the specified binary \p reduction_op functor. * * \par * - Does not support non-commutative reduction operators. * - \devicestorage * - \cdp * * \par Performance * Performance is typically similar to DeviceReduce::Sum. 
* * \par Snippet * The code snippet below illustrates a custom min reduction of a device vector of \p int items. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_radix_sort.cuh> * * // CustomMin functor * struct CustomMin * { * template <typename T> * __host__ __device__ __forceinline__ * T operator()(const T &a, const T &b) const { * return (b < a) ? b : a; * } * }; * * // Declare, allocate, and initialize device pointers for input and output * int num_items; // e.g., 7 * int *d_in; // e.g., [8, 6, 7, 5, 3, 0, 9] * int *d_out; // e.g., [ ] * CustomMin min_op; * ... * * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceReduce::Reduce(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, min_op); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run reduction * cub::DeviceReduce::Reduce(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, min_op); * * // d_out <-- [0] * * \endcode * * \tparam InputIterator <b>[inferred]</b> Random-access input iterator type for reading input items \iterator * \tparam OutputIterator <b>[inferred]</b> Output iterator type for recording the reduced aggregate \iterator * \tparam ReductionOp <b>[inferred]</b> Binary reduction functor type having member <tt>T operator()(const T &a, const T &b)</tt> */ template < typename InputIterator, typename OutputIterator, typename ReductionOp> __host__ __device__ static cudaError_t Reduce( void *d_temp_storage, ///< [in] %Device allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. 
size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation InputIterator d_in, ///< [in] Pointer to the input sequence of data items OutputIterator d_out, ///< [out] Pointer to the output aggregate int num_items, ///< [in] Total number of input items (i.e., length of \p d_in) ReductionOp reduction_op, ///< [in] Binary reduction functor (e.g., an instance of cub::Sum, cub::Min, cub::Max, etc.) cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. { // Signed integer type for global offsets typedef int Offset; // Dispatch type typedef DeviceReduceDispatch<InputIterator, OutputIterator, Offset, ReductionOp> DeviceReduceDispatch; return DeviceReduceDispatch::Dispatch( d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, reduction_op, stream, debug_synchronous); } /** * \brief Computes a device-wide sum using the addition ('+') operator. * * \par * - Does not support non-commutative reduction operators. * - \devicestorage * - \cdp * * \par Performance * The following charts illustrate saturated reduction (sum) performance across different * CUDA architectures for \p int32 and \p int64 items, respectively. * * \image html reduce_int32.png * \image html reduce_int64.png * * \par Snippet * The code snippet below illustrates the sum reduction of a device vector of \p int items. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_radix_sort.cuh> * * // Declare, allocate, and initialize device pointers for input and output * int num_items; // e.g., 7 * int *d_in; // e.g., [8, 6, 7, 5, 3, 0, 9] * int *d_out; // e.g., [ ] * ... 
* * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_sum, num_items); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run sum-reduction * cub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_sum, num_items); * * // d_out <-- [38] * * \endcode * * \tparam InputIterator <b>[inferred]</b> Random-access input iterator type for reading input items \iterator * \tparam OutputIterator <b>[inferred]</b> Output iterator type for recording the reduced aggregate \iterator */ template < typename InputIterator, typename OutputIterator> __host__ __device__ static cudaError_t Sum( void *d_temp_storage, ///< [in] %Device allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation InputIterator d_in, ///< [in] Pointer to the input sequence of data items OutputIterator d_out, ///< [out] Pointer to the output aggregate int num_items, ///< [in] Total number of input items (i.e., length of \p d_in) cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. 
{ // Signed integer type for global offsets typedef int Offset; // Dispatch type typedef DeviceReduceDispatch<InputIterator, OutputIterator, Offset, cub::Sum> DeviceReduceDispatch; return DeviceReduceDispatch::Dispatch( d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, cub::Sum(), stream, debug_synchronous); } /** * \brief Computes a device-wide minimum using the less-than ('<') operator. * * \par * - Does not support non-commutative minimum operators. * - \devicestorage * - \cdp * * \par Performance * Performance is typically similar to DeviceReduce::Sum. * * \par Snippet * The code snippet below illustrates the min-reduction of a device vector of \p int items. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_radix_sort.cuh> * * // Declare, allocate, and initialize device pointers for input and output * int num_items; // e.g., 7 * int *d_in; // e.g., [8, 6, 7, 5, 3, 0, 9] * int *d_out; // e.g., [ ] * ... * * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceReduce::Min(d_temp_storage, temp_storage_bytes, d_in, d_min, num_items); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run min-reduction * cub::DeviceReduce::Min(d_temp_storage, temp_storage_bytes, d_in, d_min, num_items); * * // d_out <-- [0] * * \endcode * * \tparam InputIterator <b>[inferred]</b> Random-access input iterator type for reading input items \iterator * \tparam OutputIterator <b>[inferred]</b> Output iterator type for recording the reduced aggregate \iterator */ template < typename InputIterator, typename OutputIterator> __host__ __device__ static cudaError_t Min( void *d_temp_storage, ///< [in] %Device allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. 
size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation InputIterator d_in, ///< [in] Pointer to the input sequence of data items OutputIterator d_out, ///< [out] Pointer to the output aggregate int num_items, ///< [in] Total number of input items (i.e., length of \p d_in) cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. { // Signed integer type for global offsets typedef int Offset; // Dispatch type typedef DeviceReduceDispatch<InputIterator, OutputIterator, Offset, cub::Min> DeviceReduceDispatch; return DeviceReduceDispatch::Dispatch( d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, cub::Min(), stream, debug_synchronous); } /** * \brief Finds the first device-wide minimum using the less-than ('<') operator, also returning the index of that item. * * \par * Assuming the input \p d_in has value type \p T, the output \p d_out must have value type * <tt>ItemOffsetPair<T, int></tt>. The minimum value is written to <tt>d_out.value</tt> and its * location in the input array is written to <tt>d_out.offset</tt>. * * \par * - Does not support non-commutative minimum operators. * - \devicestorage * - \cdp * * \par Performance * Performance is typically similar to DeviceReduce::Sum. * * \par Snippet * The code snippet below illustrates the argmin-reduction of a device vector of \p int items. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_radix_sort.cuh> * * // Declare, allocate, and initialize device pointers for input and output * int num_items; // e.g., 7 * int *d_in; // e.g., [8, 6, 7, 5, 3, 0, 9] * ItemOffsetPair<int, int> *d_out; // e.g., [{ , }] * ... 
* * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceReduce::ArgMin(d_temp_storage, temp_storage_bytes, d_in, d_argmin, num_items); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run argmin-reduction * cub::DeviceReduce::ArgMin(d_temp_storage, temp_storage_bytes, d_in, d_argmin, num_items); * * // d_out <-- [{0, 5}] * * \endcode * * \tparam InputIterator <b>[inferred]</b> Random-access input iterator type for reading input items (of some type \p T) \iterator * \tparam OutputIterator <b>[inferred]</b> Output iterator type for recording the reduced aggregate (having value type <tt>ItemOffsetPair<T, int></tt>) \iterator */ template < typename InputIterator, typename OutputIterator> __host__ __device__ static cudaError_t ArgMin( void *d_temp_storage, ///< [in] %Device allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation InputIterator d_in, ///< [in] Pointer to the input sequence of data items OutputIterator d_out, ///< [out] Pointer to the output aggregate int num_items, ///< [in] Total number of input items (i.e., length of \p d_in) cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. 
{ // Signed integer type for global offsets typedef int Offset; // Wrapped input iterator typedef ArgIndexInputIterator<InputIterator, int> ArgIndexInputIterator; ArgIndexInputIterator d_argmin_in(d_in, 0); // Dispatch type typedef DeviceReduceDispatch<ArgIndexInputIterator, OutputIterator, Offset, cub::ArgMin> DeviceReduceDispatch; return DeviceReduceDispatch::Dispatch( d_temp_storage, temp_storage_bytes, d_argmin_in, d_out, num_items, cub::ArgMin(), stream, debug_synchronous); } /** * \brief Computes a device-wide maximum using the greater-than ('>') operator. * * \par * - Does not support non-commutative maximum operators. * - \devicestorage * - \cdp * * \par Performance * Performance is typically similar to DeviceReduce::Sum. * * \par Snippet * The code snippet below illustrates the max-reduction of a device vector of \p int items. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_radix_sort.cuh> * * // Declare, allocate, and initialize device pointers for input and output * int num_items; // e.g., 7 * int *d_in; // e.g., [8, 6, 7, 5, 3, 0, 9] * int *d_out; // e.g., [ ] * ... * * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceReduce::Max(d_temp_storage, temp_storage_bytes, d_in, d_max, num_items); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run max-reduction * cub::DeviceReduce::Max(d_temp_storage, temp_storage_bytes, d_in, d_max, num_items); * * // d_out <-- [9] * * \endcode * * \tparam InputIterator <b>[inferred]</b> Random-access input iterator type for reading input items \iterator * \tparam OutputIterator <b>[inferred]</b> Output iterator type for recording the reduced aggregate \iterator */ template < typename InputIterator, typename OutputIterator> __host__ __device__ static cudaError_t Max( void *d_temp_storage, ///< [in] %Device allocation of temporary storage. 
When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation InputIterator d_in, ///< [in] Pointer to the input sequence of data items OutputIterator d_out, ///< [out] Pointer to the output aggregate int num_items, ///< [in] Total number of input items (i.e., length of \p d_in) cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. { // Signed integer type for global offsets typedef int Offset; // Dispatch type typedef DeviceReduceDispatch<InputIterator, OutputIterator, Offset, cub::Max> DeviceReduceDispatch; return DeviceReduceDispatch::Dispatch( d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, cub::Max(), stream, debug_synchronous); } /** * \brief Finds the first device-wide maximum using the greater-than ('>') operator, also returning the index of that item * * \par * Assuming the input \p d_in has value type \p T, the output \p d_out must have value type * <tt>ItemOffsetPair<T, int></tt>. The maximum value is written to <tt>d_out.value</tt> and its * location in the input array is written to <tt>d_out.offset</tt>. * * \par * - Does not support non-commutative maximum operators. * - \devicestorage * - \cdp * * \par Performance * Performance is typically similar to DeviceReduce::Sum. * * \par Snippet * The code snippet below illustrates the argmax-reduction of a device vector of \p int items. 
* \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_reduce.cuh> * * // Declare, allocate, and initialize device pointers for input and output * int num_items; // e.g., 7 * int *d_in; // e.g., [8, 6, 7, 5, 3, 0, 9] * ItemOffsetPair<int, int> *d_out; // e.g., [{ , }] * ... * * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceReduce::ArgMax(d_temp_storage, temp_storage_bytes, d_in, d_argmax, num_items); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run argmax-reduction * cub::DeviceReduce::ArgMax(d_temp_storage, temp_storage_bytes, d_in, d_argmax, num_items); * * // d_out <-- [{9, 6}] * * \endcode * * \tparam InputIterator <b>[inferred]</b> Random-access input iterator type for reading input items (of some type \p T) \iterator * \tparam OutputIterator <b>[inferred]</b> Output iterator type for recording the reduced aggregate (having value type <tt>ItemOffsetPair<T, int></tt>) \iterator */ template < typename InputIterator, typename OutputIterator> __host__ __device__ static cudaError_t ArgMax( void *d_temp_storage, ///< [in] %Device allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation InputIterator d_in, ///< [in] Pointer to the input sequence of data items OutputIterator d_out, ///< [out] Pointer to the output aggregate int num_items, ///< [in] Total number of input items (i.e., length of \p d_in) cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. 
Default is \p false. { // Signed integer type for global offsets typedef int Offset; // Wrapped input iterator typedef ArgIndexInputIterator<InputIterator, int> ArgIndexInputIterator; ArgIndexInputIterator d_argmax_in(d_in, 0); // Dispatch type typedef DeviceReduceDispatch<ArgIndexInputIterator, OutputIterator, Offset, cub::ArgMax> DeviceReduceDispatch; return DeviceReduceDispatch::Dispatch( d_temp_storage, temp_storage_bytes, d_argmax_in, d_out, num_items, cub::ArgMax(), stream, debug_synchronous); } /** * \brief Reduces segments of values, where segments are demarcated by corresponding runs of identical keys. * * \par * This operation computes segmented reductions using the specified binary * \p reduction_op functor. Each "run" of consecutive, identical keys in \p d_keys_in * is used to identify a corresponding segment of values in \p d_values_in. The first key in * the <em>i</em><sup>th</sup> segment is copied to <tt>d_keys_out[<em>i</em>]</tt>, and * the value aggregate for that segment is written to <tt>d_values_out[<em>i</em>]</tt>. * The total number of segments discovered is written to \p d_num_segments. * * \par * - The <tt>==</tt> equality operator is used to determine whether keys are equivalent * - \devicestorage * - \cdp * * \par Performance * The following chart illustrates reduction-by-key (sum) performance across * different CUDA architectures for \p fp32 and \p fp64 values, respectively. Segments * are identified by \p int32 keys, and have lengths uniformly sampled from [1,1000]. * * \image html reduce_by_key_fp32_len_500.png * \image html reduce_by_key_fp64_len_500.png * * \par * The following charts are similar, but with segment lengths uniformly sampled from [1,10]: * * \image html reduce_by_key_fp32_len_5.png * \image html reduce_by_key_fp64_len_5.png * * \par Snippet * The code snippet below illustrates the segmented reduction of \p int values grouped * by runs of associated \p int keys. 
* \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_reduce.cuh> * * // CustomMin functor * struct CustomMin * { * template <typename T> * __host__ __device__ __forceinline__ * T operator()(const T &a, const T &b) const { * return (b < a) ? b : a; * } * }; * * // Declare, allocate, and initialize device pointers for input and output * int num_items; // e.g., 8 * int *d_keys_in; // e.g., [0, 2, 2, 9, 5, 5, 5, 8] * int *d_values_in; // e.g., [0, 7, 1, 6, 2, 5, 3, 4] * int *d_keys_out; // e.g., [ , , , , , , , ] * int *d_values_out; // e.g., [ , , , , , , , ] * int *d_num_segments; // e.g., [ ] * CustomMin reduction_op; * ... * * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceSelect::ReduceByKey(d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out, d_values_in, d_values_out, d_num_segments, reduction_op, num_items); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run reduce-by-key * cub::DeviceSelect::ReduceByKey(d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out, d_values_in, d_values_out, d_num_segments, reduction_op, num_items); * * // d_keys_out <-- [0, 2, 9, 5, 8] * // d_values_out <-- [0, 1, 6, 2, 4] * // d_num_segments <-- [5] * * \endcode * * \tparam KeyInputIterator <b>[inferred]</b> Random-access input iterator type for reading input keys \iterator * \tparam KeyOutputIterator <b>[inferred]</b> Random-access output iterator type for writing output keys \iterator * \tparam ValueInputIterator <b>[inferred]</b> Random-access input iterator type for reading input values \iterator * \tparam ValueOutputIterator <b>[inferred]</b> Random-access output iterator type for writing output values \iterator * \tparam NumSegmentsIterator <b>[inferred]</b> Output iterator type for recording the number of segments encountered \iterator * \tparam ReductionOp <b>[inferred]</b> Binary reduction functor type having member 
<tt>T operator()(const T &a, const T &b)</tt> */ template < typename KeyInputIterator, typename KeyOutputIterator, typename ValueInputIterator, typename ValueOutputIterator, typename NumSegmentsIterator, typename ReductionOp> __host__ __device__ __forceinline__ static cudaError_t ReduceByKey( void *d_temp_storage, ///< [in] %Device allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation KeyInputIterator d_keys_in, ///< [in] Pointer to consecutive runs of input keys KeyOutputIterator d_keys_out, ///< [out] Pointer to output keys (one key per run) ValueInputIterator d_values_in, ///< [in] Pointer to consecutive runs of input values ValueOutputIterator d_values_out, ///< [out] Pointer to output value aggregates (one aggregate per run) NumSegmentsIterator d_num_segments, ///< [out] Pointer to total number of segments ReductionOp reduction_op, ///< [in] Binary reduction functor (e.g., an instance of cub::Sum, cub::Min, cub::Max, etc.) int num_items, ///< [in] Total number of associated key+value pairs (i.e., the length of \p d_in_keys and \p d_in_values) cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false. 
{ typedef int Offset; // Signed integer type for global offsets typedef NullType* FlagIterator; // Flag iterator type (not used) typedef NullType SelectOp; // Selection op (not used) typedef Equality EqualityOp; // Default == operator return DeviceReduceByKeyDispatch<KeyInputIterator, KeyOutputIterator, ValueInputIterator, ValueOutputIterator, NumSegmentsIterator, EqualityOp, ReductionOp, Offset>::Dispatch( d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out, d_values_in, d_values_out, d_num_segments, EqualityOp(), reduction_op, num_items, stream, debug_synchronous); } /** * \brief Counts the segment lengths in the sequence \p d_in, where segments are demarcated by runs of identical values. * * \par * This operation computes a run-length encoding of \p d_in, where segments are identified * by "runs" of consecutive, identical values. The length of the <em>i</em><sup>th</sup> segment * is written to <tt>d_counts_out[<em>i</em>]</tt>. The unique values are also compacted, * i.e., the first value in the <em>i</em><sup>th</sup> segment is copied to * <tt>d_compacted_out[<em>i</em>]</tt>. The total number of segments discovered is written * to \p d_num_segments. * * \par * - The <tt>==</tt> equality operator is used to determine whether values are equivalent * - \devicestorage * - \cdp * * \par Performance * The following charts illustrate saturated encode performance across different * CUDA architectures for \p int32 and \p int64 items, respectively. Segments have * lengths uniformly sampled from [1,1000]. * * \image html rle_int32_len_500.png * \image html rle_int64_len_500.png * * \par * The following charts are similar, but with segment lengths uniformly sampled from [1,10]: * * \image html rle_int32_len_5.png * \image html rle_int64_len_5.png * * \par Snippet * The code snippet below illustrates the run-length encoding of a sequence of \p int values. 
* \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_reduce.cuh> * * // Declare, allocate, and initialize device pointers for input and output * int num_items; // e.g., 8 * int *d_in; // e.g., [0, 2, 2, 9, 5, 5, 5, 8] * int *d_compacted_out; // e.g., [ , , , , , , , ] * int *d_counts_out; // e.g., [ , , , , , , , ] * int *d_num_segments; // e.g., [ ] * ... * * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceSelect::RunLengthEncode(d_temp_storage, temp_storage_bytes, d_in, d_compacted_out, d_counts_out, d_num_segments, num_items); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run encoding * cub::DeviceSelect::RunLengthEncode(d_temp_storage, temp_storage_bytes, d_in, d_compacted_out, d_counts_out, d_num_segments, num_items); * * // d_keys_out <-- [0, 2, 9, 5, 8] * // d_values_out <-- [1, 2, 1, 3, 1] * // d_num_segments <-- [5] * * \endcode * * \tparam InputIterator <b>[inferred]</b> Random-access input iterator type for reading input items \iterator * \tparam OutputIterator <b>[inferred]</b> Random-access output iterator type for writing compacted output items \iterator * \tparam CountsOutputIterator <b>[inferred]</b> Random-access output iterator type for writing output counts \iterator * \tparam NumSegmentsIterator <b>[inferred]</b> Output iterator type for recording the number of segments encountered \iterator */ template < typename InputIterator, typename OutputIterator, typename CountsOutputIterator, typename NumSegmentsIterator> __host__ __device__ __forceinline__ static cudaError_t RunLengthEncode( void *d_temp_storage, ///< [in] %Device allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. 
size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation InputIterator d_in, ///< [in] Pointer to consecutive runs of input keys OutputIterator d_compacted_out, ///< [out] Pointer to output keys (one key per run) CountsOutputIterator d_counts_out, ///< [out] Pointer to output value aggregates (one aggregate per run) NumSegmentsIterator d_num_segments, ///< [out] Pointer to total number of segments int num_items, ///< [in] Total number of associated key+value pairs (i.e., the length of \p d_in_keys and \p d_in_values) cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false. { // Data type of value iterator typedef typename std::iterator_traits<CountsOutputIterator>::value_type Value; typedef int Offset; // Signed integer type for global offsets typedef NullType* FlagIterator; // Flag iterator type (not used) typedef NullType SelectOp; // Selection op (not used) typedef Equality EqualityOp; // Default == operator typedef cub::Sum ReductionOp; // Value reduction operator // Generator type for providing 1s values for run-length reduction typedef ConstantInputIterator<Value, Offset> CountsInputIterator; Value one_val; one_val = 1; return DeviceReduceByKeyDispatch<InputIterator, OutputIterator, CountsInputIterator, CountsOutputIterator, NumSegmentsIterator, EqualityOp, ReductionOp, Offset>::Dispatch( d_temp_storage, temp_storage_bytes, d_in, d_compacted_out, CountsInputIterator(one_val), d_counts_out, d_num_segments, EqualityOp(), ReductionOp(), num_items, stream, debug_synchronous); } }; /** * \example example_device_reduce.cu */ } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
the_stack
#pragma once

#include <limits>
#include <climits>  // for ULONG_MAX / ULLONG_MAX used below (was relied on transitively)
#include <atomic>

#include <gunrock/util/cuda_properties.cuh>
//#include <gunrock/util/types.cuh>

// Default full-warp participation mask for the *_sync warp intrinsics.
// BUGFIX(review): the original header additionally guarded this definition
// with "#ifndef MEMBERBASK" -- a typo for MEMBERMASK -- which redefined
// MEMBERMASK even when the user had already defined it. The typo'd guard
// block has been removed.
#ifndef MEMBERMASK
#define MEMBERMASK 0xffffffffu
#endif

#ifndef WARPSIZE
#define WARPSIZE 32
#endif

// Use the CUDA 9+ explicit-mask warp intrinsics when compiling device code
// for SM30 or newer with a CUDA 9+ toolchain.
#if (__CUDACC_VER_MAJOR__ >= 9 && __CUDA_ARCH__ >= 300) && \
    !defined(USE_SHFL_SYNC)
#define USE_SHFL_SYNC
#endif

// ---------------------------------------------------------------------------
// CUDA 9 warp shuffle / vote wrappers (device intrinsics).
// Dispatch to the *_sync intrinsics when available, otherwise fall back to
// the legacy mask-less intrinsics on SM30+.  On architectures below SM30 the
// shuffles degenerate to returning `var` unchanged.
// ---------------------------------------------------------------------------

/** Shuffle: read `var` from the lane `delta` lanes below the caller. */
template <typename T>
__device__ static __forceinline__ T _shfl_up(T var, unsigned int delta,
                                             int width = WARPSIZE,
                                             unsigned mask = MEMBERMASK) {
#ifdef USE_SHFL_SYNC
  var = __shfl_up_sync(mask, var, delta, width);
#else
#if (__CUDA_ARCH__ >= 300)
  var = __shfl_up(var, delta, width);
#endif
#endif
  return var;
}

/** Shuffle: read `var` from the lane `delta` lanes above the caller. */
template <typename T>
__device__ static __forceinline__ T _shfl_down(T var, unsigned int delta,
                                               int width = WARPSIZE,
                                               unsigned mask = MEMBERMASK) {
#ifdef USE_SHFL_SYNC
  var = __shfl_down_sync(mask, var, delta, width);
#else
#if (__CUDA_ARCH__ >= 300)
  var = __shfl_down(var, delta, width);
#endif
#endif
  return var;
}

/** Shuffle: read `var` from the lane at (own lane XOR lane_mask). */
template <typename T>
__device__ static __forceinline__ T _shfl_xor(T var, int lane_mask,
                                              int width = WARPSIZE,
                                              unsigned mask = MEMBERMASK) {
#ifdef USE_SHFL_SYNC
  var = __shfl_xor_sync(mask, var, lane_mask, width);
#else
#if (__CUDA_ARCH__ >= 300)
  var = __shfl_xor(var, lane_mask, width);
#endif
#endif
  return var;
}

/** Shuffle: broadcast `var` from `source_lane` to the calling lane. */
template <typename T>
__device__ static __forceinline__ T _shfl(T var, int source_lane,
                                          int width = WARPSIZE,
                                          unsigned mask = MEMBERMASK) {
#ifdef USE_SHFL_SYNC
  var = __shfl_sync(mask, var, source_lane, width);
#else
#if (__CUDA_ARCH__ >= 300)
  var = __shfl(var, source_lane, width);
#endif
#endif
  return var;
}

/** Warp vote: bitmask of lanes (in `mask`) whose predicate is nonzero. */
__device__ static __forceinline__ unsigned _ballot(int predicate,
                                                   unsigned mask = MEMBERMASK) {
#ifdef USE_SHFL_SYNC
  return __ballot_sync(mask, predicate);
#else
#if (__CUDA_ARCH__ >= 300)
  return __ballot(predicate);
#else
  // BUGFIX(review): the original fell off the end of a non-void function
  // here (undefined behavior) when compiled for SM < 30.
  return 0;
#endif
#endif
}

/** Warp vote: nonzero iff any participating lane's predicate is nonzero. */
__device__ static __forceinline__ int _any(int predicate,
                                           unsigned mask = MEMBERMASK) {
#ifdef USE_SHFL_SYNC
  return __any_sync(mask, predicate);
#else
#if (__CUDA_ARCH__ >= 300)
  return __any(predicate);
#else
  // BUGFIX(review): missing return on the SM < 30 path in the original.
  return 0;
#endif
#endif
}

/** Warp vote: nonzero iff all participating lanes' predicates are nonzero. */
__device__ static __forceinline__ int _all(int predicate,
                                           unsigned mask = MEMBERMASK) {
#ifdef USE_SHFL_SYNC
  return __all_sync(mask, predicate);
#else
#if (__CUDA_ARCH__ >= 300)
  return __all(predicate);
#else
  // BUGFIX(review): missing return on the SM < 30 path in the original.
  return 0;
#endif
#endif
}

#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 600
// atomic addition from Jon Cohen at NVIDIA.
// Emulates atomicAdd(double*) with a CAS loop on pre-Pascal devices
// (native double atomicAdd arrived with SM60).
__device__ static double atomicAdd(double* addr, double val) {
  double old = *addr, assumed;
  do {
    assumed = old;
    old = __longlong_as_double(
        atomicCAS((unsigned long long int*)addr,
                  __double_as_longlong(assumed),
                  __double_as_longlong(val + assumed)));
  } while (assumed != old);
  return old;
}
#endif

// atomicCAS overloads for types the CUDA runtime does not provide directly;
// each reinterprets the bits as the natively supported width.
__device__ static uint64_t atomicCAS(uint64_t* addr, uint64_t comp,
                                     uint64_t val) {
  return (uint64_t)atomicCAS((unsigned long long*)addr,
                             (unsigned long long)comp,
                             (unsigned long long)val);
}

__device__ static long long atomicCAS(long long* addr, long long comp,
                                      long long val) {
  return (long long)atomicCAS((unsigned long long*)addr,
                              (unsigned long long)comp,
                              (unsigned long long)val);
}

__device__ static float atomicCAS(float* addr, float comp, float val) {
  return __int_as_float(
      atomicCAS((int*)addr, __float_as_int(comp), __float_as_int(val)));
}

__device__ static double atomicCAS(double* addr, double comp, double val) {
  return __longlong_as_double(atomicCAS(
      (long long*)addr, __double_as_longlong(comp), __double_as_longlong(val)));
}

// TODO: verify overflow condition
__device__ static long long atomicAdd(long long* addr, long long val) {
  return (long long)atomicAdd((unsigned long long*)addr,
                              (unsigned long long)val);
}

/* __device__ static uint64_t atomicAdd(uint64_t* addr, uint64_t val) {
  return (uint64_t)atomicAdd((unsigned long long*)addr, (unsigned long
long)val); }*/

// Only provide the `unsigned long` overload where it is the same width as
// `unsigned long long` (otherwise the reinterpretation would be wrong).
#if ULONG_MAX == ULLONG_MAX
__device__ static unsigned long atomicAdd(unsigned long* addr,
                                          unsigned long val) {
  return (unsigned long long)atomicAdd((unsigned long long*)addr,
                                       (unsigned long long)val);
}
#endif

__device__ static uint64_t atomicExch(uint64_t* addr, uint64_t val) {
  return (uint64_t)atomicExch((unsigned long long int*)addr,
                              (unsigned long long int)val);
}

/** 64-bit unsigned atomicMin; CAS-loop emulation below SM35. */
__device__ static uint64_t atomicMin(uint64_t* addr, uint64_t val) {
#if GR_CUDA_ARCH >= 350
  return (uint64_t)atomicMin((unsigned long long int*)addr,
                             (unsigned long long int)val);
#else
  unsigned long long int old = (unsigned long long int)(*addr);
  unsigned long long int expected;
  do {
    expected = old;
    old = atomicCAS((unsigned long long int*)addr, expected,
                    min((unsigned long long int)val, expected));
  } while (expected != old);
  return old;
#endif
}

/** float atomicMin emulated via integer CAS on the raw bits. */
__device__ static float atomicMin(float* addr, float val) {
  int* addr_as_int = (int*)addr;
  int old = *addr_as_int;
  int expected;
  do {
    expected = old;
    old = ::atomicCAS(addr_as_int, expected,
                      __float_as_int(::fminf(val, __int_as_float(expected))));
  } while (expected != old);
  return __int_as_float(old);
}

/** double atomicMin emulated via 64-bit CAS on the raw bits. */
__device__ static double atomicMin(double* addr, double val) {
  long long* addr_as_longlong = (long long*)addr;
  long long old = *addr_as_longlong;
  long long expected;
  do {
    expected = old;
    old = ::atomicCAS(
        addr_as_longlong, expected,
        __double_as_longlong(::fmin(val, __longlong_as_double(expected))));
  } while (expected != old);
  return __longlong_as_double(old);
}

/** float atomicMax emulated via integer CAS on the raw bits. */
__device__ static float atomicMax(float* addr, float val) {
  int* addr_as_int = (int*)addr;
  int old = *addr_as_int;
  int expected;
  do {
    expected = old;
    old = ::atomicCAS(addr_as_int, expected,
                      __float_as_int(::fmaxf(val, __int_as_float(expected))));
  } while (expected != old);
  return __int_as_float(old);
}

/** double atomicMax emulated via 64-bit CAS on the raw bits. */
__device__ static double atomicMax(double* addr, double val) {
  long long* addr_as_longlong = (long long*)addr;
  long long old = *addr_as_longlong;
  long long expected;
  do {
    expected = old;
    old = ::atomicCAS(
        addr_as_longlong, expected,
        __double_as_longlong(::fmax(val, __longlong_as_double(expected))));
  } while (expected != old);
  return __longlong_as_double(old);
}

/** Read-only-cache load on SM35+; plain dereference elsewhere (incl. host). */
template <typename T>
__device__ __host__ __forceinline__ T _ldg(T* addr) {
#ifdef __CUDA_ARCH__
#if GR_CUDA_ARCH >= 350
  return __ldg(addr);
#else
  return *addr;
#endif
#else
  return *addr;
#endif
}

/** Host/device atomic add; OpenMP atomic capture on the host path. */
template <typename T>
__device__ __host__ __forceinline__ T _atomicAdd(T* ptr, const T& val) {
#ifdef __CUDA_ARCH__
  return atomicAdd(ptr, val);
#else
  T retval;
#pragma omp atomic capture
  {
    retval = ptr[0];
    ptr[0] += val;
  }
  return retval;
#endif
}

/** Host/device atomic min; std::atomic CAS loop on the host path. */
template <typename T>
__device__ __host__ __forceinline__ T _atomicMin(T* ptr, const T& val) {
#ifdef __CUDA_ARCH__
  return atomicMin(ptr, val);
#else
  std::atomic<T>* atomic_ptr = reinterpret_cast<std::atomic<T>*>(ptr);
  T old_val = *ptr;
  while (true) {
    bool is_equal = std::atomic_compare_exchange_strong(atomic_ptr, &old_val,
                                                        min(old_val, val));
    if (is_equal) break;
  }
  return old_val;
#endif
}

namespace gunrock {
namespace util {

/**
 * Terminates the calling thread
 */
__device__ __forceinline__ void ThreadExit() { asm("exit;"); }

/**
 * Returns the warp lane ID of the calling thread
 */
__device__ __forceinline__ unsigned int LaneId() {
  unsigned int ret;
  // BUGFIX(review): literal '%' in GNU-style inline asm must be escaped as
  // "%%"; the original "%laneid" is not valid operand syntax.
  asm("mov.u32 %0, %%laneid;" : "=r"(ret));
  return ret;
}

/**
 * The best way to multiply integers (24 effective bits or less)
 */
__device__ __forceinline__ unsigned int FastMul(unsigned int a,
                                                unsigned int b) {
#if __CUDA_ARCH__ >= 200
  return a * b;
#else
  return __umul24(a, b);
#endif
}

/**
 * The best way to multiply integers (24 effective bits or less)
 */
__device__ __forceinline__ int FastMul(int a, int b) {
#if __CUDA_ARCH__ >= 200
  return a * b;
#else
  return __mul24(a, b);
#endif
}

/**
 * Wrapper for performing atomic operations on integers of type size_t,
 * specialized on the byte width of T.
 */
template <typename T, int SizeT = sizeof(T)>
struct AtomicInt;

template <typename T>
struct AtomicInt<T, 4> {
  static __device__ __forceinline__ T Add(T* ptr, T val) {
    return atomicAdd((unsigned int*)ptr, (unsigned int)val);
  }
};

template <typename T>
struct AtomicInt<T, 8> {
  static __device__ __forceinline__ T Add(T* ptr, T val) {
    return atomicAdd((unsigned long long int*)ptr, (unsigned long long int)val);
  }
};

// From Andrew Davidson's dStepping SSSP GPU implementation
// binary search on device, only works for arrays shorter
// than 1024
template <int NT, typename KeyType, typename ArrayType>
__device__ int BinarySearch(KeyType i, ArrayType* queue) {
  int mid = ((NT >> 1) - 1);

  if (NT > 512) mid = queue[mid] > i ? mid - 256 : mid + 256;
  if (NT > 256) mid = queue[mid] > i ? mid - 128 : mid + 128;
  if (NT > 128) mid = queue[mid] > i ? mid - 64 : mid + 64;
  if (NT > 64) mid = queue[mid] > i ? mid - 32 : mid + 32;
  if (NT > 32) mid = queue[mid] > i ? mid - 16 : mid + 16;
  mid = queue[mid] > i ? mid - 8 : mid + 8;
  mid = queue[mid] > i ? mid - 4 : mid + 4;
  mid = queue[mid] > i ? mid - 2 : mid + 2;
  mid = queue[mid] > i ? mid - 1 : mid + 1;
  mid = queue[mid] > i ? mid : mid + 1;

  return mid;
}

}  // namespace util
}  // namespace gunrock

// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
the_stack
// Index setup shared by the 4-state kernels below.  KW_LOCAL_ID_* /
// KW_GROUP_ID_* are framework macros (presumably thread/block or
// work-item/work-group IDs for CUDA vs. OpenCL -- defined elsewhere; TODO
// confirm).  Each thread handles one (state, pattern) pair: tx encodes
// state = tx & 3 and pat = tx >> 2.  NOTE: references `endPattern` from the
// enclosing scope.
#define DETERMINE_INDICES_4_GPU()\
    int tx = KW_LOCAL_ID_0;\
    int state = tx & 0x3;\
    int pat = tx >> 2;\
    int patIdx = KW_LOCAL_ID_1;\
    int matrix = KW_GROUP_ID_1;\
    int pattern = __umul24(KW_GROUP_ID_0, PATTERN_BLOCK_SIZE * 4) + multBy4(patIdx) + pat;\
    int deltaPartialsByState = multBy16(KW_GROUP_ID_0 * PATTERN_BLOCK_SIZE + patIdx);\
    int deltaPartialsByMatrix = __umul24(matrix, multBy4(endPattern));\
    int x2 = multBy16(matrix);\
    int u = tx + deltaPartialsByState + deltaPartialsByMatrix;

// Variant used by the multi-operation kernel: the per-operation pattern
// range and buffer offsets are read from ptrOffsets (8 ints per operation).
// NOTE: references `gridStartOp`, `ptrOffsets` and `totalPatterns` from the
// enclosing scope.
#define DETERMINE_INDICES_4_MULTI_1_GPU()\
    int opIndexPtr = (gridStartOp + KW_GROUP_ID_0) * 8;\
    int startPat = ptrOffsets[opIndexPtr ];\
    int endPattern = ptrOffsets[opIndexPtr + 1];\
    int tx = KW_LOCAL_ID_0;\
    int state = tx & 0x3;\
    int pat = tx >> 2;\
    int patIdx = KW_LOCAL_ID_1;\
    int matrix = KW_GROUP_ID_1;\
    int pattern = startPat + multBy4(patIdx) + pat;\
    int deltaPartialsByState = multBy4(startPat) + multBy16(patIdx);\
    int deltaPartialsByMatrix = __umul24(matrix, multBy4(totalPatterns));\
    int x2 = multBy16(matrix);\
    int u = tx + deltaPartialsByState + deltaPartialsByMatrix;

// Second half of the multi-operation index setup: resolve destination
// partials and the two transition-matrix pointers from ptrOffsets slots 4-6.
#define DETERMINE_INDICES_4_MULTI_2_GPU()\
    KW_GLOBAL_VAR REAL* KW_RESTRICT partials3 = partials + ptrOffsets[opIndexPtr + 4];\
    const KW_GLOBAL_VAR REAL* KW_RESTRICT matrix1 = matrices + ptrOffsets[opIndexPtr + 5];\
    const KW_GLOBAL_VAR REAL* KW_RESTRICT matrix2 = matrices + ptrOffsets[opIndexPtr + 6];

// Batched partials x partials peeling: each x-group processes one operation
// described by an 8-int record in ptrOffsets (see the MULTI_1/MULTI_2 macros).
// The actual loads and the per-state sums come from the LOAD_*/SUM_* macros
// defined elsewhere; the visible code only writes sum1 * sum2 per thread.
KW_GLOBAL_KERNEL void kernelPartialsPartialsGrowingMulti(KW_GLOBAL_VAR REAL* KW_RESTRICT partials,
                                                         const KW_GLOBAL_VAR REAL* KW_RESTRICT matrices,
                                                         const KW_GLOBAL_VAR unsigned int* KW_RESTRICT ptrOffsets,
                                                         int gridStartOp,
                                                         int totalPatterns) {
#ifdef FW_OPENCL_CPU // CPU/MIC implementation
    todo(); // TODO
#else // GPU implementation
    DETERMINE_INDICES_4_MULTI_1_GPU();
    const KW_GLOBAL_VAR REAL* KW_RESTRICT partials1 = partials + ptrOffsets[opIndexPtr + 2];
    const KW_GLOBAL_VAR REAL* KW_RESTRICT partials2 = partials + ptrOffsets[opIndexPtr + 3];
    DETERMINE_INDICES_4_MULTI_2_GPU();
    LOAD_PARTIALS_PARTIALS_4_MULTI_PART_GPU();
    LOAD_MATRIX_4_MULTI_GPU();
    if (pattern < endPattern) { // Remove padded threads!
        SUM_PARTIALS_PARTIALS_4_GPU();
        partials3[u] = sum1 * sum2;
    }
#endif // FW_OPENCL_CPU
}

// partials3 = (matrix1 * partials1) .* (matrix2 * partials2) for a 4-state
// model, staged through shared (KW_LOCAL_MEM) memory.  The matrix-vector
// products are unrolled over the 4 states with the FMA macro; the rotating
// start index `i = pat` staggers shared-memory accesses across threads.
KW_GLOBAL_KERNEL void kernelPartialsPartialsGrowing(KW_GLOBAL_VAR REAL* KW_RESTRICT partials1,
                                                    KW_GLOBAL_VAR REAL* KW_RESTRICT partials2,
                                                    KW_GLOBAL_VAR REAL* KW_RESTRICT partials3,
                                                    KW_GLOBAL_VAR REAL* KW_RESTRICT matrices1,
                                                    KW_GLOBAL_VAR REAL* KW_RESTRICT matrices2,
                                                    int endPattern) {
#ifdef FW_OPENCL_CPU // CPU/MIC implementation
    todo(); // TODO
#else // GPU implementation
    DETERMINE_INDICES_4_GPU();
    int y = deltaPartialsByState + deltaPartialsByMatrix;
    KW_LOCAL_MEM REAL sPartials1[PATTERN_BLOCK_SIZE * 4 * 4];
    KW_LOCAL_MEM REAL sPartials2[PATTERN_BLOCK_SIZE * 4 * 4];
    /* copy PADDED_STATE_COUNT * PATTERN_BLOCK_SIZE lengthed partials*/
    // Padded (out-of-range) threads load zeros so the shared tiles are fully
    // initialized before the fence.
    if (pattern < endPattern) {
        sPartials1[multBy16(patIdx) | tx] = partials1[y | tx]; /*All coalesced memory*/
        sPartials2[multBy16(patIdx) | tx] = partials2[y | tx];
    } else {
        sPartials1[multBy16(patIdx) | tx] = 0;
        sPartials2[multBy16(patIdx) | tx] = 0;
    }
    const KW_GLOBAL_VAR REAL* KW_RESTRICT matrix1 = matrices1 + x2; /*Points to *this* matrix*/
    const KW_GLOBAL_VAR REAL* KW_RESTRICT matrix2 = matrices2 + x2;
    KW_LOCAL_MEM REAL sMatrix1[16]; /*Load values into shared memory*/
    KW_LOCAL_MEM REAL sMatrix2[16];
    // Only the first row of the work-group loads the two 4x4 matrices.
    if (patIdx == 0 ) {
        sMatrix1[multBy4(state) | pat] = matrix1[tx]; /* Should write transpose into sMatrix1 */
        sMatrix2[tx] = matrix2[tx];
    }
    KW_LOCAL_FENCE;
    KW_LOCAL_MEM REAL sProduct[PATTERN_BLOCK_SIZE * 4 * 4];
    // Pass 1: sProduct = partials1 .* (matrix2 * partials2)
    if (pattern < endPattern) { // Remove padded threads!
        REAL sum2;
        int i = pat;
        int patIdx16pat4 = multBy16(patIdx) | (tx & 0xC);
        sum2 = sMatrix2[multBy4(i) | state] * sPartials2[patIdx16pat4 | i];
        i = (i + 1) & 0x3;
        FMA(    sMatrix2[multBy4(i) | state],  sPartials2[patIdx16pat4 | i], sum2);
        i = (i + 1) & 0x3;
        FMA(    sMatrix2[multBy4(i) | state],  sPartials2[patIdx16pat4 | i], sum2);
        i = (i + 1) & 0x3;
        FMA(    sMatrix2[multBy4(i) | state],  sPartials2[patIdx16pat4 | i], sum2);
        sProduct[multBy16(patIdx) | tx] = sPartials1[multBy16(patIdx) | tx] * sum2;
    }
    KW_LOCAL_FENCE;
    // Pass 2: partials3 = matrix1 * sProduct (transposed sMatrix1 layout).
    if (pattern < endPattern) {
        REAL sum1;
        int i = pat;
        int patIdx16pat4 = multBy16(patIdx) | (tx & 0xC);
        sum1 = sMatrix1[multBy4(i) | state] * sProduct[patIdx16pat4 | i];
        i = (i + 1) & 0x3;
        FMA(    sMatrix1[multBy4(i) | state],  sProduct[patIdx16pat4 | i], sum1);
        i = (i + 1) & 0x3;
        FMA(    sMatrix1[multBy4(i) | state],  sProduct[patIdx16pat4 | i], sum1);
        i = (i + 1) & 0x3;
        FMA(    sMatrix1[multBy4(i) | state],  sProduct[patIdx16pat4 | i], sum1);
        partials3[u] = sum1;
    }
#endif // FW_OPENCL_CPU
}

// Same as kernelPartialsPartialsGrowing but the second child is a tip with
// observed states: the matrix2 product collapses to a single matrix entry
// lookup (or 1 for an ambiguous/padded state code >= PADDED_STATE_COUNT).
KW_GLOBAL_KERNEL void kernelPartialsStatesGrowing(KW_GLOBAL_VAR REAL* KW_RESTRICT partials1,
                                                  KW_GLOBAL_VAR int* KW_RESTRICT states2,
                                                  KW_GLOBAL_VAR REAL* KW_RESTRICT partials3,
                                                  KW_GLOBAL_VAR REAL* KW_RESTRICT matrices1,
                                                  KW_GLOBAL_VAR REAL* KW_RESTRICT matrices2,
                                                  int endPattern) {
#ifdef FW_OPENCL_CPU // CPU/MIC implementation
    todo(); // TODO
#else // GPU implementation
    DETERMINE_INDICES_4_GPU();
    int y = deltaPartialsByState + deltaPartialsByMatrix;
    KW_LOCAL_MEM REAL sPartials1[PATTERN_BLOCK_SIZE * 4 * 4];
    /* copy PADDED_STATE_COUNT * PATTERN_BLOCK_SIZE lengthed partials*/
    if (pattern < endPattern) {
        sPartials1[multBy16(patIdx) | tx] = partials1[y | tx]; /*All coalesced memory*/
    } else {
        sPartials1[multBy16(patIdx) | tx] = 0;
    }
    const KW_GLOBAL_VAR REAL* KW_RESTRICT matrix1 = matrices1 + x2; /*Points to *this* matrix*/
    const KW_GLOBAL_VAR REAL* KW_RESTRICT matrix2 = matrices2 + x2;
    KW_LOCAL_MEM REAL sMatrix1[16]; /*Load values into shared memory*/
    KW_LOCAL_MEM REAL sMatrix2[16];
    if (patIdx == 0 ) {
        sMatrix1[multBy4(state) | pat] = matrix1[tx]; /* Should write transpose into sMatrix1 */
        sMatrix2[tx] = matrix2[tx];
    }
    KW_LOCAL_FENCE;
    KW_LOCAL_MEM REAL sProduct[PATTERN_BLOCK_SIZE * 4 * 4];
    if (pattern < endPattern) { // Remove padded threads!
        // sum2 defaults to 1 for ambiguous states (state2 >= PADDED_STATE_COUNT).
        REAL sum2 = 1;
        int state2 = states2[pattern];
        if (state2 < PADDED_STATE_COUNT) {
            sum2 = sMatrix2[state2 * 4 + state];
        }
        sProduct[multBy16(patIdx) | tx] = sPartials1[multBy16(patIdx) | tx] * sum2;
    }
    KW_LOCAL_FENCE;
    if (pattern < endPattern) {
        REAL sum1;
        int i = pat;
        int patIdx16pat4 = multBy16(patIdx) | (tx & 0xC);
        sum1 = sMatrix1[multBy4(i) | state] * sProduct[patIdx16pat4 | i];
        i = (i + 1) & 0x3;
        FMA(    sMatrix1[multBy4(i) | state],  sProduct[patIdx16pat4 | i], sum1);
        i = (i + 1) & 0x3;
        FMA(    sMatrix1[multBy4(i) | state],  sProduct[patIdx16pat4 | i], sum1);
        i = (i + 1) & 0x3;
        FMA(    sMatrix1[multBy4(i) | state],  sProduct[patIdx16pat4 | i], sum1);
        partials3[u] = sum1;
    }
#endif // FW_OPENCL_CPU
}

// Per-site ratio numerator/denominator accumulated over rate categories for
// edge (first-derivative) calculations; one y-group per node, with per-node
// buffer offsets read from `instructions` (3 ints per node).  The tail is a
// tree reduction over the 4 states followed by one coalesced write per site.
KW_GLOBAL_KERNEL void kernelPartialsPartialsEdgeFirstDerivatives(KW_GLOBAL_VAR REAL* KW_RESTRICT out,
                                                                 KW_GLOBAL_VAR REAL* KW_RESTRICT partials0,
                                                                 KW_GLOBAL_VAR REAL* KW_RESTRICT matrices0,
                                                                 KW_GLOBAL_VAR unsigned int* KW_RESTRICT instructions,
                                                                 KW_GLOBAL_VAR REAL* KW_RESTRICT weights,
                                                                 int skip,
                                                                 int totalPatterns, int categoryCount) {
#ifdef FW_OPENCL_CPU // CPU/MIC implementation
    todo(); // Not implemented
#else // GPU implementation
    int tx = KW_LOCAL_ID_0;
    int state = tx & 0x3;
    int pat = tx >> 2;
    int patIdx = KW_LOCAL_ID_1;
    int pattern = __umul24(KW_GROUP_ID_0, PATTERN_BLOCK_SIZE * 4) + multBy4(patIdx) + pat;
    int y = multBy16(KW_GROUP_ID_0 * PATTERN_BLOCK_SIZE + patIdx);
    int node = KW_GROUP_ID_1;
    int instructionOffset = (skip + node) * 3;
    unsigned int partials1Offset = instructions[instructionOffset + 0];
    unsigned int partials2Offset = instructions[instructionOffset + 1];
    unsigned int matrices1Offset = instructions[instructionOffset + 2];
    KW_LOCAL_MEM REAL sMatrix2[16];
    KW_LOCAL_MEM REAL sPartials1[PATTERN_BLOCK_SIZE * 4 * 4];
    KW_LOCAL_MEM REAL sPartials2[PATTERN_BLOCK_SIZE * 4 * 4];
    /* TODO: Currently assumes MATRIX_BLOCK_SIZE >> matrixCount */\
    KW_LOCAL_MEM REAL sWeights[MATRIX_BLOCK_SIZE];
    // Cooperatively stage the category weights into shared memory.
    for (int c = 0; c < categoryCount; c += KW_LOCAL_SIZE_0) {
        int x = c + KW_LOCAL_ID_0;
        if (x < categoryCount) {
            sWeights[x] = weights[x];
        }
    }
    KW_LOCAL_FENCE;
    REAL numerator = 0;
    REAL denominator = 0;
    REAL lPartial1;
    REAL lPartial2;
    // Accumulate weighted numerator/denominator contributions per category.
    for (int c = 0; c < categoryCount; ++c) {
        KW_GLOBAL_VAR REAL* KW_RESTRICT partials1 = partials0 + partials1Offset + totalPatterns * PADDED_STATE_COUNT * c;
        KW_GLOBAL_VAR REAL* KW_RESTRICT partials2 = partials0 + partials2Offset + totalPatterns * PADDED_STATE_COUNT * c;
        KW_GLOBAL_VAR REAL* KW_RESTRICT matrix2 = matrices0 + matrices1Offset + PADDED_STATE_COUNT * PADDED_STATE_COUNT * c;
        /* copy PADDED_STATE_COUNT * PATTERN_BLOCK_SIZE length partials*/
        if (pattern < totalPatterns) {
            lPartial1 = partials1[y | tx]; /*All coalesced memory*/
            sPartials2[multBy16(patIdx) | tx] = lPartial2 = partials2[y | tx];
        } else {
            lPartial1 = 0;
            sPartials2[multBy16(patIdx) | tx] = lPartial2 = 0;
        }
        FMA(lPartial1, lPartial2 * sWeights[c], denominator);
        if (patIdx == 0 ) {
            sMatrix2[multBy4(state) | pat] = matrix2[tx]; // transposed
        }
        KW_LOCAL_FENCE;
        REAL sum2;
        int i = pat;
        int patIdx16pat4 = multBy16(patIdx) | (tx & 0xC);
        sum2 = sMatrix2[multBy4(i) | state] * sPartials2[patIdx16pat4 | i];
        i = (i + 1) & 0x3;
        FMA(    sMatrix2[multBy4(i) | state],  sPartials2[patIdx16pat4 | i], sum2);
        i = (i + 1) & 0x3;
        FMA(    sMatrix2[multBy4(i) | state],  sPartials2[patIdx16pat4 | i], sum2);
        i = (i + 1) & 0x3;
        FMA(    sMatrix2[multBy4(i) | state],  sPartials2[patIdx16pat4 | i], sum2);
        KW_LOCAL_FENCE; // TODO Is this necessary?
        FMA(lPartial1, sum2 * sWeights[c], numerator);
        // partials1 += totalPatterns * PADDED_STATE_COUNT;
        // partials2 += totalPatterns * PADDED_STATE_COUNT;
    }
    // Tree-reduce the 4 per-state contributions for each site.
    sPartials1[patIdx * PATTERN_BLOCK_SIZE + tx] = numerator;
    sPartials2[patIdx * PATTERN_BLOCK_SIZE + tx] = denominator;
    KW_LOCAL_FENCE;
    if (state < 2) {
        sPartials1[patIdx * PATTERN_BLOCK_SIZE + tx] += sPartials1[patIdx * PATTERN_BLOCK_SIZE + tx + 2];
        sPartials2[patIdx * PATTERN_BLOCK_SIZE + tx] += sPartials2[patIdx * PATTERN_BLOCK_SIZE + tx + 2];
    }
    KW_LOCAL_FENCE;
    if (state < 1) {
        sPartials1[patIdx * PATTERN_BLOCK_SIZE + tx] += sPartials1[patIdx * PATTERN_BLOCK_SIZE + tx + 1];
        sPartials2[patIdx * PATTERN_BLOCK_SIZE + tx] += sPartials2[patIdx * PATTERN_BLOCK_SIZE + tx + 1];
    }
    KW_LOCAL_FENCE;
    if (patIdx < PATTERN_BLOCK_SIZE / 4) { // Need the first PATTERN_BLOCK_SIZE * 4 threads
        int offset = patIdx * PATTERN_BLOCK_SIZE + tx;
        int site = __umul24(KW_GROUP_ID_0, PATTERN_BLOCK_SIZE * 4) + offset;
        if (site < totalPatterns) {
            int row = offset >> 2; // divide by 4
            int col = offset & 0x3; // mod 4
            REAL numerator = sPartials1[row * PATTERN_BLOCK_SIZE + multBy4(col) + 0];
            REAL denominator = sPartials2[row * PATTERN_BLOCK_SIZE + multBy4(col) + 0];
            REAL ratio = 0.0;
            if (denominator != 0.0) {
                ratio = numerator / denominator;
            }
            out[totalPatterns * (skip + node) + site] = ratio; // TODO Check that these are all coalesced writes
        }
    }
#endif
}

// Tip-state variant of the edge first-derivative kernel: the first child's
// partial is an indicator (1 where the thread's state matches the observed
// state, or 1 everywhere for an ambiguous state code >= PADDED_STATE_COUNT).
KW_GLOBAL_KERNEL void kernelPartialsStatesEdgeFirstDerivatives(KW_GLOBAL_VAR REAL* KW_RESTRICT out,
                                                               KW_GLOBAL_VAR int* KW_RESTRICT states0,
                                                               KW_GLOBAL_VAR REAL* KW_RESTRICT partials0,
                                                               KW_GLOBAL_VAR REAL* KW_RESTRICT matrices0,
                                                               KW_GLOBAL_VAR unsigned int* KW_RESTRICT instructions,
                                                               KW_GLOBAL_VAR REAL* KW_RESTRICT weights,
                                                               int skip,
                                                               int totalPatterns, int categoryCount) {
#ifdef FW_OPENCL_CPU // CPU/MIC implementation
    todo(); // Not implemented
#else // GPU implementation
    int tx = KW_LOCAL_ID_0;
    int state = tx & 0x3;
    int pat = tx >> 2;
    int patIdx = KW_LOCAL_ID_1;
    int pattern = __umul24(KW_GROUP_ID_0, PATTERN_BLOCK_SIZE * 4) + multBy4(patIdx) + pat;
    int y = multBy16(KW_GROUP_ID_0 * PATTERN_BLOCK_SIZE + patIdx);
    int node = KW_GROUP_ID_1;
    int instructionOffset = (skip + node) * 3;
    unsigned int states1Offset = instructions[instructionOffset + 0];
    unsigned int partials2Offset = instructions[instructionOffset + 1];
    unsigned int matrices1Offset = instructions[instructionOffset + 2];
    KW_LOCAL_MEM REAL sMatrix2[16];
    KW_LOCAL_MEM REAL sPartials1[PATTERN_BLOCK_SIZE * 4 * 4];
    KW_LOCAL_MEM REAL sPartials2[PATTERN_BLOCK_SIZE * 4 * 4];
    /* TODO: Currently assumes MATRIX_BLOCK_SIZE >> matrixCount */\
    KW_LOCAL_MEM REAL sWeights[MATRIX_BLOCK_SIZE];
    for (int c = 0; c < categoryCount; c += KW_LOCAL_SIZE_0) {
        int x = c + KW_LOCAL_ID_0;
        if (x < categoryCount) {
            sWeights[x] = weights[x];
        }
    }
    KW_LOCAL_FENCE;
    REAL numerator = 0;
    REAL denominator = 0;
    // Indicator partial for the observed tip state (1 if ambiguous/padded).
    int lState1 = (pattern < totalPatterns) ? states0[states1Offset + pattern] : PADDED_STATE_COUNT;
    REAL lPartial1 = (lState1 >= PADDED_STATE_COUNT || state == lState1) ? 1 : 0;
    REAL lPartial2;
    for (int c = 0; c < categoryCount; ++c) {
        KW_GLOBAL_VAR REAL* KW_RESTRICT partials2 = partials0 + partials2Offset + totalPatterns * PADDED_STATE_COUNT * c;
        KW_GLOBAL_VAR REAL* KW_RESTRICT matrix2 = matrices0 + matrices1Offset + PADDED_STATE_COUNT * PADDED_STATE_COUNT * c;
        /* copy PADDED_STATE_COUNT * PATTERN_BLOCK_SIZE length partials*/
        if (pattern < totalPatterns) {
            sPartials2[multBy16(patIdx) | tx] = lPartial2 = partials2[y | tx];
        } else {
            sPartials2[multBy16(patIdx) | tx] = lPartial2 = 0;
        }
        FMA(lPartial1, lPartial2 * sWeights[c], denominator);
        if (patIdx == 0 ) {
            sMatrix2[multBy4(state) | pat] = matrix2[tx]; // transposed
        }
        KW_LOCAL_FENCE;
        REAL sum2;
        int i = pat;
        int patIdx16pat4 = multBy16(patIdx) | (tx & 0xC);
        sum2 = sMatrix2[multBy4(i) | state] * sPartials2[patIdx16pat4 | i];
        i = (i + 1) & 0x3;
        FMA(    sMatrix2[multBy4(i) | state],  sPartials2[patIdx16pat4 | i], sum2);
        i = (i + 1) & 0x3;
        FMA(    sMatrix2[multBy4(i) | state],  sPartials2[patIdx16pat4 | i], sum2);
        i = (i + 1) & 0x3;
        FMA(    sMatrix2[multBy4(i) | state],  sPartials2[patIdx16pat4 | i], sum2);
        KW_LOCAL_FENCE; // TODO Is this necessary?
        FMA(lPartial1, sum2 * sWeights[c], numerator);
    }
    sPartials1[patIdx * PATTERN_BLOCK_SIZE + tx] = numerator;
    sPartials2[patIdx * PATTERN_BLOCK_SIZE + tx] = denominator;
    KW_LOCAL_FENCE;
    if (state < 2) {
        sPartials1[patIdx * PATTERN_BLOCK_SIZE + tx] += sPartials1[patIdx * PATTERN_BLOCK_SIZE + tx + 2];
        sPartials2[patIdx * PATTERN_BLOCK_SIZE + tx] += sPartials2[patIdx * PATTERN_BLOCK_SIZE + tx + 2];
    }
    KW_LOCAL_FENCE;
    if (state < 1) {
        sPartials1[patIdx * PATTERN_BLOCK_SIZE + tx] += sPartials1[patIdx * PATTERN_BLOCK_SIZE + tx + 1];
        sPartials2[patIdx * PATTERN_BLOCK_SIZE + tx] += sPartials2[patIdx * PATTERN_BLOCK_SIZE + tx + 1];
    }
    KW_LOCAL_FENCE;
    if (patIdx < PATTERN_BLOCK_SIZE / 4) { // Need the first PATTERN_BLOCK_SIZE * 4 threads
        int offset = patIdx * PATTERN_BLOCK_SIZE + tx;
        int site = __umul24(KW_GROUP_ID_0, PATTERN_BLOCK_SIZE * 4) + offset;
        if (site < totalPatterns) {
            int row = offset >> 2; // divide by 4
            int col = offset & 0x3; // mod 4
            REAL numerator = sPartials1[row * PATTERN_BLOCK_SIZE + multBy4(col) + 0];
            REAL denominator = sPartials2[row * PATTERN_BLOCK_SIZE + multBy4(col) + 0];
            REAL ratio = 0.0;
            if (denominator != 0.0) {
                ratio = numerator / denominator;
            }
            out[totalPatterns * (skip + node) + site] = ratio; // TODO Check that these are all coalesced writes
        }
    }
#endif
}

// Cross-product accumulation over (node, pattern) tiles for tip nodes.
// NOTE(review): this kernel's body continues beyond the visible portion of
// the file; only the visible part is documented here.
KW_GLOBAL_KERNEL void kernelPartialsStatesCrossProducts(KW_GLOBAL_VAR REAL* KW_RESTRICT out,
                                                        const KW_GLOBAL_VAR int* KW_RESTRICT states0,
                                                        const KW_GLOBAL_VAR REAL* KW_RESTRICT partials0,
                                                        const KW_GLOBAL_VAR REAL* KW_RESTRICT lengths0,
                                                        const KW_GLOBAL_VAR unsigned int* KW_RESTRICT instructions,
                                                        const KW_GLOBAL_VAR REAL* KW_RESTRICT inCategoryWeights,
                                                        const KW_GLOBAL_VAR REAL* KW_RESTRICT inPatternWeights,
                                                        const int skip,
                                                        const int totalPatterns,
                                                        const int totalNodes,
                                                        const int categoryCount,
                                                        const int rateOffset,
                                                        const int accumulate) {
#ifdef FW_OPENCL_CPU // CPU/MIC implementation
    todo(); // Not implemented
#else // GPU implementation
    const int tx = KW_LOCAL_ID_0;
    const int state = tx & 0x3;
    const int pat = tx >> 2;
    const int patternBlockId = KW_GROUP_ID_0;
    const int nodeId = KW_GROUP_ID_1;
    const int numPatternBlocks = KW_NUM_GROUPS_0;
    const int numNodeBlocks = KW_NUM_GROUPS_1;
    KW_LOCAL_MEM REAL post[4 * 4];
    KW_LOCAL_MEM REAL pre[4 * 4];
    KW_LOCAL_MEM REAL denominator[4 * 4];
    KW_LOCAL_MEM REAL patternDenominator[16];
    KW_LOCAL_MEM REAL patternWeights[4];
    KW_LOCAL_MEM REAL categoryRates[16]; // TODO Assumes kCategoryCount <= 16
    KW_LOCAL_MEM REAL categoryWeights[16]; // TODO Should put these into constant memory anyway
    if (tx < categoryCount) {
        categoryRates[tx] = lengths0[rateOffset + tx];
        categoryWeights[tx] = inCategoryWeights[tx];
    }
    KW_LOCAL_FENCE;
    // Fancy indexing to keep pattern work consecutive (may not help cache since jumping btw categories anyway)
    // TODO Check if helpful
    const int batchWorkItems = (totalPatterns + 4 - 1) / 4; // 4 patterns at a time
    const int patternWorkSize = 4 * ((batchWorkItems + numPatternBlocks - 1) / numPatternBlocks);
    REAL acrossPatterns = 0;
    for (int node = nodeId; // Just interleaved indexing
         node < totalNodes; node += numNodeBlocks) {
        int instructionOffset = (skip + node) * 2;
        unsigned int statesOffset = instructions[instructionOffset + 0];
        unsigned int preOffset = instructions[instructionOffset + 1];
        const REAL edgeLength = lengths0[skip + node];
        for (int pattern = patternBlockId * patternWorkSize;
             pattern < (patternBlockId + 1) * patternWorkSize; pattern += 4) {
            unsigned int patternOffset = pattern * 4;
            REAL txPatternDenominator = 0;
            REAL withinPattern0 = 0;
            REAL withinPattern1 = 0;
            REAL withinPattern2 = 0;
            REAL withinPattern3 = 0;
            const KW_GLOBAL_VAR int* KW_RESTRICT postStates = states0 + statesOffset;
            const int stateData = postStates[pattern + pat]; // patterns are already padded mod 4
            // Indicator vector for the observed tip state.
            post[tx] = (state == stateData) ? (REAL) 1.0 : (REAL) 0.0;
            for (int c = 0; c < categoryCount; ++c) {
                const KW_GLOBAL_VAR REAL* KW_RESTRICT prePartials = partials0 + preOffset + patternOffset + totalPatterns * PADDED_STATE_COUNT * c;
                if (pattern < totalPatterns) {
                    pre[tx] = prePartials[tx]; // Coalesced global memory read
                } else {
                    pre[tx] = 0.0;
                }
                const REAL scale = edgeLength * categoryRates[c]; // TODO Better in constant memory?
                const REAL weight = categoryWeights[c]; // TODO Better in constant memory?
                // Inner product
                denominator[tx] = pre[tx] * post[tx];
                if (tx < 8) {
                    denominator[tx << 1] += denominator[tx << 1 | 0x1];
                }
                KW_LOCAL_FENCE; // TODO necessary? in same warp
                if (tx < 4) {
                    denominator[tx << 2] += denominator[tx << 2 | 0x2];
                }
                KW_LOCAL_FENCE; // TODO necessary? is same warp
                txPatternDenominator += denominator[tx & 0xC] * weight;
                // post[tx] *= weight * scale;
                pre[tx] *= weight * scale;
                KW_LOCAL_FENCE; // TODO Merge with fence above
                withinPattern0 += post[4 * 0 | state] * pre[4 * 0 | pat];
                withinPattern1 += post[4 * 1 | state] * pre[4 * 1 | pat];
                withinPattern2 += post[4 * 2 | state] * pre[4 * 2 | pat];
                withinPattern3 += post[4 * 3 | state] * pre[4 * 3 | pat];
            }
            patternDenominator[tx] = txPatternDenominator;
            if (tx < 4) {
                patternWeights[tx] = inPatternWeights[pattern + tx];
            }
            KW_LOCAL_FENCE;
            if (patternDenominator[4 * 0] > 0.0) {
                acrossPatterns += withinPattern0 * patternWeights[0] / patternDenominator[4 * 0];
            }
            if (patternDenominator[4 * 1] > 0.0) {
                acrossPatterns += withinPattern1 * patternWeights[1] / patternDenominator[4 * 1];
            }
            if (patternDenominator[4 * 2] > 0.0) {
                acrossPatterns += withinPattern2 * patternWeights[2] / patternDenominator[4 * 2];
            }
            if (patternDenominator[4 * 3] > 0.0) {
                acrossPatterns += withinPattern3 * patternWeights[3] / patternDenominator[4 * 3];
            } // TODO Vectorize
        }
    }
    KW_LOCAL_FENCE;
    const int destination = (nodeId * numPatternBlocks + patternBlockId) * 16;
    if (accumulate) {
        acrossPatterns += out[destination + tx];
    }
    out[destination + tx] = acrossPatterns; //
out[destination + tx] = post[tx]; // out[destination + tx] = patternDenominator[tx]; #endif } KW_GLOBAL_KERNEL void kernelPartialsPartialsCrossProducts(KW_GLOBAL_VAR REAL* KW_RESTRICT out, const KW_GLOBAL_VAR REAL* KW_RESTRICT partials0, const KW_GLOBAL_VAR REAL* KW_RESTRICT lengths0, const KW_GLOBAL_VAR unsigned int* KW_RESTRICT instructions, const KW_GLOBAL_VAR REAL* KW_RESTRICT inCategoryWeights, const KW_GLOBAL_VAR REAL* KW_RESTRICT inPatternWeights, const int skip, const int totalPatterns, const int totalNodes, const int categoryCount, const int rateOffset, const int accumulate) { #ifdef FW_OPENCL_CPU // CPU/MIC implementation todo(); // Not implemented #else // GPU implementation const int tx = KW_LOCAL_ID_0; const int state = tx & 0x3; const int pat = tx >> 2; const int patternBlockId = KW_GROUP_ID_0; const int nodeId = KW_GROUP_ID_1; const int numPatternBlocks = KW_NUM_GROUPS_0; const int numNodeBlocks = KW_NUM_GROUPS_1; KW_LOCAL_MEM REAL post[4 * 4]; KW_LOCAL_MEM REAL pre[4 * 4]; KW_LOCAL_MEM REAL denominator[4 * 4]; KW_LOCAL_MEM REAL patternDenominator[16]; KW_LOCAL_MEM REAL patternWeights[4]; KW_LOCAL_MEM REAL categoryRates[16]; // TODO Assumes kCategoryCount <= 16 KW_LOCAL_MEM REAL categoryWeights[16]; // TODO Should put these into constant memory anyway if (tx < categoryCount) { categoryRates[tx] = lengths0[rateOffset + tx]; categoryWeights[tx] = inCategoryWeights[tx]; } KW_LOCAL_FENCE; // Fancy indexing to keep pattern work consecutive (may not help cache since jumping btw categories anyway) // TODO Check if helpful const int batchWorkItems = (totalPatterns + 4 - 1) / 4; // 4 patterns at a time const int patternWorkSize = 4 * ((batchWorkItems + numPatternBlocks - 1) / numPatternBlocks); REAL acrossPatterns = 0; for (int node = nodeId; // Just interleaved indexing node < totalNodes; node += numNodeBlocks) { int instructionOffset = (skip + node) * 2; unsigned int preOffset = instructions[instructionOffset + 0]; unsigned int postOffset = 
instructions[instructionOffset + 1]; const REAL edgeLength = lengths0[skip + node]; for (int pattern = patternBlockId * patternWorkSize; pattern < (patternBlockId + 1) * patternWorkSize; pattern += 4) { unsigned int patternOffset = pattern * 4; REAL txPatternDenominator = 0; REAL withinPattern0 = 0; REAL withinPattern1 = 0; REAL withinPattern2 = 0; REAL withinPattern3 = 0; for (int c = 0; c < categoryCount; ++c) { const KW_GLOBAL_VAR REAL* KW_RESTRICT prePartials = partials0 + preOffset + patternOffset + totalPatterns * PADDED_STATE_COUNT * c; const KW_GLOBAL_VAR REAL* KW_RESTRICT postPartials = partials0 + postOffset + patternOffset + totalPatterns * PADDED_STATE_COUNT * c; if (pattern < totalPatterns) { pre[tx] = prePartials[tx]; // Coalesced global memory read post[tx] = postPartials[tx]; // Coalesced global memory read } else { pre[tx] = 0.0; post[tx] = 0.0; } const REAL scale = edgeLength * categoryRates[c]; // TODO Better in constant memory? const REAL weight = categoryWeights[c]; // TODO Better in constant memory? // Inner product denominator[tx] = pre[tx] * post[tx]; if (tx < 8) { denominator[tx << 1] += denominator[tx << 1 | 0x1]; } KW_LOCAL_FENCE; // TODO necessary? in same warp if (tx < 4) { denominator[tx << 2] += denominator[tx << 2 | 0x2]; } KW_LOCAL_FENCE; // TODO necessary? 
is same warp txPatternDenominator += denominator[tx & 0xC] * weight; post[tx] *= weight * scale; KW_LOCAL_FENCE; // TODO Merge with fence above withinPattern0 += pre[4 * 0 | state] * post[4 * 0 | pat]; withinPattern1 += pre[4 * 1 | state] * post[4 * 1 | pat]; withinPattern2 += pre[4 * 2 | state] * post[4 * 2 | pat]; withinPattern3 += pre[4 * 3 | state] * post[4 * 3 | pat]; } patternDenominator[tx] = txPatternDenominator; if (tx < 4) { patternWeights[tx] = inPatternWeights[pattern + tx]; } KW_LOCAL_FENCE; if (patternDenominator[4 * 0] > 0.0) { acrossPatterns += withinPattern0 * patternWeights[0] / patternDenominator[4 * 0]; } if (patternDenominator[4 * 1] > 0.0) { acrossPatterns += withinPattern1 * patternWeights[1] / patternDenominator[4 * 1]; } if (patternDenominator[4 * 2] > 0.0) { acrossPatterns += withinPattern2 * patternWeights[2] / patternDenominator[4 * 2]; } if (patternDenominator[4 * 3] > 0.0) { acrossPatterns += withinPattern3 * patternWeights[3] / patternDenominator[4 * 3]; } // TODO Vectorize } } KW_LOCAL_FENCE; const int destination = (nodeId * numPatternBlocks + patternBlockId) * 16; if (accumulate) { acrossPatterns += out[destination + tx]; } //out[destination + tx] = withinPattern0; out[destination + tx] = acrossPatterns; #endif }
the_stack
#include <cstdlib> #include <ctime> #include <cstdio> using namespace akg_reduce; using namespace std; template <typename T> void CompareResults(T *arr1, T *arr2, int len) { double total_err = 0.0; bool flag = true; for (auto i = 0; i < len; i++) { if (std::abs(TypeTransform<double, T>(arr1[i]) - TypeTransform<double, T>(arr2[i])) > 1e-03) { flag = false; } total_err += std::abs(TypeTransform<double, T>(arr1[i]) - TypeTransform<double, T>(arr2[i])); } if (flag) { printf("[CORRECT] Output is equal to Expected.\n"); } else { printf("[INCORRECT] Output is not equal to Expected\n"); printf("Ouput (show few results):\n"); for (auto i = 0; i < std::min(10, len); i++) { printf("%f ", TypeTransform<double, T>(arr1[i])); } printf("\n"); printf("Expected:\n"); for (auto i = 0; i < std::min(10, len); i++) { printf("%f ", TypeTransform<double, T>(arr2[i])); } printf("\n"); } printf("AVERAGE_ERROR = %f\n", total_err / (double)len); } // Kahan summation for single thread Sum implement. // More info in 'test_kahan.cc' template <typename T> __global__ void ComputeResultAlongXSingleThread(int x_len, int y_len, T *arr, T *output) { for (auto j = 0; j < y_len; j++) { T sum = 0.0; T low_bits = 0.0; T lower_val, cropped_sum; for (auto i = 0; i < x_len; i++) { lower_val = arr[i + j * x_len] - low_bits; cropped_sum = sum + lower_val; low_bits = (cropped_sum - sum) - lower_val; sum = cropped_sum; } output[j] = sum; } } template <typename T> __global__ void ComputeResultAlongYSingleThread(int x_len, int y_len, T *arr, T *output) { for (auto i = 0; i < x_len; i++) { T sum = 0.0; T low_bits = 0.0; T lower_val, cropped_sum; for (auto j = 0; j < y_len; j++) { lower_val = arr[i + j * x_len] - low_bits; cropped_sum = sum + lower_val; low_bits = (cropped_sum - sum) - lower_val; sum = cropped_sum; } output[i] = sum; } } template <typename T, typename ReduceOp> __global__ void ComputeResultAlongXGPUSingleBlock(int x_len, int y_len, T *arr, T *output, int item_per_thread, ReduceOp op) { T T_red_rf = 
0.0; __shared__ T red_buf[32]; __shared__ T temp_output[1]; // temp storage for output temp_output[0] = (T) 0.0; for (int k = 0; k < item_per_thread; ++k) { if (threadIdx.x + k * blockDim.x < x_len && threadIdx.y + blockDim.y * blockIdx.x < y_len) { T_red_rf += arr[threadIdx.x + k * blockDim.x + threadIdx.y * x_len + blockIdx.y * blockDim.y * x_len]; } } __syncthreads(); AkgReduce<T, ReduceOp, 32, REDUCE2D_X>(op, &temp_output[0], red_buf, T_red_rf); __syncthreads(); if (threadIdx.x == 0) { output[blockIdx.y * blockDim.y + threadIdx.y] = temp_output[0]; } } template <typename T, typename ReduceOp> __global__ void ComputeResultAlongXGPUMultiBlock(int x_len, int y_len, T *arr, T *output, int item_per_thread, ReduceOp op) { T T_red_rf = 0.0; __shared__ T red_buf[32]; __shared__ T temp_output[1]; // temp storage for output temp_output[0] = (T) 0.0; for (int k = 0; k < item_per_thread; ++k) { if (threadIdx.x + k * blockDim.x + blockIdx.x * blockDim.x * item_per_thread < x_len && threadIdx.y + blockDim.y * blockIdx.y < y_len) { T_red_rf += arr[threadIdx.x + k * blockDim.x + blockIdx.x * blockDim.x * item_per_thread + threadIdx.y * x_len + blockIdx.y * blockDim.y * x_len]; } } __syncthreads(); AkgReduce<T, ReduceOp, 32, REDUCE2D_X>(op, &temp_output[0], red_buf, T_red_rf); __syncthreads(); if (threadIdx.x == 0) { AkgAtomicReturn<T, ReduceOp>(temp_output[0], &output[blockIdx.y * blockDim.y + threadIdx.y], op); } } template <typename T, typename ReduceOp> __global__ void ComputeResultAlongYGPUSingleBlock(int x_len, int y_len, T *arr, T *output, int item_per_thread, ReduceOp op, int sharedmem_x) { T T_red_rf = 0.0; __shared__ T red_buf[32]; __shared__ T temp_output[1]; // temp storage for output, size is blockDimx.x temp_output[0] = (T) 0.0; for (int k = 0; k < item_per_thread; ++k) { if (threadIdx.x + blockIdx.x * blockDim.x < x_len && threadIdx.y + blockDim.y * k < y_len) { T_red_rf += arr[threadIdx.x + blockIdx.x * blockDim.x + threadIdx.y * x_len + k * blockDim.y * x_len]; 
} } __syncthreads(); AkgReduce<T, ReduceOp, 32, REDUCE2D_Y>(op, &temp_output[threadIdx.x], red_buf, T_red_rf, sharedmem_x); __syncthreads(); if (threadIdx.y == 0) { AkgAtomicReturn<T, ReduceOp>(temp_output[threadIdx.x], &output[blockIdx.x * blockDim.x + threadIdx.x], op); } } template <typename T, typename ReduceOp> __global__ void ComputeResultAlongYGPUMultiBlock(int x_len, int y_len, T *arr, T *output, int item_per_thread, ReduceOp op, int sharedmem_x) { T T_red_rf = 0.0; __shared__ T red_buf[32]; __shared__ T temp_output[1]; // temp storage for output, size is blockDimx.x temp_output[0] = (T) 0.0; for (int k = 0; k < item_per_thread; ++k) { if (threadIdx.x + blockIdx.x * blockDim.x < x_len && threadIdx.y + blockDim.y * k + blockIdx.y * blockDim.y * item_per_thread < y_len) { T_red_rf += arr[threadIdx.x + blockIdx.x * blockDim.x + threadIdx.y * x_len + k * blockDim.y * x_len + blockIdx.y * blockDim.y * item_per_thread * x_len]; } } __syncthreads(); AkgReduce<T, ReduceOp, 32, REDUCE2D_Y>(op, &temp_output[threadIdx.x], red_buf, T_red_rf, sharedmem_x); __syncthreads(); if (threadIdx.y == 0) { AkgAtomicReturn<T, ReduceOp>(temp_output[threadIdx.x], &output[blockIdx.x * blockDim.x + threadIdx.x], op); } } template <typename T> void TestReduce2DAlongX(int x_len, int y_len, string type_name, bool single_block = true, bool verbose = false) { printf("--- TEST CASE Reduce2DAlongX ---\n X = %d, Y = %d, TYPE = %s\n", x_len, y_len, type_name.c_str()); int input_bytes = x_len * y_len * sizeof(T); int output_bytes = y_len * sizeof(T); T *h_I, *d_I, *h_O, *d_O, *expected_h_O, *expected_d_O; h_I = (T *)malloc(input_bytes); h_O = (T *)malloc(output_bytes); expected_h_O = (T *)malloc(output_bytes); // random initialize srand(time(0)); for (auto i = 0; i < x_len * y_len; i++) { h_I[i] = TypeTransform<T, double>((rand() % 10000000) / 10000000.0); } if (verbose) { printf("[VERBOSE] random Input data:\n"); for (auto j = 0; j < y_len; j++) { for (auto i = 0; i < x_len; i++) { printf("%f 
", TypeTransform<double, T>(h_I[i + j * x_len])); } printf("\n"); } } for (auto i = 0; i < y_len; i++) { h_O[i] = TypeTransform<T, double>(0.0); expected_h_O[i] = TypeTransform<T, double>(0.0); } // host to device GetGpuErr(cudaMalloc((void **)&d_I, input_bytes)); GetGpuErr(cudaMemcpy((void *)d_I, (void *)h_I, input_bytes, cudaMemcpyHostToDevice)); GetGpuErr(cudaMalloc((void **)&d_O, output_bytes)); GetGpuErr(cudaMemcpy((void *)d_O, (void *)h_O, output_bytes, cudaMemcpyHostToDevice)); GetGpuErr(cudaMalloc((void **)&expected_d_O, output_bytes)); GetGpuErr(cudaMemcpy((void *)expected_d_O, (void *)expected_h_O, output_bytes, cudaMemcpyHostToDevice)); // compute single thread resutls ComputeResultAlongXSingleThread<T><<<1, 1>>>(x_len, y_len, d_I, expected_d_O); GetGpuErr(cudaMemcpy((void *)expected_h_O, (void *)expected_d_O, output_bytes, cudaMemcpyDeviceToHost)); if (single_block) { // compute GPU resutls dim3 gridSize(1, 8); dim3 blockSize(32, 1); int item_per_thread = (x_len - 1) / blockSize.x + 1; ComputeResultAlongXGPUSingleBlock<T, akg_reduce::SumOp> <<<gridSize, blockSize>>>(x_len, y_len, d_I, d_O, item_per_thread, akg_reduce::SumOp()); GetGpuErr(cudaMemcpy((void *)h_O, (void *)d_O, output_bytes, cudaMemcpyDeviceToHost)); // compare GPU with CPU CompareResults<T>(h_O, expected_h_O, y_len); } else { dim3 gridSize(2, 8); dim3 blockSize(32, 1); int item_per_block = (x_len - 1) / gridSize.x + 1; int item_per_thread = (item_per_block - 1) / blockSize.x + 1; ComputeResultAlongXGPUMultiBlock<T, akg_reduce::SumOp> <<<gridSize, blockSize>>>(x_len, y_len, d_I, d_O, item_per_thread, akg_reduce::SumOp()); GetGpuErr(cudaMemcpy((void *)h_O, (void *)d_O, output_bytes, cudaMemcpyDeviceToHost)); // compare GPU with CPU CompareResults<T>(h_O, expected_h_O, y_len); } GetGpuErr(cudaFree(expected_d_O)); GetGpuErr(cudaFree(d_O)); GetGpuErr(cudaFree(d_I)); free(expected_h_O); free(h_O); free(h_I); printf("--- CASE END ---\n\n"); } template <typename T> void TestReduce2DAlongY(int 
x_len, int y_len, string type_name, bool single_block = true, bool verbose = false) { printf("--- TEST CASE Reduce2DAlongY ---\n X = %d, Y = %d, TYPE = %s\n", x_len, y_len, type_name.c_str()); int input_bytes = x_len * y_len * sizeof(T); int output_bytes = x_len * sizeof(T); T *h_I, *d_I, *h_O, *d_O, *expected_h_O, *expected_d_O; h_I = (T *)malloc(input_bytes); h_O = (T *)malloc(output_bytes); expected_h_O = (T *)malloc(output_bytes); // random initialize srand(time(0)); for (auto i = 0; i < x_len * y_len; i++) { h_I[i] = TypeTransform<T, double>((rand() % 10000000) / 10000000.0); } if (verbose) { printf("[VERBOSE] random Input data:\n"); for (auto j = 0; j < y_len; j++) { for (auto i = 0; i < x_len; i++) { printf("%f ", TypeTransform<double, T>(h_I[i + j * x_len])); } printf("\n"); } } for (auto i = 0; i < x_len; i++) { h_O[i] = TypeTransform<T, double>(0.0); expected_h_O[i] = TypeTransform<T, double>(0.0); } // host to device GetGpuErr(cudaMalloc((void **)&d_I, input_bytes)); GetGpuErr(cudaMemcpy((void *)d_I, (void *)h_I, input_bytes, cudaMemcpyHostToDevice)); GetGpuErr(cudaMalloc((void **)&d_O, output_bytes)); GetGpuErr(cudaMemcpy((void *)d_O, (void *)h_O, output_bytes, cudaMemcpyHostToDevice)); GetGpuErr(cudaMalloc((void **)&expected_d_O, output_bytes)); GetGpuErr(cudaMemcpy((void *)expected_d_O, (void *)expected_h_O, output_bytes, cudaMemcpyHostToDevice)); // compute single thread results ComputeResultAlongYSingleThread<T><<<1, 1>>>(x_len, y_len, d_I, expected_d_O); GetGpuErr(cudaMemcpy((void *)expected_h_O, (void *)expected_d_O, output_bytes, cudaMemcpyDeviceToHost)); if (single_block) { // compute GPU results dim3 gridSize(8, 1); dim3 blockSize(1, 32); int item_per_thread = (y_len - 1) / blockSize.y + 1; int sharedmem_x = 1; ComputeResultAlongYGPUSingleBlock<T, akg_reduce::SumOp> <<<gridSize, blockSize>>>(x_len, y_len, d_I, d_O, item_per_thread, akg_reduce::SumOp(), sharedmem_x); GetGpuErr(cudaMemcpy((void *)h_O, (void *)d_O, output_bytes, 
cudaMemcpyDeviceToHost)); } else { // compute GPU results dim3 gridSize(8, 2); dim3 blockSize(1, 32); int item_per_block = (y_len - 1) / gridSize.y + 1; int item_per_thread = (item_per_block - 1) / blockSize.y + 1; int sharedmem_x = 1; ComputeResultAlongYGPUMultiBlock<T, akg_reduce::SumOp> <<<gridSize, blockSize>>>(x_len, y_len, d_I, d_O, item_per_thread, akg_reduce::SumOp(), sharedmem_x); GetGpuErr(cudaMemcpy((void *)h_O, (void *)d_O, output_bytes, cudaMemcpyDeviceToHost)); } // compare GPU with CPU CompareResults<T>(h_O, expected_h_O, x_len); GetGpuErr(cudaFree(expected_d_O)); GetGpuErr(cudaFree(d_O)); GetGpuErr(cudaFree(d_I)); free(expected_h_O); free(h_O); free(h_I); printf("--- CASE END ---\n\n"); } int main() { TestReduce2DAlongX<int>(128, 8, "int", true); TestReduce2DAlongX<half>(128, 8, "half", true); TestReduce2DAlongX<float>(128, 8, "float", true); TestReduce2DAlongX<double>(128, 8, "double", true); TestReduce2DAlongX<int>(128, 8, "int", false); TestReduce2DAlongX<float>(128, 8, "float", false); TestReduce2DAlongX<double>(128, 8, "double", false); TestReduce2DAlongY<int>(8, 128, "int", true); TestReduce2DAlongY<half>(8, 128, "half", true); TestReduce2DAlongY<float>(8, 128, "float", true); TestReduce2DAlongY<double>(8, 128, "double", true); TestReduce2DAlongY<int>(8, 128, "int", false); TestReduce2DAlongY<half>(8, 128, "half", false); TestReduce2DAlongY<float>(8, 128, "float", false); TestReduce2DAlongY<double>(8, 128, "double", false); return 0; }
the_stack
#include "k2/csrc/ragged_ops.h" #include "k2/python/csrc/torch/torch_util.h" #include "k2/python/csrc/torch/v2/autograd/index_and_sum.h" #include "k2/python/csrc/torch/v2/autograd/normalize.h" #include "k2/python/csrc/torch/v2/autograd/sum.h" #include "k2/python/csrc/torch/v2/ragged_any.h" namespace k2 { static void PrintSpaces(std::ostream &os, int32_t num_spaces) { K2_CHECK_GE(num_spaces, 0); for (int32_t i = 0; i != num_spaces; ++i) os << " "; } template <typename T> void RaggedAnyToStringIter(std::ostream &os, const Ragged<T> ragged, int32_t axis, int32_t begin_pos, int32_t end_pos, int32_t num_indent, bool compact) { const auto &shape = ragged.shape; K2_CHECK(axis >= 0 && axis < shape.NumAxes() && begin_pos >= 0 && begin_pos <= end_pos && end_pos <= shape.TotSize(axis)); std::string sep = ""; bool is_first_row = true; for (int32_t d = begin_pos; d < end_pos; d++) { if (axis == shape.NumAxes() - 1) { os << sep << ragged.values[d]; sep = ", "; } else { const int32_t *row_splits = shape.RowSplits(axis + 1).Data(); K2_DCHECK_LE(d, shape.RowSplits(axis + 1).Dim()); int32_t row_start = row_splits[d], row_end = row_splits[d + 1]; if (!compact && !is_first_row) { PrintSpaces(os, num_indent + 1); } is_first_row = false; os << "["; RaggedAnyToStringIter(os, ragged, axis + 1, row_start, row_end, num_indent + 1, compact); os << "]"; if (d != end_pos - 1) { if (compact) os << ", "; else os << ",\n"; } } } } /** One iteration of RaggedAnyFromList. @param data It is a list or a list-of sublist(s). @param cur_level It is the level of a sublist. The root has a level 0. @param deepest_level values appear at this level. @param row_splits It contains row_splits of different levels, indexed by `cur_level`. @param elems It contains the elements read so far. 
*/ template <typename T> static void RaggedAnyFromListIter(py::list data, int32_t *cur_level, int32_t *deepest_level, std::vector<std::vector<int32_t>> *row_splits, std::vector<T> *elems) { // We encounter a new sublist, so increase the level number *cur_level += 1; if (static_cast<size_t>(*cur_level) > row_splits->size()) { // This is a deeper level that has not been seen, so // we need to allocate a row_split for this level row_splits->resize(*cur_level, std::vector<int32_t>(1, 0)); } if (data.size() > 0 && py::isinstance<py::list>(data[0])) { // If `data` is not empty and it contains sublist for (auto &d : data) { if (!py::isinstance<py::list>(d)) { throw std::runtime_error("Expect an instance of list"); } RaggedAnyFromListIter(d.cast<py::list>(), cur_level, deepest_level, row_splits, elems); } } else { if (*deepest_level == -1) { *deepest_level = *cur_level; } else if (data.size() > 0 && *deepest_level != *cur_level) { // Handle the case for [ [2], [[1]] ] // // Note: [ [], [[1]] ] is valid throw std::runtime_error("Make sure sublists are properly nested"); } if (data.size() > 0 && static_cast<size_t>(*cur_level) != row_splits->size()) { // Handle cases like the following string: // [ [[1]], [2, 3] ] // The sublist [2, 3] should be [[2, 3]], i.e., has the same // level as [[1]] // // Note: [ [], [[1]] ] is valid throw std::runtime_error("Expect a ["); } auto tmp = data.cast<std::vector<T>>(); elems->insert(elems->end(), tmp.begin(), tmp.end()); } *cur_level -= 1; (*row_splits)[*cur_level].push_back( (*cur_level + 1 >= (int32_t)row_splits->size()) ? static_cast<int32_t>(elems->size()) : ((*row_splits)[*cur_level + 1].size() - 1)); } /** Construct a Ragged<T> from a list of sublist(s) of integers or real numbers. @param data A list of sublist(s). @return Return a Ragged<T> constructed from the given `data`. 
*/ template <typename T> static Ragged<T> RaggedAnyFromList(py::list data) { std::vector<std::vector<int32_t>> row_splits; std::vector<T> elems; int32_t cur_level = 0; int32_t deepest_level = -1; // values appear at this level for (auto &d : data) { if (!py::isinstance<py::list>(d)) { throw std::runtime_error("Expect a list"); } RaggedAnyFromListIter(d.cast<py::list>(), &cur_level, &deepest_level, &row_splits, &elems); } if (row_splits.empty()) { // Assume 2 axes even though the num-axes is ambiguous from the input `[ ]` // row_splits is [ 0 ]. row_splits.push_back(std::vector<int32_t>(1, 0)); } std::vector<RaggedShapeLayer> axes(row_splits.size()); ContextPtr c = GetCpuContext(); for (size_t i = 0; i != row_splits.size(); ++i) { axes[i].row_splits = Array1<int32_t>(c, row_splits[i]); axes[i].cached_tot_size = row_splits[i].back(); } Ragged<T> ans; ans.shape = RaggedShape(axes); ans.values = Array1<T>(c, elems); if (ans.values.Dim() != ans.shape.NumElements()) { throw std::runtime_error("Invalid format of a ragged tensor"); } return ans; } RaggedAny::RaggedAny(const RaggedShape &shape, torch::Tensor value) : data(value) { ContextPtr context = GetContext(value); DeviceGuard guard(context); Dtype t = ScalarTypeToDtype(value.scalar_type()); FOR_REAL_AND_INT32_TYPES(t, T, { Array1<T> array = FromTorch<T>(value); Ragged<T> r(shape, array); any = r.Generic(); return; }); // Unreachable code K2_LOG(FATAL) << "Unsupported dtype: " << TraitsOf(t).Name(); } RaggedAny::RaggedAny(const std::string &s, py::object dtype /*=py::none()*/, torch::Device device /*=torch::kCPU*/) { if (!dtype.is_none() && !THPDtype_Check(dtype.ptr())) { K2_LOG(FATAL) << "Expect an instance of torch.dtype. 
" << "Given: " << py::str(dtype); } ContextPtr context = GetContext(device); DeviceGuard guard(context); if (dtype.is_none()) { try { // We try int first, if it fails, use float any = Ragged<int32_t>(s, /*throw_on_failure*/ true).To(context).Generic(); return; } catch (const std::runtime_error &) { // Use float. If it fails again, another exception // is thrown and it is propagated to the user any = Ragged<float>(s).To(context).Generic(); return; } } auto scalar_type = reinterpret_cast<THPDtype *>(dtype.ptr())->scalar_type; Dtype t = ScalarTypeToDtype(scalar_type); FOR_REAL_AND_INT32_TYPES(t, T, { any = Ragged<T>(s).To(context).Generic(); return; }); K2_LOG(FATAL) << "Unsupported dtype: " << scalar_type << ". Supported dtypes are: torch.int32, torch.float32, " << "and torch.float64"; } RaggedAny::RaggedAny(py::list data, py::object dtype /*= py::none()*/, torch::Device device /*=torch::kCPU*/) { if (!dtype.is_none() && !THPDtype_Check(dtype.ptr())) { K2_LOG(FATAL) << "Expect an instance of torch.dtype. " << "Given: " << py::str(dtype); } ContextPtr context = GetContext(device); DeviceGuard guard(context); if (dtype.is_none()) { try { // We try int first; if it fails, use float any = RaggedAnyFromList<int32_t>(data).To(context).Generic(); return; } catch (const std::exception &) { // Use float. If it fails again, another exception // is thrown and it is propagated to the user any = RaggedAnyFromList<float>(data).To(context).Generic(); return; } } auto scalar_type = reinterpret_cast<THPDtype *>(dtype.ptr())->scalar_type; Dtype t = ScalarTypeToDtype(scalar_type); FOR_REAL_AND_INT32_TYPES(t, T, { any = RaggedAnyFromList<T>(data).To(context).Generic(); return; }); K2_LOG(FATAL) << "Unsupported dtype: " << scalar_type << ". 
Supported dtypes are: torch.int32, torch.float32, " << "and torch.float64"; } RaggedAny::RaggedAny(torch::Tensor tensor) { int32_t ndim = tensor.dim(); K2_CHECK_GE(ndim, 2) << "Expect a tensor with more than 1-D"; ContextPtr context = GetContext(tensor); DeviceGuard guard(context); std::vector<RaggedShape> shapes; shapes.reserve(ndim - 1); int32_t dim0 = tensor.size(0); for (int32_t i = 1; i != ndim; ++i) { int32_t dim1 = tensor.size(i); shapes.push_back(RegularRaggedShape(context, dim0, dim1)); dim0 *= dim1; } while (shapes.size() > 2u) { RaggedShape c = std::move(shapes.back()); shapes.pop_back(); RaggedShape b = std::move(shapes.back()); shapes.pop_back(); RaggedShape a = std::move(shapes.back()); shapes.pop_back(); RaggedShape abc = ComposeRaggedShapes3(a, b, c); shapes.push_back(std::move(abc)); } if (shapes.size() > 1u) { RaggedShape b = std::move(shapes.back()); shapes.pop_back(); RaggedShape a = std::move(shapes.back()); shapes.pop_back(); RaggedShape ab = ComposeRaggedShapes(a, b); shapes.push_back(std::move(ab)); } Dtype t = ScalarTypeToDtype(tensor.scalar_type()); FOR_REAL_AND_INT32_TYPES(t, T, { Array1<T> values = FromTorch<T>(tensor.contiguous().view({-1})); any = Ragged<T>(shapes[0], values).Generic(); }); } const torch::Tensor &RaggedAny::Data() const { DeviceGuard guard(any.Context()); if (!data.defined()) { Dtype t = any.GetDtype(); FOR_REAL_AND_INT32_TYPES(t, T, { const_cast<RaggedAny *>(this)->data = ToTorch((const_cast<RaggedAny *>(this)->any).Specialize<T>().values); }); } return data; } std::string RaggedAny::ToString(bool compact /*=false*/, int32_t device_id /*=-1*/) const { ContextPtr context = any.Context(); if (context->GetDeviceType() != kCpu) { return To("cpu").ToString(context->GetDeviceId()); } std::ostringstream os; Dtype t = any.GetDtype(); std::string dtype; if (t == kInt32Dtype) dtype = "torch.int32"; else if (t == kFloatDtype) dtype = "torch.float32"; else if (t == kDoubleDtype) dtype = "torch.float64"; else K2_LOG(FATAL) << 
"Unsupported dtype: " << TraitsOf(t).Name(); FOR_REAL_AND_INT32_TYPES(t, T, { os << "RaggedTensor(["; // 13 is strlen("RaggedTensor(") RaggedAnyToStringIter(os, any.Specialize<T>(), 0, 0, any.shape.Dim0(), 13, compact); os << "]"; if (device_id != -1) os << ", device='cuda:" << device_id << "'"; os << ", dtype=" << dtype; os << ")"; }); return os.str(); } RaggedAny RaggedAny::To(torch::Device device) const { ContextPtr context = any.Context(); if (device.is_cpu()) { // CPU -> CPU if (context->GetDeviceType() == kCpu) return *this; // CUDA -> CPU DeviceGuard guard(context); return RaggedAny(any.To(GetCpuContext())); } K2_CHECK(device.is_cuda()) << device.str(); int32_t device_index = device.index(); if (context->GetDeviceType() == kCuda && context->GetDeviceId() == device_index) // CUDA to CUDA, and it's the same device return *this; // CPU to CUDA // or from one GPU to another GPU DeviceGuard guard(device_index); return RaggedAny(any.To(GetCudaContext(device_index))); } RaggedAny RaggedAny::To(const std::string &device) const { torch::Device d(device); return this->To(d); } RaggedAny RaggedAny::To(torch::ScalarType scalar_type) const { Dtype d = any.GetDtype(); switch (scalar_type) { case torch::kFloat: FOR_REAL_AND_INT32_TYPES( d, T, { return RaggedAny(any.Specialize<T>().ToFloat().Generic()); }); case torch::kInt: FOR_REAL_AND_INT32_TYPES( d, T, { return RaggedAny(any.Specialize<T>().ToInt().Generic()); }); case torch::kDouble: FOR_REAL_AND_INT32_TYPES(d, T, { return RaggedAny(any.Specialize<T>().ToDouble().Generic()); }); default: K2_LOG(FATAL) << "Unsupported scalar type: " << torch::toString(scalar_type) << "\n"; } // Unreachable code return {}; } RaggedAny RaggedAny::Clone() const { DeviceGuard guard(any.Context()); Dtype t = any.GetDtype(); FOR_REAL_AND_INT32_TYPES( t, T, { return RaggedAny(any.Specialize<T>().Clone().Generic()); }); // Unreachable code return {}; } RaggedAny &RaggedAny::SetRequiresGrad(bool requires_grad /*=true*/) { // PyTorch will throw a 
RuntimeError exception if dtype is torch.int32 // So no need to check it by us here Data().requires_grad_(requires_grad); return *this; } torch::Tensor RaggedAny::Sum(float initial_value /*=0*/) const { DeviceGuard guard(any.Context()); return SumFunction::apply(*this, Data(), initial_value); } RaggedAny RaggedAny::Index(int32_t axis, int32_t i) const { K2_CHECK_EQ(axis, 0) << "Support only axis == 0 right now"; DeviceGuard guard(any.Context()); return RaggedAny(any.Index(axis, i)); } RaggedAny RaggedAny::RemoveAxis(int32_t axis) /*const*/ { DeviceGuard guard(any.Context()); return RaggedAny(any.RemoveAxis(axis)); } RaggedAny RaggedAny::Arange(int32_t axis, int32_t begin, int32_t end) /*const*/ { DeviceGuard guard(any.Context()); return RaggedAny(k2::Arange(any, axis, begin, end)); } RaggedAny RaggedAny::RemoveValuesLeq(py::object cutoff) /*const*/ { DeviceGuard guard(any.Context()); Dtype t = any.GetDtype(); FOR_REAL_AND_INT32_TYPES(t, T, { return RaggedAny( k2::RemoveValuesLeq<T>(any.Specialize<T>(), cutoff.cast<T>()) .Generic()); }); // Unreachable code return {}; } RaggedAny RaggedAny::RemoveValuesEq(py::object target) /*const*/ { DeviceGuard guard(any.Context()); Dtype t = any.GetDtype(); FOR_REAL_AND_INT32_TYPES(t, T, { return RaggedAny( k2::RemoveValuesEq<T>(any.Specialize<T>(), target.cast<T>()).Generic()); }); // Unreachable code return {}; } torch::Tensor RaggedAny::ArgMax( py::object initial_value /*=py::none()*/) /*const*/ { K2_CHECK((bool)initial_value); DeviceGuard guard(any.Context()); int32_t last_axis = any.NumAxes() - 1; const Array1<int32_t> &row_splits_array = any.RowSplits(last_axis); int32_t num_rows = row_splits_array.Dim() - 1; Array1<int32_t> indexes(any.Context(), num_rows); Dtype t = any.GetDtype(); FOR_REAL_AND_INT32_TYPES(t, T, { T v = initial_value.is_none() ? 
std::numeric_limits<T>::lowest() : initial_value.cast<T>(); ArgMaxPerSublist<T>(any.Specialize<T>(), v, &indexes); }); return ToTorch(indexes); } torch::Tensor RaggedAny::Max( py::object initial_value /*=py::none()*/) /*const*/ { K2_CHECK((bool)initial_value); DeviceGuard guard(any.Context()); int32_t last_axis = any.NumAxes() - 1; const Array1<int32_t> &row_splits_array = any.RowSplits(last_axis); int32_t num_rows = row_splits_array.Dim() - 1; Array1<int32_t> indexes(any.Context(), num_rows); Dtype t = any.GetDtype(); FOR_REAL_AND_INT32_TYPES(t, T, { T v = initial_value.is_none() ? std::numeric_limits<T>::lowest() : initial_value.cast<T>(); Array1<T> max_values(any.Context(), num_rows); MaxPerSublist<T>(any.Specialize<T>(), v, &max_values); return ToTorch(max_values); }); // Unreachable code return {}; } torch::Tensor RaggedAny::Min( py::object initial_value /*=py::none()*/) /*const*/ { K2_CHECK((bool)initial_value); DeviceGuard guard(any.Context()); int32_t last_axis = any.NumAxes() - 1; const Array1<int32_t> &row_splits_array = any.RowSplits(last_axis); int32_t num_rows = row_splits_array.Dim() - 1; Array1<int32_t> indexes(any.Context(), num_rows); Dtype t = any.GetDtype(); FOR_REAL_AND_INT32_TYPES(t, T, { T v = initial_value.is_none() ? 
std::numeric_limits<T>::max() : initial_value.cast<T>(); Array1<T> min_values(any.Context(), num_rows); MinPerSublist<T>(any.Specialize<T>(), v, &min_values); return ToTorch(min_values); }); // Unreachable code return {}; } RaggedAny RaggedAny::Cat(const std::vector<RaggedAny> &srcs, int32_t axis) { K2_CHECK_GT(srcs.size(), 0); DeviceGuard guard(srcs[0].any.Context()); Dtype t = srcs[0].any.GetDtype(); int32_t num_srcs = srcs.size(); FOR_REAL_AND_INT32_TYPES(t, T, { std::vector<Ragged<T>> tmp; tmp.reserve(num_srcs); for (const auto &s : srcs) { tmp.push_back(s.any.Specialize<T>()); } return RaggedAny( k2::Cat(axis, num_srcs, tmp.data(), /*merge_map*/ nullptr).Generic()); }); // Unreachable code return {}; } std::tuple<RaggedAny, torch::optional<RaggedAny>, torch::optional<torch::Tensor>> RaggedAny::Unique(bool need_num_repeats /*= false*/, bool need_new2old_indexes /*= false*/) { DeviceGuard guard(any.Context()); Dtype t = any.GetDtype(); K2_CHECK_EQ(t, kInt32Dtype) << "Unsupported dtype: " << TraitsOf(t).Name(); Ragged<int32_t> num_repeats; Array1<int32_t> new2old_indexes; Ragged<int32_t> ans = UniqueSequences( any.Specialize<int32_t>(), need_num_repeats ? &num_repeats : nullptr, need_new2old_indexes ? 
&new2old_indexes : nullptr); torch::optional<RaggedAny> num_repeats_tensor; if (need_num_repeats) num_repeats_tensor = RaggedAny(num_repeats.Generic()); torch::optional<torch::Tensor> new2old_indexes_tensor; if (need_new2old_indexes) new2old_indexes_tensor = ToTorch(new2old_indexes); return std::make_tuple(RaggedAny(ans.Generic()), num_repeats_tensor, new2old_indexes_tensor); } RaggedAny RaggedAny::Normalize(bool use_log) /*const*/ { DeviceGuard guard(any.Context()); RaggedAny out; NormalizeFunction::apply(*this, use_log, Data(), &out); return out; } torch::Tensor RaggedAny::Pad(const std::string &mode, py::object padding_value) /*const*/ { K2_CHECK((bool)padding_value); K2_CHECK(!padding_value.is_none()); DeviceGuard guard(any.Context()); Dtype t = any.GetDtype(); FOR_REAL_AND_INT32_TYPES(t, T, { Array2<T> arr = PadRagged(any.Specialize<T>(), mode, padding_value.cast<T>()); return ToTorch(arr); }); // Unreachable code return {}; } template <typename T> static py::list ToList(Ragged<T> &src, int32_t axis, int32_t begin, int32_t end) { // assuming src is on CPU int32_t num_axes = src.NumAxes(); K2_CHECK_GE(axis, 0); K2_CHECK_LT(axis, num_axes); K2_CHECK_LE(begin, end); K2_CHECK_LE(end, src.TotSize(axis)); py::list ans(end - begin); if (axis == num_axes - 1) { const T *data = src.values.Data(); // recursion ends here for (int32_t i = begin; i != end; ++i) { ans[i - begin] = data[i]; } } else { const int32_t *data = src.RowSplits(axis + 1).Data(); for (int32_t i = begin; i != end; ++i) { ans[i - begin] = ToList(src, axis + 1, data[i], data[i + 1]); } } return ans; } py::list RaggedAny::ToList() /*const*/ { if (any.Context()->GetDeviceType() != kCpu) { return RaggedAny(any.To(GetCpuContext())).ToList(); } Dtype t = any.GetDtype(); FOR_REAL_AND_INT32_TYPES(t, T, { return k2::ToList(any.Specialize<T>(), /*axis*/ 0, /*begin*/ 0, /*end*/ any.Dim0()); }); // Unreachable code return py::none(); } torch::optional<torch::Tensor> RaggedAny::Sort( bool descending /*= false*/, 
bool need_new2old_indexes /*= false*/) { DeviceGuard guard(any.Context()); Dtype t = any.GetDtype(); Array1<int32_t> new2old; if (need_new2old_indexes) { new2old = Array1<int32_t>(any.Context(), any.NumElements()); } FOR_REAL_AND_INT32_TYPES(t, T, { if (descending) { SortSublists<T, GreaterThan<T>>( &any.Specialize<T>(), need_new2old_indexes ? &new2old : nullptr); } else { SortSublists<T, LessThan<T>>(&any.Specialize<T>(), need_new2old_indexes ? &new2old : nullptr); } }); torch::optional<torch::Tensor> ans; if (need_new2old_indexes) ans = ToTorch(new2old); return ans; } RaggedAny RaggedAny::Index(RaggedAny &indexes) /*const*/ { K2_CHECK_EQ(indexes.any.GetDtype(), kInt32Dtype) << "Unsupported dtype: " << TraitsOf(indexes.any.GetDtype()).Name(); DeviceGuard guard(any.Context()); bool remove_axis = false; Dtype t = any.GetDtype(); FOR_REAL_AND_INT32_TYPES(t, T, { return RaggedAny(k2::Index<T>(any.Specialize<T>(), indexes.any.Specialize<int32_t>(), remove_axis) .Generic()); }); // Unreachable code return {}; } std::pair<RaggedAny, torch::optional<torch::Tensor>> RaggedAny::Index( torch::Tensor indexes, int32_t axis, bool need_value_indexes /*= false*/) /*const*/ { DeviceGuard guard(any.Context()); Array1<int32_t> indexes_array = FromTorch<int32_t>(indexes); Array1<int32_t> value_indexes; torch::optional<torch::Tensor> value_indexes_tensor; Dtype t = any.GetDtype(); FOR_REAL_AND_INT32_TYPES(t, T, { Ragged<T> ans = k2::Index<T>(any.Specialize<T>(), axis, indexes_array, need_value_indexes ? &value_indexes : nullptr); if (need_value_indexes) value_indexes_tensor = ToTorch(value_indexes); return std::make_pair(RaggedAny(ans.Generic()), value_indexes_tensor); }); // Unreachable code return {}; } RaggedAny RaggedAny::Index(torch::Tensor src, py::object default_value /*=py::none()*/) /*const*/ { Dtype t = any.GetDtype(); K2_CHECK_EQ(t, kInt32Dtype) << "Unsupported dtype: " << TraitsOf(t).Name(); K2_CHECK_EQ(src.dim(), 1) << "Expected dim: 1. 
Given: " << src.dim(); DeviceGuard guard(any.Context()); Dtype dtype = ScalarTypeToDtype(src.scalar_type()); FOR_REAL_AND_INT32_TYPES(dtype, T, { T value_for_minus_one = default_value.is_none() ? T() : default_value.cast<T>(); Array1<T> src_array = FromTorch<T>(src); return RaggedAny( k2::Index(src_array, any.Specialize<int32_t>(), value_for_minus_one) .Generic()); }); // Unreachable code return {}; } torch::Tensor RaggedAny::IndexAndSum(torch::Tensor src) /*const*/ { DeviceGuard guard(any.Context()); return IndexAndSumFunction::apply(src, *this); } } // namespace k2
the_stack
//////////////////////////////////////////////////////////////////////////////
// Octree node-property kernels.
//
// Layout conventions visible in this file:
//  - multipole[3*node + 0] : monopole  (x,y,z = center of mass, w = mass)
//  - multipole[3*node + 1] : quadrupole diagonal (x=q11, y=q22, z=q33,
//                            w = max softening found in the subtree)
//  - multipole[3*node + 2] : quadrupole off-diagonal (x=q12, y=q13, z=q23)
//  - nodeUpperBounds[node].w : 1.0f marks a leaf, 0.0f a non-leaf.
//////////////////////////////////////////////////////////////////////////////

//////////////////////////////
// Helper functions for leaf nodes (per-particle accumulation)
//////////////////////////////

// Accumulate one particle into the monopole sums.
// pos.w carries the particle mass; position sums are mass-weighted.
__device__ void compute_monopole(double &mass, double &posx,
                                 double &posy, double &posz,
                                 float4 pos)
{
    mass += pos.w;
    posx += pos.w * pos.x;
    posy += pos.w * pos.y;
    posz += pos.w * pos.z;
}

// Accumulate one particle into the (raw, un-centered) quadrupole sums.
// Note the cyclic pairing of the off-diagonal terms:
//   q12 = x*y, q13 = y*z, q23 = z*x  (kept as in the rest of the code base).
__device__ void compute_quadropole(double &oct_q11, double &oct_q22,
                                   double &oct_q33, double &oct_q12,
                                   double &oct_q13, double &oct_q23,
                                   float4 pos)
{
    oct_q11 += pos.w * pos.x * pos.x;
    oct_q22 += pos.w * pos.y * pos.y;
    oct_q33 += pos.w * pos.z * pos.z;
    oct_q12 += pos.w * pos.x * pos.y;
    oct_q13 += pos.w * pos.y * pos.z;
    oct_q23 += pos.w * pos.z * pos.x;
}

// Grow the axis-aligned bounding box (r_min, r_max) to include pos.
__device__ void compute_bounds(float3 &r_min, float3 &r_max, float4 pos)
{
    r_min.x = fminf(r_min.x, pos.x);
    r_min.y = fminf(r_min.y, pos.y);
    r_min.z = fminf(r_min.z, pos.z);

    r_max.x = fmaxf(r_max.x, pos.x);
    r_max.y = fmaxf(r_max.y, pos.y);
    r_max.z = fmaxf(r_max.z, pos.z);
}

//////////////////////////////
// Helper functions for non-leaf nodes (per-child accumulation)
//////////////////////////////

// Accumulate a child node's monopole (already mass-weighted by the child
// pass, so the position terms are re-weighted by the child's mass pos.w).
__device__ void compute_monopole_node(double &mass, double &posx,
                                      double &posy, double &posz,
                                      double4 pos)
{
    mass += pos.w;
    posx += pos.w * pos.x;
    posy += pos.w * pos.y;
    posz += pos.w * pos.z;
}

// Accumulate a child node's raw quadrupole sums.
// Q0 holds the diagonal (q11,q22,q33), Q1 the off-diagonal (q12,q13,q23).
__device__ void compute_quadropole_node(double &oct_q11, double &oct_q22,
                                        double &oct_q33, double &oct_q12,
                                        double &oct_q13, double &oct_q23,
                                        double4 Q0, double4 Q1)
{
    oct_q11 += Q0.x;
    oct_q22 += Q0.y;
    oct_q33 += Q0.z;
    oct_q12 += Q1.x;
    oct_q13 += Q1.y;
    oct_q23 += Q1.z;
}

// Grow the bounding box (r_min, r_max) to include a child node's box.
__device__ void compute_bounds_node(float3 &r_min, float3 &r_max,
                                    float4 node_min, float4 node_max)
{
    r_min.x = fminf(r_min.x, node_min.x);
    r_min.y = fminf(r_min.y, node_min.y);
    r_min.z = fminf(r_min.z, node_min.z);

    r_max.x = fmaxf(r_max.x, node_max.x);
    r_max.y = fmaxf(r_max.y, node_max.y);
    r_max.z = fmaxf(r_max.z, node_max.z);
}

// Compute multipole moments and bounding boxes for all leaf nodes.
// One thread per leaf; 1-D blocks on a (possibly) 2-D grid.
extern "C" __global__ void compute_leaf(const int n_leafs,
                                        uint *leafsIdxs,
                                        uint2 *node_bodies,
                                        real4 *body_pos,
                                        double4 *multipole,
                                        real4 *nodeLowerBounds,
                                        real4 *nodeUpperBounds,
                                        // float3 *lowerBounds,
                                        // float3 *upperBounds,
                                        real4 *body_vel)
{
    const uint bid = blockIdx.y * gridDim.x + blockIdx.x;
    const uint tid = threadIdx.x;
    const uint id  = bid * blockDim.x + tid;

    // Shared-memory buffers for a block-wide min/max reduction.  The
    // reduction itself is currently disabled (see note at the bottom of
    // this kernel); the buffers and the sentinel initialisation below are
    // kept so it can be re-enabled without corrupting lanes whose thread
    // exited early.
    volatile __shared__ float3 shmem[256];
    volatile float3 *sh_rmin = (float3*)&shmem[  0];
    volatile float3 *sh_rmax = (float3*)&shmem[128];

    // Threads beyond the number of leafs only seed the shared memory with
    // neutral min/max sentinels and exit.
    if (id >= n_leafs)
    {
        sh_rmin[tid].x = +1e10f; sh_rmin[tid].y = +1e10f; sh_rmin[tid].z = +1e10f;
        sh_rmax[tid].x = -1e10f; sh_rmax[tid].y = -1e10f; sh_rmax[tid].z = -1e10f;
        return;
    }

    // Leafs are intermixed with non-leafs in node_bodies, so translate the
    // dense leaf index into the real node id first.
    int nodeID = leafsIdxs[id];

    const uint2 bij        = node_bodies[nodeID];
    const uint  firstChild = bij.x & ILEVELMASK;
    const uint  lastChild  = bij.y;  // TODO maybe have to increase it by 1

    // Accumulators for the moments and the bounding box.
    float4 p;
    double mass, posx, posy, posz;
    mass = posx = posy = posz = 0.0;

    double oct_q11, oct_q22, oct_q33;
    double oct_q12, oct_q13, oct_q23;
    oct_q11 = oct_q22 = oct_q33 = 0.0;
    oct_q12 = oct_q13 = oct_q23 = 0.0;

    float3 r_min, r_max;
    r_min = (float3){+1e10f, +1e10f, +1e10f};
    r_max = (float3){-1e10f, -1e10f, -1e10f};

    float maxEps = -100.0f;

    // Loop over the particles belonging to this leaf.
    // The pragma must immediately precede the loop to take effect
    // (unrolling increases register usage).
    #pragma unroll 16
    for (int i = firstChild; i < lastChild; i++)
    {
        p = body_pos[i];
        // Track the maximum softening (stored in the velocity .w) in this leaf.
        maxEps = fmaxf(body_vel[i].w, maxEps);

        compute_monopole(mass, posx, posy, posz, p);
        compute_quadropole(oct_q11, oct_q22, oct_q33,
                           oct_q12, oct_q13, oct_q23, p);
        compute_bounds(r_min, r_max, p);
    }

    // Normalise the centre of mass; a zero mass means tracer/massless
    // particles, for which we keep the position sums at zero.
    double4 mon = {posx, posy, posz, mass};
    double  im  = 1.0 / mon.w;
    if (mon.w == 0) im = 0;
    mon.x *= im;
    mon.y *= im;
    mon.z *= im;

    double4 Q0, Q1;
    Q0 = (double4){oct_q11, oct_q22, oct_q33, maxEps};  // .w = max softening
    Q1 = (double4){oct_q12, oct_q13, oct_q23, 0};

    // Store the leaf properties.
    multipole[3*nodeID + 0] = mon;  // Monopole
    multipole[3*nodeID + 1] = Q0;   // Quadrupole diagonal
    multipole[3*nodeID + 2] = Q1;   // Quadrupole off-diagonal

    // Store the node boundaries; the 4th component of the upper bound is
    // set to 1 to flag this node as a leaf.
    nodeLowerBounds[nodeID] = (float4){r_min.x, r_min.y, r_min.z, 0.0f};
    nodeUpperBounds[nodeID] = (float4){r_max.x, r_max.y, r_max.z, 1.0f};

    // NOTE(review): a block-wide sh_MinMax reduction over sh_rmin/sh_rmax,
    // writing the global domain boundaries to the (commented-out)
    // lowerBounds/upperBounds parameters, used to follow here.  It is
    // disabled; if re-enabled, beware that threads with id >= n_leafs have
    // already returned and will not reach the __syncthreads() calls.
    return;
}

// Compute the properties of the non-leaf nodes, one tree level at a time
// (the caller walks the levels from deepest to shallowest so that all
// children are final before their parent is processed).
extern "C" __global__ void compute_non_leaf(const int curLevel,       // level to process
                                            uint *leafsIdxs,          // id conversion table
                                            uint *node_level_list,    // start node of each level
                                            uint *n_children,         // first child + child count, packed
                                            double4 *multipole,
                                            real4 *nodeLowerBounds,
                                            real4 *nodeUpperBounds)
{
    const int bid = blockIdx.y * gridDim.x + blockIdx.x;
    const int tid = threadIdx.y * blockDim.x + threadIdx.x;
    const int idx = bid * (blockDim.x * blockDim.y) + tid;

    const int endNode   = node_level_list[curLevel];
    const int startNode = node_level_list[curLevel-1];

    if (idx >= (endNode - startNode)) return;

    const int nodeID = leafsIdxs[idx + startNode];

    // Unpack the child reference: low 28 bits = first child,
    // high 4 bits = number of children.  TODO make these masks named defines.
    const uint firstChild = n_children[nodeID] & 0x0FFFFFFF;
    const uint nChildren  = ((n_children[nodeID] & 0xF0000000) >> 28);

    // Accumulators, identical in meaning to compute_leaf.
    double mass, posx, posy, posz;
    mass = posx = posy = posz = 0.0;

    double oct_q11, oct_q22, oct_q33;
    double oct_q12, oct_q13, oct_q23;
    oct_q11 = oct_q22 = oct_q33 = 0.0;
    oct_q12 = oct_q13 = oct_q23 = 0.0;

    float3 r_min, r_max;
    r_min = (float3){+1e10f, +1e10f, +1e10f};
    r_max = (float3){-1e10f, -1e10f, -1e10f};

    // Fold in the children (1 to 8).
    float maxEps = -100.0f;
    for (int i = firstChild; i < firstChild + nChildren; i++)
    {
        double4 tmon = multipole[3*i + 0];

        // Propagate the maximum softening stored in the child's Q0.w.
        maxEps = max(multipole[3*i + 1].w, maxEps);

        compute_monopole_node(mass, posx, posy, posz, tmon);
        compute_quadropole_node(oct_q11, oct_q22, oct_q33,
                                oct_q12, oct_q13, oct_q23,
                                multipole[3*i + 1], multipole[3*i + 2]);
        compute_bounds_node(r_min, r_max,
                            nodeLowerBounds[i], nodeUpperBounds[i]);
    }

    // Save the bounds; the 4th component of the upper bound is 0 to flag a
    // non-leaf.
    nodeLowerBounds[nodeID] = (float4){r_min.x, r_min.y, r_min.z, 0.0f};
    nodeUpperBounds[nodeID] = (float4){r_max.x, r_max.y, r_max.z, 0.0f};

    // Normalise and store the results (see compute_leaf for the zero-mass
    // tracer-particle handling).
    double4 mon = {posx, posy, posz, mass};
    double  im  = 1.0 / mon.w;
    if (mon.w == 0) im = 0;
    mon.x *= im;
    mon.y *= im;
    mon.z *= im;

    double4 Q0, Q1;
    Q0 = (double4){oct_q11, oct_q22, oct_q33, maxEps};  // .w = max softening
    Q1 = (double4){oct_q12, oct_q13, oct_q23, 0};

    multipole[3*nodeID + 0] = mon;  // Monopole
    multipole[3*nodeID + 1] = Q0;   // Quadrupole diagonal
    multipole[3*nodeID + 2] = Q1;   // Quadrupole off-diagonal
    return;
}

// Convert the double-precision multipoles to single precision, centre the
// quadrupole on the centre of mass, and derive per-node box size/centre and
// the (squared, sign-encoded) opening criterion.  One thread per node.
extern "C" __global__ void compute_scaling(const int node_count,
                                           double4 *multipole,
                                           real4 *nodeLowerBounds,
                                           real4 *nodeUpperBounds,
                                           uint  *n_children,
                                           real4 *multipoleF,
                                           float theta,
                                           real4 *boxSizeInfo,
                                           real4 *boxCenterInfo,
                                           uint2 *node_bodies)
{
    const int bid = blockIdx.y * gridDim.x + blockIdx.x;
    const int tid = threadIdx.y * blockDim.x + threadIdx.x;
    const int idx = bid * (blockDim.x * blockDim.y) + tid;

    if (idx >= node_count) return;

    double4 monD, Q0, Q1;
    monD = multipole[3*idx + 0];  // Monopole
    Q0   = multipole[3*idx + 1];  // Quadrupole diagonal
    Q1   = multipole[3*idx + 2];  // Quadrupole off-diagonal

    // Centre the quadrupole on the centre of mass:
    //   q_ij/m - com_i*com_j.  Zero mass = tracer particles.
    double im = 1.0 / monD.w;
    if (monD.w == 0) im = 0;

    Q0.x = Q0.x*im - monD.x*monD.x;
    Q0.y = Q0.y*im - monD.y*monD.y;
    Q0.z = Q0.z*im - monD.z*monD.z;
    Q1.x = Q1.x*im - monD.x*monD.y;
    Q1.y = Q1.y*im - monD.y*monD.z;
    Q1.z = Q1.z*im - monD.x*monD.z;

    // Swap the y and z components (downstream consumers expect q13 and q23
    // in the opposite order from the accumulation above).
    double temp = Q1.y;
    Q1.y = Q1.z;
    Q1.z = temp;

    // Convert the doubles to floats.
    float4 mon = (float4){(float)monD.x, (float)monD.y,
                          (float)monD.z, (float)monD.w};
    multipoleF[3*idx + 0] = mon;
    multipoleF[3*idx + 1] = (float4){(float)Q0.x, (float)Q0.y,
                                     (float)Q0.z, (float)Q0.w};
    multipoleF[3*idx + 2] = (float4){(float)Q1.x, (float)Q1.y,
                                     (float)Q1.z, (float)Q1.w};

    float4 r_min, r_max;
    r_min = nodeLowerBounds[idx];
    r_max = nodeUpperBounds[idx];

    // Geometric centre and half-extent of the node's bounding box
    // (0.5f/fabsf keep the arithmetic in single precision).
    float3 boxCenter;
    boxCenter.x = 0.5f * (r_min.x + r_max.x);
    boxCenter.y = 0.5f * (r_min.y + r_max.y);
    boxCenter.z = 0.5f * (r_min.z + r_max.z);

    float3 boxSize = (float3){
        fmaxf(fabsf(boxCenter.x - r_min.x), fabsf(boxCenter.x - r_max.x)),
        fmaxf(fabsf(boxCenter.y - r_min.y), fabsf(boxCenter.y - r_max.y)),
        fmaxf(fabsf(boxCenter.z - r_min.z), fabsf(boxCenter.z - r_max.z))};

    // Distance between the box centre and the centre of mass.
    float3 s3 = (float3){(boxCenter.x - mon.x),
                         (boxCenter.y - mon.y),
                         (boxCenter.z - mon.z)};
    double s = sqrt((s3.x*s3.x) + (s3.y*s3.y) + (s3.z*s3.z));

    // Box edge length; times 2 since boxSize holds half-extents.
    float l = 2 * fmaxf(boxSize.x, fmaxf(boxSize.y, boxSize.z));

    // Store the box size and the packed child reference.
    boxSizeInfo[idx].x = boxSize.x;
    boxSizeInfo[idx].y = boxSize.y;
    boxSizeInfo[idx].z = boxSize.z;
    boxSizeInfo[idx].w = __int_as_float(n_children[idx]);

    boxCenterInfo[idx].x = boxCenter.x;
    boxCenterInfo[idx].y = boxCenter.y;
    boxCenterInfo[idx].z = boxCenter.z;

    // Clamp degenerate boxes.  This IS important: without it the leaf test
    // "cellOp < 0" can fail for 0.0; with it the value becomes e.g. -1e-12.
    if (l < 0.000001f)
        l = 0.000001f;

#ifdef IMPBH
    float cellOp = (l / theta) + s;
#else
    // Minimum-distance method.
    float cellOp = (l / theta);
#endif

    cellOp = cellOp * cellOp;

    // Negate the opening criterion for leaf nodes (r_max.w > 0 marks a leaf,
    // see compute_leaf); consumers use the sign as the leaf test.
    if (r_max.w > 0)
        cellOp = -cellOp;

    boxCenterInfo[idx].w = cellOp;

    // For leafs, redirect the child reference so it points at the particle
    // range instead of child nodes: first particle | (count-1) << LEAFBIT.
    bool leaf = (r_max.w > 0);
    if (leaf)
    {
        uint2 bij    = node_bodies[idx];
        uint  pfirst = bij.x & ILEVELMASK;
        uint  nchild = bij.y - pfirst;
        pfirst = pfirst | ((nchild-1) << LEAFBIT);
        boxSizeInfo[idx].w = __int_as_float(pfirst);
    }
    return;
}

// For leaf nodes, rewrite the child reference to the first particle and
// particle count; additionally copy node data into the group arrays for the
// first n_groups threads.  One thread per node.
extern "C" __global__ void copyNodeDataToGroupData(const int n_groups,
                                                   const int n_nodes,
                                                   uint4 *node_data,
                                                   // uint4 *group_data,
                                                   uint2 *node_bodies,
                                                   int   *group_list,
                                                   real4 *boxCenterInfo,
                                                   real4 *boxSizeInfo,
                                                   real4 *groupCenterInfo,
                                                   real4 *groupSizeInfo)
{
    const int bid = blockIdx.y * gridDim.x + blockIdx.x;
    const int tid = threadIdx.y * blockDim.x + threadIdx.x;
    const int idx = bid * (blockDim.x * blockDim.y) + tid;

    if (idx >= n_nodes) return;

    // Leaf test: compute_scaling stored a negated opening criterion in
    // boxCenterInfo.w for leafs.
    float temp = boxCenterInfo[idx].w;
    bool  leaf = temp <= 0;

    // For leafs, make the indirection point at the particle data.
    if (leaf)
    {
        uint2 bij    = node_bodies[idx];
        uint  pfirst = bij.x & ILEVELMASK;
        uint  nchild = bij.y - pfirst;
        pfirst = pfirst | ((nchild-1) << LEAFBIT);
        boxSizeInfo[idx].w = __int_as_float(pfirst);
    }

    // The remaining work only applies to group entries.
    if (idx >= n_groups) return;

    int   nodeID   = group_list[idx];
    real4 nodeData = boxSizeInfo[nodeID];

    // Pack first particle | (count-1) << CRITBIT into the group size record.
    uint2 bij    = node_bodies[nodeID];
    int   pfirst = bij.x & ILEVELMASK;
    int   nchild = bij.y - pfirst;
    pfirst = pfirst | (nchild-1) << CRITBIT;
    nodeData.w = __int_as_float(pfirst);

    groupSizeInfo[idx]   = nodeData;
    groupCenterInfo[idx] = boxCenterInfo[nodeID];
}

// Compute bounding box, centre and packed particle range for each group by
// reducing over the particles assigned to the group.  One block per group;
// blockDim.x threads cooperate in a shared-memory min/max reduction.
extern "C" __global__ void setPHGroupData(const int n_groups,
                                          const int n_particles,
                                          real4 *bodies_pos,
                                          int2  *group_list,
                                          real4 *groupCenterInfo,
                                          real4 *groupSizeInfo)
{
    const int bid = blockIdx.y * gridDim.x + blockIdx.x;
    const int tid = threadIdx.y * blockDim.x + threadIdx.x;

    if (bid >= n_groups) return;

    // Shared scratch for the reduction (first NCRIT entries: minima,
    // next NCRIT entries: maxima).
    volatile __shared__ float3 shmem[2*NCRIT];
    volatile float3 *sh_rmin = (float3*)&shmem[    0];
    volatile float3 *sh_rmax = (float3*)&shmem[NCRIT];

    float3 r_min = (float3){+1e10f, +1e10f, +1e10f};
    float3 r_max = (float3){-1e10f, -1e10f, -1e10f};

    int start = group_list[bid].x;
    int end   = group_list[bid].y;

    // NOTE(review): partIdx is derived from threadIdx.x while tid is the
    // full 2-D thread index; these only agree when blockDim.y == 1 —
    // confirm the launch configuration.
    int partIdx = start + threadIdx.x;

    // Seed shared memory: out-of-range threads contribute neutral
    // sentinels, in-range threads contribute their particle's position.
    if (partIdx >= end)
    {
        sh_rmin[tid].x = r_min.x; sh_rmin[tid].y = r_min.y; sh_rmin[tid].z = r_min.z;
        sh_rmax[tid].x = r_max.x; sh_rmax[tid].y = r_max.y; sh_rmax[tid].z = r_max.z;
    }
    else
    {
        sh_rmin[tid].x = r_min.x = bodies_pos[partIdx].x;
        sh_rmin[tid].y = r_min.y = bodies_pos[partIdx].y;
        sh_rmin[tid].z = r_min.z = bodies_pos[partIdx].z;
        sh_rmax[tid].x = r_max.x = bodies_pos[partIdx].x;
        sh_rmax[tid].y = r_max.y = bodies_pos[partIdx].y;
        sh_rmax[tid].z = r_max.z = bodies_pos[partIdx].z;
    }

    __syncthreads();

    // Tree reduction in shared memory.  The tail (tid < 32 and below) uses
    // the legacy volatile warp-synchronous idiom without barriers.
    // NOTE(review): this relies on implicit warp lockstep and is unsafe
    // under Volta+ independent thread scheduling — confirm target arch or
    // add __syncwarp() between steps.
    if (blockDim.x >= 512)
        if (tid < 256) { sh_MinMax(tid, tid + 256, &r_min, &r_max, sh_rmin, sh_rmax); }
    __syncthreads();
    if (blockDim.x >= 256)
        if (tid < 128) { sh_MinMax(tid, tid + 128, &r_min, &r_max, sh_rmin, sh_rmax); }
    __syncthreads();
    if (blockDim.x >= 128)
        if (tid < 64)  { sh_MinMax(tid, tid +  64, &r_min, &r_max, sh_rmin, sh_rmax); }
    __syncthreads();
    if (blockDim.x >= 64)
        if (tid < 32)  { sh_MinMax(tid, tid +  32, &r_min, &r_max, sh_rmin, sh_rmax); }
    if (blockDim.x >= 32)
        if (tid < 16)  { sh_MinMax(tid, tid +  16, &r_min, &r_max, sh_rmin, sh_rmax); }

    if (tid < 8)
    {
        sh_MinMax(tid, tid + 8, &r_min, &r_max, sh_rmin, sh_rmax);
        sh_MinMax(tid, tid + 4, &r_min, &r_max, sh_rmin, sh_rmax);
        sh_MinMax(tid, tid + 2, &r_min, &r_max, sh_rmin, sh_rmax);
        sh_MinMax(tid, tid + 1, &r_min, &r_max, sh_rmin, sh_rmax);
    }

    // Thread 0 writes the group's result to global memory.
    if (tid == 0)
    {
        // Group centre and half-extent (0.5f/fabsf keep this in single
        // precision).
        float3 grpCenter;
        grpCenter.x = 0.5f * (r_min.x + r_max.x);
        grpCenter.y = 0.5f * (r_min.y + r_max.y);
        grpCenter.z = 0.5f * (r_min.z + r_max.z);

        float3 grpSize = (float3){
            fmaxf(fabsf(grpCenter.x - r_min.x), fabsf(grpCenter.x - r_max.x)),
            fmaxf(fabsf(grpCenter.y - r_min.y), fabsf(grpCenter.y - r_max.y)),
            fmaxf(fabsf(grpCenter.z - r_min.z), fabsf(grpCenter.z - r_max.z))};

        // Store the group size and the packed particle range:
        // first particle | (count-1) << CRITBIT.
        groupSizeInfo[bid].x = grpSize.x;
        groupSizeInfo[bid].y = grpSize.y;
        groupSizeInfo[bid].z = grpSize.z;

        int nchild = end - start;
        start = start | (nchild-1) << CRITBIT;
        groupSizeInfo[bid].w = __int_as_float(start);

        float l = fmaxf(grpSize.x, fmaxf(grpSize.y, grpSize.z));

        groupCenterInfo[bid].x = grpCenter.x;
        groupCenterInfo[bid].y = grpCenter.y;
        groupCenterInfo[bid].z = grpCenter.z;

        // Largest half-extent, stored as a test statistic for the physical
        // group size (otherwise a free variable).
        groupCenterInfo[bid].w = l;
    } // end tid == 0
} // end setPHGroupData
the_stack
#include "EdgeCheck.h" #include <iostream> #include <cmath> using namespace std; // 宏:DEF_BLOCK_X 和 DEF_BLOCK_Y // 定义了默认的线程块的尺寸。 #define DEF_BLOCK_X 32 #define DEF_BLOCK_Y 8 // 宏:DEF_BLOCK_1D // 定义一维块大小。 #define DEF_BLOCK_1D 256 // 宏:DEF_COL_MAX // 定义欧式距离最大列数。 #define DEF_COL_MAX 1024 // 宏:DEF_HUMOM_SIZE // 定义 Hu 矩大小。 #define DEF_HUMOM_SIZE 7 // 宏:ERR_EDEFORM 和 ERR_EHUMOM // 定义错误码。 #define ERR_EDEFORM 200 #define ERR_EHUMOM 150 // 宏:DEF_ERR_COUNT // errmap 中每个点的邻域大小中错误点的个数。 #define DEF_ERR_COUNT 3 // 宏:DEF_INVALID_FLOAT // 定义 float 类型的无效数据。 #define DEF_INVALID_FLOAT 100000000.0f // Kernel 函数:_edgeMatchKer(边缘匹配算法) // 计算测试图像每个参考图像的相关系数。 static __global__ void // Kernel 函数无返回值 _edgeMatchKer( ImageCuda teimg, // 测试图像 ImageCuda *reimg, // 参考图像数组 int recount, // 参考图像数量 float *cormapsum // 相关系数数组 ); // Kernel 函数:_getCormapMaxIndexKer(获取 cormapsum 中最大的值的索引) // 在匹配得到的结果中找到最大的值。 static __global__ void // Kernel 函数无返回值 _getCormapMaxIndexKer( float *cormap, // cormapsum 的数据 int count, // cormapsum 中数据的数量 int *maxindx // 最大值索引 ); // Kernel 函数:_imgConvertCstKer(实现将图像转化为坐标集算法) // 当输入参数为坐标集时,此算法将细化后的图像转化为输出坐标集。 static __global__ void // Kernel 函数无返回值 _imgConvertCstKer( ImageCuda outimg, // 输出图像 CoordiSet outcst, // 输出坐标 unsigned char highpixel, // 高像素 int *outcstcount // 坐标集索引 ); // Kernel 函数:_localMomentsKer(计算边缘点的 local moments) // 计算边缘坐标集合上的每个点的 local moments。 static __global__ void // Kernel 函数无返回值 _localMomentsKer( CoordiSet cdset, // 边缘的坐标集合 int width, // 顺逆时针跟踪的宽度 float *moments // local moments 特征矩阵 ); // Kernel 函数:_euclidMatKer(计算边缘点间的欧式距离) // 计算参考边缘和测试边缘的所有点的欧式距离,输出到矩阵中。 static __global__ void // Kernel 函数无返回值 _euclidMatKer( CoordiSet recdset, // 参考边缘的坐标集合 CoordiSet tecdset, // 测试边缘的坐标集合 int group, // 参考边缘的分段大小 float *eudmat, // 欧式距离矩阵 float *indexmat // 索引下标矩阵 ); // Kernel 函数: _findRowMinKer(查找行最小值) // 根据差值矩阵 diffmatrix,查找每一行的最小值,并将每一行出现最小 // 值的行列号保存在数组 rowmin 中。 static __global__ void // Kernel 函数无返回值 _findRowMinKer( float *eudmat, // 欧式距离矩阵 float *indexmat, // 索引下标矩阵 
int matwidth, // 距离列数大小 int rowlen, // 最大列数 float maxdis2, // 欧式距离阈值平方 int *rowmin // 行最小值矩阵 ); // Kernel 函数: _relateCoeffKer(计算特征向量间的相关系数) // 根据测试边缘点和参考边缘点的对应关系,计算 local moments 特征向量间的标准 // 相关系数。 static __global__ void // Kernel 函数无返回值 _relateCoeffKer( float *temoments, // 测试边缘点的 local moments 特征矩阵 float *remoments, // 测试边缘点的 local moments 特征矩阵 float mincor, // 标准相关系数的阈值大小 int *rowmin // 相关点的对应索引值 ); // Kernel 函数: _makeErrmapKer(错误码标记输入到 errmap 图像中) // 根据 rowmin 中的错误码标记,输入到 errmap 图像中。 static __global__ void // Kernel 函数无返回值 _makeErrmapKer( CoordiSet tecdset, // 边缘的坐标集合 int *rowmin, // 相关点的对应索引值 ImageCuda errmap // 错误码图像 ); // Kernel 函数: _confirmErrmapKer(确定 errmap 中的错误点为异常) // 根据 errmap 图像中每个错误点,如果其 3 * 3 邻域内包括 3 个以上的异常点, // 则确定当前错误点为异常点。 static __global__ void // Kernel 函数无返回值 _confirmErrmapKer( ImageCuda errmap, // 错误码图像 ImageCuda errpoint // 异常点图像 ); // Kernel 函数:_edgeMatchKer(边缘匹配算法) static __global__ void _edgeMatchKer(ImageCuda teimg, ImageCuda *reimg, int recount, float *cormapsum) { // 计算线程对应的输出点的位置,其中 c 和 r 分别表示线程处理的像素点的坐标的 // x 和 y 分量(其中,c 表示 column;r 表示 row)。由于我们采用了并行度 // 缩减的策略,令一个线程处理 8 个输出像素,这八个像素位于统一列的相邻行上, // 因此,对于 r 需要进行乘 8 计算。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = (blockIdx.y * blockDim.y + threadIdx.y) * 8; int z = blockIdx.z; // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源,一 // 方面防止由于段错误导致的程序崩溃。 // 同时检查 z 的值,若大于参考图像数量则 z 值无效,直接返回。 if (c >= teimg.imgMeta.width || r >= teimg.imgMeta.height || z >= recount) return; // 申请一个 float 型共享内存,用于存储每每个块内线程所计算相关系数总和。 __shared__ float cormap; // 读取线程号。 int threadid = threadIdx.y * blockDim.x + threadIdx.x; // 局部变量,存储当前线程内像素点的相关系数和。 float tcormap = 0.0f; // 存储测试图像和参考图像的像素值。 unsigned char tepix,repix; // 用每个块内的0号线程给共享内存赋初值。 if (threadid == 0 ) cormap = 0.0f; // 块内同步。 __syncthreads(); // 计算测试图像第一个输入坐标点对应的图像数据数组下标。 int teimgidx = r * teimg.pitchBytes + c; // 计算参考图像第一个输入坐标点对应的图像数据数组下标。 int reimgidx = r * reimg[z].pitchBytes + c; // 读取第一个输入坐标点对应的像素值。 tepix = teimg.imgMeta.imgData[teimgidx]; repix = 
reimg[z].imgMeta.imgData[reimgidx]; // 计算当前两个点的相关系数。 tcormap = tepix * repix; // 处理后 7 个点。 for(int j = 1; j < 8; j++) { // y 分量加 1 。 r++; // 获取当前像素点坐标。 teimgidx += teimg.pitchBytes; reimgidx += reimg[z].pitchBytes; // 若当前像素点越界,则跳过该点,处理下一点;否则计算当前点的相关系数。 if (r < teimg.imgMeta.height) { // 读取第 j 个输入坐标点对应的测试图像和参考图像像素值。 tepix = teimg.imgMeta.imgData[teimgidx]; repix = reimg[z].imgMeta.imgData[reimgidx]; // 计算当前两个点的相关系数并累加。 tcormap += tepix * repix; } } // 原子操作将当前线程所计算相关系数和累加到共享内存中。 tcormap = tcormap / (255 * 255); atomicAdd(&cormap, tcormap); // 块内同步。 __syncthreads(); // 每个块内 0 号线程将该块所计算的相关系数和累加入 // 每个参考图像总的相关系数和中。 if (threadid == 0 ) // 原子操作将该块所计算的相关系数和累加入每个参考图像总的相关系数和中。 atomicAdd(&cormapsum[z], cormap); } // Kernel 函数:_imgConvertCstKer(实现将图像转化为坐标集算法) static __global__ void _imgConvertCstKer(ImageCuda outimg, CoordiSet outcst, unsigned char highpixel, int *outcstcount) { // dstc 和 dstr 分别表示线程处理的像素点的坐标的 x 和 y 分量 (其中, // dstc 表示 column, dstr 表示 row )。 int dstc = blockIdx.x * blockDim.x + threadIdx.x; int dstr = blockIdx.y * blockDim.y + threadIdx.y; // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算 // 资源,另一方面防止由于段错误导致程序崩溃。 if (dstc >= outimg.imgMeta.width || dstr >= outimg.imgMeta.height) return ; // 定义目标点位置的指针。 unsigned char *outptr; // 获取当前像素点在图像中的相对位置。 int curpos = dstr * outimg.pitchBytes + dstc; // 获取当前像素点在图像中的绝对位置。 outptr = outimg.imgMeta.imgData + curpos; // 若当前像素值等于 highpixel 值。 if (*outptr == highpixel) { // 原子操作获得当前坐标点的索引值。 int idx = atomicAdd(outcstcount, 1); // 保存图像的横纵坐标到输出坐标集中。 outcst.tplData[idx * 2] = dstc; outcst.tplData[idx * 2 + 1] = dstr; } } // Kernel 函数:_getCormapMaxIndexKer(获取 cormapsum 中最大的值的索引) static __global__ void _getCormapMaxIndexKer(float *cormapcpu, int count,int *maxindex) { // 获取当前线程的线程号。 int threadtid = threadIdx.x; // 声明共享内存,保存当前块内的相关系数矩阵和索引矩阵。 extern __shared__ float datashare[]; if (threadtid < count) { // 将当前线程对应的相关系数矩阵中的值以及其对应的索引(即列号)保存 // 在该块的共享内存中。 datashare[threadtid] = *(cormapcpu + threadtid); datashare[threadtid + count] = threadtid; } else 
{ datashare[threadtid] = DEF_INVALID_FLOAT; datashare[threadtid + count] = DEF_INVALID_FLOAT; } // 块内同步,为了保证一个块内的所有线程都已经完成了上述操作,即存 // 储该行的欧式距离和索引到共享内存中。 __syncthreads(); // 使用双调排序的思想,找到该行的最小值。 for (int k = 1; k < count; k <<= 1) { // 对待排序的元素进行分组,每次都将较大的元素交换到数组中 // 较前的位置,然后改变分组大小,进而在比较上一次得到的较大值 // 并做相应的交换,以此类推,最终数组中第 0 号元素存放的是该行 // 的最大值。 if (((threadtid % (k << 1)) == 0) && datashare[threadtid] < datashare[threadtid + k] ) { // 两个值进行交换。 float temp1 = datashare[threadtid]; datashare[threadtid] = datashare[threadtid + k]; datashare[threadtid + k] = temp1; // 交换相对应的索引 index 值。 float temp2 = datashare[threadtid + count]; datashare[threadtid + count] = datashare[threadtid + k + count]; datashare[threadtid + k + count] = temp2; } // 块内同步。 __syncthreads(); } // 将最大值的索引保存在 maxindex 中。 *maxindex = (int)datashare[count]; } // Kernel 函数:_localMomentsKer(计算边缘点的 local moments) static __global__ void _localMomentsKer(CoordiSet cdset, int width, float *moments) { // 读取线程号。 int idx = blockIdx.x * blockDim.x + threadIdx.x; // 获得坐标集大小。 int count = cdset.count; // 多余的线程直接退出。 if (idx >= count) return; // 声明空间矩。 float m00 = 0.0f, m01 = 0.0f, m10 = 0.0f, m11 = 0.0f, m02 = 0.0f, m20 = 0.0f, m12 = 0.0f, m21 = 0.0f, m03 = 0.0f, m30 = 0.0f; // 计算当前边缘点的空间矩。 for (int i = -width; i <= width; i++) { // 判断邻域是否越界,注意坐标集合的首尾显示上是相连的。 int temp = idx + i; if (temp < 0) temp = count + temp; if (temp > count - 1) temp = temp - count; // 获得当前边缘点的横纵坐标。 int xdata = cdset.tplData[2 * temp]; int ydata = cdset.tplData[2 * temp + 1]; // 计算当前边缘点的空间矩。 m00 += 1; m01 += ydata; m10 += xdata; m11 += xdata * ydata; m02 += ydata * ydata; m20 += xdata * xdata; m12 += xdata * ydata * ydata; m21 += xdata * xdata * ydata; m03 += ydata * ydata * ydata; m30 += xdata * xdata * xdata; } // 声明矩中心。 float centerx = 0.0f, centery = 0.0f; centerx = m10 / m00; centery = m01 / m00; // 声明中心矩。 float u00, /*u01, u10,*/ u11, u02, u20, u12, u21, u03, u30; float centerx2 = centerx * centerx; float centery2 = centery * centery; // 
计算当前边缘点的中心矩。 u00 = m00; // u01 = 0.0f; // u10 = 0.0f; u11 = m11 - centerx * m01; u20 = m20 - centerx * m10; u02 = m02 - centery * m01; u21 = m21 - 2 * centerx * m11 - centery * m20 + 2 * centerx2 * m01; u12 = m12 - 2 * centery * m11 - centerx * m02 + 2 * centery2 * m10; u30 = m30 - 3 * centerx * m20 + 2 * centerx2 * m10; u03 = m03 - 3 * centery * m02 + 2 * centery2 * m01; // 声明正规矩。 float /*n00, n01, n10,*/ n11, n02, n20, n12, n21, n03, n30; float temp1= pow(u00, 2.0f), temp2 = pow(u00, 2.5f); // 计算当前边缘点的中心矩。 n11 = u11 / temp1; n20 = u20 / temp1; n02 = u02 / temp1; n21 = u21 / temp2; n12 = u12 / temp2; n30 = u30 / temp2; n03 = u03 / temp2; // 计算当前边缘点在特征矩阵中对应的行数。 float * humoments = moments + idx * DEF_HUMOM_SIZE; // 声明临时变量,减少重复计算。 float t0 = n30 + n12, t1 = n21 + n03; float q0 = t0 * t0, q1 = t1 * t1; float n4 = 4 * n11; float s = n20 + n02, d = n20 - n02; // 计算 Hu 矩值 0, 1, 3, 5。 humoments[0] = s; humoments[1] = d * d + n4 * n11; humoments[3] = q0 + q1; humoments[5] = d * (q0 - q1) + n4 * t0 * t1; // 改变临时变量。 t0 *= q0 - 3 * q1; t1 *= 3 * q0 - q1; q0 = n30 - 3 * n12; q1 = 3 * n21 - n03; // 计算 Hu 矩值 2, 4, 6。 humoments[2] = q0 * q0 + q1 * q1; humoments[4] = q0 * t0 + q1 * t1; humoments[6] = q1 * t0 - q0 * t1; } // Kernel 函数:_euclidMatKer(计算边缘点间的欧式距离) static __global__ void _euclidMatKer(CoordiSet recdset, CoordiSet tecdset, int group, float *eudmat, float *indexmat) { // 获取当前线程的块号。 int blocktid = blockIdx.x; // 获取当前线程的线程号。 int threadtid = threadIdx.x; // 计算矩阵中对应的输出点的位置。 int inidx = blockIdx.x * blockDim.x + threadIdx.x; // 获得当前线程对应的测试边缘点的坐标。 int tecurx = tecdset.tplData[2 * blocktid]; int tecury = tecdset.tplData[2 * blocktid + 1]; // 保存分段内 group 个距离的最小值。 float mindis = DEF_INVALID_FLOAT; // 记录最小值下标。 int minindex = 0; // 测试边缘的每个坐标点与分段内 group 个参考边缘的坐标点计算欧式距离。这样 // 可以减少欧式矩阵的大小,便于后续算法操作。 for (int i = 0; i < group; i++) { // 获得参考边缘点的坐标。 int tempidx = threadtid * group + i; int recurx = recdset.tplData[2 * tempidx]; int recury = recdset.tplData[2 * tempidx + 1]; // 计算欧式距离。 
float distemp = (float)((tecurx - recurx) * (tecurx - recurx) + (tecury - recury) * (tecury - recury)); // 记录最小值和下标。 if (distemp < mindis) { mindis = distemp; minindex = tempidx; } } // 将最小值保存到矩阵的当前元素中。 *(eudmat + inidx) = mindis; *(indexmat + inidx) = (float)minindex; } // Kernel 函数: _findRowMinKer(查找行最小值) static __global__ void _findRowMinKer(float *eudmat, float *indexmat, int matwidth, int rowlen, float maxdis2, int *rowmin) { // 获取当前线程的块号。 int blocktid = blockIdx.x; // 获取当前线程的线程号。 int threadtid = threadIdx.x; // 计算当前线程在矩阵中的偏移。 int tid = blockIdx.x * blockDim.x + threadIdx.x; // 声明共享内存,保存当前块内的欧式距离矩阵和索引矩阵。 extern __shared__ float datashare[]; if (threadtid < matwidth) { // 将当前线程对应的差值矩阵中的值以及其对应的索引(即列号)保存 // 在该块的共享内存中。 datashare[threadtid] = *(eudmat + tid); datashare[threadtid + rowlen] = *(indexmat + tid); } else { datashare[threadtid] = DEF_INVALID_FLOAT; datashare[threadtid + rowlen] = DEF_INVALID_FLOAT; } // 块内同步,为了保证一个块内的所有线程都已经完成了上述操作,即存 // 储该行的欧式距离和索引到共享内存中。 __syncthreads(); // 使用双调排序的思想,找到该行的最小值。 for (int k = 1; k < rowlen; k <<= 1) { // 对待排序的元素进行分组,每次都将距离较小的元素交换到数组中 // 较前的位置,然后改变分组大小,进而在比较上一次得到的较小值 // 并做相应的交换,以此类推,最终数组中第 0 号元素存放的是该行 // 的最小值。 if (((threadtid % (k << 1)) == 0) && datashare[threadtid] > datashare[threadtid + k] ) { // 两个欧式距离进行交换。 float temp1 = datashare[threadtid]; datashare[threadtid] = datashare[threadtid + k]; datashare[threadtid + k] = temp1; // 交换相对应的索引 index 值。 float temp2 = datashare[threadtid + rowlen]; datashare[threadtid + rowlen] = datashare[threadtid + k + rowlen]; datashare[threadtid + k + rowlen] = temp2; } // 块内同步。 __syncthreads(); } // 将当前行最小值出现的列号保存在数组 rowmin 中。如果最小距离大于指定阈值, // 则设置错误码 ERR_EDEFORM。 if (datashare[0] < maxdis2) rowmin[blocktid] = (int)datashare[rowlen]; else rowmin[blocktid] = ERR_EDEFORM; } // Kernel 函数: _relateCoeffKer(计算特征向量间的相关系数) static __global__ void _relateCoeffKer( float *temoments, float *remoments, float mincor, int *rowmin) { // 读取线程号。 int idx = blockIdx.x * blockDim.x + threadIdx.x; // 
获得当前测试边缘点对应的参考边缘点坐标。 int corr = rowmin[idx]; // 如果没有对应的参考边缘点,则不处理当前的测试边缘点。 if (corr == ERR_EDEFORM) return; // 获得当前测试边缘点的 local moments。 float *tepoint = temoments + idx * DEF_HUMOM_SIZE; // 获得对应的参考边缘点的 local moments。 float *repoint = remoments + idx * DEF_HUMOM_SIZE; // 计算标准相关系数。 float sum = 0.0f; for (int i = 0; i < 7; i++) { sum += tepoint[i] * repoint[i]; } sum /= 7; if (sum < mincor) rowmin[idx] = ERR_EHUMOM; } // Kernel 函数: _makeErrmapKer(错误码标记输入到 errmap 图像中) static __global__ void _makeErrmapKer(CoordiSet tecdset, int *rowmin, ImageCuda errmap) { // 读取线程号。 int idx = blockIdx.x * blockDim.x + threadIdx.x; // 判断线程是否超出界限。 if (idx >= tecdset.count) return; // 获得当前点在 errmap 图像中的位置。 int curerr = tecdset.tplData[2 * idx + 1] * errmap.pitchBytes + tecdset.tplData[2 * idx]; // 如果当前标记是错误码的话,则是异常点。 if (rowmin[idx] == ERR_EDEFORM || rowmin[idx] == ERR_EHUMOM) { // 将错误码输出到 errmap 图像中,表示该点是异常点。 errmap.imgMeta.imgData[curerr] = (unsigned char)rowmin[idx]; } else { // 否则设置值为 0,表示非异常点。 errmap.imgMeta.imgData[curerr] = 0; } } // Kernel 函数: _confirmErrmapKer(确定 errmap 中的错误点为异常) static __global__ void _confirmErrmapKer(ImageCuda errmap, ImageCuda errpoint) { // dstc 和 dstr 分别表示线程处理的像素点的坐标的 x 和 y 分量 (其中, // c 表示 column, r 表示 row)。 int dstc = blockIdx.x * blockDim.x + threadIdx.x; int dstr = blockIdx.y * blockDim.y + threadIdx.y; // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算 // 资源,另一方面防止由于段错误导致程序崩溃 if (dstc >= errmap.imgMeta.width || dstr >= errmap.imgMeta.height) return; // 记录输入图像对应位置。 unsigned char *curinptr; curinptr = errmap.imgMeta.imgData + dstc + dstr * errmap.pitchBytes; // 如果当前点不是错误点则直接退出。 if (*curinptr != ERR_EDEFORM && *curinptr != ERR_EHUMOM) return; // 记录输出图像对应位置。 unsigned char *curoutptr; curoutptr = errpoint.imgMeta.imgData + dstc + dstr * errpoint.pitchBytes; // 因为是存放邻域内错误点的个数,所以先初始化为最小值 0x00。 unsigned char count = 0; // 保存邻域的像素值。 unsigned char neighpixel; for (int j = dstr - 1; j <= dstr + 1; j++) { for (int i = dstc - 1; i <= dstc + 1; i++) { // 判断当前像素是否越界。 if (j >= 0 && 
j < errmap.imgMeta.height && i >= 0 && i < errmap.imgMeta.width) { // 循环计算每个邻域内错误点的个数。 neighpixel = *(errmap.imgMeta.imgData + i + j * errmap.pitchBytes); if (neighpixel == ERR_EDEFORM || neighpixel == ERR_EHUMOM) count++; // 如果计数个数大于 DEF_ERR_COUNT 个,则确定当前点为异常点, // 并结束循环。 if (count >= DEF_ERR_COUNT) { *curoutptr = *curinptr; return; } } } } } // 宏:FAIL_EDGECHECK_SPACE_FREE // 如果出错,就释放之前申请的内存。 #define FAIL_EDGECHECK_SPACE_FREE do { \ if (reimgCud != NULL) \ delete []reimgCud; \ if (tecdset != NULL) \ CoordiSetBasicOp::deleteCoordiSet(tecdset); \ if (recdset != NULL) \ CoordiSetBasicOp::deleteCoordiSet(recdset); \ if (reimgcudDev != NULL) \ cudaFree(reimgcudDev); \ if (alldevpointer != NULL) \ cudaFree(alldevpointer); \ if (alldevpointermat != NULL) \ cudaFree(alldevpointermat); \ if (errmap != NULL) \ ImageBasicOp::deleteImage(errmap); \ } while (0) // Host 成员方法:edgeCheckPoint(边缘的异常点检查) __host__ int EdgeCheck::edgeCheckPoint(Image *teimg, Image *errpoint) { // 检查测试图像, errpoint 和参考图像是否为空,若为空则直接返回。 if (teimg == NULL || errpoint == NULL || reImages == NULL) return NULL_POINTER; // 检查每幅参考图像是否为空,若为空则直接返回。 for(int i = 0; i < reCount; i++) { if(reImages[i] == NULL) return NULL_POINTER; } // 局部变量,错误码。 int errcode; // 声明所有中间变量并初始化为空。 ImageCuda *reimgCud = NULL; CoordiSet *tecdset = NULL; CoordiSet *recdset = NULL; ImageCuda *reimgcudDev = NULL; float *alldevpointer = NULL; float *alldevpointermat = NULL; Image *errmap = NULL; // 将测试图像数据拷贝到 Device 内存中。 errcode = ImageBasicOp::copyToCurrentDevice(teimg); if (errcode != NO_ERROR) return errcode; // 提取测试图像的 ROI 子图。 ImageCuda teimgCud; errcode = ImageBasicOp::roiSubImage(teimg, &teimgCud); if (errcode != NO_ERROR) return errcode; // 将所有参考图像拷贝入 Device 内存。 for (int i = 0; i < reCount; i++) { errcode = ImageBasicOp::copyToCurrentDevice(reImages[i]); if (errcode != NO_ERROR) return errcode; } // 提取所有参考图像的 ROI 子图。 reimgCud = new ImageCuda[reCount]; for (int i = 0; i < reCount; i++) { errcode = ImageBasicOp::roiSubImage(reImages[i], 
&reimgCud[i]); if (errcode != NO_ERROR) return errcode; } // 为 reimgcudDev 分配内存空间。 errcode = cudaMalloc((void **)&reimgcudDev, reCount * sizeof (ImageCuda)); if (errcode != cudaSuccess) { FAIL_EDGECHECK_SPACE_FREE; return errcode; } // 将 Host 上的 reimgCud 拷贝到 Device 上。 errcode = cudaMemcpy(reimgcudDev, reimgCud, reCount * sizeof (ImageCuda), cudaMemcpyHostToDevice); // 判断是否拷贝成功,若失败,释放之前的空间,防止内存泄漏,然后返回错误。 if (errcode != cudaSuccess) { FAIL_EDGECHECK_SPACE_FREE; return errcode; } // 计算坐标集初始大小。 int count = teimg->height * teimg->width; // 一次申请 Device 端所有空间。 cudaError_t cudaerrcode; // 申明所有指针变量。 float *cormapsumDev; int *maxindexDev, *tecountdev, *recountdev; float *deveudmat, *devindexmat; int *devrowmin; // 为 alldevpointer 分配空间。 cudaerrcode = cudaMalloc((void **)&alldevpointer, (reCount + 3) * sizeof (float)); if (cudaerrcode != cudaSuccess) { FAIL_EDGECHECK_SPACE_FREE; return cudaerrcode; } // 初始化所有 Device 上的内存空间。 cudaerrcode = cudaMemset(alldevpointer, 0, (reCount + 3) * sizeof (float)); if (cudaerrcode != cudaSuccess) { FAIL_EDGECHECK_SPACE_FREE; return cudaerrcode; } // 获得 cormapsumDev 位置指针。 cormapsumDev = alldevpointer; // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 dim3 blocksize, gridsize; blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; blocksize.z = 1; gridsize.x = (teimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; gridsize.y = (teimgCud.imgMeta.height + blocksize.y * 8 - 1) / (blocksize.y * 8); gridsize.z = reCount; // 调用匹配函数对每个参考图像进行匹配。 _edgeMatchKer<<<gridsize, blocksize>>>(teimgCud,reimgcudDev, reCount,cormapsumDev); // 若调用核函数出错返回错误代码。 if (cudaGetLastError() != cudaSuccess) { FAIL_EDGECHECK_SPACE_FREE; return CUDA_ERROR; } // 获得 maxindexDev 位置指针。 maxindexDev = (int *)(cormapsumDev + reCount); size_t blocksize1D, gridsize1D; blocksize1D = 1; // 调用 _getCormapMaxIndexKer 函数,找到相关系数最大的参考图像, // 即为匹配图像。 _getCormapMaxIndexKer<<<blocksize1D, reCount, 2 * reCount>>>(cormapsumDev, reCount, maxindexDev); // 若调用核函数出错返回错误代码。 if (cudaGetLastError() != cudaSuccess) { 
FAIL_EDGECHECK_SPACE_FREE; return CUDA_ERROR; } // 将 Device 端的 maxindexDev 拷贝到 Host 端。 int maxindex; errcode = cudaMemcpy(&maxindex, maxindexDev, sizeof (int), cudaMemcpyDeviceToHost); // 判断是否拷贝成功,若失败,释放之前的空间,防止内存泄漏,然后返回错误。 if (errcode != cudaSuccess) { FAIL_EDGECHECK_SPACE_FREE; return errcode; } // 获得 tecountdev 和 recountdev 位置指针。 tecountdev = maxindexDev + 1; recountdev = tecountdev + 1; // 创建测试边缘的坐标集合和匹配得到的参考边缘的坐标集合。 CoordiSetBasicOp::newCoordiSet(&tecdset); if (errcode != NO_ERROR) { FAIL_EDGECHECK_SPACE_FREE; return errcode; } CoordiSetBasicOp::newCoordiSet(&recdset); if (errcode != NO_ERROR) { FAIL_EDGECHECK_SPACE_FREE; return errcode; } // 在 Device 端创建测试边缘的坐标集合。 errcode = CoordiSetBasicOp::makeAtCurrentDevice(tecdset,count); if (errcode != NO_ERROR) { FAIL_EDGECHECK_SPACE_FREE; return errcode; } // 在 Device 端创建匹配参考边缘的坐标集合。 errcode = CoordiSetBasicOp::makeAtCurrentDevice(recdset,count); if (errcode != NO_ERROR) { FAIL_EDGECHECK_SPACE_FREE; return errcode; } // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; gridsize.x = (teimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; gridsize.y = (teimgCud.imgMeta.height + blocksize.y - 1) / blocksize.y; gridsize.z = 1; // 将测试图像转化为坐标集。 _imgConvertCstKer<<<gridsize, blocksize>>>(teimgCud, *tecdset, this->highPixel, tecountdev); // 若调用核函数出错返回错误代码。 if (cudaGetLastError() != cudaSuccess) { FAIL_EDGECHECK_SPACE_FREE; return CUDA_ERROR; } // 将匹配参考图像转化为坐标集。 _imgConvertCstKer<<<gridsize, blocksize>>>(reimgCud[maxindex], *recdset, this->highPixel, recountdev); // 若调用核函数出错返回错误代码。 if (cudaGetLastError() != cudaSuccess) { FAIL_EDGECHECK_SPACE_FREE; return CUDA_ERROR; } // 将 Device 上的 recountdev 拷贝到 Host 上。 int recount; errcode = cudaMemcpy(&recount,recountdev, sizeof (int), cudaMemcpyDeviceToHost); // 判断是否拷贝成功,若失败,释放之前的空间,防止内存泄漏,然后返回错误。 if (errcode != cudaSuccess) { FAIL_EDGECHECK_SPACE_FREE; return errcode; } // 将 Device 上的 tecountdev 拷贝到 Host 上。 int tecount; errcode = 
cudaMemcpy(&tecount,tecountdev, sizeof (int), cudaMemcpyDeviceToHost); // 判断是否拷贝成功,若失败,释放之前的空间,防止内存泄漏,然后返回错误。 if (errcode != cudaSuccess) { FAIL_EDGECHECK_SPACE_FREE; return errcode; } // 测试边缘的每个点与参考边缘的 group 个点计算欧式距离。 int group = (recount + DEF_COL_MAX - 1) / DEF_COL_MAX; // 计算欧式距离矩阵的宽度和高度。 int matwidth = (recount + group - 1) / group; int matheight = tecount; // 为 alldevpointermat 分配空间。 cudaerrcode = cudaMalloc((void **)&alldevpointermat, (recount * tecount + 2 * matheight * matwidth) * sizeof (float)); if (cudaerrcode != cudaSuccess) { FAIL_EDGECHECK_SPACE_FREE; return cudaerrcode; } // 初始化所有 Device 上的内存空间。 cudaerrcode = cudaMemset(alldevpointermat, 0, (recount * tecount + 2 * matheight * matwidth) * sizeof (float)); if (cudaerrcode != cudaSuccess) { FAIL_EDGECHECK_SPACE_FREE; return cudaerrcode; } // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 blocksize1D = matwidth; gridsize1D = matheight; // 获得欧式距离矩阵和索引矩阵位置指针。 deveudmat = alldevpointermat; devindexmat = deveudmat + matheight * matwidth; // 调用核函数,计算参考边缘和测试边缘的所有点的欧式距离的矩阵。 _euclidMatKer<<<gridsize1D, blocksize1D>>>(*recdset, *tecdset, group, deveudmat, devindexmat); // 若调用核函数出错返回错误代码。 if (cudaGetLastError() != cudaSuccess) { FAIL_EDGECHECK_SPACE_FREE; return CUDA_ERROR; } // 获得行最小值数组指针。 devrowmin = (int *)(devindexmat + matheight * matwidth); // 获得最小 2 的幂次数,使得排序的长度满足 2 的幂次方。 int exponent = (int)ceil(log((float)matwidth) / log(2.0f)); int length = (1 << exponent); // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 blocksize1D = length; gridsize1D = matheight; // 调用核函数,计算行最小值,即找到点对应关系。 _findRowMinKer<<<gridsize1D, blocksize1D, 2 * length * sizeof (float)>>>( deveudmat, devindexmat, matwidth, length, maxDisPoint * maxDisPoint, devrowmin); // 若调用核函数出错返回错误代码。 if (cudaGetLastError() != cudaSuccess) { FAIL_EDGECHECK_SPACE_FREE; return CUDA_ERROR; } // 申请中间错误码图像。 errcode = ImageBasicOp::newImage(&errmap); if (errcode != NO_ERROR) { FAIL_EDGECHECK_SPACE_FREE; return errcode; } // 大小和输入的 errpoint 一致。 errcode = ImageBasicOp::makeAtCurrentDevice(errmap, 
errpoint->width, errpoint->height); if (errcode != NO_ERROR) { FAIL_EDGECHECK_SPACE_FREE; return errcode; } // 提取错误码子图像。 ImageCuda errmapcud; errcode = ImageBasicOp::roiSubImage(errmap, &errmapcud); if (errcode != NO_ERROR) { FAIL_EDGECHECK_SPACE_FREE; return errcode; } // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 blocksize1D = DEF_BLOCK_1D; gridsize1D = (tecount + DEF_BLOCK_1D - 1) / DEF_BLOCK_1D; // 调用核函数,将错误码标记输出到 errmap 图像中。 _makeErrmapKer<<<gridsize1D, blocksize1D>>>(*tecdset, devrowmin, errmapcud); // 若调用核函数出错返回错误代码。 if (cudaGetLastError() != cudaSuccess) { FAIL_EDGECHECK_SPACE_FREE; return CUDA_ERROR; } // 将错误码图像拷贝到 Device 内存中。 errcode = ImageBasicOp::copyToCurrentDevice(errpoint); if (errcode != NO_ERROR) { FAIL_EDGECHECK_SPACE_FREE; return errcode; } // 提取错误码子图像。 ImageCuda errpointcud; errcode = ImageBasicOp::roiSubImage(errpoint, &errpointcud); if (errcode != NO_ERROR) { FAIL_EDGECHECK_SPACE_FREE; return errcode; } // 计算核函数调用的分块大小。 blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; blocksize.z = 1; gridsize.x = (errmapcud.imgMeta.width + blocksize.x - 1) / blocksize.x; gridsize.y = (errmapcud.imgMeta.height + blocksize.y - 1) / blocksize.y; gridsize.z = 1; // 调用核函数,根据错误码图像确定最终的异常点。 _confirmErrmapKer<<<gridsize, blocksize>>>(errmapcud, errpointcud); // 若调用核函数出错返回错误代码。 if (cudaGetLastError() != cudaSuccess) { FAIL_EDGECHECK_SPACE_FREE; return CUDA_ERROR; } // 释放 Device 端内存。 FAIL_EDGECHECK_SPACE_FREE; return NO_ERROR; } // 取消前面的宏定义。 #undef FAIL_EDGECHECK_SPACE_FREE // Host 成员方法:edgeCheckFragment(边缘的异常片段检查) __host__ int EdgeCheck::edgeCheckFragment(CoordiSet *recdset, CoordiSet *tecdset, Image *errpoint) { // 检查输入坐标集合和图像是否为 NULL,如果为 NULL 直接报错返回。 if (recdset == NULL || tecdset == NULL || errpoint == NULL) return NULL_POINTER; // 局部变量,错误码 int errcode; // 将参考边缘的坐标集拷贝到 Device 内存中。 errcode = CoordiSetBasicOp::copyToCurrentDevice(recdset); if (errcode != NO_ERROR) return errcode; // 将测试边缘的坐标集拷贝到 Device 内存中。 errcode = CoordiSetBasicOp::copyToCurrentDevice(tecdset); if 
(errcode != NO_ERROR) return errcode; // 获取参考边缘和测试边缘点的个数。 int recount = recdset->count; int tecount = tecdset->count; // 测试边缘的每个点与参考边缘的 group 个点计算欧式距离。 int group = (recount + DEF_COL_MAX - 1) / DEF_COL_MAX; // 计算欧式距离矩阵的宽度和高度。 int matwidth = (recount + group - 1) / group; int matheight = tecount; // 一次申请 Device 端所有空间。 cudaError_t cudaerrcode; float *alldevicepointer, *devremat, *devtemat, *deveudmat, *devindexmat; int *devrowmin; cudaerrcode = cudaMalloc((void **)&alldevicepointer, (recount * DEF_HUMOM_SIZE + tecount * DEF_HUMOM_SIZE + 2 * matheight * matwidth + matheight) * sizeof (float)); if (cudaerrcode != cudaSuccess) { cudaFree(alldevicepointer); return cudaerrcode; } // 初始化所有 Device 上的内存空间。 cudaerrcode = cudaMemset(alldevicepointer, 0, (recount * DEF_HUMOM_SIZE + tecount * DEF_HUMOM_SIZE + 2 * matheight * matwidth + matheight) * sizeof (float)); if (cudaerrcode != cudaSuccess) { cudaFree(alldevicepointer); return cudaerrcode; } // 获得参考边缘的 local moments 特征矩阵 devremat。 devremat = alldevicepointer; // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 size_t blocksize1D, gridsize1D; blocksize1D = DEF_BLOCK_1D; gridsize1D = (recount + blocksize1D - 1) / blocksize1D; // 调用核函数,计算参考边缘的 local moments。 _localMomentsKer<<<gridsize1D, blocksize1D>>>(*recdset, this->followWidth, devremat); // 若调用核函数出错返回错误代码 if (cudaGetLastError() != cudaSuccess) { cudaFree(alldevicepointer); return CUDA_ERROR; } // 获得测试边缘的 local moments 特征矩阵 devremat。 devtemat = devremat + recount * DEF_HUMOM_SIZE; // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 blocksize1D = DEF_BLOCK_1D; gridsize1D = (tecount + blocksize1D - 1) / blocksize1D; // 调用核函数,计算测试边缘的 local moments。 _localMomentsKer<<<gridsize1D, blocksize1D>>>(*tecdset, this->followWidth, devtemat); // 若调用核函数出错返回错误代码。 if (cudaGetLastError() != cudaSuccess) { cudaFree(alldevicepointer); return CUDA_ERROR; } // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 blocksize1D = matwidth; gridsize1D = matheight; // 获得欧式距离矩阵和索引矩阵位置指针。 deveudmat = devtemat + tecount * DEF_HUMOM_SIZE; devindexmat = deveudmat 
+ matheight * matwidth; // 调用核函数,计算参考边缘和测试边缘的所有点的欧式距离的矩阵。 _euclidMatKer<<<gridsize1D, blocksize1D>>>(*recdset, *tecdset, group, deveudmat, devindexmat); // 若调用核函数出错返回错误代码。 if (cudaGetLastError() != cudaSuccess) { cudaFree(alldevicepointer); return CUDA_ERROR; } // 获得行最小值数组指针。 devrowmin = (int *)(devindexmat + matheight * matwidth); // 获得最小 2 的幂次数,使得排序的长度满足 2 的幂次方。 int exponent = (int)ceil(log((float)matwidth) / log(2.0f)); int length = (1 << exponent); // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 blocksize1D = length; gridsize1D = matheight; // 调用核函数,计算行最小值,即找到点对应关系。 _findRowMinKer<<<gridsize1D, blocksize1D, 2 * length * sizeof (float)>>>( deveudmat, devindexmat, matwidth, length, maxDis * maxDis, devrowmin); // 若调用核函数出错返回错误代码。 if (cudaGetLastError() != cudaSuccess) { cudaFree(alldevicepointer); return CUDA_ERROR; } // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 blocksize1D = DEF_BLOCK_1D; gridsize1D = (tecount + blocksize1D - 1) / blocksize1D; // 调用核函数,计算对应点间的特征向量的标准相关系数。 _relateCoeffKer<<<gridsize1D, blocksize1D>>>(devtemat, devremat, this->minCor, devrowmin); // 若调用核函数出错返回错误代码。 if (cudaGetLastError() != cudaSuccess) { cudaFree(alldevicepointer); return CUDA_ERROR; } // 申请中间错误码图像。 Image *errmap; errcode = ImageBasicOp::newImage(&errmap); if (errcode != NO_ERROR) { cudaFree(alldevicepointer); return errcode; } // 大小和输入的 errpoint 一致。 errcode = ImageBasicOp::makeAtCurrentDevice(errmap, errpoint->width, errpoint->height); if (errcode != NO_ERROR) { cudaFree(alldevicepointer); ImageBasicOp::deleteImage(errmap); return errcode; } // 提取错误码子图像。 ImageCuda errmapcud; errcode = ImageBasicOp::roiSubImage(errmap, &errmapcud); if (errcode != NO_ERROR) { cudaFree(alldevicepointer); ImageBasicOp::deleteImage(errmap); return errcode; } // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 blocksize1D = DEF_BLOCK_1D; gridsize1D = (tecount + blocksize1D - 1) / blocksize1D; // 调用核函数,将错误码标记输出到 errmap 图像中。 _makeErrmapKer<<<gridsize1D, blocksize1D>>>(*tecdset, devrowmin, errmapcud); // 若调用核函数出错返回错误代码。 if (cudaGetLastError() != 
cudaSuccess) { cudaFree(alldevicepointer); ImageBasicOp::deleteImage(errmap); return CUDA_ERROR; } // 将输出的异常点图像拷贝到 Device 内存中。 errcode = ImageBasicOp::copyToCurrentDevice(errpoint); if (errcode != NO_ERROR) { cudaFree(alldevicepointer); ImageBasicOp::deleteImage(errmap); return errcode; } // 提取错误码子图像。 ImageCuda errpointcud; errcode = ImageBasicOp::roiSubImage(errpoint, &errpointcud); if (errcode != NO_ERROR) { cudaFree(alldevicepointer); ImageBasicOp::deleteImage(errmap); return errcode; } // 计算核函数调用的分块大小。 dim3 blocksize, gridsize; blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; gridsize.x = (errmapcud.imgMeta.width + blocksize.x - 1) / blocksize.x; gridsize.y = (errmapcud.imgMeta.height + blocksize.y - 1) / blocksize.y; // 调用核函数,根据错误码图像确定最终的异常点。 _confirmErrmapKer<<<gridsize, blocksize>>>(errmapcud, errpointcud); // 若调用核函数出错返回错误代码。 if (cudaGetLastError() != cudaSuccess) { cudaFree(alldevicepointer); ImageBasicOp::deleteImage(errmap); return CUDA_ERROR; } // 释放中间图像。 errcode = ImageBasicOp::deleteImage(errmap); if (errcode != NO_ERROR) { cudaFree(alldevicepointer); return errcode; } // 释放 Device 端内存。 cudaFree(alldevicepointer); return NO_ERROR; }
the_stack
/////////////////////////////////////////////////////////////////////////// // Copyright 1993-2012 NVIDIA Corporation. All rights reserved. // // Please refer to the NVIDIA end user license agreement (EULA) associated // with this source code for terms and conditions that govern your use of // this software. Any use, reproduction, disclosure, or distribution of // this software and related documentation outside the terms of the EULA // is strictly prohibited. /// // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <stdint.h> // includes CUDA #include <cuda_runtime.h> #define MIN_FULLNESS 16 // includes, project //#include <helper_cuda.h> //#include <helper_functions.h> // helper functions for SDK examples // thrust for reductions -- smaller reductions are actually okay. #include <cuda.h> #include "matrixCompare.h" #include <sdbf_class.h> #include <bloom_filter.h> #include <sdbf_set.h> /** * Comparison (CUDA Kernel) on the device: C = popcll( A & B ) * wA is A's width and wB is B's width */ template <int BLOCK_SIZE> __global__ void CompKernelShared(uint16_t *C, uint64_t *A, uint64_t *B, int wA, int wB) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; // Index of the first sub-matrix of A processed by the block int aBegin = wA * BLOCK_SIZE * by; // Index of the last sub-matrix of A processed by the block int aEnd = aBegin + wA - 1; // Step size used to iterate through the sub-matrices of A int aStep = BLOCK_SIZE; // Index of the first sub-matrix of B processed by the block int bBegin = BLOCK_SIZE * bx; // Step size used to iterate through the sub-matrices of B int bStep = BLOCK_SIZE * wB; // Csub is used to store the element of the block sub-matrix // that is computed by the thread uint64_t Csub = 0; // Loop over all the sub-matrices of A and B // required to compute the block sub-matrix for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b 
+= bStep) { // Declaration of the shared memory array As used to // store the sub-matrix of A __shared__ uint64_t As[BLOCK_SIZE][BLOCK_SIZE]; // Declaration of the shared memory array Bs used to // store the sub-matrix of B __shared__ uint64_t Bs[BLOCK_SIZE][BLOCK_SIZE]; // Load the matrices from device memory // to shared memory; each thread loads // one element of each matrix As[ty][tx] = A[a + wA * ty + tx]; Bs[ty][tx] = B[b + wB * ty + tx]; // Synchronize to make sure the matrices are loaded __syncthreads(); // Compare the two matrices together; // each thread computes one element // of the block sub-matrix #pragma unroll for (int k = 0; k < BLOCK_SIZE; ++k) { // checking for 0 less efficient than computation. less divergence. Csub += __popcll(As[ty][k] & Bs[k][tx]); } // Synchronize to make sure that the preceding // computation is done before loading two new // sub-matrices of A and B in the next iteration __syncthreads(); } // Write the block sub-matrix to device memory; // each thread writes one element int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx; C[c + wB * ty + tx] = Csub; } // kernel for minimum estimate cache __global__ void EstCacheKernel(uint16_t *R) { double m = 2048; double k = 5; double exp = 1-1.0/2048; int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; // bloom filter minimum estimate calculation for all element counts up to 256 R[x*256+y]=(uint16_t)llrintf(m*(1- powf(exp,k*x) - powf(exp,k*y)+ powf(exp,k*(x+y))) ); } // compute and apply cutoff based upon hamming weight and fullness of filters // 1 and 2 apply to A and B-matrixes respectively. 
__global__ void CutoffKernel(uint16_t *R, uint16_t *cache, uint16_t *s1, uint16_t *s2, uint16_t *ham1, uint16_t *ham2, int longsideB) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; // if either filter does not have enough elements // throw out the result int min_est=0; int s1row=s1[row]; int s2col=s2[col]; if ((s1row < MIN_FULLNESS ) || (s2col < MIN_FULLNESS)) { R[row * longsideB + col]=0; } else { if (s1row==160 && s2col==160) // cache hit avoidance; most common case min_est=214; else min_est=cache[s1row*256+s2col]; // expensive int max_est = (ham1[row] < ham2[col]) ? ham1[row]: ham2[col]; float cut_off=(0.3*(float)(max_est-min_est)+(float)min_est); R[row * longsideB + col] = (R[row * longsideB + col] > cut_off)? (uint16_t)llrintf(100*(R[row * longsideB + col]-cut_off)/(max_est-cut_off)) : 0 ; } } // reduction kernel -- basic but still faster than reduce-by-key // strided accesses etc probably would help. __global__ void ReduceKernel(uint16_t *A, uint16_t *C, int wA) { // Each thread computes one element of C // from each row of A uint16_t Cmax = 0; int row = blockIdx.x * blockDim.x + threadIdx.x; for (int e = 0; e < wA; ++e) { if (A[row * wA +e] > Cmax) Cmax = A[row * wA + e]; } C[row] = Cmax; } void constantInitC(uint64_t *data, int size, uint64_t val) { for (int i = 0; i < size; ++i) data[i] = val; } void constantInitCs(uint16_t *data, int size, uint16_t val) { for (int i = 0; i < size; ++i) data[i] = val; } int nextpower2(int32_t bf_count) { int32_t v = bf_count; v--; v |= v >> 1; v |= v >> 2; v |= v >> 4; v |= v >> 8; v++; if (v < 32 ) v = 32; return v; } // wrapper bit for matrixCompare to compare sdbf sets // int sdbfsetCompare(sdbf_set *refset, sdbf_set *target, bool quiet, int confidence) { int shortside=32; // allocate these based on target sizing int longsidea=32768; int longsideb=32; int block_size=32; longsidea = nextpower2(refset->bf_vector->size()); //longsidea *=4; if (!quiet) std::cout <<"side A 
" << longsidea << std::endl; // Allocate host memory for matrices A and B unsigned int size_A = shortside *longsidea; unsigned int mem_size_A = sizeof(uint64_t) * size_A; uint64_t *h_A = (uint64_t *)malloc(mem_size_A); // Initialize host memory - ie make some data constantInitC(h_A, size_A, 0); uint8_t *h_A8; h_A8=(uint8_t*)h_A; uint16_t *ham_A = (uint16_t*)malloc(longsidea*sizeof(uint16_t)); constantInitCs(ham_A, longsidea, 0); uint16_t *elem_A = (uint16_t*)malloc(longsidea*sizeof(uint16_t)); constantInitCs(elem_A, longsidea, 0); // load bloom filters into A for (int i=0; i < refset->bf_vector->size(); i++) { for (int j=0; j < 256; j++) { h_A8[256*i+j]=refset->bf_vector->at(i)->bf[j]; } ham_A[i]=refset->bf_vector->at(i)->hamming; elem_A[i]=(uint16_t)refset->bf_vector->at(i)->elem_count(); } unsigned int mem_size_R = longsidea * sizeof(uint16_t); uint16_t *results = (uint16_t *) malloc(mem_size_R); // B we will load repeatedly with one sdbf at a time // from target // allocate device memory for A parts // and transfer it up to the device. 
uint64_t *d_A, *d_B; cudaError_t error; error = cudaMalloc((void **) &d_A, mem_size_A); if (error != cudaSuccess) { printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice); if (error != cudaSuccess) { printf("cudaMemcpy (d_A,h_A) returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } int mem_size_Act = longsidea*sizeof(uint16_t); uint16_t *ham_Ad, *elem_Ad; error = cudaMalloc((void **) &elem_Ad, mem_size_Act); if (error != cudaSuccess) { printf("cudaMalloc d_B returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = cudaMalloc((void **) &ham_Ad, mem_size_Act); if (error != cudaSuccess) { printf("cudaMalloc d_B returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = cudaMemcpy(ham_Ad, ham_A, mem_size_Act, cudaMemcpyHostToDevice); if (error != cudaSuccess) { printf("cudaMemcpy (d_B,h_B) returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = cudaMemcpy(elem_Ad, elem_A, mem_size_Act, cudaMemcpyHostToDevice); if (error != cudaSuccess) { printf("cudaMemcpy (d_B,h_B) returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } // for each sdbf in set2, load into B and compare it with A. 
for (int m=0; m < target->size(); m++) { longsideb=1024; // fix this longsideb = nextpower2(target->at(m)->filter_count()); if (!quiet) std::cout <<"side B " << longsideb << std::endl; unsigned int size_B = shortside *longsideb; unsigned int mem_size_B = sizeof(uint64_t) * size_B; uint64_t *h_B = (uint64_t *)malloc(mem_size_B); constantInitC(h_B, size_B, 0); uint8_t *h_B8; h_B8=(uint8_t*)h_B; int mem_size_Bct =sizeof(uint16_t)*longsideb; uint16_t *ham_B = (uint16_t*)malloc(mem_size_Bct); constantInitCs(ham_B, longsideb, 0); uint16_t *elem_B = (uint16_t*)malloc(mem_size_Bct); constantInitCs(elem_B, longsideb, 0); uint8_t *tmpbuf; int max_elem=0; for (int j=0; j < target->at(m)->filter_count() ; j++) { ham_B[j]=target->at(m)->hamming[j]; elem_B[j]=sdbf::get_elem_count(target->at(m),j); if (elem_B[j] > max_elem) max_elem=elem_B[j]; for (int i=0; i< 256; i+=8) { for (int k=0;k<8;k++) h_B8[i*longsideb+k+j*8]=target->at(m)->buffer[i+k+j*256]; } } // if the whole block has too few elements, skip it. 
// for filtering out empty parts of drives if (max_elem < MIN_FULLNESS) { free(h_B); free(ham_B); free(elem_B); continue; } uint16_t *ham_Bd, *elem_Bd; error = cudaMalloc((void **) &elem_Bd, mem_size_Bct); if (error != cudaSuccess) { printf("cudaMalloc d_B returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = cudaMalloc((void **) &ham_Bd, mem_size_Bct); if (error != cudaSuccess) { printf("cudaMalloc d_B returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = cudaMemcpy(ham_Bd, ham_B, mem_size_Bct, cudaMemcpyHostToDevice); if (error != cudaSuccess) { printf("cudaMemcpy (d_B,h_B) returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = cudaMemcpy(elem_Bd, elem_B, mem_size_Bct, cudaMemcpyHostToDevice); if (error != cudaSuccess) { printf("cudaMemcpy (d_B,h_B) returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = cudaMalloc((void **) &d_B, mem_size_B); if (error != cudaSuccess) { printf("cudaMalloc d_B returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice); if (error != cudaSuccess) { printf("cudaMemcpy (d_B,h_B) returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } int matrix_result; matrix_result = matrixCompare(block_size, shortside, longsidea,longsideb, d_A, d_B, results, elem_Ad, elem_Bd, ham_Ad, ham_Bd); int result_count=0; int score=0; int filter_count=0; if (!quiet) std::cout << target->at(m)->name() << std::endl; int setsize =refset->bf_vector->size(); for (int i=0; i < setsize; i++) { if (results[i]>0) { result_count++; score+=results[i]; } if (elem_A[i]>=MIN_FULLNESS) filter_count++; if (i+1 == setsize) { if (result_count!=0 && (score/filter_count >= confidence)) { cout << target->at(m)->name() << "|"<< refset->bf_vector->at(i)->name() << "|" << score/filter_count<< endl; result_count=0; score=0; } } else if 
(refset->bf_vector->at(i)->bloom_id() != refset->bf_vector->at(i+1)->bloom_id()) { if (result_count!=0 && (score/filter_count >= confidence)) { cout << target->at(m)->name() << "|"<< refset->bf_vector->at(i)->name() << "|" << score/filter_count<< endl; } filter_count=0; result_count=0; score=0; } } free(h_B); free(ham_B); free(elem_B); cudaFree(d_B); cudaFree(ham_Bd); cudaFree(elem_Bd); } cudaFree(d_A); free(ham_A); free(elem_A); free(h_A); cudaFree(ham_Ad); cudaFree(elem_Ad); free(results); return 0; } // Compares two matrices in device memory. // host required to pre-allocate result memory in *resptr int matrixCompare(int block_size, int shortside, int long_A, int long_B, uint64_t *d_A, uint64_t *d_B, uint16_t *resptr, uint16_t *elem_A, uint16_t *elem_B,uint16_t *ham_A,uint16_t *ham_B) { dim3 dimsA(shortside,long_A, 1); dim3 dimsB(long_B, shortside, 1); // size calcs unsigned int size_A = dimsA.x * dimsA.y; unsigned int mem_size_A = sizeof(uint64_t) * size_A; unsigned int size_B = dimsB.x * dimsB.y; unsigned int mem_size_B = sizeof(uint64_t) * size_B; // Size matrix C dim3 dimsC(dimsB.x, dimsA.y, 1); unsigned int size_C = dimsC.x * dimsC.y; unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(uint16_t); unsigned int mem_size_R = dimsA.y * sizeof(uint16_t); //uint16_t *h_C = (uint16_t *) malloc(mem_size_C); // Declare device memory uint16_t *d_C; // allocate device memory for computation matrix cudaError_t error; error = cudaMalloc((void **) &d_C, mem_size_C); if (error != cudaSuccess) { printf("cudaMalloc d_C returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } uint16_t *d_cache; error = cudaMalloc((void **) &d_cache, 256*256*(sizeof(uint16_t))); if (error != cudaSuccess) { printf("cudaMalloc est_cache returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } // compute cutoff cache values dim3 threadscache(16,16); dim3 gridcache(256/threadscache.x,256/threadscache.y); EstCacheKernel<<<gridcache,threadscache>>>(d_cache); 
// Setup thread block and grid dim3 threads(block_size, block_size); dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y); // Compare Kernel CompKernelShared<32><<< grid, threads>>>(d_C, d_A, d_B, dimsA.x, dimsB.x); // cutoff kernel CutoffKernel<<<grid,threads>>>(d_C, d_cache, elem_A, elem_B, ham_A, ham_B, dimsC.x); // allocate result space uint16_t *d_R; error = cudaMalloc((void **) &d_R, mem_size_R); if (error != cudaSuccess) { printf("cudaMalloc d_C returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } //reduction kernel -- runs on 1-dimension not in a grid shape like the others //ReduceKernel<<<dimsC.y/1024,1024>>>(d_C, d_R, dimsC.x); ReduceKernel<<<dimsC.y/16,16>>>(d_C, d_R, dimsC.x); error = cudaMemcpy(resptr, d_R, mem_size_R, cudaMemcpyDeviceToHost); if (error != cudaSuccess) { printf("cudaMemcpy (h_R,d_R) returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } //for (int i=0; i < dimsC.y; i++) { // cout << resptr[i] << " " ; //} cudaFree(d_C); cudaFree(d_R); cudaFree(d_cache); return EXIT_SUCCESS; } int deviceSetup(int devID) { cudaSetDevice(devID); cudaError_t error; cudaDeviceProp deviceProp; error = cudaGetDevice(&devID); if (error != cudaSuccess) { fprintf(stderr,"cudaGetDevice returned error code %d, line(%d)\n", error, __LINE__); } error = cudaGetDeviceProperties(&deviceProp, devID); if (deviceProp.computeMode == cudaComputeModeProhibited) { fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::cudaSetDevice().\n"); return(1); } if (error != cudaSuccess) { fprintf(stderr,"cudaGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__); } else { fprintf(stderr,"GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor); } return 0; } int deviceTeardown() { cudaDeviceReset(); // at end? return 0; }
the_stack
#include <cub/cub.cuh> #include <cub/device/device_scan.cuh> #include <libvis/cuda/cuda_auto_tuner.h> #include "badslam/cuda_util.cuh" #include "badslam/cuda_matrix.cuh" #include "badslam/surfel_projection_nvcc_only.cuh" #include "badslam/util.cuh" #include "badslam/util_nvcc_only.cuh" namespace vis { __global__ void CreateSurfelsForKeyframeCUDASerializingKernel( int sparse_surfel_cell_size, CUDABuffer_<u16> depth_buffer, CUDABuffer_<uchar4> color_buffer, CUDABuffer_<u32> supporting_surfels, CUDABuffer_<u8> new_surfel_flag_vector) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; constexpr bool kSemiDense = false; constexpr int kSemiDenseThreshold = 5; if (x < depth_buffer.width() && y < depth_buffer.height()) { // TODO: Is this border necessary here? constexpr int kBorder = 1; // TODO: This atomicCAS() will lead to the selection of a (relatively) random high-res // pixel within the sparsified cell. Can we instead select a good // pixel according to some criteria (e.g, having high gradient magnitude)? bool new_surfel = x >= kBorder && y >= kBorder && x < depth_buffer.width() - kBorder && y < depth_buffer.height() - kBorder && !(depth_buffer(y, x) & kInvalidDepthBit) && (!kSemiDense || (::abs(color_buffer(y, x).w - color_buffer(y, x + 1).w) >= kSemiDenseThreshold || ::abs(color_buffer(y, x).w - color_buffer(y + 1, x).w) >= kSemiDenseThreshold || ::abs(color_buffer(y, x).w - color_buffer(y - 1, x).w) >= kSemiDenseThreshold || ::abs(color_buffer(y, x).w - color_buffer(y, x - 1).w) >= kSemiDenseThreshold)) && atomicCAS(&supporting_surfels(y / sparse_surfel_cell_size, x / sparse_surfel_cell_size), kInvalidIndex, 0) == kInvalidIndex; u32 seq_index = x + y * depth_buffer.width(); new_surfel_flag_vector(0, seq_index) = new_surfel ? 
1 : 0; } } void CallCreateSurfelsForKeyframeCUDASerializingKernel( cudaStream_t stream, int sparse_surfel_cell_size, const CUDABuffer_<u16>& depth_buffer, const CUDABuffer_<uchar4>& color_buffer, const CUDABuffer_<u32>& supporting_surfels, const CUDABuffer_<u8>& new_surfel_flag_vector) { CUDA_AUTO_TUNE_2D( CreateSurfelsForKeyframeCUDASerializingKernel, 32, 32, depth_buffer.width(), depth_buffer.height(), 0, stream, /* kernel parameters */ sparse_surfel_cell_size, depth_buffer, color_buffer, supporting_surfels, new_surfel_flag_vector); CUDA_CHECK(); } __device__ __forceinline__ void CreateNewSurfel( u32 x, u32 y, u32 surfel_index, const PixelCenterUnprojector& unprojector, const DepthToColorPixelCorner& depth_to_color, const PixelCornerProjector& color_corner_projector, const CUDAMatrix3x4& global_T_frame, const CUDAMatrix3x4& frame_T_global, const DepthParameters& depth_params, const CUDABuffer_<u16>& depth_buffer, const CUDABuffer_<u16>& normals_buffer, const CUDABuffer_<u16>& radius_buffer, cudaTextureObject_t color_texture, CUDABuffer_<float>& surfels) { float calibrated_depth = RawToCalibratedDepth( depth_params.a, depth_params.cfactor_buffer(y / depth_params.sparse_surfel_cell_size, x / depth_params.sparse_surfel_cell_size), depth_params.raw_to_float_depth, depth_buffer(y, x)); float3 surfel_global_position = global_T_frame * unprojector.UnprojectPoint(x, y, calibrated_depth); SurfelSetPosition(&surfels, surfel_index, surfel_global_position); float3 surfel_local_normal = U16ToImageSpaceNormal(normals_buffer(y, x)); float3 surfel_global_normal = global_T_frame.Rotate(surfel_local_normal); SurfelSetNormal(&surfels, surfel_index, surfel_global_normal); float surfel_radius_squared = __half2float(__ushort_as_half(radius_buffer(y, x))); SurfelSetRadiusSquared(&surfels, surfel_index, surfel_radius_squared); float2 color_pxy; TransformDepthToColorPixelCorner(make_float2(x + 0.5f, y + 0.5f), depth_to_color, &color_pxy); float4 color = tex2D<float4>(color_texture, 
color_pxy.x, color_pxy.y); float2 t1_pxy, t2_pxy; ComputeTangentProjections( surfel_global_position, surfel_global_normal, surfel_radius_squared, frame_T_global, color_corner_projector, &t1_pxy, &t2_pxy); float descriptor_1; float descriptor_2; ComputeRawDescriptorResidual( color_texture, color_pxy, t1_pxy, t2_pxy, /*surfel_descriptor_1*/ 0, /*surfel_descriptor_2*/ 0, &descriptor_1, &descriptor_2); SurfelSetColor(&surfels, surfel_index, make_uchar4( 255.f * color.x, 255.f * color.y, 255.f * color.z, 0)); surfels(kSurfelDescriptor1, surfel_index) = descriptor_1; surfels(kSurfelDescriptor2, surfel_index) = descriptor_2; } __global__ void WriteNewSurfelIndexAndInitializeObservationsCUDAKernel( u32 pixel_count, CUDABuffer_<u8> new_surfel_flag_vector, CUDABuffer_<u32> new_surfel_indices, u16* observation_vector, u16* free_space_violation_vector, u32* new_surfel_index_list) { unsigned int seq_index = blockIdx.x * blockDim.x + threadIdx.x; if (seq_index < pixel_count) { if (new_surfel_flag_vector(0, seq_index) != 1) { return; } // One needs to be subtracted because of using an inclusive prefix sum for // computing new_surfel_indices, i.e., the first new surfel has // new_surfel_indices(...) == 1. 
u32 surfel_index = new_surfel_indices(0, seq_index) - 1; new_surfel_index_list[surfel_index] = seq_index; observation_vector[surfel_index] = 1; free_space_violation_vector[surfel_index] = 0; } } void CallWriteNewSurfelIndexAndInitializeObservationsCUDAKernel( cudaStream_t stream, u32 pixel_count, const CUDABuffer_<u8>& new_surfel_flag_vector, const CUDABuffer_<u32>& new_surfel_indices, u16* observation_vector, u16* free_space_violation_vector, u32* new_surfel_index_list) { CUDA_AUTO_TUNE_1D( WriteNewSurfelIndexAndInitializeObservationsCUDAKernel, 1024, pixel_count, 0, stream, /* kernel parameters */ pixel_count, new_surfel_flag_vector, new_surfel_indices, observation_vector, free_space_violation_vector, new_surfel_index_list); CUDA_CHECK(); } __global__ void CountObservationsForNewSurfelsCUDAKernel( int new_surfel_count, u32* new_surfel_index_list, u16* observation_vector, u16* free_space_violation_vector, DepthParameters depth_params, PixelCenterUnprojector unprojector, CUDABuffer_<u16> new_surfel_depth_buffer, CUDABuffer_<u16> new_surfel_normals_buffer, CUDAMatrix3x4 covis_T_frame, PixelCornerProjector projector, CUDABuffer_<u16> covis_depth_buffer, CUDABuffer_<u16> covis_normals_buffer) { const unsigned int new_surfel_index = blockIdx.x * blockDim.x + threadIdx.x; if (new_surfel_index < new_surfel_count) { u32 new_surfel_pixel_index = new_surfel_index_list[new_surfel_index]; u32 y = new_surfel_pixel_index / new_surfel_depth_buffer.width(); u32 x = new_surfel_pixel_index - y * new_surfel_depth_buffer.width(); float calibrated_depth = RawToCalibratedDepth( depth_params.a, depth_params.cfactor_buffer(y / depth_params.sparse_surfel_cell_size, x / depth_params.sparse_surfel_cell_size), depth_params.raw_to_float_depth, new_surfel_depth_buffer(y, x)); float3 surfel_input_position = unprojector.UnprojectPoint(x, y, calibrated_depth); float3 surfel_local_position; if (!covis_T_frame.MultiplyIfResultZIsPositive(surfel_input_position, &surfel_local_position)) { return; } 
int px, py; if (!ProjectSurfelToImage( covis_depth_buffer.width(), covis_depth_buffer.height(), projector, surfel_local_position, &px, &py)) { return; } // Check for depth compatibility. bool is_free_space_violation = false; if (!IsAssociatedWithPixel<true>( surfel_local_position, new_surfel_normals_buffer, x, y, covis_T_frame, covis_normals_buffer, px, py, depth_params, covis_depth_buffer(py, px), kDepthResidualDefaultTukeyParam, unprojector, &is_free_space_violation)) { if (is_free_space_violation) { free_space_violation_vector[new_surfel_index] += 1; } return; } // Accumulate. observation_vector[new_surfel_index] += 1; } } void CallCountObservationsForNewSurfelsCUDAKernel( cudaStream_t stream, int new_surfel_count, u32* new_surfel_index_list, u16* observation_vector, u16* free_space_violation_vector, const DepthParameters& depth_params, const PixelCenterUnprojector& unprojector, const CUDABuffer_<u16>& new_surfel_depth_buffer, const CUDABuffer_<u16>& new_surfel_normals_buffer, const CUDAMatrix3x4& covis_T_frame, const PixelCornerProjector& projector, const CUDABuffer_<u16>& covis_depth_buffer, const CUDABuffer_<u16>& covis_normals_buffer) { CUDA_AUTO_TUNE_1D( CountObservationsForNewSurfelsCUDAKernel, 1024, new_surfel_count, 0, stream, /* kernel parameters */ new_surfel_count, new_surfel_index_list, observation_vector, free_space_violation_vector, depth_params, unprojector, new_surfel_depth_buffer, new_surfel_normals_buffer, covis_T_frame, projector, covis_depth_buffer, covis_normals_buffer); CUDA_CHECK(); } __global__ void FilterNewSurfelsCUDAKernel( u16 min_observation_count, u32 new_surfel_count, u32* new_surfel_index_list, u16* observation_vector, u16* free_space_violation_vector, CUDABuffer_<u8> new_surfel_flag_vector) { const unsigned int new_surfel_index = blockIdx.x * blockDim.x + threadIdx.x; if (new_surfel_index < new_surfel_count) { u16 observation_count = observation_vector[new_surfel_index]; if (observation_count < min_observation_count || 
free_space_violation_vector[new_surfel_index] > observation_count) { // Do not create this surfel. u32 new_surfel_pixel_index = new_surfel_index_list[new_surfel_index]; new_surfel_flag_vector(0, new_surfel_pixel_index) = 0; } } } void CallFilterNewSurfelsCUDAKernel( cudaStream_t stream, u16 min_observation_count, u32 new_surfel_count, u32* new_surfel_index_list, u16* observation_vector, u16* free_space_violation_vector, const CUDABuffer_<u8>& new_surfel_flag_vector) { CUDA_AUTO_TUNE_1D( FilterNewSurfelsCUDAKernel, 1024, new_surfel_count, 0, stream, /* kernel parameters */ min_observation_count, new_surfel_count, new_surfel_index_list, observation_vector, free_space_violation_vector, new_surfel_flag_vector); } __global__ void CreateSurfelsForKeyframeCUDACreationAppendKernel( PixelCenterUnprojector unprojector, DepthToColorPixelCorner depth_to_color, PixelCornerProjector color_corner_projector, CUDAMatrix3x4 global_T_frame, CUDAMatrix3x4 frame_T_global, DepthParameters depth_params, CUDABuffer_<u16> depth_buffer, CUDABuffer_<u16> normals_buffer, CUDABuffer_<u16> radius_buffer, cudaTextureObject_t color_texture, CUDABuffer_<u8> new_surfel_flag_vector, CUDABuffer_<u32> new_surfel_indices, u32 surfels_size, CUDABuffer_<float> surfels) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < depth_buffer.width() && y < depth_buffer.height()) { u32 seq_index = x + y * depth_buffer.width(); if (new_surfel_flag_vector(0, seq_index) != 1) { return; } // Compute the index at which to output the new surfel (appending after the // old surfels_size). One needs to be subtracted because of using an // inclusive prefix sum for computing new_surfel_indices, i.e., the first // new surfel has new_surfel_indices(...) == 1. 
u32 surfel_index = surfels_size + new_surfel_indices(0, seq_index) - 1; CreateNewSurfel(x, y, surfel_index, unprojector, depth_to_color, color_corner_projector, global_T_frame, frame_T_global, depth_params, depth_buffer, normals_buffer, radius_buffer, color_texture, surfels); } } void CallCreateSurfelsForKeyframeCUDACreationAppendKernel( cudaStream_t stream, const PixelCenterUnprojector& unprojector, const DepthToColorPixelCorner& depth_to_color, const PixelCornerProjector& color_corner_projector, const CUDAMatrix3x4& global_T_frame, const CUDAMatrix3x4& frame_T_global, const DepthParameters& depth_params, const CUDABuffer_<u16>& depth_buffer, const CUDABuffer_<u16>& normals_buffer, const CUDABuffer_<u16>& radius_buffer, cudaTextureObject_t color_texture, const CUDABuffer_<u8>& new_surfel_flag_vector, const CUDABuffer_<u32>& new_surfel_indices, u32 surfels_size, const CUDABuffer_<float>& surfels) { CUDA_AUTO_TUNE_2D( CreateSurfelsForKeyframeCUDACreationAppendKernel, 32, 32, depth_buffer.width(), depth_buffer.height(), 0, stream, /* kernel parameters */ unprojector, depth_to_color, color_corner_projector, global_T_frame, frame_T_global, depth_params, depth_buffer, normals_buffer, radius_buffer, color_texture, new_surfel_flag_vector, new_surfel_indices, surfels_size, surfels); CUDA_CHECK(); } u32 CreateSurfelsForKeyframeCUDA_CountNewSurfels( cudaStream_t stream, u32 pixel_count, void** new_surfels_temp_storage, usize* new_surfels_temp_storage_bytes, CUDABuffer_<u8>* new_surfel_flag_vector, CUDABuffer_<u32>* new_surfel_indices) { // Indices for the new surfels are computed with a parallel inclusive prefix sum from CUB. 
cub::TransformInputIterator<u32, TypeConversionOp<u32>, const u8*> new_surfel_flag_vector_as_u32(new_surfel_flag_vector->address(), TypeConversionOp<u32>()); if (*new_surfels_temp_storage_bytes == 0) { cub::DeviceScan::InclusiveSum( *new_surfels_temp_storage, *new_surfels_temp_storage_bytes, new_surfel_flag_vector_as_u32, new_surfel_indices->address(), pixel_count, stream); cudaMalloc(new_surfels_temp_storage, *new_surfels_temp_storage_bytes); } cub::DeviceScan::InclusiveSum( *new_surfels_temp_storage, *new_surfels_temp_storage_bytes, new_surfel_flag_vector_as_u32, new_surfel_indices->address(), pixel_count, stream); CUDA_CHECK(); // Read back the number of new surfels to the CPU by reading the last element // in new_surfel_indices. u32 new_surfel_count; cudaMemcpyAsync( &new_surfel_count, reinterpret_cast<u8*>(new_surfel_indices->address()) + ((pixel_count - 1) * sizeof(u32)), sizeof(u32), cudaMemcpyDeviceToHost, stream); cudaStreamSynchronize(stream); return new_surfel_count; } }
the_stack
#include "FunctionPointers_kernels.h" // Texture reference for reading image texture<unsigned char, 2> tex; extern __shared__ unsigned char LocalBlock[]; static cudaArray *array = NULL; #define RADIUS 1 // pixel value used for thresholding function, works well with sample image 'lena' #define THRESHOLD 150.0f #ifdef FIXED_BLOCKWIDTH #define BlockWidth 80 #define SharedPitch 384 #endif // A function pointer can be declared explicity like this line: //__device__ unsigned char (*pointFunction)(unsigned char, float ) = NULL; // or by using typedef's like below: typedef unsigned char (*blockFunction_t)( unsigned char, unsigned char, unsigned char, unsigned char, unsigned char, unsigned char, unsigned char, unsigned char, unsigned char, float ); typedef unsigned char (*pointFunction_t)( unsigned char, float ); __device__ blockFunction_t blockFunction; __device__ unsigned char ComputeSobel(unsigned char ul, // upper left unsigned char um, // upper middle unsigned char ur, // upper right unsigned char ml, // middle left unsigned char mm, // middle (unused) unsigned char mr, // middle right unsigned char ll, // lower left unsigned char lm, // lower middle unsigned char lr, // lower right float fScale ) { short Horz = ur + 2*mr + lr - ul - 2*ml - ll; short Vert = ul + 2*um + ur - ll - 2*lm - lr; short Sum = (short) (fScale*(abs(Horz)+abs(Vert))); return (unsigned char)( (Sum < 0) ? 0 : ((Sum > 255) ? 
255 : Sum) ) ; } // define a function pointer and initialize to NULL __device__ unsigned char (*varFunction)( unsigned char, unsigned char, unsigned char, unsigned char, unsigned char, unsigned char, unsigned char, unsigned char, unsigned char, float x ) = NULL; __device__ unsigned char ComputeBox(unsigned char ul, // upper left unsigned char um, // upper middle unsigned char ur, // upper right unsigned char ml, // middle left unsigned char mm, // middle...middle unsigned char mr, // middle right unsigned char ll, // lower left unsigned char lm, // lower middle unsigned char lr, // lower right float fscale ) { short Sum = (short) (ul+um+ur + ml+mm+mr + ll+lm+lr)/9; Sum *= fscale; return (unsigned char)( (Sum < 0) ? 0 : ((Sum > 255) ? 255 : Sum) ) ; } __device__ unsigned char Threshold( unsigned char in, float thresh ) { if( in > thresh ) return 0xFF; else return 0; } // Declare function tables, one for the point function chosen, one for the // block function chosen. The number of entries is determined by the // enum in FunctionPointers_kernels.h __device__ blockFunction_t blockFunction_table[LAST_BLOCK_FILTER]; __device__ pointFunction_t pointFunction_table[LAST_POINT_FILTER]; // Declare device side function pointers. We retrieve them later with // cudaMemcpyFromSymbol to set our function tables above in some // particular order specified at runtime. __device__ blockFunction_t pComputeSobel = ComputeSobel; __device__ blockFunction_t pComputeBox = ComputeBox; __device__ pointFunction_t pComputeThreshold = Threshold; // Allocate host side tables to mirror the device side, and later, we // fill these tables with the function pointers. This lets us send // the pointers to the kernel on invocation, as a method of choosing // which function to run. 
blockFunction_t h_blockFunction_table[2]; pointFunction_t h_pointFunction_table[2]; // Perform a filter operation on the data, using shared memory // The actual operation performed is // determined by the function pointer "blockFunction" and selected // by the integer argument "blockOperation" and has access // to an apron around the current pixel being processed. // Following the block operation, a per-pixel operation, // pointed to by pPointFunction is performed before the final // pixel is produced. __global__ void SobelShared( uchar4 *pSobelOriginal, unsigned short SobelPitch, #ifndef FIXED_BLOCKWIDTH short BlockWidth, short SharedPitch, #endif short w, short h, float fScale, int blockOperation, pointFunction_t pPointFunction ) { short u = 4*blockIdx.x*BlockWidth; short v = blockIdx.y*blockDim.y + threadIdx.y; short ib; int SharedIdx = threadIdx.y * SharedPitch; for ( ib = threadIdx.x; ib < BlockWidth+2*RADIUS; ib += blockDim.x ) { LocalBlock[SharedIdx+4*ib+0] = tex2D( tex, (float) (u+4*ib-RADIUS+0), (float) (v-RADIUS) ); LocalBlock[SharedIdx+4*ib+1] = tex2D( tex, (float) (u+4*ib-RADIUS+1), (float) (v-RADIUS) ); LocalBlock[SharedIdx+4*ib+2] = tex2D( tex, (float) (u+4*ib-RADIUS+2), (float) (v-RADIUS) ); LocalBlock[SharedIdx+4*ib+3] = tex2D( tex, (float) (u+4*ib-RADIUS+3), (float) (v-RADIUS) ); } if ( threadIdx.y < RADIUS*2 ) { // // copy trailing RADIUS*2 rows of pixels into shared // SharedIdx = (blockDim.y+threadIdx.y) * SharedPitch; for ( ib = threadIdx.x; ib < BlockWidth+2*RADIUS; ib += blockDim.x ) { LocalBlock[SharedIdx+4*ib+0] = tex2D( tex, (float) (u+4*ib-RADIUS+0), (float) (v+blockDim.y-RADIUS) ); LocalBlock[SharedIdx+4*ib+1] = tex2D( tex, (float) (u+4*ib-RADIUS+1), (float) (v+blockDim.y-RADIUS) ); LocalBlock[SharedIdx+4*ib+2] = tex2D( tex, (float) (u+4*ib-RADIUS+2), (float) (v+blockDim.y-RADIUS) ); LocalBlock[SharedIdx+4*ib+3] = tex2D( tex, (float) (u+4*ib-RADIUS+3), (float) (v+blockDim.y-RADIUS) ); } } __syncthreads(); u >>= 2; // index as uchar4 from 
here uchar4 *pSobel = (uchar4 *) (((char *) pSobelOriginal)+v*SobelPitch); SharedIdx = threadIdx.y * SharedPitch; blockFunction = blockFunction_table[blockOperation]; for ( ib = threadIdx.x; ib < BlockWidth; ib += blockDim.x ) { uchar4 out; unsigned char pix00 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+0]; unsigned char pix01 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+1]; unsigned char pix02 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+2]; unsigned char pix10 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+0]; unsigned char pix11 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+1]; unsigned char pix12 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+2]; unsigned char pix20 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+0]; unsigned char pix21 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+1]; unsigned char pix22 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+2]; out.x = (*blockFunction)(pix00, pix01, pix02, pix10, pix11, pix12, pix20, pix21, pix22, fScale ); pix00 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+3]; pix10 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+3]; pix20 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+3]; out.y = (*blockFunction)(pix01, pix02, pix00, pix11, pix12, pix10, pix21, pix22, pix20, fScale ); pix01 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+4]; pix11 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+4]; pix21 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+4]; out.z = (*blockFunction)( pix02, pix00, pix01, pix12, pix10, pix11, pix22, pix20, pix21, fScale ); pix02 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+5]; pix12 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+5]; pix22 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+5]; out.w = (*blockFunction)( pix00, pix01, pix02, pix10, pix11, pix12, pix20, pix21, pix22, fScale ); if( pPointFunction != NULL ) { out.x = (*pPointFunction)(out.x, THRESHOLD ); out.y = (*pPointFunction)(out.y, THRESHOLD ); out.z = (*pPointFunction)(out.z, THRESHOLD ); out.w = (*pPointFunction)(out.w, THRESHOLD ); } if ( u+ib < w/4 && v < h ) { pSobel[u+ib] = out; } } __syncthreads(); } __global__ 
void SobelCopyImage( Pixel *pSobelOriginal, unsigned int Pitch, int w, int h, float fscale ) { unsigned char *pSobel = (unsigned char *) (((char *) pSobelOriginal)+blockIdx.x*Pitch); for ( int i = threadIdx.x; i < w; i += blockDim.x ) { pSobel[i] = min( max((tex2D( tex, (float) i, (float) blockIdx.x ) * fscale), 0.f), 255.f); } } // Perform block and pointer filtering using texture lookups. // The block and point operations are determined by the // input argument (see comment above for "SobelShared" function) __global__ void SobelTex( Pixel *pSobelOriginal, unsigned int Pitch, int w, int h, float fScale, int blockOperation, pointFunction_t pPointOperation ) { unsigned char *pSobel = (unsigned char *) (((char *) pSobelOriginal)+blockIdx.x*Pitch); unsigned char tmp = 0; for ( int i = threadIdx.x; i < w; i += blockDim.x ) { unsigned char pix00 = tex2D( tex, (float) i-1, (float) blockIdx.x-1 ); unsigned char pix01 = tex2D( tex, (float) i+0, (float) blockIdx.x-1 ); unsigned char pix02 = tex2D( tex, (float) i+1, (float) blockIdx.x-1 ); unsigned char pix10 = tex2D( tex, (float) i-1, (float) blockIdx.x+0 ); unsigned char pix11 = tex2D( tex, (float) i+0, (float) blockIdx.x+0 ); unsigned char pix12 = tex2D( tex, (float) i+1, (float) blockIdx.x+0 ); unsigned char pix20 = tex2D( tex, (float) i-1, (float) blockIdx.x+1 ); unsigned char pix21 = tex2D( tex, (float) i+0, (float) blockIdx.x+1 ); unsigned char pix22 = tex2D( tex, (float) i+1, (float) blockIdx.x+1 ); tmp = (*(blockFunction_table[blockOperation]))(pix00, pix01, pix02, pix10, pix11, pix12, pix20, pix21, pix22, fScale ); if( pPointOperation != NULL ) { tmp = (*pPointOperation)(tmp, 150.0 ); } pSobel[i] = tmp; } } extern "C" void setupTexture(int iw, int ih, Pixel *data, int Bpp) { cudaChannelFormatDesc desc; if (Bpp == 1) { desc = cudaCreateChannelDesc<unsigned char>(); } else { desc = cudaCreateChannelDesc<uchar4>(); } cutilSafeCall(cudaMallocArray(&array, &desc, iw, ih)); cutilSafeCall(cudaMemcpyToArray(array, 0, 0, 
data, Bpp*sizeof(Pixel)*iw*ih, cudaMemcpyHostToDevice)); } extern "C" void deleteTexture(void) { cutilSafeCall(cudaFreeArray(array)); } // Copy the pointers from the function tables to the host side void setupFunctionTables() { // Dynamically assign the function table. // Copy the function pointers to their appropriate locations according to the enum cutilSafeCall( cudaMemcpyFromSymbol( &h_blockFunction_table[SOBEL_FILTER], pComputeSobel, sizeof(blockFunction_t)) ); cutilSafeCall( cudaMemcpyFromSymbol( &h_blockFunction_table[BOX_FILTER], pComputeBox, sizeof(blockFunction_t) ) ); // do the same for the point function, where the 2nd function is NULL ("no-op" filter, skipped in kernel code) cutilSafeCall( cudaMemcpyFromSymbol( &h_pointFunction_table[THRESHOLD_FILTER], pComputeThreshold, sizeof(pointFunction_t) ) ); h_pointFunction_table[NULL_FILTER] = NULL; // now copy the function tables back to the device, so if we wish we can use an index into the table to choose them // We have now set the order in the function table according to our enum. cutilSafeCall( cudaMemcpyToSymbol( blockFunction_table, h_blockFunction_table, sizeof( blockFunction_t )*LAST_BLOCK_FILTER ) ); cutilSafeCall( cudaMemcpyToSymbol( pointFunction_table, h_pointFunction_table, sizeof( pointFunction_t )*LAST_POINT_FILTER ) ); } // Wrapper for the __global__ call that sets up the texture and threads // Below two methods for selecting the image processing function to run are shown. 
// BlockOperation is an integer kernel argument used as an index into the blockFunction_table on the device side // pPointOp is itself a function pointer passed as a kernel argument, retrieved from a host side copy of the function table extern "C" void sobelFilter(Pixel *odata, int iw, int ih, enum SobelDisplayMode mode, float fScale, int blockOperation, int pointOperation) { cutilSafeCall(cudaBindTextureToArray(tex, array)); pointFunction_t pPointOp = h_pointFunction_table[pointOperation]; switch ( mode ) { case SOBELDISPLAY_IMAGE: SobelCopyImage<<<ih, 384>>>(odata, iw, iw, ih, fScale ); break; case SOBELDISPLAY_SOBELTEX: SobelTex<<<ih, 384>>>(odata, iw, iw, ih, fScale, blockOperation, pPointOp ); break; case SOBELDISPLAY_SOBELSHARED: { dim3 threads(16,4); #ifndef FIXED_BLOCKWIDTH int BlockWidth = 80; // must be divisible by 16 for coalescing #endif dim3 blocks = dim3(iw/(4*BlockWidth)+(0!=iw%(4*BlockWidth)), ih/threads.y+(0!=ih%threads.y)); int SharedPitch = ~0x3f&(4*(BlockWidth+2*RADIUS)+0x3f); int sharedMem = SharedPitch*(threads.y+2*RADIUS); // for the shared kernel, width must be divisible by 4 iw &= ~3; SobelShared<<<blocks, threads, sharedMem>>>((uchar4 *) odata, iw, #ifndef FIXED_BLOCKWIDTH BlockWidth, SharedPitch, #endif iw, ih, fScale, blockOperation, pPointOp ); } break; } cutilSafeCall(cudaUnbindTexture(tex)); }
the_stack
#define TPB 512

// Threefish-256 "even" double-round mix: four MIX+permute layers with the
// even-round rotation constants. Operates on the eight 64-bit lanes p0..p7.
#define TFBIGMIX8e(){\
p0+=p1;p2+=p3;p4+=p5;p6+=p7;p1=ROTL64(p1,46) ^ p0;p3=ROTL64(p3,36) ^ p2;p5=ROTL64(p5,19) ^ p4;p7=ROTL64(p7,37) ^ p6;\
p2+=p1;p4+=p7;p6+=p5;p0+=p3;p1=ROTL64(p1,33) ^ p2;p7=ROTL64(p7,27) ^ p4;p5=ROTL64(p5,14) ^ p6;p3=ROTL64(p3,42) ^ p0;\
p4+=p1;p6+=p3;p0+=p5;p2+=p7;p1=ROTL64(p1,17) ^ p4;p3=ROTL64(p3,49) ^ p6;p5=ROTL64(p5,36) ^ p0;p7=ROTL64(p7,39) ^ p2;\
p6+=p1;p0+=p7;p2+=p5;p4+=p3;p1=ROTL64(p1,44) ^ p6;p7=ROTL64(p7, 9) ^ p0;p5=ROTL64(p5,54) ^ p2;p3=ROTR64(p3, 8) ^ p4;\
}

// Threefish-256 "odd" double-round mix (odd-round rotation constants).
#define TFBIGMIX8o(){\
p0+=p1;p2+=p3;p4+=p5;p6+=p7;p1=ROTL64(p1,39) ^ p0;p3=ROTL64(p3,30) ^ p2;p5=ROTL64(p5,34) ^ p4;p7=ROTL64(p7,24) ^ p6;\
p2+=p1;p4+=p7;p6+=p5;p0+=p3;p1=ROTL64(p1,13) ^ p2;p7=ROTL64(p7,50) ^ p4;p5=ROTL64(p5,10) ^ p6;p3=ROTL64(p3,17) ^ p0;\
p4+=p1;p6+=p3;p0+=p5;p2+=p7;p1=ROTL64(p1,25) ^ p4;p3=ROTL64(p3,29) ^ p6;p5=ROTL64(p5,39) ^ p0;p7=ROTL64(p7,43) ^ p2;\
p6+=p1;p0+=p7;p2+=p5;p4+=p3;p1=ROTL64(p1, 8) ^ p6;p7=ROTL64(p7,35) ^ p0;p5=ROTR64(p5, 8) ^ p2;p3=ROTL64(p3,22) ^ p4;\
}

// Host-precomputed key-schedule material for the first message block
// (filled by skein256_cpu_init via cudaMemcpyToSymbol).
__constant__ uint64_t c_sk_buf[64];
// Tweak words for the second (final/output) block.
__constant__ uint64_t c_t2[ 3] = { 0x08, 0xff00000000000000, 0xff00000000000008};
// Per-subkey round counters injected into p7 during the second block.
__constant__ uint32_t c_add[18] = {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18};
#define skein_ks_parity64 0x1BD11BDAA9FC1A22ull
//#include <stdio.h>

// Skein-256 compression of one 32-byte hash per thread, in place in
// outputHash. Layout is struct-of-arrays: word k of item `thread` lives at
// outputHash[threads*k + thread]. 1-D launch, TPB threads per block.
__global__ __launch_bounds__(512, 2)
void skein256_gpu_hash_32(uint32_t threads, uint64_t *outputHash)
{
    uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x);
    if (thread < threads)
    {
        const uint64_t dt0 = __ldg(&outputHash[thread]);
        const uint64_t dt1 = __ldg(&outputHash[threads + thread]);
        const uint64_t dt2 = __ldg(&outputHash[threads*2 + thread]);
        const uint64_t dt3 = __ldg(&outputHash[threads*3 + thread]);

        uint64_t h[ 9] = {
            0xCCD044A12FDB3E13, 0xE83590301A79A9EB,
            0x55AEA0614F816E6F, 0x2A2767A4AE9B94DB,
            0xEC06025E74DD7683, 0xE7A436CDC4746251,
            0xC36FBAF9393AD185, 0x3EEDBA1833EDFC13,
            0xb69d3cfcc73a4e2a, // skein_ks_parity64 ^ h[0..7]
        };
        // NOTE: c_sk_buf is consumed sequentially. The pattern
        // "p5+=buf[i++]; p7+=buf[i++]; p6+=buf[i];" deliberately does NOT
        // advance i for p6: the value injected into p6 in round k equals the
        // value injected into p5 in round k+1, so the slot is shared.
        int i=0;
        uint64_t p0 = c_sk_buf[i++] + dt0 + dt1;
        uint64_t p1 = c_sk_buf[i++] + dt1;
        uint64_t p2 = c_sk_buf[i++] + dt2 + dt3;
        uint64_t p3 = c_sk_buf[i++] + dt3;
        uint64_t p4 = c_sk_buf[i++];
        uint64_t p5 = c_sk_buf[i++];
        uint64_t p6 = c_sk_buf[i++];
        uint64_t p7 = c_sk_buf[i++];
        // Round_8_512v30(h, t, p0, p1, p2, p3, p4, p5, p6, p7, 1);
        // TFBIGMIX8e();
        // First double-round partially evaluated on the host: the p5/p7
        // rotations of the first two layers are precomputed in c_sk_buf.
        p1=ROTL64(p1,46) ^ p0;
        p3=ROTL64(p3,36) ^ p2;
        p2+=p1;
        p0+=p3;
        p1=ROTL64(p1,33) ^ p2;
        p3=ROTL64(p3,42) ^ p0;
        p4+=p1; p6+=p3; p0+=p5; p2+=p7;
        p1=ROTL64(p1,17) ^ p4;
        p3=ROTL64(p3,49) ^ p6;
        p5=c_sk_buf[i++] ^ p0;
        p7=c_sk_buf[i++] ^ p2;
        p6+=p1; p0+=p7; p2+=p5; p4+=p3;
        p1=ROTL64(p1,44) ^ p6;
        p7=ROTL64(p7, 9) ^ p0;
        p5=ROTL64(p5,54) ^ p2;
        p3=ROTL64(p3,56) ^ p4;
        p0+=h[ 1]; p1+=h[ 2]; p2+=h[ 3]; p3+=h[ 4]; p4+=h[ 5];
        p5+=c_sk_buf[i++]; p7+=c_sk_buf[i++]; p6+=c_sk_buf[i];
        TFBIGMIX8o();
        p0+=h[ 2]; p1+=h[ 3]; p2+=h[ 4]; p3+=h[ 5]; p4+=h[ 6];
        p5+=c_sk_buf[i++]; p7+=c_sk_buf[i++]; p6+=c_sk_buf[i];
        // Round_8_512v30(h, t, p0, p1, p2, p3, p4, p5, p6, p7, 3);
        TFBIGMIX8e();
        p0+=h[ 3]; p1+=h[ 4]; p2+=h[ 5]; p3+=h[ 6]; p4+=h[ 7];
        p5+=c_sk_buf[i++]; p7+=c_sk_buf[i++]; p6+=c_sk_buf[i];
        TFBIGMIX8o();
        p0+=h[ 4]; p1+=h[ 5]; p2+=h[ 6]; p3+=h[ 7]; p4+=h[ 8];
        p5+=c_sk_buf[i++]; p7+=c_sk_buf[i++]; p6+=c_sk_buf[i];
        // Round_8_512v30(h, t, p0, p1, p2, p3, p4, p5, p6, p7, 5);
        TFBIGMIX8e();
        p0+=h[ 5]; p1+=h[ 6]; p2+=h[ 7]; p3+=h[ 8]; p4+=h[ 0];
        p5+=c_sk_buf[i++]; p7+=c_sk_buf[i++]; p6+=c_sk_buf[i];
        TFBIGMIX8o();
        p0+=h[ 6]; p1+=h[ 7]; p2+=h[ 8]; p3+=h[ 0]; p4+=h[ 1];
        p5+=c_sk_buf[i++]; p7+=c_sk_buf[i++]; p6+=c_sk_buf[i];
        // Round_8_512v30(h, t, p0, p1, p2, p3, p4, p5, p6, p7, 7);
        TFBIGMIX8e();
        p0+=h[ 7]; p1+=h[ 8]; p2+=h[ 0]; p3+=h[ 1]; p4+=h[ 2];
        p5+=c_sk_buf[i++]; p7+=c_sk_buf[i++]; p6+=c_sk_buf[i];
        TFBIGMIX8o();
        p0+=h[ 8]; p1+=h[ 0]; p2+=h[ 1]; p3+=h[ 2]; p4+=h[ 3];
        p5+=c_sk_buf[i++]; p7+=c_sk_buf[i++]; p6+=c_sk_buf[i];
        // Round_8_512v30(h, t, p0, p1, p2, p3, p4, p5, p6, p7, 9);
        TFBIGMIX8e();
        p0+=h[ 0]; p1+=h[ 1]; p2+=h[ 2]; p3+=h[ 3]; p4+=h[ 4];
        p5+=c_sk_buf[i++]; p7+=c_sk_buf[i++]; p6+=c_sk_buf[i];
        TFBIGMIX8o();
        p0+=h[ 1]; p1+=h[ 2]; p2+=h[ 3]; p3+=h[ 4]; p4+=h[ 5];
        p5+=c_sk_buf[i++]; p7+=c_sk_buf[i++]; p6+=c_sk_buf[i];
        // Round_8_512v30(h, t, p0, p1, p2, p3, p4, p5, p6, p7,11);
        TFBIGMIX8e();
        p0+=h[ 2]; p1+=h[ 3]; p2+=h[ 4]; p3+=h[ 5]; p4+=h[ 6];
        p5+=c_sk_buf[i++]; p7+=c_sk_buf[i++]; p6+=c_sk_buf[i];
        TFBIGMIX8o();
        p0+=h[ 3]; p1+=h[ 4]; p2+=h[ 5]; p3+=h[ 6]; p4+=h[ 7];
        p5+=c_sk_buf[i++]; p7+=c_sk_buf[i++]; p6+=c_sk_buf[i];
        // Round_8_512v30(h, t, p0, p1, p2, p3, p4, p5, p6, p7,13);
        TFBIGMIX8e();
        p0+=h[ 4]; p1+=h[ 5]; p2+=h[ 6]; p3+=h[ 7]; p4+=h[ 8];
        p5+=c_sk_buf[i++]; p7+=c_sk_buf[i++]; p6+=c_sk_buf[i];
        TFBIGMIX8o();
        p0+=h[ 5]; p1+=h[ 6]; p2+=h[ 7]; p3+=h[ 8]; p4+=h[ 0];
        p5+=c_sk_buf[i++]; p7+=c_sk_buf[i++]; p6+=c_sk_buf[i];
        // Round_8_512v30(h, t, p0, p1, p2, p3, p4, p5, p6, p7,15);
        TFBIGMIX8e();
        p0+=h[ 6]; p1+=h[ 7]; p2+=h[ 8]; p3+=h[ 0]; p4+=h[ 1];
        p5+=c_sk_buf[i++]; p7+=c_sk_buf[i++]; p6+=c_sk_buf[i];
        TFBIGMIX8o();
        p0+=h[ 7]; p1+=h[ 8]; p2+=h[ 0]; p3+=h[ 1]; p4+=h[ 2];
        p5+=c_sk_buf[i++]; p7+=c_sk_buf[i++]; p6+=c_sk_buf[i];
        // Round_8_512v30(h, t, p0, p1, p2, p3, p4, p5, p6, p7,17);
        TFBIGMIX8e();
        p0+=h[ 8]; p1+=h[ 0]; p2+=h[ 1]; p3+=h[ 2]; p4+=h[ 3];
        p5+=c_sk_buf[i++]; p7+=c_sk_buf[i++]; p6+=c_sk_buf[i];
        TFBIGMIX8o();
        // Final subkey of block 1: p0..p3 additions are folded into the
        // feed-forward below.
        p4+=h[ 4];
        p5+=c_sk_buf[i++]; p7+=c_sk_buf[i++]; p6+=c_sk_buf[i];
        // Feed-forward: chaining value = E(K,T,M) ^ M for the first block.
        p0 = (p0+h[ 0]) ^ dt0;
        p1 = (p1+h[ 1]) ^ dt1;
        p2 = (p2+h[ 2]) ^ dt2;
        p3 = (p3+h[ 3]) ^ dt3;

        h[0] = p0; h[1] = p1; h[2] = p2; h[3] = p3;
        h[4] = p4; h[5] = p5; h[6] = p6; h[7] = p7;
        h[8] = h[ 0] ^ h[ 1] ^ h[ 2] ^ h[ 3] ^ h[ 4] ^ h[ 5] ^ h[ 6] ^ h[ 7] ^ skein_ks_parity64;

        // Second (output) block: message is all-zero, so p0..p7 start from
        // the new key words; only the tweak is added explicitly.
        p5+=c_t2[0]; //p5 already equal h[5]
        p6+=c_t2[1];
        // Round_8_512v30(h, t, p0, p1, p2, p3, p4, p5, p6, p7, 1);
        TFBIGMIX8e();
        p0+=h[ 1]; p1+=h[ 2]; p2+=h[ 3]; p3+=h[ 4]; p4+=h[ 5];
        p5+=h[ 6] + c_t2[ 1]; p6+=h[ 7] + c_t2[ 2]; p7+=h[ 8] + c_add[ 0];
        TFBIGMIX8o();
        p0+=h[ 2]; p1+=h[ 3]; p2+=h[ 4]; p3+=h[ 5]; p4+=h[ 6];
        p5+=h[ 7] + c_t2[ 2]; p6+=h[ 8] + c_t2[ 0]; p7+=h[ 0] + c_add[ 1];
        // Round_8_512v30(h, t, p0, p1, p2, p3, p4, p5, p6, p7, 3);
        TFBIGMIX8e();
        p0+=h[ 3]; p1+=h[ 4]; p2+=h[ 5]; p3+=h[ 6]; p4+=h[ 7];
        p5+=h[ 8] + c_t2[ 0]; p6+=h[ 0] + c_t2[ 1]; p7+=h[ 1] + c_add[ 2];
        TFBIGMIX8o();
        p0+=h[ 4]; p1+=h[ 5]; p2+=h[ 6]; p3+=h[ 7]; p4+=h[ 8];
        p5+=h[ 0] + c_t2[ 1]; p6+=h[ 1] + c_t2[ 2]; p7+=h[ 2] + c_add[ 3];
        // Round_8_512v30(h, t, p0, p1, p2, p3, p4, p5, p6, p7, 5);
        TFBIGMIX8e();
        p0+=h[ 5]; p1+=h[ 6]; p2+=h[ 7]; p3+=h[ 8]; p4+=h[ 0];
        p5+=h[ 1] + c_t2[ 2]; p6+=h[ 2] + c_t2[ 0]; p7+=h[ 3] + c_add[ 4];
        TFBIGMIX8o();
        p0+=h[ 6]; p1+=h[ 7]; p2+=h[ 8]; p3+=h[ 0]; p4+=h[ 1];
        p5+=h[ 2] + c_t2[ 0]; p6+=h[ 3] + c_t2[ 1]; p7+=h[ 4] + c_add[ 5];
        // Round_8_512v30(h, t, p0, p1, p2, p3, p4, p5, p6, p7, 7);
        TFBIGMIX8e();
        p0+=h[ 7]; p1+=h[ 8]; p2+=h[ 0]; p3+=h[ 1]; p4+=h[ 2];
        p5+=h[ 3] + c_t2[ 1]; p6+=h[ 4] + c_t2[ 2]; p7+=h[ 5] + c_add[ 6];
        TFBIGMIX8o();
        p0+=h[ 8]; p1+=h[ 0]; p2+=h[ 1]; p3+=h[ 2]; p4+=h[ 3];
        p5+=h[ 4] + c_t2[ 2]; p6+=h[ 5] + c_t2[ 0]; p7+=h[ 6] + c_add[ 7];
        // Round_8_512v30(h, t, p0, p1, p2, p3, p4, p5, p6, p7, 9);
        TFBIGMIX8e();
        p0+=h[ 0]; p1+=h[ 1]; p2+=h[ 2]; p3+=h[ 3]; p4+=h[ 4];
        p5+=h[ 5] + c_t2[ 0]; p6+=h[ 6] + c_t2[ 1]; p7+=h[ 7] + c_add[ 8];
        TFBIGMIX8o();
        p0+=h[ 1]; p1+=h[ 2]; p2+=h[ 3]; p3+=h[ 4]; p4+=h[ 5];
        p5+=h[ 6] + c_t2[ 1]; p6+=h[ 7] + c_t2[ 2]; p7+=h[ 8] + c_add[ 9];
        // Round_8_512v30(h, t, p0, p1, p2, p3, p4, p5, p6, p7,11);
        TFBIGMIX8e();
        p0+=h[ 2]; p1+=h[ 3]; p2+=h[ 4]; p3+=h[ 5]; p4+=h[ 6];
        p5+=h[ 7] + c_t2[ 2]; p6+=h[ 8] + c_t2[ 0]; p7+=h[ 0] + c_add[10];
        TFBIGMIX8o();
        p0+=h[ 3]; p1+=h[ 4]; p2+=h[ 5]; p3+=h[ 6]; p4+=h[ 7];
        p5+=h[ 8] + c_t2[ 0]; p6+=h[ 0] + c_t2[ 1]; p7+=h[ 1] + c_add[11];
        // Round_8_512v30(h, t, p0, p1, p2, p3, p4, p5, p6, p7,13);
        TFBIGMIX8e();
        p0+=h[ 4]; p1+=h[ 5]; p2+=h[ 6]; p3+=h[ 7]; p4+=h[ 8];
        p5+=h[ 0] + c_t2[ 1]; p6+=h[ 1] + c_t2[ 2]; p7+=h[ 2] + c_add[12];
        TFBIGMIX8o();
        p0+=h[ 5]; p1+=h[ 6]; p2+=h[ 7]; p3+=h[ 8]; p4+=h[ 0];
        p5+=h[ 1] + c_t2[ 2]; p6+=h[ 2] + c_t2[ 0]; p7+=h[ 3] + c_add[13];
        // Round_8_512v30(h, t, p0, p1, p2, p3, p4, p5, p6, p7,15);
        TFBIGMIX8e();
        p0+=h[ 6]; p1+=h[ 7]; p2+=h[ 8]; p3+=h[ 0]; p4+=h[ 1];
        p5+=h[ 2] + c_t2[ 0]; p6+=h[ 3] + c_t2[ 1]; p7+=h[ 4] + c_add[14];
        TFBIGMIX8o();
        p0+=h[ 7]; p1+=h[ 8]; p2+=h[ 0]; p3+=h[ 1]; p4+=h[ 2];
        p5+=h[ 3] + c_t2[ 1]; p6+=h[ 4] + c_t2[ 2]; p7+=h[ 5] + c_add[15];
        // Round_8_512v30(h, t, p0, p1, p2, p3, p4, p5, p6, p7,17);
        TFBIGMIX8e();
        p0+=h[ 8]; p1+=h[ 0]; p2+=h[ 1]; p3+=h[ 2]; p4+=h[ 3];
        p5+=h[ 4] + c_t2[ 2]; p6+=h[ 5] + c_t2[ 0]; p7+=h[ 6] + c_add[16];
        TFBIGMIX8o();
        p0+=h[ 0]; p1+=h[ 1]; p2+=h[ 2]; p3+=h[ 3]; p4+=h[ 4];
        p5+=h[ 5] + c_t2[ 0]; p6+=h[ 6] + c_t2[ 1]; p7+=h[ 7] + c_add[17];

        // Only the first 256 bits of the output block are kept.
        outputHash[thread] = p0;
        outputHash[threads + thread] = p1;
        outputHash[threads*2 + thread] = p2;
        outputHash[threads*3 + thread] = p3;
    } //thread
}

// Precomputes the first-block key schedule (everything that does not depend
// on the per-thread message words) and uploads it to c_sk_buf. The message
// contribution (dt0..dt3) is zero here and is added per-thread in the kernel.
__host__
void skein256_cpu_init(int thr_id)
{
    uint64_t h[ 9] = {
        0xCCD044A12FDB3E13, 0xE83590301A79A9EB,
        0x55AEA0614F816E6F, 0x2A2767A4AE9B94DB,
        0xEC06025E74DD7683, 0xE7A436CDC4746251,
        0xC36FBAF9393AD185, 0x3EEDBA1833EDFC13,
        0xb69d3cfcc73a4e2a, // skein_ks_parity64 ^ h[0..7]
    };
    // Tweak for the first 32-byte message block.
    uint64_t t[3] = {0x20, 0xf000000000000000, 0xf000000000000020};
    uint64_t dt0,dt1,dt2,dt3;
    dt0=dt1=dt2=dt3=0;
    uint64_t sk_buf[64];
    int i=0;
    // Partially evaluate the first double-round with a zero message.
    uint64_t p0 = h[0] + dt0;
    uint64_t p1 = h[1] + dt1;
    uint64_t p2 = h[2] + dt2;
    uint64_t p3 = h[3] + dt3;
    uint64_t p4 = h[4];
    uint64_t p5 = h[5] + t[0];
    uint64_t p6 = h[6] + t[1];
    uint64_t p7 = h[7];
    p0+=p1; p2+=p3; p4+=p5; p6+=p7;
    p5=ROTL64(p5,19) ^ p4;
    p7=ROTL64(p7,37) ^ p6;
    p4+=p7; p6+=p5;
    p7=ROTL64(p7,27) ^ p4;
    p5=ROTL64(p5,14) ^ p6;
    // Entries 0..9: precomputed lane values for the kernel's unrolled
    // first round; entries 10..: subkey injections (p5/p7 pairs, with the
    // p6 slot shared with the next round's p5 — see kernel note).
    sk_buf[i++] = p0;
    sk_buf[i++] = p1;
    sk_buf[i++] = p2;
    sk_buf[i++] = p3;
    sk_buf[i++] = p4;
    sk_buf[i++] = p5;
    sk_buf[i++] = p6;
    sk_buf[i++] = p7;
    sk_buf[i++] = ROTL64(p5,36);
    sk_buf[i++] = ROTL64(p7,39);
    sk_buf[i++] = h[ 6] + t[1];
    sk_buf[i++] = h[ 8] + 1;
    sk_buf[i++] = h[ 7] + t[2];
    sk_buf[i++] = h[ 0] + 2;
    sk_buf[i++] = h[ 8] + t[ 0];
    sk_buf[i++] = h[ 1] + 3;
    sk_buf[i++] = h[ 0] + t[ 1];
    sk_buf[i++] = h[ 2] + 4;
    sk_buf[i++] = h[ 1] + t[ 2];
    sk_buf[i++] = h[ 3] + 5;
    sk_buf[i++] = h[ 2] + t[ 0];
    sk_buf[i++] = h[ 4] + 6;
    sk_buf[i++] = h[ 3] + t[ 1];
    sk_buf[i++] = h[ 5] + 7;
    sk_buf[i++] = h[ 4] + t[ 2];
    sk_buf[i++] = h[ 6] + 8;
    sk_buf[i++] = h[ 5] + t[ 0];
    sk_buf[i++] = h[ 7] + 9;
    sk_buf[i++] = h[ 6] + t[ 1];
    sk_buf[i++] = h[ 8] + 10;
    sk_buf[i++] = h[ 7] + t[ 2];
    sk_buf[i++] = h[ 0] + 11;
    sk_buf[i++] = h[ 8] + t[ 0];
    sk_buf[i++] = h[ 1] + 12;
    sk_buf[i++] = h[ 0] + t[ 1];
    sk_buf[i++] = h[ 2] + 13;
    sk_buf[i++] = h[ 1] + t[ 2];
    sk_buf[i++] = h[ 3] + 14;
    sk_buf[i++] = h[ 2] + t[ 0];
    sk_buf[i++] = h[ 4] + 15;
    sk_buf[i++] = h[ 3] + t[ 1];
    sk_buf[i++] = h[ 5] + 16;
    sk_buf[i++] = h[ 4] + t[ 2];
    sk_buf[i++] = h[ 6] + 17;
    sk_buf[i++] = h[ 5] + t[ 0];
    sk_buf[i++] = h[ 7] + 18;
    sk_buf[i++] = h[ 6] + t[ 1];
    CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_sk_buf, sk_buf, sizeof(sk_buf), 0, cudaMemcpyHostToDevice));
}

// Host launcher: one thread per hash, TPB threads per block.
__host__
void skein256_cpu_hash_32(const uint32_t threads, uint2 *d_hash)
{
    dim3 grid((threads + TPB - 1) / TPB);
    dim3 block(TPB);
    skein256_gpu_hash_32<<<grid, block>>>(threads, (uint64_t*)d_hash);
}
the_stack
#include <vector> #include "thrust/functional.h" #include "thrust/sort.h" namespace anakin { namespace saber { // caffe util_nms.cu #define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0)) int const threadsPerBlock = sizeof(unsigned long long) * 8; __device__ inline float devIoU(float const *const a, float const *const b) { float left = max(a[0], b[0]), right = min(a[2], b[2]); float top = max(a[1], b[1]), bottom = min(a[3], b[3]); float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f); float interS = width * height; float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1); float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1); return interS / (Sa + Sb - interS); } __global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh, const float *dev_boxes, unsigned long long *dev_mask) { const int row_start = blockIdx.y; const int col_start = blockIdx.x; const int row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock); const int col_size = min(n_boxes - col_start * threadsPerBlock, threadsPerBlock); __shared__ float block_boxes[threadsPerBlock * 5]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 5 + 0] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0]; block_boxes[threadIdx.x * 5 + 1] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1]; block_boxes[threadIdx.x * 5 + 2] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2]; block_boxes[threadIdx.x * 5 + 3] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3]; block_boxes[threadIdx.x * 5 + 4] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; const float *cur_box = dev_boxes + cur_box_idx * 5; int i = 0; unsigned long long t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) { t 
|= 1ULL << i; } } const int col_blocks = DIVUP(n_boxes, threadsPerBlock); dev_mask[cur_box_idx * col_blocks + col_start] = t; } } const std::vector<bool> nms_voting0(const float *boxes_dev, unsigned long long *mask_dev, int boxes_num, float nms_overlap_thresh, const int max_candidates, const int top_n) { if ((max_candidates > 0) && (boxes_num > max_candidates)) { boxes_num = max_candidates; } // float *boxes_dev = NULL; // unsigned long long *mask_dev = NULL; const int col_blocks = DIVUP(boxes_num, threadsPerBlock); // CUDA_CHECK(cudaMalloc(&mask_dev, // boxes_num * col_blocks * sizeof(unsigned long long))); dim3 blocks(DIVUP(boxes_num, threadsPerBlock), DIVUP(boxes_num, threadsPerBlock)); dim3 threads(threadsPerBlock); nms_kernel << < blocks, threads >> > (boxes_num, nms_overlap_thresh, boxes_dev, mask_dev); std::vector<unsigned long long> mask_host(boxes_num * col_blocks); CUDA_CHECK(cudaMemcpy(&mask_host[0], mask_dev, sizeof(unsigned long long) * boxes_num * col_blocks, cudaMemcpyDeviceToHost)); std::vector<unsigned long long> remv(col_blocks); memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks); std::vector<bool> mask(boxes_num, false); int num_to_keep = 0; for (int i = 0; i < boxes_num; i++) { int nblock = i / threadsPerBlock; int inblock = i % threadsPerBlock; if (!(remv[nblock] & (1ULL << inblock))) { ++num_to_keep; mask[i] = true; unsigned long long *p = &mask_host[0] + i * col_blocks; for (int j = nblock; j < col_blocks; j++) { remv[j] |= p[j]; } if ((top_n > 0) && (num_to_keep >= top_n)) { break; } } } // CUDA_CHECK(cudaFree(mask_dev)); return mask; } template <typename Dtype> __global__ void rpn_cmp_conf_bbox_kernel( const int threads, const int num_anchors, const int map_height, const int map_width, const Dtype input_height, const Dtype input_width, const Dtype heat_map_a, const Dtype heat_map_b, const Dtype allow_border, const Dtype allow_border_ratio, const Dtype min_size_w, const Dtype min_size_h, const bool min_size_mode_and_else_or, const 
Dtype thr_obj, const Dtype bsz01, const bool do_bbox_norm, const Dtype mean0, const Dtype mean1, const Dtype mean2, const Dtype mean3, const Dtype std0, const Dtype std1, const Dtype std2, const Dtype std3, const bool refine_out_of_map_bbox, const Dtype* anc_data, const Dtype* prob_data, const Dtype* tgt_data, Dtype* conf_data, Dtype* bbox_data) { int map_size = map_height * map_width; CUDA_KERNEL_LOOP(index, threads) { int w = index % map_width; int h = (index / map_width) % map_height; int a = index / map_size; int off = h * map_width + w; Dtype score = prob_data[(num_anchors + a) * map_size + off]; if (score < thr_obj) { conf_data[index] = 0.0; continue; } int ax4 = a * 4; Dtype anchor_ctr_x = anc_data[ax4]; Dtype anchor_ctr_y = anc_data[ax4 + 1]; Dtype anchor_width = anc_data[ax4 + 2]; Dtype anchor_height = anc_data[ax4 + 3]; Dtype input_ctr_x = w * heat_map_a + heat_map_b + anchor_ctr_x; Dtype input_ctr_y = h * heat_map_a + heat_map_b + anchor_ctr_y; if (allow_border >= Dtype(0.0) || allow_border_ratio >= Dtype(0.0)) { Dtype x1 = input_ctr_x - 0.5 * (anchor_width - bsz01); Dtype y1 = input_ctr_y - 0.5 * (anchor_height - bsz01); Dtype x2 = x1 + anchor_width - bsz01; Dtype y2 = y1 + anchor_height - bsz01; if (allow_border >= Dtype(0.0) && ( x1 < -allow_border || y1 < -allow_border || x2 > input_width - 1 + allow_border || y2 > input_height - 1 + allow_border)) { conf_data[index] = 0.0; continue; } else if (allow_border_ratio >= Dtype(0.0)) { Dtype x11 = max(Dtype(0), x1); Dtype y11 = max(Dtype(0), y1); Dtype x22 = min(input_width - 1, x2); Dtype y22 = min(input_height - 1, y2); if ((y22 - y11 + bsz01) * (x22 - x11 + bsz01) / ((y2 - y1 + bsz01) * (x2 - x1 + bsz01)) < (1.0 - allow_border_ratio)) { conf_data[index] = 0.0; continue; } } } Dtype tg0 = tgt_data[ax4 * map_size + off]; Dtype tg1 = tgt_data[(ax4 + 1) * map_size + off]; Dtype tg2 = tgt_data[(ax4 + 2) * map_size + off]; Dtype tg3 = tgt_data[(ax4 + 3) * map_size + off]; if (do_bbox_norm) { tg0 = tg0 * std0 
+ mean0; tg1 = tg1 * std1 + mean1; tg2 = tg2 * std2 + mean2; tg3 = tg3 * std3 + mean3; } Dtype tw = anchor_width * exp(tg2); Dtype th = anchor_height * exp(tg3); Dtype ctx = tg0 * anchor_width + input_ctr_x; Dtype cty = tg1 * anchor_height + input_ctr_y; Dtype ltx = ctx - 0.5 * (tw - bsz01); Dtype lty = cty - 0.5 * (th - bsz01); Dtype rbx = ltx + tw - bsz01; Dtype rby = lty + th - bsz01; if (refine_out_of_map_bbox) { ltx = min(max(ltx, Dtype(0.0)), input_width -1); lty = min(max(lty, Dtype(0.0)), input_height -1); rbx = min(max(rbx, Dtype(0.0)), input_width -1); rby = min(max(rby, Dtype(0.0)), input_height -1); } if (min_size_mode_and_else_or) { if ((rbx - ltx + bsz01) < min_size_w || (rby - lty + bsz01) < min_size_h) { conf_data[index] = 0.0; continue; } } else { if ((rbx - ltx + bsz01) < min_size_w && (rby - lty + bsz01) < min_size_h) { conf_data[index] = 0.0; continue; } } conf_data[index] = score; bbox_data[index * 4] = ltx; bbox_data[index * 4 + 1] = lty; bbox_data[index * 4 + 2] = rbx; bbox_data[index * 4 + 3] = rby; } } template <typename Dtype> void rpn_cmp_conf_bbox_gpu(const int num_anchors, const int map_height, const int map_width, const Dtype input_height, const Dtype input_width, const Dtype heat_map_a, const Dtype heat_map_b, const Dtype allow_border, const Dtype allow_border_ratio, const Dtype min_size_w, const Dtype min_size_h, const bool min_size_mode_and_else_or, const Dtype thr_obj, const Dtype bsz01, const bool do_bbox_norm, const Dtype mean0, const Dtype mean1, const Dtype mean2, const Dtype mean3, const Dtype std0, const Dtype std1, const Dtype std2, const Dtype std3, const bool refine_out_of_map_bbox, const Dtype* anc_data, const Dtype* prob_data, const Dtype* tgt_data, Dtype* conf_data, Dtype* bbox_data, Context<NV> *ctx) { #ifdef ENABLE_DEBUG #undef CUDA_NUM_THREADS #define CUDA_NUM_THREADS 256 #endif int threads = num_anchors * map_height * map_width; rpn_cmp_conf_bbox_kernel<Dtype><<<CUDA_GET_BLOCKS(threads), CUDA_NUM_THREADS, 0, 
ctx->get_compute_stream()>>>(threads, num_anchors, map_height, map_width, input_height, input_width, heat_map_a, heat_map_b, allow_border, allow_border_ratio, min_size_w, min_size_h, min_size_mode_and_else_or, thr_obj, bsz01, do_bbox_norm, mean0, mean1, mean2, mean3, std0, std1, std2, std3, refine_out_of_map_bbox, anc_data, prob_data, tgt_data, conf_data, bbox_data); CUDA_POST_KERNEL_CHECK; } template void rpn_cmp_conf_bbox_gpu(const int num_anchors, const int map_height, const int map_width, const float input_height, const float input_width, const float heat_map_a, const float heat_map_b, const float allow_border, const float allow_border_ratio, const float min_size_w, const float min_size_h, const bool min_size_mode_and_else_or, const float thr_obj, const float bsz01, const bool do_bbox_norm, const float mean0, const float mean1, const float mean2, const float mean3, const float std0, const float std1, const float std2, const float std3, const bool refine_out_of_map_bbox, const float* anc_data, const float* prob_data, const float* tgt_data, float* conf_data, float* bbox_data, Context<NV> *ctx); // rcnn template <typename Dtype> __global__ void rcnn_cmp_conf_bbox_kernel(const int num_rois, const Dtype input_height, const Dtype input_width, const Dtype allow_border, const Dtype allow_border_ratio, const Dtype min_size_w, const Dtype min_size_h, const bool min_size_mode_and_else_or, const Dtype thr_obj, const Dtype bsz01, const bool do_bbox_norm, const Dtype mean0, const Dtype mean1, const Dtype mean2, const Dtype mean3, const Dtype std0, const Dtype std1, const Dtype std2, const Dtype std3, const bool refine_out_of_map_bbox, const bool regress_agnostic, const int num_class, const Dtype* thr_cls, const Dtype* rois_data, const Dtype* prob_data, const Dtype* tgt_data, Dtype* conf_data, Dtype* bbox_data) { int probs_dim = num_class + 1; int cords_dim = (regress_agnostic ? 
2 : (num_class + 1)) * 4; CUDA_KERNEL_LOOP(index, num_rois) { const Dtype* probs = prob_data + index * probs_dim; const Dtype* cords = tgt_data + index * cords_dim; const Dtype* rois = rois_data + index * 5; if ((1.0 - probs[0]) < thr_obj) { conf_data[index] = 0.0; continue; } if (int(rois[0]) == -1) { conf_data[index] = 0.0; continue; } Dtype score_max = -10e6; int cls_max = -1; for (int c = 0; c < num_class; c++) { Dtype score_c = probs[c + 1] - thr_cls[c]; if (score_c > score_max) { score_max = score_c; cls_max = c; } } if (score_max < 0) { conf_data[index] = 0.0; continue; } if (allow_border >= 0.0 || allow_border_ratio >= 0.0) { Dtype x1 = rois[1]; Dtype y1 = rois[2]; Dtype x2 = rois[3]; Dtype y2 = rois[4]; if (allow_border >= 0.0 && ( x1 < -allow_border || y1 < -allow_border || x2 > input_width - 1 + allow_border || y2 > input_height - 1 + allow_border )) { conf_data[index] = 0.0; continue; } else if (allow_border_ratio >= 0.0) { Dtype x11 = max(Dtype(0.0), x1); Dtype y11 = max(Dtype(0.0), y1); Dtype x22 = min(input_width - 1, x2); Dtype y22 = min(input_height - 1, y2); if ((y22 - y11 + bsz01) * (x22 - x11 + bsz01) / ((y2 - y1 + bsz01) * (x2 - x1 +bsz01)) < (1.0 - allow_border_ratio)) { conf_data[index] = 0.0; continue; } } } Dtype rois_w = rois[3] - rois[1] + bsz01; Dtype rois_h = rois[4] - rois[2] + bsz01; Dtype rois_ctr_x = rois[1] + 0.5 * (rois_w - bsz01); Dtype rois_ctr_y = rois[2] + 0.5 * (rois_h - bsz01); int cdst = regress_agnostic ? 
4 : ((cls_max + 1) * 4); Dtype tg0 = cords[cdst]; Dtype tg1 = cords[cdst + 1]; Dtype tg2 = cords[cdst + 2]; Dtype tg3 = cords[cdst + 3]; if (do_bbox_norm) { tg0 = tg0 * std0 + mean0; tg1 = tg1 * std1 + mean1; tg2 = tg2 * std2 + mean2; tg3 = tg3 * std3 + mean3; } Dtype tw = rois_w * exp(tg2); Dtype th = rois_h * exp(tg3); Dtype ctx = tg0 * rois_w + rois_ctr_x; Dtype cty = tg1 * rois_h + rois_ctr_y; Dtype ltx = ctx - 0.5 * (tw - bsz01); Dtype lty = cty - 0.5 * (th - bsz01); Dtype rbx = ltx + tw - bsz01; Dtype rby = lty + th - bsz01; if (refine_out_of_map_bbox) { ltx = min(max(ltx, Dtype(0.0)), input_width -1); lty = min(max(lty, Dtype(0.0)), input_height -1); rbx = min(max(rbx, Dtype(0.0)), input_width -1); rby = min(max(rby, Dtype(0.0)), input_height -1); } if (min_size_mode_and_else_or) { if ((rbx - ltx + bsz01) < min_size_w || (rby - lty + bsz01) < min_size_h) { conf_data[index] = 0.0; continue; } } else { if ((rbx - ltx + bsz01) < min_size_w && (rby - lty + bsz01) < min_size_h) { conf_data[index] = 0.0; continue; } } conf_data[index] = probs[cls_max + 1]; bbox_data[index * 4] = ltx; bbox_data[index * 4 + 1] = lty; bbox_data[index * 4 + 2] = rbx; bbox_data[index * 4 + 3] = rby; } } template <typename Dtype> void rcnn_cmp_conf_bbox_gpu(const int num_rois, const Dtype input_height, const Dtype input_width, const Dtype allow_border, const Dtype allow_border_ratio, const Dtype min_size_w, const Dtype min_size_h, const bool min_size_mode_and_else_or, const Dtype thr_obj, const Dtype bsz01, const bool do_bbox_norm, const Dtype mean0, const Dtype mean1, const Dtype mean2, const Dtype mean3, const Dtype std0, const Dtype std1, const Dtype std2, const Dtype std3, const bool refine_out_of_map_bbox, const bool regress_agnostic, const int num_class, const Dtype* thr_cls, const Dtype* rois_data, const Dtype* prob_data, const Dtype* tgt_data, Dtype* conf_data, Dtype* bbox_data, Context<NV> *ctx) { int threads = num_rois; 
rcnn_cmp_conf_bbox_kernel<Dtype><<<CUDA_GET_BLOCKS(threads), CUDA_NUM_THREADS, 0, ctx->get_compute_stream()>>>(num_rois, input_height, input_width, allow_border, allow_border_ratio, min_size_w, min_size_h, min_size_mode_and_else_or, thr_obj, bsz01, do_bbox_norm, mean0, mean1, mean2, mean3, std0, std1, std2, std3, refine_out_of_map_bbox, regress_agnostic, num_class, thr_cls, rois_data, prob_data, tgt_data, conf_data, bbox_data); CUDA_POST_KERNEL_CHECK; } template void rcnn_cmp_conf_bbox_gpu(const int num_rois, const float input_height, const float input_width, const float allow_border, const float allow_border_ratio, const float min_size_w, const float min_size_h, const bool min_size_mode_and_else_or, const float thr_obj, const float bsz01, const bool do_bbox_norm, const float mean0, const float mean1, const float mean2, const float mean3, const float std0, const float std1, const float std2, const float std3, const bool refine_out_of_map_bbox, const bool regress_agnostic, const int num_class, const float* thr_cls, const float* rois_data, const float* prob_data, const float* tgt_data, float* conf_data, float* bbox_data, Context<NV> *ctx); // nms, copy and modify some cuda codes form yolo template <typename Dtype> __host__ __device__ Dtype bbox_size_gpu(const Dtype *bbox, const Dtype bsz01) { if (bbox[2] < bbox[0] || bbox[3] < bbox[1]) { return Dtype(0.); } else { return (bbox[2] - bbox[0] + bsz01) * (bbox[3] - bbox[1] + bsz01); } } template <typename Dtype> __host__ __device__ Dtype jaccard_overlap_gpu(const Dtype *bbox1, const Dtype *bbox2, const Dtype bsz01) { if (bbox2[0] > bbox1[2] || bbox2[2] < bbox1[0] || bbox2[1] > bbox1[3] || bbox2[3] < bbox1[1]) { return Dtype(0.); } else { const Dtype inter_xmin = max(bbox1[0], bbox2[0]); const Dtype inter_ymin = max(bbox1[1], bbox2[1]); const Dtype inter_xmax = min(bbox1[2], bbox2[2]); const Dtype inter_ymax = min(bbox1[3], bbox2[3]); const Dtype inter_width = inter_xmax - inter_xmin + bsz01; const Dtype inter_height = 
inter_ymax - inter_ymin + bsz01; const Dtype inter_size = inter_width * inter_height; const Dtype bbox1_size = bbox_size_gpu(bbox1, bsz01); const Dtype bbox2_size = bbox_size_gpu(bbox2, bsz01); return inter_size / (bbox1_size + bbox2_size - inter_size); } } template <typename Dtype> __global__ void compute_overlapped_by_idx_kernel( const int nthreads, const Dtype *bbox_data, const int bbox_step, const Dtype overlap_threshold, const int *idx, const int num_idx, const Dtype bsz01, bool *overlapped_data) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (nthreads); index += blockDim.x * gridDim.x) { const int j = index % num_idx; const int i = index / num_idx; if (i == j) { // Ignore same bbox. return; } // Compute overlap between i-th bbox and j-th bbox. const int start_loc_i = idx[i] * bbox_step; const int start_loc_j = idx[j] * bbox_step; const Dtype overlap = jaccard_overlap_gpu(bbox_data + start_loc_i, bbox_data + start_loc_j, bsz01); overlapped_data[index] = overlap > overlap_threshold; } } //template <typename Dtype> //void compute_overlapped_by_idx_gpu( // const int nthreads, const Dtype *bbox_data, const int bbox_step, // const Dtype overlap_threshold, const int *idx, const int num_idx, // const Dtype bsz01, bool *overlapped_data) { // // NOLINT_NEXT_LINE(whitespace/operators) // const int thread_size = 256; // int block_size = (nthreads + thread_size - 1) / thread_size; // compute_overlapped_by_idx_kernel << < block_size, thread_size >> > ( // nthreads, bbox_data, bbox_step, overlap_threshold, idx, num_idx, // bsz01, overlapped_data); //} template <typename Dtype> void compute_overlapped_by_idx_gpu( const int nthreads, const Dtype *bbox_data, const int bbox_step, const Dtype overlap_threshold, const int *idx, const int num_idx, const Dtype bsz01, bool *overlapped_data, const cudaStream_t &stream) { // NOLINT_NEXT_LINE(whitespace/operators) const int thread_size = 256; int block_size = (nthreads + thread_size - 1) / thread_size; // 
printf("thread_size = %d, block_size = %d\n", thread_size, block_size); compute_overlapped_by_idx_kernel << < block_size, thread_size, 0, stream >> > ( nthreads, bbox_data, bbox_step, overlap_threshold, idx, num_idx, bsz01, overlapped_data); cudaDeviceSynchronize(); } // Do nms, modified by mingli. void apply_nms(const bool *overlapped, const int num, const int top_k, const std::vector<int> &idxes, std::vector<int> *indices, const int nmsed_num = 0, const int nmsed_loc = 0) { std::vector<bool> mask(num, false); if (nmsed_num > 0) { int k_x_num_add_nmsed_num = nmsed_num; for (int k = 0; k < nmsed_num; k++) { int k_x_num_add_p = k_x_num_add_nmsed_num; for (int p = nmsed_num; p < num; p++) { if (overlapped[k_x_num_add_p++]) { mask[p] = true; } } k_x_num_add_nmsed_num += num; } } int count = nmsed_num; int k_x_num = (nmsed_num -1) * num; for (int k = nmsed_num; k < num; k++) { k_x_num += num; if (mask[k]) { continue; } else { indices->push_back(idxes[nmsed_loc + k - nmsed_num]); if (++count >= top_k) { break; } int k_x_num_add_p = k_x_num + k + 1; for (int p = k + 1; p < num; p++) { if (overlapped[k_x_num_add_p++]) { mask[p] = true; } } } } } template <typename Dtype, typename PGlue_nv> void apply_nms_gpu(const Dtype *bbox_data, const Dtype *conf_data, const int num_bboxes, const int bbox_step, const Dtype confidence_threshold, const int max_candidate_n, const int top_k, const Dtype nms_threshold, const Dtype bsz01, std::vector<int> *indices, PGlue_nv *overlapped, PGlue_nv *idx_sm, Context<NV> *ctx, std::vector<int> *idx_ptr, const int conf_step, const int conf_idx, const int nms_gpu_max_n_per_time) { indices->clear(); std::vector<int> idx; std::vector<Dtype> confidences; if (idx_ptr == NULL) { if (conf_step == 1) { for (int i = 0; i < num_bboxes; ++i) { if (conf_data[i] > confidence_threshold) { idx.push_back(i); confidences.push_back(conf_data[i]); } } } else { int i_x_step_add_idx = conf_idx; for (int i = 0; i < num_bboxes; ++i) { if (conf_data[i_x_step_add_idx] > 
confidence_threshold) { idx.push_back(i); confidences.push_back(conf_data[i_x_step_add_idx]); } i_x_step_add_idx += conf_step; } } } else { if (conf_step == 1) { for (int k = 0; k < idx_ptr->size(); k++) { int i = (*idx_ptr)[k]; if (conf_data[i] > confidence_threshold) { idx.push_back(i); confidences.push_back(conf_data[i]); } } } else { for (int k = 0; k < idx_ptr->size(); k++) { int i = (*idx_ptr)[k]; int i_x_step_add_idx = i * conf_step + conf_idx; if (conf_data[i_x_step_add_idx] > confidence_threshold) { idx.push_back(i); confidences.push_back(conf_data[i_x_step_add_idx]); } } } } int num_remain = confidences.size(); if (num_remain == 0) { return; } if (nms_threshold >= Dtype(1.0)) { for (int i = 0; i < idx.size(); i++) { indices->push_back(idx[i]); } return; } thrust::sort_by_key(&confidences[0], &confidences[0] + num_remain, &idx[0], thrust::greater<Dtype>()); if (max_candidate_n > -1 && max_candidate_n < num_remain) { num_remain = max_candidate_n; } int idx_loc = 0; int indices_size_pre = 0; while (idx_loc < num_remain && indices->size() < top_k) { int *idx_data = (int*)idx_sm->host_mutable_data(ctx); std::copy(indices->begin() + indices_size_pre, indices->end(), idx_data + indices_size_pre); int idx_num_cur_time = min(int(nms_gpu_max_n_per_time - indices->size()), int(num_remain - idx_loc)); std::copy(idx.begin() + idx_loc, idx.begin() + idx_loc + idx_num_cur_time, idx_data + indices->size()); int candidate_n_cur_time = indices->size() + idx_num_cur_time; int total_bboxes = candidate_n_cur_time * candidate_n_cur_time; bool *overlapped_data = (bool*)overlapped->device_mutable_data(ctx); compute_overlapped_by_idx_gpu(total_bboxes, bbox_data, bbox_step, nms_threshold, (const int*)idx_sm->device_data(ctx), candidate_n_cur_time, bsz01, overlapped_data, ctx->get_compute_stream()); const bool *overlapped_results = (const bool*)overlapped->host_data(ctx); indices_size_pre = indices->size(); apply_nms(overlapped_results, candidate_n_cur_time, top_k, idx, indices, 
indices->size(), idx_loc);
        idx_loc += idx_num_cur_time;
    }
}

// Explicit instantiation for float with the NV/NVHX86 tensor glue type.
template void apply_nms_gpu(const float *bbox_data, const float *conf_data,
        const int num_bboxes, const int bbox_step, const float confidence_threshold,
        const int max_candidate_n, const int top_k, const float nms_threshold,
        const float bsz01, std::vector<int> *indices,
        PGlue<Tensor<NV>, Tensor<NVHX86> > *overlapped,
        PGlue<Tensor<NV>, Tensor<NVHX86> > *idx_sm, Context<NV> *ctx,
        std::vector<int> *idx_ptr, const int conf_step, const int conf_idx,
        const int nms_gpu_max_n_per_time);

// CPU reference for the "ground feature" map: for every image pixel below
// the camera horizon, projects a point of height std_height standing on the
// ground plane at that pixel and records how many pixels tall it appears
// (optionally bucketed into normalize_grd_ft_dim log2 channels).
// cam_params[valid_param_idx_st .. +5] hold, in order: focal/principal terms
// (xpz, xct, ypz, yct), camera height over ground, and camera pitch —
// inferred from usage below; confirm against the caller's convention.
template <typename Dtype>
void GenGrdFt_cpu(unsigned int im_width, unsigned int im_height,
        unsigned int blob_width, unsigned int blob_height, Dtype std_height,
        const std::vector<Dtype> & cam_params, Dtype* grd_ft,
        Dtype read_width_scale, Dtype read_height_scale,
        unsigned int read_height_offset, unsigned int valid_param_idx_st,
        bool trans_cam_pitch_to_zero, bool normalize_grd_ft,
        unsigned int normalize_grd_ft_dim) {
    CHECK_GT(im_width, 0);
    CHECK_GT(im_height, 0);
    CHECK_GE(blob_width, im_width);
    CHECK_GE(blob_height, im_height);
    CHECK_GT(read_width_scale, 0);
    CHECK_GT(read_height_scale, 0);
    CHECK_LE(valid_param_idx_st + 6, cam_params.size());
    Dtype cam_xpz = cam_params[valid_param_idx_st + 0];
    Dtype cam_xct = cam_params[valid_param_idx_st + 1];
    Dtype cam_ypz = cam_params[valid_param_idx_st + 2];
    Dtype cam_yct = cam_params[valid_param_idx_st + 3];
    Dtype cam_hgrd = cam_params[valid_param_idx_st + 4];
    Dtype cam_pitch = cam_params[valid_param_idx_st + 5];
    CHECK_GT(cam_xpz, 0);
    CHECK_GT(cam_ypz, 0);
    CHECK_GT(cam_hgrd, 0);
    // Horizon: rows at or above min_r_grd cannot see the ground plane.
    Dtype min_py_grd = cam_yct + cam_ypz * tan(cam_pitch);
    Dtype min_r_grd = (min_py_grd - read_height_offset) * read_height_scale;
    for (int r = 0; r < im_height; r++) {
        // These are only initialized (and only read) when r > min_r_grd;
        // the r <= min_r_grd branch below writes zeros instead.
        Dtype py_grd;
        Dtype z_grd, y_grd;
        Dtype z_std_h_upon_grd, y_std_h_upon_grd;
        Dtype py_std_h_upon_grd, r_std_h_upon_grd;
        if (r > min_r_grd) {
            // Back-project row r to a ground point (depth z_grd, height y_grd),
            // then project the top of a std_height object standing there.
            py_grd = r / read_height_scale + read_height_offset;
            z_grd = cam_ypz * cam_hgrd / (py_grd - cam_yct - cam_ypz *
tan(cam_pitch));
            y_grd = cam_hgrd + z_grd * tan(cam_pitch);
            // With trans_cam_pitch_to_zero the pitch terms collapse to 0/1,
            // i.e. the object is raised straight up in camera coordinates.
            z_std_h_upon_grd = z_grd + std_height * (trans_cam_pitch_to_zero?0.0:tan(cam_pitch));
            y_std_h_upon_grd = y_grd - std_height * (trans_cam_pitch_to_zero?1.0:cos(cam_pitch));
            py_std_h_upon_grd = cam_ypz * y_std_h_upon_grd / z_std_h_upon_grd + cam_yct;
            r_std_h_upon_grd = (py_std_h_upon_grd - read_height_offset) * read_height_scale;
        }
        for (int c = 0; c < im_width; c++) {
            if (r <= min_r_grd) {
                // Above the horizon: no ground, feature is zero.
                grd_ft[r * blob_width + c] = Dtype(0.0);
            } else {
                Dtype px_grd = c / read_width_scale;
                Dtype x_grd = (px_grd - cam_xct) * z_grd / cam_xpz;
                Dtype x_std_h_upon_grd = x_grd;
                Dtype px_std_h_upon_grd = cam_xpz * x_std_h_upon_grd / z_std_h_upon_grd + cam_xct;
                Dtype c_std_h_upon_grd = px_std_h_upon_grd * read_width_scale;
                // Pixel distance between the ground point and the projected
                // top of the std_height object.
                Dtype std_h_prj_scale = sqrt((c_std_h_upon_grd - c) * (c_std_h_upon_grd - c)
                        + (r_std_h_upon_grd - r) * (r_std_h_upon_grd - r));
                if (!normalize_grd_ft) {
                    grd_ft[r * blob_width + c] = std_h_prj_scale;
                } else {
                    // Bucket by ceil(log2(scale)) into one of
                    // normalize_grd_ft_dim channels and normalize within it.
                    int norm_chl = std::min<int>(normalize_grd_ft_dim - 1,
                            std::max<int>(0, static_cast<int>(
                            std::ceil(std::log(std_h_prj_scale) / std::log(2.0)))));
                    grd_ft[(norm_chl * blob_height + r) * blob_width + c] =
                            std_h_prj_scale / std::pow(2.0, norm_chl);
                }
            }
        }
    }
}

// Explicit float instantiation of the CPU reference.
template void GenGrdFt_cpu(unsigned int im_width, unsigned int im_height,
        unsigned int blob_width, unsigned int blob_height, float std_height,
        const std::vector<float> & cam_params, float* grd_ft,
        float read_width_scale, float read_height_scale,
        unsigned int read_height_offset, unsigned int valid_param_idx_st,
        bool trans_cam_pitch_to_zero,bool normalize_grd_ft,
        unsigned int normalize_grd_ft_dim);

// Device kernel: one thread per image pixel (n = im_width * im_height).
// All pitch-dependent products are precomputed on the host and passed in
// (cam_tanh, cam_ypz_x_tanh, ...), so the kernel does no trig.
template <typename Dtype>
__global__ void GenGrdFt_kernel(unsigned int im_width, unsigned int blob_width,
        unsigned int blob_height, unsigned int n, Dtype std_height,
        Dtype cam_xpz, Dtype cam_xct, Dtype cam_ypz, Dtype cam_yct,
        Dtype cam_hgrd, Dtype cam_pitch, Dtype cam_tanh, Dtype cam_ypz_x_tanh,
        Dtype std_height_x_tanh, Dtype std_height_x_cos, Dtype
cam_ypz_x_cam_hgrd, Dtype read_width_scale, Dtype read_height_scale,
        unsigned int read_height_offset, Dtype min_py_grd, Dtype min_r_grd,
        bool normalize_grd_ft, unsigned int normalize_grd_ft_dim,
        Dtype* grd_ft_gpu_data) {
    // Grid-stride style loop over all n pixels (CUDA_KERNEL_LOOP macro).
    CUDA_KERNEL_LOOP(index, n) {
        int r = index / im_width;
        int c = index % im_width;
        if (r <= min_r_grd) {
            // Above the horizon: feature is zero.
            grd_ft_gpu_data[r * blob_width + c] = Dtype(0.0);
        } else {
            // Mirrors GenGrdFt_cpu, but with host-precomputed pitch terms.
            Dtype py_grd = r / read_height_scale + read_height_offset;
            Dtype z_grd = cam_ypz_x_cam_hgrd / (py_grd - cam_yct - cam_ypz_x_tanh);
            Dtype y_grd = cam_hgrd + z_grd * cam_tanh;
            Dtype z_std_h_upon_grd = z_grd + std_height_x_tanh;
            Dtype y_std_h_upon_grd = y_grd - std_height_x_cos;
            Dtype py_std_h_upon_grd = cam_ypz * y_std_h_upon_grd / z_std_h_upon_grd + cam_yct;
            Dtype r_std_h_upon_grd = (py_std_h_upon_grd - read_height_offset) * read_height_scale;
            Dtype px_grd = c / read_width_scale;
            Dtype x_grd = (px_grd - cam_xct) * z_grd / cam_xpz;
            Dtype x_std_h_upon_grd = x_grd;
            Dtype px_std_h_upon_grd = cam_xpz * x_std_h_upon_grd / z_std_h_upon_grd + cam_xct;
            Dtype c_std_h_upon_grd = px_std_h_upon_grd * read_width_scale;
            Dtype std_h_prj_scale = sqrt((c_std_h_upon_grd - c) * (c_std_h_upon_grd - c)
                    + (r_std_h_upon_grd - r) * (r_std_h_upon_grd - r));
            if (!normalize_grd_ft) {
                grd_ft_gpu_data[r * blob_width + c] = std_h_prj_scale;
            } else {
                // NOTE(review): normalize_grd_ft_dim is unsigned, so
                // min(unsigned - 1, max(0, int)) mixes signedness; relies on
                // normalize_grd_ft_dim >= 1 — confirm callers guarantee this.
                int norm_chl = min(normalize_grd_ft_dim - 1,
                        max(0, int(ceil(log(std_h_prj_scale) / log(2.0)))));
                grd_ft_gpu_data[(norm_chl * blob_height + r) * blob_width + c] =
                        std_h_prj_scale / pow(2.0, norm_chl);
            }
        }
    }
}

// GPU front-end: validates parameters, precomputes the pitch-dependent
// terms, and launches GenGrdFt_kernel (one thread per pixel). Parameter
// meaning matches GenGrdFt_cpu above.
template <typename Dtype>
void GenGrdFt_gpu(unsigned int im_width, unsigned int im_height,
        unsigned int blob_width, unsigned int blob_height, Dtype std_height,
        const std::vector<Dtype> & cam_params, Dtype* grd_ft,
        Dtype read_width_scale, Dtype read_height_scale,
        unsigned int read_height_offset, unsigned int valid_param_idx_st,
        bool trans_cam_pitch_to_zero, bool normalize_grd_ft,
        unsigned int normalize_grd_ft_dim) {
    CHECK_GT(im_width, 0);
    CHECK_GT(im_height, 0);
CHECK_GE(blob_width, im_width);
CHECK_GE(blob_height, im_height);
CHECK_GT(read_width_scale, 0);
CHECK_GT(read_height_scale, 0);
CHECK_LE(valid_param_idx_st + 6, cam_params.size());
Dtype cam_xpz = cam_params[valid_param_idx_st + 0];
Dtype cam_xct = cam_params[valid_param_idx_st + 1];
Dtype cam_ypz = cam_params[valid_param_idx_st + 2];
Dtype cam_yct = cam_params[valid_param_idx_st + 3];
Dtype cam_hgrd = cam_params[valid_param_idx_st + 4];
Dtype cam_pitch = cam_params[valid_param_idx_st + 5];
CHECK_GT(cam_xpz, 0);
CHECK_GT(cam_ypz, 0);
CHECK_GT(cam_hgrd, 0);
// BUGFIX: was tanh(cam_pitch) (hyperbolic tangent). The CPU reference
// GenGrdFt_cpu uses tan(cam_pitch) throughout — the pitch enters the
// perspective projection as a geometric tangent — so the GPU path
// produced results inconsistent with the CPU path. The variable keeps
// its historical name (cam_tanh) to preserve the kernel interface.
Dtype cam_tanh = tan(cam_pitch);
Dtype cam_ypz_x_tanh = cam_ypz * cam_tanh;
// BUGFIX: same tanh -> tan correction as above.
Dtype std_height_x_tanh = std_height * (trans_cam_pitch_to_zero ? 0.0 : tan(cam_pitch));
Dtype std_height_x_cos = std_height * (trans_cam_pitch_to_zero ? 1.0 : cos(cam_pitch));
Dtype cam_ypz_x_cam_hgrd = cam_ypz * cam_hgrd;
// Horizon row, below which the ground plane is visible.
Dtype min_py_grd = cam_yct + cam_ypz_x_tanh;
Dtype min_r_grd = (min_py_grd - read_height_offset) * read_height_scale;
int count = im_height * im_width;
// One thread per pixel.
GenGrdFt_kernel<Dtype><<<CUDA_GET_BLOCKS(count, CUDA_NUM_THREADS),
        CUDA_NUM_THREADS>>>(
        im_width, blob_width, blob_height, count, std_height,
        cam_xpz, cam_xct, cam_ypz, cam_yct, cam_hgrd, cam_pitch,
        cam_tanh, cam_ypz_x_tanh, std_height_x_tanh, std_height_x_cos,
        cam_ypz_x_cam_hgrd, read_width_scale, read_height_scale,
        read_height_offset, min_py_grd, min_r_grd,
        normalize_grd_ft, normalize_grd_ft_dim, grd_ft);
CUDA_POST_KERNEL_CHECK;
}

// Explicit float instantiation of the GPU path.
template void GenGrdFt_gpu(unsigned int im_width, unsigned int im_height,
        unsigned int blob_width, unsigned int blob_height, float std_height,
        const std::vector<float> & cam_params, float* grd_ft,
        float read_width_scale, float read_height_scale,
        unsigned int read_height_offset, unsigned int valid_param_idx_st,
        bool trans_cam_pitch_to_zero, bool normalize_grd_ft,
        unsigned int normalize_grd_ft_dim);

}
}
the_stack
* \file
* AgentScanByKey implements a stateful abstraction of CUDA thread blocks for participating in device-wide prefix scan by key.
*/

#pragma once

#include <iterator>

#include "single_pass_scan_operators.cuh"
#include "../block/block_load.cuh"
#include "../block/block_store.cuh"
#include "../block/block_scan.cuh"
#include "../block/block_discontinuity.cuh"
#include "../config.cuh"
#include "../iterator/cache_modified_input_iterator.cuh"

CUB_NAMESPACE_BEGIN

/******************************************************************************
 * Tuning policy types
 ******************************************************************************/

/**
 * Parameterizable tuning policy type for AgentScanByKey
 */
template <int _BLOCK_THREADS,
          int _ITEMS_PER_THREAD = 1,
          BlockLoadAlgorithm _LOAD_ALGORITHM = BLOCK_LOAD_DIRECT,
          CacheLoadModifier _LOAD_MODIFIER = LOAD_DEFAULT,
          BlockScanAlgorithm _SCAN_ALGORITHM = BLOCK_SCAN_WARP_SCANS,
          BlockStoreAlgorithm _STORE_ALGORITHM = BLOCK_STORE_DIRECT>
struct AgentScanByKeyPolicy
{
    enum
    {
        BLOCK_THREADS = _BLOCK_THREADS,
        ITEMS_PER_THREAD = _ITEMS_PER_THREAD,
    };

    static const BlockLoadAlgorithm LOAD_ALGORITHM = _LOAD_ALGORITHM;
    static const CacheLoadModifier LOAD_MODIFIER = _LOAD_MODIFIER;
    static const BlockScanAlgorithm SCAN_ALGORITHM = _SCAN_ALGORITHM;
    static const BlockStoreAlgorithm STORE_ALGORITHM = _STORE_ALGORITHM;
};

/******************************************************************************
 * Thread block abstractions
 ******************************************************************************/

/**
 * \brief AgentScanByKey implements a stateful abstraction of CUDA thread blocks for participating in device-wide prefix scan by key.
 */
template <
    typename AgentScanByKeyPolicyT, ///< Parameterized AgentScanPolicyT tuning policy type
    typename KeysInputIteratorT,    ///< Random-access input iterator type
    typename ValuesInputIteratorT,  ///< Random-access input iterator type
    typename ValuesOutputIteratorT, ///< Random-access output iterator type
    typename EqualityOp,            ///< Equality functor type
    typename ScanOpT,               ///< Scan functor type
    typename InitValueT,            ///< The init_value element for ScanOpT type (cub::NullType for inclusive scan)
    typename OffsetT>               ///< Signed integer type for global offsets
struct AgentScanByKey
{
    //---------------------------------------------------------------------
    // Types and constants
    //---------------------------------------------------------------------

    using KeyT = cub::detail::value_t<KeysInputIteratorT>;
    using InputT = cub::detail::value_t<ValuesInputIteratorT>;

    // The output value type -- used as the intermediate accumulator
    // Per https://wg21.link/P0571, use InitValueT if provided, otherwise the
    // input iterator's value type.
    using OutputT =
        cub::detail::conditional_t<std::is_same<InitValueT, NullType>::value,
                                   InputT,
                                   InitValueT>;

    // Pair carrying a segment-head flag (key) and a running value; the
    // reduce-by-segment operator resets the running value at flags.
    using SizeValuePairT = KeyValuePair<OffsetT, OutputT>;
    using KeyValuePairT = KeyValuePair<KeyT, OutputT>;
    using ReduceBySegmentOpT = ReduceBySegmentOp<ScanOpT>;

    using ScanTileStateT = ReduceByKeyScanTileState<OutputT, OffsetT>;

    // Constants
    enum
    {
        IS_INCLUSIVE = std::is_same<InitValueT, NullType>::value, // Inclusive scan if no init_value type is provided
        BLOCK_THREADS = AgentScanByKeyPolicyT::BLOCK_THREADS,
        ITEMS_PER_THREAD = AgentScanByKeyPolicyT::ITEMS_PER_THREAD,
        ITEMS_PER_TILE = BLOCK_THREADS * ITEMS_PER_THREAD,
    };

    using WrappedKeysInputIteratorT =
        cub::detail::conditional_t<std::is_pointer<KeysInputIteratorT>::value,
            CacheModifiedInputIterator<AgentScanByKeyPolicyT::LOAD_MODIFIER, KeyT, OffsetT>, // Wrap the native input pointer with CacheModifiedInputIterator
            KeysInputIteratorT>;

    using WrappedValuesInputIteratorT =
        cub::detail::conditional_t<std::is_pointer<ValuesInputIteratorT>::value,
            CacheModifiedInputIterator<AgentScanByKeyPolicyT::LOAD_MODIFIER, InputT, OffsetT>, // Wrap the native input pointer with CacheModifiedInputIterator
            ValuesInputIteratorT>;

    using BlockLoadKeysT = BlockLoad<KeyT, BLOCK_THREADS, ITEMS_PER_THREAD, AgentScanByKeyPolicyT::LOAD_ALGORITHM>;
    using BlockLoadValuesT = BlockLoad<OutputT, BLOCK_THREADS, ITEMS_PER_THREAD, AgentScanByKeyPolicyT::LOAD_ALGORITHM>;
    using BlockStoreValuesT = BlockStore<OutputT, BLOCK_THREADS, ITEMS_PER_THREAD, AgentScanByKeyPolicyT::STORE_ALGORITHM>;
    using BlockDiscontinuityKeysT = BlockDiscontinuity<KeyT, BLOCK_THREADS, 1, 1>;
    using TilePrefixCallbackT = TilePrefixCallbackOp<SizeValuePairT, ReduceBySegmentOpT, ScanTileStateT>;
    using BlockScanT = BlockScan<SizeValuePairT, BLOCK_THREADS, AgentScanByKeyPolicyT::SCAN_ALGORITHM, 1, 1>;

    // Shared memory layout: the scan-phase storage is unioned with the
    // load/store storage because their uses are separated by CTA_SYNC().
    union TempStorage
    {
        struct ScanStorage
        {
            typename BlockScanT::TempStorage scan;
            typename TilePrefixCallbackT::TempStorage prefix;
            typename BlockDiscontinuityKeysT::TempStorage
            discontinuity;
        } scan_storage;

        typename BlockLoadKeysT::TempStorage load_keys;
        typename BlockLoadValuesT::TempStorage load_values;
        typename BlockStoreValuesT::TempStorage store_values;
    };

    //---------------------------------------------------------------------
    // Per-thread fields
    //---------------------------------------------------------------------

    TempStorage &                 storage;
    WrappedKeysInputIteratorT     d_keys_in;
    WrappedValuesInputIteratorT   d_values_in;
    ValuesOutputIteratorT         d_values_out;
    InequalityWrapper<EqualityOp> inequality_op;
    ScanOpT                       scan_op;
    ReduceBySegmentOpT            pair_scan_op;
    InitValueT                    init_value;

    //---------------------------------------------------------------------
    // Block scan utility methods (first tile)
    //---------------------------------------------------------------------

    // Exclusive scan specialization
    __device__ __forceinline__ void ScanTile(
        SizeValuePairT (&scan_items)[ITEMS_PER_THREAD],
        SizeValuePairT &tile_aggregate,
        Int2Type<false> /* is_inclusive */)
    {
        BlockScanT(storage.scan_storage.scan)
            .ExclusiveScan(scan_items, scan_items, pair_scan_op, tile_aggregate);
    }

    // Inclusive scan specialization
    __device__ __forceinline__ void ScanTile(
        SizeValuePairT (&scan_items)[ITEMS_PER_THREAD],
        SizeValuePairT &tile_aggregate,
        Int2Type<true> /* is_inclusive */)
    {
        BlockScanT(storage.scan_storage.scan)
            .InclusiveScan(scan_items, scan_items, pair_scan_op, tile_aggregate);
    }

    //---------------------------------------------------------------------
    // Block scan utility methods (subsequent tiles)
    //---------------------------------------------------------------------

    // Exclusive scan specialization (with prefix from predecessors)
    __device__ __forceinline__ void ScanTile(
        SizeValuePairT (&scan_items)[ITEMS_PER_THREAD],
        SizeValuePairT &tile_aggregate,
        TilePrefixCallbackT &prefix_op,
        Int2Type<false> /* is_inclusive */)
    {
        BlockScanT(storage.scan_storage.scan)
            .ExclusiveScan(scan_items, scan_items, pair_scan_op, prefix_op);
        tile_aggregate = prefix_op.GetBlockAggregate();
    }

    // Inclusive scan specialization (with prefix from predecessors)
    __device__ __forceinline__ void ScanTile(
        SizeValuePairT (&scan_items)[ITEMS_PER_THREAD],
        SizeValuePairT &tile_aggregate,
        TilePrefixCallbackT &prefix_op,
        Int2Type<true> /* is_inclusive */)
    {
        BlockScanT(storage.scan_storage.scan)
            .InclusiveScan(scan_items, scan_items, pair_scan_op, prefix_op);
        tile_aggregate = prefix_op.GetBlockAggregate();
    }

    //---------------------------------------------------------------------
    // Zip utility methods
    //---------------------------------------------------------------------

    // Packs each (value, segment-head flag) pair into a SizeValuePairT so a
    // single pair-scan can do a segmented scan. Assumes a blocked item
    // arrangement (thread t owns items [t*ITEMS_PER_THREAD, ...)).
    template <bool IS_LAST_TILE>
    __device__ __forceinline__ void ZipValuesAndFlags(
        OffsetT num_remaining,
        OutputT (&values)[ITEMS_PER_THREAD],
        OffsetT (&segment_flags)[ITEMS_PER_THREAD],
        SizeValuePairT (&scan_items)[ITEMS_PER_THREAD])
    {
        // Zip values and segment_flags
        #pragma unroll
        for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM)
        {
            // Set segment_flags for first out-of-bounds item, zero for others
            if (IS_LAST_TILE &&
                OffsetT(threadIdx.x * ITEMS_PER_THREAD) + ITEM == num_remaining)
                segment_flags[ITEM] = 1;

            scan_items[ITEM].value = values[ITEM];
            scan_items[ITEM].key   = segment_flags[ITEM];
        }
    }

    // Extracts the scanned values back out of the pairs.
    __device__ __forceinline__ void UnzipValues(
        OutputT (&values)[ITEMS_PER_THREAD],
        SizeValuePairT (&scan_items)[ITEMS_PER_THREAD])
    {
        // Zip values and segment_flags
        #pragma unroll
        for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM)
        {
            values[ITEM] = scan_items[ITEM].value;
        }
    }

    // Folds init_value into an exclusive scan: segment heads get the raw
    // init_value, everything else gets scan_op(init_value, item).
    // SFINAE-enabled only when an init value exists (exclusive scan).
    template <bool IsNull = std::is_same<InitValueT, NullType>::value,
              typename std::enable_if<!IsNull, int>::type = 0>
    __device__ __forceinline__ void AddInitToScan(
        OutputT (&items)[ITEMS_PER_THREAD],
        OffsetT (&flags)[ITEMS_PER_THREAD])
    {
        #pragma unroll
        for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM)
        {
            items[ITEM] = flags[ITEM] ?
init_value : scan_op(init_value, items[ITEM]);
        }
    }

    // No-op specialization for inclusive scans (no init value).
    template <bool IsNull = std::is_same<InitValueT, NullType>::value,
              typename std::enable_if<IsNull, int>::type = 0>
    __device__ __forceinline__ void AddInitToScan(
        OutputT (&/*items*/)[ITEMS_PER_THREAD],
        OffsetT (&/*flags*/)[ITEMS_PER_THREAD])
    {}

    //---------------------------------------------------------------------
    // Cooperatively scan a device-wide sequence of tiles with other CTAs
    //---------------------------------------------------------------------

    // Process a tile of input (dynamic chained scan)
    //
    template <bool IS_LAST_TILE>
    __device__ __forceinline__ void ConsumeTile(
        OffsetT          /*num_items*/,
        OffsetT          num_remaining,
        int              tile_idx,
        OffsetT          tile_base,
        ScanTileStateT&  tile_state)
    {
        // Load items
        KeyT           keys[ITEMS_PER_THREAD];
        OutputT        values[ITEMS_PER_THREAD];
        OffsetT        segment_flags[ITEMS_PER_THREAD];
        SizeValuePairT scan_items[ITEMS_PER_THREAD];

        if (IS_LAST_TILE)
        {
            // Fill last element with the first element
            // because collectives are not suffix guarded
            BlockLoadKeysT(storage.load_keys)
                .Load(d_keys_in + tile_base, keys, num_remaining, *(d_keys_in + tile_base));
        }
        else
        {
            BlockLoadKeysT(storage.load_keys)
                .Load(d_keys_in + tile_base, keys);
        }

        // Barrier: load_keys storage is unioned with load_values below.
        CTA_SYNC();

        if (IS_LAST_TILE)
        {
            // Fill last element with the first element
            // because collectives are not suffix guarded
            BlockLoadValuesT(storage.load_values)
                .Load(d_values_in + tile_base, values, num_remaining, *(d_values_in + tile_base));
        }
        else
        {
            BlockLoadValuesT(storage.load_values)
                .Load(d_values_in + tile_base, values);
        }

        // Barrier: load_values storage is unioned with scan_storage below.
        CTA_SYNC();

        // first tile
        if (tile_idx == 0)
        {
            // Flag key discontinuities (segment heads) within the tile.
            BlockDiscontinuityKeysT(storage.scan_storage.discontinuity)
                .FlagHeads(segment_flags, keys, inequality_op);

            // Zip values and segment_flags
            ZipValuesAndFlags<IS_LAST_TILE>(num_remaining, values, segment_flags, scan_items);

            // Exclusive scan of values and segment_flags
            SizeValuePairT tile_aggregate;
            ScanTile(scan_items, tile_aggregate, Int2Type<IS_INCLUSIVE>());

            if (threadIdx.x == 0)
            {
                // Publish this tile's aggregate so successor tiles' prefix
                // lookups can proceed (single-tile case needs no publish).
                if (!IS_LAST_TILE)
                    tile_state.SetInclusive(0, tile_aggregate);

                scan_items[0].key = 0;
            }
        }
        else
        {
            // Seed the discontinuity flags with the last key of the
            // previous tile (only thread 0's value is used).
            KeyT tile_pred_key = (threadIdx.x == 0) ? d_keys_in[tile_base - 1] : KeyT();

            BlockDiscontinuityKeysT(storage.scan_storage.discontinuity)
                .FlagHeads(segment_flags, keys, inequality_op, tile_pred_key);

            // Zip values and segment_flags
            ZipValuesAndFlags<IS_LAST_TILE>(num_remaining, values, segment_flags, scan_items);

            // Decoupled look-back: waits on predecessor tiles' status to
            // obtain this tile's running prefix.
            SizeValuePairT tile_aggregate;
            TilePrefixCallbackT prefix_op(tile_state, storage.scan_storage.prefix, pair_scan_op, tile_idx);
            ScanTile(scan_items, tile_aggregate, prefix_op, Int2Type<IS_INCLUSIVE>());
        }

        // Barrier: scan_storage is unioned with store_values below.
        CTA_SYNC();

        UnzipValues(values, scan_items);

        AddInitToScan(values, segment_flags);

        // Store items
        if (IS_LAST_TILE)
        {
            BlockStoreValuesT(storage.store_values)
                .Store(d_values_out + tile_base, values, num_remaining);
        }
        else
        {
            BlockStoreValuesT(storage.store_values)
                .Store(d_values_out + tile_base, values);
        }
    }

    //---------------------------------------------------------------------
    // Constructor
    //---------------------------------------------------------------------

    // Dequeue and scan tiles of items as part of a dynamic chained scan
    // with Init functor
    __device__ __forceinline__ AgentScanByKey(
        TempStorage &         storage,
        KeysInputIteratorT    d_keys_in,
        ValuesInputIteratorT  d_values_in,
        ValuesOutputIteratorT d_values_out,
        EqualityOp            equality_op,
        ScanOpT               scan_op,
        InitValueT            init_value)
    :
        storage(storage),
        d_keys_in(d_keys_in),
        d_values_in(d_values_in),
        d_values_out(d_values_out),
        inequality_op(equality_op),
        scan_op(scan_op),
        pair_scan_op(scan_op),
        init_value(init_value)
    {}

    /**
     * Scan tiles of items as part of a dynamic chained scan
     */
    __device__ __forceinline__ void ConsumeRange(
        OffsetT             num_items,          ///< Total number of input items
        ScanTileStateT&     tile_state,         ///< Global tile state descriptor
        int                 start_tile)         ///< The starting tile for the current grid
    {
        int     tile_idx      = blockIdx.x;
        OffsetT tile_base     = OffsetT(ITEMS_PER_TILE) * tile_idx;
        OffsetT num_remaining = num_items - tile_base;

        if (num_remaining > ITEMS_PER_TILE)
        {
            // Not the last tile (full)
            ConsumeTile<false>(num_items, num_remaining, tile_idx, tile_base, tile_state);
        }
        else if (num_remaining > 0)
        {
            // The last tile (possibly partially-full)
            ConsumeTile<true>(num_items, num_remaining, tile_idx, tile_base, tile_state);
        }
    }
};

CUB_NAMESPACE_END
the_stack
using namespace optix;

// Array of texture sampler IDs for the material expression associated with this OptiX program.
rtDeclareVariable(int, texture_sampler_ids, , );

typedef rtBufferId<int> BufferInt;

// The wrap mode determines the texture lookup behavior if a lookup coordinate
// is exceeding the normalized half-open texture space range of [0, 1).
enum Tex_wrap_mode {
    wrap_clamp           = 0,  // clamps the lookup coordinate to the range
    wrap_repeat          = 1,  // takes the fractional part of the lookup coordinate
                               // effectively repeating the texture along this axis
    wrap_mirrored_repeat = 2,  // like wrap_repeat but takes one minus the fractional
                               // part every other interval to mirror every second
                               // instance of the texture
    wrap_clip            = 3   // makes the texture lookup return zero for texture
                               // coordinates outside of the range
};

// Applies wrapping and cropping to the given coordinate.
// Note: This macro returns if wrap mode is clip and the coordinate is out of range.
// CAUTION: the macro references the caller's `result` variable by name and
// may `return` from the enclosing function — only use inside the
// tex_* callable programs below.
#define WRAP_AND_CROP_OR_RETURN_BLACK(val, wrap_mode, crop_vals, store_res_func)       \
    do {                                                                               \
        if ( (wrap_mode) == wrap_repeat &&                                             \
                (crop_vals)[0] == 0.0f && (crop_vals)[1] == 1.0f )                     \
        {                                                                              \
            /* Do nothing, use OptiX texture sampler default behaviour */              \
        }                                                                              \
        else                                                                           \
        {                                                                              \
            if ( (wrap_mode) == wrap_clamp )                                           \
            {                                                                          \
                if ( val < 0.0f ) val = 0.0f;                                          \
                else if ( val > 1.0f ) val = 1.0f;                                     \
            }                                                                          \
            else if ( (wrap_mode) == wrap_clip && (val < 0.0f || val > 1.0f) )         \
            {                                                                          \
                store_res_func(result, 0.0f);                                          \
                return;                                                                \
            }                                                                          \
            else if ( (wrap_mode) == wrap_mirrored_repeat )                            \
            {                                                                          \
                int int_val = int(floorf(val));                                        \
                if ( (int_val & 1) != 0 )                                              \
                    val = 1.0f - (val - float(int_val));                               \
                else                                                                   \
                    val = val - float(int_val);                                        \
            }                                                                          \
            else /* wrap_repeat */                                                     \
                val = val - floorf(val);                                               \
            /* Remap the wrapped coordinate into the crop sub-range. */                \
            val = val * ((crop_vals)[1] - (crop_vals)[0]) + (crop_vals)[0];            \
        }                                                                              \
    } while(0)

// Unused structure which is part of texture functions API.
struct Core_tex_handler;

// Stores a float4 in a float[4] array.
// Stores a float4 in a float[4] array.
__device__ void store_result4(float res[4], const float4 &v)
{
    res[0] = v.x;
    res[1] = v.y;
    res[2] = v.z;
    res[3] = v.w;
}

// Stores a float in all elements of a float[4] array.
__device__ void store_result4(float res[4], const float v)
{
    res[0] = res[1] = res[2] = res[3] = v;
}

// Stores the given float values in a float[4] array.
__device__ void store_result4(
    float res[4], const float v0, const float v1, const float v2, const float v3)
{
    res[0] = v0;
    res[1] = v1;
    res[2] = v2;
    res[3] = v3;
}

// Stores a float4 in a float[3] array, ignoring v.w.
__device__ void store_result3(float res[3], const float4 &v)
{
    res[0] = v.x;
    res[1] = v.y;
    res[2] = v.z;
}

// Stores a float in all elements of a float[3] array.
__device__ void store_result3(float res[3], const float v)
{
    res[0] = res[1] = res[2] = v;
}

// Stores the given float values in a float[3] array.
__device__ void store_result3(float res[3], const float v0, const float v1, const float v2)
{
    res[0] = v0;
    res[1] = v1;
    res[2] = v2;
}

// Implementation of tex::lookup_float4 for a texture_2d texture.
// texture_idx is 1-based; 0 denotes an invalid texture and yields black.
RT_CALLABLE_PROGRAM void tex_lookup_float4_2d(
    float result[4],
    Core_tex_handler const *self,
    unsigned texture_idx,
    float const coord[2],
    Tex_wrap_mode wrap_u,
    Tex_wrap_mode wrap_v,
    float const crop_u[2],
    float const crop_v[2])
{
    if ( texture_idx == 0 ) {
        // invalid texture returns zero
        store_result4(result, 0.0f);
        return;
    }

    float u = coord[0], v = coord[1];
    // May early-return black for wrap_clip out-of-range coordinates.
    WRAP_AND_CROP_OR_RETURN_BLACK(u, wrap_u, crop_u, store_result4);
    WRAP_AND_CROP_OR_RETURN_BLACK(v, wrap_v, crop_v, store_result4);

    const BufferInt ids(texture_sampler_ids);
    store_result4(result, rtTex2D<float4>(ids[texture_idx - 1], u, v));
}

// Implementation of tex::lookup_float3 for a texture_2d texture.
RT_CALLABLE_PROGRAM void tex_lookup_float3_2d(
    float result[3],
    Core_tex_handler const *self,
    unsigned texture_idx,
    float const coord[2],
    Tex_wrap_mode wrap_u,
    Tex_wrap_mode wrap_v,
    float const crop_u[2],
    float const crop_v[2])
{
    if ( texture_idx == 0 ) {
        // invalid texture returns zero
        store_result3(result, 0.0f);
        return;
    }

    float u = coord[0], v = coord[1];
    WRAP_AND_CROP_OR_RETURN_BLACK(u, wrap_u, crop_u, store_result3);
    WRAP_AND_CROP_OR_RETURN_BLACK(v, wrap_v, crop_v, store_result3);

    const BufferInt ids(texture_sampler_ids);
    store_result3(result, rtTex2D<float4>(ids[texture_idx - 1], u, v));
}

// Implementation of tex::texel_float4 for a texture_2d texture.
// Note: uvtile textures are not supported
RT_CALLABLE_PROGRAM void tex_texel_float4_2d(
    float result[4],
    Core_tex_handler const *self,
    unsigned texture_idx,
    int const coord[2],
    int const /*uv_tile*/[2])
{
    if ( texture_idx == 0 ) {
        // invalid texture returns zero
        store_result4(result, 0.0f);
        return;
    }

    const BufferInt ids(texture_sampler_ids);
    // Unfiltered integer-coordinate fetch.
    store_result4(result, rtTex2DFetch<float4>(ids[texture_idx - 1], coord[0], coord[1]));
}

// Implementation of tex::lookup_float4 for a texture_3d texture.
RT_CALLABLE_PROGRAM void tex_lookup_float4_3d(
    float result[4],
    Core_tex_handler const *self,
    unsigned texture_idx,
    float const coord[3],
    Tex_wrap_mode wrap_u,
    Tex_wrap_mode wrap_v,
    Tex_wrap_mode wrap_w,
    float const crop_u[2],
    float const crop_v[2],
    float const crop_w[2])
{
    if ( texture_idx == 0 ) {
        // invalid texture returns zero
        store_result4(result, 0.0f);
        return;
    }

    float u = coord[0], v = coord[1], w = coord[2];
    WRAP_AND_CROP_OR_RETURN_BLACK(u, wrap_u, crop_u, store_result4);
    WRAP_AND_CROP_OR_RETURN_BLACK(v, wrap_v, crop_v, store_result4);
    WRAP_AND_CROP_OR_RETURN_BLACK(w, wrap_w, crop_w, store_result4);

    const BufferInt ids(texture_sampler_ids);
    store_result4(result, rtTex3D<float4>(ids[texture_idx - 1], u, v, w));
}

// Implementation of tex::lookup_float3 for a texture_3d texture.
RT_CALLABLE_PROGRAM void tex_lookup_float3_3d(
    float result[3],
    Core_tex_handler const *self,
    unsigned texture_idx,
    float const coord[3],
    Tex_wrap_mode wrap_u,
    Tex_wrap_mode wrap_v,
    Tex_wrap_mode wrap_w,
    float const crop_u[2],
    float const crop_v[2],
    float const crop_w[2])
{
    if ( texture_idx == 0 ) {
        // invalid texture returns zero
        store_result3(result, 0.0f);
        return;
    }

    float u = coord[0], v = coord[1], w = coord[2];
    // May early-return black for wrap_clip out-of-range coordinates.
    WRAP_AND_CROP_OR_RETURN_BLACK(u, wrap_u, crop_u, store_result3);
    WRAP_AND_CROP_OR_RETURN_BLACK(v, wrap_v, crop_v, store_result3);
    WRAP_AND_CROP_OR_RETURN_BLACK(w, wrap_w, crop_w, store_result3);

    const BufferInt ids(texture_sampler_ids);
    store_result3(result, rtTex3D<float4>(ids[texture_idx - 1], u, v, w));
}

// Implementation of tex::texel_float4 for a texture_3d texture.
// texture_idx is 1-based; 0 denotes an invalid texture and yields black.
RT_CALLABLE_PROGRAM void tex_texel_float4_3d(
    float result[4],
    Core_tex_handler const *self,
    unsigned texture_idx,
    int const coord[3])
{
    if ( texture_idx == 0 ) {
        // invalid texture returns zero
        store_result4(result, 0.0f);
        return;
    }

    const BufferInt ids(texture_sampler_ids);
    // Unfiltered integer-coordinate fetch.
    store_result4(result, rtTex3DFetch<float4>(ids[texture_idx - 1], coord[0], coord[1], coord[2]));
}

// Implementation of tex::lookup_float3 for a texture_cube texture.
RT_CALLABLE_PROGRAM void tex_lookup_float3_cube(
    float result[3],
    Core_tex_handler const *self,
    unsigned texture_idx,
    float const coord[3])
{
    if ( texture_idx == 0 ) {
        // invalid texture returns zero
        store_result3(result, 0.0f);
        return;
    }

    const BufferInt ids(texture_sampler_ids);
    store_result3(result, rtTexCubemap<float4>(ids[texture_idx - 1], coord[0], coord[1], coord[2]));
}

// Implementation of resolution_2d function needed by generated code.
// Note: uvtile textures are not supported in this example implementation
RT_CALLABLE_PROGRAM void tex_resolution_2d(
    int result[2],
    Core_tex_handler const *self,
    unsigned texture_idx,
    int const /*uv_tile*/[2])
{
    if ( texture_idx == 0 ) {
        // invalid texture returns zero
        result[0] = 0;
        result[1] = 0;
        return;
    }

    const BufferInt ids(texture_sampler_ids);
    uint3 size = rtTexSize(ids[texture_idx - 1]);
    result[0] = size.x;
    result[1] = size.y;
}
the_stack
#include <stdio.h> #include <stdlib.h> #include <vector> #include <algorithm> #include <nvbio/basic/timer.h> #include <nvbio/basic/console.h> #include <nvbio/basic/cuda/arch.h> #include <nvbio/basic/cuda/ldg.h> #include <nvbio/strings/string_set.h> #include <thrust/device_vector.h> namespace nvbio { namespace string_set_private { enum StringSetTest { ALL = 0x0FFFFFFFu, CPU = 0xF0000000u, SPARSE_TO_CONCAT = 1u, SPARSE_TO_PACKED_CONCAT = 2u, CONCAT_TO_PACKED_CONCAT = 4u, CONCAT_TO_STRIDED = 8u, SPARSE_TO_STRIDED = 16u, PACKED_CONCAT_TO_STRIDED = 32u, PACKED_SPARSE_TO_STRIDED = 64u, STRIDED_PACKED_TO_STRIDED = 128u, CONCAT_TO_STRIDED_PACKED = 256u, PACKED_CONCAT_TO_STRIDED_PACKED = 512u, PACKED_SPARSE_TO_STRIDED_PACKED = 1024u, }; } using namespace string_set_private; template <typename input_set, typename output_set> void check(const input_set& in_string_set, const output_set& out_string_set) { if (in_string_set.size() != out_string_set.size()) { fprintf(stderr, " \nerror: input set has size %u, output has %u\n", in_string_set.size(), out_string_set.size() ); exit(1); } // check that the string sets match for (uint32 i = 0; i < in_string_set.size(); ++i) { typename input_set::string_type in_string = in_string_set[i]; typename output_set::string_type out_string = out_string_set[i]; const uint32 in_len = in_string.size(); const uint32 out_len = out_string.size(); if (in_len != out_len) { fprintf(stderr, " \nerror: input string[%u] has length %u, output has length %u\n", i, in_len, out_len ); exit(1); } for (uint32 j = 0; j < in_len; ++j) { const uint8 in_c = in_string[j]; const uint8 out_c = out_string[j]; if (in_c != out_c) { fprintf(stderr, " \nerror: at string[%u][%u] expected : %u, got %u\n", i, j, uint32(in_c), uint32(out_c) ); exit(1); } } } } void make_test_string_set( const uint32 SYMBOL_SIZE, const uint32 N_strings, const uint32 N, const uint32 N_spacing, thrust::host_vector<uint8>& h_string, thrust::host_vector<uint2>& h_ranges) { h_string.resize( N_strings * 
N_spacing ); h_ranges.resize( N_strings ); LCG_random rand; for (uint32 i = 0; i < N_strings; ++i) { h_ranges[i] = make_uint2( N_spacing*i, N_spacing*i + N ); for (uint32 j = 0; j < N_spacing; ++j) h_string[ i * N_spacing + j ] = rand.next() & ((1u << SYMBOL_SIZE)-1); } } int string_set_test(int argc, char* argv[]) { fprintf(stderr, "nvbio/basic/string_set test... started\n"); const uint32 N = 128; const uint32 N_spacing = 150; const uint32 SYMBOL_SIZE = 4; const uint32 SYMBOLS_PER_WORD = (8u*sizeof(uint32)) / SYMBOL_SIZE; const uint32 N_words = (N + SYMBOLS_PER_WORD-1) / SYMBOLS_PER_WORD; uint32 N_strings = 256*1024; uint32 N_tests = 20; uint32 TEST_MASK = ALL; uint32 CPU_MASK = CPU; for (int i = 0; i < argc; ++i) { if (strcmp( argv[i], "-N" ) == 0) N_strings = atoi( argv[++i] ); else if (strcmp( argv[i], "-N-tests" ) == 0) N_tests = atoi( argv[++i] ); else if (strcmp( argv[i], "-cpu" ) == 0) CPU_MASK = atoi( argv[++i] ); else if (strcmp( argv[i], "-tests" ) == 0) { const std::string tests_string( argv[++i] ); char temp[256]; const char* begin = tests_string.c_str(); const char* end = begin; TEST_MASK = 0u; while (1) { while (*end != ':' && *end != '\0') { temp[end - begin] = *end; end++; } temp[end - begin] = '\0'; if (strcmp( temp, "packed-sparse-to-strided" ) == 0) TEST_MASK |= PACKED_SPARSE_TO_STRIDED; else if (strcmp( temp, "packed-sparse-to-strided-packed" ) == 0) TEST_MASK |= PACKED_SPARSE_TO_STRIDED_PACKED; else if (strcmp( temp, "sparse-to-concat" ) == 0) TEST_MASK |= SPARSE_TO_CONCAT; else if (strcmp( temp, "sparse-to-packed-concat" ) == 0) TEST_MASK |= SPARSE_TO_PACKED_CONCAT; else if (strcmp( temp, "concat-to-packed-concat" ) == 0) TEST_MASK |= CONCAT_TO_PACKED_CONCAT; else if (strcmp( temp, "sparse-to-strided" ) == 0) TEST_MASK |= SPARSE_TO_STRIDED; else if (strcmp( temp, "packed-concat-to-strided" ) == 0) TEST_MASK |= PACKED_CONCAT_TO_STRIDED; else if (strcmp( temp, "packed-sparse-to-strided" ) == 0) TEST_MASK |= PACKED_SPARSE_TO_STRIDED; else if 
(strcmp( temp, "strided-packed-to-strided" ) == 0) TEST_MASK |= STRIDED_PACKED_TO_STRIDED; else if (strcmp( temp, "concat-to-strided-packed" ) == 0) TEST_MASK |= CONCAT_TO_STRIDED_PACKED; else if (strcmp( temp, "packed-concat-to-strided-packed" ) == 0) TEST_MASK |= PACKED_CONCAT_TO_STRIDED_PACKED; else if (strcmp( temp, "packed-sparse-to-strided-packed" ) == 0) TEST_MASK |= PACKED_SPARSE_TO_STRIDED_PACKED; if (end[i] == '\0') break; ++end; begin = end; } } } TEST_MASK |= CPU_MASK; typedef SparseStringSet<uint8*,uint2*> base_string_set; thrust::host_vector<uint8> h_base_string; thrust::host_vector<uint2> h_base_ranges; make_test_string_set( SYMBOL_SIZE, N_strings, N, N_spacing, h_base_string, h_base_ranges ); thrust::device_vector<uint8> d_base_string( h_base_string ); thrust::device_vector<uint2> d_base_ranges( h_base_ranges ); base_string_set h_base_string_set( N_strings, thrust::raw_pointer_cast( &h_base_string.front() ), thrust::raw_pointer_cast( &h_base_ranges.front() ) ); base_string_set d_base_string_set( N_strings, thrust::raw_pointer_cast( &d_base_string.front() ), thrust::raw_pointer_cast( &d_base_ranges.front() ) ); // copy a packed sparse string set into a strided packed string set if ((TEST_MASK & PACKED_SPARSE_TO_STRIDED) && (TEST_MASK & CPU)) { fprintf(stderr, " test cpu packed-sparse -> strided copy... 
started\n"); const uint32 N_words = (N_spacing + SYMBOLS_PER_WORD-1) / SYMBOLS_PER_WORD; typedef PackedStream<uint32*,uint8,SYMBOL_SIZE,false> packed_stream_type; typedef SparseStringSet<packed_stream_type,const uint2*> input_set; typedef StridedStringSet< uint8*, uint32*> output_set; thrust::host_vector<uint32> h_in_string( N_strings * N_words ); thrust::host_vector<uint2> h_in_ranges( N_strings ); packed_stream_type h_packed_stream( thrust::raw_pointer_cast( &h_in_string.front() ) ); LCG_random rand; for (uint32 i = 0; i < N_strings; ++i) { h_in_ranges[i] = make_uint2( N_spacing*i, N_spacing*i + N ); for (uint32 j = 0; j < N_spacing; ++j) h_packed_stream[ i * N_spacing + j ] = rand.next() & ((1u << SYMBOL_SIZE) - 1u); } input_set h_in_string_set( N_strings, h_packed_stream, thrust::raw_pointer_cast( &h_in_ranges.front() ) ); // build the host output string set thrust::host_vector<uint8> h_out_stream( N_strings * N ); thrust::host_vector<uint32> h_out_lengths( N_strings ); output_set h_out_string_set( N_strings, N_strings, thrust::raw_pointer_cast( &h_out_stream.front() ), thrust::raw_pointer_cast( &h_out_lengths.front() ) ); Timer timer; timer.start(); // copy intput set into the output set for (uint32 i = 0; i < N_tests; ++i) copy( h_in_string_set, h_out_string_set ); timer.stop(); fprintf(stderr, " test cpu packed-sparse -> strided copy... done: %.2f GSYMS\n", (1.0e-9f*float(N_strings*N))*(float(N_tests)/timer.seconds())); } // copy a packed sparse string set into a strided packed string set if ((TEST_MASK & PACKED_SPARSE_TO_STRIDED_PACKED) && (TEST_MASK & CPU)) { fprintf(stderr, " test cpu packed-sparse -> strided-packed copy... 
started\n"); const uint32 N_words = (N_spacing + SYMBOLS_PER_WORD-1) / SYMBOLS_PER_WORD; typedef PackedStream<uint32*,uint8,SYMBOL_SIZE,false> packed_stream_type; typedef SparseStringSet<packed_stream_type,const uint2*> input_set; typedef StridedPackedStringSet< uint32*, uint8, SYMBOL_SIZE, false, uint32*> output_set; thrust::host_vector<uint32> h_in_string( N_strings * N_words ); thrust::host_vector<uint2> h_in_ranges( N_strings ); packed_stream_type h_packed_stream( thrust::raw_pointer_cast( &h_in_string.front() ) ); LCG_random rand; for (uint32 i = 0; i < N_strings; ++i) { h_in_ranges[i] = make_uint2( N_spacing*i, N_spacing*i + N ); for (uint32 j = 0; j < N_spacing; ++j) h_packed_stream[ i * N_spacing + j ] = rand.next() & ((1u << SYMBOL_SIZE) - 1u); } input_set h_in_string_set( N_strings, h_packed_stream, thrust::raw_pointer_cast( &h_in_ranges.front() ) ); // build the output string set thrust::host_vector<uint32> h_out_stream( N_strings * N_words ); thrust::host_vector<uint32> h_out_lengths( N_strings ); output_set h_out_string_set( N_strings, N_strings, thrust::raw_pointer_cast( &h_out_stream.front() ), thrust::raw_pointer_cast( &h_out_lengths.front() ) ); Timer timer; timer.start(); // copy intput set into the output set for (uint32 i = 0; i < N_tests; ++i) copy( h_in_string_set, h_out_string_set ); timer.stop(); fprintf(stderr, " test cpu packed-sparse -> strided-packed copy... done: %.2f GSYMS\n", (1.0e-9f*float(N_strings*N))*(float(N_tests)/timer.seconds())); } // copy a sparse string set into a concatenated one if (TEST_MASK & SPARSE_TO_CONCAT) { fprintf(stderr, " test sparse -> concat copy... 
started\n"); typedef base_string_set input_set; typedef ConcatenatedStringSet<uint8*,uint32*> output_set; // build the device output string set thrust::device_vector<uint8> d_out_string( N_strings * N ); thrust::device_vector<uint32> d_out_offsets( N_strings+1 ); output_set d_out_string_set( N_strings, thrust::raw_pointer_cast( &d_out_string.front() ), thrust::raw_pointer_cast( &d_out_offsets.front() ) ); Timer timer; timer.start(); for (uint32 i = 0; i < N_tests; ++i) cuda::copy( d_base_string_set, d_out_string_set ); timer.stop(); // build the host output string set thrust::host_vector<uint8> h_out_string( d_out_string ); thrust::host_vector<uint32> h_out_offsets( d_out_offsets ); output_set h_out_string_set( N_strings, thrust::raw_pointer_cast( &h_out_string.front() ), thrust::raw_pointer_cast( &h_out_offsets.front() ) ); // check that the string sets match check( h_base_string_set, h_out_string_set ); fprintf(stderr, " test sparse -> concat copy... done: %.2f GSYMS\n", (1.0e-9f*float(N_strings*N))*(float(N_tests)/timer.seconds())); } // copy a sparse string set into a packed concatenated one if (TEST_MASK & SPARSE_TO_PACKED_CONCAT) { fprintf(stderr, " test sparse -> packed-concat copy... 
started\n"); typedef PackedStream<uint32*,uint8,SYMBOL_SIZE,false> packed_stream_type; typedef base_string_set input_set; typedef ConcatenatedStringSet<packed_stream_type,uint32*> output_set; // build the device output string set thrust::device_vector<uint32> d_out_string( N_strings * N_words ); thrust::device_vector<uint32> d_out_offsets( N_strings+1 ); packed_stream_type d_packed_stream( thrust::raw_pointer_cast( &d_out_string.front() ) ); output_set d_out_string_set( N_strings, d_packed_stream, thrust::raw_pointer_cast( &d_out_offsets.front() ) ); Timer timer; timer.start(); for (uint32 i = 0; i < N_tests; ++i) cuda::copy( d_base_string_set, d_out_string_set ); timer.stop(); // build the host output string set thrust::host_vector<uint32> h_out_string( d_out_string ); thrust::host_vector<uint32> h_out_offsets( d_out_offsets ); packed_stream_type h_packed_stream( thrust::raw_pointer_cast( &h_out_string.front() ) ); output_set h_out_string_set( N_strings, h_packed_stream, thrust::raw_pointer_cast( &h_out_offsets.front() ) ); // check that the string sets match check( h_base_string_set, h_out_string_set ); fprintf(stderr, " test sparse -> packed-concat copy... done: %.2f GSYMS\n", (1.0e-9f*float(N_strings*N))*(float(N_tests)/timer.seconds())); } // copy a concatenated string set into a packed concatenated one if (TEST_MASK & CONCAT_TO_PACKED_CONCAT) { fprintf(stderr, " test concat -> packed-concat copy... 
started\n"); typedef PackedStream<uint32*,uint8,SYMBOL_SIZE,false> packed_stream_type; typedef ConcatenatedStringSet<uint8*,uint32*> input_set; typedef ConcatenatedStringSet<packed_stream_type,uint32*> output_set; // build the device input string set thrust::device_vector<uint8> d_in_string( N_strings * N ); thrust::device_vector<uint32> d_in_offsets( N_strings+1 ); input_set d_in_string_set( N_strings, thrust::raw_pointer_cast( &d_in_string.front() ), thrust::raw_pointer_cast( &d_in_offsets.front() ) ); // copy the base string set into the input one cuda::copy( d_base_string_set, d_in_string_set ); // build the device output string set thrust::device_vector<uint32> d_out_string( N_strings * N_words ); thrust::device_vector<uint32> d_out_offsets( N_strings+1 ); packed_stream_type d_packed_stream( thrust::raw_pointer_cast( &d_out_string.front() ) ); output_set d_out_string_set( N_strings, d_packed_stream, thrust::raw_pointer_cast( &d_out_offsets.front() ) ); Timer timer; timer.start(); for (uint32 i = 0; i < N_tests; ++i) cuda::copy( d_in_string_set, d_out_string_set ); timer.stop(); // build the host output string set thrust::host_vector<uint32> h_out_string( d_out_string ); thrust::host_vector<uint32> h_out_offsets( d_out_offsets ); packed_stream_type h_packed_stream( thrust::raw_pointer_cast( &h_out_string.front() ) ); output_set h_out_string_set( N_strings, h_packed_stream, thrust::raw_pointer_cast( &h_out_offsets.front() ) ); // check that the string sets match check( h_base_string_set, h_out_string_set ); fprintf(stderr, " test concat -> packed-concat copy... done: %.2f GSYMS\n", (1.0e-9f*float(N_strings*N))*(float(N_tests)/timer.seconds())); } // copy a sparse string set into a strided one if (TEST_MASK & SPARSE_TO_STRIDED) { fprintf(stderr, " test sparse -> strided copy... 
started\n"); typedef base_string_set input_set; typedef StridedStringSet<uint8*,uint32*> output_set; // build the device output string set thrust::device_vector<uint8> d_out_string( N_strings * N ); thrust::device_vector<uint32> d_out_lengths( N_strings+1 ); output_set d_out_string_set( N_strings, N_strings, thrust::raw_pointer_cast( &d_out_string.front() ), thrust::raw_pointer_cast( &d_out_lengths.front() ) ); Timer timer; timer.start(); for (uint32 i = 0; i < N_tests; ++i) cuda::copy( d_base_string_set, d_out_string_set ); timer.stop(); // build the host output string set thrust::host_vector<uint8> h_out_string( d_out_string ); thrust::host_vector<uint32> h_out_lengths( d_out_lengths ); output_set h_out_string_set( N_strings, N_strings, thrust::raw_pointer_cast( &h_out_string.front() ), thrust::raw_pointer_cast( &h_out_lengths.front() ) ); // check that the string sets match check( h_base_string_set, h_out_string_set ); fprintf(stderr, " test sparse -> strided copy... done: %.2f GSYMS\n", (1.0e-9f*float(N_strings*N))*(float(N_tests)/timer.seconds())); } // copy a packed sparse string set into a strided packed string set if (TEST_MASK & PACKED_CONCAT_TO_STRIDED) { fprintf(stderr, " test packed-concat -> strided copy... 
started\n"); const uint32 N_words = (N + SYMBOLS_PER_WORD-1) / SYMBOLS_PER_WORD; typedef PackedStream<uint32*,uint8,SYMBOL_SIZE,false> packed_stream_type; typedef ConcatenatedStringSet<packed_stream_type,uint32*> input_set; typedef StridedStringSet< uint8*, uint32*> output_set; // build the device input string set thrust::device_vector<uint32> d_in_string( N_strings * N_words ); thrust::device_vector<uint32> d_in_offsets( N_strings+1 ); packed_stream_type d_packed_stream( thrust::raw_pointer_cast( &d_in_string.front() ) ); input_set d_in_string_set( N_strings, d_packed_stream, thrust::raw_pointer_cast( &d_in_offsets.front() ) ); // copy the base string set into the input set cuda::copy( d_base_string_set, d_in_string_set ); // build the device output string set thrust::device_vector<uint8> d_out_stream( N_strings * N ); thrust::device_vector<uint32> d_out_lengths( N_strings ); output_set d_out_string_set( N_strings, N_strings, thrust::raw_pointer_cast( &d_out_stream.front() ), thrust::raw_pointer_cast( &d_out_lengths.front() ) ); Timer timer; timer.start(); // copy intput set into the output set for (uint32 i = 0; i < N_tests; ++i) cuda::copy( d_in_string_set, d_out_string_set ); timer.stop(); // build the host output string set thrust::host_vector<uint8> h_out_stream( d_out_stream ); thrust::host_vector<uint32> h_out_lengths( d_out_lengths ); output_set h_out_string_set( N_strings, N_strings, thrust::raw_pointer_cast( &h_out_stream.front() ), thrust::raw_pointer_cast( &h_out_lengths.front() ) ); // check that the string sets match check( h_base_string_set, h_out_string_set ); fprintf(stderr, " test packed-concat -> strided copy... done: %.2f GSYMS\n", (1.0e-9f*float(N_strings*N))*(float(N_tests)/timer.seconds())); } // copy a packed sparse string set into a strided packed string set if (TEST_MASK & PACKED_SPARSE_TO_STRIDED) { fprintf(stderr, " test packed-sparse -> strided copy... 
started\n"); const uint32 N_words = (N_spacing + SYMBOLS_PER_WORD-1) / SYMBOLS_PER_WORD; typedef PackedStream<uint32*,uint8,SYMBOL_SIZE,false> packed_stream_type; typedef PackedStream<cuda::ldg_pointer<uint32>,uint8,SYMBOL_SIZE,false> tex_packed_stream_type; typedef SparseStringSet<packed_stream_type,const uint2*> input_set; typedef SparseStringSet<tex_packed_stream_type,const uint2*> tex_input_set; typedef StridedStringSet< uint8*, uint32*> output_set; thrust::host_vector<uint32> h_in_string( N_strings * N_words ); thrust::host_vector<uint2> h_in_ranges( N_strings ); packed_stream_type h_packed_stream( thrust::raw_pointer_cast( &h_in_string.front() ) ); LCG_random rand; for (uint32 i = 0; i < N_strings; ++i) { h_in_ranges[i] = make_uint2( N_spacing*i, N_spacing*i + N ); for (uint32 j = 0; j < N_spacing; ++j) h_packed_stream[ i * N_spacing + j ] = rand.next() & ((1u << SYMBOL_SIZE) - 1u); } // build the device input string set thrust::device_vector<uint32> d_in_string( h_in_string ); thrust::device_vector<uint2> d_in_ranges( h_in_ranges ); packed_stream_type d_packed_stream( thrust::raw_pointer_cast( &d_in_string.front() ) ); input_set d_in_string_set( N_strings, d_packed_stream, thrust::raw_pointer_cast( &d_in_ranges.front() ) ); // build the device output string set thrust::device_vector<uint8> d_out_stream( N_strings * N ); thrust::device_vector<uint32> d_out_lengths( N_strings ); output_set d_out_string_set( N_strings, N_strings, thrust::raw_pointer_cast( &d_out_stream.front() ), thrust::raw_pointer_cast( &d_out_lengths.front() ) ); Timer timer; timer.start(); // copy intput set into the output set for (uint32 i = 0; i < N_tests; ++i) cuda::copy( d_in_string_set, d_out_string_set ); timer.stop(); // build the host input string set input_set h_in_string_set( N_strings, h_packed_stream, thrust::raw_pointer_cast( &h_in_ranges.front() ) ); // build the host output string set thrust::host_vector<uint8> h_out_stream( d_out_stream ); thrust::host_vector<uint32> 
h_out_lengths( d_out_lengths ); output_set h_out_string_set( N_strings, N_strings, thrust::raw_pointer_cast( &h_out_stream.front() ), thrust::raw_pointer_cast( &h_out_lengths.front() ) ); // check that the string sets match check( h_in_string_set, h_out_string_set ); fprintf(stderr, " test packed-sparse -> strided copy... done: %.2f GSYMS\n", (1.0e-9f*float(N_strings*N))*(float(N_tests)/timer.seconds())); #if 1 fprintf(stderr, " test packed-sparse (tex) -> strided copy... started\n"); // bind the texture tex_packed_stream_type d_tex_packed_stream( cuda::ldg_pointer<uint32>( thrust::raw_pointer_cast( &d_in_string.front() ) ) ); tex_input_set d_tex_in_string_set( N_strings, d_tex_packed_stream, thrust::raw_pointer_cast( &d_in_ranges.front() ) ); timer.start(); // copy intput set into the output set for (uint32 i = 0; i < N_tests; ++i) cuda::copy( d_tex_in_string_set, d_out_string_set ); timer.stop(); fprintf(stderr, " test packed-sparse (tex) -> strided copy... done: %.2f GSYMS\n", (1.0e-9f*float(N_strings*N))*(float(N_tests)/timer.seconds())); #endif } // copy a strided-packed string set into a strided one if (TEST_MASK & STRIDED_PACKED_TO_STRIDED) { fprintf(stderr, " test strided-packed -> strided copy... 
started\n"); typedef StridedPackedStringSet< uint32*, uint8, SYMBOL_SIZE, false, uint32*> input_set; typedef StridedStringSet<uint8*,uint32*> output_set; // first build the input set thrust::device_vector<uint32> d_in_stream( N_strings * N_words ); thrust::device_vector<uint32> d_in_lengths( N_strings+1 ); input_set d_in_string_set( N_strings, N_strings, thrust::raw_pointer_cast( &d_in_stream.front() ), thrust::raw_pointer_cast( &d_in_lengths.front() ) ); // copy the base string set into the input set cuda::copy( d_base_string_set, d_in_string_set ); // build the device output string set thrust::device_vector<uint8> d_out_string( N_strings * N ); thrust::device_vector<uint32> d_out_lengths( N_strings ); output_set d_out_string_set( N_strings, N_strings, thrust::raw_pointer_cast( &d_out_string.front() ), thrust::raw_pointer_cast( &d_out_lengths.front() ) ); Timer timer; timer.start(); for (uint32 i = 0; i < N_tests; ++i) cuda::copy( d_in_string_set, d_out_string_set ); timer.stop(); // build the host output string set thrust::host_vector<uint8> h_out_string( d_out_string ); thrust::host_vector<uint32> h_out_lengths( d_out_lengths ); output_set h_out_string_set( N_strings, N_strings, thrust::raw_pointer_cast( &h_out_string.front() ), thrust::raw_pointer_cast( &h_out_lengths.front() ) ); // check that the string sets match check( h_base_string_set, h_out_string_set ); fprintf(stderr, " test strided-packed -> strided copy... done: %.2f GSYMS\n", (1.0e-9f*float(N_strings*N))*(float(N_tests)/timer.seconds())); } // copy a simple concatenated string set into a strided packed string set if (TEST_MASK & CONCAT_TO_STRIDED_PACKED) { fprintf(stderr, " test concat -> strided-packed copy... 
started\n"); typedef ConcatenatedStringSet<uint8*,uint32*> input_set; typedef StridedPackedStringSet< uint32*, uint8, SYMBOL_SIZE, false, uint32*> output_set; // first build the input set thrust::device_vector<uint8> d_in_string( N_strings * N ); thrust::device_vector<uint32> d_in_offsets( N_strings+1 ); input_set d_in_string_set( N_strings, thrust::raw_pointer_cast( &d_in_string.front() ), thrust::raw_pointer_cast( &d_in_offsets.front() ) ); // copy the base string set into the input set cuda::copy( d_base_string_set, d_in_string_set ); // build the device output string set thrust::device_vector<uint32> d_out_stream( N_strings * N_words ); thrust::device_vector<uint32> d_out_lengths( N_strings ); output_set d_out_string_set( N_strings, N_strings, thrust::raw_pointer_cast( &d_out_stream.front() ), thrust::raw_pointer_cast( &d_out_lengths.front() ) ); Timer timer; timer.start(); // copy intput set into the output set for (uint32 i = 0; i < N_tests; ++i) cuda::copy( d_in_string_set, d_out_string_set ); timer.stop(); // build the host output string set thrust::host_vector<uint32> h_out_stream( d_out_stream ); thrust::host_vector<uint32> h_out_lengths( d_out_lengths ); output_set h_out_string_set( N_strings, N_strings, thrust::raw_pointer_cast( &h_out_stream.front() ), thrust::raw_pointer_cast( &h_out_lengths.front() ) ); // check that the string sets match check( h_base_string_set, h_out_string_set ); fprintf(stderr, " test concat -> strided-packed copy... done: %.2f GSYMS\n", (1.0e-9f*float(N_strings*N))*(float(N_tests)/timer.seconds())); } // copy a packed concatenated string set into a strided packed string set if (TEST_MASK & PACKED_CONCAT_TO_STRIDED_PACKED) { fprintf(stderr, " test packed-concat -> strided-packed copy... 
started\n"); const uint32 N_words = (N + SYMBOLS_PER_WORD-1) / SYMBOLS_PER_WORD; typedef PackedStream<uint32*,uint8,SYMBOL_SIZE,false> packed_stream_type; typedef ConcatenatedStringSet<packed_stream_type,uint32*> input_set; typedef StridedPackedStringSet< uint32*, uint8, SYMBOL_SIZE, false, uint32*> output_set; // build the device input string set thrust::device_vector<uint32> d_in_string( N_strings * N_words ); thrust::device_vector<uint32> d_in_offsets( N_strings+1 ); packed_stream_type d_packed_stream( thrust::raw_pointer_cast( &d_in_string.front() ) ); input_set d_in_string_set( N_strings, d_packed_stream, thrust::raw_pointer_cast( &d_in_offsets.front() ) ); // copy the base string set into the input set cuda::copy( d_base_string_set, d_in_string_set ); // build the device output string set thrust::device_vector<uint32> d_out_stream( N_strings * N_words ); thrust::device_vector<uint32> d_out_lengths( N_strings ); output_set d_out_string_set( N_strings, N_strings, thrust::raw_pointer_cast( &d_out_stream.front() ), thrust::raw_pointer_cast( &d_out_lengths.front() ) ); Timer timer; timer.start(); // copy intput set into the output set for (uint32 i = 0; i < N_tests; ++i) cuda::copy( d_in_string_set, d_out_string_set ); timer.stop(); // build the host output string set thrust::host_vector<uint32> h_out_stream( d_out_stream ); thrust::host_vector<uint32> h_out_lengths( d_out_lengths ); output_set h_out_string_set( N_strings, N_strings, thrust::raw_pointer_cast( &h_out_stream.front() ), thrust::raw_pointer_cast( &h_out_lengths.front() ) ); // check that the string sets match check( h_base_string_set, h_out_string_set ); fprintf(stderr, " test packed-concat -> strided-packed copy... done: %.2f GSYMS\n", (1.0e-9f*float(N_strings*N))*(float(N_tests)/timer.seconds())); } // copy a packed sparse string set into a strided packed string set if (TEST_MASK & PACKED_SPARSE_TO_STRIDED_PACKED) { fprintf(stderr, " test packed-sparse -> strided-packed copy... 
started\n"); const uint32 N_words = (N_spacing + SYMBOLS_PER_WORD-1) / SYMBOLS_PER_WORD; typedef PackedStream<uint32*,uint8,SYMBOL_SIZE,false> packed_stream_type; typedef PackedStream<cuda::ldg_pointer<uint32>,uint8,SYMBOL_SIZE,false> tex_packed_stream_type; typedef SparseStringSet<packed_stream_type,const uint2*> input_set; typedef SparseStringSet<tex_packed_stream_type,const uint2*> tex_input_set; typedef StridedPackedStringSet< uint32*, uint8, SYMBOL_SIZE, false, uint32*> output_set; thrust::host_vector<uint32> h_in_string( N_strings * N_words ); thrust::host_vector<uint2> h_in_ranges( N_strings ); packed_stream_type h_packed_stream( thrust::raw_pointer_cast( &h_in_string.front() ) ); LCG_random rand; for (uint32 i = 0; i < N_strings; ++i) { h_in_ranges[i] = make_uint2( N_spacing*i, N_spacing*i + N ); for (uint32 j = 0; j < N_spacing; ++j) h_packed_stream[ i * N_spacing + j ] = rand.next() & ((1u << SYMBOL_SIZE) - 1u); } // build the device input string set thrust::device_vector<uint32> d_in_string( h_in_string ); thrust::device_vector<uint2> d_in_ranges( h_in_ranges ); packed_stream_type d_packed_stream( thrust::raw_pointer_cast( &d_in_string.front() ) ); input_set d_in_string_set( N_strings, d_packed_stream, thrust::raw_pointer_cast( &d_in_ranges.front() ) ); // build the device output string set thrust::device_vector<uint32> d_out_stream( N_strings * N_words ); thrust::device_vector<uint32> d_out_lengths( N_strings ); output_set d_out_string_set( N_strings, N_strings, thrust::raw_pointer_cast( &d_out_stream.front() ), thrust::raw_pointer_cast( &d_out_lengths.front() ) ); Timer timer; timer.start(); // copy intput set into the output set for (uint32 i = 0; i < N_tests; ++i) cuda::copy( d_in_string_set, d_out_string_set ); timer.stop(); // build the host input string set input_set h_in_string_set( N_strings, h_packed_stream, thrust::raw_pointer_cast( &h_in_ranges.front() ) ); // build the host output string set thrust::host_vector<uint32> h_out_stream( 
d_out_stream ); thrust::host_vector<uint32> h_out_lengths( d_out_lengths ); output_set h_out_string_set( N_strings, N_strings, thrust::raw_pointer_cast( &h_out_stream.front() ), thrust::raw_pointer_cast( &h_out_lengths.front() ) ); // check that the string sets match check( h_in_string_set, h_out_string_set ); fprintf(stderr, " test packed-sparse -> strided-packed copy... done: %.2f GSYMS\n", (1.0e-9f*float(N_strings*N))*(float(N_tests)/timer.seconds())); } fprintf(stderr, "nvbio/basic/string_set test... done\n"); return 0; } } // namespace nvbio
the_stack
// Unit tests (GoogleTest) for arboretum's ContinuousTreeGrower: split search on
// category/continuous features, numerically-unstable gain examples, and split
// application. Expected values in the ASSERTs were copy-pasted from reference
// runs of the implementation (see the "copy pasted result" markers below).
namespace arboretum_test {
using arboretum::core::ContinuousTreeGrower;
using arboretum::core::GainFunctionParameters;
using arboretum::core::my_atomics;

// Root-level best-split search over a category feature with 16 distinct
// categories (fvalue = i / 2). Currently DISABLED in the gtest suite.
TEST(ContinuousTreeGrower, DISABLED_RootSearchCategoryFeature) {
  const size_t size = 32;
  auto grower =
      ContinuousTreeGrower<unsigned int, unsigned short, float2, float2>(size, 1, 0);
  thrust::device_vector<unsigned int> row2Node(size, 0);
  thrust::device_vector<float2> grad(32);
  thrust::host_vector<unsigned short> fvalue_h(32);
  thrust::device_vector<unsigned short> fvalue_d(32);
  float2 sum = make_float2(0.0, 0.0);
  // grad.x is the gradient value, grad.y acts as a per-row count of 1.
  for (int i = 0; i < size; ++i) {
    grad[i] = make_float2(float(i), 1.0);
    fvalue_h[i] = fvalue_d[i] = i / 2;
    sum += grad[i];
  }
  // parent_node_sum/parent_node_count are cumulative per-node prefix values:
  // entry 0 is the empty prefix, entry 1 covers the whole root node.
  thrust::device_vector<float2> parent_node_sum(2);
  parent_node_sum[0] = make_float2(0, 0);
  parent_node_sum[1] = sum;
  thrust::device_vector<unsigned int> parent_node_count(2, 0);
  parent_node_count[0] = 0;
  parent_node_count[1] = size;
  auto p = GainFunctionParameters(0, 0, 0, 0, 0, 0);
  grower.ProcessCategoryFeature<unsigned int>(row2Node, grad, fvalue_d, fvalue_h,
                                              parent_node_sum, parent_node_count,
                                              3, 0, p);
  TEST_OK(cudaStreamSynchronize(grower.stream));
  thrust::host_vector<my_atomics> result_h = grower.result_d;
  // copy pasted result
  ASSERT_EQ(result_h[0].ints[1], 0);
  ASSERT_FLOAT_EQ(result_h[0].floats[0], 480.0);
}

// Root-level best-split search over a continuous feature with 8 distinct
// thresholds (fvalue = i / 4); expects the split at row index 16.
TEST(ContinuousTreeGrower, RootSearchContinuousFeature) {
  const size_t size = 32;
  auto grower =
      ContinuousTreeGrower<unsigned int, unsigned short, float2, float2>(size, 1, 0);
  thrust::device_vector<unsigned int> row2Node(size, 0);
  thrust::device_vector<unsigned int> partitioning_indexes(size, 0);
  thrust::device_vector<float2> grad(32);
  thrust::host_vector<unsigned short> fvalue_h(32);
  thrust::device_vector<unsigned short> fvalue_d(32);
  float2 sum = make_float2(0.0, 0.0);
  for (int i = 0; i < size; ++i) {
    grad[i] = make_float2(float(i), 1.0);
    fvalue_h[i] = fvalue_d[i] = i / 4;
    sum += grad[i];
  }
  thrust::device_vector<float2> parent_node_sum(2);
  parent_node_sum[0] = make_float2(0, 0);
  parent_node_sum[1] = sum;
  thrust::device_vector<unsigned int> parent_node_count(2, 0);
  parent_node_count[0] = 0;
  parent_node_count[1] = size;
  auto p = GainFunctionParameters(0, 0, 0, 0, 0, 0);
  grower.ProcessDenseFeature(partitioning_indexes, row2Node, grad, fvalue_d,
                             thrust::raw_pointer_cast(fvalue_h.data()),
                             parent_node_sum, parent_node_count, 3, 0, 1, p, false);
  TEST_OK(cudaStreamSynchronize(grower.stream));
  thrust::host_vector<my_atomics> result_h = grower.result_d;
  // copy pasted result
  ASSERT_EQ(result_h[0].ints[1], 16);
  ASSERT_FLOAT_EQ(result_h[0].floats[0], 2048.0);
}

// Regression test: float accumulation with slightly perturbed inputs/sums.
// Parent sums are intentionally off by a few ulps from their exact values to
// exercise the grower's numerical stability in single precision.
TEST(ContinuousTreeGrower, FloatUnstableExample) {
  const unsigned level = 1;
  const unsigned depth = 4;
  const size_t size = 10;
  auto grower =
      ContinuousTreeGrower<unsigned int, unsigned short, float, float>(size, 4, 0, NULL);
  thrust::host_vector<unsigned int> node(size, 0);
  thrust::device_vector<unsigned> partitioning_indexes(size, 0);
  thrust::host_vector<float> grad(size);
  thrust::host_vector<unsigned short> feature(size);
  thrust::host_vector<float> parent_node_sum(3, 0.0);
  thrust::host_vector<unsigned int> parent_node_count(3, 0);
  // Hand-crafted rows: gradient, feature bin, and current node assignment.
  grad[0] = 2.350000; feature[0] = 4; node[0] = 0;
  grad[1] = 0.750004; feature[1] = 3; node[1] = 4;
  grad[2] = 2.100000; feature[2] = 6; node[2] = 2;
  grad[3] = -2.100000; feature[3] = 2; node[3] = 4;
  grad[4] = -1.200003; feature[4] = 1; node[4] = 4;
  grad[5] = 1.199997; feature[5] = 0; node[5] = 4;
  grad[6] = -1.199997; feature[6] = 6; node[6] = 6;
  grad[7] = 1.200003; feature[7] = 6; node[7] = 6;
  grad[8] = -0.749996; feature[8] = 5; node[8] = 4;
  grad[9] = -2.350000; feature[9] = 6; node[9] = 4;
  // sum was computed with a slight error
  parent_node_sum[0] = 0.000000;
  parent_node_count[0] = 0;
  // precise value 3.1
  parent_node_sum[1] = 3.100004;
  parent_node_count[1] = 2;
  // precise value 0.0
  parent_node_sum[2] = 0.000008;
  parent_node_count[2] = 10;
  auto p = GainFunctionParameters(2, 2, 0, 0, 0, 0);
  thrust::device_vector<unsigned short> feature_d = feature;
  grower.CreatePartitioningIndexes(partitioning_indexes, node, parent_node_count,
                                   level, depth);
  grower.ProcessDenseFeature(partitioning_indexes, node, grad, feature_d,
                             thrust::raw_pointer_cast(feature.data()),
                             parent_node_sum, parent_node_count, 3, level, depth,
                             p, false);
  TEST_OK(cudaStreamSynchronize(grower.stream));
  thrust::host_vector<my_atomics> result_h = grower.result_d;
  // copy pasted result
  ASSERT_EQ(result_h[0].ints[1], 0);
  ASSERT_FLOAT_EQ(result_h[0].floats[0], 0);
  ASSERT_EQ(result_h[1].ints[1], 7);
  ASSERT_FLOAT_EQ(result_h[1].floats[0], 5.676724);
}

// Same scenario as FloatUnstableExample but with a double accumulation type
// (SUM_T = double) and exact parent sums; the expected gain differs slightly
// from the float case because of the higher-precision accumulation.
TEST(ContinuousTreeGrower, DoubleUnstableExample) {
  const unsigned level = 1;
  const unsigned depth = 2;
  const size_t size = 10;
  auto grower =
      ContinuousTreeGrower<unsigned int, unsigned short, float, double>(size, 4, 0);
  thrust::device_vector<unsigned int> partitioning_indexes(size, 0);
  thrust::host_vector<unsigned int> node(size, 0);
  thrust::host_vector<float> grad(size);
  thrust::host_vector<unsigned short> feature(size);
  thrust::host_vector<double> parent_node_sum(3, 0.0);
  thrust::host_vector<unsigned int> parent_node_count(3, 0);
  grad[0] = 2.350000; feature[0] = 4; node[0] = 0;
  grad[1] = 0.750000; feature[1] = 3; node[1] = 4;
  grad[2] = 2.100000; feature[2] = 6; node[2] = 2;
  grad[3] = -2.100000; feature[3] = 2; node[3] = 4;
  grad[4] = -1.199999; feature[4] = 1; node[4] = 4;
  grad[5] = 1.200001; feature[5] = 0; node[5] = 4;
  grad[6] = -1.200001; feature[6] = 6; node[6] = 6;
  grad[7] = 1.199999; feature[7] = 6; node[7] = 6;
  grad[8] = -0.75; feature[8] = 5; node[8] = 4;
  grad[9] = -2.350000; feature[9] = 6; node[9] = 4;
  // precise sums
  parent_node_sum[0] = 0.0;
  parent_node_count[0] = 0;
  parent_node_sum[1] = 3.1;
  parent_node_count[1] = 2;
  parent_node_sum[2] = 0.0;
  parent_node_count[2] = 10;
  auto p = GainFunctionParameters(2, 2, 0, 0, 0, 0);
  thrust::device_vector<unsigned short> feature_d = feature;
  grower.CreatePartitioningIndexes(partitioning_indexes, node, parent_node_count,
                                   level, depth);
  grower.ProcessDenseFeature(partitioning_indexes, node, grad, feature_d,
                             thrust::raw_pointer_cast(feature.data()),
                             parent_node_sum, parent_node_count, 3, 1, 4, p, false);
  TEST_OK(cudaStreamSynchronize(grower.stream));
  thrust::host_vector<my_atomics> result_h = grower.result_d;
  // copy pasted result
  ASSERT_EQ(result_h[0].ints[1], 0);
  ASSERT_FLOAT_EQ(result_h[0].floats[0], 0);
  ASSERT_EQ(result_h[1].ints[1], 7);
  ASSERT_FLOAT_EQ(result_h[1].floats[0], 5.6767569);
}

// Level-1 split search over a continuous feature with two active nodes
// (rows alternate between node 0 and node 1); also verifies the per-row
// feature values the grower gathered into node_fvalue.
TEST(ContinuousTreeGrower, Level1SearchContinuousFeature) {
  const unsigned level = 1;
  const unsigned depth = 2;
  const size_t size = 32;
  auto grower =
      ContinuousTreeGrower<unsigned int, unsigned short, float2, float2>(
          size, depth, 0);
  thrust::device_vector<unsigned int> row2Node(size, 0);
  thrust::device_vector<unsigned int> partitioning_indexes(size, 0);
  thrust::device_vector<float2> grad(32);
  thrust::host_vector<unsigned short> fvalue_h(32);
  thrust::device_vector<unsigned short> fvalue_d(32);
  float2 sum = make_float2(0.0, 0.0);
  thrust::device_vector<float2> parent_node_sum(3, make_float2(0, 0));
  thrust::device_vector<unsigned int> parent_node_count(3, 0);
  for (int i = 0; i < size; ++i) {
    row2Node[i] = i % 2;
    grad[i] = make_float2(i * i, 1.0);
    fvalue_h[i] = fvalue_d[i] = i / 4;
    parent_node_sum[(i / 16) + 1] += grad[i];
    //    parent_node_count[(i / 16) + 1] += 1;
  }
  // Counts and sums are converted to cumulative (prefix) form below.
  parent_node_count[1] += 16;
  parent_node_count[2] += 32;
  parent_node_sum[1] += parent_node_sum[0];
  parent_node_sum[2] += parent_node_sum[1];
  auto p = GainFunctionParameters(0, 0, 0, 0, 0, 0);
  grower.CreatePartitioningIndexes(partitioning_indexes, row2Node,
                                   parent_node_count, level, depth);
  grower.ProcessDenseFeature(partitioning_indexes, row2Node, grad, fvalue_d,
                             thrust::raw_pointer_cast(fvalue_h.data()),
                             parent_node_sum, parent_node_count, 3, level, depth,
                             p, false);
  TEST_OK(cudaStreamSynchronize(grower.stream));
  thrust::host_vector<my_atomics> result_h = grower.result_d;
  // copy pasted result
  ASSERT_EQ(result_h[0].ints[1], 10);
  ASSERT_FLOAT_EQ(result_h[0].floats[0], 64026.672);
  ASSERT_EQ(result_h[1].ints[1], 24);
  ASSERT_FLOAT_EQ(result_h[1].floats[0], 565504);
  // Gathered feature values: ascending 0..7 for the first node's rows,
  // then descending 7..0 for the second node's rows.
  ASSERT_EQ(grower.node_fvalue[0], 0);
  ASSERT_EQ(grower.node_fvalue[1], 0);
  ASSERT_EQ(grower.node_fvalue[2], 1);
  ASSERT_EQ(grower.node_fvalue[3], 1);
  ASSERT_EQ(grower.node_fvalue[4], 2);
  ASSERT_EQ(grower.node_fvalue[5], 2);
  ASSERT_EQ(grower.node_fvalue[6], 3);
  ASSERT_EQ(grower.node_fvalue[7], 3);
  ASSERT_EQ(grower.node_fvalue[8], 4);
  ASSERT_EQ(grower.node_fvalue[9], 4);
  ASSERT_EQ(grower.node_fvalue[10], 5);
  ASSERT_EQ(grower.node_fvalue[11], 5);
  ASSERT_EQ(grower.node_fvalue[12], 6);
  ASSERT_EQ(grower.node_fvalue[13], 6);
  ASSERT_EQ(grower.node_fvalue[14], 7);
  ASSERT_EQ(grower.node_fvalue[15], 7);
  ASSERT_EQ(grower.node_fvalue[16], 7);
  ASSERT_EQ(grower.node_fvalue[17], 7);
  ASSERT_EQ(grower.node_fvalue[18], 6);
  ASSERT_EQ(grower.node_fvalue[19], 6);
  ASSERT_EQ(grower.node_fvalue[20], 5);
  ASSERT_EQ(grower.node_fvalue[21], 5);
  ASSERT_EQ(grower.node_fvalue[22], 4);
  ASSERT_EQ(grower.node_fvalue[23], 4);
  ASSERT_EQ(grower.node_fvalue[24], 3);
  ASSERT_EQ(grower.node_fvalue[25], 3);
  ASSERT_EQ(grower.node_fvalue[26], 2);
  ASSERT_EQ(grower.node_fvalue[27], 2);
  ASSERT_EQ(grower.node_fvalue[28], 1);
  ASSERT_EQ(grower.node_fvalue[29], 1);
  ASSERT_EQ(grower.node_fvalue[30], 0);
  ASSERT_EQ(grower.node_fvalue[31], 0);
}

// Same as Level1SearchContinuousFeature but accumulating in mydouble2
// (double-precision pair); expected results match the float2 variant.
TEST(ContinuousTreeGrower, Level1SearchContinuousFeatureDouble) {
  const unsigned level = 1;
  const unsigned depth = 2;
  const size_t size = 32;
  auto grower =
      ContinuousTreeGrower<unsigned int, unsigned short, float2, mydouble2>(
          size, depth, 0);
  thrust::device_vector<unsigned int> row2Node(size, 0);
  thrust::device_vector<unsigned int> partitioning_indexes(size, 0);
  thrust::device_vector<float2> grad(32);
  thrust::host_vector<unsigned short> fvalue_h(32);
  thrust::device_vector<unsigned short> fvalue_d(32);
  float2 sum = make_float2(0.0, 0.0);
  thrust::device_vector<mydouble2> parent_node_sum(3, make_double2(0, 0));
  thrust::device_vector<unsigned int> parent_node_count(3, 0);
  for (int i = 0; i < size; ++i) {
    row2Node[i] = i % 2;
    grad[i] = make_float2(i * i, 1.0);
    fvalue_h[i] = fvalue_d[i] = i / 4;
    mydouble2 tmp = mydouble2(grad[i]);
    parent_node_sum[(i / 16) + 1] += tmp;
    //    parent_node_count[(i / 16) + 1] += 1;
  }
  parent_node_count[1] += 16;
  parent_node_count[2] += 32;
  parent_node_sum[1] += parent_node_sum[0];
  parent_node_sum[2] += parent_node_sum[1];
  auto p = GainFunctionParameters(0, 0, 0, 0, 0, 0);
  grower.CreatePartitioningIndexes(partitioning_indexes, row2Node,
                                   parent_node_count, level, depth);
  grower.ProcessDenseFeature(partitioning_indexes, row2Node, grad, fvalue_d,
                             thrust::raw_pointer_cast(fvalue_h.data()),
                             parent_node_sum, parent_node_count, 3, level, depth,
                             p, false);
  TEST_OK(cudaStreamSynchronize(grower.stream));
  thrust::host_vector<my_atomics> result_h = grower.result_d;
  // copy pasted result
  ASSERT_EQ(result_h[0].ints[1], 10);
  ASSERT_FLOAT_EQ(result_h[0].floats[0], 64026.672);
  ASSERT_EQ(result_h[1].ints[1], 24);
  ASSERT_FLOAT_EQ(result_h[1].floats[0], 565504);
  ASSERT_EQ(grower.node_fvalue[0], 0);
  ASSERT_EQ(grower.node_fvalue[1], 0);
  ASSERT_EQ(grower.node_fvalue[2], 1);
  ASSERT_EQ(grower.node_fvalue[3], 1);
  ASSERT_EQ(grower.node_fvalue[4], 2);
  ASSERT_EQ(grower.node_fvalue[5], 2);
  ASSERT_EQ(grower.node_fvalue[6], 3);
  ASSERT_EQ(grower.node_fvalue[7], 3);
  ASSERT_EQ(grower.node_fvalue[8], 4);
  ASSERT_EQ(grower.node_fvalue[9], 4);
  ASSERT_EQ(grower.node_fvalue[10], 5);
  ASSERT_EQ(grower.node_fvalue[11], 5);
  ASSERT_EQ(grower.node_fvalue[12], 6);
  ASSERT_EQ(grower.node_fvalue[13], 6);
  ASSERT_EQ(grower.node_fvalue[14], 7);
  ASSERT_EQ(grower.node_fvalue[15], 7);
  ASSERT_EQ(grower.node_fvalue[16], 7);
  ASSERT_EQ(grower.node_fvalue[17], 7);
  ASSERT_EQ(grower.node_fvalue[18], 6);
  ASSERT_EQ(grower.node_fvalue[19], 6);
  ASSERT_EQ(grower.node_fvalue[20], 5);
  ASSERT_EQ(grower.node_fvalue[21], 5);
  ASSERT_EQ(grower.node_fvalue[22], 4);
  ASSERT_EQ(grower.node_fvalue[23], 4);
  ASSERT_EQ(grower.node_fvalue[24], 3);
  ASSERT_EQ(grower.node_fvalue[25], 3);
  ASSERT_EQ(grower.node_fvalue[26], 2);
  ASSERT_EQ(grower.node_fvalue[27], 2);
  ASSERT_EQ(grower.node_fvalue[28], 1);
  ASSERT_EQ(grower.node_fvalue[29], 1);
  ASSERT_EQ(grower.node_fvalue[30], 0);
  ASSERT_EQ(grower.node_fvalue[31], 0);
}

// ApplySplit at level 0: rows whose node_fvalue is >= the threshold (4) move
// to the right child (node 1), the rest stay in node 0; rows past index 6
// keep their initial assignment of 0.
TEST(ContinuousTreeGrower, ApplySplitLevel0) {
  const size_t size = 32;
  auto grower =
      ContinuousTreeGrower<unsigned int, unsigned short, float2, float2>(size, 2, 0);
  thrust::device_vector<unsigned int> row2Node(size, 0);
  row2Node[3] = 1;
  row2Node[4] = 1;
  row2Node[6] = 1;
  grower.node_fvalue[0] = 0;
  grower.node_fvalue[1] = 5;
  grower.node_fvalue[2] = 2;
  grower.node_fvalue[3] = 5;
  grower.node_fvalue[4] = 3;
  grower.node_fvalue[5] = 4;
  grower.node_fvalue[6] = 4;
  grower.ApplySplit(thrust::raw_pointer_cast(row2Node.data()), 0, 4, 0, size);
  TEST_OK(cudaStreamSynchronize(grower.stream));
  thrust::host_vector<unsigned> nodes = row2Node;
  ASSERT_EQ(nodes[0], 0);
  ASSERT_EQ(nodes[1], 1);
  ASSERT_EQ(nodes[2], 0);
  ASSERT_EQ(nodes[3], 1);
  ASSERT_EQ(nodes[4], 0);
  ASSERT_EQ(nodes[5], 1);
  ASSERT_EQ(nodes[6], 1);
  for (int i = 7; i < size; ++i) {
    ASSERT_EQ(nodes[i], 0);
  }
}

// ApplySplit at level 1 over the sub-range [1, 7): only rows in that range
// are re-assigned; everything outside keeps its initial node value of 3.
TEST(ContinuousTreeGrower, ApplySplitLevel1) {
  const size_t size = 32;
  auto grower =
      ContinuousTreeGrower<unsigned int, unsigned short, float2, float2>(size, 2, 0);
  thrust::device_vector<unsigned int> row2Node(size, 3);
  row2Node[3] = 1;
  row2Node[4] = 1;
  row2Node[6] = 1;
  grower.node_fvalue[0] = 0;
  grower.node_fvalue[1] = 5;
  grower.node_fvalue[2] = 2;
  grower.node_fvalue[3] = 5;
  grower.node_fvalue[4] = 3;
  grower.node_fvalue[5] = 4;
  grower.node_fvalue[6] = 4;
  grower.ApplySplit(thrust::raw_pointer_cast(row2Node.data()), 1, 4, 1, 7);
  TEST_OK(cudaStreamSynchronize(grower.stream));
  thrust::host_vector<unsigned> nodes = row2Node;
  ASSERT_EQ(nodes[0], 3);
  ASSERT_EQ(nodes[1], 3);
  ASSERT_EQ(nodes[2], 1);
  ASSERT_EQ(nodes[3], 3);
  ASSERT_EQ(nodes[4], 1);
  ASSERT_EQ(nodes[5], 3);
  ASSERT_EQ(nodes[6], 3);
  for (int i = 7; i < size; ++i) {
    ASSERT_EQ(nodes[i], 3);
  }
}
}  // namespace arboretum_test
the_stack
// CHECK: #include <hip/hip_runtime.h> #include <cuda.h> #include <string> #include <stdio.h> int main() { printf("09. CUDA Driver API Functions synthetic test\n"); unsigned int flags = 0; size_t bytes = 0; size_t bytes_2 = 0; void* image = nullptr; std::string name = "str"; // CHECK: hipDevice_t device; // CHECK-NEXT: hipCtx_t context; // CHECK-NEXT: hipFuncCache_t func_cache; // CHECK-NEXT: hipLimit_t limit; // CHECK-NEXT: hipSharedMemConfig pconfig; // CHECK-NEXT: hipFunction_t function; // CHECK-NEXT: hipModule_t module_; // CHECK-NEXT: hipDeviceptr_t deviceptr; // CHECK-NEXT: hipDeviceptr_t deviceptr_2; // CHECK-NEXT: hipTexRef texref; // CHECK-NEXT: hipJitOption jit_option; // CHECK-NEXT: hipArray_t array_; // CHECK-NEXT: HIP_ARRAY3D_DESCRIPTOR ARRAY3D_DESCRIPTOR; // CHECK-NEXT: HIP_ARRAY_DESCRIPTOR ARRAY_DESCRIPTOR; // CHECK-NEXT: hipIpcEventHandle_t ipcEventHandle; // CHECK-NEXT: hipEvent_t event_; // CHECK-NEXT: hipIpcMemHandle_t ipcMemHandle; // CHECK-NEXT: hip_Memcpy2D MEMCPY2D; // CHECK-NEXT: HIP_MEMCPY3D MEMCPY3D; // CHECK-NEXT: hipStream_t stream; // CHECK-NEXT: hipMipmappedArray_t mipmappedArray; // CHECK-NEXT: hipStreamCallback_t streamCallback; CUdevice device; CUcontext context; CUfunc_cache func_cache; CUlimit limit; CUsharedconfig pconfig; CUfunction function; CUmodule module_; CUdeviceptr deviceptr; CUdeviceptr deviceptr_2; CUtexref texref; CUjit_option jit_option; CUarray array_; CUDA_ARRAY3D_DESCRIPTOR ARRAY3D_DESCRIPTOR; CUDA_ARRAY_DESCRIPTOR ARRAY_DESCRIPTOR; CUipcEventHandle ipcEventHandle; CUevent event_; CUipcMemHandle ipcMemHandle; CUDA_MEMCPY2D MEMCPY2D; CUDA_MEMCPY3D MEMCPY3D; CUstream stream; CUmipmappedArray mipmappedArray; CUstreamCallback streamCallback; #if CUDA_VERSION > 7050 // CHECK: hipMemRangeAttribute MemoryRangeAttribute; // CHECK-NEXT: hipMemoryAdvise MemoryAdvise; CUmem_range_attribute MemoryRangeAttribute; CUmem_advise MemoryAdvise; #endif #if CUDA_VERSION > 9020 // CHECK: hipGraph_t graph; CUgraph graph; #endif #if 
CUDA_VERSION > 10000 // CHECK: hipStreamCaptureMode streamCaptureMode; CUstreamCaptureMode streamCaptureMode; #endif // CUDA: CUresult CUDAAPI cuInit(unsigned int Flags); // HIP: hipError_t hipInit(unsigned int flags); // CHECK: hipError_t result = hipInit(flags); CUresult result = cuInit(flags); int driverVersion = 0; // CUDA: CUresult CUDAAPI cuDriverGetVersion(int *driverVersion); // HIP: hipError_t hipDriverGetVersion(int* driverVersion); // CHECK: result = hipDriverGetVersion(&driverVersion); result = cuDriverGetVersion(&driverVersion); int ordinal = 0; // CUDA: CUresult CUDAAPI cuDeviceGet(CUdevice *device, int ordinal); // HIP: hipError_t hipDeviceGet(hipDevice_t* device, int ordinal); // CHECK: result = hipDeviceGet(&device, ordinal); result = cuDeviceGet(&device, ordinal); int pi = 0; // CHECK: hipDeviceAttribute_t device_attribute = hipDeviceAttributePciBusId; CUdevice_attribute device_attribute = CU_DEVICE_ATTRIBUTE_PCI_BUS_ID; // CUDA: CUresult CUDAAPI cuDeviceGetAttribute(int *pi, CUdevice_attribute attrib, CUdevice dev); // HIP: hipError_t hipDeviceGetAttribute(int* pi, hipDeviceAttribute_t attr, int deviceId); // CHECK: result = hipDeviceGetAttribute(&pi, device_attribute, device); result = cuDeviceGetAttribute(&pi, device_attribute, device); int count = 0; // CUDA: CUresult CUDAAPI cuDeviceGetCount(int *count); // HIP: hipError_t hipGetDeviceCount(int* count); // CHECK: result = hipGetDeviceCount(&count); result = cuDeviceGetCount(&count); // CUDA: CUresult CUDAAPI cuDeviceTotalMem(size_t *bytes, CUdevice dev); // HIP: hipError_t hipDeviceTotalMem(size_t* bytes, hipDevice_t device); // CHECK: result = hipDeviceTotalMem(&bytes, device); // CHECK-NEXT: result = hipDeviceTotalMem(&bytes, device); result = cuDeviceTotalMem(&bytes, device); result = cuDeviceTotalMem_v2(&bytes, device); int major = 0, minor = 0; // CUDA: __CUDA_DEPRECATED CUresult CUDAAPI cuDeviceComputeCapability(int *major, int *minor, CUdevice dev); // HIP: hipError_t 
hipDeviceComputeCapability(int* major, int* minor, hipDevice_t device); // CHECK: result = hipDeviceComputeCapability(&major, &minor, device); result = cuDeviceComputeCapability(&major, &minor, device); int active = 0; // CUDA: CUresult CUDAAPI cuDevicePrimaryCtxGetState(CUdevice dev, unsigned int *flags, int *active); // HIP: hipError_t hipDevicePrimaryCtxGetState(hipDevice_t dev, unsigned int* flags, int* active); // CHECK: result = hipDevicePrimaryCtxGetState(device, &flags, &active); result = cuDevicePrimaryCtxGetState(device, &flags, &active); // CUDA: CUresult CUDAAPI cuDevicePrimaryCtxRelease(CUdevice dev); // HIP: hipError_t hipDevicePrimaryCtxRelease(hipDevice_t dev); // CHECK: result = hipDevicePrimaryCtxRelease(device); result = cuDevicePrimaryCtxRelease(device); #if CUDA_VERSION > 10020 // CHECK: result = hipDevicePrimaryCtxRelease(device); result = cuDevicePrimaryCtxRelease_v2(device); #endif // CUDA: CUresult CUDAAPI cuDevicePrimaryCtxReset(CUdevice dev); // HIP: hipError_t hipDevicePrimaryCtxReset(hipDevice_t dev); // CHECK: result = hipDevicePrimaryCtxReset(device); result = cuDevicePrimaryCtxReset(device); #if CUDA_VERSION > 10020 // CHECK: result = hipDevicePrimaryCtxReset(device); result = cuDevicePrimaryCtxReset_v2(device); #endif // CUDA: CUresult CUDAAPI cuDevicePrimaryCtxRetain(CUcontext *pctx, CUdevice dev); // HIP: hipError_t hipDevicePrimaryCtxRetain(hipCtx_t* pctx, hipDevice_t dev); // CHECK: result = hipDevicePrimaryCtxRetain(&context, device); result = cuDevicePrimaryCtxRetain(&context, device); // CUDA: CUresult CUDAAPI cuDevicePrimaryCtxSetFlags(CUdevice dev, unsigned int flags); // HIP: hipError_t hipDevicePrimaryCtxSetFlags(hipDevice_t dev, unsigned int flags); // CHECK: result = hipDevicePrimaryCtxSetFlags(device, flags); result = cuDevicePrimaryCtxSetFlags(device, flags); #if CUDA_VERSION > 10020 // CHECK: result = hipDevicePrimaryCtxSetFlags(device, flags); result = cuDevicePrimaryCtxSetFlags_v2(device, flags); #endif // CUDA: 
CUresult CUDAAPI cuCtxCreate(CUcontext *pctx, unsigned int flags, CUdevice dev); // HIP: DEPRECATED(DEPRECATED_MSG) hipError_t hipCtxCreate(hipCtx_t *ctx, unsigned int flags, hipDevice_t device); // CHECK: result = hipCtxCreate(&context, flags, device); // CHECK-NEXT: result = hipCtxCreate(&context, flags, device); result = cuCtxCreate(&context, flags, device); result = cuCtxCreate_v2(&context, flags, device); // CUDA: CUresult CUDAAPI cuCtxDestroy(CUcontext ctx); // HIP: DEPRECATED(DEPRECATED_MSG) hipError_t hipCtxDestroy(hipCtx_t ctx); // CHECK: result = hipCtxDestroy(context); // CHECK-NEXT: result = hipCtxDestroy(context); result = cuCtxDestroy(context); result = cuCtxDestroy_v2(context); unsigned int version = 0; // CUDA: CUresult CUDAAPI cuCtxGetApiVersion(CUcontext ctx, unsigned int *version); // HIP: DEPRECATED(DEPRECATED_MSG) hipError_t hipCtxGetApiVersion(hipCtx_t ctx, int* apiVersion); // CHECK: result = hipCtxGetApiVersion(context, &version); result = cuCtxGetApiVersion(context, &version); // CUDA: CUresult CUDAAPI cuCtxGetCacheConfig(CUfunc_cache *pconfig); // HIP: DEPRECATED(DEPRECATED_MSG) hipError_t hipCtxGetCacheConfig(hipFuncCache_t* cacheConfig); // CHECK: result = hipCtxGetCacheConfig(&func_cache); result = cuCtxGetCacheConfig(&func_cache); // CUDA: CUresult CUDAAPI cuCtxGetCurrent(CUcontext *pctx); // HIP: DEPRECATED(DEPRECATED_MSG) hipError_t hipCtxGetCurrent(hipCtx_t* ctx); // CHECK: result = hipCtxGetCurrent(&context); result = cuCtxGetCurrent(&context); // CUDA: CUresult CUDAAPI cuCtxGetDevice(CUdevice *device); // HIP: DEPRECATED(DEPRECATED_MSG) hipError_t hipCtxGetDevice(hipDevice_t* device); // CHECK: result = hipCtxGetDevice(&device); result = cuCtxGetDevice(&device); // CUDA: CUresult CUDAAPI cuCtxGetFlags(unsigned int *flags); // HIP: DEPRECATED(DEPRECATED_MSG) hipError_t hipCtxGetFlags(unsigned int* flags); // CHECK: result = hipCtxGetFlags(&flags); result = cuCtxGetFlags(&flags); size_t pvalue = 0; // CUDA: CUresult CUDAAPI 
cuCtxGetLimit(size_t *pvalue, CUlimit limit); // HIP: hipError_t hipDeviceGetLimit(size_t* pValue, enum hipLimit_t limit); // CHECK: result = hipDeviceGetLimit(&pvalue, limit); result = cuCtxGetLimit(&pvalue, limit); // CUDA: CUresult CUDAAPI cuCtxGetSharedMemConfig(CUsharedconfig *pConfig); // HIP: DEPRECATED(DEPRECATED_MSG) hipError_t hipCtxGetSharedMemConfig(hipSharedMemConfig* pConfig); // CHECK: result = hipCtxGetSharedMemConfig(&pconfig); result = cuCtxGetSharedMemConfig(&pconfig); int leastPriority = 0, greatestPriority = 0; // CUDA: CUresult CUDAAPI cuCtxGetStreamPriorityRange(int *leastPriority, int *greatestPriority); // HIP: hipError_t hipDeviceGetStreamPriorityRange(int* leastPriority, int* greatestPriority); // CHECK: result = hipDeviceGetStreamPriorityRange(&leastPriority, &greatestPriority); result = cuCtxGetStreamPriorityRange(&leastPriority, &greatestPriority); // CUDA: CUresult CUDAAPI cuCtxPopCurrent(CUcontext *pctx); // HIP: DEPRECATED(DEPRECATED_MSG) hipError_t hipCtxPopCurrent(hipCtx_t* ctx); // CHECK: result = hipCtxPopCurrent(&context); // CHECK-NEXT: result = hipCtxPopCurrent(&context); result = cuCtxPopCurrent(&context); result = cuCtxPopCurrent_v2(&context); // CUDA: CUresult CUDAAPI cuCtxPushCurrent(CUcontext ctx); // HIP: DEPRECATED(DEPRECATED_MSG) hipError_t hipCtxPushCurrent(hipCtx_t ctx); // CHECK: result = hipCtxPushCurrent(context); // CHECK-NEXT: result = hipCtxPushCurrent(context); result = cuCtxPushCurrent(context); result = cuCtxPushCurrent_v2(context); // CUDA: CUresult CUDAAPI cuCtxSetCacheConfig(CUfunc_cache config); // HIP: DEPRECATED(DEPRECATED_MSG) hipError_t hipCtxSetCacheConfig(hipFuncCache_t cacheConfig); // CHECK: result = hipCtxSetCacheConfig(func_cache); result = cuCtxSetCacheConfig(func_cache); // CUDA: CUresult CUDAAPI cuCtxSetCurrent(CUcontext ctx); // HIP: DEPRECATED(DEPRECATED_MSG) hipError_t hipCtxSetCurrent(hipCtx_t ctx); // CHECK: result = hipCtxSetCurrent(context); result = cuCtxSetCurrent(context); // 
CUDA: CUresult CUDAAPI cuCtxSetSharedMemConfig(CUsharedconfig config); // HIP: DEPRECATED(DEPRECATED_MSG) hipError_t hipCtxSetSharedMemConfig(hipSharedMemConfig config); // CHECK: result = hipCtxSetSharedMemConfig(pconfig); result = cuCtxSetSharedMemConfig(pconfig); // CUDA: CUresult CUDAAPI cuCtxSynchronize(void); // HIP: DEPRECATED(DEPRECATED_MSG) hipError_t hipCtxSynchronize(void); // CHECK: result = hipCtxSynchronize(); result = cuCtxSynchronize(); // CUDA: CUresult CUDAAPI cuModuleGetFunction(CUfunction *hfunc, CUmodule hmod, const char *name); // HIP: hipError_t hipModuleGetFunction(hipFunction_t* function, hipModule_t module, const char* kname); // CHECK: result = hipModuleGetFunction(&function, module_, name.c_str()); result = cuModuleGetFunction(&function, module_, name.c_str()); // CUDA: CUresult CUDAAPI cuModuleGetGlobal(CUdeviceptr *dptr, size_t *bytes, CUmodule hmod, const char *name); // HIP: hipError_t hipModuleGetGlobal(hipDeviceptr_t* dptr, size_t* bytes, hipModule_t hmod, const char* name); // CHECK: result = hipModuleGetGlobal(&deviceptr, &bytes, module_, name.c_str()); // CHECK-NEXT: result = hipModuleGetGlobal(&deviceptr, &bytes, module_, name.c_str()); result = cuModuleGetGlobal(&deviceptr, &bytes, module_, name.c_str()); result = cuModuleGetGlobal_v2(&deviceptr, &bytes, module_, name.c_str()); // CUDA: CUresult CUDAAPI cuModuleGetTexRef(CUtexref *pTexRef, CUmodule hmod, const char *name); // HIP: hipError_t hipModuleGetTexRef(textureReference** texRef, hipModule_t hmod, const char* name); // CHECK: result = hipModuleGetTexRef(&texref, module_, name.c_str()); result = cuModuleGetTexRef(&texref, module_, name.c_str()); // CUDA: CUresult CUDAAPI cuModuleLoad(CUmodule *module, const char *fname); // HIP: hipError_t hipModuleLoad(hipModule_t* module, const char* fname); // CHECK: result = hipModuleLoad(&module_, name.c_str()); result = cuModuleLoad(&module_, name.c_str()); // CUDA: CUresult CUDAAPI cuModuleLoadData(CUmodule *module, const void 
*image); // HIP: hipError_t hipModuleLoadData(hipModule_t* module, const void* image); // CHECK: result = hipModuleLoadData(&module_, image); result = cuModuleLoadData(&module_, image); unsigned int numOptions = 0; void* optionValues = nullptr; // CUDA: CUresult CUDAAPI cuModuleLoadDataEx(CUmodule *module, const void *image, unsigned int numOptions, CUjit_option *options, void **optionValues); // HIP: hipError_t hipModuleLoadDataEx(hipModule_t* module, const void* image, unsigned int numOptions, hipJitOption* options, void** optionValues); // CHECK: result = hipModuleLoadDataEx(&module_, image, numOptions, &jit_option, &optionValues); result = cuModuleLoadDataEx(&module_, image, numOptions, &jit_option, &optionValues); // CUDA: CUresult CUDAAPI cuModuleUnload(CUmodule hmod); // HIP: hipError_t hipModuleUnload(hipModule_t module); // CHECK: result = hipModuleUnload(module_); result = cuModuleUnload(module_); // CUDA: CUresult CUDAAPI cuArray3DCreate(CUarray *pHandle, const CUDA_ARRAY3D_DESCRIPTOR *pAllocateArray); // HIP: hipError_t hipArray3DCreate(hipArray** array, const HIP_ARRAY3D_DESCRIPTOR* pAllocateArray); // CHECK: result = hipArray3DCreate(&array_, &ARRAY3D_DESCRIPTOR); // CHECK-NEXT: result = hipArray3DCreate(&array_, &ARRAY3D_DESCRIPTOR); result = cuArray3DCreate(&array_, &ARRAY3D_DESCRIPTOR); result = cuArray3DCreate_v2(&array_, &ARRAY3D_DESCRIPTOR); // CUDA: CUresult CUDAAPI cuArrayCreate(CUarray *pHandle, const CUDA_ARRAY_DESCRIPTOR *pAllocateArray); // HIP: hipError_t hipArrayCreate(hipArray** pHandle, const HIP_ARRAY_DESCRIPTOR* pAllocateArray); // CHECK: result = hipArrayCreate(&array_, &ARRAY_DESCRIPTOR); // CHECK: result = hipArrayCreate(&array_, &ARRAY_DESCRIPTOR); result = cuArrayCreate(&array_, &ARRAY_DESCRIPTOR); result = cuArrayCreate_v2(&array_, &ARRAY_DESCRIPTOR); // CUDA: CUresult CUDAAPI cuArrayDestroy(CUarray hArray); // HIP: hipError_t hipArrayDestroy(hipArray* array); // CHECK: result = hipArrayDestroy(array_); result = 
cuArrayDestroy(array_); std::string pciBusId; // CUDA: CUresult CUDAAPI cuDeviceGetByPCIBusId(CUdevice *dev, const char *pciBusId); // HIP: hipError_t hipDeviceGetByPCIBusId(int* device, const char* pciBusId); // CHECK: result = hipDeviceGetByPCIBusId(&device, pciBusId.c_str()); result = cuDeviceGetByPCIBusId(&device, pciBusId.c_str()); int len = 0; char* pciBusId_ = const_cast<char*>(pciBusId.c_str()); // CUDA: CUresult CUDAAPI cuDeviceGetPCIBusId(char *pciBusId, int len, CUdevice dev); // HIP: hipError_t hipDeviceGetPCIBusId(char* pciBusId, int len, int device); // CHECK: result = hipDeviceGetPCIBusId(pciBusId_, len, device); result = cuDeviceGetPCIBusId(pciBusId_, len, device); // CUDA: CUresult CUDAAPI cuIpcCloseMemHandle(CUdeviceptr dptr); // HIP: hipError_t hipIpcCloseMemHandle(void* devPtr); // CHECK: result = hipIpcCloseMemHandle(deviceptr); result = cuIpcCloseMemHandle(deviceptr); // CUDA: CUresult CUDAAPI cuIpcGetEventHandle(CUipcEventHandle *pHandle, CUevent event); // HIP: hipError_t hipIpcGetEventHandle(hipIpcEventHandle_t* handle, hipEvent_t event); // CHECK: result = hipIpcGetEventHandle(&ipcEventHandle, event_); result = cuIpcGetEventHandle(&ipcEventHandle, event_); // CUDA: CUresult CUDAAPI cuIpcGetMemHandle(CUipcMemHandle *pHandle, CUdeviceptr dptr); // HIP: hipError_t hipIpcGetMemHandle(hipIpcMemHandle_t* handle, void* devPtr); // CHECK: result = hipIpcGetMemHandle(&ipcMemHandle, deviceptr); result = cuIpcGetMemHandle(&ipcMemHandle, deviceptr); // CUDA: CUresult CUDAAPI cuIpcOpenEventHandle(CUevent *phEvent, CUipcEventHandle handle); // HIP: hipError_t hipIpcOpenEventHandle(hipEvent_t* event, hipIpcEventHandle_t handle); // CHECK: result = hipIpcOpenEventHandle(&event_, ipcEventHandle); result = cuIpcOpenEventHandle(&event_, ipcEventHandle); // CUDA: CUresult CUDAAPI cuIpcOpenMemHandle(CUdeviceptr *pdptr, CUipcMemHandle handle, unsigned int Flags); // HIP: hipError_t hipIpcOpenMemHandle(void** devPtr, hipIpcMemHandle_t handle, unsigned int 
flags); // CHECK: result = hipIpcOpenMemHandle(&deviceptr, ipcMemHandle, flags); result = cuIpcOpenMemHandle(&deviceptr, ipcMemHandle, flags); // CUDA: CUresult CUDAAPI cuMemAlloc(CUdeviceptr *dptr, size_t bytesize); // HIP: hipError_t hipMalloc(void** ptr, size_t size); // CHECK: result = hipMalloc(&deviceptr, bytes); // CHECK-NEXT: result = hipMalloc(&deviceptr, bytes); result = cuMemAlloc(&deviceptr, bytes); result = cuMemAlloc_v2(&deviceptr, bytes); //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// ///////////// TODO: Get rid of additional attribute 'unsigned int flags' used by HIP without a default value /////// //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // CUDA: CUresult CUDAAPI cuMemAllocHost(void **pp, size_t bytesize); // HIP: DEPRECATED("use hipHostMalloc instead") hipError_t hipHostAlloc(void** ptr, size_t size, unsigned int flags); // TODO: should be hipHostAlloc(&image, bytes, 0); // CHECK: result = hipHostAlloc(&image, bytes); // CHECK-NEXT: result = hipHostAlloc(&image, bytes); result = cuMemAllocHost(&image, bytes); result = cuMemAllocHost_v2(&image, bytes); // CUDA: CUresult CUDAAPI cuMemAllocManaged(CUdeviceptr *dptr, size_t bytesize, unsigned int flags); // HIP: hipError_t hipMallocManaged(void** dev_ptr, size_t size, unsigned int flags __dparm(hipMemAttachGlobal)); // CHECK: result = hipMallocManaged(&deviceptr, bytes, flags); result = cuMemAllocManaged(&deviceptr, bytes, flags); size_t pitch = 0, width = 0, height = 0; // CUDA: CUresult CUDAAPI cuMemAllocPitch(CUdeviceptr *dptr, size_t *pPitch, size_t WidthInBytes, size_t Height, unsigned int ElementSizeBytes); // HIP: hipError_t hipMemAllocPitch(hipDeviceptr_t* dptr, size_t* pitch, size_t widthInBytes, size_t height, unsigned int elementSizeBytes); // CHECK: result = hipMemAllocPitch(&deviceptr, &pitch, width, height, bytes); // CHECK-NEXT: result = 
hipMemAllocPitch(&deviceptr, &pitch, width, height, bytes); result = cuMemAllocPitch(&deviceptr, &pitch, width, height, bytes); result = cuMemAllocPitch_v2(&deviceptr, &pitch, width, height, bytes); // CUDA: CUresult CUDAAPI cuMemcpy2D(const CUDA_MEMCPY2D *pCopy); // HIP: hipError_t hipMemcpyParam2D(const hip_Memcpy2D* pCopy); // CHECK: result = hipMemcpyParam2D(&MEMCPY2D); // CHECK-NEXT: result = hipMemcpyParam2D(&MEMCPY2D); result = cuMemcpy2D(&MEMCPY2D); result = cuMemcpy2D_v2(&MEMCPY2D); // CUDA: CUresult CUDAAPI cuMemcpy2DAsync(const CUDA_MEMCPY2D *pCopy, CUstream hStream); // HIP: hipError_t hipMemcpyParam2DAsync(const hip_Memcpy2D* pCopy, hipStream_t stream __dparm(0)); // CHECK: result = hipMemcpyParam2DAsync(&MEMCPY2D, stream); // CHECK-NEXT: result = hipMemcpyParam2DAsync(&MEMCPY2D, stream); result = cuMemcpy2DAsync(&MEMCPY2D, stream); result = cuMemcpy2DAsync_v2(&MEMCPY2D, stream); // CUDA: CUresult CUDAAPI cuMemcpy2DUnaligned(const CUDA_MEMCPY2D *pCopy); // HIP: hipError_t hipDrvMemcpy2DUnaligned(const hip_Memcpy2D* pCopy); // CHECK: result = hipDrvMemcpy2DUnaligned(&MEMCPY2D); // CHECK-NEXT: result = hipDrvMemcpy2DUnaligned(&MEMCPY2D); result = cuMemcpy2DUnaligned(&MEMCPY2D); result = cuMemcpy2DUnaligned_v2(&MEMCPY2D); // CUDA: CUresult CUDAAPI cuMemcpy3D(const CUDA_MEMCPY3D *pCopy); // HIP: hipError_t hipDrvMemcpy3D(const HIP_MEMCPY3D* pCopy); // CHECK: result = hipDrvMemcpy3D(&MEMCPY3D); // CHECK-NEXT: result = hipDrvMemcpy3D(&MEMCPY3D); result = cuMemcpy3D(&MEMCPY3D); result = cuMemcpy3D_v2(&MEMCPY3D); // CUDA: CUresult CUDAAPI cuMemcpy3DAsync(const CUDA_MEMCPY3D *pCopy, CUstream hStream); // HIP: hipError_t hipDrvMemcpy3DAsync(const HIP_MEMCPY3D* pCopy, hipStream_t stream); // CHECK: result = hipDrvMemcpy3DAsync(&MEMCPY3D, stream); // CHECK-NEXT: result = hipDrvMemcpy3DAsync(&MEMCPY3D, stream); result = cuMemcpy3DAsync(&MEMCPY3D, stream); result = cuMemcpy3DAsync_v2(&MEMCPY3D, stream); void* dsthost = nullptr; size_t offset = 0; // CUDA: CUresult 
CUDAAPI cuMemcpyAtoH(void *dstHost, CUarray srcArray, size_t srcOffset, size_t ByteCount); // HIP: hipError_t hipMemcpyAtoH(void* dst, hipArray* srcArray, size_t srcOffset, size_t count); // CHECK: result = hipMemcpyAtoH(dsthost, array_, offset, bytes); // CHECK-NEXT: result = hipMemcpyAtoH(dsthost, array_, offset, bytes); result = cuMemcpyAtoH(dsthost, array_, offset, bytes); result = cuMemcpyAtoH_v2(dsthost, array_, offset, bytes); // CUDA: CUresult CUDAAPI cuMemcpyDtoD(CUdeviceptr dstDevice, CUdeviceptr srcDevice, size_t ByteCount); // HIP: hipError_t hipMemcpyDtoD(hipDeviceptr_t dst, hipDeviceptr_t src, size_t sizeBytes); // CHECK: result = hipMemcpyDtoD(deviceptr, deviceptr, bytes); // CHECK-NEXT: result = hipMemcpyDtoD(deviceptr, deviceptr, bytes); result = cuMemcpyDtoD(deviceptr, deviceptr, bytes); result = cuMemcpyDtoD_v2(deviceptr, deviceptr, bytes); // CUDA: CUresult CUDAAPI cuMemcpyDtoDAsync(CUdeviceptr dstDevice, CUdeviceptr srcDevice, size_t ByteCount, CUstream hStream); // HIP: hipError_t hipMemcpyDtoDAsync(hipDeviceptr_t dst, hipDeviceptr_t src, size_t sizeBytes, hipStream_t stream); // CHECK: result = hipMemcpyDtoDAsync(deviceptr, deviceptr, bytes, stream); // CHECK-NEXT: result = hipMemcpyDtoDAsync(deviceptr, deviceptr, bytes, stream); result = cuMemcpyDtoDAsync(deviceptr, deviceptr, bytes, stream); result = cuMemcpyDtoDAsync_v2(deviceptr, deviceptr, bytes, stream); // CUDA: CUresult CUDAAPI cuMemcpyDtoH(void *dstHost, CUdeviceptr srcDevice, size_t ByteCount); // HIP: hipError_t hipMemcpyDtoH(void* dst, hipDeviceptr_t src, size_t sizeBytes); // CHECK: result = hipMemcpyDtoH(dsthost, deviceptr, bytes); // CHECK-NEXT: result = hipMemcpyDtoH(dsthost, deviceptr, bytes); result = cuMemcpyDtoH(dsthost, deviceptr, bytes); result = cuMemcpyDtoH_v2(dsthost, deviceptr, bytes); // CUDA: CUresult CUDAAPI cuMemcpyDtoHAsync(void *dstHost, CUdeviceptr srcDevice, size_t ByteCount, CUstream hStream); // HIP: hipError_t hipMemcpyDtoHAsync(void* dst, hipDeviceptr_t 
src, size_t sizeBytes, hipStream_t stream); // CHECK: result = hipMemcpyDtoHAsync(dsthost, deviceptr, bytes, stream); // CHECK-NEXT: result = hipMemcpyDtoHAsync(dsthost, deviceptr, bytes, stream); result = cuMemcpyDtoHAsync(dsthost, deviceptr, bytes, stream); result = cuMemcpyDtoHAsync_v2(dsthost, deviceptr, bytes, stream); // CUDA: CUresult CUDAAPI cuMemcpyHtoA(CUarray dstArray, size_t dstOffset, const void *srcHost, size_t ByteCount); // HIP: hipError_t hipMemcpyHtoA(hipArray* dstArray, size_t dstOffset, const void* srcHost, size_t count); // CHECK: result = hipMemcpyHtoA(array_, offset, dsthost, bytes); // CHECK-NEXT: result = hipMemcpyHtoA(array_, offset, dsthost, bytes); result = cuMemcpyHtoA(array_, offset, dsthost, bytes); result = cuMemcpyHtoA_v2(array_, offset, dsthost, bytes); // CUDA: CUresult CUDAAPI cuMemcpyHtoD(CUdeviceptr dstDevice, const void *srcHost, size_t ByteCount); // HIP: hipError_t hipMemcpyHtoD(hipDeviceptr_t dst, void* src, size_t sizeBytes); // CHECK: result = hipMemcpyHtoD(deviceptr, dsthost, bytes); // CHECK-NEXT: result = hipMemcpyHtoD(deviceptr, dsthost, bytes); result = cuMemcpyHtoD(deviceptr, dsthost, bytes); result = cuMemcpyHtoD_v2(deviceptr, dsthost, bytes); // CUDA: CUresult CUDAAPI cuMemcpyHtoDAsync(CUdeviceptr dstDevice, const void *srcHost, size_t ByteCount, CUstream hStream); // HIP: hipError_t hipMemcpyHtoDAsync(hipDeviceptr_t dst, void* src, size_t sizeBytes, hipStream_t stream); // CHECK: result = hipMemcpyHtoDAsync(deviceptr, dsthost, bytes, stream); // CHECK-NEXT: result = hipMemcpyHtoDAsync(deviceptr, dsthost, bytes, stream); result = cuMemcpyHtoDAsync(deviceptr, dsthost, bytes, stream); result = cuMemcpyHtoDAsync_v2(deviceptr, dsthost, bytes, stream); // CUDA: CUresult CUDAAPI cuMemFree(CUdeviceptr dptr); // HIP: hipError_t hipFree(void* ptr); // CHECK: result = hipFree(deviceptr); // CHECK-NEXT: result = hipFree(deviceptr); result = cuMemFree(deviceptr); result = cuMemFree_v2(deviceptr); // CUDA: CUresult CUDAAPI 
cuMemFreeHost(void *p); // HIP: hipError_t hipHostFree(void* ptr); // CHECK: result = hipHostFree(image); result = cuMemFreeHost(image); // CUDA: CUresult CUDAAPI cuMemGetAddressRange(CUdeviceptr *pbase, size_t *psize, CUdeviceptr dptr); // HIP: hipError_t hipMemGetAddressRange(hipDeviceptr_t* pbase, size_t* psize, hipDeviceptr_t dptr); // CHECK: result = hipMemGetAddressRange(&deviceptr, &bytes, deviceptr_2); // CHECK-NEXT: result = hipMemGetAddressRange(&deviceptr, &bytes, deviceptr_2); result = cuMemGetAddressRange(&deviceptr, &bytes, deviceptr_2); result = cuMemGetAddressRange_v2(&deviceptr, &bytes, deviceptr_2); // CUDA: CUresult CUDAAPI cuMemGetInfo(size_t *free, size_t *total); // HIP: hipError_t hipMemGetInfo(size_t* free, size_t* total); // CHECK: result = hipMemGetInfo(&bytes, &bytes_2); // CHECK-NEXT: result = hipMemGetInfo(&bytes, &bytes_2); result = cuMemGetInfo(&bytes, &bytes_2); result = cuMemGetInfo_v2(&bytes, &bytes_2); // CUDA: CUresult CUDAAPI cuMemHostAlloc(void **pp, size_t bytesize, unsigned int Flags); // HIP: DEPRECATED("use hipHostMalloc instead") hipError_t hipHostAlloc(void** ptr, size_t size, unsigned int flags); // CHECK: result = hipHostAlloc(&image, bytes, flags); result = cuMemHostAlloc(&image, bytes, flags); // CUDA: CUresult CUDAAPI cuMemHostGetDevicePointer(CUdeviceptr *pdptr, void *p, unsigned int Flags); // HIP: hipError_t hipHostGetDevicePointer(void** devPtr, void* hstPtr, unsigned int flags); // CHECK: result = hipHostGetDevicePointer(&deviceptr, image, flags); // CHECK-NEXT: result = hipHostGetDevicePointer(&deviceptr, image, flags); result = cuMemHostGetDevicePointer(&deviceptr, image, flags); result = cuMemHostGetDevicePointer_v2(&deviceptr, image, flags); // CUDA: CUresult CUDAAPI cuMemHostGetFlags(unsigned int *pFlags, void *p); // HIP: hipError_t hipHostGetFlags(&flags, image); // CHECK: result = hipHostGetFlags(&flags, image); result = cuMemHostGetFlags(&flags, image); // CUDA: CUresult CUDAAPI cuMemHostRegister(void 
*p, size_t bytesize, unsigned int Flags); // HIP: hipError_t hipHostRegister(void* hostPtr, size_t sizeBytes, unsigned int flags); // CHECK: result = hipHostRegister(image, bytes, flags); // CHECK-NEXT: result = hipHostRegister(image, bytes, flags); result = cuMemHostRegister(image, bytes, flags); result = cuMemHostRegister_v2(image, bytes, flags); // CUDA: CUresult CUDAAPI cuMemHostUnregister(void *p); // HIP: hipError_t hipHostUnregister(void* hostPtr); // CHECK: result = hipHostUnregister(image); result = cuMemHostUnregister(image); unsigned short us = 0; // CUDA: CUresult CUDAAPI cuMemsetD16(CUdeviceptr dstDevice, unsigned short us, size_t N); // HIP: hipError_t hipMemsetD16(hipDeviceptr_t dest, unsigned short value, size_t count); // CHECK: result = hipMemsetD16(deviceptr, us, bytes); // CHECK-NEXT: result = hipMemsetD16(deviceptr, us, bytes); result = cuMemsetD16(deviceptr, us, bytes); result = cuMemsetD16_v2(deviceptr, us, bytes); // CUDA: CUresult CUDAAPI cuMemsetD16Async(CUdeviceptr dstDevice, unsigned short us, size_t N, CUstream hStream); // HIP: hipError_t hipMemsetD16Async(hipDeviceptr_t dest, unsigned short value, size_t count, hipStream_t stream __dparm(0)); // CHECK: result = hipMemsetD16Async(deviceptr, us, bytes, stream); result = cuMemsetD16Async(deviceptr, us, bytes, stream); // CUDA: CUresult CUDAAPI cuMemsetD32(CUdeviceptr dstDevice, unsigned int ui, size_t N) // HIP: hipError_t hipMemsetD32(hipDeviceptr_t dest, int value, size_t count); // CHECK: result = hipMemsetD32(deviceptr, flags, bytes); // CHECK-NEXT: result = hipMemsetD32(deviceptr, flags, bytes); result = cuMemsetD32(deviceptr, flags, bytes); result = cuMemsetD32_v2(deviceptr, flags, bytes); // CUDA: CUresult CUDAAPI cuMemsetD32Async(CUdeviceptr dstDevice, unsigned int ui, size_t N, CUstream hStream); // HIP: hipError_t hipMemsetD32Async(hipDeviceptr_t dst, int value, size_t count, hipStream_t stream __dparm(0)); // CHECK: result = hipMemsetD32Async(deviceptr, flags, bytes, stream); 
result = cuMemsetD32Async(deviceptr, flags, bytes, stream); unsigned char uc = 0; // CUDA: CUresult CUDAAPI cuMemsetD8(CUdeviceptr dstDevice, unsigned char uc, size_t N); // HIP: hipError_t hipMemsetD8(hipDeviceptr_t dest, unsigned char value, size_t count); // CHECK: result = hipMemsetD8(deviceptr, uc, bytes); // CHECK-NEXT: result = hipMemsetD8(deviceptr, uc, bytes); result = cuMemsetD8(deviceptr, uc, bytes); result = cuMemsetD8_v2(deviceptr, uc, bytes); // CUDA: CUresult CUDAAPI cuMemsetD8Async(CUdeviceptr dstDevice, unsigned char uc, size_t N, CUstream hStream); // HIP: hipError_t hipMemsetD8Async(hipDeviceptr_t dest, unsigned char value, size_t count, hipStream_t stream __dparm(0)); // CHECK: result = hipMemsetD8Async(deviceptr, uc, bytes, stream); result = cuMemsetD8Async(deviceptr, uc, bytes, stream); // CUDA: CUresult CUDAAPI cuMipmappedArrayCreate(CUmipmappedArray *pHandle, const CUDA_ARRAY3D_DESCRIPTOR *pMipmappedArrayDesc, unsigned int numMipmapLevels); // HIP: hipError_t hipMipmappedArrayCreate(hipMipmappedArray_t* pHandle, HIP_ARRAY3D_DESCRIPTOR* pMipmappedArrayDesc, unsigned int numMipmapLevels); // CHECK: result = hipMipmappedArrayCreate(&mipmappedArray, &ARRAY3D_DESCRIPTOR, flags); result = cuMipmappedArrayCreate(&mipmappedArray, &ARRAY3D_DESCRIPTOR, flags); // CUDA: CUresult CUDAAPI cuMipmappedArrayDestroy(CUmipmappedArray hMipmappedArray); // HIP: hipError_t hipMipmappedArrayDestroy(hipMipmappedArray_t hMipmappedArray); // CHECK: result = hipMipmappedArrayDestroy(mipmappedArray); result = cuMipmappedArrayDestroy(mipmappedArray); // CUDA: CUresult CUDAAPI cuMipmappedArrayGetLevel(CUarray *pLevelArray, CUmipmappedArray hMipmappedArray, unsigned int level); // HIP: hipError_t hipMipmappedArrayGetLevel(hipArray_t* pLevelArray, hipMipmappedArray_t hMipMappedArray, unsigned int level); // CHECK: result = hipMipmappedArrayGetLevel(&array_, mipmappedArray, flags); result = cuMipmappedArrayGetLevel(&array_, mipmappedArray, flags); #if CUDA_VERSION > 7050 
// CUDA: CUresult CUDAAPI cuMemAdvise(CUdeviceptr devPtr, size_t count, CUmem_advise advice, CUdevice device); // HIP: hipError_t hipMemAdvise(const void* dev_ptr, size_t count, hipMemoryAdvise advice, int device); // CHECK: result = hipMemAdvise(deviceptr, bytes, MemoryAdvise, device); result = cuMemAdvise(deviceptr, bytes, MemoryAdvise, device); // CUDA: CUresult CUDAAPI cuMemPrefetchAsync(CUdeviceptr devPtr, size_t count, CUdevice dstDevice, CUstream hStream); // HIP: hipError_t hipMemPrefetchAsync(const void* dev_ptr, size_t count, int device, hipStream_t stream __dparm(0)); // CHECK: result = hipMemPrefetchAsync(deviceptr, bytes, device, stream); result = cuMemPrefetchAsync(deviceptr, bytes, device, stream); // CUDA: CUresult CUDAAPI cuMemRangeGetAttribute(void *data, size_t dataSize, CUmem_range_attribute attribute, CUdeviceptr devPtr, size_t count); // HIP: hipError_t hipMemRangeGetAttribute(void* data, size_t data_size, hipMemRangeAttribute attribute, const void* dev_ptr, size_t count); // CHECK: result = hipMemRangeGetAttribute(image, bytes, MemoryRangeAttribute, deviceptr, bytes); result = cuMemRangeGetAttribute(image, bytes, MemoryRangeAttribute, deviceptr, bytes); // CUDA: CUresult CUDAAPI cuMemRangeGetAttributes(void **data, size_t *dataSizes, CUmem_range_attribute *attributes, size_t numAttributes, CUdeviceptr devPtr, size_t count); // HIP: hipError_t hipMemRangeGetAttributes(void** data, size_t* data_sizes, hipMemRangeAttribute* attributes, size_t num_attributes, const void* dev_ptr, size_t count); // CHECK: result = hipMemRangeGetAttributes(&image, &bytes, &MemoryRangeAttribute, bytes, deviceptr, bytes); result = cuMemRangeGetAttributes(&image, &bytes, &MemoryRangeAttribute, bytes, deviceptr, bytes); #endif // CUDA: CUresult CUDAAPI cuStreamAddCallback(CUstream hStream, CUstreamCallback callback, void *userData, unsigned int flags); // HIP: hipError_t hipStreamAddCallback(hipStream_t stream, hipStreamCallback_t callback, void* userData, unsigned int 
flags); // CHECK: result = hipStreamAddCallback(stream, streamCallback, image, flags); result = cuStreamAddCallback(stream, streamCallback, image, flags); // CUDA: CUresult CUDAAPI cuStreamAttachMemAsync(CUstream hStream, CUdeviceptr dptr, size_t length, unsigned int flags); // HIP: hipError_t hipStreamAttachMemAsync(hipStream_t stream, void* dev_ptr, size_t length __dparm(0), unsigned int flags __dparm(hipMemAttachSingle)); // CHECK: result = hipStreamAttachMemAsync(stream, deviceptr, bytes, flags); result = cuStreamAttachMemAsync(stream, deviceptr, bytes, flags); #if CUDA_VERSION > 10000 // CUDA: CUresult CUDAAPI cuStreamBeginCapture(CUstream hStream, CUstreamCaptureMode mode); // HIP: hipError_t hipStreamBeginCapture(hipStream_t stream, hipStreamCaptureMode mode); // CHECK: result = hipStreamBeginCapture(stream, streamCaptureMode); // CHECK-NEXT: result = hipStreamBeginCapture(stream, streamCaptureMode); result = cuStreamBeginCapture(stream, streamCaptureMode); result = cuStreamBeginCapture_v2(stream, streamCaptureMode); #endif // CUDA: CUresult CUDAAPI cuStreamCreate(CUstream *phStream, unsigned int Flags); // HIP: hipError_t hipStreamCreateWithFlags(hipStream_t* stream, unsigned int flags); // CHECK: result = hipStreamCreateWithFlags(&stream, flags); result = cuStreamCreate(&stream, flags); // CUDA: CUresult CUDAAPI cuStreamCreateWithPriority(CUstream *phStream, unsigned int flags, int priority); // HIP: hipError_t hipStreamCreateWithPriority(hipStream_t* stream, unsigned int flags, int priority); // CHECK: result = hipStreamCreateWithPriority(&stream, flags, leastPriority); result = cuStreamCreateWithPriority(&stream, flags, leastPriority); // CUDA: CUresult CUDAAPI cuStreamDestroy(CUstream hStream); // HIP: hipError_t hipStreamDestroy(hipStream_t stream); // CHECK: result = hipStreamDestroy(stream); // CHECK-NEXT: result = hipStreamDestroy(stream); result = cuStreamDestroy(stream); result = cuStreamDestroy_v2(stream); #if CUDA_VERSION > 9020 // CUDA: 
CUresult CUDAAPI cuStreamEndCapture(CUstream hStream, CUgraph *phGraph); // HIP: hipError_t hipStreamEndCapture(hipStream_t stream, hipGraph_t* pGraph); // CHECK: result = hipStreamEndCapture(stream, &graph); result = cuStreamEndCapture(stream, &graph); #endif // CUDA: CUresult CUDAAPI cuStreamGetFlags(CUstream hStream, unsigned int *flags); // HIP: hipError_t hipStreamGetFlags(hipStream_t stream, unsigned int* flags); // CHECK: result = hipStreamGetFlags(stream, &flags); result = cuStreamGetFlags(stream, &flags); // CUDA: CUresult CUDAAPI cuStreamGetPriority(CUstream hStream, int *priority); // HIP: hipError_t hipStreamGetPriority(hipStream_t stream, int* priority); // CHECK: result = hipStreamGetPriority(stream, &leastPriority); result = cuStreamGetPriority(stream, &leastPriority); // CUDA: CUresult CUDAAPI cuStreamQuery(CUstream hStream); // HIP: hipError_t hipStreamQuery(hipStream_t stream); // CHECK: result = hipStreamQuery(stream); result = cuStreamQuery(stream); // CUDA: CUresult CUDAAPI cuStreamSynchronize(CUstream hStream); // HIP: hipError_t hipStreamSynchronize(hipStream_t stream); // CHECK: result = hipStreamSynchronize(stream); result = cuStreamSynchronize(stream); // CUDA: CUresult CUDAAPI cuStreamWaitEvent(CUstream hStream, CUevent hEvent, unsigned int Flags); // HIP: hipError_t hipStreamWaitEvent(hipStream_t stream, hipEvent_t event, unsigned int flags); // CHECK: result = hipStreamWaitEvent(stream, event_, flags); result = cuStreamWaitEvent(stream, event_, flags); return 0; }
the_stack
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//     http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <ATen/ATen.h>
#include <THC/THCAtomics.cuh>

#include "../../utils.h"

// NOTE(review): eps and the `1.0` / `0.5` literals below are doubles; in the
// float instantiation of the kernels they promote the surrounding arithmetic
// to double — confirm this precision/perf trade-off is intended.
#define eps 1e-10
#define BLOCK_SIZE_X 32
#define BLOCK_SIZE_Y 32
#define BLOCK_SIZE 1024
#define NUM_SHARED_FACES 256
#define FULL_MASK 0xffffffff

namespace kaolin {

// Forward rasterization kernel: for every pixel, finds up to `knum` faces
// whose screen-space bounding box and barycentric test cover the pixel and
// whose interpolated depth lies in [min_depth, max_depth), and records the
// face id, two barycentric weights and the depth at consecutive slots of the
// per-pixel output arrays.
//
// Launch layout (set by deftet_sparse_render_forward_cuda_impl):
//   blockDim  = (BLOCK_SIZE_X=32, BLOCK_SIZE_Y=32) — one warp per pixel along
//               x, 32 pixels per block along y;
//   gridDim   = (batch_size, ceil(num_pixels / 32)).
// The warp-ballot compaction below relies on blockDim.x == warpSize == 32 so
// that all 32 lanes of a warp process the same pixel.
//
// Uses 32*5*8 floats/doubles of static shared memory to stage face bounding
// boxes (the padded middle dimension avoids bank conflicts on the re-read).
template <typename scalar_t>
__global__ void deftet_sparse_render_forward_cuda_kernel(
    const scalar_t *__restrict__ face_vertices_z,
    const scalar_t *__restrict__ face_vertices_image,
    const scalar_t *__restrict__ face_bboxes,
    const scalar_t *__restrict__ pixel_coords,
    const scalar_t *__restrict__ depth_limits,
    int64_t *__restrict__ face_ids,
    scalar_t *__restrict__ pixel_depths,
    scalar_t *__restrict__ w0_arr,
    scalar_t *__restrict__ w1_arr,
    int batch_size,
    int num_faces,
    int num_pixels,
    int knum) {
  scalar_t x0, y0, min_depth, max_depth;
  // prefix mask is a mask representing the threadIdx.x
  // example: for threadIdx.x == 2 then the binary mask is
  // 0000 0000 0000 0000 0000 0000 0000 0011
  unsigned prefix_mask = 0;
#pragma unroll
  for (int i = 0; i < BLOCK_SIZE_X; i++) {
    if (i < threadIdx.x) {
      prefix_mask = ((prefix_mask << 1) + 1);
    }
  }
  // Flat thread index within the block, used for the cooperative bbox load.
  const int threadGlobIdx = threadIdx.x + threadIdx.y * blockDim.x;
  __shared__ scalar_t shm_face_bboxes[32][5][8];
  // Grid-stride loop over batches (one blockIdx.x per batch element).
  for (int start_batch_idx = 0; start_batch_idx < batch_size;
       start_batch_idx += gridDim.x) {
    const int batch_idx = start_batch_idx + blockIdx.x;
    // threads with the same threadIdx.x (within the same warp)
    // shared the same pixel
    for (int start_pixel_idx = 0; start_pixel_idx < num_pixels;
         start_pixel_idx += blockDim.y * gridDim.y) {
      const int pixel_idx =
          start_pixel_idx + blockIdx.y * blockDim.y + threadIdx.y;
      const bool is_active_pixel = pixel_idx < num_pixels;
      const int main_idx = batch_idx * num_pixels + pixel_idx;
      const int pixel_coords_idx = main_idx * 2;
      if (is_active_pixel) {
        // TODO(cfujitsang): could also vectorize this load (Also ILP)
        // pixel coordinates
        x0 = pixel_coords[pixel_coords_idx + 0];
        y0 = pixel_coords[pixel_coords_idx + 1];
        min_depth = depth_limits[pixel_coords_idx + 0];
        max_depth = depth_limits[pixel_coords_idx + 1];
      }
      // Number of intersections already written for this pixel; insertion
      // stops once it reaches knum.
      int num_depths = 0;
      // Here we are batching load from face_bboxes to maximize bandwidth
      // we are loading 1 bboxe per thread (1024)
      // start_face_idx is the first index of the current load batch within the mesh
      // (i.e: 0 means first face of a mesh)
      for (int start_face_idx = 0; start_face_idx < num_faces;
           start_face_idx += NUM_SHARED_FACES) {
        // _start_idx is the index of the start_face_idx within the whole batch of mesh
        // (i.e: 0 means first face of the first mesh)
        const int _start_idx = batch_idx * num_faces + start_face_idx;
        // Barrier before overwriting the tile: the previous iteration may
        // still be reading it.
        __syncthreads();
        const int pointsbbox_idx = threadGlobIdx + _start_idx * 4;
        // All the load are coalescent we use a shared memory of dims [32][5][8]
        // to avoid bank conflicts in the second step
        if (threadGlobIdx + start_face_idx * 4 < num_faces * 4) {
          shm_face_bboxes[threadIdx.y][threadIdx.x % 4]
                         [(threadIdx.x - (threadIdx.x % 4)) / 4] =
              face_bboxes[pointsbbox_idx];
        }
        __syncthreads();
        // We needed all the threads to be active for loading face_bboxes
        // in shared memory but now we can skip the computation
        // if the thread is not processing any pixel.
        // (All 32 lanes of a warp share threadIdx.y, hence the same pixel,
        // so the whole warp takes the same branch and the __ballot_sync
        // below still sees a fully converged warp.)
        if (!(is_active_pixel)) {
          continue;
        }
#pragma unroll
        for (int sub_start_face_idx = 0; sub_start_face_idx < NUM_SHARED_FACES;
             sub_start_face_idx += blockDim.x) {
          const int i = sub_start_face_idx + threadIdx.x;
          const int last_idx = i % 8;
          const int first_idx = (i - last_idx) / 8;
          scalar_t w0, w1, w2, pixel_depth;
          bool is_intersecting = false;
          const int face_idx = start_face_idx + i;
          if (face_idx < num_faces) {
            const int shift1 = batch_idx * num_faces + face_idx;
            const int shift6 = shift1 * 6;
            const scalar_t xmin = shm_face_bboxes[first_idx][0][last_idx];
            const scalar_t ymin = shm_face_bboxes[first_idx][1][last_idx];
            const scalar_t xmax = shm_face_bboxes[first_idx][2][last_idx];
            const scalar_t ymax = shm_face_bboxes[first_idx][3][last_idx];
            // Is the pixel covered by the bounding box?
            // [min, max)
            if (x0 >= xmin && x0 < xmax && y0 >= ymin && y0 < ymax) {
              const scalar_t ax = face_vertices_image[shift6 + 0];
              const scalar_t ay = face_vertices_image[shift6 + 1];
              const scalar_t bx = face_vertices_image[shift6 + 2];
              const scalar_t by = face_vertices_image[shift6 + 3];
              const scalar_t cx = face_vertices_image[shift6 + 4];
              const scalar_t cy = face_vertices_image[shift6 + 5];
              // Compute barycenter weights for the intersection
              // TODO(cfujitsang): can we use tensorcore for some of this?
              // Solve the 2x2 system  m*w1 + n*w2 = s,  p*w1 + q*w2 = t
              // by Cramer's rule; eps guards the zero-area (degenerate) case.
              scalar_t m = bx - ax;
              scalar_t p = by - ay;
              scalar_t n = cx - ax;
              scalar_t q = cy - ay;
              scalar_t s = x0 - ax;
              scalar_t t = y0 - ay;
              scalar_t k1 = s * q - n * t;
              scalar_t k2 = m * t - s * p;
              scalar_t k3 = m * q - n * p;
              w1 = k1 / (k3 + eps);
              w2 = k2 / (k3 + eps);
              w0 = 1.0 - w1 - w2;
              // Is the pixel covered by the face?
              if (w0 >= 0 && w1 >= 0 && w2 >= 0) {
                // Here we are computing intersection depth
                // we can use either distance from camera or
                // distance from image plan as it won't affect ordering
                const int shift3 = shift1 * 3;
                scalar_t az = face_vertices_z[shift3 + 0];
                scalar_t bz = face_vertices_z[shift3 + 1];
                scalar_t cz = face_vertices_z[shift3 + 2];
                // TODO(cfujitsang): can we use tensorcore ?
                pixel_depth = w0 * az + w1 * bz + w2 * cz;
                if (pixel_depth < max_depth && pixel_depth >= min_depth &&
                    num_depths < knum) {
                  is_intersecting = true;
                }
              }
            }
          }
          // Since warp are sharing the faces for the same pixel we are sharing
          // the information of what thread have an intersection to render
          // this information is stored as a mask in intersection_mask
          unsigned intersection_mask =
              __ballot_sync(FULL_MASK, is_intersecting);
          int num_inserted = __popc(intersection_mask);
          if (is_intersecting) {
            // With the intersection mask we can compute insertion position for each threads
            // so we ensure that insertion is coalescent without holes
            // example:
            // if threadIdx.x 0 and 2 have an intersection to render but not threadIdx.x 1,
            // then threadIdx.x 0 and 2 should intersect in two consecutive addresses.
            unsigned prefix_intersection_mask = intersection_mask & prefix_mask;
            int insertion_idx = num_depths + __popc(prefix_intersection_mask);
            if (insertion_idx < knum) {
              int true_insertion_idx =
                  insertion_idx + (batch_idx * num_pixels + pixel_idx) * knum;
              face_ids[true_insertion_idx] = face_idx;
              w0_arr[true_insertion_idx] = w0;
              w1_arr[true_insertion_idx] = w1;
              pixel_depths[true_insertion_idx] = pixel_depth;
            }
          }
          // Advance the per-pixel cursor by everything the warp found, even
          // if some insertions overflowed knum and were dropped.
          num_depths += num_inserted;
        }
      }
    }
  }
}

// Host launcher for the forward kernel.
// Derives sizes from the tensors (face_vertices_z: [batch, faces, 3];
// selected_face_idx: [batch, pixels, knum] — TODO confirm against callers),
// launches one 32x32 block column per batch element, and dispatches over
// float/double. Output tensors are filled in place; kernel launch errors are
// surfaced via CUDA_CHECK(cudaGetLastError()).
void deftet_sparse_render_forward_cuda_impl(
    at::Tensor face_vertices_z,
    at::Tensor face_vertices_image,
    at::Tensor face_bboxes,
    at::Tensor pixel_coords,
    at::Tensor pixel_depth_ranges,
    at::Tensor selected_face_idx,
    at::Tensor pixel_depths,
    at::Tensor w0_arr,
    at::Tensor w1_arr) {
  int batch_size = face_vertices_z.size(0);
  int num_faces = face_vertices_z.size(1);
  int num_pixels = selected_face_idx.size(1);
  int knum = selected_face_idx.size(2);
  const int num_thread_per_pixel = BLOCK_SIZE_X;
  const int num_pixel_per_block = BLOCK_SIZE_Y;
  // ceil-div so a partial block covers the tail pixels.
  const int num_block_per_sample =
      (num_pixels + num_pixel_per_block - 1) / num_pixel_per_block;
  const dim3 threads(num_thread_per_pixel, num_pixel_per_block, 1);
  const dim3 blocks(batch_size, num_block_per_sample, 1);
  AT_DISPATCH_FLOATING_TYPES(face_vertices_z.scalar_type(),
    "deftet_sparse_render_forward_cuda", ([&] {
      deftet_sparse_render_forward_cuda_kernel<scalar_t><<<blocks, threads>>>(
          face_vertices_z.data_ptr<scalar_t>(),
          face_vertices_image.data_ptr<scalar_t>(),
          face_bboxes.data_ptr<scalar_t>(),
          pixel_coords.data_ptr<scalar_t>(),
          pixel_depth_ranges.data_ptr<scalar_t>(),
          selected_face_idx.data_ptr<int64_t>(),
          pixel_depths.data_ptr<scalar_t>(),
          w0_arr.data_ptr<scalar_t>(),
          w1_arr.data_ptr<scalar_t>(),
          batch_size,
          num_faces,
          num_pixels,
          knum);
      CUDA_CHECK(cudaGetLastError());
    }));
  return;
}

// Backward kernel: given upstream gradients w.r.t. the interpolated features
// of each recorded (pixel, k) intersection, accumulates
//   - d(loss)/d(face_features) via the barycentric weights, and
//   - d(loss)/d(face_vertices_image) via analytic derivatives of the
//     Cramer's-rule weights (w1 = k1/k3, w2 = k2/k3; quotient rule, with the
//     common 1/k3^2 factor folded into dldI).
// Entries with face_ids[idx] < 0 are treated as empty slots and skipped.
// Plain 1-D grid-stride loop; concurrent writes to the same face are resolved
// with atomicAdd (note: atomicAdd on double requires SM60+).
template <typename scalar_t>
__global__ void deftet_sparse_render_backward_cuda_kernel(
    const scalar_t *__restrict__ grad_interpolated_features,
    const int64_t *__restrict__ face_ids,
    const scalar_t *__restrict__ weights,
    const scalar_t *__restrict__ face_vertices_image,
    const scalar_t *__restrict__ face_features,
    scalar_t *__restrict__ grad_face_vertices_image,
    scalar_t *__restrict__ grad_face_features,
    int batch_size,
    int num_faces,
    int num_pixels,
    int knum,
    int feat_dim) {
  // Each iteration is treating a single feature of a single pixel
  for (int idx = blockIdx.x * blockDim.x + threadIdx.x;
       idx < batch_size * num_pixels * knum;
       idx += blockDim.x * gridDim.x) {
    // Decompose the flat index into (batch_idx, pixel_idx, k_idx).
    const int k_idx = idx % knum;
    const int true_pixel_idx = (idx - k_idx) / knum;
    const int pixel_idx = true_pixel_idx % num_pixels;
    const int batch_idx = (true_pixel_idx - pixel_idx) / num_pixels;
    const int start_weight_idx = idx * 3;
    const int start_feat_idx = idx * feat_dim;
    const int face_idx = face_ids[idx];
    if (face_idx >= 0) {
      const int true_face_idx = batch_idx * num_faces + face_idx;
      const int start_image_idx = true_face_idx * 6;
      const int start_features_idx = true_face_idx * 3 * feat_dim;
      // gradient of face_features
#pragma unroll
      for (int ii = 0; ii < 3; ii++) {
        scalar_t w = weights[start_weight_idx + ii];
        int pointshift = start_features_idx + ii * feat_dim;
        for (int feat_idx = 0; feat_idx < feat_dim; feat_idx++) {
          int colorshift = pointshift + feat_idx;
          // this should be atomic operation
          scalar_t *addr = grad_face_features + colorshift;
          scalar_t val =
              grad_interpolated_features[start_feat_idx + feat_idx] * w;
          atomicAdd(addr, val);
        }
      }
      // gradient of points
      // here, we calculate dl/dp
      // dl/dp = dldI * dI/dp
      // dI/dp = c0 * dw0 / dp + c1 * dw1 / dp + c2 * dw2 / dp
      scalar_t ax = face_vertices_image[start_image_idx + 0];
      scalar_t ay = face_vertices_image[start_image_idx + 1];
      scalar_t bx = face_vertices_image[start_image_idx + 2];
      scalar_t by = face_vertices_image[start_image_idx + 3];
      scalar_t cx = face_vertices_image[start_image_idx + 4];
      scalar_t cy = face_vertices_image[start_image_idx + 5];
      scalar_t aw = weights[start_weight_idx + 0];
      scalar_t bw = weights[start_weight_idx + 1];
      scalar_t cw = weights[start_weight_idx + 2];
      // Reconstruct the pixel position from the stored barycentric weights.
      scalar_t x0 = aw * ax + bw * bx + cw * cx;
      scalar_t y0 = aw * ay + bw * by + cw * cy;
      scalar_t m = bx - ax;
      scalar_t p = by - ay;
      scalar_t n = cx - ax;
      scalar_t q = cy - ay;
      scalar_t s = x0 - ax;
      scalar_t t = y0 - ay;
      // m * w1 + n * w2 = s
      // p * w1 + q * w2 = t
      // w1 = (sq - nt) / (mq - np)
      // w2 = (mt - sp) / (mq - np)
      scalar_t k1 = s * q - n * t;
      scalar_t k2 = m * t - s * p;
      scalar_t k3 = m * q - n * p + eps;
      // Partial derivatives of k1, k2, k3 w.r.t. the intermediates
      // m, n, p, q, s, t (straight from the bilinear forms above).
      scalar_t dk1dm = 0;
      scalar_t dk1dn = -t;
      scalar_t dk1dp = 0;
      scalar_t dk1dq = s;
      scalar_t dk1ds = q;
      scalar_t dk1dt = -n;
      scalar_t dk2dm = t;
      scalar_t dk2dn = 0;
      scalar_t dk2dp = -s;
      scalar_t dk2dq = 0;
      scalar_t dk2ds = -p;
      scalar_t dk2dt = m;
      scalar_t dk3dm = q;
      scalar_t dk3dn = -p;
      scalar_t dk3dp = -n;
      scalar_t dk3dq = m;
      scalar_t dk3ds = 0;
      scalar_t dk3dt = 0;
      // w1 = k1 / k3
      // w2 = k2 / k3
      // we need divide k3 ^ 2
      // (the 1/k3^2 quotient-rule factor is applied once, in dldI below)
      scalar_t dw1dm = dk1dm * k3 - dk3dm * k1;
      scalar_t dw1dn = dk1dn * k3 - dk3dn * k1;
      scalar_t dw1dp = dk1dp * k3 - dk3dp * k1;
      scalar_t dw1dq = dk1dq * k3 - dk3dq * k1;
      scalar_t dw1ds = dk1ds * k3 - dk3ds * k1;
      scalar_t dw1dt = dk1dt * k3 - dk3dt * k1;
      scalar_t dw2dm = dk2dm * k3 - dk3dm * k2;
      scalar_t dw2dn = dk2dn * k3 - dk3dn * k2;
      scalar_t dw2dp = dk2dp * k3 - dk3dp * k2;
      scalar_t dw2dq = dk2dq * k3 - dk3dq * k2;
      scalar_t dw2ds = dk2ds * k3 - dk3ds * k2;
      scalar_t dw2dt = dk2dt * k3 - dk3dt * k2;
      // Chain rule through m,n,p,q,s,t to the six vertex coordinates.
      scalar_t dw1dax = -(dw1dm + dw1dn + dw1ds);
      scalar_t dw1day = -(dw1dp + dw1dq + dw1dt);
      scalar_t dw1dbx = dw1dm;
      scalar_t dw1dby = dw1dp;
      scalar_t dw1dcx = dw1dn;
      scalar_t dw1dcy = dw1dq;
      scalar_t dw2dax = -(dw2dm + dw2dn + dw2ds);
      scalar_t dw2day = -(dw2dp + dw2dq + dw2dt);
      scalar_t dw2dbx = dw2dm;
      scalar_t dw2dby = dw2dp;
      scalar_t dw2dcx = dw2dn;
      scalar_t dw2dcy = dw2dq;
      for (int feat_idx = 0; feat_idx < feat_dim; feat_idx++) {
        scalar_t c0 = face_features[start_features_idx + feat_idx];
        scalar_t c1 = face_features[start_features_idx + feat_dim + feat_idx];
        scalar_t c2 =
            face_features[start_features_idx + feat_dim + feat_dim + feat_idx];
        // dI/dp with w0 = 1 - w1 - w2 folded in: coefficients become (c1-c0)
        // and (c2-c0).
        scalar_t dIdax = (c1 - c0) * dw1dax + (c2 - c0) * dw2dax;
        scalar_t dIday = (c1 - c0) * dw1day + (c2 - c0) * dw2day;
        scalar_t dIdbx = (c1 - c0) * dw1dbx + (c2 - c0) * dw2dbx;
        scalar_t dIdby = (c1 - c0) * dw1dby + (c2 - c0) * dw2dby;
        scalar_t dIdcx = (c1 - c0) * dw1dcx + (c2 - c0) * dw2dcx;
        scalar_t dIdcy = (c1 - c0) * dw1dcy + (c2 - c0) * dw2dcy;
        scalar_t dldI =
            grad_interpolated_features[start_feat_idx + feat_idx] / (k3 * k3);
        atomicAdd(grad_face_vertices_image + start_image_idx + 0,
                  dldI * dIdax);
        atomicAdd(grad_face_vertices_image + start_image_idx + 1,
                  dldI * dIday);
        atomicAdd(grad_face_vertices_image + start_image_idx + 2,
                  dldI * dIdbx);
        atomicAdd(grad_face_vertices_image + start_image_idx + 3,
                  dldI * dIdby);
        atomicAdd(grad_face_vertices_image + start_image_idx + 4,
                  dldI * dIdcx);
        atomicAdd(grad_face_vertices_image + start_image_idx + 5,
                  dldI * dIdcy);
      }
    }
  }
}

// Host launcher for the backward kernel.
// Sizes come from grad_interpolated_features ([batch, pixels, knum, feat_dim]
// — TODO confirm against callers) and grad_face_vertices_image (dim 1 =
// num_faces). One thread per (pixel, k) slot, 512 threads per block;
// gradient tensors are accumulated in place (caller is expected to zero them).
void deftet_sparse_render_backward_cuda_impl(
    at::Tensor grad_interpolated_features,
    at::Tensor face_idx,
    at::Tensor weights,
    at::Tensor face_vertices_image,
    at::Tensor face_features,
    at::Tensor grad_face_vertices_image,
    at::Tensor grad_face_features) {
  int batch_size = grad_interpolated_features.size(0);
  int num_pixels = grad_interpolated_features.size(1);
  int knum = grad_interpolated_features.size(2);
  int feat_dim = grad_interpolated_features.size(3);
  int num_faces = grad_face_vertices_image.size(1);
  // for bxhxw image size
  const int threads = 512;
  const int totalthread = batch_size * num_pixels * knum;
  const int blocks = (totalthread + threads - 1) / threads;
  AT_DISPATCH_FLOATING_TYPES(grad_interpolated_features.scalar_type(),
    "deftet_sparse_render_backward_cuda", ([&] {
      deftet_sparse_render_backward_cuda_kernel<scalar_t><<<blocks, threads>>>(
          grad_interpolated_features.data_ptr<scalar_t>(),
          face_idx.data_ptr<int64_t>(),
          weights.data_ptr<scalar_t>(),
          face_vertices_image.data_ptr<scalar_t>(),
          face_features.data_ptr<scalar_t>(),
          grad_face_vertices_image.data_ptr<scalar_t>(),
          grad_face_features.data_ptr<scalar_t>(),
          batch_size,
          num_faces,
          num_pixels,
          knum,
          feat_dim);
      CUDA_CHECK(cudaGetLastError());
    })
  );
}

}  // namespace kaolin
the_stack
#include <isce3/core/Constants.h>
#include <isce3/core/DateTime.h>
#include <isce3/core/Ellipsoid.h>
#include <isce3/core/LookSide.h>
#include <isce3/core/LUT1d.h>
#include <isce3/core/Peg.h>
#include <isce3/core/Pegtrans.h>
#include <isce3/core/Projections.h>
#include <isce3/geometry/DEMInterpolator.h>
#include <isce3/geometry/RTC.h>
#include <isce3/geometry/Topo.h>
#include <isce3/product/Product.h>

#include <isce3/cuda/core/gpuLUT1d.h>
#include <isce3/cuda/core/Orbit.h>
#include <isce3/cuda/core/OrbitView.h>
#include <isce3/cuda/except/Error.h>
#include <isce3/cuda/geometry/gpuGeometry.h>
#include <isce3/cuda/geometry/gpuDEMInterpolator.h>

// Radar-grid geometry parameters shared by both kernels below.
// Written exactly once per run by computeRtc() via cudaMemcpyToSymbol
// before any kernel launch.
__constant__ double start, r0, pixazm, dr; // azimuth start, range start, spacings
__constant__ float xbound, ybound;         // last valid range / azimuth pixel index

using isce3::core::avgLUT2dToLUT1d;
using isce3::core::OrbitInterpBorderMode;
using isce3::core::Vec3;
using isce3::core::Mat3;

/**
 * Decompose the upsampled DEM into two triangular facets per DEM pixel and
 * accumulate each facet's radar-projected area into the output radar grid
 * using bilinear weights.
 *
 * Launch: 2-D grid, one thread per upsampled DEM pixel (xmax x ymax).
 * Precondition: `out` must be zero-initialized by the caller, since areas are
 * accumulated with atomicAdd.
 */
__global__ void facet(float* out, size_t xmax, size_t ymax,
                      float upsample_factor,
                      isce3::cuda::geometry::gpuDEMInterpolator dem_interp,
                      isce3::core::Ellipsoid ellps,
                      isce3::cuda::core::OrbitView orbit,
                      isce3::cuda::core::gpuLUT1d<double> dop,
                      size_t width, double wavelength,
                      isce3::core::LookSide side)
{
    size_t xidx = threadIdx.x + blockIdx.x * blockDim.x;
    size_t yidx = threadIdx.y + blockIdx.y * blockDim.y;

    // FIX: the launch grid is rounded up to whole blocks, so threads past the
    // upsampled DEM edge must exit before touching the interpolator.
    // (xmax/ymax were passed in but never checked.)
    if (xidx >= xmax or yidx >= ymax)
        return;

    // Current y-coord bounds and midpoint in DEM coordinates
    const double dem_y0 = dem_interp.yStart() +
                          yidx * dem_interp.deltaY() / upsample_factor;
    const double dem_y1 = dem_y0 + dem_interp.deltaY() / upsample_factor;
    const double dem_ymid = dem_interp.yStart() +
                            (0.5 + yidx) * dem_interp.deltaY() / upsample_factor;

    Vec3 lookXYZ;
    const double dem_xmid = dem_interp.xStart() +
                            dem_interp.deltaX() * (xidx + 0.5) / upsample_factor;

    // Map the pixel center to radar coordinates (azimuth time a, slant range r)
    double a, r;
    Vec3 inputLLH;
    const Vec3 inputDEM { dem_xmid, dem_ymid,
                          dem_interp.interpolateXY(dem_xmid, dem_ymid) };
    int epsgcode = dem_interp.epsgCode();
    isce3::cuda::core::projInverse(epsgcode, inputDEM, inputLLH);
    isce3::cuda::geometry::geo2rdr(inputLLH, ellps, orbit, dop, &a, &r,
                                   wavelength, side, 1e-4, 20, 1e-4);

    const float azpix = (a - start) / pixazm;
    const float ranpix = (r - r0) / dr;

    // Establish bounds for bilinear weighting model
    const float x1 = std::floor(ranpix);
    const float x2 = x1 + 1.;
    const float y1 = std::floor(azpix);
    const float y2 = y1 + 1.;

    // Check to see if pixel lies in valid RDC range
    if (ranpix < 0.0 or x2 > xbound or azpix < 0.0 or y2 > ybound)
        return;

    // Current x-coord bounds in DEM coordinates
    const double dem_x0 = dem_interp.xStart() +
                          dem_interp.deltaX() * xidx / upsample_factor;
    const double dem_x1 = dem_x0 + dem_interp.deltaX() / upsample_factor;

    // Set DEM-coordinate corner vectors
    const Vec3 dem00 {dem_x0, dem_y0, dem_interp.interpolateXY(dem_x0, dem_y0)};
    const Vec3 dem01 {dem_x0, dem_y1, dem_interp.interpolateXY(dem_x0, dem_y1)};
    const Vec3 dem10 {dem_x1, dem_y0, dem_interp.interpolateXY(dem_x1, dem_y0)};
    const Vec3 dem11 {dem_x1, dem_y1, dem_interp.interpolateXY(dem_x1, dem_y1)};

    // Get LLH corner vectors
    Vec3 llh00, llh01, llh10, llh11;
    isce3::cuda::core::projInverse(epsgcode, dem00, llh00);
    isce3::cuda::core::projInverse(epsgcode, dem01, llh01);
    isce3::cuda::core::projInverse(epsgcode, dem10, llh10);
    isce3::cuda::core::projInverse(epsgcode, dem11, llh11);

    // Convert to ECEF XYZ
    const Vec3 xyz00 = ellps.lonLatToXyz(llh00);
    const Vec3 xyz01 = ellps.lonLatToXyz(llh01);
    const Vec3 xyz10 = ellps.lonLatToXyz(llh10);
    const Vec3 xyz11 = ellps.lonLatToXyz(llh11);

    // Compute normal vectors for each facet
    const Vec3 normalFacet1 = isce3::core::normalPlane(xyz00, xyz10, xyz01);
    const Vec3 normalFacet2 = isce3::core::normalPlane(xyz01, xyz10, xyz11);

    // Side lengths
    const double p00_01 = (xyz00 - xyz01).norm();
    const double p00_10 = (xyz00 - xyz10).norm();
    const double p10_01 = (xyz10 - xyz01).norm();
    const double p11_01 = (xyz11 - xyz01).norm();
    const double p11_10 = (xyz11 - xyz10).norm();

    // Semi-perimeters
    const float h1 = 0.5 * (p00_01 + p00_10 + p10_01);
    const float h2 = 0.5 * (p11_01 + p11_10 + p10_01);

    // Heron's formula to get area of facets in XYZ coordinates
    const float AP1 = std::sqrt(h1 * (h1 - p00_01) * (h1 - p00_10) * (h1 - p10_01));
    const float AP2 = std::sqrt(h2 * (h2 - p11_01) * (h2 - p11_10) * (h2 - p10_01));

    // Compute look vector from ground midpoint to sensor position at time a
    const Vec3 xyz_mid = ellps.lonLatToXyz(inputLLH);
    Vec3 xyz_plat;
    orbit.interpolate(&xyz_plat, nullptr, a, OrbitInterpBorderMode::FillNaN);
    lookXYZ = (xyz_plat - xyz_mid).normalized();

    // Compute dot product between each facet normal and look vector
    const double cosIncFacet1 = lookXYZ.dot(normalFacet1);
    const double cosIncFacet2 = lookXYZ.dot(normalFacet2);

    // If facets are not illuminated by radar, skip
    if (cosIncFacet1 < 0. or cosIncFacet2 < 0.)
        return;

    // Compute projected area
    const float area = AP1 * cosIncFacet1 + AP2 * cosIncFacet2;

    // Get integer indices of bounds
    const int ix1 = static_cast<int>(x1);
    const int ix2 = static_cast<int>(x2);
    const int iy1 = static_cast<int>(y1);
    const int iy2 = static_cast<int>(y2);

    // Compute fractional weights from indices
    const float Wr = ranpix - x1;
    const float Wa = azpix - y1;
    const float Wrc = 1. - Wr;
    const float Wac = 1. - Wa;

    // Use bilinear weighting to distribute area over the four RDC neighbors
    atomicAdd(&out[width * iy1 + ix1], area * Wrc * Wac);
    atomicAdd(&out[width * iy1 + ix2], area * Wr * Wac);
    atomicAdd(&out[width * iy2 + ix1], area * Wrc * Wa);
    atomicAdd(&out[width * iy2 + ix2], area * Wr * Wa);
}

/**
 * Compute the flat earth incidence angle correction applied by UAVSAR
 * processing: scales each radar pixel in `out` (in place) by sin(theta), the
 * sine of the incidence angle at a constant-height (avg_hgt) reference surface.
 *
 * Launch: 2-D grid, one thread per radar pixel (width x length).
 */
__global__ void flatearth(float* out,
                          const isce3::cuda::geometry::gpuDEMInterpolator flat_interp,
                          const isce3::cuda::core::OrbitView orbit,
                          const isce3::core::Ellipsoid ellps,
                          size_t length, size_t width,
                          double wavelength, isce3::core::LookSide lookSide,
                          float avg_hgt)
{
    size_t j = threadIdx.x + blockIdx.x * blockDim.x;
    size_t i = threadIdx.y + blockIdx.y * blockDim.y;
    if (j >= width or i >= length)
        return;

    // Platform position at this azimuth line's time
    Vec3 xyz_plat;
    double t = start + i * pixazm;
    orbit.interpolate(&xyz_plat, nullptr, t, OrbitInterpBorderMode::FillNaN);

    // Slant range for current pixel
    const double slt_range = r0 + j * dr;

    // Get LLH coordinates for this azimuth/range on the flat reference surface
    Vec3 targetLLH;
    targetLLH[2] = avg_hgt; // initialize first guess
    isce3::cuda::geometry::rdr2geo(start + i * pixazm, slt_range, 0, orbit,
                                   ellps, flat_interp, targetLLH, wavelength,
                                   lookSide, 1e-4, 20, 20);

    // Computation of ENU coordinates around ground target
    const Vec3 targetXYZ = ellps.lonLatToXyz(targetLLH);
    const Vec3 satToGround = targetXYZ - xyz_plat;
    const Mat3 xyz2enu = Mat3::xyzToEnu(targetLLH[1], targetLLH[0]);
    const Vec3 enu = xyz2enu.dot(satToGround);

    // Incidence angle components: cos(theta) from the up component
    const double costheta = fabs(enu[2]) / enu.norm();
    const double sintheta = sqrt(1. - costheta * costheta);

    out[width * i + j] *= sintheta;
}

/**
 * Estimate how much the DEM must be upsampled so that one (upsampled) DEM
 * pixel is no larger than one radar pixel: sqrt(DEM pixel area / radar pixel
 * area), with areas measured in ECEF meters around the DEM center.
 */
double computeUpsamplingFactor(const isce3::geometry::DEMInterpolator& dem_interp,
                               const isce3::core::Ellipsoid& ellps,
                               double rangePixelSpacing)
{
    // FIX: own the projection object so it is released on return
    // (the raw pointer from createProj was previously leaked).
    std::unique_ptr<isce3::core::ProjectionBase> proj(
            isce3::core::createProj(dem_interp.epsgCode()));

    // Get middle XY coordinate in DEM coords, lat/lon, and ECEF XYZ
    Vec3 demXY{dem_interp.midX(), dem_interp.midY(), 0.}, llh;
    proj->inverse(demXY, llh);
    Vec3 xyz0;
    ellps.lonLatToXyz(llh, xyz0);

    // Repeat for middle coordinate + deltaX
    demXY[0] += dem_interp.deltaX();
    proj->inverse(demXY, llh);
    Vec3 xyz1;
    ellps.lonLatToXyz(llh, xyz1);

    // Repeat for middle coordinate + deltaX + deltaY
    demXY[1] += dem_interp.deltaY();
    proj->inverse(demXY, llh);
    Vec3 xyz2;
    ellps.lonLatToXyz(llh, xyz2);

    // Estimate width of DEM pixel
    Vec3 delta = xyz1 - xyz0;
    const double dx = delta.norm();
    // Estimate length of DEM pixel
    delta = xyz2 - xyz1;
    const double dy = delta.norm();

    // Compute area of DEM pixel
    const double demArea = dx * dy;
    // Compute area of radar pixel (for now, just use spacing in range direction)
    const double radarArea = rangePixelSpacing * rangePixelSpacing;

    // Upsampling factor is the ratio
    return std::sqrt(demArea / radarArea);
}

// Copy a host object to freshly allocated device memory.
// Caller owns the returned device pointer (must cudaFree it).
template<typename T>
T* deviceCopy(T& host_obj) {
    T* dev_obj;
    checkCudaErrors(cudaMalloc(&dev_obj, sizeof(T)));
    checkCudaErrors(cudaMemcpy(dev_obj, &host_obj, sizeof(T),
                               cudaMemcpyHostToDevice));
    return dev_obj;
}

// Pointer overload of deviceCopy; same ownership contract.
template<typename T>
T* deviceCopy(T* host_obj) {
    T* dev_obj;
    checkCudaErrors(cudaMalloc(&dev_obj, sizeof(T)));
    checkCudaErrors(cudaMemcpy(dev_obj, host_obj, sizeof(T),
                               cudaMemcpyHostToDevice));
    return dev_obj;
}

namespace isce3 { namespace cuda { namespace geometry {

/**
 * Compute radiometric terrain correction (RTC) area factors for one frequency
 * band of `product` over `dem`, writing the result into `out_raster`.
 *
 * Pipeline: upload radar-grid constants -> facet kernel accumulates projected
 * facet areas over the upsampled DEM -> flatearth kernel applies the flat
 * earth incidence-angle correction -> copy back and write the raster block.
 */
void computeRtc(isce3::product::Product& product, isce3::io::Raster& dem,
                isce3::io::Raster& out_raster, char frequency)
{
    isce3::core::Ellipsoid ellps_h;
    isce3::core::Orbit orbit_h(product.metadata().orbit());
    isce3::product::RadarGridParameters radarGrid(product, frequency);
    isce3::geometry::Topo topo_h(product, frequency, true);
    const isce3::core::LookSide lookDirection = product.lookSide();

    // Initialize other ISCE objects
    isce3::core::Peg peg;
    isce3::core::Pegtrans ptm;
    ptm.radarToXYZ(ellps_h, peg);

    const double start_h = radarGrid.sensingStart();
    const double end = radarGrid.sensingStop();
    const double pixazm_h =
            (end - start_h) / radarGrid.length(); // azimuth difference per pixel
    const double r0_h = radarGrid.startingRange();
    const double dr_h = radarGrid.rangePixelSpacing();
    const float xbound_h = radarGrid.width() - 1.;
    const float ybound_h = radarGrid.length() - 1.;

    // Publish radar-grid constants to __constant__ memory
    checkCudaErrors(cudaMemcpyToSymbol(start, &start_h, sizeof(start_h)));
    checkCudaErrors(cudaMemcpyToSymbol(pixazm, &pixazm_h, sizeof(pixazm_h)));
    checkCudaErrors(cudaMemcpyToSymbol(r0, &r0_h, sizeof(r0_h)));
    checkCudaErrors(cudaMemcpyToSymbol(dr, &dr_h, sizeof(dr_h)));
    checkCudaErrors(cudaMemcpyToSymbol(xbound, &xbound_h, sizeof(xbound_h)));
    checkCudaErrors(cudaMemcpyToSymbol(ybound, &ybound_h, sizeof(ybound_h)));

    // Output raster
    auto out = std::make_unique<float[]>(radarGrid.size());
    float* out_d;
    checkCudaErrors(cudaMalloc(&out_d, radarGrid.size() * sizeof(float)));
    // FIX: facet() accumulates with atomicAdd and flatearth() multiplies in
    // place, so the buffer must start at zero (cudaMalloc does not zero).
    checkCudaErrors(cudaMemset(out_d, 0, radarGrid.size() * sizeof(float)));

    // ------------------------------------------------------------------------
    // Main code: decompose DEM into facets, compute RDC coordinates
    // ------------------------------------------------------------------------

    // Create CPU-only objects
    isce3::geometry::DEMInterpolator dem_interp_h(
            0, isce3::core::dataInterpMethod::BIQUINTIC_METHOD);
    topo_h.computeDEMBounds(dem, dem_interp_h, 0,
                            radarGrid.length()); // determine DEM bounds

    const float upsample_factor = computeUpsamplingFactor(
            dem_interp_h, ellps_h, radarGrid.rangePixelSpacing());

    float max_hgt, avg_hgt;
    pyre::journal::info_t info("gpuRTC");
    dem_interp_h.computeHeightStats(max_hgt, avg_hgt, info);
    isce3::cuda::geometry::gpuDEMInterpolator flat_interp(avg_hgt);

    // Create host-side device objects
    isce3::cuda::geometry::gpuDEMInterpolator dem_interp(dem_interp_h);
    isce3::core::Ellipsoid ellps(ellps_h);
    isce3::cuda::core::Orbit orbit(orbit_h);

    // Convert LUT2d doppler to LUT1d
    isce3::core::LUT1d<double> dop_h(avgLUT2dToLUT1d(
            product.metadata().procInfo().dopplerCentroid(frequency)));
    isce3::cuda::core::gpuLUT1d<double> dop(dop_h);

    const size_t xmax = dem_interp.width() * upsample_factor;
    const size_t ymax = dem_interp.length() * upsample_factor;

    dem_interp.initProjInterp();

#define BLOCK_X 16
#define BLOCK_Y 16
    static_assert(BLOCK_X * BLOCK_Y <= 256,
                  "RTC block dim too large for resources available on GPU");

    {
        dim3 block(BLOCK_X, BLOCK_Y);
        dim3 grid(xmax / BLOCK_X + 1, ymax / BLOCK_Y + 1);
        facet<<<grid, block>>>(out_d, xmax, ymax, upsample_factor, dem_interp,
                               ellps, orbit, dop, radarGrid.width(),
                               radarGrid.wavelength(), lookDirection);
        checkCudaErrors(cudaPeekAtLastError());
        checkCudaErrors(cudaDeviceSynchronize());
    }

    {
        dim3 block(BLOCK_X, BLOCK_Y);
        // FIX: the y dimension of the grid must cover radarGrid.length()
        // (was radarGrid.width() for both axes, leaving rows unprocessed
        // whenever length > width).
        dim3 grid(radarGrid.width() / BLOCK_X + 1,
                  radarGrid.length() / BLOCK_Y + 1);
        flatearth<<<grid, block>>>(out_d, flat_interp, orbit, ellps,
                                   radarGrid.length(), radarGrid.width(),
                                   radarGrid.wavelength(), lookDirection,
                                   avg_hgt);
        checkCudaErrors(cudaPeekAtLastError());
        checkCudaErrors(cudaDeviceSynchronize());
    }

    checkCudaErrors(cudaMemcpy(&out[0], out_d,
                               radarGrid.size() * sizeof(float),
                               cudaMemcpyDeviceToHost));
    // FIX: release the device buffer (was leaked).
    checkCudaErrors(cudaFree(out_d));

    out_raster.setBlock(&out[0], 0, 0, radarGrid.width(), radarGrid.length());
}

}}}
the_stack
#define CUB_STDERR

#include <iterator>

#include <cub/warp/warp_load.cuh>
#include <cub/iterator/cache_modified_input_iterator.cuh>
#include <cub/iterator/discard_output_iterator.cuh>
#include <cub/util_allocator.cuh>

#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/sequence.h>

#include "test_util.h"

using namespace cub;

// Number of random valid_items values tried per (type, config) combination.
const int MAX_ITERATIONS = 30;

/**
 * Full-tile test kernel: each warp loads one tile of `WarpThreads *
 * ItemsPerThread` items with cub::WarpLoad and checks that its registers end
 * up in blocked arrangement, i.e. reg[item] == threadIdx.x * ItemsPerThread +
 * item.  Any mismatch is printed and counted into *err via atomicAdd.
 */
template <int BlockThreads,
          int WarpThreads,
          int ItemsPerThread,
          WarpLoadAlgorithm LoadAlgorithm,
          typename InputIteratorT>
__global__ void kernel(InputIteratorT input, int *err)
{
  using InputT = cub::detail::value_t<InputIteratorT>;
  using WarpLoadT = WarpLoad<InputT, ItemsPerThread, LoadAlgorithm, WarpThreads>;

  constexpr int warps_in_block = BlockThreads / WarpThreads;
  constexpr int tile_size = ItemsPerThread * WarpThreads;
  const int warp_id = static_cast<int>(threadIdx.x) / WarpThreads;

  // One TempStorage per warp in the block
  __shared__ typename WarpLoadT::TempStorage temp_storage[warps_in_block];

  InputT reg[ItemsPerThread];
  // Each warp loads its own tile starting at warp_id * tile_size
  WarpLoadT(temp_storage[warp_id]).Load(input + warp_id * tile_size, reg);

  for (int item = 0; item < ItemsPerThread; item++)
  {
    const auto expected_value =
        static_cast<InputT>(threadIdx.x * ItemsPerThread + item);

    if (reg[item] != expected_value)
    {
      printf("TID: %u; WID: %d; LID: %d: ITEM: %d/%d: %d != %d\n",
             threadIdx.x,
             warp_id,
             static_cast<int>(threadIdx.x) % WarpThreads,
             item,
             ItemsPerThread,
             static_cast<int>(reg[item]),
             static_cast<int>(expected_value));
      atomicAdd(err, 1);
      break; // one error per thread is enough
    }
  }
}

/**
 * Partial-tile (guarded) test kernel overload: loads with `valid_items` and
 * an out-of-bounds default, then checks that in-bounds items hold the
 * expected sequence value and out-of-bounds items hold `oob_default`.
 * The in/out-of-bounds predicate depends on the algorithm: STRIPED assigns
 * item i of lane l to position i * WarpThreads + l, the other algorithms use
 * the blocked position l * ItemsPerThread + i.
 */
template <int BlockThreads,
          int WarpThreads,
          int ItemsPerThread,
          WarpLoadAlgorithm LoadAlgorithm,
          typename InputIteratorT>
__global__ void kernel(int valid_items,
                       InputIteratorT input,
                       int *err)
{
  using InputT = cub::detail::value_t<InputIteratorT>;
  using WarpLoadT = WarpLoad<InputT, ItemsPerThread, LoadAlgorithm, WarpThreads>;

  constexpr int warps_in_block = BlockThreads / WarpThreads;
  constexpr int tile_size = ItemsPerThread * WarpThreads;

  const int tid = static_cast<int>(threadIdx.x);
  const int warp_id = tid / WarpThreads;
  const int lane_id = tid % WarpThreads;

  __shared__ typename WarpLoadT::TempStorage temp_storage[warps_in_block];

  InputT reg[ItemsPerThread];
  // oob_default is chosen as valid_items so it can never collide with an
  // in-bounds sequence value (those are < valid_items).
  const auto oob_default = static_cast<InputT>(valid_items);

  WarpLoadT(temp_storage[warp_id])
      .Load(input + warp_id * tile_size, reg, valid_items, oob_default);

  for (int item = 0; item < ItemsPerThread; item++)
  {
    const auto expected_value =
        static_cast<InputT>(tid * ItemsPerThread + item);

    const bool is_oob =
        LoadAlgorithm == WarpLoadAlgorithm::WARP_LOAD_STRIPED
            ? item * WarpThreads + lane_id >= valid_items
            : lane_id * ItemsPerThread + item >= valid_items;

    if (is_oob)
    {
      if (reg[item] != oob_default)
      {
        atomicAdd(err, 1);
      }
    }
    else if (reg[item] != expected_value)
    {
      atomicAdd(err, 1);
    }
  }
}

/**
 * Host driver for the full-tile kernel: launches one block, copies back the
 * device-side error counter and asserts it is zero.
 */
template <typename T,
          int BlockThreads,
          int WarpThreads,
          int ItemsPerThread,
          WarpLoadAlgorithm LoadAlgorithm,
          typename InputIteratorT>
void TestImplementation(InputIteratorT input)
{
  thrust::device_vector<int> err(1, 0);

  kernel<BlockThreads, WarpThreads, ItemsPerThread, LoadAlgorithm>
      <<<1, BlockThreads>>>(input, thrust::raw_pointer_cast(err.data()));

  CubDebugExit(cudaPeekAtLastError());
  CubDebugExit(cudaDeviceSynchronize());

  const int errors_number = err[0];
  const int expected_errors_number = 0;

  AssertEquals(errors_number, expected_errors_number);
}

/**
 * Host driver for the partial-tile kernel (same contract as above, plus the
 * `valid_items` guard forwarded to the kernel).
 */
template <typename T,
          int BlockThreads,
          int WarpThreads,
          int ItemsPerThread,
          WarpLoadAlgorithm LoadAlgorithm,
          typename InputIteratorT>
void TestImplementation(int valid_items, InputIteratorT input)
{
  thrust::device_vector<int> err(1, 0);

  kernel<BlockThreads, WarpThreads, ItemsPerThread, LoadAlgorithm>
      <<<1, BlockThreads>>>(valid_items,
                            input,
                            thrust::raw_pointer_cast(err.data()));

  CubDebugExit(cudaPeekAtLastError());
  CubDebugExit(cudaDeviceSynchronize());

  const int errors_number = err[0];
  const int expected_errors_number = 0;

  AssertEquals(errors_number, expected_errors_number);
}

/**
 * Generate the device input for one configuration.  For STRIPED loads the
 * data is pre-striped on the host (via FillStriped from test_util.h) so that
 * the load still deposits a plain blocked sequence into registers; all other
 * algorithms read a simple 0..N-1 sequence.
 */
template <typename T,
          int BlockThreads,
          int WarpThreads,
          int ItemsPerThread,
          WarpLoadAlgorithm LoadAlgorithm>
thrust::device_vector<T> GenInput()
{
  const int tile_size = WarpThreads * ItemsPerThread;
  const int total_warps = BlockThreads / WarpThreads;
  const int elements = total_warps * tile_size;

  thrust::device_vector<T> input(elements);

  if (LoadAlgorithm == WarpLoadAlgorithm::WARP_LOAD_STRIPED)
  {
    thrust::host_vector<T> h_input(elements);

    // In this case we need different stripe pattern, so the
    // items/threads parameters are swapped
    constexpr int fake_block_size = ItemsPerThread *
                                    (BlockThreads / WarpThreads);

    FillStriped<ItemsPerThread, WarpThreads, fake_block_size>(h_input.begin());
    input = h_input;
  }
  else
  {
    thrust::sequence(input.begin(), input.end());
  }

  return input;
}

/**
 * Test WarpLoad through a raw device pointer: one full-tile pass plus
 * MAX_ITERATIONS randomized partial-tile passes.
 */
template <typename T,
          int BlockThreads,
          int WarpThreads,
          int ItemsPerThread,
          WarpLoadAlgorithm LoadAlgorithm>
void TestPointer()
{
  thrust::device_vector<T> input =
      GenInput<T, BlockThreads, WarpThreads, ItemsPerThread, LoadAlgorithm>();

  TestImplementation<T, BlockThreads, WarpThreads, ItemsPerThread, LoadAlgorithm>(
      thrust::raw_pointer_cast(input.data()));

  const unsigned int max_valid_items = WarpThreads * ItemsPerThread;

  for (int iteration = 0; iteration < MAX_ITERATIONS; iteration++)
  {
    const int valid_items = static_cast<int>(RandomValue(max_valid_items));

    TestImplementation<T, BlockThreads, WarpThreads, ItemsPerThread, LoadAlgorithm>(
        valid_items,
        thrust::raw_pointer_cast(input.data()));
  }
}

/**
 * Test WarpLoad through a CacheModifiedInputIterator with the given cache
 * load modifier: one full-tile pass plus MAX_ITERATIONS randomized
 * partial-tile passes.
 */
template <typename T,
          int BlockThreads,
          int WarpThreads,
          int ItemsPerThread,
          WarpLoadAlgorithm LoadAlgorithm,
          CacheLoadModifier LoadModifier>
void TestIterator()
{
  thrust::device_vector<T> input =
      GenInput<T, BlockThreads, WarpThreads, ItemsPerThread, LoadAlgorithm>();

  TestImplementation<T, BlockThreads, WarpThreads, ItemsPerThread, LoadAlgorithm>(
      CacheModifiedInputIterator<LoadModifier, T>(
          thrust::raw_pointer_cast(input.data())));

  const int max_valid_items = WarpThreads * ItemsPerThread;

  for (int iteration = 0; iteration < MAX_ITERATIONS; iteration++)
  {
    const int valid_items = RandomValue(max_valid_items);

    TestImplementation<T, BlockThreads, WarpThreads, ItemsPerThread, LoadAlgorithm>(
        valid_items,
        CacheModifiedInputIterator<LoadModifier, T>(
            thrust::raw_pointer_cast(input.data())));
  }
}

// Sweep all cache load modifiers for one (type, config, algorithm) combination.
template <typename T,
          int BlockThreads,
          int WarpThreads,
          int ItemsPerThread,
          WarpLoadAlgorithm LoadAlgorithm>
void TestIterator()
{
  TestIterator<T, BlockThreads, WarpThreads, ItemsPerThread, LoadAlgorithm,
               CacheLoadModifier::LOAD_DEFAULT>();
  TestIterator<T, BlockThreads, WarpThreads, ItemsPerThread, LoadAlgorithm,
               CacheLoadModifier::LOAD_CA>();
  TestIterator<T, BlockThreads, WarpThreads, ItemsPerThread, LoadAlgorithm,
               CacheLoadModifier::LOAD_CG>();
  TestIterator<T, BlockThreads, WarpThreads, ItemsPerThread, LoadAlgorithm,
               CacheLoadModifier::LOAD_CS>();
  TestIterator<T, BlockThreads, WarpThreads, ItemsPerThread, LoadAlgorithm,
               CacheLoadModifier::LOAD_CV>();
  TestIterator<T, BlockThreads, WarpThreads, ItemsPerThread, LoadAlgorithm,
               CacheLoadModifier::LOAD_LDG>();
  TestIterator<T, BlockThreads, WarpThreads, ItemsPerThread, LoadAlgorithm,
               CacheLoadModifier::LOAD_VOLATILE>();
}

// Both input flavors (pointer and iterator) for one algorithm.
template <typename T,
          int BlockThreads,
          int WarpThreads,
          int ItemsPerThread,
          WarpLoadAlgorithm LoadAlgorithm>
void Test()
{
  TestPointer<T, BlockThreads, WarpThreads, ItemsPerThread, LoadAlgorithm>();
  TestIterator<T, BlockThreads, WarpThreads, ItemsPerThread, LoadAlgorithm>();
}

// Sweep all four WarpLoad algorithms.
template <typename T,
          int BlockThreads,
          int WarpThreads,
          int ItemsPerThread>
void Test()
{
  Test<T, BlockThreads, WarpThreads, ItemsPerThread,
       WarpLoadAlgorithm::WARP_LOAD_DIRECT>();
  Test<T, BlockThreads, WarpThreads, ItemsPerThread,
       WarpLoadAlgorithm::WARP_LOAD_STRIPED>();
  Test<T, BlockThreads, WarpThreads, ItemsPerThread,
       WarpLoadAlgorithm::WARP_LOAD_TRANSPOSE>();
  Test<T, BlockThreads, WarpThreads, ItemsPerThread,
       WarpLoadAlgorithm::WARP_LOAD_VECTORIZE>();
}

// Sweep items-per-thread values (including a non-power-of-two, 7).
template <typename T, int BlockThreads, int WarpThreads>
void Test()
{
  Test<T, BlockThreads, WarpThreads, 1>();
  Test<T, BlockThreads, WarpThreads, 4>();
  Test<T, BlockThreads, WarpThreads, 7>();
}

// Sweep logical warp sizes.
template <typename T, int BlockThreads>
void Test()
{
  Test<T, BlockThreads, 4>();
  Test<T, BlockThreads, 16>();
  Test<T, BlockThreads, 32>();
}

// Sweep element types.
template <int BlockThreads>
void Test()
{
  Test<std::uint16_t, BlockThreads>();
  Test<std::uint32_t, BlockThreads>();
  Test<std::uint64_t, BlockThreads>();
}

int main(int argc, char** argv)
{
  // Initialize command line
  CommandLineArgs args(argc, argv);

  // Initialize device
  CubDebugExit(args.DeviceInit());

  Test<256>();
}
the_stack
#include "common.h"
#include "multilgKernels.h"
#include "transformerKernels.h"

/**
@file
Implements the cuda kernel functions and their launchers required by the
multilingual nmt model. Currently, fp16 and fp32 versions are provided.
*/
namespace lightseq {
namespace cuda {

/**
@brief: ker_multilg_enc_emb
for encoder, look up token embedding, add position embedding and source
language embedding

@thread
gridDim.x = batch_size
gridDim.y = batch_seq_len
blockDim.x = max_thread_per_block

@param
token_emb: [vocab_size, hidden_size]
pos_emb: [max_step, hidden_size]
src_lang_emb: [lang_num, hidden_size]
token_id: input token id, [batch_size, batch_seq_len]
output: result, [batch_size, batch_seq_len, hidden_size]
padding_mask: record the padding token, [batch_size, batch_seq_len]
padding_id, the padding token id
*/
template <typename T>
__global__ void ker_multilg_enc_emb(const T* token_emb, const T* pos_emb,
                                    const T* src_lang_emb, const int* token_id,
                                    T* output, int* padding_mask,
                                    int padding_id, const int hidden_size) {
  // flat (batch, seq-pos) index of the token handled by this block
  int target_pos = blockIdx.x * gridDim.y + blockIdx.y;
  int start = target_pos * hidden_size + threadIdx.x;
  int end = (target_pos + 1) * hidden_size;
  int tid = token_id[target_pos];
  // first token of each sequence carries the source language id
  int lang_id = token_id[blockIdx.x * gridDim.y];
  if (tid == padding_id) {
    // for padding id: mark the mask and zero the output row
    if (threadIdx.x == 0) padding_mask[target_pos] = 1;
    for (uint i = start; i < end; i += blockDim.x) {
      // output[target_pos * blockDim.x + threadIdx.x] = 0.f;
      output[i] = 0.f;
    }
    return;
  }
  if (threadIdx.x == 0) {
    padding_mask[target_pos] = 0;
  }
  // sum token + position + source-language embeddings element-wise
  for (uint i = start; i < end; i += blockDim.x) {
    int offset = i - target_pos * hidden_size;
    output[i] = token_emb[tid * hidden_size + offset] +
                pos_emb[blockIdx.y * hidden_size + offset] +
                src_lang_emb[lang_id * hidden_size + offset];
  }
}

// fp16 specialization: same logic as the generic version, but all embedding
// tables and the output are addressed as half2 pairs, so the caller passes
// half_hidden_size = hidden_size / 2 (see the launcher below).
template <>
__global__ void ker_multilg_enc_emb<__half>(
    const __half* token_emb, const __half* pos_emb, const __half* src_lang_emb,
    const int* token_id, __half* output, int* padding_mask, int padding_id,
    const int half_hidden_size) {
  int target_pos = blockIdx.x * gridDim.y + blockIdx.y;
  int start = target_pos * half_hidden_size + threadIdx.x;
  int end = (target_pos + 1) * half_hidden_size;
  int tid = token_id[target_pos];
  int lang_id = token_id[blockIdx.x * gridDim.y];
  half2* output_h = (half2*)output;
  if (tid == padding_id) {
    // for padding id: mark the mask and zero the output row
    if (threadIdx.x == 0) padding_mask[target_pos] = 1;
    for (uint i = start; i < end; i += blockDim.x) {
      output_h[i] = __float2half2_rn(0.f);
    }
    return;
  }
  if (threadIdx.x == 0) {
    padding_mask[target_pos] = 0;
  }
  for (uint i = start; i < end; i += blockDim.x) {
    int offset = i - target_pos * half_hidden_size;
    // accumulate the three embeddings in fp32 to limit rounding error,
    // then round once back to half2
    float2 te = __half22float2(
        ((const half2*)token_emb)[tid * half_hidden_size + offset]);
    float2 pe = __half22float2(
        ((const half2*)pos_emb)[blockIdx.y * half_hidden_size + offset]);
    float2 le = __half22float2(
        ((const half2*)src_lang_emb)[lang_id * half_hidden_size + offset]);
    te.x = te.x + pe.x + le.x;
    te.y = te.y + pe.y + le.y;
    output_h[i] = __float22half2_rn(te);
  }
}

// Host launcher for ker_multilg_enc_emb (fp32): one block per (batch, seq-pos).
template <typename T>
void ker_multilg_enc_emb_launcher(int batch_size, int batch_seq_len,
                                  int hidden_size, cudaStream_t stream,
                                  const T* token_emb, const T* pos_emb,
                                  const T* src_lang_emb, const int* token_id,
                                  T* output, int* padding_mask, int padding_id,
                                  int max_thread_per_block) {
  ker_multilg_enc_emb<T>
      <<<dim3(batch_size, batch_seq_len), max_thread_per_block, 0, stream>>>(
          token_emb, pos_emb, src_lang_emb, token_id, output, padding_mask,
          padding_id, hidden_size);
}

// fp16 launcher: passes hidden_size / 2 because the kernel walks half2 pairs.
template <>
void ker_multilg_enc_emb_launcher<__half>(
    int batch_size, int batch_seq_len, int hidden_size, cudaStream_t stream,
    const __half* token_emb, const __half* pos_emb, const __half* src_lang_emb,
    const int* token_id, __half* output, int* padding_mask, int padding_id,
    int max_thread_per_block) {
  ker_multilg_enc_emb<__half>
      <<<dim3(batch_size, batch_seq_len), max_thread_per_block, 0, stream>>>(
          token_emb, pos_emb, src_lang_emb, token_id, output, padding_mask,
          padding_id, hidden_size / 2);
}

template void ker_multilg_enc_emb_launcher<float>(
    int batch_size, int batch_seq_len, int hidden_size, cudaStream_t stream,
    const float* token_emb, const float* pos_emb, const float* src_lang_emb,
    const int* token_id, float* output, int* padding_mask, int padding_id,
    int max_thread_per_block);

template void ker_multilg_enc_emb_launcher<__half>(
    int batch_size, int batch_seq_len, int hidden_size, cudaStream_t stream,
    const __half* token_emb, const __half* pos_emb, const __half* src_lang_emb,
    const int* token_id, __half* output, int* padding_mask, int padding_id,
    int max_thread_per_block);

/**
@brief: ker_multilg_dec_emb
for multilingual decoder, look up token embedding, add position embedding
and lang embedding

@thread
gridDim.x = batch_size * beam_size
blockDim.x = max_thread_per_block

@param
token_emb: [hidden_size, vocab_size], note, it is different with encoder
pos_emb: [max_step, hidden_size]
src_lang_emb: [lang_num, hidden_size]
trg_lang_emb: [lang_num, hidden_size]
src_token_id: [batch_size, src_seq_len]
token_id: input token id, [batch_size, beam_size, max_step]
output: result, [batch_size, beam_size, hidden_size]
step: current step
max_step: max decoder steps
vocab_size: vocabulary size
*/
template <typename T>
__global__ void ker_multilg_dec_emb(
    const T* token_emb, const T* pos_emb, const T* src_lang_emb,
    const T* trg_lang_emb, const int* src_token_id, const int* token_id,
    T* output, int step, int max_step, int vocab_size, int hidden_size,
    int beam_size, int src_seq_len) {
  int batch_id = blockIdx.x / beam_size;
  // src seq is in [src_lang_id, trg_lang_id, tokens...] format
  int src_lang_id = src_token_id[batch_id * src_seq_len];
  int trg_lang_id = src_token_id[batch_id * src_seq_len + 1];
  // at step 0 decoding starts from the target-language token
  int token_idx =
      (step == 0 ? trg_lang_id : token_id[blockIdx.x * max_step + step]);
  for (uint offset = threadIdx.x; offset < hidden_size;
       offset += blockDim.x) {
    // token_emb is [hidden_size, vocab_size] here, hence the transposed
    // indexing (offset * vocab_size + token_idx)
    output[blockIdx.x * hidden_size + offset] =
        token_emb[offset * vocab_size + token_idx] +
        pos_emb[step * hidden_size + offset] +
        src_lang_emb[src_lang_id * hidden_size + offset] +
        trg_lang_emb[trg_lang_id * hidden_size + offset];
  }
}

// Host launcher for ker_multilg_dec_emb: one block per live (batch, beam).
template <typename T>
void ker_multilg_dec_emb_launcher(int step_token_num, int hidden_size,
                                  cudaStream_t stream, const T* token_emb,
                                  const T* pos_emb, const T* src_lang_emb,
                                  const T* trg_lang_emb,
                                  const int* src_token_id, const int* token_id,
                                  T* output, int step, int max_step,
                                  int vocab_size, int beam_size,
                                  int src_seq_len, int max_thread_per_block) {
  ker_multilg_dec_emb<T><<<step_token_num, max_thread_per_block, 0, stream>>>(
      token_emb, pos_emb, src_lang_emb, trg_lang_emb, src_token_id, token_id,
      output, step, max_step, vocab_size, hidden_size, beam_size, src_seq_len);
}

template void ker_multilg_dec_emb_launcher<float>(
    int step_token_num, int hidden_size, cudaStream_t stream,
    const float* token_emb, const float* pos_emb, const float* src_lang_emb,
    const float* trg_lang_emb, const int* src_token_id, const int* token_id,
    float* output, int step, int max_step, int vocab_size, int beam_size,
    int src_seq_len, int max_thread_per_block);

template void ker_multilg_dec_emb_launcher<__half>(
    int step_token_num, int hidden_size, cudaStream_t stream,
    const __half* token_emb, const __half* pos_emb, const __half* src_lang_emb,
    const __half* trg_lang_emb, const int* src_token_id, const int* token_id,
    __half* output, int step, int max_step, int vocab_size, int beam_size,
    int src_seq_len, int max_thread_per_block);

/**
@brief: select_beam_rough_topk_multilg
one block for one beam, compute the log seq probability ended with every token
in vocab, base on the previous log seq probability and current step's logit,
select rough topK candidate.
@thread
gridDim.x = batch_size * beam_size
blockDim.x = max_thread_per_block

@param
logits: [batch_size, beam_size, vocab_size], cur step logit
logit_bias: [vocab_size], logit bias
seq_probs: [batch_size, beam_size], prefix sequence log probability
seq_score: [batch_size, beam_size], prefix sequence score
alive_seq: [batch_size, beam_size, max_step], prefix sequence id
can_idx: [batch_size, beam_size, vocab_size], topk candidate's index
can_score: [batch_size, beam_size, vocab_size], topk candidate's score
num_beam_can: [1 + batch_size * beam_size].
    the first ele save the number of topk candidate of the whole batch
    the remaining batch_size * beam_size ele save the number of topk
    candidate of each beam
vocab_size: the vocab size of decoder
max_step: max decode step
length_norm: length penlty value for current step
cur_step: current step
diverse_lambda: lambda for diverse beam search
*/
template <typename T, int beam_size>
__global__ void select_beam_rough_topk_multilg(
    const T* logits, const T* logit_bias, const float* seq_probs,
    const float* seq_score, const int* alive_seq, const int* vocab_mask,
    const int* src_token_id, int* can_idx, float* can_score, int* num_beam_can,
    int vocab_size, int max_step, float length_norm, int cur_step,
    float diverse_lambda, int end_id, int src_seq_len) {
  if (alive_seq[blockIdx.x * max_step + cur_step] == end_id) {
    // this is a finished beam
    if (threadIdx.x == 0) {
      num_beam_can[blockIdx.x + 1] = 1;      // generate one candidate
      int pos = atomicAdd(num_beam_can, 1);  // get a candidate pos
      if (diverse_lambda == 0) {
        // this beam's score will not be change
        can_score[pos] = seq_score[blockIdx.x];
      } else {
        // add the beam id offset in score to sort in each beam
        int batch_id = blockIdx.x / beam_size;
        can_score[pos] = seq_score[blockIdx.x] +
                         (blockIdx.x - batch_id) * min_log_probability;
      }
      can_idx[pos] = end_id + (blockIdx.x % beam_size) * vocab_size;  // EOS
    }
    return;
  }

  /* step1: compute each thread's max_logit and sum_exp_logit, store in
   * rough_top_kth_logit, sum_exp_logit */
  int batch_id = blockIdx.x / beam_size;
  // target-language id lives at position 1 of this batch's source sequence
  int trg_lang_id = src_token_id[batch_id * src_seq_len + 1];
  const int block_start = blockIdx.x * vocab_size;
  const int left_idx = block_start + threadIdx.x;
  const int right_idx = (blockIdx.x + 1) * vocab_size;
  float rough_top_kth_logit = CUDA_FLOAT_INF_NEG;
  float sum_exp_logit = 0;
  // tokens masked out for the target language are treated as -inf
  for (int i = left_idx; i < right_idx; i += blockDim.x) {
    int lang_mask = vocab_mask[trg_lang_id * vocab_size + i - block_start];
    float lgt =
        (lang_mask == 0
             ? CUDA_FLOAT_INF_NEG
             : (float)logits[i] + (float)__ldg(&logit_bias[i - block_start]));
    rough_top_kth_logit = fmaxf(rough_top_kth_logit, lgt);
  }
  float max_logit = blockReduceMax(rough_top_kth_logit);
  __shared__ float s_max_logit;
  if (threadIdx.x == 0) {
    s_max_logit = max_logit;
  }
  __syncthreads();
  // accumulate exp(logit - max) for a numerically stable log-sum-exp;
  // masked tokens contribute 0
  for (int i = left_idx; i < right_idx; i += blockDim.x) {
    int lang_mask = vocab_mask[trg_lang_id * vocab_size + i - block_start];
    float lgt =
        lang_mask == 0
            ? 0.f
            : expf(fmaxf((float)(logits[i]) +
                             (float)__ldg(&logit_bias[i - block_start]) -
                             s_max_logit,
                         logit_thresh_min));
    sum_exp_logit += lgt;
  }

  /* step2: compute rough top-kth-logits and sum_exp_logit among the whole
  beam, saved into s_topk and s_log_prob_base */
  __shared__ float
      s_log_prob_base;       // prefix sequence log prob - log_sum_exp_logit
  __shared__ float s_topk;   // rough top k-th value of logits
  __shared__ int num_cur_beam_can;  // candidate number for this beam
  sum_exp_logit = blockReduceSum(sum_exp_logit);
  rough_top_kth_logit = blockRoughTopK<float, beam_size>(rough_top_kth_logit);
  if (threadIdx.x == 0) {
    s_log_prob_base = seq_probs[blockIdx.x] - logf(sum_exp_logit) - s_max_logit;
    s_topk = rough_top_kth_logit;
    num_cur_beam_can = 0;
  }

  /* step3 : select the candidate token with logits bigger than s_topk,
  compute the seq probability ended with them,
  save the probability, token_index, selected token number.
  */
  int idx = left_idx;
  int batch_start_pos = batch_id * beam_size * vocab_size;
  // int unk_vocab_id = vocab_size - 3;  // last three element: unk, start, eos
  __shared__ int l_n;  // current iteration candidate number
  for (int iter = 0; iter < (vocab_size + blockDim.x - 1) / blockDim.x;
       iter++) {
    // zero the counter
    if (threadIdx.x == 0) l_n = 0;
    // NOTE: this barrier also publishes s_topk / s_log_prob_base written by
    // thread 0 in step2 before any thread reads them below
    __syncthreads();

    float lgt = CUDA_FLOAT_INF_NEG - 1.f;  // min s_topk is CUDA_FLOAT_INF_NEG
    int pos;
    int vocab_id = idx - block_start;

    // if ((vocab_id < vocab_size) && (vocab_id != unk_vocab_id)) {
    if (vocab_id < vocab_size) {
      int lang_mask = vocab_mask[trg_lang_id * vocab_size + vocab_id];
      if (lang_mask != 0) {
        lgt = (float)(logits[idx]) + (float)__ldg(&logit_bias[vocab_id]);
        if (lgt >= s_topk)
          // pos: relative pos inside this iteration
          pos = atomicAdd(&l_n, 1);
      }
    }
    __syncthreads();

    // leader increments the global counter
    if (threadIdx.x == 0) {
      atomicAdd(&num_cur_beam_can, l_n);
      l_n = atomicAdd(num_beam_can, l_n);
    }
    __syncthreads();

    // threads with true predicates write their elements
    if ((lgt >= s_topk)) {
      pos += l_n;  // increment local pos by global counter
      if (diverse_lambda == 0) {
        // batch offset keeps candidates of different batches apart when sorted
        can_score[pos] = fmaxf((lgt + s_log_prob_base) * length_norm,
                               min_log_probability + 1.f) +
                         batch_id * min_log_probability;
      } else {
        // per-beam offset so diverse beam search sorts within each beam
        can_score[pos] = fmaxf((lgt + s_log_prob_base) * length_norm,
                               min_log_probability + 1.f) +
                         blockIdx.x * min_log_probability;
      }
      can_idx[pos] = idx - batch_start_pos;
    }
    __syncthreads();
    idx += blockDim.x;
  }
  if (threadIdx.x == 0) {
    num_beam_can[blockIdx.x + 1] = num_cur_beam_can;
  }
}

/**
@brief host-side dispatcher: beam_size is a template parameter of the kernel,
so dispatch over the supported power-of-two beam sizes (1..32).
Unsupported beam sizes silently launch nothing — presumably validated by the
caller; TODO confirm.
*/
template <typename T>
void select_beam_rough_topk_multilg_launcher(
    const T* logits, const T* logit_bias, const float* seq_probs,
    const float* seq_score, const int* alive_seq, const int* vocab_mask,
    const int* src_token_id, int* can_idx, float* can_score, int* num_beam_can,
    int vocab_size, int max_step, float length_norm, int cur_step,
    int step_token_num, int max_thread_per_block, cudaStream_t stream,
    int beam_size, float diverse_lambda, int end_id, int src_seq_len) {
  if (beam_size == 1)
    select_beam_rough_topk_multilg<T, 1>
        <<<step_token_num, max_thread_per_block, 0, stream>>>(
            logits, logit_bias, seq_probs, seq_score, alive_seq, vocab_mask,
            src_token_id, can_idx, can_score, num_beam_can, vocab_size,
            max_step, length_norm, cur_step, diverse_lambda, end_id,
            src_seq_len);
  if (beam_size == 2)
    select_beam_rough_topk_multilg<T, 2>
        <<<step_token_num, max_thread_per_block, 0, stream>>>(
            logits, logit_bias, seq_probs, seq_score, alive_seq, vocab_mask,
            src_token_id, can_idx, can_score, num_beam_can, vocab_size,
            max_step, length_norm, cur_step, diverse_lambda, end_id,
            src_seq_len);
  if (beam_size == 4)
    select_beam_rough_topk_multilg<T, 4>
        <<<step_token_num, max_thread_per_block, 0, stream>>>(
            logits, logit_bias, seq_probs, seq_score, alive_seq, vocab_mask,
            src_token_id, can_idx, can_score, num_beam_can, vocab_size,
            max_step, length_norm, cur_step, diverse_lambda, end_id,
            src_seq_len);
  if (beam_size == 8)
    select_beam_rough_topk_multilg<T, 8>
        <<<step_token_num, max_thread_per_block, 0, stream>>>(
            logits, logit_bias, seq_probs, seq_score, alive_seq, vocab_mask,
            src_token_id, can_idx, can_score, num_beam_can, vocab_size,
            max_step, length_norm, cur_step, diverse_lambda, end_id,
            src_seq_len);
  if (beam_size == 16)
    select_beam_rough_topk_multilg<T, 16>
        <<<step_token_num, max_thread_per_block, 0, stream>>>(
            logits, logit_bias, seq_probs, seq_score, alive_seq, vocab_mask,
            src_token_id, can_idx, can_score, num_beam_can, vocab_size,
            max_step, length_norm, cur_step, diverse_lambda, end_id,
            src_seq_len);
  if (beam_size == 32)
    select_beam_rough_topk_multilg<T, 32>
        <<<step_token_num, max_thread_per_block, 0, stream>>>(
            logits, logit_bias, seq_probs, seq_score, alive_seq, vocab_mask,
            src_token_id, can_idx, can_score, num_beam_can, vocab_size,
            max_step, length_norm, cur_step, diverse_lambda, end_id,
            src_seq_len);
}

template void select_beam_rough_topk_multilg_launcher<float>(
    const float* logits, const float* logit_bias, const float* seq_probs,
    const float* seq_score, const int* alive_seq, const int* vocab_mask,
    const int* src_token_id, int* can_idx, float* can_score, int* num_beam_can,
    int vocab_size, int max_step, float length_norm, int cur_step,
    int step_token_num, int max_thread_per_block, cudaStream_t stream,
    int beam_size, float diverse_lambda, int end_id, int src_seq_len);

template void select_beam_rough_topk_multilg_launcher<__half>(
    const __half* logits, const __half* logit_bias, const float* seq_probs,
    const float* seq_score, const int* alive_seq, const int* vocab_mask,
    const int* src_token_id, int* can_idx, float* can_score, int* num_beam_can,
    int vocab_size, int max_step, float length_norm, int cur_step,
    int step_token_num, int max_thread_per_block, cudaStream_t stream,
    int beam_size, float diverse_lambda, int end_id, int src_seq_len);

}  // namespace cuda
}  // namespace lightseq
the_stack
struct cmp_functor_dict { const unsigned long long* source; bool *dest; const unsigned int *pars; cmp_functor_dict(const unsigned long long int* _source, bool * _dest, const unsigned int * _pars): source(_source), dest(_dest), pars(_pars) {} template <typename IndexType> __host__ __device__ void operator()(const IndexType & i) { unsigned int idx = pars[0]; unsigned int cmp = pars[1]; unsigned int bits = ((unsigned int*)source)[1]; unsigned int fit_count = ((unsigned int*)source)[0]; unsigned int int_sz = 64; //find the source index unsigned int src_idx = i/fit_count; // find the exact location unsigned int src_loc = i%fit_count; //right shift the values unsigned int shifted = ((fit_count-src_loc)-1)*bits; unsigned long long int tmp = source[src_idx+2] >> shifted; // set the rest of bits to 0 tmp = tmp << (int_sz - bits); tmp = tmp >> (int_sz - bits); //printf("COMP1 %llu %d \n", tmp, idx); if(cmp == 4) { // == if(tmp == idx) dest[i] = 1; else dest[i] = 0; } else { // != if(tmp == idx) dest[i] = 0; else dest[i] = 1; }; } }; struct gpu_regex { char *source; char *pattern; bool * dest; const unsigned int *len; gpu_regex(char * _source,char * _pattern, bool * _dest, const unsigned int * _len): source(_source), pattern(_pattern), dest(_dest), len(_len) {} template <typename IndexType> __host__ __device__ void operator()(const IndexType & i) { bool star = 0; int j = 0; char* s; char* p; char* str = source + len[0]*i; char* pat = pattern; loopStart: for (s = str, p = pat; j < len[0] && *s; ++s, ++p, ++j) { switch (*p) { case '?': if (*s == '.') goto starCheck; break; case '%': star = 1; str = s, pat = p; do { ++pat; } while (*pat == '%'); if (!*pat) { dest[i] = 1; return; } goto loopStart; default : if (*s != *p) goto starCheck; break; } /* endswitch */ } /* endfor */ while (*p == '%') ++p; dest[i] = !*p; return; starCheck: if (!star) { dest[i] = 0; return; }; str++; j++; goto loopStart; } }; bool* filter(queue<string> op_type, queue<string> op_value, queue<int_type> 
op_nums,queue<float_type> op_nums_f, queue<unsigned int> op_nums_precision,
             CudaSet* a, unsigned int segment) {

    // scratch stacks for the RPN (postfix) evaluation of the WHERE clause
    stack<string> exe_type;            // kind of each pending operand
    stack<string> exe_value;           // column names / string literals
    stack<int_type*> exe_vectors;      // intermediate device vectors
    stack<unsigned int> exe_precision; // decimal precision per operand
    stack<int_type> exe_nums;          // integer literals
    stack<bool*> bool_vectors;         // boolean device vectors (predicates)
    string s1, s2, s1_val, s2_val;
    int_type n1, n2, res;
    bool free_mem, free_mem1;          // set by get_vec: caller must cudaFree

    for(int i=0; !op_type.empty(); ++i, op_type.pop()) {

        string ss = op_type.front();
        //cout << endl << ss << " " << op_nums.size() << " " << op_nums_precision.size() << endl;

        // operand tokens: push onto the appropriate stack
        if (ss.compare("NAME") == 0 || ss.compare("NUMBER") == 0 || ss.compare("FLOAT") == 0
                || ss.compare("STRING") == 0 || ss.compare("FIELD") == 0) {
            if (ss.compare("NUMBER") == 0) {
                exe_nums.push(op_nums.front());
                op_nums.pop();
                exe_type.push(ss);
                exe_precision.push(op_nums_precision.front());
                op_nums_precision.pop();
            }
            else if (ss.compare("NAME") == 0 || ss.compare("STRING") == 0) {
                exe_value.push(op_value.front());
                op_value.pop();
                exe_type.push(ss);
            }
            else if(ss.compare("FIELD") == 0) {
                // "table.column" reference: resolve to the first host value
                // of that column and treat it as a NUMBER literal
                size_t pos1 = op_value.front().find_first_of(".", 0);
                string tbl = op_value.front().substr(0,pos1);
                string field = op_value.front().substr(pos1+1, string::npos);
                op_value.pop();
                CudaSet *b = varNames.find(tbl)->second;
                auto val = b->h_columns_int[field][0];
                exe_nums.push(val);
                exe_type.push("NUMBER");
            }
        }
        else {
            // arithmetic operators: pop two operands, push the result vector
            if (ss.compare("MUL") == 0 || ss.compare("ADD") == 0 || ss.compare("DIV") == 0 || ss.compare("MINUS") == 0) {
                // get 2 values from the stack
                s1 = exe_type.top();
                exe_type.pop();
                s2 = exe_type.top();
                exe_type.pop();

                if (s1.compare("NAME") == 0 && s2.compare("STRING") == 0) {
                    s1_val = exe_value.top();
                    exe_value.pop();
                    s2_val = exe_value.top();
                    exe_value.pop();
                    int_type* t = get_vec(a, s1_val, exe_vectors, free_mem);
                    exe_type.push("NAME");
                    exe_value.push("");
                    exe_precision.push(0);
                }
                else if (s2.compare("NAME") == 0 && s1.compare("STRING") == 0) {
                    s2_val = exe_value.top();
                    exe_value.pop();
                    s1_val = exe_value.top();
                    exe_value.pop();
                    int_type* t = get_vec(a, s1_val, exe_vectors, free_mem);
                    exe_type.push("NAME");
                    exe_value.push("");
                    exe_precision.push(0);
                }
                // NOTE(review): the next two branches repeat the conditions of
                // the two above, so they appear unreachable; also their pop
                // order differs from the earlier STRING/NAME branch — verify
                // against upstream before touching.
                else if (s1.compare("NAME") == 0 && s2.compare("STRING") == 0) {
                    s1_val = exe_value.top();
                    exe_value.pop();
                    s2_val = exe_value.top();
                    exe_value.pop();
                    int_type* t = get_vec(a, s1_val, exe_vectors, free_mem);
                    exe_type.push("NAME");
                    exe_value.push("");
                    exe_precision.push(0);
                }
                else if (s2.compare("NAME") == 0 && s1.compare("STRING") == 0) {
                    s1_val = exe_value.top();
                    exe_value.pop();
                    s2_val = exe_value.top();
                    exe_value.pop();
                    int_type* t = get_vec(a, s2_val, exe_vectors, free_mem);
                    exe_type.push("NAME");
                    exe_value.push("");
                    exe_precision.push(0);
                }
                else if (s1.compare("NUMBER") == 0 && s2.compare("NUMBER") == 0) {
                    // constant folding: scale both literals to a common
                    // decimal precision, apply the op, then broadcast the
                    // result into a device vector of mRecCount copies
                    n1 = exe_nums.top();
                    exe_nums.pop();
                    n2 = exe_nums.top();
                    exe_nums.pop();
                    auto p1 = exe_precision.top();
                    exe_precision.pop();
                    auto p2 = exe_precision.top();
                    exe_precision.pop();
                    auto pres = precision_func(p1, p2, ss);
                    exe_precision.push(pres);
                    if(p1)
                        n1 = n1*(unsigned int)pow(10,p1);
                    if(p2)
                        n2 = n2*(unsigned int)pow(10,p2);
                    if (ss.compare("ADD") == 0 )
                        res = n1+n2;
                    else if (ss.compare("MUL") == 0 )
                        res = n1*n2;
                    else if (ss.compare("DIV") == 0 )
                        res = n1/n2;
                    else
                        res = n1-n2;
                    thrust::device_ptr<int_type> p = thrust::device_malloc<int_type>(a->mRecCount);
                    thrust::sequence(p, p+(a->mRecCount),res,(int_type)0);
                    exe_type.push("NAME");
                    exe_value.push("");
                    exe_vectors.push(thrust::raw_pointer_cast(p));
                }
                else if (s1.compare("NAME") == 0 && s2.compare("NUMBER") == 0) {
                    // column OP literal
                    s1_val = exe_value.top();
                    exe_value.pop();
                    n1 = exe_nums.top();
                    exe_nums.pop();
                    auto p1 = exe_precision.top();
                    exe_precision.pop();
                    auto p2 = get_decimals(a, s1_val, exe_precision);
                    int_type* t = get_vec(a, s1_val, exe_vectors, free_mem);
                    auto pres = precision_func(p1, p2, ss);
                    exe_precision.push(pres);
                    exe_type.push("NAME");
                    exe_value.push("");
                    exe_vectors.push(a->op(t,n1,ss,1, p1, p2));
                    if(free_mem)
                        cudaFree(t);
                }
                else if (s1.compare("NUMBER") == 0 && s2.compare("NAME") == 0) {
                    // literal OP column (operand order flag = 0)
                    n1 = exe_nums.top();
                    exe_nums.pop();
                    s2_val = exe_value.top();
                    exe_value.pop();
                    auto p1 = exe_precision.top();
                    exe_precision.pop();
                    auto p2 = get_decimals(a, s2_val, exe_precision);
                    int_type* t = get_vec(a, s2_val, exe_vectors, free_mem);
                    auto pres = precision_func(p2, p1, ss);
                    exe_precision.push(pres);
                    exe_type.push("NAME");
                    exe_value.push("");
                    exe_vectors.push(a->op(t,n1,ss,0, p2, p1));
                    if(free_mem)
                        cudaFree(t);
                }
                else if (s1.compare("NAME") == 0 && s2.compare("NAME") == 0) {
                    // column OP column (integer columns only: type == 0)
                    s1_val = exe_value.top();
                    exe_value.pop();
                    s2_val = exe_value.top();
                    exe_value.pop();
                    if (a->type[s1_val] == 0) {
                        int_type* t1 = get_vec(a, s1_val, exe_vectors, free_mem);
                        int_type* t = get_vec(a, s2_val, exe_vectors, free_mem1);
                        auto p1 = get_decimals(a, s1_val, exe_precision);
                        auto p2 = get_decimals(a, s2_val, exe_precision);
                        auto pres = precision_func(p1, p2, ss);
                        exe_precision.push(pres);
                        exe_type.push("NAME");
                        exe_value.push("");
                        exe_vectors.push(a->op(t,t1,ss,0,p2,p1));
                        if(free_mem)
                            cudaFree(t1);
                        if(free_mem1)
                            cudaFree(t);
                    }
                }
            }
            else if (ss.compare("CMP") == 0) {
                // comparison operator; its code (==, !=, <, ... LIKE == 7)
                // arrives through the op_nums queue
                int_type cmp_type = op_nums.front();
                op_nums.pop();
                s1 = exe_type.top();
                exe_type.pop();
                s2 = exe_type.top();
                exe_type.pop();
                if (s1.compare("NUMBER") == 0 && s2.compare("NUMBER") == 0) {
                    n1 = exe_nums.top();
                    exe_nums.pop();
                    n2 = exe_nums.top();
                    exe_nums.pop();
                    exe_type.push("NAME");
                    exe_value.push("");
                    auto p1 = exe_precision.top();
                    exe_precision.pop();
                    auto p2 = exe_precision.top();
                    exe_precision.pop();
                    auto pres = precision_func(p1, p2, ss);
                    exe_precision.push(pres);
                    if(p1)
                        n1 = n1*(unsigned int)pow(10,p1);
                    if(p2)
                        n2 = n2*(unsigned int)pow(10,p2);
                    bool_vectors.push(a->compare(n1,n2,cmp_type));
                }
                else if ((s1.compare("STRING") == 0 && s2.compare("NAME") == 0) || (s1.compare("NAME") == 0 && s2.compare("STRING") == 0)) {
                    // string column vs string literal; normalize so that
                    // s1/s1_val is the literal and s2/s2_val the column
                    s1_val = exe_value.top();
                    exe_value.pop();
                    s2_val = exe_value.top();
                    exe_value.pop();
                    if (s1.compare("NAME") == 0 && s2.compare("STRING") == 0) {
                        s1.swap(s2);
                        s1_val.swap(s2_val);
                    };
                    void* d_res, *d_v;
                    // cmp_type 7 (LIKE) sizes the result by hostRecCount
                    if(cmp_type != 7)
                        cudaMalloc((void **) &d_res, a->mRecCount);
                    else
                        cudaMalloc((void **) &d_res, a->hostRecCount);
                    thrust::device_ptr<bool> dd_res((bool*)d_res);
                    cudaMalloc((void **) &d_v, 8);
                    thrust::device_ptr<unsigned int> dd_v((unsigned int*)d_v);
                    thrust::counting_iterator<unsigned int> begin(0);

                    if(s2_val.find(".") != string::npos) { //bitmap index
                        // pad the literal to the column width, hash it and
                        // look it up in the per-column dictionary
                        auto pos1 = s2_val.find_first_of(".");
                        auto pos2 = s2_val.find_last_of(".");
                        auto set = s2_val.substr(pos1+1, (pos2-pos1)-1);
                        auto col = s2_val.substr(pos2+1);
                        auto len = data_dict[set][col].col_length;
                        while(s1_val.length() < len)
                            s1_val = s1_val + '\0';
                        auto s1_hash = MurmurHash64A(&s1_val[0], len, hash_seed)/2;
                        if(a->idx_dictionary_int[s2_val].find(s1_hash) != a->idx_dictionary_int[s2_val].end()) {
                            dd_v[0] = a->idx_dictionary_int[s2_val][s1_hash];
                            dd_v[1] = (unsigned int)cmp_type;
                            cmp_functor_dict ff(idx_vals[s2_val], (bool*)d_res, (unsigned int*)d_v);
                            thrust::for_each(begin, begin + a->mRecCount, ff);
                        }
                        else {
                            // literal not in the dictionary: nothing matches
                            cudaMemset(d_res,0,a->mRecCount);
                        }
                    }
                    else {
                        auto s = a->string_map[s2_val];
                        auto pos = s.find_first_of(".");
                        auto len = data_dict[s.substr(0, pos)][s.substr(pos+1)].col_length;
                        dd_v[0] = len;
                        dd_v[1] = (unsigned int)s1_val.length() + 1;
                        if(cmp_type != 7) {
                            // equality/inequality via 64-bit hashes stored in
                            // the per-segment .hash sidecar file
                            thrust::device_vector<unsigned long long int> vv(1);
                            while(s1_val.length() < len) {
                                s1_val = s1_val + '\0';
                            };
                            vv[0] = MurmurHash64A(&s1_val[0], s1_val.length(), hash_seed)/2;
                            string f1 = a->load_file_name + "." + s2_val + "."
+ to_string(segment) + ".hash";
                            // NOTE(review): fopen/fread results are not
                            // checked — a missing .hash file would crash here.
                            FILE* f = fopen(f1.c_str(), "rb" );
                            unsigned long long int* buff = new unsigned long long int[a->mRecCount];
                            unsigned int cnt;
                            fread(&cnt, 4, 1, f);
                            fread(buff, a->mRecCount*8, 1, f);
                            fclose(f);
                            thrust::device_vector<unsigned long long int> vals(a->mRecCount);
                            thrust::copy(buff, buff+a->mRecCount, vals.begin());
                            if(cmp_type == 4) //==
                                thrust::transform(vals.begin(), vals.end(), thrust::make_constant_iterator(vv[0]), dd_res, thrust::equal_to<unsigned long long int>());
                            else if(cmp_type == 3) //!=
                                thrust::transform(vals.begin(), vals.end(), thrust::make_constant_iterator(vv[0]), dd_res, thrust::not_equal_to<unsigned long long int>());
                            delete [] buff;
                        }
                        else {
                            // LIKE: run the gpu_regex matcher over the raw
                            // string file once, cache matching record ids in
                            // map_res, then mark matches for this segment
                            if(a->map_like.find(s2_val) == a->map_like.end()) {
                                void* d_str;
                                cudaMalloc((void **) &d_str, len);
                                cudaMemset(d_str,0,len);
                                cudaMemcpy( d_str, (void *) s1_val.c_str(), s1_val.length(), cudaMemcpyHostToDevice);
                                string f1 = a->load_file_name + "." + s2_val;
                                FILE* f = fopen(f1.c_str(), "rb" );
                                fseek(f, 0, SEEK_END);
                                long fileSize = ftell(f);
                                fseek(f, 0, SEEK_SET);
                                // split the file into pieces that fit in half
                                // of the free GPU memory, rounded to a whole
                                // number of fixed-width records
                                unsigned int pieces = 1;
                                if(fileSize > getFreeMem()/2)
                                    pieces = fileSize /(getFreeMem()/2) + 1;
                                auto piece_sz = fileSize/pieces;
                                ldiv_t ldivresult = ldiv(fileSize/pieces, len);
                                if(ldivresult.rem != 0)
                                    piece_sz = fileSize/pieces + (len - ldivresult.rem);
                                thrust::device_vector<char> dev(piece_sz);
                                char* buff = new char[piece_sz];
                                a->map_res[s2_val] = thrust::device_vector<unsigned int>();
                                for(auto i = 0; i < pieces; i++) {
                                    if(i == pieces-1)
                                        piece_sz = fileSize - piece_sz*i;
                                    fread(buff, piece_sz, 1, f);
                                    cudaMemcpy( thrust::raw_pointer_cast(dev.data()), (void*)buff, piece_sz, cudaMemcpyHostToDevice);
                                    gpu_regex ff(thrust::raw_pointer_cast(dev.data()), (char*)d_str, (bool*)d_res, (unsigned int*)d_v);
                                    thrust::for_each(begin, begin + piece_sz/len, ff);
                                    // gather global record ids of the matches
                                    auto cnt = thrust::count(dd_res, dd_res + piece_sz/len, 1);
                                    auto offset = a->map_res[s2_val].size();
                                    a->map_res[s2_val].resize(a->map_res[s2_val].size() + cnt);
                                    thrust::copy_if(thrust::make_counting_iterator((unsigned int)(i*(piece_sz/len))),
                                                    thrust::make_counting_iterator((unsigned int)((i+1)*(piece_sz/len))),
                                                    dd_res, a->map_res[s2_val].begin() + offset, thrust::identity<bool>());
                                };
                                fclose(f);
                                delete [] buff;
                                cudaFree(d_str);
                                thrust::sort(a->map_res[s2_val].begin(), a->map_res[s2_val].end());
                                a->map_like[s2_val] = 1;
                            };
                            // now lets calc the current segments's matches
                            cudaMemset(d_res, 0, a->hostRecCount);
                            binary_search(a->map_res[s2_val].begin(),a->map_res[s2_val].end(),
                                          a->d_columns_int[s2_val].begin(), a->d_columns_int[s2_val].end(), dd_res);
                        };
                    };
                    cudaFree(d_v);
                    exe_type.push("NAME");
                    bool_vectors.push((bool*)d_res);
                }
                else if (s1.compare("NUMBER") == 0 && s2.compare("NAME") == 0) {
                    // literal CMP column
                    n1 = exe_nums.top();
                    exe_nums.pop();
                    s1_val = exe_value.top();
                    exe_value.pop();
                    if(s1_val.find(".") != string::npos) { //bitmap index
                        void* d_v, *d_res;
                        cudaMalloc((void **) &d_v, 8);
                        thrust::device_ptr<unsigned int> dd_v((unsigned int*)d_v);
                        cudaMalloc((void **) &d_res, a->mRecCount);
                        if(a->idx_dictionary_int[s1_val].find(n1) != a->idx_dictionary_int[s1_val].end()) {
                            dd_v[0] = a->idx_dictionary_int[s1_val][n1];
                            dd_v[1] = (unsigned int)cmp_type;
                            thrust::counting_iterator<unsigned int> begin(0);
                            cmp_functor_dict ff(idx_vals[s1_val], (bool*)d_res, (unsigned int*)d_v);
                            thrust::for_each(begin, begin + a->mRecCount, ff);
                        }
                        else {
                            cudaMemset(d_res,0,a->mRecCount);
                        };
                        exe_type.push("NAME");
                        bool_vectors.push((bool*)d_res);
                        cudaFree(d_v);
                    }
                    else {
                        int_type* t = get_vec(a, s1_val, exe_vectors, free_mem);
                        thrust::device_ptr<int_type> bp((int_type*)t);
                        auto p2 = exe_precision.top();
                        exe_precision.pop();
                        auto p1 = get_decimals(a, s1_val, exe_precision);
                        // align both sides to the larger decimal precision
                        auto pres = std::max(p1, p2);
                        exe_precision.push(pres);
                        exe_type.push("NAME");
                        bool_vectors.push(a->compare(t,n1,cmp_type, pres-p1, pres-p2));
                        if(free_mem)
                            cudaFree(t);
                    };
                }
                else if (s1.compare("NAME") == 0 && s2.compare("NUMBER") == 0) {
                    // column CMP literal: flip the comparison so the shared
                    // literal-vs-column path below can be reused
                    cmp_type = reverse_op(cmp_type);
                    n1 = exe_nums.top();
                    exe_nums.pop();
                    s2_val = exe_value.top();
                    exe_value.pop();
                    if(s2_val.find(".") != string::npos) { //bitmap index
                        void* d_v, *d_res;
                        cudaMalloc((void **) &d_v, 8);
                        thrust::device_ptr<unsigned int> dd_v((unsigned int*)d_v);
                        cudaMalloc((void **) &d_res, a->mRecCount);
                        if(a->idx_dictionary_int[s2_val].find(n1) != a->idx_dictionary_int[s2_val].end()) {
                            dd_v[0] = a->idx_dictionary_int[s2_val][n1];
                            dd_v[1] = (unsigned int)cmp_type;
                            thrust::counting_iterator<unsigned int> begin(0);
                            cmp_functor_dict ff(idx_vals[s2_val], (bool*)d_res, (unsigned int*)d_v);
                            thrust::for_each(begin, begin + a->mRecCount, ff);
                        }
                        else {
                            cudaMemset(d_res,0,a->mRecCount);
                        };
                        exe_type.push("NAME");
                        bool_vectors.push((bool*)d_res);
                        cudaFree(d_v);
                    }
                    else {
                        int_type* t = get_vec(a, s2_val, exe_vectors, free_mem);
                        auto p2 = exe_precision.top();
                        exe_precision.pop();
                        auto p1 = get_decimals(a, s2_val, exe_precision);
                        auto pres = std::max(p1, p2);
                        exe_precision.push(pres);
                        exe_type.push("NAME");
                        bool_vectors.push(a->compare(t,n1,cmp_type, p1, p2));
                        if(free_mem)
                            cudaFree(t);
                    };
                }
                else if (s1.compare("NAME") == 0 && s2.compare("NAME") == 0) {
                    // column CMP column
                    s1_val = exe_value.top();
                    exe_value.pop();
                    s2_val = exe_value.top();
                    exe_value.pop();
                    exe_type.push("NAME");
                    int_type* t = get_vec(a, s1_val, exe_vectors, free_mem);
                    int_type* t1 = get_vec(a, s2_val, exe_vectors, free_mem1);
                    auto p1 = get_decimals(a, s1_val, exe_precision);
                    auto p2 = get_decimals(a, s2_val, exe_precision);
                    auto pres = max(p1, p2);
                    exe_precision.push(pres);
                    bool_vectors.push(a->compare(t1,t,cmp_type, p2, p1));
                    if(free_mem)
                        cudaFree(t);
                    if(free_mem1)
                        cudaFree(t1);
                }
            }
            else if (ss.compare("AND") == 0) {
                // combine the two most recent predicates
                bool* s3 = bool_vectors.top();
                bool_vectors.pop();
                bool* s2 = bool_vectors.top();
                bool_vectors.pop();
                exe_type.push("NAME");
                bool_vectors.push(a->logical_and(s2,s3));
            }
            else if (ss.compare("OR") == 0) {
                bool* s3 = bool_vectors.top();
                bool_vectors.pop();
                bool* s2 = bool_vectors.top();
                bool_vectors.pop();
                exe_type.push("NAME");
                bool_vectors.push(a->logical_or(s2,s3));
            }
            else {
                // unrecognized token kind
                cout << "found nothing " << endl;
            }
        };
    };
    // the final predicate left on the stack is the filter result
    return bool_vectors.top();
}
the_stack
namespace hornets_nest {

// Sets the initial binary-search window [u_min,u_max] x [v_min,v_max] for the
// merge-path diagonal `diag_id` over two sorted lists of lengths u_len and
// v_len (assumes u_len <= v_len, per the caller's small/large ordering).
// diag_id == 0 needs no search, so *found is set immediately.
__device__ __forceinline__
void initialize(vid_t diag_id,
                vid_t u_len,
                vid_t v_len,
                vid_t* __restrict__ u_min,
                vid_t* __restrict__ u_max,
                vid_t* __restrict__ v_min,
                vid_t* __restrict__ v_max,
                int*   __restrict__ found) {
    if (diag_id == 0) {
        *u_min = *u_max = *v_min = *v_max = 0;
        *found = 1;
    }
    else if (diag_id < u_len) {
        *u_min = 0;
        *u_max = diag_id;
        *v_max = diag_id;
        *v_min = 0;
    }
    else if (diag_id < v_len) {
        *u_min = 0;
        *u_max = u_len;
        *v_max = diag_id;
        *v_min = diag_id - u_len;
    }
    else {
        *u_min = diag_id - v_len;
        *u_max = u_len;
        *v_min = diag_id - u_len;
        *v_max = v_len;
    }
}

// Splits the total merge work (uLength + vLength elements) evenly across
// threadsPerIntersection cooperating threads; returns this thread's share and
// its starting diagonal. The first `remainderWork` threads get one extra unit.
__device__ __forceinline__
void workPerThread(vid_t uLength,
                   vid_t vLength,
                   int threadsPerIntersection,
                   int threadId,
                   int* __restrict__ outWorkPerThread,
                   int* __restrict__ outDiagonalId) {
    int totalWork = uLength + vLength;
    int remainderWork = totalWork % threadsPerIntersection;
    int workPerThread = totalWork / threadsPerIntersection;

    // number of preceding threads with a long (workPerThread+1) share vs.
    // a short (workPerThread) share
    int longDiagonals  = threadId > remainderWork ? remainderWork : threadId;
    int shortDiagonals = threadId > remainderWork ? threadId - remainderWork
                                                  : 0;

    *outDiagonalId = (workPerThread + 1) * longDiagonals +
                     workPerThread * shortDiagonals;
    *outWorkPerThread = workPerThread + (threadId < remainderWork);
}

// Binary search along a merge-path diagonal: narrows [outUMin,outUMax] /
// [outVMin,outVMax] until the crossing point (outUCurr, outVCurr) is found.
// NOTE(review): `length` is only assigned inside the `*outVCurr >= *outVMax`
// branch, yet it is read in the trailing if — when the loop terminates via
// the comp1/!comp2 case, `length` appears to be used uninitialized; verify
// against upstream hornets-nest.
__device__ __forceinline__
void bSearch(unsigned found,
             vid_t diagonalId,
             const vid_t* __restrict__ uNodes,
             const vid_t* __restrict__ vNodes,
             const vid_t* __restrict__ uLength,
             vid_t* __restrict__ outUMin,
             vid_t* __restrict__ outUMax,
             vid_t* __restrict__ outVMin,
             vid_t* __restrict__ outVMax,
             vid_t* __restrict__ outUCurr,
             vid_t* __restrict__ outVCurr) {
    vid_t length;
    while (!found) {
        *outUCurr = (*outUMin + *outUMax) >> 1;   // midpoint of the u window
        *outVCurr = diagonalId - *outUCurr;
        if (*outVCurr >= *outVMax) {
            length = *outUMax - *outUMin;
            if (length == 1) {
                found = 1;
                continue;
            }
        }
        // compare elements straddling the diagonal to decide which half keeps
        // the crossing point
        unsigned comp1 = uNodes[*outUCurr] > vNodes[*outVCurr - 1];
        unsigned comp2 = uNodes[*outUCurr - 1] > vNodes[*outVCurr];
        if (comp1 && !comp2)
            found = 1;
        else if (comp1) {
            *outVMin = *outVCurr;
            *outUMax = *outUCurr;
        }
        else {
            *outVMax = *outVCurr;
            *outUMin = *outUCurr;
        }
    }
    // boundary fix-up when the search collapsed against the v window edge
    if (*outVCurr >= *outVMax && length == 1 && *outVCurr > 0 &&
            *outUCurr > 0 && *outUCurr < *uLength - 1) {
        unsigned comp1 = uNodes[*outUCurr] > vNodes[*outVCurr - 1];
        unsigned comp2 = uNodes[*outUCurr - 1] > vNodes[*outVCurr];
        if (!comp1 && !comp2) {
            (*outUCurr)++;
            (*outVCurr)--;
        }
    }
}

// If the element just before one cursor equals the element at the other
// cursor, advance past the duplicate so the intersection is not missed;
// returns how many matches were consumed (0..2) so the caller can count them.
__device__ __forceinline__
int fixStartPoint(vid_t uLength, vid_t vLength,
                  vid_t* __restrict__ uCurr,
                  vid_t* __restrict__ vCurr,
                  const vid_t* __restrict__ uNodes,
                  const vid_t* __restrict__ vNodes) {
    unsigned uBigger = (*uCurr > 0) && (*vCurr < vLength) &&
                       (uNodes[*uCurr - 1] == vNodes[*vCurr]);
    unsigned vBigger = (*vCurr > 0) && (*uCurr < uLength) &&
                       (vNodes[*vCurr - 1] == uNodes[*uCurr]);
    *uCurr += vBigger;
    *vCurr += uBigger;
    return uBigger + vBigger;
}

/*
__device__ __forceinline__
vid_t* binSearch(vid_t *a, vertexId_t x, vid_t n) {
    vid_t min = 0, max = n, acurr, curr;// = (min+max)/2
    do {
        curr  = (min + max) / 2;
        acurr = a[curr];
        min   = (x > acurr) ? curr : min;
        max   = (x < acurr) ?
curr : max;
    } while (x != acurr || min != max);
    return a + curr;
}*/

/*
__device__ __forceinline__
int findIndexOfVertex(HornetGraph* hornet, vid_t src, vid_t dst__) {
    vid_t   srcLen  = hornet->dVD->used[src];
    vid_t* adj_src = hornet->dVD->adj[src]->dst;
    for (vid_t adj = 0; adj < srcLen; adj++) {
        vid_t dst = adj_src[adj];
        if (dst == dst__)
            return adj;
    }
#if !defined(NDEBUG)
    printf("This should never happpen\n");
#endif
    return -1;
}*/

// Standard binary search over a sorted vid_t array; writes the index of `key`
// into `pos`. `pos` is left unchanged when the key is absent (callers
// initialize it to -1 first).
__device__ __forceinline__
void indexBinarySearch(vid_t* data, vid_t arrLen, vid_t key, int& pos) {
    int low = 0;
    int high = arrLen - 1;
    while (high >= low) {
        int middle = (low + high) / 2;
        if (data[middle] == key) {
            pos = middle;
            return;
        }
        if (data[middle] < key)
            low = middle + 1;
        if (data[middle] > key)
            high = middle - 1;
    }
}

// Finds the positions of neighbors v1 and v2 inside `vertex`'s (sorted)
// adjacency list via two binary searches; -1 when not found.
template<typename Vertex>
__device__ __forceinline__
void findIndexOfTwoVerticesBinary(const Vertex& vertex, vid_t v1, vid_t v2,
                                  int &pos_v1, int &pos_v2) {
    //vid_t* adj_src = hornet->dVD->adj[src]->dst;
    //vid_t srcLen = hornet->dVD->used[src];
    vid_t srcLen   = vertex.degree();
    vid_t* adj_src = vertex.neighbor_ptr();
    pos_v1 = -1;
    pos_v2 = -1;
    indexBinarySearch(adj_src, srcLen, v1, pos_v1);
    indexBinarySearch(adj_src, srcLen, v2, pos_v2);
}

// Linear-scan variant of the above (does not require a sorted adjacency).
template<typename Vertex>
__device__ __forceinline__
void findIndexOfTwoVertices(const Vertex& vertex, vid_t v1, vid_t v2,
                            int &pos_v1, int &pos_v2) {
    //vid_t srcLen = hornet->dVD->used[src];
    //vid_t* adj_src = hornet->dVD->adj[src]->dst;
    vid_t srcLen   = vertex.degree();
    vid_t* adj_src = vertex.neighbor_ptr();
    pos_v1 = -1;
    pos_v2 = -1;
    for(vid_t adj = 0; adj < srcLen; adj += 1) {
        vid_t dst = adj_src[adj];
        if (dst == v1)
            pos_v1 = adj;
        if (dst == v2)
            pos_v2 = adj;
        if (pos_v1 != -1 && pos_v2 != -1)
            return;
    }
#if !defined(NDEBUG)
    printf("This should never happpen\n");
#endif
}

// Walks this thread's slice of the merge path over the two adjacency lists,
// counting common neighbors (triangles on edge u-v). When `subtract` is set
// (KTruss), each found triangle atomically decrements the edge weights of all
// three participating edges.
// NOTE(review): `comp` is declared uninitialized and read after the while
// loop in the final `*triangles -= ...` statement — if the loop body never
// executes, `comp` appears to be used uninitialized; verify against upstream.
template<bool uMasked, bool vMasked, bool subtract, bool upd3rdV,
         typename HornetDevice>
__device__ __forceinline__
void intersectCount(HornetDevice& hornet,
                    vid_t uLength, vid_t vLength,
                    const vid_t* __restrict__ uNodes,
                    const vid_t* __restrict__ vNodes,
                    vid_t* __restrict__ uCurr,
                    vid_t* __restrict__ vCurr,
                    int* __restrict__ workIndex,
                    const int* __restrict__ workPerThread,
                    int* __restrict__ triangles,
                    int found,
                    const triangle_t* __restrict__ output_triangles,
                    const vid_t* __restrict__ uMask,
                    const vid_t* __restrict__ vMask,
                    triangle_t multiplier,
                    vid_t src, vid_t dest,
                    vid_t u, vid_t v) {
    if (*uCurr < uLength && *vCurr < vLength) {
        int comp;
        int vmask;
        int umask;
        while (*workIndex < *workPerThread) {
            // masking is currently disabled; both masks forced to 0
            // vmask = vMasked ? vMask[*vCurr] : 0;
            // umask = uMasked ? uMask[*uCurr] : 0;
            vmask=umask=0;
            comp  = uNodes[*uCurr] - vNodes[*vCurr];

            // *triangles += (comp == 0 && !umask && !vmask);
            *triangles += (comp == 0);

            // if (upd3rdV && comp == 0 && !umask && !vmask) {
            if (upd3rdV && comp == 0) {
                if (subtract) {
                    // atomicSub(output_triangles + uNodes[*uCurr], multiplier);

                    // Ktruss: decrement the weight of every edge of the
                    // triangle (common-u, common-v, and u-v from both sides)
                    vid_t common = uNodes[*uCurr];
                    //vid_t pos_id;

                    auto vertex_common = hornet.vertex(common);
                    auto edge_weight_ptr = vertex_common.edge_weight_ptr();
                    vid_t posu, posv;
                    //findIndexOfTwoVerticesBinary(hornet, common, u, v,
                    //                             posu, posv);
                    findIndexOfTwoVerticesBinary(vertex_common, u, v,
                                                 posu, posv);

                    if (posu != -1)
                        atomicSub(edge_weight_ptr + posu, 1);
                        //atomicSub(hornet->dVD->adj[common]->ew + posu, 1);
#if !defined(NDEBUG)
                    else
                        printf("1");
#endif
                    if (posv != -1)
                        atomicSub(edge_weight_ptr + posv, 1);
                        //atomicSub(hornet->dVD->adj[common]->ew + posv, 1);
#if !defined(NDEBUG)
                    else
                        printf("2");
#endif
                    auto vertex_u = hornet.vertex(u);
                    auto vertex_v = hornet.vertex(v);
                    //atomicSub(hornet->dVD->adj[u]->ew + *uCurr, 1);
                    //atomicSub(hornet->dVD->adj[v]->ew + *vCurr, 1);
                    atomicSub(vertex_u.edge_weight_ptr() + *uCurr, 1);
                    atomicSub(vertex_v.edge_weight_ptr() + *vCurr, 1);
                }
            }
            // advance the cursor(s) with the smaller element; a match (comp
            // == 0) advances both and costs two work units
            *uCurr     += (comp <= 0 && !vmask) || umask;
            *vCurr     += (comp >= 0 && !umask) || vmask;
            *workIndex += (comp == 0 && !umask && !vmask) + 1;

            if (*vCurr >= vLength || *uCurr >= uLength)
                break;
        }
        // undo double-counting of a match shared with the next thread's slice
        *triangles -= ((comp == 0) && (*workIndex > *workPerThread) && found);
    }
}

// u_len < v_len
// Computes this thread's triangle count contribution for edge (u,v) using the
// merge-path partition: find the starting diagonal, binary-search the
// crossing point, then intersect this slice of the two adjacency lists.
template <bool uMasked, bool vMasked, bool subtract, bool upd3rdV,
          typename HornetDevice>
__device__ __forceinline__
triangle_t count_triangles(HornetDevice& hornet,
                           vid_t u,
                           const vid_t* __restrict__ u_nodes,
                           vid_t u_len,
                           vid_t v,
                           const vid_t* __restrict__ v_nodes,
                           vid_t v_len,
                           int threads_per_block,
                           volatile vid_t* __restrict__ firstFound,
                           int tId,
                           const triangle_t* __restrict__ output_triangles,
                           const vid_t* __restrict__ uMask,
                           const vid_t* __restrict__ vMask,
                           triangle_t multiplier,
                           vid_t src, vid_t dest) {
    // Partitioning the work to the multiple thread of a single GPU processor.
    //The threads should get a near equal number of the elements to
    //Tersect - this number will be off by 1.
    int work_per_thread, diag_id;
    workPerThread(u_len, v_len, threads_per_block, tId,
                  &work_per_thread, &diag_id);
    triangle_t triangles = 0;
    int       work_index = 0;
    int            found = 0;
    vid_t u_min, u_max, v_min, v_max, u_curr, v_curr;

    firstFound[tId] = 0;

    if (work_per_thread > 0) {
        // For the binary search, we are figuring out the initial poT of
        // search.
        initialize(diag_id, u_len, v_len, &u_min, &u_max, &v_min, &v_max,
                   &found);
        u_curr = 0;
        v_curr = 0;

        bSearch(found, diag_id, u_nodes, v_nodes, &u_len, &u_min, &u_max,
                &v_min, &v_max, &u_curr, &v_curr);

        int sum = fixStartPoint(u_len, v_len, &u_curr, &v_curr,
                                u_nodes, v_nodes);
        work_index += sum;
        if (tId > 0)
            firstFound[tId - 1] = sum;
        triangles += sum;
        intersectCount<uMasked, vMasked, subtract, upd3rdV>
            (hornet, u_len, v_len, u_nodes, v_nodes, &u_curr, &v_curr,
             &work_index, &work_per_thread, &triangles, firstFound[tId],
             output_triangles, uMask, vMask, multiplier, src, dest, u, v);
    }
    return triangles;
}

// Splits the vertex range evenly across thread blocks; the first
// `remainderBlocks` blocks handle one extra vertex each.
__device__ __forceinline__
void workPerBlock(vid_t numVertices,
                  vid_t* __restrict__ outMpStart,
                  vid_t* __restrict__ outMpEnd,
                  int blockSize) {
    vid_t       verticesPerMp = numVertices / gridDim.x;
    vid_t     remainderBlocks = numVertices % gridDim.x;
    vid_t   extraVertexBlocks = (blockIdx.x > remainderBlocks) ?
remainderBlocks : blockIdx.x; vid_t regularVertexBlocks = (blockIdx.x > remainderBlocks) ? blockIdx.x - remainderBlocks : 0; vid_t mpStart = (verticesPerMp + 1) * extraVertexBlocks + verticesPerMp * regularVertexBlocks; *outMpStart = mpStart; *outMpEnd = mpStart + verticesPerMp + (blockIdx.x < remainderBlocks); } //============================================================================== //============================================================================== template<typename HornetDevice> __global__ void devicecuStingerKTruss(HornetDevice hornet, const triangle_t* __restrict__ output_triangles, int threads_per_block, int number_blocks, int shifter, HostDeviceVar<KTrussData> hd_data) { vid_t nv = hornet.nV(); // Partitioning the work to the multiple thread of a single GPU processor. //The threads should get a near equal number of the elements //to intersect - this number will be off by no more than one. int tx = threadIdx.x; vid_t this_mp_start, this_mp_stop; const int blockSize = blockDim.x; workPerBlock(nv, &this_mp_start, &this_mp_stop, blockSize); //__shared__ triangle_t s_triangles[1024]; __shared__ vid_t firstFound[1024]; vid_t adj_offset = tx >> shifter; vid_t* firstFoundPos = firstFound + (adj_offset << shifter); for (vid_t src = this_mp_start; src < this_mp_stop; src++) { //vid_t srcLen = hornet->dVD->getUsed()[src]; auto vertex = hornet.vertex(src); vid_t srcLen = vertex.degree(); triangle_t tCount = 0; for(int k = adj_offset; k < srcLen; k += number_blocks) { //vid_t dest = hornet->dVD->getAdj()[src]->dst[k]; vid_t dest = vertex.edge(k).dst_id(); //int destLen = hornet->dVD->getUsed()[dest]; int destLen = hornet.vertex(dest).degree(); if (dest < src) //opt continue; //opt bool avoidCalc = (src == dest) || (destLen < 2) || (srcLen < 2); if (avoidCalc) continue; bool sourceSmaller = srcLen < destLen; vid_t small = sourceSmaller ? src : dest; vid_t large = sourceSmaller ? dest : src; vid_t small_len = sourceSmaller ? 
srcLen : destLen; vid_t large_len = sourceSmaller ? destLen : srcLen; //const vid_t* small_ptr = hornet->dVD->getAdj()[small]->dst; //const vid_t* large_ptr = hornet->dVD->getAdj()[large]->dst; const vid_t* small_ptr = hornet.vertex(small).neighbor_ptr(); const vid_t* large_ptr = hornet.vertex(large).neighbor_ptr(); // triangle_t triFound = count_triangles<false,false,false,true> triangle_t triFound = count_triangles<false, false, false, false> (hornet, small, small_ptr, small_len, large, large_ptr, large_len, threads_per_block, firstFoundPos, tx % threads_per_block, output_triangles, nullptr, nullptr, 1, src, dest); tCount += triFound; int pos = hd_data().offset_array[src] + k; atomicAdd(hd_data().triangles_per_edge + pos, triFound); pos = -1; //opt //indexBinarySearch(hornet->dVD->getAdj()[dest]->dst // destLen, src,pos); auto dest_ptr = hornet.vertex(dest).neighbor_ptr(); indexBinarySearch(dest_ptr, destLen, src, pos); pos = hd_data().offset_array[dest] + pos; atomicAdd(hd_data().triangles_per_edge + pos, triFound); } // s_triangles[tx] = tCount; // blockReduce(&output_triangles[src],s_triangles,blockSize); } } //============================================================================== void kTrussOneIteration(HornetGraph& hornet, const triangle_t* __restrict__ output_triangles, int threads_per_block, int number_blocks, int shifter, int thread_blocks, int blockdim, HostDeviceVar<KTrussData>& hd_data) { //devicecuStingerKTruss <<< thread_blocks, blockdim >>> // (hornet.devicePtr(), output_triangles, threads_per_block, // number_blocks, shifter, devData); devicecuStingerKTruss <<< thread_blocks, blockdim >>> (hornet.device_side(), output_triangles, threads_per_block, number_blocks, shifter, hd_data); } //============================================================================== //============================================================================== template<typename HornetDevice> __global__ void devicecuStingerNewTriangles(HornetDevice hornet, 
gpu::BatchUpdate batch_update, const triangle_t* __restrict__ output_triangles, int threads_per_block, int number_blocks, int shifter, bool deletion) { //vid_t batchSize = *(batch_update->getBatchSize()); vid_t batchSize = batch_update.size(); // Partitioning the work to the multiple thread of a single GPU processor. //The threads should get a near equal number of the elements to //intersect - this number will be off by no more than one. int tx = threadIdx.x; vid_t this_mp_start, this_mp_stop; //vid_t* d_ind = batch_update->getDst(); //vid_t* d_seg = batch_update->getSrc(); vid_t* d_ind = batch_update.dst_ptr(); vid_t* d_seg = batch_update.src_ptr(); workPerBlock(batchSize, &this_mp_start, &this_mp_stop, blockDim.x); __shared__ vid_t firstFound[1024]; vid_t adj_offset = tx >> shifter; vid_t* firstFoundPos = firstFound + (adj_offset << shifter); for (vid_t edge = this_mp_start + adj_offset; edge < this_mp_stop; edge += number_blocks){ //if (batch_update->getIndDuplicate()[edge] == 1) // this means it's a duplicate edge // continue; vid_t src = d_seg[edge]; vid_t dest = d_ind[edge]; if (src < dest) continue; vid_t srcLen = hornet.vertex(src).degree(); vid_t destLen = hornet.vertex(dest).degree(); //vid_t srcLen = hornet->dVD->getUsed()[src]; //vid_t destLen = hornet->dVD->getUsed()[dest]; bool avoidCalc = (src == dest) || (destLen == 0) || (srcLen == 0); if (avoidCalc) continue; bool sourceSmaller = srcLen < destLen; vid_t small = sourceSmaller ? src : dest; vid_t large = sourceSmaller ? dest : src; vid_t small_len = sourceSmaller ? srcLen : destLen; vid_t large_len = sourceSmaller ? 
destLen : srcLen;

        const vid_t* small_ptr = hornet.vertex(small).neighbor_ptr();
        const vid_t* large_ptr = hornet.vertex(large).neighbor_ptr();

        triangle_t tCount = count_triangles<false, false, true, true>(
            hornet, small, small_ptr, small_len, large, large_ptr, large_len,
            threads_per_block, firstFoundPos, tx % threads_per_block,
            output_triangles, nullptr, nullptr, 2, src, dest);
        // NOTE(review): __syncthreads() sits inside a loop with per-thread
        // `continue` and differing trip counts, so not all threads of the
        // block are guaranteed to reach it — confirm whether this barrier is
        // actually required here.
        __syncthreads();
    }
}

//==============================================================================

// Merge-path intersection of uNodes/vNodes for the asymmetric (batch edge vs.
// adjacency list) case. Counts common neighbors into *triangles and, when
// `subtract` is set, atomically decrements the edge weights of the (dest, w)
// edge for every common neighbor w found.
//
// Fix: `comp` was previously declared uninitialized; when the `while` loop
// body never executes (e.g. *workIndex >= *workPerThread on entry), the final
// `(comp == 0) && ...` read was undefined behavior. It is now initialized to
// a nonzero sentinel so the no-iteration case is well-defined (no correction
// applied); whenever the loop runs, `comp` is overwritten before use, so the
// behavior of the common path is unchanged.
template <bool uMasked, bool vMasked, bool subtract, bool upd3rdV,
          typename HornetDevice>
__device__ __forceinline__
void intersectCountAsymmetric(HornetDevice& hornet,
                              vid_t uLength, vid_t vLength,
                              const vid_t* __restrict__ uNodes,
                              const vid_t* __restrict__ vNodes,
                              vid_t* __restrict__ uCurr,
                              vid_t* __restrict__ vCurr,
                              int* __restrict__ workIndex,
                              const int* __restrict__ workPerThread,
                              int* __restrict__ triangles,
                              int found,
                              triangle_t* __restrict__ output_triangles,
                              const vid_t* __restrict__ uMask,
                              const vid_t* __restrict__ vMask,
                              triangle_t multiplier,
                              vid_t src, vid_t dest,
                              vid_t u, vid_t v)
{
    if (*uCurr < uLength && *vCurr < vLength) {
        int comp  = 1;   // nonzero sentinel: "last comparison was not a match"
        int vmask = 0;
        int umask = 0;
        while (*workIndex < *workPerThread) {
            // Masking is disabled in this build (masks forced to zero).
            umask = vmask = 0;
            comp = uNodes[*uCurr] - vNodes[*vCurr];

            *triangles += (comp == 0);

            if (upd3rdV && comp == 0) {
                if (subtract) {
                    // K-truss support update: decrement the weight of the
                    // edge from `dest` toward the shared endpoint.
                    if (dest == u) {
                        auto w_ptr = hornet.vertex(dest).edge_weight_ptr();
                        atomicSub(w_ptr + *uCurr, 1);
                    }
                    else {
                        auto w_ptr = hornet.vertex(dest).edge_weight_ptr();
                        atomicSub(w_ptr + *vCurr, 1);
                    }
                }
            }
            *uCurr     += (comp <= 0 && !vmask) || umask;
            *vCurr     += (comp >= 0 && !umask) || vmask;
            *workIndex += (comp == 0 && !umask && !vmask) + 1;

            if (*vCurr == vLength || *uCurr == uLength)
                break;
        }
        // Undo one over-count when the last match fell past this thread's
        // work boundary and the next thread already claimed it.
        *triangles -= ((comp == 0) && (*workIndex > *workPerThread) && (found));
    }
}

//==============================================================================
//==============================================================================
// u_len < v_len
// Asymmetric counterpart of count_triangles: same merge-path partitioning,
// but delegates to intersectCountAsymmetric (non-const output_triangles).
template <bool uMasked, bool vMasked, bool subtract, bool upd3rdV,
          typename HornetDevice>
__device__ __forceinline__
triangle_t count_trianglesAsymmetric(HornetDevice& hornet,
                                     vid_t u,
                                     const vid_t* __restrict__ u_nodes,
                                     vid_t u_len,
                                     vid_t v,
                                     const vid_t* __restrict__ v_nodes,
                                     vid_t v_len,
                                     int threads_per_block,
                                     volatile vid_t* __restrict__ firstFound,
                                     int tId,
                                     triangle_t* __restrict__ output_triangles,
                                     const vid_t* __restrict__ uMask,
                                     const vid_t* __restrict__ vMask,
                                     triangle_t multiplier,
                                     vid_t src, vid_t dest)
{
    // Split the intersection work near-evenly across the cooperating threads.
    int work_per_thread, diag_id;
    workPerThread(u_len, v_len, threads_per_block, tId,
                  &work_per_thread, &diag_id);
    triangle_t triangles  = 0;
    int        work_index = 0;
    int        found      = 0;
    vid_t u_min, u_max, v_min, v_max, u_curr, v_curr;

    firstFound[tId] = 0;
    if (work_per_thread > 0) {
        // Binary search for this thread's starting point on the merge path.
        initialize(diag_id, u_len, v_len, &u_min, &u_max, &v_min, &v_max,
                   &found);
        u_curr = 0;
        v_curr = 0;
        bSearch(found, diag_id, u_nodes, v_nodes, &u_len,
                &u_min, &u_max, &v_min, &v_max, &u_curr, &v_curr);

        int sum = fixStartPoint(u_len, v_len, &u_curr, &v_curr,
                                u_nodes, v_nodes);
        work_index += sum;
        if (tId > 0)
            firstFound[tId - 1] = sum;
        triangles += sum;

        intersectCountAsymmetric<uMasked, vMasked, subtract, upd3rdV>
            (hornet, u_len, v_len, u_nodes, v_nodes, &u_curr, &v_curr,
             &work_index, &work_per_thread, &triangles, firstFound[tId],
             output_triangles, uMask, vMask, multiplier, src, dest, u, v);
    }
    return triangles;
}

//==============================================================================
//==============================================================================

__device__ int d_value[32];

// Counts triangles formed by two batch edges and one pre-existing edge:
// each batch edge (src, dest) is intersected against dest's adjacency list.
template<typename HornetDevice>
__global__
void deviceBUTwoCUOneTriangles(HornetDevice hornet,
                               gpu::BatchUpdate batch_update,
                               triangle_t* __restrict__ output_triangles,
                               int threads_per_block,
                               int number_blocks,
                               int shifter,
                               bool deletion)
{
    vid_t batchsize = batch_update.size();
    // Each block owns a near-equal slice of the batch edges.
int tx = threadIdx.x; vid_t this_mp_start, this_mp_stop; //vid_t* d_off = batch_update->getOffsets(); const vid_t* d_off = batch_update.csr_wide_offsets_ptr(); //vid_t* d_ind = batch_update->getDst(); //vid_t* d_seg = batch_update->getSrc(); vid_t* d_ind = batch_update.dst_ptr(); vid_t* d_seg = batch_update.src_ptr(); int blockSize = blockDim.x; workPerBlock(batchsize, &this_mp_start, &this_mp_stop, blockSize); __shared__ vid_t firstFound[1024]; vid_t adj_offset = tx >> shifter; vid_t* firstFoundPos = firstFound + (adj_offset << shifter); for (vid_t edge = this_mp_start + adj_offset; edge < this_mp_stop; edge += number_blocks) { //if (batch_update->getIndDuplicate()[edge]) // this means it's a duplicate edge // continue; assert(edge < batch_update.size()); vid_t src = batch_update.src(edge); vid_t dest = batch_update.dst(edge); vid_t srcLen = d_off[src + 1] - d_off[src]; vid_t destLen = hornet.vertex(dest).degree(); bool avoidCalc = src == dest || srcLen == 0; if (avoidCalc) continue; const vid_t* src_ptr = d_ind + d_off[src]; //const vid_t* src_mask_ptr = batch_update->getIndDuplicate() + d_off[src];//??? const vid_t* src_mask_ptr = nullptr; //const vid_t* dst_ptr = hornet->dVD->getAdj()[dest]->dst; const vid_t* dst_ptr = hornet.vertex(dest).neighbor_ptr(); assert(d_off[src] < batch_update.size()); bool sourceSmaller = srcLen < destLen; vid_t small = sourceSmaller ? src : dest; vid_t large = sourceSmaller ? dest : src; vid_t small_len = sourceSmaller ? srcLen : destLen; vid_t large_len = sourceSmaller ? destLen : srcLen; const vid_t* small_ptr = sourceSmaller ? src_ptr : dst_ptr; const vid_t* small_mask_ptr = sourceSmaller ? src_mask_ptr : nullptr; const vid_t* large_ptr = sourceSmaller ? dst_ptr : src_ptr; const vid_t* large_mask_ptr = sourceSmaller ? nullptr : src_mask_ptr; // triangle_t tCount=0; triangle_t tCount = sourceSmaller ? 
count_trianglesAsymmetric<false, false, true, true> (hornet, small, small_ptr, small_len, large, large_ptr, large_len, threads_per_block, firstFoundPos, tx % threads_per_block, output_triangles, small_mask_ptr, large_mask_ptr, 1,src,dest) : count_trianglesAsymmetric<false, false, true, true> (hornet, small, small_ptr, small_len, large, large_ptr, large_len, threads_per_block, firstFoundPos, tx % threads_per_block, output_triangles, small_mask_ptr, large_mask_ptr, 1, src, dest); // atomicSub(output_triangles + src, tCount * 1); // atomicSub(output_triangles + dest, tCount * 1); __syncthreads(); } } void callDeviceDifferenceTriangles( const HornetGraph& hornet, const gpu::BatchUpdate& batch_update, triangle_t* __restrict__ output_triangles, int threads_per_intersection, int num_intersec_perblock, int shifter, int thread_blocks, int blockdim, bool deletion) { dim3 numBlocks(1, 1); //vid_t batchsize = *(batch_update.getHostBUD()->getBatchSize()); //vid_t nv = *(batch_update.getHostBUD()->getNumVertices()); vid_t batchsize = batch_update.size(); //vid_t nv = *(batch_update.getHostBUD()->getNumVertices()); vid_t nv = hornet.nV(); numBlocks.x = ceil( (float) nv / (float) blockdim ); //vid_t* redCU; //vid_t* redBU; numBlocks.x = ceil( (float) (batchsize * threads_per_intersection) / (float) blockdim ); // cout << "The block dim is " << blockdim << " and the number of blocks is" //<< numBlocks.x << endl; // Calculate all new traingles regardless of repetition devicecuStingerNewTriangles <<< numBlocks, blockdim >>> (hornet.device_side(), batch_update, output_triangles, threads_per_intersection, num_intersec_perblock, shifter, deletion); // Calculate triangles formed by ALL new edges // deviceBUThreeTriangles<<<numBlocks,blockdim>>>(hornet.devicePtr(), // batch_update.getDeviceBUD()->devicePtr(), output_triangles, //threads_per_intersection,num_intersec_perblock,shifter,deletion,redBU); // Calculate triangles formed by two new edges deviceBUTwoCUOneTriangles <<< numBlocks, 
blockdim >>> (hornet.device_side(), batch_update, output_triangles, threads_per_intersection, num_intersec_perblock, shifter, deletion); } } // namespace hornets_nest
the_stack
#include<cublas_v2.h>
#include<iostream>
#include<vector>
#include<logger.hpp>
#include<utils.hpp>

using namespace livai::tts::waveglow;
using namespace livai::tts::common;

// Numerically standard logistic sigmoid, evaluated in device code.
__forceinline__ __device__ float sigmoidf(float in)
{
    return 1.f / (1.f + expf(-in));
}

/* Gated activation: dest = tanh(a + b) * sigmoid(a' + b'), where the first
 * `sz` entries of in_conv_out/f3 are the tanh half and the next `sz` entries
 * are the sigmoid half. One thread per output element. */
__global__ void fused_add_tanh_sigm_mul(size_t sz, float_t* in_conv_out,
                                        float* f3, float_t* dest)
{
    size_t index = blockDim.x * blockIdx.x + threadIdx.x;
    if (index < sz) {
        float gate_t = tanhf(in_conv_out[index] + f3[index]);
        float gate_s = sigmoidf(in_conv_out[index + sz] + f3[index + sz]);
        dest[index]  = gate_t * gate_s;
    }
}

/* Inverse affine coupling applied to the second half of the audio tensor:
 * audio[i+stride] = (audio[i+stride] - bias) / exp(log_scale), with bias at
 * end_out[i] and log_scale at end_out[i+stride]. */
__global__ void affine_transform(size_t sz, float_t* audio, float_t* end_out,
                                 size_t stride)
{
    size_t index = blockDim.x * blockIdx.x + threadIdx.x;
    if (index < sz) {
        float_t shifted = audio[index + stride] - end_out[index];
        audio[index + stride] = shifted / expf(end_out[index + stride]);
    }
}

/* Accumulates the skip half (offset by `stride`) into skip_out_sum and the
 * residual half into f1. */
__global__ void skip_res_add(size_t sz, float_t* f5, float* f1,
                             float_t* skip_out_sum, size_t stride)
{
    size_t index = blockDim.x * blockIdx.x + threadIdx.x;
    if (index < sz) {
        skip_out_sum[index] += f5[index + stride];
        f1[index]           += f5[index];
    }
}

/* Last layer has no residual output: only accumulate f1 into skip_out_sum. */
__global__ void skip_add(size_t sz, float_t* f1, float* skip_out_sum)
{
    size_t index = blockDim.x * blockIdx.x + threadIdx.x;
    if (index < sz)
        skip_out_sum[index] += f1[index];
}

/* Element-wise device copy: dest = src. */
__global__ void copy_kernel(size_t sz, float_t* src, float_t* dest)
{
    size_t index = blockDim.x * blockIdx.x + threadIdx.x;
    if (index < sz)
        dest[index] = src[index];
}

/* Writes the transpose of `src` (leading dim ld_src) into `dest` (leading
 * dim ld_dest). One thread per source element. */
__global__ void transpose_kernel(size_t sz, float_t* src, float_t* dest,
                                 size_t ld_src, size_t ld_dest)
{
    size_t index = blockIdx.x * blockDim.x + threadIdx.x;
    size_t i = index / ld_src, j = index % ld_src;
    size_t dest_index = j * ld_dest + i;
    if (index < sz) {
dest[dest_index] = src[index]; } } /* kernel to concatenate z in audio after every 4 rounds of flow */ __global__ void concat_z(size_t sz, float_t* src, float_t* dest, float_t* z, size_t stride) { size_t index = blockDim.x * blockIdx.x + threadIdx.x; if(index < sz) { if(index>=stride) { dest[index]=src[index-stride]; } else { dest[index]=z[index]; } } } void WN::set(cudnnHandle_t& cudnn, size_t max_audio_len) /* initialize the weights and biases of the Convolution layers */ { input_len = max_audio_len; n_channels = hparams::n_channels; n_flows = hparams::n_flows; n_layers = hparams::n_layers; n_groups = hparams::n_groups; n_rem_channels = hparams::n_rem_channels; n_threads = 512; for (int k=0; k<n_flows; k++) { std::string kernel_fname = get_param_name(hparams::start_conv_weight, k); std::string bias_fname = get_param_name(hparams::start_conv_bias, k); auto kernel_weight = cnpy::npy_load(kernel_fname); auto bias_weight = cnpy::npy_load(bias_fname); size_t kernel_width = kernel_weight.shape[2]; size_t in_channel_size = kernel_weight.shape[1]; size_t out_channel_size = kernel_weight.shape[0]; start_conv[k].init(cudnn, kernel_weight, bias_weight, 1, input_len, in_channel_size, 1, input_len, out_channel_size, 1, kernel_width); } for (int k=0; k<n_flows; k++) { size_t dilation = 1; for(int i=0; i<n_layers; i++) { std::string kernel_fname = get_res_name(hparams::in_conv_weight, k, i); std::string bias_fname = get_res_name(hparams::in_conv_bias, k, i); auto kernel_weight = cnpy::npy_load(kernel_fname); auto bias_weight = cnpy::npy_load(bias_fname); size_t kernel_width = kernel_weight.shape[2]; size_t in_channel_size = kernel_weight.shape[1]; size_t out_channel_size = kernel_weight.shape[0]; in_conv[k][i].init(cudnn, kernel_weight, bias_weight, 1, input_len, in_channel_size, 1, input_len, out_channel_size, 1, kernel_width, 1, dilation); kernel_fname = get_res_name(hparams::cond_conv_weight, k, i); bias_fname = get_res_name(hparams::cond_conv_bias, k, i); kernel_weight = 
cnpy::npy_load(kernel_fname); bias_weight = cnpy::npy_load(bias_fname); kernel_width = kernel_weight.shape[2]; in_channel_size = kernel_weight.shape[1]; out_channel_size = kernel_weight.shape[0]; cond_conv[k][i].init(cudnn, kernel_weight, bias_weight, 1, input_len, in_channel_size, 1, input_len, out_channel_size, 1, kernel_width); kernel_fname = get_res_name(hparams::res_skip_conv_weight, k, i); bias_fname = get_res_name(hparams::res_skip_conv_bias, k, i); kernel_weight = cnpy::npy_load(kernel_fname); bias_weight = cnpy::npy_load(bias_fname); kernel_width = kernel_weight.shape[2]; in_channel_size = kernel_weight.shape[1]; out_channel_size = kernel_weight.shape[0]; res_skip_conv[k][i].init(cudnn, kernel_weight, bias_weight, 1, input_len, in_channel_size, 1, input_len, out_channel_size, 1, kernel_width); dilation*=2; } } for (int k=0; k<n_flows; k++) { std::string kernel_fname = get_param_name(hparams::end_conv_weight, k); std::string bias_fname = get_param_name(hparams::end_conv_bias, k); auto kernel_weight = cnpy::npy_load(kernel_fname); auto bias_weight = cnpy::npy_load(bias_fname); size_t kernel_width = kernel_weight.shape[2]; size_t in_channel_size = kernel_weight.shape[1]; size_t out_channel_size = kernel_weight.shape[0]; end_conv[k].init(cudnn, kernel_weight, bias_weight, 1, input_len, in_channel_size, 1, input_len, out_channel_size, 1, kernel_width); kernel_fname = get_param_name(hparams::inv_conv_weight, k); bias_fname = get_param_name(hparams::end_conv_bias, k); kernel_weight = cnpy::npy_load(kernel_fname); bias_weight = cnpy::npy_load(bias_fname); kernel_width = kernel_weight.shape[2]; in_channel_size = kernel_weight.shape[1]; out_channel_size = kernel_weight.shape[0]; inv_conv[k].init(cudnn, kernel_weight, bias_weight, 1, input_len, in_channel_size, 1, input_len, out_channel_size, 1, kernel_width); } cudnnCreateTensorDescriptor(&input_desc); cudnnCreateTensorDescriptor(&out_desc); // std::cout<<"input length is "<<input_len<<"\n"; { 
audio_0.init(n_groups/2, input_len); f1.init(n_channels, input_len); in_conv_out.init(2*n_channels, input_len); f3.init(2*n_channels, input_len); gated_activation_output.init(n_channels, input_len); skip_out_sum.init(n_channels, input_len); audio.init(n_groups, input_len); z.init(2, 2*input_len); input_t.init(n_groups,input_len); } { checkCURAND(curandCreateGenerator(&rng, CURAND_RNG_PSEUDO_DEFAULT)); checkCURAND(curandSetPseudoRandomGeneratorSeed(rng, 1337ull)); } } void WN::operator() (cudnnHandle_t& cudnn, gpu_float_array& mel_input, gpu_float_array& d_output, gpu_float_array& d_workspace) /* This function transforms noise to audio through series of normalising flows Arguments: -------------- cudnn: A cudnnHandle A cudnn handle used by various cudnn layers mel_input: a float array of size [640,x] Upsampled mel generated from upsamper layer, used for conditioning waveglow d_output: A float array of size [8*x,1] Pointer to store values of audiov (output) d_workspace: A float array of large size ( greater than required by any convolution) A chunk of memory to be used by convolution workspace, alternatively we can set size to a given maximum by selecting such algorithms in conv */ { size_t input_len = mel_input.shape[1]; size_t aud_channels = n_rem_channels; // std::cout<<"the value is"<<input_len<<"\t"<<input_t.shape[2]<<"\t"<<mel_input.shape[1]<<"\n"; input_t.reshape(aud_channels, input_len); curandGenerateNormal(rng, input_t.ptr, input_t.size(), 0.0f, 0.6); f1.reshape(n_channels, input_len); in_conv_out.reshape(2*n_channels, input_len); f3.reshape(2*n_channels, input_len); gated_activation_output.reshape(n_channels, input_len); skip_out_sum.reshape(n_channels, input_len); audio_0.reshape(aud_channels/2, input_len); audio.reshape(aud_channels, input_len); for(int k=n_flows-1; k>-1; k--) { copy_kernel <<< (audio_0.size()+n_threads-1)/n_threads, n_threads >>>(audio_0.size(), input_t.ptr, audio_0.ptr); cudnnSetTensor4dDescriptor(input_desc, 
/*format=*/cudnnTensorFormat_t::CUDNN_TENSOR_NCHW, /*dataType=*/cudnnDataType_t::CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/aud_channels/2, /*image_height=*/1, /*image_width=*/input_len); cudnnSetTensor4dDescriptor(out_desc, cudnnTensorFormat_t::CUDNN_TENSOR_NCHW, cudnnDataType_t::CUDNN_DATA_FLOAT, 1, n_channels, 1, input_len); start_conv[k](cudnn, audio_0, f1, input_desc, out_desc, d_workspace); skip_out_sum.reset(); for(int j=0; j<n_layers; j++) { // log_d("input", f1.log("inp_in" + std::to_string(j)+ ".npy")); cudnnSetTensor4dDescriptor(input_desc, cudnnTensorFormat_t::CUDNN_TENSOR_NCHW, cudnnDataType_t::CUDNN_DATA_FLOAT, 1, n_channels, 1, input_len); cudnnSetTensor4dDescriptor(out_desc, cudnnTensorFormat_t::CUDNN_TENSOR_NCHW, cudnnDataType_t::CUDNN_DATA_FLOAT, 1, 2*n_channels, 1, input_len); in_conv[k][j](cudnn, f1, in_conv_out, input_desc, out_desc, d_workspace); // log_d("in_out", in_conv_out.log("in_out" + std::to_string(j)+ ".npy")); cudnnSetTensor4dDescriptor(input_desc, cudnnTensorFormat_t::CUDNN_TENSOR_NCHW, cudnnDataType_t::CUDNN_DATA_FLOAT, 1, 640, 1, input_len); cudnnSetTensor4dDescriptor(out_desc, cudnnTensorFormat_t::CUDNN_TENSOR_NCHW, cudnnDataType_t::CUDNN_DATA_FLOAT, 1, 2*n_channels, 1, input_len); cond_conv[k][j](cudnn, mel_input, f3, input_desc, out_desc, d_workspace); // log_d("cond_out", f3.log("cond_out" + std::to_string(j)+ ".npy")); fused_add_tanh_sigm_mul <<< (gated_activation_output.size()+n_threads-1)/n_threads, n_threads >>>(gated_activation_output.size(), in_conv_out.ptr, f3.ptr, gated_activation_output.ptr); // log_d("acts ", gated_activation_output.log("acts_out" + std::to_string(j)+ ".npy")); if(j<7) { cudnnSetTensor4dDescriptor(input_desc, cudnnTensorFormat_t::CUDNN_TENSOR_NCHW, cudnnDataType_t::CUDNN_DATA_FLOAT, 1, n_channels, 1, input_len); cudnnSetTensor4dDescriptor(out_desc, cudnnTensorFormat_t::CUDNN_TENSOR_NCHW, cudnnDataType_t::CUDNN_DATA_FLOAT, 1, 2*n_channels, 1, input_len); res_skip_conv[k][j](cudnn, 
gated_activation_output, f3, input_desc, out_desc, d_workspace); skip_res_add <<< (f1.size()+n_threads-1)/n_threads, n_threads >>>(f1.size(), f3.ptr, f1.ptr, skip_out_sum.ptr, 256*input_len); } else { cudnnSetTensor4dDescriptor(input_desc, cudnnTensorFormat_t::CUDNN_TENSOR_NCHW, cudnnDataType_t::CUDNN_DATA_FLOAT, 1, n_channels, 1, input_len); cudnnSetTensor4dDescriptor(out_desc, cudnnTensorFormat_t::CUDNN_TENSOR_NCHW, cudnnDataType_t::CUDNN_DATA_FLOAT, 1, n_channels, 1, input_len); res_skip_conv[k][j](cudnn, gated_activation_output, f1, input_desc, out_desc, d_workspace); skip_add <<< (f1.size()+n_threads-1)/n_threads, n_threads >>>(f1.size(), f1.ptr, skip_out_sum.ptr); } } cudnnSetTensor4dDescriptor(input_desc, cudnnTensorFormat_t::CUDNN_TENSOR_NCHW, cudnnDataType_t::CUDNN_DATA_FLOAT, 1, n_channels, 1, input_len); cudnnSetTensor4dDescriptor(out_desc, cudnnTensorFormat_t::CUDNN_TENSOR_NCHW, cudnnDataType_t::CUDNN_DATA_FLOAT, 1, aud_channels, 1, input_len); end_conv[k](cudnn, skip_out_sum, audio, input_desc, out_desc, d_workspace); affine_transform <<< (audio.size()/2+n_threads-1)/n_threads, n_threads >>>(audio.size()/2, input_t.ptr, audio.ptr, audio.size()/2); cudnnSetTensor4dDescriptor(input_desc, cudnnTensorFormat_t::CUDNN_TENSOR_NCHW, cudnnDataType_t::CUDNN_DATA_FLOAT, 1, aud_channels, 1, input_len); cudnnSetTensor4dDescriptor(out_desc, cudnnTensorFormat_t::CUDNN_TENSOR_NCHW, cudnnDataType_t::CUDNN_DATA_FLOAT, 1, aud_channels, 1, input_len); inv_conv[k](cudnn, input_t, audio, input_desc, out_desc, d_workspace, 0); copy_kernel<<<(input_t.size()+n_threads-1)/n_threads, n_threads>>>(input_t.size(), audio.ptr, input_t.ptr); if((k%4==0) && (k>0)) { aud_channels +=2; input_t.reshape(aud_channels, input_len); z.reshape(2, input_len); curandGenerateNormal(rng, z.ptr, z.size(), 0.0f, 0.6); concat_z<<<(input_t.size()+n_threads-1)/n_threads, n_threads>>>(input_t.size(), audio.ptr, input_t.ptr, z.ptr, 2*input_len); audio_0.reshape(aud_channels/2, input_len); 
audio.reshape(aud_channels, input_len); } } transpose_kernel<<<(d_output.size()+n_threads-1)/n_threads, n_threads>>>(d_output.size(), input_t.ptr, d_output.ptr, input_t.shape[1], input_t.shape[0]); // std::cout<<input_t.shape[1]<<"\t"<<input_t.shape[0]<<"\n"; } WN::~WN() { }
the_stack
#include <cfloat>
#include <vector>

#include "caffe/layers/warp_layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/util/gpu_util.cuh"

namespace caffe {

// Forward warping kernel, TRUNCATE outlier mode.
// One thread per (n, c, h, w) element (nthreads = num_*channels_*height_*width_).
// bottom_1 holds a two-channel flow field laid out as (n, 2, h, w): channel 1
// carries the vertical (h-direction) displacement, channel 0 the horizontal
// (w-direction) one — see index_x / index_y below.
// Side outputs theta_data (fractional interpolation weights), theta_data_
// (their complements) and x_w_data (warped source coordinates) are cached for
// the backward pass.  Pixels whose warped source coordinate falls outside the
// image are left at the zero that Forward_gpu pre-filled (hence "truncate").
template <typename Dtype>
__global__ void truncate_interp2_fwd(const int nthreads,
    const Dtype *bottom_0_data_, const Dtype *bottom_1_data_,
    const int num_, const int channels_,
    const int height_, const int width_,
    Dtype *theta_data, Dtype* theta_data_,
    Dtype *x_w_data, Dtype *top_data) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // Unflatten the linear index into (n, c, h, w).
    int temp = 0;
    const int n = index / (channels_ * height_ * width_);
    temp = index % (channels_ * height_ * width_);
    const int c = temp / (height_ * width_);
    temp = temp % (height_ * width_);
    const int h = temp / width_;
    const int w = temp % width_;
    // Offsets into the 2-channel flow/side blobs.  Note they do NOT depend
    // on c: every channel of the same pixel recomputes and rewrites the same
    // theta/x_w entries (redundant but writes identical values).
    int index_x = ((n * 2 + 1) * height_ + h) * width_ + w;
    int index_y = ((n * 2 + 0) * height_ + h) * width_ + w;
    // Warped (source) coordinate = target coordinate + flow.
    x_w_data[ index_x ] = h + bottom_1_data_[ index_x ];
    x_w_data[ index_y ] = w + bottom_1_data_[ index_y ];
    int xw_floor = (int)floor(x_w_data[ index_x ]);
    int yw_floor = (int)floor(x_w_data[ index_y ]);
    int xw_ceil = (int)ceil(x_w_data[ index_x ]);
    int yw_ceil = (int)ceil(x_w_data[ index_y ]);
    // Fractional parts become the bilinear weights; theta_data_ = 1 - theta.
    theta_data[ index_x ] = x_w_data[ index_x ] - floor(x_w_data[ index_x ]);
    theta_data[ index_y ] = x_w_data[ index_y ] - floor(x_w_data[ index_y ]);
    theta_data_[ index_x ] = 1 - theta_data[ index_x ];
    theta_data_[ index_y ] = 1 - theta_data[ index_y ];
    int offset = (n * channels_ + c) * height_;
    // Only sample when all four neighbours are inside the image; otherwise
    // the output keeps the pre-filled zero.
    if (x_w_data[ index_x ] >= 0 && x_w_data[ index_x ] <= height_-1 &&
        x_w_data[ index_y ] >= 0 && x_w_data[ index_y ] <= width_-1) {
      Dtype I0 = bottom_0_data_[ (offset + xw_floor) * width_ + yw_floor ];
      Dtype I1 = bottom_0_data_[ (offset + xw_ceil ) * width_ + yw_floor ];
      Dtype I2 = bottom_0_data_[ (offset + xw_floor) * width_ + yw_ceil ];
      Dtype I3 = bottom_0_data_[ (offset + xw_ceil ) * width_ + yw_ceil ];
      // Bilinear blend of the four neighbouring source pixels.
      top_data[ (offset + h) * width_ + w ] =
          (theta_data_[index_x] * theta_data_[index_y] * I0) +
          (theta_data[index_x] * theta_data_[index_y] * I1) +
          (theta_data_[index_x] * theta_data[index_y] * I2) +
          (theta_data[index_x] * theta_data[index_y] * I3);
    }
  }
}

// Forward warping kernel, NEAREST outlier mode.
// Same layout and side outputs as truncate_interp2_fwd, but out-of-range
// source coordinates are clamped to the nearest border pixel (and theta is
// overwritten so the backward pass sees the unclamped overshoot), so every
// output pixel is written.
template <typename Dtype>
__global__ void nearest_interp2_fwd(const int nthreads,
    const Dtype *bottom_0_data_, const Dtype *bottom_1_data_,
    const int num_, const int channels_,
    const int height_, const int width_,
    Dtype *theta_data, Dtype* theta_data_,
    Dtype *x_w_data, Dtype *top_data) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    int temp = 0;
    const int n = index / (channels_ * height_ * width_);
    temp = index % (channels_ * height_ * width_);
    const int c = temp / (height_ * width_);
    temp = temp % (height_ * width_);
    const int h = temp / width_;
    const int w = temp % width_;
    int index_x = ((n * 2 + 1) * height_ + h) * width_ + w;
    int index_y = ((n * 2 + 0) * height_ + h) * width_ + w;
    x_w_data[ index_x ] = h + bottom_1_data_[ index_x ];
    x_w_data[ index_y ] = w + bottom_1_data_[ index_y ];
    int xw_floor = (int)floor(x_w_data[ index_x ]);
    int yw_floor = (int)floor(x_w_data[ index_y ]);
    int xw_ceil = (int)ceil(x_w_data[ index_x ]);
    int yw_ceil = (int)ceil(x_w_data[ index_y ]);
    theta_data[ index_x ] = x_w_data[ index_x ] - floor(x_w_data[ index_x ]);
    theta_data[ index_y ] = x_w_data[ index_y ] - floor(x_w_data[ index_y ]);
    // Clamp out-of-range coordinates to the border; theta keeps the signed
    // overshoot so gradients still reflect how far outside the flow pointed.
    if (x_w_data[ index_x ] < 0) {
      theta_data[ index_x ] = x_w_data[ index_x ];
      xw_floor = 0;
      xw_ceil = 0;
    }
    if (x_w_data[ index_x ] >= height_-1) {
      theta_data[ index_x ] = x_w_data[ index_x ] - height_;
      xw_floor = height_-1;
      xw_ceil = height_-1;
    }
    if (x_w_data[ index_y ] < 0) {
      theta_data[ index_y ] = x_w_data[ index_y ];
      yw_floor = 0;
      yw_ceil = 0;
    }
    if (x_w_data[ index_y ] >= width_-1) {
      theta_data[ index_y ] = x_w_data[ index_y ] - width_;
      yw_floor = width_-1;
      yw_ceil = width_-1;
    }
    theta_data_[ index_x ] = 1 - theta_data[ index_x ];
    theta_data_[ index_y ] = 1 - theta_data[ index_y ];
    int offset = (n * channels_ + c) * height_;
    Dtype I0 = bottom_0_data_[ (offset + xw_floor) * width_ + yw_floor ];
    Dtype I1 = bottom_0_data_[ (offset + xw_ceil ) * width_ + yw_floor ];
    Dtype I2 = bottom_0_data_[ (offset + xw_floor) * width_ + yw_ceil ];
    Dtype I3 = bottom_0_data_[ (offset + xw_ceil ) * width_ + yw_ceil ];
    top_data[ (offset + h) * width_ + w ] =
        (theta_data_[index_x] * theta_data_[index_y] * I0) +
        (theta_data[index_x] * theta_data_[index_y] * I1) +
        (theta_data_[index_x] * theta_data[index_y] * I2) +
        (theta_data[index_x] * theta_data[index_y] * I3);
  }
}

// Backward kernel for TRUNCATE mode.  Uses the theta/x_w values cached by
// the forward pass.  Out-of-range pixels propagate no gradient (matching the
// forward truncation).
// bottom_0_diff (image gradient) is scattered with caffe_gpu_atomic_add,
// since several targets can sample the same source pixel.
// NOTE(review): bottom_1_diff[index_x/y] is accumulated with a plain '+=',
// but index_x/index_y do not depend on c — threads handling different
// channels of the same pixel update the same address concurrently, which is
// a data race on the flow gradient; an atomic add (or a per-channel
// reduction) would be needed for a deterministic sum over channels.
template <typename Dtype>
__global__ void truncate_interp2_bwd(const int nthreads,
    const int num_, const int channels_, const int height_, const int width_,
    const Dtype *theta_data, const Dtype* theta_data_, const Dtype *x_w_data,
    Dtype *bottom_0_diff, Dtype *bottom_1_diff,
    const Dtype *top_diff, const Dtype *top_data, const Dtype *bottom_0_data) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    int temp = 0;
    const int n = index / (channels_ * height_ * width_);
    temp = index % (channels_ * height_ * width_);
    const int c = temp / (height_ * width_);
    temp = temp % (height_ * width_);
    const int h = temp / width_;
    const int w = temp % width_;
    int index_x = ((n * 2 + 1) * height_ + h) * width_ + w;
    int index_y = ((n * 2 + 0) * height_ + h) * width_ + w;
    // Mirror of the forward in-range test: skip truncated pixels entirely.
    if (!(x_w_data[ index_x ] < 0 || x_w_data[ index_x ] > height_-1 ||
          x_w_data[ index_y ] < 0 || x_w_data[ index_y ] > width_-1)) {
      int xw_floor = (int)floor(x_w_data[ index_x ]);
      int yw_floor = (int)floor(x_w_data[ index_y ]);
      int xw_ceil = (int)ceil(x_w_data[ index_x ]);
      int yw_ceil = (int)ceil(x_w_data[ index_y ]);
      int bottom_0_index = ((n * channels_ + c) * height_ + h) * width_ + w;
      int offset = (n * channels_ + c) * height_;
      Dtype I0 = bottom_0_data[ (offset + xw_floor) * width_ + yw_floor ];
      Dtype I1 = bottom_0_data[ (offset + xw_ceil ) * width_ + yw_floor ];
      Dtype I2 = bottom_0_data[ (offset + xw_floor) * width_ + yw_ceil ];
      Dtype I3 = bottom_0_data[ (offset + xw_ceil ) * width_ + yw_ceil ];
      // d(top)/d(flow_x): derivative of the bilinear blend w.r.t. theta_x.
      bottom_1_diff[ index_x ] += ( -1*theta_data_[index_y]*I0 +
          theta_data_[index_y]*I1 -
          theta_data[index_y] *I2 +
          theta_data[index_y] *I3 ) *
          top_diff[(offset + h) * width_ + w];
      // d(top)/d(flow_y): derivative w.r.t. theta_y.
      bottom_1_diff[ index_y ] += ( -1*theta_data_[index_x]*I0 -
          theta_data[index_x] *I1 +
          theta_data_[index_x]*I2 +
          theta_data[index_x] *I3 ) *
          top_diff[(offset + h) * width_ + w];
      // Scatter the image gradient to the four sampled source pixels;
      // atomic because source pixels are shared across targets.
      caffe_gpu_atomic_add((Dtype) theta_data_[ index_x ]*theta_data_[ index_y ]*top_diff[bottom_0_index],
          bottom_0_diff + ((offset + xw_floor) * width_ + yw_floor ));
      caffe_gpu_atomic_add((Dtype) theta_data[ index_x ] *theta_data_[ index_y ]*top_diff[bottom_0_index],
          bottom_0_diff + ((offset + xw_ceil ) * width_ + yw_floor ));
      caffe_gpu_atomic_add((Dtype) theta_data_[ index_x ]*theta_data[ index_y ] *top_diff[bottom_0_index],
          bottom_0_diff + ((offset + xw_floor) * width_ + yw_ceil ));
      caffe_gpu_atomic_add((Dtype) theta_data[ index_x ] *theta_data[ index_y ] *top_diff[bottom_0_index],
          bottom_0_diff + ((offset + xw_ceil ) * width_ + yw_ceil ));
    }
  }
}

// Backward kernel for NEAREST mode.  Same structure as truncate_interp2_bwd
// but coordinates are clamped (as in the forward pass) instead of skipped,
// so every pixel contributes gradient.
// NOTE(review): the same cross-channel '+=' race on bottom_1_diff applies
// here — see truncate_interp2_bwd.
template <typename Dtype>
__global__ void nearest_interp2_bwd(const int nthreads,
    const int num_, const int channels_, const int height_, const int width_,
    const Dtype *theta_data, const Dtype* theta_data_, const Dtype *x_w_data,
    Dtype *bottom_0_diff, Dtype *bottom_1_diff,
    const Dtype *top_diff, const Dtype *top_data, const Dtype *bottom_0_data) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    int temp = 0;
    const int n = index / (channels_ * height_ * width_);
    temp = index % (channels_ * height_ * width_);
    const int c = temp / (height_ * width_);
    temp = temp % (height_ * width_);
    const int h = temp / width_;
    const int w = temp % width_;
    int index_x = ((n * 2 + 1) * height_ + h) * width_ + w;
    int index_y = ((n * 2 + 0) * height_ + h) * width_ + w;
    int xw_floor = (int)floor(x_w_data[ index_x ]);
    int yw_floor = (int)floor(x_w_data[ index_y ]);
    int xw_ceil = (int)ceil(x_w_data[ index_x ]);
    int yw_ceil = (int)ceil(x_w_data[ index_y ]);
    // Re-apply the forward pass's border clamping.
    if (x_w_data[ index_x ] < 0) {
      xw_floor = 0;
      xw_ceil = 0;
    }
    if (x_w_data[ index_x ] >= height_-1) {
      xw_floor = height_-1;
      xw_ceil = height_-1;
    }
    if (x_w_data[ index_y ] < 0) {
      yw_floor = 0;
      yw_ceil = 0;
    }
    if (x_w_data[ index_y ] >= width_-1) {
      yw_floor = width_-1;
      yw_ceil = width_-1;
    }
    int bottom_0_index = ((n * channels_ + c) * height_ + h) * width_ + w;
    int offset = (n * channels_ + c) * height_;
    Dtype I0 = bottom_0_data[ (offset + xw_floor) * width_ + yw_floor ];
    Dtype I1 = bottom_0_data[ (offset + xw_ceil ) * width_ + yw_floor ];
    Dtype I2 = bottom_0_data[ (offset + xw_floor) * width_ + yw_ceil ];
    Dtype I3 = bottom_0_data[ (offset + xw_ceil ) * width_ + yw_ceil ];
    bottom_1_diff[ index_x ] += ( -1*theta_data_[index_y]*I0 +
        theta_data_[index_y]*I1 -
        theta_data[index_y] *I2 +
        theta_data[index_y] *I3 ) *
        top_diff[(offset + h) * width_ + w];
    bottom_1_diff[ index_y ] += ( -1*theta_data_[index_x]*I0 -
        theta_data[index_x] *I1 +
        theta_data_[index_x]*I2 +
        theta_data[index_x] *I3 ) *
        top_diff[(offset + h) * width_ + w];
    caffe_gpu_atomic_add((Dtype) theta_data_[ index_x ]*theta_data_[ index_y ]*top_diff[bottom_0_index],
        bottom_0_diff + ((offset + xw_floor) * width_ + yw_floor ));
    caffe_gpu_atomic_add((Dtype) theta_data[ index_x ] *theta_data_[ index_y ]*top_diff[bottom_0_index],
        bottom_0_diff + ((offset + xw_ceil ) * width_ + yw_floor ));
    caffe_gpu_atomic_add((Dtype) theta_data_[ index_x ]*theta_data[ index_y ] *top_diff[bottom_0_index],
        bottom_0_diff + ((offset + xw_floor) * width_ + yw_ceil ));
    caffe_gpu_atomic_add((Dtype) theta_data[ index_x ] *theta_data[ index_y ] *top_diff[bottom_0_index],
        bottom_0_diff + ((offset + xw_ceil ) * width_ + yw_ceil ));
  }
}

// Warps bottom[0] (image) by bottom[1] (per-pixel flow) into top[0].
// Pre-zeroes top so TRUNCATE mode can leave out-of-range pixels at zero.
// NOTE(review): the zero-fill uses bottom[0]->count() for top_data — assumes
// top[0] has the same element count as bottom[0]; confirm against Reshape.
template <typename Dtype>
void WarpLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data_0 = bottom[0]->gpu_data(); // image
  const Dtype* bottom_data_1 = bottom[1]->gpu_data(); // optical flow
  Dtype* top_data = top[0]->mutable_gpu_data();
  Dtype* theta_data = theta.mutable_gpu_data();
  Dtype* theta_data_ = theta_.mutable_gpu_data();
  Dtype* x_w_data = x_w.mutable_gpu_data();
  const int num_kernels = num_ * channels_ * height_ * width_;
  caffe_gpu_set(bottom[0]->count(), (Dtype)0., top_data);
  switch (outliers_) {
  case WarpParameter_WarpType_TRUNCATE:
    truncate_interp2_fwd<Dtype><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>
        (num_kernels, bottom_data_0, bottom_data_1,
         num_, channels_, height_, width_,
         theta_data, theta_data_, x_w_data, top_data);
    break;
  case WarpParameter_WarpType_NEAREST:
    nearest_interp2_fwd<Dtype><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>
        (num_kernels, bottom_data_0, bottom_data_1,
         num_, channels_, height_, width_,
         theta_data, theta_data_, x_w_data, top_data);
    break;
  }
  CUDA_POST_KERNEL_CHECK;
}

// Propagates top[0]'s gradient to both the image (bottom[0]) and the flow
// (bottom[1]) using the theta/x_w buffers cached by Forward_gpu.  Both diff
// blobs are zeroed first because the kernels accumulate into them.
template <typename Dtype>
void WarpLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[0] || propagate_down[1]) {
    caffe_gpu_set(bottom[0]->count(), (Dtype)0., bottom[0]->mutable_gpu_diff());
    caffe_gpu_set(bottom[1]->count(), (Dtype)0., bottom[1]->mutable_gpu_diff());
    const Dtype* theta_data = theta.mutable_gpu_data();
    const Dtype* theta_data_ = theta_.mutable_gpu_data();
    const Dtype* x_w_data = x_w.mutable_gpu_data();
    const Dtype* top_data = top[0]->gpu_data();
    const Dtype* bottom_0_data = bottom[0]->gpu_data();
    const Dtype* bottom_1_data = bottom[1]->gpu_data(); // currently unused below
    const Dtype* top_diff = top[0]->gpu_diff();
    Dtype* bottom_0_diff = bottom[0]->mutable_gpu_diff();
    Dtype* bottom_1_diff = bottom[1]->mutable_gpu_diff();
    const int num_kernels = num_ * channels_ * height_ * width_;
    switch (outliers_) {
    case WarpParameter_WarpType_NEAREST:
      nearest_interp2_bwd<Dtype><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>
          (num_kernels, num_, channels_, height_, width_,
           theta_data, theta_data_, x_w_data,
           bottom_0_diff, bottom_1_diff, top_diff, top_data, bottom_0_data);
      break;
    case WarpParameter_WarpType_TRUNCATE:
      truncate_interp2_bwd<Dtype><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>
          (num_kernels, num_, channels_, height_, width_,
           theta_data, theta_data_, x_w_data,
           bottom_0_diff, bottom_1_diff, top_diff, top_data, bottom_0_data);
      break;
    }
    CUDA_POST_KERNEL_CHECK;
    //caffe_gpu_mul(top[0]->count(), top_diff, bottom_0_diff, bottom_0_diff);
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(WarpLayer);

} // namespace caffe
the_stack
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/limits.hpp"

namespace cv { namespace cuda { namespace device
{
    namespace stereobp
    {
        ///////////////////////////////////////////////////////////////
        /////////////////////// load constants ////////////////////////
        ///////////////////////////////////////////////////////////////

        // Belief-propagation parameters, mirrored into constant memory once
        // per invocation so every kernel reads them via broadcast.
        __constant__ int cndisp;              // number of disparity levels
        __constant__ float cmax_data_term;    // truncation cap on the data cost
        __constant__ float cdata_weight;      // weight applied to the data cost
        __constant__ float cmax_disc_term;    // truncation cap on the discontinuity cost
        __constant__ float cdisc_single_jump; // linear cost per unit disparity jump

        // Copies the host-side parameters into the __constant__ symbols
        // above.  Synchronous; must run before any of the kernels below.
        void load_constants(int ndisp, float max_data_term, float data_weight, float max_disc_term, float disc_single_jump)
        {
            cudaSafeCall( cudaMemcpyToSymbol(cndisp, &ndisp, sizeof(int )) );
            cudaSafeCall( cudaMemcpyToSymbol(cmax_data_term, &max_data_term, sizeof(float)) );
            cudaSafeCall( cudaMemcpyToSymbol(cdata_weight, &data_weight, sizeof(float)) );
            cudaSafeCall( cudaMemcpyToSymbol(cmax_disc_term, &max_disc_term, sizeof(float)) );
            cudaSafeCall( cudaMemcpyToSymbol(cdisc_single_jump, &disc_single_jump, sizeof(float)) );
        }

        ///////////////////////////////////////////////////////////////
        ////////////////////////// comp data //////////////////////////
        ///////////////////////////////////////////////////////////////

        // Pixel-difference functor, specialised on the number of interleaved
        // channels.  The left pixel is cached at construction; operator()
        // measures its distance to a right-image pixel.
        template <int cn> struct PixDiff;

        // Single-channel: plain absolute intensity difference.
        template <> struct PixDiff<1>
        {
            __device__ __forceinline__ PixDiff(const uchar* ls)
            {
                l = *ls;
            }
            __device__ __forceinline__ float operator()(const uchar* rs) const
            {
                return ::abs((int)l - *rs);
            }
            uchar l;
        };
        // Three-channel: per-channel absolute differences combined with
        // luma-style weights (0.114/0.587/0.299 applied to .x/.y/.z).
        template <> struct PixDiff<3>
        {
            __device__ __forceinline__ PixDiff(const uchar* ls)
            {
                l = *((uchar3*)ls);
            }
            __device__ __forceinline__ float operator()(const uchar* rs) const
            {
                const float tr = 0.299f;
                const float tg = 0.587f;
                const float tb = 0.114f;

                float val = tb * ::abs((int)l.x - rs[0]);
                val += tg * ::abs((int)l.y - rs[1]);
                val += tr * ::abs((int)l.z - rs[2]);

                return val;
            }
            uchar3 l;
        };
        // Four-channel: same weighting as the 3-channel case; the fourth
        // component is ignored.
        template <> struct PixDiff<4>
        {
            __device__ __forceinline__ PixDiff(const uchar* ls)
            {
                l = *((uchar4*)ls);
            }
            __device__ __forceinline__ float operator()(const uchar* rs) const
            {
                const float tr = 0.299f;
                const float tg = 0.587f;
                const float tb = 0.114f;

                uchar4 r = *((uchar4*)rs);

                float val = tb * ::abs((int)l.x - r.x);
                val += tg * ::abs((int)l.y - r.y);
                val += tr * ::abs((int)l.z - r.z);

                return val;
            }
            uchar4 l;
        };

        // Computes the truncated data cost for every disparity level.
        // One thread per interior pixel of the left image (the one-pixel
        // border is skipped).  The cost volume 'data' is laid out as cndisp
        // planes of left.rows rows, disp_step elements apart.
        template <int cn, typename D>
        __global__ void comp_data(const PtrStepSzb left, const PtrStepb right, PtrStep<D> data)
        {
            const int x = blockIdx.x * blockDim.x + threadIdx.x;
            const int y = blockIdx.y * blockDim.y + threadIdx.y;

            if (y > 0 && y < left.rows - 1 && x > 0 && x < left.cols - 1)
            {
                const uchar* ls = left.ptr(y) + x * cn;
                const PixDiff<cn> pixDiff(ls);
                const uchar* rs = right.ptr(y) + x * cn;

                D* ds = data.ptr(y) + x;
                const size_t disp_step = data.step * left.rows / sizeof(D);

                for (int disp = 0; disp < cndisp; disp++)
                {
                    if (x - disp >= 1)
                    {
                        float val = pixDiff(rs - disp * cn);

                        // Weighted cost, truncated at cmax_data_term.
                        ds[disp * disp_step] = saturate_cast<D>(fmin(cdata_weight * val, cdata_weight * cmax_data_term));
                    }
                    else
                    {
                        // Matching pixel would fall outside the right image:
                        // charge the maximum (truncated) cost.
                        ds[disp * disp_step] = saturate_cast<D>(cdata_weight * cmax_data_term);
                    }
                }
            }
        }

        // Host launcher, explicitly specialised for each (pixel type, cost
        // type) combination.  On the default stream the launch is followed
        // by a blocking device synchronize.
        template<typename T, typename D>
        void comp_data_gpu(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& data, cudaStream_t stream);

        template <> void comp_data_gpu<uchar, short>(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& data, cudaStream_t stream)
        {
            dim3 threads(32, 8, 1);
            dim3 grid(1, 1, 1);

            grid.x = divUp(left.cols, threads.x);
            grid.y = divUp(left.rows, threads.y);

            comp_data<1, short><<<grid, threads, 0, stream>>>(left, right, (PtrStepSz<short>)data);
            cudaSafeCall( cudaGetLastError() );

            if (stream == 0)
                cudaSafeCall( cudaDeviceSynchronize() );
        }
        template <> void comp_data_gpu<uchar, float>(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& data, cudaStream_t stream)
        {
            dim3 threads(32, 8, 1);
            dim3 grid(1, 1, 1);

            grid.x = divUp(left.cols, threads.x);
            grid.y = divUp(left.rows, threads.y);

            comp_data<1, float><<<grid, threads, 0, stream>>>(left, right, (PtrStepSz<float>)data);
            cudaSafeCall( cudaGetLastError() );

            if (stream == 0)
                cudaSafeCall( cudaDeviceSynchronize() );
        }

        template <> void comp_data_gpu<uchar3, short>(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& data, cudaStream_t stream)
        {
            dim3 threads(32, 8, 1);
            dim3 grid(1, 1, 1);

            grid.x = divUp(left.cols, threads.x);
            grid.y = divUp(left.rows, threads.y);

            comp_data<3, short><<<grid, threads, 0, stream>>>(left, right, (PtrStepSz<short>)data);
            cudaSafeCall( cudaGetLastError() );

            if (stream == 0)
                cudaSafeCall( cudaDeviceSynchronize() );
        }
        template <> void comp_data_gpu<uchar3, float>(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& data, cudaStream_t stream)
        {
            dim3 threads(32, 8, 1);
            dim3 grid(1, 1, 1);

            grid.x = divUp(left.cols, threads.x);
            grid.y = divUp(left.rows, threads.y);

            comp_data<3, float><<<grid, threads, 0, stream>>>(left, right, (PtrStepSz<float>)data);
            cudaSafeCall( cudaGetLastError() );

            if (stream == 0)
                cudaSafeCall( cudaDeviceSynchronize() );
        }

        template <> void comp_data_gpu<uchar4, short>(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& data, cudaStream_t stream)
        {
            dim3 threads(32, 8, 1);
            dim3 grid(1, 1, 1);

            grid.x = divUp(left.cols, threads.x);
            grid.y = divUp(left.rows, threads.y);

            comp_data<4, short><<<grid, threads, 0, stream>>>(left, right, (PtrStepSz<short>)data);
            cudaSafeCall( cudaGetLastError() );

            if (stream == 0)
                cudaSafeCall( cudaDeviceSynchronize() );
        }
        template <> void comp_data_gpu<uchar4, float>(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& data, cudaStream_t stream)
        {
            dim3 threads(32, 8, 1);
            dim3 grid(1, 1, 1);

            grid.x = divUp(left.cols, threads.x);
            grid.y = divUp(left.rows, threads.y);

            comp_data<4, float><<<grid, threads, 0, stream>>>(left, right, (PtrStepSz<float>)data);
            cudaSafeCall( cudaGetLastError() );

            if (stream == 0)
                cudaSafeCall( cudaDeviceSynchronize() );
        }

        ///////////////////////////////////////////////////////////////
        //////////////////////// data step down ///////////////////////
        ///////////////////////////////////////////////////////////////

        // Downsamples the cost volume by 2x: each destination cell is the
        // sum of the four source cells it covers, for every disparity plane.
        template <typename T>
        __global__ void data_step_down(int dst_cols, int dst_rows, int src_rows, const PtrStep<T> src, PtrStep<T> dst)
        {
            const int x = blockIdx.x * blockDim.x + threadIdx.x;
            const int y = blockIdx.y * blockDim.y + threadIdx.y;

            if (x < dst_cols && y < dst_rows)
            {
                for (int d = 0; d < cndisp; ++d)
                {
                    float dst_reg = src.ptr(d * src_rows + (2*y+0))[(2*x+0)];
                    dst_reg += src.ptr(d * src_rows + (2*y+1))[(2*x+0)];
                    dst_reg += src.ptr(d * src_rows + (2*y+0))[(2*x+1)];
                    dst_reg += src.ptr(d * src_rows + (2*y+1))[(2*x+1)];

                    dst.ptr(d * dst_rows + y)[x] = saturate_cast<T>(dst_reg);
                }
            }
        }

        // Host launcher for data_step_down; synchronizes on the default
        // stream.
        template<typename T>
        void data_step_down_gpu(int dst_cols, int dst_rows, int src_rows, const PtrStepSzb& src, const PtrStepSzb& dst, cudaStream_t stream)
        {
            dim3 threads(32, 8, 1);
            dim3 grid(1, 1, 1);

            grid.x = divUp(dst_cols, threads.x);
            grid.y = divUp(dst_rows, threads.y);

            data_step_down<T><<<grid, threads, 0, stream>>>(dst_cols, dst_rows, src_rows, (PtrStepSz<T>)src, (PtrStepSz<T>)dst);
            cudaSafeCall( cudaGetLastError() );

            if (stream == 0)
                cudaSafeCall( cudaDeviceSynchronize() );
        }

        template void data_step_down_gpu<short>(int dst_cols, int dst_rows, int src_rows, const PtrStepSzb& src, const PtrStepSzb& dst, cudaStream_t stream);
        template void data_step_down_gpu<float>(int dst_cols, int dst_rows, int src_rows, const PtrStepSzb& src, const PtrStepSzb& dst, cudaStream_t stream);

        ///////////////////////////////////////////////////////////////
        /////////////////// level up messages ////////////////////////
        ///////////////////////////////////////////////////////////////

        // Upsamples messages from a coarse pyramid level: each destination
        // pixel copies the message of its coarse-level parent (y/2, x/2),
        // for every disparity plane.
        template <typename T>
        __global__ void level_up_message(int dst_cols, int dst_rows, int src_rows, const PtrStep<T> src, PtrStep<T> dst)
        {
            const int x = blockIdx.x * blockDim.x + threadIdx.x;
            const int y = blockIdx.y * blockDim.y + threadIdx.y;

            if (x < dst_cols && y < dst_rows)
            {
                const size_t dst_disp_step = dst.step * dst_rows / sizeof(T);
                const size_t src_disp_step = src.step * src_rows / sizeof(T);

                T* dstr = dst.ptr(y ) + x;
                const T* srcr = src.ptr(y/2) + x/2;

                for (int d = 0; d < cndisp; ++d)
                    dstr[d * dst_disp_step] = srcr[d * src_disp_step];
            }
        }

        // Upsamples all four message directions (up/down/left/right) into
        // the other half of the ping-pong buffers: src_idx = (dst_idx+1)&1.
        template <typename T>
        void level_up_messages_gpu(int dst_idx, int dst_cols, int dst_rows, int src_rows, PtrStepSzb* mus, PtrStepSzb* mds, PtrStepSzb* mls, PtrStepSzb* mrs, cudaStream_t stream)
        {
            dim3 threads(32, 8, 1);
            dim3 grid(1, 1, 1);

            grid.x = divUp(dst_cols, threads.x);
            grid.y = divUp(dst_rows, threads.y);

            int src_idx = (dst_idx + 1) & 1;

            level_up_message<T><<<grid, threads, 0, stream>>>(dst_cols, dst_rows, src_rows, (PtrStepSz<T>)mus[src_idx], (PtrStepSz<T>)mus[dst_idx]);
            cudaSafeCall( cudaGetLastError() );

            level_up_message<T><<<grid, threads, 0, stream>>>(dst_cols, dst_rows, src_rows, (PtrStepSz<T>)mds[src_idx], (PtrStepSz<T>)mds[dst_idx]);
            cudaSafeCall( cudaGetLastError() );

            level_up_message<T><<<grid, threads, 0, stream>>>(dst_cols, dst_rows, src_rows, (PtrStepSz<T>)mls[src_idx], (PtrStepSz<T>)mls[dst_idx]);
            cudaSafeCall( cudaGetLastError() );

            level_up_message<T><<<grid, threads, 0, stream>>>(dst_cols, dst_rows, src_rows, (PtrStepSz<T>)mrs[src_idx], (PtrStepSz<T>)mrs[dst_idx]);
            cudaSafeCall( cudaGetLastError() );

            if (stream == 0)
                cudaSafeCall( cudaDeviceSynchronize() );
        }

        template void level_up_messages_gpu<short>(int dst_idx, int dst_cols, int dst_rows, int src_rows, PtrStepSzb* mus, PtrStepSzb* mds, PtrStepSzb* mls, PtrStepSzb* mrs, cudaStream_t stream);
        template void level_up_messages_gpu<float>(int dst_idx, int dst_cols, int dst_rows, int src_rows, PtrStepSzb* mus, PtrStepSzb* mds, PtrStepSzb* mls, PtrStepSzb* mrs, cudaStream_t stream);

        ///////////////////////////////////////////////////////////////
        //////////////////// calc all iterations /////////////////////
        ///////////////////////////////////////////////////////////////

        // In-place two-pass (forward then backward over disparities) min
        // with a linear penalty of cdisc_single_jump per disparity step —
        // the linear-time distance-transform used for the truncated linear
        // discontinuity model.
        template <typename T>
        __device__ void calc_min_linear_penalty(T* dst, size_t step)
        {
            float prev = dst[0];
            float cur;
            for (int disp = 1; disp < cndisp; ++disp)
            {
                prev += cdisc_single_jump;
                cur = dst[step * disp];
                if (prev < cur)
                {
                    cur = prev;
                    dst[step * disp] = saturate_cast<T>(prev);
                }
                prev = cur;
            }

            prev = dst[(cndisp - 1) * step];
            for (int disp = cndisp - 2; disp >= 0; disp--)
            {
                prev += cdisc_single_jump;
                cur = dst[step * disp];
                if (prev < cur)
                {
                    cur = prev;
                    dst[step * disp] = saturate_cast<T>(prev);
                }
                prev = cur;
            }
        }

        // Computes one outgoing message: sum of the three incoming messages
        // plus the data term, relaxed by the linear-penalty pass, truncated
        // at (min + cmax_disc_term), then normalized by subtracting the mean
        // over disparities.
        template <typename T>
        __device__ void message(const T* msg1, const T* msg2, const T* msg3, const T* data, T* dst, size_t msg_disp_step, size_t data_disp_step)
        {
            float minimum = device::numeric_limits<float>::max();

            for(int i = 0; i < cndisp; ++i)
            {
                float dst_reg = msg1[msg_disp_step * i];
                dst_reg += msg2[msg_disp_step * i];
                dst_reg += msg3[msg_disp_step * i];
                dst_reg += data[data_disp_step * i];

                if (dst_reg < minimum)
                    minimum = dst_reg;

                dst[msg_disp_step * i] = saturate_cast<T>(dst_reg);
            }

            calc_min_linear_penalty(dst, msg_disp_step);

            minimum += cmax_disc_term;

            float sum = 0;
            for(int i = 0; i < cndisp; ++i)
            {
                float dst_reg = dst[msg_disp_step * i];
                if (dst_reg > minimum)
                {
                    dst_reg = minimum;
                    dst[msg_disp_step * i] = saturate_cast<T>(minimum);
                }
                sum += dst_reg;
            }
            sum /= cndisp;

            for(int i = 0; i < cndisp; ++i)
                dst[msg_disp_step * i] -= sum;
        }

        // One red-black (checkerboard) belief-propagation sweep over interior
        // pixels: the parity of the updated column alternates with (y + t),
        // so consecutive iterations update complementary pixel sets and all
        // four directional messages can be updated in place.
        template <typename T>
        __global__ void one_iteration(int t, int elem_step, T* u, T* d, T* l, T* r, const PtrStep<T> data, int cols, int rows)
        {
            const int y = blockIdx.y * blockDim.y + threadIdx.y;
            const int x = ((blockIdx.x * blockDim.x + threadIdx.x) << 1) + ((y + t) & 1);

            if ((y > 0) && (y < rows - 1) && (x > 0) && (x < cols - 1))
            {
                T* us = u + y * elem_step + x;
                T* ds = d + y * elem_step + x;
                T* ls = l + y * elem_step + x;
                T* rs = r + y * elem_step + x;
                const T* dt = data.ptr(y) + x;

                size_t msg_disp_step = elem_step * rows;
                size_t data_disp_step = data.step * rows / sizeof(T);

                message(us + elem_step, ls + 1, rs - 1, dt, us, msg_disp_step, data_disp_step);
                message(ds - elem_step, ls + 1, rs - 1, dt, ds, msg_disp_step, data_disp_step);
                message(us + elem_step, ds - elem_step, rs - 1, dt, rs, msg_disp_step, data_disp_step);
                message(us + elem_step, ds - elem_step, ls + 1, dt, ls, msg_disp_step, data_disp_step);
            }
        }

        // Runs 'iters' alternating checkerboard sweeps.  Note the grid covers
        // half the columns (threads.x << 1) since each sweep touches only one
        // parity.  Synchronizes after every sweep on the default stream.
        template <typename T>
        void calc_all_iterations_gpu(int cols, int rows, int iters, const PtrStepSzb& u, const PtrStepSzb& d, const PtrStepSzb& l, const PtrStepSzb& r, const PtrStepSzb& data, cudaStream_t stream)
        {
            dim3 threads(32, 8, 1);
            dim3 grid(1, 1, 1);

            grid.x = divUp(cols, threads.x << 1);
            grid.y = divUp(rows, threads.y);

            int elem_step = (int)(u.step / sizeof(T));

            for(int t = 0; t < iters; ++t)
            {
                one_iteration<T><<<grid, threads, 0, stream>>>(t, elem_step, (T*)u.data, (T*)d.data, (T*)l.data, (T*)r.data, (PtrStepSz<T>)data, cols, rows);
                cudaSafeCall( cudaGetLastError() );

                if (stream == 0)
                    cudaSafeCall( cudaDeviceSynchronize() );
            }
        }

        template void calc_all_iterations_gpu<short>(int cols, int rows, int iters, const PtrStepSzb& u, const PtrStepSzb& d, const PtrStepSzb& l, const PtrStepSzb& r, const PtrStepSzb& data, cudaStream_t stream);
        template void calc_all_iterations_gpu<float>(int cols, int rows, int iters, const PtrStepSzb& u, const PtrStepSzb& d, const PtrStepSzb& l, const PtrStepSzb& r, const PtrStepSzb& data, cudaStream_t stream);

        ///////////////////////////////////////////////////////////////
        /////////////////////////// output ////////////////////////////
        ///////////////////////////////////////////////////////////////

        // Selects, per interior pixel, the disparity minimizing the belief
        // (sum of the four incoming neighbour messages plus the data term).
        template <typename T>
        __global__ void output(const int elem_step, const T* u, const T* d, const T* l, const T* r, const T* data, PtrStepSz<short> disp)
        {
            const int x = blockIdx.x * blockDim.x + threadIdx.x;
            const int y = blockIdx.y * blockDim.y + threadIdx.y;

            if (y > 0 && y < disp.rows - 1 && x > 0 && x < disp.cols - 1)
            {
                // Messages are read from the neighbour that sent them.
                const T* us = u + (y + 1) * elem_step + x;
                const T* ds = d + (y - 1) * elem_step + x;
                const T* ls = l + y * elem_step + (x + 1);
                const T* rs = r + y * elem_step + (x - 1);
                const T* dt = data + y * elem_step + x;

                size_t disp_step = disp.rows * elem_step;

                int best = 0;
                float best_val = numeric_limits<float>::max();
                // NOTE: the loop variable 'd' shadows the message-pointer
                // parameter 'd'; 'ds' was already derived from it above, so
                // behavior is unaffected.
                for (int d = 0; d < cndisp; ++d)
                {
                    float val = us[d * disp_step];
                    val += ds[d * disp_step];
                    val += ls[d * disp_step];
                    val += rs[d * disp_step];
                    val += dt[d * disp_step];

                    if (val < best_val)
                    {
                        best_val = val;
                        best = d;
                    }
                }

                disp.ptr(y)[x] = saturate_cast<short>(best);
            }
        }

        // Host launcher for the output kernel; synchronizes on the default
        // stream.
        template <typename T>
        void output_gpu(const PtrStepSzb& u, const PtrStepSzb& d, const PtrStepSzb& l, const PtrStepSzb& r, const PtrStepSzb& data, const PtrStepSz<short>& disp, cudaStream_t stream)
        {
            dim3 threads(32, 8, 1);
            dim3 grid(1, 1, 1);

            grid.x = divUp(disp.cols, threads.x);
            grid.y = divUp(disp.rows, threads.y);

            int elem_step = static_cast<int>(u.step/sizeof(T));

            output<T><<<grid, threads, 0, stream>>>(elem_step, (const T*)u.data, (const T*)d.data, (const T*)l.data, (const T*)r.data, (const T*)data.data, disp);
            cudaSafeCall( cudaGetLastError() );

            if (stream == 0)
                cudaSafeCall( cudaDeviceSynchronize() );
        }

        template void output_gpu<short>(const PtrStepSzb& u, const PtrStepSzb& d, const PtrStepSzb& l, const PtrStepSzb& r, const PtrStepSzb& data, const PtrStepSz<short>& disp, cudaStream_t stream);
        template void output_gpu<float>(const PtrStepSzb& u, const PtrStepSzb& d, const PtrStepSzb& l, const PtrStepSzb& r, const PtrStepSzb& data, const PtrStepSz<short>& disp, cudaStream_t stream);
    } // namespace stereobp
}}} // namespace cv { namespace cuda { namespace cudev

#endif /* CUDA_DISABLER */
the_stack
#define _4HALF2_ 4 #define _8HALF_ 8 #define _INT4_TO_4INT_ 4 #define _INT4_TO_8HALF_ 8 #define _INT4_TO_4FLOAT_ 4 #include <cuda_fp16.h> #include "cudakernel/common/macro.h" __global__ void MergeConvSplitResults( int4 *input, int4 *output, int split_height_v1, int split_width_v8, int out_hw, int split, int has_bias, const int4 *bias, int has_relu, const __half2 clip_min, bool has_clip, const __half2 clip_max, int has_prelu, const void *prelu, bool has_elt, const int4 *pre_data, int has_elt_relu, const __half2 elt_clip_min, bool has_elt_clip, const __half2 elt_clip_max, int has_elt_prelu, const void *elt_prelu, const __half leaky, const __half elt_leaky, bool has_concat, int concat_offset_v8, int concat_stride_v8) { #if (__CUDA_ARCH__ >= 600) && (__CUDACC_VER_MAJOR__ >= 9) int k_id = blockIdx.y * blockDim.x + threadIdx.x; int64_t nhw_id = blockIdx.x; int off = nhw_id * split_width_v8 + k_id; const int4 ZEROv4 = {0, 0, 0, 0}; bool is_in_range = k_id < split_width_v8; int4 merge_v4, split_v4, bias_v4; __half2 *h2_merge = (__half2 *)&merge_v4; __half2 *h2_split = (__half2 *)&split_v4; __half2 *h2_bias = (__half2 *)&bias_v4; merge_v4 = is_in_range ? input[off] : ZEROv4; for (int i = 1; i < split; i++) { split_v4 = is_in_range ? input[off + i * split_height_v1 * split_width_v8] : ZEROv4; for (int j = 0; j < _4HALF2_; j++) h2_merge[j] = __hadd2(h2_merge[j], h2_split[j]); } if (has_bias) { bias_v4 = is_in_range ? 
((int4 *)bias)[k_id] : ZEROv4; #pragma unroll for (int j = 0; j < _4HALF2_; j++) h2_merge[j] = __hadd2(h2_merge[j], h2_bias[j]); } int *merge_v1 = (int *)&merge_v4; __half *h_merge = (__half *)&merge_v4; if (has_relu) { if (has_relu == 1) { #pragma unroll for (int i = 0; i < _4HALF2_; i++) merge_v1[i] = __vmaxs2(merge_v1[i], 0); } else { __half *h_merge = (__half*)merge_v1; #pragma unroll for (int i = 0; i < _INT4_TO_8HALF_; i++) { h_merge[i] = __expf((float)h_merge[i]) / (1.f + __expf((float)h_merge[i])); } } } else if (has_clip) { #pragma unroll for (int i = 0; i < _4HALF2_; i++) { h2_merge[i].x = __hgt(h2_merge[i].x, clip_max.x) ? clip_max.x : h2_merge[i].x; h2_merge[i].y = __hgt(h2_merge[i].y, clip_max.x) ? clip_max.x : h2_merge[i].y; h2_merge[i].x = __hlt(h2_merge[i].x, clip_min.x) ? clip_min.x : h2_merge[i].x; h2_merge[i].y = __hlt(h2_merge[i].y, clip_min.x) ? clip_min.x : h2_merge[i].y; } } else if (has_prelu) { if (has_prelu == 1) { #pragma unroll for (int i = 0; i < _INT4_TO_8HALF_; i++) if (__hlt(h_merge[i], 0)) h_merge[i] = __hmul(h_merge[i], leaky); } if (has_prelu == 2) { int4 scale_v4 = ((int4 *)prelu)[k_id]; __half *h_scale = (__half *)&scale_v4; #pragma unroll for (int i = 0; i < _INT4_TO_8HALF_; i++) if (__hlt(h_merge[i], 0)) h_merge[i] = __hmul(h_merge[i], h_scale[i]); } if (has_prelu == 3) { int4 elt_v4 = ((int4 *)prelu)[off]; __half *h_elt = (__half *)&elt_v4; #pragma unroll for (int i = 0; i < _INT4_TO_8HALF_; i++) if (__hlt(h_merge[i], 0)) h_merge[i] = __hmul(h_merge[i], h_elt[i]); } } if (has_elt) { int4 eltV4 = is_in_range ? 
pre_data[off] : ZEROv4;  // continuation of the fp16 kernel above: eltwise operand, zero when lane is out of range
        __half2 *h2Elt = (__half2 *)&eltV4;
        for (int i = 0; i < _INT4_TO_4INT_; i++)
            h2_merge[i] = __hadd2(h2_merge[i], h2Elt[i]);
    }
    // Post-eltwise activation (mutually exclusive): relu/sigmoid, clip, or prelu.
    if (has_elt_relu) {
        if (has_elt_relu == 1) {
            for (int i = 0; i < _4HALF2_; i++)
                merge_v1[i] = __vmaxs2(merge_v1[i], 0);
        } else {
            __half *h_merge = (__half *)merge_v1;
#pragma unroll
            for (int i = 0; i < _INT4_TO_8HALF_; i++) {
                h_merge[i] = __expf((float)h_merge[i]) / (1.f + __expf((float)h_merge[i]));
            }
        }
    } else if (has_elt_clip) {
        for (int i = 0; i < _4HALF2_; i++) {
            h2_merge[i].x = __hgt(h2_merge[i].x, elt_clip_max.x) ? elt_clip_max.x : h2_merge[i].x;
            h2_merge[i].y = __hgt(h2_merge[i].y, elt_clip_max.x) ? elt_clip_max.x : h2_merge[i].y;
            h2_merge[i].x = __hlt(h2_merge[i].x, elt_clip_min.x) ? elt_clip_min.x : h2_merge[i].x;
            h2_merge[i].y = __hlt(h2_merge[i].y, elt_clip_min.x) ? elt_clip_min.x : h2_merge[i].y;
        }
    } else if (has_elt_prelu) {
        // BUGFIX: was `has_prelu == 1`, so the post-eltwise leaky branch only ran
        // when the *pre*-eltwise prelu flag happened to be set as well.
        if (has_elt_prelu == 1) {
            for (int i = 0; i < _INT4_TO_8HALF_; i++)
                if (__hlt(h_merge[i], 0)) h_merge[i] = __hmul(h_merge[i], elt_leaky);
        }
        if (has_elt_prelu == 2) {
            // NOTE(review): reads the `prelu` table here; if this kernel also has a
            // separate `elt_prelu` parameter (the fp32 variant below does), that one
            // is presumably intended — confirm against the kernel signature.
            int4 scale_v4 = ((int4 *)prelu)[k_id];
            __half *h_scale = (__half *)&scale_v4;
            for (int i = 0; i < _INT4_TO_8HALF_; i++)
                if (__hlt(h_merge[i], 0)) h_merge[i] = __hmul(h_merge[i], h_scale[i]);
        }
        if (has_elt_prelu == 3) {
            int4 elt_v4 = ((int4 *)prelu)[off];
            __half *h_elt = (__half *)&elt_v4;
            for (int i = 0; i < _INT4_TO_8HALF_; i++)
                if (__hlt(h_merge[i], 0)) h_merge[i] = __hmul(h_merge[i], h_elt[i]);
        }
    }
    // Optional concat: redirect the store into the concatenation buffer.
    int concat_v8_off = 0;
    if (has_concat) {
        concat_v8_off = concat_offset_v8 + nhw_id * concat_stride_v8;
        off = concat_v8_off + k_id;
    }
    if (is_in_range) output[off] = merge_v4;
#endif
}

/**
 * Merge partial convolution results (fp32 accumulators) from `split` slices,
 * apply bias and a first activation, optionally add a dequantized int8 eltwise
 * input followed by a second activation, then quantize to int8 with saturation
 * (4 bytes packed per output int).
 *
 * Launch layout: blockIdx.x indexes NHW positions; blockIdx.y * blockDim.x +
 * threadIdx.x indexes the packed channel dimension (split_width_v8).
 * Requires SM60+ / CUDA 9+ (guarded by the preprocessor condition below).
 */
__global__ void MergeConvSplitResultsFp32(
        int4* input, int* output,
        int split_height_v1, int split_width_v8,
        int out_hw, int split,
        int has_bias, const int4* bias,
        int has_relu, const float clip_min, bool has_clip, const float clip_max,
        int has_prelu, const void* prelu,
        bool has_elt, const int4* pre_data,
        int has_elt_relu, const float elt_clip_min,
        bool has_elt_clip, const float elt_clip_max,
        int has_elt_prelu, const void* elt_prelu,
        const float leaky, const float elt_leaky,
        bool has_concat, int concat_offset_v8, int concat_stride_v8,
        float out_scale, float pre_scale) {
#if (__CUDA_ARCH__ >= 600) && (__CUDACC_VER_MAJOR__ >= 9)
    int k_id = blockIdx.y * blockDim.x + threadIdx.x;
    int64_t nhw_id = blockIdx.x;
    int off = nhw_id * split_width_v8 + k_id;
    const int4 ZEROv4 = {0, 0, 0, 0};
    // Tail guard: the channel grid may overshoot split_width_v8.
    bool is_in_range = k_id < split_width_v8;

    int4 merge_v4, split_v4, bias_v4;
    float *f_merge = (float *)&merge_v4;
    float *f_split = (float *)&split_v4;
    float *f_bias  = (float *)&bias_v4;

    // Sum the partial results of all `split` slices.
    merge_v4 = is_in_range ? input[off] : ZEROv4;
    for (int i = 1; i < split; i++) {
        split_v4 = is_in_range ? input[off + i * split_height_v1 * split_width_v8] : ZEROv4;
        for (int j = 0; j < _INT4_TO_4FLOAT_; j++)
            f_merge[j] = f_merge[j] + f_split[j];
    }
    if (has_bias) {
        bias_v4 = is_in_range ? ((int4 *)bias)[k_id] : ZEROv4;
#pragma unroll
        for (int j = 0; j < _INT4_TO_4FLOAT_; j++)
            f_merge[j] = f_merge[j] + f_bias[j];
    }

    int *merge_v1 = (int *)&merge_v4;
    // First activation stage (pre-eltwise): relu/sigmoid, clip, or prelu.
    if (has_relu) {
        if (has_relu == 1) {
            // ReLU via the float's bit pattern: negative floats reinterpret as
            // negative ints; `0.f` converts to integer 0 on assignment.
            for (int i = 0; i < _INT4_TO_4FLOAT_; i++)
                merge_v1[i] = merge_v1[i] >= 0 ? merge_v1[i] : 0.f;
        } else {
            for (int i = 0; i < _INT4_TO_4FLOAT_; i++)
                f_merge[i] = __expf(f_merge[i]) / (1.f + __expf(f_merge[i]));
        }
    } else if (has_clip) {
#pragma unroll
        for (int i = 0; i < _INT4_TO_4FLOAT_; i++) {
            f_merge[i] = (f_merge[i] > clip_max) ? clip_max : f_merge[i];
            f_merge[i] = (f_merge[i] < clip_min) ? clip_min : f_merge[i];
        }
    } else if (has_prelu) {
        if (has_prelu == 1) {
#pragma unroll
            for (int i = 0; i < _INT4_TO_4FLOAT_; i++)
                if (f_merge[i] < 0) f_merge[i] = f_merge[i] * leaky;
        }
        if (has_prelu == 2) {
            // Per-channel scale.
            int4 scale_v4 = ((int4 *)prelu)[k_id];
            float *h_scale = (float *)&scale_v4;
#pragma unroll
            for (int i = 0; i < _INT4_TO_4FLOAT_; i++)
                if (f_merge[i] < 0) f_merge[i] = f_merge[i] * h_scale[i];
        }
        if (has_prelu == 3) {
            // Per-element scale.
            int4 elt_v4 = ((int4 *)prelu)[off];
            float *h_elt = (float *)&elt_v4;
#pragma unroll
            for (int i = 0; i < _INT4_TO_4FLOAT_; i++)
                if (f_merge[i] < 0) f_merge[i] = f_merge[i] * h_elt[i];
        }
    }

    // Optional int8 eltwise input, dequantized with pre_scale.
    if (has_elt) {
        int elt_v4 = is_in_range ? ((int *)pre_data)[off] : 0;
        int8_t *elt = (int8_t *)&elt_v4;
#pragma unroll
        for (int i = 0; i < _INT4_TO_4INT_; i++)
            f_merge[i] += (int)elt[i] * pre_scale;
    }

    // Second activation stage (post-eltwise).
    if (has_elt_relu) {
        if (has_elt_relu == 1) {
            for (int i = 0; i < _INT4_TO_4FLOAT_; i++)
                merge_v1[i] = (merge_v1[i] >= 0) ? merge_v1[i] : 0;
        } else {
            for (int i = 0; i < _INT4_TO_4FLOAT_; i++)
                f_merge[i] = __expf(f_merge[i]) / (1.f + __expf(f_merge[i]));
        }
    } else if (has_elt_clip) {
        for (int i = 0; i < _INT4_TO_4FLOAT_; i++) {
            f_merge[i] = (f_merge[i] > elt_clip_max) ? elt_clip_max : f_merge[i];
            f_merge[i] = (f_merge[i] < elt_clip_min) ? elt_clip_min : f_merge[i];
        }
    } else if (has_elt_prelu) {
        // BUGFIX: was `has_prelu == 1` — the post-eltwise leaky branch only ran
        // when the *pre*-eltwise prelu flag happened to be set as well.
        if (has_elt_prelu == 1) {
            for (int i = 0; i < _INT4_TO_4FLOAT_; i++)
                if (f_merge[i] < 0) f_merge[i] = f_merge[i] * elt_leaky;
        }
        // BUGFIX: read from `elt_prelu` (the code previously read `prelu`, which
        // left the `elt_prelu` parameter entirely unused and mixed up the two
        // prelu tables). Flagged for confirmation against existing callers.
        if (has_elt_prelu == 2) {
            int4 scale_v4 = ((int4 *)elt_prelu)[k_id];
            float *h_scale = (float *)&scale_v4;
            for (int i = 0; i < _INT4_TO_4FLOAT_; i++)
                if (f_merge[i] < 0) f_merge[i] = f_merge[i] * h_scale[i];
        }
        if (has_elt_prelu == 3) {
            int4 elt_v4 = ((int4 *)elt_prelu)[off];
            float *h_elt = (float *)&elt_v4;
            for (int i = 0; i < _INT4_TO_4FLOAT_; i++)
                if (f_merge[i] < 0) f_merge[i] = f_merge[i] * h_elt[i];
        }
    }

    // Quantize to int8 with saturation and pack 4 bytes into one int (x lowest).
#define packchar4(_outData, x, y, z, w) {        \
        if (x > 127) x = 127;                    \
        if (x < -128) x = -128;                  \
        if (y > 127) y = 127;                    \
        if (y < -128) y = -128;                  \
        if (z > 127) z = 127;                    \
        if (z < -128) z = -128;                  \
        if (w > 127) w = 127;                    \
        if (w < -128) w = -128;                  \
        x = (0xffu & (int8_t)x);                 \
        y = (0xffu & (int8_t)y) << 8;            \
        z = (0xffu & (int8_t)z) << 16;           \
        w = (0xffu & (int8_t)w) << 24;           \
        _outData = w | z | y | x;/*(x,y,z,w)*/   \
    }
    for (int i = 0; i < _INT4_TO_4FLOAT_; i++)
        merge_v1[i] = __float2int_rn(f_merge[i] * out_scale);
    int outData;
    packchar4(outData, merge_v1[0], merge_v1[1], merge_v1[2], merge_v1[3]);
#undef packchar4

    // Optional concat: redirect the store into the concatenation buffer.
    int concat_v8_off = 0;
    if (has_concat) {
        concat_v8_off = concat_offset_v8 + nhw_id * concat_stride_v8;
        off = concat_v8_off + k_id;
    }
    if (is_in_range) output[off] = outData;
#endif
}
the_stack
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
//#include "caffe/vision_layers.hpp"
#include "ctpn_layers.hpp"

namespace caffe {

// Logistic sigmoid, used for the input/forget/output gates.
template <typename Dtype>
__device__ Dtype sigmoid(const Dtype x) {
  return Dtype(1) / (Dtype(1) + exp(-x));
}

// tanh expressed through sigmoid: tanh(x) = 2*sigmoid(2x) - 1.
template <typename Dtype>
__device__ Dtype tanh(const Dtype x) {
  return Dtype(2) * sigmoid(Dtype(2) * x) - Dtype(1);
}

// data[i] += clip_t * add_vec[i].
// clip_t gates the recurrent contribution: the per-sequence continuation
// marker when `clip` is given, otherwise 1 for t > 0 and 0 at t == 0.
template <typename Dtype>
__global__ void ClipAdd(const int nthreads, const int dim, int t,
    const Dtype* clip, const Dtype* add_vec, Dtype* data) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int n = index / dim;
    const Dtype clip_t = clip ? clip[n] : Dtype(t > 0);
    data[index] += clip_t * add_vec[index];
  }
}

// Gate nonlinearities: sigmoid for i/f/o (first 3*H entries of each 4*H gate
// slab), tanh for the candidate g (last H entries).
template <typename Dtype>
__global__ void ActivationForward(const int nthreads, const int H,
    const Dtype* pre_gate, Dtype* gate) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int d = index % (4*H);
    gate[index] = d < 3*H ? sigmoid(pre_gate[index]) : tanh(pre_gate[index]);
  }
}

// One LSTM step: c(t) = clip_t * f(t) * c(t-1) + i(t) * g(t);
//                h(t) = o(t) * tanh(c(t)).
template <typename Dtype>
__global__ void LSTMForward(const int nthreads, const int H, const int t,
    const Dtype* c_prev, const Dtype* gate, const Dtype* clip,
    Dtype* c_t, Dtype* h_t) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int n = index / H;
    const int d = index % H;
    const Dtype* offset = gate + 4*H*n;
    const Dtype i_t = offset[d];
    const Dtype f_t = offset[H + d];
    const Dtype o_t = offset[2*H + d];
    const Dtype g_t = offset[3*H + d];
    const Dtype c_t_1 = c_prev[index];
    const Dtype clip_t = clip ? clip[n] : Dtype(t > 0);
    c_t[index] = clip_t * f_t * c_t_1 + i_t * g_t;
    h_t[index] = o_t * tanh(c_t[index]);
  }
}

// Backward pass of one LSTM step: accumulates into dc_t and writes the four
// per-gate diffs plus the cell diff of the previous step.
template <typename Dtype>
__global__ void LSTMBackward(const int nthreads, const int H, const int t,
    const Dtype* c_prev, const Dtype* gate, const Dtype* c_t,
    const Dtype* clip, Dtype* dc_t, const Dtype* dh_t,
    Dtype* dc_prev, Dtype* gate_diff) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int n = index / H;
    const int d = index % H;
    const Dtype* gate_t = gate + 4*H*n;
    const Dtype i_t = gate_t[d];
    const Dtype f_t = gate_t[H + d];
    const Dtype o_t = gate_t[2*H + d];
    const Dtype g_t = gate_t[3*H + d];
    const Dtype c_t_1 = c_prev[index];
    const Dtype c = c_t[index];
    const Dtype tanh_c = tanh(c);
    const Dtype clip_t = clip ? clip[n] : Dtype(t > 0);
    Dtype* dc_t_1 = dc_prev + index;
    Dtype* gate_diff_t = gate_diff + 4*H*n;
    Dtype* di_t = gate_diff_t + d;
    Dtype* df_t = gate_diff_t + H + d;
    Dtype* do_t = gate_diff_t + 2*H + d;
    Dtype* dg_t = gate_diff_t + 3*H + d;

    // Output gate : tanh(c(t)) * h_diff(t)
    *do_t = dh_t[index] * tanh_c;
    // Cell state : o(t) * tanh'(c(t)) * h_diff(t) + f(t+1) * c_diff(t+1)
    dc_t[index] += dh_t[index] * o_t * (Dtype(1) - tanh_c * tanh_c);
    // c_diff(t-1) += f(t) * c_diff(t)
    *dc_t_1 = clip_t * dc_t[index] * f_t;
    // Forget gate : c(t-1) * c_diff(t)
    *df_t = clip_t * dc_t[index] * c_t_1;
    // Input gate : g(t) * c_diff(t)
    *di_t = dc_t[index] * g_t;
    // Input modulation gate : i(t) * c_diff(t)
    *dg_t = dc_t[index] * i_t;
  }
}

// Chain the gate nonlinearities backward and (optionally) clip the
// pre-activation gradients to +/- clip_threshold.
template <typename Dtype>
__global__ void ActivationBackward(const int nthreads, const int H,
    const Dtype clip_threshold, const Dtype* gate, const Dtype* gate_diff,
    Dtype* pre_gate_diff) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int d = index % (4 * H);
    const Dtype gate_val = gate[index];
    if (d < 3 * H) {
      // sigmoid'(x) = s * (1 - s)
      pre_gate_diff[index] = gate_diff[index] * gate_val * (Dtype(1) - gate_val);
    } else {
      // tanh'(x) = 1 - t^2
      pre_gate_diff[index] = gate_diff[index] * (Dtype(1) - gate_val * gate_val);
    }
    if (clip_threshold > Dtype(0)) {
      if (pre_gate_diff[index] < -clip_threshold) {
        pre_gate_diff[index] = -clip_threshold;
      } else if (pre_gate_diff[index] > clip_threshold) {
        pre_gate_diff[index] = clip_threshold;
      }
    }
  }
}

// Forward pass over the whole sequence: one big input-to-hidden GEMM, then a
// per-timestep recurrent GEMM + gate activation + LSTM step.
// bottom[1] (optional) holds per-sequence continuation markers ("clip").
template <typename Dtype>
void LstmLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  CHECK_EQ(top[0]->gpu_data(), top_.gpu_data());
  Dtype* top_data = top_.mutable_gpu_data();
  const Dtype* bottom_data = bottom[0]->gpu_data();
  const Dtype* clip = NULL;
  if (bottom.size() > 1) {
    clip = bottom[1]->gpu_data();
    CHECK_EQ(bottom[1]->num(), bottom[1]->count());
  }
  const Dtype* weight_i = this->blobs_[0]->gpu_data();
  const Dtype* weight_h = this->blobs_[1]->gpu_data();
  const Dtype* bias = this->blobs_[2]->gpu_data();
  Dtype* pre_gate_data = pre_gate_.mutable_gpu_data();
  Dtype* gate_data = gate_.mutable_gpu_data();
  Dtype* cell_data = cell_.mutable_gpu_data();

  // Initialize previous state: carry over for continued sequences (clip given),
  // otherwise start from zero.
  if (clip) {
    caffe_copy(c_0_.count(), c_T_.gpu_data(), c_0_.mutable_gpu_data());
    caffe_copy(h_0_.count(), h_T_.gpu_data(), h_0_.mutable_gpu_data());
  } else {
    caffe_gpu_set(c_0_.count(), Dtype(0.), c_0_.mutable_gpu_data());
    caffe_gpu_set(h_0_.count(), Dtype(0.), h_0_.mutable_gpu_data());
  }

  // Compute input to hidden forward propagation (all timesteps at once).
  caffe_gpu_gemm(CblasNoTrans, CblasTrans, T_*N_, 4*H_, I_, Dtype(1.),
      bottom_data, weight_i, Dtype(0.), pre_gate_data);
  caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, T_*N_, 4*H_, 1, Dtype(1.),
      bias_multiplier_.gpu_data(), bias, Dtype(1.), pre_gate_data);

  // Compute recurrent forward propagation.
  for (int t = 0; t < T_; ++t) {
    Dtype* h_t = top_data + top_.offset(t);
    Dtype* c_t = cell_data + cell_.offset(t);
    Dtype* pre_gate_t = pre_gate_data + pre_gate_.offset(t);
    Dtype* gate_t = gate_data + gate_.offset(t);
    const Dtype* clip_t = clip ? clip + bottom[1]->offset(t) : NULL;
    const Dtype* h_t_1 = t > 0 ? (h_t - top_.offset(1)) : h_0_.gpu_data();
    const Dtype* c_t_1 = t > 0 ? (c_t - cell_.offset(1)) : c_0_.gpu_data();

    caffe_gpu_gemm(CblasNoTrans, CblasTrans, N_, 4*H_, H_, Dtype(1.),
        h_t_1, weight_h, Dtype(0.), h_to_gate_.mutable_gpu_data());
    ClipAdd<Dtype><<<CAFFE_GET_BLOCKS(4*N_*H_), CAFFE_CUDA_NUM_THREADS>>>(
        4*N_*H_, 4*H_, t, clip_t, h_to_gate_.gpu_data(), pre_gate_t);
    CUDA_POST_KERNEL_CHECK;
    ActivationForward<Dtype><<<CAFFE_GET_BLOCKS(4*N_*H_), CAFFE_CUDA_NUM_THREADS>>>(
        4*N_*H_, H_, pre_gate_t, gate_t);
    CUDA_POST_KERNEL_CHECK;
    LSTMForward<Dtype><<<CAFFE_GET_BLOCKS(N_*H_), CAFFE_CUDA_NUM_THREADS>>>(
        N_*H_, H_, t, c_t_1, gate_t, clip_t, c_t, h_t);
    CUDA_POST_KERNEL_CHECK;
  }

  // Preserve cell state and output value for truncated BPTT.
  caffe_copy(N_*H_, cell_data + cell_.offset(T_-1), c_T_.mutable_gpu_data());
  caffe_copy(N_*H_, top_data + top_.offset(T_-1), h_T_.mutable_gpu_data());
}

// Backward pass: per-timestep BPTT from T-1 down to 0, then weight/bias/bottom
// gradients via batched GEMMs/GEMV over the accumulated pre-gate diffs.
template <typename Dtype>
void LstmLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  const Dtype* top_data = top_.gpu_data();
  const Dtype* bottom_data = bottom[0]->gpu_data();
  const Dtype* clip = NULL;
  if (bottom.size() > 1) {
    clip = bottom[1]->gpu_data();
    CHECK_EQ(bottom[1]->num(), bottom[1]->count());
  }
  const Dtype* weight_i = this->blobs_[0]->gpu_data();
  const Dtype* weight_h = this->blobs_[1]->gpu_data();
  const Dtype* gate_data = gate_.gpu_data();
  const Dtype* cell_data = cell_.gpu_data();

  Dtype* top_diff = top_.mutable_gpu_diff();
  Dtype* pre_gate_diff = pre_gate_.mutable_gpu_diff();
  Dtype* gate_diff = gate_.mutable_gpu_diff();
  Dtype* cell_diff = cell_.mutable_gpu_diff();

  // Seed the last cell diff from the saved truncated-BPTT state.
  caffe_copy(N_*H_, c_T_.gpu_diff(), cell_diff + cell_.offset(T_-1));

  for (int t = T_-1; t >= 0; --t) {
    Dtype* dh_t = top_diff + top_.offset(t);
    Dtype* dc_t = cell_diff + cell_.offset(t);
    Dtype* gate_diff_t = gate_diff + gate_.offset(t);
    Dtype* pre_gate_diff_t = pre_gate_diff + pre_gate_.offset(t);
    Dtype* dh_t_1 = t > 0 ? top_diff + top_.offset(t-1) : h_0_.mutable_gpu_diff();
    Dtype* dc_t_1 = t > 0 ? cell_diff + cell_.offset(t-1) : c_0_.mutable_gpu_diff();
    const Dtype* clip_t = clip ? clip + bottom[1]->offset(t) : NULL;
    const Dtype* c_t = cell_data + cell_.offset(t);
    const Dtype* c_t_1 = t > 0 ? cell_data + cell_.offset(t-1) : c_0_.gpu_data();
    const Dtype* gate_t = gate_data + gate_.offset(t);

    LSTMBackward<Dtype><<<CAFFE_GET_BLOCKS(N_*H_), CAFFE_CUDA_NUM_THREADS>>>(
        N_*H_, H_, t, c_t_1, gate_t, c_t, clip_t, dc_t, dh_t, dc_t_1, gate_diff_t);
    CUDA_POST_KERNEL_CHECK;
    ActivationBackward<Dtype><<<CAFFE_GET_BLOCKS(4*N_*H_), CAFFE_CUDA_NUM_THREADS>>>(
        4*N_*H_, H_, clipping_threshold_, gate_t, gate_diff_t, pre_gate_diff_t);
    CUDA_POST_KERNEL_CHECK;

    // Backprop errors to the previous time step.
    caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, N_, H_, 4*H_,
        Dtype(1.), pre_gate_diff_t, weight_h, Dtype(0.),
        h_to_h_.mutable_gpu_data());
    ClipAdd<Dtype><<<CAFFE_GET_BLOCKS(N_*H_), CAFFE_CUDA_NUM_THREADS>>>(
        N_*H_, H_, t, clip_t, h_to_h_.gpu_data(), dh_t_1);
    // BUGFIX(consistency): this launch was the only one in the file without a
    // post-launch check; a failure here would surface much later.
    CUDA_POST_KERNEL_CHECK;
  }

  if (this->param_propagate_down_[0]) {
    // Gradient w.r.t. input-to-hidden weight
    caffe_gpu_gemm(CblasTrans, CblasNoTrans, 4*H_, I_, T_*N_, Dtype(1.),
        pre_gate_diff, bottom_data, Dtype(1.),
        this->blobs_[0]->mutable_gpu_diff());
  }

  if (this->param_propagate_down_[1]) {
    // Gradient w.r.t. hidden-to-hidden weight
    caffe_gpu_gemm(CblasTrans, CblasNoTrans, 4*H_, H_, (T_-1)*N_, Dtype(1.),
        pre_gate_diff + pre_gate_.offset(1), top_data, Dtype(1.),
        this->blobs_[1]->mutable_gpu_diff());
    // Add gradient from previous time-step (t == 0 uses h_0_).
    caffe_gpu_gemm(CblasTrans, CblasNoTrans, 4*H_, H_, 1, Dtype(1.),
        pre_gate_diff, h_0_.gpu_data(), Dtype(1.),
        this->blobs_[1]->mutable_gpu_diff());
  }

  if (this->param_propagate_down_[2]) {
    // Gradient w.r.t. bias
    caffe_gpu_gemv(CblasTrans, T_*N_, 4*H_, Dtype(1.), pre_gate_diff,
        bias_multiplier_.gpu_data(), Dtype(1.),
        this->blobs_[2]->mutable_gpu_diff());
  }

  if (propagate_down[0]) {
    // Gradient w.r.t. bottom data
    caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, T_*N_, I_, 4*H_, Dtype(1.),
        pre_gate_diff, weight_i, Dtype(0.), bottom[0]->mutable_gpu_diff());
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(LstmLayer);

}  // namespace caffe
the_stack
#include "Open3D/Core/Kernel/BinaryEW.h"

#include "Open3D/Core/CUDAState.cuh"
#include "Open3D/Core/CUDAUtils.h"
#include "Open3D/Core/Dispatch.h"
#include "Open3D/Core/Kernel/CUDALauncher.cuh"
#include "Open3D/Core/Tensor.h"

namespace open3d {
namespace kernel {

// Element-wise kernels operating on type-erased pointers. The arithmetic
// kernels keep the input dtype; the comparison/logical kernels take a separate
// destination type (bool or same-as-input for the in-place variants).

template <typename scalar_t>
static OPEN3D_HOST_DEVICE void CUDAAddElementKernel(const void* lhs,
                                                    const void* rhs,
                                                    void* dst) {
    *static_cast<scalar_t*>(dst) = *static_cast<const scalar_t*>(lhs) +
                                   *static_cast<const scalar_t*>(rhs);
}

template <typename scalar_t>
static OPEN3D_HOST_DEVICE void CUDASubElementKernel(const void* lhs,
                                                    const void* rhs,
                                                    void* dst) {
    *static_cast<scalar_t*>(dst) = *static_cast<const scalar_t*>(lhs) -
                                   *static_cast<const scalar_t*>(rhs);
}

template <typename scalar_t>
static OPEN3D_HOST_DEVICE void CUDAMulElementKernel(const void* lhs,
                                                    const void* rhs,
                                                    void* dst) {
    *static_cast<scalar_t*>(dst) = *static_cast<const scalar_t*>(lhs) *
                                   *static_cast<const scalar_t*>(rhs);
}

template <typename scalar_t>
static OPEN3D_HOST_DEVICE void CUDADivElementKernel(const void* lhs,
                                                    const void* rhs,
                                                    void* dst) {
    *static_cast<scalar_t*>(dst) = *static_cast<const scalar_t*>(lhs) /
                                   *static_cast<const scalar_t*>(rhs);
}

template <typename src_t, typename dst_t>
static OPEN3D_HOST_DEVICE void CUDALogicalAndElementKernel(const void* lhs,
                                                           const void* rhs,
                                                           void* dst) {
    *static_cast<dst_t*>(dst) = static_cast<dst_t>(
            static_cast<bool>(*static_cast<const src_t*>(lhs)) &&
            static_cast<bool>(*static_cast<const src_t*>(rhs)));
}

template <typename src_t, typename dst_t>
static OPEN3D_HOST_DEVICE void CUDALogicalOrElementKernel(const void* lhs,
                                                          const void* rhs,
                                                          void* dst) {
    *static_cast<dst_t*>(dst) = static_cast<dst_t>(
            static_cast<bool>(*static_cast<const src_t*>(lhs)) ||
            static_cast<bool>(*static_cast<const src_t*>(rhs)));
}

// XOR via `!=` on the bool projections of the inputs.
template <typename src_t, typename dst_t>
static OPEN3D_HOST_DEVICE void CUDALogicalXorElementKernel(const void* lhs,
                                                           const void* rhs,
                                                           void* dst) {
    *static_cast<dst_t*>(dst) = static_cast<dst_t>(
            static_cast<bool>(*static_cast<const src_t*>(lhs)) !=
            static_cast<bool>(*static_cast<const src_t*>(rhs)));
}

template <typename src_t, typename dst_t>
static OPEN3D_HOST_DEVICE void CUDAGtElementKernel(const void* lhs,
                                                   const void* rhs,
                                                   void* dst) {
    *static_cast<dst_t*>(dst) = static_cast<dst_t>(
            *static_cast<const src_t*>(lhs) > *static_cast<const src_t*>(rhs));
}

template <typename src_t, typename dst_t>
static OPEN3D_HOST_DEVICE void CUDALtElementKernel(const void* lhs,
                                                   const void* rhs,
                                                   void* dst) {
    *static_cast<dst_t*>(dst) = static_cast<dst_t>(
            *static_cast<const src_t*>(lhs) < *static_cast<const src_t*>(rhs));
}

// Qualifier order normalized to `static OPEN3D_HOST_DEVICE void` for
// consistency with the kernels above (the following four previously used
// `static void OPEN3D_HOST_DEVICE`).
template <typename src_t, typename dst_t>
static OPEN3D_HOST_DEVICE void CUDAGeqElementKernel(const void* lhs,
                                                    const void* rhs,
                                                    void* dst) {
    *static_cast<dst_t*>(dst) = static_cast<dst_t>(
            *static_cast<const src_t*>(lhs) >= *static_cast<const src_t*>(rhs));
}

template <typename src_t, typename dst_t>
static OPEN3D_HOST_DEVICE void CUDALeqElementKernel(const void* lhs,
                                                    const void* rhs,
                                                    void* dst) {
    *static_cast<dst_t*>(dst) = static_cast<dst_t>(
            *static_cast<const src_t*>(lhs) <= *static_cast<const src_t*>(rhs));
}

template <typename src_t, typename dst_t>
static OPEN3D_HOST_DEVICE void CUDAEqElementKernel(const void* lhs,
                                                   const void* rhs,
                                                   void* dst) {
    *static_cast<dst_t*>(dst) = static_cast<dst_t>(
            *static_cast<const src_t*>(lhs) == *static_cast<const src_t*>(rhs));
}

template <typename src_t, typename dst_t>
static OPEN3D_HOST_DEVICE void CUDANeqElementKernel(const void* lhs,
                                                    const void* rhs,
                                                    void* dst) {
    *static_cast<dst_t*>(dst) = static_cast<dst_t>(
            *static_cast<const src_t*>(lhs) != *static_cast<const src_t*>(rhs));
}

// Dispatch a boolean-result (logical/comparison) op to its element kernel.
// One launch per op; the local macro binds the element kernel into the
// launcher lambda. The `rhs` lambda parameter is now `const void*`, matching
// the element-kernel signatures (it was `void*` before).
template <typename src_t, typename dst_t>
static void LaunchBoolBinaryEWCUDAKernel(const Tensor& lhs,
                                         const Tensor& rhs,
                                         Tensor& dst,
                                         BinaryEWOpCode op_code,
                                         const Indexer& indexer) {
#define OPEN3D_LAUNCH_BOOL_BINARY_EW(ELEMENT_KERNEL)                          \
    CUDALauncher::LaunchBinaryEWKernel(                                       \
            indexer,                                                          \
            [] OPEN3D_HOST_DEVICE(const void* lhs, const void* rhs,           \
                                  void* dst) {                                \
                ELEMENT_KERNEL<src_t, dst_t>(lhs, rhs, dst);                  \
            })
    switch (op_code) {
        case BinaryEWOpCode::LogicalAnd:
            OPEN3D_LAUNCH_BOOL_BINARY_EW(CUDALogicalAndElementKernel);
            break;
        case BinaryEWOpCode::LogicalOr:
            OPEN3D_LAUNCH_BOOL_BINARY_EW(CUDALogicalOrElementKernel);
            break;
        case BinaryEWOpCode::LogicalXor:
            OPEN3D_LAUNCH_BOOL_BINARY_EW(CUDALogicalXorElementKernel);
            break;
        case BinaryEWOpCode::Gt:
            OPEN3D_LAUNCH_BOOL_BINARY_EW(CUDAGtElementKernel);
            break;
        case BinaryEWOpCode::Lt:
            OPEN3D_LAUNCH_BOOL_BINARY_EW(CUDALtElementKernel);
            break;
        case BinaryEWOpCode::Ge:
            OPEN3D_LAUNCH_BOOL_BINARY_EW(CUDAGeqElementKernel);
            break;
        case BinaryEWOpCode::Le:
            OPEN3D_LAUNCH_BOOL_BINARY_EW(CUDALeqElementKernel);
            break;
        case BinaryEWOpCode::Eq:
            OPEN3D_LAUNCH_BOOL_BINARY_EW(CUDAEqElementKernel);
            break;
        case BinaryEWOpCode::Ne:
            OPEN3D_LAUNCH_BOOL_BINARY_EW(CUDANeqElementKernel);
            break;
        default:
            break;
    }
#undef OPEN3D_LAUNCH_BOOL_BINARY_EW
}

// Entry point for binary element-wise ops on CUDA tensors.
void BinaryEWCUDA(const Tensor& lhs,
                  const Tensor& rhs,
                  Tensor& dst,
                  BinaryEWOpCode op_code) {
    // It has been checked that
    // - lhs, rhs, dst are all in the same CUDA device
    // - lhs, rhs have the same dtype, dst also has the same dtype or is boolean
    Device src_device = lhs.GetDevice();
    Dtype src_dtype = lhs.GetDtype();
    Dtype dst_dtype = dst.GetDtype();
    CUDADeviceSwitcher switcher(src_device);
    Indexer indexer({lhs, rhs}, dst, DtypePolicy::ASSERT_SAME_OR_BOOL_OUT);

    if (s_boolean_binary_ew_op_codes.find(op_code) !=
        s_boolean_binary_ew_op_codes.end()) {
        DISPATCH_DTYPE_TO_TEMPLATE_WITH_BOOL(src_dtype, [&]() {
            if (src_dtype == dst_dtype) {
                // Inplace boolean op's output type is the same as the
                // input. e.g. np.logical_and(a, b, out=a), where a, b are
                // floats.
                LaunchBoolBinaryEWCUDAKernel<scalar_t, scalar_t>(
                        lhs, rhs, dst, op_code, indexer);
            } else {
                // By default, output is boolean type.
                if (dst_dtype != Dtype::Bool) {
                    utility::LogError(
                            "Boolean op's output type must be boolean or the "
                            "same type as the input.");
                }
                LaunchBoolBinaryEWCUDAKernel<scalar_t, bool>(lhs, rhs, dst,
                                                             op_code, indexer);
            }
        });
    } else {
        DISPATCH_DTYPE_TO_TEMPLATE(src_dtype, [&]() {
#define OPEN3D_LAUNCH_ARITH_BINARY_EW(ELEMENT_KERNEL)                         \
    CUDALauncher::LaunchBinaryEWKernel(                                       \
            indexer,                                                          \
            [] OPEN3D_HOST_DEVICE(const void* lhs, const void* rhs,           \
                                  void* dst) {                                \
                ELEMENT_KERNEL<scalar_t>(lhs, rhs, dst);                      \
            })
            switch (op_code) {
                case BinaryEWOpCode::Add:
                    OPEN3D_LAUNCH_ARITH_BINARY_EW(CUDAAddElementKernel);
                    break;
                case BinaryEWOpCode::Sub:
                    OPEN3D_LAUNCH_ARITH_BINARY_EW(CUDASubElementKernel);
                    break;
                case BinaryEWOpCode::Mul:
                    OPEN3D_LAUNCH_ARITH_BINARY_EW(CUDAMulElementKernel);
                    break;
                case BinaryEWOpCode::Div:
                    OPEN3D_LAUNCH_ARITH_BINARY_EW(CUDADivElementKernel);
                    break;
                default:
                    break;
            }
#undef OPEN3D_LAUNCH_ARITH_BINARY_EW
        });
    }
}

}  // namespace kernel
}  // namespace open3d
the_stack
#ifndef _DALI_KERNELS_REDUCE_REDUCE_AXES_GPU_IMPL_CUH
#define _DALI_KERNELS_REDUCE_REDUCE_AXES_GPU_IMPL_CUH

#include "dali/core/util.h"
#include "dali/core/geom/vec.h"
#include "dali/kernels/reduce/reductions.h"
#include "dali/kernels/reduce/reduce_all_gpu_impl.cuh"
#include "dali/kernels/reduce/reduce_common.cuh"
#include "dali/kernels/reduce/online_reducer.h"
#include "dali/kernels/reduce/reduce_drop_dims.h"

namespace dali {
namespace kernels {

namespace reduce_impl {

/**
 * A preprocessor bank should satisfy the concept:
 *
 * ```
 * struct PreprocessorBank {
 *   SingleValuePreprocessor Get(const i64vec<non_reduced_dims> &pos) const;
 * };
 * ```
 * where SingleValuePreprocessor is a unary functor.
 *
 * non_reduced_dims is either 1 or 2; if it's 2, dimension 0 is outer (wrt reduced dims)
 * and dimension 1 is inner.
 * If non_reduced_dims is 1, then `pos` is the outer dimension.
 */

/**
 * @brief A no-op preprocessor bank.
 */
template <int non_reduced_ndim>
struct IdentityPreprocessor {
  DALI_HOST_DEV DALI_FORCEINLINE
  dali::identity Get(const i64vec<non_reduced_ndim> &) const { return {}; }
};

/**
 * @brief A position-independent bank, wrapping a functor
 */
template <int non_reduced_ndim, typename Functor>
struct UniformPreprocessorBank {
  DALI_HOST_DEV DALI_FORCEINLINE
  Functor Get(const i64vec<non_reduced_ndim> &) const { return {}; }
};

}  // namespace reduce_impl

/**
 * @brief This function is used when the reduction is no-op (reduced extent is 1)
 *
 * This function will apply preprocessing and postprocessing, but will not do
 * any actual reduction.
 *
 * @param pre_bank preprocessor bank, providing possibly distinct processing
 *                 per output sample
 * @param post     postprocessing unary functor
 */
template <typename Out, typename In, typename PreprocessorBank, typename Postprocessor>
__device__ void ReduceNone(Out *out, const In *in, int64_t n,
                           PreprocessorBank pre_bank, Postprocessor post) {
  const int64_t blk_size = blockDim.x * blockDim.y;  // no restriction on block size
  const int64_t grid_stride = static_cast<int64_t>(gridDim.x) * blk_size;
  const int flat_tid = threadIdx.x + threadIdx.y * blockDim.x;
  int64_t base_idx = static_cast<int64_t>(blockIdx.x) * blk_size + flat_tid;
  for (int64_t index = base_idx; index < n; index += grid_stride) {
    auto pre = pre_bank.Get({index});
    out[index] = ConvertSat<Out>(post(pre(in[index])));
  }
}

/**
 * @brief This kernel is used when the reduction is no-op (reduced extent is 1)
 *
 * This function will apply preprocessing and postprocessing, but will not do
 * any actual reduction.
 * When the data batch contains only samples where the reduced extent happens to be 1,
 * this kernel will be used.
 *
 * @param pre preprocessor bank per sample, providing possibly distinct processing
 *            per output sample (nullptr = default-constructed bank)
 * @param post postprocessing unary functor per sample (nullptr = default-constructed)
 */
template <typename Out, typename In,
          typename PreprocessorBank = reduce_impl::IdentityPreprocessor<1>,
          typename Postprocessor = identity>
__global__ void ReduceNoneKernel(Out *const *out, const In *const *in, const int64_t *lengths,
                                 PreprocessorBank *pre = nullptr,
                                 Postprocessor *post = nullptr) {
  int sample_idx = blockIdx.y;
  PreprocessorBank pre_bank = pre ? pre[sample_idx] : PreprocessorBank();
  Postprocessor postprocessor = post ? post[sample_idx] : Postprocessor();
  ReduceNone(out[sample_idx], in[sample_idx], lengths[sample_idx], pre_bank, postprocessor);
}

/**
 * @brief This kernel is used for reducing respective elements of tensors across samples
 *
 * Grid and block are 1D and are used for indexing the offsets within a sample; samples
 * themselves are iterated over by threads.
 *
 * @param pre  per-sample preprocessor banks, providing possibly distinct processing
 *             per output coordinate per input sample (nullptr = default bank)
 * @param post postprocessing unary functor
 */
template <typename Acc, typename Out, typename In, typename Reduction,
          typename PreprocessorBank = reduce_impl::IdentityPreprocessor<1>,
          typename Postprocessor = identity>
__global__ void ReduceSamplesKernel(Out *out, const In *const *in,
                                    int64_t sample_size, int num_samples,
                                    Reduction R = {},
                                    PreprocessorBank *pre = nullptr,
                                    Postprocessor post = {}) {
  OnlineReducer<Acc, Reduction> red;
  int64_t block_size = blockDim.x;
  int64_t grid_size = block_size * gridDim.x;
  for (int64_t ofs = threadIdx.x + blockIdx.x * block_size;
       ofs < sample_size;
       ofs += grid_size) {
    red.reset();
    if (pre) {
      for (int i = 0; i < num_samples; i++) {
        auto pp = pre[i].Get({ofs});
        red.add(pp(in[i][ofs]), R);
      }
    } else {
      // No banks provided - use a single default-constructed preprocessor.
      auto pp = std::remove_reference_t<decltype(pre->Get({ofs}))>();
      for (int i = 0; i < num_samples; i++) {
        red.add(pp(in[i][ofs]), R);
      }
    }
    out[ofs] = ConvertSat<Out>(post(red.result()));
  }
}

/**
 * @brief This function is used for reducing innermost dimension with small extent.
 *
 * The reduction is done in a single pass, with each thread completely reducing
 * the inner dimension.
 *
 * @param pre_bank preprocessor bank, providing possibly distinct processing
 *                 per output sample
 * @param post     postprocessing unary functor
 */
template <typename Acc, typename Out, typename In, typename Reduction,
          typename PreprocessorBank, typename Postprocessor>
__device__ void ReduceInnerSmall(Out *out, const In *in, int64_t n_outer, int n_inner,
                                 Reduction reduce,
                                 PreprocessorBank pre_bank, Postprocessor post) {
  const int64_t blk_size = blockDim.x * blockDim.y;  // no restriction on block size
  const int64_t grid_stride = static_cast<int64_t>(gridDim.x) * blk_size;
  const int flat_tid = threadIdx.x + threadIdx.y * blockDim.x;
  int64_t base_idx = static_cast<int64_t>(blockIdx.x) * blk_size + flat_tid;
  for (int64_t outer = base_idx; outer < n_outer; outer += grid_stride) {
    auto pre = pre_bank.Get({outer});
    const In *base = in + outer * n_inner;
    OnlineReducer<Acc, Reduction> red;
    red.reset();
    for (int i = 0; i < n_inner; i++)
      red.add(pre(__ldg(base + i)), reduce);
    out[outer] = ConvertSat<Out>(post(red.result()));
  }
}

/**
 * @brief This function is used for reducing innermost dimension with medium extent.
 *
 * The reduction is done by a warp.
 * After the sequential step, a warp reduction is performed.
 */
template <typename Acc, typename Out, typename In, typename Reduction,
          typename PreprocessorBank, typename Postprocessor>
__device__ void ReduceInnerMedium(Out *out, const In *in, int64_t n_outer, int n_inner,
                                  Reduction reduce,
                                  PreprocessorBank pre_bank, Postprocessor post) {
  const int64_t grid_stride = static_cast<int64_t>(gridDim.x) * blockDim.y;
  int64_t base_idx = static_cast<int64_t>(blockIdx.x) * blockDim.y + threadIdx.y;
  for (int64_t outer = base_idx; outer < n_outer; outer += grid_stride) {
    auto pre = pre_bank.Get({outer});
    const In *base = in + outer * n_inner;
    OnlineReducer<Acc, Reduction> red;
    red.reset();
    // Each lane reduces a 32-strided slice, then the warp combines the partials.
    for (int i = threadIdx.x; i < n_inner; i += 32)
      red.add(pre(__ldg(base + i)), reduce);
    Acc v = red.result();
    WarpReduce(v, reduce);
    if (threadIdx.x == 0) {
      out[outer] = ConvertSat<Out>(post(v));
    }
  }
}

/**
 * @brief This kernel is used for reducing innermost dimension with large extent.
 *
 * The reduction needs at least one more level to complete. Output buffer
 * will contain n_outer * num_macroblocks elements.
 *
 * After this function is used, another level of reduction may be necessary,
 * depending on the value of num_macroblocks.
 */
template <typename Acc, typename Out, typename In, typename Reduction,
          typename PreprocessorBank, typename Postprocessor>
__device__ void ReduceInnerLarge(Out *out, const In *in, int64_t n_outer, int64_t n_inner,
                                 int num_macroblocks, int macroblock_size,
                                 Reduction reduce,
                                 PreprocessorBank pre_bank, Postprocessor post) {
  const int blk_size = 32*blockDim.y;  // block must be warpSize * warpSize for BlockReduce

  // BUGFIX: was `const int total_blocks` - the int64 product n_outer * num_macroblocks
  // was silently truncated to 32 bits, breaking very large outer extents.
  const int64_t total_blocks = n_outer * num_macroblocks;

  const int flat_tid = threadIdx.x + threadIdx.y * blockDim.x;

  // num_macroblocks is a power of 2, so divide/modulo become shift/mask.
  int outer_shift = __ffs(num_macroblocks) - 1;
  int inner_mask = num_macroblocks - 1;

  for (int64_t idx = blockIdx.x; idx < total_blocks; idx += gridDim.x) {
    int64_t outer = idx >> outer_shift;
    int64_t inner_macroblock = idx & inner_mask;
    int64_t inner_start = inner_macroblock * macroblock_size;
    int64_t inner_end = cuda_min(n_inner, inner_start + macroblock_size);

    const In *base = &in[outer * n_inner];

    Acc val = reduce.template neutral<Acc>();

    auto pre = pre_bank.Get({outer});

    // reduce macroblock to a block - each thread reduces its own block-strided slice
    bool first = true;
    for (int64_t inner = inner_start + flat_tid; inner < inner_end; inner += blk_size) {
      auto x = pre(__ldg(base + inner));
      if (first) {
        val = x;
        first = false;
      } else {
        reduce(val, x);
      }
    }

    if (idx != blockIdx.x)  // not needed in first iteration:
      __syncthreads();      // make sure that the shared memory used by BlockReduce is ready

    if (BlockReduce(val, reduce))
      out[idx] = ConvertSat<Out>(post(val));
  }
}

template <typename Out, typename In>
struct ReduceSampleDesc {
  Out *out;
  const In *in;
  int64_t n_outer;    // volume of the outermost (non-reduced) dimensions
  int64_t n_reduced;  // volume of the reduced
  int64_t n_inner;    // volume of the innermost (non-reduced) dimensions

  /** number of macroblocks in reduced dimension - must be a power of 2 for easier division */
  int num_macroblocks;

  /**
   * @brief Size, in elements, of the macroblock in reduced dimension - does *not* need to be
   * aligned on block boundary.
   */
  int macroblock_size;
};

// Dispatch to the small/medium/large inner-reduction strategy based on the
// reduced extent and the macroblock count.
template <typename Acc, typename Out, typename In, typename Reduction,
          typename PreprocessorBank, typename Postprocessor>
__device__ void ReduceInner(const ReduceSampleDesc<Out, In> &sample,
                            Reduction reduce, PreprocessorBank pre_bank, Postprocessor post) {
  int64_t n_outer = sample.n_outer;
  int64_t n_reduced = sample.n_reduced;
  Out *out = sample.out;
  const In *in = sample.in;

  if (n_reduced == 1) {
    ReduceNone(out, in, n_outer, pre_bank, post);
  } else if (n_reduced < 32 && sample.num_macroblocks == 1) {
    ReduceInnerSmall<Acc>(out, in, n_outer, n_reduced, reduce, pre_bank, post);
  } else if (n_reduced < 1024 && sample.num_macroblocks == 1) {
    ReduceInnerMedium<Acc>(out, in, n_outer, n_reduced, reduce, pre_bank, post);
  } else {
    ReduceInnerLarge<Acc>(out, in, n_outer, n_reduced,
                          sample.num_macroblocks, sample.macroblock_size,
                          reduce, pre_bank, post);
  }
}

// One sample per blockIdx.y; per-sample pre/post functors are optional.
template <typename Acc, typename Out, typename In,
          typename Reduction = reductions::sum,
          typename PreprocessorBank = reduce_impl::IdentityPreprocessor<1>,
          typename Postprocessor = identity>
__global__ void ReduceInnerKernel(const ReduceSampleDesc<Out, In> *samples,
                                  Reduction reduce = {},
                                  const PreprocessorBank *pre = nullptr,
                                  const Postprocessor *post = nullptr) {
  PreprocessorBank pre_bank = pre ? pre[blockIdx.y] : PreprocessorBank();
  Postprocessor postprocessor = post ? post[blockIdx.y] : Postprocessor();
  ReduceInner<Acc>(samples[blockIdx.y], reduce, pre_bank, postprocessor);
}

// Reduction over other dimensions (non-innermost)

/**
 * Each *thread* performs an independent, full reduction
 */
template <typename Acc, typename Out, typename In, typename Reduction,
          typename PreprocessorBank, typename Postprocessor>
__device__ void ReduceMiddleSmall(const ReduceSampleDesc<Out, In> &sample,
                                  Reduction reduce,
                                  PreprocessorBank pre_bank, Postprocessor post) {
  int64_t n_outer = sample.n_outer;
  int n_reduced = sample.n_reduced;
  int64_t n_inner = sample.n_inner;
  int64_t n_non_reduced = n_inner * n_outer;
  int64_t outer_stride = n_inner * n_reduced;
  Out *out = sample.out;
  const In *in = sample.in;

  const int64_t blk_size = blockDim.x * blockDim.y;  // no restriction on block size
  const int64_t grid_stride = static_cast<int64_t>(gridDim.x) * blk_size;
  const int flat_tid = threadIdx.x + threadIdx.y * blockDim.x;
  int64_t base_idx = static_cast<int64_t>(blockIdx.x) * blk_size + flat_tid;
  for (int64_t idx = base_idx; idx < n_non_reduced; idx += grid_stride) {
    int64_t outer, inner;
    outer = idx / n_inner;
    inner = idx % n_inner;
    const In *base = in + outer * outer_stride + inner;
    auto pre = pre_bank.Get({outer, inner});
    OnlineReducer<Acc, Reduction> red;
    red.reset();
    for (int i = 0; i < n_reduced; i++)
      red.add(pre(__ldg(base + i * n_inner)), reduce);
    out[idx] = ConvertSat<Out>(post(red.result()));
  }
}

namespace reduce_shared {
  extern __shared__ uint8_t shared_tmp[];
}  // namespace reduce_shared

/**
 * @brief Reduces the input by fetching contiguous warps of data per block.
 *
 * Each block processes a whole macroblock. Each thread accumulates values
 * assigned to a single output bin in the inner dimension.
 *
 * This function CANNOT work for inner extents > 32.
 * The macroblock size in reduced dimension is limited - see OnlineReducer for limits.
 */
template <typename Acc, typename Out, typename In,
          typename Reduction, typename PreprocessorBank, typename Postprocessor>
__device__ void ReduceMiddleLargeInnerSmall(const ReduceSampleDesc<Out, In> &sample,
                                            Reduction r, PreprocessorBank pre_bank,
                                            Postprocessor post) {
  // Scratch used for transposing per-warp partial results; rows are 33 wide so
  // that column accesses in the transpose step do not hit the same shared-memory bank.
  Acc (*shared_tmp)[33] = reinterpret_cast<Acc (*)[33]>(reduce_shared::shared_tmp);

  Out *out = sample.out;
  const In *in = sample.in;

  const int n_inner = sample.n_inner;
  const int num_macroblocks = sample.num_macroblocks;
  const int macroblock_size = sample.macroblock_size;
  const int64_t macroblock_stride = static_cast<int64_t>(macroblock_size) * n_inner;
  const int64_t total_macroblocks = sample.n_outer * num_macroblocks;
  // Shift/mask decomposition of macroblock index - only exact when num_macroblocks
  // is a power of two. NOTE(review): not checked here; presumably guaranteed by
  // the launcher that fills ReduceSampleDesc - confirm.
  int outer_shift = __ffs(num_macroblocks) - 1;
  int mblock_mask = num_macroblocks - 1;
  const int warps = blockDim.y;
  const int tid = threadIdx.x + 32 * threadIdx.y;  // assumes blockDim.x == 32 (one warp per row)
  const int lane = threadIdx.x;
  const int warp = threadIdx.y;
  const int blk_size = blockDim.x * blockDim.y;

  const int64_t outer_stride = n_inner * sample.n_reduced;

  // The algorithm:
  // The macroblocks are traversed sequentially with grid stride.
  // The reduced and inner dimensions are flattened and then each warp loads contiguous 32
  // values.
  //
  // Each thread has an accumulator corresponding to one entry in inner dimension - but there
  // may be multiple threads with bins corresponding to the same entry, e.g.
  // for n_inner = 5 the bin assignment is:
  // 0 1 2 3 4 0 1 2 3 4 0 1 2 3 4 0 1 2 3 4 0 1 2 3 4 0 1 2 3 4 0 1
  // The assignment is not balanced when n_inner is not a divisor of 32 - here bins
  // 0 and 1 receive 7 values each, while bins 2, 3 and 4 receive only 6.
  //
  // For warp 0 in the block, the bins and offsets in the input are aligned. For subsequent
  // warps (and iterations within warp), they are not aligned.
  // warp 1 would see the following indices
  // 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63
  // modulo 5 these are
  // 2 3 4 0 1 2 3 4 0 1 2 3 4 0 1 2 3 4 0 1 2 3 4 0 1 2 3 4 0 1 2 3
  // There are two problems:
  // - the bins the values should go to are no longer aligned with threads' bins.
  // - the bins and values cannot be assigned 1:1, because of the imbalance of bin distribution,
  //   as indicated above.
  // The alignment is realized by shifting the input indices so that the fetched values go
  // into the appropriate bin. The line idx0 = idx + lane_offset does that.
  // Since after applying the offset some values are out of range of the warp's coverage, we need
  // a condition to guard against it (lane + lane_offset < 32).
  // This condition rejects some values, whereas some of the values in warp's coverage were not
  // consumed at all - this is handled by the second fetch from idx1.
  // This ensures that all the values covered by the warp were consumed.
  // The additional fetch does not hurt the performance significantly, since the values are already
  // in L1 cache.
  //
  // The inner loop has block stride, where each warp accumulates values into its own set of bins.
  // After the loop, the per-thread values are stored in shared memory and transposed.
  // Now we can conveniently combine bins gathered by distinct warps and each warp will take care
  // of a subset of bins and store the final reduced result.

  // To avoid the expensive division/modulo in the inner loop, we precalculate some values here:
  const int block_mod = blk_size % n_inner;
  int lane_offset0 = -((warp * 32) % n_inner);
  if (lane_offset0 < 0)
    lane_offset0 += n_inner;
  const int inner_bin = lane % n_inner;

  // Initialize the full 32x32 scratch to the reduction's neutral element so the
  // transpose step can safely read rows beyond the live warps.
  for (int w = warp; w < 32; w += warps)
    shared_tmp[w][lane] = r.template neutral<Acc>();

  // thread's accumulator
  OnlineReducer<Acc, Reduction> red;

  for (int64_t macroblock = blockIdx.x; macroblock < total_macroblocks;
       macroblock += gridDim.x) {
    // Barrier between iterations: shared_tmp from the previous macroblock must be
    // fully consumed before it is overwritten (skipped on the first pass).
    if (macroblock != blockIdx.x)
      __syncthreads();

    // calculate the outer/macroblock coordinates
    int64_t outer = macroblock >> outer_shift;
    int64_t outer_ofs = outer * outer_stride;
    int mblock = macroblock & mblock_mask;
    int64_t macroblock_start = mblock * macroblock_stride;
    int64_t macroblock_end = cuda_min(macroblock_start + macroblock_stride, outer_stride);

    red.reset();
    int lane_offset = lane_offset0;
    auto pre = pre_bank.Get(i64vec2(outer, inner_bin));
    for (int64_t i = macroblock_start; i < macroblock_end; i += blk_size) {
      int64_t idx = i + tid;
      // Shift the load index so that the fetched value lands in this thread's bin
      // (see the alignment discussion above).
      int64_t idx0 = idx + lane_offset;
      Acc v = lane + lane_offset < 32 && idx0 < macroblock_end
        ? pre(__ldg(in + outer_ofs + idx0))
        : r.template neutral<Acc>();
      // Second fetch: pick up the value that the shift pushed out of this warp's
      // coverage, so every element covered by the warp is consumed exactly once.
      if (lane + lane_offset >= n_inner && lane < n_inner) {
        int64_t idx1 = idx0 - n_inner;
        if (idx1 < macroblock_end) {
          r(v, pre(__ldg(in + outer_ofs + idx1)));
        }
      }
      red.add(v, r);
      // Advance the per-iteration phase without division/modulo.
      lane_offset -= block_mod;
      if (lane_offset < 0)
        lane_offset += n_inner;
    }
    Acc acc = red.result();  // get the final result from the accumulator...
    shared_tmp[warp][lane] = acc;  // ...and store it in shared memory to be transposed
    __syncthreads();

    // Now the roles of warps and lanes are swapped - lanes now correspond to warps' partial
    // results and warps correspond to bins.
    for (int inner = warp; inner < n_inner; inner += warps) {
      // sum the bins corresponding to given position in inner dimension
      acc = shared_tmp[lane][inner];
      for (int j = inner + n_inner; j < 32; j += n_inner) {
        r(acc, shared_tmp[lane][j]);
      }
      // combine warps' partial results
      WarpReduce(acc, r);
      if (lane == 0) {
        out[macroblock * n_inner + inner] = ConvertSat<Out>(post(acc));
      }
    }
  }
}

/**
 * @brief Reduces the input in tiles shaped warp_size x macroblock_size.
 *
 * Each block processes a tile warp_size x macroblock_size. Then it jumps
 * by grid stride until data is exhausted.
 *
 * This function is not intended for use with small inner extent.
 * The macroblock size in reduced dimension is limited - see OnlineReducer for limits.
 */
template <typename Acc, typename Out, typename In,
          typename Reduction, typename PreprocessorBank, typename Postprocessor>
__device__ void ReduceMiddleLargeInnerMedium(const ReduceSampleDesc<Out, In> &sample,
                                             Reduction r, PreprocessorBank pre_bank,
                                             Postprocessor post) {
  // 33-wide rows avoid shared-memory bank conflicts in the transpose step.
  Acc (*shared_tmp)[33] = reinterpret_cast<Acc (*)[33]>(reduce_shared::shared_tmp);

  Out *out = sample.out;
  const In *in = sample.in;

  const int n_inner = sample.n_inner;
  const int64_t n_reduced = sample.n_reduced;

  const int num_macroblocks = sample.num_macroblocks;
  const int macroblock_size = sample.macroblock_size;
  // Shift/mask decomposition - exact only for power-of-two num_macroblocks
  // (same assumption as in ReduceMiddleLargeInnerSmall).
  int outer_shift = __ffs(num_macroblocks) - 1;
  int mblock_mask = num_macroblocks - 1;

  // Start of inner dimension is warp-aligned.
  // We need to know how many warps are required to span entire inner extent.
  int horz_warps = (n_inner + 31) / 32;

  // Caution: fast approximate division ahead!
  //
  // Rationale: grid size is going to be relatively small (up to a few thousand)
  // and number of horizontal warps is also fairly limited.
  // The approximation has been exhaustively tested with numerators up to 2^22 and
  // denominators up to 2^20 and the result is exact.
  // The values expected here are much smaller.
  float rcp_horz_warps = __frcp_ru(horz_warps);  // round up - otherwise it's no good for
                                                 // approximating _integer_ division
  const int total_out_blocks = sample.n_outer * num_macroblocks * horz_warps;
  const int warps = (blockDim.x * blockDim.y + 31) >> 5;  // number of warps in the block
  const int tid = threadIdx.x + blockDim.x * threadIdx.y;
  const int lane = tid & 31;
  const int warp = tid >> 5;

  const int64_t outer_stride = n_inner * sample.n_reduced;

  // Pre-fill the scratch with the neutral element so unused rows are harmless
  // when the transpose step reads all 32 of them.
  for (int w = warp; w < 32; w += warps)
    shared_tmp[w][lane] = r.template neutral<Acc>();

  for (int out_block = blockIdx.x; out_block < total_out_blocks; out_block += gridDim.x) {
    // Barrier between iterations: previous tile's shared_tmp must be consumed
    // before being overwritten (skipped on the first pass).
    if (out_block != static_cast<int>(blockIdx.x))
      __syncthreads();

    // Caution: fast approximate division ahead!
    int macroblock = __float2int_rd(out_block * rcp_horz_warps);  // integer division - round down
    int horz_warp = out_block - macroblock * horz_warps;          // modulo

    int64_t outer = macroblock >> outer_shift;
    int64_t outer_ofs = outer * outer_stride;
    int mblock = macroblock & mblock_mask;
    int64_t macroblock_start = mblock * macroblock_size;
    int64_t macroblock_end = cuda_min(macroblock_start + macroblock_size, n_reduced);

    OnlineReducer<Acc, Reduction> red;
    red.reset();
    int inner_base = horz_warp * 32;  // first inner position covered by this warp's tile
    int inner = inner_base + lane;
    int64_t base_idx = outer_ofs + inner;
    Acc v;
    // Each warp does a full, parallel reduction over a range of inner dimension.
    if (inner < n_inner) {
      auto pre = pre_bank.Get({ outer, inner });
      // Warps stride through the reduced dimension; consecutive lanes read
      // consecutive inner positions, so the loads are coalesced.
      for (int64_t i = macroblock_start + warp; i < macroblock_end; i += warps) {
        int64_t offset = base_idx + i * n_inner;
        red.add(pre(__ldg(in + offset)), r);
      }
      v = red.result();
    } else {
      v = r.template neutral<Acc>();  // lane beyond inner extent contributes nothing
    }
    shared_tmp[warp][lane] = v;
    __syncthreads();
    // Transpose: each warp now combines the per-warp partials of a subset of
    // inner positions and lane 0 writes the final value.
    for (int i = warp; i < 32 && i + inner_base < n_inner; i += warps) {
      v = shared_tmp[lane][i];
      WarpReduce(v, r);
      if (lane == 0) {
        int inner = i + inner_base;
        out[macroblock * n_inner + inner] = ConvertSat<Out>(post(v));
      }
    }
  }
}

/**
 * @brief Entry point for "middle" (non-innermost) axis reduction.
 *
 * One sample per blockIdx.y; per-sample preprocessor/postprocessor arrays are
 * optional (nullptr selects default-constructed ones). Dispatches to one of the
 * three device implementations based on the sample's reduced extent, macroblock
 * count and inner extent. The "large" variants require dynamic shared memory
 * (see reduce_shared::shared_tmp).
 */
template <typename Acc, typename Out, typename In,
          typename Reduction = reductions::sum,
          typename PreprocessorBank = reduce_impl::IdentityPreprocessor<2>,
          typename Postprocessor = identity>
__global__ void ReduceMiddleKernel(const ReduceSampleDesc<Out, In> *samples,
                                   Reduction reduce = {},
                                   const PreprocessorBank *pre = nullptr,
                                   const Postprocessor *post = nullptr) {
  auto sample = samples[blockIdx.y];
  PreprocessorBank pre_bank = pre ? pre[blockIdx.y] : PreprocessorBank();
  Postprocessor postprocessor = post ? post[blockIdx.y] : Postprocessor();
  if (sample.n_reduced < 1024 && sample.num_macroblocks == 1) {
    ReduceMiddleSmall<Acc>(sample, reduce, pre_bank, postprocessor);
  } else if (sample.n_inner < 32) {
    ReduceMiddleLargeInnerSmall<Acc>(sample, reduce, pre_bank, postprocessor);
  } else {
    ReduceMiddleLargeInnerMedium<Acc>(sample, reduce, pre_bank, postprocessor);
  }
}

}  // namespace kernels
}  // namespace dali

#endif  // _DALI_KERNELS_REDUCE_REDUCE_AXES_GPU_IMPL_CUH
the_stack
#include "WarpingSolverParameters.h"
#include "WarpingSolverState.h"
#include "WarpingSolverUtil.h"
#include "WarpingSolverEquations.h"

#include <assert.h>
#include <stdio.h>
#include <stdint.h>

#include "CUDATimer.h"

#ifdef _WIN32
#include <conio.h>
#endif

#ifdef _WIN32
#define EXPORT __declspec(dllexport)
#else
#define EXPORT
#endif

#define WARP_SIZE 32u
#define WARP_MASK (WARP_SIZE-1u)

#define DEBUG_PRINT_INFO 0

/////////////////////////////////////////////////////////////////////////
// Eval Residual
/////////////////////////////////////////////////////////////////////////

// Zeroes the accumulated residual; only thread 0 writes (launched with 1 thread).
__global__ void ResetResidualDevice(SolverInput input, SolverState state, SolverParameters parameters)
{
	const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
	if (x == 0) state.d_sumResidual[0] = 0.0f;
}

// Evaluates the per-variable cost and accumulates the total into
// state.d_sumResidual (one atomicAdd per warp after a warp-level reduction).
__global__ void EvalResidualDevice(SolverInput input, SolverState state, SolverParameters parameters)
{
	const unsigned int N = input.N; // Number of block variables
	const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;

	float residual = 0.0f;
	if (x < N)
	{
		residual = evalFDevice(x, input, state, parameters);
	}
	// Must do shuffle in entire warp - out-of-range threads contribute 0.0f.
	float r = warpReduce(residual);
	if ((threadIdx.x & WARP_MASK) == 0) {
		atomicAdd(state.d_sumResidual, r);
	}
}

// Host wrapper: resets the accumulator, runs EvalResidualDevice over all N
// variables and copies the summed residual back to the host (blocking).
float EvalResidual(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer& timer)
{
	float residual = 0.0f;

	const unsigned int N = input.N; // Number of block variables
	ResetResidualDevice << < 1, 1, 1 >> >(input, state, parameters);
	cudaSafeCall(cudaDeviceSynchronize());
	timer.startEvent("EvalResidual");
	EvalResidualDevice << <(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(input, state, parameters);
	timer.endEvent();
	cudaSafeCall(cudaDeviceSynchronize());

#ifdef _DEBUG
	cudaSafeCall(cudaDeviceSynchronize());
#endif

	cudaSafeCall(cudaMemcpy(&residual, &state.d_sumResidual[0], sizeof(float), cudaMemcpyDeviceToHost));
	return residual;
}

// For the naming scheme of the variables see:
// http://en.wikipedia.org/wiki/Conjugate_gradient_method
// This code is an implementation of their PCG pseudo code

// PCG initialization, part 1: computes the initial residual r = -J^T F, the
// preconditioned direction p = M^-1 r, and accumulates r.p into d_scanAlpha.
__global__ void PCGInit_Kernel1(SolverInput input, SolverState state, SolverParameters parameters)
{
	const unsigned int N = input.N;
	const int x = blockIdx.x * blockDim.x + threadIdx.x;

	float d = 0.0f;
	if (x < N && state.d_mask[x] == 0)
	{
		const float4 residuum = evalMinusJTFDevice(x, input, state, parameters); // residuum = J^T x -F - A x delta_0 => J^T x -F, since A x x_0 == 0
		state.d_r[x] = residuum; // store for next iteration

		const float4 p = state.d_precondioner[x] * residuum; // apply preconditioner M^-1
		state.d_p[x] = p;

		d = dot(residuum, p); // x-th term of numerator for computing alpha and denominator for computing beta
	}
	d = warpReduce(d);
	if ((threadIdx.x & WARP_MASK) == 0) {
		atomicAdd(state.d_scanAlpha, d);
	}
}

// PCG initialization, part 2: broadcasts the reduced r.z product into
// d_rDotzOld for every variable and clears the update vector delta.
__global__ void PCGInit_Kernel2(unsigned int N, SolverState state)
{
	const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
	if (x < N) {
		state.d_rDotzOld[x] = state.d_scanAlpha[0];
		state.d_delta[x] = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
	}
}

// Host wrapper for the two PCG initialization kernels.
// NOTE(review): on a too-large problem this prints an error and spins in
// `while (1);` forever instead of failing cleanly - worth revisiting.
void Initialization(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer& timer)
{
	const unsigned int N = input.N;

	const int blocksPerGrid = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
	const int shmem_size = sizeof(float)*THREADS_PER_BLOCK;

	if (blocksPerGrid > THREADS_PER_BLOCK)
	{
		std::cout << "Too many variables for this block size. Maximum number of variables for two kernel scan: " << THREADS_PER_BLOCK*THREADS_PER_BLOCK << std::endl;
		while (1);
	}

	cudaSafeCall(cudaMemset(state.d_scanAlpha, 0, sizeof(float)));

	timer.startEvent("PCGInit_Kernel1");
	PCGInit_Kernel1 << <blocksPerGrid, THREADS_PER_BLOCK, shmem_size >> >(input, state, parameters);
	timer.endEvent();
#ifdef _DEBUG
	cudaSafeCall(cudaDeviceSynchronize());
#endif

	timer.startEvent("PCGInit_Kernel2");
	PCGInit_Kernel2 << <blocksPerGrid, THREADS_PER_BLOCK, shmem_size >> >(N, state);
	timer.endEvent();
#ifdef _DEBUG
	cudaSafeCall(cudaDeviceSynchronize());
#endif
}

/////////////////////////////////////////////////////////////////////////
// PCG Iteration Parts
/////////////////////////////////////////////////////////////////////////

// PCG step 1: applies the system matrix (J^T J) to the search direction p and
// accumulates p.(Ap) - the denominator of alpha - into d_scanAlpha.
__global__ void PCGStep_Kernel1(SolverInput input, SolverState state, SolverParameters parameters)
{
	const unsigned int N = input.N; // Number of block variables
	const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;

	float d = 0.0f;
	if (x < N && state.d_mask[x] == 0)
	{
		const float4 tmp = applyJTJDevice(x, input, state, parameters); // A x p_k => J^T x J x p_k
		state.d_Ap_X[x] = tmp; // store for next kernel call

		d = dot(state.d_p[x], tmp); // x-th term of denominator of alpha
	}
	d = warpReduce(d);
	if ((threadIdx.x & WARP_MASK) == 0) {
		atomicAdd(state.d_scanAlpha, d); // sum over x-th terms to compute denominator of alpha inside this block
	}
}

// PCG step 2: computes alpha, advances the solution update delta and the
// residual r, then starts accumulating the numerator of beta (r.z).
__global__ void PCGStep_Kernel2(SolverInput input, SolverState state)
{
	const unsigned int N = input.N;
	const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;

	const float dotProduct = state.d_scanAlpha[0];

	float b = 0.0f;
	if (x < N && state.d_mask[x] == 0)
	{
		float alpha = 0.0f;
		if (dotProduct > FLOAT_EPSILON) alpha = state.d_rDotzOld[x] / dotProduct; // update step size alpha

		state.d_delta[x] = state.d_delta[x] + alpha*state.d_p[x]; // do a descent step

		float4 r = state.d_r[x] - alpha*state.d_Ap_X[x]; // update residuum
		state.d_r[x] = r; // store for next kernel call
		float4 z = state.d_precondioner[x] * r; // apply preconditioner M^-1
		state.d_z[x] = z; // save for next kernel call

		b = dot(z, r); // compute x-th term of the numerator of beta
	}
	b = warpReduce(b);
	if ((threadIdx.x & WARP_MASK) == 0) {
		atomicAdd(state.d_scanBeta, b); // sum over x-th terms to compute numerator of beta inside this block
	}
}

// PCG step 3: computes beta from the new/old r.z products and updates the
// search direction p = z + beta * p; stores r.z for the next iteration.
__global__ void PCGStep_Kernel3(SolverInput input, SolverState state)
{
	const unsigned int N = input.N;
	const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;

	if (x < N && state.d_mask[x] == 0)
	{
		const float rDotzNew = state.d_scanBeta[0]; // get new numerator
		const float rDotzOld = state.d_rDotzOld[x]; // get old denominator

		float beta = 0.0f;
		if (rDotzOld > FLOAT_EPSILON) beta = rDotzNew / rDotzOld; // update step size beta

		state.d_rDotzOld[x] = rDotzNew; // save new rDotz for next iteration
		state.d_p[x] = state.d_z[x] + beta*state.d_p[x]; // update descent direction
	}
}

// Host wrapper running one full PCG iteration (the three kernels above).
// NOTE(review): like Initialization, this spins in `while (1);` on oversized
// problems instead of failing cleanly.
void PCGIteration(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer& timer)
{
	const unsigned int N = input.N; // Number of block variables

	// Do PCG step
	const int blocksPerGrid = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
	const int shmem_size = sizeof(float)*THREADS_PER_BLOCK;

	if (blocksPerGrid > THREADS_PER_BLOCK)
	{
		std::cout << "Too many variables for this block size. Maximum number of variables for two kernel scan: " << THREADS_PER_BLOCK*THREADS_PER_BLOCK << std::endl;
		while (1);
	}

	cudaSafeCall(cudaMemset(state.d_scanAlpha, 0, sizeof(float)));
	timer.startEvent("PCGStep_Kernel1");
	PCGStep_Kernel1 << <blocksPerGrid, THREADS_PER_BLOCK, shmem_size >> >(input, state, parameters);
	timer.endEvent();
#ifdef _DEBUG
	cudaSafeCall(cudaDeviceSynchronize());
#endif

	cudaSafeCall(cudaMemset(state.d_scanBeta, 0, sizeof(float)));
	timer.startEvent("PCGStep_Kernel2");
	PCGStep_Kernel2 << <blocksPerGrid, THREADS_PER_BLOCK, shmem_size >> >(input, state);
	timer.endEvent();
#ifdef _DEBUG
	cudaSafeCall(cudaDeviceSynchronize());
#endif

	timer.startEvent("PCGStep_Kernel3");
	PCGStep_Kernel3 << <blocksPerGrid, THREADS_PER_BLOCK, shmem_size >> >(input, state);
	timer.endEvent();
#ifdef _DEBUG
	cudaSafeCall(cudaDeviceSynchronize());
#endif
}

/////////////////////////////////////////////////////////////////////////
// Apply Update
/////////////////////////////////////////////////////////////////////////

// Adds the accumulated PCG update delta to the solution vector x.
__global__ void ApplyLinearUpdateDevice(SolverInput input, SolverState state, SolverParameters parameters)
{
	const unsigned int N = input.N; // Number of block variables
	const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;

	if (x < N && state.d_mask[x] == 0) {
		state.d_x[x] = state.d_x[x] + state.d_delta[x];
	}
}

// Host wrapper for ApplyLinearUpdateDevice.
void ApplyLinearUpdate(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer& timer)
{
	const unsigned int N = input.N; // Number of block variables
	timer.startEvent("ApplyLinearUpdateDevice");
	ApplyLinearUpdateDevice << <(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(input, state, parameters);
	timer.endEvent();
	cudaSafeCall(cudaDeviceSynchronize()); // NOTE(review): redundant with the _DEBUG sync below; unconditional sync here also defeats async overlap
#ifdef _DEBUG
	cudaSafeCall(cudaDeviceSynchronize());
#endif
}

////////////////////////////////////////////////////////////////////
// Main GN Solver Loop
////////////////////////////////////////////////////////////////////

// Gauss-Newton driver: per outer iteration, evaluates the cost, (re)initializes
// PCG, runs nLinIterations inner PCG steps and applies the linear update.
// Returns the final cost.
extern "C" double ImageWarpingSolveGNStub(SolverInput& input, SolverState& state, SolverParameters& parameters)
{
	CUDATimer timer;

	for (unsigned int nIter = 0; nIter < parameters.nNonLinearIterations; nIter++)
	{
		float residual = EvalResidual(input, state, parameters, timer);
		printf("%i: cost: %f\n", nIter, residual);

		Initialization(input, state, parameters, timer);

		for (unsigned int linIter = 0; linIter < parameters.nLinIterations; linIter++) {
			PCGIteration(input, state, parameters, timer);
		}

		ApplyLinearUpdate(input, state, parameters, timer);	//this should be also done in the last PCGIteration

		timer.nextIteration();
	}

	float residual = EvalResidual(input, state, parameters, timer);
	printf("final cost: %f\n", residual);

	timer.evaluate();

	return (double)residual;
}
the_stack
// Erasure-coding (generalized Cauchy Reed-Solomon) dot product for m = 1 parity
// block, word size w = 4.
//
// Layout shared by all gcrs_*_coding_dotprod kernels in this family:
//  - each group of w consecutive threads cooperates on one w-word strip;
//  - dynamic shared memory (HIP_DYNAMIC_SHARED) stages one `long` per thread,
//    so the launch must provide blockDim.x * sizeof(long) bytes;
//  - `bm` is the bit-matrix, read row by row starting at `index`;
//  - the `((bit >> shift) * 0xFF...FF)` trick expands a single matrix bit into
//    an all-ones/all-zeros mask, XOR-accumulating without branching.
//
// NOTE(review): threads with threadIdx.x >= worksize_perblock or idx >= size
// return early while the remaining threads keep calling __syncthreads() in the
// loop. Exited threads are tolerated by the barrier on recent architectures,
// but this divergent-exit-before-barrier pattern is worth confirming against
// the target GPUs.
__global__ void gcrs_m_1_w_4_coding_dotprod(
  int k, int index,
  const long *__restrict in,
  long *__restrict out,
  const unsigned int *__restrict bm,
  int size)
{
  HIP_DYNAMIC_SHARED(long, shared_data);

  int w = 4;
  int i,j;
  long result = 0;
  const unsigned long fullOneBit = 0xFFFFFFFFFFFFFFFF;

  int worksize_perblock = blockDim.x / w * w;  // largest multiple of w fitting the block
  const unsigned int idx = worksize_perblock * blockIdx.x + threadIdx.x;

  if (threadIdx.x >= worksize_perblock) {
    return;
  }

  if (idx >= size) {
    return;
  }

  int group_offset = (threadIdx.x / w) * w;  // first thread of this w-sized group
  int group_inner_offset = threadIdx.x % w;  // this thread's position within the group
  // row for each thread in the bitmatrix * row size which is k * w
  unsigned int bitInt = 0x01;
  unsigned int matrixInt;

  for ( i = 0; i < k; i++ ) {
    // Stage this thread's word of data block i, then let the whole group read it.
    shared_data[threadIdx.x] = *(in + i*size + idx);
    __syncthreads();
#pragma unroll
    for ( j = 0; j < w; j++ ) {
      matrixInt = bm[index];
      result = result ^ (
          (((matrixInt & (bitInt<< group_inner_offset)) >> group_inner_offset) * fullOneBit) &
          shared_data[group_offset + j]);
      ++index;
    }
    __syncthreads();
  }
  out[idx] = result;
}

// Same kernel for word size w = 5; see gcrs_m_1_w_4_coding_dotprod for the
// shared layout, preconditions and the bit-mask trick.
__global__ void gcrs_m_1_w_5_coding_dotprod(
  int k, int index,
  const long *__restrict in,
  long *__restrict out,
  const unsigned int *__restrict bm,
  int size)
{
  HIP_DYNAMIC_SHARED(long, shared_data);

  int w = 5;
  int i,j;
  long result = 0;
  const unsigned long fullOneBit = 0xFFFFFFFFFFFFFFFF;

  int worksize_perblock = blockDim.x / w * w;
  const unsigned int idx = worksize_perblock * blockIdx.x + threadIdx.x;

  if (threadIdx.x >= worksize_perblock) {
    return;
  }

  if (idx >= size) {
    return;
  }

  int group_offset = (threadIdx.x / w) * w;
  int group_inner_offset = threadIdx.x % w;
  // row for each thread in the bitmatrix * row size which is k * w
  unsigned int bitInt = 0x01;
  unsigned int matrixInt;

  for ( i = 0; i < k; i++ ) {
    shared_data[threadIdx.x] = *(in + i*size + idx);
    __syncthreads();
#pragma unroll
    for ( j = 0; j < w; j++ ) {
      matrixInt = bm[index];
      result = result ^ (
          (((matrixInt & (bitInt<< group_inner_offset)) >> group_inner_offset) * fullOneBit) &
shared_data[group_offset + j]); ++index; } __syncthreads(); } out[idx] = result; } __global__ void gcrs_m_1_w_6_coding_dotprod( int k, int index, const long *__restrict in, long *__restrict out, const unsigned int *__restrict bm, int size) { HIP_DYNAMIC_SHARED(long, shared_data); int w = 6; int i,j; long result = 0; const unsigned long fullOneBit = 0xFFFFFFFFFFFFFFFF; int worksize_perblock = blockDim.x / w * w; const unsigned int idx = worksize_perblock * blockIdx.x + threadIdx.x; if (threadIdx.x >= worksize_perblock) { return; } if (idx >= size) { return; } int group_offset = (threadIdx.x / w) * w; int group_inner_offset = threadIdx.x % w; // row for each thread in the bitmatrix * row size which is k * w unsigned int bitInt = 0x01; unsigned int matrixInt; for ( i = 0; i < k; i++ ) { shared_data[threadIdx.x] = *(in + i*size + idx); __syncthreads(); #pragma unroll for ( j = 0; j < w; j++ ) { matrixInt = bm[index]; result = result ^ ( (((matrixInt & (bitInt<< group_inner_offset)) >> group_inner_offset) * fullOneBit) & shared_data[group_offset + j]); ++index; } __syncthreads(); } out[idx] = result; } __global__ void gcrs_m_1_w_7_coding_dotprod( int k, int index, const long *__restrict in, long *__restrict out, const unsigned int *__restrict bm, int size) { HIP_DYNAMIC_SHARED(long, shared_data); int w = 7; int i,j; long result = 0; const unsigned long fullOneBit = 0xFFFFFFFFFFFFFFFF; int worksize_perblock = blockDim.x / w * w; const unsigned int idx = worksize_perblock * blockIdx.x + threadIdx.x; if (threadIdx.x >= worksize_perblock) { return; } if (idx >= size) { return; } int group_offset = (threadIdx.x / w) * w; int group_inner_offset = threadIdx.x % w; // row for each thread in the bitmatrix * row size which is k * w unsigned int bitInt = 0x01; unsigned int matrixInt; for ( i = 0; i < k; i++ ) { shared_data[threadIdx.x] = *(in + i*size + idx); __syncthreads(); #pragma unroll for ( j = 0; j < w; j++ ) { matrixInt = bm[index]; result = result ^ ( (((matrixInt & 
(bitInt<< group_inner_offset)) >> group_inner_offset) * fullOneBit) & shared_data[group_offset + j]); ++index; } __syncthreads(); } out[idx] = result; } __global__ void gcrs_m_1_w_8_coding_dotprod( int k, int index, const long *__restrict in, long *__restrict out, const unsigned int *__restrict bm, int size) { HIP_DYNAMIC_SHARED(long, shared_data); int w = 8; int i,j; long result = 0; const unsigned long fullOneBit = 0xFFFFFFFFFFFFFFFF; int worksize_perblock = blockDim.x / w * w; const unsigned int idx = worksize_perblock * blockIdx.x + threadIdx.x; if (threadIdx.x >= worksize_perblock) { return; } if (idx >= size) { return; } int group_offset = (threadIdx.x / w) * w; int group_inner_offset = threadIdx.x % w; // row for each thread in the bitmatrix * row size which is k * w unsigned int bitInt = 0x01; unsigned int matrixInt; for ( i = 0; i < k; i++ ) { shared_data[threadIdx.x] = *(in + i*size + idx); __syncthreads(); #pragma unroll for ( j = 0; j < w; j++ ) { matrixInt = bm[index]; result = result ^ ( (((matrixInt & (bitInt<< group_inner_offset)) >> group_inner_offset) * fullOneBit) & shared_data[group_offset + j]); ++index; } __syncthreads(); } out[idx] = result; } __global__ void gcrs_m_2_w_4_coding_dotprod( int k, int index, const long *__restrict in, long *__restrict out, const unsigned int *__restrict bm, int size) { HIP_DYNAMIC_SHARED(long, shared_data); int w = 4; int i,j; long result[2]; result[0] = 0; result[1] = 0; const unsigned long fullOneBit = 0xFFFFFFFFFFFFFFFF; int worksize_perblock = blockDim.x / w * w; const unsigned int idx = worksize_perblock * blockIdx.x + threadIdx.x; if (threadIdx.x >= worksize_perblock) { return; } if (idx >= size) { return; } int group_offset = (threadIdx.x / w) * w; int group_inner_offset = threadIdx.x % w; // row for each thread in the bitmatrix * row size which is k * w unsigned int bitInt = 0x01; unsigned int matrixInt; for ( i = 0; i < k; i++ ) { shared_data[threadIdx.x] = *(in + i*size + idx); __syncthreads(); #pragma 
unroll for ( j = 0; j < w; j++ ) { matrixInt = bm[index]; result[0] = result[0] ^ ((((matrixInt & (bitInt<< group_inner_offset)) >> group_inner_offset) * fullOneBit) & shared_data[group_offset + j]); result[1] = result[1] ^ ((((matrixInt & (bitInt<< (group_inner_offset+w))) >> (group_inner_offset+w)) * fullOneBit) & shared_data[group_offset + j]); ++index; } __syncthreads(); } out[idx] = result[0]; out[idx + size] = result[1]; } __global__ void gcrs_m_2_w_5_coding_dotprod( int k, int index, const long *__restrict in, long *__restrict out, const unsigned int *__restrict bm, int size) { HIP_DYNAMIC_SHARED(long, shared_data); int w = 5; int i,j; long result[2]; result[0] = 0; result[1] = 0; const unsigned long fullOneBit = 0xFFFFFFFFFFFFFFFF; int worksize_perblock = blockDim.x / w * w; const unsigned int idx = worksize_perblock * blockIdx.x + threadIdx.x; if (threadIdx.x >= worksize_perblock) { return; } if (idx >= size) { return; } int group_offset = (threadIdx.x / w) * w; int group_inner_offset = threadIdx.x % w; // row for each thread in the bitmatrix * row size which is k * w unsigned int bitInt = 0x01; unsigned int matrixInt; for ( i = 0; i < k; i++ ) { shared_data[threadIdx.x] = *(in + i*size + idx); __syncthreads(); #pragma unroll for ( j = 0; j < w; j++ ) { matrixInt = bm[index]; result[0] = result[0] ^ ((((matrixInt & (bitInt<< group_inner_offset)) >> group_inner_offset) * fullOneBit) & shared_data[group_offset + j]); result[1] = result[1] ^ ((((matrixInt & (bitInt<< (group_inner_offset+w))) >> (group_inner_offset+w)) * fullOneBit) & shared_data[group_offset + j]); ++index; } __syncthreads(); } out[idx] = result[0]; out[idx + size] = result[1]; } __global__ void gcrs_m_2_w_6_coding_dotprod( int k, int index, const long *__restrict in, long *__restrict out, const unsigned int *__restrict bm, int size) { HIP_DYNAMIC_SHARED(long, shared_data); int w = 6; int i,j; long result[2]; result[0] = 0; result[1] = 0; const unsigned long fullOneBit = 0xFFFFFFFFFFFFFFFF; 
int worksize_perblock = blockDim.x / w * w; const unsigned int idx = worksize_perblock * blockIdx.x + threadIdx.x; if (threadIdx.x >= worksize_perblock) { return; } if (idx >= size) { return; } int group_offset = (threadIdx.x / w) * w; int group_inner_offset = threadIdx.x % w; // row for each thread in the bitmatrix * row size which is k * w unsigned int bitInt = 0x01; unsigned int matrixInt; for ( i = 0; i < k; i++ ) { shared_data[threadIdx.x] = *(in + i*size + idx); __syncthreads(); #pragma unroll for ( j = 0; j < w; j++ ) { matrixInt = bm[index]; result[0] = result[0] ^ ((((matrixInt & (bitInt<< group_inner_offset)) >> group_inner_offset) * fullOneBit) & shared_data[group_offset + j]); result[1] = result[1] ^ ((((matrixInt & (bitInt<< (group_inner_offset+w))) >> (group_inner_offset+w)) * fullOneBit) & shared_data[group_offset + j]); ++index; } __syncthreads(); } out[idx] = result[0]; out[idx + size] = result[1]; } __global__ void gcrs_m_2_w_7_coding_dotprod( int k, int index, const long *__restrict in, long *__restrict out, const unsigned int *__restrict bm, int size) { HIP_DYNAMIC_SHARED(long, shared_data); int w = 7; int i,j; long result[2]; result[0] = 0; result[1] = 0; const unsigned long fullOneBit = 0xFFFFFFFFFFFFFFFF; int worksize_perblock = blockDim.x / w * w; const unsigned int idx = worksize_perblock * blockIdx.x + threadIdx.x; if (threadIdx.x >= worksize_perblock) { return; } if (idx >= size) { return; } int group_offset = (threadIdx.x / w) * w; int group_inner_offset = threadIdx.x % w; // row for each thread in the bitmatrix * row size which is k * w unsigned int bitInt = 0x01; unsigned int matrixInt; for ( i = 0; i < k; i++ ) { shared_data[threadIdx.x] = *(in + i*size + idx); __syncthreads(); #pragma unroll for ( j = 0; j < w; j++ ) { matrixInt = bm[index]; result[0] = result[0] ^ ((((matrixInt & (bitInt<< group_inner_offset)) >> group_inner_offset) * fullOneBit) & shared_data[group_offset + j]); result[1] = result[1] ^ ((((matrixInt & (bitInt<< 
(group_inner_offset+w))) >> (group_inner_offset+w)) * fullOneBit) & shared_data[group_offset + j]); ++index; } __syncthreads(); } out[idx] = result[0]; out[idx + size] = result[1]; } __global__ void gcrs_m_2_w_8_coding_dotprod( int k, int index, const long *__restrict in, long *__restrict out, const unsigned int *__restrict bm, int size) { HIP_DYNAMIC_SHARED(long, shared_data); int w = 8; int i,j; long result[2]; result[0] = 0; result[1] = 0; const unsigned long fullOneBit = 0xFFFFFFFFFFFFFFFF; int worksize_perblock = blockDim.x / w * w; const unsigned int idx = worksize_perblock * blockIdx.x + threadIdx.x; if (threadIdx.x >= worksize_perblock) { return; } if (idx >= size) { return; } int group_offset = (threadIdx.x / w) * w; int group_inner_offset = threadIdx.x % w; // row for each thread in the bitmatrix * row size which is k * w unsigned int bitInt = 0x01; unsigned int matrixInt; for ( i = 0; i < k; i++ ) { shared_data[threadIdx.x] = *(in + i*size + idx); __syncthreads(); #pragma unroll for ( j = 0; j < w; j++ ) { matrixInt = bm[index]; result[0] = result[0] ^ ((((matrixInt & (bitInt<< group_inner_offset)) >> group_inner_offset) * fullOneBit) & shared_data[group_offset + j]); result[1] = result[1] ^ ((((matrixInt & (bitInt<< (group_inner_offset+w))) >> (group_inner_offset+w)) * fullOneBit) & shared_data[group_offset + j]); ++index; } __syncthreads(); } out[idx] = result[0]; out[idx + size] = result[1]; } __global__ void gcrs_m_3_w_4_coding_dotprod( int k, int index, const long *__restrict in, long *__restrict out, const unsigned int *__restrict bm, int size) { HIP_DYNAMIC_SHARED(long, shared_data); int w = 4; int i,j; long result[3]; result[0] = 0; result[1] = 0; result[2] = 0; const unsigned long fullOneBit = 0xFFFFFFFFFFFFFFFF; int worksize_perblock = blockDim.x / w * w; const unsigned int idx = worksize_perblock * blockIdx.x + threadIdx.x; if (threadIdx.x >= worksize_perblock) { return; } if (idx >= size) { return; } int group_offset = (threadIdx.x / w) * w; 
// ---- tail of gcrs_m_3_w_4_coding_dotprod (head of the kernel is above) ----
int group_inner_offset = threadIdx.x % w;
// row for each thread in the bitmatrix * row size which is k * w
unsigned int bitInt = 0x01;
unsigned int matrixInt;

for (i = 0; i < k; i++) {
    shared_data[threadIdx.x] = *(in + i * size + idx);
    __syncthreads();

#pragma unroll
    for (j = 0; j < w; j++) {
        matrixInt = bm[index];
        // Broadcast one matrix bit to all 64 lanes (0x0 or 0xFF..FF) and AND
        // it with the staged data word of group member j.
        result[0] = result[0] ^ ((((matrixInt & (bitInt << group_inner_offset)) >> group_inner_offset) * fullOneBit) & shared_data[group_offset + j]);
        result[1] = result[1] ^ ((((matrixInt & (bitInt << (group_inner_offset + w))) >> (group_inner_offset + w)) * fullOneBit) & shared_data[group_offset + j]);
        result[2] = result[2] ^ ((((matrixInt & (bitInt << (group_inner_offset + 2 * w))) >> (group_inner_offset + 2 * w)) * fullOneBit) & shared_data[group_offset + j]);
        ++index;
    }
    __syncthreads();
}

out[idx] = result[0];
out[idx + size] = result[1];
out[idx + 2 * size] = result[2];
}

/**
 * Shared device-side body for the GCRS bit-matrix dot-product kernels.
 *
 * Template parameters:
 *   M - number of coding words produced per data word (result rows)
 *   W - Galois-field word width in bits (threads per group)
 *
 * Launch contract (unchanged from the original kernels):
 *   - dynamic shared memory of blockDim.x * sizeof(long);
 *   - only the first (blockDim.x / W) * W threads of a block produce output.
 *
 * Fix versus the original kernels: no thread returns before the
 * __syncthreads() barriers below. Exiting a block-wide barrier divergently is
 * undefined behavior, so out-of-range threads now stay in the loop with their
 * global loads/stores predicated off instead of returning early.
 */
template <int M, int W>
__device__ __forceinline__ void gcrs_coding_dotprod_body(
    int k, int index,
    const long *__restrict in, long *__restrict out,
    const unsigned int *__restrict bm, int size,
    long *shared_data)
{
    const unsigned long fullOneBit = 0xFFFFFFFFFFFFFFFF;
    const int worksize_perblock = blockDim.x / W * W;
    const unsigned int idx = worksize_perblock * blockIdx.x + threadIdx.x;

    // True for threads that map to a valid output word; inactive threads only
    // participate in the barriers.
    const bool active = (threadIdx.x < (unsigned int)worksize_perblock) &&
                        (idx < (unsigned int)size);

    const int group_offset = (threadIdx.x / W) * W;
    const int group_inner_offset = threadIdx.x % W;

    long result[M];
#pragma unroll
    for (int m = 0; m < M; m++) {
        result[m] = 0;
    }

    for (int i = 0; i < k; i++) {
        if (active) {
            // Stage this thread's data word; the whole W-wide group reads it.
            shared_data[threadIdx.x] = in[i * size + idx];
        }
        __syncthreads();

        if (active) {
#pragma unroll
            for (int j = 0; j < W; j++) {
                const unsigned int matrixInt = bm[index + j];
#pragma unroll
                for (int m = 0; m < M; m++) {
                    const int bit = group_inner_offset + m * W;
                    // Replicate matrix bit `bit` across all 64 lanes and AND
                    // it with the staged word of group member j.
                    result[m] ^= (((unsigned long)((matrixInt >> bit) & 0x01u)) * fullOneBit) & shared_data[group_offset + j];
                }
            }
        }
        index += W;
        __syncthreads();
    }

    if (active) {
#pragma unroll
        for (int m = 0; m < M; m++) {
            out[idx + m * size] = result[m];
        }
    }
}

// Instantiate the __global__ entry points. Names and signatures are identical
// to the original hand-expanded kernels.
#define DEFINE_GCRS_CODING_KERNEL(M, W)                                       \
__global__ void gcrs_m_##M##_w_##W##_coding_dotprod(                          \
    int k, int index, const long *__restrict in, long *__restrict out,        \
    const unsigned int *__restrict bm, int size)                              \
{                                                                             \
    HIP_DYNAMIC_SHARED(long, shared_data);                                    \
    gcrs_coding_dotprod_body<M, W>(k, index, in, out, bm, size, shared_data); \
}

DEFINE_GCRS_CODING_KERNEL(3, 5)
DEFINE_GCRS_CODING_KERNEL(3, 6)
DEFINE_GCRS_CODING_KERNEL(3, 7)
DEFINE_GCRS_CODING_KERNEL(3, 8)
DEFINE_GCRS_CODING_KERNEL(4, 4)
DEFINE_GCRS_CODING_KERNEL(4, 5)
DEFINE_GCRS_CODING_KERNEL(4, 6)
DEFINE_GCRS_CODING_KERNEL(4, 7)
DEFINE_GCRS_CODING_KERNEL(4, 8)
#undef DEFINE_GCRS_CODING_KERNEL

// Host-side launchers. Each one configures a (blockDimX x 1 x 1) grid of
// (threadDimX x 1 x 1) blocks with threadDimX longs of dynamic shared memory,
// exactly as before; only the boilerplate is macro-generated.
#define DEFINE_GCRS_CODING_LAUNCHER(M, W)                                     \
void m_##M##_w_##W##_coding(int k, int index, char *dataPtr,                  \
                            char *codeDevPtr,                                 \
                            const unsigned int *bitMatrixPtr,                 \
                            int threadDimX, int blockDimX,                    \
                            int workSizePerGridInLong)                        \
{                                                                             \
    dim3 gridDim(blockDimX, 1, 1);                                            \
    dim3 blockDim(threadDimX, 1, 1);                                          \
    hipLaunchKernelGGL(gcrs_m_##M##_w_##W##_coding_dotprod,                   \
                       gridDim, blockDim, threadDimX * sizeof(long), 0,       \
                       k, index, (long *)dataPtr, (long *)codeDevPtr,         \
                       bitMatrixPtr, workSizePerGridInLong);                  \
}

DEFINE_GCRS_CODING_LAUNCHER(1, 4)
DEFINE_GCRS_CODING_LAUNCHER(1, 5)
DEFINE_GCRS_CODING_LAUNCHER(1, 6)
DEFINE_GCRS_CODING_LAUNCHER(1, 7)
DEFINE_GCRS_CODING_LAUNCHER(1, 8)
DEFINE_GCRS_CODING_LAUNCHER(2, 4)
DEFINE_GCRS_CODING_LAUNCHER(2, 5)
DEFINE_GCRS_CODING_LAUNCHER(2, 6)
DEFINE_GCRS_CODING_LAUNCHER(2, 7)
DEFINE_GCRS_CODING_LAUNCHER(2, 8)
DEFINE_GCRS_CODING_LAUNCHER(3, 4)
DEFINE_GCRS_CODING_LAUNCHER(3, 5)
DEFINE_GCRS_CODING_LAUNCHER(3, 6)
DEFINE_GCRS_CODING_LAUNCHER(3, 7)
DEFINE_GCRS_CODING_LAUNCHER(3, 8)
DEFINE_GCRS_CODING_LAUNCHER(4, 4)
DEFINE_GCRS_CODING_LAUNCHER(4, 5)
DEFINE_GCRS_CODING_LAUNCHER(4, 6)
DEFINE_GCRS_CODING_LAUNCHER(4, 7)
DEFINE_GCRS_CODING_LAUNCHER(4, 8)
#undef DEFINE_GCRS_CODING_LAUNCHER

// Dispatch table indexed by (m - 1) * 5 + (w - 4).
void (*coding_func_array[])(int k, int index, char *dataPtr, char *codeDevPtr,
                            const unsigned int *bitMatrixPtr, int threadDimX,
                            int blockDimX, int workSizePerGridInLong) = {
    m_1_w_4_coding, m_1_w_5_coding, m_1_w_6_coding, m_1_w_7_coding, m_1_w_8_coding,
    m_2_w_4_coding, m_2_w_5_coding, m_2_w_6_coding, m_2_w_7_coding, m_2_w_8_coding,
    m_3_w_4_coding, m_3_w_5_coding, m_3_w_6_coding, m_3_w_7_coding, m_3_w_8_coding,
    m_4_w_4_coding, m_4_w_5_coding, m_4_w_6_coding, m_4_w_7_coding, m_4_w_8_coding
};
// ---- the_stack: dataset file-boundary marker; an unrelated header (NVIDIA FLIP, CUDA) begins below ----
// Visualizing Errors in Rendered High Dynamic Range Images
// Eurographics 2021,
// by Pontus Andersson, Jim Nilsson, Peter Shirley, and Tomas Akenine-Moller.
// Pointer to the paper: https://research.nvidia.com/publication/2021-05_HDR-FLIP.

// FLIP: A Difference Evaluator for Alternating Images
// High Performance Graphics 2020,
// by Pontus Andersson, Jim Nilsson, Tomas Akenine-Moller,
// Magnus Oskarsson, Kalle Astrom, and Mark D. Fairchild.
// Pointer to the paper: https://research.nvidia.com/publication/2020-07_FLIP.

// Code by Pontus Andersson, Jim Nilsson, and Tomas Akenine-Moller.

#pragma once

#include <algorithm>
#include <cstdlib>
#include <iostream>   // std::cerr/std::cout used throughout (was a transitive include)
#include <string>

#define TINYEXR_IMPLEMENTATION
#include "tinyexr.h"

#define STB_IMAGE_IMPLEMENTATION
#include "stb_image.h"
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "stb_image_write.h"

#include "cuda_runtime.h"
#include "device_launch_parameters.h"

namespace FLIP
{
    // Pixelwise combination modes used by kernelCombine.
    enum class CombineOperation
    {
        Add,
        Subtract,
        Multiply,
        L1,
        L2
    };

    // FLIP metric tuning constants (see the FLIP papers referenced above).
    struct FLIPConstants
    {
        float gqc = 0.7f;
        float gpc = 0.4f;
        float gpt = 0.95f;
        float gw = 0.082f;
        float gqf = 0.5f;
    };
}

#include "color.cuh"
#include "cudaKernels.cuh"

namespace FLIP
{
    const float PI = 3.14159265358979f;

    const dim3 DEFAULT_KERNEL_BLOCK_DIM = { 32, 32, 1 };  // original note: "1.2s" (presumably a timing observation)

    // Which copy of the data (host/device) currently holds the freshest values.
    enum class CudaTensorState
    {
        UNINITIALIZED,
        ALLOCATED,
        HOST_ONLY,
        DEVICE_ONLY,
        SYNCHRONIZED
    };

    // A 3D array of T mirrored between host and device, with lazy
    // synchronization driven by mState.
    template<typename T = color3>
    class tensor
    {
    private:
        CudaTensorState mState = CudaTensorState::UNINITIALIZED;

    protected:
        int3 mDim;                   // x = width, y = height, z = depth
        int mArea, mVolume;          // x*y and x*y*z element counts
        dim3 mBlockDim, mGridDim;    // kernel launch configuration
        // Fix: initialized to nullptr so the error paths below (which may
        // release the object before both buffers exist) never free garbage.
        T* mvpHostData = nullptr;
        T* mvpDeviceData = nullptr;

    protected:
        // Frees both buffers and nulls the pointers. Replaces the original
        // explicit `this->~tensor()` calls, which freed uninitialized
        // pointers and led to a double free when the destructor ran later.
        void release(void)
        {
            free(this->mvpHostData);
            cudaFree(this->mvpDeviceData);
            this->mvpHostData = nullptr;
            this->mvpDeviceData = nullptr;
        }

        // Allocates the host mirror; returns false on malloc failure.
        bool allocateHost(void)
        {
            this->mvpHostData = (T*)malloc(this->mVolume * sizeof(T));
            if (this->mvpHostData == nullptr)
            {
                return false;
            }
            return true;
        }

        // Allocates the device buffer. It is sized for a full grid's worth of
        // threads (may exceed mVolume) so every launched thread has a slot.
        bool allocateDevice(void)
        {
            int deviceVolume = this->mGridDim.x * this->mGridDim.y * this->mGridDim.z * this->mBlockDim.x * this->mBlockDim.y * this->mBlockDim.z;
            cudaError_t cudaError = cudaMalloc((void**)&(this->mvpDeviceData), deviceVolume * sizeof(T));
            if (cudaError != cudaSuccess)
            {
                std::cerr << "cudaMalloc() failed: " << cudaGetErrorString(cudaError) << "\n";
                this->release();
                return false;
            }
            return true;
        }

        // Computes derived sizes and launch dims, allocates both buffers and
        // optionally clears the tensor. Exits the process on CUDA failure.
        void init(const int3 dim, bool bClear = false, T clearColor = T(0.0f))
        {
            this->mDim = dim;
            this->mArea = dim.x * dim.y;
            this->mVolume = dim.x * dim.y * dim.z;

            this->mGridDim.x = (this->mDim.x + this->mBlockDim.x - 1) / this->mBlockDim.x;
            this->mGridDim.y = (this->mDim.y + this->mBlockDim.y - 1) / this->mBlockDim.y;
            this->mGridDim.z = (this->mDim.z + this->mBlockDim.z - 1) / this->mBlockDim.z;

            cudaError_t cudaError = cudaSetDevice(0);
            if (cudaError != cudaSuccess)
            {
                std::cerr << "cudaSetDevice() failed: " << cudaGetErrorString(cudaError) << "\n";
                this->release();
                exit(-1);
            }

            allocateDevice();
            allocateHost();

            this->mState = CudaTensorState::ALLOCATED;

            if (bClear)
            {
                this->clear(clearColor);
            }
        }

    public:
        tensor(const dim3 blockDim = DEFAULT_KERNEL_BLOCK_DIM)
        {
            this->mBlockDim = blockDim;
        }

        tensor(const int width, const int height, const int depth, const dim3 blockDim = DEFAULT_KERNEL_BLOCK_DIM)
        {
            this->mBlockDim = blockDim;
            this->init({ width, height, depth });
        }

        tensor(const int width, const int height, const int depth, const T clearColor, const dim3 blockDim = DEFAULT_KERNEL_BLOCK_DIM)
        {
            this->mBlockDim = blockDim;
            this->init({ width, height, depth }, true, clearColor);
        }

        tensor(const int3 dim, const T clearColor, const dim3 blockDim = DEFAULT_KERNEL_BLOCK_DIM)
        {
            this->mBlockDim = blockDim;
            this->init(dim, true, clearColor);
        }

        tensor(tensor& image, const dim3 blockDim = DEFAULT_KERNEL_BLOCK_DIM)
        {
            this->mBlockDim = blockDim;
            this->init(image.mDim);
            this->copy(image);
        }

        // Builds a 1D (size x 1 x 1) tensor from a host-side color map.
        tensor(const color3* pColorMap, int size, const dim3 blockDim = DEFAULT_KERNEL_BLOCK_DIM)
        {
            this->mBlockDim = blockDim;
            this->init({ size, 1, 1 });
            cudaError_t cudaError = cudaMemcpy(this->mvpDeviceData, pColorMap, size * sizeof(color3), cudaMemcpyHostToDevice);
            if (cudaError != cudaSuccess)
            {
                std::cout << "copy() failed: " << cudaGetErrorString(cudaError) << "\n";
                exit(-1);
            }
            this->mState = CudaTensorState::DEVICE_ONLY;
        }

        ~tensor(void)
        {
            this->release();
        }

        T* getHostData(void)
        {
            return this->mvpHostData;
        }

        // Device pointer to the start of slice z.
        T* getDeviceData(const int z = 0)
        {
            return this->mvpDeviceData + z * this->mArea;
        }

        // Linearized index of element (x, y, z).
        inline int index(int x, int y = 0, int z = 0)
        {
            return (z * this->mDim.y + y) * mDim.x + x;
        }

        T get(int x, int y, int z)
        {
            this->synchronizeHost();
            return this->mvpHostData[this->index(x, y, z)];
        }

        void set(int x, int y, int z, T value)
        {
            this->synchronizeHost();
            this->mvpHostData[this->index(x, y, z)] = value;
            this->mState = CudaTensorState::HOST_ONLY;
        }

        inline void setState(CudaTensorState state)
        {
            this->mState = state;
        }

        int3 getDimensions(void) const
        {
            return this->mDim;
        }

        int getWidth(void) const
        {
            return this->mDim.x;
        }

        int getHeight(void) const
        {
            return this->mDim.y;
        }

        int getDepth(void) const
        {
            return this->mDim.z;
        }

        // Copies device -> host if the device copy is the only fresh one.
        void synchronizeHost(void)
        {
            if (this->mState == CudaTensorState::DEVICE_ONLY)
            {
                cudaError_t cudaError = cudaMemcpy(this->mvpHostData, this->mvpDeviceData, this->mVolume * sizeof(T), cudaMemcpyDeviceToHost);
                if (cudaError != cudaSuccess)
                {
                    std::cout << "cudaMemcpy(), DEVICE -> HOST, failed: " << cudaGetErrorString(cudaError) << "\n";
                    exit(-1);
                }
                this->mState = CudaTensorState::SYNCHRONIZED;
            }
        }

        // Copies host -> device if the host copy is the only fresh one.
        void synchronizeDevice(void)
        {
            if (this->mState == CudaTensorState::HOST_ONLY)
            {
                cudaError_t cudaError = cudaMemcpy(this->mvpDeviceData, this->mvpHostData, this->mVolume * sizeof(T), cudaMemcpyHostToDevice);
                if (cudaError != cudaSuccess)
                {
                    std::cout << "cudaMemcpy(), HOST -> DEVICE, failed: " << cudaGetErrorString(cudaError) << "\n";
                    exit(-1);
                }
                this->mState = CudaTensorState::SYNCHRONIZED;
            }
        }

        // Reduces the whole tensor to a single value with kernelReduce,
        // choosing the instantiation that matches the (power-of-two) block
        // size. Pass 1 writes one partial per block; pass 2 folds the
        // partials with a single block into slot numBlocks.
        template <FLIP::ReduceOperation op>
        T reduce(void)
        {
            this->synchronizeDevice();
            int blockSize = this->mBlockDim.x * this->mBlockDim.y * this->mBlockDim.z;
            int numBlocks = int(this->mVolume + blockSize - 1) / blockSize;
            int sharedMemSize = blockSize * sizeof(T);
            T* pBlockResults;
            cudaError_t cudaError = cudaMalloc((void**)&pBlockResults, (numBlocks + 1) * sizeof(T));
            if (cudaError != cudaSuccess)  // fix: this allocation was unchecked
            {
                std::cerr << "cudaMalloc() failed: " << cudaGetErrorString(cudaError) << "\n";
                exit(-1);
            }

#define FLIP_REDUCE_CASE(N) \
            case N: \
                FLIP::kernelReduce<N, op><<<numBlocks, blockSize, sharedMemSize>>>(pBlockResults, this->mvpDeviceData, this->mVolume); \
                FLIP::kernelReduce<N, op><<<1, blockSize, sharedMemSize>>>(pBlockResults + numBlocks, pBlockResults, numBlocks); \
                break;

            switch (blockSize)
            {
                FLIP_REDUCE_CASE(1024)
                FLIP_REDUCE_CASE(512)
                FLIP_REDUCE_CASE(256)
                FLIP_REDUCE_CASE(128)
                FLIP_REDUCE_CASE(64)
                FLIP_REDUCE_CASE(32)
                FLIP_REDUCE_CASE(16)
                FLIP_REDUCE_CASE(8)
                FLIP_REDUCE_CASE(4)
                FLIP_REDUCE_CASE(2)
                FLIP_REDUCE_CASE(1)
            }
#undef FLIP_REDUCE_CASE

            T result;
            cudaError = cudaMemcpy(&result, pBlockResults + numBlocks, sizeof(T), cudaMemcpyDeviceToHost);
            if (cudaError != cudaSuccess)  // fix: this copy result was ignored
            {
                std::cout << "cudaMemcpy(), DEVICE -> HOST, failed: " << cudaGetErrorString(cudaError) << "\n";
                exit(-1);
            }
            cudaFree(pBlockResults);
            return result;
        }

        // Maps a scalar image through a color map into this tensor.
        void colorMap(tensor<float>& srcImage, tensor<color3>& colorMap)
        {
            srcImage.synchronizeDevice();
            FLIP::kernelColorMap<<<this->mGridDim, this->mBlockDim>>>(this->getDeviceData(), srcImage.getDeviceData(), colorMap.getDeviceData(), this->mDim, colorMap.getWidth());
            checkStatus("kernelColorMap");
            this->mState = CudaTensorState::DEVICE_ONLY;
        }

        void sRGB2YCxCz(void)
        {
            this->synchronizeDevice();
            kernelsRGB2YCxCz<<<this->mGridDim, this->mBlockDim>>>(this->mvpDeviceData, this->mDim);
            checkStatus("kernelsRGB2YCxCz");
            this->mState = CudaTensorState::DEVICE_ONLY;
        }

        void YCxCz2CIELab(void)
        {
            this->synchronizeDevice();
            FLIP::kernelYCxCz2CIELab<<<this->mGridDim, this->mBlockDim>>>(this->mvpDeviceData, this->mDim);
            checkStatus("kernelYCxCz2CIELab");
            this->mState = CudaTensorState::DEVICE_ONLY;
        }

        void YCxCz2Gray(tensor<color3>& srcImage)
        {
            this->synchronizeDevice();
            FLIP::kernelYCxCz2Gray<<<this->mGridDim, this->mBlockDim>>>(this->getDeviceData(), srcImage.getDeviceData(), this->mDim);
            checkStatus("kernelYCxCz2Gray");
            this->mState = CudaTensorState::DEVICE_ONLY;
        }

        void LinearRGB2sRGB(void)
        {
            this->synchronizeDevice();
            FLIP::kernelLinearRGB2sRGB<<<this->mGridDim, this->mBlockDim>>>(this->mvpDeviceData, this->mDim);
            checkStatus("kernelLinearRGB2sRGB");
            this->mState = CudaTensorState::DEVICE_ONLY;
        }

        // Elementwise x = x * m + a.
        void multiplyAndAdd(T m, T a = T(0.0f))
        {
            this->synchronizeDevice();
            FLIP::kernelMultiplyAndAdd<<<this->mGridDim, this->mBlockDim>>>(this->mvpDeviceData, this->mDim, m, a);
            checkStatus("kernelMultiplyAndAdd");
            this->mState = CudaTensorState::DEVICE_ONLY;
        }

        // Scales the tensor so its elements sum to one.
        void normalize(void)
        {
            T sum = this->reduce<FLIP::ReduceOperation::Add>();
            this->multiplyAndAdd(T(1.0f) / sum);
        }

        // Aborts with a message if the most recent kernel launch failed.
        void checkStatus(std::string kernelName)
        {
            cudaError_t cudaError = cudaGetLastError();
            if (cudaError != cudaSuccess)
            {
                std::cerr << kernelName << "() failed: " << cudaGetErrorString(cudaError) << "\n";
                exit(-1);
            }
            // used if debugging
            if (true)
            {
                deviceSynchronize(kernelName);
            }
        }

        // used if debugging
        void deviceSynchronize(std::string kernelName)
        {
            cudaError_t cudaError = cudaDeviceSynchronize();
            if (cudaError != cudaSuccess)
            {
                // fix: message previously said "cudeDeviceSynchronize"
                std::cerr << kernelName << "(): cudaDeviceSynchronize: " << cudaGetErrorString(cudaError) << "\n";
                exit(-1);
            }
        }

        void clear(const T color = T(0.0f))
        {
            FLIP::kernelClear<<<this->mGridDim, this->mBlockDim>>>(this->mvpDeviceData, this->mDim, color);
            checkStatus("kernelClear");
            this->mState = CudaTensorState::DEVICE_ONLY;
        }

        // this = srcImage convolved with filter (filter dims passed through).
        void convolve(tensor& srcImage, tensor& filter)
        {
            srcImage.synchronizeDevice();
            filter.synchronizeDevice();
            FLIP::kernelConvolve<<<this->mGridDim, this->mBlockDim>>>(this->mvpDeviceData, srcImage.mvpDeviceData, filter.mvpDeviceData, this->mDim, filter.mDim.x, filter.mDim.y);
            checkStatus("kernelConvolve");
            this->mState = CudaTensorState::DEVICE_ONLY;
        }

        // this = op(srcImageA, srcImageB), elementwise.
        void combine(tensor& srcImageA, tensor& srcImageB,
CombineOperation operation)
        {
            srcImageA.synchronizeDevice();
            srcImageB.synchronizeDevice();
            FLIP::kernelCombine<<<this->mGridDim, this->mBlockDim>>>(this->mvpDeviceData, srcImageA.mvpDeviceData, srcImageB.mvpDeviceData, this->mDim, operation);
            checkStatus("kernelCombine");
            this->mState = CudaTensorState::DEVICE_ONLY;
        }

        // Clamps every element to [low, high].
        void clamp(float low = 0.0f, float high = 1.0f)
        {
            this->synchronizeDevice();
            FLIP::kernelClamp<<<this->mGridDim, this->mBlockDim>>>(this->mvpDeviceData, this->mDim, low, high);
            checkStatus("kernelClamp");
            this->mState = CudaTensorState::DEVICE_ONLY;
        }

        // Applies a tone mapper selected by name; unknown names fall back to
        // "aces" (index 1), matching the original behavior.
        void toneMap(std::string tm)
        {
            int toneMapper = 1;
            if (tm == "reinhard")
                toneMapper = 0;
            if (tm == "aces")
                toneMapper = 1;
            if (tm == "hable")
                toneMapper = 2;
            FLIP::kernelToneMap<<<this->mGridDim, this->mBlockDim>>>(this->mvpDeviceData, this->mDim, toneMapper);
            checkStatus("kernelToneMap");
            this->mState = CudaTensorState::DEVICE_ONLY;
        }

        // Device-to-device copy; falls back to a bilinear resample when the
        // dimensions differ.
        void copy(tensor<T>& srcImage)
        {
            srcImage.synchronizeDevice();
            if (this->mDim.x == srcImage.getWidth() && this->mDim.y == srcImage.getHeight() && this->mDim.z == srcImage.getDepth())
            {
                cudaError_t cudaError = cudaMemcpy(this->mvpDeviceData, srcImage.getDeviceData(), this->mVolume * sizeof(T), cudaMemcpyDeviceToDevice);
                if (cudaError != cudaSuccess)
                {
                    std::cout << "copy() failed: " << cudaGetErrorString(cudaError) << "\n";
                    exit(-1);
                }
            }
            else
            {
                kernelBilinearCopy<<<this->mGridDim, this->mBlockDim>>>(this->mvpDeviceData, srcImage.getDeviceData(), this->mDim, srcImage.getDimensions());
            }
            this->mState = CudaTensorState::DEVICE_ONLY;
        }

        // Expands a float tensor into this color3 tensor on the device.
        void copyFloat2Color3(tensor<float>& srcImage)
        {
            srcImage.synchronizeDevice();
            FLIP::kernelFloat2Color3<<<this->mGridDim, this->mBlockDim>>>(this->mvpDeviceData, srcImage.getDeviceData(), this->mDim);
            checkStatus("kernelFloat2Color3");
            this->mState = CudaTensorState::DEVICE_ONLY;
        }

        // Loads slice z from a .png or .exr file, dispatching on extension.
        bool load(const std::string& fileName, const int z = 0)
        {
            bool bOk = false;
            std::string extension = fileName.substr(fileName.find_last_of(".") + 1);
            if (extension == "png")
            {
                bOk = this->pngLoad(fileName, z);
            }
            else if (extension == "exr")
            {
                bOk = this->exrLoad(fileName, z);
            }
            return bOk;
        }

        // Loads a PNG (forced to 3 channels) into slice z. Returns false if
        // the file cannot be read.
        bool pngLoad(const std::string& filename, const int z = 0)
        {
            int width, height, bpp;
            unsigned char* pixels = stbi_load(filename.c_str(), &width, &height, &bpp, 3);
            if (!pixels)
            {
                return false;
            }
            if (this->mState == CudaTensorState::UNINITIALIZED)
            {
                this->init({ width, height, z + 1 });
            }
            for (int y = 0; y < this->mDim.y; y++)
            {
                for (int x = 0; x < this->mDim.x; x++)
                {
                    this->set(x, y, z, color3(&pixels[3 * this->index(x, y)]));
                }
            }
            // fix: stb_image memory is malloc-based and must be released with
            // stbi_image_free(); the original `delete[] pixels` was undefined
            // behavior.
            stbi_image_free(pixels);
            return true;
        }

        inline static float fClamp(float value)
        {
            return std::max(0.0f, std::min(1.0f, value));
        }

        // Writes slice z as an 8-bit RGB PNG; returns false on write failure.
        bool pngSave(const std::string& filename, int z = 0)
        {
            unsigned char* pixels = new unsigned char[3 * this->mDim.x * this->mDim.y];
            this->synchronizeHost();
            for (int y = 0; y < this->mDim.y; y++)
            {
                for (int x = 0; x < this->mDim.x; x++)
                {
                    int index = this->index(x, y);
                    color3 color = this->mvpHostData[this->index(x, y, z)];
                    // Round-to-nearest conversion to 8 bits per channel.
                    pixels[3 * index + 0] = (unsigned char)(255.0f * fClamp(color.x) + 0.5f);
                    pixels[3 * index + 1] = (unsigned char)(255.0f * fClamp(color.y) + 0.5f);
                    pixels[3 * index + 2] = (unsigned char)(255.0f * fClamp(color.z) + 0.5f);
                }
            }
            int ok = stbi_write_png(filename.c_str(), this->mDim.x, this->mDim.y, 3, pixels, 3 * this->mDim.x);
            delete[] pixels;
            return (ok != 0);
        }

        // Loads an EXR file into slice z (half channels promoted to float).
        bool exrLoad(const std::string& fileName, const int z = 0)
        {
            EXRVersion exrVersion;
            EXRImage exrImage;
            EXRHeader exrHeader;
            InitEXRHeader(&exrHeader);
            InitEXRImage(&exrImage);
            int width, height;
            {
                int ret;
                const char* errorString;
                ret = ParseEXRVersionFromFile(&exrVersion, fileName.c_str());
                if (ret != TINYEXR_SUCCESS || exrVersion.multipart || exrVersion.non_image)
                {
                    std::cerr << "Unsupported EXR version or type!" << std::endl;
                    return false;
                }
                ret = ParseEXRHeaderFromFile(&exrHeader, &exrVersion, fileName.c_str(), &errorString);
                if (ret != TINYEXR_SUCCESS)
                {
                    std::cerr << "Error loading EXR header: " << errorString << std::endl;
                    return false;
                }
                for (int i = 0; i < exrHeader.num_channels; i++)
                {
                    if (exrHeader.pixel_types[i] == TINYEXR_PIXELTYPE_HALF)
                    {
                        exrHeader.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
                    }
                }
                ret = LoadEXRImageFromFile(&exrImage, &exrHeader, fileName.c_str(), &errorString);
                if (ret != TINYEXR_SUCCESS)
                {
                    std::cerr << "Error loading EXR file: " << errorString << std::endl;
                    FreeEXRHeader(&exrHeader);  // fix: the parsed header was leaked on this path
                    return false;
                }
            }
            width = exrImage.width;
            height = exrImage.height;
            if (this->mState == CudaTensorState::UNINITIALIZED)
            {
                this->init({ width, height, z + 1 });
            }
            int idxR = -1;
            int idxG = -1;
            int idxB = -1;
            int numRecognizedChannels = 0;
            for (int c = 0; c < exrHeader.num_channels; c++)
            {
                std::string channelName = exrHeader.channels[c].name;
                std::transform(channelName.begin(), channelName.end(), channelName.begin(), ::tolower);
                if (channelName == "r")
                {
                    idxR = c;
                    ++numRecognizedChannels;
                }
                else if (channelName == "g")
                {
                    idxG = c;
                    ++numRecognizedChannels;
                }
                else if (channelName == "b")
                {
                    idxB = c;
                    ++numRecognizedChannels;
                }
                else if (channelName == "a")
                {
                    ++numRecognizedChannels;
                }
                else
                {
                    std::cerr << "Undefined EXR channel name: " << exrHeader.channels[c].name << std::endl;
                }
            }
            if (numRecognizedChannels < exrHeader.num_channels)
            {
                std::cerr << "EXR channels may be loaded in the wrong order."
<< std::endl; idxR = 0; idxG = 1; idxB = 2; } auto rawImgChn = reinterpret_cast<float**>(exrImage.images); bool loaded = false; // 1 channel images can be loaded into either scalar or vector formats if (exrHeader.num_channels == 1) { for (int y = 0; y < this->mDim.y; y++) { for (int x = 0; x < this->mDim.x; x++) { float color(rawImgChn[0][this->index(x, y)]); this->set(x, y, z, color3(color)); } } loaded = true; } // 2 channel images can only be loaded into vector2/3/4 formats if (exrHeader.num_channels == 2) { assert(idxR != -1 && idxG != -1); for (int y = 0; y < this->mDim.y; y++) { for (int x = 0; x < this->mDim.x; x++) { size_t linearIdx = this->index(x, y); color3 color; color.x = rawImgChn[idxR][linearIdx]; color.y = rawImgChn[idxG][linearIdx]; this->set(x, y, z, color); } } loaded = true; } // 3 channel images can only be loaded into vector3/4 formats if (exrHeader.num_channels == 3) { assert(idxR != -1 && idxG != -1 && idxB != -1); for (int y = 0; y < this->mDim.y; y++) { for (int x = 0; x < this->mDim.x; x++) { size_t linearIdx = this->index(x, y); color3 color; color.x = rawImgChn[idxR][linearIdx]; color.y = rawImgChn[idxG][linearIdx]; color.z = rawImgChn[idxB][linearIdx]; this->set(x, y, z, color); } } loaded = true; } // 4 channel images can only be loaded into vector4 formats if (exrHeader.num_channels == 4) { assert(idxR != -1 && idxG != -1 && idxB != -1); for (int y = 0; y < this->mDim.y; y++) { for (int x = 0; x < this->mDim.x; x++) { size_t linearIdx = this->index(x, y); color3 color; color.x = rawImgChn[idxR][linearIdx]; color.y = rawImgChn[idxG][linearIdx]; color.z = rawImgChn[idxB][linearIdx]; this->set(x, y, z, color); } } loaded = true; } FreeEXRHeader(&exrHeader); FreeEXRImage(&exrImage); if (!loaded) { std::cerr << "Insufficient target channels when loading EXR: need " << exrHeader.num_channels << std::endl; return false; } else { return true; } } }; }
the_stack
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <nppdefs.h>

#include "cuda_util.h"
#include "mat.h"
#include "softmax_cuda.h"

#include <iostream>

// In-place softmax over rows, columns or channels of an ncnn::CudaMat,
// computed in three passes per axis:
//   1. reduce the maximum along the axis,
//   2. replace every element x by exp(x - max) while reducing the sum,
//   3. divide every element by the sum.
//
// Scratchpad layout (floats), row case:
//   [0, h*c)                    final per-(row, channel) reduction results
//   [h*c, h*c + gridDim.x*h*c)  per-block partial results
// The column case is identical with w in place of h and gridDim.y in place of
// gridDim.x; the channel case uses only w*h floats.
// NOTE(review): the caller must size gpu_scratchpad_memory accordingly — this
// file does not check the capacity.

// Stage 1 of the per-row max reduction: each block computes the maximum of its
// tile of a row with a shared-memory tree reduction along x and writes one
// partial maximum per block to the scratchpad (past the first h*c floats).
// Dynamic shared memory: blockDim.x * blockDim.y * blockDim.z floats.
// blockDim.x must be a power of two (the host launcher uses 64 or 128).
__global__ void gpu_softmax_reduce_find_max_row(const float* a_input,
                                                const ncnn::CudaMatInfo a_info,
                                                float* scratchpad_memory)
{
    extern __shared__ float sh_buffer[];
    float* max_value = sh_buffer;

    const int column = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int channel = blockIdx.z * blockDim.z + threadIdx.z;

    const int blockWidth = blockDim.x;
    const int channel_shared_memory_step = blockDim.y * blockWidth;
    const int max_value_index = channel_shared_memory_step * threadIdx.z
                                + threadIdx.y * blockWidth + threadIdx.x;

    const bool in_bounds =
        row < a_info.h && column < a_info.w && channel < a_info.c;

    // Out-of-range threads contribute a sentinel that can never win the max.
    // They must NOT return early: every thread of the block has to reach the
    // __syncthreads() barriers below (a barrier in divergent control flow is
    // undefined behavior).
    max_value[max_value_index] =
        in_bounds ? a_input[channel * a_info.cstep + row * a_info.w + column]
                  : -NPP_MAXABS_32F;
    __syncthreads();

    // Tree reduction along x.
    for (int i = (blockWidth + 1) / 2; i > 0; i /= 2)
    {
        if (threadIdx.x < i)
        {
            if (max_value[max_value_index + i] > max_value[max_value_index])
            {
                max_value[max_value_index] = max_value[max_value_index + i];
            }
        }
        __syncthreads();
    }

    // Thread x==0 of each (row, channel) publishes the block-local maximum.
    if (threadIdx.x == 0 && in_bounds)
    {
        const int scratchpad_memory_offset_sum = a_info.h * a_info.c;
        const int scratchpad_channel_memory_step = gridDim.x * a_info.h;
        const int scratchpad_index = scratchpad_memory_offset_sum
                                     + channel * scratchpad_channel_memory_step
                                     + row * gridDim.x + blockIdx.x;
        scratchpad_memory[scratchpad_index] = max_value[max_value_index];
    }
}

// Stage 2 of the per-row max reduction: one thread per (row, channel) scans
// the result_size.x partial maxima and stores the final row maximum at
// scratchpad_memory[channel * h + row]. A register accumulator replaces the
// original per-thread "shared" buffer, which was never shared between threads
// and was written with a global row index before the bounds check.
__global__ void gpu_softmax_reduce_find_max_row_result(float* scratchpad_memory,
                                                       const dim3 result_size)
{
    const int column = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int channel = blockIdx.z * blockDim.z + threadIdx.z;

    if (column >= result_size.x || row >= result_size.y || channel >= result_size.z)
        return;

    const int scratchpad_memory_offset_sum = result_size.y * result_size.z;
    const int channel_scratchpad_memory_step = result_size.x * result_size.y;
    const int partial_base = scratchpad_memory_offset_sum
                             + channel * channel_scratchpad_memory_step
                             + row * result_size.x;

    float best = -NPP_MAXABS_32F;
    for (unsigned int i = 0; i < result_size.x; i++)
    {
        if (best < scratchpad_memory[partial_base + i])
            best = scratchpad_memory[partial_base + i];
    }
    scratchpad_memory[result_size.y * channel + row] = best;
}

// Stage 1 of the per-row sum: replaces every element x by exp(x - row_max)
// (row maxima are read from the head of the scratchpad) and reduces the
// block-local sum of the new values into one partial sum per block.
// Dynamic shared memory: blockDim.x * blockDim.y * blockDim.z floats.
__global__ void gpu_softmax_reduce_sum_elements_row(float* a_input,
                                                    const ncnn::CudaMatInfo a_info,
                                                    float* scratchpad_memory)
{
    extern __shared__ float sh_buffer[];
    float* sum_value = sh_buffer;

    const int column = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int channel = blockIdx.z * blockDim.z + threadIdx.z;

    const int blockWidth = blockDim.x;
    const int channel_shared_memory_step = blockWidth * blockDim.y;
    const int sum_value_index = channel_shared_memory_step * threadIdx.z
                                + threadIdx.y * blockWidth + threadIdx.x;

    const bool in_bounds =
        column < a_info.w && row < a_info.h && channel < a_info.c;

    float element = 0.0f;
    if (in_bounds)
    {
        const int scratchpad_channel_memory_step = a_info.h;
        const float max_value =
            scratchpad_memory[scratchpad_channel_memory_step * channel + row];
        float* ptr = a_input + channel * a_info.cstep + row * a_info.w + column;
        *ptr = expf(*ptr - max_value); // expf keeps the math in single precision
        element = *ptr;
    }
    // Out-of-range threads contribute 0 and still reach every barrier below.
    sum_value[sum_value_index] = element;
    __syncthreads();

    for (int i = (blockWidth + 1) / 2; i > 0; i /= 2)
    {
        if (threadIdx.x < i)
        {
            sum_value[sum_value_index] += sum_value[sum_value_index + i];
        }
        __syncthreads();
    }

    if (threadIdx.x == 0 && in_bounds)
    {
        //TODO check possible overwrite of scratchpad in case of unbalanced block execution
        const int scratchpad_memory_offset_sum = a_info.h * a_info.c;
        const int channel_scratchpad_memory_step = gridDim.x * a_info.h;
        const int scratchpad_index = scratchpad_memory_offset_sum
                                     + channel * channel_scratchpad_memory_step
                                     + row * gridDim.x + blockIdx.x;
        scratchpad_memory[scratchpad_index] = sum_value[sum_value_index];
    }
}

// Stage 2 of the per-row sum: one thread per (row, channel) adds up the
// result_size.x partial sums and stores the final row sum at
// scratchpad_memory[channel * h + row], overwriting the row maximum that is
// no longer needed.
__global__ void gpu_softmax_reduce_sum_elements_row_result(float* scratchpad_memory,
                                                           const dim3 result_size)
{
    const int column = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int channel = blockIdx.z * blockDim.z + threadIdx.z;

    if (column >= result_size.x || row >= result_size.y || channel >= result_size.z)
        return;

    const int scratchpad_memory_offset_sum = result_size.y * result_size.z;
    const int channel_scratchpad_memory_step = result_size.x * result_size.y;
    const int partial_base = scratchpad_memory_offset_sum
                             + channel_scratchpad_memory_step * channel
                             + row * result_size.x;

    float total = 0.0f;
    for (unsigned int i = 0; i < result_size.x; i++)
    {
        total += scratchpad_memory[partial_base + i];
    }
    scratchpad_memory[result_size.y * channel + row] = total;
}

// Pass 3 (row case): divide every element by its row sum, read from the head
// of the scratchpad. One thread per element.
__global__ void gpu_softmax_divide_elements_row(float* a_input,
                                                const ncnn::CudaMatInfo a_info,
                                                float* scratchpad_memory)
{
    const int channel = blockIdx.z * blockDim.z + threadIdx.z;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int column = blockIdx.x * blockDim.x + threadIdx.x;

    if (column >= a_info.w || row >= a_info.h || channel >= a_info.c)
        return;

    const int channel_offset = a_info.h;
    const float sum = scratchpad_memory[channel_offset * channel + row];
    float* ptr = a_input + channel * a_info.cstep + row * a_info.w + column;
    *ptr = *ptr / sum;
}

// Stage 1 of the per-column max reduction: as the row variant, but the tree
// reduction runs along y and one partial maximum is produced per
// (column, channel) per block row.
// Dynamic shared memory: blockDim.x * blockDim.y * blockDim.z floats.
// blockDim.y must be a power of two (the host launcher uses 8).
__global__ void gpu_softmax_reduce_find_max_column(const float* a_input,
                                                   const ncnn::CudaMatInfo a_info,
                                                   float* scratchpad_memory)
{
    extern __shared__ float sh_buffer[];
    float* max_value = sh_buffer;

    const int column = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int channel = blockIdx.z * blockDim.z + threadIdx.z;

    const int blockWidth = blockDim.x;
    const int blockHeight = blockDim.y;
    const int channel_shared_memory_step = blockWidth * blockHeight;
    const int max_column_value_index = channel_shared_memory_step * threadIdx.z
                                       + threadIdx.y * blockWidth + threadIdx.x;

    const bool in_bounds =
        row < a_info.h && column < a_info.w && channel < a_info.c;

    // Sentinel for out-of-range threads; no early return before the barriers.
    max_value[max_column_value_index] =
        in_bounds ? a_input[channel * a_info.cstep + row * a_info.w + column]
                  : -NPP_MAXABS_32F;
    __syncthreads();

    // Tree reduction along y (stride j rows == j * blockWidth elements).
    for (int j = (blockHeight + 1) / 2; j > 0; j /= 2)
    {
        if (threadIdx.y < j)
        {
            if (max_value[max_column_value_index] < max_value[max_column_value_index + j * blockWidth])
            {
                max_value[max_column_value_index] = max_value[max_column_value_index + j * blockWidth];
            }
        }
        __syncthreads();
    }

    if (threadIdx.y == 0 && in_bounds)
    {
        const int scratchpad_memory_offset_sum = a_info.w * a_info.c;
        const int scratchpad_channel_memory_step = gridDim.y * a_info.w;
        const int scratchpad_index = scratchpad_memory_offset_sum
                                     + channel * scratchpad_channel_memory_step
                                     + column * gridDim.y + blockIdx.y;
        scratchpad_memory[scratchpad_index] = max_value[max_column_value_index];
    }
}

// Stage 2 of the per-column max reduction: one thread per (column, channel)
// scans the result_size.y partial maxima and stores the final column maximum
// at scratchpad_memory[channel * w + column].
__global__ void gpu_softmax_reduce_find_max_column_result(float* scratchpad_memory,
                                                          const dim3 result_size)
{
    const int column = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int channel = blockIdx.z * blockDim.z + threadIdx.z;

    if (column >= result_size.x || row >= result_size.y || channel >= result_size.z)
        return;

    const int scratchpad_memory_offset_sum = result_size.x * result_size.z;
    const int scratchpad_channel_memory_step = result_size.x * result_size.y;
    const int partial_base = scratchpad_memory_offset_sum
                             + channel * scratchpad_channel_memory_step
                             + column * result_size.y;

    float best = -NPP_MAXABS_32F;
    for (unsigned int i = 0; i < result_size.y; i++)
    {
        if (best < scratchpad_memory[partial_base + i])
            best = scratchpad_memory[partial_base + i];
    }
    scratchpad_memory[result_size.x * channel + column] = best;
}

// Stage 1 of the per-column sum: x -> exp(x - column_max) in place (column
// maxima read from the head of the scratchpad), then a shared-memory tree
// reduction along y produces one partial sum per block.
// Dynamic shared memory: blockDim.x * blockDim.y * blockDim.z floats.
__global__ void gpu_softmax_reduce_sum_elements_column(float* a_input,
                                                       const ncnn::CudaMatInfo a_info,
                                                       float* scratchpad_memory)
{
    extern __shared__ float sh_buffer[];
    float* sum_value = sh_buffer;

    const int column = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int channel = blockIdx.z * blockDim.z + threadIdx.z;

    const int blockWidth = blockDim.x;
    const int blockHeight = blockDim.y;
    const int channel_shared_memory_step = blockWidth * blockHeight;
    const int column_value_index = channel_shared_memory_step * threadIdx.z
                                   + threadIdx.y * blockWidth + threadIdx.x;

    const bool in_bounds =
        column < a_info.w && row < a_info.h && channel < a_info.c;

    float element = 0.0f;
    if (in_bounds)
    {
        const int scratchpad_channel_memory_step = a_info.w;
        const float max_value =
            scratchpad_memory[scratchpad_channel_memory_step * channel + column];
        float* ptr = a_input + channel * a_info.cstep + row * a_info.w + column;
        *ptr = expf(*ptr - max_value); // single-precision exponential
        element = *ptr;
    }
    // Out-of-range threads contribute 0 and still reach every barrier below.
    sum_value[column_value_index] = element;
    __syncthreads();

    for (int j = (blockHeight + 1) / 2; j > 0; j /= 2)
    {
        if (threadIdx.y < j)
        {
            sum_value[column_value_index] += sum_value[column_value_index + j * blockWidth];
        }
        __syncthreads();
    }

    if (threadIdx.y == 0 && in_bounds)
    {
        const int scratchpad_memory_offset_sum = a_info.w * a_info.c;
        const int channel_scratchpad_memory_step = gridDim.y * a_info.w;
        const int scratchpad_index = scratchpad_memory_offset_sum
                                     + channel * channel_scratchpad_memory_step
                                     + column * gridDim.y + blockIdx.y;
        scratchpad_memory[scratchpad_index] = sum_value[column_value_index];
    }
}

// Stage 2 of the per-column sum: one thread per (column, channel) adds up the
// result_size.y partial sums and stores the final column sum at
// scratchpad_memory[channel * w + column].
__global__ void gpu_softmax_reduce_sum_elements_column_result(float* scratchpad_memory,
                                                              const dim3 result_size)
{
    const int column = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int channel = blockIdx.z * blockDim.z + threadIdx.z;

    if (column >= result_size.x || row >= result_size.y || channel >= result_size.z)
        return;

    const int scratchpad_memory_offset_sum = result_size.x * result_size.z;
    const int channel_scratchpad_memory_step = result_size.x * result_size.y;
    const int partial_base = scratchpad_memory_offset_sum
                             + channel_scratchpad_memory_step * channel
                             + column * result_size.y;

    float total = 0.0f;
    for (unsigned int i = 0; i < result_size.y; i++)
    {
        total += scratchpad_memory[partial_base + i];
    }
    scratchpad_memory[result_size.x * channel + column] = total;
}

// Pass 3 (column case): divide every element by its column sum, read from the
// head of the scratchpad. One thread per element.
__global__ void gpu_softmax_divide_elements_column(float* a_input,
                                                   const ncnn::CudaMatInfo a_info,
                                                   float* scratchpad_memory)
{
    const int channel = blockIdx.z * blockDim.z + threadIdx.z;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int column = blockIdx.x * blockDim.x + threadIdx.x;

    if (column >= a_info.w || row >= a_info.h || channel >= a_info.c)
        return;

    const int channel_offset = a_info.w;
    const float sum = scratchpad_memory[channel_offset * channel + column];
    float* ptr = a_input + channel * a_info.cstep + row * a_info.w + column;
    *ptr = *ptr / sum;
}

// Channel case, pass 1: for every (x, y) position one thread serially scans
// all channels and stores the channel maximum at scratchpad[row * w + column].
// Launched with gridDim.z == 1 so exactly one thread owns each position; the
// maximum is accumulated in a register instead of re-reading global memory.
__global__ void gpu_softmax_reduce_find_max_channel(const float* a_input,
                                                    const ncnn::CudaMatInfo a_info,
                                                    float* scratchpad_memory)
{
    const int column = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int channel = blockIdx.z * blockDim.z + threadIdx.z;

    if (row >= a_info.h || column >= a_info.w || channel >= a_info.c)
        return;

    const int max_index = row * a_info.w + column;
    float best = -NPP_MAXABS_32F;
    for (int c = 0; c < a_info.c; ++c)
    {
        const float current = a_input[c * a_info.cstep + row * a_info.w + column];
        if (best < current)
            best = current;
    }
    scratchpad_memory[max_index] = best;
}

// Channel case, pass 2: x -> exp(x - channel_max), one thread per element.
__global__ void gpu_softmax_reduce_calculates_elements_channel(float* a_input,
                                                               const ncnn::CudaMatInfo a_info,
                                                               float* scratchpad_memory)
{
    const int column = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int channel = blockIdx.z * blockDim.z + threadIdx.z;

    if (column >= a_info.w || row >= a_info.h || channel >= a_info.c)
        return;

    const int index = row * a_info.w + column;
    const float max_value = scratchpad_memory[index];
    float* ptr = a_input + channel * a_info.cstep + row * a_info.w + column;
    *ptr = expf(*ptr - max_value); // single-precision exponential
}

// Channel case, pass 3a: serial per-(x, y) sum over channels, stored at
// scratchpad[row * w + column]. Launched with gridDim.z == 1 (single owner
// per position); the sum is accumulated in a register.
__global__ void gpu_softmax_reduce_sum_elements_channel(float* a_input,
                                                        const ncnn::CudaMatInfo a_info,
                                                        float* scratchpad_memory)
{
    const int column = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int channel = blockIdx.z * blockDim.z + threadIdx.z;

    if (column >= a_info.w || row >= a_info.h || channel >= a_info.c)
        return;

    const int index = row * a_info.w + column;
    float total = 0.0f;
    for (int c = 0; c < a_info.c; ++c)
    {
        total += a_input[c * a_info.cstep + row * a_info.w + column];
    }
    scratchpad_memory[index] = total;
}

// Channel case, pass 3b: divide every element by its channel sum.
__global__ void gpu_softmax_divide_elements_channel(float* a_input,
                                                    const ncnn::CudaMatInfo a_info,
                                                    float* scratchpad_memory)
{
    const int channel = blockIdx.z * blockDim.z + threadIdx.z;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int column = blockIdx.x * blockDim.x + threadIdx.x;

    if (column >= a_info.w || row >= a_info.h || channel >= a_info.c)
        return;

    const int sum_index = row * a_info.w + column;
    const int index = channel * a_info.cstep + row * a_info.w + column;
    const float sum = scratchpad_memory[sum_index];
    float* ptr = a_input + index;
    *ptr = *ptr / sum;
}

namespace ncnn {

// In-place softmax along `axis` of the blob described by a_info.
//
// a_input                     device pointer to the blob data (modified)
// a_info                      blob geometry (w, h, c, cstep, dims)
// axis                        softmax axis; negative values count from the end
// gpu_scratchpad_memory       device buffer for reduction temporaries
// gpu_scratchpad_memory_size  size of that buffer in bytes (zeroed here)
//
// Returns 0. All kernels run on the default stream; launch failures are
// surfaced through checkCudaErrors(cudaGetLastError()).
int softmax_cuda_forward_inplace(float* a_input, const ncnn::CudaMatInfo& a_info,
                                 const int axis, float* gpu_scratchpad_memory,
                                 int gpu_scratchpad_memory_size)
{
    const int positive_axis = axis < 0 ? a_info.dims + axis : axis;

    if ((a_info.dims == 1)
        || (a_info.dims == 2 && positive_axis == 1)
        || (a_info.dims == 3 && positive_axis == 2))
    {
        // Softmax along rows (the innermost dimension).
        checkCudaErrors(cudaMemset(gpu_scratchpad_memory, 0, gpu_scratchpad_memory_size));

        // Block width: w rounded up to a multiple of 64, capped at 128
        // (always a power of two, as the reduction kernels require).
        int thread_per_block_x = ((a_info.w - 1) / 64 + 1) * 64;
        if (thread_per_block_x > 128) thread_per_block_x = 128;
        int thread_per_block_y = a_info.h;
        if (thread_per_block_y > 8) thread_per_block_y = 8;
        const int thread_per_block_z = 1;
        const int total_number_of_channels = a_info.c;
        const int total_number_of_columns = a_info.w;
        const int total_number_of_rows = a_info.h;

        const dim3 block_size(thread_per_block_x, thread_per_block_y, thread_per_block_z);
        const dim3 grid_size((total_number_of_columns - 1) / thread_per_block_x + 1,
                             (total_number_of_rows - 1) / thread_per_block_y + 1,
                             (total_number_of_channels - 1) / thread_per_block_z + 1);
        const size_t reduce_shared_bytes =
            sizeof(float) * thread_per_block_x * thread_per_block_y * thread_per_block_z;

        gpu_softmax_reduce_find_max_row<<<grid_size, block_size, reduce_shared_bytes>>>(
            a_input, a_info, gpu_scratchpad_memory);

        // Second stage: one thread per (row, channel); no shared memory.
        const dim3 block_size_reduce(1, thread_per_block_y, thread_per_block_z);
        const dim3 grid_size_reduce(1,
                                    (total_number_of_rows - 1) / thread_per_block_y + 1,
                                    (total_number_of_channels - 1) / thread_per_block_z + 1);
        const dim3 result_size(grid_size.x, a_info.h, a_info.c);
        gpu_softmax_reduce_find_max_row_result<<<grid_size_reduce, block_size_reduce>>>(
            gpu_scratchpad_memory, result_size);

        gpu_softmax_reduce_sum_elements_row<<<grid_size, block_size, reduce_shared_bytes>>>(
            a_input, a_info, gpu_scratchpad_memory);
        gpu_softmax_reduce_sum_elements_row_result<<<grid_size_reduce, block_size_reduce>>>(
            gpu_scratchpad_memory, result_size);

        gpu_softmax_divide_elements_row<<<grid_size, block_size>>>(
            a_input, a_info, gpu_scratchpad_memory);
        checkCudaErrors(cudaGetLastError()); // surface any launch failure
    }
    else if ((a_info.dims == 2 && positive_axis == 0)
             || (a_info.dims == 3 && positive_axis == 1))
    {
        // Softmax along columns.
        checkCudaErrors(cudaMemset(gpu_scratchpad_memory, 0, gpu_scratchpad_memory_size));

        int thread_per_block_x = ((a_info.w - 1) / 64 + 1) * 64;
        if (thread_per_block_x > 128) thread_per_block_x = 128;
        int thread_per_block_y = ((a_info.h - 1) / 8 + 1) * 8;
        if (thread_per_block_y > 8) thread_per_block_y = 8;
        const int thread_per_block_z = 1;
        const int total_number_of_channels = a_info.c;
        const int total_number_of_columns = a_info.w;
        const int total_number_of_rows = a_info.h;

        const dim3 block_size(thread_per_block_x, thread_per_block_y, thread_per_block_z);
        const dim3 grid_size((total_number_of_columns - 1) / thread_per_block_x + 1,
                             (total_number_of_rows - 1) / thread_per_block_y + 1,
                             (total_number_of_channels - 1) / thread_per_block_z + 1);
        const size_t reduce_shared_bytes =
            sizeof(float) * thread_per_block_x * thread_per_block_y * thread_per_block_z;

        gpu_softmax_reduce_find_max_column<<<grid_size, block_size, reduce_shared_bytes>>>(
            a_input, a_info, gpu_scratchpad_memory);

        // Second stage: one thread per (column, channel); no shared memory.
        const dim3 block_size_reduce(thread_per_block_x, 1, thread_per_block_z);
        const dim3 grid_size_reduce((total_number_of_columns - 1) / thread_per_block_x + 1,
                                    1,
                                    (total_number_of_channels - 1) / thread_per_block_z + 1);
        const dim3 result_size(a_info.w, grid_size.y, a_info.c);
        gpu_softmax_reduce_find_max_column_result<<<grid_size_reduce, block_size_reduce>>>(
            gpu_scratchpad_memory, result_size);

        gpu_softmax_reduce_sum_elements_column<<<grid_size, block_size, reduce_shared_bytes>>>(
            a_input, a_info, gpu_scratchpad_memory);
        gpu_softmax_reduce_sum_elements_column_result<<<grid_size_reduce, block_size_reduce>>>(
            gpu_scratchpad_memory, result_size);

        gpu_softmax_divide_elements_column<<<grid_size, block_size>>>(
            a_input, a_info, gpu_scratchpad_memory);
        checkCudaErrors(cudaGetLastError()); // surface any launch failure
    }
    else if (a_info.dims == 3 && positive_axis == 0)
    {
        // Softmax along channels.
        checkCudaErrors(cudaMemset(gpu_scratchpad_memory, 0, gpu_scratchpad_memory_size));

        int thread_per_block_x = ((a_info.w - 1) / 64 + 1) * 64;
        if (thread_per_block_x > 128) thread_per_block_x = 128;
        int thread_per_block_y = ((a_info.h - 1) / 8 + 1) * 8;
        if (thread_per_block_y > 8) thread_per_block_y = 8;
        const int thread_per_block_z = 1;
        const int total_number_of_channels = a_info.c;
        const int total_number_of_columns = a_info.w;
        const int total_number_of_rows = a_info.h;

        const dim3 block_size(thread_per_block_x, thread_per_block_y, thread_per_block_z);

        // Max and sum passes iterate over channels inside the kernel, so they
        // launch with a single z layer; the element-wise passes cover all
        // channels in the grid.
        const dim3 grid_size((total_number_of_columns - 1) / thread_per_block_x + 1,
                             (total_number_of_rows - 1) / thread_per_block_y + 1,
                             1);
        gpu_softmax_reduce_find_max_channel<<<grid_size, block_size>>>(
            a_input, a_info, gpu_scratchpad_memory);

        const dim3 block_size_calculate(thread_per_block_x, thread_per_block_y, thread_per_block_z);
        const dim3 grid_size_calculate((total_number_of_columns - 1) / thread_per_block_x + 1,
                                       (total_number_of_rows - 1) / thread_per_block_y + 1,
                                       total_number_of_channels);
        gpu_softmax_reduce_calculates_elements_channel<<<grid_size_calculate, block_size_calculate>>>(
            a_input, a_info, gpu_scratchpad_memory);

        const dim3 block_size_sum(thread_per_block_x, thread_per_block_y, 1);
        const dim3 grid_size_sum((total_number_of_columns - 1) / thread_per_block_x + 1,
                                 (total_number_of_rows - 1) / thread_per_block_y + 1,
                                 1);
        gpu_softmax_reduce_sum_elements_channel<<<grid_size_sum, block_size_sum>>>(
            a_input, a_info, gpu_scratchpad_memory);

        const dim3 block_size_div(thread_per_block_x, thread_per_block_y, thread_per_block_z);
        const dim3 grid_size_div((total_number_of_columns - 1) / thread_per_block_x + 1,
                                 (total_number_of_rows - 1) / thread_per_block_y + 1,
                                 (total_number_of_channels - 1) / thread_per_block_z + 1);
        gpu_softmax_divide_elements_channel<<<grid_size_div, block_size_div>>>(
            a_input, a_info, gpu_scratchpad_memory);
        checkCudaErrors(cudaGetLastError()); // surface any launch failure
    }

    return 0;
}

} // namespace ncnn
the_stack
#define TPB52 512 #define TPB50 512 /* ************************ */ __constant__ const uint2 buffer[152] = { {0x749C51CE,0x4903ADFF},{0x9746DF03,0x0D95DE39},{0x27C79BCE,0x8FD19341},{0xFF352CB1,0x9A255629},{0xDF6CA7B0,0x5DB62599},{0xA9D5C434,0xEABE394C},{0x1A75B523,0x891112C7},{0x660FCC33,0xAE18A40B}, {0x9746DF03,0x0D95DE39},{0x27C79BCE,0x8FD19341},{0xFF352CB1,0x9A255629},{0xDF6CA7B0,0x5DB62599},{0xA9D5C3F4,0xEABE394C},{0x1A75B523,0x891112C7},{0x660FCC73,0x9E18A40B},{0x98173EC5,0xCAB2076D}, {0x27C79BCE,0x8FD19341},{0xFF352CB1,0x9A255629},{0xDF6CA7B0,0x5DB62599},{0xA9D5C3F4,0xEABE394C},{0x1A75B523,0x991112C7},{0x660FCC73,0x9E18A40B},{0x98173F04,0xCAB2076D},{0x749C51D0,0x4903ADFF}, {0xFF352CB1,0x9A255629},{0xDF6CA7B0,0x5DB62599},{0xA9D5C3F4,0xEABE394C},{0x1A75B523,0x991112C7},{0x660FCC33,0xAE18A40B},{0x98173F04,0xCAB2076D},{0x749C51CE,0x3903ADFF},{0x9746DF06,0x0D95DE39}, {0xDF6CA7B0,0x5DB62599},{0xA9D5C3F4,0xEABE394C},{0x1A75B523,0x991112C7},{0x660FCC33,0xAE18A40B},{0x98173EC4,0xCAB2076D},{0x749C51CE,0x3903ADFF},{0x9746DF43,0xFD95DE39},{0x27C79BD2,0x8FD19341}, {0xA9D5C3F4,0xEABE394C},{0x1A75B523,0x991112C7},{0x660FCC33,0xAE18A40B},{0x98173EC4,0xCAB2076D},{0x749C51CE,0x4903ADFF},{0x9746DF43,0xFD95DE39},{0x27C79C0E,0x8FD19341},{0xFF352CB6,0x9A255629}, {0x1A75B523,0x991112C7},{0x660FCC33,0xAE18A40B},{0x98173EC4,0xCAB2076D},{0x749C51CE,0x4903ADFF},{0x9746DF03,0x0D95DE39},{0x27C79C0E,0x8FD19341},{0xFF352CB1,0x8A255629},{0xDF6CA7B6,0x5DB62599}, {0x660FCC33,0xAE18A40B},{0x98173EC4,0xCAB2076D},{0x749C51CE,0x4903ADFF},{0x9746DF03,0x0D95DE39},{0x27C79BCE,0x8FD19341},{0xFF352CB1,0x8A255629},{0xDF6CA7F0,0x4DB62599},{0xA9D5C3FB,0xEABE394C}, {0x98173EC4,0xCAB2076D},{0x749C51CE,0x4903ADFF},{0x9746DF03,0x0D95DE39},{0x27C79BCE,0x8FD19341},{0xFF352CB1,0x9A255629},{0xDF6CA7F0,0x4DB62599},{0xA9D5C434,0xEABE394C},{0x1A75B52B,0x991112C7}, 
{0x749C51CE,0x4903ADFF},{0x9746DF03,0x0D95DE39},{0x27C79BCE,0x8FD19341},{0xFF352CB1,0x9A255629},{0xDF6CA7B0,0x5DB62599},{0xA9D5C434,0xEABE394C},{0x1A75B523,0x891112C7},{0x660FCC3C,0xAE18A40B}, {0x9746DF03,0x0D95DE39},{0x27C79BCE,0x8FD19341},{0xFF352CB1,0x9A255629},{0xDF6CA7B0,0x5DB62599},{0xA9D5C3F4,0xEABE394C},{0x1A75B523,0x891112C7},{0x660FCC73,0x9E18A40B},{0x98173ece,0xcab2076d}, {0x27C79BCE,0x8FD19341},{0xFF352CB1,0x9A255629},{0xDF6CA7B0,0x5DB62599},{0xA9D5C3F4,0xEABE394C},{0x1A75B523,0x991112C7},{0x660FCC73,0x9E18A40B},{0x98173F04,0xCAB2076D},{0x749C51D9,0x4903ADFF}, {0xFF352CB1,0x9A255629},{0xDF6CA7B0,0x5DB62599},{0xA9D5C3F4,0xEABE394C},{0x1A75B523,0x991112C7},{0x660FCC33,0xAE18A40B},{0x98173F04,0xCAB2076D},{0x749C51CE,0x3903ADFF},{0x9746DF0F,0x0D95DE39}, {0xDF6CA7B0,0x5DB62599},{0xA9D5C3F4,0xEABE394C},{0x1A75B523,0x991112C7},{0x660FCC33,0xAE18A40B},{0x98173EC4,0xCAB2076D},{0x749C51CE,0x3903ADFF},{0x9746DF43,0xFD95DE39},{0x27C79BDB,0x8FD19341}, {0xA9D5C3F4,0xEABE394C},{0x1A75B523,0x991112C7},{0x660FCC33,0xAE18A40B},{0x98173EC4,0xCAB2076D},{0x749C51CE,0x4903ADFF},{0x9746DF43,0xFD95DE39},{0x27C79C0E,0x8FD19341},{0xFF352CBF,0x9A255629}, {0x1A75B523,0x991112C7},{0x660FCC33,0xAE18A40B},{0x98173EC4,0xCAB2076D},{0x749C51CE,0x4903ADFF},{0x9746DF03,0x0D95DE39},{0x27C79C0E,0x8FD19341},{0xFF352CB1,0x8A255629},{0xDF6CA7BF,0x5DB62599}, {0x660FCC33,0xAE18A40B},{0x98173ec4,0xcab2076d},{0x749C51CE,0x4903ADFF},{0x9746DF03,0x0D95DE39},{0x27C79BCE,0x8FD19341},{0xFF352CB1,0x8A255629},{0xDF6CA7F0,0x4DB62599},{0xA9D5C404,0xEABE394C}, {0x98173ec4,0xcab2076d},{0x749C51CE,0x4903ADFF},{0x9746DF03,0x0D95DE39},{0x27C79BCE,0x8FD19341},{0xFF352CB1,0x9A255629},{0xDF6CA7F0,0x4DB62599},{0xA9D5C434,0xEABE394C},{0x1A75B534,0x991112C7}, {0x749C51CE,0x4903ADFF},{0x9746DF03,0x0D95DE39},{0x27C79BCE,0x8FD19341},{0xFF352CB1,0x9A255629},{0xDF6CA7B0,0x5DB62599},{0xA9D5C434,0xEABE394C},{0x1A75B523,0x891112C7},{0x660FCC45,0xAE18A40B} }; __global__ #if __CUDA_ARCH__ > 500 __launch_bounds__(TPB52, 3) 
#else
__launch_bounds__(TPB50, 3)
#endif
// Skein-512 over one 64-byte hash per thread, updating g_hash in place (the
// __global__/__launch_bounds__ prefix sits with the constant table above).
// When g_nonceVector is non-NULL it indirects which hash slot each thread
// processes; otherwise thread i works on slot i. Expects a 1-D launch covering
// `threads` threads.
// NOTE(review): 0x1BD11BDAA9FC1A22 below carries no ULL suffix; it still fits
// in 64 bits, but an explicit suffix would be clearer.
void quark_skein512_gpu_hash_64(const uint32_t threads, uint64_t* g_hash, const uint32_t* g_nonceVector)
{
	const uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x);
	if (thread < threads)
	{
		// Skein
		uint2 p[8], h[9];

		// Select the hash slot (8 x 64-bit words per hash).
		const uint32_t hashPosition = (g_nonceVector == NULL) ? thread : g_nonceVector[thread];
		uint64_t *Hash = &g_hash[hashPosition<<3];

		// Two 128-bit vector loads of the 64-byte message through the read-only cache.
		uint2x4 *phash = (uint2x4*)Hash;
		*(uint2x4*)&p[0] = __ldg4(&phash[0]);
		*(uint2x4*)&p[4] = __ldg4(&phash[1]);

		// Keep the plaintext words to fold back in after the block cipher (h[] is
		// reused as the key state later).
		h[0] = p[0]; h[1] = p[1]; h[2] = p[2]; h[3] = p[3];
		h[4] = p[4]; h[5] = p[5]; h[6] = p[6]; h[7] = p[7];

		// First block: 19 precomputed subkey injections (8 constant-memory words
		// each) interleaved with the TFBIGMIX8e/TFBIGMIX8o mixing macros.
		p[0] += buffer[0]; p[1] += buffer[1]; p[2] += buffer[2]; p[3] += buffer[3]; p[4] += buffer[4]; p[5] += buffer[5]; p[6] += buffer[6]; p[7] += buffer[7];
		TFBIGMIX8e();
		p[0] += buffer[8]; p[1] += buffer[9]; p[2] += buffer[10]; p[3] += buffer[11]; p[4] += buffer[12]; p[5] += buffer[13]; p[6] += buffer[14]; p[7] += buffer[15];
		TFBIGMIX8o();
		p[0] += buffer[16]; p[1] += buffer[17]; p[2] += buffer[18]; p[3] += buffer[19]; p[4] += buffer[20]; p[5] += buffer[21]; p[6] += buffer[22]; p[7] += buffer[23];
		TFBIGMIX8e();
		p[0] += buffer[24]; p[1] += buffer[25]; p[2] += buffer[26]; p[3] += buffer[27]; p[4] += buffer[28]; p[5] += buffer[29]; p[6] += buffer[30]; p[7] += buffer[31];
		TFBIGMIX8o();
		p[0] += buffer[32]; p[1] += buffer[33]; p[2] += buffer[34]; p[3] += buffer[35]; p[4] += buffer[36]; p[5] += buffer[37]; p[6] += buffer[38]; p[7] += buffer[39];
		TFBIGMIX8e();
		p[0] += buffer[40]; p[1] += buffer[41]; p[2] += buffer[42]; p[3] += buffer[43]; p[4] += buffer[44]; p[5] += buffer[45]; p[6] += buffer[46]; p[7] += buffer[47];
		TFBIGMIX8o();
		p[0] += buffer[48]; p[1] += buffer[49]; p[2] += buffer[50]; p[3] += buffer[51]; p[4] += buffer[52]; p[5] += buffer[53]; p[6] += buffer[54]; p[7] += buffer[55];
		TFBIGMIX8e();
		p[0] += buffer[56]; p[1] += buffer[57]; p[2] += buffer[58]; p[3] += buffer[59]; p[4] += buffer[60]; p[5] += buffer[61]; p[6] += buffer[62]; p[7] += buffer[63];
		TFBIGMIX8o();
		p[0] += buffer[64]; p[1] += buffer[65]; p[2] += buffer[66]; p[3] += buffer[67]; p[4] += buffer[68]; p[5] += buffer[69]; p[6] += buffer[70]; p[7] += buffer[71];
		TFBIGMIX8e();
		p[0] += buffer[72]; p[1] += buffer[73]; p[2] += buffer[74]; p[3] += buffer[75]; p[4] += buffer[76]; p[5] += buffer[77]; p[6] += buffer[78]; p[7] += buffer[79];
		TFBIGMIX8o();
		p[0] += buffer[80]; p[1] += buffer[81]; p[2] += buffer[82]; p[3] += buffer[83]; p[4] += buffer[84]; p[5] += buffer[85]; p[6] += buffer[86]; p[7] += buffer[87];
		TFBIGMIX8e();
		p[0] += buffer[88]; p[1] += buffer[89]; p[2] += buffer[90]; p[3] += buffer[91]; p[4] += buffer[92]; p[5] += buffer[93]; p[6] += buffer[94]; p[7] += buffer[95];
		TFBIGMIX8o();
		p[0] += buffer[96]; p[1] += buffer[97]; p[2] += buffer[98]; p[3] += buffer[99]; p[4] += buffer[100]; p[5] += buffer[101]; p[6] += buffer[102]; p[7] += buffer[103];
		TFBIGMIX8e();
		p[0] += buffer[104]; p[1] += buffer[105]; p[2] += buffer[106]; p[3] += buffer[107]; p[4] += buffer[108]; p[5] += buffer[109]; p[6] += buffer[110]; p[7] += buffer[111];
		TFBIGMIX8o();
		p[0] += buffer[112]; p[1] += buffer[113]; p[2] += buffer[114]; p[3] += buffer[115]; p[4] += buffer[116]; p[5] += buffer[117]; p[6] += buffer[118]; p[7] += buffer[119];
		TFBIGMIX8e();
		p[0] += buffer[120]; p[1] += buffer[121]; p[2] += buffer[122]; p[3] += buffer[123]; p[4] += buffer[124]; p[5] += buffer[125]; p[6] += buffer[126]; p[7] += buffer[127];
		TFBIGMIX8o();
		p[0] += buffer[128]; p[1] += buffer[129]; p[2] += buffer[130]; p[3] += buffer[131]; p[4] += buffer[132]; p[5] += buffer[133]; p[6] += buffer[134]; p[7] += buffer[135];
		TFBIGMIX8e();
		p[0] += buffer[136]; p[1] += buffer[137]; p[2] += buffer[138]; p[3] += buffer[139]; p[4] += buffer[140]; p[5] += buffer[141]; p[6] += buffer[142]; p[7] += buffer[143];
		TFBIGMIX8o();
		// Final subkey injection of the first block (no mix after it).
		p[0] += buffer[144]; p[1] += buffer[145]; p[2] += buffer[146]; p[3] += buffer[147]; p[4] += buffer[148]; p[5] += buffer[149]; p[6] += buffer[150]; p[7] += buffer[151];

		// Feed-forward: chaining value = ciphertext XOR plaintext.
		h[0]^= p[0]; h[1]^= p[1]; h[2]^= p[2]; h[3]^= p[3];
		h[4]^= p[4]; h[5]^= p[5]; h[6]^= p[6]; h[7]^= p[7];

		// Extended key word (parity of the eight key words XOR the key-schedule constant).
		h[8] = h[0] ^ h[1] ^ h[2] ^ h[3] ^ h[4] ^ h[5] ^ h[6] ^ h[7] ^ vectorize(0x1BD11BDAA9FC1A22);

		// Tweak words for the final (output) block.
		uint32_t t0; uint2 t1,t2;
		t0 = 8;
		t1 = vectorize(0xFF00000000000000);
		t2 = t1+t0;

		// Round 0 of the second block, written out with the subkey adds folded in.
		p[5] = h[5] + 8U;
		p[0] = h[0] + h[1];
		p[1] = ROL2(h[1], 46) ^ p[0];
		p[2] = h[2] + h[3];
		p[3] = ROL2(h[3], 36) ^ p[2];
		p[4] = h[4] + p[5];
		p[5] = ROL2(p[5], 19) ^ p[4];
		p[6] = (h[6] + h[7] + t1);
		p[7] = ROL2(h[7], 37) ^ p[6];
		p[2]+= p[1]; p[1] = ROL2(p[1], 33) ^ p[2];
		p[4]+= p[7]; p[7] = ROL2(p[7], 27) ^ p[4];
		p[6]+= p[5]; p[5] = ROL2(p[5], 14) ^ p[6];
		p[0]+= p[3]; p[3] = ROL2(p[3], 42) ^ p[0];
		p[4]+= p[1]; p[1] = ROL2(p[1], 17) ^ p[4];
		p[6]+= p[3]; p[3] = ROL2(p[3], 49) ^ p[6];
		p[0]+= p[5]; p[5] = ROL2(p[5], 36) ^ p[0];
		p[2]+= p[7]; p[7] = ROL2(p[7], 39) ^ p[2];
		p[6]+= p[1]; p[1] = ROL2(p[1], 44) ^ p[6];
		p[0]+= p[7]; p[7] = ROL2(p[7], 9) ^ p[0];
		p[2]+= p[5]; p[5] = ROL2(p[5], 54) ^ p[2];
		p[4]+= p[3]; p[3] = ROR8(p[3]) ^ p[4];

		// Remaining subkey schedule: rotate through h[0..8], with the tweak words
		// on lanes 5/6 and the round counter on lane 7.
		p[0]+= h[1]; p[1]+= h[2]; p[2]+= h[3]; p[3]+= h[4]; p[4]+= h[5]; p[5]+= h[6] + t1; p[6]+= h[7] + t2; p[7]+= h[8] + 1U;
		TFBIGMIX8o();
		p[0]+= h[2]; p[1]+= h[3]; p[2]+= h[4]; p[3]+= h[5]; p[4]+= h[6]; p[5]+= h[7] + t2; p[6]+= h[8] + t0; p[7]+= h[0] + 2U;
		TFBIGMIX8e();
		p[0]+= h[3]; p[1]+= h[4]; p[2]+= h[5]; p[3]+= h[6]; p[4]+= h[7]; p[5]+= h[8] + t0; p[6]+= h[0] + t1; p[7]+= h[1] + 3U;
		TFBIGMIX8o();
		p[0]+= h[4]; p[1]+= h[5]; p[2]+= h[6]; p[3]+= h[7]; p[4]+= h[8]; p[5]+= h[0] + t1; p[6]+= h[1] + t2; p[7]+= h[2] + 4U;
		TFBIGMIX8e();
		p[0]+= h[5]; p[1]+= h[6]; p[2]+= h[7]; p[3]+= h[8]; p[4]+= h[0]; p[5]+= h[1] + t2; p[6]+= h[2] + t0; p[7]+= h[3] + 5U;
		TFBIGMIX8o();
		p[0]+= h[6]; p[1]+= h[7]; p[2]+= h[8]; p[3]+= h[0]; p[4]+= h[1]; p[5]+= h[2] + t0; p[6]+= h[3] + t1; p[7]+= h[4] + 6U;
		TFBIGMIX8e();
		p[0]+= h[7]; p[1]+= h[8]; p[2]+= h[0]; p[3]+= h[1]; p[4]+= h[2]; p[5]+= h[3] + t1; p[6]+= h[4] + t2; p[7]+= h[5] + 7U;
		TFBIGMIX8o();
		p[0]+= h[8]; p[1]+= h[0]; p[2]+= h[1]; p[3]+= h[2]; p[4]+= h[3]; p[5]+= h[4] + t2; p[6]+= h[5] + t0; p[7]+= h[6] + 8U;
		TFBIGMIX8e();
		p[0]+= h[0]; p[1]+= h[1]; p[2]+= h[2]; p[3]+= h[3]; p[4]+= h[4]; p[5]+= h[5] + t0; p[6]+= h[6] + t1; p[7]+= h[7] + 9U;
		TFBIGMIX8o();
		p[0] = p[0] + h[1]; p[1] = p[1] + h[2]; p[2] = p[2] + h[3]; p[3] = p[3] + h[4]; p[4] = p[4] + h[5]; p[5] = p[5] + h[6] + t1; p[6] = p[6] + h[7] + t2; p[7] = p[7] + h[8] + 10U;
		TFBIGMIX8e();
		p[0] = p[0] + h[2]; p[1] = p[1] + h[3]; p[2] = p[2] + h[4]; p[3] = p[3] + h[5]; p[4] = p[4] + h[6]; p[5] = p[5] + h[7] + t2; p[6] = p[6] + h[8] + t0; p[7] = p[7] + h[0] + 11U;
		TFBIGMIX8o();
		p[0] = p[0] + h[3]; p[1] = p[1] + h[4]; p[2] = p[2] + h[5]; p[3] = p[3] + h[6]; p[4] = p[4] + h[7]; p[5] = p[5] + h[8] + t0; p[6] = p[6] + h[0] + t1; p[7] = p[7] + h[1] + 12U;
		TFBIGMIX8e();
		p[0] = p[0] + h[4]; p[1] = p[1] + h[5]; p[2] = p[2] + h[6]; p[3] = p[3] + h[7]; p[4] = p[4] + h[8]; p[5] = p[5] + h[0] + t1; p[6] = p[6] + h[1] + t2; p[7] = p[7] + h[2] + 13U;
		TFBIGMIX8o();
		p[0] = p[0] + h[5]; p[1] = p[1] + h[6]; p[2] = p[2] + h[7]; p[3] = p[3] + h[8]; p[4] = p[4] + h[0]; p[5] = p[5] + h[1] + t2; p[6] = p[6] + h[2] + t0; p[7] = p[7] + h[3] + 14U;
		TFBIGMIX8e();
		p[0] = p[0] + h[6]; p[1] = p[1] + h[7]; p[2] = p[2] + h[8]; p[3] = p[3] + h[0]; p[4] = p[4] + h[1]; p[5] = p[5] + h[2] + t0; p[6] = p[6] + h[3] + t1; p[7] = p[7] + h[4] + 15U;
		TFBIGMIX8o();
		p[0] = p[0] + h[7]; p[1] = p[1] + h[8]; p[2] = p[2] + h[0]; p[3] = p[3] + h[1]; p[4] = p[4] + h[2]; p[5] = p[5] + h[3] + t1; p[6] = p[6] + h[4] + t2; p[7] = p[7] + h[5] + 16U;
		TFBIGMIX8e();
		p[0] = p[0] + h[8]; p[1] = p[1] + h[0]; p[2] = p[2] + h[1]; p[3] = p[3] + h[2]; p[4] = p[4] + h[3]; p[5] = p[5] + h[4] + t2; p[6] = p[6] + h[5] + t0; p[7] = p[7] + h[6] + 17U;
		TFBIGMIX8o();
		p[0] = p[0] + h[0]; p[1] = p[1] + h[1]; p[2] = p[2] + h[2]; p[3] = p[3] + h[3]; p[4] = p[4] + h[4]; p[5] = p[5] + h[5] + t0; p[6] = p[6] + h[6] + t1; p[7] = p[7] + h[7] + 18U;

		// Store the 64-byte result back with two 128-bit vector stores.
		phash = (uint2x4*)p;
		uint2x4 *outpt = (uint2x4*)Hash;
		outpt[0] = phash[0];
		outpt[1] = phash[1];
	}
}

// Host-side launcher; signature continues on the next line.
__host__ void quark_skein512_cpu_hash_64(int
thr_id, uint32_t threads, uint32_t *d_nonceVector, uint32_t *d_hash)
{
	// Launcher for quark_skein512_gpu_hash_64 (signature begins on the previous
	// line). Picks the block size per SM generation, ceil-div grid over threads.
	uint32_t tpb = TPB52;
	int dev_id = device_map[thr_id];
	if (device_sm[dev_id] <= 500)
		tpb = TPB50;
	const dim3 grid((threads + tpb-1)/tpb);
	const dim3 block(tpb);
	quark_skein512_gpu_hash_64 << <grid, block >> >(threads, (uint64_t*)d_hash, d_nonceVector);
}

// Precomputed midstate/round material for the 80-byte path, uploaded by
// skein512_cpu_setBlock_80 below.
static __constant__ uint2 c_buffer[120]; // padded message (80 bytes + 72 bytes midstate + align)

__global__
#if __CUDA_ARCH__ > 500
__launch_bounds__(TPB52, 3)
#else
__launch_bounds__(TPB50, 5)
#endif
// Skein-512 over an 80-byte block header: the first 64-byte block is hashed on
// the host (setBlock_80); this kernel finishes the second block with each
// thread's nonce (startNounce + thread, byte-swapped) and the output block,
// writing 64 bytes per thread into output64.
void skein512_gpu_hash_80(uint32_t threads, uint32_t startNounce, uint64_t *output64)
{
	uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x);
	if (thread < threads)
	{
		// Skein
		uint2 h[9];
		uint2 t0, t1, t2;

		// Per-thread nonce, placed in the high half of message word 1.
		uint32_t nonce = cuda_swab32(startNounce + thread);
		uint2 nonce2 = make_uint2(c_buffer[0].x, nonce);

		uint2 p[8];
		p[1] = nonce2;

		// Chaining value from the host-computed first block.
		h[0] = c_buffer[ 1];
		h[1] = c_buffer[ 2];
		h[2] = c_buffer[ 3];
		h[3] = c_buffer[ 4];
		h[4] = c_buffer[ 5];
		h[5] = c_buffer[ 6];
		h[6] = c_buffer[ 7];
		h[7] = c_buffer[ 8];
		h[8] = c_buffer[ 9];

		// Tweak for the second (final message) block: 80 bytes processed, "final" type bits.
		t0 = vectorize(0x50ull);
		t1 = vectorize(0xB000000000000000ull);
		t2 = t0^t1;

		// Round 0 with all nonce-independent work pre-folded into c_buffer[10..19].
		p[ 1]=nonce2 + h[1]; p[ 0]= c_buffer[10] + p[ 1];
		p[ 2]=c_buffer[11];
		p[ 3]=c_buffer[12];
		p[ 4]=c_buffer[13];
		p[ 5]=c_buffer[14];
		p[ 6]=c_buffer[15];
		p[ 7]=c_buffer[16];
		// TFBIGMIX8e();  (partially precomputed: constant lanes come from c_buffer)
		p[1] = ROL2(p[1], 46) ^ p[0];
		p[2] += p[1];
		p[0] += p[3];
		p[1] = ROL2(p[1], 33) ^ p[2];
		p[3] = c_buffer[17] ^ p[0];
		p[4] += p[1];
		p[6] += p[3];
		p[0] += p[5];
		p[2] += p[7];
		p[1] = ROL2(p[1], 17) ^ p[4];
		p[3] = ROL2(p[3], 49) ^ p[6];
		p[5] = c_buffer[18] ^ p[0];
		p[7] = c_buffer[19] ^ p[2];
		p[6] += p[1];
		p[0] += p[7];
		p[2] += p[5];
		p[4] += p[3];
		p[1] = ROL2(p[1], 44) ^ p[6];
		p[7] = ROL2(p[7], 9) ^ p[0];
		p[5] = ROL2(p[5], 54) ^ p[2];
		p[3] = ROR8(p[3]) ^ p[4];

		// Rounds 1..17: rotating subkey schedule; the tweak/counter sums for lanes
		// 5..7 are precomputed in c_buffer[20..56] (note p[5]/p[7]/p[6] order).
		p[ 0]+=h[1]; p[ 1]+=h[2]; p[ 2]+=h[3]; p[ 3]+=h[4]; p[ 4]+=h[5]; p[ 5]+=c_buffer[20]; p[ 7]+=c_buffer[21]; p[ 6]+=c_buffer[22];
		TFBIGMIX8o();
		p[ 0]+=h[2]; p[ 1]+=h[3]; p[ 2]+=h[4]; p[ 3]+=h[5]; p[ 4]+=h[6]; p[ 5]+=c_buffer[22]; p[ 7]+=c_buffer[23]; p[ 6]+=c_buffer[24];
		TFBIGMIX8e();
		p[ 0]+=h[3]; p[ 1]+=h[4]; p[ 2]+=h[5]; p[ 3]+=h[6]; p[ 4]+=h[7]; p[ 5]+=c_buffer[24]; p[ 7]+=c_buffer[25]; p[ 6]+=c_buffer[26];
		TFBIGMIX8o();
		p[ 0]+=h[4]; p[ 1]+=h[5]; p[ 2]+=h[6]; p[ 3]+=h[7]; p[ 4]+=h[8]; p[ 5]+=c_buffer[26]; p[ 7]+=c_buffer[27]; p[ 6]+=c_buffer[28];
		TFBIGMIX8e();
		p[ 0]+=h[5]; p[ 1]+=h[6]; p[ 2]+=h[7]; p[ 3]+=h[8]; p[ 4]+=h[0]; p[ 5]+=c_buffer[28]; p[ 7]+=c_buffer[29]; p[ 6]+=c_buffer[30];
		TFBIGMIX8o();
		p[ 0]+=h[6]; p[ 1]+=h[7]; p[ 2]+=h[8]; p[ 3]+=h[0]; p[ 4]+=h[1]; p[ 5]+=c_buffer[30]; p[ 7]+=c_buffer[31]; p[ 6]+=c_buffer[32];
		TFBIGMIX8e();
		p[ 0]+=h[7]; p[ 1]+=h[8]; p[ 2]+=h[0]; p[ 3]+=h[1]; p[ 4]+=h[2]; p[ 5]+=c_buffer[32]; p[ 7]+=c_buffer[33]; p[ 6]+=c_buffer[34];
		TFBIGMIX8o();
		p[ 0]+=h[8]; p[ 1]+=h[0]; p[ 2]+=h[1]; p[ 3]+=h[2]; p[ 4]+=h[3]; p[ 5]+=c_buffer[34]; p[ 7]+=c_buffer[35]; p[ 6]+=c_buffer[36];
		TFBIGMIX8e();
		p[ 0]+=h[0]; p[ 1]+=h[1]; p[ 2]+=h[2]; p[ 3]+=h[3]; p[ 4]+=h[4]; p[ 5]+=c_buffer[36]; p[ 7]+=c_buffer[37]; p[ 6]+=c_buffer[38];
		TFBIGMIX8o();
		p[ 0]+=h[1]; p[ 1]+=h[2]; p[ 2]+=h[3]; p[ 3]+=h[4]; p[ 4]+=h[5]; p[ 5]+=c_buffer[38]; p[ 7]+=c_buffer[39]; p[ 6]+=c_buffer[40];
		TFBIGMIX8e();
		p[ 0]+=h[2]; p[ 1]+=h[3]; p[ 2]+=h[4]; p[ 3]+=h[5]; p[ 4]+=h[6]; p[ 5]+=c_buffer[40]; p[ 7]+=c_buffer[41]; p[ 6]+=c_buffer[42];
		TFBIGMIX8o();
		p[ 0]+=h[3]; p[ 1]+=h[4]; p[ 2]+=h[5]; p[ 3]+=h[6]; p[ 4]+=h[7]; p[ 5]+=c_buffer[42]; p[ 7]+=c_buffer[43]; p[ 6]+=c_buffer[44];
		TFBIGMIX8e();
		p[ 0]+=h[4]; p[ 1]+=h[5]; p[ 2]+=h[6]; p[ 3]+=h[7]; p[ 4]+=h[8]; p[ 5]+=c_buffer[44]; p[ 7]+=c_buffer[45]; p[ 6]+=c_buffer[46];
		TFBIGMIX8o();
		p[ 0]+=h[5]; p[ 1]+=h[6]; p[ 2]+=h[7]; p[ 3]+=h[8]; p[ 4]+=h[0]; p[ 5]+=c_buffer[46]; p[ 7]+=c_buffer[47]; p[ 6]+=c_buffer[48];
		TFBIGMIX8e();
		p[ 0]+=h[6]; p[ 1]+=h[7]; p[ 2]+=h[8]; p[ 3]+=h[0]; p[ 4]+=h[1]; p[ 5]+=c_buffer[48]; p[ 7]+=c_buffer[49]; p[ 6]+=c_buffer[50];
		TFBIGMIX8o();
		p[ 0]+=h[7]; p[ 1]+=h[8]; p[ 2]+=h[0]; p[ 3]+=h[1]; p[ 4]+=h[2]; p[ 5]+=c_buffer[50]; p[ 7]+=c_buffer[51]; p[ 6]+=c_buffer[52];
		TFBIGMIX8e();
		p[ 0]+=h[8]; p[ 1]+=h[0]; p[ 2]+=h[1]; p[ 3]+=h[2]; p[ 4]+=h[3]; p[ 5]+=c_buffer[52]; p[ 7]+=c_buffer[53]; p[ 6]+=c_buffer[54];
		TFBIGMIX8o();
		p[ 0]+=h[0]; p[ 1]+=h[1]; p[ 2]+=h[2]; p[ 3]+=h[3]; p[ 4]+=h[4]; p[ 5]+=c_buffer[54]; p[ 7]+=c_buffer[55]; p[ 6]+=c_buffer[56];

		// Feed-forward of the two message words that were not constant:
		// c_buffer[57] holds message word 8; p[1] absorbs the nonce word.
		p[0]^= c_buffer[57];
		p[1]^= nonce2;

		// Tweak for the output block: 8 bytes, "output" type.
		t0 = vectorize(8); // extra
		t1 = vectorize(0xFF00000000000000ull); // etype
		t2 = t0^t1;

		// Ciphertext becomes the key for the output transform; message is all-zero.
		h[0] = p[ 0];
		h[1] = p[ 1];
		h[2] = p[ 2];
		h[3] = p[ 3];
		h[4] = p[ 4];
		h[5] = p[ 5];
		h[6] = p[ 6];
		h[7] = p[ 7];
		// NOTE(review): 0x1BD11BDAA9FC1A22 lacks a ULL suffix (fits in 64 bits, but
		// an explicit suffix would be clearer).
		h[8] = h[0] ^ h[1] ^ h[2] ^ h[3] ^ h[4] ^ h[5] ^ h[6] ^ h[7] ^ vectorize(0x1BD11BDAA9FC1A22);
		p[ 0] = p[ 1] = p[ 2] = p[ 3] = p[ 4] =p[ 5] =p[ 6] = p[ 7] = vectorize(0);

		// Aliases expected by the TFBIG_*_UI2 round macros.
#define h0 h[0]
#define h1 h[1]
#define h2 h[2]
#define h3 h[3]
#define h4 h[4]
#define h5 h[5]
#define h6 h[6]
#define h7 h[7]
#define h8 h[8]

		// Output block: 18 Threefish-512 rounds + final key addition.
		TFBIG_4e_UI2(0);
		TFBIG_4o_UI2(1);
		TFBIG_4e_UI2(2);
		TFBIG_4o_UI2(3);
		TFBIG_4e_UI2(4);
		TFBIG_4o_UI2(5);
		TFBIG_4e_UI2(6);
		TFBIG_4o_UI2(7);
		TFBIG_4e_UI2(8);
		TFBIG_4o_UI2(9);
		TFBIG_4e_UI2(10);
		TFBIG_4o_UI2(11);
		TFBIG_4e_UI2(12);
		TFBIG_4o_UI2(13);
		TFBIG_4e_UI2(14);
		TFBIG_4o_UI2(15);
		TFBIG_4e_UI2(16);
		TFBIG_4o_UI2(17);
		TFBIG_ADDKEY_UI2(p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], h, t, 18);

		// 64-byte digest out, devectorized to plain uint64_t.
		uint64_t *outpHash = &output64[thread<<3];
#pragma unroll 8
		for (int i = 0; i < 8; i++)
			outpHash[i] = devectorize(p[i]);
	}
}

// Host precomputation for the 80-byte path: hashes the first 64-byte block of
// the header on the CPU and uploads the resulting midstate plus every
// nonce-independent round value into c_buffer.
__host__ void skein512_cpu_setBlock_80(void *pdata)
{
	uint64_t message[20];
	memcpy(&message[0], pdata, 80);

	uint64_t p[8];
	uint64_t h[9];
	uint64_t t0, t1, t2;

	// Skein-512 IV.
	h[0] = 0x4903ADFF749C51CEull;
	h[1] = 0x0D95DE399746DF03ull;
	h[2] = 0x8FD1934127C79BCEull;
	h[3] = 0x9A255629FF352CB1ull;
	h[4] = 0x5DB62599DF6CA7B0ull;
	h[5] = 0xEABE394CA9D5C3F4ull;
	h[6] = 0x991112C71A75B523ull;
	h[7] = 0xAE18A40B660FCC33ull;
	// h[8] = h[0] ^ h[1] ^ h[2] ^ h[3] ^ h[4] ^ h[5] ^ h[6] ^ h[7] ^ SPH_C64(0x1BD11BDAA9FC1A22);
	h[8] = 0xcab2076d98173ec4ULL;

	// Tweak for the first message block (64 bytes, "first" flag).
	t0 = 64; // ptr
	t1 = 0x7000000000000000ull;
	t2 = 0x7000000000000040ull;

	memcpy(&p[0], &message[0], 64);

	// First block on the host via the _PRE (uint64) round macros.
	TFBIG_4e_PRE(0);
	TFBIG_4o_PRE(1);
	TFBIG_4e_PRE(2);
	TFBIG_4o_PRE(3);
	TFBIG_4e_PRE(4);
	TFBIG_4o_PRE(5);
	TFBIG_4e_PRE(6);
	TFBIG_4o_PRE(7);
	TFBIG_4e_PRE(8);
	TFBIG_4o_PRE(9);
	TFBIG_4e_PRE(10);
	TFBIG_4o_PRE(11);
	TFBIG_4e_PRE(12);
	TFBIG_4o_PRE(13);
	TFBIG_4e_PRE(14);
	TFBIG_4o_PRE(15);
	TFBIG_4e_PRE(16);
	TFBIG_4o_PRE(17);
	TFBIG_ADDKEY_PRE(p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], h, t, 18);

	// Feed-forward into message[10..17]; message[18] carries the tweak parity word.
	message[10] = message[0] ^ p[0];
	message[11] = message[1] ^ p[1];
	message[12] = message[2] ^ p[2];
	message[13] = message[3] ^ p[3];
	message[14] = message[4] ^ p[4];
	message[15] = message[5] ^ p[5];
	message[16] = message[6] ^ p[6];
	message[17] = message[7] ^ p[7];
	message[18] = t2;

	// Staging area for c_buffer (indices mirror the kernel's c_buffer[] reads).
	uint64_t buffer[128];

	//	buffer[ 0] = message[ 8];
	buffer[ 0] = message[ 9];         // word 9 (its low half pairs with the nonce on device)
	h[0] = buffer[ 1] = message[10];  // chaining value h[0..7]
	h[1] = buffer[ 2] = message[11];
	h[2] = buffer[ 3] = message[12];
	h[3] = buffer[ 4] = message[13];
	h[4] = buffer[ 5] = message[14];
	h[5] = buffer[ 6] = message[15];
	h[6] = buffer[ 7] = message[16];
	h[7] = buffer[ 8] = message[17];
	h[8] = buffer[ 9] = h[0]^h[1]^h[2]^h[3]^h[4]^h[5]^h[6]^h[7]^0x1BD11BDAA9FC1A22ULL;

	// Tweak for the second block, matching the kernel.
	t0 = 0x50ull;
	t1 = 0xB000000000000000ull;
	t2 = t0^t1;

	// Precompute the nonce-independent part of round 0 (the kernel finishes the
	// lanes that depend on the nonce).
	p[ 0] = message[ 8] + h[0];
	p[ 2] = h[2];
	p[ 3] = h[3];
	p[ 4] = h[4];
	p[ 5] = h[5] + t0;
	p[ 6] = h[6] + t1;
	p[ 7] = h[7];
	p[2] += p[3];
	p[4] += p[5];
	p[6] += p[7];
	p[3] = ROTL64(p[3], 36) ^ p[2];
	p[5] = ROTL64(p[5], 19) ^ p[4];
	p[7] = ROTL64(p[7], 37) ^ p[6];
	p[4] += p[7];
	p[6] += p[5];
	p[7] = ROTL64(p[7], 27) ^ p[4];
	p[5] = ROTL64(p[5], 14) ^ p[6];

	buffer[10] = p[ 0];
	buffer[11] = p[ 2];
	buffer[12] = p[ 3];
	buffer[13] = p[ 4];
	buffer[14] = p[ 5];
	buffer[15] = p[ 6];
	buffer[16] = p[ 7];
	buffer[17] = ROTL64(p[3], 42);
	buffer[18] = ROTL64(p[5], 36);
	buffer[19] = ROTL64(p[7], 39);

	// Precomputed lane-5/6/7 subkey sums (key word + tweak or round counter)
	// for rounds 1..17 of the second block.
	buffer[20]=h[6]+t1;
	buffer[21]=h[8]+1;
	buffer[22]=h[7]+t2;
	buffer[23]=h[0]+2;
	buffer[24]=h[8]+t0;
	buffer[25]=h[1]+3;
	buffer[26]=h[0]+t1;
	buffer[27]=h[2]+4;
	buffer[28]=h[1]+t2;
	buffer[29]=h[3]+5;
	buffer[30]=h[2]+t0;
	buffer[31] = h[4]+6;
	buffer[32] = h[3]+t1;
	buffer[33] = h[5]+7;
	buffer[34] = h[4]+t2;
	buffer[35] = h[6]+8;
	buffer[36] = h[5]+t0;
	buffer[37] = h[7]+9;
	buffer[38] = h[6]+t1;
	buffer[39] = h[8]+10;
	buffer[40] = h[7]+t2;
	buffer[41] = h[0]+11;
	buffer[42] = h[8]+t0;
	buffer[43] = h[1]+12;
	buffer[44] = h[0]+t1;
	buffer[45] = h[2]+13;
	buffer[46] = h[1]+t2;
	buffer[47] = h[3]+14;
	buffer[48] = h[2]+t0;
	buffer[49] = h[4]+15;
	buffer[50] = h[3]+t1;
	buffer[51] = h[5]+16;
	buffer[52] = h[4]+t2;
	buffer[53] = h[6]+17;
	buffer[54] = h[5]+t0;
	buffer[55] = h[7]+18;
	buffer[56] = h[6]+t1;
	buffer[57] = message[ 8];

	// NOTE(review): sizeof(buffer) is 128 * 8 = 1024 bytes, but the c_buffer
	// symbol is 120 * sizeof(uint2) = 960 bytes; cudaMemcpyToSymbol rejects
	// copies larger than the symbol, so this call should fail — verify the
	// intended size (only buffer[0..57] is actually consumed by the kernel).
	cudaMemcpyToSymbol(c_buffer, buffer, sizeof(buffer), 0, cudaMemcpyHostToDevice);
	CUDA_SAFE_CALL(cudaGetLastError());
}

// Host-side launcher for the 80-byte kernel; block size chosen per SM version.
__host__ void skein512_cpu_hash_80(int thr_id, uint32_t threads, uint32_t startNounce, uint32_t *g_hash)
{
	uint32_t tpb = TPB52;
	int dev_id = device_map[thr_id];
	if (device_sm[dev_id] <= 500)
		tpb = TPB50;
	const dim3 grid((threads + tpb-1)/tpb);
	const dim3 block(tpb);
	uint64_t *d_hash = (uint64_t*) g_hash;
	// hash function is cut in 2 parts to reduce kernel size
	skein512_gpu_hash_80 <<< grid, block >>> (threads, startNounce, d_hash);
}
the_stack
// Blob layout helpers: a blob packs variable-length arrays back to back. Each
// array begins with a 128-byte header (GET_ARRAY_BODY skips 128 bytes) whose
// first two longs are capacity and length; `offset` is a byte offset into the
// blob. All kernels below expect a 1-D launch with at least `size` threads.
#define GET_BLOB_ADDRESS(ptr, offset) (&((ptr)[(offset)/sizeof((ptr)[0])]))
#define GET_ARRAY_CAPACITY(ptr) (((long *)(ptr))[0])
#define GET_ARRAY_LENGTH(ptr) (((long *)(ptr))[1])
#define GET_ARRAY_BODY(ptr) (&((ptr)[128/sizeof((ptr)[0])]))
#define SET_ARRAY_CAPACITY(ptr, val) { (((long *)(ptr))[0]) = (val); }
#define SET_ARRAY_LENGTH(ptr, val) { (((long *)(ptr))[1]) = (val); }

// very simple test kernel: element-wise copy input -> output
__global__ void identity(const int *input, int *output, long size) {
	const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
	if (ix < size) {
		output[ix] = input[ix];
	}
}

// very simple test kernel for int array: per row, copy one variable-length int
// array (header + body) from the input blob to the same offset in the output blob
__global__ void intArrayIdentity(const long *input, const int *inputBlob, long *output, int *outputBlob, long size) {
	const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
	if (ix < size) {
		// copy int array
		long offset = input[ix];
		const int *inArray = GET_BLOB_ADDRESS(inputBlob, offset);
		const long capacity = GET_ARRAY_CAPACITY(inArray);
		const long length = GET_ARRAY_LENGTH(inArray);
		const int *inArrayBody = GET_ARRAY_BODY(inArray);
		int *outArray = GET_BLOB_ADDRESS(outputBlob, offset);
		int *outArrayBody = GET_ARRAY_BODY(outArray);
		for (long i = 0; i < length; i++) {
			outArrayBody[i] = inArrayBody[i];
		}
		output[ix] = offset;
		SET_ARRAY_CAPACITY(outArray, capacity);
		SET_ARRAY_LENGTH(outArray, length);
	}
}

// very simple test kernel for IntDataPoint class: copies the blob-backed array
// column (x) and the scalar column (y) through unchanged
__global__ void IntDataPointIdentity(const long *inputX, const int *inputY, const int *inputBlob, long *outputX, int *outputY, int *outputBlob, long size) {
	const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
	if (ix < size) {
		// copy int array
		long offset = inputX[ix];
		const int *inArray = GET_BLOB_ADDRESS(inputBlob, offset);
		const long capacity = GET_ARRAY_CAPACITY(inArray);
		const long length = GET_ARRAY_LENGTH(inArray);
		const int *inArrayBody = GET_ARRAY_BODY(inArray);
		int *outArray = GET_BLOB_ADDRESS(outputBlob, offset);
		int *outArrayBody = GET_ARRAY_BODY(outArray);
		for (long i = 0; i < length; i++) {
			outArrayBody[i] = inArrayBody[i];
		}
		outputX[ix] = offset;
		SET_ARRAY_CAPACITY(outArray, capacity);
		SET_ARRAY_LENGTH(outArray, length);
		// copy int scalar value
		outputY[ix] = inputY[ix];
	}
}

// very simple test kernel for int array with free var: adds the broadcast
// vector inFreeArray element-wise to each row's array
// NOTE(review): assumes every row's length <= len(inFreeArray) — confirm at call site.
__global__ void intArrayAdd(const long *input, const int *inputBlob, long *output, int *outputBlob, long size, const int *inFreeArray) {
	const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
	if (ix < size) {
		// copy int array
		long offset = input[ix];
		const int *inArray = GET_BLOB_ADDRESS(inputBlob, offset);
		const long capacity = GET_ARRAY_CAPACITY(inArray);
		const long length = GET_ARRAY_LENGTH(inArray);
		const int *inArrayBody = GET_ARRAY_BODY(inArray);
		int *outArray = GET_BLOB_ADDRESS(outputBlob, offset);
		int *outArrayBody = GET_ARRAY_BODY(outArray);
		for (long i = 0; i < length; i++) {
			outArrayBody[i] = inArrayBody[i] + inFreeArray[i];
		}
		output[ix] = offset;
		SET_ARRAY_CAPACITY(outArray, capacity);
		SET_ARRAY_LENGTH(outArray, length);
	}
}

// test kernel for multiple input columns: len = sqrt(x^2 + y^2) per row
__global__ void vectorLength(const double *x, const double *y, double *len, long size) {
	const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
	if (ix < size) {
		len[ix] = sqrt(x[ix] * x[ix] + y[ix] * y[ix]);
	}
}

// test kernel for multiple input and multiple output columns, with different types
__global__ void plusMinus(const double *base, const float *deviation, double *a, float *b, long size) {
	const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
	if (ix < size) {
		a[ix] = base[ix] - deviation[ix];
		b[ix] = base[ix] + deviation[ix];
	}
}

// test kernel for two const arguments: y = a + b * x
__global__ void applyLinearFunction(const short *x, short *y, long size, short a, short b) {
	const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
	if (ix < size) {
		y[ix] = a + b * x[ix];
	}
}

// test kernel for custom number of blocks + const argument
// manual SIMD, to be ran on size / 8 threads, assumes size % 8 == 0
// note that key is
// reversed, since it's little endian
// (comment continues from the previous line: XORs 8 bytes per thread against `key`)
__global__ void blockXOR(const char *input, char *output, long size, long key) {
	const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
	if (ix * 8 < size) {
		((long *)output)[ix] = ((const long *)input)[ix] ^ key;
	}
}

// another simple test kernel
// NOTE(review): index is 32-bit here (unlike the other kernels) — would overflow
// for sizes beyond INT_MAX; confirm intended.
__global__ void multiplyBy2(int *in, int *out, long size) {
	const int ix = threadIdx.x + blockIdx.x * blockDim.x;
	if (ix < size) {
		out[ix] = in[ix] * 2;
	}
}

// another simple test kernel: doubles into out and writes the result back to in
__global__ void multiplyBy2_self(int *in, int *out, long size) {
	const int ix = threadIdx.x + blockIdx.x * blockDim.x;
	if (ix < size) {
		out[ix] = in[ix] * 2;
		in[ix] = out[ix];
	}
}

// test reduce kernel that sums elements. Two-stage reduction: stage 0 strides
// the input with a fixed jump of 64*256 (must equal the total thread count —
// see the assert) and writes partial sums back into input[0..jump); a later
// stage-!=0 launch has thread 0 fold those partials into output[0].
__global__ void sum(int *input, int *output, long size, int stage, int totalStages) {
	const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
	const int jump = 64 * 256;
	if (stage == 0) {
		if (ix < size) {
			assert(jump == blockDim.x * gridDim.x);
			int result = 0;
			for (long i = ix; i < size; i += jump) {
				result += input[i];
			}
			input[ix] = result;   // partial sum stored in place
		}
	} else if (ix == 0) {
		const long count = (size < (long)jump) ? size : (long)jump;
		int result = 0;
		for (long i = 0; i < count; ++i) {
			result += input[i];
		}
		output[0] = result;
	}
}

// test reduce kernel that sums elements: same two-stage scheme, but element-wise
// over blob-backed int arrays. Stage 0 accumulates rows ix+jump, ix+2*jump, ...
// into row ix's own array — note it casts away const and mutates the INPUT blob.
__global__ void intArraySum(const long *input, const int *inputBlob, long *output, int *outputBlob, long size, int stage, int totalStages) {
	const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
	const int jump = 64 * 256;
	if (stage == 0) {
		if (ix < size) {
			assert(jump == blockDim.x * gridDim.x);
			const int *accArray = GET_BLOB_ADDRESS(inputBlob, input[ix]);
			// const_cast works here because GET_ARRAY_BODY expands with outer parens.
			int *accArrayBody = const_cast<int *>GET_ARRAY_BODY(accArray);
			for (long i = ix + jump; i < size; i += jump) {
				long offset = input[i];
				const int *inArray = GET_BLOB_ADDRESS(inputBlob, offset);
				const long length = GET_ARRAY_LENGTH(inArray);
				const int *inArrayBody = GET_ARRAY_BODY(inArray);
				for (long j = 0; j < length; j++) {
					accArrayBody[j] += inArrayBody[j];
				}
			}
		}
	} else if (ix == 0) {
		// Final stage: thread 0 folds the first min(size, jump) partial rows into
		// the output array at input[0]'s offset.
		const long count = (size < (long)jump) ? size : (long)jump;
		int *outArray = GET_BLOB_ADDRESS(outputBlob, input[ix]);
		int *outArrayBody = GET_ARRAY_BODY(outArray);
		long capacity = 0, length = 0;
		for (long i = 0; i < count; i++) {
			const long offset = input[i];
			const int *inArray = GET_BLOB_ADDRESS(inputBlob, offset);
			capacity = GET_ARRAY_CAPACITY(inArray);
			length = GET_ARRAY_LENGTH(inArray);
			const int *inArrayBody = GET_ARRAY_BODY(inArray);
			if (i == 0) {
				// zero the accumulator row before the first add
				for (long j = 0; j < length; j++) {
					outArrayBody[j] = 0;
				}
			}
			for (long j = 0; j < length; j++) {
				outArrayBody[j] += inArrayBody[j];
			}
		}
		output[ix] = 0;
		SET_ARRAY_CAPACITY(outArray, capacity);
		SET_ARRAY_LENGTH(outArray, length);
	}
}

// map for DataPoint class: per row, out[i] = in[i] + inFreeArray[i] over the
// blob-backed double array (inputY is unused here; body continues on next line)
__global__ void DataPointMap(const long *inputX, const int *inputY, const double *inputBlob, long *output, double *outputBlob, long size, const double *inFreeArray) {
	const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
	if (ix < size) {
		// copy int array
		long offset = inputX[ix];
		const double *inArray = GET_BLOB_ADDRESS(inputBlob, offset);
		const long capacity =
		GET_ARRAY_CAPACITY(inArray);
		const long length = GET_ARRAY_LENGTH(inArray);
		const double *inArrayBody = GET_ARRAY_BODY(inArray);
		double *outArray = GET_BLOB_ADDRESS(outputBlob, offset);
		double *outArrayBody = GET_ARRAY_BODY(outArray);
		for (long i = 0; i < length; i++) {
			outArrayBody[i] = inArrayBody[i] + inFreeArray[i];
		}
		output[ix] = offset;
		SET_ARRAY_CAPACITY(outArray, capacity);
		SET_ARRAY_LENGTH(outArray, length);
	}
}

// reduce for DataPoint class: double-array analogue of intArraySum. Stage 0
// accumulates strided rows into row ix's own array (casts away const and
// mutates the input blob); the final stage folds the first min(size, jump)
// rows into the output array.
__global__ void DataPointReduce(const long *input, const double *inputBlob, long *output, double *outputBlob, long size, int stage, int totalStages) {
	const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
	const int jump = 64 * 256;
	if (stage == 0) {
		if (ix < size) {
			assert(jump == blockDim.x * gridDim.x);
			const double *accArray = GET_BLOB_ADDRESS(inputBlob, input[ix]);
			// const_cast works because GET_ARRAY_BODY expands with outer parens.
			double *accArrayBody = const_cast<double *>GET_ARRAY_BODY(accArray);
			for (long i = ix + jump; i < size; i += jump) {
				long offset = input[i];
				const double *inArray = GET_BLOB_ADDRESS(inputBlob, offset);
				const long length = GET_ARRAY_LENGTH(inArray);
				const double *inArrayBody = GET_ARRAY_BODY(inArray);
				for (long j = 0; j < length; j++) {
					accArrayBody[j] += inArrayBody[j];
				}
			}
		}
	} else if (ix == 0) {
		const long count = (size < (long)jump) ? size : (long)jump;
		double *outArray = GET_BLOB_ADDRESS(outputBlob, input[ix]);
		double *outArrayBody = GET_ARRAY_BODY(outArray);
		long capacity = 0, length = 0;
		for (long i = 0; i < count; i++) {
			const long offset = input[i];
			const double *inArray = GET_BLOB_ADDRESS(inputBlob, offset);
			capacity = GET_ARRAY_CAPACITY(inArray);
			length = GET_ARRAY_LENGTH(inArray);
			const double *inArrayBody = GET_ARRAY_BODY(inArray);
			if (i == 0) {
				// zero the accumulator row before the first add
				for (long j = 0; j < length; j++) {
					outArrayBody[j] = 0;
				}
			}
			for (long j = 0; j < length; j++) {
				outArrayBody[j] += inArrayBody[j];
			}
		}
		output[ix] = 0;
		SET_ARRAY_CAPACITY(outArray, capacity);
		SET_ARRAY_LENGTH(outArray, length);
	}
}

// map for Logistic regression
// dot product of two length-n double vectors
__device__ double sdotvv(const double * __restrict__ x, const double * __restrict__ y, int n) {
	double ans = 0.0;
	for(int i = 0; i < n; i++) {
		ans += x[i] * y[i];
	}
	return ans;
}

// result = x * c (vector-scalar multiply)
__device__ void dmulvs(double *result, const double * __restrict__ x, double c, int n) {
	for(int i = 0; i < n; i++) {
		result[i] = x[i] * c;
	}
}

// logistic-regression gradient contribution for one sample:
// result = x * ((1 / (1 + exp(-y * <w, x>)) - 1) * y)
// NOTE(review): the 1 / (1 + ...) literals are int-typed but promote to double
// via the exp() operand — fine, though 1.0 would be clearer.
__device__ void map(double *result, const double * __restrict__ x, double y, const double * __restrict__ w, int n) {
	dmulvs(result, x, (1 / (1 + exp(-y * (sdotvv(w, x, n)))) - 1) * y, n);
}

// LR map kernel: applies map() to each row's blob-backed feature vector against
// the weight vector inputW (body continues on the next line)
__global__ void LRMap(const long * __restrict__ inputX, const double * __restrict__ inputY, const double * __restrict__ inputBlob, long *output, double *outputBlob, long size, const double * __restrict__ inputW) {
	const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
	if (ix < size) {
		// copy int array
		long offset = inputX[ix];
		const double *inArray = GET_BLOB_ADDRESS(inputBlob, offset);
		const long capacity = GET_ARRAY_CAPACITY(inArray);
		const long length = GET_ARRAY_LENGTH(inArray);
		const double * inArrayBody = GET_ARRAY_BODY(inArray);
		double *outArray = GET_BLOB_ADDRESS(outputBlob, offset);
		double *outArrayBody = GET_ARRAY_BODY(outArray);
		map(outArrayBody, inArrayBody, inputY[ix], inputW, length);
		output[ix] = offset;
		SET_ARRAY_CAPACITY(outArray, capacity);
		SET_ARRAY_LENGTH(outArray, length);
	}
}

#define WARPSIZE 32

// Software double atomicAdd via a CAS loop on the 64-bit bit pattern — the
// standard emulation for devices without native double atomicAdd (pre-SM60).
// Returns the previous value at *address.
__device__ inline double atomicAddDouble(double *address, double val) {
	unsigned long long int *address_as_ull = (unsigned long long int *)address;
	unsigned long long int old = *address_as_ull, assumed;
	do {
		assumed = old;
		old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed)));
	} while (assumed != old);
	return __longlong_as_double(old);
}

#if (__CUDA_ARCH__ >= 300)
// Warp shuffle of a double by splitting it into two 32-bit halves.
// NOTE(review): uses the legacy mask-less __shfl, which was removed for sm_70+;
// porting to Volta or newer requires __shfl_sync with an explicit mask.
__device__ inline double __shfl_double(double d, int lane) {
	// Split the double number into 2 32b registers.
	int lo, hi;
	asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(d));
	// Shuffle the two 32b registers.
	lo = __shfl(lo, lane);
	hi = __shfl(hi, lane);
	// Recreate the 64b number.
	asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d) : "r"(lo), "r"(hi));
	return d;
}

// Butterfly-style warp sum of a double: after the loop every lane holds the
// warp total (the (i + offset) % WARPSIZE rotation visits all lanes).
__device__ inline double warpReduceSum(double val) {
	int i = blockIdx.x * blockDim.x + threadIdx.x;
#pragma unroll
	for (int offset = WARPSIZE / 2; offset > 0; offset /= 2) {
		val += __shfl_double(val, (i + offset) % WARPSIZE);
	}
	return val;
}

// Warp shuffle of a double4, component-wise (same legacy __shfl caveat as above).
__device__ inline double4 __shfl_double4(double4 d, int lane) {
	// Split the double number into 2 32b registers.
	int lox, loy, loz, low, hix, hiy, hiz, hiw;
	asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lox), "=r"(hix) : "d"(d.x));
	asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(loy), "=r"(hiy) : "d"(d.y));
	asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(loz), "=r"(hiz) : "d"(d.z));
	asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(low), "=r"(hiw) : "d"(d.w));
	// Shuffle the two 32b registers.
	lox = __shfl(lox, lane);
	hix = __shfl(hix, lane);
	loy = __shfl(loy, lane);
	hiy = __shfl(hiy, lane);
	loz = __shfl(loz, lane);
	hiz = __shfl(hiz, lane);
	low = __shfl(low, lane);
	hiw = __shfl(hiw, lane);
	// Recreate the 64b number.
	asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d.x) : "r"(lox), "r"(hix));
	asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d.y) : "r"(loy), "r"(hiy));
	asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d.z) : "r"(loz), "r"(hiz));
	asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d.w) : "r"(low), "r"(hiw));
	return d;
}

// Warp sum of four doubles at once (vectorized analogue of warpReduceSum).
__device__ inline double4 warpReduceVSum(double4 val4) {
	int i = blockIdx.x * blockDim.x + threadIdx.x;
#pragma unroll
	for (int offset = WARPSIZE / 2; offset > 0; offset /= 2) {
		double4 shiftedVal4 = __shfl_double4(val4, (i + offset) % WARPSIZE);
		val4.x += shiftedVal4.x;
		val4.y += shiftedVal4.y;
		val4.z += shiftedVal4.z;
		val4.w += shiftedVal4.w;
	}
	return val4;
}

// Grid-stride sum of component i across all blob-backed rows; warp-reduces the
// per-thread partials and has lane 0 of each warp atomically add into *out.
__device__ double* deviceReduceKernel(const long * __restrict__ input, const double * __restrict__ inputBlob, double *out, long i, long n) {
	double sum = 0;
	for (long idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += blockDim.x * gridDim.x) {
		const long offset = input[idx];
		const double * __restrict__ inArray = GET_BLOB_ADDRESS(inputBlob, offset);
		const double * __restrict__ inArrayBody = GET_ARRAY_BODY(inArray);
		sum += inArrayBody[i];
	}
	sum = warpReduceSum(sum);
	if ((threadIdx.x & (WARPSIZE - 1)) == 0) {
		atomicAddDouble(out, sum);
	}
	return out;
}

// Column-wise sum of all rows into outputArrayBody[0..length): processes four
// components per pass (double4) and falls back to the scalar kernel for the
// remainder. ("Kernal" spelling kept — it is the published interface name.)
__device__ void deviceReduceArrayKernal(const long * __restrict__ input, const double * __restrict__ inputBlob, double *outputArrayBody, long length, long n) {
	long i = 0;
	// unrolled version
	while ((length - i) >= 4) {
		double4 sum4;
		sum4.x = 0;
		sum4.y = 0;
		sum4.z = 0;
		sum4.w = 0;
		for (long idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += blockDim.x * gridDim.x) {
			const long offset = input[idx];
			const double * __restrict__ inArray = GET_BLOB_ADDRESS(inputBlob, offset);
			const double * __restrict__ inArrayBody = GET_ARRAY_BODY(inArray);
			sum4.x += inArrayBody[i];
			sum4.y += inArrayBody[i+1];
			sum4.z += inArrayBody[i+2];
			sum4.w += inArrayBody[i+3];
		}
		sum4 = warpReduceVSum(sum4);
		double *outx = &outputArrayBody[i];
		double *outy = &outputArrayBody[i+1];
		double *outz = &outputArrayBody[i+2];
		double *outw = &outputArrayBody[i+3];
		if ((threadIdx.x & (WARPSIZE - 1)) == 0) {
			atomicAddDouble(outx, sum4.x);
			atomicAddDouble(outy, sum4.y);
			atomicAddDouble(outz, sum4.z);
			atomicAddDouble(outw, sum4.w);
		}
		i += 4;
	}
	for (; i < length; i++) {
		deviceReduceKernel(input, inputBlob, &outputArrayBody[i], i, n);
	}
}
#endif

// LR reduce kernel: sums all rows' gradient vectors into one output array.
// SM30+: every thread cooperates via the warp-shuffle reduction above;
// otherwise a serial fallback on thread 0.
// NOTE(review): on the SM30+ path there is no grid-wide barrier between the
// zeroing of outArrayBody and the atomic accumulation, and every thread writes
// output[0]/the header concurrently — this relies on launch/scheduling
// assumptions that should be verified against the caller.
__global__ void LRReduce(const long * __restrict__ input, const double * __restrict__ inputBlob, long *output, double *outputBlob, long size, int stage, int totalStages) {
	int idx = blockDim.x * blockIdx.x + threadIdx.x;
#if (__CUDA_ARCH__ >= 300)
	if ((stage == 0) && (idx < size)) {
		const double * __restrict__ inArray = GET_BLOB_ADDRESS(inputBlob, input[idx]);
		const long inArrayCapacity = GET_ARRAY_CAPACITY(inArray);
		const long inArrayLength = GET_ARRAY_LENGTH(inArray);
		output[0] = 0;
		double *outArray = GET_BLOB_ADDRESS(outputBlob, output[0]);
		double *outArrayBody = GET_ARRAY_BODY(outArray);
		if (idx < inArrayLength) {
			outArrayBody[idx] = 0;
		}
		deviceReduceArrayKernal(input, inputBlob, outArrayBody, inArrayLength, size);
		SET_ARRAY_CAPACITY(outArray, inArrayCapacity);
		SET_ARRAY_LENGTH(outArray, inArrayLength);
	}
#else
	// Serial fallback: thread 0 folds every row into the output array.
	if ((stage == 0) && (idx == 0)) {
		output[idx] = 0;
		double *outArray = GET_BLOB_ADDRESS(outputBlob, output[idx]);
		double *outArrayBody = GET_ARRAY_BODY(outArray);
		long capacity = 0, length = 0;
		for (long i = 0; i < size; i++) {
			long offset = input[i];
			const double *inArray = GET_BLOB_ADDRESS(inputBlob, offset);
			capacity = GET_ARRAY_CAPACITY(inArray);
			length = GET_ARRAY_LENGTH(inArray);
			const double * __restrict__ inArrayBody = GET_ARRAY_BODY(inArray);
			if (i == 0) {
				// zero the accumulator row before the first add
				for (long j = 0; j < length; j++) {
					outArrayBody[j] = 0;
				}
			}
			for (long j = 0; j < length; j++) {
				outArrayBody[j] += inArrayBody[j];
			}
		}
		SET_ARRAY_CAPACITY(outArray, capacity);
		SET_ARRAY_LENGTH(outArray, length);
	}
#endif
}
the_stack
/* * Copyright (c) 2020-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES * SPDX-License-Identifier: BSD-3-Clause */ // Visualizing and Communicating Errors in Rendered Images // Ray Tracing Gems II, 2021, // by Pontus Andersson, Jim Nilsson, and Tomas Akenine-Moller. // Pointer to the chapter: https://research.nvidia.com/publication/2021-08_Visualizing-and-Communicating. 
// Visualizing Errors in Rendered High Dynamic Range Images // Eurographics 2021, // by Pontus Andersson, Jim Nilsson, Peter Shirley, and Tomas Akenine-Moller. // Pointer to the paper: https://research.nvidia.com/publication/2021-05_HDR-FLIP. // FLIP: A Difference Evaluator for Alternating Images // High Performance Graphics 2020, // by Pontus Andersson, Jim Nilsson, Tomas Akenine-Moller, // Magnus Oskarsson, Kalle Astrom, and Mark D. Fairchild. // Pointer to the paper: https://research.nvidia.com/publication/2020-07_FLIP. // Code by Pontus Andersson, Jim Nilsson, and Tomas Akenine-Moller. #include "color.cuh" namespace FLIP { static const color3 MapMagma[256] = { {0.001462f, 0.000466f, 0.013866f}, {0.002258f, 0.001295f, 0.018331f}, {0.003279f, 0.002305f, 0.023708f}, {0.004512f, 0.003490f, 0.029965f}, {0.005950f, 0.004843f, 0.037130f}, {0.007588f, 0.006356f, 0.044973f}, {0.009426f, 0.008022f, 0.052844f}, {0.011465f, 0.009828f, 0.060750f}, {0.013708f, 0.011771f, 0.068667f}, {0.016156f, 0.013840f, 0.076603f}, {0.018815f, 0.016026f, 0.084584f}, {0.021692f, 0.018320f, 0.092610f}, {0.024792f, 0.020715f, 0.100676f}, {0.028123f, 0.023201f, 0.108787f}, {0.031696f, 0.025765f, 0.116965f}, {0.035520f, 0.028397f, 0.125209f}, {0.039608f, 0.031090f, 0.133515f}, {0.043830f, 0.033830f, 0.141886f}, {0.048062f, 0.036607f, 0.150327f}, {0.052320f, 0.039407f, 0.158841f}, {0.056615f, 0.042160f, 0.167446f}, {0.060949f, 0.044794f, 0.176129f}, {0.065330f, 0.047318f, 0.184892f}, {0.069764f, 0.049726f, 0.193735f}, {0.074257f, 0.052017f, 0.202660f}, {0.078815f, 0.054184f, 0.211667f}, {0.083446f, 0.056225f, 0.220755f}, {0.088155f, 0.058133f, 0.229922f}, {0.092949f, 0.059904f, 0.239164f}, {0.097833f, 0.061531f, 0.248477f}, {0.102815f, 0.063010f, 0.257854f}, {0.107899f, 0.064335f, 0.267289f}, {0.113094f, 0.065492f, 0.276784f}, {0.118405f, 0.066479f, 0.286321f}, {0.123833f, 0.067295f, 0.295879f}, {0.129380f, 0.067935f, 0.305443f}, {0.135053f, 0.068391f, 0.315000f}, {0.140858f, 0.068654f, 
0.324538f}, {0.146785f, 0.068738f, 0.334011f}, {0.152839f, 0.068637f, 0.343404f}, {0.159018f, 0.068354f, 0.352688f}, {0.165308f, 0.067911f, 0.361816f}, {0.171713f, 0.067305f, 0.370771f}, {0.178212f, 0.066576f, 0.379497f}, {0.184801f, 0.065732f, 0.387973f}, {0.191460f, 0.064818f, 0.396152f}, {0.198177f, 0.063862f, 0.404009f}, {0.204935f, 0.062907f, 0.411514f}, {0.211718f, 0.061992f, 0.418647f}, {0.218512f, 0.061158f, 0.425392f}, {0.225302f, 0.060445f, 0.431742f}, {0.232077f, 0.059889f, 0.437695f}, {0.238826f, 0.059517f, 0.443256f}, {0.245543f, 0.059352f, 0.448436f}, {0.252220f, 0.059415f, 0.453248f}, {0.258857f, 0.059706f, 0.457710f}, {0.265447f, 0.060237f, 0.461840f}, {0.271994f, 0.060994f, 0.465660f}, {0.278493f, 0.061978f, 0.469190f}, {0.284951f, 0.063168f, 0.472451f}, {0.291366f, 0.064553f, 0.475462f}, {0.297740f, 0.066117f, 0.478243f}, {0.304081f, 0.067835f, 0.480812f}, {0.310382f, 0.069702f, 0.483186f}, {0.316654f, 0.071690f, 0.485380f}, {0.322899f, 0.073782f, 0.487408f}, {0.329114f, 0.075972f, 0.489287f}, {0.335308f, 0.078236f, 0.491024f}, {0.341482f, 0.080564f, 0.492631f}, {0.347636f, 0.082946f, 0.494121f}, {0.353773f, 0.085373f, 0.495501f}, {0.359898f, 0.087831f, 0.496778f}, {0.366012f, 0.090314f, 0.497960f}, {0.372116f, 0.092816f, 0.499053f}, {0.378211f, 0.095332f, 0.500067f}, {0.384299f, 0.097855f, 0.501002f}, {0.390384f, 0.100379f, 0.501864f}, {0.396467f, 0.102902f, 0.502658f}, {0.402548f, 0.105420f, 0.503386f}, {0.408629f, 0.107930f, 0.504052f}, {0.414709f, 0.110431f, 0.504662f}, {0.420791f, 0.112920f, 0.505215f}, {0.426877f, 0.115395f, 0.505714f}, {0.432967f, 0.117855f, 0.506160f}, {0.439062f, 0.120298f, 0.506555f}, {0.445163f, 0.122724f, 0.506901f}, {0.451271f, 0.125132f, 0.507198f}, {0.457386f, 0.127522f, 0.507448f}, {0.463508f, 0.129893f, 0.507652f}, {0.469640f, 0.132245f, 0.507809f}, {0.475780f, 0.134577f, 0.507921f}, {0.481929f, 0.136891f, 0.507989f}, {0.488088f, 0.139186f, 0.508011f}, {0.494258f, 0.141462f, 0.507988f}, {0.500438f, 0.143719f, 
0.507920f}, {0.506629f, 0.145958f, 0.507806f}, {0.512831f, 0.148179f, 0.507648f}, {0.519045f, 0.150383f, 0.507443f}, {0.525270f, 0.152569f, 0.507192f}, {0.531507f, 0.154739f, 0.506895f}, {0.537755f, 0.156894f, 0.506551f}, {0.544015f, 0.159033f, 0.506159f}, {0.550287f, 0.161158f, 0.505719f}, {0.556571f, 0.163269f, 0.505230f}, {0.562866f, 0.165368f, 0.504692f}, {0.569172f, 0.167454f, 0.504105f}, {0.575490f, 0.169530f, 0.503466f}, {0.581819f, 0.171596f, 0.502777f}, {0.588158f, 0.173652f, 0.502035f}, {0.594508f, 0.175701f, 0.501241f}, {0.600868f, 0.177743f, 0.500394f}, {0.607238f, 0.179779f, 0.499492f}, {0.613617f, 0.181811f, 0.498536f}, {0.620005f, 0.183840f, 0.497524f}, {0.626401f, 0.185867f, 0.496456f}, {0.632805f, 0.187893f, 0.495332f}, {0.639216f, 0.189921f, 0.494150f}, {0.645633f, 0.191952f, 0.492910f}, {0.652056f, 0.193986f, 0.491611f}, {0.658483f, 0.196027f, 0.490253f}, {0.664915f, 0.198075f, 0.488836f}, {0.671349f, 0.200133f, 0.487358f}, {0.677786f, 0.202203f, 0.485819f}, {0.684224f, 0.204286f, 0.484219f}, {0.690661f, 0.206384f, 0.482558f}, {0.697098f, 0.208501f, 0.480835f}, {0.703532f, 0.210638f, 0.479049f}, {0.709962f, 0.212797f, 0.477201f}, {0.716387f, 0.214982f, 0.475290f}, {0.722805f, 0.217194f, 0.473316f}, {0.729216f, 0.219437f, 0.471279f}, {0.735616f, 0.221713f, 0.469180f}, {0.742004f, 0.224025f, 0.467018f}, {0.748378f, 0.226377f, 0.464794f}, {0.754737f, 0.228772f, 0.462509f}, {0.761077f, 0.231214f, 0.460162f}, {0.767398f, 0.233705f, 0.457755f}, {0.773695f, 0.236249f, 0.455289f}, {0.779968f, 0.238851f, 0.452765f}, {0.786212f, 0.241514f, 0.450184f}, {0.792427f, 0.244242f, 0.447543f}, {0.798608f, 0.247040f, 0.444848f}, {0.804752f, 0.249911f, 0.442102f}, {0.810855f, 0.252861f, 0.439305f}, {0.816914f, 0.255895f, 0.436461f}, {0.822926f, 0.259016f, 0.433573f}, {0.828886f, 0.262229f, 0.430644f}, {0.834791f, 0.265540f, 0.427671f}, {0.840636f, 0.268953f, 0.424666f}, {0.846416f, 0.272473f, 0.421631f}, {0.852126f, 0.276106f, 0.418573f}, {0.857763f, 0.279857f, 
0.415496f}, {0.863320f, 0.283729f, 0.412403f}, {0.868793f, 0.287728f, 0.409303f}, {0.874176f, 0.291859f, 0.406205f}, {0.879464f, 0.296125f, 0.403118f}, {0.884651f, 0.300530f, 0.400047f}, {0.889731f, 0.305079f, 0.397002f}, {0.894700f, 0.309773f, 0.393995f}, {0.899552f, 0.314616f, 0.391037f}, {0.904281f, 0.319610f, 0.388137f}, {0.908884f, 0.324755f, 0.385308f}, {0.913354f, 0.330052f, 0.382563f}, {0.917689f, 0.335500f, 0.379915f}, {0.921884f, 0.341098f, 0.377376f}, {0.925937f, 0.346844f, 0.374959f}, {0.929845f, 0.352734f, 0.372677f}, {0.933606f, 0.358764f, 0.370541f}, {0.937221f, 0.364929f, 0.368567f}, {0.940687f, 0.371224f, 0.366762f}, {0.944006f, 0.377643f, 0.365136f}, {0.947180f, 0.384178f, 0.363701f}, {0.950210f, 0.390820f, 0.362468f}, {0.953099f, 0.397563f, 0.361438f}, {0.955849f, 0.404400f, 0.360619f}, {0.958464f, 0.411324f, 0.360014f}, {0.960949f, 0.418323f, 0.359630f}, {0.963310f, 0.425390f, 0.359469f}, {0.965549f, 0.432519f, 0.359529f}, {0.967671f, 0.439703f, 0.359810f}, {0.969680f, 0.446936f, 0.360311f}, {0.971582f, 0.454210f, 0.361030f}, {0.973381f, 0.461520f, 0.361965f}, {0.975082f, 0.468861f, 0.363111f}, {0.976690f, 0.476226f, 0.364466f}, {0.978210f, 0.483612f, 0.366025f}, {0.979645f, 0.491014f, 0.367783f}, {0.981000f, 0.498428f, 0.369734f}, {0.982279f, 0.505851f, 0.371874f}, {0.983485f, 0.513280f, 0.374198f}, {0.984622f, 0.520713f, 0.376698f}, {0.985693f, 0.528148f, 0.379371f}, {0.986700f, 0.535582f, 0.382210f}, {0.987646f, 0.543015f, 0.385210f}, {0.988533f, 0.550446f, 0.388365f}, {0.989363f, 0.557873f, 0.391671f}, {0.990138f, 0.565296f, 0.395122f}, {0.990871f, 0.572706f, 0.398714f}, {0.991558f, 0.580107f, 0.402441f}, {0.992196f, 0.587502f, 0.406299f}, {0.992785f, 0.594891f, 0.410283f}, {0.993326f, 0.602275f, 0.414390f}, {0.993834f, 0.609644f, 0.418613f}, {0.994309f, 0.616999f, 0.422950f}, {0.994738f, 0.624350f, 0.427397f}, {0.995122f, 0.631696f, 0.431951f}, {0.995480f, 0.639027f, 0.436607f}, {0.995810f, 0.646344f, 0.441361f}, {0.996096f, 0.653659f, 
0.446213f}, {0.996341f, 0.660969f, 0.451160f}, {0.996580f, 0.668256f, 0.456192f}, {0.996775f, 0.675541f, 0.461314f}, {0.996925f, 0.682828f, 0.466526f}, {0.997077f, 0.690088f, 0.471811f}, {0.997186f, 0.697349f, 0.477182f}, {0.997254f, 0.704611f, 0.482635f}, {0.997325f, 0.711848f, 0.488154f}, {0.997351f, 0.719089f, 0.493755f}, {0.997351f, 0.726324f, 0.499428f}, {0.997341f, 0.733545f, 0.505167f}, {0.997285f, 0.740772f, 0.510983f}, {0.997228f, 0.747981f, 0.516859f}, {0.997138f, 0.755190f, 0.522806f}, {0.997019f, 0.762398f, 0.528821f}, {0.996898f, 0.769591f, 0.534892f}, {0.996727f, 0.776795f, 0.541039f}, {0.996571f, 0.783977f, 0.547233f}, {0.996369f, 0.791167f, 0.553499f}, {0.996162f, 0.798348f, 0.559820f}, {0.995932f, 0.805527f, 0.566202f}, {0.995680f, 0.812706f, 0.572645f}, {0.995424f, 0.819875f, 0.579140f}, {0.995131f, 0.827052f, 0.585701f}, {0.994851f, 0.834213f, 0.592307f}, {0.994524f, 0.841387f, 0.598983f}, {0.994222f, 0.848540f, 0.605696f}, {0.993866f, 0.855711f, 0.612482f}, {0.993545f, 0.862859f, 0.619299f}, {0.993170f, 0.870024f, 0.626189f}, {0.992831f, 0.877168f, 0.633109f}, {0.992440f, 0.884330f, 0.640099f}, {0.992089f, 0.891470f, 0.647116f}, {0.991688f, 0.898627f, 0.654202f}, {0.991332f, 0.905763f, 0.661309f}, {0.990930f, 0.912915f, 0.668481f}, {0.990570f, 0.920049f, 0.675675f}, {0.990175f, 0.927196f, 0.682926f}, {0.989815f, 0.934329f, 0.690198f}, {0.989434f, 0.941470f, 0.697519f}, {0.989077f, 0.948604f, 0.704863f}, {0.988717f, 0.955742f, 0.712242f}, {0.988367f, 0.962878f, 0.719649f}, {0.988033f, 0.970012f, 0.727077f}, {0.987691f, 0.977154f, 0.734536f}, {0.987387f, 0.984288f, 0.742002f}, {0.987053f, 0.991438f, 0.749504f} }; }
the_stack
#include <cugraph/serialization/serializer.hpp>

#include <utilities/graph_utils.cuh>

#include <raft/device_atomics.cuh>

#include <thrust/copy.h>

#include <type_traits>

namespace cugraph {
namespace serializer {

// Serialize a single POD value into the device-side byte buffer and advance
// the write cursor (begin_) past it.
template <typename value_t>
void serializer_t::serialize(value_t val)
{
  auto byte_buff_sz = sizeof(value_t);
  auto it_end       = begin_ + byte_buff_sz;

  raft::update_device(
    begin_, reinterpret_cast<byte_t const*>(&val), byte_buff_sz, handle_.get_stream());

  begin_ = it_end;
}

// Unserialize a single POD value from the device-side byte buffer and advance
// the read cursor (cbegin_) past it.
//
// NOTE(review): raft::update_host() enqueues an async copy on the stream;
// returning `val` assumes a subsequent (or implicit) stream sync before the
// value is consumed — TODO confirm against the callers' stream discipline.
template <typename value_t>
value_t serializer_t::unserialize(void)
{
  value_t val{};
  auto byte_buff_sz = sizeof(value_t);

  // `1` == one element of value_t (raft::update_host takes an element count).
  raft::update_host(&val, reinterpret_cast<value_t const*>(cbegin_), 1, handle_.get_stream());

  cbegin_ += byte_buff_sz;
  return val;
}

// Serialize a device array of `size` elements into the byte buffer
// (device-to-device copy) and advance the write cursor.
template <typename value_t>
void serializer_t::serialize(value_t const* p_d_src, size_t size)
{
  auto byte_buff_sz        = size * sizeof(value_t);
  auto it_end              = begin_ + byte_buff_sz;
  byte_t const* byte_buff  = reinterpret_cast<byte_t const*>(p_d_src);

  thrust::copy_n(handle_.get_thrust_policy(), byte_buff, byte_buff_sz, begin_);

  begin_ = it_end;
}

// Unserialize `size` elements from the byte buffer into a freshly allocated
// device vector (device-to-device copy) and advance the read cursor.
template <typename value_t>
rmm::device_uvector<value_t> serializer_t::unserialize(size_t size)
{
  auto byte_buff_sz = size * sizeof(value_t);
  rmm::device_uvector<value_t> d_dest(size, handle_.get_stream());
  byte_t* byte_buff = reinterpret_cast<byte_t*>(d_dest.data());

  thrust::copy_n(handle_.get_thrust_policy(), cbegin_, byte_buff_sz, byte_buff);

  cbegin_ += byte_buff_sz;
  return d_dest;
}

// serialization of graph metadata, via device orchestration:
//
// Layout written: num_vertices_, num_edges_, 3 bool_ser_t flags
// (is_symmetric, is_multigraph, is_weighted_), then the optional
// segment-offsets array as raw vertex_t bytes.
template <typename graph_t>
void serializer_t::serialize(serializer_t::graph_meta_t<graph_t> const& gmeta)
{
  using vertex_t = typename graph_t::vertex_type;
  using edge_t   = typename graph_t::edge_type;
  using weight_t = typename graph_t::weight_type;

  if constexpr (!graph_t::is_multi_gpu) {
    using bool_t = typename graph_meta_t<graph_t>::bool_ser_t;

    serialize(gmeta.num_vertices_);
    serialize(gmeta.num_edges_);

    serialize(static_cast<bool_t>(gmeta.properties_.is_symmetric));
    serialize(static_cast<bool_t>(gmeta.properties_.is_multigraph));

    serialize(static_cast<bool_t>(gmeta.is_weighted_));

    auto seg_off_sz_bytes =
      (gmeta.segment_offsets_ ? (*(gmeta.segment_offsets_)).size() : size_t{0}) * sizeof(vertex_t);

    if (seg_off_sz_bytes > 0) {
      auto it_end = begin_ + seg_off_sz_bytes;

      raft::update_device(begin_,
                          reinterpret_cast<byte_t const*>((*(gmeta.segment_offsets_)).data()),
                          seg_off_sz_bytes,
                          handle_.get_stream());

      begin_ = it_end;
    }
  } else {
    CUGRAPH_FAIL("Unsupported graph type for serialization.");
  }
}

// unserialization of graph metadata, via device orchestration:
//
// Must read the exact layout written by serialize(graph_meta_t const&) above;
// `graph_meta_sz_bytes` is the total metadata footprint, from which the
// segment-offsets size is inferred.
template <typename graph_t>
serializer_t::graph_meta_t<graph_t> serializer_t::unserialize(
  size_t graph_meta_sz_bytes,
  serializer_t::graph_meta_t<graph_t> const& empty_meta)  // tag dispatching parameter
{
  using vertex_t = typename graph_t::vertex_type;
  using edge_t   = typename graph_t::edge_type;
  using weight_t = typename graph_t::weight_type;

  if constexpr (!graph_t::is_multi_gpu) {
    using bool_t = typename graph_meta_t<graph_t>::bool_ser_t;

    CUGRAPH_EXPECTS(graph_meta_sz_bytes >= 2 * sizeof(size_t) + 3 * sizeof(bool_t),
                    "Un/serialization meta size mismatch.");

    size_t num_vertices  = unserialize<size_t>();
    size_t num_edges     = unserialize<size_t>();
    bool_t is_symmetric  = unserialize<bool_t>();
    bool_t is_multigraph = unserialize<bool_t>();
    bool_t is_weighted   = unserialize<bool_t>();

    graph_properties_t properties{static_cast<bool>(is_symmetric),
                                  static_cast<bool>(is_multigraph)};

    std::optional<std::vector<vertex_t>> segment_offsets{std::nullopt};

    size_t seg_off_sz_bytes = graph_meta_sz_bytes - 2 * sizeof(size_t) - 3 * sizeof(bool_t);

    if (seg_off_sz_bytes > 0) {
      segment_offsets = std::vector<vertex_t>(seg_off_sz_bytes / sizeof(vertex_t), vertex_t{0});

      // FIX: raft::update_host() takes an *element* count (see the scalar
      // unserialize() above, which passes 1); the previous code passed the
      // byte count with a vertex_t-typed pointer, copying sizeof(vertex_t)x
      // too many bytes and overrunning both host and device buffers.
      raft::update_host((*segment_offsets).data(),
                        reinterpret_cast<vertex_t const*>(cbegin_),
                        seg_off_sz_bytes / sizeof(vertex_t),
                        handle_.get_stream());

      cbegin_ += seg_off_sz_bytes;
    }

    return graph_meta_t<graph_t>{
      num_vertices, num_edges, properties, static_cast<bool>(is_weighted), segment_offsets};
  } else {
    CUGRAPH_FAIL("Unsupported graph type for unserialization.");
    return graph_meta_t<graph_t>{};
  }
}

// graph serialization:
// metadata argument (gvmeta) can be used for checking / testing;
//
// Writes: graph metadata first (see FIXME below), then CSR offsets
// (num_vertices + 1 edge_t's), indices (num_edges vertex_t's), and, if
// weighted, num_edges weight_t's.
template <typename graph_t>
void serializer_t::serialize(graph_t const& graph, serializer_t::graph_meta_t<graph_t>& gvmeta)
{
  using vertex_t = typename graph_t::vertex_type;
  using edge_t   = typename graph_t::edge_type;
  using weight_t = typename graph_t::weight_type;

  if constexpr (!graph_t::is_multi_gpu) {
    size_t num_vertices = graph.get_number_of_vertices();
    size_t num_edges    = graph.get_number_of_edges();

    auto&& gview = graph.view();

    gvmeta = graph_meta_t<graph_t>{graph};

    auto offsets = gview.get_matrix_partition_view().get_offsets();
    auto indices = gview.get_matrix_partition_view().get_indices();
    auto weights = gview.get_matrix_partition_view().get_weights();

    // FIXME: remove when host_bcast() becomes available for vectors;
    //
    // for now, this must come first, because unserialize()
    // needs it at the beginning to extract graph metadata
    // to be able to finish the rest of the graph unserialization;
    //
    serialize(gvmeta);

    serialize(offsets, num_vertices + 1);
    serialize(indices, num_edges);
    if (weights) serialize(*weights, num_edges);
  } else {
    CUGRAPH_FAIL("Unsupported graph type for serialization.");
  }
}

// graph unserialization:
//
// `device_sz_bytes` / `host_sz_bytes` are cross-checked against the sizes
// implied by the unserialized metadata before the graph is rebuilt.
template <typename graph_t>
graph_t serializer_t::unserialize(size_t device_sz_bytes, size_t host_sz_bytes)
{
  using vertex_t = typename graph_t::vertex_type;
  using edge_t   = typename graph_t::edge_type;
  using weight_t = typename graph_t::weight_type;

  if constexpr (!graph_t::is_multi_gpu) {
    graph_meta_t<graph_t> empty_meta{};  // tag-dispatching only

    // FIXME: remove when host_bcast() becomes available for vectors;
    //
    // for now, this must come first, because unserialize()
    // needs it at the beginning to extract graph metadata
    // to be able to finish the rest of the graph unserialization;
    //
    auto gvmeta = unserialize(host_sz_bytes, empty_meta);

    auto pair_sz = get_device_graph_sz_bytes(gvmeta);
    CUGRAPH_EXPECTS((pair_sz.first == device_sz_bytes) && (pair_sz.second == host_sz_bytes),
                    "Un/serialization size mismatch.");

    vertex_t num_vertices = gvmeta.num_vertices_;
    edge_t num_edges      = gvmeta.num_edges_;
    auto g_props          = gvmeta.properties_;
    auto is_weighted      = gvmeta.is_weighted_;
    auto seg_offsets      = gvmeta.segment_offsets_;

    auto d_offsets = unserialize<edge_t>(num_vertices + 1);
    auto d_indices = unserialize<vertex_t>(num_edges);

    return graph_t(
      handle_,
      num_vertices,
      num_edges,
      g_props,
      std::move(d_offsets),
      std::move(d_indices),
      is_weighted
        ? std::optional<rmm::device_uvector<weight_t>>{unserialize<weight_t>(num_edges)}
        : std::nullopt,
      std::move(seg_offsets));  // RVO-ed
  } else {
    CUGRAPH_FAIL("Unsupported graph type for unserialization.");
    return graph_t{handle_};
  }
}

// Manual template instantiations (EIDir's):
//
template void serializer_t::serialize(int32_t const* p_d_src, size_t size);
template void serializer_t::serialize(int64_t const* p_d_src, size_t size);
template void serializer_t::serialize(float const* p_d_src, size_t size);
template void serializer_t::serialize(double const* p_d_src, size_t size);

template rmm::device_uvector<int32_t> serializer_t::unserialize(size_t size);
template rmm::device_uvector<int64_t> serializer_t::unserialize(size_t size);
template rmm::device_uvector<float> serializer_t::unserialize(size_t size);
template rmm::device_uvector<double> serializer_t::unserialize(size_t size);

// serialize graph:
//
template void serializer_t::serialize(
  graph_t<int32_t, int32_t, float, false, false> const& graph,
  serializer_t::graph_meta_t<graph_t<int32_t, int32_t, float, false, false>>&);

template void serializer_t::serialize(
  graph_t<int32_t, int64_t, float, false, false> const& graph,
  serializer_t::graph_meta_t<graph_t<int32_t, int64_t, float, false, false>>&);

template void serializer_t::serialize(
  graph_t<int64_t, int64_t, float, false, false> const& graph,
  serializer_t::graph_meta_t<graph_t<int64_t, int64_t, float, false, false>>&);

template void serializer_t::serialize(
  graph_t<int32_t, int32_t, double, false, false> const& graph,
  serializer_t::graph_meta_t<graph_t<int32_t, int32_t, double, false, false>>&);

template void serializer_t::serialize(
  graph_t<int32_t, int64_t, double, false, false> const& graph,
  serializer_t::graph_meta_t<graph_t<int32_t, int64_t, double, false, false>>&);

template void serializer_t::serialize(
  graph_t<int64_t, int64_t, double, false, false> const& graph,
  serializer_t::graph_meta_t<graph_t<int64_t, int64_t, double, false, false>>&);

// unserialize graph:
//
template graph_t<int32_t, int32_t, float, false, false> serializer_t::unserialize(size_t, size_t);
template graph_t<int32_t, int64_t, float, false, false> serializer_t::unserialize(size_t, size_t);
template graph_t<int64_t, int64_t, float, false, false> serializer_t::unserialize(size_t, size_t);
template graph_t<int32_t, int32_t, double, false, false> serializer_t::unserialize(size_t, size_t);
template graph_t<int32_t, int64_t, double, false, false> serializer_t::unserialize(size_t, size_t);
template graph_t<int64_t, int64_t, double, false, false> serializer_t::unserialize(size_t, size_t);

}  // namespace serializer
}  // namespace cugraph
the_stack
#include <nccl.h> #include <memory> #include <utility> namespace oneflow { namespace boxing { namespace collective { namespace { ncclRedOp_t GetNcclReduceOp(ReduceMethod reduce_method) { if (reduce_method == kReduceMethodSum) { return ncclRedOp_t::ncclSum; } else { UNIMPLEMENTED(); return ncclRedOp_t{}; } } std::string GetNcclUniqueIdRpcKey(const std::string& name, int64_t stream_id) { return "CollectiveBoxingExecutorNcclUniqueIdRpcKey-" + name + "-" + std::to_string(stream_id); } struct CopyParams { void* dst; const void* src; int64_t count; }; constexpr int64_t kMultiCopyParamsMaxSize = 128; constexpr int64_t kMultiCopyAlignSize = 32; int64_t GetMultiCopyAlignedSize(int64_t size) { return ((size + kMultiCopyAlignSize - 1) / kMultiCopyAlignSize) * kMultiCopyAlignSize; } struct MultiCopyParams { CopyParams params[kMultiCopyParamsMaxSize]; int64_t count; MultiCopyParams() : count(0), params{} {} void Add(void* dst, const void* src, int64_t count) { CHECK_LT(this->count, kMultiCopyParamsMaxSize); params[this->count].dst = dst; params[this->count].src = src; params[this->count].count = count; this->count += 1; } }; using BulkType = ulonglong2; __global__ void MultiCopyGpu(MultiCopyParams multi_params) { for (int64_t p = 0; p < multi_params.count; ++p) { const CopyParams params = multi_params.params[p]; auto* bulk_dst = reinterpret_cast<BulkType*>(params.dst); const auto* bulk_src = reinterpret_cast<const BulkType*>(params.src); const int64_t bulk_count = params.count / sizeof(BulkType); CUDA_1D_KERNEL_LOOP_T(int64_t, i, bulk_count) { bulk_dst[i] = bulk_src[i]; } const int64_t tail_offset = bulk_count * sizeof(BulkType); auto* tail_dst = reinterpret_cast<char*>(params.dst) + tail_offset; const auto* tail_src = reinterpret_cast<const char*>(params.src) + tail_offset; const int64_t tail_count = params.count - tail_offset; CUDA_1D_KERNEL_LOOP_T(int64_t, i, tail_count) { tail_dst[i] = tail_src[i]; } } } void MultiCopy(cudaStream_t stream, const MultiCopyParams& 
multi_params) { if (multi_params.count <= 0) { return; } CHECK_LE(multi_params.count, kMultiCopyParamsMaxSize); int64_t max_count = multi_params.params[0].count; for (int64_t i = 0; i < multi_params.count; ++i) { max_count = std::max(max_count, multi_params.params[i].count); } MultiCopyGpu<<<BlocksNum4ThreadsNum(max_count), kCudaThreadsNumPerBlock, 0, stream>>>( multi_params); } class CommRank final { public: OF_DISALLOW_COPY(CommRank); CommRank(int32_t device_id, int32_t global_rank, int32_t global_rank_count, int32_t local_rank, int32_t local_rank_count) : device_id_(device_id), global_rank_(global_rank), local_rank_(local_rank), nccl_comm_(nullptr) {} CommRank(CommRank&& rhs) noexcept { this->device_id_ = rhs.device_id_; this->global_rank_ = rhs.global_rank_; this->local_rank_ = rhs.local_rank_; this->nccl_comm_ = rhs.nccl_comm_; rhs.nccl_comm_ = nullptr; } ~CommRank() { if (nccl_comm_ != nullptr) { CudaCurrentDeviceGuard(device_id_); OF_NCCL_CHECK(ncclCommDestroy(nccl_comm_)); } } int32_t device_id() const { return device_id_; } ncclComm_t nccl_comm() const { return nccl_comm_; } void InitRank(ncclUniqueId unique_id, int32_t global_rank_count) { CudaCurrentDeviceGuard(device_id_); OF_NCCL_CHECK(ncclCommInitRank(&nccl_comm_, global_rank_count, unique_id, global_rank_)); } private: int32_t device_id_; int32_t global_rank_; int32_t local_rank_; ncclComm_t nccl_comm_; }; class CommGroup final { public: OF_DISALLOW_COPY(CommGroup); CommGroup() = default; ~CommGroup() = default; CommGroup(CommGroup&& rhs) noexcept { rank_vec_.swap(rhs.rank_vec_); global_rank_count_ = rhs.global_rank_count_; } void InitGroup(const DeviceSet& device_set, const std::string& unique_name) { CudaCurrentDeviceGuard guard; const int64_t this_machine_id = GlobalProcessCtx::Rank(); global_rank_count_ = device_set.device_size(); std::vector<int32_t> local_ranks; for (int32_t i = 0; i < global_rank_count_; ++i) { if (device_set.device(i).machine_id() == this_machine_id) { 
local_ranks.push_back(i); } } const int32_t local_rank_count = local_ranks.size(); CHECK_GT(local_rank_count, 0); ncclUniqueId nccl_unique_id{}; if (local_ranks.front() == 0) { OF_NCCL_CHECK(ncclGetUniqueId(&nccl_unique_id)); if (local_rank_count != global_rank_count_) { Global<CtrlClient>::Get()->PushKV(unique_name, NcclUniqueIdToString(nccl_unique_id)); } } else { Global<CtrlClient>::Get()->PullKV(unique_name, [&nccl_unique_id](const std::string& val) { NcclUniqueIdFromString(val, &nccl_unique_id); }); } rank_vec_.reserve(local_rank_count); OF_NCCL_CHECK(ncclGroupStart()); for (int32_t local_rank = 0; local_rank < local_ranks.size(); ++local_rank) { const int32_t global_rank = local_ranks.at(local_rank); const int32_t device_id = device_set.device(global_rank).device_id(); OF_CUDA_CHECK(cudaSetDevice(device_id)); rank_vec_.emplace_back(device_id, global_rank, global_rank_count_, local_rank, local_rank_count); rank_vec_.at(local_rank).InitRank(nccl_unique_id, global_rank_count_); } OF_NCCL_CHECK(ncclGroupEnd()); } int32_t global_rank_count() const { return global_rank_count_; } int32_t local_rank_count() const { return rank_vec_.size(); } const CommRank& GetCommRank(int32_t local_rank) const { return rank_vec_.at(local_rank); } private: std::vector<CommRank> rank_vec_; int32_t global_rank_count_ = 0; }; class StreamCtx { public: OF_DISALLOW_COPY(StreamCtx); StreamCtx(int32_t device_id, size_t fusion_buffer_size) : device_id_(device_id), fusion_buffer_size_(fusion_buffer_size) { CudaCurrentDeviceGuard guard(device_id_); int priority; OF_CUDA_CHECK(cudaDeviceGetStreamPriorityRange(nullptr, &priority)); OF_CUDA_CHECK(cudaStreamCreateWithPriority(&stream_, cudaStreamNonBlocking, priority)); OF_CUDA_CHECK(cudaMalloc(&fusion_buffer_, fusion_buffer_size_)); cb_event_poller_ = std::thread(&StreamCtx::PollEvent, this); } ~StreamCtx() { cb_event_chan_.Close(); cb_event_poller_.join(); CudaCurrentDeviceGuard guard(device_id_); OF_CUDA_CHECK(cudaStreamSynchronize(stream_)); 
OF_CUDA_CHECK(cudaStreamDestroy(stream_)); OF_CUDA_CHECK(cudaFree(fusion_buffer_)); } void PollEvent() { while (true) { std::pair<cudaEvent_t, std::function<void()>> cb_event; ChannelStatus status = cb_event_chan_.Receive(&cb_event); if (status == kChannelStatusErrorClosed) { break; } CHECK_EQ(status, kChannelStatusSuccess); OF_CUDA_CHECK(cudaEventSynchronize(cb_event.first)); cb_event.second(); OF_CUDA_CHECK(cudaEventDestroy(cb_event.first)); } } void AddCallback(const std::function<void()>& callback) { cudaEvent_t event; OF_CUDA_CHECK(cudaEventCreateWithFlags(&event, cudaEventDisableTiming)); OF_CUDA_CHECK(cudaEventRecord(event, stream_)); CHECK_EQ(cb_event_chan_.Send(std::make_pair(event, callback)), kChannelStatusSuccess); } int32_t device_id() const { return device_id_; } cudaStream_t stream() const { return stream_; } size_t fusion_buffer_size() const { return fusion_buffer_size_; } char* fusion_buffer() const { return fusion_buffer_; } private: int32_t device_id_; cudaStream_t stream_ = nullptr; size_t fusion_buffer_size_; char* fusion_buffer_ = nullptr; Channel<std::pair<cudaEvent_t, std::function<void()>>> cb_event_chan_; std::thread cb_event_poller_; }; void LaunchFusedAllReduce(const CommGroup& comm_group, const std::vector<std::unique_ptr<StreamCtx>>& device_id2stream_ctx, const std::shared_ptr<RequestStore>& request_store, const std::vector<RequestId>& request_ids) { CHECK_LE(request_ids.size(), kMultiCopyParamsMaxSize); RequestEntry* first_request_entry = request_store->MutRequestEntry(request_ids.front()); const ncclDataType_t nccl_data_type = GetNcclDataType(first_request_entry->desc().op_desc().data_type()); const ncclRedOp_t nccl_reduce_op = GetNcclReduceOp(first_request_entry->desc().op_desc().reduce_method()); const int64_t size_of_data_type = GetSizeOfDataType(first_request_entry->desc().op_desc().data_type()); std::vector<int64_t> offset_vec; offset_vec.reserve(request_ids.size()); int64_t offset = 0; 
request_store->ForEachMutRequestEntryForIdsInJob( request_ids, [&](RequestEntry* request_entry, int32_t i, const RequestId& request_id) { offset_vec.emplace_back(offset); offset += GetMultiCopyAlignedSize(request_entry->size_in_bytes()); }); const int64_t elem_cnt = offset / size_of_data_type; for (int32_t local_rank = 0; local_rank < comm_group.local_rank_count(); ++local_rank) { MultiCopyParams copy_in_params; const CommRank& comm_rank = comm_group.GetCommRank(local_rank); const StreamCtx* stream_ctx = device_id2stream_ctx.at(comm_rank.device_id()).get(); CHECK_LE(offset, stream_ctx->fusion_buffer_size()); request_store->ForEachMutRequestEntryForIdsInJob( request_ids, [&](RequestEntry* request_entry, int32_t i, const RequestId& request_id) { copy_in_params.Add(stream_ctx->fusion_buffer() + offset_vec.at(i), request_entry->GetRuntimeRequest(local_rank)->send_buff, request_entry->size_in_bytes()); }); OF_CUDA_CHECK(cudaSetDevice(comm_rank.device_id())); MultiCopy(stream_ctx->stream(), copy_in_params); } OF_NCCL_CHECK(ncclGroupStart()); for (int32_t local_rank = 0; local_rank < comm_group.local_rank_count(); ++local_rank) { const CommRank& comm_rank = comm_group.GetCommRank(local_rank); const StreamCtx* stream_ctx = device_id2stream_ctx.at(comm_rank.device_id()).get(); OF_CUDA_CHECK(cudaSetDevice(comm_rank.device_id())); OF_NCCL_CHECK(ncclAllReduce(stream_ctx->fusion_buffer(), stream_ctx->fusion_buffer(), elem_cnt, nccl_data_type, nccl_reduce_op, comm_rank.nccl_comm(), stream_ctx->stream())); } OF_NCCL_CHECK(ncclGroupEnd()); for (int32_t local_rank = 0; local_rank < comm_group.local_rank_count(); ++local_rank) { MultiCopyParams copy_out_params; const CommRank& comm_rank = comm_group.GetCommRank(local_rank); const StreamCtx* stream_ctx = device_id2stream_ctx.at(comm_rank.device_id()).get(); request_store->ForEachMutRequestEntryForIdsInJob( request_ids, [&](RequestEntry* request_entry, int32_t i, const RequestId& request_id) { 
// Tail of a fused-all-reduce launcher whose definition begins before this
// chunk: copy each request's slice of the fused buffer back into its own
// recv buffer on the rank's device, then issue the batched copy.
              copy_out_params.Add(request_entry->GetRuntimeRequest(local_rank)->recv_buff,
                                  stream_ctx->fusion_buffer() + offset_vec.at(i),
                                  request_entry->size_in_bytes());
        });
    OF_CUDA_CHECK(cudaSetDevice(comm_rank.device_id()));
    MultiCopy(stream_ctx->stream(), copy_out_params);
  }
}

// Launches one NCCL collective per request, for every local rank, inside a
// single ncclGroupStart()/ncclGroupEnd() so NCCL can aggregate them.
// The CUDA device is switched to each rank's device before enqueuing its ops.
void LaunchAggregatedOps(const CommGroup& comm_group,
                         const std::vector<std::unique_ptr<StreamCtx>>& device_id2stream_ctx,
                         const std::shared_ptr<RequestStore>& request_store,
                         const std::vector<RequestId>& request_ids) {
  OF_NCCL_CHECK(ncclGroupStart());
  for (int32_t local_rank = 0; local_rank < comm_group.local_rank_count(); ++local_rank) {
    const CommRank& comm_rank = comm_group.GetCommRank(local_rank);
    const auto comm = comm_rank.nccl_comm();
    const StreamCtx* stream_ctx = device_id2stream_ctx.at(comm_rank.device_id()).get();
    OF_CUDA_CHECK(cudaSetDevice(comm_rank.device_id()));
    request_store->ForEachMutRequestEntryForIdsInJob(
        request_ids, [&](RequestEntry* request_entry, int32_t i, const RequestId& request_id) {
          const auto& op_desc = request_entry->desc().op_desc();
          const std::shared_ptr<const RuntimeRequestInfo>& runtime_request_info =
              request_entry->GetRuntimeRequest(local_rank);
          const OpType op_type = op_desc.op_type();
          const void* send_buff = runtime_request_info->send_buff;
          void* recv_buff = runtime_request_info->recv_buff;
          const int64_t elem_cnt = request_entry->elem_cnt();
          const ncclDataType_t nccl_data_type = GetNcclDataType(op_desc.data_type());
          const int32_t num_ranks = comm_group.global_rank_count();
          if (op_type == OpType::kOpTypeAllReduce) {
            OF_NCCL_CHECK(ncclAllReduce(send_buff, recv_buff, elem_cnt, nccl_data_type,
                                        GetNcclReduceOp(op_desc.reduce_method()), comm,
                                        stream_ctx->stream()));
          } else if (op_type == OpType::kOpTypeAllGather) {
            // ncclAllGather takes the per-rank send count, hence elem_cnt / num_ranks.
            CHECK_EQ(elem_cnt % num_ranks, 0);
            OF_NCCL_CHECK(ncclAllGather(send_buff, recv_buff, elem_cnt / num_ranks, nccl_data_type,
                                        comm, stream_ctx->stream()));
          } else if (op_type == OpType::kOpTypeReduceScatter) {
            // ncclReduceScatter takes the per-rank recv count.
            CHECK_EQ(elem_cnt % num_ranks, 0);
            OF_NCCL_CHECK(ncclReduceScatter(
                send_buff, recv_buff, elem_cnt / num_ranks, nccl_data_type,
                GetNcclReduceOp(op_desc.reduce_method()), comm, stream_ctx->stream()));
          } else if (op_type == OpType::kOpTypeReduce) {
            OF_NCCL_CHECK(ncclReduce(send_buff, recv_buff, elem_cnt, nccl_data_type,
                                     GetNcclReduceOp(op_desc.reduce_method()), op_desc.root(), comm,
                                     stream_ctx->stream()));
          } else if (op_type == OpType::kOpTypeBroadcast) {
            OF_NCCL_CHECK(ncclBroadcast(send_buff, recv_buff, elem_cnt, nccl_data_type,
                                        op_desc.root(), comm, stream_ctx->stream()));
          } else if (op_type == OpType::kOpTypeAll2All) {
#if NCCL_VERSION_CODE > 2700
            // all2all is emulated with pairwise ncclSend/ncclRecv, which require
            // NCCL >= 2.7. Each peer gets one equally-sized chunk.
            const int64_t elem_per_rank = elem_cnt / num_ranks;
            const int64_t elem_per_chunk = elem_per_rank / num_ranks;
            const int64_t dtype_size = GetSizeOfDataType(op_desc.data_type());
            const int64_t chunk_size = elem_per_chunk * dtype_size;
            for (int64_t j = 0; j < num_ranks; ++j) {
              OF_NCCL_CHECK(ncclSend(reinterpret_cast<const void*>(
                                         reinterpret_cast<const char*>(send_buff) + j * chunk_size),
                                     elem_per_chunk, nccl_data_type, j, comm,
                                     stream_ctx->stream()));
              OF_NCCL_CHECK(ncclRecv(
                  reinterpret_cast<void*>(reinterpret_cast<char*>(recv_buff) + j * chunk_size),
                  elem_per_chunk, nccl_data_type, j, comm, stream_ctx->stream()));
            }
#else
            UNIMPLEMENTED();
#endif
          } else {
            UNIMPLEMENTED();
          }
        });
  }
  OF_NCCL_CHECK(ncclGroupEnd());
}

// Detaches the runtime request info from every entry, then registers one
// stream callback per local rank that fires each request's completion
// callback once the rank's stream has drained the launched work.
void AddCallbackAndResetRuntimeRequest(
    const CommGroup& comm_group,
    const std::vector<std::unique_ptr<StreamCtx>>& device_id2stream_ctx,
    const std::shared_ptr<RequestStore>& request_store,
    const std::vector<RequestId>& request_ids) {
  // Snapshot per-request runtime info before it is reset; indexed by request.
  std::vector<std::vector<std::shared_ptr<const RuntimeRequestInfo>>> saved_runtime_request_info(
      request_ids.size());
  request_store->ForEachMutRequestEntryForIdsInJob(
      request_ids, [&](RequestEntry* request_entry, int32_t i, const RequestId& request_id) {
        saved_runtime_request_info.at(i) = std::move(request_entry->ResetRuntimeRequest());
      });
  for (int32_t local_rank = 0; local_rank < comm_group.local_rank_count(); ++local_rank) {
    const CommRank& comm_rank = comm_group.GetCommRank(local_rank);
    StreamCtx* stream_ctx = device_id2stream_ctx.at(comm_rank.device_id()).get();
    // Shared-ownership vector keeps the runtime infos alive until the stream
    // callback has run.
    auto runtime_request_info_vec =
        std::make_shared<std::vector<std::shared_ptr<const RuntimeRequestInfo>>>();
    runtime_request_info_vec->reserve(request_ids.size());
    request_store->ForEachMutRequestEntryForIdsInJob(
        request_ids, [&](RequestEntry* request_entry, int32_t i, const RequestId& request_id) {
          runtime_request_info_vec->emplace_back(
              std::move(saved_runtime_request_info.at(i).at(local_rank)));
        });
    OF_CUDA_CHECK(cudaSetDevice(comm_rank.device_id()));
    stream_ctx->AddCallback([runtime_request_info_vec]() {
      for (auto& runtime_request_info : *runtime_request_info_vec) {
        runtime_request_info->callback(Maybe<void>::Ok());
      }
    });
  }
}

}  // namespace

// Private implementation of the NCCL executor backend: owns the per-stream,
// per-device stream contexts and the per-device-set NCCL communicator groups,
// and implements request grouping (fusion) plus group execution.
struct NcclExecutorBackend::Impl {
  // Validates the collective-boxing config and precomputes fusion settings.
  Impl(const CollectiveBoxingConf& conf, std::shared_ptr<RequestStore> request_store)
      : conf(conf), request_store(std::move(request_store)) {
    CHECK_GT(conf.nccl_num_streams(), 0);
    CHECK_GE(conf.nccl_fusion_threshold_mb(), 0);
    fusion_threshold = conf.nccl_fusion_threshold_mb() * 1024 * 1024;
    num_streams = conf.nccl_num_streams();
    current_stream_id = 0;
    // Mixed-type fusion is incompatible with the dedicated all-reduce buffer.
    enable_mixed_fusion =
        (!conf.nccl_fusion_all_reduce_use_buffer()) && conf.nccl_enable_mixed_fusion();
    int nccl_version;
    OF_NCCL_CHECK(ncclGetVersion(&nccl_version));
    if (nccl_version == 21003) {
      // See https://github.com/NVIDIA/nccl/issues/560; fusion is disabled for
      // this exact version in CanRequestEntryFuse().
      LOG(WARNING) << "Current nccl version is 2.10.3, in this version, ncclGroup() with mixed "
                      "datatype/element/collective could induce crash or corruption, so we will not "
                      "fuse any request.";
    }
    InitStreamCtx();
    InitIsOpTypeFusionEnabled();
  }
  ~Impl() {
    stream_id2device_id2stream_ctx.clear();
    device_set2stream_id2comm_group.clear();
  }

  // Creates one CommGroup per stream for every device set used by the job's
  // NCCL requests with a rank on this node, and lazily allocates the stream
  // contexts for the local devices involved.
  void InitCommGroup(int64_t job_id) {
    std::set<int64_t> local_device_ids;
    request_store->ForEachMutRequestEntryInJob(
        job_id, [&](RequestEntry* request_entry, int32_t i, const RequestId& request_id) {
          const auto& request = request_entry->desc();
          if (request.op_desc().backend() != Backend::kBackendNCCL) { return; }
          if (!request_entry->HasRankOnThisNode()) { return; }
          const DeviceSet& device_set = request.device_set();
          // Only one group set per device set; later requests reuse it.
          if (device_set2stream_id2comm_group.count(device_set) > 0) { return; }
          auto& stream_id2comm_group = device_set2stream_id2comm_group[device_set];
          stream_id2comm_group.resize(num_streams);
          for (int32_t stream_id = 0; stream_id < num_streams; ++stream_id) {
            stream_id2comm_group.at(stream_id).InitGroup(
                device_set, GetNcclUniqueIdRpcKey(request.op_desc().name(), stream_id));
          }
          for (int32_t j = 0; j < stream_id2comm_group.at(0).local_rank_count(); ++j) {
            local_device_ids.emplace(stream_id2comm_group.at(0).GetCommRank(j).device_id());
          }
        });
    for (int32_t stream_id = 0; stream_id < num_streams; ++stream_id) {
      for (const int64_t device_id : local_device_ids) {
        if (stream_id2device_id2stream_ctx.at(stream_id).at(device_id) == nullptr) {
          stream_id2device_id2stream_ctx.at(stream_id).at(device_id) =
              std::make_unique<StreamCtx>(device_id, fusion_threshold);
        }
      }
    }
  }

  // Sizes the [stream][device] table; entries are filled in InitCommGroup().
  void InitStreamCtx() {
    int32_t num_devices;
    OF_CUDA_CHECK(cudaGetDeviceCount(&num_devices));
    stream_id2device_id2stream_ctx.resize(num_streams);
    for (int64_t stream_id = 0; stream_id < num_streams; ++stream_id) {
      stream_id2device_id2stream_ctx.at(stream_id).resize(num_devices);
    }
  }

  // Per-op-type fusion switches, driven by the config; all2all never fuses.
  void InitIsOpTypeFusionEnabled() {
    op_type2fusion_enabled.resize(OpType_ARRAYSIZE, false);
    op_type2fusion_enabled.at(OpType::kOpTypeAllReduce) = conf.nccl_fusion_all_reduce();
    op_type2fusion_enabled.at(OpType::kOpTypeAllGather) = conf.nccl_fusion_all_gather();
    op_type2fusion_enabled.at(OpType::kOpTypeReduceScatter) = conf.nccl_fusion_reduce_scatter();
    op_type2fusion_enabled.at(OpType::kOpTypeReduce) = conf.nccl_fusion_reduce();
    op_type2fusion_enabled.at(OpType::kOpTypeBroadcast) = conf.nccl_fusion_broadcast();
    op_type2fusion_enabled.at(OpType::kOpTypeAll2All) = false;
  }

  // Round-robin stream selection. NOTE(review): not thread-safe by itself;
  // presumably callers serialize ExecuteGroup() — confirm.
  int32_t NextStreamId() {
    const int32_t stream_id = current_stream_id;
    current_stream_id = (current_stream_id + 1) % num_streams;
    return stream_id;
  }

  bool IsOpTypeFusionEnabled(OpType op_type) const { return op_type2fusion_enabled.at(op_type); }

  bool IsRequestEntryFusionEnabled(const RequestEntry* entry) const {
    return IsOpTypeFusionEnabled(entry->desc().op_desc().op_type());
  }

  // Decides whether two adjacent requests may share one fused group.
  bool CanRequestEntryFuse(const RequestEntry* lhs, const RequestEntry* rhs) const {
    {
      // NOTE(review): ncclGetVersion() is queried on every pair comparison even
      // though the version cannot change after startup; the constructor already
      // queries it once — caching the result here would avoid repeated calls.
      int nccl_version;
      OF_NCCL_CHECK(ncclGetVersion(&nccl_version));
      // Workaround for https://github.com/NVIDIA/nccl/issues/560
      if (nccl_version == 21003) { return false; }
    }
    if (lhs->device_set_symbol() != rhs->device_set_symbol()) { return false; }
    if ((!IsRequestEntryFusionEnabled(lhs)) || (!IsRequestEntryFusionEnabled(rhs))) {
      return false;
    }
    if ((!enable_mixed_fusion)
        && lhs->desc().op_desc().op_type() != rhs->desc().op_desc().op_type()) {
      return false;
    }
    if (conf.nccl_fusion_all_reduce_use_buffer()) {
      // Buffered all-reduce fusion additionally requires matching reduce
      // method and data type, and never mixes all-reduce with other op types.
      if (lhs->desc().op_desc().op_type() == OpType::kOpTypeAllReduce
          && rhs->desc().op_desc().op_type() == OpType::kOpTypeAllReduce) {
        CHECK(lhs->desc().op_desc().has_reduce_method());
        CHECK(rhs->desc().op_desc().has_reduce_method());
        return lhs->desc().op_desc().reduce_method() == rhs->desc().op_desc().reduce_method()
               && lhs->desc().op_desc().data_type() == rhs->desc().op_desc().data_type();
      } else if (lhs->desc().op_desc().op_type() == OpType::kOpTypeAllReduce
                 || rhs->desc().op_desc().op_type() == OpType::kOpTypeAllReduce) {
        return false;
      } else {
        return true;
      }
    } else {
      return true;
    }
  }

  // Greedily partitions request_ids into fusable groups bounded by the fusion
  // byte threshold and max-op count, invoking Handler once per closed group
  // with a freshly created group token.
  void GroupRequests(const std::vector<RequestId>& request_ids,
                     const std::function<void(std::vector<RequestId>&&, void*)>& Handler) {
    std::vector<RequestId> group;
    int64_t group_size = 0;
    const int64_t fusion_max_ops = std::min(conf.nccl_fusion_max_ops(), kMultiCopyParamsMaxSize);
    request_store->ForEachMutRequestEntryForIdsInJob(
        request_ids, [&](RequestEntry* request_entry, int32_t i, const RequestId& request_id) {
          const auto& request = request_entry->desc();
          const int64_t size = GetMultiCopyAlignedSize(request_entry->size_in_bytes());
          // Close the current group when the next request cannot join it.
          if (group.empty()
              || !CanRequestEntryFuse(request_store->MutRequestEntry(group.back()), request_entry)
              || group_size + size > fusion_threshold || group.size() >= fusion_max_ops) {
            if (!group.empty()) {
              void* token = CreateGroupToken(group);
              Handler(std::move(group), token);
              group.clear();
              group_size = 0;
            }
          }
          group.push_back(request_id);
          group_size += size;
        });
    // Flush the trailing (non-empty) group.
    if (!group.empty()) {
      void* token = CreateGroupToken(group);
      Handler(std::move(group), token);
    }
  }

  // Opaque handle passed back by the scheduler: the grouped request ids plus
  // a pointer to the per-stream CommGroups for their shared device set.
  struct GroupToken {
    GroupToken(const std::vector<RequestId>& group, std::vector<CommGroup>* stream_id2comm_group)
        : request_ids(group), stream_id2comm_group(stream_id2comm_group) {}
    std::vector<RequestId> request_ids;
    std::vector<CommGroup>* stream_id2comm_group;
  };

  // Builds a GroupToken; checks every request in the group shares the first
  // request's device set. Caller owns the token (freed by DestroyGroupToken).
  void* CreateGroupToken(const std::vector<RequestId>& group) {
    CHECK_GT(group.size(), 0);
    void* group_token;
    const DeviceSet& first_device_set =
        request_store->MutRequestEntry(group.front())->desc().device_set();
    auto it = device_set2stream_id2comm_group.find(first_device_set);
    CHECK(it != device_set2stream_id2comm_group.end());
    group_token = new GroupToken(group, &it->second);
    request_store->ForEachMutRequestEntryForIdsInJob(
        group, [&](RequestEntry* request_entry, int32_t i, const RequestId& request_id) {
          const DeviceSet& device_set = request_entry->desc().device_set();
          CHECK(first_device_set == device_set);
        });
    return group_token;
  }

  void DestroyGroupToken(void* group_token) {
    GroupToken* token = static_cast<GroupToken*>(group_token);
    delete token;
  }

  // Executes one group on the next round-robin stream: buffered fused
  // all-reduce when configured and profitable (>1 all-reduce requests),
  // otherwise aggregated per-request NCCL ops; then attaches completion
  // callbacks.
  void ExecuteGroup(void* group_token) {
    GroupToken* token = static_cast<GroupToken*>(group_token);
    const std::vector<RequestId>& request_ids = token->request_ids;
    if (request_ids.empty()) { return; }
    const int32_t stream_id = NextStreamId();
    CudaCurrentDeviceGuard device_guard;  // restores the current device on exit
    const auto& comm_group = token->stream_id2comm_group->at(stream_id);
    auto& device_id2stream_ctx = stream_id2device_id2stream_ctx.at(stream_id);
    if (request_store->MutRequestEntry(request_ids.front())->desc().op_desc().op_type()
            == OpType::kOpTypeAllReduce
        && conf.nccl_fusion_all_reduce_use_buffer() && request_ids.size() > 1) {
      LaunchFusedAllReduce(comm_group, device_id2stream_ctx, request_store, request_ids);
    } else {
      LaunchAggregatedOps(comm_group, device_id2stream_ctx, request_store, request_ids);
    }
    AddCallbackAndResetRuntimeRequest(comm_group, device_id2stream_ctx, request_store, request_ids);
  }

  CollectiveBoxingConf conf;
  int64_t fusion_threshold;   // fusion byte budget per group
  int32_t num_streams;        // round-robin pool size
  int32_t current_stream_id;  // next stream to hand out
  bool enable_mixed_fusion;
  std::vector<bool> op_type2fusion_enabled;  // indexed by OpType
  std::shared_ptr<RequestStore> request_store;
  HashMap<DeviceSet, std::vector<CommGroup>> device_set2stream_id2comm_group;
  std::vector<std::vector<std::unique_ptr<StreamCtx>>> stream_id2device_id2stream_ctx;
};

NcclExecutorBackend::NcclExecutorBackend() = default;
NcclExecutorBackend::~NcclExecutorBackend() = default;

void NcclExecutorBackend::Init(std::shared_ptr<RequestStore> request_store) {
  impl_ = std::make_unique<Impl>(Global<ResourceDesc, ForSession>::Get()->collective_boxing_conf(),
                                 request_store);
}

void NcclExecutorBackend::InitJob(int64_t job_id) {
  CudaCurrentDeviceGuard guard;
  impl_->InitCommGroup(job_id);
}

void NcclExecutorBackend::DeinitJob(int64_t job_id) {}

void NcclExecutorBackend::GroupRequests(
    const std::vector<RequestId>& request_ids,
    const std::function<void(std::vector<RequestId>&&, void*)>& Handler) {
  impl_->GroupRequests(request_ids, Handler);
}

void* NcclExecutorBackend::CreateGroupToken(const std::vector<RequestId>& group) {
  return impl_->CreateGroupToken(group);
}

void NcclExecutorBackend::DestroyGroupToken(void* group_token) {
  return impl_->DestroyGroupToken(group_token);
}

void NcclExecutorBackend::ExecuteGroup(void* group_token) { impl_->ExecuteGroup(group_token); }

} // namespace collective
} // namespace boxing
} // namespace oneflow
the_stack
#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include <sys/socket.h>  // AF_LOCAL
#include <poll.h>        // POLL
#include <sys/param.h>

#include <rdma_dgram/rsocket.h>

// NOTE(review): redefining O_NONBLOCK with a hard-coded octal value shadows
// the platform's <fcntl.h> definition — confirm 00004 matches the host side.
#define O_NONBLOCK 00004
#define O_GPUNET_BOUNCE 04000

#ifndef EWOULDBLOCK
#define EWOULDBLOCK 11
#endif

#ifndef UINT32_MAX
#define UINT32_MAX 0xffffffffU
#endif

// this is dictated by rsocket.c
#define RS_SNDLOWAT UNIT_MSGBUF_SIZE

// True when the socket entry has been switched to non-blocking mode.
#define SOC_NONBLOCK(soc_e) ((soc_e)->e_flags & O_NONBLOCK)

// Allocates a slot in the GPU-side socket table. domain/type/protocol are
// accepted for API symmetry but are not used here.
__device__ int gsocket( int domain, int type, int protocol){
	int entry=g_soctable->findNewEntry();
	return entry;
}

// Releases a socket-table slot previously handed out by gsocket().
__device__ inline void free_socket(int soc){
	GPU_ASSERT((soc >= 0) && (soc < SOC_TABLE_SIZE));
	g_soctable->free_socket(soc);
}

// Copies the CPU-published IPC buffer pointers/sizes from the shared IPC entry
// into the GPU socket structure (send/recv ring buffers and their queues).
// Reads go through readNoCache() so the host's latest values are observed.
__device__ void sync_ipc_bufs(gpu_socket *sock, cpu_ipc_entry *e) {
	sock->sbuf = (uint8_t*)readNoCache(&e->sbuf_addr);
	sock->sbuf_size = readNoCache(&e->sbuf_size);
	sock->sq_size = sock->sbuf_size / RS_SNDLOWAT;

// Helper: sock-><prefix>_<name> = (type)readNoCache(&e-><prefix>_<name>);
#define get_value_from_ipc_entry(prefix, name, type) sock-> prefix ## _ ## name = (type)readNoCache(&e-> prefix ## _ ## name);

	get_value_from_ipc_entry(dev, sbuf_free_queue_back, int*);
	get_value_from_ipc_entry(dev, sbuf_free_queue, int*);

	get_value_from_ipc_entry(host, sbuf_req_queue, ds_smsg_info*);
	get_value_from_ipc_entry(dev, sbuf_req_back, int*);
	get_value_from_ipc_entry(host, sbuf_req_back, int*);

	sock->rbuf_size = readNoCache(&e->rbuf_size);
	sock->rq_size = sock->rbuf_size / RS_SNDLOWAT;

	get_value_from_ipc_entry(dev, rbuf_recved_queue, ds_rmsg_info*);
	get_value_from_ipc_entry(dev, rbuf_recved_back, int*);
	get_value_from_ipc_entry(host, rbuf_ack_queue_back, int*);
}

// Low-level connect: fills an IPC slot with the request and the peer address,
// fires it to the CPU proxy, and returns the proxy's result code.
// NOTE(review): req_type is accepted but CONNECT_IPC_REQ is always written —
// the *_bounce wrapper below passes CONNECT_IPC_REQ_BOUNCE, which is thereby
// ignored; confirm whether that is intentional.
__device__ int ll_connect_in(int soc, const struct sockaddr_in *addr, socklen_t addrlen, req_type_t req_type) {
	__shared__ int ret_val;
	__shared__ gpu_socket *sock;

	int entry;
	cpu_ipc_entry* e;
	GET_QUEUE_SLOT(entry,e);

	sock = &g_soctable->_sockets[soc];

	e->cpu_sock = sock->cpu_sock;
	e->req_type = CONNECT_IPC_REQ;
	e->data_size = addrlen;
	memcpy_thread((char*)&e->addr.in, (char*)addr, (int)addrlen);

	ret_val = fire_and_wait(e);

	g_ipc_manager->freeEntry(entry);

	return ret_val;
}

// Low-level bind: allocates a GPU socket slot, asks the CPU proxy to create
// and bind the host-side socket, then (on success) pulls the IPC buffer
// layout and initializes the ring-queue cursors. Returns the GPU socket id,
// or the proxy's negative error code.
__device__ int ll_bind_in(const struct sockaddr_in *addr, socklen_t addrlen,
						  int sock_domain, int sock_proto, req_type_t req_type) {
	// all GPU failures should happen before getting to the CPU
	GPU_ASSERT(addr);

	__shared__ int ret_val;
	__shared__ int newsocket;
	__shared__ gpu_socket *sock;

	newsocket = gsocket(0,0,0);
	assert(newsocket != E_SOCTABLE_FULL);

	int entry;
	cpu_ipc_entry* e;
	GET_QUEUE_SLOT(entry,e);

	e->sock_domain = sock_domain;
	e->sock_proto = sock_proto;
	e->req_type = req_type; // BIND_IPC_REQ
	e->cpu_sock = -1;
	memcpy_thread((char*)&e->addr.in, (char*)addr, (int)addrlen);

	ret_val = fire_and_wait(e);
	g_ipc_manager->freeEntry(entry);

	if (ret_val >= 0) {
		sock = &g_soctable->_sockets[newsocket];
		sock->cpu_sock = ret_val;  // proxy returns the CPU-side socket fd

		// binding with the addr
		memcpy_thread((char*)sock->addr, (char*)addr, sizeof(*addr));

		// get the buffers at bind/connect
		sync_ipc_bufs(sock, e);

		// sbuf_free_list_init(sock); // initialized on CPU. let's uncomment when CPU is too slow.
		// Ring cursors: "front" starts at -1 (nothing consumed), "back" at 0.
		sock->sbuf_free_queue_front = -1;
		sock->rbuf_recved_front = -1;
		sock->rbuf_ack_queue_back = 0;

		__threadfence(); // propagate to everybody
	} else {
		g_soctable->free_socket(newsocket);
		newsocket = ret_val;  // propagate the error code as the return value
	}

	return newsocket;
}

// Single-thread entry: bind an rsocket datagram socket.
__device__ int single_thread_gbind_in(const sockaddr_in* addr, const socklen_t addrlen) {
	return ll_bind_in(addr, addrlen, AF_INET, IPPROTO_IBP, BIND_IPC_REQ);
}

// Single-thread entry: bind in "bounce buffer" mode.
__device__ int single_thread_gbind_bounce_in(const sockaddr_in* addr, const socklen_t addrlen) {
	return ll_bind_in(addr, addrlen, AF_INET, IPPROTO_IBP, BIND_IPC_REQ_BOUNCE);
}

// Block-wide connect wrapper; the IPC round-trip runs on a single thread.
__device__ int gconnect_in(int socket, const struct sockaddr_in* addr, const socklen_t addrlen) {
	__shared__ int retval;
	BEGIN_SINGLE_THREAD_PART {
		retval = ll_connect_in(socket, addr, addrlen, CONNECT_IPC_REQ);
	} END_SINGLE_THREAD_PART;
	return retval;
}

// Block-wide connect wrapper for bounce mode (see NOTE on ll_connect_in).
__device__ int gconnect_bounce_in(int socket, const struct sockaddr_in* addr, const socklen_t addrlen) {
	__shared__ int retval;
	BEGIN_SINGLE_THREAD_PART {
		retval = ll_connect_in(socket, addr, addrlen, CONNECT_IPC_REQ_BOUNCE);
	} END_SINGLE_THREAD_PART;
	return retval;
}

// Shared implementation of shutdown/close: forwards the request to the CPU
// proxy; for CLOSE_IPC_REQ the GPU-side slot is also released.
__device__ int ll_shutdown_close(int soc, req_type_t req, int how) {
	GPU_ASSERT((soc >= 0) && (soc < SOC_TABLE_SIZE));
	__shared__ gpu_socket* soc_e;
	__shared__ int ret_val;

	int entry;
	cpu_ipc_entry* e;

	soc_e = &g_soctable->_sockets[soc];

	GET_QUEUE_SLOT(entry, e);
	assert(e->status!=CPU_IPC_PENDING);

	e->cpu_sock = soc_e->cpu_sock;
	e->shutdown_flags = how;
	e->req_type = req;

	ret_val=fire_and_wait(e);
	g_ipc_manager->freeEntry(entry);

	if (req == CLOSE_IPC_REQ)
		g_soctable->free_socket(soc);

	return ret_val;
}

__device__ int single_thread_gshutdown(int soc, int how) {
	return ll_shutdown_close(soc,SHUTDOWN_IPC_REQ,how);
}

__device__ int single_thread_gclose(int soc) {
	return ll_shutdown_close(soc,CLOSE_IPC_REQ,SHUT_RDWR);
}

#ifdef GPUNET_PROFILE
#define def_timer(n) __shared__ long long int _t[n];
#define set_timer(n) _t[(n)] = clock64();
#else
#define def_timer(n)
#define set_timer(n)
#endif

// single threaded. currently not used, but may be used in the future for optimization
/*
static void sbuf_free_list_init(gpu_socket *soc_e) {
	int count_msgs = soc_e->sbuf_size / RS_SNDLOWAT;
	for (int i = 0; i < count_msgs; i++) {
		soc_e->dev_sbuf_free_queue[i] = i;
	}
	soc_e->sbuf_free_queue_front = -1;
}
*/

// single threaded
// Pops the next free send-buffer index from the free ring (size sq_size + 1)
// and returns the corresponding slot in sbuf. Caller must have checked
// non-emptiness first.
__device__ static volatile uchar* sbuf_free_list_pop(gpu_socket *soc_e) {
	int index;

	// free_queue has size of sq_size + 1
	if (++soc_e->sbuf_free_queue_front == soc_e->sq_size + 1) {
		soc_e->sbuf_free_queue_front = 0;
	}
	index = soc_e->dev_sbuf_free_queue[soc_e->sbuf_free_queue_front];

	return soc_e->sbuf + RS_SNDLOWAT * index;
}

// single threaded
// front: the item lastly consumed (initially -1)
// back : the item to be produced (initially 0)
// empty : (front + 1 == back mod sqsize+1)
// filled: (front + rq_size == back or front == back)
__device__ static bool sbuf_free_list_empty(gpu_socket *soc_e) {
	int sbuf_free_queue_front = soc_e->sbuf_free_queue_front;
	int sbuf_free_queue_back = *soc_e->dev_sbuf_free_queue_back;

	return (sbuf_free_queue_front + 1 == sbuf_free_queue_back) ||
		((sbuf_free_queue_back == 0) && (sbuf_free_queue_front == soc_e->sq_size));
}

// get free buffer and mark it as used. this does not have to reach out the CPU,
// but the CPU updates the metadata used by this function.

// this function is the consumer of the free-list, and the producer is at the CPU.
// however, this function should not explictly communicate with the CPU. The CPU would update the list

// called from single thread
// Blocking sockets spin until a slot frees up; non-blocking return NULL.
__device__ static volatile uchar* gsend_get_free_buffer(gpu_socket *soc_e) {
	bool is_empty;

	while ((is_empty = sbuf_free_list_empty(soc_e)) && !SOC_NONBLOCK(soc_e));

	if (is_empty)
		return NULL;

	return sbuf_free_list_pop(soc_e);
}

// this is the producer of the buffer to be transferred. the consumer is at the CPU,
// not single threaded, but internally single-threaded
// Publishes a filled send slot (index, length, destination address) to the
// host request queue; the fence makes the payload visible before the host
// sees the advanced back cursor.
__device__ void gsend_notify_cpu(gpu_socket *soc_e, volatile uint8_t *buf, int size, sockaddr *addr, socklen_t addrlen) {
	__shared__ volatile ds_smsg_info *msg_info;

	BEGIN_SINGLE_THREAD_PART {
		msg_info = &soc_e->host_sbuf_req_queue[*soc_e->dev_sbuf_req_back];
		msg_info->msg_index = (buf - soc_e->sbuf) / RS_SNDLOWAT;
		msg_info->length = (uint16_t)size;
		msg_info->addrlen = (uint16_t)addrlen;
	} END_SINGLE_THREAD_PART;

	copy_block_dst_volatile(msg_info->addr, (uchar*)addr, (int)addrlen);

	__threadfence_system();

	BEGIN_SINGLE_THREAD_PART {
		// the queue should be never filled because gsend_get_free_buffer() limits the max inflight messages to be less than or equal to sq_size
		// this condition is the reason why sbuf_req_queue is of size sq_size + 1 (capacity = sq_size)
		if (++(*soc_e->dev_sbuf_req_back) == soc_e->sq_size + 1) {
			*soc_e->dev_sbuf_req_back = 0;
		}
		*soc_e->host_sbuf_req_back = *soc_e->dev_sbuf_req_back;
	} END_SINGLE_THREAD_PART;
}

// Datagram send: grab a free slot, copy the payload in block-wide, then hand
// the slot to the CPU proxy. Returns size on success or -EWOULDBLOCK if the
// socket is non-blocking and no slot is free.
// NOTE(review): the `size != 0` check is dead given GPU_ASSERT(to_send && size)
// above — confirm which of the two is the intended contract.
__device__ int gsendto(int soc, uchar* to_send, int size, struct sockaddr *destaddr, int addrlen) {
	GPU_ASSERT((soc >= 0) && (soc < SOC_TABLE_SIZE));
	GPU_ASSERT(to_send && size);

	__shared__ gpu_socket* soc_e;
	__shared__ volatile uchar *buf;

	BEGIN_SINGLE_THREAD_PART {
		soc_e=&g_soctable->_sockets[soc];

		// 1) get the availalble message buffer index from the free list
		buf = gsend_get_free_buffer(soc_e);
	} END_SINGLE_THREAD_PART;

	if (size != 0) {
		if (buf == NULL) {
			return -EWOULDBLOCK;
		}

		// 2) copy the buffer to the sbuf
		copy_block_dst_volatile(buf, to_send, size);

		// 3) then notify the buffer use to the CPU proxy
		gsend_notify_cpu(soc_e, buf, size, destaddr, addrlen);
	}

	return size;
}

// Empty test for the receive ring given an explicit front cursor.
__device__ static bool __grecv_is_empty(gpu_socket *soc_e, int rbuf_recved_front) {
	int rbuf_recved_back = *soc_e->dev_rbuf_recved_back;
	int rq_size = soc_e->rq_size;

	return (rbuf_recved_front + 1) == rbuf_recved_back ||
		(rbuf_recved_front == rbuf_recved_back + rq_size);
}

__device__ static bool grecv_is_empty(gpu_socket *soc_e) {
	return __grecv_is_empty(soc_e, soc_e->rbuf_recved_front);
}

// called from single_thread. returns 0 when nothing is available
// this is the consumer for rbuf buffer. the producer is in the rsocket_dgram.
// On success fills *addr/*addrlen with the sender address, *msgbuf with the
// payload pointer, advances the front cursor, and returns the message length.
__device__ static int grecv_check(gpu_socket *soc_e, struct sockaddr *addr, socklen_t *addrlen, uint8_t** msgbuf) {
	// front: the item lastly consumed (initially -1)
	// back : the item to be produced (initially 0)
	// empty : (front + 1 == back mod rqsize)
	// filled: (front + rq_size == back or front == back)
	__shared__ int rbuf_recved_front;
	__shared__ volatile ds_rmsg_info *rmsg;
	__shared__ int msglen;

	BEGIN_SINGLE_THREAD_PART {
		rbuf_recved_front = soc_e->rbuf_recved_front;

		if (__grecv_is_empty(soc_e, rbuf_recved_front)) {
			rmsg = NULL;
		} else {
			// progress the front pointer before accessing.
			if (++rbuf_recved_front == soc_e->rq_size + 1) {
				rbuf_recved_front = 0;
			}

			rmsg = &soc_e->dev_rbuf_recved_queue[rbuf_recved_front];
			msglen = rmsg->length;
			*addrlen = (socklen_t)rmsg->addrlen;
			*msgbuf = (uint8_t*)rmsg->ptr;
		}
	} END_SINGLE_THREAD_PART;

	if (rmsg == NULL)
		return 0;

	copy_block_dst_volatile((uchar*)addr, (uchar*)rmsg->addr, (size_t)*addrlen);

	BEGIN_SINGLE_THREAD_PART {
		soc_e->rbuf_recved_front = rbuf_recved_front;
	} END_SINGLE_THREAD_PART;

	return msglen;
}

// single threaded
// Acknowledges one consumed receive slot to the host so it can be reused;
// the system fence orders the payload reads before the cursor publish.
__device__ static void grecv_ack(gpu_socket *soc_e) {
	__threadfence_system();
	if (++soc_e->rbuf_ack_queue_back == soc_e->rq_size + 1) {
		soc_e->rbuf_ack_queue_back = 0;
	}
	*soc_e->host_rbuf_ack_queue_back = soc_e->rbuf_ack_queue_back;
}

// Datagram receive: poll the CPU-fed ring (spin when blocking), copy the
// payload out block-wide, then ack the slot. Returns the message length or
// -EWOULDBLOCK for an empty non-blocking socket.
__device__ int grecvfrom(int soc, void* buf, int size, struct sockaddr *addr, socklen_t *addrlen) {
	GPU_ASSERT(soc>=0 && soc< SOC_TABLE_SIZE);
	GPU_ASSERT(buf&&size);

	__shared__ gpu_socket* soc_e;
	__shared__ int ret_val;
	__shared__ uint8_t *msgbuf;

	BEGIN_SINGLE_THREAD_PART {
		soc_e = &g_soctable->_sockets[soc];
	} END_SINGLE_THREAD_PART;

	// 1) see if there is any new data by accessing the CPU updated buffer (get the size and addr!)
	do {
		ret_val = grecv_check(soc_e, addr, addrlen, &msgbuf);
	} while (ret_val == 0 && !SOC_NONBLOCK(soc_e));

	if (ret_val == 0) {
		return -EWOULDBLOCK;
	}

	assert(ret_val > 0);

	// 3) copying the data to the buffer
	copy_block_src_volatile((uchar*)buf, msgbuf, ret_val);

	BEGIN_SINGLE_THREAD_PART {
		// acking the cpu is necessary to free the buffer
		grecv_ack(soc_e);
	} END_SINGLE_THREAD_PART;

	return ret_val;
}

// Block-wide poll over gpollfd entries: threads stripe across fds, checking
// receive-ring non-emptiness (POLLIN) and send free-list availability
// (POLLOUT). Loops until some fd is ready or nclock_timeout clocks elapse.
__device__ int gpoll(struct gpollfd* fds, size_t nfds, int nclock_timeout) {
	__shared__ long long clock_start;
	__shared__ size_t nth;
	__shared__ bool is_timeout;
	__shared__ int may_return;

	BEGIN_SINGLE_THREAD_PART {
		clock_start = clock64();
		is_timeout = false;
		nth = blockDim.x * blockDim.y * blockDim.z;
		may_return = 0;
	} END_SINGLE_THREAD_PART;

	do {
		for(int i = TID; i < nfds; i+= nth) {
			struct gpollfd pfd = fds[i];
			pfd.revents = 0;
			pfd.rbytes = 0;
			pfd.wbytes = 0;

			int soc = pfd.fd;
			if (soc < 0 || soc >= SOC_TABLE_SIZE) {
				// invalid socket.
				pfd.revents |= POLLNVAL;
			} else {
				// common case: the valid socket.
				gpu_socket *soc_e = &g_soctable->_sockets[soc];

				if (!grecv_is_empty(soc_e)) {
					pfd.revents |= POLLIN;
					pfd.rbytes = RS_SNDLOWAT;
					may_return = 1;
				}

				if (!sbuf_free_list_empty(soc_e)) {
					pfd.revents |= POLLOUT;
					pfd.wbytes = RS_SNDLOWAT;
					may_return = 1;
				}
			}

			// Only report events the caller asked for (plus error conditions).
			pfd.revents &= pfd.events | POLLERR | POLLNVAL | POLLHUP;
			fds[i] = pfd;
		}

		BEGIN_SINGLE_THREAD_PART {
			if (nclock_timeout > 0 && clock64() - clock_start > nclock_timeout)
				is_timeout = true;
		} END_SINGLE_THREAD_PART;
	} while (may_return == 0 && !is_timeout);

	return may_return;
}

// Sets or clears O_NONBLOCK on the socket entry (blocking==0 -> non-blocking).
__device__ void gsetsock_block(int socket, int blocking) {
	__shared__ gpu_socket* soc_e;
	BEGIN_SINGLE_THREAD_PART {
		soc_e = &g_soctable->_sockets[socket];
		if (!blocking) {
			soc_e->e_flags |= O_NONBLOCK;
		} else {
			soc_e->e_flags &= (~O_NONBLOCK);
		}
	} END_SINGLE_THREAD_PART;
}

// Sets the bounce-buffer flag. NOTE(review): only sets, never clears.
__device__ void gsetsock_bounce(int socket, int bounce) {
	__shared__ gpu_socket* soc_e;
	BEGIN_SINGLE_THREAD_PART {
		soc_e = &g_soctable->_sockets[socket];
		if (bounce)
			soc_e->e_flags |= O_GPUNET_BOUNCE;
	} END_SINGLE_THREAD_PART;
}

// Returns 1 if the socket is in blocking mode, 0 otherwise.
__device__ int ggetsock_block(int socket) {
	__shared__ gpu_socket* soc_e;
	__shared__ int ret;
	BEGIN_SINGLE_THREAD_PART {
		soc_e = &g_soctable->_sockets[socket];
		ret = (soc_e->e_flags & O_NONBLOCK);
	} END_SINGLE_THREAD_PART;
	return (!ret);
}

// Fetches the host wall-clock time via an IPC round-trip to the CPU proxy.
__device__ void ggettimeofday(struct gtimeval *tv) {
	__shared__ int ipc_slot;
	__shared__ cpu_ipc_entry* e;

	BEGIN_SINGLE_THREAD_PART {
		GET_QUEUE_SLOT(ipc_slot,e);
		e->req_type = LOG_TIMESTAMP_REQ;
		fire_and_wait(e);
		tv->tv_sec = e->tv_sec;
		tv->tv_usec = e->tv_usec;
		g_ipc_manager->freeEntry(ipc_slot);
	} END_SINGLE_THREAD_PART;
}

// c = a - b with microsecond borrow, mirroring timersub().
__device__ void gtimersub(struct gtimeval *a, struct gtimeval *b, struct gtimeval *c) {
	c->tv_sec = 0;
	c->tv_usec = a->tv_usec - b->tv_usec;
	if (c->tv_usec < 0) {
		c->tv_usec += 1000000;
		c->tv_sec = -1;
	}
	c->tv_sec += (a->tv_sec - b->tv_sec);
}

// Single-thread "puts": ships a NUL-terminated string (truncated to the IPC
// slot capacity) to the CPU proxy for printing.
__device__ void gputs_single(const char* str, int len) {
	__shared__ int ipc_slot;
	__shared__ cpu_ipc_entry* e;

	GET_QUEUE_SLOT(ipc_slot,e);

	// use local for copying string
	e->sock_domain = blockIdx.z * (blockDim.x * blockDim.y) + blockIdx.y * blockDim.x + blockIdx.x;
	e->req_type = PUTS_REQ;
	int len_copy = (len < GPU_LOCAL_SOC_MAX_PATH) ? len : GPU_LOCAL_SOC_MAX_PATH;
	strncpy_thread(e->addr.local, str, len_copy);
	e->addr.local[len_copy-1] = '\0';
	fire_and_wait(e);
	g_ipc_manager->freeEntry(ipc_slot);
}

// Debug variant: after printing, reports any thread index whose bit is not
// set in the caller-supplied bitmap (one bit per thread, 32 per word).
__device__ void gputs_single(const char* str, int len, unsigned int* threads) {
	gputs_single(str, len);

	for (int i = 0; i < blockDim.x; i++) {
		if (!(threads[i >> 5] & (1 << (i & 31)))) {
			gprintf4_single("thread %d missing\n", i, 0, 0, 0);
		}
	}
}

// Unsigned integer -> ASCII in the given base; returns the digit count.
__device__ int ui2a(unsigned int num, unsigned int base,char * bf) {
	int n=0;
	unsigned int d=1;
	while (num/d >= base)
		d*=base;
	while (d!=0) {
		int dgt = num / d;
		num%= d;
		d/=base;
		if (n || dgt>0 || d==0) {
			*bf++ = dgt+(dgt<10 ? '0' : 'a'-10);
			++n;
		}
	}
	*bf=0;
	return n;
}

// Signed decimal -> ASCII; returns the digit count (sign not counted).
__device__ int i2a (int num, char * bf) {
	if (num<0) {
		num=-num;
		*bf++ = '-';
	}
	return ui2a(num,10,bf);
}

// printf for one integer
// Minimal single-thread printf supporting %d and %x with up to four integer
// arguments; the formatted string is sent to the CPU proxy as a PUTS_REQ.
__device__ void gprintf4_single(const char* str, int arg1, int arg2, int arg3, int arg4) {
	__shared__ int ipc_slot;
	__shared__ cpu_ipc_entry* e;
	char* buf, ch;
	__shared__ char bf[12];

	assert(threadIdx.x == 0);

	GET_QUEUE_SLOT(ipc_slot,e);

	// use local for copying string
	e->sock_domain = blockIdx.z * (blockDim.x * blockDim.y) + blockIdx.y * blockDim.x + blockIdx.x;
	e->req_type = PUTS_REQ;

	buf = (char*)e->addr.local;

	int cnt = 0, len_copy;
	while ((ch=*(str++)) && (buf < (e->addr.local + GPU_LOCAL_SOC_MAX_PATH - 1))) {
		if (ch!='%') {
			*buf = ch;
			buf++;
		} else {
			ch=*(str++);
			if (ch == 'd'){
				switch(cnt) {
				case 0:
					len_copy = i2a(arg1, bf);
					break;
				case 1:
					len_copy = i2a(arg2, bf);
					break;
				case 2:
					len_copy = i2a(arg3, bf);
					break;
				case 3:
					len_copy = i2a(arg4, bf);
					break;
				default:
					len_copy = i2a(arg4, bf);
					break;
				}
				strncpy_thread(buf, bf, len_copy);
				buf += len_copy;
			} else if (ch == 'x') {
				switch(cnt) {
				case 0:
					len_copy = ui2a(arg1, 16, bf);
					break;
				case 1:
					len_copy = ui2a(arg2, 16, bf);
					break;
				case 2:
					len_copy = ui2a(arg3, 16, bf);
					break;
				case 3:
					len_copy = ui2a(arg4, 16, bf);
					break;
				default:
					len_copy = ui2a(arg4, 16, bf);
					break;
				}
				strncpy_thread(buf, bf, len_copy);
				buf += len_copy;
			}
			if (ch != '%') cnt++;
		}
	}
	*buf = '\0';

	fire_and_wait(e);
	g_ipc_manager->freeEntry(ipc_slot);
}

// Block-wide puts: records participating threads in a shared bitmap, then a
// single thread ships the string.
// NOTE(review): `len` is not forwarded and the bitmap is never read here —
// gprintf4_single(str, 0, 0, 0, 0) prints str only; confirm this is intended.
__device__ void gputs(const char* str, int len) {
	__shared__ unsigned int threads[32];

	atomicOr(&threads[threadIdx.x >> 5], (1 << (threadIdx.x & 31)));

	BEGIN_SINGLE_THREAD_PART {
		gprintf4_single(str, 0, 0, 0, 0);
	} END_SINGLE_THREAD_PART;
}

// Returns the CPU-side socket fd paired with a GPU socket slot.
__device__ int ggetcpusock(int sock) {
	gpu_socket *soc_e =&g_soctable->_sockets[sock];
	return soc_e->cpu_sock;
}
the_stack
#include "cupoch/geometry/image.h"
#include "cupoch/geometry/rgbdimage.h"
#include "cupoch/odometry/odometry.h"
#include "cupoch/odometry/rgbdodometry_jacobian.h"
#include "cupoch/utility/platform.h"

// RGBD visual odometry on the GPU (cupoch). Correspondence maps are stored as
// geometry::Image buffers: a 2-channel int image mapping each source pixel to
// its target pixel, plus a 1-channel float depth buffer used to keep the
// nearest correspondence when several target pixels project to one source
// pixel.

namespace cupoch {
namespace odometry {

namespace {

// Fills a correspondence map with (-1,-1) and its depth buffer with -1.0f.
// The buffers are raw byte pointers reinterpreted as int / float arrays.
struct initialize_correspondence_map_functor {
    initialize_correspondence_map_functor(uint8_t *correspondence_map,
                                          uint8_t *depth_buffer,
                                          int width)
        : correspondence_map_(correspondence_map),
          depth_buffer_(depth_buffer),
          width_(width){};
    uint8_t *correspondence_map_;
    uint8_t *depth_buffer_;
    int width_;
    __device__ void operator()(size_t idx) {
        // Two int channels per pixel: target u and target v, both reset to -1.
        *(int *)(correspondence_map_ + idx * 2 * sizeof(int)) = -1;
        *(int *)(correspondence_map_ + (idx * 2 + 1) * sizeof(int)) = -1;
        *(float *)(depth_buffer_ + idx * sizeof(float)) = -1.0f;
    }
};

// Allocates and resets a (width x height) correspondence map (2ch int) and
// matching depth buffer (1ch float). Returns both as shared_ptr images.
std::tuple<std::shared_ptr<geometry::Image>, std::shared_ptr<geometry::Image>>
InitializeCorrespondenceMap(int width, int height) {
    // initialization: filling with any (u,v) to (-1,-1)
    auto correspondence_map = std::make_shared<geometry::Image>();
    auto depth_buffer = std::make_shared<geometry::Image>();
    correspondence_map->Prepare(width, height, 2, 4);
    depth_buffer->Prepare(width, height, 1, 4);
    initialize_correspondence_map_functor func(
            thrust::raw_pointer_cast(correspondence_map->data_.data()),
            thrust::raw_pointer_cast(depth_buffer->data_.data()), width);
    thrust::for_each(thrust::make_counting_iterator<size_t>(0),
                     thrust::make_counting_iterator<size_t>(width * height),
                     func);
    return std::make_tuple(correspondence_map, depth_buffer);
}

// Registers (u_t, v_t) as the correspondence of source pixel (u_s, v_s),
// keeping whichever candidate has the smaller transformed depth when an
// entry already exists (nearest surface wins).
__device__ inline void AddElementToCorrespondenceMap(
        uint8_t *correspondence_map, uint8_t *depth_buffer, int width,
        int u_s, int v_s, int u_t, int v_t, float transformed_d_t) {
    int exist_u_t, exist_v_t;
    float exist_d_t;
    exist_u_t = *geometry::PointerAt<int>(correspondence_map, width, 2, u_s,
                                          v_s, 0);
    exist_v_t = *geometry::PointerAt<int>(correspondence_map, width, 2, u_s,
                                          v_s, 1);
    if (exist_u_t != -1 && exist_v_t != -1) {
        exist_d_t = *geometry::PointerAt<float>(depth_buffer, width, u_s, v_s);
        if (transformed_d_t < exist_d_t) {
            // update nearer point as correspondence
            *geometry::PointerAt<int>(correspondence_map, width, 2, u_s, v_s,
                                      0) = u_t;
            *geometry::PointerAt<int>(correspondence_map, width, 2, u_s, v_s,
                                      1) = v_t;
            *geometry::PointerAt<float>(depth_buffer, width, u_s, v_s) =
                    transformed_d_t;
        }
    } else {  // register correspondence
        *geometry::PointerAt<int>(correspondence_map, width, 2, u_s, v_s, 0) =
                u_t;
        *geometry::PointerAt<int>(correspondence_map, width, 2, u_s, v_s, 1) =
                v_t;
        *geometry::PointerAt<float>(depth_buffer, width, u_s, v_s) =
                transformed_d_t;
    }
}

// Folds a partial correspondence map into the main one, pixel by pixel,
// reusing the nearest-depth tie-breaking of AddElementToCorrespondenceMap.
struct merge_correspondence_maps_functor {
    merge_correspondence_maps_functor(uint8_t *correspondence_map,
                                      uint8_t *depth_buffer,
                                      uint8_t *correspondence_map_part,
                                      uint8_t *depth_buffer_part,
                                      int width)
        : correspondence_map_(correspondence_map),
          depth_buffer_(depth_buffer),
          correspondence_map_part_(correspondence_map_part),
          depth_buffer_part_(depth_buffer_part),
          width_(width){};
    uint8_t *correspondence_map_;
    uint8_t *depth_buffer_;
    uint8_t *correspondence_map_part_;
    uint8_t *depth_buffer_part_;
    int width_;
    __device__ void operator()(size_t idx) {
        int v_s = idx / width_;
        int u_s = idx % width_;
        int u_t = *geometry::PointerAt<int>(correspondence_map_part_, width_,
                                            2, u_s, v_s, 0);
        int v_t = *geometry::PointerAt<int>(correspondence_map_part_, width_,
                                            2, u_s, v_s, 1);
        if (u_t != -1 && v_t != -1) {
            float transformed_d_t = *geometry::PointerAt<float>(
                    depth_buffer_part_, width_, u_s, v_s);
            AddElementToCorrespondenceMap(correspondence_map_, depth_buffer_,
                                          width_, u_s, v_s, u_t, v_t,
                                          transformed_d_t);
        }
    }
};

// Host wrapper: merges a partial map/buffer pair into the accumulated one
// over every pixel.
void MergeCorrespondenceMaps(geometry::Image &correspondence_map,
                             geometry::Image &depth_buffer,
                             geometry::Image &correspondence_map_part,
                             geometry::Image &depth_buffer_part) {
    merge_correspondence_maps_functor func(
            thrust::raw_pointer_cast(correspondence_map.data_.data()),
            thrust::raw_pointer_cast(depth_buffer.data_.data()),
            thrust::raw_pointer_cast(correspondence_map_part.data_.data()),
            thrust::raw_pointer_cast(depth_buffer_part.data_.data()),
            correspondence_map.width_);
    thrust::for_each(
            thrust::make_counting_iterator<size_t>(0),
            thrust::make_counting_iterator<size_t>(correspondence_map.width_ *
                                                   correspondence_map.height_),
            func);
}

// Projects each valid source depth pixel into the target frame via
// KRK_inv / Kt and records the correspondence when the reprojected depth
// agrees with the target depth within max_depth_diff.
struct compute_correspondence_map {
    compute_correspondence_map(const uint8_t *depth_s,
                               const uint8_t *depth_t,
                               int width,
                               int height,
                               uint8_t *correspondence_map,
                               uint8_t *depth_buffer,
                               const Eigen::Vector3f &Kt,
                               const Eigen::Matrix3f &KRK_inv,
                               float max_depth_diff)
        : depth_s_(depth_s),
          depth_t_(depth_t),
          width_(width),
          height_(height),
          correspondence_map_(correspondence_map),
          depth_buffer_(depth_buffer),
          Kt_(Kt),
          KRK_inv_(KRK_inv),
          max_depth_diff_(max_depth_diff){};
    const uint8_t *depth_s_;
    const uint8_t *depth_t_;
    int width_;
    int height_;
    uint8_t *correspondence_map_;
    uint8_t *depth_buffer_;
    const Eigen::Vector3f Kt_;
    const Eigen::Matrix3f KRK_inv_;
    const float max_depth_diff_;
    __device__ void operator()(size_t idx) {
        int v_s = idx / width_;
        int u_s = idx % width_;
        float d_s = *geometry::PointerAt<float>(depth_s_, width_, u_s, v_s);
        if (!isnan(d_s)) {
            // Reproject (u_s, v_s, d_s) into the target image plane.
            Eigen::Vector3f uv_in_s =
                    d_s * KRK_inv_ * Eigen::Vector3f(u_s, v_s, 1.0) + Kt_;
            float transformed_d_s = uv_in_s(2);
            // Round to nearest target pixel.
            int u_t = (int)(uv_in_s(0) / transformed_d_s + 0.5);
            int v_t = (int)(uv_in_s(1) / transformed_d_s + 0.5);
            if (u_t >= 0 && u_t < width_ && v_t >= 0 && v_t < height_) {
                float d_t = *geometry::PointerAt<float>(depth_t_, width_, u_t,
                                                        v_t);
                if (!isnan(d_t) &&
                    std::abs(transformed_d_s - d_t) <= max_depth_diff_) {
                    // NOTE(review): d_s (source-frame depth) is used as the
                    // depth-buffer key here rather than transformed_d_s —
                    // presumably intentional; confirm against upstream.
                    AddElementToCorrespondenceMap(correspondence_map_,
                                                  depth_buffer_, width_, u_s,
                                                  v_s, u_t, v_t, (float)d_s);
                }
            }
        }
    }
};

// Expands the packed correspondence map into (u_s, v_s, u_t, v_t) tuples;
// unmatched pixels keep u_t = v_t = -1 and are filtered out later.
struct compute_correspondence_functor {
    compute_correspondence_functor(const uint8_t *correspondence_map,
                                   int width)
        : correspondence_map_(correspondence_map), width_(width){};
    const uint8_t *correspondence_map_;
    const int width_;
    __device__ Eigen::Vector4i operator()(size_t idx) const {
        int v_s = idx / width_;
        int u_s = idx % width_;
        int u_t = *(int *)(correspondence_map_ + idx * 2 * sizeof(int));
        int v_t = *(int *)(correspondence_map_ + (idx * 2 + 1) * sizeof(int));
        return Eigen::Vector4i(u_s, v_s, u_t, v_t);
    }
};

// Builds the pixel-wise correspondence set between two depth images under
// the given intrinsics/extrinsic, compacting away unmatched pixels.
void ComputeCorrespondence(const Eigen::Matrix3f intrinsic_matrix,
                           const Eigen::Matrix4f &extrinsic,
                           const geometry::Image &depth_s,
                           const geometry::Image &depth_t,
                           const OdometryOption &option,
                           CorrespondenceSetPixelWise &correspondence) {
    const Eigen::Matrix3f K = intrinsic_matrix;
    const Eigen::Matrix3f K_inv = K.inverse();
    const Eigen::Matrix3f R = extrinsic.block<3, 3>(0, 0);
    const Eigen::Matrix3f KRK_inv = K * R * K_inv;
    Eigen::Vector3f Kt = K * extrinsic.block<3, 1>(0, 3);

    std::shared_ptr<geometry::Image> correspondence_map;
    std::shared_ptr<geometry::Image> depth_buffer;
    std::tie(correspondence_map, depth_buffer) =
            InitializeCorrespondenceMap(depth_t.width_, depth_t.height_);

    // Private map mirrors the OpenMP-per-thread buffers of the CPU version.
    std::shared_ptr<geometry::Image> correspondence_map_private;
    std::shared_ptr<geometry::Image> depth_buffer_private;
    std::tie(correspondence_map_private, depth_buffer_private) =
            InitializeCorrespondenceMap(depth_t.width_, depth_t.height_);

    compute_correspondence_map func_cm(
            thrust::raw_pointer_cast(depth_s.data_.data()),
            thrust::raw_pointer_cast(depth_t.data_.data()), depth_s.width_,
            depth_s.height_,
            thrust::raw_pointer_cast(correspondence_map_private->data_.data()),
            thrust::raw_pointer_cast(depth_buffer_private->data_.data()), Kt,
            KRK_inv, option.max_depth_diff_);
    thrust::for_each(thrust::make_counting_iterator<size_t>(0),
                     thrust::make_counting_iterator<size_t>(depth_s.width_ *
                                                            depth_s.height_),
                     func_cm);
    MergeCorrespondenceMaps(*correspondence_map, *depth_buffer,
                            *correspondence_map_private, *depth_buffer_private);

    correspondence.resize(correspondence_map->width_ *
                          correspondence_map->height_);
    compute_correspondence_functor func_cc(
            thrust::raw_pointer_cast(correspondence_map->data_.data()),
            correspondence_map->width_);
    thrust::transform(
            thrust::make_counting_iterator<size_t>(0),
            thrust::make_counting_iterator<size_t>(correspondence_map->width_ *
                                                   correspondence_map->height_),
            correspondence.begin(), func_cc);
    // Drop pixels with no match (target coordinates still -1).
    auto end = thrust::remove_if(correspondence.begin(), correspondence.end(),
                                 [] __device__(const Eigen::Vector4i &pc) {
                                     return (pc[2] == -1 || pc[3] == -1);
                                 });
    correspondence.resize(thrust::distance(correspondence.begin(), end));
}

// Back-projects a depth image into a 3-channel float XYZ image using the
// pinhole parameters (principal point ox/oy, inverse focal lengths).
struct convert_depth_to_xyz_image_functor {
    convert_depth_to_xyz_image_functor(const uint8_t *depth,
                                       int width,
                                       uint8_t *image_xyz,
                                       float ox,
                                       float oy,
                                       float inv_fx,
                                       float inv_fy)
        : depth_(depth),
          width_(width),
          image_xyz_(image_xyz),
          ox_(ox),
          oy_(oy),
          inv_fx_(inv_fx),
          inv_fy_(inv_fy){};
    const uint8_t *depth_;
    const int width_;
    uint8_t *image_xyz_;
    const float ox_;
    const float oy_;
    const float inv_fx_;
    const float inv_fy_;
    __device__ void operator()(size_t idx) {
        int y = idx / width_;
        int x = idx % width_;
        float *px = geometry::PointerAt<float>(image_xyz_, width_, 3, x, y, 0);
        float *py = geometry::PointerAt<float>(image_xyz_, width_, 3, x, y, 1);
        float *pz = geometry::PointerAt<float>(image_xyz_, width_, 3, x, y, 2);
        float z = *geometry::PointerAt<float>(depth_, width_, x, y);
        *px = (x - ox_) * z * inv_fx_;
        *py = (y - oy_) * z * inv_fy_;
        *pz = z;
    }
};

// Host wrapper for the back-projection above. Requires a 1-channel, 4-byte
// float depth image; logs an error otherwise.
std::shared_ptr<geometry::Image> ConvertDepthImageToXYZImage(
        const geometry::Image &depth, const Eigen::Matrix3f &intrinsic_matrix) {
    auto image_xyz = std::make_shared<geometry::Image>();
    if (depth.num_of_channels_ != 1 || depth.bytes_per_channel_ != 4) {
        utility::LogError(
                "[ConvertDepthImageToXYZImage] Unsupported image format.");
    }
    const float inv_fx = 1.0 / intrinsic_matrix(0, 0);
    const float inv_fy = 1.0 / intrinsic_matrix(1, 1);
    const float ox = intrinsic_matrix(0, 2);
    const float oy = intrinsic_matrix(1, 2);
    image_xyz->Prepare(depth.width_, depth.height_, 3, 4);
    convert_depth_to_xyz_image_functor func(
            thrust::raw_pointer_cast(depth.data_.data()), depth.width_,
            thrust::raw_pointer_cast(image_xyz->data_.data()), ox, oy, inv_fx,
            inv_fy);
    thrust::for_each(thrust::make_counting_iterator<size_t>(0),
                     thrust::make_counting_iterator<size_t>(image_xyz->width_ *
                                                            image_xyz->height_),
                     func);
    return image_xyz;
}

// Camera matrix per pyramid level: focal lengths and principal point are
// halved per level, with the homogeneous entry pinned to 1.
std::vector<Eigen::Matrix3f> CreateCameraMatrixPyramid(
        const camera::PinholeCameraIntrinsic &pinhole_camera_intrinsic,
        int levels) {
    std::vector<Eigen::Matrix3f> pyramid_camera_matrix;
    pyramid_camera_matrix.reserve(levels);
    for (int i = 0; i < levels; i++) {
        Eigen::Matrix3f level_camera_matrix;
        if (i == 0)
            level_camera_matrix = pinhole_camera_intrinsic.intrinsic_matrix_;
        else
            level_camera_matrix = 0.5 * pyramid_camera_matrix[i - 1];
        level_camera_matrix(2, 2) = 1.;
        pyramid_camera_matrix.push_back(level_camera_matrix);
    }
    return pyramid_camera_matrix;
}

// Per-correspondence 6x6 G^T G contribution from the target XYZ image,
// built from the three point-to-rigid-motion Jacobian rows.
struct compute_gtg_functor {
    compute_gtg_functor(const uint8_t *xyz_t, int width)
        : xyz_t_(xyz_t), width_(width){};
    const uint8_t *xyz_t_;
    const int width_;
    __device__ Eigen::Matrix6f operator()(const Eigen::Vector4i &corres) const {
        int u_t = corres(2);
        int v_t = corres(3);
        float x = *geometry::PointerAt<float>(xyz_t_, width_, 3, u_t, v_t, 0);
        float y = *geometry::PointerAt<float>(xyz_t_, width_, 3, u_t, v_t, 1);
        float z = *geometry::PointerAt<float>(xyz_t_, width_, 3, u_t, v_t, 2);
        Eigen::Vector6f g_r_1 =
                (Eigen::Vector6f() << 0.0, z, -y, 1.0, 0.0, 0.0).finished();
        Eigen::Vector6f g_r_2 =
                (Eigen::Vector6f() << -z, 0.0, x, 0.0, 1.0, 0.0).finished();
        Eigen::Vector6f g_r_3 =
                (Eigen::Vector6f() << y, -x, 0.0, 0.0, 0.0, 1.0).finished();
        return g_r_1 * g_r_1.transpose() + g_r_2 * g_r_2.transpose() +
               g_r_3 * g_r_3.transpose();
    }
};

// Information matrix (sum of per-correspondence G^T G) for the estimated
// transform, used downstream for pose-graph weighting.
Eigen::Matrix6f CreateInformationMatrix(
        const Eigen::Matrix4f &extrinsic,
        const camera::PinholeCameraIntrinsic &pinhole_camera_intrinsic,
        const geometry::Image &depth_s,
        const geometry::Image &depth_t,
        const OdometryOption &option) {
    CorrespondenceSetPixelWise correspondence;
    ComputeCorrespondence(pinhole_camera_intrinsic.intrinsic_matrix_, extrinsic,
                          depth_s, depth_t, option, correspondence);

    auto xyz_t = ConvertDepthImageToXYZImage(
            depth_t, pinhole_camera_intrinsic.intrinsic_matrix_);

    // write q^*
    // see http://redwood-data.org/indoor/registration.html
    // note: I comes first and q_skew is scaled by factor 2.
    compute_gtg_functor func(thrust::raw_pointer_cast(xyz_t->data_.data()),
                             xyz_t->width_);
    Eigen::Matrix6f init = Eigen::Matrix6f::Identity();
    Eigen::Matrix6f GTG = thrust::transform_reduce(
            utility::exec_policy(0)->on(0), correspondence.begin(),
            correspondence.end(), func, init, thrust::plus<Eigen::Matrix6f>());
    return GTG;
}

// Fetches the (source intensity, target intensity) pair for one
// correspondence; used to compute per-image mean intensities.
struct make_correspondence_pixel_pair {
    make_correspondence_pixel_pair(const uint8_t *image_s,
                                   const uint8_t *image_t,
                                   int width)
        : image_s_(image_s), image_t_(image_t), width_(width){};
    const uint8_t *image_s_;
    const uint8_t *image_t_;
    int width_;
    __device__ thrust::tuple<float, float> operator()(
            const Eigen::Vector4i &corres) const {
        int u_s = corres(0);
        int v_s = corres(1);
        int u_t = corres(2);
        int v_t = corres(3);
        return thrust::make_tuple(
                *geometry::PointerAt<float>(image_s_, width_, u_s, v_s),
                *geometry::PointerAt<float>(image_t_, width_, u_t, v_t));
    }
};

// Scales both intensity images so each has mean 0.5 over the matched
// pixels, making the photometric term exposure-invariant.
void NormalizeIntensity(geometry::Image &image_s,
                        geometry::Image &image_t,
                        CorrespondenceSetPixelWise &correspondence) {
    if (image_s.width_ != image_t.width_ ||
        image_s.height_ != image_t.height_) {
        utility::LogError(
                "[NormalizeIntensity] Size of two input images should be "
                "same");
    }
    make_correspondence_pixel_pair func_tf(
            thrust::raw_pointer_cast(image_s.data_.data()),
            thrust::raw_pointer_cast(image_t.data_.data()), image_s.width_);
    auto means = thrust::transform_reduce(
            utility::exec_policy(0)->on(0), correspondence.begin(),
            correspondence.end(), func_tf, thrust::make_tuple(0.0f, 0.0f),
            add_tuple_functor<float, float>());
    float mean_s = thrust::get<0>(means) / (float)correspondence.size();
    float mean_t = thrust::get<1>(means) / (float)correspondence.size();
    image_s.LinearTransform(0.5 / mean_s, 0.0);
    image_t.LinearTransform(0.5 / mean_t, 0.0);
}

// Bundles a color/depth image pair into an RGBDImage by value.
inline std::shared_ptr<geometry::RGBDImage> PackRGBDImage(
        const geometry::Image &color, const geometry::Image &depth) {
    return std::make_shared<geometry::RGBDImage>(
            geometry::RGBDImage(color, depth));
}

// Replaces out-of-range or non-positive depth values with NaN so later
// stages can skip them with isnan checks.
struct preprocess_depth_functor {
    preprocess_depth_functor(uint8_t *depth, float min_depth, float max_depth)
        : depth_(depth), min_depth_(min_depth), max_depth_(max_depth){};
    uint8_t *depth_;
    const float min_depth_;
    const float max_depth_;
    __device__ void operator()(size_t idx) {
        float *p = (float *)(depth_ + idx * sizeof(float));
        if ((*p < min_depth_ || *p > max_depth_ || *p <= 0))
            *p = std::numeric_limits<float>::quiet_NaN();
    }
};

// Copies the depth image and applies the NaN masking above on the given
// CUDA stream.
std::shared_ptr<geometry::Image> PreprocessDepth(
        cudaStream_t stream,
        const geometry::Image &depth_orig,
        const OdometryOption &option) {
    std::shared_ptr<geometry::Image> depth_processed =
            std::make_shared<geometry::Image>();
    *depth_processed = depth_orig;
    preprocess_depth_functor func(
            thrust::raw_pointer_cast(depth_processed->data_.data()),
            option.min_depth_, option.max_depth_);
    thrust::for_each(
            utility::exec_policy(stream)->on(stream),
            thrust::make_counting_iterator<size_t>(0),
            thrust::make_counting_iterator<size_t>(depth_processed->width_ *
                                                   depth_processed->height_),
            func);
    return depth_processed;
}

// True when two images share the same dimensions.
inline bool CheckImagePair(const geometry::Image &image_s,
                           const geometry::Image &image_t) {
    return (image_s.width_ == image_t.width_ &&
            image_s.height_ == image_t.height_);
}

// Validates that both RGBD images are mutually consistent: matching sizes
// everywhere, single-channel images, 4 bytes per channel (float).
inline bool CheckRGBDImagePair(const geometry::RGBDImage &source,
                               const geometry::RGBDImage &target) {
    return (CheckImagePair(source.color_, target.color_) &&
            CheckImagePair(source.depth_, target.depth_) &&
            CheckImagePair(source.color_, source.depth_) &&
            CheckImagePair(target.color_, target.depth_) &&
            source.color_.num_of_channels_ == 1 &&
            source.depth_.num_of_channels_ == 1 &&
            target.color_.num_of_channels_ == 1 &&
            target.depth_.num_of_channels_ == 1 &&
            source.color_.bytes_per_channel_ == 4 &&
            target.color_.bytes_per_channel_ == 4 &&
            source.depth_.bytes_per_channel_ == 4 &&
            target.depth_.bytes_per_channel_ == 4);
}

// Pipeline front-end: Gaussian-smooths color and (stream-parallel)
// preprocessed depth, computes initial correspondences under odo_init, and
// normalizes intensities over those matches.
std::tuple<std::shared_ptr<geometry::RGBDImage>,
           std::shared_ptr<geometry::RGBDImage>>
InitializeRGBDOdometry(
        const geometry::RGBDImage &source,
        const geometry::RGBDImage &target,
        const camera::PinholeCameraIntrinsic &pinhole_camera_intrinsic,
        const Eigen::Matrix4f &odo_init,
        const OdometryOption &option) {
    auto source_gray =
            source.color_.Filter(geometry::Image::FilterType::Gaussian3);
    auto target_gray =
            target.color_.Filter(geometry::Image::FilterType::Gaussian3);
    // Depth preprocessing runs on two streams; sync before filtering.
    auto source_depth_preprocessed =
            PreprocessDepth(utility::GetStream(0), source.depth_, option);
    auto target_depth_preprocessed =
            PreprocessDepth(utility::GetStream(1), target.depth_, option);
    cudaSafeCall(cudaDeviceSynchronize());
    auto source_depth = source_depth_preprocessed->Filter(
            geometry::Image::FilterType::Gaussian3);
    auto target_depth = target_depth_preprocessed->Filter(
            geometry::Image::FilterType::Gaussian3);
    CorrespondenceSetPixelWise correspondence;
    ComputeCorrespondence(pinhole_camera_intrinsic.intrinsic_matrix_, odo_init,
                          *source_depth, *target_depth, option, correspondence);
    NormalizeIntensity(*source_gray, *target_gray, correspondence);
    auto source_out = PackRGBDImage(*source_gray, *source_depth);
    auto target_out = PackRGBDImage(*target_gray, *target_depth);
    return std::make_tuple(source_out, target_out);
}

// Adapter that evaluates the pluggable Jacobian (color-only or hybrid) for
// one correspondence, producing 2 residual rows for the normal equations.
template <typename JacobianType>
struct compute_jacobian_and_residual_functor
    : public utility::multiple_jacobians_residuals_functor<Eigen::Vector6f, 2> {
    compute_jacobian_and_residual_functor(const uint8_t *source_color,
                                          const uint8_t *source_depth,
                                          const uint8_t *target_color,
                                          const uint8_t *target_depth,
                                          const uint8_t *source_xyz,
                                          const uint8_t *target_dx_color,
                                          const uint8_t *target_dx_depth,
                                          const uint8_t *target_dy_color,
                                          const uint8_t *target_dy_depth,
                                          int width,
                                          const Eigen::Matrix3f &intrinsic,
                                          const Eigen::Matrix4f &extrinsic,
                                          const Eigen::Vector4i *corresps)
        : source_color_(source_color),
          source_depth_(source_depth),
          target_color_(target_color),
          target_depth_(target_depth),
          source_xyz_(source_xyz),
          target_dx_color_(target_dx_color),
          target_dx_depth_(target_dx_depth),
          target_dy_color_(target_dy_color),
          target_dy_depth_(target_dy_depth),
          width_(width),
          intrinsic_(intrinsic),
          extrinsic_(extrinsic),
          corresps_(corresps){};
    const uint8_t *source_color_;
    const uint8_t *source_depth_;
    const uint8_t *target_color_;
    const uint8_t *target_depth_;
    const uint8_t *source_xyz_;
    const uint8_t *target_dx_color_;
    const uint8_t *target_dx_depth_;
    const uint8_t *target_dy_color_;
    const uint8_t *target_dy_depth_;
    const int width_;
    const Eigen::Matrix3f intrinsic_;
    const Eigen::Matrix4f extrinsic_;
    const Eigen::Vector4i *corresps_;
    JacobianType jacobian_;
    __device__ void operator()(int i, Eigen::Vector6f J_r[2], float r[2]) const {
        jacobian_.ComputeJacobianAndResidual(
                i, J_r, r, source_color_, source_depth_, target_color_,
                target_depth_, source_xyz_, target_dx_color_, target_dx_depth_,
                target_dy_color_, target_dy_depth_, width_, intrinsic_,
                extrinsic_, corresps_);
    }
};

// One Gauss-Newton step at a pyramid level: build correspondences, assemble
// JTJ/JTr, solve for the incremental extrinsic. Returns (success, delta).
template <typename JacobianType>
std::tuple<bool, Eigen::Matrix4f> DoSingleIteration(
        int iter,
        int level,
        const geometry::RGBDImage &source,
        const geometry::RGBDImage &target,
        const geometry::Image &source_xyz,
        const geometry::RGBDImage &target_dx,
        const geometry::RGBDImage &target_dy,
        const Eigen::Matrix3f &intrinsic,
        const Eigen::Matrix4f &extrinsic_initial,
        const OdometryOption &option) {
    CorrespondenceSetPixelWise correspondence;
    ComputeCorrespondence(intrinsic, extrinsic_initial, source.depth_,
                          target.depth_, option, correspondence);
    int corresps_count = (int)correspondence.size();

    compute_jacobian_and_residual_functor<JacobianType> func(
            thrust::raw_pointer_cast(source.color_.data_.data()),
            thrust::raw_pointer_cast(source.depth_.data_.data()),
            thrust::raw_pointer_cast(target.color_.data_.data()),
            thrust::raw_pointer_cast(target.depth_.data_.data()),
            thrust::raw_pointer_cast(source_xyz.data_.data()),
            thrust::raw_pointer_cast(target_dx.color_.data_.data()),
            thrust::raw_pointer_cast(target_dx.depth_.data_.data()),
            thrust::raw_pointer_cast(target_dy.color_.data_.data()),
            thrust::raw_pointer_cast(target_dy.depth_.data_.data()),
            source.color_.width_, intrinsic, extrinsic_initial,
            thrust::raw_pointer_cast(correspondence.data()));
    utility::LogDebug("Iter : {:d}, Level : {:d}, ", iter, level);
    Eigen::Matrix6f JTJ;
    Eigen::Vector6f JTr;
    float r2;
    thrust::tie(JTJ, JTr, r2) =
            utility::ComputeJTJandJTr<Eigen::Matrix6f, Eigen::Vector6f, 2>(
                    func, corresps_count);

    bool is_success;
    Eigen::Matrix4f extrinsic;
    thrust::tie(is_success, extrinsic) =
            utility::SolveJacobianSystemAndObtainExtrinsicMatrix(JTJ, JTr);
    if (!is_success) {
        utility::LogWarning("[ComputeOdometry] no solution!");
        return std::make_tuple(false, Eigen::Matrix4f::Identity());
    } else {
        return std::make_tuple(true, extrinsic);
    }
}

// Student-t robust weight applied during the residual reduction.
struct weight_reduce_functor {
    weight_reduce_functor(float sigma2, float nu) : sigma2_(sigma2), nu_(nu){};
    const float sigma2_;
    const float nu_;
    __device__ float operator()(float r2) const {
        return r2 * (nu_ + 1.0) / (nu_ + r2 / sigma2_);
    }
};

// Per-residual Student-t weight given the current scale estimate.
struct calc_weights_functor {
    calc_weights_functor(float nu) : nu_(nu){};
    const float nu_;
    __device__ float operator()(float r2, float w_sum) const {
        return (nu_ + 1) / (nu_ + r2 / w_sum);
    }
};

// Weighted (robust, velocity-regularized) variant of DoSingleIteration.
// Also returns the updated residual-scale estimate sigma2.
template <typename JacobianType>
std::tuple<bool, Eigen::Matrix4f, float> DoSingleIterationWeighted(
        int iter,
        int level,
        const geometry::RGBDImage &source,
        const geometry::RGBDImage &target,
        const geometry::Image &source_xyz,
        const geometry::RGBDImage &target_dx,
        const geometry::RGBDImage &target_dy,
        const Eigen::Matrix3f &intrinsic,
        const Eigen::Matrix4f &extrinsic_initial,
        const Eigen::Vector6f &prev_twist,
        const Eigen::Vector6f &curr_vel,
        const OdometryOption &option,
        float sigma2) {
    CorrespondenceSetPixelWise correspondence;
    ComputeCorrespondence(intrinsic, extrinsic_initial, source.depth_,
                          target.depth_, option, correspondence);
    int corresps_count = (int)correspondence.size();

    compute_jacobian_and_residual_functor<JacobianType> func(
            thrust::raw_pointer_cast(source.color_.data_.data()),
            thrust::raw_pointer_cast(source.depth_.data_.data()),
            thrust::raw_pointer_cast(target.color_.data_.data()),
            thrust::raw_pointer_cast(target.depth_.data_.data()),
            thrust::raw_pointer_cast(source_xyz.data_.data()),
            thrust::raw_pointer_cast(target_dx.color_.data_.data()),
            thrust::raw_pointer_cast(target_dx.depth_.data_.data()),
            thrust::raw_pointer_cast(target_dy.color_.data_.data()),
            thrust::raw_pointer_cast(target_dy.depth_.data_.data()),
            source.color_.width_, intrinsic, extrinsic_initial,
            thrust::raw_pointer_cast(correspondence.data()));
    utility::LogDebug("Iter : {:d}, Level : {:d}, ", iter, level);
    Eigen::Matrix6f JTJ;
    Eigen::Vector6f JTr;
    float r2;
    float sigma2_new;
    thrust::tie(JTJ, JTr, r2, sigma2_new) =
            utility::ComputeWeightedJTJandJTr<Eigen::Matrix6f, Eigen::Vector6f,
                                              2>(
                    func, weight_reduce_functor(sigma2, option.nu_),
                    calc_weights_functor(option.nu_), corresps_count);
    // Velocity prior: regularize toward the previous twist.
    JTJ.diagonal() += option.inv_sigma_mat_diag_;
    JTr -= (option.inv_sigma_mat_diag_.array() *
            (prev_twist - curr_vel).array())
                   .matrix();

    bool is_success;
    Eigen::Matrix4f extrinsic;
    thrust::tie(is_success, extrinsic) =
            utility::SolveJacobianSystemAndObtainExtrinsicMatrix(JTJ, JTr);
    if (!is_success) {
        utility::LogWarning("[ComputeOdometry] no solution!");
        return std::make_tuple(false, Eigen::Matrix4f::Identity(), sigma2_new);
    } else {
        return std::make_tuple(true, extrinsic, sigma2_new);
    }
}

// Coarse-to-fine Gauss-Newton over an image pyramid. Iteration counts per
// level come from the option; returns the accumulated extrinsic.
template <typename JacobianType>
std::tuple<bool, Eigen::Matrix4f> ComputeMultiscale(
        const geometry::RGBDImage &source,
        const geometry::RGBDImage &target,
        const camera::PinholeCameraIntrinsic &pinhole_camera_intrinsic,
        const Eigen::Matrix4f &extrinsic_initial,
        const OdometryOption &option) {
    std::vector<int> iter_counts = option.iteration_number_per_pyramid_level_;
    int num_levels = (int)iter_counts.size();

    auto source_pyramid = source.CreatePyramid(num_levels);
    auto target_pyramid = target.CreatePyramid(num_levels);
    auto target_pyramid_dx = geometry::RGBDImage::FilterPyramid(
            target_pyramid, geometry::Image::FilterType::Sobel3Dx);
    auto target_pyramid_dy = geometry::RGBDImage::FilterPyramid(
            target_pyramid, geometry::Image::FilterType::Sobel3Dy);

    Eigen::Matrix4f result_odo = extrinsic_initial.isZero()
                                         ? Eigen::Matrix4f::Identity()
                                         : extrinsic_initial;

    std::vector<Eigen::Matrix3f> pyramid_camera_matrix =
            CreateCameraMatrixPyramid(pinhole_camera_intrinsic,
                                      (int)iter_counts.size());

    // Coarsest level first.
    for (int level = num_levels - 1; level >= 0; level--) {
        const Eigen::Matrix3f level_camera_matrix =
                pyramid_camera_matrix[level];

        auto source_xyz_level = ConvertDepthImageToXYZImage(
                source_pyramid[level]->depth_, level_camera_matrix);
        auto source_level = PackRGBDImage(source_pyramid[level]->color_,
                                          source_pyramid[level]->depth_);
        auto target_level = PackRGBDImage(target_pyramid[level]->color_,
                                          target_pyramid[level]->depth_);
        auto target_dx_level = PackRGBDImage(target_pyramid_dx[level]->color_,
                                             target_pyramid_dx[level]->depth_);
        auto target_dy_level = PackRGBDImage(target_pyramid_dy[level]->color_,
                                             target_pyramid_dy[level]->depth_);

        for (int iter = 0; iter < iter_counts[num_levels - level - 1]; iter++) {
            Eigen::Matrix4f curr_odo;
            bool is_success;
            std::tie(is_success, curr_odo) = DoSingleIteration<JacobianType>(
                    iter, level, *source_level, *target_level,
                    *source_xyz_level, *target_dx_level, *target_dy_level,
                    level_camera_matrix, result_odo, option);
            result_odo = curr_odo * result_odo;

            if (!is_success) {
                utility::LogWarning("[ComputeOdometry] no solution!");
                return std::make_tuple(false, Eigen::Matrix4f::Identity());
            }
        }
    }
    return std::make_tuple(true, result_odo);
}

// Robust/weighted coarse-to-fine variant; additionally tracks the per-run
// velocity (as a 4x4 accumulated within this call) and sigma2 scale, and
// returns the final twist for use as the next frame's prior.
template <typename JacobianType>
std::tuple<bool, Eigen::Matrix4f, Eigen::Vector6f> ComputeMultiscaleWeighted(
        const geometry::RGBDImage &source,
        const geometry::RGBDImage &target,
        const camera::PinholeCameraIntrinsic &pinhole_camera_intrinsic,
        const Eigen::Matrix4f &extrinsic_initial,
        const Eigen::Vector6f &prev_twist,
        const OdometryOption &option) {
    std::vector<int> iter_counts = option.iteration_number_per_pyramid_level_;
    int num_levels = (int)iter_counts.size();

    auto source_pyramid = source.CreatePyramid(num_levels);
    auto target_pyramid = target.CreatePyramid(num_levels);
    auto target_pyramid_dx = geometry::RGBDImage::FilterPyramid(
            target_pyramid, geometry::Image::FilterType::Sobel3Dx);
    auto target_pyramid_dy = geometry::RGBDImage::FilterPyramid(
            target_pyramid, geometry::Image::FilterType::Sobel3Dy);

    Eigen::Matrix4f result_odo = extrinsic_initial.isZero()
                                         ? Eigen::Matrix4f::Identity()
                                         : extrinsic_initial;

    std::vector<Eigen::Matrix3f> pyramid_camera_matrix =
            CreateCameraMatrixPyramid(pinhole_camera_intrinsic,
                                      (int)iter_counts.size());

    Eigen::Matrix4f curr_vel = Eigen::Matrix4f::Identity();
    float sigma2 = option.sigma2_init_;
    for (int level = num_levels - 1; level >= 0; level--) {
        const Eigen::Matrix3f level_camera_matrix =
                pyramid_camera_matrix[level];

        auto source_xyz_level = ConvertDepthImageToXYZImage(
                source_pyramid[level]->depth_, level_camera_matrix);
        auto source_level = PackRGBDImage(source_pyramid[level]->color_,
                                          source_pyramid[level]->depth_);
        auto target_level = PackRGBDImage(target_pyramid[level]->color_,
                                          target_pyramid[level]->depth_);
        auto target_dx_level = PackRGBDImage(target_pyramid_dx[level]->color_,
                                             target_pyramid_dx[level]->depth_);
        auto target_dy_level = PackRGBDImage(target_pyramid_dy[level]->color_,
                                             target_pyramid_dy[level]->depth_);

        for (int iter = 0; iter < iter_counts[num_levels - level - 1]; iter++) {
            Eigen::Matrix4f curr_odo;
            bool is_success;
            std::tie(is_success, curr_odo, sigma2) =
                    DoSingleIterationWeighted<JacobianType>(
                            iter, level, *source_level, *target_level,
                            *source_xyz_level, *target_dx_level,
                            *target_dy_level, level_camera_matrix, result_odo,
                            prev_twist,
                            utility::TransformMatrix4fToVector6f(curr_vel),
                            option, sigma2);
            curr_vel = curr_odo * curr_vel;
            result_odo = curr_odo * result_odo;

            if (!is_success) {
                utility::LogWarning("[ComputeOdometry] no solution!");
                return std::make_tuple(false, Eigen::Matrix4f::Identity(),
                                       Eigen::Vector6f::Zero());
            }
        }
    }
    return std::make_tuple(true, result_odo,
                           utility::TransformMatrix4fToVector6f(curr_vel));
}

// Common driver behind both public entry points: validate inputs, run the
// (optionally weighted) multiscale solver, and compute the information
// matrix at the estimated pose.
template <typename JacobianType>
std::tuple<bool, Eigen::Matrix4f, Eigen::Vector6f, Eigen::Matrix6f>
ComputeRGBDOdometryT(
        const geometry::RGBDImage &source,
        const geometry::RGBDImage &target,
        const camera::PinholeCameraIntrinsic &pinhole_camera_intrinsic,
        const Eigen::Matrix4f &odo_init,
        const Eigen::Vector6f &prev_twist,
        const OdometryOption &option,
        bool is_weighted) {
    if (!CheckRGBDImagePair(source, target)) {
        utility::LogWarning(
                "[RGBDOdometry] Two RGBD pairs should be same in size.");
        return std::make_tuple(false, Eigen::Matrix4f::Identity(),
                               Eigen::Vector6f::Zero(),
                               Eigen::Matrix6f::Zero());
    }

    std::shared_ptr<geometry::RGBDImage> source_processed, target_processed;
    std::tie(source_processed, target_processed) = InitializeRGBDOdometry(
            source, target, pinhole_camera_intrinsic, odo_init, option);

    Eigen::Matrix4f extrinsic;
    Eigen::Vector6f twist = Eigen::Vector6f::Zero();
    bool is_success;
    if (is_weighted) {
        std::tie(is_success, extrinsic, twist) =
                ComputeMultiscaleWeighted<JacobianType>(
                        *source_processed, *target_processed,
                        pinhole_camera_intrinsic, odo_init, prev_twist, option);
    } else {
        std::tie(is_success, extrinsic) = ComputeMultiscale<JacobianType>(
                *source_processed, *target_processed, pinhole_camera_intrinsic,
                odo_init, option);
    }

    if (is_success) {
        Eigen::Matrix4f trans_output = extrinsic;
        Eigen::Matrix6f info_output = CreateInformationMatrix(
                extrinsic, pinhole_camera_intrinsic, source_processed->depth_,
                target_processed->depth_, option);
        return std::make_tuple(true, trans_output, twist, info_output);
    } else {
        return std::make_tuple(false, Eigen::Matrix4f::Identity(), twist,
                               Eigen::Matrix6f::Identity());
    }
}

// Explicit instantiations for the two supported Jacobian formulations.
template std::tuple<bool, Eigen::Matrix4f, Eigen::Vector6f, Eigen::Matrix6f>
ComputeRGBDOdometryT<RGBDOdometryJacobianFromColorTerm>(
        const geometry::RGBDImage &source,
        const geometry::RGBDImage &target,
        const camera::PinholeCameraIntrinsic &pinhole_camera_intrinsic,
        const Eigen::Matrix4f &odo_init,
        const Eigen::Vector6f &prev_twist,
        const OdometryOption &option,
        bool is_weighted);

template std::tuple<bool, Eigen::Matrix4f, Eigen::Vector6f, Eigen::Matrix6f>
ComputeRGBDOdometryT<RGBDOdometryJacobianFromHybridTerm>(
        const geometry::RGBDImage &source,
        const geometry::RGBDImage &target,
        const camera::PinholeCameraIntrinsic &pinhole_camera_intrinsic,
        const Eigen::Matrix4f &odo_init,
        const Eigen::Vector6f &prev_twist,
        const OdometryOption &option,
        bool is_weighted);

}  // unnamed namespace

// Public API: unweighted RGBD odometry; dispatches on the Jacobian type and
// returns (success, transform, information matrix).
std::tuple<bool, Eigen::Matrix4f, Eigen::Matrix6f> ComputeRGBDOdometry(
        const geometry::RGBDImage &source,
        const geometry::RGBDImage &target,
        const camera::PinholeCameraIntrinsic &pinhole_camera_intrinsic
        /*= camera::PinholeCameraIntrinsic()*/,
        const Eigen::Matrix4f &odo_init /*= Eigen::Matrix4f::Identity()*/,
        const RGBDOdometryJacobian &jacobian_method
        /*=RGBDOdometryJacobianFromHybridTerm*/,
        const OdometryOption &option /*= OdometryOption()*/) {
    if (jacobian_method.jacobian_type_ == RGBDOdometryJacobian::COLOR_TERM) {
        auto res = ComputeRGBDOdometryT<RGBDOdometryJacobianFromColorTerm>(
                source, target, pinhole_camera_intrinsic, odo_init,
                Eigen::Vector6f::Zero(), option, false);
        return std::make_tuple(std::get<0>(res), std::get<1>(res),
                               std::get<3>(res));
    } else {
        auto res = ComputeRGBDOdometryT<RGBDOdometryJacobianFromHybridTerm>(
                source, target, pinhole_camera_intrinsic, odo_init,
                Eigen::Vector6f::Zero(), option, false);
        return std::make_tuple(std::get<0>(res), std::get<1>(res),
                               std::get<3>(res));
    }
}

// Public API: robust/weighted odometry with a previous-twist prior; always
// uses the hybrid-term Jacobian.
std::tuple<bool, Eigen::Matrix4f, Eigen::Vector6f, Eigen::Matrix6f>
ComputeWeightedRGBDOdometry(
        const geometry::RGBDImage &source,
        const geometry::RGBDImage &target,
        const camera::PinholeCameraIntrinsic &pinhole_camera_intrinsic
        /*= camera::PinholeCameraIntrinsic()*/,
        const Eigen::Matrix4f &odo_init /*= Eigen::Matrix4f::Identity()*/,
        const Eigen::Vector6f &prev_twist /*= Eigen::Vector6f::Zero()*/,
        const RGBDOdometryJacobian &jacobian_method
        /*=RGBDOdometryJacobianFromHybridTerm*/,
        const OdometryOption &option /*= OdometryOption()*/) {
    return ComputeRGBDOdometryT<RGBDOdometryJacobianFromHybridTerm>(
            source, target, pinhole_camera_intrinsic, odo_init, prev_twist,
            option, true);
}

}  // namespace odometry
}  // namespace cupoch
the_stack
#include <torch/extension.h>
//#include <torch/serialize/tensor.h>
//#include <ATen/ATen.h>
//#include <ATen/cuda/CUDAContext.h>

// Launch configuration: every kernel in this file is launched with
// CUDA_NUM_THREADS threads per block and a ceil-div block count.
#define CUDA_NUM_THREADS 256
#define THREADS_PER_BLOCK 64

// Accessors for a 4-component shape plus a companion `<name>_stride` int4;
// not used by the kernels visible in this portion of the file.
#define DIM0(TENSOR) ((TENSOR).x)
#define DIM1(TENSOR) ((TENSOR).y)
#define DIM2(TENSOR) ((TENSOR).z)
#define DIM3(TENSOR) ((TENSOR).w)
#define DIM3_INDEX(TENSOR, xx, yy, zz, ww) ((TENSOR)[((xx) * (TENSOR##_stride.x)) + ((yy) * (TENSOR##_stride.y)) + ((zz) * (TENSOR##_stride.z)) + ((ww) * (TENSOR##_stride.w))])

#ifdef __cplusplus
extern "C" {
#endif

// Element-wise running maximum over n elements (one thread each).
// Wherever top_temp beats top_data, top_data is overwritten and mask
// records mask_index so the backward pass can route the gradient to the
// aggregation direction that produced the maximum.
__global__ void Max (const int n, const float *top_temp, float *top_data, float *mask, const int mask_index){
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= n) {
        return;
    }
    if (top_data[index] < top_temp[index]) {
        top_data[index] = top_temp[index];
        mask[index] = mask_index;   // stored as float; compared via (int) cast in backward
    }
}

// Backward companion of Max: copy gradOutput into top_grad only at the
// positions whose recorded mask equals mask_index; other positions keep
// whatever top_grad already holds (callers cudaMemset it to zero first).
__global__ void get_temp_grad (const int n, const float *gradOutput, const float *mask, float *top_grad, const int mask_index){
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= n) {
        return;
    }
    if (((int) mask[index]) == mask_index)
        top_grad[index] = gradOutput[index];
}

// Arg-max along the depth axis of a (..., depth, step)-strided volume.
// `step` is the element count of one depth slice (height*width at the call
// sites); each of the n threads scans its column of `depth` samples and
// writes the winning depth index into idx (as a float).
__global__ void MaxDepth (const int n, const float *bottom_data, const int step, const int depth, float *idx){
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= n) {
        return;
    }
    int base = index / step * step * depth + index % step;
    int k = 0;
    for (int i = 1; i < depth; i++)
        if (bottom_data[base + k * step] < bottom_data[base + i * step])
            k = i;
    idx[index] = k;
}

// Semi-global aggregation, top->down sweep, computed IN PLACE on top_data
// (callers first copy the raw volume into top_data).  One thread owns one
// image column of one (batch, channel) slice: n = num * channel * width.
// Row by row, each depth value d is re-estimated from five weighted taps
// (filters[shift + j*step], j = 0..4): the current value, the previous
// (already aggregated) row at d, at d-1, at d+1, and at the previous row's
// arg-max depth k.  Taps that would fall outside the volume fall back to
// the current value.  kp tracks the arg-max of the row just written so it
// can serve as k on the next iteration.
// NOTE(review): wsize is presumably 5 (one weight per tap) — confirm
// against the guidance tensor's dim-1 at the call site.
__global__ void sga_down_forward (const int n, const float *filters, const int channel, const int height, const int width, const int depth, const int wsize, float *top_data){
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= n) {
        return;
    }
    int step = height * width;
    // int wsize=radius+1;
    int base = index / width * step * depth + index % width;    //up->down
    // Guidance weights are shared across channels: one filter stack per
    // (batch, column) position.
    int fbase = index / (channel*width) * step * wsize + index % width;
    int kp = 0;
    for (int row = 0; row < height; row++) {
        int shift = fbase + row * width;
        int base0 = base + row * width;
        int k = kp;   // arg-max depth of the previous, already-aggregated row
        kp = 0;
        /* if(row-1>=0) for(int i = 1; i < depth; i++){ if(top_data[base0-width+k*step]<top_data[base0-width+i*step]) k = i; */
        for (int d = 0; d < depth; d++) {
            float temp = 0;
            int location = base0 + d * step;
            temp += top_data[location] * filters[shift];
            if (row - 1 >= 0)
                temp += top_data[location - width] * filters[shift + step];
            else
                temp += top_data[location] * filters[shift + step];
            if (row - 1 >= 0 && d - 1 >= 0)
                temp += top_data[location - width - step] * filters[shift + 2 * step];
            else
                temp += top_data[location] * filters[shift + 2 * step];
            if (row - 1 >= 0 && d + 1 < depth)
                temp += top_data[location - width + step] * filters[shift + 3 * step];
            else
                temp += top_data[location] * filters[shift + 3 * step];
            if (row - 1 >= 0)
                temp += top_data[base0 - width + k * step] * filters[shift + 4 * step];
            else
                temp += top_data[location] * filters[shift + 4 * step];
            top_data[location] = temp;
            if (top_data[base0 + kp * step] < temp)
                kp = d;
        }
    }
}

// Gradient of sga_down_forward w.r.t. its input volume.  Walks rows in the
// reverse of the forward order (bottom-up), back-propagating top_diff IN
// PLACE through the recurrence before accumulating into bottom_diff.  idx
// holds the per-position arg-max depth (produced by MaxDepth on the
// aggregated volume) consumed by the fifth tap.
// NOTE(review): statement order is load-bearing here (top_diff is both read
// and rewritten in place); documented only, code left untouched.
__global__ void sga_down_data_backward (const int n, const float *filters, float *top_diff, const float *idx, const int channel, const int height, const int width, const int depth, const int wsize, float *bottom_diff){
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= n) {
        return;
    }
    int step = height * width;
    int base = index / width * step * depth + index % width;    //up->down
    int fbase = index / (channel*width) * step * wsize + index % width;
    //1
    int base_idx = index / width * step + index % width;
    //
    for (int row = height - 1; row >= 0; row--) {
        int shift = fbase + row * width;
        for (int d = 0; d < depth; d++) {
            int location = base + d * step + row * width;
            float temp = top_diff[location];
            if (row + 1 < height)
                temp += top_diff[location + width] * filters[shift + width + step];
            if (row + 1 < height && d + 1 < depth)
                temp += top_diff[location + width + step] * filters[shift + width + 2 * step];
            if (row + 1 < height && d - 1 >= 0)
                temp +=
top_diff[location + width - step] * filters[shift + width + 3 * step]; top_diff[location] = temp; bottom_diff[location] += temp * filters[shift]; } //2 if (row + 1 < height) { int k = idx[base_idx + row * width]; int location = base + k * step + row * width; float temp = 0; for (int d = 0; d < depth; d++) temp += top_diff[base + row * width + width + d * step] * filters[shift + width + 4 * step]; top_diff[location] += temp; bottom_diff[location] += temp * filters[shift]; } //2 } /* for(int d = 0; d < depth; d ++){ int shift = fbase; int location = base + d * step; bottom_diff[location] += top_diff[location] * (filters[shift + step] + filters[shift + 2*step] + filters[shift + 3*step] + filters[shift + 4*step]); // bottom_diff[location] += top_diff[location]; shift += width; location += width; bottom_diff[location] += top_diff[location] * filters[shift + 2*step]; } for(int row=1;row<height;row++){ int location = base + row * width; int shift = fbase + row * width; bottom_diff[location] += top_diff[location] * filters[shift + 3*step]; location += (depth - 1)*step; bottom_diff[location] += top_diff[location] * filters[shift + 4*step]; } */ for (int row = 0; row < height; row++) { int location = base + row * width; int shift = fbase + row * width; bottom_diff[location] += top_diff[location] * filters[shift + 2 * step]; location += (depth - 1) * step; bottom_diff[location] += top_diff[location] * filters[shift + 3 * step]; } } __global__ void sga_down_weight_backward (const int n, const float *bottom_data, const float *top_data, const float *temp_diff, const float *idx, const int channel, const int height, const int width, const int depth, const int wsize, float *filters_diff){ int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n) { return; } int step = height * width; for(int k=0; k<channel; k++){ int base = index / step * step * depth * channel + index % step + k*step*depth; //up->down int fbase = index / step * step * wsize + index % step; int row = index 
% step / width; for (int i = 0; i < depth; i++) filters_diff[fbase] += temp_diff[base + i * step] * bottom_data[base + i * step]; if (row - 1 >= 0) { int location = fbase + step; for (int i = 0; i < depth; i++) filters_diff[location] += temp_diff[base + i * step] * top_data[base + i * step - width]; location = fbase + 2 * step; filters_diff[location] += temp_diff[base] * bottom_data[base]; for (int i = 1; i < depth; i++) filters_diff[location] += temp_diff[base + i * step] * top_data[base + (i - 1) * step - width]; location = fbase + 3 * step; filters_diff[location] += temp_diff[base + (depth - 1) * step] * bottom_data[base + (depth - 1) * step]; for (int i = 0; i < depth - 1; i++) filters_diff[location] += temp_diff[base + i * step] * top_data[base + (i + 1) * step - width]; } /* else{ for(int i=0; i<depth; i++){ float temp = temp_diff[base+i*step]*bottom_data[base+i*step]; filters_diff[fbase + step] += temp; //temp_diff[base+i*step]*bottom_data[base+i*step]; filters_diff[fbase + 3*step] += temp; //temp_diff[base+i*step]*bottom_data[base+i*step]; filters_diff[fbase + 4*step] += temp; //temp_diff[base+i*step]*bottom_data[base+i*step]; } } */ //1 if (row - 1 >= 0) { int location = fbase + 4 * step; int k = idx[index - width]; for (int i = 0; i < depth; i++) filters_diff[location] += temp_diff[base + i * step] * top_data[base + k * step - width]; } // /* else{ int location = fbase + 2*step; for(int i=0; i<depth; i++) filters_diff[location] += temp_diff[base+i*step]*bottom_data[base+i*step]; } */ } } __global__ void sga_up_forward (const int n, const float *filters, const int channel, const int height, const int width, const int depth, const int wsize, float *top_data){ int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n) { return; } int step = height * width; // int wsize=radius+1; int base = index / width * step * depth + index % width; //up->down int fbase = index / (width*channel) * step * wsize + index % width; int kp = 0; //1 for (int row = height 
- 1; row >= 0; row--) { int shift = fbase + row * width; //2 int base0 = base + row * width; int k = kp; kp = 0; //2 for (int d = 0; d < depth; d++) { float temp = 0; int location = base + d * step + row * width; temp += top_data[location] * filters[shift]; if (row + 1 < height) temp += top_data[location + width] * filters[shift + step]; else temp += top_data[location] * filters[shift + step]; if (row + 1 < height && d - 1 >= 0) temp += top_data[location + width - step] * filters[shift + 2 * step]; else temp += top_data[location] * filters[shift + 2 * step]; if (row + 1 < height && d + 1 < depth) temp += top_data[location + width + step] * filters[shift + 3 * step]; else temp += top_data[location] * filters[shift + 3 * step]; //3 if (row + 1 < height) temp += top_data[base0 + width + k * step] * filters[shift + 4 * step]; else temp += top_data[location] * filters[shift + 4 * step]; top_data[location] = temp; if (top_data[base0 + kp * step] < temp) kp = d; //3 } } } __global__ void sga_up_data_backward (const int n, const float *filters, float *top_diff, const float *idx, const int channel, const int height, const int width, const int depth, const int wsize, float *bottom_diff){ int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n) { return; } int step = height * width; int base = index / width * step * depth + index % width; //up->down int fbase = index / (width*channel) * step * wsize + index % width; //1 int base_idx = index / width * step + index % width; // for (int row = 0; row < height; row++) { int shift = fbase + row * width; for (int d = 0; d < depth; d++) { int location = base + d * step + row * width; float temp = top_diff[location]; if (row - 1 >= 0) temp += top_diff[location - width] * filters[shift - width + step]; if (row - 1 >= 0 && d + 1 < depth) temp += top_diff[location - width + step] * filters[shift - width + 2 * step]; if (row - 1 >= 0 && d - 1 >= 0) temp += top_diff[location - width - step] * filters[shift - width + 3 * step]; 
top_diff[location] = temp; bottom_diff[location] += temp * filters[shift]; } //2 if (row - 1 >= 0) { int k = idx[base_idx + row * width]; int location = base + k * step + row * width; float temp = 0; for (int d = 0; d < depth; d++) temp += top_diff[base + row * width - width + d * step] * filters[shift - width + 4 * step]; top_diff[location] += temp; bottom_diff[location] += temp * filters[shift]; } //2 } /* for(int d = 0; d < depth; d ++){ int shift = fbase + width*(height-1); int location = base + width*(height-1) + d * step; bottom_diff[location] += top_diff[location] * (filters[shift + step] + filters[shift + 2*step] + filters[shift + 3*step] + filters[shift + 4*step]); // bottom_diff[location] += top_diff[location]; shift -= width; location -= width; bottom_diff[location] += top_diff[location] * filters[shift + 2*step]; } for(int row=0;row<height-1;row++){ int shift = fbase + row * width; int location = base + row * width; bottom_diff[location] += top_diff[location] * filters[shift + 3*step]; location += (depth - 1)*step; bottom_diff[location] += top_diff[location] * filters[shift + 4*step]; }*/ for (int row = 0; row < height; row++) { int shift = fbase + row * width; int location = base + row * width; bottom_diff[location] += top_diff[location] * filters[shift + 2 * step]; location += (depth - 1) * step; bottom_diff[location] += top_diff[location] * filters[shift + 3 * step]; } } __global__ void sga_up_weight_backward (const int n, const float *bottom_data, const float *top_data, const float *temp_diff, const float *idx, const int channel, const int height, const int width, const int depth, const int wsize, float *filters_diff){ int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n) { return; } for(int k=0; k<channel; k++){ int step = height * width; int base = index / step * step * depth * channel + index % step + k*step*depth; //up->down // int base = index / step * step * depth + index % step; //up->down int fbase = index / step * step * wsize 
+ index % step; int row = index % step / width; for (int i = 0; i < depth; i++) filters_diff[fbase] += temp_diff[base + i * step] * bottom_data[base + i * step]; if (row + 1 < height) { int location = fbase + step; for (int i = 0; i < depth; i++) filters_diff[location] += temp_diff[base + i * step] * top_data[base + i * step + width]; location = fbase + 2 * step; filters_diff[location] += temp_diff[base] * bottom_data[base]; for (int i = 1; i < depth; i++) filters_diff[location] += temp_diff[base + i * step] * top_data[base + (i - 1) * step + width]; location = fbase + 3 * step; filters_diff[location] += temp_diff[base + (depth - 1) * step] * bottom_data[base + (depth - 1) * step]; for (int i = 0; i < depth - 1; i++) filters_diff[location] += temp_diff[base + i * step] * top_data[base + (i + 1) * step + width]; } /* else{ //int location = fbase + step; for(int i=0; i<depth; i++){ float temp = temp_diff[base+i*step]*bottom_data[base+i*step]; filters_diff[fbase + step] += temp; //temp_diff[base+i*step]*bottom_data[base+i*step]; filters_diff[fbase + 3*step] += temp; //temp_diff[base+i*step]*bottom_data[base+i*step]; filters_diff[fbase + 4*step] += temp; //temp_diff[base+i*step]*bottom_data[base+i*step]; } // filters_diff[location] += temp_diff[base+i*step]*bottom_data[base+i*step]; // location = fbase + 3*step; // for(int i=0; i<depth; i++) // filters_diff[location] += temp_diff[base+i*step]*bottom_data[base+i*step]; // // location = fbase + 4*step; // for(int i=0; i<depth; i++) // filters_diff[location] += temp_diff[base+i*step]*bottom_data[base+i*step]; }*/ //1 if (row + 1 < height) { int location = fbase + 4 * step; int k = idx[index + width]; for (int i = 0; i < depth; i++) filters_diff[location] += temp_diff[base + i * step] * top_data[base + k * step + width]; } // /* else{ int location = fbase + 2*step; for(int i=0; i<depth; i++) filters_diff[location] += temp_diff[base+i*step]*bottom_data[base+i*step]; }*/ } } __global__ void sga_right_forward (const int n, 
const float *filters, const int channel, const int height, const int width, const int depth, const int wsize, float *top_data){ int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n) { return; } int step = height * width; // int wsize=radius+1; int base = index / height * step * depth + (index % height) * width; //up->down int fbase = index / (height*channel) * step * wsize + (index % height) * width; int kp = 0; for (int col = 0; col < width; col++) { int shift = fbase + col; //2 int base0 = base + col; int k = kp; kp = 0; //2 for (int d = 0; d < depth; d++) { float temp = 0; int location = base + d * step + col; temp += top_data[location] * filters[shift]; if (col - 1 >= 0) temp += top_data[location - 1] * filters[shift + step]; else temp += top_data[location] * filters[shift + step]; if (col - 1 >= 0 && d - 1 >= 0) temp += top_data[location - 1 - step] * filters[shift + 2 * step]; else temp += top_data[location] * filters[shift + 2 * step]; if (col - 1 >= 0 && d + 1 < depth) temp += top_data[location - 1 + step] * filters[shift + 3 * step]; else temp += top_data[location] * filters[shift + 3 * step]; //3 if (col - 1 >= 0) temp += top_data[base0 - 1 + k * step] * filters[shift + 4 * step]; else temp += top_data[location] * filters[shift + 4 * step]; top_data[location] = temp; if (top_data[base0 + kp * step] < temp) kp = d; //3 } } } __global__ void sga_right_data_backward (const int n, const float *filters, float *top_diff, const float *idx, const int channel, const int height, const int width, const int depth, const int wsize, float *bottom_diff){ int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n) { return; } int step = height * width; int base = index / height * step * depth + (index % height) * width; //up->down int fbase = index / (height*channel) * step * wsize + (index % height) * width; //1 int base_idx = index / height * step + (index % height) * width; // for (int col = width - 1; col >= 0; col--) { int shift = fbase + col; 
for (int d = 0; d < depth; d++) { int location = base + d * step + col; float temp = top_diff[location]; if (col + 1 < width) temp += top_diff[location + 1] * filters[shift + 1 + step]; if (col + 1 < width && d + 1 < depth) temp += top_diff[location + 1 + step] * filters[shift + 1 + 2 * step]; if (col + 1 < width && d - 1 >= 0) temp += top_diff[location + 1 - step] * filters[shift + 1 + 3 * step]; top_diff[location] = temp; bottom_diff[location] += (temp * filters[shift]); } //2 if (col + 1 < width) { int k = idx[base_idx + col]; int location = base + k * step + col; float temp = 0; for (int d = 0; d < depth; d++) temp += top_diff[base + col + 1 + d * step] * filters[shift + 1 + 4 * step]; top_diff[location] += temp; bottom_diff[location] += temp * filters[shift]; } //2 } /* for(int d = 0; d < depth; d ++){ int shift = fbase;// + width*(height-1); int location = base;// + width*(height-1) + d * step; bottom_diff[location] += top_diff[location] * (filters[shift + step] + filters[shift + 2*step] + filters[shift + 3*step] + filters[shift + 4*step]); // bottom_diff[location] += top_diff[location]; shift += 1; location += 1; bottom_diff[location] += top_diff[location] * filters[shift + 2*step]; } for(int col=1;col<width;col++){ int shift = fbase + col; int location = base + col; bottom_diff[location] += top_diff[location] * filters[shift + 3*step]; location += (depth - 1)*step; bottom_diff[location] += top_diff[location] * filters[shift + 4*step]; }*/ for (int col = 0; col < width; col++) { int shift = fbase + col; int location = base + col; bottom_diff[location] += top_diff[location] * filters[shift + 2 * step]; location += (depth - 1) * step; bottom_diff[location] += top_diff[location] * filters[shift + 3 * step]; } } __global__ void sga_right_weight_backward (const int n, const float *bottom_data, const float *top_data, const float *temp_diff, const float *idx, const int channel, const int height, const int width, const int depth, const int wsize, float 
*filters_diff){ int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n) { return; } int step = height * width; for(int k=0; k<channel; k++){ int base = index / step * step * depth * channel + index % step + k*step*depth; //up->down // int base = index / step * step * depth + index % step; //up->down int fbase = index / step * step * wsize + index % step; // int row = index%step/width; int col = index % step % width; for (int i = 0; i < depth; i++) filters_diff[fbase] += temp_diff[base + i * step] * bottom_data[base + i * step]; if (col - 1 >= 0) { int location = fbase + step; for (int i = 0; i < depth; i++) filters_diff[location] += temp_diff[base + i * step] * top_data[base + i * step - 1]; location = fbase + 2 * step; filters_diff[location] += temp_diff[base] * bottom_data[base]; for (int i = 1; i < depth; i++) filters_diff[location] += temp_diff[base + i * step] * top_data[base + (i - 1) * step - 1]; location = fbase + 3 * step; filters_diff[location] += temp_diff[base + (depth - 1) * step] * bottom_data[base + (depth - 1) * step]; for (int i = 0; i < depth - 1; i++) filters_diff[location] += temp_diff[base + i * step] * top_data[base + (i + 1) * step - 1]; } /* else{ //int location = fbase + step; for(int i=0; i<depth; i++){ float temp = temp_diff[base+i*step]*bottom_data[base+i*step]; filters_diff[fbase + step] += temp; //temp_diff[base+i*step]*bottom_data[base+i*step]; filters_diff[fbase + 3*step] += temp; //temp_diff[base+i*step]*bottom_data[base+i*step]; filters_diff[fbase + 4*step] += temp; //temp_diff[base+i*step]*bottom_data[base+i*step]; } // filters_diff[location] += temp_diff[base+i*step]*bottom_data[base+i*step]; // location = fbase + 3*step; // for(int i=0; i<depth; i++) // filters_diff[location] += temp_diff[base+i*step]*bottom_data[base+i*step]; // // location = fbase + 4*step; // for(int i=0; i<depth; i++) // filters_diff[location] += temp_diff[base+i*step]*bottom_data[base+i*step]; }*/ //1 if (col - 1 >= 0) { int location = fbase + 4 
* step; int k = idx[index - 1]; for (int i = 0; i < depth; i++) filters_diff[location] += temp_diff[base + i * step] * top_data[base + k * step - 1]; } // /* else{ int location = fbase + 2*step; for(int i=0; i<depth; i++) filters_diff[location] += temp_diff[base+i*step]*bottom_data[base+i*step]; }*/ } } __global__ void sga_left_forward (const int n, const float *filters, const int channel, const int height, const int width, const int depth, const int wsize, float *top_data){ int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n) { return; } int step = height * width; // int wsize=radius+1; int base = index / height * step * depth + (index % height) * width; //up->down int fbase = index / (channel*height) * step * wsize + (index % height) * width; int kp = 0; for (int col = width - 1; col >= 0; col--) { int shift = fbase + col; //2 int base0 = base + col; int k = kp; kp = 0; //2 for (int d = 0; d < depth; d++) { float temp = 0; int location = base + d * step + col; temp += top_data[location] * filters[shift]; if (col + 1 < width) temp += top_data[location + 1] * filters[shift + step]; else temp += top_data[location] * filters[shift + step]; if (col + 1 < width && d - 1 >= 0) temp += top_data[location + 1 - step] * filters[shift + 2 * step]; else temp += top_data[location] * filters[shift + 2 * step]; if (col + 1 < width && d + 1 < depth) temp += top_data[location + 1 + step] * filters[shift + 3 * step]; else temp += top_data[location] * filters[shift + 3 * step]; //3 if (col + 1 < width) temp += top_data[base0 + 1 + k * step] * filters[shift + 4 * step]; else temp += top_data[location] * filters[shift + 4 * step]; top_data[location] = temp; if (top_data[base0 + kp * step] < temp) kp = d; //3 } } } __global__ void sga_left_data_backward (const int n, const float *filters, float *top_diff, const float *idx, const int channel, const int height, const int width, const int depth, const int wsize, float *bottom_diff){ int index = blockIdx.x * blockDim.x + 
threadIdx.x; if (index >= n) { return; } int step = height * width; int base = index / height * step * depth + (index % height) * width; //up->down int fbase = index / (height*channel) * step * wsize + (index % height) * width; //1 int base_idx = index / height * step + (index % height) * width; // for (int col = 0; col < width; col++) { int shift = fbase + col; for (int d = 0; d < depth; d++) { int location = base + d * step + col; float temp = top_diff[location]; if (col - 1 >= 0) temp += top_diff[location - 1] * filters[shift - 1 + step]; if (col - 1 >= 0 && d + 1 < depth) temp += top_diff[location - 1 + step] * filters[shift - 1 + 2 * step]; if (col - 1 >= 0 && d - 1 >= 0) temp += top_diff[location - 1 - step] * filters[shift - 1 + 3 * step]; top_diff[location] = temp; bottom_diff[location] += temp * filters[shift]; } //2 if (col - 1 >= 0) { int k = idx[base_idx + col]; int location = base + k * step + col; float temp = 0; for (int d = 0; d < depth; d++) temp += top_diff[base + col - 1 + d * step] * filters[shift - 1 + 4 * step]; top_diff[location] += temp; //top_diff[base + col - 1 + d*step] * filters[shift - 1 + 4*step]; bottom_diff[location] += temp * filters[shift]; } //2 } /* for(int d = 0; d < depth; d ++){ int shift = fbase + width-1;// + width*(height-1); int location = base + width-1;// + width*(height-1) + d * step; bottom_diff[location] += top_diff[location] * (filters[shift + step] + filters[shift + 2*step] + filters[shift + 3*step] + filters[shift + 4*step]); // bottom_diff[location] += top_diff[location]; shift -= 1; location -= 1; bottom_diff[location] += top_diff[location] * filters[shift + 2*step]; } for(int col=0;col<width-1;col++){ int shift = fbase + col; int location = base + col; bottom_diff[location] += top_diff[location] * filters[shift + 3*step]; location += (depth - 1)*step; bottom_diff[location] += top_diff[location] * filters[shift + 4*step]; }*/ for (int col = 0; col < width; col++) { int shift = fbase + col; int location = base + 
col; bottom_diff[location] += top_diff[location] * filters[shift + 2 * step]; location += (depth - 1) * step; bottom_diff[location] += top_diff[location] * filters[shift + 3 * step]; } } __global__ void sga_left_weight_backward (const int n, const float *bottom_data, const float *top_data, const float *temp_diff, const float *idx, const int channel, const int height, const int width, const int depth, const int wsize, float *filters_diff){ int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n) { return; } int step = height * width; for(int k=0; k<channel; k++){ int base = index / step * step * depth * channel + index % step + k*step*depth; //up->down // int base = index / step * step * depth + index % step; //up->down int fbase = index / step * step * wsize + index % step; // int row = index%step/width; int col = index % step % width; for (int i = 0; i < depth; i++) filters_diff[fbase] += temp_diff[base + i * step] * bottom_data[base + i * step]; if (col + 1 < width) { int location = fbase + step; for (int i = 0; i < depth; i++) filters_diff[location] += temp_diff[base + i * step] * top_data[base + i * step + 1]; location = fbase + 2 * step; filters_diff[location] += temp_diff[base] * bottom_data[base]; for (int i = 1; i < depth; i++) filters_diff[location] += temp_diff[base + i * step] * top_data[base + (i - 1) * step + 1]; location = fbase + 3 * step; filters_diff[location] += temp_diff[base + (depth - 1) * step] * bottom_data[base + (depth - 1) * step]; for (int i = 0; i < depth - 1; i++) filters_diff[location] += temp_diff[base + i * step] * top_data[base + (i + 1) * step + 1]; } /* else{ //int location = fbase + step; for(int i=0; i<depth; i++){ float temp = temp_diff[base+i*step]*bottom_data[base+i*step]; filters_diff[fbase + step] += temp; //temp_diff[base+i*step]*bottom_data[base+i*step]; filters_diff[fbase + 3*step] += temp; //temp_diff[base+i*step]*bottom_data[base+i*step]; filters_diff[fbase + 4*step] += temp; 
        //temp_diff[base+i*step]*bottom_data[base+i*step]; } // filters_diff[location] += temp_diff[base+i*step]*bottom_data[base+i*step]; // location = fbase + 3*step; // for(int i=0; i<depth; i++) // filters_diff[location] += temp_diff[base+i*step]*bottom_data[base+i*step]; // // location = fbase + 4*step; // for(int i=0; i<depth; i++) // filters_diff[location] += temp_diff[base+i*step]*bottom_data[base+i*step]; }*/
        //1
        // Fifth tap: gradient w.r.t. the weight applied to the right
        // neighbour's arg-max-depth value (idx comes from MaxDepth).
        if (col + 1 < width) {
            int location = fbase + 4 * step;
            int k = idx[index + 1];
            for (int i = 0; i < depth; i++)
                filters_diff[location] += temp_diff[base + i * step] * top_data[base + k * step + 1];
        }
        //
        /* else{ int location = fbase + 2*step; for(int i=0; i<depth; i++) filters_diff[location] += temp_diff[base+i*step]*bottom_data[base+i*step]; } */
    }
}

// Host launcher for the SGA forward pass.
// input:      (num, channel, depth, height, width) cost volume.
// guidance_*: per-direction aggregation weights; wsize = guidance_down.size(1).
// temp_out:   scratch buffer the size of input; output: aggregated result;
// mask:       per-element record (0..3) of which directional sweep won.
// For each of the four directions, the raw volume is copied into temp_out,
// aggregated in place by the directional kernel, and folded into output via
// the element-wise Max kernel.
// NOTE(review): `threads` actually holds the BLOCK count of each launch;
// kernel launches are unchecked (no cudaGetLastError), and Tensor::data<T>()
// is the pre-1.5 PyTorch accessor (data_ptr<T>() is the modern equivalent).
// NOTE(review): mask is never cleared here (the cudaMemset is commented
// out) — the down sweep's winners (index 0) rely on the caller
// zero-initializing mask before this call; confirm at the call site.
void sga_kernel_forward (at::Tensor input, at::Tensor guidance_down, at::Tensor guidance_up, at::Tensor guidance_right, at::Tensor guidance_left, at::Tensor temp_out, at::Tensor output, at::Tensor mask){
    int num = input.size(0);
    int channel = input.size(1);
    int depth = input.size(2);
    int height = input.size(3);
    int width = input.size(4);
    int wsize = guidance_down.size(1);
    //THCudaTensor_nElement(state, input);
    float *top_data = output.data<float>();
    float *top_temp = temp_out.data<float>();
    float *top_mask = mask.data<float>();
    const float *bottom_data = input.data<float>();
    const float *g0 = guidance_down.data<float>();
    const float *g1 = guidance_up.data<float>();
    const float *g2 = guidance_right.data<float>();
    const float *g3 = guidance_left.data<float>();
    int n = num * channel * width;   // one thread per column for the vertical sweeps
    int threads = (n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
    int N = input.numel ();
    // cudaStream_t stream = at::cuda::getCurrentCUDAStream();
    // Down sweep seeds output directly (its mask index is the assumed 0).
    cudaMemcpy (top_temp, bottom_data, sizeof (float) * N, cudaMemcpyDeviceToDevice);
    sga_down_forward <<< threads, CUDA_NUM_THREADS >>> (n, g0, channel, height, width, depth, wsize, top_temp);
    // cudaMemset( top_mask, 0, sizeof(float)*N);
    cudaMemcpy (top_data, top_temp, sizeof (float) * N, cudaMemcpyDeviceToDevice);
    // Up sweep, folded in via Max with mask index 1.
    cudaMemcpy (top_temp, bottom_data, sizeof (float) * N, cudaMemcpyDeviceToDevice);
    sga_up_forward <<< threads, CUDA_NUM_THREADS >>> (n, g1, channel, height, width, depth, wsize, top_temp);
    Max <<< (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS, CUDA_NUM_THREADS >>> (N, top_temp, top_data, top_mask, 1);
    // Horizontal sweeps use one thread per row.
    n = num * channel * height;
    threads = (n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
    cudaMemcpy (top_temp, bottom_data, sizeof (float) * N, cudaMemcpyDeviceToDevice);
    sga_right_forward <<< threads, CUDA_NUM_THREADS >>> (n, g2, channel, height, width, depth, wsize, top_temp);
    Max <<< (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS, CUDA_NUM_THREADS >>> (N, top_temp, top_data, top_mask, 2);
    cudaMemcpy (top_temp, bottom_data, sizeof (float) * N, cudaMemcpyDeviceToDevice);
    sga_left_forward <<< threads, CUDA_NUM_THREADS >>> (n, g3, channel, height, width, depth, wsize, top_temp);
    Max <<< (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS, CUDA_NUM_THREADS >>> (N, top_temp, top_data, top_mask, 3);
    // cudaMemset( top_temp, 0, sizeof(float)*THCudaTensor_nElement(state, top_temp));
}

// Host launcher for the SGA backward pass.  For each direction it (re)builds
// that direction's aggregated volume in temp_out, gates the incoming
// gradient through the stored winner mask (get_temp_grad), recomputes the
// per-position arg-max depths (MaxDepth), then accumulates gradients w.r.t.
// the input volume (sga_*_data_backward) and the guidance weights
// (sga_*_weight_backward).
// NOTE(review): the left direction is processed first and reuses whatever
// temp_out still holds (its re-aggregation is commented out below) — this
// relies on temp_out being preserved unchanged since sga_kernel_forward,
// whose last sweep was left; confirm callers do not touch temp_out.
void sga_kernel_backward (at::Tensor input, at::Tensor guidance_down, at::Tensor guidance_up, at::Tensor guidance_right, at::Tensor guidance_left, at::Tensor temp_out, at::Tensor mask, at::Tensor max_idx, at::Tensor gradOutput, at::Tensor temp_grad, at::Tensor gradInput, at::Tensor grad_down, at::Tensor grad_up, at::Tensor grad_right, at::Tensor grad_left){
    int num = input.size(0);
    int channel = input.size(1);
    int depth = input.size(2);
    int height = input.size(3);
    int width = input.size(4);
    int wsize = guidance_down.size(1);
    //THCudaTensor_nElement(state, input);
    float *top_grad = temp_grad.data<float>();
    float *top_temp = temp_out.data<float>();
    const float *top_mask = mask.data<float>();
    const float *bottom_data = input.data<float>();
    const float *grad_out = gradOutput.data<float>();
    const float *g0 = guidance_down.data<float>();
    const float *g1 =
    guidance_up.data<float>();
    const float *g2 = guidance_right.data<float>();
    const float *g3 = guidance_left.data<float>();
    float *grad0 = grad_down.data<float>();
    float *grad1 = grad_up.data<float>();
    float *grad2 = grad_right.data<float>();
    float *grad3 = grad_left.data<float>();
    float *grad_input = gradInput.data<float>();
    float *idx = max_idx.data<float>();
    int N = input.numel ();
    // cudaStream_t stream = at::cuda::getCurrentCUDAStream();

    //backward for left
    // temp_out is assumed to still hold the left-aggregated volume from the
    // forward pass, so the re-aggregation below stays commented out.
    int n = num * channel * height;
    // cudaMemcpy(top_temp, bottom_data, sizeof(float)*N, cudaMemcpyDeviceToDevice);
    // sga_left_forward<<<(n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS, CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>
    // (n,g3,height,width,depth,wsize,top_temp);
    cudaMemset (top_grad, 0, sizeof (float) * N);
    // Route gradOutput to the elements the left sweep won (mask index 3).
    get_temp_grad <<< (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS, CUDA_NUM_THREADS >>> (N, grad_out, top_mask, top_grad, 3);
    N = num * channel * width * height;
    // Recompute per-position arg-max depths of the aggregated volume.
    MaxDepth <<< (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS, CUDA_NUM_THREADS >>> (N, top_temp, height * width, depth, idx);
    sga_left_data_backward <<< (n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS, CUDA_NUM_THREADS >>> (n, g3, top_grad, idx, channel, height, width, depth, wsize, grad_input);
    n = num * width * height;
    sga_left_weight_backward <<< (n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS, CUDA_NUM_THREADS >>> (n, bottom_data, top_temp, top_grad, idx, channel, height, width, depth, wsize, grad3);

    //backward for down
    N = input.numel ();
    n = num * channel * width;
    // Rebuild the down-aggregated volume in temp_out before differentiating.
    cudaMemcpy (top_temp, bottom_data, sizeof (float) * N, cudaMemcpyDeviceToDevice);
    sga_down_forward <<< (n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS, CUDA_NUM_THREADS >>> (n, g0, channel, height, width, depth, wsize, top_temp);
    cudaMemset (top_grad, 0, sizeof (float) * N);
    get_temp_grad <<< (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS, CUDA_NUM_THREADS >>> (N, grad_out, top_mask, top_grad, 0);
    N = num * channel * width * height;
    MaxDepth <<< (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS, CUDA_NUM_THREADS >>> (N, top_temp, height * width, depth, idx);
    sga_down_data_backward <<< (n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS, CUDA_NUM_THREADS >>> (n, g0, top_grad, idx, channel, height, width, depth, wsize, grad_input);
    n = num * width * height;
    sga_down_weight_backward <<< (n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS, CUDA_NUM_THREADS >>> (n, bottom_data, top_temp, top_grad, idx, channel, height, width, depth, wsize, grad0);

    // backward for up
    N = input.numel ();
    n = num * channel * width;
    cudaMemcpy (top_temp, bottom_data, sizeof (float) * N, cudaMemcpyDeviceToDevice);
    sga_up_forward <<< (n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS, CUDA_NUM_THREADS >>> (n, g1, channel, height, width, depth, wsize, top_temp);
    cudaMemset (top_grad, 0, sizeof (float) * N);
    get_temp_grad <<< (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS, CUDA_NUM_THREADS >>> (N, grad_out, top_mask, top_grad, 1);
    N = num * channel * width * height;
    MaxDepth <<< (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS, CUDA_NUM_THREADS >>> (N, top_temp, height * width, depth, idx);
    sga_up_data_backward <<< (n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS, CUDA_NUM_THREADS >>> (n, g1, top_grad, idx, channel, height, width, depth, wsize, grad_input);
    n = num * width * height;
    sga_up_weight_backward <<< (n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS, CUDA_NUM_THREADS >>> (n, bottom_data, top_temp, top_grad, idx, channel, height, width, depth, wsize, grad1);

    //backward for right
    N = input.numel ();
    n = num * channel * height;
    cudaMemcpy (top_temp, bottom_data, sizeof (float) * N, cudaMemcpyDeviceToDevice);
    sga_right_forward <<< (n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS, CUDA_NUM_THREADS >>> (n, g2, channel, height, width, depth, wsize, top_temp);
    cudaMemset (top_grad, 0, sizeof (float) * N);
    get_temp_grad <<< (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS, CUDA_NUM_THREADS >>> (N, grad_out, top_mask, top_grad, 2);
    N = num * channel * width * height;
    MaxDepth <<< (N + CUDA_NUM_THREADS
- 1) / CUDA_NUM_THREADS, CUDA_NUM_THREADS >>> (N, top_temp, height * width, depth, idx);
    sga_right_data_backward <<< (n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS, CUDA_NUM_THREADS >>>
        (n, g2, top_grad, idx, channel, height, width, depth, wsize, grad_input);
    n = num * width * height;
    sga_right_weight_backward <<< (n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS, CUDA_NUM_THREADS >>>
        (n, bottom_data, top_temp, top_grad, idx, channel, height, width, depth, wsize, grad2);
}

// Local Guided Aggregation (LGA) forward filtering kernel.
// One thread per element of the cost volume laid out as [batch, channel(=depth), height, width]
// (step = height*width; `depth` here is the position along the channel/disparity axis).
// For each element it accumulates a (2*radius+1)^2 x 3 local neighborhood (3 along the
// channel axis, d in {-1,0,1}) weighted by a per-pixel filter bank `filters`.
// Out-of-bounds neighbors fall back to shift==0, i.e. the center element is reused.
// NOTE(review): the kernel accumulates with `+=`, so `top_data` must be zero-initialized
// by the caller — TODO confirm the Python side allocates it with zeros.
__global__ void
lga_filtering_forward (const int n, const float *bottom_data, const float *filters,
                       const int height, const int width, const int channel,
                       const int radius, float *top_data)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    // printf("OK\n");
    // printf("%d, %.2f, %.2f\n", index, bottom_data[index], top_data[index]);
    if (index >= n) {
        return;
    }
    // top_data[index]=1.0;
    // assert(0);
    int step = height * width;
    int wsize = 2 * radius + 1;
    // int fsize=wsize*wsize*3;
    // fbase: start of this pixel's filter stack for this batch element; filters are laid
    // out as [batch, wsize*wsize*3, height, width].
    int fbase = index / (step * channel) * (step * wsize * wsize * 3) + index % step;
    int row = index % step / width;
    int col = index % width;
    int depth = index / step % channel;
    for (int d = -1; d <= 1; d++) {
        for (int r = -radius; r <= radius; r++) {
            for (int c = -radius; c <= radius; c++) {
                int rr = r + row;
                int cc = c + col;
                int dd = d + depth;
                int shift = 0;  // OOB neighbors degenerate to the center element
                if (rr >= 0 && cc >= 0 && dd >= 0 && rr < height && cc < width && dd < channel)
                    shift = r * width + c + d * step;
                // location: index of this (d, r, c) tap inside the filter stack.
                int location = (d + 1) * (wsize * wsize) + (r + radius) * wsize + c + radius;
                top_data[index] += bottom_data[index + shift] * filters[fbase + location * step];
            }
        }
    }
    // top_data[index]=1.0;
    // printf("%d, %d, %d, %.2f, %.2f\n", index, row, col, bottom_data[index], top_data[index]);
}

// Gradient of the LGA filtering w.r.t. the filter weights.
// One thread per filter element (n = numel(filters)); each thread sums the product of the
// upstream gradient and the input over all `channel` positions that used this tap.
// The else-branch mirrors the forward OOB fallback (shift==0 -> center element).
// NOTE(review): accumulates with `+=`, so `filter_diff` must be pre-zeroed by the caller.
__global__ void
lga_filter_backward (const int n, const float *bottom_data, const float *top_diff,
                     const int height, const int width, const int channel,
                     const int radius, float *filter_diff)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= n) {
        return;
    }
    int step = height * width;
    int wsize = 2 * radius + 1;
    // base: corresponding element in the [batch, channel, H, W] data tensor.
    int base = index / (step * wsize * wsize * 3) * (step * channel) + index % step;
    // Decode which (d, r, c) tap this filter element is.
    int location = index / step % (wsize * wsize * 3);
    int d = location / (wsize * wsize) - 1;
    int r = (location / wsize) % wsize - radius;
    int c = location % wsize - radius;
    int rr = index % step / width + r;
    int cc = index % width + c;
    for (int i = 0; i < channel; i++) {
        int dd = i + d;
        if (rr >= 0 && cc >= 0 && dd >= 0 && rr < height && cc < width && dd < channel) {
            int shift = r * width + c + d * step;
            filter_diff[index] += top_diff[base + i * step] * bottom_data[base + shift + i * step];
        }
        else
            filter_diff[index] += top_diff[base + i * step] * bottom_data[base + i * step];
    }
}

// Gradient of the LGA filtering w.r.t. the input data.
// One thread per input element; scatters are expressed as gathers: for each neighbor that
// would have read this element in the forward pass, accumulate its upstream gradient times
// the corresponding filter tap. `fbase + r*width + c` is the neighbor pixel's filter base
// (its fbase differs from ours exactly by the spatial offset), and `location` is the
// mirrored tap (-d, -r, -c) that the neighbor used to reach us.
// The else-branch handles the forward's OOB fallback, where the center element was reused.
__global__ void
lga_data_backward (const int n, const float *filters, const float *top_diff,
                   const int height, const int width, const int channel,
                   const int radius, float *bottom_diff)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= n) {
        return;
    }
    int step = height * width;
    int wsize = 2 * radius + 1;
    // int fsize=wsize*wsize*3;
    int fbase = index / (step * channel) * (step * wsize * wsize * 3) + index % step;
    int row = index % step / width;
    int col = index % width;
    int depth = index / step % channel;
    for (int d = -1; d <= 1; d++) {
        for (int r = -radius; r <= radius; r++) {
            for (int c = -radius; c <= radius; c++) {
                int rr = r + row;
                int cc = c + col;
                int dd = d + depth;
                // int shift = 0;
                if (rr >= 0 && cc >= 0 && dd >= 0 && rr < height && cc < width && dd < channel) {
                    int shift = r * width + c + d * step;
                    // int fshift= r*width+c;
                    int location = (-d + 1) * (wsize * wsize) + (-r + radius) * wsize - c + radius;
                    bottom_diff[index] += top_diff[index + shift] * filters[fbase + r * width + c + location * step];
                }
                else {
                    int location = (d + 1) * (wsize * wsize) + (r + radius) * wsize + c + radius;
                    bottom_diff[index] += top_diff[index] * filters[fbase + location * step];
                }
            }
        }
    }
}

// Host wrapper: LGA forward for 4-D tensors [N, C, H, W].
void lga_forward (at::Tensor input, at::Tensor filters, at::Tensor output, const int radius)
{
    // print_kernel<<<10, 10>>>();
    // cudaDeviceSynchronize();
    // int num=input->size(0);
    int channel = input.size(1);
    int height = input.size(2);
    int width = input.size(3);
    int n = input.numel ();
    // printf("%d, %d, %d, %d, %d\n", height, width, channel, n, radius);
    // cudaStream_t stream = at::cuda::getCurrentCUDAStream();
    /* float *temp = new float[n];
       float *out = input.data<float>();
       cudaMemcpy(temp,out,n*sizeof(float),cudaMemcpyDeviceToHost);
       for(int i=0;i<n;i++) printf("%.2f ", temp[i]); */
    lga_filtering_forward <<< (n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS, CUDA_NUM_THREADS >>>
        (n, input.data<float>(), filters.data<float>(), height, width, channel, radius,
         output.data<float>());
    // temp = new float[n];
}

// Host wrapper: LGA backward for 4-D tensors.
// Zeroes gradInput before the scatter kernel; gradFilters is accumulated with `+=` by
// lga_filter_backward, so it is presumably pre-zeroed by the caller — TODO confirm.
void lga_backward (at::Tensor input, at::Tensor filters, at::Tensor gradOutput,
                   at::Tensor gradInput, at::Tensor gradFilters, const int radius)
{
    // int num=input->size(0);
    int channel = input.size(1);
    int height = input.size(2);
    int width = input.size(3);
    // cudaStream_t stream = at::cuda::getCurrentCUDAStream();
    int n = filters.numel ();
    lga_filter_backward <<< (n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS, CUDA_NUM_THREADS >>>
        (n, input.data<float>(), gradOutput.data<float>(), height, width, channel, radius,
         gradFilters.data<float>());
    // printf("%d, %d, %d, %d\n", height, width, channel, n);
    n = input.numel ();
    float *grad = gradInput.data<float>();
    cudaMemset (grad, 0, sizeof (float) * n);
    lga_data_backward <<< (n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS, CUDA_NUM_THREADS >>>
        (n, filters.data<float>(), gradOutput.data<float>(), height, width, channel, radius, grad);
}

// Host wrapper: LGA forward for 5-D tensors [N, ?, C, H, W] — dims shifted by one
// relative to lga_forward; the same kernels are reused.
void lga3d_forward (at::Tensor input, at::Tensor filters, at::Tensor output, const int radius)
{
    // int num=input->size(0);
    int channel = input.size(2);
    int height = input.size(3);
    int width = input.size(4);
    int n = input.numel ();
    // cudaStream_t stream = at::cuda::getCurrentCUDAStream();
    lga_filtering_forward <<< (n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS, CUDA_NUM_THREADS >>>
        (n, input.data<float>(), filters.data<float>(), height, width, channel, radius,
         output.data<float>());
}

// Host wrapper: LGA backward for 5-D tensors (see lga3d_forward for the dim mapping).
void lga3d_backward (at::Tensor input, at::Tensor filters, at::Tensor gradOutput,
                     at::Tensor gradInput, at::Tensor gradFilters, const int radius)
{
    // int num=input->size(0);
    int channel = input.size(2);
    int height = input.size(3);
    int width = input.size(4);
    // cudaStream_t stream = at::cuda::getCurrentCUDAStream();
    int n = filters.numel ();
    lga_filter_backward <<< (n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS, CUDA_NUM_THREADS >>>
        (n, input.data<float>(), gradOutput.data<float>(), height, width, channel, radius,
         gradFilters.data<float>());
    n = input.numel ();
    float *grad = gradInput.data<float>();
    cudaMemset (grad, 0, sizeof (float) * n);
    lga_data_backward <<< (n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS, CUDA_NUM_THREADS >>>
        (n, filters.data<float>(), gradOutput.data<float>(), height, width, channel, radius, grad);
}

#ifdef __cplusplus
}
#endif
the_stack
#include "multi_bspline.h"
#include "multi_bspline_create_cuda.h"

//__constant__ float A[48];

// typedef struct
// {
//   float *coefs_real, *coefs_imag;
//   uint3 stride;
//   float3 gridInv;
//   int num_splines;
// } multi_UBspline_3d_c_cuda;

#ifndef NO_CUDA_MAIN

// Upload a host-side complex 3-D multi-spline to the GPU.
// Splits the interleaved complex coefficients into separate real/imag arrays
// (structure-of-arrays for coalesced access), pads the spline count up to a
// multiple of the thread-block size, and copies the cubic B-spline basis
// matrices into constant memory.
// Returns a heap-allocated descriptor the caller owns (freed with free() +
// cudaFree of the two coefficient arrays).
extern "C" multi_UBspline_3d_c_cuda*
create_multi_UBspline_3d_c_cuda (multi_UBspline_3d_c* spline)
{
  // Basis matrices: rows 0-3 value, 4-7 first derivative, 8-11 second derivative.
  float A_h[48] = { -1.0/6.0,  3.0/6.0, -3.0/6.0, 1.0/6.0,
                     3.0/6.0, -6.0/6.0,  0.0/6.0, 4.0/6.0,
                    -3.0/6.0,  3.0/6.0,  3.0/6.0, 1.0/6.0,
                     1.0/6.0,  0.0/6.0,  0.0/6.0, 0.0/6.0,
                     0.0,     -0.5,      1.0,    -0.5,
                     0.0,      1.5,     -2.0,     0.0,
                     0.0,     -1.5,      1.0,     0.5,
                     0.0,      0.5,      0.0,     0.0,
                     0.0,      0.0,     -1.0,     1.0,
                     0.0,      0.0,      3.0,    -2.0,
                     0.0,      0.0,     -3.0,     1.0,
                     0.0,      0.0,      1.0,     0.0 };
  // NOTE(review): this initializes the symbol `A`, while the kernels in this file
  // read `Acuda` — confirm both constant symbols are initialized before kernel use.
  cudaMemcpyToSymbol(A, A_h, 48*sizeof(float), 0, cudaMemcpyHostToDevice);

  // BUG FIX: was malloc(sizeof(multi_UBspline_3d_c_cuda*)) — allocated only a
  // pointer's worth of memory; every field write past it was heap corruption.
  multi_UBspline_3d_c_cuda *cuda_spline =
    (multi_UBspline_3d_c_cuda*) malloc (sizeof (multi_UBspline_3d_c_cuda));
  cuda_spline->num_splines = spline->num_splines;

  // +3 in each dimension for the cubic-spline boundary coefficients.
  int Nx = spline->x_grid.num+3;
  int Ny = spline->y_grid.num+3;
  int Nz = spline->z_grid.num+3;

  int N = spline->num_splines;
  // Pad the spline count so every block is full.
  // NOTE(review): mixes BLOCK_SIZE with a literal 64 — only correct if
  // BLOCK_SIZE == 64; consider N += BLOCK_SIZE - (N%BLOCK_SIZE).
  if ((N%BLOCK_SIZE) != 0)
    N += 64 - (N%BLOCK_SIZE);

  cuda_spline->stride.x = Ny*Nz*N;
  cuda_spline->stride.y = Nz*N;
  cuda_spline->stride.z = N;

  // BUG FIX: was Nx*Ny*Nz+N*sizeof(float) ('+' instead of '*'), which
  // undersized both the device arrays and the staging buffer by orders of
  // magnitude; the fill loops below then wrote far out of bounds.
  size_t size = (size_t)Nx*Ny*Nz*N*sizeof(float);

  cudaMalloc((void**)&(cuda_spline->coefs_real), size);
  cudaMalloc((void**)&(cuda_spline->coefs_imag), size);

  float *spline_buff = (float*)malloc(size);
  // Zero the staging buffer so the padded splines (isp >= num_splines) hold
  // zeros instead of uninitialized garbage on the device.
  memset(spline_buff, 0, size);

  // Repack real parts: host layout (x_stride/y_stride/z_stride, AoS complex)
  // -> device layout (spline index fastest, padded to N).
  for (int ix=0; ix<Nx; ix++)
    for (int iy=0; iy<Ny; iy++)
      for (int iz=0; iz<Nz; iz++)
        for (int isp=0; isp<spline->num_splines; isp++) {
          spline_buff[ix*cuda_spline->stride.x +
                      iy*cuda_spline->stride.y +
                      iz*cuda_spline->stride.z + isp] =
            spline->coefs[ix*spline->x_stride +
                          iy*spline->y_stride +
                          iz*spline->z_stride + isp].real();
        }
  cudaMemcpy(cuda_spline->coefs_real, spline_buff, size, cudaMemcpyHostToDevice);

  // Same repacking for the imaginary parts, reusing the staging buffer.
  for (int ix=0; ix<Nx; ix++)
    for (int iy=0; iy<Ny; iy++)
      for (int iz=0; iz<Nz; iz++)
        for (int isp=0; isp<spline->num_splines; isp++) {
          spline_buff[ix*cuda_spline->stride.x +
                      iy*cuda_spline->stride.y +
                      iz*cuda_spline->stride.z + isp] =
            spline->coefs[ix*spline->x_stride +
                          iy*spline->y_stride +
                          iz*spline->z_stride + isp].imag();
        }
  cudaMemcpy(cuda_spline->coefs_imag, spline_buff, size, cudaMemcpyHostToDevice);

  free(spline_buff);
  return cuda_spline;
}
#endif

// Evaluate all splines at one 3-D point per walker (values only).
// Grid: blockIdx.y = walker, blockIdx.x * BLOCK_SIZE + threadIdx.x = spline index.
// pos holds 4 floats per walker (x, y, z, unused); vals[ir] is the per-walker
// output array, interleaved real/imag.
__global__ static void
eval_multi_multi_UBspline_3d_c_cuda (float *pos, float3 drInv,
                                     const float *coefs_real, const float *coefs_imag,
                                     float *vals[], uint3 strides)
{
  int block = blockIdx.x;
  int thr   = threadIdx.x;
  int ir    = blockIdx.y;
  int off   = block*BLOCK_SIZE+thr;

  __shared__ float *myval;
  __shared__ float abc[64], coefs[2*BLOCK_SIZE];

  // __shared__ float pos_s[BLOCK_SIZE];
  // int ir1 = (ir >> 4)*64;
  // int ir2 = (ir & 15)*4;
  // pos_s[thr] = pos[ir1+thr];
  // __syncthreads();
  // float3 r;
  // r.x = pos_s[ir2+0];
  // r.y = pos_s[ir2+1];
  // r.z = pos_s[ir2+2];
  __shared__ float3 r;
  if (thr == 0) {
    r.x = pos[4*ir+0];
    r.y = pos[4*ir+1];
    r.z = pos[4*ir+2];
    myval = vals[ir];
  }
  __syncthreads();

  // Split each coordinate into knot index and fractional part t in [0,1).
  int3 index;
  float3 t;
  float s, sf;
  float4 tp[3];
  s = r.x * drInv.x;  sf = floor(s);  index.x = (int)sf;  t.x = s - sf;
  s = r.y * drInv.y;  sf = floor(s);  index.y = (int)sf;  t.y = s - sf;
  s = r.z * drInv.z;  sf = floor(s);  index.z = (int)sf;  t.z = s - sf;

  tp[0] = make_float4(t.x*t.x*t.x, t.x*t.x, t.x, 1.0);
  tp[1] = make_float4(t.y*t.y*t.y, t.y*t.y, t.y, 1.0);
  tp[2] = make_float4(t.z*t.z*t.z, t.z*t.z, t.z, 1.0);

  // Per-axis cubic basis values a/b/c, computed by the first 4 threads.
  __shared__ float a[4], b[4], c[4];
  if (thr < 4) {
    a[thr] = Acuda[4*thr+0]*tp[0].x + Acuda[4*thr+1]*tp[0].y + Acuda[4*thr+2]*tp[0].z + Acuda[4*thr+3]*tp[0].w;
    b[thr] = Acuda[4*thr+0]*tp[1].x + Acuda[4*thr+1]*tp[1].y + Acuda[4*thr+2]*tp[1].z + Acuda[4*thr+3]*tp[1].w;
    c[thr] = Acuda[4*thr+0]*tp[2].x + Acuda[4*thr+1]*tp[2].y + Acuda[4*thr+2]*tp[2].z + Acuda[4*thr+3]*tp[2].w;
  }
  __syncthreads();

  // 64 tensor-product weights a[i]*b[j]*c[k]; one per thread (assumes BLOCK_SIZE >= 64).
  int i = (thr>>4)&3;
  int j = (thr>>2)&3;
  int k = (thr & 3);
  abc[thr] = a[i]*b[j]*c[k];
  __syncthreads();

  float val_real = 0.0;
  float val_imag = 0.0;
  for (int i=0; i<4; i++) {
    for (int j=0; j<4; j++) {
      // FIX: const-qualified — the source arrays are const float*.
      const float *base_real =
coefs_real + (index.x+i)*strides.x + (index.y+j)*strides.y + index.z*strides.z;
      // NOTE(review): non-const pointer initialized from const float* — ill-formed
      // C++; should be `const float *`. Left unchanged here.
      float *base_imag = coefs_imag + (index.x+i)*strides.x + (index.y+j)*strides.y + index.z*strides.z;
      for (int k=0; k<4; k++) {
        val_real += abc[16*i+4*j+k] * base_real[off+k*strides.z];
        val_imag += abc[16*i+4*j+k] * base_imag[off+k*strides.z];
      }
    }
  }

  // Commented-out alternative that staged coefficients through shared memory:
  // for (int i=0; i<4; i++) {
  //   for (int j=0; j<4; j++) {
  //     float *base_real = coefs_real + (index.x+i)*strides.x + (index.y+j)*strides.y + index.z*strides.z;
  //     float *base_imag = coefs_imag + (index.x+i)*strides.x + (index.y+j)*strides.y + index.z*strides.z;
  //     for (int k=0; k<4; k++) {
  //       coefs[thr] = base_real[(2*block+0)*BLOCK_SIZE+thr];
  //       coefs[thr+BLOCK_SIZE] = base_real[(2*block+1)*BLOCK_SIZE+thr];
  //       __syncthreads();
  //       val_real += abc[16*i+4*j+k] * coefs[2*thr+0];
  //       val_imag += abc[16*i+4*j+k] * coefs[2*thr+1];
  //     }
  //   }
  // }

  // Reinterleave (real, imag) pairs through shared memory so the two global
  // stores per thread are coalesced.
  __shared__ float buff[2*BLOCK_SIZE];
  buff[2*thr+0] = val_real;
  buff[2*thr+1] = val_imag;
  __syncthreads();
  myval[off] = buff[thr];
  myval[off+BLOCK_SIZE] = buff[thr+BLOCK_SIZE];
  // myval[2*off+0] = val_real;
  // myval[2*off+1] = val_imag;
  //myval[off+BLOCK_SIZE] = val_imag;
  //vals_real[ir][offset] = val_real;
  //vals_imag[ir][offset] = val_imag;
}

// Evaluate all splines at one 3-D point per walker, producing value, gradient,
// and Hessian (VGH). Same grid layout as the value-only kernel above:
// blockIdx.y = walker, off = spline index. vals/grads/hess hold per-walker
// output arrays; results are complex, interleaved real/imag.
__global__ static void
eval_multi_multi_UBspline_3d_c_vgh_cuda (float *pos, float3 drInv,
                                         const float *coefs_real, const float *coefs_imag,
                                         float *vals[], float *grads[], float *hess[],
                                         uint3 strides)
{
  int block = blockIdx.x;
  int thr   = threadIdx.x;
  int ir    = blockIdx.y;
  int off   = block*BLOCK_SIZE+thr;

  __shared__ float *myval, *mygrad, *myhess;
  __shared__ float3 r;
  if (thr == 0) {
    r.x = pos[4*ir+0];
    r.y = pos[4*ir+1];
    r.z = pos[4*ir+2];
    myval  = vals[ir];
    mygrad = grads[ir];
    myhess = hess[ir];
  }
  __syncthreads();

  // Knot index + fractional part per axis, then the power vector (t^3, t^2, t, 1).
  int3 index;
  float3 t;
  float s, sf;
  float4 tp[3];
  s = r.x * drInv.x;  sf = floor(s);  index.x = (int)sf;  t.x = s - sf;
  s = r.y * drInv.y;  sf = floor(s);  index.y = (int)sf;  t.y = s - sf;
  s = r.z * drInv.z;  sf = floor(s);  index.z = (int)sf;  t.z = s - sf;
  tp[0] = make_float4(t.x*t.x*t.x, t.x*t.x, t.x, 1.0);
  tp[1] = make_float4(t.y*t.y*t.y, t.y*t.y, t.y, 1.0);
  tp[2] = make_float4(t.z*t.z*t.z, t.z*t.z, t.z, 1.0);

  // First 4 of a are value, second 4 are derivative, last four are
  // second derivative.
  __shared__ float a[12], b[12], c[12];
  if (thr < 12) {
    a[thr] = Acuda[4*thr+0]*tp[0].x + Acuda[4*thr+1]*tp[0].y + Acuda[4*thr+2]*tp[0].z + Acuda[4*thr+3]*tp[0].w;
    b[thr] = Acuda[4*thr+0]*tp[1].x + Acuda[4*thr+1]*tp[1].y + Acuda[4*thr+2]*tp[1].z + Acuda[4*thr+3]*tp[1].w;
    c[thr] = Acuda[4*thr+0]*tp[2].x + Acuda[4*thr+1]*tp[2].y + Acuda[4*thr+2]*tp[2].z + Acuda[4*thr+3]*tp[2].w;
  }
  __syncthreads();

  // 64 tensor-product taps x 10 outputs (1 value + 3 gradient + 6 Hessian).
  __shared__ float abc[640];
  int i = (thr>>4)&3;
  int j = (thr>>2)&3;
  int k = (thr & 3);
  abc[10*(16*i+4*j+k)+0] = a[i+0]*b[j+0]*c[k+0]; // val
  abc[10*(16*i+4*j+k)+1] = a[i+4]*b[j+0]*c[k+0]; // d/dx
  abc[10*(16*i+4*j+k)+2] = a[i+0]*b[j+4]*c[k+0]; // d/dy
  abc[10*(16*i+4*j+k)+3] = a[i+0]*b[j+0]*c[k+4]; // d/dz
  abc[10*(16*i+4*j+k)+4] = a[i+8]*b[j+0]*c[k+0]; // d2/dx2
  abc[10*(16*i+4*j+k)+5] = a[i+4]*b[j+4]*c[k+0]; // d2/dxdy
  abc[10*(16*i+4*j+k)+6] = a[i+4]*b[j+0]*c[k+4]; // d2/dxdz
  abc[10*(16*i+4*j+k)+7] = a[i+0]*b[j+8]*c[k+0]; // d2/dy2
  abc[10*(16*i+4*j+k)+8] = a[i+0]*b[j+4]*c[k+4]; // d2/dydz
  abc[10*(16*i+4*j+k)+9] = a[i+0]*b[j+0]*c[k+8]; // d2/dz2
  __syncthreads();

  float v_r = 0.0;
  float v_i = 0.0;
  float g0_r=0.0, g0_i=0.0, g1_r=0.0, g1_i=0.0, g2_r=0.0, g2_i=0.0,
        h00_r=0.0, h00_i=0.0, h01_r=0.0, h01_i=0.0, h02_r=0.0, h02_i=0.0,
        h11_r=0.0, h11_i=0.0, h12_r=0.0, h12_i=0.0, h22_r=0.0, h22_i=0.0;
  int n = 0;
  for (int i=0; i<4; i++) {
    for (int j=0; j<4; j++) {
      // NOTE(review): should be `const float *` (sources are const) — unchanged here.
      float *base_real = coefs_real + (index.x+i)*strides.x + (index.y+j)*strides.y + index.z*strides.z;
      float *base_imag = coefs_imag + (index.x+i)*strides.x + (index.y+j)*strides.y + index.z*strides.z;
      // Commented-out manually unrolled version of the k-loop below:
      // float c0_r, c0_i, c1_r, c1_i, c2_r, c2_i, c3_r, c3_i;
      // c0_r = base_real[off+0*strides.z]; c0_i = base_imag[off+0*strides.z];
      // c1_r = base_real[off+1*strides.z]; c1_i = base_imag[off+1*strides.z];
      // c2_r = base_real[off+2*strides.z]; c2_i = base_imag[off+2*strides.z];
      // c3_r = base_real[off+3*strides.z]; c3_i = base_imag[off+3*strides.z];
      // v_r   += abc[n+0] * c0_r; v_i   += abc[n+0] * c0_i;
      // g0_r  += abc[n+1] * c0_r; g0_i  += abc[n+1] * c0_i;
      // g1_r  += abc[n+2] * c0_r; g1_i  += abc[n+2] * c0_i;
      // g2_r  += abc[n+3] * c0_r; g2_i  += abc[n+3] * c0_i;
      // h00_r += abc[n+4] * c0_r; h00_i += abc[n+4] * c0_i;
      // h01_r += abc[n+5] * c0_r; h01_i += abc[n+5] * c0_i;
      // h02_r += abc[n+6] * c0_r; h02_i += abc[n+6] * c0_i;
      // h11_r += abc[n+7] * c0_r; h11_i += abc[n+7] * c0_i;
      // h12_r += abc[n+8] * c0_r; h12_i += abc[n+8] * c0_i;
      // h22_r += abc[n+9] * c0_r; h22_i += abc[n+9] * c0_i;
      // v_r   += abc[n+10] * c1_r; v_i   += abc[n+10] * c1_i;
      // g0_r  += abc[n+11] * c1_r; g0_i  += abc[n+11] * c1_i;
      // g1_r  += abc[n+12] * c1_r; g1_i  += abc[n+12] * c1_i;
      // g2_r  += abc[n+13] * c1_r; g2_i  += abc[n+13] * c1_i;
      // h00_r += abc[n+14] * c1_r; h00_i += abc[n+14] * c1_i;
      // h01_r += abc[n+15] * c1_r; h01_i += abc[n+15] * c1_i;
      // h02_r += abc[n+16] * c1_r; h02_i += abc[n+16] * c1_i;
      // h11_r += abc[n+17] * c1_r; h11_i += abc[n+17] * c1_i;
      // h12_r += abc[n+18] * c1_r; h12_i += abc[n+18] * c1_i;
      // h22_r += abc[n+19] * c1_r; h22_i += abc[n+19] * c1_i;
      // v_r   += abc[n+20] * c2_r; v_i   += abc[n+20] * c2_i;
      // g0_r  += abc[n+21] * c2_r; g0_i  += abc[n+21] * c2_i;
      // g1_r  += abc[n+22] * c2_r; g1_i  += abc[n+22] * c2_i;
      // g2_r  += abc[n+23] * c2_r; g2_i  += abc[n+23] * c2_i;
      // h00_r += abc[n+24] * c2_r; h00_i += abc[n+24] * c2_i;
      // h01_r += abc[n+25] * c2_r; h01_i += abc[n+25] * c2_i;
      // h02_r += abc[n+26] * c2_r; h02_i += abc[n+26] * c2_i;
      // h11_r += abc[n+27] * c2_r; h11_i += abc[n+27] * c2_i;
      // h12_r += abc[n+28] * c2_r; h12_i += abc[n+28] * c2_i;
      // h22_r += abc[n+29] * c2_r; h22_i += abc[n+29] * c2_i;
      // v_r   += abc[n+30] * c3_r; v_i   += abc[n+30] * c3_i;
      // g0_r  += abc[n+31] * c3_r; g0_i  += abc[n+31] * c3_i;
      // g1_r  += abc[n+32] * c3_r; g1_i  += abc[n+32] * c3_i;
      // g2_r  += abc[n+33] * c3_r; g2_i  += abc[n+33] * c3_i;
      // h00_r += abc[n+34] * c3_r; h00_i += abc[n+34] * c3_i;
      // h01_r += abc[n+35] * c3_r; h01_i += abc[n+35] * c3_i;
      // h02_r += abc[n+36] * c3_r; h02_i += abc[n+36] * c3_i;
      // h11_r += abc[n+37] * c3_r; h11_i += abc[n+37] * c3_i;
      // h12_r += abc[n+38] * c3_r; h12_i += abc[n+38] * c3_i;
      // h22_r += abc[n+39] * c3_r; h22_i += abc[n+39] * c3_i;
      // n += 40;
      for (int k=0; k<4; k++) {
        float cr = base_real[off+k*strides.z];
        float ci = base_imag[off+k*strides.z];
        v_r   += abc[n+0] * cr;  v_i   += abc[n+0] * ci;
        g0_r  += abc[n+1] * cr;  g0_i  += abc[n+1] * ci;
        g1_r  += abc[n+2] * cr;  g1_i  += abc[n+2] * ci;
        g2_r  += abc[n+3] * cr;  g2_i  += abc[n+3] * ci;
        h00_r += abc[n+4] * cr;  h00_i += abc[n+4] * ci;
        h01_r += abc[n+5] * cr;  h01_i += abc[n+5] * ci;
        h02_r += abc[n+6] * cr;  h02_i += abc[n+6] * ci;
        h11_r += abc[n+7] * cr;  h11_i += abc[n+7] * ci;
        h12_r += abc[n+8] * cr;  h12_i += abc[n+8] * ci;
        h22_r += abc[n+9] * cr;  h22_i += abc[n+9] * ci;
        n += 10;
      }
    }
  }
  // Convert from unit-cell (t) derivatives to physical-coordinate derivatives.
  g0_r  *= drInv.x;           g0_i  *= drInv.x;
  g1_r  *= drInv.y;           g1_i  *= drInv.y;
  g2_r  *= drInv.z;           g2_i  *= drInv.z;
  h00_r *= drInv.x * drInv.x; h00_i *= drInv.x * drInv.x;
  h01_r *= drInv.x * drInv.y; h01_i *= drInv.x * drInv.y;
  h02_r *= drInv.x * drInv.z; h02_i *= drInv.x * drInv.z;
  h11_r *= drInv.y * drInv.y; h11_i *= drInv.y * drInv.y;
  h12_r *= drInv.y * drInv.z; h12_i *= drInv.y * drInv.z;
  h22_r *= drInv.z * drInv.z; h22_i *= drInv.z * drInv.z;

  __shared__ float buff[6*BLOCK_SIZE];
  // Note, we can reuse abc, by replacing buff with abc.
// --- tail of eval_multi_multi_UBspline_3d_c_vgh_cuda: stage results through
// --- shared memory so global writes are coalesced, then write value, gradient
// --- and Hessian blocks. NOTE(review): the 32-thread split below assumes
// --- BLOCK_SIZE == 64 — confirm before changing the block size.
buff[2*thr+0] = v_r;
  buff[2*thr+1] = v_i;
  __syncthreads();
  myval[off] = buff[thr];
  myval[off+BLOCK_SIZE] = buff[thr+BLOCK_SIZE];

  buff[6*thr+0] = g0_r;
  buff[6*thr+1] = g0_i;
  buff[6*thr+2] = g1_r;
  buff[6*thr+3] = g1_i;
  buff[6*thr+4] = g2_r;
  buff[6*thr+5] = g2_i;
  __syncthreads();
  for (int i=0; i<6; i++)
    mygrad[(6*block+i)*BLOCK_SIZE+thr] = buff[i*BLOCK_SIZE+thr];
  __syncthreads();

  // Write first half of Hessians (12 floats per spline don't fit in buff at
  // once, so the block's threads are processed in two halves of 32).
  if (thr < 32) {
    buff[12*thr+0]  = h00_r; buff[12*thr+1]  = h00_i;
    buff[12*thr+2]  = h01_r; buff[12*thr+3]  = h01_i;
    buff[12*thr+4]  = h02_r; buff[12*thr+5]  = h02_i;
    buff[12*thr+6]  = h11_r; buff[12*thr+7]  = h11_i;
    buff[12*thr+8]  = h12_r; buff[12*thr+9]  = h12_i;
    buff[12*thr+10] = h22_r; buff[12*thr+11] = h22_i;
  }
  __syncthreads();
  if (thr < 32)
    for (int i=0; i<6; i++)
      myhess[(12*block+i)*BLOCK_SIZE+thr] = buff[i*BLOCK_SIZE+thr];
  __syncthreads();
  int th2 = thr-32;
  if (thr >= 32) {
    buff[12*th2+0]  = h00_r; buff[12*th2+1]  = h00_i;
    buff[12*th2+2]  = h01_r; buff[12*th2+3]  = h01_i;
    buff[12*th2+4]  = h02_r; buff[12*th2+5]  = h02_i;
    buff[12*th2+6]  = h11_r; buff[12*th2+7]  = h11_i;
    buff[12*th2+8]  = h12_r; buff[12*th2+9]  = h12_i;
    buff[12*th2+10] = h22_r; buff[12*th2+11] = h22_i;
  }
  __syncthreads();
  if (thr >= 32) {
    for (int i=0; i<6; i++)
      myhess[(12*block+i+6)*BLOCK_SIZE+th2] = buff[i*BLOCK_SIZE+th2];
  }
}

#ifndef NO_CUDA_MAIN
// Self-test / benchmark: fills a random coefficient set, evaluates the value
// and VGH kernels 10000 times for 200 walkers, and reports evaluations/second.
// `thread` encodes the CUDA device ordinal (pthread-style entry point).
static void *
test_multi_cuda(void *thread)
{
  // CUcontext ctx;
  // CUdevice dev;
  // cuDeviceGet (&dev, (int)(size_t)thread);
  // cuCtxCreate(&ctx, CU_CTX_SCHED_YIELD, dev);
  // int deviceCount;
  // cudaGetDeviceCount(&deviceCount);
  cudaSetDevice((int)(size_t)thread);
  fprintf (stderr, "In thread %p\n", thread);

  const int numWalkers = 200;
  // FIX: removed the invalid __device__ qualifiers — these are ordinary host
  // variables that merely *hold* device pointers; __device__ is illegal on
  // locals of a host function.
  float *coefs, *vals[numWalkers], *grads[numWalkers], *hess[numWalkers];
  float *coefs_real_d, *coefs_imag_d, **vals_d, **grads_d, **hess_d;

  // Cubic B-spline basis matrices (value / first / second derivative rows).
  float A_h[48] = { -1.0/6.0,  3.0/6.0, -3.0/6.0, 1.0/6.0,
                     3.0/6.0, -6.0/6.0,  0.0/6.0, 4.0/6.0,
                    -3.0/6.0,  3.0/6.0,  3.0/6.0, 1.0/6.0,
                     1.0/6.0,  0.0/6.0,  0.0/6.0, 0.0/6.0,
                     0.0,     -0.5,      1.0,    -0.5,
                     0.0,      1.5,     -2.0,     0.0,
                     0.0,     -1.5,      1.0,     0.5,
                     0.0,      0.5,      0.0,     0.0,
                     0.0,      0.0,     -1.0,     1.0,
                     0.0,      0.0,      3.0,    -2.0,
                     0.0,      0.0,     -3.0,     1.0,
                     0.0,      0.0,      1.0,     0.0 };
  // FIX: copy the basis matrix into the device constant symbol Acuda.
  // (was cudaMemcpy(Acuda, ...), which is invalid for a device symbol, under a
  // comment that wrongly said "copy A to host").
  cudaMemcpyToSymbol(Acuda, A_h, 48*sizeof(float), 0, cudaMemcpyHostToDevice);

  float *r_d, *r_h;
  int xs, ys, zs, N;
  int Nx, Ny, Nz;
  N = 128;
  Nx = Ny = Nz = 16;
  xs = Ny*Nz*N;   // spline index is the fastest-varying dimension
  ys = Nz*N;
  zs = N;

  float3 drInv;
  drInv.x = 1.0/float(Nx);
  drInv.y = 1.0/float(Ny);
  drInv.z = 1.0/float(Nz);

  // Setup Bspline coefficients (random host data).
  int size = Nx*Ny*Nz*N*sizeof(float);
  posix_memalign((void**)&coefs, 16, size);
  for (int ix=0; ix<Nx; ix++)
    for (int iy=0; iy<Ny; iy++)
      for (int iz=0; iz<Nz; iz++)
        for (int n=0; n<N; n++)
          coefs[ix*xs + iy*ys + iz*zs + n] = drand48();
  fprintf (stderr, "Filled in coefs.\n");

  // Setup values
  //posix_memalign((void**)&vals, 16, N*sizeof(float));
  // cudaMemcpy(r_d, r, numWalkers*sizeof(float3), cudaMemcpyHostToDevice);
  fprintf (stderr, "size = %d\n", size);

  // Setup CUDA coefficients (real and imag both filled from the same host data).
  fprintf (stderr, "Before first CUDA mallocs.\n");
  cudaMalloc((void**)&coefs_real_d, 2*size);
  cudaMalloc((void**)&coefs_imag_d, 2*size);
  fprintf (stderr, "Before Memcpy.\n");
  cudaMemcpy(coefs_real_d, coefs, size, cudaMemcpyHostToDevice);
  cudaMemcpy(coefs_imag_d, coefs, size, cudaMemcpyHostToDevice);
  fprintf (stderr, "After Memcpy.\n");

  // Setup device value storage: one block holds vals (2*N), grads (6*N) and
  // Hessians (12*N) per walker => 20*N*numWalkers floats total.
  int numVals = 2*N*numWalkers*10;
  float *valBlock_d, *valBlock_h;
  cudaMalloc((void**)&(valBlock_d), numVals*sizeof(float));
  cudaMallocHost((void**)&(valBlock_h), numVals*sizeof(float));
  // NOTE(review): the pointer tables below are allocated 2x larger than the
  // numWalkers entries actually copied — harmless but presumably unintended.
  cudaMalloc((void**)&(vals_d),  2*numWalkers*sizeof(float*));
  cudaMalloc((void**)&(grads_d), 2*numWalkers*sizeof(float*));
  cudaMalloc((void**)&(hess_d),  2*numWalkers*sizeof(float*));
  fprintf (stderr, "valBlock_d = %p\n", valBlock_d);
  for (int i=0; i<numWalkers; i++) {
    vals[i]  = valBlock_d + 2*i*N;
    grads[i] = valBlock_d + 2*N*numWalkers + 6*i*N;
    hess[i]  = valBlock_d + 8*N*numWalkers + 12*i*N;
  }
  cudaMemcpy(vals_d,  vals,  numWalkers*sizeof(float*), cudaMemcpyHostToDevice);
  cudaMemcpy(grads_d, grads, numWalkers*sizeof(float*), cudaMemcpyHostToDevice);
  cudaMemcpy(hess_d,  hess,  numWalkers*sizeof(float*), cudaMemcpyHostToDevice);
  fprintf (stderr, "Finished cuda allocations.\n");

  // Setup walker positions: 4 floats per walker (x, y, z, pad); the kernels
  // read only the first three.
  cudaMalloc((void**)&(r_d), 4*numWalkers*sizeof(float));
  cudaMallocHost((void**)&(r_h), 4*numWalkers*sizeof(float));
  for (int ir=0; ir<numWalkers; ir++) {
    r_h[4*ir+0] = 0.5*drand48();
    r_h[4*ir+1] = 0.5*drand48();
    r_h[4*ir+2] = 0.5*drand48();
  }

  uint3 strides;
  strides.x = xs;
  strides.y = ys;
  strides.z = zs;

  dim3 dimBlock(BLOCK_SIZE);
  dim3 dimGrid(N/BLOCK_SIZE,numWalkers);
  clock_t start, end;

  // Benchmark the value-only kernel.
  start = clock();
  for (int i=0; i<10000; i++) {
    if ((i%1000) == 0)
      fprintf (stderr, "i = %d\n", i);
    cudaMemcpy(r_d, r_h, 4*numWalkers*sizeof(float), cudaMemcpyHostToDevice);
    eval_multi_multi_UBspline_3d_c_cuda<<<dimGrid,dimBlock>>>
      (r_d, drInv, coefs_real_d, coefs_imag_d, vals_d, strides);
    // eval_multi_multi_UBspline_3d_cuda_c<<<dimGrid,dimBlock>>>
    //   (r_d, drInv, coefs_real_d, coefs_imag_d,
    //    valBlock_d, valBlock_d+numVals/2, strides);
    //cudaMemcpy(valBlock_h, valBlock_d, numVals*sizeof(float), cudaMemcpyDeviceToHost);
  }
  end = clock();
  double time = (double)(end-start)/(double)((double)CLOCKS_PER_SEC*(double)10000*N*numWalkers);
  // FIX: labels were swapped — this loop runs the value-only kernel.
  fprintf (stderr, "Value evals per second = %1.8e\n", 1.0/time);

  // Benchmark the value+gradient+Hessian kernel.
  start = clock();
  for (int i=0; i<10000; i++) {
    if ((i%1000) == 0)
      fprintf (stderr, "i = %d\n", i);
    cudaMemcpy(r_d, r_h, 4*numWalkers*sizeof(float), cudaMemcpyHostToDevice);
    eval_multi_multi_UBspline_3d_c_vgh_cuda<<<dimGrid,dimBlock>>>
      (r_d, drInv, coefs_real_d, coefs_imag_d, vals_d, grads_d, hess_d, strides);
  }
  end = clock();
  time = (double)(end-start)/(double)((double)CLOCKS_PER_SEC*(double)10000*N*numWalkers);
  fprintf (stderr, "VGH evals per second = %1.8e\n", 1.0/time);

  // FIX: release everything that was allocated (grads_d/hess_d, the pinned
  // buffers and the host coefficients were previously leaked).
  cudaFree (valBlock_d);
  cudaFree (vals_d);
  cudaFree (grads_d);
  cudaFree (hess_d);
  cudaFree (coefs_real_d);
  cudaFree (coefs_imag_d);
  cudaFree (r_d);
  cudaFreeHost (valBlock_h);
  cudaFreeHost (r_h);
  free (coefs);
  return NULL;

  // cudaMemcpy (vals, vals_d, N*sizeof(float), cudaMemcpyDeviceToHost);
  // float vals2[N];
  // for (int n=0; n<N; n++) {
  //   vals2[n] = 0.0;
  //   int index=0;
  //   for(int i=0; i<4; i++)
  //     for (int j=0; j<4; j++)
  //       for (int k=0; k<4; k++) {
  //         vals2[n] += abc[index] * coefs[(ix+i)*xs+(iy+j)*ys+(iz+k)*zs+n];
  //         index++;
  //       }
  // }
  // for (int i=0; i<N/256; i++)
  //   fprintf (stderr, "%1.9f %1.9f\n", vals[i], vals2[i]);
  // cudaFree(abc_d);
  // cudaFree(coefs_d);
  // cudaFree(vals_d);
}
#endif

#ifndef NO_CUDA_MAIN
// Enumerate CUDA devices, print their properties, and run the benchmark on
// device 0.
// FIX: added the int return type (implicit-int `main()` is invalid C++).
int main()
{
  int deviceCount;
  cudaGetDeviceCount(&deviceCount);
  fprintf (stderr, "Detected %d CUDA devices.\n", deviceCount);
  // test_cuda();
  for (int device = 0; device < deviceCount; ++device) {
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp, device);
    fprintf (stderr, "Device %d:\n", device);
    // FIX: totalGlobalMem/totalConstMem/sharedMemPerBlock are size_t; printing
    // them with %d is undefined behavior (and overflows for >2 GB devices).
    fprintf (stderr, "  Global memory:   %10zu\n", (size_t)deviceProp.totalGlobalMem);
    fprintf (stderr, "  MultiProcessors: %10d\n",  deviceProp.multiProcessorCount);
    fprintf (stderr, "  Registers:       %10d\n",  deviceProp.regsPerBlock);
    fprintf (stderr, "  Constant memory: %10zu\n", (size_t)deviceProp.totalConstMem);
    fprintf (stderr, "  Shared memory:   %10zu\n", (size_t)deviceProp.sharedMemPerBlock);
  }
  // pthread_t threads[deviceCount];
  // for (int device = 0; device < deviceCount; device++)
  //   pthread_create (&(threads[device]), NULL, test_multi_cuda, (void*)device);
  //   cutStartThread((CUT_THREADROUTINE)test_multi_cuda,(void*)device);
  test_multi_cuda((void*)0);
  // pthread_exit(NULL);
  //test_multi_cuda();
  return 0;
}
#endif
the_stack
#include <cuda.h>
#include <cuda_runtime.h>

#include <ATen/ATen.h>
#include <ATen/core/Tensor.h>
// NOTE(review): THC is deprecated in recent PyTorch; atomics now live in
// <ATen/cuda/Atomic.cuh> — confirm the targeted PyTorch version.
#include <THC/THCAtomics.cuh>

namespace rubiks {

// Forward pass of a 3-D "Rubiks" shift over an [N, T, C, H, W] tensor.
// Each channel C has one learnable fractional shift per axis (T, H, W); every
// output element samples the input at its strided/padded position plus that
// shift. With quantize == false the sample is a trilinear interpolation of the
// 8 surrounding integer positions; with quantize == true the shift is rounded
// to the nearest integer and a single zero-padded fetch is done.
// Out-of-bounds samples contribute zero (implicit zero padding).
// Launch: 1-D grid-stride loop over total_num_elements output elements.
template <typename T>
__global__ void rubiks_shift_3d_forward_cuda(const int total_num_elements, \
                                             const int N_dim, const int input_T_dim, \
                                             const int output_T_dim, const int C_dim, \
                                             const int input_H_dim, const int output_H_dim, \
                                             const int input_W_dim, const int output_W_dim, \
                                             const T* shift_tensor_data_T_ptr, \
                                             const T* shift_tensor_data_H_ptr, \
                                             const T* shift_tensor_data_W_ptr, \
                                             const int pad_T, const int pad_H, \
                                             const int pad_W, const int stride_T, \
                                             const int stride_H, const int stride_W, \
                                             const T* input_tensor_data_ptr, T* output_tensor_data_ptr, bool quantize)
{
    const int output_HW_dim = output_H_dim * output_W_dim;
    const int input_HW_dim = input_H_dim * input_W_dim;

    // Grid-stride loop: one iteration per output element.
    for (int index = blockIdx.x * blockDim.x + threadIdx.x; \
         index < total_num_elements; \
         index += blockDim.x * gridDim.x)
    {
        // Decompose the flat output index into (N, T, C, H, W).
        const int N_idx = index / (output_T_dim * C_dim * output_HW_dim);
        const int within_N_idx = index % (output_T_dim * C_dim * output_HW_dim);
        const int output_T_idx = within_N_idx / (C_dim * output_HW_dim);
        const int within_T_idx = within_N_idx % (C_dim * output_HW_dim);
        const int C_idx = within_T_idx / output_HW_dim;
        const int within_C_idx = within_T_idx % output_HW_dim;
        const int output_H_idx = within_C_idx / output_W_dim;
        const int output_W_idx = within_C_idx % output_W_dim;

        // Base input position after stride and padding.
        const int strided_output_T_idx = output_T_idx * stride_T - pad_T;
        const int strided_output_H_idx = output_H_idx * stride_H - pad_H;
        const int strided_output_W_idx = output_W_idx * stride_W - pad_W;

        // Per-channel fractional shifts.
        const T shift_T = shift_tensor_data_T_ptr[C_idx];
        const T shift_H = shift_tensor_data_H_ptr[C_idx];
        const T shift_W = shift_tensor_data_W_ptr[C_idx];

        // Integer floor / ceil of each shift and the fractional remainders.
        // FIX: use floor() instead of floorf() so T == double keeps full
        // precision (floorf silently truncated double shifts to float).
        const int small_shift_T = static_cast<int>(floor(shift_T));
        const int large_shift_T = small_shift_T + 1;
        const int small_shift_H = static_cast<int>(floor(shift_H));
        const int large_shift_H = small_shift_H + 1;
        const int small_shift_W = static_cast<int>(floor(shift_W));
        const int large_shift_W = small_shift_W + 1;
        const T remainder_T = shift_T - small_shift_T;
        const T remainder_H = shift_H - small_shift_H;
        const T remainder_W = shift_W - small_shift_W;

        // Zero-padded fetch of input[N_idx, t, C_idx, h, w]; replaces the 8
        // copy-pasted bounds-checked index expressions of the original.
        auto sample = [&] (int t, int h, int w) -> T {
            const bool inside = (h >= 0 && w >= 0 && t >= 0 && \
                                 h < input_H_dim && w < input_W_dim && t < input_T_dim);
            if (!inside)
                return static_cast<T>(0);
            return input_tensor_data_ptr[((N_idx * input_T_dim + t) * C_dim + C_idx) * input_HW_dim + \
                                         h * input_W_dim + w];
        };

        if (quantize) {
            // Hard shift: round each axis to the nearest integer, single fetch.
            const int quantize_T = (remainder_T < static_cast<T>(0.5)) ? small_shift_T : large_shift_T;
            const int quantize_H = (remainder_H < static_cast<T>(0.5)) ? small_shift_H : large_shift_H;
            const int quantize_W = (remainder_W < static_cast<T>(0.5)) ? small_shift_W : large_shift_W;
            output_tensor_data_ptr[index] = sample(strided_output_T_idx + quantize_T, \
                                                   strided_output_H_idx + quantize_H, \
                                                   strided_output_W_idx + quantize_W);
            continue;  // next grid-stride iteration
        }

        // Fetch the 8 corners of the surrounding unit cube
        // (q{T}{H}{W}: 1 = floor side, 2 = ceil side).
        const T q111 = sample(strided_output_T_idx + small_shift_T, strided_output_H_idx + small_shift_H, strided_output_W_idx + small_shift_W);
        const T q112 = sample(strided_output_T_idx + small_shift_T, strided_output_H_idx + small_shift_H, strided_output_W_idx + large_shift_W);
        const T q121 = sample(strided_output_T_idx + small_shift_T, strided_output_H_idx + large_shift_H, strided_output_W_idx + small_shift_W);
        const T q122 = sample(strided_output_T_idx + small_shift_T, strided_output_H_idx + large_shift_H, strided_output_W_idx + large_shift_W);
        const T q211 = sample(strided_output_T_idx + large_shift_T, strided_output_H_idx + small_shift_H, strided_output_W_idx + small_shift_W);
        const T q212 = sample(strided_output_T_idx + large_shift_T, strided_output_H_idx + small_shift_H, strided_output_W_idx + large_shift_W);
        const T q221 = sample(strided_output_T_idx + large_shift_T, strided_output_H_idx + large_shift_H, strided_output_W_idx + small_shift_W);
        const T q222 = sample(strided_output_T_idx + large_shift_T, strided_output_H_idx + large_shift_H, strided_output_W_idx + large_shift_W);

        // Trilinear interpolation of the 8 corners.
        output_tensor_data_ptr[index] = \
            (1 - remainder_T) * \
                ((1 - remainder_H) * (q111 * (1 - remainder_W) + q112 * remainder_W) + \
                 remainder_H       * (q121 * (1 - remainder_W) + q122 * remainder_W)) + \
            remainder_T * \
                ((1 - remainder_H) * (q211 * (1 - remainder_W) + q212 * remainder_W) + \
                 remainder_H       * (q221 * (1 - remainder_W) + q222 * remainder_W));
    }
}

// Bilinear interpolation of four corner values p{i}{j} with fractional
// offsets delta_1 (first axis) and delta_2 (second axis).
template <typename T>
__device__ T interpolate_2D(T p11, T p12, T p21, T p22, T delta_1, T delta_2)
{
    return p11 * (1 - delta_1) * (1 - delta_2) + \
           p12 * (1 - delta_1) * delta_2 + \
           p21 * delta_1 * (1 - delta_2) + \
           p22 * delta_1 * delta_2;
}

// Backward pass (continues past this edit; signature preserved verbatim).
template <typename T>
__global__ void rubiks_shift_3d_backward_cuda( const int total_num_elements, \
                                               const int N_dim, const int input_T_dim, \
                                               const int output_T_dim, const int C_dim, \
                                               const int input_H_dim, const int output_H_dim, \
                                               const int input_W_dim, const int output_W_dim, \
                                               const T* shift_tensor_data_T_ptr, \
                                               const T* shift_tensor_data_H_ptr, \
                                               const T* shift_tensor_data_W_ptr, \
                                               const int pad_T, const int pad_H, \
                                               const int pad_W, const int stride_T, \
                                               const int stride_H, const int stride_W, \
                                               const T* input_tensor_data_ptr, \
                                               const T* output_grad_data_ptr, \
                                               T* shift_grad_buffer_T_start_ptr, \
                                               T* shift_grad_buffer_H_start_ptr, \
                                               T*
shift_grad_buffer_W_start_ptr) { const int output_HW_dim = output_H_dim * output_W_dim; const int input_HW_dim = input_H_dim * input_W_dim; for (int index = blockIdx.x * blockDim.x + threadIdx.x; \ index < total_num_elements; \ index += blockDim.x * gridDim.x) { // Again, batch index for output (NOT total N_number of batches) const int N_idx = index / (output_T_dim * C_dim * output_HW_dim); const int within_N_idx = index % (output_T_dim * C_dim * output_HW_dim); // Timestep index (NOT total number of T_dim) const int T_idx = within_N_idx / (C_dim * output_HW_dim); const int within_T_idx = within_N_idx % (C_dim * output_HW_dim); // Channel index (NOT total number of C_dim) const int C_idx = within_T_idx / output_HW_dim; const int within_C_idx = within_T_idx % output_HW_dim; // Height; width indices const int H_idx = within_C_idx / output_W_dim; const int W_idx = within_C_idx % output_W_dim; // Same thing as forward -- accounting for stride and padding const int T_offset = T_idx * stride_T - pad_T; const int H_offset = H_idx * stride_H - pad_H; const int W_offset = W_idx * stride_W - pad_W; // output: {3 x C, H x W} // Output into shift temp buffers so we can aggregate later. 
// This calculates the correct index within the temp buffer T* shift_grad_buffer_T_ptr = shift_grad_buffer_T_start_ptr + C_idx * output_HW_dim + within_C_idx; T* shift_grad_buffer_H_ptr = shift_grad_buffer_H_start_ptr + C_idx * output_HW_dim + within_C_idx; T* shift_grad_buffer_W_ptr = shift_grad_buffer_W_start_ptr + C_idx * output_HW_dim + within_C_idx; const T shift_T = shift_tensor_data_T_ptr[C_idx]; const T shift_H = shift_tensor_data_H_ptr[C_idx]; const T shift_W = shift_tensor_data_W_ptr[C_idx]; // Computes four offsets and integer truncation differences const int small_shift_T = floorf(shift_T); const int large_shift_T = small_shift_T + 1; const int small_shift_H = floorf(shift_H); const int large_shift_H = small_shift_H + 1; const int small_shift_W = floorf(shift_W); const int large_shift_W = small_shift_W + 1; const T remainder_T = shift_T - small_shift_T; const T remainder_H = shift_H - small_shift_H; const T remainder_W = shift_W - small_shift_W; // Calculates actual indices which we grabbed from in the forward pass const int input_small_T = T_offset + small_shift_T; const int input_large_T = T_offset + large_shift_T; const int input_small_Ta = input_small_T + ((remainder_T == 0) ? -1 : 0); const int input_small_H = H_offset + small_shift_H; const int input_large_H = H_offset + large_shift_H; const int input_small_Ha = input_small_H + ((remainder_H == 0) ? -1 : 0); const int input_small_W = W_offset + small_shift_W; const int input_large_W = W_offset + large_shift_W; const int input_small_Wa = input_small_W + ((remainder_W == 0) ? -1 : 0); // -------------------------- SMALL T, SMALL H, SMALL W -------------------------- const T q111 = (input_small_T >= 0 && input_small_H >= 0 && input_small_W >= 0 && \ input_small_T < input_T_dim && input_small_H < input_H_dim && input_small_W < input_W_dim) ? 
input_tensor_data_ptr[N_idx * input_T_dim * C_dim * input_HW_dim + input_small_T * C_dim * input_HW_dim + \ C_idx * input_HW_dim + input_small_H * input_W_dim + input_small_W] : 0; // -------------------------- SMALL T, SMALL H, LARGE W -------------------------- const T q112 = (input_small_T >= 0 && input_small_H >= 0 && input_large_W >= 0 && \ input_small_T < input_T_dim && input_small_H < input_H_dim && input_large_W < input_W_dim) ? input_tensor_data_ptr[N_idx * input_T_dim * C_dim * input_HW_dim + input_small_T * C_dim * input_HW_dim + \ C_idx * input_HW_dim + input_small_H * input_W_dim + input_large_W] : 0; // -------------------------- SMALL T, LARGE H, SMALL W -------------------------- const T q121 = (input_small_T >= 0 && input_large_H >= 0 && input_small_W >= 0 && \ input_small_T < input_T_dim && input_large_H < input_H_dim && input_small_W < input_W_dim) ? input_tensor_data_ptr[N_idx * input_T_dim * C_dim * input_HW_dim + input_small_T * C_dim * input_HW_dim + \ C_idx * input_HW_dim + input_large_H * input_W_dim + input_small_W] : 0; // -------------------------- SMALL T, LARGE H, LARGE W -------------------------- const T q122 = (input_small_T >= 0 && input_large_H >= 0 && input_large_W >= 0 && \ input_small_T < input_T_dim && input_large_H < input_H_dim && input_large_W < input_W_dim) ? input_tensor_data_ptr[N_idx * input_T_dim * C_dim * input_HW_dim + input_small_T * C_dim * input_HW_dim + \ C_idx * input_HW_dim + input_large_H * input_W_dim + input_large_W] : 0; // -------------------------- LARGE T, SMALL H, SMALL W -------------------------- const T q211 = (input_large_T >= 0 && input_small_H >= 0 && input_small_W >= 0 && \ input_large_T < input_T_dim && input_small_H < input_H_dim && input_small_W < input_W_dim) ? 
input_tensor_data_ptr[N_idx * input_T_dim * C_dim * input_HW_dim + input_large_T * C_dim * input_HW_dim + \ C_idx * input_HW_dim + input_small_H * input_W_dim + input_small_W] : 0; // -------------------------- LARGE T, SMALL H, LARGE W -------------------------- const T q212 = (input_large_T >= 0 && input_small_H >= 0 && input_large_W >= 0 && \ input_large_T < input_T_dim && input_small_H < input_H_dim && input_large_W < input_W_dim) ? input_tensor_data_ptr[N_idx * input_T_dim * C_dim * input_HW_dim + input_large_T * C_dim * input_HW_dim + \ C_idx * input_HW_dim + input_small_H * input_W_dim + input_large_W] : 0; // -------------------------- LARGE T, LARGE H, SMALL W -------------------------- const T q221 = (input_large_T >= 0 && input_large_H >= 0 && input_small_W >= 0 && \ input_large_T < input_T_dim && input_large_H < input_H_dim && input_small_W < input_W_dim) ? input_tensor_data_ptr[N_idx * input_T_dim * C_dim * input_HW_dim + input_large_T * C_dim * input_HW_dim + \ C_idx * input_HW_dim + input_large_H * input_W_dim + input_small_W] : 0; // -------------------------- LARGE T, LARGE H, LARGE W -------------------------- const T q222 = (input_large_T >= 0 && input_large_H >= 0 && input_large_W >= 0 && \ input_large_T < input_T_dim && input_large_H < input_H_dim && input_large_W < input_W_dim) ? input_tensor_data_ptr[N_idx * input_T_dim * C_dim * input_HW_dim + input_large_T * C_dim * input_HW_dim + \ C_idx * input_HW_dim + input_large_H * input_W_dim + input_large_W] : 0; // -------------------------- SMALL T, SMALL H, SMALL W -------------------------- // One non-ternary statement for clarity. The rest follow, but in ternary. T q111a = 0; // If it turns out our shifts are exact integers, then we need the smaller versions // of the shifted pixels. if (remainder_T == 0 || remainder_H == 0 || remainder_W == 0) { // Check if the modified starts are in bounds. 
if (input_small_Ta >= 0 && input_small_Ha >= 0 && input_small_Wa >= 0 && \ input_small_Ta < input_T_dim && input_small_Ha < input_H_dim && input_small_Wa < input_W_dim) { q111a = input_tensor_data_ptr[N_idx * input_T_dim * C_dim * input_HW_dim + input_small_Ta * C_dim * input_HW_dim + \ C_idx * input_HW_dim + input_small_Ha * input_W_dim + input_small_Wa]; } else { q111a = 0; } // Otherwise, we take what we've got from the regular interpolation formula. } else { q111a = q111; } // -------------------------- SMALL T, SMALL H, LARGE W -------------------------- const T q112a = (remainder_T == 0 || remainder_H == 0) ? ((input_small_Ta >= 0 && input_small_Ha >= 0 && input_large_W >= 0 && \ input_small_Ta < input_T_dim && input_small_Ha < input_H_dim && input_large_W < input_W_dim) ? input_tensor_data_ptr[N_idx * input_T_dim * C_dim * input_HW_dim + input_small_Ta * C_dim * input_HW_dim + \ C_idx * input_HW_dim + input_small_Ha * input_W_dim + input_large_W] : 0) :q112; // -------------------------- SMALL T, LARGE H, SMALL W -------------------------- const T q121a = (remainder_T == 0 || remainder_W == 0) ? ((input_small_Ta >= 0 && input_large_H >= 0 && input_small_Wa >= 0 && \ input_small_Ta < input_T_dim && input_large_H < input_H_dim && input_small_Wa < input_W_dim) ? input_tensor_data_ptr[N_idx * input_T_dim * C_dim * input_HW_dim + input_small_Ta * C_dim * input_HW_dim + \ C_idx * input_HW_dim + input_large_H * input_W_dim + input_small_Wa] : 0) :q121; // -------------------------- SMALL T, LARGE H, LARGE W -------------------------- const T q122a = (remainder_T == 0) ? ((input_small_Ta >= 0 && input_large_H >= 0 && input_large_W >= 0 && \ input_small_Ta < input_T_dim && input_large_H < input_H_dim && input_large_W < input_W_dim) ? 
input_tensor_data_ptr[N_idx * input_T_dim * C_dim * input_HW_dim + input_small_Ta * C_dim * input_HW_dim + \ C_idx * input_HW_dim + input_large_H * input_W_dim + input_large_W] : 0) :q122; // -------------------------- LARGE T, SMALL H, SMALL W -------------------------- const T q211a = (remainder_H == 0 || remainder_W == 0) ? ((input_large_T >= 0 && input_small_Ha >= 0 && input_small_Wa >= 0 && \ input_large_T < input_T_dim && input_small_Ha < input_H_dim && input_small_Wa < input_W_dim) ? input_tensor_data_ptr[N_idx * input_T_dim * C_dim * input_HW_dim + input_large_T * C_dim * input_HW_dim + \ C_idx * input_HW_dim + input_small_Ha * input_W_dim + input_small_Wa] : 0) :q211; // -------------------------- LARGE T, SMALL H, LARGE W -------------------------- const T q212a = (remainder_H == 0) ? ((input_large_T >= 0 && input_small_Ha >= 0 && input_large_W >= 0 && \ input_large_T < input_T_dim && input_small_Ha < input_H_dim && input_large_W < input_W_dim) ? input_tensor_data_ptr[N_idx * input_T_dim * C_dim * input_HW_dim + input_large_T * C_dim * input_HW_dim + \ C_idx * input_HW_dim + input_small_Ha * input_W_dim + input_large_W] : 0) :q212; // -------------------------- LARGE T, LARGE H, SMALL W -------------------------- const T q221a = (remainder_W == 0) ? ((input_large_T >= 0 && input_large_H >= 0 && input_small_Wa >= 0 && \ input_large_T < input_T_dim && input_large_H < input_H_dim && input_small_Wa < input_W_dim) ? 
input_tensor_data_ptr[N_idx * input_T_dim * C_dim * input_HW_dim + input_large_T * C_dim * input_HW_dim + \ C_idx * input_HW_dim + input_large_H * input_W_dim + input_small_Wa] : 0) :q221; // -------------------------- LARGE T, LARGE H, LARGE W -------------------------- const T q222a = q222; const T local_T_grad_small = interpolate_2D<T>(q111a, q112a, q121a, q122a, remainder_H, remainder_W); const T local_T_grad_large = interpolate_2D<T>(q211a, q212a, q221a, q222a, remainder_H, remainder_W); const T local_H_grad_small = interpolate_2D<T>(q111a, q112a, q211a, q212a, remainder_T, remainder_W); const T local_H_grad_large = interpolate_2D<T>(q121a, q122a, q221a, q222a, remainder_T, remainder_W); const T local_W_grad_small = interpolate_2D<T>(q111a, q121a, q211a, q221a, remainder_T, remainder_H); const T local_W_grad_large = interpolate_2D<T>(q112a, q122a, q212a, q222a, remainder_T, remainder_H); const T local_pixel_T_grad = -local_T_grad_small + local_T_grad_large; const T local_pixel_H_grad = -local_H_grad_small + local_H_grad_large; const T local_pixel_W_grad = -local_W_grad_small + local_W_grad_large; const T upstream_grad = output_grad_data_ptr[index]; const T pixel_T_grad = local_pixel_T_grad * upstream_grad; const T pixel_H_grad = local_pixel_H_grad * upstream_grad; const T pixel_W_grad = local_pixel_W_grad * upstream_grad; atomicAdd(shift_grad_buffer_T_ptr, pixel_T_grad); atomicAdd(shift_grad_buffer_H_ptr, pixel_H_grad); atomicAdd(shift_grad_buffer_W_ptr, pixel_W_grad); } } template <typename T> __global__ void rubiks_shift_3d_backward_input_cuda(\ const int total_num_elements, const int N_dim, const int input_T_dim, \ const int output_T_dim, const int C_dim, \ const int input_H_dim, const int output_H_dim, \ const int input_W_dim, const int output_W_dim, \ const T* shift_tensor_data_T_ptr, \ const T* shift_tensor_data_H_ptr, \ const T* shift_tensor_data_W_ptr, \ const int pad_T, const int pad_H, \ const int pad_W, const int stride_T, \ const int stride_H, 
const int stride_W, \ const T* input_tensor_data_ptr, \ const T* output_grad_data_ptr, \ T* input_grad_data_ptr, bool quantize) { const int output_HW_dim = output_H_dim * output_W_dim; const int input_HW_dim = input_H_dim * input_W_dim; for (int index = blockIdx.x * blockDim.x + threadIdx.x; \ index < total_num_elements; \ index += blockDim.x * gridDim.x) { // Batch index (NOT total number of batches) const int N_idx = index / (input_T_dim * C_dim * input_HW_dim); const int within_N_idx = index % (input_T_dim * C_dim * input_HW_dim); // Timestep index (NOT total number of timesteps) const int T_idx = within_N_idx / (C_dim * input_HW_dim); const int within_T_idx = within_N_idx % (C_dim * input_HW_dim); // Channel index (NOT total number of channels) const int C_idx = within_T_idx / input_HW_dim; const int within_C_idx = within_T_idx % input_HW_dim; // Height and width spatial location which this particular thread handles const int H_idx = within_C_idx / input_W_dim; const int W_idx = within_C_idx % input_W_dim; // Offsets within the (H, W) feature map in the output gradient tensor to pull from // (note that backward input gradient is just output gradient, reverse shifted) const int T_offset = T_idx + pad_T; const int H_offset = H_idx + pad_H; const int W_offset = W_idx + pad_W; // Final value to be stuck into the input gradient T val = 0; const T shift_T = -shift_tensor_data_T_ptr[C_idx]; const T shift_H = -shift_tensor_data_H_ptr[C_idx]; const T shift_W = -shift_tensor_data_W_ptr[C_idx]; // Where in the actual (H, W) feature map we pull from. int output_T_idx, output_H_idx, output_W_idx; T q111 = 0; T q112 = 0; T q121 = 0; T q122 = 0; T q211 = 0; T q212 = 0; T q221 = 0; T q222 = 0; int small_shift_T = floorf(shift_T); int large_shift_T = small_shift_T + 1; int small_shift_H = floorf(shift_H); int large_shift_H = small_shift_H + 1; int small_shift_W = floorf(shift_W); int large_shift_W = small_shift_W + 1; // Compute interpolation remainders (e.g. 
1.4 - 1 = 0.4) T remainder_T = shift_T - small_shift_T; T remainder_H = shift_H - small_shift_H; T remainder_W = shift_W - small_shift_W; if (quantize) { int quantize_T = (remainder_T < 0.5f) ? small_shift_T : large_shift_T; int quantize_H = (remainder_H < 0.5f) ? small_shift_H : large_shift_H; int quantize_W = (remainder_W < 0.5f) ? small_shift_W : large_shift_W; output_T_idx = (T_offset + quantize_T); output_H_idx = (H_offset + quantize_H); output_W_idx = (W_offset + quantize_W); T q_quantize = 0.f; if (output_T_idx % stride_T == 0 && output_H_idx % stride_H == 0 && output_W_idx % stride_W == 0) { output_T_idx = output_T_idx / stride_T; output_H_idx = output_H_idx / stride_H; output_W_idx = output_W_idx / stride_W; q_quantize = (output_H_idx >= 0 && output_W_idx >= 0 && output_T_idx >= 0 && \ output_H_idx < output_H_dim && output_W_idx < output_W_dim && output_T_idx < output_T_dim) ? \ output_grad_data_ptr[N_idx * output_T_dim * C_dim * output_HW_dim + output_T_idx * C_dim * output_HW_dim + \ C_idx * output_HW_dim + output_H_idx * output_W_dim + output_W_idx] : 0; } input_grad_data_ptr[index] = q_quantize; continue; // skip the rest to do the next grid-stride loop } // Special case -- all shifts are zero; only care about strides and padding with NO interpolation. if (shift_T == 0 && shift_H == 0 && shift_W == 0) { output_T_idx = T_offset; output_H_idx = H_offset; output_W_idx = W_offset; if (output_T_idx % stride_T == 0 && output_H_idx % stride_H == 0 && output_W_idx % stride_W == 0) { output_T_idx = output_T_idx / stride_T; output_H_idx = output_H_idx / stride_H; output_W_idx = output_W_idx / stride_W; val = (output_H_idx >= 0 && output_W_idx >= 0 && output_T_idx >= 0 && \ output_H_idx < output_H_dim && output_W_idx < output_W_dim && output_T_idx < output_T_dim) ? 
\ output_grad_data_ptr[N_idx * output_T_dim * C_dim * output_HW_dim + output_T_idx * C_dim * output_HW_dim + \ C_idx * output_HW_dim + output_H_idx * output_W_dim + output_W_idx] : 0; } } else { // -------------------------- SMALL T, SMALL H, SMALL W -------------------------- output_T_idx = (T_offset + small_shift_T); output_H_idx = (H_offset + small_shift_H); output_W_idx = (W_offset + small_shift_W); if (output_T_idx % stride_T == 0 && output_H_idx % stride_H == 0 && output_W_idx % stride_W == 0) { output_T_idx = output_T_idx / stride_T; output_H_idx = output_H_idx / stride_H; output_W_idx = output_W_idx / stride_W; q111 = (output_H_idx >= 0 && output_W_idx >= 0 && output_T_idx >= 0 && \ output_H_idx < output_H_dim && output_W_idx < output_W_dim && output_T_idx < output_T_dim) ? \ output_grad_data_ptr[N_idx * output_T_dim * C_dim * output_HW_dim + output_T_idx * C_dim * output_HW_dim + \ C_idx * output_HW_dim + output_H_idx * output_W_dim + output_W_idx] : 0; } // -------------------------- SMALL T, SMALL H, LARGE W -------------------------- output_T_idx = (T_offset + small_shift_T); output_H_idx = (H_offset + small_shift_H); output_W_idx = (W_offset + large_shift_W); if (output_T_idx % stride_T == 0 && output_H_idx % stride_H == 0 && output_W_idx % stride_W == 0) { output_T_idx = output_T_idx / stride_T; output_H_idx = output_H_idx / stride_H; output_W_idx = output_W_idx / stride_W; q112 = (output_H_idx >= 0 && output_W_idx >= 0 && output_T_idx >= 0 && \ output_H_idx < output_H_dim && output_W_idx < output_W_dim && output_T_idx < output_T_dim) ? 
\ output_grad_data_ptr[N_idx * output_T_dim * C_dim * output_HW_dim + output_T_idx * C_dim * output_HW_dim + \ C_idx * output_HW_dim + output_H_idx * output_W_dim + output_W_idx] : 0; } // -------------------------- SMALL T, LARGE H, SMALL W -------------------------- output_T_idx = (T_offset + small_shift_T); output_H_idx = (H_offset + large_shift_H); output_W_idx = (W_offset + small_shift_W); if (output_T_idx % stride_T == 0 && output_H_idx % stride_H == 0 && output_W_idx % stride_W == 0) { output_T_idx = output_T_idx / stride_T; output_H_idx = output_H_idx / stride_H; output_W_idx = output_W_idx / stride_W; q121 = (output_H_idx >= 0 && output_W_idx >= 0 && output_T_idx >= 0 && \ output_H_idx < output_H_dim && output_W_idx < output_W_dim && output_T_idx < output_T_dim) ? \ output_grad_data_ptr[N_idx * output_T_dim * C_dim * output_HW_dim + output_T_idx * C_dim * output_HW_dim + \ C_idx * output_HW_dim + output_H_idx * output_W_dim + output_W_idx] : 0; } // -------------------------- SMALL T, LARGE H, LARGE W -------------------------- output_T_idx = (T_offset + small_shift_T); output_H_idx = (H_offset + large_shift_H); output_W_idx = (W_offset + large_shift_W); if (output_T_idx % stride_T == 0 && output_H_idx % stride_H == 0 && output_W_idx % stride_W == 0) { output_T_idx = output_T_idx / stride_T; output_H_idx = output_H_idx / stride_H; output_W_idx = output_W_idx / stride_W; q122 = (output_H_idx >= 0 && output_W_idx >= 0 && output_T_idx >= 0 && \ output_H_idx < output_H_dim && output_W_idx < output_W_dim && output_T_idx < output_T_dim) ? 
\ output_grad_data_ptr[N_idx * output_T_dim * C_dim * output_HW_dim + output_T_idx * C_dim * output_HW_dim + \ C_idx * output_HW_dim + output_H_idx * output_W_dim + output_W_idx] : 0; } // -------------------------- LARGE T, SMALL H, SMALL W -------------------------- output_T_idx = (T_offset + large_shift_T); output_H_idx = (H_offset + small_shift_H); output_W_idx = (W_offset + small_shift_W); if (output_T_idx % stride_T == 0 && output_H_idx % stride_H == 0 && output_W_idx % stride_W == 0) { output_T_idx = output_T_idx / stride_T; output_H_idx = output_H_idx / stride_H; output_W_idx = output_W_idx / stride_W; q211 = (output_H_idx >= 0 && output_W_idx >= 0 && output_T_idx >= 0 && \ output_H_idx < output_H_dim && output_W_idx < output_W_dim && output_T_idx < output_T_dim) ? \ output_grad_data_ptr[N_idx * output_T_dim * C_dim * output_HW_dim + output_T_idx * C_dim * output_HW_dim + \ C_idx * output_HW_dim + output_H_idx * output_W_dim + output_W_idx] : 0; } // -------------------------- LARGE T, SMALL H, LARGE W -------------------------- output_T_idx = (T_offset + large_shift_T); output_H_idx = (H_offset + small_shift_H); output_W_idx = (W_offset + large_shift_W); if (output_T_idx % stride_T == 0 && output_H_idx % stride_H == 0 && output_W_idx % stride_W == 0) { output_T_idx = output_T_idx / stride_T; output_H_idx = output_H_idx / stride_H; output_W_idx = output_W_idx / stride_W; q212 = (output_H_idx >= 0 && output_W_idx >= 0 && output_T_idx >= 0 && \ output_H_idx < output_H_dim && output_W_idx < output_W_dim && output_T_idx < output_T_dim) ? 
\ output_grad_data_ptr[N_idx * output_T_dim * C_dim * output_HW_dim + output_T_idx * C_dim * output_HW_dim + \ C_idx * output_HW_dim + output_H_idx * output_W_dim + output_W_idx] : 0; } // -------------------------- LARGE T, LARGE H, SMALL W -------------------------- output_T_idx = (T_offset + large_shift_T); output_H_idx = (H_offset + large_shift_H); output_W_idx = (W_offset + small_shift_W); if (output_T_idx % stride_T == 0 && output_H_idx % stride_H == 0 && output_W_idx % stride_W == 0) { output_T_idx = output_T_idx / stride_T; output_H_idx = output_H_idx / stride_H; output_W_idx = output_W_idx / stride_W; q221 = (output_H_idx >= 0 && output_W_idx >= 0 && output_T_idx >= 0 && \ output_H_idx < output_H_dim && output_W_idx < output_W_dim && output_T_idx < output_T_dim) ? \ output_grad_data_ptr[N_idx * output_T_dim * C_dim * output_HW_dim + output_T_idx * C_dim * output_HW_dim + \ C_idx * output_HW_dim + output_H_idx * output_W_dim + output_W_idx] : 0; } // -------------------------- LARGE T, LARGE H, LARGE W -------------------------- output_T_idx = (T_offset + large_shift_T); output_H_idx = (H_offset + large_shift_H); output_W_idx = (W_offset + large_shift_W); if (output_T_idx % stride_T == 0 && output_H_idx % stride_H == 0 && output_W_idx % stride_W == 0) { output_T_idx = output_T_idx / stride_T; output_H_idx = output_H_idx / stride_H; output_W_idx = output_W_idx / stride_W; q222 = (output_H_idx >= 0 && output_W_idx >= 0 && output_T_idx >= 0 && \ output_H_idx < output_H_dim && output_W_idx < output_W_dim && output_T_idx < output_T_dim) ? 
\ output_grad_data_ptr[N_idx * output_T_dim * C_dim * output_HW_dim + output_T_idx * C_dim * output_HW_dim + \ C_idx * output_HW_dim + output_H_idx * output_W_dim + output_W_idx] : 0; } val = \ (1 - remainder_T) * \ ((1 - remainder_H) * \ (q111 * (1 - remainder_W) + q112 * remainder_W) + \ remainder_H * \ (q121 * (1 - remainder_W) + q122 * remainder_W)) + \ remainder_T * \ ((1 - remainder_H) * \ (q211 * (1 - remainder_W) + q212 * remainder_W) + \ remainder_H * \ (q221 * (1 - remainder_W) + q222 * remainder_W)); } input_grad_data_ptr[index] = val; } } template <typename T> __global__ void rubiks_shift_3d_backward_input_s1p0_cuda(\ const int total_num_elements, const int N_dim, const int input_T_dim, \ const int output_T_dim, const int C_dim, \ const int input_H_dim, const int output_H_dim, \ const int input_W_dim, const int output_W_dim, \ const T* shift_tensor_data_T_ptr, \ const T* shift_tensor_data_H_ptr, \ const T* shift_tensor_data_W_ptr, \ const T* input_tensor_data_ptr, \ const T* output_grad_data_ptr, \ T* input_grad_data_ptr, bool quantize) { const int output_HW_dim = output_H_dim * output_W_dim; const int input_HW_dim = input_H_dim * input_W_dim; for (int index = blockIdx.x * blockDim.x + threadIdx.x; \ index < total_num_elements; \ index += blockDim.x * gridDim.x) { // Batch index (NOT total number of batches) const int N_idx = index / (input_T_dim * C_dim * input_HW_dim); const int within_N_idx = index % (input_T_dim * C_dim * input_HW_dim); // Timestep index (NOT total number of timesteps) const int T_idx = within_N_idx / (C_dim * input_HW_dim); const int within_T_idx = within_N_idx % (C_dim * input_HW_dim); // Channel index (NOT total number of channels) const int C_idx = within_T_idx / input_HW_dim; const int within_C_idx = within_T_idx % input_HW_dim; // Height and width spatial location which this particular thread handles const int H_idx = within_C_idx / input_W_dim; const int W_idx = within_C_idx % input_W_dim; // Offsets within the (H, W) feature 
map in the output gradient tensor to pull from // (note that backward input gradient is just output gradient, reverse shifted) const int T_offset = T_idx; const int H_offset = H_idx; const int W_offset = W_idx; T val = 0; const T shift_T = -shift_tensor_data_T_ptr[C_idx]; const T shift_H = -shift_tensor_data_H_ptr[C_idx]; const T shift_W = -shift_tensor_data_W_ptr[C_idx]; // Where in the actual (H, W) feature map we pull from. int output_T_idx, output_H_idx, output_W_idx; T q111 = 0; T q112 = 0; T q121 = 0; T q122 = 0; T q211 = 0; T q212 = 0; T q221 = 0; T q222 = 0; int small_shift_T = floorf(shift_T); int large_shift_T = small_shift_T + 1; int small_shift_H = floorf(shift_H); int large_shift_H = small_shift_H + 1; int small_shift_W = floorf(shift_W); int large_shift_W = small_shift_W + 1; // Compute interpolation remainders (e.g. 1.4 - 1 = 0.4) T remainder_T = shift_T - small_shift_T; T remainder_H = shift_H - small_shift_H; T remainder_W = shift_W - small_shift_W; if (quantize) { int quantize_T = (remainder_T < 0.5f) ? small_shift_T : large_shift_T; int quantize_H = (remainder_H < 0.5f) ? small_shift_H : large_shift_H; int quantize_W = (remainder_W < 0.5f) ? small_shift_W : large_shift_W; output_T_idx = (T_offset + quantize_T); output_H_idx = (H_offset + quantize_H); output_W_idx = (W_offset + quantize_W); T q_quantize = (output_H_idx >= 0 && output_W_idx >= 0 && output_T_idx >= 0 && \ output_H_idx < output_H_dim && output_W_idx < output_W_dim && output_T_idx < output_T_dim) ? \ output_grad_data_ptr[N_idx * output_T_dim * C_dim * output_HW_dim + output_T_idx * C_dim * output_HW_dim + \ C_idx * output_HW_dim + output_H_idx * output_W_dim + output_W_idx] : 0; input_grad_data_ptr[index] = q_quantize; continue; // skip the rest to do the next grid-stride loop } // Special case -- all shifts are zero; only care about strides and padding with NO interpolation. 
if (shift_T == 0 && shift_H == 0 && shift_W == 0) { output_T_idx = T_offset; output_H_idx = H_offset; output_W_idx = W_offset; val = (output_H_idx >= 0 && output_W_idx >= 0 && output_T_idx >= 0 && \ output_H_idx < output_H_dim && output_W_idx < output_W_dim && output_T_idx < output_T_dim) ? \ output_grad_data_ptr[N_idx * output_T_dim * C_dim * output_HW_dim + output_T_idx * C_dim * output_HW_dim + \ C_idx * output_HW_dim + output_H_idx * output_W_dim + output_W_idx] : 0; } else { // -------------------------- SMALL T, SMALL H, SMALL W -------------------------- output_T_idx = (T_offset + small_shift_T); output_H_idx = (H_offset + small_shift_H); output_W_idx = (W_offset + small_shift_W); q111 = (output_H_idx >= 0 && output_W_idx >= 0 && output_T_idx >= 0 && \ output_H_idx < output_H_dim && output_W_idx < output_W_dim && output_T_idx < output_T_dim) ? \ output_grad_data_ptr[N_idx * output_T_dim * C_dim * output_HW_dim + output_T_idx * C_dim * output_HW_dim + \ C_idx * output_HW_dim + output_H_idx * output_W_dim + output_W_idx] : 0; // -------------------------- SMALL T, SMALL H, LARGE W -------------------------- output_T_idx = (T_offset + small_shift_T); output_H_idx = (H_offset + small_shift_H); output_W_idx = (W_offset + large_shift_W); q112 = (output_H_idx >= 0 && output_W_idx >= 0 && output_T_idx >= 0 && \ output_H_idx < output_H_dim && output_W_idx < output_W_dim && output_T_idx < output_T_dim) ? \ output_grad_data_ptr[N_idx * output_T_dim * C_dim * output_HW_dim + output_T_idx * C_dim * output_HW_dim + \ C_idx * output_HW_dim + output_H_idx * output_W_dim + output_W_idx] : 0; // -------------------------- SMALL T, LARGE H, SMALL W -------------------------- output_T_idx = (T_offset + small_shift_T); output_H_idx = (H_offset + large_shift_H); output_W_idx = (W_offset + small_shift_W); q121 = (output_H_idx >= 0 && output_W_idx >= 0 && output_T_idx >= 0 && \ output_H_idx < output_H_dim && output_W_idx < output_W_dim && output_T_idx < output_T_dim) ? 
\ output_grad_data_ptr[N_idx * output_T_dim * C_dim * output_HW_dim + output_T_idx * C_dim * output_HW_dim + \ C_idx * output_HW_dim + output_H_idx * output_W_dim + output_W_idx] : 0; // -------------------------- SMALL T, LARGE H, LARGE W -------------------------- output_T_idx = (T_offset + small_shift_T); output_H_idx = (H_offset + large_shift_H); output_W_idx = (W_offset + large_shift_W); q122 = (output_H_idx >= 0 && output_W_idx >= 0 && output_T_idx >= 0 && \ output_H_idx < output_H_dim && output_W_idx < output_W_dim && output_T_idx < output_T_dim) ? \ output_grad_data_ptr[N_idx * output_T_dim * C_dim * output_HW_dim + output_T_idx * C_dim * output_HW_dim + \ C_idx * output_HW_dim + output_H_idx * output_W_dim + output_W_idx] : 0; // -------------------------- LARGE T, SMALL H, SMALL W -------------------------- output_T_idx = (T_offset + large_shift_T); output_H_idx = (H_offset + small_shift_H); output_W_idx = (W_offset + small_shift_W); q211 = (output_H_idx >= 0 && output_W_idx >= 0 && output_T_idx >= 0 && \ output_H_idx < output_H_dim && output_W_idx < output_W_dim && output_T_idx < output_T_dim) ? \ output_grad_data_ptr[N_idx * output_T_dim * C_dim * output_HW_dim + output_T_idx * C_dim * output_HW_dim + \ C_idx * output_HW_dim + output_H_idx * output_W_dim + output_W_idx] : 0; // -------------------------- LARGE T, SMALL H, LARGE W -------------------------- output_T_idx = (T_offset + large_shift_T); output_H_idx = (H_offset + small_shift_H); output_W_idx = (W_offset + large_shift_W); q212 = (output_H_idx >= 0 && output_W_idx >= 0 && output_T_idx >= 0 && \ output_H_idx < output_H_dim && output_W_idx < output_W_dim && output_T_idx < output_T_dim) ? 
\ output_grad_data_ptr[N_idx * output_T_dim * C_dim * output_HW_dim + output_T_idx * C_dim * output_HW_dim + \ C_idx * output_HW_dim + output_H_idx * output_W_dim + output_W_idx] : 0; // -------------------------- LARGE T, LARGE H, SMALL W -------------------------- output_T_idx = (T_offset + large_shift_T); output_H_idx = (H_offset + large_shift_H); output_W_idx = (W_offset + small_shift_W); q221 = (output_H_idx >= 0 && output_W_idx >= 0 && output_T_idx >= 0 && \ output_H_idx < output_H_dim && output_W_idx < output_W_dim && output_T_idx < output_T_dim) ? \ output_grad_data_ptr[N_idx * output_T_dim * C_dim * output_HW_dim + output_T_idx * C_dim * output_HW_dim + \ C_idx * output_HW_dim + output_H_idx * output_W_dim + output_W_idx] : 0; // -------------------------- LARGE T, LARGE H, LARGE W -------------------------- output_T_idx = (T_offset + large_shift_T); output_H_idx = (H_offset + large_shift_H); output_W_idx = (W_offset + large_shift_W); q222 = (output_H_idx >= 0 && output_W_idx >= 0 && output_T_idx >= 0 && \ output_H_idx < output_H_dim && output_W_idx < output_W_dim && output_T_idx < output_T_dim) ? \ output_grad_data_ptr[N_idx * output_T_dim * C_dim * output_HW_dim + output_T_idx * C_dim * output_HW_dim + \ C_idx * output_HW_dim + output_H_idx * output_W_dim + output_W_idx] : 0; // Perform interpolation (draw a picture for yourself to clarify -- the q's are the four corner // points of the box.) val = \ (1 - remainder_T) * \ ((1 - remainder_H) * \ (q111 * (1 - remainder_W) + q112 * remainder_W) + \ remainder_H * \ (q121 * (1 - remainder_W) + q122 * remainder_W)) + \ remainder_T * \ ((1 - remainder_H) * \ (q211 * (1 - remainder_W) + q212 * remainder_W) + \ remainder_H * \ (q221 * (1 - remainder_W) + q222 * remainder_W)); } // Finally, stick the correct value into the correct location in the input grad tensor. 
input_grad_data_ptr[index] = val;
    }
}

// Normalizes each channel's (T, H, W) shift-gradient triple to unit length.
// Grid-stride loop: one logical thread per channel (C_dim channels total).
//
// normalize_t_factor < 0 selects "temporal-only" mode: the H/W components
// are treated as zero and the raw T gradient is normalized by itself.
// Otherwise the T gradient is rescaled by normalize_t_factor before the
// three components are jointly normalized.
template <typename T>
__global__ void normalize_shift_grad_3d_cuda(const int C_dim,
                                             T* shift_grad_T_data_ptr,
                                             T* shift_grad_H_data_ptr,
                                             T* shift_grad_W_data_ptr,
                                             const T normalize_t_factor) {
    for (int index = blockIdx.x * blockDim.x + threadIdx.x;
         index < C_dim;
         index += blockDim.x * gridDim.x) {
        T cur_T_grad, cur_H_grad, cur_W_grad;
        if (normalize_t_factor < 0) {
            // Temporal-only mode: H/W gradients are forced to zero.
            cur_T_grad = shift_grad_T_data_ptr[index];
            cur_H_grad = 0;
            cur_W_grad = 0;
        } else {
            cur_T_grad = shift_grad_T_data_ptr[index] * normalize_t_factor;
            cur_H_grad = shift_grad_H_data_ptr[index];
            cur_W_grad = shift_grad_W_data_ptr[index];
        }
        const T magnitude = sqrt(cur_T_grad * cur_T_grad +
                                 cur_H_grad * cur_H_grad +
                                 cur_W_grad * cur_W_grad);
        // Guard against division by zero when all three components vanish;
        // in that case the stored gradients are left untouched.
        if (magnitude > 0) {
            shift_grad_T_data_ptr[index] = cur_T_grad / magnitude;
            shift_grad_H_data_ptr[index] = cur_H_grad / magnitude;
            shift_grad_W_data_ptr[index] = cur_W_grad / magnitude;
        }
    }
}

// Reports (and by default aborts on) CUDA runtime errors with file/line info.
#define cudaErrorCheck(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line,
                      bool abort = true) {
    if (code != cudaSuccess) {
        std::cerr << "GPUassert: " << cudaGetErrorString(code) << " "
                  << file << " " << line << std::endl;
        if (abort) exit(code);
    }
}

// Populates block_count and thread_per_block from the CUDA device limits.
// "count" is the number of virtual threads the caller wants to run
// (one per output element).
//
// BUG FIX: the attribute queries previously hard-coded device 0; they now
// query the device the caller passed in (falling back to 0 when the
// c10::Device carries no explicit index).
void get_cuda_device_properties(const c10::Device& d, const int count,
                                int& block_count, int& thread_per_block) {
    const int device_index = d.has_index() ? static_cast<int>(d.index()) : 0;
    int max_thread_per_block = 0;
    cudaErrorCheck(cudaDeviceGetAttribute(&max_thread_per_block,
                                          cudaDevAttrMaxThreadsPerBlock,
                                          device_index));
    int max_thread_per_multiprocessor = 0;
    cudaErrorCheck(cudaDeviceGetAttribute(&max_thread_per_multiprocessor,
                                          cudaDevAttrMaxThreadsPerMultiProcessor,
                                          device_index));
    int num_multiprocessor = 0;
    cudaErrorCheck(cudaDeviceGetAttribute(&num_multiprocessor,
                                          cudaDevAttrMultiProcessorCount,
                                          device_index));
    const int virtual_thread_count = count;
    const int physical_thread_count =
        std::min(num_multiprocessor * max_thread_per_multiprocessor,
                 virtual_thread_count);
    thread_per_block = std::min(1024, max_thread_per_block);
    block_count = std::min(
        (physical_thread_count + thread_per_block - 1) / thread_per_block,
        num_multiprocessor);
}

// Host-side launcher for the forward Rubiks-shift kernel.
template <typename T>
struct RubiksShift3DForward {
    void operator()(const c10::Device& d,
                    const int total_num_elements,
                    const int N_dim, const int input_T_dim,
                    const int output_T_dim, const int C_dim,
                    const int input_H_dim, const int output_H_dim,
                    const int input_W_dim, const int output_W_dim,
                    const T* shift_tensor_data_T_ptr,
                    const T* shift_tensor_data_H_ptr,
                    const T* shift_tensor_data_W_ptr,
                    const int pad_T, const int pad_H,
                    const int pad_W, const int stride_T,
                    const int stride_H, const int stride_W,
                    const T* input_tensor_data_ptr, T* output_tensor_data_ptr,
                    bool quantize) {
        // Sets up CUDA multi-threading.
        int block_count = 0;
        int thread_per_block = 0;
        get_cuda_device_properties(d, total_num_elements, block_count,
                                   thread_per_block);
        rubiks_shift_3d_forward_cuda<T><<<block_count, thread_per_block>>>(
            total_num_elements, N_dim,
            input_T_dim, output_T_dim, C_dim,
            input_H_dim, output_H_dim,
            input_W_dim, output_W_dim,
            shift_tensor_data_T_ptr,
            shift_tensor_data_H_ptr,
            shift_tensor_data_W_ptr,
            pad_T, pad_H, pad_W,
            stride_T, stride_H, stride_W,
            input_tensor_data_ptr, output_tensor_data_ptr, quantize);
        // Surface launch-configuration errors immediately rather than at
        // some later, unrelated synchronization point.
        cudaErrorCheck(cudaGetLastError());
    }
};

// Host-side launcher for the shift-gradient (w.r.t. shift parameters)
// backward kernel.
template <typename T>
struct RubiksShift3DBackward {
    void operator()(const c10::Device& d,
                    const int total_num_elements,
                    const int N_dim, const int input_T_dim,
                    const int output_T_dim, const int C_dim,
                    const int input_H_dim, const int output_H_dim,
                    const int input_W_dim, const int output_W_dim,
                    const T* shift_tensor_data_T_ptr,
                    const T* shift_tensor_data_H_ptr,
                    const T* shift_tensor_data_W_ptr,
                    const int pad_T, const int pad_H,
                    const int pad_W, const int stride_T,
                    const int stride_H, const int stride_W,
                    const T* input_tensor_data_ptr,
                    const T* output_grad_data_ptr,
                    T* shift_grad_buffer_T_start_ptr,
                    T* shift_grad_buffer_H_start_ptr,
                    T* shift_grad_buffer_W_start_ptr) {
        // Sets up CUDA multi-threading.
        int block_count = 0;
        int thread_per_block = 0;
        get_cuda_device_properties(d, total_num_elements, block_count,
                                   thread_per_block);
        // Invoke GPU kernel.
        rubiks_shift_3d_backward_cuda<T><<<block_count, thread_per_block>>>(
            total_num_elements, N_dim,
            input_T_dim, output_T_dim, C_dim,
            input_H_dim, output_H_dim,
            input_W_dim, output_W_dim,
            shift_tensor_data_T_ptr,
            shift_tensor_data_H_ptr,
            shift_tensor_data_W_ptr,
            pad_T, pad_H, pad_W,
            stride_T, stride_H, stride_W,
            input_tensor_data_ptr,
            output_grad_data_ptr,
            shift_grad_buffer_T_start_ptr,
            shift_grad_buffer_H_start_ptr,
            shift_grad_buffer_W_start_ptr);
        cudaErrorCheck(cudaGetLastError());
    }
};

// Host-side launcher for the input-gradient backward kernel.  Dispatches to
// a specialized kernel when stride == 1 and padding == 0 in all dimensions.
template <typename T>
struct RubiksShift3DBackwardInput {
    void operator()(const c10::Device& d,
                    const int total_num_elements,
                    const int N_dim, const int input_T_dim,
                    const int output_T_dim, const int C_dim,
                    const int input_H_dim, const int output_H_dim,
                    const int input_W_dim, const int output_W_dim,
                    const T* shift_tensor_data_T_ptr,
                    const T* shift_tensor_data_H_ptr,
                    const T* shift_tensor_data_W_ptr,
                    const int pad_T, const int pad_H,
                    const int pad_W, const int stride_T,
                    const int stride_H, const int stride_W,
                    const T* input_tensor_data_ptr,
                    const T* output_grad_data_ptr,
                    T* input_grad_data_ptr, bool quantize) {
        // Sets up CUDA multi-threading.
        int block_count = 0;
        int thread_per_block = 0;
        get_cuda_device_properties(d, total_num_elements, block_count,
                                   thread_per_block);
        if (stride_T == 1 && stride_H == 1 && stride_W == 1 &&
            pad_T == 0 && pad_H == 0 && pad_W == 0) {
            // Fast path: unit stride, no padding.
            rubiks_shift_3d_backward_input_s1p0_cuda<T>
                <<<block_count, thread_per_block>>>(
                    total_num_elements, N_dim,
                    input_T_dim, output_T_dim, C_dim,
                    input_H_dim, output_H_dim,
                    input_W_dim, output_W_dim,
                    shift_tensor_data_T_ptr,
                    shift_tensor_data_H_ptr,
                    shift_tensor_data_W_ptr,
                    input_tensor_data_ptr,
                    output_grad_data_ptr,
                    input_grad_data_ptr, quantize);
        } else {
            rubiks_shift_3d_backward_input_cuda<T>
                <<<block_count, thread_per_block>>>(
                    total_num_elements, N_dim,
                    input_T_dim, output_T_dim, C_dim,
                    input_H_dim, output_H_dim,
                    input_W_dim, output_W_dim,
                    shift_tensor_data_T_ptr,
                    shift_tensor_data_H_ptr,
                    shift_tensor_data_W_ptr,
                    pad_T, pad_H, pad_W,
                    stride_T, stride_H, stride_W,
                    input_tensor_data_ptr,
                    output_grad_data_ptr,
                    input_grad_data_ptr, quantize);
        }
        cudaErrorCheck(cudaGetLastError());
    }
};

// Host-side launcher for the shift-gradient normalization kernel.
template <typename T>
struct NormalizeShiftGrad3D {
    void operator()(const c10::Device& d,
                    const int C_dim,
                    T* shift_grad_T_data_ptr,
                    T* shift_grad_H_data_ptr,
                    T* shift_grad_W_data_ptr, const T normalize_t_factor) {
        int block_count = 0;
        int thread_per_block = 0;
        get_cuda_device_properties(d, C_dim, block_count, thread_per_block);
        normalize_shift_grad_3d_cuda<T><<<block_count, thread_per_block>>>(
            C_dim, shift_grad_T_data_ptr,
            shift_grad_H_data_ptr,
            shift_grad_W_data_ptr, normalize_t_factor);
        cudaErrorCheck(cudaGetLastError());
    }
};

// (A CUDA_CHECK macro that was defined and immediately #undef'd here, never
// used, has been removed.)
#undef cudaErrorCheck

// Explicit instantiations for the supported floating-point types.
#define STRUCTS(T)                                \
    template struct RubiksShift3DForward<T>;      \
    template struct RubiksShift3DBackward<T>;     \
    template struct RubiksShift3DBackwardInput<T>;\
    template struct NormalizeShiftGrad3D<T>;
STRUCTS(float);
STRUCTS(double);
#undef STRUCTS
}  // namespace rubiks_shift
the_stack
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>

// NOTE(review): BLOCK_SIZE was referenced throughout this file but never
// defined, so the translation unit could not compile on its own.  64 is a
// warp-size multiple that divides MAT_SIZE (128) evenly; confirm against the
// original build configuration.
#ifndef BLOCK_SIZE
#define BLOCK_SIZE 64
#endif

// Kernel 1 of the two-kernel Sherman-Morrison rank-1 inverse update for the
// matrix selected by blockIdx.y.  Computes AinvT_u[col] = sum_row
// Ainv[row][col] * u[row] (one column per thread) and stashes column k of
// Ainv in global memory for kernel 2.
// Launch: grid = (N / BLOCK_SIZE, numMats), block = BLOCK_SIZE threads.
__global__ static void
update_inverse_cuda1 (float *Ainv_g[], float *u_g[], float *AinvT_u_g[],
                      float *Ainv_colk_g[], int N, int rowstride, int k)
{
  __shared__ float *Ainv, *u, *AinvT_u, *Ainv_colk;
  if (threadIdx.x==0) {
    Ainv      = Ainv_g[blockIdx.y];
    u         = u_g[blockIdx.y];
    AinvT_u   = AinvT_u_g[blockIdx.y];
    Ainv_colk = Ainv_colk_g[blockIdx.y];
  }
  __syncthreads();

  // Accumulate the product Ainv^T * u in shared memory.
  __shared__ float AinvT_u_shared[BLOCK_SIZE], Ainv_colk_shared[BLOCK_SIZE];
  __shared__ float u_shared[BLOCK_SIZE];
  AinvT_u_shared[threadIdx.x] = 0.0f;
  int col = blockIdx.x*BLOCK_SIZE + threadIdx.x;
  int numblocks = N / BLOCK_SIZE;
  // The branch condition is uniform within a block, so the barriers inside
  // each arm are safe.
  if (blockIdx.x*BLOCK_SIZE <= k && k < (blockIdx.x+1)*BLOCK_SIZE) {
    // This block owns column k: record it while accumulating the product.
    for (int block=0; block<numblocks; block++) {
      u_shared[threadIdx.x] = u[block*BLOCK_SIZE+threadIdx.x];
      __syncthreads();
      for (int i=0; i<BLOCK_SIZE; i++) {
        int row = block*BLOCK_SIZE + i;
        float ainv = Ainv[row*rowstride+col];
        if (col == k)
          Ainv_colk_shared[i] = ainv;
        AinvT_u_shared[threadIdx.x] += ainv*u_shared[i];
      }
      __syncthreads();
      Ainv_colk[block*BLOCK_SIZE+threadIdx.x] = Ainv_colk_shared[threadIdx.x];
    }
  }
  else {
    for (int block=0; block<numblocks; block++) {
      u_shared[threadIdx.x] = u[block*BLOCK_SIZE+threadIdx.x];
      __syncthreads();
      for (int i=0; i<BLOCK_SIZE; i++) {
        int row = block*BLOCK_SIZE + i;
        AinvT_u_shared[threadIdx.x] += Ainv[row*rowstride+col]*u_shared[i];
      }
      // BUG FIX: barrier added so the next iteration cannot overwrite
      // u_shared while other threads are still reading it.
      __syncthreads();
    }
  }
  __syncthreads();
  // Write the data back to global memory.
  AinvT_u[col] = AinvT_u_shared[threadIdx.x];
}

// Kernel 2: applies the rank-1 update
//   Ainv[row][col] += AinvT_u[col] * (-Ainv_colk[row] / (1 + AinvT_u[k]))
// using the intermediates produced by update_inverse_cuda1.
__global__ static void
update_inverse_cuda2 (float *Ainv_g[], float *u_g[], float *AinvT_u_g[],
                      float *Ainv_colk_g[], int N, int rowstride, int k)
{
  __shared__ float *Ainv, *AinvT_u, *Ainv_colk;
  if (threadIdx.x==0) {
    Ainv      = Ainv_g[blockIdx.y];
    AinvT_u   = AinvT_u_g[blockIdx.y];
    Ainv_colk = Ainv_colk_g[blockIdx.y];
  }
  __syncthreads();
  __shared__ float AinvT_u_shared[BLOCK_SIZE];
  __shared__ float Ainv_colk_shared[BLOCK_SIZE];
  int col = blockIdx.x*BLOCK_SIZE + threadIdx.x;
  // Read this block's slice of AinvT_u back from global memory.
  // (A dead initial load of Ainv_colk_shared, overwritten in the loop below,
  // has been removed.)
  AinvT_u_shared[threadIdx.x] = AinvT_u[col];
  __shared__ float prefact;
  if (threadIdx.x == 0)
    prefact = -1.0f/(1.0f+AinvT_u[k]);
  __syncthreads();
  int numblocks = N / BLOCK_SIZE;
  for (int block=0; block<numblocks; block++) {
    Ainv_colk_shared[threadIdx.x] =
      prefact*Ainv_colk[block*BLOCK_SIZE+threadIdx.x];
    __syncthreads();
    for (int i=0; i<BLOCK_SIZE; i++) {
      int row = block*BLOCK_SIZE + i;
      Ainv[row*rowstride+col] += AinvT_u_shared[threadIdx.x]*Ainv_colk_shared[i];
    }
    // BUG FIX: barrier added so the next iteration cannot overwrite
    // Ainv_colk_shared while other threads are still reading it.
    __syncthreads();
  }
}

#define NMAX 128

// Single-kernel variant for one matrix with N <= NMAX; requires
// blockDim.x == N and N a power of two (tree reduction).
__global__ static void
update_inverse_cuda (float *Ainv, float *u, int N, int rowstride, int k)
{
  __shared__ float A_k[NMAX], u_shared[NMAX], Ainv_u[NMAX], Ainv_shared[NMAX];
  A_k[threadIdx.x]      = Ainv[k*rowstride+threadIdx.x];
  u_shared[threadIdx.x] = u[threadIdx.x];
  // First, compute the k'th element of Ainv*u via a tree reduction.
  Ainv_u[threadIdx.x] = u_shared[threadIdx.x] * A_k[threadIdx.x];
  __syncthreads();
  for (int n=N>>1; n>0; n = n>>1) {
    float a = 0.0f;
    if (threadIdx.x < n)
      a = Ainv_u[2*threadIdx.x] + Ainv_u[2*threadIdx.x+1];
    __syncthreads();
    // BUG FIX: only active threads write back; previously every thread
    // stored `a`, which was uninitialized for threadIdx.x >= n.
    if (threadIdx.x < n)
      Ainv_u[threadIdx.x] = a;
    __syncthreads();
  }
  float prefact = -1.0f/(1.0f + Ainv_u[0]);
  for (int row=0; row<N; row++) {
    Ainv_shared[threadIdx.x] = Ainv[row*rowstride+threadIdx.x];
    __syncthreads();
    Ainv_u[threadIdx.x] = u_shared[threadIdx.x] * Ainv_shared[threadIdx.x];
    // BUG FIX: barrier added between writing Ainv_u and the cross-thread
    // reads in the reduction below.
    __syncthreads();
    for (int n=N>>1; n>0; n = n>>1) {
      float a = 0.0f;
      if (threadIdx.x < n)
        a = Ainv_u[2*threadIdx.x] + Ainv_u[2*threadIdx.x+1];
      __syncthreads();
      if (threadIdx.x < n)
        Ainv_u[threadIdx.x] = a;
      __syncthreads();
    }
    // Now Ainv_u[0] has the row'th element of Ainv*u.
    Ainv[row*rowstride + threadIdx.x] =
      Ainv_shared[threadIdx.x] + prefact*Ainv_u[0]*A_k[threadIdx.x];
  }
}

// Host-side reference implementation of the same rank-1 update, operating on
// the transpose AinvT.  Requires N <= 128.
void
update_inverse (float *AinvT, float *u, int N, int k)
{
  float Ainv_u[128], Ainv_rowk[128];
  for (int i=0; i<N; i++) {
    Ainv_u[i]    = 0.0f;
    Ainv_rowk[i] = AinvT[N*i+k];
    for (int j=0; j<N; j++)
      Ainv_u[i] += AinvT[j*N+i] * u[j];
  }
  float prefact = 1.0f/(1.0f+Ainv_u[k]);
  for (int i=0; i<N; i++)
    for (int j=0; j<N; j++)
      AinvT[j*N+i] -= prefact * Ainv_u[i]*Ainv_rowk[j];
}

// Replaces A (n x n, row-major) with its inverse by Gauss-Jordan elimination
// with full pivoting.  Adapted from Numerical Recipes in C.  Exits on a
// singular matrix; requires n <= 2000 (stack-allocated bookkeeping arrays).
void
GJInverse (double *A, int n)
{
  const int maxSize = 2000;
  if (n == 2) { // Special case for 2x2
    double a=A[0]; double b=A[1];
    double c=A[2]; double d=A[3];
    double detInv = 1.0/(a*d-b*c);
    A[0] = d*detInv;
    A[1] = -b*detInv;
    A[2] = -c*detInv;
    A[3] = a*detInv;
    return;
  }
  int colIndex[maxSize], rowIndex[maxSize], ipiv[maxSize];
  double big, pivInv;
  int icol = 0, irow = 0;  // defensively initialized
  for (int j=0; j<n; j++)
    ipiv[j] = -1;
  for (int i=0; i<n; i++) {
    // Find the pivot: largest remaining element over non-pivoted columns.
    big = 0.0;
    for (int j=0; j<n; j++)
      if (ipiv[j] != 0)
        for (int k=0; k<n; k++) {
          if (ipiv[k] == -1) {
            if (fabs(A[n*j+k]) >= big) {
              big = fabs(A[n*j+k]);
              irow = j;
              icol = k;
            }
          }
          else if (ipiv[k] > 0) {
            fprintf (stderr, "GJInverse: Singular matrix!\n");
            exit(1);
          }
        }
    ++(ipiv[icol]);
    if (irow != icol)
      for (int l=0; l<n; l++) {
        double tmp = A[n*irow+l];
        A[n*irow+l] = A[n*icol+l];
        A[n*icol+l] = tmp;
      }
    rowIndex[i] = irow;
    colIndex[i] = icol;
    if (A[n*icol+icol] == 0.0) {
      fprintf (stderr, "GJInverse: Singular matrix!\n");
      exit(1);
    }
    pivInv = 1.0/A[n*icol+icol];
    A[n*icol+icol] = 1.0;
    for (int l=0; l<n; l++)
      A[n*icol+l] *= pivInv;
    for (int ll=0; ll<n; ll++)
      if (ll != icol) {
        double dum = A[n*ll+icol];
        A[n*ll+icol] = 0.0;
        for (int l=0; l<n; l++)
          A[n*ll+l] -= A[n*icol+l]*dum;
      }
  }
  // Now unscramble the column permutations.
  for (int l=n-1; l>=0; l--) {
    if (rowIndex[l] != colIndex[l])
      for (int k=0; k<n; k++) {
        double tmp = A[n*k+rowIndex[l]];
        A[n*k+rowIndex[l]] = A[n*k+colIndex[l]];
        A[n*k+colIndex[l]] = tmp;
      }
  }
}

#define MAT_SIZE 128
#define NUM_MATS 1000

// Builds NUM_MATS copies of a random matrix and its inverse on the device,
// applies one Sherman-Morrison rank-1 update (row `row` += u^T) with the
// two-kernel path, times it, and verifies Ainv * A' == I on the host.
// NOTE(review): the large regions of commented-out experimental code that
// followed the verification loop were dead and have been removed.
int
main()
{
  int N = MAT_SIZE;
  double *A, *Ainv;
  int numMats = NUM_MATS;
  float *Ainv_h, *u_h;
  A      = (double*)malloc (N*N*sizeof(double));
  Ainv   = (double*)malloc (N*N*sizeof(double));
  Ainv_h = (float*) malloc (N*N*sizeof(float));
  u_h    = (float*) malloc (N*sizeof(float));
  // BUG FIX: the scalar device buffers (Ainv_d -- allocated twice --, u_d,
  // Ainv_u_d, Ainv_colk_d) were never used and leaked; they are gone.

  float **AinvList, **Ainv_uList, **Ainv_colkList, **uList;
  AinvList      = (float**)malloc(NUM_MATS*sizeof(float*));
  Ainv_uList    = (float**)malloc(NUM_MATS*sizeof(float*));
  Ainv_colkList = (float**)malloc(NUM_MATS*sizeof(float*));
  uList         = (float**)malloc(NUM_MATS*sizeof(float*));

  float **AinvList_d, **Ainv_uList_d, **Ainv_colkList_d, **uList_d;
  cudaMalloc((void**)&AinvList_d,      numMats*sizeof(float*));
  cudaMalloc((void**)&Ainv_uList_d,    numMats*sizeof(float*));
  cudaMalloc((void**)&Ainv_colkList_d, numMats*sizeof(float*));
  cudaMalloc((void**)&uList_d,         numMats*sizeof(float*));
  fprintf (stderr, "N = %d\n", N);
  for (int mat=0; mat<numMats; mat++) {
    cudaMalloc((void**)&(AinvList[mat])     , N*N*sizeof(float));
    cudaMalloc((void**)&(Ainv_uList[mat])   , N*sizeof(float));
    cudaMalloc((void**)&(Ainv_colkList[mat]), N*sizeof(float));
    cudaMalloc((void**)&(uList[mat])        , N*sizeof(float));
  }
  cudaMemcpy (AinvList_d, AinvList, numMats*sizeof(float*),
              cudaMemcpyHostToDevice);
  cudaMemcpy (Ainv_uList_d, Ainv_uList, numMats*sizeof(float*),
              cudaMemcpyHostToDevice);
  cudaMemcpy (Ainv_colkList_d, Ainv_colkList, numMats*sizeof(float*),
              cudaMemcpyHostToDevice);
  cudaMemcpy (uList_d, uList, numMats*sizeof(float*),
              cudaMemcpyHostToDevice);

  srand48((long int) 12341313);
  for (int mat=0; mat<numMats; mat++) {
    if (mat == 0) {
      // Generate one random matrix + inverse and reuse it for every slot.
      for (int i=0; i<N; i++) {
        u_h[i] = drand48();
        for (int j=0; j<N; j++)
          A[i*N+j] = Ainv[i*N+j] = drand48();
      }
      GJInverse(Ainv, N);
      for (int i=0; i<N; i++)
        for (int j=0; j<N; j++)
          Ainv_h[i*N+j] = (float)Ainv[i*N+j];
    }
    cudaMemcpy (AinvList[mat], Ainv_h, N*N*sizeof(float),
                cudaMemcpyHostToDevice);
    cudaMemcpy (uList[mat], u_h, N*sizeof(float), cudaMemcpyHostToDevice);
  }

  dim3 dimBlock2(BLOCK_SIZE);
  dim3 dimGrid2(N/BLOCK_SIZE, NUM_MATS);
  int row = 1;
  fprintf (stderr, "Before updates.\n");
  clock_t upStart = clock();
  for (int i=0; i<1; i++) {
    update_inverse_cuda1<<<dimGrid2,dimBlock2>>>
      (AinvList_d, uList_d, Ainv_uList_d, Ainv_colkList_d, N, N, row);
    update_inverse_cuda2<<<dimGrid2,dimBlock2>>>
      (AinvList_d, uList_d, Ainv_uList_d, Ainv_colkList_d, N, N, row);
  }
  // BUG FIX: kernel launches are asynchronous; without this sync the timer
  // measured only launch overhead, not the updates themselves.
  cudaDeviceSynchronize();
  clock_t upEnd = clock();
  double uptime = (double)(upEnd - upStart)/(double)CLOCKS_PER_SEC;
  // NOTE(review): the rate formula assumes N*10 updates, but the loop above
  // performs a single update per matrix; kept for parity with the original.
  double uprate = (double)N*10*NUM_MATS/uptime;
  fprintf (stderr, "%1.2f updates per second.\n", uprate);
  fprintf (stderr, "%1.3f generations per second.\n", 10.0/uptime);

  // Verify: after the host-side update A' = A + e_row * u^T, the
  // device-updated inverse times A' should be the identity.
  cudaMemcpy (Ainv_h, AinvList[1], N*N*sizeof(float), cudaMemcpyDeviceToHost);
  for (int i=0; i<N; i++)
    A[row*N+i] += u_h[i];
  for (int i=0; i<N; i++)
    for (int j=0; j<N; j++) {
      double ident = 0.0;
      for (int k=0; k<N; k++)
        ident += Ainv_h[i*N+k]*A[k*N+j];
      if ((i==j && fabs(ident - 1.0) > 1.0e-4) ||
          (i!=j && fabs(ident) > 1.0e-4))
        fprintf (stderr, "Error in matrix inverse, (%d, %d) = %1.8f\n",
                 i, j, ident);
    }
  fprintf (stderr, "Finished.\n");

  // Release device and host resources (previously leaked).
  for (int mat=0; mat<numMats; mat++) {
    cudaFree(AinvList[mat]);
    cudaFree(Ainv_uList[mat]);
    cudaFree(Ainv_colkList[mat]);
    cudaFree(uList[mat]);
  }
  cudaFree(AinvList_d);
  cudaFree(Ainv_uList_d);
  cudaFree(Ainv_colkList_d);
  cudaFree(uList_d);
  free(AinvList); free(Ainv_uList); free(Ainv_colkList); free(uList);
  free(A); free(Ainv); free(Ainv_h); free(u_h);
  return 0;
}
the_stack
#include <thrust/iterator/zip_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/gather.h>
#include <thrust/transform.h>
#include <cstring>
#include <algorithm>
#include <exception>
#include <iostream>
#include "algorithm.hpp"
#include "iterator.hpp"
#include "query/time_series_aggregate.h"
#include "memory.hpp"

// Sorts the dimension vector's index vector by dimension-hash value.
// On failure resHandle.pStrErr carries a strdup'ed error message (owned by
// the Go caller).
CGoCallResHandle Sort(DimensionVector keys,
                      int length,
                      void *cudaStream,
                      int device) {
  CGoCallResHandle resHandle = {nullptr, nullptr};
  try {
    // Consistency fix: use SET_DEVICE (as Expand already does) instead of a
    // hand-rolled #ifdef RUN_ON_DEVICE / cudaSetDevice pair.
    SET_DEVICE(device);
    ares::sort(keys, length, reinterpret_cast<cudaStream_t>(cudaStream));
    CheckCUDAError("Sort");
  } catch (std::exception &e) {
    std::cerr << "Exception happened when doing Sort:" << e.what()
              << std::endl;
    resHandle.pStrErr = strdup(e.what());
  }
  return resHandle;
}

// Aggregates measure values that share the same dimension hash.  On success
// resHandle.res carries the output length (as a void*).
CGoCallResHandle Reduce(DimensionVector inputKeys,
                        uint8_t *inputValues,
                        DimensionVector outputKeys,
                        uint8_t *outputValues,
                        int valueBytes,
                        int length,
                        AggregateFunction aggFunc,
                        void *stream,
                        int device) {
  CGoCallResHandle resHandle = {nullptr, nullptr};
  try {
    SET_DEVICE(device);
    cudaStream_t cudaStream = reinterpret_cast<cudaStream_t>(stream);
    resHandle.res =
        reinterpret_cast<void *>(ares::reduce(inputKeys, inputValues,
                                              outputKeys, outputValues,
                                              valueBytes, length, aggFunc,
                                              cudaStream));
    CheckCUDAError("Reduce");
    return resHandle;
  } catch (std::exception &e) {
    std::cerr << "Exception happened when doing Reduce:" << e.what()
              << std::endl;
    resHandle.pStrErr = strdup(e.what());
  }
  return resHandle;
}

// Expands inputKeys into outputKeys according to per-row base counts,
// appending after the first outputOccupiedLen occupied rows.  On success
// resHandle.res carries the new total occupied length (as a void*).
CGoCallResHandle Expand(DimensionVector inputKeys,
                        DimensionVector outputKeys,
                        uint32_t *baseCounts,
                        uint32_t *indexVector,
                        int indexVectorLen,
                        int outputOccupiedLen,
                        void *stream,
                        int device) {
  CGoCallResHandle resHandle = {nullptr, nullptr};
  try {
    SET_DEVICE(device);
    cudaStream_t cudaStream = reinterpret_cast<cudaStream_t>(stream);
    resHandle.res =
        reinterpret_cast<void *>(ares::expand(inputKeys, outputKeys,
                                              baseCounts, indexVector,
                                              indexVectorLen,
                                              outputOccupiedLen,
                                              cudaStream));
    CheckCUDAError("Expand");
    return resHandle;
  } catch (std::exception &e) {
    std::cerr << "Exception happened when doing Expand:" << e.what()
              << std::endl;
    resHandle.pStrErr = strdup(e.what());
  }
  return resHandle;
}

namespace ares {

// Hashes each dimension row, stores the hashes, then stable-sorts the index
// vector by hash so equal dimensions become adjacent for reduce_by_key.
void sort(DimensionVector keys,
          int length,
          cudaStream_t cudaStream) {
  DimensionHashIterator<> hashIter(keys.DimValues,
                                   keys.NumDimsPerDimWidth,
                                   keys.VectorCapacity,
                                   keys.IndexVector);
  thrust::copy(GET_EXECUTION_POLICY(cudaStream),
               hashIter, hashIter + length, keys.HashValues);
  thrust::stable_sort_by_key(GET_EXECUTION_POLICY(cudaStream),
                             keys.HashValues, keys.HashValues + length,
                             keys.IndexVector);
}

// Reduce consecutive runs of equal hash values with AggFunc, carrying the
// (index, value) pair through a zip iterator.  Returns the number of output
// rows.  outputHashValues is unused but kept for interface symmetry with
// the caller.
template<typename Value, typename AggFunc>
int reduceInternal(uint64_t *inputHashValues, uint32_t *inputIndexVector,
                   uint8_t *inputValues, uint64_t *outputHashValues,
                   uint32_t *outputIndexVector, uint8_t *outputValues,
                   int length, cudaStream_t cudaStream) {
  thrust::equal_to<uint64_t> binaryPred;
  AggFunc aggFunc;
  ReduceByHashFunctor<AggFunc> reduceFunc(aggFunc);
  auto zippedInputIter = thrust::make_zip_iterator(thrust::make_tuple(
      inputIndexVector,
      thrust::make_permutation_iterator(
          reinterpret_cast<Value *>(inputValues), inputIndexVector)));
  auto zippedOutputIter = thrust::make_zip_iterator(thrust::make_tuple(
      outputIndexVector, reinterpret_cast<Value *>(outputValues)));
  auto resEnd = thrust::reduce_by_key(GET_EXECUTION_POLICY(cudaStream),
                                      inputHashValues,
                                      inputHashValues + length,
                                      zippedInputIter,
                                      thrust::make_discard_iterator(),
                                      zippedOutputIter,
                                      binaryPred,
                                      reduceFunc);
  return thrust::get<1>(resEnd) - zippedOutputIter;
}

// Dispatches on (aggFunc, valueBytes) to the correctly-typed reduceInternal
// instantiation.  Throws std::invalid_argument for unsupported functions.
int bindValueAndAggFunc(uint64_t *inputHashValues, uint32_t *inputIndexVector,
                        uint8_t *inputValues, uint64_t *outputHashValues,
                        uint32_t *outputIndexVector, uint8_t *outputValues,
                        int valueBytes, int length, AggregateFunction aggFunc,
                        cudaStream_t cudaStream) {
  switch (aggFunc) {
#define REDUCE_INTERNAL(ValueType, AggFunc)  \
    return reduceInternal< ValueType, AggFunc >( \
        inputHashValues, \
        inputIndexVector, \
        inputValues, \
        outputHashValues, \
        outputIndexVector, \
        outputValues, \
        length, \
        cudaStream);
    case AGGR_SUM_UNSIGNED:
      if (valueBytes == 4) {
        REDUCE_INTERNAL(uint32_t, thrust::plus<uint32_t>)
      } else {
        REDUCE_INTERNAL(uint64_t, thrust::plus<uint64_t>)
      }
    case AGGR_SUM_SIGNED:
      if (valueBytes == 4) {
        REDUCE_INTERNAL(int32_t, thrust::plus<int32_t>)
      } else {
        REDUCE_INTERNAL(int64_t, thrust::plus<int64_t>)
      }
    case AGGR_SUM_FLOAT:
      if (valueBytes == 4) {
        REDUCE_INTERNAL(float_t, thrust::plus<float_t>)
      } else {
        REDUCE_INTERNAL(double_t, thrust::plus<double_t>)
      }
    case AGGR_MIN_UNSIGNED:
      REDUCE_INTERNAL(uint32_t, thrust::minimum<uint32_t>)
    case AGGR_MIN_SIGNED:
      REDUCE_INTERNAL(int32_t, thrust::minimum<int32_t>)
    case AGGR_MIN_FLOAT:
      REDUCE_INTERNAL(float_t, thrust::minimum<float_t>)
    case AGGR_MAX_UNSIGNED:
      REDUCE_INTERNAL(uint32_t, thrust::maximum<uint32_t>)
    case AGGR_MAX_SIGNED:
      REDUCE_INTERNAL(int32_t, thrust::maximum<int32_t>)
    case AGGR_MAX_FLOAT:
      REDUCE_INTERNAL(float_t, thrust::maximum<float_t>)
    case AGGR_AVG_FLOAT:
      REDUCE_INTERNAL(uint64_t, RollingAvgFunctor)
    default:
      throw std::invalid_argument("Unsupported aggregation function type");
  }
}
#undef REDUCE_INTERNAL

// Runs the typed reduction, then permutes the winning rows' dimension values
// into the output dimension vector.  Returns the output length.
int reduce(DimensionVector inputKeys, uint8_t *inputValues,
           DimensionVector outputKeys, uint8_t *outputValues, int valueBytes,
           int length, AggregateFunction aggFunc, cudaStream_t cudaStream) {
  int outputLength = bindValueAndAggFunc(
      inputKeys.HashValues,
      inputKeys.IndexVector,
      inputValues,
      outputKeys.HashValues,
      outputKeys.IndexVector,
      outputValues,
      valueBytes,
      length,
      aggFunc,
      cudaStream);
  DimensionColumnPermutateIterator iterIn(
      inputKeys.DimValues, outputKeys.IndexVector, inputKeys.VectorCapacity,
      outputLength, inputKeys.NumDimsPerDimWidth);
  DimensionColumnOutputIterator iterOut(outputKeys.DimValues,
                                        inputKeys.VectorCapacity,
                                        outputLength,
                                        inputKeys.NumDimsPerDimWidth, 0);
  int numDims = 0;
  for (int i = 0; i < NUM_DIM_WIDTH; i++) {
    numDims += inputKeys.NumDimsPerDimWidth[i];
  }
  // Copy dim values into output; the factor 2 accounts for the paired
  // value/null vectors traversed by the permutate iterator.
  thrust::copy(GET_EXECUTION_POLICY(cudaStream),
               iterIn, iterIn + numDims * 2 * outputLength, iterOut);
  return outputLength;
}

// Expands each input row baseCounts[indexVector[i]] times into outputKeys,
// truncating at the output vector's remaining capacity.  Returns the new
// total occupied length of the output.
int expand(DimensionVector inputKeys,
           DimensionVector outputKeys,
           uint32_t *baseCounts,
           uint32_t *indexVector,
           int indexVectorLen,
           int outputOccupiedLen,
           cudaStream_t cudaStream) {
  // Per-row counts, read through baseCounts[indexVector[i]].
  IndexCountIterator countIter = IndexCountIterator(baseCounts, indexVector);
  // Total output rows these inputs expand to.
  uint32_t totalCount = thrust::reduce(GET_EXECUTION_POLICY(cudaStream),
                                       countIter,
                                       countIter + indexVectorLen);
  // Robustness fix: with zero work the code below would take the address of
  // an element of an empty device vector (undefined behavior).
  if (indexVectorLen <= 0 || totalCount == 0) {
    return outputOccupiedLen;
  }

  // Exclusive scan of the counts gives each input row's output offset.
  ares::device_vector<uint32_t> offsets(indexVectorLen);
  thrust::exclusive_scan(GET_EXECUTION_POLICY(cudaStream),
                         countIter,
                         countIter + indexVectorLen,
                         offsets.begin());

  // Scatter each input row's ordinal to its first output position, then a
  // max-scan fills the holes so every output row knows its source row.
  ares::device_vector<uint32_t> indices(totalCount);
  thrust::scatter_if(GET_EXECUTION_POLICY(cudaStream),
                     thrust::counting_iterator<uint32_t>(0),
                     thrust::counting_iterator<uint32_t>(indexVectorLen),
                     offsets.begin(),
                     countIter,
                     indices.begin());
  thrust::inclusive_scan(GET_EXECUTION_POLICY(cudaStream),
                         indices.begin(), indices.end(), indices.begin(),
                         thrust::maximum<uint32_t>());

  uint32_t *newIndexVector = thrust::raw_pointer_cast(&indices[0]);
  // Fix: explicit std::min with matching int operands (the original relied
  // on an unqualified min over mixed uint32_t/int operands).
  const int remainingCapacity = outputKeys.VectorCapacity - outputOccupiedLen;
  int outputLen = std::min(static_cast<int>(totalCount), remainingCapacity);

  // Copy the expanded dimension values after the already-occupied rows.
  DimensionColumnPermutateIterator iterIn(
      inputKeys.DimValues, newIndexVector, inputKeys.VectorCapacity,
      outputLen, inputKeys.NumDimsPerDimWidth);
  DimensionColumnOutputIterator iterOut(outputKeys.DimValues,
                                        outputKeys.VectorCapacity, outputLen,
                                        inputKeys.NumDimsPerDimWidth,
                                        outputOccupiedLen);
  int numDims = 0;
  for (int i = 0; i < NUM_DIM_WIDTH; i++) {
    numDims += inputKeys.NumDimsPerDimWidth[i];
  }
  thrust::copy(GET_EXECUTION_POLICY(cudaStream),
               iterIn, iterIn + numDims * 2 * outputLen, iterOut);
  // Total occupied rows in the output dimension vector.
  return outputLen + outputOccupiedLen;
}

}  // namespace ares
the_stack