hip_filename
stringlengths
5
84
hip_content
stringlengths
79
9.69M
cuda_filename
stringlengths
4
83
cuda_content
stringlengths
19
9.69M
878ff7c8e35110c677be75946557c99db032e180.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <paddle/fluid/memory/allocation/allocator.h> #include "hipcub/hipcub.hpp" #include "paddle/fluid/framework/mixed_vector.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/memory/memcpy.h" #include "paddle/fluid/operators/detection/bbox_util.h" #include "paddle/fluid/operators/detection/collect_fpn_proposals_op.h" #include "paddle/fluid/operators/gather.cu.h" #include "paddle/fluid/operators/math/concat_and_split.h" #include "paddle/fluid/operators/strided_memcpy.h" #include "paddle/fluid/platform/cuda_primitives.h" #include "paddle/fluid/platform/for_range.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; using LoDTensor = framework::LoDTensor; static constexpr int kNumCUDAThreads = 64; static constexpr int kNumMaxinumNumBlocks = 4096; const int kBBoxSize = 4; static inline int NumBlocks(const int N) { return ::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads, kNumMaxinumNumBlocks); } static __global__ void GetLengthLoD(const int nthreads, const int* batch_ids, int* length_lod) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (nthreads); i += blockDim.x * gridDim.x) { platform::CudaAtomicAdd(length_lod + batch_ids[i], 1); } } template <typename DeviceContext, typename T> class GPUCollectFpnProposalsOpKernel : public 
framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { const auto roi_ins = ctx.MultiInput<LoDTensor>("MultiLevelRois"); const auto score_ins = ctx.MultiInput<LoDTensor>("MultiLevelScores"); auto fpn_rois = ctx.Output<LoDTensor>("FpnRois"); auto& dev_ctx = ctx.template device_context<DeviceContext>(); const int post_nms_topN = ctx.Attr<int>("post_nms_topN"); // concat inputs along axis = 0 int roi_offset = 0; int score_offset = 0; int total_roi_num = 0; for (size_t i = 0; i < roi_ins.size(); ++i) { total_roi_num += roi_ins[i]->dims()[0]; } int real_post_num = min(post_nms_topN, total_roi_num); fpn_rois->mutable_data<T>({real_post_num, kBBoxSize}, dev_ctx.GetPlace()); Tensor concat_rois; Tensor concat_scores; T* concat_rois_data = concat_rois.mutable_data<T>( {total_roi_num, kBBoxSize}, dev_ctx.GetPlace()); T* concat_scores_data = concat_scores.mutable_data<T>({total_roi_num, 1}, dev_ctx.GetPlace()); Tensor roi_batch_id_list; roi_batch_id_list.Resize({total_roi_num}); int* roi_batch_id_data = roi_batch_id_list.mutable_data<int>(platform::CPUPlace()); int index = 0; int lod_size; auto place = boost::get<platform::CUDAPlace>(dev_ctx.GetPlace()); for (size_t i = 0; i < roi_ins.size(); ++i) { auto roi_in = roi_ins[i]; auto score_in = score_ins[i]; auto roi_lod = roi_in->lod().back(); lod_size = roi_lod.size() - 1; for (size_t n = 0; n < lod_size; ++n) { for (size_t j = roi_lod[n]; j < roi_lod[n + 1]; ++j) { roi_batch_id_data[index++] = n; } } memory::Copy(place, concat_rois_data + roi_offset, place, roi_in->data<T>(), roi_in->numel() * sizeof(T), dev_ctx.stream()); memory::Copy(place, concat_scores_data + score_offset, place, score_in->data<T>(), score_in->numel() * sizeof(T), dev_ctx.stream()); roi_offset += roi_in->numel(); score_offset += score_in->numel(); } // copy batch id list to GPU Tensor roi_batch_id_list_gpu; framework::TensorCopy(roi_batch_id_list, dev_ctx.GetPlace(), &roi_batch_id_list_gpu); Tensor index_in_t; 
int* idx_in = index_in_t.mutable_data<int>({total_roi_num}, dev_ctx.GetPlace()); platform::ForRange<platform::CUDADeviceContext> for_range_total( dev_ctx, total_roi_num); for_range_total(RangeInitFunctor{0, 1, idx_in}); Tensor keys_out_t; T* keys_out = keys_out_t.mutable_data<T>({total_roi_num}, dev_ctx.GetPlace()); Tensor index_out_t; int* idx_out = index_out_t.mutable_data<int>({total_roi_num}, dev_ctx.GetPlace()); // Determine temporary device storage requirements size_t temp_storage_bytes = 0; hipcub::DeviceRadixSort::SortPairsDescending<T, int>( nullptr, temp_storage_bytes, concat_scores.data<T>(), keys_out, idx_in, idx_out, total_roi_num); // Allocate temporary storage auto d_temp_storage = memory::Alloc(place, temp_storage_bytes, memory::Allocator::kScratchpad); // Run sorting operation // sort score to get corresponding index hipcub::DeviceRadixSort::SortPairsDescending<T, int>( d_temp_storage->ptr(), temp_storage_bytes, concat_scores.data<T>(), keys_out, idx_in, idx_out, total_roi_num); index_out_t.Resize({real_post_num}); Tensor sorted_rois; sorted_rois.mutable_data<T>({real_post_num, kBBoxSize}, dev_ctx.GetPlace()); Tensor sorted_batch_id; sorted_batch_id.mutable_data<int>({real_post_num}, dev_ctx.GetPlace()); GPUGather<T>(dev_ctx, concat_rois, index_out_t, &sorted_rois); GPUGather<int>(dev_ctx, roi_batch_id_list_gpu, index_out_t, &sorted_batch_id); Tensor batch_index_t; int* batch_idx_in = batch_index_t.mutable_data<int>({real_post_num}, dev_ctx.GetPlace()); platform::ForRange<platform::CUDADeviceContext> for_range_post( dev_ctx, real_post_num); for_range_post(RangeInitFunctor{0, 1, batch_idx_in}); Tensor out_id_t; int* out_id_data = out_id_t.mutable_data<int>({real_post_num}, dev_ctx.GetPlace()); // Determine temporary device storage requirements temp_storage_bytes = 0; hipcub::DeviceRadixSort::SortPairs<int, int>( nullptr, temp_storage_bytes, sorted_batch_id.data<int>(), out_id_data, batch_idx_in, index_out_t.data<int>(), real_post_num); // Allocate 
temporary storage d_temp_storage = memory::Alloc(place, temp_storage_bytes, memory::Allocator::kScratchpad); // Run sorting operation // sort batch_id to get corresponding index hipcub::DeviceRadixSort::SortPairs<int, int>( d_temp_storage->ptr(), temp_storage_bytes, sorted_batch_id.data<int>(), out_id_data, batch_idx_in, index_out_t.data<int>(), real_post_num); GPUGather<T>(dev_ctx, sorted_rois, index_out_t, fpn_rois); Tensor length_lod; int* length_lod_data = length_lod.mutable_data<int>({lod_size}, dev_ctx.GetPlace()); math::SetConstant<platform::CUDADeviceContext, int> set_zero; set_zero(dev_ctx, &length_lod, static_cast<int>(0)); int blocks = NumBlocks(real_post_num); int threads = kNumCUDAThreads; // get length-based lod by batch ids hipLaunchKernelGGL(( GetLengthLoD), dim3(blocks), dim3(threads), 0, 0, real_post_num, out_id_data, length_lod_data); std::vector<int> length_lod_cpu(lod_size); memory::Copy(platform::CPUPlace(), length_lod_cpu.data(), place, length_lod_data, sizeof(int) * lod_size, dev_ctx.stream()); dev_ctx.Wait(); std::vector<size_t> offset(1, 0); for (int i = 0; i < lod_size; ++i) { offset.emplace_back(offset.back() + length_lod_cpu[i]); } framework::LoD lod; lod.emplace_back(offset); fpn_rois->set_lod(lod); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( collect_fpn_proposals, ops::GPUCollectFpnProposalsOpKernel<paddle::platform::CUDADeviceContext, float>, ops::GPUCollectFpnProposalsOpKernel<paddle::platform::CUDADeviceContext, double>);
878ff7c8e35110c677be75946557c99db032e180.cu
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <paddle/fluid/memory/allocation/allocator.h> #include "cub/cub.cuh" #include "paddle/fluid/framework/mixed_vector.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/memory/memcpy.h" #include "paddle/fluid/operators/detection/bbox_util.h" #include "paddle/fluid/operators/detection/collect_fpn_proposals_op.h" #include "paddle/fluid/operators/gather.cu.h" #include "paddle/fluid/operators/math/concat_and_split.h" #include "paddle/fluid/operators/strided_memcpy.h" #include "paddle/fluid/platform/cuda_primitives.h" #include "paddle/fluid/platform/for_range.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; using LoDTensor = framework::LoDTensor; static constexpr int kNumCUDAThreads = 64; static constexpr int kNumMaxinumNumBlocks = 4096; const int kBBoxSize = 4; static inline int NumBlocks(const int N) { return std::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads, kNumMaxinumNumBlocks); } static __global__ void GetLengthLoD(const int nthreads, const int* batch_ids, int* length_lod) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (nthreads); i += blockDim.x * gridDim.x) { platform::CudaAtomicAdd(length_lod + batch_ids[i], 1); } } template <typename DeviceContext, typename T> class GPUCollectFpnProposalsOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { const 
auto roi_ins = ctx.MultiInput<LoDTensor>("MultiLevelRois"); const auto score_ins = ctx.MultiInput<LoDTensor>("MultiLevelScores"); auto fpn_rois = ctx.Output<LoDTensor>("FpnRois"); auto& dev_ctx = ctx.template device_context<DeviceContext>(); const int post_nms_topN = ctx.Attr<int>("post_nms_topN"); // concat inputs along axis = 0 int roi_offset = 0; int score_offset = 0; int total_roi_num = 0; for (size_t i = 0; i < roi_ins.size(); ++i) { total_roi_num += roi_ins[i]->dims()[0]; } int real_post_num = min(post_nms_topN, total_roi_num); fpn_rois->mutable_data<T>({real_post_num, kBBoxSize}, dev_ctx.GetPlace()); Tensor concat_rois; Tensor concat_scores; T* concat_rois_data = concat_rois.mutable_data<T>( {total_roi_num, kBBoxSize}, dev_ctx.GetPlace()); T* concat_scores_data = concat_scores.mutable_data<T>({total_roi_num, 1}, dev_ctx.GetPlace()); Tensor roi_batch_id_list; roi_batch_id_list.Resize({total_roi_num}); int* roi_batch_id_data = roi_batch_id_list.mutable_data<int>(platform::CPUPlace()); int index = 0; int lod_size; auto place = boost::get<platform::CUDAPlace>(dev_ctx.GetPlace()); for (size_t i = 0; i < roi_ins.size(); ++i) { auto roi_in = roi_ins[i]; auto score_in = score_ins[i]; auto roi_lod = roi_in->lod().back(); lod_size = roi_lod.size() - 1; for (size_t n = 0; n < lod_size; ++n) { for (size_t j = roi_lod[n]; j < roi_lod[n + 1]; ++j) { roi_batch_id_data[index++] = n; } } memory::Copy(place, concat_rois_data + roi_offset, place, roi_in->data<T>(), roi_in->numel() * sizeof(T), dev_ctx.stream()); memory::Copy(place, concat_scores_data + score_offset, place, score_in->data<T>(), score_in->numel() * sizeof(T), dev_ctx.stream()); roi_offset += roi_in->numel(); score_offset += score_in->numel(); } // copy batch id list to GPU Tensor roi_batch_id_list_gpu; framework::TensorCopy(roi_batch_id_list, dev_ctx.GetPlace(), &roi_batch_id_list_gpu); Tensor index_in_t; int* idx_in = index_in_t.mutable_data<int>({total_roi_num}, dev_ctx.GetPlace()); 
platform::ForRange<platform::CUDADeviceContext> for_range_total( dev_ctx, total_roi_num); for_range_total(RangeInitFunctor{0, 1, idx_in}); Tensor keys_out_t; T* keys_out = keys_out_t.mutable_data<T>({total_roi_num}, dev_ctx.GetPlace()); Tensor index_out_t; int* idx_out = index_out_t.mutable_data<int>({total_roi_num}, dev_ctx.GetPlace()); // Determine temporary device storage requirements size_t temp_storage_bytes = 0; cub::DeviceRadixSort::SortPairsDescending<T, int>( nullptr, temp_storage_bytes, concat_scores.data<T>(), keys_out, idx_in, idx_out, total_roi_num); // Allocate temporary storage auto d_temp_storage = memory::Alloc(place, temp_storage_bytes, memory::Allocator::kScratchpad); // Run sorting operation // sort score to get corresponding index cub::DeviceRadixSort::SortPairsDescending<T, int>( d_temp_storage->ptr(), temp_storage_bytes, concat_scores.data<T>(), keys_out, idx_in, idx_out, total_roi_num); index_out_t.Resize({real_post_num}); Tensor sorted_rois; sorted_rois.mutable_data<T>({real_post_num, kBBoxSize}, dev_ctx.GetPlace()); Tensor sorted_batch_id; sorted_batch_id.mutable_data<int>({real_post_num}, dev_ctx.GetPlace()); GPUGather<T>(dev_ctx, concat_rois, index_out_t, &sorted_rois); GPUGather<int>(dev_ctx, roi_batch_id_list_gpu, index_out_t, &sorted_batch_id); Tensor batch_index_t; int* batch_idx_in = batch_index_t.mutable_data<int>({real_post_num}, dev_ctx.GetPlace()); platform::ForRange<platform::CUDADeviceContext> for_range_post( dev_ctx, real_post_num); for_range_post(RangeInitFunctor{0, 1, batch_idx_in}); Tensor out_id_t; int* out_id_data = out_id_t.mutable_data<int>({real_post_num}, dev_ctx.GetPlace()); // Determine temporary device storage requirements temp_storage_bytes = 0; cub::DeviceRadixSort::SortPairs<int, int>( nullptr, temp_storage_bytes, sorted_batch_id.data<int>(), out_id_data, batch_idx_in, index_out_t.data<int>(), real_post_num); // Allocate temporary storage d_temp_storage = memory::Alloc(place, temp_storage_bytes, 
memory::Allocator::kScratchpad); // Run sorting operation // sort batch_id to get corresponding index cub::DeviceRadixSort::SortPairs<int, int>( d_temp_storage->ptr(), temp_storage_bytes, sorted_batch_id.data<int>(), out_id_data, batch_idx_in, index_out_t.data<int>(), real_post_num); GPUGather<T>(dev_ctx, sorted_rois, index_out_t, fpn_rois); Tensor length_lod; int* length_lod_data = length_lod.mutable_data<int>({lod_size}, dev_ctx.GetPlace()); math::SetConstant<platform::CUDADeviceContext, int> set_zero; set_zero(dev_ctx, &length_lod, static_cast<int>(0)); int blocks = NumBlocks(real_post_num); int threads = kNumCUDAThreads; // get length-based lod by batch ids GetLengthLoD<<<blocks, threads>>>(real_post_num, out_id_data, length_lod_data); std::vector<int> length_lod_cpu(lod_size); memory::Copy(platform::CPUPlace(), length_lod_cpu.data(), place, length_lod_data, sizeof(int) * lod_size, dev_ctx.stream()); dev_ctx.Wait(); std::vector<size_t> offset(1, 0); for (int i = 0; i < lod_size; ++i) { offset.emplace_back(offset.back() + length_lod_cpu[i]); } framework::LoD lod; lod.emplace_back(offset); fpn_rois->set_lod(lod); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( collect_fpn_proposals, ops::GPUCollectFpnProposalsOpKernel<paddle::platform::CUDADeviceContext, float>, ops::GPUCollectFpnProposalsOpKernel<paddle::platform::CUDADeviceContext, double>);
dedf4fa0a90425ff83996c9fdbc6e9a22c5b959f.hip
// !!! This is a file automatically generated by hipify!!! #include <gauge_field.h> #include <color_spinor_field.h> #include <dslash.h> #include <worker.h> #include <dslash_policy.cuh> #include <kernels/dslash_twisted_mass_preconditioned.cuh> /** This is the preconditioned gauged twisted-mass operator */ namespace quda { /** @brief This is a helper class that is used to instantiate the correct templated kernel for the dslash. */ template <typename Float, int nDim, int nColor, int nParity, bool dagger, bool xpay, KernelType kernel_type, typename Arg> struct TwistedMassPreconditionedLaunch { static constexpr const char *kernel = "quda::twistedMassPreconditionedGPU"; // kernel name for jit compilation template <typename Dslash> inline static void launch(Dslash &dslash, TuneParam &tp, Arg &arg, const hipStream_t &stream) { static_assert(nParity == 1, "preconditioned twisted-mass operator only defined for nParity=1"); dslash.launch( twistedMassPreconditionedGPU<Float, nDim, nColor, nParity, dagger, xpay, kernel_type, Arg>, tp, arg, stream); } }; template <typename Float, int nDim, int nColor, typename Arg> class TwistedMassPreconditioned : public Dslash<Float> { protected: Arg &arg; const ColorSpinorField &in; public: TwistedMassPreconditioned(Arg &arg, const ColorSpinorField &out, const ColorSpinorField &in) : Dslash<Float>(arg, out, in, "kernels/dslash_twisted_mass_preconditioned.cuh"), arg(arg), in(in) { if (arg.asymmetric) for (int i = 0; i < 8; i++) if (i != 4) { strcat(Dslash<Float>::aux[i], ",asym"); } } virtual ~TwistedMassPreconditioned() {} void apply(const hipStream_t &stream) { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); Dslash<Float>::setParam(arg); if (arg.asymmetric && !arg.dagger) errorQuda("asymmetric operator only defined for dagger"); if (arg.asymmetric && arg.xpay) errorQuda("asymmetric operator not defined for xpay"); if (arg.nParity == 1) { if (arg.xpay) Dslash<Float>::template instantiate<TwistedMassPreconditionedLaunch, nDim, 
nColor, 1, true>(tp, arg, stream); else Dslash<Float>::template instantiate<TwistedMassPreconditionedLaunch, nDim, nColor, 1, false>(tp, arg, stream); } else { errorQuda("Preconditioned twisted-mass operator not defined nParity=%d", arg.nParity); } } long long flops() const { long long flops = Dslash<Float>::flops(); switch (arg.kernel_type) { case EXTERIOR_KERNEL_X: case EXTERIOR_KERNEL_Y: case EXTERIOR_KERNEL_Z: case EXTERIOR_KERNEL_T: case EXTERIOR_KERNEL_ALL: break; // twisted-mass flops are in the interior kernel case INTERIOR_KERNEL: case KERNEL_POLICY: flops += 2 * nColor * 4 * 2 * in.Volume(); // complex * Nc * Ns * fma * vol break; } return flops; } TuneKey tuneKey() const { return TuneKey(in.VolString(), typeid(*this).name(), Dslash<Float>::aux[arg.kernel_type]); } }; template <typename Float, int nColor, QudaReconstructType recon> struct TwistedMassPreconditionedApply { inline TwistedMassPreconditionedApply(ColorSpinorField &out, const ColorSpinorField &in, const GaugeField &U, double a, double b, bool xpay, const ColorSpinorField &x, int parity, bool dagger, bool asymmetric, const int *comm_override, TimeProfile &profile) { constexpr int nDim = 4; TwistedMassArg<Float, nColor, recon> arg(out, in, U, a, b, xpay, x, parity, dagger, asymmetric, comm_override); TwistedMassPreconditioned<Float, nDim, nColor, TwistedMassArg<Float, nColor, recon>> twisted(arg, out, in); dslash::DslashPolicyTune<decltype(twisted)> policy(twisted, const_cast<cudaColorSpinorField *>(static_cast<const cudaColorSpinorField *>(&in)), in.VolumeCB(), in.GhostFaceCB(), profile); policy.apply(0); checkCudaError(); } }; /* Apply the preconditioned twisted-mass Dslash operator out = x + A^{-1} D * in = x + a*(1 + i*b*gamma_5)*\sum_mu U_{-\mu}(x)in(x+mu) + U^\dagger_mu(x-mu)in(x-mu) */ void ApplyTwistedMassPreconditioned(ColorSpinorField &out, const ColorSpinorField &in, const GaugeField &U, double a, double b, bool xpay, const ColorSpinorField &x, int parity, bool dagger, bool asymmetric, 
const int *comm_override, TimeProfile &profile) { #ifdef GPU_TWISTED_MASS_DIRAC if (in.V() == out.V()) errorQuda("Aliasing pointers"); if (in.FieldOrder() != out.FieldOrder()) errorQuda("Field order mismatch in = %d, out = %d", in.FieldOrder(), out.FieldOrder()); // check all precisions match checkPrecision(out, in, U); // check all locations match checkLocation(out, in, U); // with symmetric dagger operator we must use kernel packing if (dagger && !asymmetric) pushKernelPackT(true); instantiate<TwistedMassPreconditionedApply>( out, in, U, a, b, xpay, x, parity, dagger, asymmetric, comm_override, profile); if (dagger && !asymmetric) popKernelPackT(); #else errorQuda("Twisted-mass dslash has not been built"); #endif // GPU_TWISTED_MASS_DIRAC } } // namespace quda
dedf4fa0a90425ff83996c9fdbc6e9a22c5b959f.cu
#include <gauge_field.h> #include <color_spinor_field.h> #include <dslash.h> #include <worker.h> #include <dslash_policy.cuh> #include <kernels/dslash_twisted_mass_preconditioned.cuh> /** This is the preconditioned gauged twisted-mass operator */ namespace quda { /** @brief This is a helper class that is used to instantiate the correct templated kernel for the dslash. */ template <typename Float, int nDim, int nColor, int nParity, bool dagger, bool xpay, KernelType kernel_type, typename Arg> struct TwistedMassPreconditionedLaunch { static constexpr const char *kernel = "quda::twistedMassPreconditionedGPU"; // kernel name for jit compilation template <typename Dslash> inline static void launch(Dslash &dslash, TuneParam &tp, Arg &arg, const cudaStream_t &stream) { static_assert(nParity == 1, "preconditioned twisted-mass operator only defined for nParity=1"); dslash.launch( twistedMassPreconditionedGPU<Float, nDim, nColor, nParity, dagger, xpay, kernel_type, Arg>, tp, arg, stream); } }; template <typename Float, int nDim, int nColor, typename Arg> class TwistedMassPreconditioned : public Dslash<Float> { protected: Arg &arg; const ColorSpinorField &in; public: TwistedMassPreconditioned(Arg &arg, const ColorSpinorField &out, const ColorSpinorField &in) : Dslash<Float>(arg, out, in, "kernels/dslash_twisted_mass_preconditioned.cuh"), arg(arg), in(in) { if (arg.asymmetric) for (int i = 0; i < 8; i++) if (i != 4) { strcat(Dslash<Float>::aux[i], ",asym"); } } virtual ~TwistedMassPreconditioned() {} void apply(const cudaStream_t &stream) { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); Dslash<Float>::setParam(arg); if (arg.asymmetric && !arg.dagger) errorQuda("asymmetric operator only defined for dagger"); if (arg.asymmetric && arg.xpay) errorQuda("asymmetric operator not defined for xpay"); if (arg.nParity == 1) { if (arg.xpay) Dslash<Float>::template instantiate<TwistedMassPreconditionedLaunch, nDim, nColor, 1, true>(tp, arg, stream); else 
Dslash<Float>::template instantiate<TwistedMassPreconditionedLaunch, nDim, nColor, 1, false>(tp, arg, stream); } else { errorQuda("Preconditioned twisted-mass operator not defined nParity=%d", arg.nParity); } } long long flops() const { long long flops = Dslash<Float>::flops(); switch (arg.kernel_type) { case EXTERIOR_KERNEL_X: case EXTERIOR_KERNEL_Y: case EXTERIOR_KERNEL_Z: case EXTERIOR_KERNEL_T: case EXTERIOR_KERNEL_ALL: break; // twisted-mass flops are in the interior kernel case INTERIOR_KERNEL: case KERNEL_POLICY: flops += 2 * nColor * 4 * 2 * in.Volume(); // complex * Nc * Ns * fma * vol break; } return flops; } TuneKey tuneKey() const { return TuneKey(in.VolString(), typeid(*this).name(), Dslash<Float>::aux[arg.kernel_type]); } }; template <typename Float, int nColor, QudaReconstructType recon> struct TwistedMassPreconditionedApply { inline TwistedMassPreconditionedApply(ColorSpinorField &out, const ColorSpinorField &in, const GaugeField &U, double a, double b, bool xpay, const ColorSpinorField &x, int parity, bool dagger, bool asymmetric, const int *comm_override, TimeProfile &profile) { constexpr int nDim = 4; TwistedMassArg<Float, nColor, recon> arg(out, in, U, a, b, xpay, x, parity, dagger, asymmetric, comm_override); TwistedMassPreconditioned<Float, nDim, nColor, TwistedMassArg<Float, nColor, recon>> twisted(arg, out, in); dslash::DslashPolicyTune<decltype(twisted)> policy(twisted, const_cast<cudaColorSpinorField *>(static_cast<const cudaColorSpinorField *>(&in)), in.VolumeCB(), in.GhostFaceCB(), profile); policy.apply(0); checkCudaError(); } }; /* Apply the preconditioned twisted-mass Dslash operator out = x + A^{-1} D * in = x + a*(1 + i*b*gamma_5)*\sum_mu U_{-\mu}(x)in(x+mu) + U^\dagger_mu(x-mu)in(x-mu) */ void ApplyTwistedMassPreconditioned(ColorSpinorField &out, const ColorSpinorField &in, const GaugeField &U, double a, double b, bool xpay, const ColorSpinorField &x, int parity, bool dagger, bool asymmetric, const int *comm_override, TimeProfile 
&profile) { #ifdef GPU_TWISTED_MASS_DIRAC if (in.V() == out.V()) errorQuda("Aliasing pointers"); if (in.FieldOrder() != out.FieldOrder()) errorQuda("Field order mismatch in = %d, out = %d", in.FieldOrder(), out.FieldOrder()); // check all precisions match checkPrecision(out, in, U); // check all locations match checkLocation(out, in, U); // with symmetric dagger operator we must use kernel packing if (dagger && !asymmetric) pushKernelPackT(true); instantiate<TwistedMassPreconditionedApply>( out, in, U, a, b, xpay, x, parity, dagger, asymmetric, comm_override, profile); if (dagger && !asymmetric) popKernelPackT(); #else errorQuda("Twisted-mass dslash has not been built"); #endif // GPU_TWISTED_MASS_DIRAC } } // namespace quda
0665d0d152d144c9a775e5ee0c686ac5a8a43636.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "trackerKCFparallel.hpp"
#include <opencv2/cudaarithm.hpp>
#include "dft.cu"
#include "mulspectrums.cu"

// Report a getSubWindow failure and abort the current update.
// FIX: the original macro only printed the message and fell through despite
// its name, letting updateImpl() continue with missing/stale feature data.
// It now returns false (matching upstream OpenCV KCF behaviour). It is only
// used inside updateImpl(), which returns bool.
#define returnFromUpdate() \
  { fprintf(stderr, "Error in %s line %d while updating frame %d\n", __FILE__, __LINE__, frame); return false; }

/*---------------------------
|  TrackerKCFModel
|---------------------------*/
namespace cv{
  /**
   * \brief Implementation of TrackerModel for MIL algorithm
   */
  class TrackerKCFModel : public TrackerModel{
  public:
    TrackerKCFModel(TrackerKCF::Params /*params*/){}
    ~TrackerKCFModel(){}
  protected:
    void modelEstimationImpl( const std::vector<Mat>& /*responses*/ ){}
    void modelUpdateImpl(){}
  };
} /* namespace cv */

namespace helper {
  // Debug helper: print the OpenCV element type of a Mat and the accessor
  // expression appropriate for that type.
  void MatType( Mat inputMat )
  {
      int inttype = inputMat.type();

      std::string r, a;
      uchar depth = inttype & CV_MAT_DEPTH_MASK;
      uchar chans = 1 + (inttype >> CV_CN_SHIFT);
      // depth is masked to the range 0..7, so only the single-channel depth
      // constants can match. (The original switch also listed CV_32FC2 and
      // CV_64FC2 cases, which were unreachable and have been removed.)
      switch ( depth ) {
        case CV_8U:  r = "8U";   a = "Mat.at<uchar>(y,x)";  break;
        case CV_8S:  r = "8S";   a = "Mat.at<schar>(y,x)";  break;
        case CV_16U: r = "16U";  a = "Mat.at<ushort>(y,x)"; break;
        case CV_16S: r = "16S";  a = "Mat.at<short>(y,x)";  break;
        case CV_32S: r = "32S";  a = "Mat.at<int>(y,x)";    break;
        case CV_32F: r = "32F";  a = "Mat.at<float>(y,x)";  break;
        case CV_64F: r = "64F";  a = "Mat.at<double>(y,x)"; break;
        default:     r = "User"; a = "Mat.at<UKNOWN>(y,x)"; break;
      }
      r += "C";
      r += (chans+'0');
      std::cout << "Mat is of type " << r << " and should be accessed with " << a << std::endl;
  }
}

namespace cv {

  /*
   * Constructor: zeroes the timing accumulators (when TIME profiling is
   * enabled) and marks the tracker as uninitialized.
   */
  TackerKCFImplParallel::TackerKCFImplParallel( const TrackerKCF::Params &parameters ) :
      params( parameters )
  {
    isInit = false;
    resizeImage = false;
    use_custom_extractor_pca = false;
    use_custom_extractor_npca = false;

#if TIME
    total_lines = num_steps;
    for (int i = 0; i < num_steps; i++) {
      cumulated_times[i] = 0;
    }
#if TIME == 2
    for (int i = 0; i < num_steps - 1; i++) {
      total_lines += num_steps_details[i];
      for (int j = 0; j < max_num_details; j++) {
        cumulated_details_times[i][j] = 0;
      }
    }
#endif
#endif
  }

  void TackerKCFImplParallel::read( const cv::FileNode& fn ){
    params.read( fn );
  }

  void TackerKCFImplParallel::write( cv::FileStorage& fs ) const {
    params.write( fs );
  }

  /*
   * Initialization:
   * - creating hann window filter
   * - ROI padding
   * - creating a gaussian response for the training ground-truth
   * - perform FFT to the gaussian response
   * Also pre-allocates all persistent GpuMat buffers and uploads the
   * ColorNames lookup table to the device once.
   */
  bool TackerKCFImplParallel::initImpl( const Mat& image, const Rect2d& boundingBox ){
#if TIME
    double startInit = CycleTimer::currentSeconds();
#endif
    frame=0;
    roi = boundingBox;

    //calclulate output sigma
    output_sigma=sqrt(roi.width*roi.height)*params.output_sigma_factor;
    output_sigma=-0.5/(output_sigma*output_sigma);

    //resize the ROI whenever needed
    if(params.resize && roi.width*roi.height>params.max_patch_size){
      resizeImage=true;
      roi.x/=2.0;
      roi.y/=2.0;
      roi.width/=2.0;
      roi.height/=2.0;
    }

    // add padding to the roi
    roi.x-=roi.width/2;
    roi.y-=roi.height/2;
    roi.width*=2;
    roi.height*=2;

    // initialize the hann window filter
    createHanningWindow(hann, roi.size(), CV_64F);

    // hann window filter for CN feature (10 identical channels)
    Mat _layer[] = {hann, hann, hann, hann, hann, hann, hann, hann, hann, hann};
    merge(_layer, 10, hann_cn);

    // create gaussian response
    y=Mat::zeros((int)roi.height,(int)roi.width,CV_64F);
    for(unsigned i=0;i<roi.height;i++){
      for(unsigned j=0;j<roi.width;j++){
        y.at<double>(i,j)=(i-roi.height/2+1)*(i-roi.height/2+1)+(j-roi.width/2+1)*(j-roi.width/2+1);
      }
    }
    y*=(double)output_sigma;
    cv::exp(y,y);

    // perform fourier transfor to the gaussian response
    fft2(y,yf);

    model=Ptr<TrackerKCFModel>(new TrackerKCFModel(params));

    // record the non-compressed descriptors
    if((params.desc_npca & GRAY) == GRAY)descriptors_npca.push_back(GRAY);
    if((params.desc_npca & CN) == CN)descriptors_npca.push_back(CN);
    if(use_custom_extractor_npca)descriptors_npca.push_back(CUSTOM);
    features_npca.resize(descriptors_npca.size());

    // record the compressed descriptors
    if((params.desc_pca & GRAY) == GRAY)descriptors_pca.push_back(GRAY);
    if((params.desc_pca & CN) == CN)descriptors_pca.push_back(CN);
    if(use_custom_extractor_pca)descriptors_pca.push_back(CUSTOM);
    features_pca.resize(descriptors_pca.size());

    // accept only the available descriptor modes
    CV_Assert(
      (params.desc_pca & GRAY) == GRAY
      || (params.desc_npca & GRAY) == GRAY
      || (params.desc_pca & CN) == CN
      || (params.desc_npca & CN) == CN
      || use_custom_extractor_pca
      || use_custom_extractor_npca
    );

    // Initialize ExtractCN GpuMats
    cuda::createContinuous(roi.size(), CV_8UC3, patch_data_gpu);
    cuda::createContinuous(roi.size(), CV_16U, indexes_gpu);
    hann_cn_gpu.upload(hann_cn);

    // Initialize pca_data_gpu GpuMat
    cuda::createContinuous(roi.size(), CV_64F, pca_data_gpu);

    // Initialize fft2 GpuMats; width/2+1 is the CCE-packed spectrum width
    Size complex_size(roi.size().width/2+1, roi.size().height);
    int num_channels = image.channels();
    cuda::createContinuous(complex_size, CV_64FC2, xyf_c_gpu);
    cuda::createContinuous(roi.size(), CV_64F, xyf_r_gpu);
    xf_data_gpu.resize(num_channels);
    yf_data_gpu.resize(num_channels);
    layers_data_gpu.resize(num_channels);
    xyf_v_gpu.resize(num_channels);
    for (int i = 0; i < num_channels; i++){
      cuda::createContinuous(roi.size(), CV_64F, layers_data_gpu[i]);
      cuda::createContinuous(complex_size, CV_64FC2, xf_data_gpu[i]);
      cuda::createContinuous(complex_size, CV_64FC2, yf_data_gpu[i]);
    }

    // Initialize ColorNames: 2^15 RGB bins x 10 ColorName weights, uploaded once
    size_t ColorNames_size = 32768 * 10 * sizeof(double); //2^15 * 10
    cudaSafeCall(hipMalloc((void**) &ColorNames_gpu, ColorNames_size));
    cudaSafeCall(hipMemcpy(ColorNames_gpu, ColorNames, ColorNames_size, hipMemcpyHostToDevice));

#if TIME
    printInitializationTime(startInit);
#endif

    // TODO: return true only if roi inside the image
    return true;
  }

  /*
   * Main part of the KCF algorithm
   */
  bool TackerKCFImplParallel::updateImpl( const Mat& image, Rect2d&
boundingBox ){
    // Per-frame KCF update: (1) detect the target's new position from the
    // correlation response (frame > 0 only), (2) re-extract features at the
    // shifted ROI, (3) refresh the PCA projection, and (4) update the
    // regularized least-squares coefficients (alphaf).
#if TIME
    double startUpdate = CycleTimer::currentSeconds();
#endif
    double minVal, maxVal;	// min-max response
    Point minLoc,maxLoc;	// min-max location

    Mat img=image.clone();
    // check the channels of the input image, grayscale is preferred
    CV_Assert(img.channels() == 1 || img.channels() == 3);

    // resize the image whenever needed
    if(resizeImage)resize(img,img,Size(img.cols/2,img.rows/2));

#if TIME
    double startDetection = CycleTimer::currentSeconds();
#endif

    // detection part
    if(frame>0){
#if TIME == 2
      double startDetectionDetail = CycleTimer::currentSeconds();
#endif
      // extract and pre-process the patch
      // get non compressed descriptors
      for(unsigned i=0;i<descriptors_npca.size()-extractor_npca.size();i++){
        if(!getSubWindow(img,roi, features_npca[i], img_Patch, descriptors_npca[i]))returnFromUpdate();
      }
#if TIME == 2
      updateTimeDetail(&startDetectionDetail, 0, 0);
#endif
      //get non-compressed custom descriptors
      for(unsigned i=0,j=(unsigned)(descriptors_npca.size()-extractor_npca.size());i<extractor_npca.size();i++,j++){
        if(!getSubWindow(img,roi, features_npca[j], extractor_npca[i]))returnFromUpdate();
      }
      if(features_npca.size()>0)merge(features_npca,X[1]);
#if TIME == 2
      updateTimeDetail(&startDetectionDetail, 0, 1);
#endif
      // get compressed descriptors
      for(unsigned i=0;i<descriptors_pca.size()-extractor_pca.size();i++){
        if(!getSubWindow(img,roi, features_pca[i], img_Patch, descriptors_pca[i]))returnFromUpdate();
      }
#if TIME == 2
      updateTimeDetail(&startDetectionDetail, 0, 2);
#endif
      //get compressed custom descriptors
      for(unsigned i=0,j=(unsigned)(descriptors_pca.size()-extractor_pca.size());i<extractor_pca.size();i++,j++){
        if(!getSubWindow(img,roi, features_pca[j], extractor_pca[i]))returnFromUpdate();
      }
      if(features_pca.size()>0)merge(features_pca,X[0]);
#if TIME == 2
      updateTimeDetail(&startDetectionDetail, 0, 3);
#endif
      //compress the features and the KRSL model
      if(params.desc_pca !=0){
        compress(proj_mtx,X[0],X[0],data_temp,compress_data);
        compress(proj_mtx,Z[0],Zc[0],data_temp,compress_data);
      }

      // copy the compressed KRLS model
      Zc[1] = Z[1];
#if TIME == 2
      updateTimeDetail(&startDetectionDetail, 0, 4);
#endif
      // merge all features
      if(features_npca.size()==0){
        x = X[0];
        z = Zc[0];
      }else if(features_pca.size()==0){
        x = X[1];
        z = Z[1];
      }else{
        merge(X,2,x);
        merge(Zc,2,z);
      }
#if TIME == 2
      updateTimeDetail(&startDetectionDetail, 0, 5);
#endif
      //compute the gaussian kernel
      denseGaussKernel(params.sigma,x,z,k,layers,vxf,vyf,vxyf,xy_data,xyf_data);
#if TIME == 2
      updateTimeDetail(&startDetectionDetail, 0, 6);
#endif
      // compute the fourier transform of the kernel
      fft2(k,kf);
      // spec2 is lazily allocated on the first detection frame
      if(frame==1)spec2=Mat_<Vec2d >(kf.rows, kf.cols);
#if TIME == 2
      updateTimeDetail(&startDetectionDetail, 0, 7);
#endif
      // calculate filter response
      if(params.split_coeff)
        calcResponse(alphaf,alphaf_den,kf,response, spec, spec2);
      else
        calcResponse(alphaf,kf,response, spec);
#if TIME == 2
      updateTimeDetail(&startDetectionDetail, 0, 8);
#endif
      // extract the maximum response and shift the ROI to the peak location
      minMaxLoc( response, &minVal, &maxVal, &minLoc, &maxLoc );
      roi.x+=(maxLoc.x-roi.width/2+1);
      roi.y+=(maxLoc.y-roi.height/2+1);
#if TIME == 2
      updateTimeDetail(&startDetectionDetail, 0, 9);
#endif
    }

#if TIME
    updateTime(startDetection, 0);
    double startPatches = CycleTimer::currentSeconds();
#endif
#if TIME == 2
    double startPatchesDetail = startPatches;
#endif

    // update the bounding box (undo the 2x padding; rescale if the image was halved)
    boundingBox.x=(resizeImage?roi.x*2:roi.x)+(resizeImage?roi.width*2:roi.width)/4;
    boundingBox.y=(resizeImage?roi.y*2:roi.y)+(resizeImage?roi.height*2:roi.height)/4;
    boundingBox.width = (resizeImage?roi.width*2:roi.width)/2;
    boundingBox.height = (resizeImage?roi.height*2:roi.height)/2;
#if TIME == 2
    updateTimeDetail(&startPatchesDetail, 1, 0);
#endif

    // extract the patch for learning purpose
    // get non compressed descriptors
    for(unsigned i=0;i<descriptors_npca.size()-extractor_npca.size();i++){
      if(!getSubWindow(img,roi, features_npca[i], img_Patch, descriptors_npca[i]))returnFromUpdate();
    }
#if TIME == 2
    updateTimeDetail(&startPatchesDetail, 1, 1);
#endif
    //get non-compressed custom descriptors
    for(unsigned i=0,j=(unsigned)(descriptors_npca.size()-extractor_npca.size());i<extractor_npca.size();i++,j++){
      if(!getSubWindow(img,roi, features_npca[j], extractor_npca[i]))returnFromUpdate();
    }
    if(features_npca.size()>0)merge(features_npca,X[1]);
#if TIME == 2
    updateTimeDetail(&startPatchesDetail, 1, 2);
#endif
    // get compressed descriptors
    for(unsigned i=0;i<descriptors_pca.size()-extractor_pca.size();i++){
      if(!getSubWindow(img,roi, features_pca[i], img_Patch, descriptors_pca[i]))returnFromUpdate();
    }
#if TIME == 2
    updateTimeDetail(&startPatchesDetail, 1, 3);
#endif
    //get compressed custom descriptors
    for(unsigned i=0,j=(unsigned)(descriptors_pca.size()-extractor_pca.size());i<extractor_pca.size();i++,j++){
      if(!getSubWindow(img,roi, features_pca[j], extractor_pca[i]))returnFromUpdate();
    }
    if(features_pca.size()>0)merge(features_pca,X[0]);
#if TIME == 2
    updateTimeDetail(&startPatchesDetail, 1, 4);
#endif
    //update the training data (running average with interp_factor)
    if(frame==0){
      Z[0] = X[0].clone();
      Z[1] = X[1].clone();
    }else{
      Z[0]=(1.0-params.interp_factor)*Z[0]+params.interp_factor*X[0];
      Z[1]=(1.0-params.interp_factor)*Z[1]+params.interp_factor*X[1];
    }
#if TIME == 2
    updateTimeDetail(&startPatchesDetail, 1, 5);
#endif

#if TIME
    updateTime(startPatches, 1);
    double startCompression = CycleTimer::currentSeconds();
#endif
#if TIME == 2
    double startCompressionDetail = startCompression;
#endif

    if(params.desc_pca !=0 || use_custom_extractor_pca){
      // initialize the vector of Mat variables
      if(frame==0){
        layers_pca_data.resize(Z[0].channels());
        average_data.resize(Z[0].channels());
      }

      // feature compression
      updateProjectionMatrix(Z[0],old_cov_mtx,proj_mtx,params.pca_learning_rate,params.compressed_size,layers_pca_data,average_data,data_pca, new_covar,w_data,u_data,vt_data);
#if TIME == 2
      updateTimeDetail(&startCompressionDetail, 2, 0);
#endif
      compress(proj_mtx,X[0],X[0],data_temp,compress_data);
#if TIME == 2
      updateTimeDetail(&startCompressionDetail, 2, 1);
#endif
    }

    // merge all features
    if(features_npca.size()==0)
      x = X[0];
    else if(features_pca.size()==0)
      x = X[1];
    else
      merge(X,2,x);
#if TIME == 2
    updateTimeDetail(&startCompressionDetail, 2, 2);
#endif
#if TIME
    updateTime(startCompression, 2);
    double startLeastSquares = CycleTimer::currentSeconds();
#endif
#if TIME == 2
    double startLeastSquaresDetail = startLeastSquares;
#endif

    // initialize some required Mat variables
    if(frame==0){
      layers.resize(x.channels());
      vxf.resize(x.channels());
      vyf.resize(x.channels());
      vxyf.resize(vyf.size());
      new_alphaf=Mat_<Vec2d >(yf.rows, yf.cols);
    }
#if TIME == 2
    updateTimeDetail(&startLeastSquaresDetail, 3, 0);
#endif

    // Kernel Regularized Least-Squares, calculate alphas
    denseGaussKernel(params.sigma,x,x,k,layers,vxf,vyf,vxyf,xy_data,xyf_data);
#if TIME == 2
    updateTimeDetail(&startLeastSquaresDetail, 3, 1);
#endif
    // compute the fourier transform of the kernel and add a small value
    fft2(k,kf);
#if TIME == 2
    updateTimeDetail(&startLeastSquaresDetail, 3, 2);
#endif
    kf_lambda=kf+params.lambda;
#if TIME == 2
    updateTimeDetail(&startLeastSquaresDetail, 3, 3);
#endif
    double den;
    if(params.split_coeff){
      // keep numerator and denominator separate; combined later in calcResponse
      mulSpectrums(yf,kf,new_alphaf,0);
      mulSpectrums(kf,kf_lambda,new_alphaf_den,0);
    }else{
      // element-wise complex division yf / kf_lambda
      for(int i=0;i<yf.rows;i++){
        for(int j=0;j<yf.cols;j++){
          den = 1.0/(kf_lambda.at<Vec2d>(i,j)[0]*kf_lambda.at<Vec2d>(i,j)[0]+kf_lambda.at<Vec2d>(i,j)[1]*kf_lambda.at<Vec2d>(i,j)[1]);

          new_alphaf.at<Vec2d>(i,j)[0]=
            (yf.at<Vec2d>(i,j)[0]*kf_lambda.at<Vec2d>(i,j)[0]+yf.at<Vec2d>(i,j)[1]*kf_lambda.at<Vec2d>(i,j)[1])*den;
          new_alphaf.at<Vec2d>(i,j)[1]=
            (yf.at<Vec2d>(i,j)[1]*kf_lambda.at<Vec2d>(i,j)[0]-yf.at<Vec2d>(i,j)[0]*kf_lambda.at<Vec2d>(i,j)[1])*den;
        }
      }
    }
#if TIME == 2
    updateTimeDetail(&startLeastSquaresDetail, 3, 4);
#endif

    // update the RLS model
    if(frame==0){
      alphaf=new_alphaf.clone();
      if(params.split_coeff)alphaf_den=new_alphaf_den.clone();
    }else{
      alphaf=(1.0-params.interp_factor)*alphaf+params.interp_factor*new_alphaf;
      if(params.split_coeff)alphaf_den=(1.0-params.interp_factor)*alphaf_den+params.interp_factor*new_alphaf_den;
    }
#if TIME == 2
    updateTimeDetail(&startLeastSquaresDetail, 3, 5);
#endif
#if TIME
    updateTime(startLeastSquares, 3);
    updateTime(startUpdate, 4);
    printAverageTimes();
#endif

    frame++;
    return true;
  }

  /*-------------------------------------
  |  implementation of the KCF functions
  |-------------------------------------*/

  /*
   * hann window filter
   */
  void TackerKCFImplParallel::createHanningWindow(OutputArray dest, const cv::Size winSize, const int type) const {
    CV_Assert( type == CV_32FC1 || type == CV_64FC1 );

    dest.create(winSize, type);
    Mat dst = dest.getMat();

    int rows = dst.rows, cols = dst.cols;

    AutoBuffer<double> _wc(cols);
    double * const wc = (double *)_wc;

    double coeff0 = 2.0 * CV_PI / (double)(cols - 1), coeff1 = 2.0f * CV_PI / (double)(rows - 1);
    // separable window: precompute the column weights once per row sweep
    for(int j = 0; j < cols; j++)
      wc[j] = 0.5 * (1.0 - cos(coeff0 * j));

    if(dst.depth() == CV_32F){
      for(int i = 0; i < rows; i++){
        float* dstData = dst.ptr<float>(i);
        double wr = 0.5 * (1.0 - cos(coeff1 * i));
        for(int j = 0; j < cols; j++)
          dstData[j] = (float)(wr * wc[j]);
      }
    }else{
      for(int i = 0; i < rows; i++){
        double* dstData = dst.ptr<double>(i);
        double wr = 0.5 * (1.0 - cos(coeff1 * i));
        for(int j = 0; j < cols; j++)
          dstData[j] = wr * wc[j];
      }
    }

    // perform batch sqrt for SSE performance gains
    //cv::sqrt(dst, dst); //matlab do not use the square rooted version
  }

  /*
   * simplification of fourier transform function in opencv
   */
  void inline TackerKCFImplParallel::fft2(const Mat src, Mat & dest) const {
    dft(src,dest,DFT_COMPLEX_OUTPUT);
  }

  // Multi-channel variant: split, then transform each channel independently.
  void inline TackerKCFImplParallel::fft2(const Mat src, std::vector<Mat> & dest, std::vector<Mat> & layers_data) const {
    split(src, layers_data);

    for(int i=0;i<src.channels();i++){
      dft(layers_data[i],dest[i],DFT_COMPLEX_OUTPUT);
    }
  }

  void inline TackerKCFImplParallel::cudafft2(int num_channels,
std::vector<cuda::GpuMat> & dest, std::vector<cuda::GpuMat> & layers_data) {
    // Forward DFT on the GPU, one CCE-packed complex transform per channel.
    for (int i = 0; i < num_channels; i++) {
      cuda::dft(layers_data[i], dest[i], layers_data[i].size(), DFT_DOUBLE);
    }
  }

  /*
   * simplification of inverse fourier transform function in opencv
   */
  void inline TackerKCFImplParallel::ifft2(const Mat src, Mat & dest) const {
    idft(src,dest,DFT_SCALE+DFT_REAL_OUTPUT);
  }

  // GPU inverse DFT from CCE-packed spectrum back to a real matrix.
  void inline TackerKCFImplParallel::cudaifft2(const cuda::GpuMat src, cuda::GpuMat & dest) {
    cuda::GpuMat src_cce;
    src_cce = src;
    // The size correection is necessary to account for the CCE format
    cv::Size dest_size((src.size().width -1)*2,src.size().height);

    cuda::dft(src_cce, dest, dest_size,
        (DFT_SCALE + DFT_REAL_OUTPUT) | DFT_INVERSE | DFT_DOUBLE);
  }

  // Expand half a matrix by inferring the complex conjugates of the cols to
  // complete the second half
  void inline TackerKCFImplParallel::cce2full(const Mat src, Mat & dest) {
    // Assume that the original size of the matrix was divisible by 2
    Mat result(cv::Size((src.size().width-1)*2,src.size().height),src.type());
    for (int j=0; j < (src.size().width-1)*2;j++) {
      for (int i = 0; i < src.size().height;i++) {
        if (j <src.size().width-1) {
          // first half: straight copy
          result.at<Vec2d>(i,j)[0] = src.at<Vec2d>(i,j)[0];
          result.at<Vec2d>(i,j)[1] = src.at<Vec2d>(i,j)[1];
        } else {
          // second half: mirrored complex conjugate
          result.at<Vec2d>(i,j)[0] = src.at<Vec2d>(i,2*(src.size().width - 1) - j)[0];
          result.at<Vec2d>(i,j)[1] = - src.at<Vec2d>(i,2*(src.size().width -1) - j)[1];
        }
      }
    }
    dest = result;
  }

  // Inverse of cce2full: keep only the non-redundant left half (a view, not a copy).
  void inline TackerKCFImplParallel::full2cce(const Mat src, Mat & dest) {
    //We take the first half of the matrix
    cv::Rect roi(0, 0, src.size().width/2+1, src.size().height);
    dest = src(roi);
  }

  /*
   * Point-wise multiplication of two Multichannel Mat data
   */
  void inline TackerKCFImplParallel::pixelWiseMult(const std::vector<cuda::GpuMat> src1, const std::vector<cuda::GpuMat> src2, std::vector<cuda::GpuMat> & dest, const int flags, const bool conjB) const {
    for(unsigned i=0;i<src1.size();i++){
      cv::cuda::mulSpectrums(src1[i], src2[i], dest[i],flags,conjB);
    }
  }

  /*
   * Combines all channels in a multi-channels Mat data into a single channel
   */
  void inline TackerKCFImplParallel::sumChannels(std::vector<cuda::GpuMat> src, cuda::GpuMat & dest) const {
    src[0].copyTo(dest);
    for(unsigned i=1;i<src.size();i++){
      cuda::add(src[i],dest,dest);
    }
  }

  /*
   * obtains the projection matrix using PCA
   * The covariance matrix (the only O(pixels) part) is computed on the GPU
   * via gemm; the small channels x channels SVD stays on the CPU.
   */
  void inline TackerKCFImplParallel::updateProjectionMatrix(const Mat src, Mat & old_cov,Mat &  proj_matrix, double pca_rate, int compressed_sz,
                                                     std::vector<Mat> & layers_pca,std::vector<Scalar> & average, Mat pca_data, Mat new_cov, Mat w, Mat u, Mat vt) {
    GpuMat new_cov_gpu;
    // (an unused CycleTimer::currentSeconds() sample was removed here — its
    // result was never read)
    CV_Assert(compressed_sz<=src.channels());

    // center each channel around its mean
    split(src,layers_pca);

    for (int i=0;i<src.channels();i++){
      average[i]=mean(layers_pca[i]);
      layers_pca[i]-=average[i];
    }

    // calc covariance matrix: (1/(n-1)) * data^T * data on the GPU
    merge(layers_pca,pca_data);
    pca_data=pca_data.reshape(1,src.rows*src.cols);

    pca_data_gpu.upload(pca_data);
    GpuMat src3;  // dummy third operand for gemm (beta == 0)
    cuda::gemm(pca_data_gpu, pca_data_gpu, 1.0/(double)(src.rows*src.cols-1), src3, 0, new_cov_gpu, GEMM_1_T);
    new_cov_gpu.download(new_cov);

    if(old_cov.rows==0)old_cov=new_cov.clone();

    // calc PCA on the blended covariance
    SVD::compute((1.0-pca_rate)*old_cov+pca_rate*new_cov, w, u, vt);

    // extract the projection matrix (top compressed_sz singular vectors)
    proj_matrix=u(Rect(0,0,compressed_sz,src.channels())).clone();
    Mat proj_vars=Mat::eye(compressed_sz,compressed_sz,proj_matrix.type());
    for(int i=0;i<compressed_sz;i++){
      proj_vars.at<double>(i,i)=w.at<double>(i);
    }

    // update the covariance matrix
    old_cov=(1.0-pca_rate)*old_cov+pca_rate*proj_matrix*proj_vars*proj_matrix.t();
  }

  /*
   * compress the features: project pixels x channels data through proj_matrix
   */
  void inline TackerKCFImplParallel::compress(const Mat proj_matrix, const Mat src, Mat & dest, Mat & data, Mat & compressed) const {
    data=src.reshape(1,src.rows*src.cols);
    compressed=data*proj_matrix;
    dest=compressed.reshape(proj_matrix.cols,src.rows).clone();
  }

  /*
   * obtain the patch and apply hann window filter to
it */
  bool TackerKCFImplParallel::getSubWindow(const Mat img, const Rect _roi, Mat& feat, Mat& patch, TrackerKCF::MODE desc) {

    Rect region=_roi;

    // return false if roi is outside the image
    if((_roi.x+_roi.width<0)
      ||(_roi.y+_roi.height<0)
      ||(_roi.x>=img.cols)
      ||(_roi.y>=img.rows)
    )return false;

    // extract patch inside the image
    if(_roi.x<0){region.x=0;region.width+=_roi.x;}
    if(_roi.y<0){region.y=0;region.height+=_roi.y;}
    if(_roi.x+_roi.width>img.cols)region.width=img.cols-_roi.x;
    if(_roi.y+_roi.height>img.rows)region.height=img.rows-_roi.y;
    if(region.width>img.cols)region.width=img.cols;
    if(region.height>img.rows)region.height=img.rows;

    patch=img(region).clone();

    // add some padding to compensate when the patch is outside image border
    int addTop,addBottom, addLeft, addRight;
    addTop=region.y-_roi.y;
    addBottom=(_roi.height+_roi.y>img.rows?_roi.height+_roi.y-img.rows:0);
    addLeft=region.x-_roi.x;
    addRight=(_roi.width+_roi.x>img.cols?_roi.width+_roi.x-img.cols:0);

    copyMakeBorder(patch,patch,addTop,addBottom,addLeft,addRight,BORDER_REPLICATE);
    if(patch.rows==0 || patch.cols==0)return false;

    // extract the desired descriptors
    switch(desc){
      case CN:
        CV_Assert(img.channels() == 3);
        // extractCN applies the hann window on the GPU already
        extractCN(patch,feat);
        //feat=feat.mul(hann_cn); // hann window filter
        break;
      default: // GRAY
        if(img.channels()>1)
          cvtColor(patch,feat, CV_BGR2GRAY);
        else
          feat=patch;
        feat.convertTo(feat,CV_64F);
        feat=feat/255.0-0.5; // normalize to range -0.5 .. 0.5
        feat=feat.mul(hann); // hann window filter
        break;
    }

    return true;
  }

  /*
   * get feature using external function
   */
  bool TackerKCFImplParallel::getSubWindow(const Mat img, const Rect _roi, Mat& feat, void (*f)(const Mat, const Rect, Mat& )) const{

    // return false if roi is outside the image
    if((_roi.x+_roi.width<0)
      ||(_roi.y+_roi.height<0)
      ||(_roi.x>=img.cols)
      ||(_roi.y>=img.rows)
    )return false;

    f(img, _roi, feat);

    if(_roi.width != feat.cols || _roi.height != feat.rows){
      printf("error in customized function of features extractor!\n");
      printf("Rules: roi.width==feat.cols && roi.height = feat.rows \n");
    }

    Mat hann_win;
    std::vector<Mat> _layers;

    // replicate the hann window across every feature channel
    for(int i=0;i<feat.channels();i++)
      _layers.push_back(hann);

    merge(_layers, hann_win);

    feat=feat.mul(hann_win); // hann window filter

    return true;
  }

  // Maps each BGR pixel to its 15-bit ColorNames table index
  // (5 bits per channel). One thread per pixel; 2D launch.
  __global__ void extractIndexKernel(const cuda::PtrStepSz<uchar3> input, cuda::PtrStep<ushort> output)
  {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;

    if (x >= 0 && x < input.cols && y >= 0 && y < input.rows)
    {
        uchar3 pixel = input(y,x);
        output.ptr(y)[x] = (floor((float)pixel.z/8)+32*floor((float)pixel.y/8)+32*32*floor((float)pixel.x/8));
    }
  }

  // Gathers the 10 ColorNames weights for each pixel's table index.
  // 3D launch: (x, y) over pixels, z over the 10 ColorNames channels.
  __global__ void extractCNKernel(const cuda::PtrStepSz<ushort> input, cuda::PtrStep<double[10]> output, const double *ColorNames)
  {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int k = blockIdx.z * blockDim.z + threadIdx.z;

    if (x >= 0 && x < input.cols && y >= 0 && y < input.rows && k >= 0 && k < 10)
    {
        short index = input(y,x);
        output.ptr(y)[x][k] = ColorNames[10*index + k];
        //output.ptr(y)[x] = (floor((float)pixel.z/8)+32*floor((float)pixel.y/8)+32*32*floor((float)pixel.x/8))
;
    }
  }

  /* Convert BGR to ColorNames
   * Two-kernel GPU pipeline: index lookup, then table gather; the hann
   * window is applied on the device before downloading the result.
   */
  void TackerKCFImplParallel::extractCN(Mat patch_data, Mat & cnFeatures) {
    if(cnFeatures.type() != CV_64FC(10)) {
      cnFeatures = Mat::zeros(patch_data.rows,patch_data.cols,CV_64FC(10));
    }

    patch_data_gpu.upload(patch_data);

    dim3 cthreads2d(32, 32);
    dim3 cblocks2d(
      static_cast<int>(::ceil(patch_data_gpu.size().width /
        static_cast<double>(cthreads2d.x))),
      static_cast<int>(::ceil(patch_data_gpu.size().height /
        static_cast<double>(cthreads2d.y))));

    hipLaunchKernelGGL(( extractIndexKernel), dim3(cblocks2d), dim3(cthreads2d), 0, 0,  patch_data_gpu, indexes_gpu);
    cudaSafeCall(hipGetLastError());

    // NOTE(review): cnFeatures_gpu is re-allocated on every call — candidate
    // for hoisting into a member buffer like the other persistent GpuMats.
    cuda::GpuMat cnFeatures_gpu;
    cuda::createContinuous(patch_data.size(), CV_64FC(10), cnFeatures_gpu);

    dim3 cthreads3d(32, 32, 1);
    dim3 cblocks3d(
      static_cast<int>(::ceil(patch_data_gpu.size().width /
        static_cast<double>(cthreads3d.x))),
      static_cast<int>(::ceil(patch_data_gpu.size().height /
        static_cast<double>(cthreads3d.y))),
      static_cast<int>(::ceil(10 /
        static_cast<double>(cthreads3d.z))));

    hipLaunchKernelGGL(( extractCNKernel), dim3(cblocks3d), dim3(cthreads3d), 0, 0,  indexes_gpu, cnFeatures_gpu, ColorNames_gpu);
    cudaSafeCall(hipGetLastError());

    cuda::multiply(cnFeatures_gpu, hann_cn_gpu, cnFeatures_gpu);

    cnFeatures_gpu.download(cnFeatures);
  }

  /*
   * dense gauss kernel function
   * Computes k(x, y) = exp(-1/sigma^2 * max(0, (|x|^2 + |y|^2 - 2 x*y) / numel))
   * with the cross-correlation term evaluated in the Fourier domain on the GPU.
   */
  void TackerKCFImplParallel::denseGaussKernel(const double sigma, const Mat x_data, const Mat y_data, Mat & k_data,
                                        std::vector<Mat> & layers_data,std::vector<Mat> & xf_data,std::vector<Mat> & yf_data, std::vector<Mat> xyf_v, Mat xy, Mat xyf ) {
    // First we download all the data onto the Gpu
    // NOTE(review): num_channels is never read below — candidate for removal.
    int num_channels = x_data.channels();

    double normX = norm(x_data, NORM_L2SQR);
    double normY = norm(y_data, NORM_L2SQR);

    cv::cuda::Stream stream;

    split(x_data, layers_data);
    for (int i = 0; i < x_data.channels(); i++){
      layers_data_gpu[i].upload(layers_data[i], stream);
    }
    stream.waitForCompletion();
    cudafft2(x_data.channels(),xf_data_gpu,layers_data_gpu);

    split(y_data, layers_data);
    for (int i = 0; i < x_data.channels(); i++){
      layers_data_gpu[i].upload(layers_data[i], stream);
    }
    stream.waitForCompletion();
    cudafft2(y_data.channels(),yf_data_gpu,layers_data_gpu);

    // conj(xf) .* yf, summed over channels
    pixelWiseMult(xf_data_gpu,yf_data_gpu,xyf_v_gpu,0,true);
    sumChannels(xyf_v_gpu,xyf_c_gpu);
    cudaifft2(xyf_c_gpu,xyf_r_gpu);

    xyf_r_gpu.download(xyf);

    if(params.wrap_kernel){
      shiftRows(xyf, x_data.rows/2);
      shiftCols(xyf, x_data.cols/2);
    }

    //(xx + yy - 2 * xy) / numel(x)
    xy=(normX+normY-2*xyf)/(x_data.rows*x_data.cols*x_data.channels());

    // TODO: check wether we really need thresholding or not
    //threshold(xy,xy,0.0,0.0,THRESH_TOZERO);//max(0, (xx + yy - 2 * xy) / numel(x))
    // clamp negatives (numerical noise) to zero
    for(int i=0;i<xy.rows;i++){
      for(int j=0;j<xy.cols;j++){
        if(xy.at<double>(i,j)<0.0)xy.at<double>(i,j)=0.0;
      }
    }

    double sig=-1.0/(sigma*sigma);
    xy=sig*xy;
    exp(xy, k_data);
  }

  /* CIRCULAR SHIFT Function
   * http://stackoverflow.com/questions/10420454/shift-like-matlab-function-rows-or-columns-of-a-matrix-in-opencv
   */
  // circular shift one row from up to down
  void TackerKCFImplParallel::shiftRows(Mat& mat) const {
    Mat temp;
    Mat m;
    int _k = (mat.rows-1);
    // save the last row, slide everything down one, restore it at the top
    mat.row(_k).copyTo(temp);
    for(; _k > 0 ; _k-- ) {
      m = mat.row(_k);
      mat.row(_k-1).copyTo(m);
    }
    m = mat.row(0);
    temp.copyTo(m);
  }

  // circular shift n rows from up to down if n > 0, -n rows from down to up if n < 0
  void TackerKCFImplParallel::shiftRows(Mat& mat, int n) const {
    if( n < 0 ) {
      // shift up = flip, shift down, flip back
      n = -n;
      flip(mat,mat,0);
      for(int _k=0; _k < n;_k++) {
        shiftRows(mat);
      }
      flip(mat,mat,0);
    }else{
      for(int _k=0; _k < n;_k++) {
        shiftRows(mat);
      }
    }
  }

  //circular shift n columns from left to right if n > 0, -n columns from right to left if n < 0
  void TackerKCFImplParallel::shiftCols(Mat& mat, int n) const {
    if(n < 0){
      n = -n;
      flip(mat,mat,1);
      transpose(mat,mat);
      shiftRows(mat,n);
      transpose(mat,mat);
      flip(mat,mat,1);
    }else{
      // column shift implemented as transpose + row shift + transpose
      transpose(mat,mat);
      shiftRows(mat,n);
      transpose(mat,mat);
    }
  }

  /*
   * calculate the detection response
   */
  void TackerKCFImplParallel::calcResponse(const Mat alphaf_data, const Mat kf_data, Mat & response_data, Mat & spec_data) {
    //alpha f--> 2channels ; k --> 1 channel;
    mulSpectrums(alphaf_data,kf_data,spec_data,0,false);
    ifft2(spec_data,response_data);
  }

  /*
   * calculate the detection response for splitted form
   * (numerator alphaf and denominator alphaf_den kept separate; the complex
   * division is performed here, element by element)
   */
  void TackerKCFImplParallel::calcResponse(const Mat alphaf_data, const Mat _alphaf_den, const Mat kf_data, Mat & response_data, Mat & spec_data, Mat & spec2_data) {
    mulSpectrums(alphaf_data,kf_data,spec_data,0,false);

    //z=(a+bi)/(c+di)=[(ac+bd)+i(bc-ad)]/(c^2+d^2)
    double den;
    for(int i=0;i<kf_data.rows;i++){
      for(int j=0;j<kf_data.cols;j++){
        den=1.0/(_alphaf_den.at<Vec2d>(i,j)[0]*_alphaf_den.at<Vec2d>(i,j)[0]+_alphaf_den.at<Vec2d>(i,j)[1]*_alphaf_den.at<Vec2d>(i,j)[1]);
        spec2_data.at<Vec2d>(i,j)[0]=
          (spec_data.at<Vec2d>(i,j)[0]*_alphaf_den.at<Vec2d>(i,j)[0]+spec_data.at<Vec2d>(i,j)[1]*_alphaf_den.at<Vec2d>(i,j)[1])*den;
        spec2_data.at<Vec2d>(i,j)[1]=
          (spec_data.at<Vec2d>(i,j)[1]*_alphaf_den.at<Vec2d>(i,j)[0]-spec_data.at<Vec2d>(i,j)[0]*_alphaf_den.at<Vec2d>(i,j)[1])*den;
      }
    }

    ifft2(spec2_data,response_data);
  }

  // Register a user-supplied feature extractor for the compressed (PCA) or
  // non-compressed feature channel.
  void TackerKCFImplParallel::setFeatureExtractor(void (*f)(const Mat, const Rect, Mat&), bool pca_func){
    if(pca_func){
      extractor_pca.push_back(f);
      use_custom_extractor_pca = true;
    }else{
      extractor_npca.push_back(f);
      use_custom_extractor_npca = true;
    }
  }
  /*----------------------------------------------------------------------*/
}
0665d0d152d144c9a775e5ee0c686ac5a8a43636.cu
#include "trackerKCFparallel.hpp" #include <opencv2/cudaarithm.hpp> #include "dft.cu" #include "mulspectrums.cu" #define returnFromUpdate() {fprintf(stderr, "Error in %s line %d while updating frame %d\n", __FILE__, __LINE__, frame);} /*--------------------------- | TrackerKCFModel |---------------------------*/ namespace cv{ /** * \brief Implementation of TrackerModel for MIL algorithm */ class TrackerKCFModel : public TrackerModel{ public: TrackerKCFModel(TrackerKCF::Params /*params*/){} ~TrackerKCFModel(){} protected: void modelEstimationImpl( const std::vector<Mat>& /*responses*/ ){} void modelUpdateImpl(){} }; } /* namespace cv */ namespace helper { void MatType( Mat inputMat ) { int inttype = inputMat.type(); std::string r, a; uchar depth = inttype & CV_MAT_DEPTH_MASK; uchar chans = 1 + (inttype >> CV_CN_SHIFT); switch ( depth ) { case CV_8U: r = "8U"; a = "Mat.at<uchar>(y,x)"; break; case CV_8S: r = "8S"; a = "Mat.at<schar>(y,x)"; break; case CV_16U: r = "16U"; a = "Mat.at<ushort>(y,x)"; break; case CV_16S: r = "16S"; a = "Mat.at<short>(y,x)"; break; case CV_32S: r = "32S"; a = "Mat.at<int>(y,x)"; break; case CV_32F: r = "32F"; a = "Mat.at<float>(y,x)"; break; case CV_64F: r = "64F"; a = "Mat.at<double>(y,x)"; break; case CV_32FC2: r = "32FC2"; a = "Mat.at<complex float>(y,x)"; break; case CV_64FC2: r = "64FC2"; a = "Mat.at<complex double>(y,x)"; break; default: r = "User"; a = "Mat.at<UKNOWN>(y,x)"; break; } r += "C"; r += (chans+'0'); std::cout << "Mat is of type " << r << " and should be accessed with " << a << std::endl; } } namespace cv { /* * Constructor */ TackerKCFImplParallel::TackerKCFImplParallel( const TrackerKCF::Params &parameters ) : params( parameters ) { isInit = false; resizeImage = false; use_custom_extractor_pca = false; use_custom_extractor_npca = false; #if TIME total_lines = num_steps; for (int i = 0; i < num_steps; i++) { cumulated_times[i] = 0; } #if TIME == 2 for (int i = 0; i < num_steps - 1; i++) { total_lines += 
num_steps_details[i]; for (int j = 0; j < max_num_details; j++) { cumulated_details_times[i][j] = 0; } } #endif #endif } void TackerKCFImplParallel::read( const cv::FileNode& fn ){ params.read( fn ); } void TackerKCFImplParallel::write( cv::FileStorage& fs ) const { params.write( fs ); } /* * Initialization: * - creating hann window filter * - ROI padding * - creating a gaussian response for the training ground-truth * - perform FFT to the gaussian response */ bool TackerKCFImplParallel::initImpl( const Mat& image, const Rect2d& boundingBox ){ #if TIME double startInit = CycleTimer::currentSeconds(); #endif frame=0; roi = boundingBox; //calclulate output sigma output_sigma=sqrt(roi.width*roi.height)*params.output_sigma_factor; output_sigma=-0.5/(output_sigma*output_sigma); //resize the ROI whenever needed if(params.resize && roi.width*roi.height>params.max_patch_size){ resizeImage=true; roi.x/=2.0; roi.y/=2.0; roi.width/=2.0; roi.height/=2.0; } // add padding to the roi roi.x-=roi.width/2; roi.y-=roi.height/2; roi.width*=2; roi.height*=2; // initialize the hann window filter createHanningWindow(hann, roi.size(), CV_64F); // hann window filter for CN feature Mat _layer[] = {hann, hann, hann, hann, hann, hann, hann, hann, hann, hann}; merge(_layer, 10, hann_cn); // create gaussian response y=Mat::zeros((int)roi.height,(int)roi.width,CV_64F); for(unsigned i=0;i<roi.height;i++){ for(unsigned j=0;j<roi.width;j++){ y.at<double>(i,j)=(i-roi.height/2+1)*(i-roi.height/2+1)+(j-roi.width/2+1)*(j-roi.width/2+1); } } y*=(double)output_sigma; cv::exp(y,y); // perform fourier transfor to the gaussian response fft2(y,yf); model=Ptr<TrackerKCFModel>(new TrackerKCFModel(params)); // record the non-compressed descriptors if((params.desc_npca & GRAY) == GRAY)descriptors_npca.push_back(GRAY); if((params.desc_npca & CN) == CN)descriptors_npca.push_back(CN); if(use_custom_extractor_npca)descriptors_npca.push_back(CUSTOM); features_npca.resize(descriptors_npca.size()); // record the 
compressed descriptors if((params.desc_pca & GRAY) == GRAY)descriptors_pca.push_back(GRAY); if((params.desc_pca & CN) == CN)descriptors_pca.push_back(CN); if(use_custom_extractor_pca)descriptors_pca.push_back(CUSTOM); features_pca.resize(descriptors_pca.size()); // accept only the available descriptor modes CV_Assert( (params.desc_pca & GRAY) == GRAY || (params.desc_npca & GRAY) == GRAY || (params.desc_pca & CN) == CN || (params.desc_npca & CN) == CN || use_custom_extractor_pca || use_custom_extractor_npca ); // Initialize ExtractCN GpuMats cuda::createContinuous(roi.size(), CV_8UC3, patch_data_gpu); cuda::createContinuous(roi.size(), CV_16U, indexes_gpu); hann_cn_gpu.upload(hann_cn); // Initialize pca_data_gpu GpuMat cuda::createContinuous(roi.size(), CV_64F, pca_data_gpu); // Initialize fft2 GpuMats Size complex_size(roi.size().width/2+1, roi.size().height); int num_channels = image.channels(); cuda::createContinuous(complex_size, CV_64FC2, xyf_c_gpu); cuda::createContinuous(roi.size(), CV_64F, xyf_r_gpu); xf_data_gpu.resize(num_channels); yf_data_gpu.resize(num_channels); layers_data_gpu.resize(num_channels); xyf_v_gpu.resize(num_channels); for (int i = 0; i < num_channels; i++){ cuda::createContinuous(roi.size(), CV_64F, layers_data_gpu[i]); cuda::createContinuous(complex_size, CV_64FC2, xf_data_gpu[i]); cuda::createContinuous(complex_size, CV_64FC2, yf_data_gpu[i]); } // Initialize ColorNames size_t ColorNames_size = 32768 * 10 * sizeof(double); //2^15 * 10 cudaSafeCall(cudaMalloc((void**) &ColorNames_gpu, ColorNames_size)); cudaSafeCall(cudaMemcpy(ColorNames_gpu, ColorNames, ColorNames_size, cudaMemcpyHostToDevice)); #if TIME printInitializationTime(startInit); #endif // TODO: return true only if roi inside the image return true; } /* * Main part of the KCF algorithm */ bool TackerKCFImplParallel::updateImpl( const Mat& image, Rect2d& boundingBox ){ #if TIME double startUpdate = CycleTimer::currentSeconds(); #endif double minVal, maxVal; // min-max response 
Point minLoc,maxLoc; // min-max location Mat img=image.clone(); // check the channels of the input image, grayscale is preferred CV_Assert(img.channels() == 1 || img.channels() == 3); // resize the image whenever needed if(resizeImage)resize(img,img,Size(img.cols/2,img.rows/2)); #if TIME double startDetection = CycleTimer::currentSeconds(); #endif // detection part if(frame>0){ #if TIME == 2 double startDetectionDetail = CycleTimer::currentSeconds(); #endif // extract and pre-process the patch // get non compressed descriptors for(unsigned i=0;i<descriptors_npca.size()-extractor_npca.size();i++){ if(!getSubWindow(img,roi, features_npca[i], img_Patch, descriptors_npca[i]))returnFromUpdate(); } #if TIME == 2 updateTimeDetail(&startDetectionDetail, 0, 0); #endif //get non-compressed custom descriptors for(unsigned i=0,j=(unsigned)(descriptors_npca.size()-extractor_npca.size());i<extractor_npca.size();i++,j++){ if(!getSubWindow(img,roi, features_npca[j], extractor_npca[i]))returnFromUpdate(); } if(features_npca.size()>0)merge(features_npca,X[1]); #if TIME == 2 updateTimeDetail(&startDetectionDetail, 0, 1); #endif // get compressed descriptors for(unsigned i=0;i<descriptors_pca.size()-extractor_pca.size();i++){ if(!getSubWindow(img,roi, features_pca[i], img_Patch, descriptors_pca[i]))returnFromUpdate(); } #if TIME == 2 updateTimeDetail(&startDetectionDetail, 0, 2); #endif //get compressed custom descriptors for(unsigned i=0,j=(unsigned)(descriptors_pca.size()-extractor_pca.size());i<extractor_pca.size();i++,j++){ if(!getSubWindow(img,roi, features_pca[j], extractor_pca[i]))returnFromUpdate(); } if(features_pca.size()>0)merge(features_pca,X[0]); #if TIME == 2 updateTimeDetail(&startDetectionDetail, 0, 3); #endif //compress the features and the KRSL model if(params.desc_pca !=0){ compress(proj_mtx,X[0],X[0],data_temp,compress_data); compress(proj_mtx,Z[0],Zc[0],data_temp,compress_data); } // copy the compressed KRLS model Zc[1] = Z[1]; #if TIME == 2 
updateTimeDetail(&startDetectionDetail, 0, 4); #endif // merge all features if(features_npca.size()==0){ x = X[0]; z = Zc[0]; }else if(features_pca.size()==0){ x = X[1]; z = Z[1]; }else{ merge(X,2,x); merge(Zc,2,z); } #if TIME == 2 updateTimeDetail(&startDetectionDetail, 0, 5); #endif //compute the gaussian kernel denseGaussKernel(params.sigma,x,z,k,layers,vxf,vyf,vxyf,xy_data,xyf_data); #if TIME == 2 updateTimeDetail(&startDetectionDetail, 0, 6); #endif // compute the fourier transform of the kernel fft2(k,kf); if(frame==1)spec2=Mat_<Vec2d >(kf.rows, kf.cols); #if TIME == 2 updateTimeDetail(&startDetectionDetail, 0, 7); #endif // calculate filter response if(params.split_coeff) calcResponse(alphaf,alphaf_den,kf,response, spec, spec2); else calcResponse(alphaf,kf,response, spec); #if TIME == 2 updateTimeDetail(&startDetectionDetail, 0, 8); #endif // extract the maximum response minMaxLoc( response, &minVal, &maxVal, &minLoc, &maxLoc ); roi.x+=(maxLoc.x-roi.width/2+1); roi.y+=(maxLoc.y-roi.height/2+1); #if TIME == 2 updateTimeDetail(&startDetectionDetail, 0, 9); #endif } #if TIME updateTime(startDetection, 0); double startPatches = CycleTimer::currentSeconds(); #endif #if TIME == 2 double startPatchesDetail = startPatches; #endif // update the bounding box boundingBox.x=(resizeImage?roi.x*2:roi.x)+(resizeImage?roi.width*2:roi.width)/4; boundingBox.y=(resizeImage?roi.y*2:roi.y)+(resizeImage?roi.height*2:roi.height)/4; boundingBox.width = (resizeImage?roi.width*2:roi.width)/2; boundingBox.height = (resizeImage?roi.height*2:roi.height)/2; #if TIME == 2 updateTimeDetail(&startPatchesDetail, 1, 0); #endif // extract the patch for learning purpose // get non compressed descriptors for(unsigned i=0;i<descriptors_npca.size()-extractor_npca.size();i++){ if(!getSubWindow(img,roi, features_npca[i], img_Patch, descriptors_npca[i]))returnFromUpdate(); } #if TIME == 2 updateTimeDetail(&startPatchesDetail, 1, 1); #endif //get non-compressed custom descriptors for(unsigned 
i=0,j=(unsigned)(descriptors_npca.size()-extractor_npca.size());i<extractor_npca.size();i++,j++){ if(!getSubWindow(img,roi, features_npca[j], extractor_npca[i]))returnFromUpdate(); } if(features_npca.size()>0)merge(features_npca,X[1]); #if TIME == 2 updateTimeDetail(&startPatchesDetail, 1, 2); #endif // get compressed descriptors for(unsigned i=0;i<descriptors_pca.size()-extractor_pca.size();i++){ if(!getSubWindow(img,roi, features_pca[i], img_Patch, descriptors_pca[i]))returnFromUpdate(); } #if TIME == 2 updateTimeDetail(&startPatchesDetail, 1, 3); #endif //get compressed custom descriptors for(unsigned i=0,j=(unsigned)(descriptors_pca.size()-extractor_pca.size());i<extractor_pca.size();i++,j++){ if(!getSubWindow(img,roi, features_pca[j], extractor_pca[i]))returnFromUpdate(); } if(features_pca.size()>0)merge(features_pca,X[0]); #if TIME == 2 updateTimeDetail(&startPatchesDetail, 1, 4); #endif //update the training data if(frame==0){ Z[0] = X[0].clone(); Z[1] = X[1].clone(); }else{ Z[0]=(1.0-params.interp_factor)*Z[0]+params.interp_factor*X[0]; Z[1]=(1.0-params.interp_factor)*Z[1]+params.interp_factor*X[1]; } #if TIME == 2 updateTimeDetail(&startPatchesDetail, 1, 5); #endif #if TIME updateTime(startPatches, 1); double startCompression = CycleTimer::currentSeconds(); #endif #if TIME == 2 double startCompressionDetail = startCompression; #endif if(params.desc_pca !=0 || use_custom_extractor_pca){ // initialize the vector of Mat variables if(frame==0){ layers_pca_data.resize(Z[0].channels()); average_data.resize(Z[0].channels()); } // feature compression updateProjectionMatrix(Z[0],old_cov_mtx,proj_mtx,params.pca_learning_rate,params.compressed_size,layers_pca_data,average_data,data_pca, new_covar,w_data,u_data,vt_data); #if TIME == 2 updateTimeDetail(&startCompressionDetail, 2, 0); #endif compress(proj_mtx,X[0],X[0],data_temp,compress_data); #if TIME == 2 updateTimeDetail(&startCompressionDetail, 2, 1); #endif } // merge all features if(features_npca.size()==0) x = 
X[0]; else if(features_pca.size()==0) x = X[1]; else merge(X,2,x); #if TIME == 2 updateTimeDetail(&startCompressionDetail, 2, 2); #endif #if TIME updateTime(startCompression, 2); double startLeastSquares = CycleTimer::currentSeconds(); #endif #if TIME == 2 double startLeastSquaresDetail = startLeastSquares; #endif // initialize some required Mat variables if(frame==0){ layers.resize(x.channels()); vxf.resize(x.channels()); vyf.resize(x.channels()); vxyf.resize(vyf.size()); new_alphaf=Mat_<Vec2d >(yf.rows, yf.cols); } #if TIME == 2 updateTimeDetail(&startLeastSquaresDetail, 3, 0); #endif // Kernel Regularized Least-Squares, calculate alphas denseGaussKernel(params.sigma,x,x,k,layers,vxf,vyf,vxyf,xy_data,xyf_data); #if TIME == 2 updateTimeDetail(&startLeastSquaresDetail, 3, 1); #endif // compute the fourier transform of the kernel and add a small value fft2(k,kf); #if TIME == 2 updateTimeDetail(&startLeastSquaresDetail, 3, 2); #endif kf_lambda=kf+params.lambda; #if TIME == 2 updateTimeDetail(&startLeastSquaresDetail, 3, 3); #endif double den; if(params.split_coeff){ mulSpectrums(yf,kf,new_alphaf,0); mulSpectrums(kf,kf_lambda,new_alphaf_den,0); }else{ for(int i=0;i<yf.rows;i++){ for(int j=0;j<yf.cols;j++){ den = 1.0/(kf_lambda.at<Vec2d>(i,j)[0]*kf_lambda.at<Vec2d>(i,j)[0]+kf_lambda.at<Vec2d>(i,j)[1]*kf_lambda.at<Vec2d>(i,j)[1]); new_alphaf.at<Vec2d>(i,j)[0]= (yf.at<Vec2d>(i,j)[0]*kf_lambda.at<Vec2d>(i,j)[0]+yf.at<Vec2d>(i,j)[1]*kf_lambda.at<Vec2d>(i,j)[1])*den; new_alphaf.at<Vec2d>(i,j)[1]= (yf.at<Vec2d>(i,j)[1]*kf_lambda.at<Vec2d>(i,j)[0]-yf.at<Vec2d>(i,j)[0]*kf_lambda.at<Vec2d>(i,j)[1])*den; } } } #if TIME == 2 updateTimeDetail(&startLeastSquaresDetail, 3, 4); #endif // update the RLS model if(frame==0){ alphaf=new_alphaf.clone(); if(params.split_coeff)alphaf_den=new_alphaf_den.clone(); }else{ alphaf=(1.0-params.interp_factor)*alphaf+params.interp_factor*new_alphaf; 
if(params.split_coeff)alphaf_den=(1.0-params.interp_factor)*alphaf_den+params.interp_factor*new_alphaf_den; } #if TIME == 2 updateTimeDetail(&startLeastSquaresDetail, 3, 5); #endif #if TIME updateTime(startLeastSquares, 3); updateTime(startUpdate, 4); printAverageTimes(); #endif frame++; return true; } /*------------------------------------- | implementation of the KCF functions |-------------------------------------*/ /* * hann window filter */ void TackerKCFImplParallel::createHanningWindow(OutputArray dest, const cv::Size winSize, const int type) const { CV_Assert( type == CV_32FC1 || type == CV_64FC1 ); dest.create(winSize, type); Mat dst = dest.getMat(); int rows = dst.rows, cols = dst.cols; AutoBuffer<double> _wc(cols); double * const wc = (double *)_wc; double coeff0 = 2.0 * CV_PI / (double)(cols - 1), coeff1 = 2.0f * CV_PI / (double)(rows - 1); for(int j = 0; j < cols; j++) wc[j] = 0.5 * (1.0 - cos(coeff0 * j)); if(dst.depth() == CV_32F){ for(int i = 0; i < rows; i++){ float* dstData = dst.ptr<float>(i); double wr = 0.5 * (1.0 - cos(coeff1 * i)); for(int j = 0; j < cols; j++) dstData[j] = (float)(wr * wc[j]); } }else{ for(int i = 0; i < rows; i++){ double* dstData = dst.ptr<double>(i); double wr = 0.5 * (1.0 - cos(coeff1 * i)); for(int j = 0; j < cols; j++) dstData[j] = wr * wc[j]; } } // perform batch sqrt for SSE performance gains //cv::sqrt(dst, dst); //matlab do not use the square rooted version } /* * simplification of fourier transform function in opencv */ void inline TackerKCFImplParallel::fft2(const Mat src, Mat & dest) const { dft(src,dest,DFT_COMPLEX_OUTPUT); } void inline TackerKCFImplParallel::fft2(const Mat src, std::vector<Mat> & dest, std::vector<Mat> & layers_data) const { split(src, layers_data); for(int i=0;i<src.channels();i++){ dft(layers_data[i],dest[i],DFT_COMPLEX_OUTPUT); } } void inline TackerKCFImplParallel::cudafft2(int num_channels, std::vector<cuda::GpuMat> & dest, std::vector<cuda::GpuMat> & layers_data) { for (int i = 0; i < 
num_channels; i++) { cuda::dft(layers_data[i], dest[i], layers_data[i].size(), DFT_DOUBLE); } } /* * simplification of inverse fourier transform function in opencv */ void inline TackerKCFImplParallel::ifft2(const Mat src, Mat & dest) const { idft(src,dest,DFT_SCALE+DFT_REAL_OUTPUT); } void inline TackerKCFImplParallel::cudaifft2(const cuda::GpuMat src, cuda::GpuMat & dest) { cuda::GpuMat src_cce; src_cce = src; // The size correection is necessary to account for the CCE format cv::Size dest_size((src.size().width -1)*2,src.size().height); cuda::dft(src_cce, dest, dest_size, (DFT_SCALE + DFT_REAL_OUTPUT) | DFT_INVERSE | DFT_DOUBLE); } // Expand half a matrix by inferring the complex conjugates of the cols to // complete the second half void inline TackerKCFImplParallel::cce2full(const Mat src, Mat & dest) { // Assume that the original size of the matrix was divisible by 2 Mat result(cv::Size((src.size().width-1)*2,src.size().height),src.type()); for (int j=0; j < (src.size().width-1)*2;j++) { for (int i = 0; i < src.size().height;i++) { if (j <src.size().width-1) { result.at<Vec2d>(i,j)[0] = src.at<Vec2d>(i,j)[0]; result.at<Vec2d>(i,j)[1] = src.at<Vec2d>(i,j)[1]; } else { // Complex conjugate result.at<Vec2d>(i,j)[0] = src.at<Vec2d>(i,2*(src.size().width - 1) - j)[0]; result.at<Vec2d>(i,j)[1] = - src.at<Vec2d>(i,2*(src.size().width -1) - j)[1]; } } } dest = result; } void inline TackerKCFImplParallel::full2cce(const Mat src, Mat & dest) { //We take the first half of the matrix cv::Rect roi(0, 0, src.size().width/2+1, src.size().height); dest = src(roi); } /* * Point-wise multiplication of two Multichannel Mat data */ void inline TackerKCFImplParallel::pixelWiseMult(const std::vector<cuda::GpuMat> src1, const std::vector<cuda::GpuMat> src2, std::vector<cuda::GpuMat> & dest, const int flags, const bool conjB) const { for(unsigned i=0;i<src1.size();i++){ cv::cuda::mulSpectrums(src1[i], src2[i], dest[i],flags,conjB); } } /* * Combines all channels in a multi-channels 
Mat data into a single channel */ void inline TackerKCFImplParallel::sumChannels(std::vector<cuda::GpuMat> src, cuda::GpuMat & dest) const { src[0].copyTo(dest); for(unsigned i=1;i<src.size();i++){ cuda::add(src[i],dest,dest); } } //void inline /* * obtains the projection matrix using PCA */ void inline TackerKCFImplParallel::updateProjectionMatrix(const Mat src, Mat & old_cov,Mat & proj_matrix, double pca_rate, int compressed_sz, std::vector<Mat> & layers_pca,std::vector<Scalar> & average, Mat pca_data, Mat new_cov, Mat w, Mat u, Mat vt) { GpuMat new_cov_gpu; double start = CycleTimer::currentSeconds(); CV_Assert(compressed_sz<=src.channels()); split(src,layers_pca); for (int i=0;i<src.channels();i++){ average[i]=mean(layers_pca[i]); layers_pca[i]-=average[i]; } // calc covariance matrix merge(layers_pca,pca_data); pca_data=pca_data.reshape(1,src.rows*src.cols); pca_data_gpu.upload(pca_data); GpuMat src3; cuda::gemm(pca_data_gpu, pca_data_gpu, 1.0/(double)(src.rows*src.cols-1), src3, 0, new_cov_gpu, GEMM_1_T); new_cov_gpu.download(new_cov); if(old_cov.rows==0)old_cov=new_cov.clone(); // calc PCA SVD::compute((1.0-pca_rate)*old_cov+pca_rate*new_cov, w, u, vt); // extract the projection matrix proj_matrix=u(Rect(0,0,compressed_sz,src.channels())).clone(); Mat proj_vars=Mat::eye(compressed_sz,compressed_sz,proj_matrix.type()); for(int i=0;i<compressed_sz;i++){ proj_vars.at<double>(i,i)=w.at<double>(i); } // update the covariance matrix old_cov=(1.0-pca_rate)*old_cov+pca_rate*proj_matrix*proj_vars*proj_matrix.t(); } /* * compress the features */ void inline TackerKCFImplParallel::compress(const Mat proj_matrix, const Mat src, Mat & dest, Mat & data, Mat & compressed) const { data=src.reshape(1,src.rows*src.cols); compressed=data*proj_matrix; dest=compressed.reshape(proj_matrix.cols,src.rows).clone(); } /* * obtain the patch and apply hann window filter to it */ bool TackerKCFImplParallel::getSubWindow(const Mat img, const Rect _roi, Mat& feat, Mat& patch, 
TrackerKCF::MODE desc) { Rect region=_roi; // return false if roi is outside the image if((_roi.x+_roi.width<0) ||(_roi.y+_roi.height<0) ||(_roi.x>=img.cols) ||(_roi.y>=img.rows) )return false; // extract patch inside the image if(_roi.x<0){region.x=0;region.width+=_roi.x;} if(_roi.y<0){region.y=0;region.height+=_roi.y;} if(_roi.x+_roi.width>img.cols)region.width=img.cols-_roi.x; if(_roi.y+_roi.height>img.rows)region.height=img.rows-_roi.y; if(region.width>img.cols)region.width=img.cols; if(region.height>img.rows)region.height=img.rows; patch=img(region).clone(); // add some padding to compensate when the patch is outside image border int addTop,addBottom, addLeft, addRight; addTop=region.y-_roi.y; addBottom=(_roi.height+_roi.y>img.rows?_roi.height+_roi.y-img.rows:0); addLeft=region.x-_roi.x; addRight=(_roi.width+_roi.x>img.cols?_roi.width+_roi.x-img.cols:0); copyMakeBorder(patch,patch,addTop,addBottom,addLeft,addRight,BORDER_REPLICATE); if(patch.rows==0 || patch.cols==0)return false; // extract the desired descriptors switch(desc){ case CN: CV_Assert(img.channels() == 3); extractCN(patch,feat); //feat=feat.mul(hann_cn); // hann window filter break; default: // GRAY if(img.channels()>1) cvtColor(patch,feat, CV_BGR2GRAY); else feat=patch; feat.convertTo(feat,CV_64F); feat=feat/255.0-0.5; // normalize to range -0.5 .. 
0.5 feat=feat.mul(hann); // hann window filter break; } return true; } /* * get feature using external function */ bool TackerKCFImplParallel::getSubWindow(const Mat img, const Rect _roi, Mat& feat, void (*f)(const Mat, const Rect, Mat& )) const{ // return false if roi is outside the image if((_roi.x+_roi.width<0) ||(_roi.y+_roi.height<0) ||(_roi.x>=img.cols) ||(_roi.y>=img.rows) )return false; f(img, _roi, feat); if(_roi.width != feat.cols || _roi.height != feat.rows){ printf("error in customized function of features extractor!\n"); printf("Rules: roi.width==feat.cols && roi.height = feat.rows \n"); } Mat hann_win; std::vector<Mat> _layers; for(int i=0;i<feat.channels();i++) _layers.push_back(hann); merge(_layers, hann_win); feat=feat.mul(hann_win); // hann window filter return true; } __global__ void extractIndexKernel(const cuda::PtrStepSz<uchar3> input, cuda::PtrStep<ushort> output) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= 0 && x < input.cols && y >= 0 && y < input.rows) { uchar3 pixel = input(y,x); output.ptr(y)[x] = (floor((float)pixel.z/8)+32*floor((float)pixel.y/8)+32*32*floor((float)pixel.x/8)); } } __global__ void extractCNKernel(const cuda::PtrStepSz<ushort> input, cuda::PtrStep<double[10]> output, const double *ColorNames) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int k = blockIdx.z * blockDim.z + threadIdx.z; if (x >= 0 && x < input.cols && y >= 0 && y < input.rows && k >= 0 && k < 10) { short index = input(y,x); output.ptr(y)[x][k] = ColorNames[10*index + k]; //output.ptr(y)[x] = (floor((float)pixel.z/8)+32*floor((float)pixel.y/8)+32*32*floor((float)pixel.x/8)); } } /* Convert BGR to ColorNames */ void TackerKCFImplParallel::extractCN(Mat patch_data, Mat & cnFeatures) { if(cnFeatures.type() != CV_64FC(10)) { cnFeatures = Mat::zeros(patch_data.rows,patch_data.cols,CV_64FC(10)); } patch_data_gpu.upload(patch_data); dim3 cthreads2d(32, 
32); dim3 cblocks2d( static_cast<int>(std::ceil(patch_data_gpu.size().width / static_cast<double>(cthreads2d.x))), static_cast<int>(std::ceil(patch_data_gpu.size().height / static_cast<double>(cthreads2d.y)))); extractIndexKernel<<<cblocks2d, cthreads2d>>>(patch_data_gpu, indexes_gpu); cudaSafeCall(cudaGetLastError()); cuda::GpuMat cnFeatures_gpu; cuda::createContinuous(patch_data.size(), CV_64FC(10), cnFeatures_gpu); dim3 cthreads3d(32, 32, 1); dim3 cblocks3d( static_cast<int>(std::ceil(patch_data_gpu.size().width / static_cast<double>(cthreads3d.x))), static_cast<int>(std::ceil(patch_data_gpu.size().height / static_cast<double>(cthreads3d.y))), static_cast<int>(std::ceil(10 / static_cast<double>(cthreads3d.z)))); extractCNKernel<<<cblocks3d, cthreads3d>>>(indexes_gpu, cnFeatures_gpu, ColorNames_gpu); cudaSafeCall(cudaGetLastError()); cuda::multiply(cnFeatures_gpu, hann_cn_gpu, cnFeatures_gpu); cnFeatures_gpu.download(cnFeatures); } /* * dense gauss kernel function */ void TackerKCFImplParallel::denseGaussKernel(const double sigma, const Mat x_data, const Mat y_data, Mat & k_data, std::vector<Mat> & layers_data,std::vector<Mat> & xf_data,std::vector<Mat> & yf_data, std::vector<Mat> xyf_v, Mat xy, Mat xyf ) { // First we download all the data onto the Gpu int num_channels = x_data.channels(); double normX = norm(x_data, NORM_L2SQR); double normY = norm(y_data, NORM_L2SQR); cv::cuda::Stream stream; split(x_data, layers_data); for (int i = 0; i < x_data.channels(); i++){ layers_data_gpu[i].upload(layers_data[i], stream); } stream.waitForCompletion(); cudafft2(x_data.channels(),xf_data_gpu,layers_data_gpu); split(y_data, layers_data); for (int i = 0; i < x_data.channels(); i++){ layers_data_gpu[i].upload(layers_data[i], stream); } stream.waitForCompletion(); cudafft2(y_data.channels(),yf_data_gpu,layers_data_gpu); pixelWiseMult(xf_data_gpu,yf_data_gpu,xyf_v_gpu,0,true); sumChannels(xyf_v_gpu,xyf_c_gpu); cudaifft2(xyf_c_gpu,xyf_r_gpu); xyf_r_gpu.download(xyf); 
if(params.wrap_kernel){ shiftRows(xyf, x_data.rows/2); shiftCols(xyf, x_data.cols/2); } //(xx + yy - 2 * xy) / numel(x) xy=(normX+normY-2*xyf)/(x_data.rows*x_data.cols*x_data.channels()); // TODO: check wether we really need thresholding or not //threshold(xy,xy,0.0,0.0,THRESH_TOZERO);//max(0, (xx + yy - 2 * xy) / numel(x)) for(int i=0;i<xy.rows;i++){ for(int j=0;j<xy.cols;j++){ if(xy.at<double>(i,j)<0.0)xy.at<double>(i,j)=0.0; } } double sig=-1.0/(sigma*sigma); xy=sig*xy; exp(xy, k_data); } /* CIRCULAR SHIFT Function * http://stackoverflow.com/questions/10420454/shift-like-matlab-function-rows-or-columns-of-a-matrix-in-opencv */ // circular shift one row from up to down void TackerKCFImplParallel::shiftRows(Mat& mat) const { Mat temp; Mat m; int _k = (mat.rows-1); mat.row(_k).copyTo(temp); for(; _k > 0 ; _k-- ) { m = mat.row(_k); mat.row(_k-1).copyTo(m); } m = mat.row(0); temp.copyTo(m); } // circular shift n rows from up to down if n > 0, -n rows from down to up if n < 0 void TackerKCFImplParallel::shiftRows(Mat& mat, int n) const { if( n < 0 ) { n = -n; flip(mat,mat,0); for(int _k=0; _k < n;_k++) { shiftRows(mat); } flip(mat,mat,0); }else{ for(int _k=0; _k < n;_k++) { shiftRows(mat); } } } //circular shift n columns from left to right if n > 0, -n columns from right to left if n < 0 void TackerKCFImplParallel::shiftCols(Mat& mat, int n) const { if(n < 0){ n = -n; flip(mat,mat,1); transpose(mat,mat); shiftRows(mat,n); transpose(mat,mat); flip(mat,mat,1); }else{ transpose(mat,mat); shiftRows(mat,n); transpose(mat,mat); } } /* * calculate the detection response */ void TackerKCFImplParallel::calcResponse(const Mat alphaf_data, const Mat kf_data, Mat & response_data, Mat & spec_data) { //alpha f--> 2channels ; k --> 1 channel; mulSpectrums(alphaf_data,kf_data,spec_data,0,false); ifft2(spec_data,response_data); } /* * calculate the detection response for splitted form */ void TackerKCFImplParallel::calcResponse(const Mat alphaf_data, const Mat _alphaf_den, const Mat 
kf_data, Mat & response_data, Mat & spec_data, Mat & spec2_data) { mulSpectrums(alphaf_data,kf_data,spec_data,0,false); //z=(a+bi)/(c+di)=[(ac+bd)+i(bc-ad)]/(c^2+d^2) double den; for(int i=0;i<kf_data.rows;i++){ for(int j=0;j<kf_data.cols;j++){ den=1.0/(_alphaf_den.at<Vec2d>(i,j)[0]*_alphaf_den.at<Vec2d>(i,j)[0]+_alphaf_den.at<Vec2d>(i,j)[1]*_alphaf_den.at<Vec2d>(i,j)[1]); spec2_data.at<Vec2d>(i,j)[0]= (spec_data.at<Vec2d>(i,j)[0]*_alphaf_den.at<Vec2d>(i,j)[0]+spec_data.at<Vec2d>(i,j)[1]*_alphaf_den.at<Vec2d>(i,j)[1])*den; spec2_data.at<Vec2d>(i,j)[1]= (spec_data.at<Vec2d>(i,j)[1]*_alphaf_den.at<Vec2d>(i,j)[0]-spec_data.at<Vec2d>(i,j)[0]*_alphaf_den.at<Vec2d>(i,j)[1])*den; } } ifft2(spec2_data,response_data); } void TackerKCFImplParallel::setFeatureExtractor(void (*f)(const Mat, const Rect, Mat&), bool pca_func){ if(pca_func){ extractor_pca.push_back(f); use_custom_extractor_pca = true; }else{ extractor_npca.push_back(f); use_custom_extractor_npca = true; } } /*----------------------------------------------------------------------*/ }
220c6cc3f17597bb8eab1fb584b37d8084060be3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.1.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date August 2016 @precisions normal z -> c d s */ #include "magmasparse_internal.h" #define BLOCK_SIZE 256 // CSR-SpMV kernel __global__ void zgecsrmv_kernel( int num_rows, int num_cols, magmaDoubleComplex alpha, magmaDoubleComplex * dval, magma_index_t * drowptr, magma_index_t * dcolind, magmaDoubleComplex * dx, magmaDoubleComplex beta, magmaDoubleComplex * dy) { int row = blockIdx.x*blockDim.x+threadIdx.x; int j; if(row<num_rows){ magmaDoubleComplex dot = MAGMA_Z_ZERO; int start = drowptr[ row ]; int end = drowptr[ row+1 ]; for( j=start; j<end; j++) dot += dval[ j ] * dx[ dcolind[j] ]; dy[ row ] = dot *alpha + beta * dy[ row ]; } } // shifted CSR-SpMV kernel __global__ void zgecsrmv_kernel_shift( int num_rows, int num_cols, magmaDoubleComplex alpha, magmaDoubleComplex lambda, magmaDoubleComplex * dval, magma_index_t * drowptr, magma_index_t * dcolind, magmaDoubleComplex * dx, magmaDoubleComplex beta, int offset, int blocksize, magma_index_t * addrows, magmaDoubleComplex * dy) { int row = blockIdx.x*blockDim.x+threadIdx.x; int j; if(row<num_rows){ magmaDoubleComplex dot = MAGMA_Z_ZERO; int start = drowptr[ row ]; int end = drowptr[ row+1 ]; for( j=start; j<end; j++) dot += dval[ j ] * dx[ dcolind[j] ]; if( row<blocksize ) dy[ row ] = dot * alpha - lambda * dx[ offset+row ] + beta * dy [ row ]; else dy[ row ] = dot * alpha - lambda * dx[ addrows[row-blocksize] ] + beta * dy [ row ]; } } /** Purpose ------- This routine computes y = alpha * A * x + beta * y on the GPU. The input format is CSR (val, row, col). 
Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] alpha magmaDoubleComplex scalar multiplier @param[in] dval magmaDoubleComplex_ptr array containing values of A in CSR @param[in] drowptr magmaIndex_ptr rowpointer of A in CSR @param[in] dcolind magmaIndex_ptr columnindices of A in CSR @param[in] dx magmaDoubleComplex_ptr input vector x @param[in] beta magmaDoubleComplex scalar multiplier @param[out] dy magmaDoubleComplex_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_zblas ********************************************************************/ extern "C" magma_int_t magma_zgecsrmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magmaDoubleComplex alpha, magmaDoubleComplex_ptr dval, magmaIndex_ptr drowptr, magmaIndex_ptr dcolind, magmaDoubleComplex_ptr dx, magmaDoubleComplex beta, magmaDoubleComplex_ptr dy, magma_queue_t queue ) { dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; hipLaunchKernelGGL(( zgecsrmv_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, alpha, dval, drowptr, dcolind, dx, beta, dy); return MAGMA_SUCCESS; } /** Purpose ------- This routine computes y = alpha * ( A -lambda I ) * x + beta * y on the GPU. It is a shifted version of the CSR-SpMV. 
Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] alpha magmaDoubleComplex scalar multiplier @param[in] lambda magmaDoubleComplex scalar multiplier @param[in] dval magmaDoubleComplex_ptr array containing values of A in CSR @param[in] drowptr magmaIndex_ptr rowpointer of A in CSR @param[in] dcolind magmaIndex_ptr columnindices of A in CSR @param[in] dx magmaDoubleComplex_ptr input vector x @param[in] beta magmaDoubleComplex scalar multiplier @param[in] offset magma_int_t in case not the main diagonal is scaled @param[in] blocksize magma_int_t in case of processing multiple vectors @param[in] addrows magmaIndex_ptr in case the matrixpowerskernel is used @param[out] dy magmaDoubleComplex_ptr output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_zblas ********************************************************************/ extern "C" magma_int_t magma_zgecsrmv_shift( magma_trans_t transA, magma_int_t m, magma_int_t n, magmaDoubleComplex alpha, magmaDoubleComplex lambda, magmaDoubleComplex_ptr dval, magmaIndex_ptr drowptr, magmaIndex_ptr dcolind, magmaDoubleComplex_ptr dx, magmaDoubleComplex beta, magma_int_t offset, magma_int_t blocksize, magma_index_t * addrows, magmaDoubleComplex_ptr dy, magma_queue_t queue ) { dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; hipLaunchKernelGGL(( zgecsrmv_kernel_shift), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, alpha, lambda, dval, drowptr, dcolind, dx, beta, offset, blocksize, addrows, dy); return MAGMA_SUCCESS; }
220c6cc3f17597bb8eab1fb584b37d8084060be3.cu
/* -- MAGMA (version 2.1.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date August 2016 @precisions normal z -> c d s */ #include "magmasparse_internal.h" #define BLOCK_SIZE 256 // CSR-SpMV kernel __global__ void zgecsrmv_kernel( int num_rows, int num_cols, magmaDoubleComplex alpha, magmaDoubleComplex * dval, magma_index_t * drowptr, magma_index_t * dcolind, magmaDoubleComplex * dx, magmaDoubleComplex beta, magmaDoubleComplex * dy) { int row = blockIdx.x*blockDim.x+threadIdx.x; int j; if(row<num_rows){ magmaDoubleComplex dot = MAGMA_Z_ZERO; int start = drowptr[ row ]; int end = drowptr[ row+1 ]; for( j=start; j<end; j++) dot += dval[ j ] * dx[ dcolind[j] ]; dy[ row ] = dot *alpha + beta * dy[ row ]; } } // shifted CSR-SpMV kernel __global__ void zgecsrmv_kernel_shift( int num_rows, int num_cols, magmaDoubleComplex alpha, magmaDoubleComplex lambda, magmaDoubleComplex * dval, magma_index_t * drowptr, magma_index_t * dcolind, magmaDoubleComplex * dx, magmaDoubleComplex beta, int offset, int blocksize, magma_index_t * addrows, magmaDoubleComplex * dy) { int row = blockIdx.x*blockDim.x+threadIdx.x; int j; if(row<num_rows){ magmaDoubleComplex dot = MAGMA_Z_ZERO; int start = drowptr[ row ]; int end = drowptr[ row+1 ]; for( j=start; j<end; j++) dot += dval[ j ] * dx[ dcolind[j] ]; if( row<blocksize ) dy[ row ] = dot * alpha - lambda * dx[ offset+row ] + beta * dy [ row ]; else dy[ row ] = dot * alpha - lambda * dx[ addrows[row-blocksize] ] + beta * dy [ row ]; } } /** Purpose ------- This routine computes y = alpha * A * x + beta * y on the GPU. The input format is CSR (val, row, col). 
Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] alpha magmaDoubleComplex scalar multiplier @param[in] dval magmaDoubleComplex_ptr array containing values of A in CSR @param[in] drowptr magmaIndex_ptr rowpointer of A in CSR @param[in] dcolind magmaIndex_ptr columnindices of A in CSR @param[in] dx magmaDoubleComplex_ptr input vector x @param[in] beta magmaDoubleComplex scalar multiplier @param[out] dy magmaDoubleComplex_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_zblas ********************************************************************/ extern "C" magma_int_t magma_zgecsrmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magmaDoubleComplex alpha, magmaDoubleComplex_ptr dval, magmaIndex_ptr drowptr, magmaIndex_ptr dcolind, magmaDoubleComplex_ptr dx, magmaDoubleComplex beta, magmaDoubleComplex_ptr dy, magma_queue_t queue ) { dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; zgecsrmv_kernel<<< grid, threads, 0, queue->cuda_stream() >>> (m, n, alpha, dval, drowptr, dcolind, dx, beta, dy); return MAGMA_SUCCESS; } /** Purpose ------- This routine computes y = alpha * ( A -lambda I ) * x + beta * y on the GPU. It is a shifted version of the CSR-SpMV. 
Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] alpha magmaDoubleComplex scalar multiplier @param[in] lambda magmaDoubleComplex scalar multiplier @param[in] dval magmaDoubleComplex_ptr array containing values of A in CSR @param[in] drowptr magmaIndex_ptr rowpointer of A in CSR @param[in] dcolind magmaIndex_ptr columnindices of A in CSR @param[in] dx magmaDoubleComplex_ptr input vector x @param[in] beta magmaDoubleComplex scalar multiplier @param[in] offset magma_int_t in case not the main diagonal is scaled @param[in] blocksize magma_int_t in case of processing multiple vectors @param[in] addrows magmaIndex_ptr in case the matrixpowerskernel is used @param[out] dy magmaDoubleComplex_ptr output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_zblas ********************************************************************/ extern "C" magma_int_t magma_zgecsrmv_shift( magma_trans_t transA, magma_int_t m, magma_int_t n, magmaDoubleComplex alpha, magmaDoubleComplex lambda, magmaDoubleComplex_ptr dval, magmaIndex_ptr drowptr, magmaIndex_ptr dcolind, magmaDoubleComplex_ptr dx, magmaDoubleComplex beta, magma_int_t offset, magma_int_t blocksize, magma_index_t * addrows, magmaDoubleComplex_ptr dy, magma_queue_t queue ) { dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; zgecsrmv_kernel_shift<<< grid, threads, 0, queue->cuda_stream() >>> (m, n, alpha, lambda, dval, drowptr, dcolind, dx, beta, offset, blocksize, addrows, dy); return MAGMA_SUCCESS; }
6abda0d4ec68d000f5d6b52c21b4ecd81d95baf5.hip
// !!! This is a file automatically generated by hipify!!! /* Cuda programming implementation of the reed solomon decoder */ /*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/ // System includes #include <stdio.h> #include <assert.h> #include <math.h> // CUDA runtime #include <hip/hip_runtime.h> #include <helper_functions.h> #include <helper_cuda.h> #define GPU_IMP // Project related includes #include <bch_cuda_defines.h> #include <gf_defines.h> #include <gf_rs_defines.h> #include <gf_func.cu> /*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/ /* Cuda Kernel calls and associated variables */ dim3 cuda_grid; dim3 cuda_block; /*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/ GFN_DEF void cuda_gf_init(); GFN_DEF void cuda_rs_fft(DTYPEP t_data, UINTP f_data); GFN_DEF void cuda_rs_keyeq(UINTP syndrome, DTYPEP keyeq); GFN_DEF void cuda_rs_errpoly(DTYPEP keyeq,UINTP fft_data, DTYPEP pg_data, DTYPEP pg_corr_data); // Function to initialize the memory (DW) void memory_init (DTYPEP x,int N) { int i; for(i=0;i<N;i++) { x[i] = i; } } // Main call for the routine int main() { hipError_t err = hipSuccess; /* Allocate memory for each block on the host end */ DTYPEP h_pg_data = (DTYPEP) malloc(F_CPG_SIZE_BYTES); DTYPEP h_pg_corr_data = (DTYPEP) malloc(F_CPG_SIZE_BYTES); /* Alocate memory for the block on the GPU */ DTYPEP d_pg_data; CUDA_CHK_ERR(hipMalloc(&d_pg_data,F_CPG_SIZE_BYTES)); DTYPEP d_pg_corr_data; CUDA_CHK_ERR(hipMalloc(&d_pg_corr_data,F_CPG_SIZE_BYTES)); UINTP d_pg_syndrome; CUDA_CHK_ERR(hipMalloc(&d_pg_syndrome,2*T*F_NO_OF_SC*4)); UINTP d_pg_rs_rx_fft; CUDA_CHK_ERR(hipMalloc(&d_pg_rs_rx_fft,RS_N*F_NO_OF_SC*4)); DTYPEP d_pg_keyeq; CUDA_CHK_ERR(hipMalloc(&d_pg_keyeq,(T+1)*F_NO_OF_SC*4)); /* Call a host initialization */ memory_init (h_pg_data,F_CPG_SIZE_BYTES/4); memory_init (h_pg_corr_data,F_CPG_SIZE_BYTES/4); //++++++++++++++++++++++++++++++++++++++++++++++++++++++ /* Copy the data from the host memory to the GPU */ err = hipMemcpy 
(d_pg_data, h_pg_data, F_CPG_SIZE_BYTES, hipMemcpyHostToDevice); CUDA_CHK_ERR(err); cuda_grid = dim3(1);cuda_block = dim3(1); cuda_gf_init CUDA_VEC (); err = hipGetLastError();CUDA_CHK_ERR(err); printf ("Galois field is %3d error is %3d block size is %6d \n",M,T,RS_N); #ifndef ERR_SEEN // The block and grid size cannot be more than 1024 cuda_grid.x = 2*T; cuda_grid.y = F_NO_OF_SC; cuda_grid.z = 1; cuda_block.x = RS_N; cuda_block.y = 1; cuda_block.z = 1; cuda_rs_fft CUDA_VEC (d_pg_data,d_pg_rs_rx_fft); err = hipGetLastError();CUDA_CHK_ERR(err); #else // Run FFT on the complete spectrum cuda_grid.x = RS_N; cuda_grid.y = F_NO_OF_SC; cuda_grid.z = 1; cuda_block.x = RS_N; cuda_block.y = 1; cuda_block.z = 1; cuda_rs_fft CUDA_VEC (d_pg_data,d_pg_rs_rx_fft); err = hipGetLastError();CUDA_CHK_ERR(err); // Call berlekamp massey algorithm cuda_grid.x = 1;cuda_grid.y = 1;cuda_grid.z = 1; cuda_block.x = 1;cuda_block.y = 1;cuda_block.z = 1; cuda_rs_keyeq CUDA_VEC (d_pg_rs_rx_fft,d_pg_keyeq); err = hipGetLastError();CUDA_CHK_ERR(err); cuda_grid.x = F_NO_OF_SC; cuda_grid.y = 1; cuda_grid.z = 1; cuda_block.x = 1; cuda_block.y = 1; cuda_block.z = 1; cuda_rs_errpoly CUDA_VEC (d_pg_keyeq,d_pg_rs_rx_fft,d_pg_data,d_pg_corr_data); err = hipGetLastError();CUDA_CHK_ERR(err); #endif /* Once the computation is done, move the corrected data back to the host */ err = hipMemcpy (h_pg_corr_data, d_pg_corr_data, F_CPG_SIZE_BYTES, hipMemcpyDeviceToHost); CUDA_CHK_ERR(err); //++++++++++++++++++++++++++++++++++++++++++++++++++++++ DTYPEP h_dbg = (DTYPEP) malloc ((1<<M)*4); // err = hipMemcpyFromSymbol (h_dbg,gb_gf_ext,((1<<M)*4)); // CUDA_CHK_ERR(err); // Final print /* for(i=0;i<gl_sz;i++){ printf("GF element %03d is %04x \n",i,h_pg_corr_data[i]); } */ /* Free up the cuda memory */ hipFree(d_pg_data);hipFree(d_pg_syndrome);hipFree(d_pg_corr_data); free(h_pg_data);free(h_pg_corr_data); } /* Subroutine to initialize the galois field element */ GFN_DEF void cuda_gf_init(){ DTYPE i,elem; gb_gf_ext[0] = 
elem = 1; gb_gf_log_table[1] = gb_gf_log_table[0] = 0; for (i=1;i<(1<<M)-1;i++) { elem = elem << 1; if (elem >= (1<<M)) { elem = (elem ^ CS_PRIM_POLY[M]) & CS_GF_WND; } gb_gf_ext[i] = elem; gb_gf_log_table[elem] = i; } } /* FFT in the given finite field */ GFN_DEF void cuda_rs_fft (DTYPEP t_data, UINTP f_data){ __shared__ int l_f_data[RS_N]; // The position of the 32 bit is the thread id DTYPE bl_dw_pos = threadIdx.x; DTYPE byte_pos = threadIdx.y; DTYPE synd_i = blockIdx.x; DTYPE block_no = blockIdx.y; DTYPE elem = bl_dw_pos; DTYPE vec_elem = t_data[block_no*RS_N+elem]; DTYPE synd_calc_pos = synd_i + 0; DTYPE m = gb_gf_log_table[vec_elem]; DTYPE pow_i = ((synd_i * elem)+m) % ((1<<M)-1); int log_table = gb_gf_ext[(pow_i)]; l_f_data[elem] = log_table; // Assign the multiplied value to the syndrome __syncthreads(); // This will make sure the array is synchronized int index = RS_N; index /= 2; while (index != 0) { // Galois field addition is XOR if (elem < index) { l_f_data[elem] ^= l_f_data[elem+index];} index = index/2; __syncthreads(); // This will make sure the values are synchronized } f_data[block_no*RS_N+synd_i] = l_f_data[synd_i]; } /* Key equation solver */ GFN_DEF void cuda_rs_keyeq (UINTP syndrome, DTYPEP keyeq) { DTYPE block_pos = threadIdx.x*(2*T*SZ_OF_UINT); DTYPE beta[T+1][T+1]; DTYPE lr[T+1]; DTYPE dp,dr,dp_cons,bsel; DTYPE sigma[T+1][T+1]; DTYPE beta_mul[T+1]; DTYPE s0 = syndrome[block_pos]; /* First initialize the array */ int i,r; for (i=0;i<=T;i++) { if(s0 != 0) { dp = s0; beta[1][i] = (i==2) ? 1 : 0; lr[1] =1; } else { dp = 1; beta[1][i] = (i==3) ? 1 : 0; lr[1] =0; } sigma[0][i] = (i==0) ? 1 : (i==1) ? s0 : 0; } // The iteration is T times. for(r=1;r<T;r++) { dr = 0; for(i=0;i<T;i++) { dr = dr ^ gf_mul(sigma[r-1][i],syndrome[block_pos+(2*r-i)]); } dp_cons = dr; for(i=0;i<=T;i++){ beta_mul[i] = gf_mul(beta[r][i],dp_cons); } for(i=0;i<=T;i++){ sigma[r][i] = beta_mul[i] ^ gf_mul(sigma[r-1][i],dp); } bsel = (dr != 0 && r >= lr[r]) ? 
1 : 0 ; for(i=0;i<=T;i++){ beta[r+1][i+2] = bsel ? sigma[r-1][i] : beta[r][i]; } beta[r+1][0] = beta[r+1][1] = 0; lr[r+1] = bsel ? lr[r]+1 : lr[r]; dp = bsel ? dr : dp; } // Now assign the result back for(i=0;i<=T;i++) { keyeq[block_pos+i] = sigma[T-1][i]; } } /* Chein search algorithm to correct the errors */ GFN_DEF void cuda_rs_errpoly (DTYPEP keyeq,UINTP fft_data, DTYPEP pg_data,DTYPEP pg_corr_data) { UINT err_vec; int block_no = threadIdx.x; int i,j; for (i=0;i<RS_N;i++) { if (i < 2*T) { err_vec = fft_data[block_no*RS_N+i]; } else { err_vec = 0; for (j=0;j<2*T;j++) { err_vec = err_vec ^ keyeq[block_no*2*T+j]; } } pg_corr_data[block_no*RS_N+i] = pg_data[block_no*RS_N+i] - err_vec; } /* pg_corr_data[dw_pos] = pg_corr_data[dw_pos] ^ (err_det << bit_pos); __syncthreads(); pg_corr_data[dw_pos] = pg_corr_data[dw_pos] ^ pg_data[dw_pos]; */ }
6abda0d4ec68d000f5d6b52c21b4ecd81d95baf5.cu
/* Cuda programming implementation of the reed solomon decoder */ /*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/ // System includes #include <stdio.h> #include <assert.h> #include <math.h> // CUDA runtime #include <cuda_runtime.h> #include <helper_functions.h> #include <helper_cuda.h> #define GPU_IMP // Project related includes #include <bch_cuda_defines.h> #include <gf_defines.h> #include <gf_rs_defines.h> #include <gf_func.cu> /*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/ /* Cuda Kernel calls and associated variables */ dim3 cuda_grid; dim3 cuda_block; /*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/ GFN_DEF void cuda_gf_init(); GFN_DEF void cuda_rs_fft(DTYPEP t_data, UINTP f_data); GFN_DEF void cuda_rs_keyeq(UINTP syndrome, DTYPEP keyeq); GFN_DEF void cuda_rs_errpoly(DTYPEP keyeq,UINTP fft_data, DTYPEP pg_data, DTYPEP pg_corr_data); // Function to initialize the memory (DW) void memory_init (DTYPEP x,int N) { int i; for(i=0;i<N;i++) { x[i] = i; } } // Main call for the routine int main() { cudaError_t err = cudaSuccess; /* Allocate memory for each block on the host end */ DTYPEP h_pg_data = (DTYPEP) malloc(F_CPG_SIZE_BYTES); DTYPEP h_pg_corr_data = (DTYPEP) malloc(F_CPG_SIZE_BYTES); /* Alocate memory for the block on the GPU */ DTYPEP d_pg_data; CUDA_CHK_ERR(cudaMalloc(&d_pg_data,F_CPG_SIZE_BYTES)); DTYPEP d_pg_corr_data; CUDA_CHK_ERR(cudaMalloc(&d_pg_corr_data,F_CPG_SIZE_BYTES)); UINTP d_pg_syndrome; CUDA_CHK_ERR(cudaMalloc(&d_pg_syndrome,2*T*F_NO_OF_SC*4)); UINTP d_pg_rs_rx_fft; CUDA_CHK_ERR(cudaMalloc(&d_pg_rs_rx_fft,RS_N*F_NO_OF_SC*4)); DTYPEP d_pg_keyeq; CUDA_CHK_ERR(cudaMalloc(&d_pg_keyeq,(T+1)*F_NO_OF_SC*4)); /* Call a host initialization */ memory_init (h_pg_data,F_CPG_SIZE_BYTES/4); memory_init (h_pg_corr_data,F_CPG_SIZE_BYTES/4); //++++++++++++++++++++++++++++++++++++++++++++++++++++++ /* Copy the data from the host memory to the GPU */ err = cudaMemcpy (d_pg_data, h_pg_data, F_CPG_SIZE_BYTES, 
cudaMemcpyHostToDevice); CUDA_CHK_ERR(err); cuda_grid = dim3(1);cuda_block = dim3(1); cuda_gf_init CUDA_VEC (); err = cudaGetLastError();CUDA_CHK_ERR(err); printf ("Galois field is %3d error is %3d block size is %6d \n",M,T,RS_N); #ifndef ERR_SEEN // The block and grid size cannot be more than 1024 cuda_grid.x = 2*T; cuda_grid.y = F_NO_OF_SC; cuda_grid.z = 1; cuda_block.x = RS_N; cuda_block.y = 1; cuda_block.z = 1; cuda_rs_fft CUDA_VEC (d_pg_data,d_pg_rs_rx_fft); err = cudaGetLastError();CUDA_CHK_ERR(err); #else // Run FFT on the complete spectrum cuda_grid.x = RS_N; cuda_grid.y = F_NO_OF_SC; cuda_grid.z = 1; cuda_block.x = RS_N; cuda_block.y = 1; cuda_block.z = 1; cuda_rs_fft CUDA_VEC (d_pg_data,d_pg_rs_rx_fft); err = cudaGetLastError();CUDA_CHK_ERR(err); // Call berlekamp massey algorithm cuda_grid.x = 1;cuda_grid.y = 1;cuda_grid.z = 1; cuda_block.x = 1;cuda_block.y = 1;cuda_block.z = 1; cuda_rs_keyeq CUDA_VEC (d_pg_rs_rx_fft,d_pg_keyeq); err = cudaGetLastError();CUDA_CHK_ERR(err); cuda_grid.x = F_NO_OF_SC; cuda_grid.y = 1; cuda_grid.z = 1; cuda_block.x = 1; cuda_block.y = 1; cuda_block.z = 1; cuda_rs_errpoly CUDA_VEC (d_pg_keyeq,d_pg_rs_rx_fft,d_pg_data,d_pg_corr_data); err = cudaGetLastError();CUDA_CHK_ERR(err); #endif /* Once the computation is done, move the corrected data back to the host */ err = cudaMemcpy (h_pg_corr_data, d_pg_corr_data, F_CPG_SIZE_BYTES, cudaMemcpyDeviceToHost); CUDA_CHK_ERR(err); //++++++++++++++++++++++++++++++++++++++++++++++++++++++ DTYPEP h_dbg = (DTYPEP) malloc ((1<<M)*4); // err = cudaMemcpyFromSymbol (h_dbg,gb_gf_ext,((1<<M)*4)); // CUDA_CHK_ERR(err); // Final print /* for(i=0;i<gl_sz;i++){ printf("GF element %03d is %04x \n",i,h_pg_corr_data[i]); } */ /* Free up the cuda memory */ cudaFree(d_pg_data);cudaFree(d_pg_syndrome);cudaFree(d_pg_corr_data); free(h_pg_data);free(h_pg_corr_data); } /* Subroutine to initialize the galois field element */ GFN_DEF void cuda_gf_init(){ DTYPE i,elem; gb_gf_ext[0] = elem = 1; gb_gf_log_table[1] 
= gb_gf_log_table[0] = 0; for (i=1;i<(1<<M)-1;i++) { elem = elem << 1; if (elem >= (1<<M)) { elem = (elem ^ CS_PRIM_POLY[M]) & CS_GF_WND; } gb_gf_ext[i] = elem; gb_gf_log_table[elem] = i; } } /* FFT in the given finite field */ GFN_DEF void cuda_rs_fft (DTYPEP t_data, UINTP f_data){ __shared__ int l_f_data[RS_N]; // The position of the 32 bit is the thread id DTYPE bl_dw_pos = threadIdx.x; DTYPE byte_pos = threadIdx.y; DTYPE synd_i = blockIdx.x; DTYPE block_no = blockIdx.y; DTYPE elem = bl_dw_pos; DTYPE vec_elem = t_data[block_no*RS_N+elem]; DTYPE synd_calc_pos = synd_i + 0; DTYPE m = gb_gf_log_table[vec_elem]; DTYPE pow_i = ((synd_i * elem)+m) % ((1<<M)-1); int log_table = gb_gf_ext[(pow_i)]; l_f_data[elem] = log_table; // Assign the multiplied value to the syndrome __syncthreads(); // This will make sure the array is synchronized int index = RS_N; index /= 2; while (index != 0) { // Galois field addition is XOR if (elem < index) { l_f_data[elem] ^= l_f_data[elem+index];} index = index/2; __syncthreads(); // This will make sure the values are synchronized } f_data[block_no*RS_N+synd_i] = l_f_data[synd_i]; } /* Key equation solver */ GFN_DEF void cuda_rs_keyeq (UINTP syndrome, DTYPEP keyeq) { DTYPE block_pos = threadIdx.x*(2*T*SZ_OF_UINT); DTYPE beta[T+1][T+1]; DTYPE lr[T+1]; DTYPE dp,dr,dp_cons,bsel; DTYPE sigma[T+1][T+1]; DTYPE beta_mul[T+1]; DTYPE s0 = syndrome[block_pos]; /* First initialize the array */ int i,r; for (i=0;i<=T;i++) { if(s0 != 0) { dp = s0; beta[1][i] = (i==2) ? 1 : 0; lr[1] =1; } else { dp = 1; beta[1][i] = (i==3) ? 1 : 0; lr[1] =0; } sigma[0][i] = (i==0) ? 1 : (i==1) ? s0 : 0; } // The iteration is T times. for(r=1;r<T;r++) { dr = 0; for(i=0;i<T;i++) { dr = dr ^ gf_mul(sigma[r-1][i],syndrome[block_pos+(2*r-i)]); } dp_cons = dr; for(i=0;i<=T;i++){ beta_mul[i] = gf_mul(beta[r][i],dp_cons); } for(i=0;i<=T;i++){ sigma[r][i] = beta_mul[i] ^ gf_mul(sigma[r-1][i],dp); } bsel = (dr != 0 && r >= lr[r]) ? 
1 : 0 ; for(i=0;i<=T;i++){ beta[r+1][i+2] = bsel ? sigma[r-1][i] : beta[r][i]; } beta[r+1][0] = beta[r+1][1] = 0; lr[r+1] = bsel ? lr[r]+1 : lr[r]; dp = bsel ? dr : dp; } // Now assign the result back for(i=0;i<=T;i++) { keyeq[block_pos+i] = sigma[T-1][i]; } } /* Chein search algorithm to correct the errors */ GFN_DEF void cuda_rs_errpoly (DTYPEP keyeq,UINTP fft_data, DTYPEP pg_data,DTYPEP pg_corr_data) { UINT err_vec; int block_no = threadIdx.x; int i,j; for (i=0;i<RS_N;i++) { if (i < 2*T) { err_vec = fft_data[block_no*RS_N+i]; } else { err_vec = 0; for (j=0;j<2*T;j++) { err_vec = err_vec ^ keyeq[block_no*2*T+j]; } } pg_corr_data[block_no*RS_N+i] = pg_data[block_no*RS_N+i] - err_vec; } /* pg_corr_data[dw_pos] = pg_corr_data[dw_pos] ^ (err_det << bit_pos); __syncthreads(); pg_corr_data[dw_pos] = pg_corr_data[dw_pos] ^ pg_data[dw_pos]; */ }
fb3cbcee0751c6e8f6cac98802f2e7474d504bb5.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/fluid/memory/memcpy.h" #include "paddle/fluid/operators/optimizers/cast_with_ptr.h" #include "paddle/fluid/operators/optimizers/distributed_fused_lamb_init_op.h" #include "paddle/fluid/operators/tensor_to_string.h" #include "paddle/fluid/platform/device/gpu/gpu_launch_config.h" #include "paddle/phi/common/data_type.h" #include "paddle/phi/kernels/funcs/algorithm.h" #include "paddle/phi/kernels/funcs/math_function.h" namespace paddle { namespace operators { struct ParamGradInfo { framework::Tensor *param_t{nullptr}; framework::Tensor *grad_t{nullptr}; size_t idx{0}; size_t numel{0}; size_t numel_with_padding{0}; size_t numel_offset{0}; }; static std::ostream &operator<<(std::ostream &os, const ParamGradInfo &info) { return os << "{Param(" << info.param_t << "),Grad(" << info.grad_t << "),idx(" << info.idx << "),numel(" << info.numel << "),numel_with_padding(" << info.numel_with_padding << "),numel_offset(" << info.numel_offset << "),padding(" << info.numel_offset + info.numel_with_padding << "-" << info.numel_offset + info.numel << "=" << info.numel_with_padding - info.numel << ")}"; } struct ParamGradInfoNumelOffsetCompFunctor { bool operator()(const ParamGradInfo &x, const ParamGradInfo &y) const { return x.numel_offset < y.numel_offset; } bool operator()(const ParamGradInfo &x, 
size_t y) const { return x.numel_offset < y; } bool operator()(size_t x, const ParamGradInfo &y) const { return x < y.numel_offset; } bool operator()(size_t x, size_t y) const { return x < y; } }; static size_t GetAlignSize(size_t n, size_t alignment) { auto remainder = n % alignment; return remainder == 0 ? n : n + alignment - remainder; } // Shard the ParamGradInfo list by the numel size [start_size, end_size) // The final results should be: // // start_size = sum(infos[0:i].numel_with_padding) + start_numel_offset, where // start_numel_offset <= infos[i].numel_with_padding // // end_size = sum(infos[0:j].numel_with_padding) + end_numel_offset, where // end_numel_offset <= infos[j].numel_with_padding static void GetParamGradShardInfo(const std::vector<ParamGradInfo> &infos, size_t start_size, size_t end_size, size_t *start_idx, size_t *end_idx, size_t *start_numel_offset, size_t *end_numel_offset) { VLOG(10) << "NumelOffset: " << string::join_strings(infos, ",", [](const ParamGradInfo &info) { return info.numel_offset; }); VLOG(10) << "start_size = " << start_size << " , end_size = " << end_size; if (infos.empty()) { PADDLE_ENFORCE_EQ(start_size, 0, platform::errors::InvalidArgument( "start_size should be 0.")); PADDLE_ENFORCE_EQ(end_size, 0, platform::errors::InvalidArgument( "end_size should be 0.")); *start_idx = 0; *end_idx = 0; *start_numel_offset = 0; *end_numel_offset = 0; return; } PADDLE_ENFORCE_LT(start_size, end_size, platform::errors::InvalidArgument( "start_size should be less than end_size.")); size_t n = infos.size(); ParamGradInfoNumelOffsetCompFunctor comp; auto i = static_cast<size_t>( std::lower_bound(infos.begin(), infos.end(), start_size, comp) - infos.begin()); if (i == n || infos[i].numel_offset != start_size) { PADDLE_ENFORCE_GT( i, 0, platform::errors::InvalidArgument( "Cannot find suitable sharding which is between [%d, %d)", start_size, end_size)); --i; } PADDLE_ENFORCE_LT( i, n, platform::errors::InvalidArgument( "Cannot find suitable 
sharding which is between [%d, %d)", start_size, end_size)); *start_idx = i; *start_numel_offset = start_size - infos[i].numel_offset; auto j = static_cast<size_t>( std::lower_bound(infos.begin(), infos.end(), end_size, comp) - infos.begin()); *end_idx = j - 1; *end_numel_offset = end_size - infos[j - 1].numel_offset; PADDLE_ENFORCE_GT(*end_numel_offset, 0, platform::errors::InvalidArgument( "Internal error when sharding, this may be a bug " "caused by empty parameter.")); VLOG(10) << "Sharding [start_size=" << start_size << ", end_size=" << end_size << "): " << (*start_idx) << ":" << (*start_numel_offset) << " -> " << (*end_idx) << ":" << (*end_numel_offset); } static size_t FillAlignmentPaddingInfo(std::vector<ParamGradInfo> *infos, size_t alignment, size_t nranks, phi::DataType dtype) { auto sizeof_dtype = paddle::experimental::SizeOf(dtype); PADDLE_ENFORCE_EQ( alignment % sizeof_dtype, 0, platform::errors::InvalidArgument( "The attr(alignment) should be exactly divided by sizeof(T) %d.", sizeof_dtype)); alignment /= sizeof_dtype; size_t total_numel_sum_with_padding = 0; size_t n = infos->size(); for (size_t i = 0; i < n; ++i) { auto &info = (*infos)[i]; size_t numel_with_padding; if (i + 1 == n) { // the total fused numel must be a factor of alignment * nranks numel_with_padding = GetAlignSize(info.numel + total_numel_sum_with_padding, alignment * nranks) - total_numel_sum_with_padding; } else { numel_with_padding = GetAlignSize(info.numel, alignment); } info.numel_with_padding = numel_with_padding; info.numel_offset = total_numel_sum_with_padding; total_numel_sum_with_padding += numel_with_padding; } return total_numel_sum_with_padding; } template <typename T> static T *TensorFillConstant(const platform::CUDADeviceContext &dev_ctx, framework::Tensor *tensor, const framework::DDim &dims, T value) { tensor->Resize(dims); auto *ptr = tensor->mutable_data<T>(dev_ctx.GetPlace()); phi::funcs::SetConstant<platform::CUDADeviceContext, T> set_constant; 
set_constant(dev_ctx, tensor, value); return ptr; } static framework::Tensor CastDataForInitedTensor( const platform::CUDADeviceContext &dev_ctx, framework::Tensor *origin, framework::Tensor *fused_out, size_t numel_offset) { PADDLE_ENFORCE_EQ(origin->IsInitialized(), true, platform::errors::InvalidArgument( "The tensor to be cast should be initialized.")); PADDLE_ENFORCE_EQ(fused_out->dtype(), phi::DataType::FLOAT32, platform::errors::InvalidArgument( "The dst tensor to be cast should be FP32 tensor.")); PADDLE_ENFORCE_EQ(origin->dtype(), phi::DataType::FLOAT16, platform::errors::InvalidArgument( "The src tensor to be cast should be FP16 tensor.")); auto *dst = fused_out->data<float>() + numel_offset; auto *src = origin->data<platform::float16>(); auto numel = origin->numel(); LaunchCastKernel(dev_ctx, src, dst, numel); VLOG(10) << "Cast from FP32 -> FP16, range: [" << numel_offset << ", " << numel_offset + numel << ")" << " , total: [0, " << fused_out->numel() << ")"; framework::DDim fused_out_dim = fused_out->dims(); auto fused_out_numel = fused_out->numel(); fused_out->Resize({fused_out_numel}); auto sliced_tensor = fused_out->Slice(numel_offset, numel + numel_offset); fused_out->Resize(fused_out_dim); return sliced_tensor; } static framework::Tensor CopyAndShareBufferForInitedTensor( framework::Tensor *origin, framework::Tensor *fused_out, size_t numel_offset, gpuStream_t stream) { PADDLE_ENFORCE_EQ( origin->IsInitialized(), true, platform::errors::InvalidArgument( "The tensor to be copied and shared data should be initialized.")); auto dtype = fused_out->type(); PADDLE_ENFORCE_EQ(origin->type(), dtype, platform::errors::InvalidArgument( "The tensor to be copied and shared data should be " "have the same data type.")); auto place = fused_out->place(); PADDLE_ENFORCE_EQ( origin->place(), place, platform::errors::InvalidArgument("The tensor to be copied and shared " "data should be have the same place.")); PADDLE_ENFORCE_EQ( platform::is_gpu_place(place), true, 
platform::errors::InvalidArgument( "The tensor to be copied and shared data should be on GPU place.")); auto numel = origin->numel(); framework::DDim fused_out_dim = fused_out->dims(); auto fused_out_numel = fused_out->numel(); auto sliced_tensor = fused_out->Resize({fused_out_numel}) .Slice(numel_offset, numel + numel_offset); memory::Copy(place, sliced_tensor.data(), place, origin->data(), numel * paddle::experimental::SizeOf(dtype), stream); origin->ShareBufferWith(sliced_tensor); fused_out->Resize(fused_out_dim); VLOG(10) << "Copy and share buffer, range: [" << numel_offset << ", " << numel_offset + numel << ") , total: [0, " << fused_out->numel() << ") , dtype = " << dtype; return sliced_tensor; } static void ShareBufferForNonInitedTensor(framework::Tensor *origin, framework::Tensor *fused_out, size_t numel_offset, const framework::DDim &dims) { PADDLE_ENFORCE_EQ( origin->IsInitialized(), false, platform::errors::InvalidArgument( "The tensor to be shared data should not be initialized.")); framework::DDim fused_out_dim = fused_out->dims(); auto fused_out_numel = fused_out->numel(); auto numel = phi::product(dims); *origin = fused_out->Resize({fused_out_numel}) .Slice(numel_offset, numel + numel_offset); origin->Resize(dims); fused_out->Resize(fused_out_dim); VLOG(10) << "Share buffer for non-inited, range: [" << numel_offset << ", " << numel_offset + numel << "), total: [0, " << fused_out->numel() << ") , dtype = " << fused_out->dtype(); } template <typename T> static void CopyVectorToCPUTensor(const std::vector<T> &src, framework::Tensor *dst) { dst->Resize({static_cast<int64_t>(src.size())}); T *dst_ptr = dst->mutable_data<T>(platform::CPUPlace()); const T *src_ptr = src.data(); auto nbytes = src.size() * sizeof(T); std::memcpy(dst_ptr, src_ptr, nbytes); } static size_t ReorderParamGradInfoList(const std::vector<int> &flags, std::vector<ParamGradInfo> *infos) { size_t n = infos->size(); std::vector<int> cur_flags; cur_flags.reserve(n); for (size_t i = 0; i < 
n; ++i) { auto idx = (*infos)[i].idx; cur_flags.push_back(flags[idx]); } auto origin_infos = *infos; size_t j = 0; for (size_t i = 0; i < n; ++i) { if (cur_flags[i]) { (*infos)[j] = origin_infos[i]; ++j; } } size_t ret_idx = j; for (size_t i = 0; i < n; ++i) { if (!cur_flags[i]) { (*infos)[j] = origin_infos[i]; ++j; } } return ret_idx; } template <typename T> static T ClipByBound(T x, T low_value, T high_value) { if (x < low_value) return low_value; if (x > high_value) return high_value; return x; } template <typename T> class DistributedFusedLambInitOpKernel<platform::CUDADeviceContext, T> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { VLOG(10) << "starts to run DistributedFusedLambInitOp"; auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); auto place = ctx.GetPlace(); auto stream = dev_ctx.stream(); // Step 1: Check Input(Param) and Output(ParamOut), Input(Grad) and // Output(GradOut) auto params = ctx.MultiInput<framework::Tensor>("Param"); auto grads = ctx.MultiInput<framework::Tensor>("Grad"); auto master_params = ctx.MultiOutput<framework::Tensor>("MasterParamOut"); std::vector<ParamGradInfo> fp32_infos, fp16_infos; { PADDLE_ENFORCE_EQ(params.size(), grads.size(), platform::errors::InvalidArgument( "The parameter number and parameter gradient " "number should be the same.")); auto params_out = ctx.MultiOutput<framework::Tensor>("ParamOut"); auto grads_out = ctx.MultiOutput<framework::Tensor>("GradOut"); PADDLE_ENFORCE_EQ( params.size(), params_out.size(), platform::errors::InvalidArgument("Input(Param) and Output(ParamOut) " "should have the same number.")); PADDLE_ENFORCE_EQ( grads.size(), grads_out.size(), platform::errors::InvalidArgument( "Input(Grad) and Output(GradOut) should have the same number.")); size_t n = params.size(); VLOG(10) << "parameter number: " << n; for (size_t i = 0; i < n; ++i) { auto *p = params[i]; auto *g = grads[i]; auto *p_out = 
params_out[i]; auto *g_out = grads_out[i]; PADDLE_ENFORCE_NOT_NULL( p, platform::errors::InvalidArgument( "The %d-th parameter should not be nullptr.", i)); PADDLE_ENFORCE_EQ(p->IsInitialized(), true, platform::errors::InvalidArgument( "The %d-th parameter should be initialized.", i)); PADDLE_ENFORCE_EQ( p->place(), place, platform::errors::InvalidArgument( "The %d-th parameter is not initialized on the right place.", i)); PADDLE_ENFORCE_EQ(p, p_out, platform::errors::InvalidArgument( "The %d-th Input(Param) and Output(ParamOut) " "should be the same tensor.", i)); auto dtype = p->dtype(); PADDLE_ENFORCE_NOT_NULL( g, platform::errors::InvalidArgument( "The %d-th gradient should not be nullptr.", i)); PADDLE_ENFORCE_EQ(g, g_out, platform::errors::InvalidArgument( "The %d-th Input(Grad) and Output(Grad) should " "be the same tensor.")); auto numel = p->numel(); PADDLE_ENFORCE_GT(numel, 0, platform::errors::InvalidArgument( "The %d-th Input(Param) have no elements.")); void *g_data = nullptr; if (g->IsInitialized()) { PADDLE_ENFORCE_EQ(g->dtype(), dtype, platform::errors::InvalidArgument( "The %d-th Input(Param) and Input(Grad) should " "have the same data type %s.", i, dtype)); PADDLE_ENFORCE_EQ(g->dims(), p->dims(), platform::errors::InvalidArgument( "The %d-th Input(Param) and Input(Grad) should " "have the same shape.", i)); g_data = g_out->data(); } ParamGradInfo *info; if (dtype == phi::DataType::FLOAT32) { fp32_infos.emplace_back(); info = &fp32_infos.back(); } else if (dtype == phi::DataType::FLOAT16) { fp16_infos.emplace_back(); info = &fp16_infos.back(); } else { PADDLE_THROW(platform::errors::InvalidArgument( "Unsupported data type %s.", dtype)); } VLOG(10) << "Found " << dtype << " parameter " << i << " shape=[" << p_out->dims() << "] numel=" << numel << " grad.IsInitialized()=" << (g_out->IsInitialized() ? 
"true" : "false"); info->param_t = p_out; info->grad_t = g_out; info->idx = i; info->numel = numel; info->numel_with_padding = 0; // not determined yet info->numel_offset = 0; // not determined yet } } const auto &apply_weight_decay = ctx.Attr<std::vector<int>>("apply_weight_decay"); size_t fp32_wd_end_idx = ReorderParamGradInfoList(apply_weight_decay, &fp32_infos); size_t fp16_wd_end_idx = ReorderParamGradInfoList(apply_weight_decay, &fp16_infos); auto *param_order_t = ctx.Output<framework::Tensor>("ParamOrder"); auto param_num = fp32_infos.size() + fp16_infos.size(); param_order_t->Resize({static_cast<int16_t>(param_num)}); auto *param_order = param_order_t->mutable_data<int>(platform::CPUPlace()); for (size_t i = 0; i < fp32_infos.size(); ++i) { param_order[i] = static_cast<int>(fp32_infos[i].idx); } for (size_t i = 0; i < fp16_infos.size(); ++i) { param_order[i + fp32_infos.size()] = static_cast<int>(fp16_infos[i].idx); } VLOG(10) << "Fill ParamGradInfo ends"; // Step 2: determine the numel_with_padding and numel_offset auto rank = ctx.Attr<int>("rank"); auto nranks = ctx.Attr<int>("nranks"); auto alignment = ctx.Attr<int>("alignment"); VLOG(10) << "rank = " << rank << ", nranks = " << nranks << " , alignment = " << alignment; if (alignment <= 0) { alignment = platform::GpuMinChunkSize(); } PADDLE_ENFORCE_GE(alignment, 1, platform::errors::InvalidArgument( "The attr(alignment) should be larger than 0.")); PADDLE_ENFORCE_EQ(alignment & (alignment - 1), 0, platform::errors::InvalidArgument( "The attr(alignment) should be the power of 2.")); PADDLE_ENFORCE_GE( rank, 0, platform::errors::InvalidArgument( "The attr(rank) should be equal to or larger than 0.")); PADDLE_ENFORCE_LT( rank, nranks, platform::errors::InvalidArgument( "The attr(rank) should be less than the attr(nranks).")); // NOTE: We guarantee that both fp32_numel and fp16_numel can be exactly // divided by alignment and nranks. 
auto fp32_numel = FillAlignmentPaddingInfo(&fp32_infos, alignment, nranks, phi::DataType::FLOAT32); VLOG(10) << "FP32 ParamGradInfo: " << string::join_strings(fp32_infos, " "); auto fp16_numel = FillAlignmentPaddingInfo(&fp16_infos, alignment, nranks, phi::DataType::FLOAT16); VLOG(10) << "FP16 ParamGradInfo: " << string::join_strings(fp16_infos, " "); auto total_numel = fp32_numel + fp16_numel; PADDLE_ENFORCE_LT( total_numel, std::numeric_limits<int>::max(), platform::errors::InvalidArgument("Too many parameter number.")); auto fp32_numel_each_device = fp32_numel / nranks; auto fp16_numel_each_device = fp16_numel / nranks; auto numel_each_device = fp32_numel_each_device + fp16_numel_each_device; VLOG(10) << "Fill padding ends. total_numel = " << total_numel << ", fp32_numel = " << fp32_numel << ", fp16_numel = " << fp16_numel << ", fp32_numel_each_device = " << fp32_numel_each_device << ", fp16_numel_each_device = " << fp16_numel_each_device; // Step 3: allocate output tensor and do initialization float *fused_fp32_param = nullptr, *fused_fp32_grad = nullptr; platform::float16 *fused_fp16_param = nullptr, *fused_fp16_grad = nullptr; framework::Tensor *fp32_p_t = nullptr, *fp16_p_t = nullptr, *fp32_g_t = nullptr, *fp16_g_t = nullptr; std::vector<framework::Tensor *> fp16_master_params; if (total_numel > 0) { fp32_p_t = ctx.Output<framework::Tensor>("FP32FusedParam"); fused_fp32_param = TensorFillConstant<float>( dev_ctx, fp32_p_t, {static_cast<int64_t>(total_numel)}, 0.0f); } if (fp32_numel > 0) { fp32_g_t = ctx.Output<framework::Tensor>("FP32FusedGrad"); fused_fp32_grad = TensorFillConstant<float>( dev_ctx, fp32_g_t, {static_cast<int64_t>(fp32_numel)}, 0.0f); } if (fp16_numel > 0) { fp16_p_t = ctx.Output<framework::Tensor>("FP16FusedParam"); fused_fp16_param = TensorFillConstant<platform::float16>( dev_ctx, fp16_p_t, {static_cast<int64_t>(fp16_numel)}, static_cast<platform::float16>(0)); fp16_g_t = ctx.Output<framework::Tensor>("FP16FusedGrad"); fused_fp16_grad = 
TensorFillConstant<platform::float16>( dev_ctx, fp16_g_t, {static_cast<int64_t>(fp16_numel)}, static_cast<platform::float16>(0)); } VLOG(10) << "Allocate FP32FusedParam/Grad, FP16FusedParam/Grad ends"; // (1) For FP32FusedParam, memcpy for fp32 param and then share data, cast // for fp16 master weight // (2) For FP16FusedParam, memcpy and then share data // (3) For FP32FusedGrad/FP16FusedGrad, memcpy if gradient has been inited for (const auto &info : fp32_infos) { auto sliced_tensor = CopyAndShareBufferForInitedTensor( info.param_t, fp32_p_t, info.numel_offset, stream); master_params[info.idx]->Resize(info.param_t->dims()); master_params[info.idx]->ShareBufferWith(sliced_tensor); PADDLE_ENFORCE_EQ(master_params[info.idx]->mutable_data<float>(place), sliced_tensor.data<float>(), platform::errors::InvalidArgument( "Invalid master weight tensor pointer.")); if (info.grad_t->IsInitialized()) { CopyAndShareBufferForInitedTensor(info.grad_t, fp32_g_t, info.numel_offset, stream); } else { ShareBufferForNonInitedTensor(info.grad_t, fp32_g_t, info.numel_offset, info.param_t->dims()); } } size_t fp16_numel_offset = 0; if (fp32_numel > 0) { auto last_fp32_info = fp32_infos.back(); fp16_numel_offset = last_fp32_info.numel_offset + last_fp32_info.numel_with_padding; } for (const auto &info : fp16_infos) { auto master_weight_offset = info.numel_offset + fp16_numel_offset; auto sliced_tensor = CastDataForInitedTensor( dev_ctx, info.param_t, fp32_p_t, master_weight_offset); master_params[info.idx]->Resize(info.param_t->dims()); master_params[info.idx]->ShareBufferWith(sliced_tensor); CopyAndShareBufferForInitedTensor(info.param_t, fp16_p_t, info.numel_offset, stream); PADDLE_ENFORCE_EQ(master_params[info.idx]->mutable_data<float>(place), sliced_tensor.data<float>(), platform::errors::InvalidArgument( "Invalid master weight tensor pointer.")); if (info.grad_t->IsInitialized()) { CopyAndShareBufferForInitedTensor(info.grad_t, fp16_g_t, info.numel_offset, stream); } else { 
ShareBufferForNonInitedTensor(info.grad_t, fp16_g_t, info.numel_offset, info.param_t->dims()); } } VLOG(10) << "Copy/share data for Param/Grad ends"; // Step 4: For Moment1, Moment2, Beta1Pow, Beta2Pow, just fill constant TensorFillConstant<float>(dev_ctx, ctx.Output<framework::Tensor>("Moment1"), {static_cast<int64_t>(numel_each_device)}, 0.0f); TensorFillConstant<float>(dev_ctx, ctx.Output<framework::Tensor>("Moment2"), {static_cast<int64_t>(numel_each_device)}, 0.0f); TensorFillConstant<float>(dev_ctx, ctx.Output<framework::Tensor>("Beta1Pow"), {1}, ctx.Attr<float>("beta1")); TensorFillConstant<float>(dev_ctx, ctx.Output<framework::Tensor>("Beta2Pow"), {1}, ctx.Attr<float>("beta2")); VLOG(10) << "Init Moment and BetaPow ends"; // Step 5: Do sharding size_t fp32_start_idx, fp32_end_idx, fp32_start_numel_offset, fp32_end_numel_offset; GetParamGradShardInfo(fp32_infos, rank * fp32_numel_each_device, (rank + 1) * fp32_numel_each_device, &fp32_start_idx, &fp32_end_idx, &fp32_start_numel_offset, &fp32_end_numel_offset); size_t fp16_start_idx, fp16_end_idx, fp16_start_numel_offset, fp16_end_numel_offset; GetParamGradShardInfo(fp16_infos, rank * fp16_numel_each_device, (rank + 1) * fp16_numel_each_device, &fp16_start_idx, &fp16_end_idx, &fp16_start_numel_offset, &fp16_end_numel_offset); size_t fp32_local_param_num = fp32_numel_each_device > 0 ? fp32_end_idx - fp32_start_idx + 1 : 0; size_t fp16_local_param_num = fp16_numel_each_device > 0 ? 
fp16_end_idx - fp16_start_idx + 1 : 0; size_t total_local_param_num = fp32_local_param_num + fp16_local_param_num; VLOG(10) << "Found the sharding arguments"; auto *param_info_t = ctx.Output<framework::Tensor>("ParamInfo"); param_info_t->Resize({8}); auto *param_info = param_info_t->mutable_data<int>(platform::CPUPlace()); param_info[0] = static_cast<int>(fp32_start_idx); param_info[1] = static_cast<int>(fp32_local_param_num); param_info[2] = static_cast<int>(fp32_infos.size()); param_info[3] = ClipByBound<int>(fp32_wd_end_idx, fp32_start_idx, fp32_start_idx + fp32_local_param_num) - static_cast<int>(fp32_start_idx); param_info[4] = static_cast<int>(fp16_start_idx + fp32_infos.size()); param_info[5] = static_cast<int>(fp16_local_param_num); param_info[6] = static_cast<int>(fp16_infos.size()); param_info[7] = ClipByBound<int>(fp16_wd_end_idx, fp16_start_idx, fp16_start_idx + fp16_local_param_num) - static_cast<int>(fp16_start_idx); VLOG(10) << "Start FP32 idx: " << param_info[0]; VLOG(10) << "Local FP32 param num: " << param_info[1]; VLOG(10) << "Global FP32 param num: " << param_info[2]; VLOG(10) << "Start FP16 idx: " << param_info[4]; VLOG(10) << "Local FP16 param num: " << param_info[5]; VLOG(10) << "Global FP16 param num: " << param_info[6]; std::vector<int> numel_offsets; numel_offsets.reserve(params.size() + 1); for (const auto &info : fp32_infos) { numel_offsets.push_back(info.numel_offset); } for (const auto &info : fp16_infos) { numel_offsets.push_back(info.numel_offset + fp16_numel_offset); } numel_offsets.push_back(fp32_numel + fp16_numel); PADDLE_ENFORCE_EQ(numel_offsets.size(), params.size() + 1, platform::errors::InvalidArgument( "The numel_offsets number must be one larger than " "the parameter number.")); VLOG(10) << "Total numel offset: " << FlattenToString(numel_offsets); std::vector<int> fp32_partial_numel_offsets; fp32_partial_numel_offsets.reserve(fp32_local_param_num + 1); fp32_partial_numel_offsets.push_back(0); // Fill the 
partial_numel_offsets for (size_t i = fp32_start_idx; i < fp32_start_idx + fp32_local_param_num; ++i) { size_t valid_start_n = 0; if (i == fp32_start_idx) { valid_start_n = fp32_start_numel_offset; } size_t end_n = fp32_infos[i].numel_with_padding; if (i + 1 == fp32_start_idx + fp32_local_param_num) { end_n = ::min(end_n, fp32_end_numel_offset); } PADDLE_ENFORCE_NE(valid_start_n, end_n, platform::errors::InvalidArgument( "Indices sharding error. This may be a bug.")); VLOG(10) << "FP32 Partial numel = [" << valid_start_n + fp32_infos[i].numel << "," << end_n + fp32_infos[i].numel; auto len = end_n - valid_start_n; fp32_partial_numel_offsets.push_back(fp32_partial_numel_offsets.back() + len); } std::vector<int> fp16_partial_numel_offsets; fp16_partial_numel_offsets.reserve(fp16_local_param_num + 1); fp16_partial_numel_offsets.push_back(0); for (size_t i = fp16_start_idx; i < fp16_start_idx + fp16_local_param_num; ++i) { size_t valid_start_n = 0; if (i == fp16_start_idx) { valid_start_n = fp16_start_numel_offset; } size_t end_n = fp16_infos[i].numel_with_padding; if (i + 1 == fp16_start_idx + fp16_local_param_num) { end_n = ::min(end_n, fp16_end_numel_offset); } PADDLE_ENFORCE_NE(valid_start_n, end_n, platform::errors::InvalidArgument( "Indices sharding error. 
This may be a bug.")); auto len = end_n - valid_start_n; fp16_partial_numel_offsets.push_back(fp16_partial_numel_offsets.back() + len); } CopyVectorToCPUTensor(numel_offsets, ctx.Output<framework::Tensor>("FusedParamOffsets")); CopyVectorToCPUTensor( fp32_partial_numel_offsets, ctx.Output<framework::Tensor>("FP32ShardFusedParamOffsets")); CopyVectorToCPUTensor( fp16_partial_numel_offsets, ctx.Output<framework::Tensor>("FP16ShardFusedParamOffsets")); auto *global_scale = ctx.Output<framework::Tensor>("GlobalScale"); if (!global_scale->IsInitialized()) { TensorFillConstant<float>(dev_ctx, global_scale, {1}, 1.0f); } VLOG(10) << "Init global scale ends"; dev_ctx.Wait(); VLOG(10) << "Wait for H2D copy"; } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL( distributed_fused_lamb_init, ops::DistributedFusedLambInitOpKernel<plat::CUDADeviceContext, float>);
fb3cbcee0751c6e8f6cac98802f2e7474d504bb5.cu
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/fluid/memory/memcpy.h" #include "paddle/fluid/operators/optimizers/cast_with_ptr.h" #include "paddle/fluid/operators/optimizers/distributed_fused_lamb_init_op.h" #include "paddle/fluid/operators/tensor_to_string.h" #include "paddle/fluid/platform/device/gpu/gpu_launch_config.h" #include "paddle/phi/common/data_type.h" #include "paddle/phi/kernels/funcs/algorithm.h" #include "paddle/phi/kernels/funcs/math_function.h" namespace paddle { namespace operators { struct ParamGradInfo { framework::Tensor *param_t{nullptr}; framework::Tensor *grad_t{nullptr}; size_t idx{0}; size_t numel{0}; size_t numel_with_padding{0}; size_t numel_offset{0}; }; static std::ostream &operator<<(std::ostream &os, const ParamGradInfo &info) { return os << "{Param(" << info.param_t << "),Grad(" << info.grad_t << "),idx(" << info.idx << "),numel(" << info.numel << "),numel_with_padding(" << info.numel_with_padding << "),numel_offset(" << info.numel_offset << "),padding(" << info.numel_offset + info.numel_with_padding << "-" << info.numel_offset + info.numel << "=" << info.numel_with_padding - info.numel << ")}"; } struct ParamGradInfoNumelOffsetCompFunctor { bool operator()(const ParamGradInfo &x, const ParamGradInfo &y) const { return x.numel_offset < y.numel_offset; } bool operator()(const ParamGradInfo &x, size_t y) const { return x.numel_offset < y; } bool 
operator()(size_t x, const ParamGradInfo &y) const { return x < y.numel_offset; } bool operator()(size_t x, size_t y) const { return x < y; } }; static size_t GetAlignSize(size_t n, size_t alignment) { auto remainder = n % alignment; return remainder == 0 ? n : n + alignment - remainder; } // Shard the ParamGradInfo list by the numel size [start_size, end_size) // The final results should be: // // start_size = sum(infos[0:i].numel_with_padding) + start_numel_offset, where // start_numel_offset <= infos[i].numel_with_padding // // end_size = sum(infos[0:j].numel_with_padding) + end_numel_offset, where // end_numel_offset <= infos[j].numel_with_padding static void GetParamGradShardInfo(const std::vector<ParamGradInfo> &infos, size_t start_size, size_t end_size, size_t *start_idx, size_t *end_idx, size_t *start_numel_offset, size_t *end_numel_offset) { VLOG(10) << "NumelOffset: " << string::join_strings(infos, ",", [](const ParamGradInfo &info) { return info.numel_offset; }); VLOG(10) << "start_size = " << start_size << " , end_size = " << end_size; if (infos.empty()) { PADDLE_ENFORCE_EQ(start_size, 0, platform::errors::InvalidArgument( "start_size should be 0.")); PADDLE_ENFORCE_EQ(end_size, 0, platform::errors::InvalidArgument( "end_size should be 0.")); *start_idx = 0; *end_idx = 0; *start_numel_offset = 0; *end_numel_offset = 0; return; } PADDLE_ENFORCE_LT(start_size, end_size, platform::errors::InvalidArgument( "start_size should be less than end_size.")); size_t n = infos.size(); ParamGradInfoNumelOffsetCompFunctor comp; auto i = static_cast<size_t>( std::lower_bound(infos.begin(), infos.end(), start_size, comp) - infos.begin()); if (i == n || infos[i].numel_offset != start_size) { PADDLE_ENFORCE_GT( i, 0, platform::errors::InvalidArgument( "Cannot find suitable sharding which is between [%d, %d)", start_size, end_size)); --i; } PADDLE_ENFORCE_LT( i, n, platform::errors::InvalidArgument( "Cannot find suitable sharding which is between [%d, %d)", start_size, 
end_size)); *start_idx = i; *start_numel_offset = start_size - infos[i].numel_offset; auto j = static_cast<size_t>( std::lower_bound(infos.begin(), infos.end(), end_size, comp) - infos.begin()); *end_idx = j - 1; *end_numel_offset = end_size - infos[j - 1].numel_offset; PADDLE_ENFORCE_GT(*end_numel_offset, 0, platform::errors::InvalidArgument( "Internal error when sharding, this may be a bug " "caused by empty parameter.")); VLOG(10) << "Sharding [start_size=" << start_size << ", end_size=" << end_size << "): " << (*start_idx) << ":" << (*start_numel_offset) << " -> " << (*end_idx) << ":" << (*end_numel_offset); } static size_t FillAlignmentPaddingInfo(std::vector<ParamGradInfo> *infos, size_t alignment, size_t nranks, phi::DataType dtype) { auto sizeof_dtype = paddle::experimental::SizeOf(dtype); PADDLE_ENFORCE_EQ( alignment % sizeof_dtype, 0, platform::errors::InvalidArgument( "The attr(alignment) should be exactly divided by sizeof(T) %d.", sizeof_dtype)); alignment /= sizeof_dtype; size_t total_numel_sum_with_padding = 0; size_t n = infos->size(); for (size_t i = 0; i < n; ++i) { auto &info = (*infos)[i]; size_t numel_with_padding; if (i + 1 == n) { // the total fused numel must be a factor of alignment * nranks numel_with_padding = GetAlignSize(info.numel + total_numel_sum_with_padding, alignment * nranks) - total_numel_sum_with_padding; } else { numel_with_padding = GetAlignSize(info.numel, alignment); } info.numel_with_padding = numel_with_padding; info.numel_offset = total_numel_sum_with_padding; total_numel_sum_with_padding += numel_with_padding; } return total_numel_sum_with_padding; } template <typename T> static T *TensorFillConstant(const platform::CUDADeviceContext &dev_ctx, framework::Tensor *tensor, const framework::DDim &dims, T value) { tensor->Resize(dims); auto *ptr = tensor->mutable_data<T>(dev_ctx.GetPlace()); phi::funcs::SetConstant<platform::CUDADeviceContext, T> set_constant; set_constant(dev_ctx, tensor, value); return ptr; } static 
framework::Tensor CastDataForInitedTensor( const platform::CUDADeviceContext &dev_ctx, framework::Tensor *origin, framework::Tensor *fused_out, size_t numel_offset) { PADDLE_ENFORCE_EQ(origin->IsInitialized(), true, platform::errors::InvalidArgument( "The tensor to be cast should be initialized.")); PADDLE_ENFORCE_EQ(fused_out->dtype(), phi::DataType::FLOAT32, platform::errors::InvalidArgument( "The dst tensor to be cast should be FP32 tensor.")); PADDLE_ENFORCE_EQ(origin->dtype(), phi::DataType::FLOAT16, platform::errors::InvalidArgument( "The src tensor to be cast should be FP16 tensor.")); auto *dst = fused_out->data<float>() + numel_offset; auto *src = origin->data<platform::float16>(); auto numel = origin->numel(); LaunchCastKernel(dev_ctx, src, dst, numel); VLOG(10) << "Cast from FP32 -> FP16, range: [" << numel_offset << ", " << numel_offset + numel << ")" << " , total: [0, " << fused_out->numel() << ")"; framework::DDim fused_out_dim = fused_out->dims(); auto fused_out_numel = fused_out->numel(); fused_out->Resize({fused_out_numel}); auto sliced_tensor = fused_out->Slice(numel_offset, numel + numel_offset); fused_out->Resize(fused_out_dim); return sliced_tensor; } static framework::Tensor CopyAndShareBufferForInitedTensor( framework::Tensor *origin, framework::Tensor *fused_out, size_t numel_offset, gpuStream_t stream) { PADDLE_ENFORCE_EQ( origin->IsInitialized(), true, platform::errors::InvalidArgument( "The tensor to be copied and shared data should be initialized.")); auto dtype = fused_out->type(); PADDLE_ENFORCE_EQ(origin->type(), dtype, platform::errors::InvalidArgument( "The tensor to be copied and shared data should be " "have the same data type.")); auto place = fused_out->place(); PADDLE_ENFORCE_EQ( origin->place(), place, platform::errors::InvalidArgument("The tensor to be copied and shared " "data should be have the same place.")); PADDLE_ENFORCE_EQ( platform::is_gpu_place(place), true, platform::errors::InvalidArgument( "The tensor to be copied 
and shared data should be on GPU place.")); auto numel = origin->numel(); framework::DDim fused_out_dim = fused_out->dims(); auto fused_out_numel = fused_out->numel(); auto sliced_tensor = fused_out->Resize({fused_out_numel}) .Slice(numel_offset, numel + numel_offset); memory::Copy(place, sliced_tensor.data(), place, origin->data(), numel * paddle::experimental::SizeOf(dtype), stream); origin->ShareBufferWith(sliced_tensor); fused_out->Resize(fused_out_dim); VLOG(10) << "Copy and share buffer, range: [" << numel_offset << ", " << numel_offset + numel << ") , total: [0, " << fused_out->numel() << ") , dtype = " << dtype; return sliced_tensor; } static void ShareBufferForNonInitedTensor(framework::Tensor *origin, framework::Tensor *fused_out, size_t numel_offset, const framework::DDim &dims) { PADDLE_ENFORCE_EQ( origin->IsInitialized(), false, platform::errors::InvalidArgument( "The tensor to be shared data should not be initialized.")); framework::DDim fused_out_dim = fused_out->dims(); auto fused_out_numel = fused_out->numel(); auto numel = phi::product(dims); *origin = fused_out->Resize({fused_out_numel}) .Slice(numel_offset, numel + numel_offset); origin->Resize(dims); fused_out->Resize(fused_out_dim); VLOG(10) << "Share buffer for non-inited, range: [" << numel_offset << ", " << numel_offset + numel << "), total: [0, " << fused_out->numel() << ") , dtype = " << fused_out->dtype(); } template <typename T> static void CopyVectorToCPUTensor(const std::vector<T> &src, framework::Tensor *dst) { dst->Resize({static_cast<int64_t>(src.size())}); T *dst_ptr = dst->mutable_data<T>(platform::CPUPlace()); const T *src_ptr = src.data(); auto nbytes = src.size() * sizeof(T); std::memcpy(dst_ptr, src_ptr, nbytes); } static size_t ReorderParamGradInfoList(const std::vector<int> &flags, std::vector<ParamGradInfo> *infos) { size_t n = infos->size(); std::vector<int> cur_flags; cur_flags.reserve(n); for (size_t i = 0; i < n; ++i) { auto idx = (*infos)[i].idx; 
cur_flags.push_back(flags[idx]); } auto origin_infos = *infos; size_t j = 0; for (size_t i = 0; i < n; ++i) { if (cur_flags[i]) { (*infos)[j] = origin_infos[i]; ++j; } } size_t ret_idx = j; for (size_t i = 0; i < n; ++i) { if (!cur_flags[i]) { (*infos)[j] = origin_infos[i]; ++j; } } return ret_idx; } template <typename T> static T ClipByBound(T x, T low_value, T high_value) { if (x < low_value) return low_value; if (x > high_value) return high_value; return x; } template <typename T> class DistributedFusedLambInitOpKernel<platform::CUDADeviceContext, T> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { VLOG(10) << "starts to run DistributedFusedLambInitOp"; auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); auto place = ctx.GetPlace(); auto stream = dev_ctx.stream(); // Step 1: Check Input(Param) and Output(ParamOut), Input(Grad) and // Output(GradOut) auto params = ctx.MultiInput<framework::Tensor>("Param"); auto grads = ctx.MultiInput<framework::Tensor>("Grad"); auto master_params = ctx.MultiOutput<framework::Tensor>("MasterParamOut"); std::vector<ParamGradInfo> fp32_infos, fp16_infos; { PADDLE_ENFORCE_EQ(params.size(), grads.size(), platform::errors::InvalidArgument( "The parameter number and parameter gradient " "number should be the same.")); auto params_out = ctx.MultiOutput<framework::Tensor>("ParamOut"); auto grads_out = ctx.MultiOutput<framework::Tensor>("GradOut"); PADDLE_ENFORCE_EQ( params.size(), params_out.size(), platform::errors::InvalidArgument("Input(Param) and Output(ParamOut) " "should have the same number.")); PADDLE_ENFORCE_EQ( grads.size(), grads_out.size(), platform::errors::InvalidArgument( "Input(Grad) and Output(GradOut) should have the same number.")); size_t n = params.size(); VLOG(10) << "parameter number: " << n; for (size_t i = 0; i < n; ++i) { auto *p = params[i]; auto *g = grads[i]; auto *p_out = params_out[i]; auto *g_out = grads_out[i]; 
PADDLE_ENFORCE_NOT_NULL( p, platform::errors::InvalidArgument( "The %d-th parameter should not be nullptr.", i)); PADDLE_ENFORCE_EQ(p->IsInitialized(), true, platform::errors::InvalidArgument( "The %d-th parameter should be initialized.", i)); PADDLE_ENFORCE_EQ( p->place(), place, platform::errors::InvalidArgument( "The %d-th parameter is not initialized on the right place.", i)); PADDLE_ENFORCE_EQ(p, p_out, platform::errors::InvalidArgument( "The %d-th Input(Param) and Output(ParamOut) " "should be the same tensor.", i)); auto dtype = p->dtype(); PADDLE_ENFORCE_NOT_NULL( g, platform::errors::InvalidArgument( "The %d-th gradient should not be nullptr.", i)); PADDLE_ENFORCE_EQ(g, g_out, platform::errors::InvalidArgument( "The %d-th Input(Grad) and Output(Grad) should " "be the same tensor.")); auto numel = p->numel(); PADDLE_ENFORCE_GT(numel, 0, platform::errors::InvalidArgument( "The %d-th Input(Param) have no elements.")); void *g_data = nullptr; if (g->IsInitialized()) { PADDLE_ENFORCE_EQ(g->dtype(), dtype, platform::errors::InvalidArgument( "The %d-th Input(Param) and Input(Grad) should " "have the same data type %s.", i, dtype)); PADDLE_ENFORCE_EQ(g->dims(), p->dims(), platform::errors::InvalidArgument( "The %d-th Input(Param) and Input(Grad) should " "have the same shape.", i)); g_data = g_out->data(); } ParamGradInfo *info; if (dtype == phi::DataType::FLOAT32) { fp32_infos.emplace_back(); info = &fp32_infos.back(); } else if (dtype == phi::DataType::FLOAT16) { fp16_infos.emplace_back(); info = &fp16_infos.back(); } else { PADDLE_THROW(platform::errors::InvalidArgument( "Unsupported data type %s.", dtype)); } VLOG(10) << "Found " << dtype << " parameter " << i << " shape=[" << p_out->dims() << "] numel=" << numel << " grad.IsInitialized()=" << (g_out->IsInitialized() ? 
"true" : "false"); info->param_t = p_out; info->grad_t = g_out; info->idx = i; info->numel = numel; info->numel_with_padding = 0; // not determined yet info->numel_offset = 0; // not determined yet } } const auto &apply_weight_decay = ctx.Attr<std::vector<int>>("apply_weight_decay"); size_t fp32_wd_end_idx = ReorderParamGradInfoList(apply_weight_decay, &fp32_infos); size_t fp16_wd_end_idx = ReorderParamGradInfoList(apply_weight_decay, &fp16_infos); auto *param_order_t = ctx.Output<framework::Tensor>("ParamOrder"); auto param_num = fp32_infos.size() + fp16_infos.size(); param_order_t->Resize({static_cast<int16_t>(param_num)}); auto *param_order = param_order_t->mutable_data<int>(platform::CPUPlace()); for (size_t i = 0; i < fp32_infos.size(); ++i) { param_order[i] = static_cast<int>(fp32_infos[i].idx); } for (size_t i = 0; i < fp16_infos.size(); ++i) { param_order[i + fp32_infos.size()] = static_cast<int>(fp16_infos[i].idx); } VLOG(10) << "Fill ParamGradInfo ends"; // Step 2: determine the numel_with_padding and numel_offset auto rank = ctx.Attr<int>("rank"); auto nranks = ctx.Attr<int>("nranks"); auto alignment = ctx.Attr<int>("alignment"); VLOG(10) << "rank = " << rank << ", nranks = " << nranks << " , alignment = " << alignment; if (alignment <= 0) { alignment = platform::GpuMinChunkSize(); } PADDLE_ENFORCE_GE(alignment, 1, platform::errors::InvalidArgument( "The attr(alignment) should be larger than 0.")); PADDLE_ENFORCE_EQ(alignment & (alignment - 1), 0, platform::errors::InvalidArgument( "The attr(alignment) should be the power of 2.")); PADDLE_ENFORCE_GE( rank, 0, platform::errors::InvalidArgument( "The attr(rank) should be equal to or larger than 0.")); PADDLE_ENFORCE_LT( rank, nranks, platform::errors::InvalidArgument( "The attr(rank) should be less than the attr(nranks).")); // NOTE: We guarantee that both fp32_numel and fp16_numel can be exactly // divided by alignment and nranks. 
auto fp32_numel = FillAlignmentPaddingInfo(&fp32_infos, alignment, nranks, phi::DataType::FLOAT32); VLOG(10) << "FP32 ParamGradInfo: " << string::join_strings(fp32_infos, " "); auto fp16_numel = FillAlignmentPaddingInfo(&fp16_infos, alignment, nranks, phi::DataType::FLOAT16); VLOG(10) << "FP16 ParamGradInfo: " << string::join_strings(fp16_infos, " "); auto total_numel = fp32_numel + fp16_numel; PADDLE_ENFORCE_LT( total_numel, std::numeric_limits<int>::max(), platform::errors::InvalidArgument("Too many parameter number.")); auto fp32_numel_each_device = fp32_numel / nranks; auto fp16_numel_each_device = fp16_numel / nranks; auto numel_each_device = fp32_numel_each_device + fp16_numel_each_device; VLOG(10) << "Fill padding ends. total_numel = " << total_numel << ", fp32_numel = " << fp32_numel << ", fp16_numel = " << fp16_numel << ", fp32_numel_each_device = " << fp32_numel_each_device << ", fp16_numel_each_device = " << fp16_numel_each_device; // Step 3: allocate output tensor and do initialization float *fused_fp32_param = nullptr, *fused_fp32_grad = nullptr; platform::float16 *fused_fp16_param = nullptr, *fused_fp16_grad = nullptr; framework::Tensor *fp32_p_t = nullptr, *fp16_p_t = nullptr, *fp32_g_t = nullptr, *fp16_g_t = nullptr; std::vector<framework::Tensor *> fp16_master_params; if (total_numel > 0) { fp32_p_t = ctx.Output<framework::Tensor>("FP32FusedParam"); fused_fp32_param = TensorFillConstant<float>( dev_ctx, fp32_p_t, {static_cast<int64_t>(total_numel)}, 0.0f); } if (fp32_numel > 0) { fp32_g_t = ctx.Output<framework::Tensor>("FP32FusedGrad"); fused_fp32_grad = TensorFillConstant<float>( dev_ctx, fp32_g_t, {static_cast<int64_t>(fp32_numel)}, 0.0f); } if (fp16_numel > 0) { fp16_p_t = ctx.Output<framework::Tensor>("FP16FusedParam"); fused_fp16_param = TensorFillConstant<platform::float16>( dev_ctx, fp16_p_t, {static_cast<int64_t>(fp16_numel)}, static_cast<platform::float16>(0)); fp16_g_t = ctx.Output<framework::Tensor>("FP16FusedGrad"); fused_fp16_grad = 
TensorFillConstant<platform::float16>( dev_ctx, fp16_g_t, {static_cast<int64_t>(fp16_numel)}, static_cast<platform::float16>(0)); } VLOG(10) << "Allocate FP32FusedParam/Grad, FP16FusedParam/Grad ends"; // (1) For FP32FusedParam, memcpy for fp32 param and then share data, cast // for fp16 master weight // (2) For FP16FusedParam, memcpy and then share data // (3) For FP32FusedGrad/FP16FusedGrad, memcpy if gradient has been inited for (const auto &info : fp32_infos) { auto sliced_tensor = CopyAndShareBufferForInitedTensor( info.param_t, fp32_p_t, info.numel_offset, stream); master_params[info.idx]->Resize(info.param_t->dims()); master_params[info.idx]->ShareBufferWith(sliced_tensor); PADDLE_ENFORCE_EQ(master_params[info.idx]->mutable_data<float>(place), sliced_tensor.data<float>(), platform::errors::InvalidArgument( "Invalid master weight tensor pointer.")); if (info.grad_t->IsInitialized()) { CopyAndShareBufferForInitedTensor(info.grad_t, fp32_g_t, info.numel_offset, stream); } else { ShareBufferForNonInitedTensor(info.grad_t, fp32_g_t, info.numel_offset, info.param_t->dims()); } } size_t fp16_numel_offset = 0; if (fp32_numel > 0) { auto last_fp32_info = fp32_infos.back(); fp16_numel_offset = last_fp32_info.numel_offset + last_fp32_info.numel_with_padding; } for (const auto &info : fp16_infos) { auto master_weight_offset = info.numel_offset + fp16_numel_offset; auto sliced_tensor = CastDataForInitedTensor( dev_ctx, info.param_t, fp32_p_t, master_weight_offset); master_params[info.idx]->Resize(info.param_t->dims()); master_params[info.idx]->ShareBufferWith(sliced_tensor); CopyAndShareBufferForInitedTensor(info.param_t, fp16_p_t, info.numel_offset, stream); PADDLE_ENFORCE_EQ(master_params[info.idx]->mutable_data<float>(place), sliced_tensor.data<float>(), platform::errors::InvalidArgument( "Invalid master weight tensor pointer.")); if (info.grad_t->IsInitialized()) { CopyAndShareBufferForInitedTensor(info.grad_t, fp16_g_t, info.numel_offset, stream); } else { 
ShareBufferForNonInitedTensor(info.grad_t, fp16_g_t, info.numel_offset, info.param_t->dims()); } } VLOG(10) << "Copy/share data for Param/Grad ends"; // Step 4: For Moment1, Moment2, Beta1Pow, Beta2Pow, just fill constant TensorFillConstant<float>(dev_ctx, ctx.Output<framework::Tensor>("Moment1"), {static_cast<int64_t>(numel_each_device)}, 0.0f); TensorFillConstant<float>(dev_ctx, ctx.Output<framework::Tensor>("Moment2"), {static_cast<int64_t>(numel_each_device)}, 0.0f); TensorFillConstant<float>(dev_ctx, ctx.Output<framework::Tensor>("Beta1Pow"), {1}, ctx.Attr<float>("beta1")); TensorFillConstant<float>(dev_ctx, ctx.Output<framework::Tensor>("Beta2Pow"), {1}, ctx.Attr<float>("beta2")); VLOG(10) << "Init Moment and BetaPow ends"; // Step 5: Do sharding size_t fp32_start_idx, fp32_end_idx, fp32_start_numel_offset, fp32_end_numel_offset; GetParamGradShardInfo(fp32_infos, rank * fp32_numel_each_device, (rank + 1) * fp32_numel_each_device, &fp32_start_idx, &fp32_end_idx, &fp32_start_numel_offset, &fp32_end_numel_offset); size_t fp16_start_idx, fp16_end_idx, fp16_start_numel_offset, fp16_end_numel_offset; GetParamGradShardInfo(fp16_infos, rank * fp16_numel_each_device, (rank + 1) * fp16_numel_each_device, &fp16_start_idx, &fp16_end_idx, &fp16_start_numel_offset, &fp16_end_numel_offset); size_t fp32_local_param_num = fp32_numel_each_device > 0 ? fp32_end_idx - fp32_start_idx + 1 : 0; size_t fp16_local_param_num = fp16_numel_each_device > 0 ? 
fp16_end_idx - fp16_start_idx + 1 : 0; size_t total_local_param_num = fp32_local_param_num + fp16_local_param_num; VLOG(10) << "Found the sharding arguments"; auto *param_info_t = ctx.Output<framework::Tensor>("ParamInfo"); param_info_t->Resize({8}); auto *param_info = param_info_t->mutable_data<int>(platform::CPUPlace()); param_info[0] = static_cast<int>(fp32_start_idx); param_info[1] = static_cast<int>(fp32_local_param_num); param_info[2] = static_cast<int>(fp32_infos.size()); param_info[3] = ClipByBound<int>(fp32_wd_end_idx, fp32_start_idx, fp32_start_idx + fp32_local_param_num) - static_cast<int>(fp32_start_idx); param_info[4] = static_cast<int>(fp16_start_idx + fp32_infos.size()); param_info[5] = static_cast<int>(fp16_local_param_num); param_info[6] = static_cast<int>(fp16_infos.size()); param_info[7] = ClipByBound<int>(fp16_wd_end_idx, fp16_start_idx, fp16_start_idx + fp16_local_param_num) - static_cast<int>(fp16_start_idx); VLOG(10) << "Start FP32 idx: " << param_info[0]; VLOG(10) << "Local FP32 param num: " << param_info[1]; VLOG(10) << "Global FP32 param num: " << param_info[2]; VLOG(10) << "Start FP16 idx: " << param_info[4]; VLOG(10) << "Local FP16 param num: " << param_info[5]; VLOG(10) << "Global FP16 param num: " << param_info[6]; std::vector<int> numel_offsets; numel_offsets.reserve(params.size() + 1); for (const auto &info : fp32_infos) { numel_offsets.push_back(info.numel_offset); } for (const auto &info : fp16_infos) { numel_offsets.push_back(info.numel_offset + fp16_numel_offset); } numel_offsets.push_back(fp32_numel + fp16_numel); PADDLE_ENFORCE_EQ(numel_offsets.size(), params.size() + 1, platform::errors::InvalidArgument( "The numel_offsets number must be one larger than " "the parameter number.")); VLOG(10) << "Total numel offset: " << FlattenToString(numel_offsets); std::vector<int> fp32_partial_numel_offsets; fp32_partial_numel_offsets.reserve(fp32_local_param_num + 1); fp32_partial_numel_offsets.push_back(0); // Fill the 
partial_numel_offsets for (size_t i = fp32_start_idx; i < fp32_start_idx + fp32_local_param_num; ++i) { size_t valid_start_n = 0; if (i == fp32_start_idx) { valid_start_n = fp32_start_numel_offset; } size_t end_n = fp32_infos[i].numel_with_padding; if (i + 1 == fp32_start_idx + fp32_local_param_num) { end_n = std::min(end_n, fp32_end_numel_offset); } PADDLE_ENFORCE_NE(valid_start_n, end_n, platform::errors::InvalidArgument( "Indices sharding error. This may be a bug.")); VLOG(10) << "FP32 Partial numel = [" << valid_start_n + fp32_infos[i].numel << "," << end_n + fp32_infos[i].numel; auto len = end_n - valid_start_n; fp32_partial_numel_offsets.push_back(fp32_partial_numel_offsets.back() + len); } std::vector<int> fp16_partial_numel_offsets; fp16_partial_numel_offsets.reserve(fp16_local_param_num + 1); fp16_partial_numel_offsets.push_back(0); for (size_t i = fp16_start_idx; i < fp16_start_idx + fp16_local_param_num; ++i) { size_t valid_start_n = 0; if (i == fp16_start_idx) { valid_start_n = fp16_start_numel_offset; } size_t end_n = fp16_infos[i].numel_with_padding; if (i + 1 == fp16_start_idx + fp16_local_param_num) { end_n = std::min(end_n, fp16_end_numel_offset); } PADDLE_ENFORCE_NE(valid_start_n, end_n, platform::errors::InvalidArgument( "Indices sharding error. 
This may be a bug.")); auto len = end_n - valid_start_n; fp16_partial_numel_offsets.push_back(fp16_partial_numel_offsets.back() + len); } CopyVectorToCPUTensor(numel_offsets, ctx.Output<framework::Tensor>("FusedParamOffsets")); CopyVectorToCPUTensor( fp32_partial_numel_offsets, ctx.Output<framework::Tensor>("FP32ShardFusedParamOffsets")); CopyVectorToCPUTensor( fp16_partial_numel_offsets, ctx.Output<framework::Tensor>("FP16ShardFusedParamOffsets")); auto *global_scale = ctx.Output<framework::Tensor>("GlobalScale"); if (!global_scale->IsInitialized()) { TensorFillConstant<float>(dev_ctx, global_scale, {1}, 1.0f); } VLOG(10) << "Init global scale ends"; dev_ctx.Wait(); VLOG(10) << "Wait for H2D copy"; } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL( distributed_fused_lamb_init, ops::DistributedFusedLambInitOpKernel<plat::CUDADeviceContext, float>);
d074db97b704560b910b9b2b72a714c7470c0466.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ //#include <cutil_inline.h> #include <assert.h> #include <stdio.h> #include "mergeSort_common.h" inline __device__ void Comparator( uint& keyA, uint& valA, uint& keyB, uint& valB, uint arrowDir ){ uint t; if( (keyA > keyB) == arrowDir ){ t = keyA; keyA = keyB; keyB = t; t = valA; valA = valB; valB = t; } } __global__ void bitonicSortSharedKernel( uint *d_DstKey, uint *d_DstVal, uint *d_SrcKey, uint *d_SrcVal, uint arrayLength, uint sortDir ){ //Shared memory storage for one or more short vectors __shared__ uint s_key[SHARED_SIZE_LIMIT]; __shared__ uint s_val[SHARED_SIZE_LIMIT]; //Offset to the beginning of subbatch and load data d_SrcKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x; d_SrcVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x; d_DstKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x; d_DstVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x; s_key[threadIdx.x + 0] = d_SrcKey[ 0]; s_val[threadIdx.x + 0] = d_SrcVal[ 0]; s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcKey[(SHARED_SIZE_LIMIT / 2)]; s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcVal[(SHARED_SIZE_LIMIT / 2)]; for(uint size = 2; size < arrayLength; size <<= 1){ //Bitonic merge uint dir = (threadIdx.x & (size / 2)) != 0; for(uint stride = size / 2; stride > 0; stride >>= 1){ __syncthreads(); uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1)); Comparator( s_key[pos + 0], s_val[pos + 0], s_key[pos + stride], s_val[pos + stride], dir ); } } //ddd == sortDir for the last bitonic merge step { for(uint stride = 
arrayLength / 2; stride > 0; stride >>= 1){ __syncthreads(); uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1)); Comparator( s_key[pos + 0], s_val[pos + 0], s_key[pos + stride], s_val[pos + stride], sortDir ); } } __syncthreads(); d_DstKey[ 0] = s_key[threadIdx.x + 0]; d_DstVal[ 0] = s_val[threadIdx.x + 0]; d_DstKey[(SHARED_SIZE_LIMIT / 2)] = s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)]; d_DstVal[(SHARED_SIZE_LIMIT / 2)] = s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)]; } //Helper function (also used by odd-even merge sort) extern "C" uint factorRadix2(uint *log2L, uint L){ if(!L){ *log2L = 0; return 0; }else{ for(*log2L = 0; (L & 1) == 0; L >>= 1, *log2L++); return L; } } extern "C" void bitonicSortShared( uint *d_DstKey, uint *d_DstVal, uint *d_SrcKey, uint *d_SrcVal, uint batchSize, uint arrayLength, uint sortDir ){ //Nothing to sort if(arrayLength < 2) return; //Only power-of-two array lengths are supported by this implementation uint log2L; uint factorizationRemainder = factorRadix2(&log2L, arrayLength); assert( factorizationRemainder == 1 ); uint blockCount = batchSize * arrayLength / SHARED_SIZE_LIMIT; uint threadCount = SHARED_SIZE_LIMIT / 2; assert(arrayLength <= SHARED_SIZE_LIMIT); assert( (batchSize * arrayLength) % SHARED_SIZE_LIMIT == 0 ); printf("blockCount: %d, threadCount: %d\n", blockCount, threadCount); //bitonicSortSharedKernel<<<blockCount, threadCount>>>(d_DstKey, d_DstVal, d_SrcKey, d_SrcVal, arrayLength, sortDir); hipLaunchKernelGGL(( bitonicSortSharedKernel), dim3(blockCount), dim3(threadCount), 0, 0, d_DstKey, d_DstVal, d_SrcKey, d_SrcVal, arrayLength, sortDir); //cutilCheckMsg("bitonicSortSharedKernel<<<>>> failed!\n"); } int main() { uint h_SrcKey[SHARED_SIZE_LIMIT]; uint h_SrcVal[SHARED_SIZE_LIMIT]; klee_make_symbolic(h_SrcKey, sizeof(uint) * SHARED_SIZE_LIMIT, "srckey_input"); klee_make_symbolic(h_SrcVal, sizeof(uint) * SHARED_SIZE_LIMIT, "srcval_input"); uint *d_DstKey, *d_DstVal, *d_SrcKey, *d_SrcVal; hipMalloc((void**)&d_DstKey, 
sizeof(uint) * SHARED_SIZE_LIMIT); hipMalloc((void**)&d_DstVal, sizeof(uint) * SHARED_SIZE_LIMIT); hipMalloc((void**)&d_SrcKey, sizeof(uint) * SHARED_SIZE_LIMIT); hipMalloc((void**)&d_SrcVal, sizeof(uint) * SHARED_SIZE_LIMIT); hipMemcpy(d_SrcKey, h_SrcKey, sizeof(uint) * SHARED_SIZE_LIMIT, hipMemcpyHostToDevice); hipMemcpy(d_SrcVal, h_SrcVal, sizeof(uint) * SHARED_SIZE_LIMIT, hipMemcpyHostToDevice); bitonicSortShared(d_DstKey, d_DstVal, d_SrcKey, d_SrcVal, 1, SHARED_SIZE_LIMIT, 1); }
d074db97b704560b910b9b2b72a714c7470c0466.cu
/* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ //#include <cutil_inline.h> #include <assert.h> #include <stdio.h> #include "mergeSort_common.h" inline __device__ void Comparator( uint& keyA, uint& valA, uint& keyB, uint& valB, uint arrowDir ){ uint t; if( (keyA > keyB) == arrowDir ){ t = keyA; keyA = keyB; keyB = t; t = valA; valA = valB; valB = t; } } __global__ void bitonicSortSharedKernel( uint *d_DstKey, uint *d_DstVal, uint *d_SrcKey, uint *d_SrcVal, uint arrayLength, uint sortDir ){ //Shared memory storage for one or more short vectors __shared__ uint s_key[SHARED_SIZE_LIMIT]; __shared__ uint s_val[SHARED_SIZE_LIMIT]; //Offset to the beginning of subbatch and load data d_SrcKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x; d_SrcVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x; d_DstKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x; d_DstVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x; s_key[threadIdx.x + 0] = d_SrcKey[ 0]; s_val[threadIdx.x + 0] = d_SrcVal[ 0]; s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcKey[(SHARED_SIZE_LIMIT / 2)]; s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcVal[(SHARED_SIZE_LIMIT / 2)]; for(uint size = 2; size < arrayLength; size <<= 1){ //Bitonic merge uint dir = (threadIdx.x & (size / 2)) != 0; for(uint stride = size / 2; stride > 0; stride >>= 1){ __syncthreads(); uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1)); Comparator( s_key[pos + 0], s_val[pos + 0], s_key[pos + stride], s_val[pos + stride], dir ); } } //ddd == sortDir for the last bitonic merge step { for(uint stride = arrayLength / 2; stride > 0; stride >>= 1){ __syncthreads(); uint pos = 2 * threadIdx.x - 
(threadIdx.x & (stride - 1)); Comparator( s_key[pos + 0], s_val[pos + 0], s_key[pos + stride], s_val[pos + stride], sortDir ); } } __syncthreads(); d_DstKey[ 0] = s_key[threadIdx.x + 0]; d_DstVal[ 0] = s_val[threadIdx.x + 0]; d_DstKey[(SHARED_SIZE_LIMIT / 2)] = s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)]; d_DstVal[(SHARED_SIZE_LIMIT / 2)] = s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)]; } //Helper function (also used by odd-even merge sort) extern "C" uint factorRadix2(uint *log2L, uint L){ if(!L){ *log2L = 0; return 0; }else{ for(*log2L = 0; (L & 1) == 0; L >>= 1, *log2L++); return L; } } extern "C" void bitonicSortShared( uint *d_DstKey, uint *d_DstVal, uint *d_SrcKey, uint *d_SrcVal, uint batchSize, uint arrayLength, uint sortDir ){ //Nothing to sort if(arrayLength < 2) return; //Only power-of-two array lengths are supported by this implementation uint log2L; uint factorizationRemainder = factorRadix2(&log2L, arrayLength); assert( factorizationRemainder == 1 ); uint blockCount = batchSize * arrayLength / SHARED_SIZE_LIMIT; uint threadCount = SHARED_SIZE_LIMIT / 2; assert(arrayLength <= SHARED_SIZE_LIMIT); assert( (batchSize * arrayLength) % SHARED_SIZE_LIMIT == 0 ); printf("blockCount: %d, threadCount: %d\n", blockCount, threadCount); //bitonicSortSharedKernel<<<blockCount, threadCount>>>(d_DstKey, d_DstVal, d_SrcKey, d_SrcVal, arrayLength, sortDir); bitonicSortSharedKernel<<<blockCount, threadCount>>>(d_DstKey, d_DstVal, d_SrcKey, d_SrcVal, arrayLength, sortDir); //cutilCheckMsg("bitonicSortSharedKernel<<<>>> failed!\n"); } int main() { uint h_SrcKey[SHARED_SIZE_LIMIT]; uint h_SrcVal[SHARED_SIZE_LIMIT]; klee_make_symbolic(h_SrcKey, sizeof(uint) * SHARED_SIZE_LIMIT, "srckey_input"); klee_make_symbolic(h_SrcVal, sizeof(uint) * SHARED_SIZE_LIMIT, "srcval_input"); uint *d_DstKey, *d_DstVal, *d_SrcKey, *d_SrcVal; cudaMalloc((void**)&d_DstKey, sizeof(uint) * SHARED_SIZE_LIMIT); cudaMalloc((void**)&d_DstVal, sizeof(uint) * SHARED_SIZE_LIMIT); 
cudaMalloc((void**)&d_SrcKey, sizeof(uint) * SHARED_SIZE_LIMIT); cudaMalloc((void**)&d_SrcVal, sizeof(uint) * SHARED_SIZE_LIMIT); cudaMemcpy(d_SrcKey, h_SrcKey, sizeof(uint) * SHARED_SIZE_LIMIT, cudaMemcpyHostToDevice); cudaMemcpy(d_SrcVal, h_SrcVal, sizeof(uint) * SHARED_SIZE_LIMIT, cudaMemcpyHostToDevice); bitonicSortShared(d_DstKey, d_DstVal, d_SrcKey, d_SrcVal, 1, SHARED_SIZE_LIMIT, 1); }
6a0c305bf2628799e46046dfca241f52c9b81b12.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2018-2019 NVIDIA Corporation. All rights reserved. * * NVIDIA Corporation and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA Corporation is strictly prohibited. * * Edited by Marcos Luciano * https://www.github.com/marcoslucianops * */ #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <stdint.h> #include <stdio.h> #include <string.h> inline __device__ float sigmoidGPU(const float& x) { return 1.0f / (1.0f + __expf(-x)); } __global__ void gpuYoloLayer(const float* input, float* output, const uint gridSize, const uint numOutputClasses, const uint numBBoxes, const uint new_coords, const float scale_x_y) { uint x_id = blockIdx.x * blockDim.x + threadIdx.x; uint y_id = blockIdx.y * blockDim.y + threadIdx.y; uint z_id = blockIdx.z * blockDim.z + threadIdx.z; if ((x_id >= gridSize) || (y_id >= gridSize) || (z_id >= numBBoxes)) { return; } const int numGridCells = gridSize * gridSize; const int bbindex = y_id * gridSize + x_id; float alpha = scale_x_y; float beta = -0.5 * (scale_x_y - 1); if (new_coords == 1) { output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 0)] = input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 0)] * alpha + beta; output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 1)] = input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 1)] * alpha + beta; output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 2)] = pow(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 2)] * 2, 2); output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 3)] = pow(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 3)] * 2, 2); output[bbindex + 
numGridCells * (z_id * (5 + numOutputClasses) + 4)] = input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 4)]; for (uint i = 0; i < numOutputClasses; ++i) { output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + (5 + i))] = input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + (5 + i))]; } } else { output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 0)] = sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 0)]) * alpha + beta; output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 1)] = sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 1)]) * alpha + beta; output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 2)] = __expf(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 2)]); output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 3)] = __expf(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 3)]); output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 4)] = sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 4)]); for (uint i = 0; i < numOutputClasses; ++i) { output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + (5 + i))] = sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + (5 + i))]); } } } __global__ void gpuRegionLayer(const float* input, float* output, const uint gridSize, const uint numOutputClasses, const uint numBBoxes) { uint x_id = blockIdx.x * blockDim.x + threadIdx.x; uint y_id = blockIdx.y * blockDim.y + threadIdx.y; uint z_id = blockIdx.z * blockDim.z + threadIdx.z; if ((x_id >= gridSize) || (y_id >= gridSize) || (z_id >= numBBoxes)) { return; } const int numGridCells = gridSize * gridSize; const int bbindex = y_id * gridSize + x_id; output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 0)] = sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 0)]); output[bbindex + numGridCells * (z_id * (5 + 
numOutputClasses) + 1)] = sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 1)]); output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 2)] = __expf(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 2)]); output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 3)] = __expf(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 3)]); output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 4)] = sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 4)]); float temp = 1.0; int i; float sum = 0; float largest = -INFINITY; for(i = 0; i < numOutputClasses; ++i){ int val = input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + (5 + i))]; largest = (val>largest) ? val : largest; } for(i = 0; i < numOutputClasses; ++i){ float e = exp(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + (5 + i))] / temp - largest / temp); sum += e; output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + (5 + i))] = e; } for(i = 0; i < numOutputClasses; ++i){ output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + (5 + i))] /= sum; } } hipError_t cudaYoloLayer(const void* input, void* output, const uint& batchSize, const uint& gridSize, const uint& numOutputClasses, const uint& numBBoxes, uint64_t outputSize, hipStream_t stream, const uint modelCoords, const float modelScale, const uint modelType); hipError_t cudaYoloLayer(const void* input, void* output, const uint& batchSize, const uint& gridSize, const uint& numOutputClasses, const uint& numBBoxes, uint64_t outputSize, hipStream_t stream, const uint modelCoords, const float modelScale, const uint modelType) { dim3 threads_per_block(16, 16, 4); dim3 number_of_blocks((gridSize / threads_per_block.x) + 1, (gridSize / threads_per_block.y) + 1, (numBBoxes / threads_per_block.z) + 1); if (modelType == 1) { for (unsigned int batch = 0; batch < batchSize; ++batch) { hipLaunchKernelGGL(( gpuYoloLayer), 
dim3(number_of_blocks), dim3(threads_per_block), 0, stream, reinterpret_cast<const float*>(input) + (batch * outputSize), reinterpret_cast<float*>(output) + (batch * outputSize), gridSize, numOutputClasses, numBBoxes, modelCoords, modelScale); } } else if (modelType == 0) { for (unsigned int batch = 0; batch < batchSize; ++batch) { hipLaunchKernelGGL(( gpuRegionLayer), dim3(number_of_blocks), dim3(threads_per_block), 0, stream, reinterpret_cast<const float*>(input) + (batch * outputSize), reinterpret_cast<float*>(output) + (batch * outputSize), gridSize, numOutputClasses, numBBoxes); } } return hipGetLastError(); }
6a0c305bf2628799e46046dfca241f52c9b81b12.cu
/* * Copyright (c) 2018-2019 NVIDIA Corporation. All rights reserved. * * NVIDIA Corporation and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA Corporation is strictly prohibited. * * Edited by Marcos Luciano * https://www.github.com/marcoslucianops * */ #include <cuda.h> #include <cuda_runtime.h> #include <stdint.h> #include <stdio.h> #include <string.h> inline __device__ float sigmoidGPU(const float& x) { return 1.0f / (1.0f + __expf(-x)); } __global__ void gpuYoloLayer(const float* input, float* output, const uint gridSize, const uint numOutputClasses, const uint numBBoxes, const uint new_coords, const float scale_x_y) { uint x_id = blockIdx.x * blockDim.x + threadIdx.x; uint y_id = blockIdx.y * blockDim.y + threadIdx.y; uint z_id = blockIdx.z * blockDim.z + threadIdx.z; if ((x_id >= gridSize) || (y_id >= gridSize) || (z_id >= numBBoxes)) { return; } const int numGridCells = gridSize * gridSize; const int bbindex = y_id * gridSize + x_id; float alpha = scale_x_y; float beta = -0.5 * (scale_x_y - 1); if (new_coords == 1) { output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 0)] = input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 0)] * alpha + beta; output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 1)] = input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 1)] * alpha + beta; output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 2)] = pow(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 2)] * 2, 2); output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 3)] = pow(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 3)] * 2, 2); output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 4)] = input[bbindex + 
numGridCells * (z_id * (5 + numOutputClasses) + 4)]; for (uint i = 0; i < numOutputClasses; ++i) { output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + (5 + i))] = input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + (5 + i))]; } } else { output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 0)] = sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 0)]) * alpha + beta; output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 1)] = sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 1)]) * alpha + beta; output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 2)] = __expf(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 2)]); output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 3)] = __expf(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 3)]); output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 4)] = sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 4)]); for (uint i = 0; i < numOutputClasses; ++i) { output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + (5 + i))] = sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + (5 + i))]); } } } __global__ void gpuRegionLayer(const float* input, float* output, const uint gridSize, const uint numOutputClasses, const uint numBBoxes) { uint x_id = blockIdx.x * blockDim.x + threadIdx.x; uint y_id = blockIdx.y * blockDim.y + threadIdx.y; uint z_id = blockIdx.z * blockDim.z + threadIdx.z; if ((x_id >= gridSize) || (y_id >= gridSize) || (z_id >= numBBoxes)) { return; } const int numGridCells = gridSize * gridSize; const int bbindex = y_id * gridSize + x_id; output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 0)] = sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 0)]); output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 1)] = sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 
+ numOutputClasses) + 1)]); output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 2)] = __expf(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 2)]); output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 3)] = __expf(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 3)]); output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 4)] = sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 4)]); float temp = 1.0; int i; float sum = 0; float largest = -INFINITY; for(i = 0; i < numOutputClasses; ++i){ int val = input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + (5 + i))]; largest = (val>largest) ? val : largest; } for(i = 0; i < numOutputClasses; ++i){ float e = exp(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + (5 + i))] / temp - largest / temp); sum += e; output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + (5 + i))] = e; } for(i = 0; i < numOutputClasses; ++i){ output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + (5 + i))] /= sum; } } cudaError_t cudaYoloLayer(const void* input, void* output, const uint& batchSize, const uint& gridSize, const uint& numOutputClasses, const uint& numBBoxes, uint64_t outputSize, cudaStream_t stream, const uint modelCoords, const float modelScale, const uint modelType); cudaError_t cudaYoloLayer(const void* input, void* output, const uint& batchSize, const uint& gridSize, const uint& numOutputClasses, const uint& numBBoxes, uint64_t outputSize, cudaStream_t stream, const uint modelCoords, const float modelScale, const uint modelType) { dim3 threads_per_block(16, 16, 4); dim3 number_of_blocks((gridSize / threads_per_block.x) + 1, (gridSize / threads_per_block.y) + 1, (numBBoxes / threads_per_block.z) + 1); if (modelType == 1) { for (unsigned int batch = 0; batch < batchSize; ++batch) { gpuYoloLayer<<<number_of_blocks, threads_per_block, 0, stream>>>( reinterpret_cast<const float*>(input) + (batch * 
outputSize), reinterpret_cast<float*>(output) + (batch * outputSize), gridSize, numOutputClasses, numBBoxes, modelCoords, modelScale); } } else if (modelType == 0) { for (unsigned int batch = 0; batch < batchSize; ++batch) { gpuRegionLayer<<<number_of_blocks, threads_per_block, 0, stream>>>( reinterpret_cast<const float*>(input) + (batch * outputSize), reinterpret_cast<float*>(output) + (batch * outputSize), gridSize, numOutputClasses, numBBoxes); } } return cudaGetLastError(); }
380f03656ce0b466d20bff291096fb49f5df0b8d.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <ATen/ATen.h> #include <torch/extension.h> #include <math.h> #include <hip/hip_runtime.h> #include <hip/hip_fp16.h> #include <hip/hip_runtime.h> #define BLOCK_SIZE 128 __host__ __device__ float bits_to_bound(int num_bits, int is_unsigned) { float bound = (1 << (num_bits - 1 + int(is_unsigned))) - 1; return bound; } template <typename T> __device__ T fake_tensor_quant_device(T input, T amax, int bound); template <> __device__ float fake_tensor_quant_device(float input, float amax, int bound) { float scale = bound / amax; float output = round(input * scale); output = output > bound ? bound : output; output = output < -bound ? 
-bound : output; return output / scale; } template <> __device__ at::Half fake_tensor_quant_device(at::Half input, at::Half amax, int bound) { float output = fake_tensor_quant_device(__half2float(input), __half2float(amax), bound); return __float2half(output); } // Sepcialize double only to pass Aten dispatch macros template <> __device__ double fake_tensor_quant_device(double input, double amax, int bound) { float output = fake_tensor_quant_device(input, amax, bound); return output; } template <typename T> __global__ void fake_tensor_quant_kernel( const T *inputs, size_t n, T *outputs, const T *amax, int num_bits=8, bool is_unsigned=false) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < n) { float bound = bits_to_bound(num_bits, is_unsigned); outputs[tid] = fake_tensor_quant_device(inputs[tid], amax[0], bound); } } void fake_tensor_quant_cuda_inplace(at::Tensor inputs, at::Tensor amax, int num_bits=8, bool is_unsigned=false) { size_t numel = inputs.numel(); AT_DISPATCH_FLOATING_TYPES_AND_HALF(inputs.type().scalarType(), "fake_tensor_quant_cuda_inplace", [&] { hipLaunchKernelGGL(( fake_tensor_quant_kernel), dim3(numel/BLOCK_SIZE + 1), dim3(BLOCK_SIZE), 0, 0, inputs.data_ptr<scalar_t>(), numel, inputs.data_ptr<scalar_t>(), amax.data_ptr<scalar_t>(), num_bits, is_unsigned); }); } at::Tensor fake_tensor_quant_cuda(at::Tensor inputs, at::Tensor amax, int num_bits=8, bool is_unsigned=false) { size_t numel = inputs.numel(); auto outputs = torch::zeros_like(inputs); AT_DISPATCH_FLOATING_TYPES_AND_HALF(inputs.type().scalarType(), "fake_tensor_quant_cuda", [&] { hipLaunchKernelGGL(( fake_tensor_quant_kernel), dim3(numel/BLOCK_SIZE + 1), dim3(BLOCK_SIZE), 0, 0, inputs.data_ptr<scalar_t>(), numel, outputs.data_ptr<scalar_t>(), amax.data_ptr<scalar_t>(), num_bits, is_unsigned); }); return outputs; } __global__ void fake_tensor_quant_with_axis_cuda_kernel( const float *inputs, size_t n, float *outputs, const float *amax, int axis_size, int outer_size, int 
num_bits=8, bool is_unsigned=false) { int tid = blockIdx.x * blockDim.x + threadIdx.x; float bound = bits_to_bound(num_bits, is_unsigned); for (int idx = 4 * tid; idx < 4 * (tid + 1) && idx < n; ++idx) { int axis_idx = (idx / outer_size) % axis_size; outputs[idx] = fake_tensor_quant_device(inputs[idx], amax[axis_idx], bound); } } at::Tensor fake_tensor_quant_with_axis_cuda( at::Tensor inputs, at::Tensor amax, int axis, int num_bits=8, bool is_unsigned=false) { auto outputs = torch::empty_like(inputs); size_t numel = inputs.numel(); int axis_size = inputs.size(axis); int outer_size = 1; for (int i = axis + 1; i < inputs.dim(); ++i) { outer_size *= inputs.size(i); } hipLaunchKernelGGL(( fake_tensor_quant_with_axis_cuda_kernel), dim3(numel / (BLOCK_SIZE*4) + 1), dim3(BLOCK_SIZE), 0, 0, inputs.data_ptr<float>(), numel, outputs.data_ptr<float>(), amax.data_ptr<float>(), axis_size, outer_size, num_bits, is_unsigned); return outputs; }
380f03656ce0b466d20bff291096fb49f5df0b8d.cu
/* * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <ATen/ATen.h> #include <torch/extension.h> #include <math.h> #include <cuda.h> #include <cuda_fp16.h> #include <cuda_runtime.h> #define BLOCK_SIZE 128 __host__ __device__ float bits_to_bound(int num_bits, int is_unsigned) { float bound = (1 << (num_bits - 1 + int(is_unsigned))) - 1; return bound; } template <typename T> __device__ T fake_tensor_quant_device(T input, T amax, int bound); template <> __device__ float fake_tensor_quant_device(float input, float amax, int bound) { float scale = bound / amax; float output = round(input * scale); output = output > bound ? bound : output; output = output < -bound ? 
-bound : output; return output / scale; } template <> __device__ at::Half fake_tensor_quant_device(at::Half input, at::Half amax, int bound) { float output = fake_tensor_quant_device(__half2float(input), __half2float(amax), bound); return __float2half(output); } // Sepcialize double only to pass Aten dispatch macros template <> __device__ double fake_tensor_quant_device(double input, double amax, int bound) { float output = fake_tensor_quant_device(input, amax, bound); return output; } template <typename T> __global__ void fake_tensor_quant_kernel( const T *inputs, size_t n, T *outputs, const T *amax, int num_bits=8, bool is_unsigned=false) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < n) { float bound = bits_to_bound(num_bits, is_unsigned); outputs[tid] = fake_tensor_quant_device(inputs[tid], amax[0], bound); } } void fake_tensor_quant_cuda_inplace(at::Tensor inputs, at::Tensor amax, int num_bits=8, bool is_unsigned=false) { size_t numel = inputs.numel(); AT_DISPATCH_FLOATING_TYPES_AND_HALF(inputs.type().scalarType(), "fake_tensor_quant_cuda_inplace", [&] { fake_tensor_quant_kernel<<<numel/BLOCK_SIZE + 1, BLOCK_SIZE>>>( inputs.data_ptr<scalar_t>(), numel, inputs.data_ptr<scalar_t>(), amax.data_ptr<scalar_t>(), num_bits, is_unsigned); }); } at::Tensor fake_tensor_quant_cuda(at::Tensor inputs, at::Tensor amax, int num_bits=8, bool is_unsigned=false) { size_t numel = inputs.numel(); auto outputs = torch::zeros_like(inputs); AT_DISPATCH_FLOATING_TYPES_AND_HALF(inputs.type().scalarType(), "fake_tensor_quant_cuda", [&] { fake_tensor_quant_kernel<<<numel/BLOCK_SIZE + 1, BLOCK_SIZE>>>( inputs.data_ptr<scalar_t>(), numel, outputs.data_ptr<scalar_t>(), amax.data_ptr<scalar_t>(), num_bits, is_unsigned); }); return outputs; } __global__ void fake_tensor_quant_with_axis_cuda_kernel( const float *inputs, size_t n, float *outputs, const float *amax, int axis_size, int outer_size, int num_bits=8, bool is_unsigned=false) { int tid = blockIdx.x * blockDim.x + 
threadIdx.x; float bound = bits_to_bound(num_bits, is_unsigned); for (int idx = 4 * tid; idx < 4 * (tid + 1) && idx < n; ++idx) { int axis_idx = (idx / outer_size) % axis_size; outputs[idx] = fake_tensor_quant_device(inputs[idx], amax[axis_idx], bound); } } at::Tensor fake_tensor_quant_with_axis_cuda( at::Tensor inputs, at::Tensor amax, int axis, int num_bits=8, bool is_unsigned=false) { auto outputs = torch::empty_like(inputs); size_t numel = inputs.numel(); int axis_size = inputs.size(axis); int outer_size = 1; for (int i = axis + 1; i < inputs.dim(); ++i) { outer_size *= inputs.size(i); } fake_tensor_quant_with_axis_cuda_kernel<<<numel / (BLOCK_SIZE*4) + 1, BLOCK_SIZE>>>( inputs.data_ptr<float>(), numel, outputs.data_ptr<float>(), amax.data_ptr<float>(), axis_size, outer_size, num_bits, is_unsigned); return outputs; }
56418d17c45fc2e90b11847f820bdd18c971456f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //scan.cu #include "comm.h" #include "wtime.h" #include <stdio.h> #include "iostream" #define max_thd 256 #define max_block 256 __global__ void warp_merge_kernel ( Edge* workload, vertex_t* adj_list, index_t* begin, index_t Ns, index_t Ne, index_t* count ) { //phase 1, partition index_t tid = Ns + (threadIdx.x + blockIdx.x * blockDim.x)/32; int i = threadIdx.x%32; int p = threadIdx.x/32; long int mycount=0; __shared__ index_t local[max_thd]; __shared__ vertex_t A_diag[33*8]; __shared__ vertex_t B_diag[33*8]; while(tid<Ne){ vertex_t A = workload[tid].A; vertex_t B = workload[tid].B; index_t m = begin[A+1]-begin[A];//degree[A]; index_t n = begin[B+1]-begin[B];//degree[B]; vertex_t* a = &(adj_list[begin[A]]); vertex_t* b = &(adj_list[begin[B]]); if(i==0){ A_diag[p*33+32]=m; B_diag[p*33+32]=n; } index_t index = (m+n)/32*i; vertex_t A_top, A_bottom, B_top, Ai, Bi; if(index>m){ A_top = m; B_top = index-m; } else if(index<=m){ A_top = index; B_top = 0; } if(index>n){ A_bottom = index-n; } else if(index<=n){ A_bottom = 0; } while(1){ int offset=(A_top-A_bottom)/2; if(A_top==A_bottom){ A_diag[p*33+i]=A_top; B_diag[p*33+i]=B_top; break; } Ai = A_top - offset; Bi = B_top + offset; if(offset<1){ if(a[Ai-1]<b[Bi]){ A_diag[p*33+i]=Ai; B_diag[p*33+i]=Bi; break; } else if(a[Ai-1]>b[Bi]){ A_diag[p*33+i]=Ai-1; B_diag[p*33+i]=Bi+1; break; } else if(a[Ai-1]==b[Bi]){ A_diag[p*33+i]=Ai; B_diag[p*33+i]=Bi+1; break; } } if(a[Ai]>b[Bi-1]){ if(a[Ai-1]<b[Bi]){ A_diag[p*33+i]=Ai; B_diag[p*33+i]=Bi; break; } else if(a[Ai-1]>b[Bi]){ A_top = Ai-1; B_top = Bi+1; } else if(a[Ai-1]==b[Bi]){ A_diag[p*33+i]=Ai; B_diag[p*33+i]=Bi+1; break; } } else if(a[Ai]<b[Bi-1]){ A_bottom = Ai+1; } else if(a[Ai]==b[Bi-1]){ A_diag[p*33+i]=Ai+1; B_diag[p*33+i]=Bi; break; } } // __syncthreads(); vertex_t lowA = A_diag[p*33+i]; vertex_t lowB = B_diag[p*33+i]; vertex_t highA = A_diag[p*33+i+1]; vertex_t highB = B_diag[p*33+i+1]; 
vertex_t x,y; while(lowA<highA && lowB<highB){ x=a[lowA]; y=b[lowB]; if(x<y){ lowA++; } else if(x>y){ lowB++; } else if(x==y){ lowA++; lowB++; mycount++; } } // tid += blockDim.x * gridDim.x/32; tid += gridDim.x*blockDim.x/32; // __syncthreads(); } //reduce local[threadIdx.x] = mycount; __syncthreads(); if(threadIdx.x==0){ index_t val=0; for(int i=0; i<blockDim.x; i++){ val+= local[i]; } count[blockIdx.x]=val; } __syncthreads(); } __global__ void block_binary_kernel ( //vertex_t* head, //vertex_t* adj, Edge* workload, vertex_t* adj_list, index_t* begin, index_t Ns, index_t Ne, index_t* count ) { //phase 1, partition index_t tid = Ns + (threadIdx.x + blockIdx.x * blockDim.x)/ max_thd; int i = threadIdx.x% max_thd; index_t mycount=0; // __shared__ vertex_t cache[256]; __shared__ index_t local[max_thd]; while(tid<Ne){ // vertex_t A = head[tid]; // vertex_t B = adj[tid]; vertex_t A = workload[tid].A; vertex_t B = workload[tid].B; index_t m = begin[A+1]-begin[A];//degree[A]; index_t n = begin[B+1]-begin[B];//degree[B]; index_t temp; if(m<n){ temp = A; A = B; B = temp; temp = m; m = n; n = temp; } vertex_t* a = &(adj_list[begin[A]]); vertex_t* b = &(adj_list[begin[B]]); //initial cache local[i]=a[i*m/max_thd]; __syncthreads(); //search int j=i; while(j<n){ vertex_t X = b[j]; vertex_t Y; //phase 1: cache int bot = 0; int top = max_thd; int r; while(top>bot+1){ r = (top+bot)/2; Y = local[r]; if(X==Y){ //printf("find A %d B %d C %d\n",A,B,X); mycount++; bot = top + max_thd; } if(X<Y){ top = r; } if(X>Y){ bot = r; } } //phase 2 bot = bot*m/max_thd; top = top*m/max_thd -1; while(top>=bot){ r = (top+bot)/2; Y = a[r]; if(X==Y){ mycount++; //printf("find A %d B %d C %d\n",A,B,X); } if(X<=Y){ top = r-1; } if(X>=Y){ bot = r+1; } } j += max_thd; } tid += gridDim.x*blockDim.x/256; __syncthreads(); } //reduce __syncthreads(); local[threadIdx.x] = mycount; __syncthreads(); if(threadIdx.x==0){ index_t val=0; for(int i=0; i<blockDim.x; i++){ val+= local[i]; } // count[blockIdx.x]+=val; 
count[blockIdx.x]=val; // if(val!=0) // printf("+ %d\n",count[blockIdx.x]); } } __global__ void warp_binary_kernel ( //vertex_t* head, //vertex_t* adj, Edge* workload, vertex_t* adj_list, index_t* begin, index_t Ns, index_t Ne, index_t* count ) { //phase 1, partition index_t tid = (threadIdx.x + blockIdx.x * blockDim.x)/32 + Ns; index_t mycount=0; __shared__ index_t local[max_thd]; int i = threadIdx.x%32; int p = threadIdx.x/32; while(tid<Ne){ vertex_t A = workload[tid].A; vertex_t B = workload[tid].B; index_t m = begin[A+1]-begin[A];//degree[A]; index_t n = begin[B+1]-begin[B];//degree[B]; //if(i==0) printf("A %d B %d\n"); index_t temp; if(m<n){ temp = A; A = B; B = temp; temp = m; m = n; n = temp; } vertex_t* a = &(adj_list[begin[A]]); vertex_t* b = &(adj_list[begin[B]]); //initial cache local[p*32+i]=a[i*m/32]; __syncthreads(); //search int j=i; while(j<n){ vertex_t X = b[j]; vertex_t Y; //phase 1: cache int bot = 0; int top = 32; int r; while(top>bot+1){ r = (top+bot)/2; Y = local[p*32+r]; if(X==Y){ mycount++; bot = top + 32; //printf("find A %d B %d C %d\n",A,B,X); } if(X<Y){ top = r; } if(X>Y){ bot = r; } } //phase 2 bot = bot*m/32; top = top*m/32 -1; while(top>=bot){ r = (top+bot)/2; Y = a[r]; if(X==Y){ mycount++; //printf("find A %d B %d C %d\n",A,B,X); } if(X<=Y){ top = r-1; } if(X>=Y){ bot = r+1; } } j += 32; } // tid += GPU_NUM* blockDim.x*gridDim.x/32; tid += blockDim.x*gridDim.x/32; __syncthreads(); } __syncthreads(); //reduce local[threadIdx.x] = mycount; __syncthreads(); if(threadIdx.x==0){ index_t val=0; for(int i=0; i<blockDim.x; i++){ val+= local[i]; } // count[blockIdx.x]=val; count[blockIdx.x]+=val; } __syncthreads(); } __global__ void init_count(index_t* count) { int tid = threadIdx.x; count[tid] = 0; } __global__ void reduce_kernel(index_t* count) { index_t val = 0; for(int i=0; i<max_block; i++){ val += count[i]; } count[0] = val; } //---------------------------------------- cpu function-------------------- 
//------------------------------------------------------------------ void graph::initDevice(int GPU_id,int Part_id){ //cuda memory copy of partAdj and partBegin // hipSetDevice(GPU_id); hipSetDevice(2); int P=Part_id; H_ERR(hipDeviceSynchronize() ); vertex_t* dev_adj; index_t* dev_begin; index_t* dev_count; Edge* buffer0; Edge* buffer1; index_t EdgeCount = partEdgeCount[P]; vertex_t* Adj = partAdj[P]; index_t* Begin = partBegin[P]; H_ERR(hipMalloc(&dev_adj, EdgeCount*sizeof(vertex_t)) ); H_ERR(hipMalloc(&dev_begin, (vert_count+1)*sizeof(index_t)) ); H_ERR(hipMalloc(&dev_count, max_block*sizeof(index_t)) ); H_ERR(hipMemcpy(dev_adj, Adj, EdgeCount*sizeof(vertex_t), hipMemcpyHostToDevice) ); H_ERR(hipMemcpy(dev_begin, Begin, (vert_count+1)*sizeof(index_t), hipMemcpyHostToDevice) ); H_ERR(hipMalloc(&buffer0, BufferSize*sizeof(Edge)) ); H_ERR(hipMalloc(&buffer1, BufferSize*sizeof(Edge)) ); gdata[GPU_id].adj = dev_adj; gdata[GPU_id].begin = dev_begin; gdata[GPU_id].count = dev_count; gdata[GPU_id].EdgeBuffer[0]= buffer0; gdata[GPU_id].EdgeBuffer[1]= buffer1; gdata[GPU_id].partition_id = P; gdata[GPU_id].currentBuffer= 0; hipLaunchKernelGGL(( init_count) , dim3(1),dim3(max_thd), 0, 0, dev_count); } void graph::DeviceCompute(int GPU_id, index_t Chunk_id){ int P = gdata[GPU_id].partition_id; // if(ds_status[P][Chunk_id]!=0) return; // ds_status[P][Chunk_id]=1; // if(ds_progress[P]<Chunk_id+1) ds_progress[P] = Chunk_id+1; //control vertex_t* dev_adj =gdata[GPU_id].adj; index_t* dev_begin =gdata[GPU_id].begin; index_t* dev_count =gdata[GPU_id].count; Edge* buffer =gdata[GPU_id].EdgeBuffer[gdata[GPU_id].currentBuffer%2]; gdata[GPU_id].currentBuffer =1-gdata[GPU_id].currentBuffer; index_t currentBufferSize = BufferSize; if(Chunk_id==upperEdgeCount/BufferSize){ currentBufferSize = upperEdgeCount % BufferSize; } hipLaunchKernelGGL(( init_count) , dim3(1),dim3(max_thd), 0, 0, dev_count); H_ERR(hipMemcpy(buffer, &OrientedEdge[Chunk_id*BufferSize], currentBufferSize*sizeof(Edge), 
hipMemcpyHostToDevice) ); H_ERR(hipDeviceSynchronize() ); hipLaunchKernelGGL(( warp_merge_kernel), dim3(max_block),dim3(max_thd), 0, 0, // warp_binary_kernel, dim3(max_block),dim3(max_thd), 0, 0, buffer, dev_adj, dev_begin, 0, // GPU_id*256*256/32, currentBufferSize, dev_count ); //write the result of this chunk back H_ERR(hipDeviceSynchronize() ); index_t tempcount[max_block]; index_t mycount=0; H_ERR(hipMemcpy(tempcount, dev_count, max_block*sizeof(index_t), hipMemcpyDeviceToHost)); for(int i=0; i<max_block; i++) mycount += tempcount[i]; ds_count[P][Chunk_id] = mycount; } void graph::gpuReduce(int GPU_id){ vertex_t* dev_adj =gdata[GPU_id].adj; index_t* dev_begin =gdata[GPU_id].begin; index_t* dev_count =gdata[GPU_id].count; Edge** buffer =gdata[GPU_id].EdgeBuffer; // H_ERR(hipDeviceSynchronize() ); // reduce_kernel <<<1,max_thd>>>(dev_count); // H_ERR(hipMemcpy(&count[GPU_id], dev_count, sizeof(index_t), hipMemcpyDeviceToHost)); // thd_count += count[i]; // count[i] = thd_count; H_ERR(hipFree(dev_adj) ); H_ERR(hipFree(dev_begin) ); H_ERR(hipFree(dev_count) ); H_ERR(hipFree(buffer[0]) ); H_ERR(hipFree(buffer[1]) ); // cout<<"GPU "<<GPU_id<<" finished"<<endl; } void graph::gpuProc(int GPU_id){ double t0 = wtime(); index_t total_count=0; for(int P=0; P<PART_NUM; P++){ // int P = GPU_id/4; // if(PART_NUM > 1) int P = GPU_id%PART_NUM; initDevice(GPU_id,P); index_t chunk_per_gpu = (ChunkNum-1)/GPU_NUM + 1; for(index_t i=GPU_id*chunk_per_gpu; i<(GPU_id+1)*chunk_per_gpu; i++ ){ // for(index_t i=GPU_id; i<ChunkNum; i+=GPU_NUM ){ // for(index_t i=GPU_id; i<ChunkNum; i+= 8 ){ // if(i%8<6) if(i<ChunkNum) DeviceCompute(GPU_id,i); } gpuReduce(GPU_id); total_count += count[GPU_id]; } count[GPU_id] = total_count; double t1 = wtime(); cout<<"GPU "<<GPU_id<<" time = "<<t1-t0<<endl; }
56418d17c45fc2e90b11847f820bdd18c971456f.cu
//scan.cu #include "comm.h" #include "wtime.h" #include <stdio.h> #include "iostream" #define max_thd 256 #define max_block 256 __global__ void warp_merge_kernel ( Edge* workload, vertex_t* adj_list, index_t* begin, index_t Ns, index_t Ne, index_t* count ) { //phase 1, partition index_t tid = Ns + (threadIdx.x + blockIdx.x * blockDim.x)/32; int i = threadIdx.x%32; int p = threadIdx.x/32; long int mycount=0; __shared__ index_t local[max_thd]; __shared__ vertex_t A_diag[33*8]; __shared__ vertex_t B_diag[33*8]; while(tid<Ne){ vertex_t A = workload[tid].A; vertex_t B = workload[tid].B; index_t m = begin[A+1]-begin[A];//degree[A]; index_t n = begin[B+1]-begin[B];//degree[B]; vertex_t* a = &(adj_list[begin[A]]); vertex_t* b = &(adj_list[begin[B]]); if(i==0){ A_diag[p*33+32]=m; B_diag[p*33+32]=n; } index_t index = (m+n)/32*i; vertex_t A_top, A_bottom, B_top, Ai, Bi; if(index>m){ A_top = m; B_top = index-m; } else if(index<=m){ A_top = index; B_top = 0; } if(index>n){ A_bottom = index-n; } else if(index<=n){ A_bottom = 0; } while(1){ int offset=(A_top-A_bottom)/2; if(A_top==A_bottom){ A_diag[p*33+i]=A_top; B_diag[p*33+i]=B_top; break; } Ai = A_top - offset; Bi = B_top + offset; if(offset<1){ if(a[Ai-1]<b[Bi]){ A_diag[p*33+i]=Ai; B_diag[p*33+i]=Bi; break; } else if(a[Ai-1]>b[Bi]){ A_diag[p*33+i]=Ai-1; B_diag[p*33+i]=Bi+1; break; } else if(a[Ai-1]==b[Bi]){ A_diag[p*33+i]=Ai; B_diag[p*33+i]=Bi+1; break; } } if(a[Ai]>b[Bi-1]){ if(a[Ai-1]<b[Bi]){ A_diag[p*33+i]=Ai; B_diag[p*33+i]=Bi; break; } else if(a[Ai-1]>b[Bi]){ A_top = Ai-1; B_top = Bi+1; } else if(a[Ai-1]==b[Bi]){ A_diag[p*33+i]=Ai; B_diag[p*33+i]=Bi+1; break; } } else if(a[Ai]<b[Bi-1]){ A_bottom = Ai+1; } else if(a[Ai]==b[Bi-1]){ A_diag[p*33+i]=Ai+1; B_diag[p*33+i]=Bi; break; } } // __syncthreads(); vertex_t lowA = A_diag[p*33+i]; vertex_t lowB = B_diag[p*33+i]; vertex_t highA = A_diag[p*33+i+1]; vertex_t highB = B_diag[p*33+i+1]; vertex_t x,y; while(lowA<highA && lowB<highB){ x=a[lowA]; y=b[lowB]; if(x<y){ lowA++; } 
else if(x>y){ lowB++; } else if(x==y){ lowA++; lowB++; mycount++; } } // tid += blockDim.x * gridDim.x/32; tid += gridDim.x*blockDim.x/32; // __syncthreads(); } //reduce local[threadIdx.x] = mycount; __syncthreads(); if(threadIdx.x==0){ index_t val=0; for(int i=0; i<blockDim.x; i++){ val+= local[i]; } count[blockIdx.x]=val; } __syncthreads(); } __global__ void block_binary_kernel ( //vertex_t* head, //vertex_t* adj, Edge* workload, vertex_t* adj_list, index_t* begin, index_t Ns, index_t Ne, index_t* count ) { //phase 1, partition index_t tid = Ns + (threadIdx.x + blockIdx.x * blockDim.x)/ max_thd; int i = threadIdx.x% max_thd; index_t mycount=0; // __shared__ vertex_t cache[256]; __shared__ index_t local[max_thd]; while(tid<Ne){ // vertex_t A = head[tid]; // vertex_t B = adj[tid]; vertex_t A = workload[tid].A; vertex_t B = workload[tid].B; index_t m = begin[A+1]-begin[A];//degree[A]; index_t n = begin[B+1]-begin[B];//degree[B]; index_t temp; if(m<n){ temp = A; A = B; B = temp; temp = m; m = n; n = temp; } vertex_t* a = &(adj_list[begin[A]]); vertex_t* b = &(adj_list[begin[B]]); //initial cache local[i]=a[i*m/max_thd]; __syncthreads(); //search int j=i; while(j<n){ vertex_t X = b[j]; vertex_t Y; //phase 1: cache int bot = 0; int top = max_thd; int r; while(top>bot+1){ r = (top+bot)/2; Y = local[r]; if(X==Y){ //printf("find A %d B %d C %d\n",A,B,X); mycount++; bot = top + max_thd; } if(X<Y){ top = r; } if(X>Y){ bot = r; } } //phase 2 bot = bot*m/max_thd; top = top*m/max_thd -1; while(top>=bot){ r = (top+bot)/2; Y = a[r]; if(X==Y){ mycount++; //printf("find A %d B %d C %d\n",A,B,X); } if(X<=Y){ top = r-1; } if(X>=Y){ bot = r+1; } } j += max_thd; } tid += gridDim.x*blockDim.x/256; __syncthreads(); } //reduce __syncthreads(); local[threadIdx.x] = mycount; __syncthreads(); if(threadIdx.x==0){ index_t val=0; for(int i=0; i<blockDim.x; i++){ val+= local[i]; } // count[blockIdx.x]+=val; count[blockIdx.x]=val; // if(val!=0) // printf("+ %d\n",count[blockIdx.x]); } } 
__global__ void warp_binary_kernel ( //vertex_t* head, //vertex_t* adj, Edge* workload, vertex_t* adj_list, index_t* begin, index_t Ns, index_t Ne, index_t* count ) { //phase 1, partition index_t tid = (threadIdx.x + blockIdx.x * blockDim.x)/32 + Ns; index_t mycount=0; __shared__ index_t local[max_thd]; int i = threadIdx.x%32; int p = threadIdx.x/32; while(tid<Ne){ vertex_t A = workload[tid].A; vertex_t B = workload[tid].B; index_t m = begin[A+1]-begin[A];//degree[A]; index_t n = begin[B+1]-begin[B];//degree[B]; //if(i==0) printf("A %d B %d\n"); index_t temp; if(m<n){ temp = A; A = B; B = temp; temp = m; m = n; n = temp; } vertex_t* a = &(adj_list[begin[A]]); vertex_t* b = &(adj_list[begin[B]]); //initial cache local[p*32+i]=a[i*m/32]; __syncthreads(); //search int j=i; while(j<n){ vertex_t X = b[j]; vertex_t Y; //phase 1: cache int bot = 0; int top = 32; int r; while(top>bot+1){ r = (top+bot)/2; Y = local[p*32+r]; if(X==Y){ mycount++; bot = top + 32; //printf("find A %d B %d C %d\n",A,B,X); } if(X<Y){ top = r; } if(X>Y){ bot = r; } } //phase 2 bot = bot*m/32; top = top*m/32 -1; while(top>=bot){ r = (top+bot)/2; Y = a[r]; if(X==Y){ mycount++; //printf("find A %d B %d C %d\n",A,B,X); } if(X<=Y){ top = r-1; } if(X>=Y){ bot = r+1; } } j += 32; } // tid += GPU_NUM* blockDim.x*gridDim.x/32; tid += blockDim.x*gridDim.x/32; __syncthreads(); } __syncthreads(); //reduce local[threadIdx.x] = mycount; __syncthreads(); if(threadIdx.x==0){ index_t val=0; for(int i=0; i<blockDim.x; i++){ val+= local[i]; } // count[blockIdx.x]=val; count[blockIdx.x]+=val; } __syncthreads(); } __global__ void init_count(index_t* count) { int tid = threadIdx.x; count[tid] = 0; } __global__ void reduce_kernel(index_t* count) { index_t val = 0; for(int i=0; i<max_block; i++){ val += count[i]; } count[0] = val; } //---------------------------------------- cpu function-------------------- //------------------------------------------------------------------ void graph::initDevice(int GPU_id,int 
Part_id){ //cuda memory copy of partAdj and partBegin // cudaSetDevice(GPU_id); cudaSetDevice(2); int P=Part_id; H_ERR(cudaDeviceSynchronize() ); vertex_t* dev_adj; index_t* dev_begin; index_t* dev_count; Edge* buffer0; Edge* buffer1; index_t EdgeCount = partEdgeCount[P]; vertex_t* Adj = partAdj[P]; index_t* Begin = partBegin[P]; H_ERR(cudaMalloc(&dev_adj, EdgeCount*sizeof(vertex_t)) ); H_ERR(cudaMalloc(&dev_begin, (vert_count+1)*sizeof(index_t)) ); H_ERR(cudaMalloc(&dev_count, max_block*sizeof(index_t)) ); H_ERR(cudaMemcpy(dev_adj, Adj, EdgeCount*sizeof(vertex_t), cudaMemcpyHostToDevice) ); H_ERR(cudaMemcpy(dev_begin, Begin, (vert_count+1)*sizeof(index_t), cudaMemcpyHostToDevice) ); H_ERR(cudaMalloc(&buffer0, BufferSize*sizeof(Edge)) ); H_ERR(cudaMalloc(&buffer1, BufferSize*sizeof(Edge)) ); gdata[GPU_id].adj = dev_adj; gdata[GPU_id].begin = dev_begin; gdata[GPU_id].count = dev_count; gdata[GPU_id].EdgeBuffer[0]= buffer0; gdata[GPU_id].EdgeBuffer[1]= buffer1; gdata[GPU_id].partition_id = P; gdata[GPU_id].currentBuffer= 0; init_count <<<1,max_thd>>>(dev_count); } void graph::DeviceCompute(int GPU_id, index_t Chunk_id){ int P = gdata[GPU_id].partition_id; // if(ds_status[P][Chunk_id]!=0) return; // ds_status[P][Chunk_id]=1; // if(ds_progress[P]<Chunk_id+1) ds_progress[P] = Chunk_id+1; //control vertex_t* dev_adj =gdata[GPU_id].adj; index_t* dev_begin =gdata[GPU_id].begin; index_t* dev_count =gdata[GPU_id].count; Edge* buffer =gdata[GPU_id].EdgeBuffer[gdata[GPU_id].currentBuffer%2]; gdata[GPU_id].currentBuffer =1-gdata[GPU_id].currentBuffer; index_t currentBufferSize = BufferSize; if(Chunk_id==upperEdgeCount/BufferSize){ currentBufferSize = upperEdgeCount % BufferSize; } init_count <<<1,max_thd>>>(dev_count); H_ERR(cudaMemcpy(buffer, &OrientedEdge[Chunk_id*BufferSize], currentBufferSize*sizeof(Edge), cudaMemcpyHostToDevice) ); H_ERR(cudaDeviceSynchronize() ); warp_merge_kernel<<<max_block,max_thd>>> // warp_binary_kernel<<<max_block,max_thd>>> ( buffer, dev_adj, 
dev_begin, 0, // GPU_id*256*256/32, currentBufferSize, dev_count ); //write the result of this chunk back H_ERR(cudaDeviceSynchronize() ); index_t tempcount[max_block]; index_t mycount=0; H_ERR(cudaMemcpy(tempcount, dev_count, max_block*sizeof(index_t), cudaMemcpyDeviceToHost)); for(int i=0; i<max_block; i++) mycount += tempcount[i]; ds_count[P][Chunk_id] = mycount; } void graph::gpuReduce(int GPU_id){ vertex_t* dev_adj =gdata[GPU_id].adj; index_t* dev_begin =gdata[GPU_id].begin; index_t* dev_count =gdata[GPU_id].count; Edge** buffer =gdata[GPU_id].EdgeBuffer; // H_ERR(cudaDeviceSynchronize() ); // reduce_kernel <<<1,max_thd>>>(dev_count); // H_ERR(cudaMemcpy(&count[GPU_id], dev_count, sizeof(index_t), cudaMemcpyDeviceToHost)); // thd_count += count[i]; // count[i] = thd_count; H_ERR(cudaFree(dev_adj) ); H_ERR(cudaFree(dev_begin) ); H_ERR(cudaFree(dev_count) ); H_ERR(cudaFree(buffer[0]) ); H_ERR(cudaFree(buffer[1]) ); // cout<<"GPU "<<GPU_id<<" finished"<<endl; } void graph::gpuProc(int GPU_id){ double t0 = wtime(); index_t total_count=0; for(int P=0; P<PART_NUM; P++){ // int P = GPU_id/4; // if(PART_NUM > 1) int P = GPU_id%PART_NUM; initDevice(GPU_id,P); index_t chunk_per_gpu = (ChunkNum-1)/GPU_NUM + 1; for(index_t i=GPU_id*chunk_per_gpu; i<(GPU_id+1)*chunk_per_gpu; i++ ){ // for(index_t i=GPU_id; i<ChunkNum; i+=GPU_NUM ){ // for(index_t i=GPU_id; i<ChunkNum; i+= 8 ){ // if(i%8<6) if(i<ChunkNum) DeviceCompute(GPU_id,i); } gpuReduce(GPU_id); total_count += count[GPU_id]; } count[GPU_id] = total_count; double t1 = wtime(); cout<<"GPU "<<GPU_id<<" time = "<<t1-t0<<endl; }
1f0fa9d235bc39610cecba502b7de515d625ea0e.hip
// !!! This is a file automatically generated by hipify!!! #define __CUDA 1 #include "fargo.h" #undef __CUDA //#include "glcmap.h" #include <GL/glew.h> #include <stdarg.h> #ifdef __APPLE #include <GLUT/glut.h> #else #include <GL/freeglut.h> #endif #include <hip/hip_runtime_api.h> #include <cuda_gl_interop.h> #include <helper_cuda.h> #include <thrust/device_ptr.h> #include <thrust/fill.h> #include <thrust/extrema.h> #define GL_WINSIZE_X WINSIZE #define GL_WINSIZE_Y WINSIZE // BLOCK_X : in azimuth // #define BLOCK_X DEF_BLOCK_X_GLEXT #define BLOCK_X 8 // BLOCK_Y : in radius #define BLOCK_Y 8 //#define DEBUGGING 1 // [RZS-MOD] // read my own colormaps #include "my_glcmap" unsigned int cmap[256]; char simdir[2048]; //PolarGrid *D2GMass; FILE *avconv = NULL; GLubyte *pixels; bool bFine = false; GLuint gl_PBO, gl_Tex, win; unsigned int *plot_rgba; //extern PolarGrid *FTD; // Field to display int CartesianView = 1, Xsize, Ysize; double minftd = 2e-4, maxftd = 2e-3; int DustGridType = 0; int color_idx = 0; double Zoom=0.05; float DispGamma = 1.0, Top=1.0; int CenterPlanet=1; double FrameRotate = 0.0; static int Update = 1; static int palette_nb = 6, old_palette_nb=6; #ifdef FARGO_INTEGRATION #include "../HIPERION-v1.7/interface_extfuncs.h" double *FineFTD; bool FlowPattern = NO; double FrameRadius = 0.0; //double dust_sizecm[4] = {1e-4,1e-1,1e2,1e5}; //double dust_sizecm[2] = {1e-3,1e-1}; extern int iDustBinNum; // inherited from HIPERION ???? 
extern double *dDustSize; #endif bool Lock = NO; bool LogDisplay = NO; bool DustParticles = NO; bool DustParticlesonGrid = NO; //bool Dust2GassMassRatio = NO; bool CalcVortensity = NO; bool CalcTemperature = NO; bool CalcDiskHeight = NO; bool CalcSoundSpeed = NO; bool CalcDustGasRatio = NO; bool CalcDustSize = NO; void reset_rgba (int mode, unsigned int *plot, unsigned int clBackground); unsigned int ColBackground = 0; void get_rgba(PolarGrid *FTD, unsigned int *plot, unsigned int *cmap, PlanetarySystem *sys, double ar, float fDispGamma, float fTop, double FrameRotate); void get_fine_rgba(double *FTD, unsigned int *plot, unsigned int *cmap, PlanetarySystem *sys, double ar, float fDispGamma, float fTop, double FrameRotate); void DrawField (); void resize(int w, int h); void keyCB (unsigned char key, int x, int y); void keySpecCB (int key, int x, int y); int xview, yview; char winTitle[1024], winTitleMessage[1024], avconv_str[1024], ppm_filename[1024], movie_filename[1024]; int av_count = 0; int nframes = 0; double delta_rad = 0.0, Shift = 0; // RZS [MOD] // function to get wall-clock timings //---------------------------------- #include <sys/time.h> double get_time2 () { struct timeval Tvalue; struct timezone dummy; gettimeofday(&Tvalue,&dummy); return ((double) Tvalue.tv_sec + 1.e-6*((double) Tvalue.tv_usec)); } //---------------------------------- void getminmax (PolarGrid *var) { int i, j, m, nr, ns; double min=1e30, max=-1e30; double *field; D2H (var); field = var->Field; nr = var->Nrad; ns = var->Nsec; for (i = 0; i < nr; i++) { for (j = 0; j < ns; j++) { m = j+i*ns; if (min > field[m]) min = field[m]; if (max < field[m]) max = field[m]; } } minftd = min; maxftd = max; } // RZS [MOD] // GPU-based minmax finding with thrust //------------------------------------- void get_minmax_gpu(PolarGrid* FTD, double* min, double *max) { // wrap raw pointer with a device_ptr thrust::device_ptr<double> d_ftd(FTD->gpu_field); // use thrust to find the maximum element int 
nelements = (FTD->Nrad) * (FTD->Nsec); thrust::device_ptr<double> d_ptr_max_r = thrust::max_element(d_ftd, d_ftd + nelements); thrust::device_ptr<double> d_ptr_min_r = thrust::min_element(d_ftd, d_ftd + nelements); // copy the max element from device memory to host memory hipMemcpy((void*)max, (void*)d_ptr_max_r.get(), sizeof(double), hipMemcpyDeviceToHost); hipMemcpy((void*)min, (void*)d_ptr_min_r.get(), sizeof(double), hipMemcpyDeviceToHost); } void get_minmax_fine_gpu(double* FTD, double* min, double *max) { // wrap raw pointer with a device_ptr thrust::device_ptr<double> d_ftd(FTD); // use thrust to find the maximum element int nelements = NRAD * GASOVERSAMPRAD * NSEC * GASOVERSAMPAZIM; thrust::device_ptr<double> d_ptr_max_r = thrust::max_element(d_ftd, d_ftd + nelements); thrust::device_ptr<double> d_ptr_min_r = thrust::min_element(d_ftd, d_ftd + nelements); // copy the max element from device memory to host memory hipMemcpy((void*)max, (void*)d_ptr_max_r.get(), sizeof(double), hipMemcpyDeviceToHost); hipMemcpy((void*)min, (void*)d_ptr_min_r.get(), sizeof(double), hipMemcpyDeviceToHost); } //------------------------------------- __global__ void fill_d2gm (int pitch, int nr, double* d2gm, const double *gas_dens, const double *dust_dens) { const int j = blockIdx.x*blockDim.x + threadIdx.x; const int i = blockIdx.y*blockDim.y + threadIdx.y; const int m = j+i*pitch; d2gm[m] = dust_dens[m]/gas_dens[m]; } /* void EvalD2GM (PolarGrid *D2GMass, PolarGrid *gas_dens, PolarGrid *dust_dens) { dim3 grid; dim3 block = dim3(BLOCK_X, BLOCK_Y); grid.x = NSEC / BLOCK_X; grid.y = NRAD / BLOCK_Y; //int nsec = (float) (FTD->pitch/sizeof(double)); fill_d2gm <<<grid, block>>> (FTD->pitch/sizeof(double), 0, D2GMass->gpu_field, gas_dens->gpu_field, dust_dens->gpu_field); }*/ void screenshot_ppm(const char *filename, unsigned int width, unsigned int height, GLubyte **pixels) { size_t i, j, cur; const size_t format_nchannels = 3; FILE *f = fopen(filename, "w"); fprintf(f, "P3\n%d 
%d\n%d\n", width, height, 255); *pixels = (GLubyte*) realloc(*pixels, format_nchannels * sizeof(GLubyte) * width * height); glReadPixels(0, 0, width, height, GL_RGB, GL_UNSIGNED_BYTE, *pixels); for (i = 0; i < height; i++) { for (j = 0; j < width; j++) { cur = format_nchannels * ((height - i - 1) * width + j); fprintf(f, "%3d %3d %3d ", (*pixels)[cur], (*pixels)[cur + 1], (*pixels)[cur + 2]); } fprintf(f, "\n"); } fclose(f); } void load_palette () { if (palette_nb > 6) palette_nb = 1; switch (palette_nb) { case 0: memcpy (cmap, cmap0, 256*sizeof(unsigned int)); break; case 1: memcpy (cmap, cmap1, 256*sizeof(unsigned int)); break; case 2: memcpy (cmap, cmap2, 256*sizeof(unsigned int)); break; case 3: memcpy (cmap, cmap3, 256*sizeof(unsigned int)); break; case 4: memcpy (cmap, cmap4, 256*sizeof(unsigned int)); break; case 5: memcpy (cmap, cmap5, 256*sizeof(unsigned int)); break; case 6: memcpy (cmap, cmap6, 256*sizeof(unsigned int)); break; /* case 7: memcpy (cmap, cmap7, 256*sizeof(unsigned int)); break; case 8: memcpy (cmap, cmap8, 256*sizeof(unsigned int)); break; */} } void InitDisplay (int *argc, char **argv) { size_t pitch; memcpy (cmap, cmap1, 256*sizeof(unsigned int)); Xsize = NSEC; Ysize = NRAD; xview = GL_WINSIZE_X; yview = GL_WINSIZE_Y; glutInitWindowSize(xview, yview); glutInitWindowPosition(30, 30); glutInit(argc, argv); glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB); if (strcmp(OUTPUTDIR, "./") == 0) getcwd (simdir, 2048); else snprintf(simdir, 2048, "%s", OUTPUTDIR); snprintf(winTitle, 1023, "GFARGO:%s - gas density", simdir); win = glutCreateWindow(winTitle); //Vortens = CreatePolarGrid (NRAD, NSEC, "Vortensity"); //if (DustGrid) // D2GMass = CreatePolarGrid (NRAD, NSEC, "D2GMass"); //if (Adiabatic) // Temperature = CreatePolarGrid (NRAD, NSEC, "Temperature"); // Check for OpenGL extension support if (verbose) printf("Loading OPENGL extensions: %s\n", glewGetErrorString(glewInit())); else glewGetErrorString(glewInit()); 
if(!glewIsSupported("GL_VERSION_2_0 " "GL_ARB_pixel_buffer_object " "GL_EXT_framebuffer_object ")){ fprintf(stderr, "ERROR: Support for necessary OpenGL extensions missing."); fflush(stderr); return; } // Set up view glClearColor(0.0, 0.0, 0.0, 0.0); glMatrixMode(GL_PROJECTION); glLoadIdentity(); glOrtho(0,Xsize,0.,Ysize, -200.0, 200.0); // Create texture which we use to display the result and bind to gl_Tex glEnable(GL_TEXTURE_2D); glGenTextures(1, &gl_Tex); // Generate 2D texture glBindTexture(GL_TEXTURE_2D, gl_Tex); // bind to gl_Tex // texture properties: glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, Xsize, Ysize, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL); pitch = NSEC*sizeof(double); // Create pixel buffer object and bind to gl_PBO. We store the data we want to // plot in memory on the graphics card - in a "pixel buffer". 
We can then // copy this to the texture defined above and send it to the screen glGenBuffers(1, &gl_PBO); glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, gl_PBO); glBufferData(GL_PIXEL_UNPACK_BUFFER_ARB, pitch*Ysize, NULL, GL_STREAM_COPY); checkCudaErrors( hipGLRegisterBufferObject(gl_PBO) ); // define glut functions glutDisplayFunc(DrawField); glutReshapeFunc(resize); glutIdleFunc(DrawField); glutKeyboardFunc(keyCB); glutSpecialFunc(keySpecCB); load_palette (); #ifdef DEBUGGING Paused = 1; #endif } void DisplayLoadDensity () { keyCB ('c', 0, 0); keyCB ('d', 0, 0); keyCB ('s', 0, 0); Zoom = 1.0/RMAX; } #include <unistd.h> static int frameNum = 0; void DrawField () { static int FirstTime = YES, wx, wy; static double previous, actual; //static double ts, te; double ar; // hydro calculation, if not movie generation is requested (option -m) if (!CreatingMovieOnly) Loop (); // just red snapshot file and display it else { ReadfromFile (gas_density, "gas_dens", frameNum); H2D (gas_density); frameNum++; usleep (100000); } #ifdef DEBUGGING Paused = 1; #endif // update window only if its is not turned off //if (Update == -1) // return; wx = glutGet (GLUT_WINDOW_WIDTH); wy = glutGet (GLUT_WINDOW_HEIGHT); ar = (double)wx/(double)wy; actual = clock(); if ((((actual - previous)/CLOCKS_PER_SEC) > (1./RefreshRate)) || (FirstTime == YES) || CreatingMovieOnly) { if (Update >= 0) { FirstTime = NO; #ifdef DEBUGGING get_minmax_gpu (FTD, &minftd, &maxftd); #endif // some additional values must be calculated if requested if (CalcVortensity == YES) CalcVortens_gpu (gas_density, gas_v_rad, gas_v_theta, Work); if (Adiabatic) { if(CalcTemperature) CalcTemp_gpu (gas_density, gas_energy, Work); else if (CalcDiskHeight) CalcDiskHeight_gpu (gas_density, gas_energy, Work); else if (CalcSoundSpeed) CalcSoundSpeed_gpu (gas_density, gas_energy, Work); } if (CalcDustGasRatio && DustGrid) CalcDustGasMassRatio_gpu(gas_density, dust_density[color_idx], Work); if (CalcDustSize) CalcDustSize_gpu(dust_size, 
dust_density[0], Work); #ifdef FARGO_INTEGRATON // if (Dust2GassMassRatio == YES) // EvalD2GM (D2GMass, gas_density, DustDens); #endif // Apparently there is no need to do a hipMalloc of plot_rgba, the following function does the job checkCudaErrors(hipGLMapBufferObject__((void**)&plot_rgba, gl_PBO)); //get_minmax_gpu (FTD, &minftd, &maxftd); // [RZS-MOD] // lock view to the planet 0 //-------------------------------------- if (Lock) { double lock_rad = sqrt(sys->y[0]*sys->y[0] + sys->x[0]*sys->x[0]); FrameRotate = atan2 (sys->y[0] , sys->x[0]); FrameRotate = (FrameRotate < 0.0 ? FrameRotate + 2.0 * M_PI: FrameRotate); FrameRotate -= (pow(lock_rad*(1.0+delta_rad), -1.5) - pow(lock_rad, -1.5))*PhysicalTime - Shift; FrameRotate = fmod (FrameRotate, 2.0*M_PI); } else FrameRotate = 0.0; //-------------------------------------- // plot filed if (FTD != NULL) { #ifdef FARGO_INTEGRATION if (bFine) get_fine_rgba(FineFTD, plot_rgba, cmap, sys, ar, DispGamma, Top, FrameRotate); else #endif get_rgba(FTD, plot_rgba, cmap, sys, ar, DispGamma, Top, FrameRotate); } // [RZS-MOD] // displaying dust particles //-------------------------- #ifdef FARGO_INTEGRATION if (DustParticles) { double xcenter = 0.0, ycenter = 0.0; if (CenterPlanet == YES) { xcenter = sys->x[0]; ycenter = sys->y[0]; } if (FTD == NULL) field (FlowPattern, plot_rgba, ColBackground); if (CartesianView) HIPERION_DisplayDust (0, NRAD, gas_density->pitch/sizeof(double), 0, 0, 0, 0, RMIN, RMAX, FrameRotate, color_idx, plot_rgba); else HIPERION_DisplayDust (1, Xsize, Ysize, ar, Zoom, Xsize/2, Ysize/2, xcenter, ycenter, FrameRotate, color_idx, plot_rgba); } #endif //-------------------------- checkCudaErrors(hipGLUnmapBufferObject(gl_PBO)); // Copy the pixel buffer to the texture, ready to display glTexSubImage2D(GL_TEXTURE_2D,0,0,0,Xsize,Ysize,GL_RGBA,GL_UNSIGNED_BYTE,0); // Render one quad to the screen and colour it using our texture // i.e. 
plot our plotvar data to the screen glClear(GL_COLOR_BUFFER_BIT); glBegin(GL_QUADS); glTexCoord2f (0.0, 0.0); glVertex3f (0.0, 0.0, 0.0); glTexCoord2f (1.0, 0.0); glVertex3f (Xsize, 0.0, 0.0); glTexCoord2f (1.0, 1.0); glVertex3f (Xsize, Ysize, 0.0); glTexCoord2f (0.0, 1.0); glVertex3f (0.0, Ysize, 0.0); glEnd(); glutSwapBuffers(); /* save */ if (avconv) { glReadPixels(0, 0, Xsize, Ysize, GL_RGB, GL_UNSIGNED_BYTE, pixels); fwrite(pixels ,Xsize*Ysize*3 , 1, avconv); //add_frame_tomovie (&pixels); } // refesh rate dpending on Update or not if (Update==0) RefreshRate = 1; else RefreshRate = 50; } previous = actual; } } // GLUT special key functions void keySpecCB (int key, int x, int y) { switch (key) { case GLUT_KEY_UP: delta_rad += 0.01; break; case GLUT_KEY_DOWN: delta_rad -= 0.01; break; case GLUT_KEY_LEFT: Shift -= 0.1; if (Shift > 2.0*M_PI) Shift = 0; break; case GLUT_KEY_RIGHT: Shift += 0.1; if (Shift < 0) Shift = 0; break; } } // GLUT normal key functions void keyCB (unsigned char key, int x, int y) { size_t pitch; static int fullscreen = 0, px, py, wx, wy; #ifdef DEBUGGING if (key == ' ') { Paused = 0; } #else // stop simulation if (key == ' ') { Paused = 1 - Paused; } #endif // display control functions //------------------------------------------------------------------------------------------------- // zoom in if (key == '+') { Zoom *= 1.4; if (DustParticles) reset_rgba (0, plot_rgba, ColBackground); } // zoom out if (key == '-') { Zoom /= 1.4; if (DustParticles) reset_rgba (0, plot_rgba, ColBackground); } // centering on planet 0 if (key == 's') { CenterPlanet = 1-CenterPlanet; if (DustParticles) reset_rgba (0, plot_rgba, ColBackground); } // full screen if (key == 'f') { // if video is creating do not change anithing if (avconv) return; fullscreen = 1-fullscreen; if (fullscreen == 1) { px = glutGet (GLUT_WINDOW_X); py = glutGet (GLUT_WINDOW_Y); wx = glutGet (GLUT_WINDOW_WIDTH); wy = glutGet (GLUT_WINDOW_HEIGHT); glutFullScreen(); } if (fullscreen == 0) { 
glutReshapeWindow(wx, wy); glutPositionWindow(px, py); } } // temporarily turn off real time displaying if (key == 'w') { Update = !Update; if (Update == 0) { old_palette_nb = palette_nb; palette_nb = 0; } else if (Update == 1) { palette_nb = old_palette_nb; if (FTD != NULL) get_minmax_gpu (FTD, &minftd, &maxftd); } load_palette (); } // exit simulation if (key == 27) { printf ("\n"); // close movie if it is creating if (avconv) { pclose(avconv); free (pixels); } exit (0); } // change view type between polar and cartesian if (key == 'c') { // if video is creating do not change anithing if (avconv) return; // no full screen for if (fullscreen == 1) return; CartesianView = 1-CartesianView; // cartesian grid if (CartesianView == 1) { Xsize = NSEC; Ysize = NRAD; xview = NSEC; yview = NRAD; } // polar grid else { Ysize = xview = WINSIZE; Xsize = yview = WINSIZE; } glutReshapeWindow (xview, yview); glutPostRedisplay(); checkCudaErrors(hipGLUnregisterBufferObject (gl_PBO)); glDeleteBuffers (1, &gl_PBO); glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, Xsize, Ysize, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL); pitch = Xsize*sizeof(double); glGenBuffers(1, &gl_PBO); glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, gl_PBO); glBufferData(GL_PIXEL_UNPACK_BUFFER_ARB, pitch*Ysize, NULL, GL_STREAM_COPY); checkCudaErrors( hipGLRegisterBufferObject(gl_PBO) ); glFlush (); } // change color table for hydro if (key == 'p') { palette_nb++; load_palette (); } // lock planet if (key == 'l') { Lock = !Lock; } // logarithmic filed if (key == 'L') { LogDisplay = !LogDisplay; } // increase gamma if (key == '1') { DispGamma /= 1.25; if (DispGamma < 0.0001) DispGamma = 0.0001; } // decrease gamma if (key == '2') { DispGamma *= 1.25; if (DispGamma > 1000) DispGamma = 1000; } // decrease maximum value to plot if (key == '3') { Top /= 1.25; if (Top < 1e-6) Top = 1e-6; } // increase maximum value to plot if (key == '4') { Top *= 1.25; if (Top > 1) Top = 1; } // reset max, min and gamma if (key == '0') { DispGamma = 1.0; 
Top = 1.0; // recalcalculate requested value if (CalcVortensity == YES) CalcVortens_gpu (gas_density, gas_v_rad, gas_v_theta, Work); if (Adiabatic) { if(CalcTemperature) CalcTemp_gpu (gas_density, gas_energy, Work); else if (CalcDiskHeight) CalcDiskHeight_gpu (gas_density, gas_energy, Work); else if (CalcSoundSpeed) CalcSoundSpeed_gpu (gas_density, gas_energy, Work); } if (CalcDustGasRatio && DustGrid) CalcDustGasMassRatio_gpu(gas_density, dust_density[color_idx], Work); if (CalcDustSize) CalcDustSize_gpu(dust_size, dust_density[0], Work); if (FTD != NULL) { #ifdef FARGO_INTEGRATION if (bFine) get_minmax_fine_gpu (FineFTD, &minftd, &maxftd); else #endif get_minmax_gpu (FTD, &minftd, &maxftd); /* if (minftd == maxftd) { minftd = SIGMA0 / 3.0; maxftd = SIGMA0 * 3.0; } */ } printf ("\n%s min/max: %e/%e\n", FTD->Name, minftd, maxftd); } // change background color if (key == 'I') { // to black if (ColBackground == 255+256*255+256*256*255) ColBackground = 0; //to white else if (ColBackground == 0) ColBackground = 255+256*255+256*256*255; } //------------------------------------------------------------------------------------------------- // select gas field to display //------------------------------------------------------------------------------------------------- // gas surface mass density if (key == 'W') { bFine = false; FTD = myWork; DustGridType = 0; CalcVortensity = NO; CalcTemperature = NO; CalcDiskHeight = NO; DustParticlesonGrid = NO; DustParticles = NO; // Dust2GassMassRatio = NO; CalcSoundSpeed = NO; CalcDustGasRatio = NO; get_minmax_gpu (FTD, &minftd, &maxftd); if (minftd == maxftd) { minftd = SIGMA0 / 3.0; maxftd = SIGMA0 * 3.0; } DispGamma = 1.0; Top = 1.0; snprintf(winTitle, 1023, "GFARGO:%s - ???????? 
%s", simdir, winTitleMessage); glutSetWindowTitle (winTitle); } // gas surface mass density if (key == 'd') { bFine = false; FTD = gas_density; DustGridType = 0; CalcVortensity = NO; CalcTemperature = NO; CalcDiskHeight = NO; DustParticlesonGrid = NO; DustParticles = NO; // Dust2GassMassRatio = NO; CalcSoundSpeed = NO; CalcDustGasRatio = NO; CalcDustSize = NO; get_minmax_gpu (FTD, &minftd, &maxftd); if (minftd == maxftd) { minftd = SIGMA0 / 3.0; maxftd = SIGMA0 * 3.0; } DispGamma = 1.0; Top = 1.0; snprintf(winTitle, 1023, "GFARGO:%s - gas density %s", simdir, winTitleMessage); glutSetWindowTitle (winTitle); } // gas radial velocity if (key == 'r') { bFine = false; FTD = gas_v_rad; DustGridType = 0; CalcVortensity = NO; CalcTemperature = NO; CalcDiskHeight = NO; DustParticlesonGrid = NO; DustParticles = NO; // Dust2GassMassRatio = NO; CalcSoundSpeed = NO; CalcDustGasRatio = NO; CalcDustSize = NO; get_minmax_gpu (FTD, &minftd, &maxftd); DispGamma = 1.0; Top = 1.0; snprintf(winTitle, 1023, "GFARGO:%s - gas radial velocity %s", simdir, winTitleMessage); glutSetWindowTitle (winTitle); } // gas azimuthal velocity field if (key == 't') { bFine = false; FTD = gas_v_theta; DustGridType = 0; CalcVortensity = NO; CalcTemperature = NO; CalcDiskHeight = NO; DustParticlesonGrid = NO; DustParticles = NO; // Dust2GassMassRatio = NO; CalcSoundSpeed = NO; CalcDustGasRatio = NO; CalcDustSize = NO; DispGamma = 1.0; Top = 1.0; get_minmax_gpu (FTD, &minftd, &maxftd); snprintf(winTitle, 1023, "GFARGO:%s - gas azimuthal velocity %s", simdir, winTitleMessage); glutSetWindowTitle (winTitle); } // gas vortensity /*if (key == 'v') { bFine = false; FTD = Work; DustGridType = 0; CalcVortensity = YES; CalcTemperature = NO; CalcDiskHeight = NO; DustParticlesonGrid = NO; DustParticles = NO; // Dust2GassMassRatio = NO; CalcSoundSpeed = NO; CalcVortens_gpu (gas_density, gas_v_rad, gas_v_theta, Work); get_minmax_gpu (FTD, &minftd, &maxftd); snprintf(winTitle, 1023, "GFARGO:%s - gas vortensity %s", 
simdir, winTitleMessage); glutSetWindowTitle (winTitle); }*/ // gas disk eccentricity if (key == 'e') { bFine = false; FTD = disk_ecc; DustGridType = 0; CalcVortensity = NO; CalcTemperature = NO; CalcDiskHeight = NO; DustParticlesonGrid = NO; DustParticles = NO; // Dust2GassMassRatio = NO; CalcSoundSpeed = NO; CalcDustGasRatio = NO; CalcDustSize = NO; DispGamma = 1.0; Top = 1.0; get_minmax_gpu (FTD, &minftd, &maxftd); snprintf(winTitle, 1023, "GFARGO:%s - gas disk eccentricity %s", simdir, winTitleMessage); glutSetWindowTitle (winTitle); } // gas disk eccentricity if (key == 'P') { bFine = false; FTD = Potential; DustGridType = 0; CalcVortensity = NO; CalcTemperature = NO; CalcDiskHeight = NO; DustParticlesonGrid = NO; DustParticles = NO; // Dust2GassMassRatio = NO; CalcSoundSpeed = NO; CalcDustGasRatio = NO; CalcDustSize = NO; DispGamma = 1.0; Top = 1.0; get_minmax_gpu (FTD, &minftd, &maxftd); snprintf(winTitle, 1023, "GFARGO:%s - disk grap pot %s", simdir, winTitleMessage); glutSetWindowTitle (winTitle); } // select adiabatic gas field to display //------------------------------------------------------------------------------------------------- if (Adiabatic || AdaptiveViscosity) { // viscosity field (for adaptive or adiabatic disks) if (key == 'a') { bFine = false; FTD = Viscosity; DustGridType = 0; CalcVortensity = NO; CalcTemperature = NO; CalcDiskHeight = NO; DustParticlesonGrid = NO; DustParticles = NO; // Dust2GassMassRatio = NO; CalcSoundSpeed = NO; CalcDustGasRatio = NO; CalcDustSize = NO; DispGamma = 1.0; Top = 1.0; get_minmax_gpu (FTD, &minftd, &maxftd); snprintf(winTitle, 1023, "GFARGO:%s - viscosity %s", simdir, winTitleMessage); glutSetWindowTitle (winTitle); } } if (Adiabatic) { // gas internal energy if (key == 'q') { bFine = false; FTD = gas_energy; DustGridType = 0; CalcVortensity = NO; CalcTemperature = NO; CalcDiskHeight = NO; DustParticlesonGrid = NO; DustParticles = NO; // Dust2GassMassRatio = NO; CalcSoundSpeed = NO; CalcDustGasRatio = NO; 
CalcDustSize = NO; DispGamma = 1.0; Top = 1.0; get_minmax_gpu (FTD, &minftd, &maxftd); snprintf(winTitle, 1023, "GFARGO:%s - gas specific energy %s", simdir, winTitleMessage); glutSetWindowTitle (winTitle); } // gas temperature if (key == 'Q') { bFine = false; FTD = Work; CalcTemperature = YES; DustGridType = 0; CalcVortensity = NO; CalcDiskHeight = NO; DustParticlesonGrid = NO; DustParticles = NO; //Dust2GassMassRatio = NO; CalcSoundSpeed = NO; CalcDustGasRatio = NO; CalcDustSize = NO; CalcTemp_gpu (gas_density, gas_energy, Work); DispGamma = 1.0; Top = 1.0; get_minmax_gpu (FTD, &minftd, &maxftd); snprintf(winTitle, 1023, "GFARGO:%s - gas temperature %s", simdir, winTitleMessage); glutSetWindowTitle (winTitle); } // disk adiabatic height if (key == 'h') { bFine = false; FTD = Work; CalcDiskHeight = YES; DustGridType = 0; CalcVortensity = NO; CalcTemperature = NO; DustParticlesonGrid = NO; DustParticles = NO; //Dust2GassMassRatio = NO; CalcSoundSpeed = NO; CalcDustGasRatio = NO; CalcDustSize = NO; CalcDiskHeight_gpu (gas_density, gas_energy, Work); DispGamma = 1.0; Top = 1.0; get_minmax_gpu (FTD, &minftd, &maxftd); snprintf(winTitle, 1023, "GFARGO:%s - disk height %s", simdir, winTitleMessage); glutSetWindowTitle (winTitle); } // disk soundspeed if (key == 'H') { bFine = false; FTD = Work; CalcSoundSpeed = YES; CalcDiskHeight = NO; DustGridType = 0; CalcVortensity = NO; CalcTemperature = NO; DustParticlesonGrid = NO; DustParticles = NO; //Dust2GassMassRatio = NO; CalcDustGasRatio = NO; CalcDustSize = NO; CalcSoundSpeed_gpu (gas_density, gas_energy, Work); DispGamma = 1.0; Top = 1.0; get_minmax_gpu (FTD, &minftd, &maxftd); snprintf(winTitle, 1023, "GFARGO:%s - disk cs %s", simdir, winTitleMessage); glutSetWindowTitle (winTitle); } } // select dust field to display //------------------------------------------------------------------------------------------------- // dust surface mass density if (DustGrid) { if (key == 'x') { bFine = false; FTD = 
dust_density[color_idx]; CalcVortensity = NO; CalcTemperature = NO; CalcDiskHeight = NO; DustGridType = 1; DustParticlesonGrid = NO; DustParticles = NO; CalcDustGasRatio = NO; DispGamma = 1.0; Top = 1.0; CalcDustSize = NO; get_minmax_gpu (FTD, &minftd, &maxftd); if (DustConstStokes) sprintf (winTitle, "GFARGO:%s - dust dens [St=%0.2e] %s", simdir, DustSizeBin[color_idx], winTitleMessage); else if (DustGrowth) if (color_idx == 0) sprintf (winTitle, "GFARGO:%s - grown dust dens %s", simdir, winTitleMessage); else sprintf (winTitle, "GFARGO:%s - small dust dens %s", simdir, winTitleMessage); else sprintf (winTitle, "GFARGO:%s - dust dens [s=%0.2e cm] %s", simdir, DustSizeBin[color_idx], winTitleMessage); glutSetWindowTitle (winTitle); } // dust radial velocity if (key == 'y') { bFine = false; FTD = dust_v_rad[color_idx]; DustGridType = 2; CalcVortensity = NO; CalcTemperature = NO; CalcDiskHeight = NO; DustParticlesonGrid = NO; DustParticles = NO; CalcDustGasRatio = NO; CalcDustSize = NO; DispGamma = 1.0; Top = 1.0; get_minmax_gpu (FTD, &minftd, &maxftd); sprintf (winTitle, "GFARGO:%s - dust vrad [%0.2e cm] %s", simdir, DustSizeBin[color_idx], winTitleMessage); glutSetWindowTitle (winTitle); } // dust azimuthal velocity if (key == 'z') { bFine = false; FTD = dust_v_theta[color_idx]; DustGridType = 3; CalcVortensity = NO; CalcTemperature = NO; CalcDiskHeight = NO; DustParticlesonGrid = NO; DustParticles = NO; CalcDustGasRatio = NO; CalcDustSize = NO; DispGamma = 1.0; Top = 1.0; get_minmax_gpu (FTD, &minftd, &maxftd); sprintf (winTitle, "GFARGO:%s - dust vth [%0.2e cm] %s", simdir, DustSizeBin[color_idx], winTitleMessage); glutSetWindowTitle (winTitle); } // grown dust size if (DustGrowth) if (key == 'b') { bFine = false; FTD = dust_size; //FTD = Work; DustGridType = 4; CalcVortensity = NO; CalcTemperature = NO; CalcDiskHeight = NO; DustParticlesonGrid = NO; DustParticles = NO; CalcDustGasRatio = NO; CalcDustSize = NO; //CalcDustSize = YES; //CalcDustSize_gpu(dust_size, 
dust_density[0], Work); DispGamma = 1.0; Top = 1.0; get_minmax_gpu (FTD, &minftd, &maxftd); sprintf (winTitle, "GFARGO:%s - dust size %s", simdir, winTitleMessage); glutSetWindowTitle (winTitle); } // dust-to-gas mass ratio if (key == 'G') { bFine = false; FTD = Work; DustGridType = 5; CalcVortensity = NO; CalcTemperature = NO; CalcDiskHeight = NO; DustParticlesonGrid = NO; DustParticles = NO; CalcDustGasRatio = YES; CalcDustSize = NO; CalcDustGasMassRatio_gpu(gas_density, dust_density[color_idx], Work); DispGamma = 1.0; Top = 1.0; get_minmax_gpu (FTD, &minftd, &maxftd); sprintf (winTitle, "GFARGO:%s - Md/Mg [%0.2e cm] %s", simdir, DustSizeBin[color_idx], winTitleMessage); glutSetWindowTitle (winTitle); } // change color palette for dust representation if (key == ']') { if (color_idx < DustBinNum-1) color_idx++; else color_idx=0; switch (DustGridType) { case 1: keyCB ('x', 0,0); break; case 2: keyCB ('y', 0,0); break; case 3: keyCB ('z', 0,0); break; case 4: keyCB ('b', 0,0); break; case 5: keyCB ('G', 0,0); break; } } // change color palette for dust representation if (key == '[') { if (color_idx > 0) color_idx--; else color_idx=DustBinNum-1; switch (DustGridType) { case 1: keyCB ('x', 0,0); break; case 2: keyCB ('y', 0,0); break; case 3: keyCB ('z', 0,0); break; case 4: keyCB ('b', 0,0); break; case 5: keyCB ('G', 0,0); break; } } } //------------------------------------------------------------------------------------------------- #ifdef FARGO_INTEGRATION // HIPERION integration //------------------------------------------------------------------------------------------------- // interpolated surface mass density on the fine grid if (key == 'D') { bFine = true; FineFTD = fine_gas_density; DustGridType = 0; CalcVortensity = NO; CalcTemperature = NO; CalcDiskHeight = NO; DustParticlesonGrid = NO; DustParticles = NO; //Dust2GassMassRatio = NO; CalcDustGasRatio = NO; DispGamma = 1.0; Top = 1.0; get_minmax_fine_gpu (FineFTD, &minftd, &maxftd); if (minftd == maxftd) { 
minftd = SIGMA0 / 3.0; maxftd = SIGMA0 * 3.0; } snprintf(winTitle, 1023, "GFARGO:%s - gas density (ovrs) %s", simdir, winTitleMessage); glutSetWindowTitle (winTitle); } // interpolated radial gas velocity on the fine grid if (key == 'R') { bFine = true; FineFTD = fine_gas_dv_rad; DustGridType = 0; CalcVortensity = NO; CalcTemperature = NO; CalcDiskHeight = NO; DustParticlesonGrid = NO; DustParticles = NO; //Dust2GassMassRatio = NO; CalcDustGasRatio = NO; DispGamma = 1.0; Top = 1.0; get_minmax_fine_gpu (FineFTD, &minftd, &maxftd); snprintf(winTitle, 1023, "GFARGO:%s - gas radial velocity (ovrs) %s", simdir, winTitleMessage); glutSetWindowTitle (winTitle); } // interpolated azimuthal gas velocity field on the fine grid if (key == 'T') { bFine = true; FineFTD = fine_gas_dv_theta; DustGridType = 0; CalcVortensity = NO; CalcTemperature = NO; CalcDiskHeight = NO; DustParticlesonGrid = NO; DustParticles = NO; //Dust2GassMassRatio = NO; CalcDustGasRatio = NO; DispGamma = 1.0; Top = 1.0; get_minmax_fine_gpu (FineFTD, &minftd, &maxftd); snprintf(winTitle, 1023, "GFARGO:%s - gas azimuthal velocity (ovrs) %s", simdir, winTitleMessage); glutSetWindowTitle (winTitle); } // flow pattern of dust if (key == 'n') { FlowPattern = !FlowPattern; if (!FlowPattern) field (0, plot_rgba, ColBackground); } // dust distribution if (key == 'b') { Vortensity = NO; CalcVortensity = NO; CalcTemperature = NO; CalcDiskHeight = NO; DustParticles = YES; DustParticlesonGrid = NO; //Dust2GassMassRatio = NO; CalcDustGasRatio = NO; DispGamma = 1.0; Top = 1.0; sprintf (winTitle, "GFARGO:%s - dust [%0.2e cm] %s", simdir, dDustSize[color_idx], winTitleMessage); glutSetWindowTitle (winTitle); field (0, plot_rgba,ColBackground); FTD = NULL; } // turn off if (key == 'B') { CalcVortensity = NO; CalcTemperature = NO; CalcDiskHeight = NO; //Dust2GassMassRatio = NO; CalcDustGasRatio = NO; if (FTD != NULL) DustParticles = !DustParticles; palette_nb = 0; load_palette (); } // change color palette for dust 
representation if (key == ']') { if (color_idx < iDustBinNum-1) color_idx++; else color_idx=0; if (DustParticles) { sprintf (winTitle, "GFARGO:%s - dust [%0.2e cm] %s", simdir, dDustSize[color_idx], winTitleMessage); glutSetWindowTitle (winTitle); } } if (key == '[') { if (color_idx > 0) color_idx--; else color_idx=iDustBinNum-1; if (DustParticles) { sprintf (winTitle, "GFARGO:%s - dust [%0.2e cm] %s", simdir, dDustSize[color_idx], winTitleMessage); glutSetWindowTitle (winTitle); } } #ifdef DUST_FEEDBACK if (key == 'g') { DustParticles = NO; CalcVortensity = NO; CalcTemperature = NO; CalcDiskHeight = NO; DustParticlesonGrid = YES; //Dust2GassMassRatio = NO; CalcDustGasRatio = NO; FTD = DustDens; get_minmax_gpu (FTD, &minftd, &maxftd); DispGamma = 1.0; Top = 1.0; sprintf (winTitle, "GFARGO:%s - dust on grid [%0.2e cm] %s", simdir, dDustSize[color_idx], winTitleMessage); glutSetWindowTitle (winTitle); } /*if (key == 'G') { CalcVortensity = NO; CalcTemperature = NO; CalcDiskHeight = NO; DustParticles = NO; //Dust2GassMassRatio = YES; FTD = D2GMass; get_minmax_gpu (FTD, &minftd, &maxftd); printf ("\n%e %e\n", minftd, maxftd); sprintf (winTitle, "GFARGO:%s - dust-to-gas mass ratio %s", simdir, winTitleMessage); glutSetWindowTitle (winTitle); */ } #endif //------------------------------------------------------------------------------------------------- #endif // screenshots & movie //------------------------------------------------------------------------------------------------- // initiate movie if (key == 'M') { av_count++; snprintf(movie_filename, 1023, "GFARGO_%i.mov", av_count); //snprintf (avconv_str, 1024, "avconv -y -f rawvideo -qscale 10 -s %ix%i -pix_fmt rgb24 -r 25 -i - -vf vflip -an -b:v 20M %s", Xsize, Ysize, movie_filename); snprintf (avconv_str, 1024, "avconv -y -f rawvideo -s %ix%i -pix_fmt rgb24 -r 25 -i - -vf vflip -an -b:v 20M %s", Xsize, Ysize, movie_filename); avconv = popen(avconv_str, "w"); pixels = (GLubyte *) malloc(3 * 768 * 768); 
snprintf(winTitleMessage, 1023, "(Snapshot: %s)", movie_filename); snprintf(winTitle, 1023, "%s %s", winTitle, winTitleMessage); glutSetWindowTitle (winTitle); } // stop movie if (key == 'm') { // close movie if it is opened if (avconv) { pclose(avconv); //ffmpeg_encoder_finish(); free (pixels); avconv = NULL; glutSetWindowTitle (winTitle); snprintf(winTitleMessage, 1023, "Movie finished"); } } // create a snapshot png if (key == '>') { snprintf(ppm_filename, 1023, "./hydro_snapshot_%d.ppm", nframes); screenshot_ppm(ppm_filename, Xsize, Ysize, &pixels); nframes++; printf ("\nSnapshot is taken to <%s>\n", ppm_filename); } //------------------------------------------------------------------------------------------------- } void resize(int w, int h) { glViewport (0, 0, w, h); glMatrixMode (GL_PROJECTION); glLoadIdentity (); glOrtho (0., Xsize, 0., Ysize, -200. ,200.); glMatrixMode (GL_MODELVIEW); glLoadIdentity (); } void StartMainLoop () { glutMainLoop(); }
1f0fa9d235bc39610cecba502b7de515d625ea0e.cu
#define __CUDA 1 #include "fargo.h" #undef __CUDA //#include "glcmap.h" #include <GL/glew.h> #include <stdarg.h> #ifdef __APPLE #include <GLUT/glut.h> #else #include <GL/freeglut.h> #endif #include <cuda_runtime_api.h> #include <cuda_gl_interop.h> #include <helper_cuda.h> #include <thrust/device_ptr.h> #include <thrust/fill.h> #include <thrust/extrema.h> #define GL_WINSIZE_X WINSIZE #define GL_WINSIZE_Y WINSIZE // BLOCK_X : in azimuth // #define BLOCK_X DEF_BLOCK_X_GLEXT #define BLOCK_X 8 // BLOCK_Y : in radius #define BLOCK_Y 8 //#define DEBUGGING 1 // [RZS-MOD] // read my own colormaps #include "my_glcmap" unsigned int cmap[256]; char simdir[2048]; //PolarGrid *D2GMass; FILE *avconv = NULL; GLubyte *pixels; bool bFine = false; GLuint gl_PBO, gl_Tex, win; unsigned int *plot_rgba; //extern PolarGrid *FTD; // Field to display int CartesianView = 1, Xsize, Ysize; double minftd = 2e-4, maxftd = 2e-3; int DustGridType = 0; int color_idx = 0; double Zoom=0.05; float DispGamma = 1.0, Top=1.0; int CenterPlanet=1; double FrameRotate = 0.0; static int Update = 1; static int palette_nb = 6, old_palette_nb=6; #ifdef FARGO_INTEGRATION #include "../HIPERION-v1.7/interface_extfuncs.h" double *FineFTD; bool FlowPattern = NO; double FrameRadius = 0.0; //double dust_sizecm[4] = {1e-4,1e-1,1e2,1e5}; //double dust_sizecm[2] = {1e-3,1e-1}; extern int iDustBinNum; // inherited from HIPERION ???? 
extern double *dDustSize;
#endif

// Display-mode state flags; all of these are toggled from the keyboard
// handler keyCB and read each frame by DrawField.
bool Lock = NO;                // lock the view frame to planet 0
bool LogDisplay = NO;          // logarithmic scaling of the displayed field
bool DustParticles = NO;       // overlay Lagrangian dust particles
bool DustParticlesonGrid = NO; // show dust deposited on the grid
//bool Dust2GassMassRatio = NO;
bool CalcVortensity = NO;      // recompute gas vortensity into Work before display
bool CalcTemperature = NO;     // recompute gas temperature into Work before display
bool CalcDiskHeight = NO;      // recompute disk scale height into Work before display
bool CalcSoundSpeed = NO;      // recompute sound speed into Work before display
bool CalcDustGasRatio = NO;    // recompute dust-to-gas mass ratio into Work before display
bool CalcDustSize = NO;        // recompute grown dust size before display

// Rasterization helpers implemented elsewhere: fill the RGBA pixel buffer
// from a polar field (get_rgba) or from the oversampled fine grid.
void reset_rgba (int mode, unsigned int *plot, unsigned int clBackground);
unsigned int ColBackground = 0;   // background color, 0 = black
void get_rgba(PolarGrid *FTD, unsigned int *plot, unsigned int *cmap, PlanetarySystem *sys, double ar, float fDispGamma, float fTop, double FrameRotate);
void get_fine_rgba(double *FTD, unsigned int *plot, unsigned int *cmap, PlanetarySystem *sys, double ar, float fDispGamma, float fTop, double FrameRotate);
void DrawField ();
void resize(int w, int h);
void keyCB (unsigned char key, int x, int y);
void keySpecCB (int key, int x, int y);

int xview, yview;   // current GLUT viewport size
char winTitle[1024], winTitleMessage[1024], avconv_str[1024], ppm_filename[1024], movie_filename[1024];
int av_count = 0;   // number of movie recordings started so far
int nframes = 0;    // number of PPM snapshots taken so far
double delta_rad = 0.0, Shift = 0;  // corotating-frame tweaks driven by the arrow keys

// RZS [MOD]
// function to get wall-clock timings
//----------------------------------
#include <sys/time.h>
double get_time2 () {
  struct timeval Tvalue;
  struct timezone dummy;
  gettimeofday(&Tvalue,&dummy);
  // seconds + microseconds folded into a single double
  return ((double) Tvalue.tv_sec + 1.e-6*((double) Tvalue.tv_usec));
}
//----------------------------------

// CPU-side min/max of a polar grid: copies the field from the device (D2H)
// and scans it element by element; results go to the globals minftd/maxftd
// used by the color mapping.
void getminmax (PolarGrid *var) {
  int i, j, m, nr, ns;
  double min=1e30, max=-1e30;
  double *field;
  D2H (var);   // device -> host copy of var->Field
  field = var->Field;
  nr = var->Nrad;
  ns = var->Nsec;
  for (i = 0; i < nr; i++) {
    for (j = 0; j < ns; j++) {
      m = j+i*ns;
      if (min > field[m]) min = field[m];
      if (max < field[m]) max = field[m];
    }
  }
  minftd = min;
  maxftd = max;
}

// RZS [MOD]
// GPU-based minmax finding with thrust
// Writes the extrema of FTD->gpu_field into *min / *max without copying the
// whole field to the host.
//-------------------------------------
void get_minmax_gpu(PolarGrid* FTD, double* min, double *max) {
  // wrap raw pointer with a device_ptr
  thrust::device_ptr<double> d_ftd(FTD->gpu_field);
  // use thrust to find the maximum element
  int nelements = (FTD->Nrad) * (FTD->Nsec);
  thrust::device_ptr<double> d_ptr_max_r = thrust::max_element(d_ftd, d_ftd + nelements);
  thrust::device_ptr<double> d_ptr_min_r = thrust::min_element(d_ftd, d_ftd + nelements);
  // copy the max element from device memory to host memory
  cudaMemcpy((void*)max, (void*)d_ptr_max_r.get(), sizeof(double), cudaMemcpyDeviceToHost);
  cudaMemcpy((void*)min, (void*)d_ptr_min_r.get(), sizeof(double), cudaMemcpyDeviceToHost);
}

// Same as get_minmax_gpu but for the raw oversampled ("fine") device array
// used when GFARGO is coupled to HIPERION; the array holds
// NRAD*GASOVERSAMPRAD * NSEC*GASOVERSAMPAZIM doubles.
void get_minmax_fine_gpu(double* FTD, double* min, double *max) {
  // wrap raw pointer with a device_ptr
  thrust::device_ptr<double> d_ftd(FTD);
  // use thrust to find the maximum element
  int nelements = NRAD * GASOVERSAMPRAD * NSEC * GASOVERSAMPAZIM;
  thrust::device_ptr<double> d_ptr_max_r = thrust::max_element(d_ftd, d_ftd + nelements);
  thrust::device_ptr<double> d_ptr_min_r = thrust::min_element(d_ftd, d_ftd + nelements);
  // copy the max element from device memory to host memory
  cudaMemcpy((void*)max, (void*)d_ptr_max_r.get(), sizeof(double), cudaMemcpyDeviceToHost);
  cudaMemcpy((void*)min, (void*)d_ptr_min_r.get(), sizeof(double), cudaMemcpyDeviceToHost);
}
//-------------------------------------

// Per-cell dust-to-gas density ratio kernel, one thread per cell.
// NOTE(review): there is no bounds check on i/j, so the launch grid must
// exactly tile the pitch x (rows) array; the nr parameter is unused here.
__global__ void fill_d2gm (int pitch, int nr, double* d2gm, const double *gas_dens, const double *dust_dens) {
  const int j = blockIdx.x*blockDim.x + threadIdx.x;
  const int i = blockIdx.y*blockDim.y + threadIdx.y;
  const int m = j+i*pitch;
  d2gm[m] = dust_dens[m]/gas_dens[m];
}

/* void EvalD2GM (PolarGrid *D2GMass, PolarGrid *gas_dens, PolarGrid *dust_dens) { dim3 grid; dim3 block = dim3(BLOCK_X, BLOCK_Y); grid.x = NSEC / BLOCK_X; grid.y = NRAD / BLOCK_Y; //int nsec = (float) (FTD->pitch/sizeof(double)); fill_d2gm <<<grid, block>>> (FTD->pitch/sizeof(double), 0, D2GMass->gpu_field, gas_dens->gpu_field, dust_dens->gpu_field); }*/

// Dump the current GL frame buffer to an ASCII PPM ("P3") file.
// *pixels is (re)allocated to hold width*height RGB bytes and reused by the
// caller afterwards.
void screenshot_ppm(const char *filename, unsigned int width, unsigned int height, GLubyte **pixels) {
  size_t i, j, cur;
  const size_t format_nchannels = 3;
  FILE *f = fopen(filename, "w");
  fprintf(f,
"P3\n%d %d\n%d\n", width, height, 255); *pixels = (GLubyte*) realloc(*pixels, format_nchannels * sizeof(GLubyte) * width * height); glReadPixels(0, 0, width, height, GL_RGB, GL_UNSIGNED_BYTE, *pixels); for (i = 0; i < height; i++) { for (j = 0; j < width; j++) { cur = format_nchannels * ((height - i - 1) * width + j); fprintf(f, "%3d %3d %3d ", (*pixels)[cur], (*pixels)[cur + 1], (*pixels)[cur + 2]); } fprintf(f, "\n"); } fclose(f); } void load_palette () { if (palette_nb > 6) palette_nb = 1; switch (palette_nb) { case 0: memcpy (cmap, cmap0, 256*sizeof(unsigned int)); break; case 1: memcpy (cmap, cmap1, 256*sizeof(unsigned int)); break; case 2: memcpy (cmap, cmap2, 256*sizeof(unsigned int)); break; case 3: memcpy (cmap, cmap3, 256*sizeof(unsigned int)); break; case 4: memcpy (cmap, cmap4, 256*sizeof(unsigned int)); break; case 5: memcpy (cmap, cmap5, 256*sizeof(unsigned int)); break; case 6: memcpy (cmap, cmap6, 256*sizeof(unsigned int)); break; /* case 7: memcpy (cmap, cmap7, 256*sizeof(unsigned int)); break; case 8: memcpy (cmap, cmap8, 256*sizeof(unsigned int)); break; */} } void InitDisplay (int *argc, char **argv) { size_t pitch; memcpy (cmap, cmap1, 256*sizeof(unsigned int)); Xsize = NSEC; Ysize = NRAD; xview = GL_WINSIZE_X; yview = GL_WINSIZE_Y; glutInitWindowSize(xview, yview); glutInitWindowPosition(30, 30); glutInit(argc, argv); glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB); if (strcmp(OUTPUTDIR, "./") == 0) getcwd (simdir, 2048); else snprintf(simdir, 2048, "%s", OUTPUTDIR); snprintf(winTitle, 1023, "GFARGO:%s - gas density", simdir); win = glutCreateWindow(winTitle); //Vortens = CreatePolarGrid (NRAD, NSEC, "Vortensity"); //if (DustGrid) // D2GMass = CreatePolarGrid (NRAD, NSEC, "D2GMass"); //if (Adiabatic) // Temperature = CreatePolarGrid (NRAD, NSEC, "Temperature"); // Check for OpenGL extension support if (verbose) printf("Loading OPENGL extensions: %s\n", glewGetErrorString(glewInit())); else glewGetErrorString(glewInit()); 
if(!glewIsSupported("GL_VERSION_2_0 " "GL_ARB_pixel_buffer_object " "GL_EXT_framebuffer_object ")){ fprintf(stderr, "ERROR: Support for necessary OpenGL extensions missing."); fflush(stderr); return; } // Set up view glClearColor(0.0, 0.0, 0.0, 0.0); glMatrixMode(GL_PROJECTION); glLoadIdentity(); glOrtho(0,Xsize,0.,Ysize, -200.0, 200.0); // Create texture which we use to display the result and bind to gl_Tex glEnable(GL_TEXTURE_2D); glGenTextures(1, &gl_Tex); // Generate 2D texture glBindTexture(GL_TEXTURE_2D, gl_Tex); // bind to gl_Tex // texture properties: glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, Xsize, Ysize, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL); pitch = NSEC*sizeof(double); // Create pixel buffer object and bind to gl_PBO. We store the data we want to // plot in memory on the graphics card - in a "pixel buffer". 
We can then // copy this to the texture defined above and send it to the screen glGenBuffers(1, &gl_PBO); glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, gl_PBO); glBufferData(GL_PIXEL_UNPACK_BUFFER_ARB, pitch*Ysize, NULL, GL_STREAM_COPY); checkCudaErrors( cudaGLRegisterBufferObject(gl_PBO) ); // define glut functions glutDisplayFunc(DrawField); glutReshapeFunc(resize); glutIdleFunc(DrawField); glutKeyboardFunc(keyCB); glutSpecialFunc(keySpecCB); load_palette (); #ifdef DEBUGGING Paused = 1; #endif } void DisplayLoadDensity () { keyCB ('c', 0, 0); keyCB ('d', 0, 0); keyCB ('s', 0, 0); Zoom = 1.0/RMAX; } #include <unistd.h> static int frameNum = 0; void DrawField () { static int FirstTime = YES, wx, wy; static double previous, actual; //static double ts, te; double ar; // hydro calculation, if not movie generation is requested (option -m) if (!CreatingMovieOnly) Loop (); // just red snapshot file and display it else { ReadfromFile (gas_density, "gas_dens", frameNum); H2D (gas_density); frameNum++; usleep (100000); } #ifdef DEBUGGING Paused = 1; #endif // update window only if its is not turned off //if (Update == -1) // return; wx = glutGet (GLUT_WINDOW_WIDTH); wy = glutGet (GLUT_WINDOW_HEIGHT); ar = (double)wx/(double)wy; actual = clock(); if ((((actual - previous)/CLOCKS_PER_SEC) > (1./RefreshRate)) || (FirstTime == YES) || CreatingMovieOnly) { if (Update >= 0) { FirstTime = NO; #ifdef DEBUGGING get_minmax_gpu (FTD, &minftd, &maxftd); #endif // some additional values must be calculated if requested if (CalcVortensity == YES) CalcVortens_gpu (gas_density, gas_v_rad, gas_v_theta, Work); if (Adiabatic) { if(CalcTemperature) CalcTemp_gpu (gas_density, gas_energy, Work); else if (CalcDiskHeight) CalcDiskHeight_gpu (gas_density, gas_energy, Work); else if (CalcSoundSpeed) CalcSoundSpeed_gpu (gas_density, gas_energy, Work); } if (CalcDustGasRatio && DustGrid) CalcDustGasMassRatio_gpu(gas_density, dust_density[color_idx], Work); if (CalcDustSize) CalcDustSize_gpu(dust_size, 
dust_density[0], Work);
      // NOTE(review): "FARGO_INTEGRATON" below is misspelled (missing 'I'),
      // so this block is never compiled — confirm whether that is intended.
#ifdef FARGO_INTEGRATON
      // if (Dust2GassMassRatio == YES)
      // EvalD2GM (D2GMass, gas_density, DustDens);
#endif
      // Apparently there is no need to do a cudaMalloc of plot_rgba, the following function does the job
      checkCudaErrors(cudaGLMapBufferObject((void**)&plot_rgba, gl_PBO));
      //get_minmax_gpu (FTD, &minftd, &maxftd);
      // [RZS-MOD]
      // lock view to the planet 0
      //--------------------------------------
      if (Lock) {
        double lock_rad = sqrt(sys->y[0]*sys->y[0] + sys->x[0]*sys->x[0]);
        // azimuth of planet 0, mapped to [0, 2*pi)
        FrameRotate = atan2 (sys->y[0] , sys->x[0]);
        FrameRotate = (FrameRotate < 0.0 ? FrameRotate + 2.0 * M_PI: FrameRotate);
        // correct for the Keplerian drift of the (possibly offset) corotation
        // radius, plus the user-driven azimuthal Shift
        FrameRotate -= (pow(lock_rad*(1.0+delta_rad), -1.5) - pow(lock_rad, -1.5))*PhysicalTime - Shift;
        FrameRotate = fmod (FrameRotate, 2.0*M_PI);
      }
      else
        FrameRotate = 0.0;
      //--------------------------------------
      // plot filed
      if (FTD != NULL) {
#ifdef FARGO_INTEGRATION
        if (bFine)
          get_fine_rgba(FineFTD, plot_rgba, cmap, sys, ar, DispGamma, Top, FrameRotate);
        else
#endif
          get_rgba(FTD, plot_rgba, cmap, sys, ar, DispGamma, Top, FrameRotate);
      }
      // [RZS-MOD]
      // displaying dust particles
      //--------------------------
#ifdef FARGO_INTEGRATION
      if (DustParticles) {
        double xcenter = 0.0, ycenter = 0.0;
        if (CenterPlanet == YES) {
          xcenter = sys->x[0];
          ycenter = sys->y[0];
        }
        if (FTD == NULL)
          field (FlowPattern, plot_rgba, ColBackground);
        if (CartesianView)
          HIPERION_DisplayDust (0, NRAD, gas_density->pitch/sizeof(double), 0, 0, 0, 0, RMIN, RMAX, FrameRotate, color_idx, plot_rgba);
        else
          HIPERION_DisplayDust (1, Xsize, Ysize, ar, Zoom, Xsize/2, Ysize/2, xcenter, ycenter, FrameRotate, color_idx, plot_rgba);
      }
#endif
      //--------------------------
      checkCudaErrors(cudaGLUnmapBufferObject(gl_PBO));
      // Copy the pixel buffer to the texture, ready to display
      glTexSubImage2D(GL_TEXTURE_2D,0,0,0,Xsize,Ysize,GL_RGBA,GL_UNSIGNED_BYTE,0);
      // Render one quad to the screen and colour it using our texture
      // i.e. plot our plotvar data to the screen
      glClear(GL_COLOR_BUFFER_BIT);
      glBegin(GL_QUADS);
      glTexCoord2f (0.0, 0.0); glVertex3f (0.0, 0.0, 0.0);
      glTexCoord2f (1.0, 0.0); glVertex3f (Xsize, 0.0, 0.0);
      glTexCoord2f (1.0, 1.0); glVertex3f (Xsize, Ysize, 0.0);
      glTexCoord2f (0.0, 1.0); glVertex3f (0.0, Ysize, 0.0);
      glEnd();
      glutSwapBuffers();
      /* save */
      if (avconv) {
        // pipe the raw RGB frame into the external encoder started by 'M'
        glReadPixels(0, 0, Xsize, Ysize, GL_RGB, GL_UNSIGNED_BYTE, pixels);
        fwrite(pixels ,Xsize*Ysize*3 , 1, avconv);
        //add_frame_tomovie (&pixels);
      }
      // refesh rate dpending on Update or not
      if (Update==0)
        RefreshRate = 1;
      else
        RefreshRate = 50;
    }
    previous = actual;
  }
}

// GLUT special key functions
// Arrow keys fine-tune the corotating frame: up/down change the corotation
// radius offset (delta_rad), left/right shift the frame azimuth (Shift).
void keySpecCB (int key, int x, int y) {
  switch (key) {
  case GLUT_KEY_UP:
    delta_rad += 0.01;
    break;
  case GLUT_KEY_DOWN:
    delta_rad -= 0.01;
    break;
  case GLUT_KEY_LEFT:
    Shift -= 0.1;
    if (Shift > 2.0*M_PI) Shift = 0;
    break;
  case GLUT_KEY_RIGHT:
    Shift += 0.1;
    if (Shift < 0) Shift = 0;
    break;
  }
}

// GLUT normal key functions
// Single dispatcher for every keyboard command: pause, zoom, view mode,
// palette, field selection, movie recording and snapshots.
void keyCB (unsigned char key, int x, int y) {
  size_t pitch;
  static int fullscreen = 0, px, py, wx, wy;
#ifdef DEBUGGING
  if (key == ' ') {
    Paused = 0;
  }
#else
  // stop simulation
  if (key == ' ') {
    Paused = 1 - Paused;
  }
#endif
  // display control functions
  //-------------------------------------------------------------------------------------------------
  // zoom in
  if (key == '+') {
    Zoom *= 1.4;
    if (DustParticles)
      reset_rgba (0, plot_rgba, ColBackground);
  }
  // zoom out
  if (key == '-') {
    Zoom /= 1.4;
    if (DustParticles)
      reset_rgba (0, plot_rgba, ColBackground);
  }
  // centering on planet 0
  if (key == 's') {
    CenterPlanet = 1-CenterPlanet;
    if (DustParticles)
      reset_rgba (0, plot_rgba, ColBackground);
  }
  // full screen
  if (key == 'f') {
    // if video is creating do not change anithing
    if (avconv) return;
    fullscreen = 1-fullscreen;
    if (fullscreen == 1) {
      // remember position/size so we can restore them on toggle-off
      px = glutGet (GLUT_WINDOW_X);
      py = glutGet (GLUT_WINDOW_Y);
      wx = glutGet (GLUT_WINDOW_WIDTH);
      wy = glutGet (GLUT_WINDOW_HEIGHT);
      glutFullScreen();
    }
    if (fullscreen == 0) {
glutReshapeWindow(wx, wy); glutPositionWindow(px, py); } } // temporarily turn off real time displaying if (key == 'w') { Update = !Update; if (Update == 0) { old_palette_nb = palette_nb; palette_nb = 0; } else if (Update == 1) { palette_nb = old_palette_nb; if (FTD != NULL) get_minmax_gpu (FTD, &minftd, &maxftd); } load_palette (); } // exit simulation if (key == 27) { printf ("\n"); // close movie if it is creating if (avconv) { pclose(avconv); free (pixels); } exit (0); } // change view type between polar and cartesian if (key == 'c') { // if video is creating do not change anithing if (avconv) return; // no full screen for if (fullscreen == 1) return; CartesianView = 1-CartesianView; // cartesian grid if (CartesianView == 1) { Xsize = NSEC; Ysize = NRAD; xview = NSEC; yview = NRAD; } // polar grid else { Ysize = xview = WINSIZE; Xsize = yview = WINSIZE; } glutReshapeWindow (xview, yview); glutPostRedisplay(); checkCudaErrors(cudaGLUnregisterBufferObject (gl_PBO)); glDeleteBuffers (1, &gl_PBO); glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, Xsize, Ysize, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL); pitch = Xsize*sizeof(double); glGenBuffers(1, &gl_PBO); glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, gl_PBO); glBufferData(GL_PIXEL_UNPACK_BUFFER_ARB, pitch*Ysize, NULL, GL_STREAM_COPY); checkCudaErrors( cudaGLRegisterBufferObject(gl_PBO) ); glFlush (); } // change color table for hydro if (key == 'p') { palette_nb++; load_palette (); } // lock planet if (key == 'l') { Lock = !Lock; } // logarithmic filed if (key == 'L') { LogDisplay = !LogDisplay; } // increase gamma if (key == '1') { DispGamma /= 1.25; if (DispGamma < 0.0001) DispGamma = 0.0001; } // decrease gamma if (key == '2') { DispGamma *= 1.25; if (DispGamma > 1000) DispGamma = 1000; } // decrease maximum value to plot if (key == '3') { Top /= 1.25; if (Top < 1e-6) Top = 1e-6; } // increase maximum value to plot if (key == '4') { Top *= 1.25; if (Top > 1) Top = 1; } // reset max, min and gamma if (key == '0') { DispGamma = 
1.0; Top = 1.0; // recalcalculate requested value if (CalcVortensity == YES) CalcVortens_gpu (gas_density, gas_v_rad, gas_v_theta, Work); if (Adiabatic) { if(CalcTemperature) CalcTemp_gpu (gas_density, gas_energy, Work); else if (CalcDiskHeight) CalcDiskHeight_gpu (gas_density, gas_energy, Work); else if (CalcSoundSpeed) CalcSoundSpeed_gpu (gas_density, gas_energy, Work); } if (CalcDustGasRatio && DustGrid) CalcDustGasMassRatio_gpu(gas_density, dust_density[color_idx], Work); if (CalcDustSize) CalcDustSize_gpu(dust_size, dust_density[0], Work); if (FTD != NULL) { #ifdef FARGO_INTEGRATION if (bFine) get_minmax_fine_gpu (FineFTD, &minftd, &maxftd); else #endif get_minmax_gpu (FTD, &minftd, &maxftd); /* if (minftd == maxftd) { minftd = SIGMA0 / 3.0; maxftd = SIGMA0 * 3.0; } */ } printf ("\n%s min/max: %e/%e\n", FTD->Name, minftd, maxftd); } // change background color if (key == 'I') { // to black if (ColBackground == 255+256*255+256*256*255) ColBackground = 0; //to white else if (ColBackground == 0) ColBackground = 255+256*255+256*256*255; } //------------------------------------------------------------------------------------------------- // select gas field to display //------------------------------------------------------------------------------------------------- // gas surface mass density if (key == 'W') { bFine = false; FTD = myWork; DustGridType = 0; CalcVortensity = NO; CalcTemperature = NO; CalcDiskHeight = NO; DustParticlesonGrid = NO; DustParticles = NO; // Dust2GassMassRatio = NO; CalcSoundSpeed = NO; CalcDustGasRatio = NO; get_minmax_gpu (FTD, &minftd, &maxftd); if (minftd == maxftd) { minftd = SIGMA0 / 3.0; maxftd = SIGMA0 * 3.0; } DispGamma = 1.0; Top = 1.0; snprintf(winTitle, 1023, "GFARGO:%s - ???????? 
%s", simdir, winTitleMessage); glutSetWindowTitle (winTitle); } // gas surface mass density if (key == 'd') { bFine = false; FTD = gas_density; DustGridType = 0; CalcVortensity = NO; CalcTemperature = NO; CalcDiskHeight = NO; DustParticlesonGrid = NO; DustParticles = NO; // Dust2GassMassRatio = NO; CalcSoundSpeed = NO; CalcDustGasRatio = NO; CalcDustSize = NO; get_minmax_gpu (FTD, &minftd, &maxftd); if (minftd == maxftd) { minftd = SIGMA0 / 3.0; maxftd = SIGMA0 * 3.0; } DispGamma = 1.0; Top = 1.0; snprintf(winTitle, 1023, "GFARGO:%s - gas density %s", simdir, winTitleMessage); glutSetWindowTitle (winTitle); } // gas radial velocity if (key == 'r') { bFine = false; FTD = gas_v_rad; DustGridType = 0; CalcVortensity = NO; CalcTemperature = NO; CalcDiskHeight = NO; DustParticlesonGrid = NO; DustParticles = NO; // Dust2GassMassRatio = NO; CalcSoundSpeed = NO; CalcDustGasRatio = NO; CalcDustSize = NO; get_minmax_gpu (FTD, &minftd, &maxftd); DispGamma = 1.0; Top = 1.0; snprintf(winTitle, 1023, "GFARGO:%s - gas radial velocity %s", simdir, winTitleMessage); glutSetWindowTitle (winTitle); } // gas azimuthal velocity field if (key == 't') { bFine = false; FTD = gas_v_theta; DustGridType = 0; CalcVortensity = NO; CalcTemperature = NO; CalcDiskHeight = NO; DustParticlesonGrid = NO; DustParticles = NO; // Dust2GassMassRatio = NO; CalcSoundSpeed = NO; CalcDustGasRatio = NO; CalcDustSize = NO; DispGamma = 1.0; Top = 1.0; get_minmax_gpu (FTD, &minftd, &maxftd); snprintf(winTitle, 1023, "GFARGO:%s - gas azimuthal velocity %s", simdir, winTitleMessage); glutSetWindowTitle (winTitle); } // gas vortensity /*if (key == 'v') { bFine = false; FTD = Work; DustGridType = 0; CalcVortensity = YES; CalcTemperature = NO; CalcDiskHeight = NO; DustParticlesonGrid = NO; DustParticles = NO; // Dust2GassMassRatio = NO; CalcSoundSpeed = NO; CalcVortens_gpu (gas_density, gas_v_rad, gas_v_theta, Work); get_minmax_gpu (FTD, &minftd, &maxftd); snprintf(winTitle, 1023, "GFARGO:%s - gas vortensity %s", 
simdir, winTitleMessage); glutSetWindowTitle (winTitle); }*/ // gas disk eccentricity if (key == 'e') { bFine = false; FTD = disk_ecc; DustGridType = 0; CalcVortensity = NO; CalcTemperature = NO; CalcDiskHeight = NO; DustParticlesonGrid = NO; DustParticles = NO; // Dust2GassMassRatio = NO; CalcSoundSpeed = NO; CalcDustGasRatio = NO; CalcDustSize = NO; DispGamma = 1.0; Top = 1.0; get_minmax_gpu (FTD, &minftd, &maxftd); snprintf(winTitle, 1023, "GFARGO:%s - gas disk eccentricity %s", simdir, winTitleMessage); glutSetWindowTitle (winTitle); } // gas disk eccentricity if (key == 'P') { bFine = false; FTD = Potential; DustGridType = 0; CalcVortensity = NO; CalcTemperature = NO; CalcDiskHeight = NO; DustParticlesonGrid = NO; DustParticles = NO; // Dust2GassMassRatio = NO; CalcSoundSpeed = NO; CalcDustGasRatio = NO; CalcDustSize = NO; DispGamma = 1.0; Top = 1.0; get_minmax_gpu (FTD, &minftd, &maxftd); snprintf(winTitle, 1023, "GFARGO:%s - disk grap pot %s", simdir, winTitleMessage); glutSetWindowTitle (winTitle); } // select adiabatic gas field to display //------------------------------------------------------------------------------------------------- if (Adiabatic || AdaptiveViscosity) { // viscosity field (for adaptive or adiabatic disks) if (key == 'a') { bFine = false; FTD = Viscosity; DustGridType = 0; CalcVortensity = NO; CalcTemperature = NO; CalcDiskHeight = NO; DustParticlesonGrid = NO; DustParticles = NO; // Dust2GassMassRatio = NO; CalcSoundSpeed = NO; CalcDustGasRatio = NO; CalcDustSize = NO; DispGamma = 1.0; Top = 1.0; get_minmax_gpu (FTD, &minftd, &maxftd); snprintf(winTitle, 1023, "GFARGO:%s - viscosity %s", simdir, winTitleMessage); glutSetWindowTitle (winTitle); } } if (Adiabatic) { // gas internal energy if (key == 'q') { bFine = false; FTD = gas_energy; DustGridType = 0; CalcVortensity = NO; CalcTemperature = NO; CalcDiskHeight = NO; DustParticlesonGrid = NO; DustParticles = NO; // Dust2GassMassRatio = NO; CalcSoundSpeed = NO; CalcDustGasRatio = NO; 
CalcDustSize = NO; DispGamma = 1.0; Top = 1.0; get_minmax_gpu (FTD, &minftd, &maxftd); snprintf(winTitle, 1023, "GFARGO:%s - gas specific energy %s", simdir, winTitleMessage); glutSetWindowTitle (winTitle); } // gas temperature if (key == 'Q') { bFine = false; FTD = Work; CalcTemperature = YES; DustGridType = 0; CalcVortensity = NO; CalcDiskHeight = NO; DustParticlesonGrid = NO; DustParticles = NO; //Dust2GassMassRatio = NO; CalcSoundSpeed = NO; CalcDustGasRatio = NO; CalcDustSize = NO; CalcTemp_gpu (gas_density, gas_energy, Work); DispGamma = 1.0; Top = 1.0; get_minmax_gpu (FTD, &minftd, &maxftd); snprintf(winTitle, 1023, "GFARGO:%s - gas temperature %s", simdir, winTitleMessage); glutSetWindowTitle (winTitle); } // disk adiabatic height if (key == 'h') { bFine = false; FTD = Work; CalcDiskHeight = YES; DustGridType = 0; CalcVortensity = NO; CalcTemperature = NO; DustParticlesonGrid = NO; DustParticles = NO; //Dust2GassMassRatio = NO; CalcSoundSpeed = NO; CalcDustGasRatio = NO; CalcDustSize = NO; CalcDiskHeight_gpu (gas_density, gas_energy, Work); DispGamma = 1.0; Top = 1.0; get_minmax_gpu (FTD, &minftd, &maxftd); snprintf(winTitle, 1023, "GFARGO:%s - disk height %s", simdir, winTitleMessage); glutSetWindowTitle (winTitle); } // disk soundspeed if (key == 'H') { bFine = false; FTD = Work; CalcSoundSpeed = YES; CalcDiskHeight = NO; DustGridType = 0; CalcVortensity = NO; CalcTemperature = NO; DustParticlesonGrid = NO; DustParticles = NO; //Dust2GassMassRatio = NO; CalcDustGasRatio = NO; CalcDustSize = NO; CalcSoundSpeed_gpu (gas_density, gas_energy, Work); DispGamma = 1.0; Top = 1.0; get_minmax_gpu (FTD, &minftd, &maxftd); snprintf(winTitle, 1023, "GFARGO:%s - disk cs %s", simdir, winTitleMessage); glutSetWindowTitle (winTitle); } } // select dust field to display //------------------------------------------------------------------------------------------------- // dust surface mass density if (DustGrid) { if (key == 'x') { bFine = false; FTD = 
dust_density[color_idx]; CalcVortensity = NO; CalcTemperature = NO; CalcDiskHeight = NO; DustGridType = 1; DustParticlesonGrid = NO; DustParticles = NO; CalcDustGasRatio = NO; DispGamma = 1.0; Top = 1.0; CalcDustSize = NO; get_minmax_gpu (FTD, &minftd, &maxftd); if (DustConstStokes) sprintf (winTitle, "GFARGO:%s - dust dens [St=%0.2e] %s", simdir, DustSizeBin[color_idx], winTitleMessage); else if (DustGrowth) if (color_idx == 0) sprintf (winTitle, "GFARGO:%s - grown dust dens %s", simdir, winTitleMessage); else sprintf (winTitle, "GFARGO:%s - small dust dens %s", simdir, winTitleMessage); else sprintf (winTitle, "GFARGO:%s - dust dens [s=%0.2e cm] %s", simdir, DustSizeBin[color_idx], winTitleMessage); glutSetWindowTitle (winTitle); } // dust radial velocity if (key == 'y') { bFine = false; FTD = dust_v_rad[color_idx]; DustGridType = 2; CalcVortensity = NO; CalcTemperature = NO; CalcDiskHeight = NO; DustParticlesonGrid = NO; DustParticles = NO; CalcDustGasRatio = NO; CalcDustSize = NO; DispGamma = 1.0; Top = 1.0; get_minmax_gpu (FTD, &minftd, &maxftd); sprintf (winTitle, "GFARGO:%s - dust vrad [%0.2e cm] %s", simdir, DustSizeBin[color_idx], winTitleMessage); glutSetWindowTitle (winTitle); } // dust azimuthal velocity if (key == 'z') { bFine = false; FTD = dust_v_theta[color_idx]; DustGridType = 3; CalcVortensity = NO; CalcTemperature = NO; CalcDiskHeight = NO; DustParticlesonGrid = NO; DustParticles = NO; CalcDustGasRatio = NO; CalcDustSize = NO; DispGamma = 1.0; Top = 1.0; get_minmax_gpu (FTD, &minftd, &maxftd); sprintf (winTitle, "GFARGO:%s - dust vth [%0.2e cm] %s", simdir, DustSizeBin[color_idx], winTitleMessage); glutSetWindowTitle (winTitle); } // grown dust size if (DustGrowth) if (key == 'b') { bFine = false; FTD = dust_size; //FTD = Work; DustGridType = 4; CalcVortensity = NO; CalcTemperature = NO; CalcDiskHeight = NO; DustParticlesonGrid = NO; DustParticles = NO; CalcDustGasRatio = NO; CalcDustSize = NO; //CalcDustSize = YES; //CalcDustSize_gpu(dust_size, 
dust_density[0], Work); DispGamma = 1.0; Top = 1.0; get_minmax_gpu (FTD, &minftd, &maxftd); sprintf (winTitle, "GFARGO:%s - dust size %s", simdir, winTitleMessage); glutSetWindowTitle (winTitle); } // dust-to-gas mass ratio if (key == 'G') { bFine = false; FTD = Work; DustGridType = 5; CalcVortensity = NO; CalcTemperature = NO; CalcDiskHeight = NO; DustParticlesonGrid = NO; DustParticles = NO; CalcDustGasRatio = YES; CalcDustSize = NO; CalcDustGasMassRatio_gpu(gas_density, dust_density[color_idx], Work); DispGamma = 1.0; Top = 1.0; get_minmax_gpu (FTD, &minftd, &maxftd); sprintf (winTitle, "GFARGO:%s - Md/Mg [%0.2e cm] %s", simdir, DustSizeBin[color_idx], winTitleMessage); glutSetWindowTitle (winTitle); } // change color palette for dust representation if (key == ']') { if (color_idx < DustBinNum-1) color_idx++; else color_idx=0; switch (DustGridType) { case 1: keyCB ('x', 0,0); break; case 2: keyCB ('y', 0,0); break; case 3: keyCB ('z', 0,0); break; case 4: keyCB ('b', 0,0); break; case 5: keyCB ('G', 0,0); break; } } // change color palette for dust representation if (key == '[') { if (color_idx > 0) color_idx--; else color_idx=DustBinNum-1; switch (DustGridType) { case 1: keyCB ('x', 0,0); break; case 2: keyCB ('y', 0,0); break; case 3: keyCB ('z', 0,0); break; case 4: keyCB ('b', 0,0); break; case 5: keyCB ('G', 0,0); break; } } } //------------------------------------------------------------------------------------------------- #ifdef FARGO_INTEGRATION // HIPERION integration //------------------------------------------------------------------------------------------------- // interpolated surface mass density on the fine grid if (key == 'D') { bFine = true; FineFTD = fine_gas_density; DustGridType = 0; CalcVortensity = NO; CalcTemperature = NO; CalcDiskHeight = NO; DustParticlesonGrid = NO; DustParticles = NO; //Dust2GassMassRatio = NO; CalcDustGasRatio = NO; DispGamma = 1.0; Top = 1.0; get_minmax_fine_gpu (FineFTD, &minftd, &maxftd); if (minftd == maxftd) { 
minftd = SIGMA0 / 3.0; maxftd = SIGMA0 * 3.0; } snprintf(winTitle, 1023, "GFARGO:%s - gas density (ovrs) %s", simdir, winTitleMessage); glutSetWindowTitle (winTitle); } // interpolated radial gas velocity on the fine grid if (key == 'R') { bFine = true; FineFTD = fine_gas_dv_rad; DustGridType = 0; CalcVortensity = NO; CalcTemperature = NO; CalcDiskHeight = NO; DustParticlesonGrid = NO; DustParticles = NO; //Dust2GassMassRatio = NO; CalcDustGasRatio = NO; DispGamma = 1.0; Top = 1.0; get_minmax_fine_gpu (FineFTD, &minftd, &maxftd); snprintf(winTitle, 1023, "GFARGO:%s - gas radial velocity (ovrs) %s", simdir, winTitleMessage); glutSetWindowTitle (winTitle); } // interpolated azimuthal gas velocity field on the fine grid if (key == 'T') { bFine = true; FineFTD = fine_gas_dv_theta; DustGridType = 0; CalcVortensity = NO; CalcTemperature = NO; CalcDiskHeight = NO; DustParticlesonGrid = NO; DustParticles = NO; //Dust2GassMassRatio = NO; CalcDustGasRatio = NO; DispGamma = 1.0; Top = 1.0; get_minmax_fine_gpu (FineFTD, &minftd, &maxftd); snprintf(winTitle, 1023, "GFARGO:%s - gas azimuthal velocity (ovrs) %s", simdir, winTitleMessage); glutSetWindowTitle (winTitle); } // flow pattern of dust if (key == 'n') { FlowPattern = !FlowPattern; if (!FlowPattern) field (0, plot_rgba, ColBackground); } // dust distribution if (key == 'b') { Vortensity = NO; CalcVortensity = NO; CalcTemperature = NO; CalcDiskHeight = NO; DustParticles = YES; DustParticlesonGrid = NO; //Dust2GassMassRatio = NO; CalcDustGasRatio = NO; DispGamma = 1.0; Top = 1.0; sprintf (winTitle, "GFARGO:%s - dust [%0.2e cm] %s", simdir, dDustSize[color_idx], winTitleMessage); glutSetWindowTitle (winTitle); field (0, plot_rgba,ColBackground); FTD = NULL; } // turn off if (key == 'B') { CalcVortensity = NO; CalcTemperature = NO; CalcDiskHeight = NO; //Dust2GassMassRatio = NO; CalcDustGasRatio = NO; if (FTD != NULL) DustParticles = !DustParticles; palette_nb = 0; load_palette (); } // change color palette for dust 
representation if (key == ']') { if (color_idx < iDustBinNum-1) color_idx++; else color_idx=0; if (DustParticles) { sprintf (winTitle, "GFARGO:%s - dust [%0.2e cm] %s", simdir, dDustSize[color_idx], winTitleMessage); glutSetWindowTitle (winTitle); } } if (key == '[') { if (color_idx > 0) color_idx--; else color_idx=iDustBinNum-1; if (DustParticles) { sprintf (winTitle, "GFARGO:%s - dust [%0.2e cm] %s", simdir, dDustSize[color_idx], winTitleMessage); glutSetWindowTitle (winTitle); } } #ifdef DUST_FEEDBACK if (key == 'g') { DustParticles = NO; CalcVortensity = NO; CalcTemperature = NO; CalcDiskHeight = NO; DustParticlesonGrid = YES; //Dust2GassMassRatio = NO; CalcDustGasRatio = NO; FTD = DustDens; get_minmax_gpu (FTD, &minftd, &maxftd); DispGamma = 1.0; Top = 1.0; sprintf (winTitle, "GFARGO:%s - dust on grid [%0.2e cm] %s", simdir, dDustSize[color_idx], winTitleMessage); glutSetWindowTitle (winTitle); } /*if (key == 'G') { CalcVortensity = NO; CalcTemperature = NO; CalcDiskHeight = NO; DustParticles = NO; //Dust2GassMassRatio = YES; FTD = D2GMass; get_minmax_gpu (FTD, &minftd, &maxftd); printf ("\n%e %e\n", minftd, maxftd); sprintf (winTitle, "GFARGO:%s - dust-to-gas mass ratio %s", simdir, winTitleMessage); glutSetWindowTitle (winTitle); */ } #endif //------------------------------------------------------------------------------------------------- #endif // screenshots & movie //------------------------------------------------------------------------------------------------- // initiate movie if (key == 'M') { av_count++; snprintf(movie_filename, 1023, "GFARGO_%i.mov", av_count); //snprintf (avconv_str, 1024, "avconv -y -f rawvideo -qscale 10 -s %ix%i -pix_fmt rgb24 -r 25 -i - -vf vflip -an -b:v 20M %s", Xsize, Ysize, movie_filename); snprintf (avconv_str, 1024, "avconv -y -f rawvideo -s %ix%i -pix_fmt rgb24 -r 25 -i - -vf vflip -an -b:v 20M %s", Xsize, Ysize, movie_filename); avconv = popen(avconv_str, "w"); pixels = (GLubyte *) malloc(3 * 768 * 768); 
snprintf(winTitleMessage, 1023, "(Snapshot: %s)", movie_filename); snprintf(winTitle, 1023, "%s %s", winTitle, winTitleMessage); glutSetWindowTitle (winTitle); } // stop movie if (key == 'm') { // close movie if it is opened if (avconv) { pclose(avconv); //ffmpeg_encoder_finish(); free (pixels); avconv = NULL; glutSetWindowTitle (winTitle); snprintf(winTitleMessage, 1023, "Movie finished"); } } // create a snapshot png if (key == '>') { snprintf(ppm_filename, 1023, "./hydro_snapshot_%d.ppm", nframes); screenshot_ppm(ppm_filename, Xsize, Ysize, &pixels); nframes++; printf ("\nSnapshot is taken to <%s>\n", ppm_filename); } //------------------------------------------------------------------------------------------------- } void resize(int w, int h) { glViewport (0, 0, w, h); glMatrixMode (GL_PROJECTION); glLoadIdentity (); glOrtho (0., Xsize, 0., Ysize, -200. ,200.); glMatrixMode (GL_MODELVIEW); glLoadIdentity (); } void StartMainLoop () { glutMainLoop(); }
3c711df0b11277f688cbc5ae7c1e29e10fe5ce3c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define IDX3(X, n1, n2, n3, i1, i2, i3) (X[(i1)*((n2)*(n3)) + (i2)*(n3) + (i3)]) template<class T> __device__ void im2col_ker(const T *im, T *patches, int im_ni, int im_nj, int nimgs, int p_ni, int p_nj, int npatches) { int total_threads = gridDim.x * blockDim.x; int patch = blockIdx.x * blockDim.x + threadIdx.x; int patches_per_img = npatches / nimgs; for (; patch < npatches; patch += total_threads) { int im_k = patch / patches_per_img; /* image index */ int im_j0 = patch / (im_ni - p_ni + 1); /* patch topleft j in image */ int im_i0 = patch % (im_ni - p_ni + 1); /* patch topleft i in image */ for (int pj = 0; pj < p_nj; ++pj) { for (int pi = 0; pi < p_ni; ++pi) { IDX3(patches, npatches, p_nj, p_ni, patch, pj, pi) = IDX3(im, nimgs, im_nj, im_ni, im_k, im_j0 + pj, im_i0 + pi); } } } } template<class T> __device__ void scol2im_ker(T *im, const T *patches, int im_ni, int im_nj, int nimgs, int p_ni, int p_nj, int npatches) { int total_threads = gridDim.x * blockDim.x; int pixel = blockIdx.x * blockDim.x + threadIdx.x; int valid_nj = im_nj - p_nj + 1; int valid_ni = im_ni - p_ni + 1; int npixels = nimgs * im_nj * im_ni; int patches_per_img = npatches / nimgs; for (; pixel < npixels; pixel += total_threads) { T x = 0; int im_k = pixel / (im_ni * im_nj); /* image index */ int im_j = pixel / im_ni; /* pixel in image */ int im_i = pixel % im_ni; for (int pj = 0; pj < p_nj; ++pj) { for (int pi = 0; pi < p_ni; ++pi) { int im_pj = im_j - pj; /* topleft of patch in image */ int im_pi = im_i - pi; /* topleft of patch in image */ if (im_pi < 0 || im_pj < 0 || im_pj >= valid_nj || im_pi >= valid_ni) continue; int patch = im_k * patches_per_img + im_pj * valid_ni + im_pi; x += IDX3(patches, npatches, p_nj, p_ni, patch, pj, pi); } } IDX3(im, nimgs, im_nj, im_ni, im_k, im_j, im_i) = x; } } __global__ void im2col_d(const double *im, double *patches, int im_ni, int im_nj, int nimgs, int p_ni, int 
p_nj, int npatches) { im2col_ker<double>(im, patches, im_ni, im_nj, nimgs, p_ni, p_nj, npatches); } __global__ void scol2im_d(double *im, const double *patches, int im_ni, int im_nj, int nimgs, int p_ni, int p_nj, int npatches) { scol2im_ker<double>(im, patches, im_ni, im_nj, nimgs, p_ni, p_nj, npatches); } __global__ void im2col_f(const float *im, float *patches, int im_ni, int im_nj, int nimgs, int p_ni, int p_nj, int npatches) { im2col_ker<float>(im, patches, im_ni, im_nj, nimgs, p_ni, p_nj, npatches); } __global__ void scol2im_f(float *im, const float *patches, int im_ni, int im_nj, int nimgs, int p_ni, int p_nj, int npatches) { scol2im_ker<float>(im, patches, im_ni, im_nj, nimgs, p_ni, p_nj, npatches); }
3c711df0b11277f688cbc5ae7c1e29e10fe5ce3c.cu
#define IDX3(X, n1, n2, n3, i1, i2, i3) (X[(i1)*((n2)*(n3)) + (i2)*(n3) + (i3)]) template<class T> __device__ void im2col_ker(const T *im, T *patches, int im_ni, int im_nj, int nimgs, int p_ni, int p_nj, int npatches) { int total_threads = gridDim.x * blockDim.x; int patch = blockIdx.x * blockDim.x + threadIdx.x; int patches_per_img = npatches / nimgs; for (; patch < npatches; patch += total_threads) { int im_k = patch / patches_per_img; /* image index */ int im_j0 = patch / (im_ni - p_ni + 1); /* patch topleft j in image */ int im_i0 = patch % (im_ni - p_ni + 1); /* patch topleft i in image */ for (int pj = 0; pj < p_nj; ++pj) { for (int pi = 0; pi < p_ni; ++pi) { IDX3(patches, npatches, p_nj, p_ni, patch, pj, pi) = IDX3(im, nimgs, im_nj, im_ni, im_k, im_j0 + pj, im_i0 + pi); } } } } template<class T> __device__ void scol2im_ker(T *im, const T *patches, int im_ni, int im_nj, int nimgs, int p_ni, int p_nj, int npatches) { int total_threads = gridDim.x * blockDim.x; int pixel = blockIdx.x * blockDim.x + threadIdx.x; int valid_nj = im_nj - p_nj + 1; int valid_ni = im_ni - p_ni + 1; int npixels = nimgs * im_nj * im_ni; int patches_per_img = npatches / nimgs; for (; pixel < npixels; pixel += total_threads) { T x = 0; int im_k = pixel / (im_ni * im_nj); /* image index */ int im_j = pixel / im_ni; /* pixel in image */ int im_i = pixel % im_ni; for (int pj = 0; pj < p_nj; ++pj) { for (int pi = 0; pi < p_ni; ++pi) { int im_pj = im_j - pj; /* topleft of patch in image */ int im_pi = im_i - pi; /* topleft of patch in image */ if (im_pi < 0 || im_pj < 0 || im_pj >= valid_nj || im_pi >= valid_ni) continue; int patch = im_k * patches_per_img + im_pj * valid_ni + im_pi; x += IDX3(patches, npatches, p_nj, p_ni, patch, pj, pi); } } IDX3(im, nimgs, im_nj, im_ni, im_k, im_j, im_i) = x; } } __global__ void im2col_d(const double *im, double *patches, int im_ni, int im_nj, int nimgs, int p_ni, int p_nj, int npatches) { im2col_ker<double>(im, patches, im_ni, im_nj, nimgs, p_ni, p_nj, 
npatches); } __global__ void scol2im_d(double *im, const double *patches, int im_ni, int im_nj, int nimgs, int p_ni, int p_nj, int npatches) { scol2im_ker<double>(im, patches, im_ni, im_nj, nimgs, p_ni, p_nj, npatches); } __global__ void im2col_f(const float *im, float *patches, int im_ni, int im_nj, int nimgs, int p_ni, int p_nj, int npatches) { im2col_ker<float>(im, patches, im_ni, im_nj, nimgs, p_ni, p_nj, npatches); } __global__ void scol2im_f(float *im, const float *patches, int im_ni, int im_nj, int nimgs, int p_ni, int p_nj, int npatches) { scol2im_ker<float>(im, patches, im_ni, im_nj, nimgs, p_ni, p_nj, npatches); }
b3548165e838ced44e4c630eb3e4500a0208d006.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> int main(){ std::cout << "hi" << std::endl; int devicesCount; hipGetDeviceCount(&devicesCount); for(int deviceIndex = 0; deviceIndex < devicesCount; ++deviceIndex) { hipDeviceProp_t deviceProperties; hipGetDeviceProperties(&deviceProperties, deviceIndex); // printf("Device name: %s", deviceProperties.name); std::cout << deviceProperties.name << std::endl; } return 0; }
b3548165e838ced44e4c630eb3e4500a0208d006.cu
#include <iostream> int main(){ std::cout << "hi" << std::endl; int devicesCount; cudaGetDeviceCount(&devicesCount); for(int deviceIndex = 0; deviceIndex < devicesCount; ++deviceIndex) { cudaDeviceProp deviceProperties; cudaGetDeviceProperties(&deviceProperties, deviceIndex); // printf("Device name: %s", deviceProperties.name); std::cout << deviceProperties.name << std::endl; } return 0; }
a0a81a5931d46bb100d5df18b3bef7608426fcae.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! * Copyright (c) 2020 by Contributors * \file array/cuda/coo_sort.cc * \brief Sort COO index */ #include <dgl/array.h> #include "../../runtime/cuda/cuda_common.h" #include "../../c_api_common.h" #include "./utils.h" namespace dgl { using runtime::NDArray; namespace aten { namespace impl { ///////////////////////////// COOSort_ ///////////////////////////// /** * @brief Encode row and column IDs into a single scalar per edge. * * @tparam IdType The type to encode as. * @param row The row (src) IDs per edge. * @param col The column (dst) IDs per edge. * @param nnz The number of edges. * @param col_bits The number of bits used to encode the destination. The row * information is packed into the remaining bits. * @param key The encoded edges (output). */ template <typename IdType> __global__ void _COOEncodeEdgesKernel( const IdType* const row, const IdType* const col, const int64_t nnz, const int col_bits, IdType * const key) { int64_t tx = static_cast<int64_t>(blockIdx.x) * blockDim.x + threadIdx.x; if (tx < nnz) { key[tx] = row[tx] << col_bits | col[tx]; } } /** * @brief Decode row and column IDs from the encoded edges. * * @tparam IdType The type the edges are encoded as. * @param key The encoded edges. * @param nnz The number of edges. * @param col_bits The number of bits used to store the column/dst ID. * @param row The row (src) IDs per edge (output). * @param col The col (dst) IDs per edge (output). 
*/ template <typename IdType> __global__ void _COODecodeEdgesKernel( const IdType* const key, const int64_t nnz, const int col_bits, IdType * const row, IdType * const col) { int64_t tx = static_cast<int64_t>(blockIdx.x) * blockDim.x + threadIdx.x; if (tx < nnz) { const IdType k = key[tx]; row[tx] = k >> col_bits; col[tx] = k & ((1 << col_bits) - 1); } } template<typename T> int _NumberOfBits(const T& range) { if (range <= 1) { // ranges of 0 or 1 require no bits to store return 0; } int bits = 1; while (bits < static_cast<int>(sizeof(T)*8) && (1 << bits) < range) { ++bits; } CHECK_EQ((range-1) >> bits, 0); CHECK_NE((range-1) >> (bits-1), 0); return bits; } template <DLDeviceType XPU, typename IdType> void COOSort_(COOMatrix* coo, bool sort_column) { auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); const int row_bits = _NumberOfBits(coo->num_rows); const int64_t nnz = coo->row->shape[0]; if (sort_column) { const int col_bits = _NumberOfBits(coo->num_cols); const int num_bits = row_bits + col_bits; const int nt = 256; const int nb = (nnz+nt-1)/nt; CHECK(static_cast<int64_t>(nb)*nt >= nnz); IdArray pos = aten::NewIdArray(nnz, coo->row->ctx, coo->row->dtype.bits); CUDA_KERNEL_CALL(_COOEncodeEdgesKernel, nb, nt, 0, thr_entry->stream, coo->row.Ptr<IdType>(), coo->col.Ptr<IdType>(), nnz, col_bits, pos.Ptr<IdType>()); auto sorted = Sort(pos, num_bits); CUDA_KERNEL_CALL(_COODecodeEdgesKernel, nb, nt, 0, thr_entry->stream, sorted.first.Ptr<IdType>(), nnz, col_bits, coo->row.Ptr<IdType>(), coo->col.Ptr<IdType>()); if (aten::COOHasData(*coo)) coo->data = IndexSelect(coo->data, sorted.second); else coo->data = AsNumBits(sorted.second, coo->row->dtype.bits); coo->row_sorted = coo->col_sorted = true; } else { const int num_bits = row_bits; auto sorted = Sort(coo->row, num_bits); coo->row = sorted.first; coo->col = IndexSelect(coo->col, sorted.second); if (aten::COOHasData(*coo)) coo->data = IndexSelect(coo->data, sorted.second); else coo->data = 
AsNumBits(sorted.second, coo->row->dtype.bits); coo->row_sorted = true; } } template void COOSort_<kDLGPU, int32_t>(COOMatrix* coo, bool sort_column); template void COOSort_<kDLGPU, int64_t>(COOMatrix* coo, bool sort_column); ///////////////////////////// COOIsSorted ///////////////////////////// template <typename IdType> __global__ void _COOIsSortedKernel( const IdType* row, const IdType* col, int64_t nnz, int8_t* row_sorted, int8_t* col_sorted) { int tx = blockIdx.x * blockDim.x + threadIdx.x; const int stride_x = gridDim.x * blockDim.x; while (tx < nnz) { if (tx == 0) { row_sorted[0] = 1; col_sorted[0] = 1; } else { row_sorted[tx] = static_cast<int8_t>(row[tx - 1] <= row[tx]); col_sorted[tx] = static_cast<int8_t>( row[tx - 1] < row[tx] || col[tx - 1] <= col[tx]); } tx += stride_x; } } template <DLDeviceType XPU, typename IdType> std::pair<bool, bool> COOIsSorted(COOMatrix coo) { const int64_t nnz = coo.row->shape[0]; const auto& ctx = coo.row->ctx; auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); auto device = runtime::DeviceAPI::Get(ctx); // We allocate a workspace of 2*nnz bytes. It wastes a little bit memory but should // be fine. int8_t* row_flags = static_cast<int8_t*>(device->AllocWorkspace(ctx, nnz)); int8_t* col_flags = static_cast<int8_t*>(device->AllocWorkspace(ctx, nnz)); const int nt = cuda::FindNumThreads(nnz); const int nb = (nnz + nt - 1) / nt; CUDA_KERNEL_CALL(_COOIsSortedKernel, nb, nt, 0, thr_entry->stream, coo.row.Ptr<IdType>(), coo.col.Ptr<IdType>(), nnz, row_flags, col_flags); const bool row_sorted = cuda::AllTrue(row_flags, nnz, ctx); const bool col_sorted = row_sorted? cuda::AllTrue(col_flags, nnz, ctx) : false; device->FreeWorkspace(ctx, row_flags); device->FreeWorkspace(ctx, col_flags); return {row_sorted, col_sorted}; } template std::pair<bool, bool> COOIsSorted<kDLGPU, int32_t>(COOMatrix coo); template std::pair<bool, bool> COOIsSorted<kDLGPU, int64_t>(COOMatrix coo); } // namespace impl } // namespace aten } // namespace dgl
a0a81a5931d46bb100d5df18b3bef7608426fcae.cu
/*! * Copyright (c) 2020 by Contributors * \file array/cuda/coo_sort.cc * \brief Sort COO index */ #include <dgl/array.h> #include "../../runtime/cuda/cuda_common.h" #include "../../c_api_common.h" #include "./utils.h" namespace dgl { using runtime::NDArray; namespace aten { namespace impl { ///////////////////////////// COOSort_ ///////////////////////////// /** * @brief Encode row and column IDs into a single scalar per edge. * * @tparam IdType The type to encode as. * @param row The row (src) IDs per edge. * @param col The column (dst) IDs per edge. * @param nnz The number of edges. * @param col_bits The number of bits used to encode the destination. The row * information is packed into the remaining bits. * @param key The encoded edges (output). */ template <typename IdType> __global__ void _COOEncodeEdgesKernel( const IdType* const row, const IdType* const col, const int64_t nnz, const int col_bits, IdType * const key) { int64_t tx = static_cast<int64_t>(blockIdx.x) * blockDim.x + threadIdx.x; if (tx < nnz) { key[tx] = row[tx] << col_bits | col[tx]; } } /** * @brief Decode row and column IDs from the encoded edges. * * @tparam IdType The type the edges are encoded as. * @param key The encoded edges. * @param nnz The number of edges. * @param col_bits The number of bits used to store the column/dst ID. * @param row The row (src) IDs per edge (output). * @param col The col (dst) IDs per edge (output). 
*/ template <typename IdType> __global__ void _COODecodeEdgesKernel( const IdType* const key, const int64_t nnz, const int col_bits, IdType * const row, IdType * const col) { int64_t tx = static_cast<int64_t>(blockIdx.x) * blockDim.x + threadIdx.x; if (tx < nnz) { const IdType k = key[tx]; row[tx] = k >> col_bits; col[tx] = k & ((1 << col_bits) - 1); } } template<typename T> int _NumberOfBits(const T& range) { if (range <= 1) { // ranges of 0 or 1 require no bits to store return 0; } int bits = 1; while (bits < static_cast<int>(sizeof(T)*8) && (1 << bits) < range) { ++bits; } CHECK_EQ((range-1) >> bits, 0); CHECK_NE((range-1) >> (bits-1), 0); return bits; } template <DLDeviceType XPU, typename IdType> void COOSort_(COOMatrix* coo, bool sort_column) { auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); const int row_bits = _NumberOfBits(coo->num_rows); const int64_t nnz = coo->row->shape[0]; if (sort_column) { const int col_bits = _NumberOfBits(coo->num_cols); const int num_bits = row_bits + col_bits; const int nt = 256; const int nb = (nnz+nt-1)/nt; CHECK(static_cast<int64_t>(nb)*nt >= nnz); IdArray pos = aten::NewIdArray(nnz, coo->row->ctx, coo->row->dtype.bits); CUDA_KERNEL_CALL(_COOEncodeEdgesKernel, nb, nt, 0, thr_entry->stream, coo->row.Ptr<IdType>(), coo->col.Ptr<IdType>(), nnz, col_bits, pos.Ptr<IdType>()); auto sorted = Sort(pos, num_bits); CUDA_KERNEL_CALL(_COODecodeEdgesKernel, nb, nt, 0, thr_entry->stream, sorted.first.Ptr<IdType>(), nnz, col_bits, coo->row.Ptr<IdType>(), coo->col.Ptr<IdType>()); if (aten::COOHasData(*coo)) coo->data = IndexSelect(coo->data, sorted.second); else coo->data = AsNumBits(sorted.second, coo->row->dtype.bits); coo->row_sorted = coo->col_sorted = true; } else { const int num_bits = row_bits; auto sorted = Sort(coo->row, num_bits); coo->row = sorted.first; coo->col = IndexSelect(coo->col, sorted.second); if (aten::COOHasData(*coo)) coo->data = IndexSelect(coo->data, sorted.second); else coo->data = 
AsNumBits(sorted.second, coo->row->dtype.bits); coo->row_sorted = true; } } template void COOSort_<kDLGPU, int32_t>(COOMatrix* coo, bool sort_column); template void COOSort_<kDLGPU, int64_t>(COOMatrix* coo, bool sort_column); ///////////////////////////// COOIsSorted ///////////////////////////// template <typename IdType> __global__ void _COOIsSortedKernel( const IdType* row, const IdType* col, int64_t nnz, int8_t* row_sorted, int8_t* col_sorted) { int tx = blockIdx.x * blockDim.x + threadIdx.x; const int stride_x = gridDim.x * blockDim.x; while (tx < nnz) { if (tx == 0) { row_sorted[0] = 1; col_sorted[0] = 1; } else { row_sorted[tx] = static_cast<int8_t>(row[tx - 1] <= row[tx]); col_sorted[tx] = static_cast<int8_t>( row[tx - 1] < row[tx] || col[tx - 1] <= col[tx]); } tx += stride_x; } } template <DLDeviceType XPU, typename IdType> std::pair<bool, bool> COOIsSorted(COOMatrix coo) { const int64_t nnz = coo.row->shape[0]; const auto& ctx = coo.row->ctx; auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); auto device = runtime::DeviceAPI::Get(ctx); // We allocate a workspace of 2*nnz bytes. It wastes a little bit memory but should // be fine. int8_t* row_flags = static_cast<int8_t*>(device->AllocWorkspace(ctx, nnz)); int8_t* col_flags = static_cast<int8_t*>(device->AllocWorkspace(ctx, nnz)); const int nt = cuda::FindNumThreads(nnz); const int nb = (nnz + nt - 1) / nt; CUDA_KERNEL_CALL(_COOIsSortedKernel, nb, nt, 0, thr_entry->stream, coo.row.Ptr<IdType>(), coo.col.Ptr<IdType>(), nnz, row_flags, col_flags); const bool row_sorted = cuda::AllTrue(row_flags, nnz, ctx); const bool col_sorted = row_sorted? cuda::AllTrue(col_flags, nnz, ctx) : false; device->FreeWorkspace(ctx, row_flags); device->FreeWorkspace(ctx, col_flags); return {row_sorted, col_sorted}; } template std::pair<bool, bool> COOIsSorted<kDLGPU, int32_t>(COOMatrix coo); template std::pair<bool, bool> COOIsSorted<kDLGPU, int64_t>(COOMatrix coo); } // namespace impl } // namespace aten } // namespace dgl
b3125ac8cef8e9a60ecb43b1c95c8c0b73aac55f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef USE_CUDNN #include <vector> #include "caffe/layers/cudnn_conv_layer.hpp" namespace caffe { __global__ void sync_conv_groups() { } template <typename Dtype> void CuDNNConvolutionLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* weight = this->blobs_[0]->gpu_data(); for (int i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* top_data = top[i]->mutable_gpu_data(); // Forward through cuDNN in parallel over groups. for (int g = 0; g < this->group_; g++) { // Filters. CUDNN_CHECK(cudnnConvolutionForward(handle_[g], cudnn::dataType<Dtype>::one, bottom_descs_[i], bottom_data + bottom_offset_ * g, filter_desc_, weight + this->weight_offset_ * g, conv_descs_[i], fwd_algo_[i], workspace[g], workspace_fwd_sizes_[i], cudnn::dataType<Dtype>::zero, top_descs_[i], top_data + top_offset_ * g)); // Bias. if (this->bias_term_) { #if CUDNN_VERSION_MIN(4, 0, 0) CUDNN_CHECK(cudnnAddTensor(handle_[g], cudnn::dataType<Dtype>::one, bias_desc_, bias_data + bias_offset_ * g, cudnn::dataType<Dtype>::one, top_descs_[i], top_data + top_offset_ * g)); #else CUDNN_CHECK(cudnnAddTensor(handle_[g], CUDNN_ADD_SAME_C, cudnn::dataType<Dtype>::one, bias_desc_, bias_data + bias_offset_ * g, cudnn::dataType<Dtype>::one, top_descs_[i], top_data + top_offset_ * g)); #endif } } // Synchronize the work across groups, each of which went into its own // stream, by launching an empty kernel into the default (null) stream. 
// NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sync_conv_groups), dim3(1), dim3(1), 0, 0, ); } } template <typename Dtype> void CuDNNConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* weight = NULL; Dtype* weight_diff = NULL; if (this->param_propagate_down_[0]) { weight = this->blobs_[0]->gpu_data(); weight_diff = this->blobs_[0]->mutable_gpu_diff(); } Dtype* bias_diff = NULL; if (this->bias_term_ && this->param_propagate_down_[1]) { bias_diff = this->blobs_[1]->mutable_gpu_diff(); } for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); // Backward through cuDNN in parallel over groups and gradients. for (int g = 0; g < this->group_; g++) { // Gradient w.r.t. bias. if (this->bias_term_ && this->param_propagate_down_[1]) { CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0*this->group_ + g], cudnn::dataType<Dtype>::one, top_descs_[i], top_diff + top_offset_ * g, cudnn::dataType<Dtype>::one, bias_desc_, bias_diff + bias_offset_ * g)); } // Gradient w.r.t. weights. if (this->param_propagate_down_[0]) { const Dtype* bottom_data = bottom[i]->gpu_data(); CUDNN_CHECK(cudnnConvolutionBackwardFilter_v3( handle_[1*this->group_ + g], cudnn::dataType<Dtype>::one, bottom_descs_[i], bottom_data + bottom_offset_ * g, top_descs_[i], top_diff + top_offset_ * g, conv_descs_[i], bwd_filter_algo_[i], workspace[1*this->group_ + g], workspace_bwd_filter_sizes_[i], cudnn::dataType<Dtype>::one, filter_desc_, weight_diff + this->weight_offset_ * g)); } // Gradient w.r.t. bottom data. 
if (propagate_down[i]) { if (weight == NULL) { weight = this->blobs_[0]->gpu_data(); } Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); CUDNN_CHECK(cudnnConvolutionBackwardData_v3( handle_[2*this->group_ + g], cudnn::dataType<Dtype>::one, filter_desc_, weight + this->weight_offset_ * g, top_descs_[i], top_diff + top_offset_ * g, conv_descs_[i], bwd_data_algo_[i], workspace[2*this->group_ + g], workspace_bwd_data_sizes_[i], cudnn::dataType<Dtype>::zero, bottom_descs_[i], bottom_diff + bottom_offset_ * g)); } } // Synchronize the work across groups, each of which went into its own // stream, by launching an empty kernel into the default (null) stream. // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sync_conv_groups), dim3(1), dim3(1), 0, 0, ); } } INSTANTIATE_LAYER_GPU_FUNCS(CuDNNConvolutionLayer); } // namespace caffe #endif
b3125ac8cef8e9a60ecb43b1c95c8c0b73aac55f.cu
#ifdef USE_CUDNN #include <vector> #include "caffe/layers/cudnn_conv_layer.hpp" namespace caffe { __global__ void sync_conv_groups() { } template <typename Dtype> void CuDNNConvolutionLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* weight = this->blobs_[0]->gpu_data(); for (int i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* top_data = top[i]->mutable_gpu_data(); // Forward through cuDNN in parallel over groups. for (int g = 0; g < this->group_; g++) { // Filters. CUDNN_CHECK(cudnnConvolutionForward(handle_[g], cudnn::dataType<Dtype>::one, bottom_descs_[i], bottom_data + bottom_offset_ * g, filter_desc_, weight + this->weight_offset_ * g, conv_descs_[i], fwd_algo_[i], workspace[g], workspace_fwd_sizes_[i], cudnn::dataType<Dtype>::zero, top_descs_[i], top_data + top_offset_ * g)); // Bias. if (this->bias_term_) { #if CUDNN_VERSION_MIN(4, 0, 0) CUDNN_CHECK(cudnnAddTensor(handle_[g], cudnn::dataType<Dtype>::one, bias_desc_, bias_data + bias_offset_ * g, cudnn::dataType<Dtype>::one, top_descs_[i], top_data + top_offset_ * g)); #else CUDNN_CHECK(cudnnAddTensor(handle_[g], CUDNN_ADD_SAME_C, cudnn::dataType<Dtype>::one, bias_desc_, bias_data + bias_offset_ * g, cudnn::dataType<Dtype>::one, top_descs_[i], top_data + top_offset_ * g)); #endif } } // Synchronize the work across groups, each of which went into its own // stream, by launching an empty kernel into the default (null) stream. 
// NOLINT_NEXT_LINE(whitespace/operators) sync_conv_groups<<<1, 1>>>(); } } template <typename Dtype> void CuDNNConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* weight = NULL; Dtype* weight_diff = NULL; if (this->param_propagate_down_[0]) { weight = this->blobs_[0]->gpu_data(); weight_diff = this->blobs_[0]->mutable_gpu_diff(); } Dtype* bias_diff = NULL; if (this->bias_term_ && this->param_propagate_down_[1]) { bias_diff = this->blobs_[1]->mutable_gpu_diff(); } for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); // Backward through cuDNN in parallel over groups and gradients. for (int g = 0; g < this->group_; g++) { // Gradient w.r.t. bias. if (this->bias_term_ && this->param_propagate_down_[1]) { CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0*this->group_ + g], cudnn::dataType<Dtype>::one, top_descs_[i], top_diff + top_offset_ * g, cudnn::dataType<Dtype>::one, bias_desc_, bias_diff + bias_offset_ * g)); } // Gradient w.r.t. weights. if (this->param_propagate_down_[0]) { const Dtype* bottom_data = bottom[i]->gpu_data(); CUDNN_CHECK(cudnnConvolutionBackwardFilter_v3( handle_[1*this->group_ + g], cudnn::dataType<Dtype>::one, bottom_descs_[i], bottom_data + bottom_offset_ * g, top_descs_[i], top_diff + top_offset_ * g, conv_descs_[i], bwd_filter_algo_[i], workspace[1*this->group_ + g], workspace_bwd_filter_sizes_[i], cudnn::dataType<Dtype>::one, filter_desc_, weight_diff + this->weight_offset_ * g)); } // Gradient w.r.t. bottom data. 
if (propagate_down[i]) { if (weight == NULL) { weight = this->blobs_[0]->gpu_data(); } Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); CUDNN_CHECK(cudnnConvolutionBackwardData_v3( handle_[2*this->group_ + g], cudnn::dataType<Dtype>::one, filter_desc_, weight + this->weight_offset_ * g, top_descs_[i], top_diff + top_offset_ * g, conv_descs_[i], bwd_data_algo_[i], workspace[2*this->group_ + g], workspace_bwd_data_sizes_[i], cudnn::dataType<Dtype>::zero, bottom_descs_[i], bottom_diff + bottom_offset_ * g)); } } // Synchronize the work across groups, each of which went into its own // stream, by launching an empty kernel into the default (null) stream. // NOLINT_NEXT_LINE(whitespace/operators) sync_conv_groups<<<1, 1>>>(); } } INSTANTIATE_LAYER_GPU_FUNCS(CuDNNConvolutionLayer); } // namespace caffe #endif
7efb326810fa83cc4d506c713c3a30db09ce3151.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <iostream> #include <fstream> #include <time.h> #include <float.h> #include <hiprand/hiprand_kernel.h> #include "vec3.h" #include "ray.h" #include "sphere.h" #include "hitable_list.h" #include "camera.h" #include "material.h" #include "algorithm" using namespace std; // limited version of checkCudaErrors from helper_cuda.h in CUDA examples #define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ ) #define SPP 1024 #define MAX_DEPTH 64 #define RR_DEPTH 16 void check_cuda(hipError_t result, char const *const func, const char *const file, int const line) { if (result) { std::cerr << "CUDA error = " << static_cast<unsigned int>(result) << " at " << file << ":" << line << " '" << func << "' \n"; // Make sure we call CUDA Device Reset before exiting hipDeviceReset(); exit(99); } } __device__ vec3 renderPixel(const ray& r, hitable_list **d_world, hiprandState_t *rand_state) { vec3 throughput(1, 1, 1); vec3 value; ray in = r; ray out; int depth = 0; bool scattered = false; while (depth <= MAX_DEPTH || MAX_DEPTH < 0) { hit_record rec; if (d_world[0]->hit(in, 0.001f, FLT_MAX, rec)) { if (rec.mat_ptr->scatter(in, rec, value, out, rand_state)) { throughput *= value; in = out; scattered = true; } else { if (rec.mat_ptr->is_emitter()) { return throughput * value; } else { return vec3(0, 0, 0); } } } else { /// environment map(sky) vec3 u = unit_vector(in.direction()); float w = 0.5f * (u.y() + 1.0f); value = (1.0f - w) * vec3(0.5, 0.7, 1.0) + w * vec3(0.5, 0.0, 1.0); return throughput * value;; } /// russian roullete if (depth++ >= RR_DEPTH) { float q = min(max(throughput.x(), max(throughput.y(), throughput.z())), 0.95f); if (hiprand_uniform(rand_state) >= q) return vec3(0, 0, 0); throughput /= q; } } return vec3(0, 0, 0); } __global__ void render(vec3 *fb, int max_x, int max_y, camera **cam, hitable_list **d_world, hiprandState_t 
*rand_state) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; if ((i >= max_x) || (j >= max_y)) return; int pixel_index = j * max_x + i; hiprandState_t local_rand_state = rand_state[pixel_index]; vec3 spec(0, 0, 0); for (int s = 0; s < SPP; s++) { /// normalized uv coordinate float u = float(i + hiprand_uniform(&local_rand_state)) / float(max_x); float v = float(j + hiprand_uniform(&local_rand_state)) / float(max_y); ray r = (*cam)->get_ray(u, v); spec += renderPixel(r, d_world, &local_rand_state); } rand_state[pixel_index] = local_rand_state; /// gamma correction spec /= float(SPP); spec[0] = sqrt(spec[0]); spec[1] = sqrt(spec[1]); spec[2] = sqrt(spec[2]); fb[pixel_index] = spec; } __global__ void create_world(hitable **d_list, hitable_list **d_world, camera **d_camera) { if (threadIdx.x == 0 && blockIdx.x == 0) { d_list[0] = new sphere(vec3(0, 0, -1), 0.5, new lambertian(vec3(0.8, 0.3, 0.3))); d_list[1] = new sphere(vec3(0, -100.5, -1), 100, new lambertian(vec3(0.8, 0.8, 0.0))); d_list[2] = new sphere(vec3(1, 0, -1), 0.5, new metal(vec3(0.8, 0.6, 0.2), 0.2)); d_list[3] = new sphere(vec3(-1, 0, -1), 0.5, new mirror(vec3(0.8, 0.8, 0.8))); d_list[4] = new sphere(vec3(0, 0, 1), 0.2, new emitter(vec3(1, 1, 1))); *d_world = new hitable_list(d_list, 5); *d_camera = new camera(); } } __global__ void free_world(hitable **d_list, hitable_list **d_world, camera **d_camera) { delete d_list[0]; delete d_list[1]; delete d_world[0]; delete d_camera[0]; } __global__ void camere_init(int max_x, int max_y, hiprandState_t *rand_state) { int i = threadIdx.x + blockDim.x * blockIdx.x; int j = threadIdx.y + blockDim.y * blockIdx.y; if ((i >= max_x) || (j >= max_y)) return; int pixel_index = j * max_x + i; hiprand_init(1984, pixel_index, 0, &rand_state[pixel_index]); } int main() { /* Initiliaztion */ int nx = 1200; int ny = 600; int tx = 8; int ty = 8; int num_pixels = nx*ny; int num_hitables = 5; size_t fb_size = num_pixels*sizeof(vec3); 
hitable **d_list; // list of hitable pointer hitable_list **d_world; hiprandState_t *d_rand_state; camera **d_camera; std::cerr << "Rendering a " << nx << "x" << ny << " image "; std::cerr << "in " << tx << "x" << ty << " blocks.\n"; // File open ofstream file; file.open("image.ppm"); clock_t start, stop; start = clock(); // allocate FB vec3 *fb; checkCudaErrors(hipMallocManaged((void **)&fb, fb_size)); /* Scene Initializer */ checkCudaErrors(hipMalloc((void **)&d_list, num_hitables * sizeof(hitable *))); checkCudaErrors(hipMalloc((void **)&d_world, sizeof(hitable_list *))); checkCudaErrors(hipMalloc((void **)&d_camera, sizeof(camera *))); create_world << <1, 1 >> >(d_list, d_world, d_camera); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); /* Camera Initiailizer */ dim3 blocks(nx / tx + 1, ny / ty + 1); dim3 threads(tx, ty); checkCudaErrors(hipMalloc((void **)&d_rand_state, num_pixels * sizeof(hiprandState_t))); camere_init << <blocks, threads >> >(nx, ny, d_rand_state); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); /* Setting thread blocks and Run */ hipLaunchKernelGGL(( render), dim3(blocks), dim3(threads), 0, 0, fb, nx, ny, d_camera, d_world, d_rand_state); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); stop = clock(); double timer_seconds = ((double)(stop - start)) / CLOCKS_PER_SEC; std::cerr << "took " << timer_seconds << " seconds.\n"; /* Output FB as Image */ file << "P3\n" << nx << " " << ny << "\n255\n"; for (int j = ny - 1; j >= 0; j--) { for (int i = 0; i < nx; i++) { size_t pixel_index = j * nx + i; vec3 col = fb[pixel_index]; int ir = int(255.99f*col[0]); int ig = int(255.99f*col[1]); int ib = int(255.99f*col[2]); file << ir << " " << ig << " " << ib << "\n"; } } file.close(); checkCudaErrors(hipDeviceSynchronize()); free_world << <1, 1 >> >(d_list, d_world, d_camera); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipFree(d_list)); 
checkCudaErrors(hipFree(d_world)); checkCudaErrors(hipFree(d_camera)); checkCudaErrors(hipFree(d_rand_state)); checkCudaErrors(hipFree(fb)); hipDeviceReset(); }
7efb326810fa83cc4d506c713c3a30db09ce3151.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <iostream> #include <fstream> #include <time.h> #include <float.h> #include <curand_kernel.h> #include "vec3.h" #include "ray.h" #include "sphere.h" #include "hitable_list.h" #include "camera.h" #include "material.h" #include "algorithm" using namespace std; // limited version of checkCudaErrors from helper_cuda.h in CUDA examples #define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ ) #define SPP 1024 #define MAX_DEPTH 64 #define RR_DEPTH 16 void check_cuda(cudaError_t result, char const *const func, const char *const file, int const line) { if (result) { std::cerr << "CUDA error = " << static_cast<unsigned int>(result) << " at " << file << ":" << line << " '" << func << "' \n"; // Make sure we call CUDA Device Reset before exiting cudaDeviceReset(); exit(99); } } __device__ vec3 renderPixel(const ray& r, hitable_list **d_world, curandState *rand_state) { vec3 throughput(1, 1, 1); vec3 value; ray in = r; ray out; int depth = 0; bool scattered = false; while (depth <= MAX_DEPTH || MAX_DEPTH < 0) { hit_record rec; if (d_world[0]->hit(in, 0.001f, FLT_MAX, rec)) { if (rec.mat_ptr->scatter(in, rec, value, out, rand_state)) { throughput *= value; in = out; scattered = true; } else { if (rec.mat_ptr->is_emitter()) { return throughput * value; } else { return vec3(0, 0, 0); } } } else { /// environment map(sky) vec3 u = unit_vector(in.direction()); float w = 0.5f * (u.y() + 1.0f); value = (1.0f - w) * vec3(0.5, 0.7, 1.0) + w * vec3(0.5, 0.0, 1.0); return throughput * value;; } /// russian roullete if (depth++ >= RR_DEPTH) { float q = min(max(throughput.x(), max(throughput.y(), throughput.z())), 0.95f); if (curand_uniform(rand_state) >= q) return vec3(0, 0, 0); throughput /= q; } } return vec3(0, 0, 0); } __global__ void render(vec3 *fb, int max_x, int max_y, camera **cam, hitable_list **d_world, curandState *rand_state) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = 
threadIdx.y + blockIdx.y * blockDim.y; if ((i >= max_x) || (j >= max_y)) return; int pixel_index = j * max_x + i; curandState local_rand_state = rand_state[pixel_index]; vec3 spec(0, 0, 0); for (int s = 0; s < SPP; s++) { /// normalized uv coordinate float u = float(i + curand_uniform(&local_rand_state)) / float(max_x); float v = float(j + curand_uniform(&local_rand_state)) / float(max_y); ray r = (*cam)->get_ray(u, v); spec += renderPixel(r, d_world, &local_rand_state); } rand_state[pixel_index] = local_rand_state; /// gamma correction spec /= float(SPP); spec[0] = sqrt(spec[0]); spec[1] = sqrt(spec[1]); spec[2] = sqrt(spec[2]); fb[pixel_index] = spec; } __global__ void create_world(hitable **d_list, hitable_list **d_world, camera **d_camera) { if (threadIdx.x == 0 && blockIdx.x == 0) { d_list[0] = new sphere(vec3(0, 0, -1), 0.5, new lambertian(vec3(0.8, 0.3, 0.3))); d_list[1] = new sphere(vec3(0, -100.5, -1), 100, new lambertian(vec3(0.8, 0.8, 0.0))); d_list[2] = new sphere(vec3(1, 0, -1), 0.5, new metal(vec3(0.8, 0.6, 0.2), 0.2)); d_list[3] = new sphere(vec3(-1, 0, -1), 0.5, new mirror(vec3(0.8, 0.8, 0.8))); d_list[4] = new sphere(vec3(0, 0, 1), 0.2, new emitter(vec3(1, 1, 1))); *d_world = new hitable_list(d_list, 5); *d_camera = new camera(); } } __global__ void free_world(hitable **d_list, hitable_list **d_world, camera **d_camera) { delete d_list[0]; delete d_list[1]; delete d_world[0]; delete d_camera[0]; } __global__ void camere_init(int max_x, int max_y, curandState *rand_state) { int i = threadIdx.x + blockDim.x * blockIdx.x; int j = threadIdx.y + blockDim.y * blockIdx.y; if ((i >= max_x) || (j >= max_y)) return; int pixel_index = j * max_x + i; curand_init(1984, pixel_index, 0, &rand_state[pixel_index]); } int main() { /* Initiliaztion */ int nx = 1200; int ny = 600; int tx = 8; int ty = 8; int num_pixels = nx*ny; int num_hitables = 5; size_t fb_size = num_pixels*sizeof(vec3); hitable **d_list; // list of hitable pointer hitable_list **d_world; 
curandState *d_rand_state; camera **d_camera; std::cerr << "Rendering a " << nx << "x" << ny << " image "; std::cerr << "in " << tx << "x" << ty << " blocks.\n"; // File open ofstream file; file.open("image.ppm"); clock_t start, stop; start = clock(); // allocate FB vec3 *fb; checkCudaErrors(cudaMallocManaged((void **)&fb, fb_size)); /* Scene Initializer */ checkCudaErrors(cudaMalloc((void **)&d_list, num_hitables * sizeof(hitable *))); checkCudaErrors(cudaMalloc((void **)&d_world, sizeof(hitable_list *))); checkCudaErrors(cudaMalloc((void **)&d_camera, sizeof(camera *))); create_world << <1, 1 >> >(d_list, d_world, d_camera); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); /* Camera Initiailizer */ dim3 blocks(nx / tx + 1, ny / ty + 1); dim3 threads(tx, ty); checkCudaErrors(cudaMalloc((void **)&d_rand_state, num_pixels * sizeof(curandState))); camere_init << <blocks, threads >> >(nx, ny, d_rand_state); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); /* Setting thread blocks and Run */ render<<<blocks, threads>>>(fb, nx, ny, d_camera, d_world, d_rand_state); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); stop = clock(); double timer_seconds = ((double)(stop - start)) / CLOCKS_PER_SEC; std::cerr << "took " << timer_seconds << " seconds.\n"; /* Output FB as Image */ file << "P3\n" << nx << " " << ny << "\n255\n"; for (int j = ny - 1; j >= 0; j--) { for (int i = 0; i < nx; i++) { size_t pixel_index = j * nx + i; vec3 col = fb[pixel_index]; int ir = int(255.99f*col[0]); int ig = int(255.99f*col[1]); int ib = int(255.99f*col[2]); file << ir << " " << ig << " " << ib << "\n"; } } file.close(); checkCudaErrors(cudaDeviceSynchronize()); free_world << <1, 1 >> >(d_list, d_world, d_camera); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaFree(d_list)); checkCudaErrors(cudaFree(d_world)); checkCudaErrors(cudaFree(d_camera)); checkCudaErrors(cudaFree(d_rand_state)); 
checkCudaErrors(cudaFree(fb)); cudaDeviceReset(); }
23bee00e0b1b98ab548a2e4b9f87dbe2b8037f6e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "THHUNN.h" #include "TH/THHalf.h" #include "THHHalfAutoNumerics.cuh" #include "THHAtomics.cuh" #include "THHTensor.hpp" #include "THHStorage.hpp" #define divup(a, b) ((a) + (b) - 1) / (b) const int THREADS_PER_BLOCK = 256; const int THREADS_X = 32; const int THREADS_Y = THREADS_PER_BLOCK / THREADS_X; const int REPEAT = 32; const int64_t NNZ_PER_BLOCK_MAX = 1024; /* sign MACRO */ #ifndef clamp #define clamp(a, low, high) max(min((a), (high)), (low)) #endif __device__ double atomicExch(double *address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long res = atomicExch(address_as_ull, __double_as_longlong(val)); return __longlong_as_double(res); } template<typename Ty, bool train> __global__ static void updateOutput( Ty *output, Ty *normalizedValues, const Ty *values, const int64_t *cumSumSizes, const int64_t *keys, const int64_t batchSize, const int64_t outDim, Ty *weight, const Ty *bias, const int64_t weightStride, const int64_t keysOffset, const int maxNormalize, const int nnzPerBlock) { /******************************************************* * Adapted from the following file in arrayfire * https://github.com/arrayfire/arrayfire/blob/v3.4.1/src/backend/opencl/kernel/csrmm.cl * ******************************************************* * Original copyright notice can be seen below: * * Copyright (c) 2016, ArrayFire * All rights reserved. * * This file is distributed under 3-clause BSD license. 
* The complete license agreement can be obtained at: * http://arrayfire.com/licenses/BSD-3-Clause ********************************************************/ const int64_t tidx = threadIdx.x; const int64_t tidy = threadIdx.y; const int64_t tid = tidy * blockDim.x + tidx; const int64_t gidx = blockIdx.x * blockDim.x + tidx; Ty *nWeight = weight; // Offset the number of elements specified by maxNormalize weight += gidx + maxNormalize; output += gidx; bool within_N = (gidx < outDim); __shared__ Ty s_values[THREADS_PER_BLOCK]; __shared__ int64_t s_keys[THREADS_PER_BLOCK]; const int64_t rowId = blockIdx.y; // if (rowId >= batchSize) return; // Load the nonzero column offsets for current row const int64_t batchStart = (rowId == 0 ? 0 : cumSumSizes[rowId - 1]) + blockIdx.z * nnzPerBlock; const int64_t batchEnd = min(batchStart + nnzPerBlock, cumSumSizes[rowId]); const int64_t batchStride = blockDim.x * blockDim.y; Ty outVal = 0; // Since the number of nonzero elements might be greater than local memory available, // Load only part of the row into local memory, perform partial dot, repeat until done. for (int64_t id = batchStart; id < batchEnd; id += batchStride) { // Load the current chunk of the row into local memory int64_t lim = min(batchEnd - id, (int64_t)batchStride); int64_t key = tid < lim ? keys[id + tid] + keysOffset : -1; Ty val = tid < lim ? values[id + tid] : 0; int64_t nWeightOffset = key * weightStride; if (tid < lim && maxNormalize) { Ty *nWeightCurr = nWeight + nWeightOffset; if (train) { Ty absVal = fabs(val); Ty maxVal = nWeightCurr[0]; if (absVal > maxVal) { // Updating maxVal and invMaxVal. Go hogwild! 
Ty invAbsVal = 1.0 / absVal; atomicExch(nWeightCurr + 0, absVal); atomicExch(nWeightCurr + 1, invAbsVal); } val = clamp(val * nWeightCurr[1], -1.0, 1.0) + nWeightCurr[3]; normalizedValues[id + tid] = val; nWeightCurr[2] = 1; } else { val = clamp(val * nWeightCurr[1], -1.0, 1.0) + nWeightCurr[3]; } } s_keys[tid] = key; s_values[tid] = val; __syncthreads(); // Perform a single "dot" operation for each thread for (int64_t idy = tidy; within_N && idy < lim; idy += blockDim.y) { outVal += s_values[idy] * weight[weightStride * s_keys[idy]]; } __syncthreads(); } // s_values is no longer used at this point. Reuse it for reducing outVal. // A reduction along the y dimension now gives a single output value along x. s_values[tid] = outVal; for (int64_t y = blockDim.y / 2; y >= 1; y /= 2) { __syncthreads(); if (tidy < y) s_values[tid] = s_values[tid] + s_values[tid + y * blockDim.x]; } if (within_N && tidy == 0) { Ty val = s_values[tid] + (blockIdx.z == 0 ? bias[gidx] : 0); if (gridDim.z == 1) { output[rowId * outDim] = val; } else { atomicAdd(output + rowId * outDim, val); } } } // This kernel takes in the following inputs: // values of size [keysSize x 1] and gradOutput of size [batchSize x outDim], // to generate gradWeight of size [keysSize x outDim] // nth block along y dimension computes on the non zero elements from the nth batch. template<typename Ty> __global__ static void accGradWeight( Ty *gradWeight, const Ty *gradOutput, const Ty *values, const int64_t *cumSumSizes, const int64_t outDim, const int64_t gradWeightStride, const Ty scale, const Ty weightDecay, const int maxNormalize) { const int64_t bidy = blockIdx.y; const int64_t tidx = threadIdx.x; const int64_t tidy = threadIdx.y; const int64_t tid = tidy * blockDim.x + tidx; const int64_t ntid = blockDim.x * blockDim.y; const int64_t gidx = blockIdx.x * blockDim.x + tidx; // All the y threads in the block will use the same gradOutput value gradOutput += bidy * outDim; Ty gradOutVal = scale * (gidx < outDim ? 
gradOutput[gidx] : 0); // Calculate the amount of work for the current block / batch. const int64_t batchStart = bidy == 0 ? 0 : cumSumSizes[bidy - 1]; const int64_t batchEnd = cumSumSizes[bidy]; const int64_t batchLimit = batchEnd - batchStart; // Number of iterations required to finish the work for the current batch. const int64_t iters = divup(batchLimit, ntid); // Offset the values to the current batch. values += batchStart; // When maxNormalize is enabled, gradWeight will be twice the size. // The first half will contain the gradients required for maxNormalization. // The second half will contain the gradients required for updating weights. // if maxNormalize is false, both will evaluate to the same pointer. Ty *gradWeight0 = gradWeight + batchStart * gradWeightStride + gidx; Ty *gradWeight1 = gradWeight0 + (maxNormalize ? outDim : 0); __shared__ Ty s_values[THREADS_PER_BLOCK]; // Using iters to avoid divergence + synchtreads for (int64_t n = 0; n < iters; n++) { int64_t off = n * ntid; int64_t id = off + tid; int64_t lim = min(ntid, batchLimit - off); // Read the values required for the current iteration. s_values[tid] = id < batchLimit ? values[id] : 0; __syncthreads(); if (gidx < outDim) { if (maxNormalize) { for (int64_t idy = tidy; idy < lim; idy += blockDim.y) { // gradOutVal is already scaled gradWeight0[(off + idy) * gradWeightStride] = gradOutVal; } } for (int64_t idy = tidy; idy < lim; idy += blockDim.y) { gradWeight1[(off + idy) * gradWeightStride] = s_values[idy] * gradOutVal; } } __syncthreads(); } } // The gradBias is just a reduction of gradOutput along the batches. // There is only one block along y dimension performing the reduction. 
template<typename Ty, bool update> __global__ static void accGradBias( Ty *buffer, const Ty *gradOutput, const int64_t outDim, const int64_t batchSize, const Ty scale, const Ty weightDecay) { const int tidx = threadIdx.x; const int tidy = threadIdx.y; const int tid = tidy * blockDim.x + tidx; const int64_t idx = blockIdx.x * blockDim.x + tidx; Ty gradBiasVal = 0; gradOutput += idx; __shared__ Ty s_gradBiasVals[THREADS_PER_BLOCK]; // Each thread along y calculates the partial sum. if (idx < outDim) { for (int64_t idy = tidy; idy < batchSize; idy += blockDim.y) { gradBiasVal += gradOutput[idy * outDim]; } } s_gradBiasVals[tid] = gradBiasVal * scale; __syncthreads(); // Perform reduction is performed along y. for (int y = blockDim.y / 2; y >= 1; y /= 2) { if (tidy < y) { s_gradBiasVals[tid] += s_gradBiasVals[tid + y * blockDim.x]; } __syncthreads(); } // Write the output only from the first lane. if (tidy == 0 && idx < outDim) { if (update) { // If performing inplace update, subtract from bias. Ty *bias = buffer; bias[idx] = (bias[idx] - s_gradBiasVals[tid]); } else { // If just accumulating gradients, write to gradBias. Ty *gradBias = buffer; gradBias[idx] = s_gradBiasVals[tid]; } } } // Use gradWeight from accGradWeight to update the weight. // This kernel is launched batchSize number of times. // At each step in the iteration, the weights are updated in a sparse manner. template<typename Ty> __global__ static void updateWeight( Ty *weight, const Ty *gradWeight, const int64_t *keys, const int64_t *cumSumSizes, const int64_t outDim, const int64_t gradWeightStride, const int64_t weightStride, const int64_t keysOffset, const Ty learningRate, const Ty weightDecay, const int maxNormalize, const int64_t batchId) { int64_t gidx = blockIdx.x * blockDim.x + threadIdx.x; int64_t gidy = blockIdx.y * blockDim.y + threadIdx.y; // Find the limits of the work to be done const int64_t batchStart = batchId == 0 ? 
0 : cumSumSizes[batchId - 1]; const int64_t batchEnd = cumSumSizes[batchId]; // When maxNormalize is turned on, the weight tensor will contain // an extra "maxNormalize" number of terms per output at the beginning. // When maxNormalize is false, both will evaluate to same pointer. // when maxNormalize is true, // - nWeight[2] will contain the individual scaling factor. // - nWeight[3] will contain the individual bias for the normalized input. Ty *nWeight = weight; weight += maxNormalize + gidx; // When maxNormalize is enabled, gradWeight will be twice the size. // The first half will contain the gradients required for maxNormalization. // The second half will contain the gradients required for updating weights. // if maxNormalize is false, both will evaluate to the same pointer. const Ty *gradWeight0 = gradWeight + gidx; const Ty *gradWeight1 = gradWeight0 + (maxNormalize ? outDim : 0); if (gidx >= outDim) return; for (int64_t id = batchStart + gidy; id < batchEnd; id += blockDim.y * gridDim.y) { Ty lr = learningRate; Ty wd = weightDecay; int64_t weightOffset = (keys[id] + keysOffset) * weightStride; Ty weightVal = weight[weightOffset]; if (maxNormalize) { Ty scale = nWeight[weightOffset + 2]; lr *= scale; wd *= scale; // nWeight[3] needs to be updated in the following manner for a given input. // nWeight[3] = nWeight[3] - sum(gradWeight0[gidx] * weight[gidx]); // Since problem is parallelized along gidx, use atomicAdd for the update. Ty gradNormBias = lr * weightVal * gradWeight0[id * gradWeightStride]; atomicAdd(nWeight + weightOffset + 3, -gradNormBias); } // Perform the regular update Ty gradWeightVal = lr * gradWeight1[id * gradWeightStride]; if (weightDecay == 0) { weight[weightOffset] = weightVal - gradWeightVal; } else { weight[weightOffset] = weightVal * (1 - wd) - gradWeightVal; } } } // This kernel is launched batchSize number of times. // At each step in the iteration, the weights are updated in place in a sparse manner. 
template<typename Ty> __global__ static void accUpdateWeight( Ty *weight, const int64_t weightStride, const Ty *gradOutput, const int64_t outDim, const Ty *values, const int64_t *cumSumSizes, const int64_t *keys, const int64_t keysOffset, const Ty scale, const Ty weightDecay, const int maxNormalize, const int64_t batchId) { // Parallel along outDim. int64_t gidx = blockIdx.x * blockDim.x + threadIdx.x; // Parallel along the sparse input size for current batch. int64_t gidy = blockIdx.y * blockDim.y + threadIdx.y; if (gidx >= outDim) return; // Find the limits of the work to be done. const int64_t batchStart = batchId == 0 ? 0 : cumSumSizes[batchId - 1]; const int64_t batchEnd = cumSumSizes[batchId]; gradOutput += batchId * outDim; Ty gradOutVal = scale * (gidx < outDim ? gradOutput[gidx] : 0); // When maxNormalize is turned on, the weight tensor will contain // an extra "maxNormalize" number of terms per output at the beginning. // When maxNormalize is false, both will evaluate to same pointer. // when maxNormalize is true, // - nWeight[2] will contain the individual scaling factor. // - nWeight[3] will contain the individual bias for the normalized input. Ty *nWeight = weight; weight += maxNormalize + gidx; for (int64_t id = batchStart + gidy; id < batchEnd; id += blockDim.y * gridDim.y) { Ty wd = weightDecay; int64_t weightOffset = (keys[id] + keysOffset) * weightStride; Ty gradWeightVal = gradOutVal * values[id]; Ty weightVal = weight[weightOffset]; if (maxNormalize) { Ty nScale = nWeight[weightOffset + 2]; gradWeightVal *= nScale; wd *= nScale; // nWeight[3] needs to be updated in the following manner for a given input. // nWeight[3] = nWeight[3] - sum(gradOut[gidx] * weight[gidx]); // Since problem is parallelized along gidx, use atomicAdd for the update. 
Ty gradNormBias = nScale * weightVal * gradOutVal; atomicAdd(nWeight + weightOffset + 3, -gradNormBias); } // Perform the regular update if (weightDecay == 0) { weight[weightOffset] = weightVal - gradWeightVal; } else { weight[weightOffset] = weightVal * (1 - wd) - gradWeightVal; } } } void THNN_CudaHalfIndexLinear_updateOutput( THCState *state, THCudaLongTensor *keys, int64_t keysOffset, THCudaHalfTensor *values, THCudaLongTensor *sizes, THCudaLongTensor *cumSumSizes, THCudaHalfTensor *output, THCudaHalfTensor *weight, THCudaHalfTensor *bias, THCudaHalfTensor *normalizedValues, int train) { THError("THCudaHalfTensor not supported with IndexLinear"); } void THNN_CudaHalfIndexLinear_accGradParameters( THCState *state, THCudaLongTensor *keys, int64_t keysOffset, THCudaHalfTensor *values, THCudaLongTensor *sizes, THCudaLongTensor *cumSumSizes, THCudaHalfTensor *gradOutput, THCudaHalfTensor *gradWeight, THCudaHalfTensor *gradBias, THCudaHalfTensor *weight, THCudaHalfTensor *bias, THCudaHalfTensor* valuesBuffer, float weightDecay, float scale) { THError("THCudaHalfTensor not supported with IndexLinear"); } void THNN_CudaHalfIndexLinear_accUpdateGradParameters( THCState *state, THCudaLongTensor *keys, int64_t keysOffset, THCudaHalfTensor *values, THCudaLongTensor *sizes, THCudaLongTensor *cumSumSizes, THCudaHalfTensor *gradOutput, THCudaHalfTensor *weight, THCudaHalfTensor *bias, float weightDecay, float scale) { THError("THCudaHalfTensor not supported with IndexLinear"); } void THNN_CudaHalfIndexLinear_updateParameters( THCState *state, THCudaHalfTensor *gradWeight, THCudaHalfTensor *gradBias, THCudaHalfTensor *weight, THCudaHalfTensor *bias, THCudaLongTensor *runningKeys, THCudaLongTensor *cumSumSizes, int64_t keysOffset, float weightDecay, float learningRate) { THError("THCudaHalfTensor not supported with IndexLinear"); } #include "generic/IndexLinear.cu" #include "THHGenerateFloatType.h" #include "generic/IndexLinear.cu" #include "THHGenerateDoubleType.h"
23bee00e0b1b98ab548a2e4b9f87dbe2b8037f6e.cu
#include "THCUNN.h" #include "TH/THHalf.h" #include "THCHalfAutoNumerics.cuh" #include "THCAtomics.cuh" #include "THCTensor.hpp" #include "THCStorage.hpp" #define divup(a, b) ((a) + (b) - 1) / (b) const int THREADS_PER_BLOCK = 256; const int THREADS_X = 32; const int THREADS_Y = THREADS_PER_BLOCK / THREADS_X; const int REPEAT = 32; const int64_t NNZ_PER_BLOCK_MAX = 1024; /* sign MACRO */ #ifndef clamp #define clamp(a, low, high) max(min((a), (high)), (low)) #endif __device__ double atomicExch(double *address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long res = atomicExch(address_as_ull, __double_as_longlong(val)); return __longlong_as_double(res); } template<typename Ty, bool train> __global__ static void updateOutput( Ty *output, Ty *normalizedValues, const Ty *values, const int64_t *cumSumSizes, const int64_t *keys, const int64_t batchSize, const int64_t outDim, Ty *weight, const Ty *bias, const int64_t weightStride, const int64_t keysOffset, const int maxNormalize, const int nnzPerBlock) { /******************************************************* * Adapted from the following file in arrayfire * https://github.com/arrayfire/arrayfire/blob/v3.4.1/src/backend/opencl/kernel/csrmm.cl * ******************************************************* * Original copyright notice can be seen below: * * Copyright (c) 2016, ArrayFire * All rights reserved. * * This file is distributed under 3-clause BSD license. 
* The complete license agreement can be obtained at: * http://arrayfire.com/licenses/BSD-3-Clause ********************************************************/ const int64_t tidx = threadIdx.x; const int64_t tidy = threadIdx.y; const int64_t tid = tidy * blockDim.x + tidx; const int64_t gidx = blockIdx.x * blockDim.x + tidx; Ty *nWeight = weight; // Offset the number of elements specified by maxNormalize weight += gidx + maxNormalize; output += gidx; bool within_N = (gidx < outDim); __shared__ Ty s_values[THREADS_PER_BLOCK]; __shared__ int64_t s_keys[THREADS_PER_BLOCK]; const int64_t rowId = blockIdx.y; // if (rowId >= batchSize) return; // Load the nonzero column offsets for current row const int64_t batchStart = (rowId == 0 ? 0 : cumSumSizes[rowId - 1]) + blockIdx.z * nnzPerBlock; const int64_t batchEnd = min(batchStart + nnzPerBlock, cumSumSizes[rowId]); const int64_t batchStride = blockDim.x * blockDim.y; Ty outVal = 0; // Since the number of nonzero elements might be greater than local memory available, // Load only part of the row into local memory, perform partial dot, repeat until done. for (int64_t id = batchStart; id < batchEnd; id += batchStride) { // Load the current chunk of the row into local memory int64_t lim = min(batchEnd - id, (int64_t)batchStride); int64_t key = tid < lim ? keys[id + tid] + keysOffset : -1; Ty val = tid < lim ? values[id + tid] : 0; int64_t nWeightOffset = key * weightStride; if (tid < lim && maxNormalize) { Ty *nWeightCurr = nWeight + nWeightOffset; if (train) { Ty absVal = fabs(val); Ty maxVal = nWeightCurr[0]; if (absVal > maxVal) { // Updating maxVal and invMaxVal. Go hogwild! 
Ty invAbsVal = 1.0 / absVal; atomicExch(nWeightCurr + 0, absVal); atomicExch(nWeightCurr + 1, invAbsVal); } val = clamp(val * nWeightCurr[1], -1.0, 1.0) + nWeightCurr[3]; normalizedValues[id + tid] = val; nWeightCurr[2] = 1; } else { val = clamp(val * nWeightCurr[1], -1.0, 1.0) + nWeightCurr[3]; } } s_keys[tid] = key; s_values[tid] = val; __syncthreads(); // Perform a single "dot" operation for each thread for (int64_t idy = tidy; within_N && idy < lim; idy += blockDim.y) { outVal += s_values[idy] * weight[weightStride * s_keys[idy]]; } __syncthreads(); } // s_values is no longer used at this point. Reuse it for reducing outVal. // A reduction along the y dimension now gives a single output value along x. s_values[tid] = outVal; for (int64_t y = blockDim.y / 2; y >= 1; y /= 2) { __syncthreads(); if (tidy < y) s_values[tid] = s_values[tid] + s_values[tid + y * blockDim.x]; } if (within_N && tidy == 0) { Ty val = s_values[tid] + (blockIdx.z == 0 ? bias[gidx] : 0); if (gridDim.z == 1) { output[rowId * outDim] = val; } else { atomicAdd(output + rowId * outDim, val); } } } // This kernel takes in the following inputs: // values of size [keysSize x 1] and gradOutput of size [batchSize x outDim], // to generate gradWeight of size [keysSize x outDim] // nth block along y dimension computes on the non zero elements from the nth batch. template<typename Ty> __global__ static void accGradWeight( Ty *gradWeight, const Ty *gradOutput, const Ty *values, const int64_t *cumSumSizes, const int64_t outDim, const int64_t gradWeightStride, const Ty scale, const Ty weightDecay, const int maxNormalize) { const int64_t bidy = blockIdx.y; const int64_t tidx = threadIdx.x; const int64_t tidy = threadIdx.y; const int64_t tid = tidy * blockDim.x + tidx; const int64_t ntid = blockDim.x * blockDim.y; const int64_t gidx = blockIdx.x * blockDim.x + tidx; // All the y threads in the block will use the same gradOutput value gradOutput += bidy * outDim; Ty gradOutVal = scale * (gidx < outDim ? 
gradOutput[gidx] : 0); // Calculate the amount of work for the current block / batch. const int64_t batchStart = bidy == 0 ? 0 : cumSumSizes[bidy - 1]; const int64_t batchEnd = cumSumSizes[bidy]; const int64_t batchLimit = batchEnd - batchStart; // Number of iterations required to finish the work for the current batch. const int64_t iters = divup(batchLimit, ntid); // Offset the values to the current batch. values += batchStart; // When maxNormalize is enabled, gradWeight will be twice the size. // The first half will contain the gradients required for maxNormalization. // The second half will contain the gradients required for updating weights. // if maxNormalize is false, both will evaluate to the same pointer. Ty *gradWeight0 = gradWeight + batchStart * gradWeightStride + gidx; Ty *gradWeight1 = gradWeight0 + (maxNormalize ? outDim : 0); __shared__ Ty s_values[THREADS_PER_BLOCK]; // Using iters to avoid divergence + synchtreads for (int64_t n = 0; n < iters; n++) { int64_t off = n * ntid; int64_t id = off + tid; int64_t lim = min(ntid, batchLimit - off); // Read the values required for the current iteration. s_values[tid] = id < batchLimit ? values[id] : 0; __syncthreads(); if (gidx < outDim) { if (maxNormalize) { for (int64_t idy = tidy; idy < lim; idy += blockDim.y) { // gradOutVal is already scaled gradWeight0[(off + idy) * gradWeightStride] = gradOutVal; } } for (int64_t idy = tidy; idy < lim; idy += blockDim.y) { gradWeight1[(off + idy) * gradWeightStride] = s_values[idy] * gradOutVal; } } __syncthreads(); } } // The gradBias is just a reduction of gradOutput along the batches. // There is only one block along y dimension performing the reduction. 
template<typename Ty, bool update> __global__ static void accGradBias( Ty *buffer, const Ty *gradOutput, const int64_t outDim, const int64_t batchSize, const Ty scale, const Ty weightDecay) { const int tidx = threadIdx.x; const int tidy = threadIdx.y; const int tid = tidy * blockDim.x + tidx; const int64_t idx = blockIdx.x * blockDim.x + tidx; Ty gradBiasVal = 0; gradOutput += idx; __shared__ Ty s_gradBiasVals[THREADS_PER_BLOCK]; // Each thread along y calculates the partial sum. if (idx < outDim) { for (int64_t idy = tidy; idy < batchSize; idy += blockDim.y) { gradBiasVal += gradOutput[idy * outDim]; } } s_gradBiasVals[tid] = gradBiasVal * scale; __syncthreads(); // Perform reduction is performed along y. for (int y = blockDim.y / 2; y >= 1; y /= 2) { if (tidy < y) { s_gradBiasVals[tid] += s_gradBiasVals[tid + y * blockDim.x]; } __syncthreads(); } // Write the output only from the first lane. if (tidy == 0 && idx < outDim) { if (update) { // If performing inplace update, subtract from bias. Ty *bias = buffer; bias[idx] = (bias[idx] - s_gradBiasVals[tid]); } else { // If just accumulating gradients, write to gradBias. Ty *gradBias = buffer; gradBias[idx] = s_gradBiasVals[tid]; } } } // Use gradWeight from accGradWeight to update the weight. // This kernel is launched batchSize number of times. // At each step in the iteration, the weights are updated in a sparse manner. template<typename Ty> __global__ static void updateWeight( Ty *weight, const Ty *gradWeight, const int64_t *keys, const int64_t *cumSumSizes, const int64_t outDim, const int64_t gradWeightStride, const int64_t weightStride, const int64_t keysOffset, const Ty learningRate, const Ty weightDecay, const int maxNormalize, const int64_t batchId) { int64_t gidx = blockIdx.x * blockDim.x + threadIdx.x; int64_t gidy = blockIdx.y * blockDim.y + threadIdx.y; // Find the limits of the work to be done const int64_t batchStart = batchId == 0 ? 
0 : cumSumSizes[batchId - 1]; const int64_t batchEnd = cumSumSizes[batchId]; // When maxNormalize is turned on, the weight tensor will contain // an extra "maxNormalize" number of terms per output at the beginning. // When maxNormalize is false, both will evaluate to same pointer. // when maxNormalize is true, // - nWeight[2] will contain the individual scaling factor. // - nWeight[3] will contain the individual bias for the normalized input. Ty *nWeight = weight; weight += maxNormalize + gidx; // When maxNormalize is enabled, gradWeight will be twice the size. // The first half will contain the gradients required for maxNormalization. // The second half will contain the gradients required for updating weights. // if maxNormalize is false, both will evaluate to the same pointer. const Ty *gradWeight0 = gradWeight + gidx; const Ty *gradWeight1 = gradWeight0 + (maxNormalize ? outDim : 0); if (gidx >= outDim) return; for (int64_t id = batchStart + gidy; id < batchEnd; id += blockDim.y * gridDim.y) { Ty lr = learningRate; Ty wd = weightDecay; int64_t weightOffset = (keys[id] + keysOffset) * weightStride; Ty weightVal = weight[weightOffset]; if (maxNormalize) { Ty scale = nWeight[weightOffset + 2]; lr *= scale; wd *= scale; // nWeight[3] needs to be updated in the following manner for a given input. // nWeight[3] = nWeight[3] - sum(gradWeight0[gidx] * weight[gidx]); // Since problem is parallelized along gidx, use atomicAdd for the update. Ty gradNormBias = lr * weightVal * gradWeight0[id * gradWeightStride]; atomicAdd(nWeight + weightOffset + 3, -gradNormBias); } // Perform the regular update Ty gradWeightVal = lr * gradWeight1[id * gradWeightStride]; if (weightDecay == 0) { weight[weightOffset] = weightVal - gradWeightVal; } else { weight[weightOffset] = weightVal * (1 - wd) - gradWeightVal; } } } // This kernel is launched batchSize number of times. // At each step in the iteration, the weights are updated in place in a sparse manner. 
template<typename Ty> __global__ static void accUpdateWeight( Ty *weight, const int64_t weightStride, const Ty *gradOutput, const int64_t outDim, const Ty *values, const int64_t *cumSumSizes, const int64_t *keys, const int64_t keysOffset, const Ty scale, const Ty weightDecay, const int maxNormalize, const int64_t batchId) { // Parallel along outDim. int64_t gidx = blockIdx.x * blockDim.x + threadIdx.x; // Parallel along the sparse input size for current batch. int64_t gidy = blockIdx.y * blockDim.y + threadIdx.y; if (gidx >= outDim) return; // Find the limits of the work to be done. const int64_t batchStart = batchId == 0 ? 0 : cumSumSizes[batchId - 1]; const int64_t batchEnd = cumSumSizes[batchId]; gradOutput += batchId * outDim; Ty gradOutVal = scale * (gidx < outDim ? gradOutput[gidx] : 0); // When maxNormalize is turned on, the weight tensor will contain // an extra "maxNormalize" number of terms per output at the beginning. // When maxNormalize is false, both will evaluate to same pointer. // when maxNormalize is true, // - nWeight[2] will contain the individual scaling factor. // - nWeight[3] will contain the individual bias for the normalized input. Ty *nWeight = weight; weight += maxNormalize + gidx; for (int64_t id = batchStart + gidy; id < batchEnd; id += blockDim.y * gridDim.y) { Ty wd = weightDecay; int64_t weightOffset = (keys[id] + keysOffset) * weightStride; Ty gradWeightVal = gradOutVal * values[id]; Ty weightVal = weight[weightOffset]; if (maxNormalize) { Ty nScale = nWeight[weightOffset + 2]; gradWeightVal *= nScale; wd *= nScale; // nWeight[3] needs to be updated in the following manner for a given input. // nWeight[3] = nWeight[3] - sum(gradOut[gidx] * weight[gidx]); // Since problem is parallelized along gidx, use atomicAdd for the update. 
Ty gradNormBias = nScale * weightVal * gradOutVal; atomicAdd(nWeight + weightOffset + 3, -gradNormBias); } // Perform the regular update if (weightDecay == 0) { weight[weightOffset] = weightVal - gradWeightVal; } else { weight[weightOffset] = weightVal * (1 - wd) - gradWeightVal; } } } void THNN_CudaHalfIndexLinear_updateOutput( THCState *state, THCudaLongTensor *keys, int64_t keysOffset, THCudaHalfTensor *values, THCudaLongTensor *sizes, THCudaLongTensor *cumSumSizes, THCudaHalfTensor *output, THCudaHalfTensor *weight, THCudaHalfTensor *bias, THCudaHalfTensor *normalizedValues, int train) { THError("THCudaHalfTensor not supported with IndexLinear"); } void THNN_CudaHalfIndexLinear_accGradParameters( THCState *state, THCudaLongTensor *keys, int64_t keysOffset, THCudaHalfTensor *values, THCudaLongTensor *sizes, THCudaLongTensor *cumSumSizes, THCudaHalfTensor *gradOutput, THCudaHalfTensor *gradWeight, THCudaHalfTensor *gradBias, THCudaHalfTensor *weight, THCudaHalfTensor *bias, THCudaHalfTensor* valuesBuffer, float weightDecay, float scale) { THError("THCudaHalfTensor not supported with IndexLinear"); } void THNN_CudaHalfIndexLinear_accUpdateGradParameters( THCState *state, THCudaLongTensor *keys, int64_t keysOffset, THCudaHalfTensor *values, THCudaLongTensor *sizes, THCudaLongTensor *cumSumSizes, THCudaHalfTensor *gradOutput, THCudaHalfTensor *weight, THCudaHalfTensor *bias, float weightDecay, float scale) { THError("THCudaHalfTensor not supported with IndexLinear"); } void THNN_CudaHalfIndexLinear_updateParameters( THCState *state, THCudaHalfTensor *gradWeight, THCudaHalfTensor *gradBias, THCudaHalfTensor *weight, THCudaHalfTensor *bias, THCudaLongTensor *runningKeys, THCudaLongTensor *cumSumSizes, int64_t keysOffset, float weightDecay, float learningRate) { THError("THCudaHalfTensor not supported with IndexLinear"); } #include "generic/IndexLinear.cu" #include "THCGenerateFloatType.h" #include "generic/IndexLinear.cu" #include "THCGenerateDoubleType.h"
b9c1830021eda4b76abb2fb496dfb94ddf9cd65e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #include<stdlib.h> typedef struct { unsigned char red,green,blue; } PPMPixel; typedef struct { int x, y; PPMPixel *data; } PPMImage; #define CREATOR "COMP3231" #define RGB_COMPONENT_COLOR 255 #define thread_x 10 #define thread_y 10 #define CUDA_CHECK(err) (cuda_checker(err, __FILE__, __LINE__)) static void cuda_checker(hipError_t err, const char *file, int line) { if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), file, line); exit(EXIT_FAILURE); } } static PPMImage *readPPM(const char *filename) { char buff[16]; PPMImage *img; FILE *fp; int c, rgb_comp_color; //open PPM file for reading fp = fopen(filename, "rb"); if (!fp) { fprintf(stderr, "Unable to open file '%s'\n", filename); exit(1); } //read image format if (!fgets(buff, sizeof(buff), fp)) { perror(filename); exit(1); } //check the image format if (buff[0] != 'P' || buff[1] != '6') { fprintf(stderr, "Invalid image format (must be 'P6')\n"); exit(1); } //alloc memory form image img = (PPMImage *)malloc(sizeof(PPMImage)); if (!img) { fprintf(stderr, "Unable to allocate memory\n"); exit(1); } //check for comments c = getc(fp); while (c == '#') { while (getc(fp) != '\n') ; c = getc(fp); } ungetc(c, fp); //read image size information if (fscanf(fp, "%d %d", &img->x, &img->y) != 2) { fprintf(stderr, "Invalid image size (error loading '%s')\n", filename); exit(1); } //read rgb component if (fscanf(fp, "%d", &rgb_comp_color) != 1) { fprintf(stderr, "Invalid rgb component (error loading '%s')\n", filename); exit(1); } //check rgb component depth if (rgb_comp_color!= RGB_COMPONENT_COLOR) { fprintf(stderr, "'%s' does not have 8-bits components\n", filename); exit(1); } while (fgetc(fp) != '\n') ; //memory allocation for pixel data img->data = (PPMPixel*)malloc(img->x * img->y * sizeof(PPMPixel)); if (!img) { fprintf(stderr, "Unable to allocate memory\n"); exit(1); } //read pixel data 
from file if (fread(img->data, 3 * img->x, img->y, fp) != img->y) { fprintf(stderr, "Error loading image '%s'\n", filename); exit(1); } fclose(fp); return img; } void writePPM(const char *filename, PPMImage *img) { FILE *fp; //open file for output fp = fopen(filename, "wb"); if (!fp) { fprintf(stderr, "Unable to open file '%s'\n", filename); exit(1); } //write the header file //image format fprintf(fp, "P6\n"); //comments fprintf(fp, "# Created by %s\n",CREATOR); //image size fprintf(fp, "%d %d\n",img->x,img->y); // rgb component depth fprintf(fp, "%d\n",RGB_COMPONENT_COLOR); // pixel data fwrite(img->data, 3 * img->x, img->y, fp); fclose(fp); } __device__ float filter[] = {0.05, 0.1, 0.05, 0.1, 0.4, 0.1, 0.05, 0.1, 0.05}; // GlobalVar filter __global__ void blur_kernel(PPMImage *dev_img, PPMPixel *out_data) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int idx = x + gridDim.x * blockDim.x * y; // width and height of image int width = dev_img->x; int height = dev_img->y; __shared__ PPMPixel data[thread_y + 2][thread_x + 2]; // sharedVar pixels of image // empty pixel PPMPixel empty; empty.red = 0; empty.green = 0; empty.blue = 0; int local_idx = threadIdx.x + threadIdx.y * blockDim.x; // index in block // read pixels surrounding block into shared memory if (local_idx < blockDim.x + 2) { int tmp_x = blockIdx.x * blockDim.x - 1 + local_idx; int tmp_y = blockIdx.y * blockDim.y - 1; if (tmp_x < 0 || tmp_y < 0 || tmp_x >= blockDim.x * gridDim.x || tmp_y >= blockDim.y * gridDim.y) data [0][local_idx] = empty; else data [0][local_idx] = dev_img->data[tmp_x + tmp_y * gridDim.x * blockDim.x]; tmp_y = (blockIdx.y + 1) * blockDim.y; if (tmp_x < 0 || tmp_y < 0 || tmp_x >= blockDim.x * gridDim.x || tmp_y >= blockDim.y * gridDim.y) data[blockDim.y + 1][local_idx] = empty; else data[blockDim.y + 1][local_idx] = dev_img->data[tmp_x + tmp_y * gridDim.x * blockDim.x]; } if (local_idx < blockDim.y) { int tmp_x = blockIdx.x * 
blockDim.x - 1; int tmp_y = blockIdx.y * blockDim.y + local_idx; if (tmp_x < 0 || tmp_y < 0 || tmp_x >= blockDim.x * gridDim.x || tmp_y >= blockDim.y * gridDim.y) data[local_idx + 1][0] = empty; else data[local_idx + 1][0] = dev_img->data[tmp_x + tmp_y * gridDim.x * blockDim.x]; tmp_x = (blockIdx.x + 1) * blockDim.x; if (tmp_x < 0 || tmp_y < 0 || tmp_x >= blockDim.x * gridDim.x || tmp_y >= blockDim.y * gridDim.y) data[local_idx + 1][blockDim.x + 1] = empty; else data[local_idx + 1][blockDim.x + 1] = dev_img->data[tmp_x + tmp_y * gridDim.x * blockDim.x]; } // read data into shared memory if (idx < width * height) data[threadIdx.y + 1][threadIdx.x + 1] = dev_img->data[idx]; __syncthreads(); if (idx < width * height) { // avoid illegal memory access out_data[idx].red = data[threadIdx.y][threadIdx.x].red * filter[0] + data[threadIdx.y][threadIdx.x + 1].red * filter[1] + data[threadIdx.y][threadIdx.x + 2].red * filter[2] + data[threadIdx.y + 1][threadIdx.x].red * filter[3] + data[threadIdx.y + 1][threadIdx.x + 1].red * filter[4] + data[threadIdx.y + 1][threadIdx.x + 2].red * filter[5] + data[threadIdx.y + 2][threadIdx.x].red * filter[6] + data[threadIdx.y + 2][threadIdx.x + 1].red * filter[7] + data[threadIdx.y + 2][threadIdx.x + 2].red * filter[8]; out_data[idx].green = data[threadIdx.y][threadIdx.x].green * filter[0] + data[threadIdx.y][threadIdx.x + 1].green * filter[1] + data[threadIdx.y][threadIdx.x + 2].green * filter[2] + data[threadIdx.y + 1][threadIdx.x].green * filter[3] + data[threadIdx.y + 1][threadIdx.x + 1].green * filter[4] + data[threadIdx.y + 1][threadIdx.x + 2].green * filter[5] + data[threadIdx.y + 2][threadIdx.x].green * filter[6] + data[threadIdx.y + 2][threadIdx.x + 1].green * filter[7] + data[threadIdx.y + 2][threadIdx.x + 2].green * filter[8]; out_data[idx].blue = data[threadIdx.y][threadIdx.x].blue * filter[0] + data[threadIdx.y][threadIdx.x + 1].blue * filter[1] + data[threadIdx.y][threadIdx.x + 2].blue * filter[2] + data[threadIdx.y + 
1][threadIdx.x].blue * filter[3] + data[threadIdx.y + 1][threadIdx.x + 1].blue * filter[4] + data[threadIdx.y + 1][threadIdx.x + 2].blue * filter[5] + data[threadIdx.y + 2][threadIdx.x].blue * filter[6] + data[threadIdx.y + 2][threadIdx.x + 1].blue * filter[7] + data[threadIdx.y + 2][threadIdx.x + 2].blue * filter[8]; } } void your_gaussian_blur_func(PPMImage *img) { PPMImage *host_img; // for assigning PPMPixel pointer on device host_img = (PPMImage *) malloc(sizeof(PPMImage)); memcpy(host_img, img, sizeof(PPMImage)); CUDA_CHECK(hipMalloc((void**)&(host_img->data), img->x * img->y * sizeof(PPMPixel))); // allocate PPMPixel pointer on device CUDA_CHECK(hipMemcpy(host_img->data, img->data, img->x * img->y * sizeof(PPMPixel), hipMemcpyHostToDevice)); // copy PPMPixel data to device // PPMPixel data is now on the gpu, now copy the "meta" data to gpu PPMImage *dev_img; // for assigning PPMImage on device CUDA_CHECK(hipMalloc((void**)&dev_img, sizeof(PPMImage))); // allocate memory on device CUDA_CHECK(hipMemcpy(dev_img, host_img, sizeof(PPMImage), hipMemcpyHostToDevice)); // copy memory to device PPMPixel *out_data; CUDA_CHECK(hipMalloc((void**)&(out_data), img->x * img->y * sizeof(PPMPixel))); // allocate PPMPixel pointer on device dim3 threadsPerBlock = dim3(thread_x, thread_y); dim3 blocksPerGrid = dim3((img->x + thread_x - 1) / thread_x, (img->y + thread_y - 1) / thread_y); hipLaunchKernelGGL(( blur_kernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_img, out_data); CUDA_CHECK(hipMemcpy(img->data, out_data, img->x * img->y * sizeof(PPMPixel), hipMemcpyDeviceToHost)); // copy memory to host CUDA_CHECK(hipFree(out_data)); CUDA_CHECK(hipFree(host_img->data)); CUDA_CHECK(hipFree(dev_img)); free(host_img); } int main(){ // read PPMImage *image; image = readPPM("input.ppm"); // record execution time float time; hipEvent_t start, stop; CUDA_CHECK(hipEventCreate(&start)); CUDA_CHECK(hipEventCreate(&stop)); CUDA_CHECK(hipEventRecord(start, 0)); 
your_gaussian_blur_func(image); CUDA_CHECK(hipEventRecord(stop, 0)); CUDA_CHECK(hipEventSynchronize(stop)); CUDA_CHECK(hipEventElapsedTime(&time, start, stop)); printf("Time to generate: %3.1f ms \n", time); // write writePPM("output.ppm",image); }
b9c1830021eda4b76abb2fb496dfb94ddf9cd65e.cu
#include<stdio.h> #include<stdlib.h> typedef struct { unsigned char red,green,blue; } PPMPixel; typedef struct { int x, y; PPMPixel *data; } PPMImage; #define CREATOR "COMP3231" #define RGB_COMPONENT_COLOR 255 #define thread_x 10 #define thread_y 10 #define CUDA_CHECK(err) (cuda_checker(err, __FILE__, __LINE__)) static void cuda_checker(cudaError_t err, const char *file, int line) { if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line); exit(EXIT_FAILURE); } } static PPMImage *readPPM(const char *filename) { char buff[16]; PPMImage *img; FILE *fp; int c, rgb_comp_color; //open PPM file for reading fp = fopen(filename, "rb"); if (!fp) { fprintf(stderr, "Unable to open file '%s'\n", filename); exit(1); } //read image format if (!fgets(buff, sizeof(buff), fp)) { perror(filename); exit(1); } //check the image format if (buff[0] != 'P' || buff[1] != '6') { fprintf(stderr, "Invalid image format (must be 'P6')\n"); exit(1); } //alloc memory form image img = (PPMImage *)malloc(sizeof(PPMImage)); if (!img) { fprintf(stderr, "Unable to allocate memory\n"); exit(1); } //check for comments c = getc(fp); while (c == '#') { while (getc(fp) != '\n') ; c = getc(fp); } ungetc(c, fp); //read image size information if (fscanf(fp, "%d %d", &img->x, &img->y) != 2) { fprintf(stderr, "Invalid image size (error loading '%s')\n", filename); exit(1); } //read rgb component if (fscanf(fp, "%d", &rgb_comp_color) != 1) { fprintf(stderr, "Invalid rgb component (error loading '%s')\n", filename); exit(1); } //check rgb component depth if (rgb_comp_color!= RGB_COMPONENT_COLOR) { fprintf(stderr, "'%s' does not have 8-bits components\n", filename); exit(1); } while (fgetc(fp) != '\n') ; //memory allocation for pixel data img->data = (PPMPixel*)malloc(img->x * img->y * sizeof(PPMPixel)); if (!img) { fprintf(stderr, "Unable to allocate memory\n"); exit(1); } //read pixel data from file if (fread(img->data, 3 * img->x, img->y, fp) != img->y) { fprintf(stderr, 
"Error loading image '%s'\n", filename); exit(1); } fclose(fp); return img; } void writePPM(const char *filename, PPMImage *img) { FILE *fp; //open file for output fp = fopen(filename, "wb"); if (!fp) { fprintf(stderr, "Unable to open file '%s'\n", filename); exit(1); } //write the header file //image format fprintf(fp, "P6\n"); //comments fprintf(fp, "# Created by %s\n",CREATOR); //image size fprintf(fp, "%d %d\n",img->x,img->y); // rgb component depth fprintf(fp, "%d\n",RGB_COMPONENT_COLOR); // pixel data fwrite(img->data, 3 * img->x, img->y, fp); fclose(fp); } __device__ float filter[] = {0.05, 0.1, 0.05, 0.1, 0.4, 0.1, 0.05, 0.1, 0.05}; // GlobalVar filter __global__ void blur_kernel(PPMImage *dev_img, PPMPixel *out_data) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int idx = x + gridDim.x * blockDim.x * y; // width and height of image int width = dev_img->x; int height = dev_img->y; __shared__ PPMPixel data[thread_y + 2][thread_x + 2]; // sharedVar pixels of image // empty pixel PPMPixel empty; empty.red = 0; empty.green = 0; empty.blue = 0; int local_idx = threadIdx.x + threadIdx.y * blockDim.x; // index in block // read pixels surrounding block into shared memory if (local_idx < blockDim.x + 2) { int tmp_x = blockIdx.x * blockDim.x - 1 + local_idx; int tmp_y = blockIdx.y * blockDim.y - 1; if (tmp_x < 0 || tmp_y < 0 || tmp_x >= blockDim.x * gridDim.x || tmp_y >= blockDim.y * gridDim.y) data [0][local_idx] = empty; else data [0][local_idx] = dev_img->data[tmp_x + tmp_y * gridDim.x * blockDim.x]; tmp_y = (blockIdx.y + 1) * blockDim.y; if (tmp_x < 0 || tmp_y < 0 || tmp_x >= blockDim.x * gridDim.x || tmp_y >= blockDim.y * gridDim.y) data[blockDim.y + 1][local_idx] = empty; else data[blockDim.y + 1][local_idx] = dev_img->data[tmp_x + tmp_y * gridDim.x * blockDim.x]; } if (local_idx < blockDim.y) { int tmp_x = blockIdx.x * blockDim.x - 1; int tmp_y = blockIdx.y * blockDim.y + local_idx; if (tmp_x < 0 || tmp_y < 0 
|| tmp_x >= blockDim.x * gridDim.x || tmp_y >= blockDim.y * gridDim.y) data[local_idx + 1][0] = empty; else data[local_idx + 1][0] = dev_img->data[tmp_x + tmp_y * gridDim.x * blockDim.x]; tmp_x = (blockIdx.x + 1) * blockDim.x; if (tmp_x < 0 || tmp_y < 0 || tmp_x >= blockDim.x * gridDim.x || tmp_y >= blockDim.y * gridDim.y) data[local_idx + 1][blockDim.x + 1] = empty; else data[local_idx + 1][blockDim.x + 1] = dev_img->data[tmp_x + tmp_y * gridDim.x * blockDim.x]; } // read data into shared memory if (idx < width * height) data[threadIdx.y + 1][threadIdx.x + 1] = dev_img->data[idx]; __syncthreads(); if (idx < width * height) { // avoid illegal memory access out_data[idx].red = data[threadIdx.y][threadIdx.x].red * filter[0] + data[threadIdx.y][threadIdx.x + 1].red * filter[1] + data[threadIdx.y][threadIdx.x + 2].red * filter[2] + data[threadIdx.y + 1][threadIdx.x].red * filter[3] + data[threadIdx.y + 1][threadIdx.x + 1].red * filter[4] + data[threadIdx.y + 1][threadIdx.x + 2].red * filter[5] + data[threadIdx.y + 2][threadIdx.x].red * filter[6] + data[threadIdx.y + 2][threadIdx.x + 1].red * filter[7] + data[threadIdx.y + 2][threadIdx.x + 2].red * filter[8]; out_data[idx].green = data[threadIdx.y][threadIdx.x].green * filter[0] + data[threadIdx.y][threadIdx.x + 1].green * filter[1] + data[threadIdx.y][threadIdx.x + 2].green * filter[2] + data[threadIdx.y + 1][threadIdx.x].green * filter[3] + data[threadIdx.y + 1][threadIdx.x + 1].green * filter[4] + data[threadIdx.y + 1][threadIdx.x + 2].green * filter[5] + data[threadIdx.y + 2][threadIdx.x].green * filter[6] + data[threadIdx.y + 2][threadIdx.x + 1].green * filter[7] + data[threadIdx.y + 2][threadIdx.x + 2].green * filter[8]; out_data[idx].blue = data[threadIdx.y][threadIdx.x].blue * filter[0] + data[threadIdx.y][threadIdx.x + 1].blue * filter[1] + data[threadIdx.y][threadIdx.x + 2].blue * filter[2] + data[threadIdx.y + 1][threadIdx.x].blue * filter[3] + data[threadIdx.y + 1][threadIdx.x + 1].blue * filter[4] + 
data[threadIdx.y + 1][threadIdx.x + 2].blue * filter[5] + data[threadIdx.y + 2][threadIdx.x].blue * filter[6] + data[threadIdx.y + 2][threadIdx.x + 1].blue * filter[7] + data[threadIdx.y + 2][threadIdx.x + 2].blue * filter[8]; } } void your_gaussian_blur_func(PPMImage *img) { PPMImage *host_img; // for assigning PPMPixel pointer on device host_img = (PPMImage *) malloc(sizeof(PPMImage)); memcpy(host_img, img, sizeof(PPMImage)); CUDA_CHECK(cudaMalloc((void**)&(host_img->data), img->x * img->y * sizeof(PPMPixel))); // allocate PPMPixel pointer on device CUDA_CHECK(cudaMemcpy(host_img->data, img->data, img->x * img->y * sizeof(PPMPixel), cudaMemcpyHostToDevice)); // copy PPMPixel data to device // PPMPixel data is now on the gpu, now copy the "meta" data to gpu PPMImage *dev_img; // for assigning PPMImage on device CUDA_CHECK(cudaMalloc((void**)&dev_img, sizeof(PPMImage))); // allocate memory on device CUDA_CHECK(cudaMemcpy(dev_img, host_img, sizeof(PPMImage), cudaMemcpyHostToDevice)); // copy memory to device PPMPixel *out_data; CUDA_CHECK(cudaMalloc((void**)&(out_data), img->x * img->y * sizeof(PPMPixel))); // allocate PPMPixel pointer on device dim3 threadsPerBlock = dim3(thread_x, thread_y); dim3 blocksPerGrid = dim3((img->x + thread_x - 1) / thread_x, (img->y + thread_y - 1) / thread_y); blur_kernel<<<blocksPerGrid, threadsPerBlock>>>(dev_img, out_data); CUDA_CHECK(cudaMemcpy(img->data, out_data, img->x * img->y * sizeof(PPMPixel), cudaMemcpyDeviceToHost)); // copy memory to host CUDA_CHECK(cudaFree(out_data)); CUDA_CHECK(cudaFree(host_img->data)); CUDA_CHECK(cudaFree(dev_img)); free(host_img); } int main(){ // read PPMImage *image; image = readPPM("input.ppm"); // record execution time float time; cudaEvent_t start, stop; CUDA_CHECK(cudaEventCreate(&start)); CUDA_CHECK(cudaEventCreate(&stop)); CUDA_CHECK(cudaEventRecord(start, 0)); your_gaussian_blur_func(image); CUDA_CHECK(cudaEventRecord(stop, 0)); CUDA_CHECK(cudaEventSynchronize(stop)); 
CUDA_CHECK(cudaEventElapsedTime(&time, start, stop)); printf("Time to generate: %3.1f ms \n", time); // write writePPM("output.ppm",image); }
000e84c00eb159707f0538a506b40d56105e2da2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int dims_update_halo_kernel5_minus_4_front [3][2]; static int dims_update_halo_kernel5_minus_4_front_h [3][2] = {0}; //user function __device__ inline void update_halo_kernel5_minus_4_front_gpu(ACC<double> &vol_flux_z, ACC<double> &mass_flux_z, const int* fields) { if(fields[FIELD_VOL_FLUX_Z] == 1) vol_flux_z(0,0,0) = -vol_flux_z(0,0,-4); if(fields[FIELD_MASS_FLUX_Z] == 1) mass_flux_z(0,0,0) = -mass_flux_z(0,0,-4); } __global__ void ops_update_halo_kernel5_minus_4_front( double* __restrict arg0, double* __restrict arg1, const int* __restrict arg2, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel5_minus_4_front[0][0] + idx_z * 1*1 * dims_update_halo_kernel5_minus_4_front[0][0] * dims_update_halo_kernel5_minus_4_front[0][1]; arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel5_minus_4_front[1][0] + idx_z * 1*1 * dims_update_halo_kernel5_minus_4_front[1][0] * dims_update_halo_kernel5_minus_4_front[1][1]; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { ACC<double> argp0(dims_update_halo_kernel5_minus_4_front[0][0], dims_update_halo_kernel5_minus_4_front[0][1], arg0); ACC<double> argp1(dims_update_halo_kernel5_minus_4_front[1][0], dims_update_halo_kernel5_minus_4_front[1][1], arg1); update_halo_kernel5_minus_4_front_gpu(argp0, argp1, arg2); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel5_minus_4_front(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { #else void ops_par_loop_update_halo_kernel5_minus_4_front_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; #if OPS_MPI ops_block block = desc->block; #endif int *range = desc->range; 
ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; #endif //Timing double t1,t2,c1,c2; ops_arg args[3] = { arg0, arg1, arg2}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,3,range,94)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(94,"update_halo_kernel5_minus_4_front"); OPS_kernels[94].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; #endif //OPS_MPI #ifdef OPS_MPI int arg_idx[3]; #endif #ifdef OPS_MPI if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return; #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != dims_update_halo_kernel5_minus_4_front_h[0][0] || ydim0 != dims_update_halo_kernel5_minus_4_front_h[0][1] || xdim1 != dims_update_halo_kernel5_minus_4_front_h[1][0] || ydim1 != dims_update_halo_kernel5_minus_4_front_h[1][1]) { dims_update_halo_kernel5_minus_4_front_h[0][0] = xdim0; dims_update_halo_kernel5_minus_4_front_h[0][1] = ydim0; dims_update_halo_kernel5_minus_4_front_h[1][0] = xdim1; dims_update_halo_kernel5_minus_4_front_h[1][1] = ydim1; cutilSafeCall(hipMemcpyToSymbol( dims_update_halo_kernel5_minus_4_front, dims_update_halo_kernel5_minus_4_front_h, sizeof(dims_update_halo_kernel5_minus_4_front))); } int *arg2h = (int *)arg2.data; int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; 
arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); char *p_a[3]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args,3,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[94].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) hipLaunchKernelGGL(( ops_update_halo_kernel5_minus_4_front), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d,x_size, y_size, z_size); cutilSafeCall(hipGetLastError()); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[94].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[94].mpi_time += t2-t1; OPS_kernels[94].transfer += ops_compute_transfer(dim, start, end, &arg0); 
OPS_kernels[94].transfer += ops_compute_transfer(dim, start, end, &arg1); } } #ifdef OPS_LAZY void ops_par_loop_update_halo_kernel5_minus_4_front(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 94; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 94; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 3; desc->args = (ops_arg*)malloc(3*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int)); memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int)); desc->args[2].data = tmp; desc->function = ops_par_loop_update_halo_kernel5_minus_4_front_execute; if (OPS_diags > 1) { ops_timing_realloc(94,"update_halo_kernel5_minus_4_front"); } ops_enqueue_kernel(desc); } #endif
000e84c00eb159707f0538a506b40d56105e2da2.cu
// // auto-generated by ops.py // __constant__ int dims_update_halo_kernel5_minus_4_front [3][2]; static int dims_update_halo_kernel5_minus_4_front_h [3][2] = {0}; //user function __device__ inline void update_halo_kernel5_minus_4_front_gpu(ACC<double> &vol_flux_z, ACC<double> &mass_flux_z, const int* fields) { if(fields[FIELD_VOL_FLUX_Z] == 1) vol_flux_z(0,0,0) = -vol_flux_z(0,0,-4); if(fields[FIELD_MASS_FLUX_Z] == 1) mass_flux_z(0,0,0) = -mass_flux_z(0,0,-4); } __global__ void ops_update_halo_kernel5_minus_4_front( double* __restrict arg0, double* __restrict arg1, const int* __restrict arg2, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel5_minus_4_front[0][0] + idx_z * 1*1 * dims_update_halo_kernel5_minus_4_front[0][0] * dims_update_halo_kernel5_minus_4_front[0][1]; arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel5_minus_4_front[1][0] + idx_z * 1*1 * dims_update_halo_kernel5_minus_4_front[1][0] * dims_update_halo_kernel5_minus_4_front[1][1]; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { ACC<double> argp0(dims_update_halo_kernel5_minus_4_front[0][0], dims_update_halo_kernel5_minus_4_front[0][1], arg0); ACC<double> argp1(dims_update_halo_kernel5_minus_4_front[1][0], dims_update_halo_kernel5_minus_4_front[1][1], arg1); update_halo_kernel5_minus_4_front_gpu(argp0, argp1, arg2); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel5_minus_4_front(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { #else void ops_par_loop_update_halo_kernel5_minus_4_front_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; #if OPS_MPI ops_block block = desc->block; #endif int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; 
#endif //Timing double t1,t2,c1,c2; ops_arg args[3] = { arg0, arg1, arg2}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,3,range,94)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(94,"update_halo_kernel5_minus_4_front"); OPS_kernels[94].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; #endif //OPS_MPI #ifdef OPS_MPI int arg_idx[3]; #endif #ifdef OPS_MPI if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return; #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != dims_update_halo_kernel5_minus_4_front_h[0][0] || ydim0 != dims_update_halo_kernel5_minus_4_front_h[0][1] || xdim1 != dims_update_halo_kernel5_minus_4_front_h[1][0] || ydim1 != dims_update_halo_kernel5_minus_4_front_h[1][1]) { dims_update_halo_kernel5_minus_4_front_h[0][0] = xdim0; dims_update_halo_kernel5_minus_4_front_h[0][1] = ydim0; dims_update_halo_kernel5_minus_4_front_h[1][0] = xdim1; dims_update_halo_kernel5_minus_4_front_h[1][1] = ydim1; cutilSafeCall(cudaMemcpyToSymbol( dims_update_halo_kernel5_minus_4_front, dims_update_halo_kernel5_minus_4_front_h, sizeof(dims_update_halo_kernel5_minus_4_front))); } int *arg2h = (int *)arg2.data; int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for 
(int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); char *p_a[3]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args,3,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[94].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) ops_update_halo_kernel5_minus_4_front<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d,x_size, y_size, z_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[94].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[94].mpi_time += t2-t1; OPS_kernels[94].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[94].transfer += ops_compute_transfer(dim, start, end, &arg1); } } #ifdef OPS_LAZY void 
ops_par_loop_update_halo_kernel5_minus_4_front(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 94; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 94; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 3; desc->args = (ops_arg*)malloc(3*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int)); memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int)); desc->args[2].data = tmp; desc->function = ops_par_loop_update_halo_kernel5_minus_4_front_execute; if (OPS_diags > 1) { ops_timing_realloc(94,"update_halo_kernel5_minus_4_front"); } ops_enqueue_kernel(desc); } #endif
e7447852560523e7ab7ed2e89101094dd9ae89d6.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> using namespace std; long layer_sizes[] = {56l * 56 * 96, 28l * 28 * 96, 27l * 27 * 256, 13l * 13 * 256, 13l * 12 * 384, 13l * 12 * 384, 13l * 13 * 256, 6l * 6 * 256}; int num_layers = 8; int main() { hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); // long size_to_alloc = layer_sizes[0]; for (int j = 0; j < num_layers; j++) { long size_to_alloc = layer_sizes[j]; int num_pieces = 8; void *p[num_pieces]; for (int i = 0; i < num_pieces; i++) { hipHostMalloc(&p[i], size_to_alloc / num_pieces); } for (int i = 0; i < num_pieces; i++) { hipHostFree(p[i]); } } hipEventRecord(stop); hipEventSynchronize(stop); float milli; hipEventElapsedTime(&milli, start, stop); cout << "allocating and freeing pieces(ms): " << milli << endl; void *p_bulk; hipEventRecord(start); for (int j = 0; j < num_layers; j++) { long size_to_alloc = layer_sizes[j]; hipHostMalloc(&p_bulk, size_to_alloc); hipHostFree(p_bulk); } hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&milli, start, stop); cout << "allocating and freeing bulk(ms): " << milli << endl; }
e7447852560523e7ab7ed2e89101094dd9ae89d6.cu
#include <iostream> using namespace std; long layer_sizes[] = {56l * 56 * 96, 28l * 28 * 96, 27l * 27 * 256, 13l * 13 * 256, 13l * 12 * 384, 13l * 12 * 384, 13l * 13 * 256, 6l * 6 * 256}; int num_layers = 8; int main() { cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); // long size_to_alloc = layer_sizes[0]; for (int j = 0; j < num_layers; j++) { long size_to_alloc = layer_sizes[j]; int num_pieces = 8; void *p[num_pieces]; for (int i = 0; i < num_pieces; i++) { cudaMallocHost(&p[i], size_to_alloc / num_pieces); } for (int i = 0; i < num_pieces; i++) { cudaFreeHost(p[i]); } } cudaEventRecord(stop); cudaEventSynchronize(stop); float milli; cudaEventElapsedTime(&milli, start, stop); cout << "allocating and freeing pieces(ms): " << milli << endl; void *p_bulk; cudaEventRecord(start); for (int j = 0; j < num_layers; j++) { long size_to_alloc = layer_sizes[j]; cudaMallocHost(&p_bulk, size_to_alloc); cudaFreeHost(p_bulk); } cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milli, start, stop); cout << "allocating and freeing bulk(ms): " << milli << endl; }
778efff80f481c170a51c6876286bc535d68b803.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2016 Fixstars Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http ://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <cstdio> #include "census_transform.hpp" namespace sgm { namespace { static constexpr int WINDOW_WIDTH = 9; static constexpr int WINDOW_HEIGHT = 7; static constexpr int BLOCK_SIZE = 128; static constexpr int LINES_PER_BLOCK = 16; template <typename T> __global__ void census_transform_kernel( feature_type *dest, const T *src, int width, int height, int pitch) { using pixel_type = T; static const int SMEM_BUFFER_SIZE = WINDOW_HEIGHT + 1; const int half_kw = WINDOW_WIDTH / 2; const int half_kh = WINDOW_HEIGHT / 2; __shared__ pixel_type smem_lines[SMEM_BUFFER_SIZE][BLOCK_SIZE]; const int tid = threadIdx.x; const int x0 = blockIdx.x * (BLOCK_SIZE - WINDOW_WIDTH + 1) - half_kw; const int y0 = blockIdx.y * LINES_PER_BLOCK; for(int i = 0; i < WINDOW_HEIGHT; ++i){ const int x = x0 + tid, y = y0 - half_kh + i; pixel_type value = 0; if(0 <= x && x < width && 0 <= y && y < height){ value = src[x + y * pitch]; } smem_lines[i][tid] = value; } __syncthreads(); #pragma unroll for(int i = 0; i < LINES_PER_BLOCK; ++i){ if(i + 1 < LINES_PER_BLOCK){ // Load to smem const int x = x0 + tid, y = y0 + half_kh + i + 1; pixel_type value = 0; if(0 <= x && x < width && 0 <= y && y < height){ value = src[x + y * pitch]; } const int smem_x = tid; const int smem_y = (WINDOW_HEIGHT + i) % SMEM_BUFFER_SIZE; smem_lines[smem_y][smem_x] = value; } 
if(half_kw <= tid && tid < BLOCK_SIZE - half_kw){ // Compute and store const int x = x0 + tid, y = y0 + i; if(half_kw <= x && x < width - half_kw && half_kh <= y && y < height - half_kh){ const int smem_x = tid; const int smem_y = (half_kh + i) % SMEM_BUFFER_SIZE; feature_type f = 0; for(int dy = -half_kh; dy < 0; ++dy){ const int smem_y1 = (smem_y + dy + SMEM_BUFFER_SIZE) % SMEM_BUFFER_SIZE; const int smem_y2 = (smem_y - dy + SMEM_BUFFER_SIZE) % SMEM_BUFFER_SIZE; for(int dx = -half_kw; dx <= half_kw; ++dx){ const int smem_x1 = smem_x + dx; const int smem_x2 = smem_x - dx; const auto a = smem_lines[smem_y1][smem_x1]; const auto b = smem_lines[smem_y2][smem_x2]; f = (f << 1) | (a > b); } } for(int dx = -half_kw; dx < 0; ++dx){ const int smem_x1 = smem_x + dx; const int smem_x2 = smem_x - dx; const auto a = smem_lines[smem_y][smem_x1]; const auto b = smem_lines[smem_y][smem_x2]; f = (f << 1) | (a > b); } dest[x + y * width] = f; } } __syncthreads(); } } template <typename T> void enqueue_census_transform( feature_type *dest, const T *src, int width, int height, int pitch, hipStream_t stream) { const int width_per_block = BLOCK_SIZE - WINDOW_WIDTH + 1; const int height_per_block = LINES_PER_BLOCK; const dim3 gdim( (width + width_per_block - 1) / width_per_block, (height + height_per_block - 1) / height_per_block); const dim3 bdim(BLOCK_SIZE); hipLaunchKernelGGL(( census_transform_kernel), dim3(gdim), dim3(bdim), 0, stream, dest, src, width, height, pitch); } } template <typename T> CensusTransform<T>::CensusTransform() : m_feature_buffer() { } template <typename T> void CensusTransform<T>::enqueue( const input_type *src, int width, int height, int pitch, hipStream_t stream) { if(m_feature_buffer.size() != static_cast<size_t>(width * height)){ m_feature_buffer = DeviceBuffer<feature_type>(width * height); } enqueue_census_transform( m_feature_buffer.data(), src, width, height, pitch, stream); } template class CensusTransform<uint8_t>; template class 
CensusTransform<uint16_t>; }
778efff80f481c170a51c6876286bc535d68b803.cu
/* Copyright 2016 Fixstars Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http ://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <cstdio> #include "census_transform.hpp" namespace sgm { namespace { static constexpr int WINDOW_WIDTH = 9; static constexpr int WINDOW_HEIGHT = 7; static constexpr int BLOCK_SIZE = 128; static constexpr int LINES_PER_BLOCK = 16; template <typename T> __global__ void census_transform_kernel( feature_type *dest, const T *src, int width, int height, int pitch) { using pixel_type = T; static const int SMEM_BUFFER_SIZE = WINDOW_HEIGHT + 1; const int half_kw = WINDOW_WIDTH / 2; const int half_kh = WINDOW_HEIGHT / 2; __shared__ pixel_type smem_lines[SMEM_BUFFER_SIZE][BLOCK_SIZE]; const int tid = threadIdx.x; const int x0 = blockIdx.x * (BLOCK_SIZE - WINDOW_WIDTH + 1) - half_kw; const int y0 = blockIdx.y * LINES_PER_BLOCK; for(int i = 0; i < WINDOW_HEIGHT; ++i){ const int x = x0 + tid, y = y0 - half_kh + i; pixel_type value = 0; if(0 <= x && x < width && 0 <= y && y < height){ value = src[x + y * pitch]; } smem_lines[i][tid] = value; } __syncthreads(); #pragma unroll for(int i = 0; i < LINES_PER_BLOCK; ++i){ if(i + 1 < LINES_PER_BLOCK){ // Load to smem const int x = x0 + tid, y = y0 + half_kh + i + 1; pixel_type value = 0; if(0 <= x && x < width && 0 <= y && y < height){ value = src[x + y * pitch]; } const int smem_x = tid; const int smem_y = (WINDOW_HEIGHT + i) % SMEM_BUFFER_SIZE; smem_lines[smem_y][smem_x] = value; } if(half_kw <= tid && tid < BLOCK_SIZE - half_kw){ // Compute and store const int x = x0 + tid, 
y = y0 + i; if(half_kw <= x && x < width - half_kw && half_kh <= y && y < height - half_kh){ const int smem_x = tid; const int smem_y = (half_kh + i) % SMEM_BUFFER_SIZE; feature_type f = 0; for(int dy = -half_kh; dy < 0; ++dy){ const int smem_y1 = (smem_y + dy + SMEM_BUFFER_SIZE) % SMEM_BUFFER_SIZE; const int smem_y2 = (smem_y - dy + SMEM_BUFFER_SIZE) % SMEM_BUFFER_SIZE; for(int dx = -half_kw; dx <= half_kw; ++dx){ const int smem_x1 = smem_x + dx; const int smem_x2 = smem_x - dx; const auto a = smem_lines[smem_y1][smem_x1]; const auto b = smem_lines[smem_y2][smem_x2]; f = (f << 1) | (a > b); } } for(int dx = -half_kw; dx < 0; ++dx){ const int smem_x1 = smem_x + dx; const int smem_x2 = smem_x - dx; const auto a = smem_lines[smem_y][smem_x1]; const auto b = smem_lines[smem_y][smem_x2]; f = (f << 1) | (a > b); } dest[x + y * width] = f; } } __syncthreads(); } } template <typename T> void enqueue_census_transform( feature_type *dest, const T *src, int width, int height, int pitch, cudaStream_t stream) { const int width_per_block = BLOCK_SIZE - WINDOW_WIDTH + 1; const int height_per_block = LINES_PER_BLOCK; const dim3 gdim( (width + width_per_block - 1) / width_per_block, (height + height_per_block - 1) / height_per_block); const dim3 bdim(BLOCK_SIZE); census_transform_kernel<<<gdim, bdim, 0, stream>>>(dest, src, width, height, pitch); } } template <typename T> CensusTransform<T>::CensusTransform() : m_feature_buffer() { } template <typename T> void CensusTransform<T>::enqueue( const input_type *src, int width, int height, int pitch, cudaStream_t stream) { if(m_feature_buffer.size() != static_cast<size_t>(width * height)){ m_feature_buffer = DeviceBuffer<feature_type>(width * height); } enqueue_census_transform( m_feature_buffer.data(), src, width, height, pitch, stream); } template class CensusTransform<uint8_t>; template class CensusTransform<uint16_t>; }
cc49a66626d71590dba6a2e3b02967fedb77bbdd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // ============================================================================= // PROJECT CHRONO - http://projectchrono.org // // Copyright (c) 2019 projectchrono.org // All rights reserved. // // Use of this source code is governed by a BSD-style license that can be found // in the LICENSE file at the top level of the distribution and at // http://projectchrono.org/license-chrono.txt. // // ============================================================================= // Authors: Conlain Kelly, Nic Olsen, Dan Negrut // ============================================================================= #include <cmath> #include <numeric> #include "chrono_granular/physics/ChGranularGPU_SMC.cuh" #include "chrono_granular/utils/ChGranularUtilities.h" namespace chrono { namespace granular { __host__ double ChSystemGranularSMC::get_max_z() const { size_t nSpheres = sphere_local_pos_Z.size(); std::vector<int64_t> sphere_pos_global_Z; sphere_pos_global_Z.resize(nSpheres); for (size_t index = 0; index < nSpheres; index++) { unsigned int ownerSD = sphere_data->sphere_owner_SDs[index]; int3 sphere_pos_local = make_int3(sphere_data->sphere_local_pos_X[index], sphere_data->sphere_local_pos_Y[index], sphere_data->sphere_local_pos_Z[index]); sphere_pos_global_Z[index] = convertPosLocalToGlobal(ownerSD, sphere_pos_local, gran_params).z; } double max_z_SU = *(std::max_element(sphere_pos_global_Z.begin(), sphere_pos_global_Z.end())); double max_z_UU = max_z_SU * LENGTH_SU2UU; return max_z_UU; } // Reset broadphase data structures void ChSystemGranularSMC::resetBroadphaseInformation() { // Set all the offsets to zero gpuErrchk(hipMemset(SD_NumSpheresTouching.data(), 0, SD_NumSpheresTouching.size() * sizeof(unsigned int))); gpuErrchk(hipMemset(SD_SphereCompositeOffsets.data(), 0, SD_SphereCompositeOffsets.size() * sizeof(unsigned int))); // For each SD, all the spheres touching that SD should have 
their ID be NULL_GRANULAR_ID gpuErrchk(hipMemset(spheres_in_SD_composite.data(), NULL_GRANULAR_ID, spheres_in_SD_composite.size() * sizeof(unsigned int))); gpuErrchk(hipDeviceSynchronize()); } // Reset sphere acceleration data structures void ChSystemGranularSMC::resetSphereAccelerations() { // cache past acceleration data if (time_integrator == GRAN_TIME_INTEGRATOR::CHUNG) { gpuErrchk(hipMemcpy(sphere_acc_X_old.data(), sphere_acc_X.data(), nSpheres * sizeof(float), hipMemcpyDeviceToDevice)); gpuErrchk(hipMemcpy(sphere_acc_Y_old.data(), sphere_acc_Y.data(), nSpheres * sizeof(float), hipMemcpyDeviceToDevice)); gpuErrchk(hipMemcpy(sphere_acc_Z_old.data(), sphere_acc_Z.data(), nSpheres * sizeof(float), hipMemcpyDeviceToDevice)); // if we have multistep AND friction, cache old alphas if (gran_params->friction_mode != FRICTIONLESS) { gpuErrchk(hipMemcpy(sphere_ang_acc_X_old.data(), sphere_ang_acc_X.data(), nSpheres * sizeof(float), hipMemcpyDeviceToDevice)); gpuErrchk(hipMemcpy(sphere_ang_acc_Y_old.data(), sphere_ang_acc_Y.data(), nSpheres * sizeof(float), hipMemcpyDeviceToDevice)); gpuErrchk(hipMemcpy(sphere_ang_acc_Z_old.data(), sphere_ang_acc_Z.data(), nSpheres * sizeof(float), hipMemcpyDeviceToDevice)); } gpuErrchk(hipDeviceSynchronize()); } // reset current accelerations to zero to zero gpuErrchk(hipMemset(sphere_acc_X.data(), 0, nSpheres * sizeof(float))); gpuErrchk(hipMemset(sphere_acc_Y.data(), 0, nSpheres * sizeof(float))); gpuErrchk(hipMemset(sphere_acc_Z.data(), 0, nSpheres * sizeof(float))); // reset torques to zero, if applicable if (gran_params->friction_mode != FRICTIONLESS) { gpuErrchk(hipMemset(sphere_ang_acc_X.data(), 0, nSpheres * sizeof(float))); gpuErrchk(hipMemset(sphere_ang_acc_Y.data(), 0, nSpheres * sizeof(float))); gpuErrchk(hipMemset(sphere_ang_acc_Z.data(), 0, nSpheres * sizeof(float))); } } __global__ void compute_absv(const unsigned int nSpheres, const float* velX, const float* velY, const float* velZ, float* d_absv) { unsigned int 
my_sphere = blockIdx.x * blockDim.x + threadIdx.x; if (my_sphere < nSpheres) { float v[3] = {velX[my_sphere], velY[my_sphere], velZ[my_sphere]}; d_absv[my_sphere] = v[0] * v[0] + v[1] * v[1] + v[2] * v[2]; } } __host__ float ChSystemGranularSMC::get_max_vel() const { float* d_absv; float* d_max_vel; float h_max_vel; gpuErrchk(hipMalloc(&d_absv, nSpheres * sizeof(float))); gpuErrchk(hipMalloc(&d_max_vel, sizeof(float))); hipLaunchKernelGGL(( compute_absv), dim3((nSpheres + 255) / 256), dim3(256), 0, 0, nSpheres, pos_X_dt.data(), pos_Y_dt.data(), pos_Z_dt.data(), d_absv); void* d_temp_storage = NULL; size_t temp_storage_bytes = 0; hipcub::DeviceReduce::Max(d_temp_storage, temp_storage_bytes, d_absv, d_max_vel, nSpheres); gpuErrchk(hipMalloc(&d_temp_storage, temp_storage_bytes)); hipcub::DeviceReduce::Max(d_temp_storage, temp_storage_bytes, d_absv, d_max_vel, nSpheres); gpuErrchk(hipMemcpy(&h_max_vel, d_max_vel, sizeof(float), hipMemcpyDeviceToHost)); gpuErrchk(hipFree(d_absv)); gpuErrchk(hipFree(d_max_vel)); return h_max_vel; } __host__ int3 ChSystemGranularSMC::getSDTripletFromID(unsigned int SD_ID) const { return SDIDTriplet(SD_ID, gran_params); } /// Sort sphere positions by subdomain id /// Occurs entirely on host, not intended to be efficient /// ONLY DO AT BEGINNING OF SIMULATION __host__ void ChSystemGranularSMC::defragment_initial_positions() { // key and value pointers std::vector<unsigned int, cudallocator<unsigned int>> sphere_ids; // load sphere indices sphere_ids.resize(nSpheres); std::iota(sphere_ids.begin(), sphere_ids.end(), 0); // sort sphere ids by owner SD std::sort(sphere_ids.begin(), sphere_ids.end(), [&](std::size_t i, std::size_t j) { return sphere_owner_SDs.at(i) < sphere_owner_SDs.at(j); }); std::vector<int, cudallocator<int>> sphere_pos_x_tmp; std::vector<int, cudallocator<int>> sphere_pos_y_tmp; std::vector<int, cudallocator<int>> sphere_pos_z_tmp; std::vector<float, cudallocator<float>> sphere_vel_x_tmp; std::vector<float, 
cudallocator<float>> sphere_vel_y_tmp; std::vector<float, cudallocator<float>> sphere_vel_z_tmp; std::vector<not_stupid_bool, cudallocator<not_stupid_bool>> sphere_fixed_tmp; std::vector<unsigned int, cudallocator<unsigned int>> sphere_owner_SDs_tmp; sphere_pos_x_tmp.resize(nSpheres); sphere_pos_y_tmp.resize(nSpheres); sphere_pos_z_tmp.resize(nSpheres); sphere_vel_x_tmp.resize(nSpheres); sphere_vel_y_tmp.resize(nSpheres); sphere_vel_z_tmp.resize(nSpheres); sphere_fixed_tmp.resize(nSpheres); sphere_owner_SDs_tmp.resize(nSpheres); // reorder values into new sorted for (unsigned int i = 0; i < nSpheres; i++) { sphere_pos_x_tmp.at(i) = sphere_local_pos_X.at(sphere_ids.at(i)); sphere_pos_y_tmp.at(i) = sphere_local_pos_Y.at(sphere_ids.at(i)); sphere_pos_z_tmp.at(i) = sphere_local_pos_Z.at(sphere_ids.at(i)); sphere_vel_x_tmp.at(i) = pos_X_dt.at(sphere_ids.at(i)); sphere_vel_y_tmp.at(i) = pos_Y_dt.at(sphere_ids.at(i)); sphere_vel_z_tmp.at(i) = pos_Z_dt.at(sphere_ids.at(i)); sphere_fixed_tmp.at(i) = sphere_fixed.at(sphere_ids.at(i)); sphere_owner_SDs_tmp.at(i) = sphere_owner_SDs.at(sphere_ids.at(i)); } // swap into the correct data structures sphere_local_pos_X.swap(sphere_pos_x_tmp); sphere_local_pos_Y.swap(sphere_pos_y_tmp); sphere_local_pos_Z.swap(sphere_pos_z_tmp); pos_X_dt.swap(sphere_vel_x_tmp); pos_Y_dt.swap(sphere_vel_y_tmp); pos_Z_dt.swap(sphere_vel_z_tmp); sphere_fixed.swap(sphere_fixed_tmp); sphere_owner_SDs.swap(sphere_owner_SDs_tmp); } __host__ void ChSystemGranularSMC::setupSphereDataStructures() { // Each fills user_sphere_positions with positions to be copied if (user_sphere_positions.size() == 0) { printf("ERROR: no sphere positions given!\n"); exit(1); } nSpheres = (unsigned int)user_sphere_positions.size(); INFO_PRINTF("%u balls added!\n", nSpheres); gran_params->nSpheres = nSpheres; TRACK_VECTOR_RESIZE(sphere_owner_SDs, nSpheres, "sphere_owner_SDs", NULL_GRANULAR_ID); // Allocate space for new bodies TRACK_VECTOR_RESIZE(sphere_local_pos_X, nSpheres, 
"sphere_local_pos_X", 0); TRACK_VECTOR_RESIZE(sphere_local_pos_Y, nSpheres, "sphere_local_pos_Y", 0); TRACK_VECTOR_RESIZE(sphere_local_pos_Z, nSpheres, "sphere_local_pos_Z", 0); TRACK_VECTOR_RESIZE(sphere_fixed, nSpheres, "sphere_fixed", 0); TRACK_VECTOR_RESIZE(pos_X_dt, nSpheres, "pos_X_dt", 0); TRACK_VECTOR_RESIZE(pos_Y_dt, nSpheres, "pos_Y_dt", 0); TRACK_VECTOR_RESIZE(pos_Z_dt, nSpheres, "pos_Z_dt", 0); // temporarily store global positions as 64-bit, discard as soon as local positions are loaded { bool user_provided_fixed = user_sphere_fixed.size() != 0; bool user_provided_vel = user_sphere_vel.size() != 0; if ((user_provided_fixed && user_sphere_fixed.size() != nSpheres) || (user_provided_vel && user_sphere_vel.size() != nSpheres)) { printf("Provided fixity or velocity array does not match provided particle positions\n"); exit(1); } std::vector<int64_t, cudallocator<int64_t>> sphere_global_pos_X; std::vector<int64_t, cudallocator<int64_t>> sphere_global_pos_Y; std::vector<int64_t, cudallocator<int64_t>> sphere_global_pos_Z; sphere_global_pos_X.resize(nSpheres); sphere_global_pos_Y.resize(nSpheres); sphere_global_pos_Z.resize(nSpheres); // Copy from array of structs to 3 arrays for (unsigned int i = 0; i < nSpheres; i++) { float3 vec = user_sphere_positions.at(i); // cast to double, convert to SU, then cast to int64_t sphere_global_pos_X.at(i) = (int64_t)((double)vec.x / LENGTH_SU2UU); sphere_global_pos_Y.at(i) = (int64_t)((double)vec.y / LENGTH_SU2UU); sphere_global_pos_Z.at(i) = (int64_t)((double)vec.z / LENGTH_SU2UU); // Convert to not_stupid_bool sphere_fixed.at(i) = (not_stupid_bool)((user_provided_fixed) ? 
user_sphere_fixed[i] : false); if (user_provided_vel) { auto vel = user_sphere_vel.at(i); pos_X_dt.at(i) = vel.x / VEL_SU2UU; pos_Y_dt.at(i) = vel.y / VEL_SU2UU; pos_Z_dt.at(i) = vel.z / VEL_SU2UU; } } packSphereDataPointers(); // Figure our the number of blocks that need to be launched to cover the box unsigned int nBlocks = (nSpheres + CUDA_THREADS_PER_BLOCK - 1) / CUDA_THREADS_PER_BLOCK; hipLaunchKernelGGL(( initializeLocalPositions), dim3(nBlocks), dim3(CUDA_THREADS_PER_BLOCK), 0, 0, sphere_data, sphere_global_pos_X.data(), sphere_global_pos_Y.data(), sphere_global_pos_Z.data(), nSpheres, gran_params); gpuErrchk(hipDeviceSynchronize()); gpuErrchk(hipPeekAtLastError()); defragment_initial_positions(); } TRACK_VECTOR_RESIZE(sphere_acc_X, nSpheres, "sphere_acc_X", 0); TRACK_VECTOR_RESIZE(sphere_acc_Y, nSpheres, "sphere_acc_Y", 0); TRACK_VECTOR_RESIZE(sphere_acc_Z, nSpheres, "sphere_acc_Z", 0); // NOTE that this will get resized again later, this is just the first estimate TRACK_VECTOR_RESIZE(spheres_in_SD_composite, 2 * nSpheres, "spheres_in_SD_composite", NULL_GRANULAR_ID); if (gran_params->friction_mode != GRAN_FRICTION_MODE::FRICTIONLESS) { // add rotational DOFs TRACK_VECTOR_RESIZE(sphere_Omega_X, nSpheres, "sphere_Omega_X", 0); TRACK_VECTOR_RESIZE(sphere_Omega_Y, nSpheres, "sphere_Omega_Y", 0); TRACK_VECTOR_RESIZE(sphere_Omega_Z, nSpheres, "sphere_Omega_Z", 0); // add torques TRACK_VECTOR_RESIZE(sphere_ang_acc_X, nSpheres, "sphere_ang_acc_X", 0); TRACK_VECTOR_RESIZE(sphere_ang_acc_Y, nSpheres, "sphere_ang_acc_Y", 0); TRACK_VECTOR_RESIZE(sphere_ang_acc_Z, nSpheres, "sphere_ang_acc_Z", 0); } if (gran_params->friction_mode == GRAN_FRICTION_MODE::MULTI_STEP || gran_params->friction_mode == GRAN_FRICTION_MODE::SINGLE_STEP) { TRACK_VECTOR_RESIZE(contact_partners_map, 12 * nSpheres, "contact_partners_map", NULL_GRANULAR_ID); TRACK_VECTOR_RESIZE(contact_active_map, 12 * nSpheres, "contact_active_map", false); } if (gran_params->friction_mode == 
GRAN_FRICTION_MODE::MULTI_STEP) { float3 null_history = {0., 0., 0.}; TRACK_VECTOR_RESIZE(contact_history_map, 12 * nSpheres, "contact_history_map", null_history); } if (time_integrator == GRAN_TIME_INTEGRATOR::CHUNG) { TRACK_VECTOR_RESIZE(sphere_acc_X_old, nSpheres, "sphere_acc_X_old", 0); TRACK_VECTOR_RESIZE(sphere_acc_Y_old, nSpheres, "sphere_acc_Y_old", 0); TRACK_VECTOR_RESIZE(sphere_acc_Z_old, nSpheres, "sphere_acc_Z_old", 0); // friction and multistep means keep old ang acc if (gran_params->friction_mode != GRAN_FRICTION_MODE::FRICTIONLESS) { TRACK_VECTOR_RESIZE(sphere_ang_acc_X_old, nSpheres, "sphere_ang_acc_X_old", 0); TRACK_VECTOR_RESIZE(sphere_ang_acc_Y_old, nSpheres, "sphere_ang_acc_Y_old", 0); TRACK_VECTOR_RESIZE(sphere_ang_acc_Z_old, nSpheres, "sphere_ang_acc_Z_old", 0); } } // make sure the right pointers are packed packSphereDataPointers(); } __host__ void ChSystemGranularSMC::runSphereBroadphase() { METRICS_PRINTF("Resetting broadphase info!\n"); resetBroadphaseInformation(); // Figure our the number of blocks that need to be launched to cover the box unsigned int nBlocks = (nSpheres + CUDA_THREADS_PER_BLOCK - 1) / CUDA_THREADS_PER_BLOCK; packSphereDataPointers(); hipLaunchKernelGGL(( sphereBroadphase_dryrun<CUDA_THREADS_PER_BLOCK>) , dim3(nBlocks), dim3(CUDA_THREADS_PER_BLOCK), 0, 0, sphere_data, nSpheres, gran_params); gpuErrchk(hipDeviceSynchronize()); gpuErrchk(hipPeekAtLastError()); void* d_temp_storage = NULL; size_t temp_storage_bytes = 0; // num spheres in last SD unsigned int last_SD_num_spheres = SD_NumSpheresTouching.at(nSDs - 1); unsigned int* out_ptr = SD_SphereCompositeOffsets.data(); unsigned int* in_ptr = SD_NumSpheresTouching.data(); // copy data into the tmp array gpuErrchk(hipMemcpy(out_ptr, in_ptr, nSDs * sizeof(unsigned int), hipMemcpyDeviceToDevice)); hipcub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, in_ptr, out_ptr, nSDs); gpuErrchk(hipDeviceSynchronize()); gpuErrchk(hipPeekAtLastError()); // Allocate 
temporary storage gpuErrchk(hipMalloc(&d_temp_storage, temp_storage_bytes)); gpuErrchk(hipDeviceSynchronize()); gpuErrchk(hipPeekAtLastError()); // Run exclusive prefix sum hipcub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, in_ptr, out_ptr, nSDs); gpuErrchk(hipDeviceSynchronize()); gpuErrchk(hipPeekAtLastError()); // total number of sphere entries to record unsigned int num_entries = out_ptr[nSDs - 1] + in_ptr[nSDs - 1]; spheres_in_SD_composite.resize(num_entries, NULL_GRANULAR_ID); // make sure the DEs pointer is updated packSphereDataPointers(); // printf("first run: num entries is %u, theoretical max is %u\n", num_entries, nSDs * MAX_COUNT_OF_SPHERES_PER_SD); // for (unsigned int i = 0; i < nSDs; i++) { // printf("SD %d has offset %u, N %u \n", i, out_ptr[i], in_ptr[i]); // } // back up the offsets // TODO use a cached allocator, CUB provides one std::vector<unsigned int, cudallocator<unsigned int>> SD_SphereCompositeOffsets_bak; SD_SphereCompositeOffsets_bak.resize(SD_SphereCompositeOffsets.size()); gpuErrchk(hipMemcpy(SD_SphereCompositeOffsets_bak.data(), SD_SphereCompositeOffsets.data(), nSDs * sizeof(unsigned int), hipMemcpyDeviceToDevice)); gpuErrchk(hipDeviceSynchronize()); gpuErrchk(hipPeekAtLastError()); hipLaunchKernelGGL(( sphereBroadphase<CUDA_THREADS_PER_BLOCK>), dim3(nBlocks), dim3(CUDA_THREADS_PER_BLOCK), 0, 0, sphere_data, nSpheres, gran_params); gpuErrchk(hipDeviceSynchronize()); gpuErrchk(hipPeekAtLastError()); // // for (unsigned int i = 0; i < nSDs; i++) { // printf("SD %d has offset %u, N %u \n", i, out_ptr[i], in_ptr[i]); // } // // for (unsigned int i = 0; i < num_entries; i++) { // printf("entry %u is %u\n", i, spheres_in_SD_composite[i]); // } // restore the old offsets gpuErrchk(hipMemcpy(SD_SphereCompositeOffsets.data(), SD_SphereCompositeOffsets_bak.data(), nSDs * sizeof(unsigned int), hipMemcpyDeviceToDevice)); gpuErrchk(hipFree(d_temp_storage)); } __host__ void ChSystemGranularSMC::updateBCPositions() { for 
(unsigned int i = 0; i < BC_params_list_UU.size(); i++) { auto bc_type = BC_type_list.at(i); const BC_params_t<float, float3>& params_UU = BC_params_list_UU.at(i); BC_params_t<int64_t, int64_t3>& params_SU = BC_params_list_SU.at(i); auto offset_function = BC_offset_function_list.at(i); setBCOffset(bc_type, params_UU, params_SU, offset_function(elapsedSimTime)); } if (!BD_is_fixed) { double3 new_BD_offset = BDOffsetFunction(elapsedSimTime); int64_t3 bd_offset_SU = {0, 0, 0}; bd_offset_SU.x = new_BD_offset.x / LENGTH_SU2UU; bd_offset_SU.y = new_BD_offset.y / LENGTH_SU2UU; bd_offset_SU.z = new_BD_offset.z / LENGTH_SU2UU; int64_t old_frame_X = gran_params->BD_frame_X; int64_t old_frame_Y = gran_params->BD_frame_Y; int64_t old_frame_Z = gran_params->BD_frame_Z; gran_params->BD_frame_X = bd_offset_SU.x + BD_rest_frame_SU.x; gran_params->BD_frame_Y = bd_offset_SU.y + BD_rest_frame_SU.y; gran_params->BD_frame_Z = bd_offset_SU.z + BD_rest_frame_SU.z; unsigned int nBlocks = (nSpheres + CUDA_THREADS_PER_BLOCK - 1) / CUDA_THREADS_PER_BLOCK; int64_t3 offset_delta = {0, 0, 0}; // if the frame X increases, the local X should decrease offset_delta.x = old_frame_X - gran_params->BD_frame_X; offset_delta.y = old_frame_Y - gran_params->BD_frame_Y; offset_delta.z = old_frame_Z - gran_params->BD_frame_Z; // printf("offset is %lld, %lld, %lld\n", offset_delta.x, offset_delta.y, offset_delta.z); packSphereDataPointers(); hipLaunchKernelGGL(( applyBDFrameChange), dim3(nBlocks), dim3(CUDA_THREADS_PER_BLOCK), 0, 0, offset_delta, sphere_data, nSpheres, gran_params); gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); } } __host__ double ChSystemGranularSMC::advance_simulation(float duration) { // Figure our the number of blocks that need to be launched to cover the box unsigned int nBlocks = (nSpheres + CUDA_THREADS_PER_BLOCK - 1) / CUDA_THREADS_PER_BLOCK; // Settling simulation loop. 
float duration_SU = duration / TIME_SU2UU; unsigned int nsteps = std::round(duration_SU / stepSize_SU); METRICS_PRINTF("advancing by %f at timestep %f, %u timesteps at approx user timestep %f\n", duration_SU, stepSize_SU, nsteps, duration / nsteps); float time_elapsed_SU = 0; // time elapsed in this advance call // Run the simulation, there are aggressive synchronizations because we want to have no race conditions for (; time_elapsed_SU < stepSize_SU * nsteps; time_elapsed_SU += stepSize_SU) { updateBCPositions(); runSphereBroadphase(); packSphereDataPointers(); gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); resetSphereAccelerations(); resetBCForces(); METRICS_PRINTF("Starting computeSphereForces!\n"); if (gran_params->friction_mode == FRICTIONLESS) { // Compute sphere-sphere forces hipLaunchKernelGGL(( computeSphereForces_frictionless), dim3(nSDs), dim3(MAX_COUNT_OF_SPHERES_PER_SD), 0, 0, sphere_data, gran_params, BC_type_list.data(), BC_params_list_SU.data(), BC_params_list_SU.size()); gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); } else if (gran_params->friction_mode == SINGLE_STEP || gran_params->friction_mode == MULTI_STEP) { // figure out who is contacting hipLaunchKernelGGL(( determineContactPairs), dim3(nSDs), dim3(MAX_COUNT_OF_SPHERES_PER_SD), 0, 0, sphere_data, gran_params); gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); hipLaunchKernelGGL(( computeSphereContactForces), dim3(nBlocks), dim3(CUDA_THREADS_PER_BLOCK), 0, 0, sphere_data, gran_params, BC_type_list.data(), BC_params_list_SU.data(), BC_params_list_SU.size(), nSpheres); gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); } METRICS_PRINTF("Starting integrateSpheres!\n"); hipLaunchKernelGGL(( integrateSpheres), dim3(nBlocks), dim3(CUDA_THREADS_PER_BLOCK), 0, 0, stepSize_SU, sphere_data, nSpheres, gran_params); gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); if (gran_params->friction_mode != 
GRAN_FRICTION_MODE::FRICTIONLESS) { hipLaunchKernelGGL(( updateFrictionData), dim3(nBlocks), dim3(CUDA_THREADS_PER_BLOCK), 0, 0, stepSize_SU, sphere_data, nSpheres, gran_params); gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); } elapsedSimTime += stepSize_SU * TIME_SU2UU; // Advance current time } return time_elapsed_SU * TIME_SU2UU; // return elapsed UU time } } // namespace granular } // namespace chrono
cc49a66626d71590dba6a2e3b02967fedb77bbdd.cu
// ============================================================================= // PROJECT CHRONO - http://projectchrono.org // // Copyright (c) 2019 projectchrono.org // All rights reserved. // // Use of this source code is governed by a BSD-style license that can be found // in the LICENSE file at the top level of the distribution and at // http://projectchrono.org/license-chrono.txt. // // ============================================================================= // Authors: Conlain Kelly, Nic Olsen, Dan Negrut // ============================================================================= #include <cmath> #include <numeric> #include "chrono_granular/physics/ChGranularGPU_SMC.cuh" #include "chrono_granular/utils/ChGranularUtilities.h" namespace chrono { namespace granular { __host__ double ChSystemGranularSMC::get_max_z() const { size_t nSpheres = sphere_local_pos_Z.size(); std::vector<int64_t> sphere_pos_global_Z; sphere_pos_global_Z.resize(nSpheres); for (size_t index = 0; index < nSpheres; index++) { unsigned int ownerSD = sphere_data->sphere_owner_SDs[index]; int3 sphere_pos_local = make_int3(sphere_data->sphere_local_pos_X[index], sphere_data->sphere_local_pos_Y[index], sphere_data->sphere_local_pos_Z[index]); sphere_pos_global_Z[index] = convertPosLocalToGlobal(ownerSD, sphere_pos_local, gran_params).z; } double max_z_SU = *(std::max_element(sphere_pos_global_Z.begin(), sphere_pos_global_Z.end())); double max_z_UU = max_z_SU * LENGTH_SU2UU; return max_z_UU; } // Reset broadphase data structures void ChSystemGranularSMC::resetBroadphaseInformation() { // Set all the offsets to zero gpuErrchk(cudaMemset(SD_NumSpheresTouching.data(), 0, SD_NumSpheresTouching.size() * sizeof(unsigned int))); gpuErrchk(cudaMemset(SD_SphereCompositeOffsets.data(), 0, SD_SphereCompositeOffsets.size() * sizeof(unsigned int))); // For each SD, all the spheres touching that SD should have their ID be NULL_GRANULAR_ID gpuErrchk(cudaMemset(spheres_in_SD_composite.data(), 
NULL_GRANULAR_ID, spheres_in_SD_composite.size() * sizeof(unsigned int))); gpuErrchk(cudaDeviceSynchronize()); } // Reset sphere acceleration data structures void ChSystemGranularSMC::resetSphereAccelerations() { // cache past acceleration data if (time_integrator == GRAN_TIME_INTEGRATOR::CHUNG) { gpuErrchk(cudaMemcpy(sphere_acc_X_old.data(), sphere_acc_X.data(), nSpheres * sizeof(float), cudaMemcpyDeviceToDevice)); gpuErrchk(cudaMemcpy(sphere_acc_Y_old.data(), sphere_acc_Y.data(), nSpheres * sizeof(float), cudaMemcpyDeviceToDevice)); gpuErrchk(cudaMemcpy(sphere_acc_Z_old.data(), sphere_acc_Z.data(), nSpheres * sizeof(float), cudaMemcpyDeviceToDevice)); // if we have multistep AND friction, cache old alphas if (gran_params->friction_mode != FRICTIONLESS) { gpuErrchk(cudaMemcpy(sphere_ang_acc_X_old.data(), sphere_ang_acc_X.data(), nSpheres * sizeof(float), cudaMemcpyDeviceToDevice)); gpuErrchk(cudaMemcpy(sphere_ang_acc_Y_old.data(), sphere_ang_acc_Y.data(), nSpheres * sizeof(float), cudaMemcpyDeviceToDevice)); gpuErrchk(cudaMemcpy(sphere_ang_acc_Z_old.data(), sphere_ang_acc_Z.data(), nSpheres * sizeof(float), cudaMemcpyDeviceToDevice)); } gpuErrchk(cudaDeviceSynchronize()); } // reset current accelerations to zero to zero gpuErrchk(cudaMemset(sphere_acc_X.data(), 0, nSpheres * sizeof(float))); gpuErrchk(cudaMemset(sphere_acc_Y.data(), 0, nSpheres * sizeof(float))); gpuErrchk(cudaMemset(sphere_acc_Z.data(), 0, nSpheres * sizeof(float))); // reset torques to zero, if applicable if (gran_params->friction_mode != FRICTIONLESS) { gpuErrchk(cudaMemset(sphere_ang_acc_X.data(), 0, nSpheres * sizeof(float))); gpuErrchk(cudaMemset(sphere_ang_acc_Y.data(), 0, nSpheres * sizeof(float))); gpuErrchk(cudaMemset(sphere_ang_acc_Z.data(), 0, nSpheres * sizeof(float))); } } __global__ void compute_absv(const unsigned int nSpheres, const float* velX, const float* velY, const float* velZ, float* d_absv) { unsigned int my_sphere = blockIdx.x * blockDim.x + threadIdx.x; if (my_sphere < 
nSpheres) { float v[3] = {velX[my_sphere], velY[my_sphere], velZ[my_sphere]}; d_absv[my_sphere] = v[0] * v[0] + v[1] * v[1] + v[2] * v[2]; } } __host__ float ChSystemGranularSMC::get_max_vel() const { float* d_absv; float* d_max_vel; float h_max_vel; gpuErrchk(cudaMalloc(&d_absv, nSpheres * sizeof(float))); gpuErrchk(cudaMalloc(&d_max_vel, sizeof(float))); compute_absv<<<(nSpheres + 255) / 256, 256>>>(nSpheres, pos_X_dt.data(), pos_Y_dt.data(), pos_Z_dt.data(), d_absv); void* d_temp_storage = NULL; size_t temp_storage_bytes = 0; cub::DeviceReduce::Max(d_temp_storage, temp_storage_bytes, d_absv, d_max_vel, nSpheres); gpuErrchk(cudaMalloc(&d_temp_storage, temp_storage_bytes)); cub::DeviceReduce::Max(d_temp_storage, temp_storage_bytes, d_absv, d_max_vel, nSpheres); gpuErrchk(cudaMemcpy(&h_max_vel, d_max_vel, sizeof(float), cudaMemcpyDeviceToHost)); gpuErrchk(cudaFree(d_absv)); gpuErrchk(cudaFree(d_max_vel)); return h_max_vel; } __host__ int3 ChSystemGranularSMC::getSDTripletFromID(unsigned int SD_ID) const { return SDIDTriplet(SD_ID, gran_params); } /// Sort sphere positions by subdomain id /// Occurs entirely on host, not intended to be efficient /// ONLY DO AT BEGINNING OF SIMULATION __host__ void ChSystemGranularSMC::defragment_initial_positions() { // key and value pointers std::vector<unsigned int, cudallocator<unsigned int>> sphere_ids; // load sphere indices sphere_ids.resize(nSpheres); std::iota(sphere_ids.begin(), sphere_ids.end(), 0); // sort sphere ids by owner SD std::sort(sphere_ids.begin(), sphere_ids.end(), [&](std::size_t i, std::size_t j) { return sphere_owner_SDs.at(i) < sphere_owner_SDs.at(j); }); std::vector<int, cudallocator<int>> sphere_pos_x_tmp; std::vector<int, cudallocator<int>> sphere_pos_y_tmp; std::vector<int, cudallocator<int>> sphere_pos_z_tmp; std::vector<float, cudallocator<float>> sphere_vel_x_tmp; std::vector<float, cudallocator<float>> sphere_vel_y_tmp; std::vector<float, cudallocator<float>> sphere_vel_z_tmp; 
std::vector<not_stupid_bool, cudallocator<not_stupid_bool>> sphere_fixed_tmp; std::vector<unsigned int, cudallocator<unsigned int>> sphere_owner_SDs_tmp; sphere_pos_x_tmp.resize(nSpheres); sphere_pos_y_tmp.resize(nSpheres); sphere_pos_z_tmp.resize(nSpheres); sphere_vel_x_tmp.resize(nSpheres); sphere_vel_y_tmp.resize(nSpheres); sphere_vel_z_tmp.resize(nSpheres); sphere_fixed_tmp.resize(nSpheres); sphere_owner_SDs_tmp.resize(nSpheres); // reorder values into new sorted for (unsigned int i = 0; i < nSpheres; i++) { sphere_pos_x_tmp.at(i) = sphere_local_pos_X.at(sphere_ids.at(i)); sphere_pos_y_tmp.at(i) = sphere_local_pos_Y.at(sphere_ids.at(i)); sphere_pos_z_tmp.at(i) = sphere_local_pos_Z.at(sphere_ids.at(i)); sphere_vel_x_tmp.at(i) = pos_X_dt.at(sphere_ids.at(i)); sphere_vel_y_tmp.at(i) = pos_Y_dt.at(sphere_ids.at(i)); sphere_vel_z_tmp.at(i) = pos_Z_dt.at(sphere_ids.at(i)); sphere_fixed_tmp.at(i) = sphere_fixed.at(sphere_ids.at(i)); sphere_owner_SDs_tmp.at(i) = sphere_owner_SDs.at(sphere_ids.at(i)); } // swap into the correct data structures sphere_local_pos_X.swap(sphere_pos_x_tmp); sphere_local_pos_Y.swap(sphere_pos_y_tmp); sphere_local_pos_Z.swap(sphere_pos_z_tmp); pos_X_dt.swap(sphere_vel_x_tmp); pos_Y_dt.swap(sphere_vel_y_tmp); pos_Z_dt.swap(sphere_vel_z_tmp); sphere_fixed.swap(sphere_fixed_tmp); sphere_owner_SDs.swap(sphere_owner_SDs_tmp); } __host__ void ChSystemGranularSMC::setupSphereDataStructures() { // Each fills user_sphere_positions with positions to be copied if (user_sphere_positions.size() == 0) { printf("ERROR: no sphere positions given!\n"); exit(1); } nSpheres = (unsigned int)user_sphere_positions.size(); INFO_PRINTF("%u balls added!\n", nSpheres); gran_params->nSpheres = nSpheres; TRACK_VECTOR_RESIZE(sphere_owner_SDs, nSpheres, "sphere_owner_SDs", NULL_GRANULAR_ID); // Allocate space for new bodies TRACK_VECTOR_RESIZE(sphere_local_pos_X, nSpheres, "sphere_local_pos_X", 0); TRACK_VECTOR_RESIZE(sphere_local_pos_Y, nSpheres, "sphere_local_pos_Y", 0); 
TRACK_VECTOR_RESIZE(sphere_local_pos_Z, nSpheres, "sphere_local_pos_Z", 0); TRACK_VECTOR_RESIZE(sphere_fixed, nSpheres, "sphere_fixed", 0); TRACK_VECTOR_RESIZE(pos_X_dt, nSpheres, "pos_X_dt", 0); TRACK_VECTOR_RESIZE(pos_Y_dt, nSpheres, "pos_Y_dt", 0); TRACK_VECTOR_RESIZE(pos_Z_dt, nSpheres, "pos_Z_dt", 0); // temporarily store global positions as 64-bit, discard as soon as local positions are loaded { bool user_provided_fixed = user_sphere_fixed.size() != 0; bool user_provided_vel = user_sphere_vel.size() != 0; if ((user_provided_fixed && user_sphere_fixed.size() != nSpheres) || (user_provided_vel && user_sphere_vel.size() != nSpheres)) { printf("Provided fixity or velocity array does not match provided particle positions\n"); exit(1); } std::vector<int64_t, cudallocator<int64_t>> sphere_global_pos_X; std::vector<int64_t, cudallocator<int64_t>> sphere_global_pos_Y; std::vector<int64_t, cudallocator<int64_t>> sphere_global_pos_Z; sphere_global_pos_X.resize(nSpheres); sphere_global_pos_Y.resize(nSpheres); sphere_global_pos_Z.resize(nSpheres); // Copy from array of structs to 3 arrays for (unsigned int i = 0; i < nSpheres; i++) { float3 vec = user_sphere_positions.at(i); // cast to double, convert to SU, then cast to int64_t sphere_global_pos_X.at(i) = (int64_t)((double)vec.x / LENGTH_SU2UU); sphere_global_pos_Y.at(i) = (int64_t)((double)vec.y / LENGTH_SU2UU); sphere_global_pos_Z.at(i) = (int64_t)((double)vec.z / LENGTH_SU2UU); // Convert to not_stupid_bool sphere_fixed.at(i) = (not_stupid_bool)((user_provided_fixed) ? 
user_sphere_fixed[i] : false); if (user_provided_vel) { auto vel = user_sphere_vel.at(i); pos_X_dt.at(i) = vel.x / VEL_SU2UU; pos_Y_dt.at(i) = vel.y / VEL_SU2UU; pos_Z_dt.at(i) = vel.z / VEL_SU2UU; } } packSphereDataPointers(); // Figure our the number of blocks that need to be launched to cover the box unsigned int nBlocks = (nSpheres + CUDA_THREADS_PER_BLOCK - 1) / CUDA_THREADS_PER_BLOCK; initializeLocalPositions<<<nBlocks, CUDA_THREADS_PER_BLOCK>>>( sphere_data, sphere_global_pos_X.data(), sphere_global_pos_Y.data(), sphere_global_pos_Z.data(), nSpheres, gran_params); gpuErrchk(cudaDeviceSynchronize()); gpuErrchk(cudaPeekAtLastError()); defragment_initial_positions(); } TRACK_VECTOR_RESIZE(sphere_acc_X, nSpheres, "sphere_acc_X", 0); TRACK_VECTOR_RESIZE(sphere_acc_Y, nSpheres, "sphere_acc_Y", 0); TRACK_VECTOR_RESIZE(sphere_acc_Z, nSpheres, "sphere_acc_Z", 0); // NOTE that this will get resized again later, this is just the first estimate TRACK_VECTOR_RESIZE(spheres_in_SD_composite, 2 * nSpheres, "spheres_in_SD_composite", NULL_GRANULAR_ID); if (gran_params->friction_mode != GRAN_FRICTION_MODE::FRICTIONLESS) { // add rotational DOFs TRACK_VECTOR_RESIZE(sphere_Omega_X, nSpheres, "sphere_Omega_X", 0); TRACK_VECTOR_RESIZE(sphere_Omega_Y, nSpheres, "sphere_Omega_Y", 0); TRACK_VECTOR_RESIZE(sphere_Omega_Z, nSpheres, "sphere_Omega_Z", 0); // add torques TRACK_VECTOR_RESIZE(sphere_ang_acc_X, nSpheres, "sphere_ang_acc_X", 0); TRACK_VECTOR_RESIZE(sphere_ang_acc_Y, nSpheres, "sphere_ang_acc_Y", 0); TRACK_VECTOR_RESIZE(sphere_ang_acc_Z, nSpheres, "sphere_ang_acc_Z", 0); } if (gran_params->friction_mode == GRAN_FRICTION_MODE::MULTI_STEP || gran_params->friction_mode == GRAN_FRICTION_MODE::SINGLE_STEP) { TRACK_VECTOR_RESIZE(contact_partners_map, 12 * nSpheres, "contact_partners_map", NULL_GRANULAR_ID); TRACK_VECTOR_RESIZE(contact_active_map, 12 * nSpheres, "contact_active_map", false); } if (gran_params->friction_mode == GRAN_FRICTION_MODE::MULTI_STEP) { float3 null_history = 
{0., 0., 0.}; TRACK_VECTOR_RESIZE(contact_history_map, 12 * nSpheres, "contact_history_map", null_history); } if (time_integrator == GRAN_TIME_INTEGRATOR::CHUNG) { TRACK_VECTOR_RESIZE(sphere_acc_X_old, nSpheres, "sphere_acc_X_old", 0); TRACK_VECTOR_RESIZE(sphere_acc_Y_old, nSpheres, "sphere_acc_Y_old", 0); TRACK_VECTOR_RESIZE(sphere_acc_Z_old, nSpheres, "sphere_acc_Z_old", 0); // friction and multistep means keep old ang acc if (gran_params->friction_mode != GRAN_FRICTION_MODE::FRICTIONLESS) { TRACK_VECTOR_RESIZE(sphere_ang_acc_X_old, nSpheres, "sphere_ang_acc_X_old", 0); TRACK_VECTOR_RESIZE(sphere_ang_acc_Y_old, nSpheres, "sphere_ang_acc_Y_old", 0); TRACK_VECTOR_RESIZE(sphere_ang_acc_Z_old, nSpheres, "sphere_ang_acc_Z_old", 0); } } // make sure the right pointers are packed packSphereDataPointers(); } __host__ void ChSystemGranularSMC::runSphereBroadphase() { METRICS_PRINTF("Resetting broadphase info!\n"); resetBroadphaseInformation(); // Figure our the number of blocks that need to be launched to cover the box unsigned int nBlocks = (nSpheres + CUDA_THREADS_PER_BLOCK - 1) / CUDA_THREADS_PER_BLOCK; packSphereDataPointers(); sphereBroadphase_dryrun<CUDA_THREADS_PER_BLOCK> <<<nBlocks, CUDA_THREADS_PER_BLOCK>>>(sphere_data, nSpheres, gran_params); gpuErrchk(cudaDeviceSynchronize()); gpuErrchk(cudaPeekAtLastError()); void* d_temp_storage = NULL; size_t temp_storage_bytes = 0; // num spheres in last SD unsigned int last_SD_num_spheres = SD_NumSpheresTouching.at(nSDs - 1); unsigned int* out_ptr = SD_SphereCompositeOffsets.data(); unsigned int* in_ptr = SD_NumSpheresTouching.data(); // copy data into the tmp array gpuErrchk(cudaMemcpy(out_ptr, in_ptr, nSDs * sizeof(unsigned int), cudaMemcpyDeviceToDevice)); cub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, in_ptr, out_ptr, nSDs); gpuErrchk(cudaDeviceSynchronize()); gpuErrchk(cudaPeekAtLastError()); // Allocate temporary storage gpuErrchk(cudaMalloc(&d_temp_storage, temp_storage_bytes)); 
gpuErrchk(cudaDeviceSynchronize()); gpuErrchk(cudaPeekAtLastError()); // Run exclusive prefix sum cub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, in_ptr, out_ptr, nSDs); gpuErrchk(cudaDeviceSynchronize()); gpuErrchk(cudaPeekAtLastError()); // total number of sphere entries to record unsigned int num_entries = out_ptr[nSDs - 1] + in_ptr[nSDs - 1]; spheres_in_SD_composite.resize(num_entries, NULL_GRANULAR_ID); // make sure the DEs pointer is updated packSphereDataPointers(); // printf("first run: num entries is %u, theoretical max is %u\n", num_entries, nSDs * MAX_COUNT_OF_SPHERES_PER_SD); // for (unsigned int i = 0; i < nSDs; i++) { // printf("SD %d has offset %u, N %u \n", i, out_ptr[i], in_ptr[i]); // } // back up the offsets // TODO use a cached allocator, CUB provides one std::vector<unsigned int, cudallocator<unsigned int>> SD_SphereCompositeOffsets_bak; SD_SphereCompositeOffsets_bak.resize(SD_SphereCompositeOffsets.size()); gpuErrchk(cudaMemcpy(SD_SphereCompositeOffsets_bak.data(), SD_SphereCompositeOffsets.data(), nSDs * sizeof(unsigned int), cudaMemcpyDeviceToDevice)); gpuErrchk(cudaDeviceSynchronize()); gpuErrchk(cudaPeekAtLastError()); sphereBroadphase<CUDA_THREADS_PER_BLOCK><<<nBlocks, CUDA_THREADS_PER_BLOCK>>>(sphere_data, nSpheres, gran_params); gpuErrchk(cudaDeviceSynchronize()); gpuErrchk(cudaPeekAtLastError()); // // for (unsigned int i = 0; i < nSDs; i++) { // printf("SD %d has offset %u, N %u \n", i, out_ptr[i], in_ptr[i]); // } // // for (unsigned int i = 0; i < num_entries; i++) { // printf("entry %u is %u\n", i, spheres_in_SD_composite[i]); // } // restore the old offsets gpuErrchk(cudaMemcpy(SD_SphereCompositeOffsets.data(), SD_SphereCompositeOffsets_bak.data(), nSDs * sizeof(unsigned int), cudaMemcpyDeviceToDevice)); gpuErrchk(cudaFree(d_temp_storage)); } __host__ void ChSystemGranularSMC::updateBCPositions() { for (unsigned int i = 0; i < BC_params_list_UU.size(); i++) { auto bc_type = BC_type_list.at(i); const 
BC_params_t<float, float3>& params_UU = BC_params_list_UU.at(i); BC_params_t<int64_t, int64_t3>& params_SU = BC_params_list_SU.at(i); auto offset_function = BC_offset_function_list.at(i); setBCOffset(bc_type, params_UU, params_SU, offset_function(elapsedSimTime)); } if (!BD_is_fixed) { double3 new_BD_offset = BDOffsetFunction(elapsedSimTime); int64_t3 bd_offset_SU = {0, 0, 0}; bd_offset_SU.x = new_BD_offset.x / LENGTH_SU2UU; bd_offset_SU.y = new_BD_offset.y / LENGTH_SU2UU; bd_offset_SU.z = new_BD_offset.z / LENGTH_SU2UU; int64_t old_frame_X = gran_params->BD_frame_X; int64_t old_frame_Y = gran_params->BD_frame_Y; int64_t old_frame_Z = gran_params->BD_frame_Z; gran_params->BD_frame_X = bd_offset_SU.x + BD_rest_frame_SU.x; gran_params->BD_frame_Y = bd_offset_SU.y + BD_rest_frame_SU.y; gran_params->BD_frame_Z = bd_offset_SU.z + BD_rest_frame_SU.z; unsigned int nBlocks = (nSpheres + CUDA_THREADS_PER_BLOCK - 1) / CUDA_THREADS_PER_BLOCK; int64_t3 offset_delta = {0, 0, 0}; // if the frame X increases, the local X should decrease offset_delta.x = old_frame_X - gran_params->BD_frame_X; offset_delta.y = old_frame_Y - gran_params->BD_frame_Y; offset_delta.z = old_frame_Z - gran_params->BD_frame_Z; // printf("offset is %lld, %lld, %lld\n", offset_delta.x, offset_delta.y, offset_delta.z); packSphereDataPointers(); applyBDFrameChange<<<nBlocks, CUDA_THREADS_PER_BLOCK>>>(offset_delta, sphere_data, nSpheres, gran_params); gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); } } __host__ double ChSystemGranularSMC::advance_simulation(float duration) { // Figure our the number of blocks that need to be launched to cover the box unsigned int nBlocks = (nSpheres + CUDA_THREADS_PER_BLOCK - 1) / CUDA_THREADS_PER_BLOCK; // Settling simulation loop. 
float duration_SU = duration / TIME_SU2UU; unsigned int nsteps = std::round(duration_SU / stepSize_SU); METRICS_PRINTF("advancing by %f at timestep %f, %u timesteps at approx user timestep %f\n", duration_SU, stepSize_SU, nsteps, duration / nsteps); float time_elapsed_SU = 0; // time elapsed in this advance call // Run the simulation, there are aggressive synchronizations because we want to have no race conditions for (; time_elapsed_SU < stepSize_SU * nsteps; time_elapsed_SU += stepSize_SU) { updateBCPositions(); runSphereBroadphase(); packSphereDataPointers(); gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); resetSphereAccelerations(); resetBCForces(); METRICS_PRINTF("Starting computeSphereForces!\n"); if (gran_params->friction_mode == FRICTIONLESS) { // Compute sphere-sphere forces computeSphereForces_frictionless<<<nSDs, MAX_COUNT_OF_SPHERES_PER_SD>>>( sphere_data, gran_params, BC_type_list.data(), BC_params_list_SU.data(), BC_params_list_SU.size()); gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); } else if (gran_params->friction_mode == SINGLE_STEP || gran_params->friction_mode == MULTI_STEP) { // figure out who is contacting determineContactPairs<<<nSDs, MAX_COUNT_OF_SPHERES_PER_SD>>>(sphere_data, gran_params); gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); computeSphereContactForces<<<nBlocks, CUDA_THREADS_PER_BLOCK>>>( sphere_data, gran_params, BC_type_list.data(), BC_params_list_SU.data(), BC_params_list_SU.size(), nSpheres); gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); } METRICS_PRINTF("Starting integrateSpheres!\n"); integrateSpheres<<<nBlocks, CUDA_THREADS_PER_BLOCK>>>(stepSize_SU, sphere_data, nSpheres, gran_params); gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); if (gran_params->friction_mode != GRAN_FRICTION_MODE::FRICTIONLESS) { updateFrictionData<<<nBlocks, CUDA_THREADS_PER_BLOCK>>>(stepSize_SU, sphere_data, nSpheres, gran_params); 
gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); } elapsedSimTime += stepSize_SU * TIME_SU2UU; // Advance current time } return time_elapsed_SU * TIME_SU2UU; // return elapsed UU time } } // namespace granular } // namespace chrono
ea52a5de3f78a44c79406f2c18a6d7c8d7ddfc46.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <string.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #ifndef NDEBUG #define CHECK_STATUS(status) \ if (status != hipSuccess) \ fprintf(stderr, "File: %s\nLine:%d Function:%s>>>%s\n", __FILE__, __LINE__, __FUNCTION__,\ hipGetErrorString(status)) #else #define CHECK_STATUS(status) status #endif ////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void MyKernel(float* output, float* input, size_t size){ int i = threadIdx.x + blockDim.x*blockIdx.x; if(i>size/ sizeof(float)) return; output[i] = input[i] - 5; } int main(int argc, char **argv) { CHECK_STATUS(hipSetDevice(0)); hipDeviceProp_t prop; CHECK_STATUS(hipGetDeviceProperties(&prop,0)); printf("Kernel:%d, :%d\n",prop.concurrentKernels,prop.asyncEngineCount); // hipStream_t stream[2]; for (int i = 0; i < 2; ++i) CHECK_STATUS(hipStreamCreate(&stream[i])); // (page-locked memory) const int N = 512; size_t size = N * sizeof(float); float *hostPtr; CHECK_STATUS(hipHostMalloc(&hostPtr, 2 * size)); // for(size_t i=0;i<N*2;i++) hostPtr[i] = i; // float *inputDevPtr,*outputDevPtr; CHECK_STATUS(hipMalloc(&inputDevPtr,2*size)); CHECK_STATUS(hipMalloc(&outputDevPtr,2*size)); for (int i = 0; i < 2; ++i) { // CHECK_STATUS(hipMemcpyAsync(inputDevPtr + i * N, hostPtr + i * N, size, hipMemcpyHostToDevice, stream[i])); // kernel hipLaunchKernelGGL(( MyKernel), dim3(100), dim3(512), 0, stream[i], outputDevPtr + i * N, inputDevPtr + i * N, size); // CHECK_STATUS(hipGetLastError()); // CHECK_STATUS(hipMemcpyAsync(hostPtr + i * N, outputDevPtr + i * N, size, hipMemcpyDeviceToHost, stream[i])); } // // 1. hipDeviceSynchronize(); // 2. 
for (int i = 0; i < 2; ++i) CHECK_STATUS(hipStreamSynchronize(stream[i])); // for(size_t i=0;i<10;i++) { printf("%.2f\t",hostPtr[i]); } // for (int i = 0; i < 2; ++i) CHECK_STATUS(hipStreamDestroy(stream[i])); // CHECK_STATUS(hipHostFree(hostPtr)); CHECK_STATUS(hipFree(inputDevPtr)); CHECK_STATUS(hipFree(outputDevPtr)); return 0; }
ea52a5de3f78a44c79406f2c18a6d7c8d7ddfc46.cu
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <cuda.h> #include <cuda_runtime.h> #include <device_launch_parameters.h> #ifndef NDEBUG #define CHECK_STATUS(status) \ if (status != cudaSuccess) \ fprintf(stderr, "File: %s\nLine:%d Function:%s>>>%s\n", __FILE__, __LINE__, __FUNCTION__,\ cudaGetErrorString(status)) #else #define CHECK_STATUS(status) status #endif ////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void MyKernel(float* output, float* input, size_t size){ int i = threadIdx.x + blockDim.x*blockIdx.x; if(i>size/ sizeof(float)) return; output[i] = input[i] - 5; } int main(int argc, char **argv) { CHECK_STATUS(cudaSetDevice(0)); cudaDeviceProp prop; CHECK_STATUS(cudaGetDeviceProperties(&prop,0)); printf("Kernel并发执行:%d, 异步引擎数量:%d\n",prop.concurrentKernels,prop.asyncEngineCount); // 创建两个流 cudaStream_t stream[2]; for (int i = 0; i < 2; ++i) CHECK_STATUS(cudaStreamCreate(&stream[i])); // 在主机内存上分配页锁存(page-locked memory) const int N = 512; size_t size = N * sizeof(float); float *hostPtr; CHECK_STATUS(cudaMallocHost(&hostPtr, 2 * size)); // 初始化 for(size_t i=0;i<N*2;i++) hostPtr[i] = i; // 分配设备内存 float *inputDevPtr,*outputDevPtr; CHECK_STATUS(cudaMalloc(&inputDevPtr,2*size)); CHECK_STATUS(cudaMalloc(&outputDevPtr,2*size)); for (int i = 0; i < 2; ++i) { // 把数据从页锁存复制到设备内存 CHECK_STATUS(cudaMemcpyAsync(inputDevPtr + i * N, hostPtr + i * N, size, cudaMemcpyHostToDevice, stream[i])); // 调用kernel MyKernel<<<100, 512, 0, stream[i]>>>(outputDevPtr + i * N, inputDevPtr + i * N, size); // 检查错误 CHECK_STATUS(cudaGetLastError()); // 把数据从设备内存拷贝会主机内存 CHECK_STATUS(cudaMemcpyAsync(hostPtr + i * N, outputDevPtr + i * N, size, cudaMemcpyDeviceToHost, stream[i])); } // 同步,有三种方式,只列出两种,选一种就行 // 1.等待所有流中的的所有任务完成 cudaDeviceSynchronize(); // 2.分别同步不同的流,可以只等待其中的某个流中的任务完成 for (int i = 0; i < 2; ++i) CHECK_STATUS(cudaStreamSynchronize(stream[i])); // 打印数据 for(size_t i=0;i<10;i++) { printf("%.2f\t",hostPtr[i]); } // 
销毁流 for (int i = 0; i < 2; ++i) CHECK_STATUS(cudaStreamDestroy(stream[i])); // 释放设备内存 CHECK_STATUS(cudaFreeHost(hostPtr)); CHECK_STATUS(cudaFree(inputDevPtr)); CHECK_STATUS(cudaFree(outputDevPtr)); return 0; }
42e3eaff3016cd1b395e9f57c5461f0252625793.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime_api.h> #include <iostream> #include "utils.h" typedef unsigned int uint32; __global__ static void memoryTransfer( uint32* dev_input, uint32* dev_output, size_t inputSize) { // Index calculations int x = blockIdx.x * blockDim.x + threadIdx.x; //global x id int y = blockIdx.y * blockDim.y + threadIdx.y; //global y id int xwidth = blockDim.x * gridDim.x; //X width of the grid int global_tid = y*xwidth + x; uint32 w1, w2, w3, w4; if(global_tid < inputSize/16) { w1 = dev_input[4*global_tid]; w2 = dev_input[4*global_tid+1]; w3 = dev_input[4*global_tid+2]; w4 = dev_input[4*global_tid+3]; dev_output[4*global_tid ] = w1; dev_output[4*global_tid+1] = w2; dev_output[4*global_tid+2] = w3; dev_output[4*global_tid+3] = w4; } } __global__ static void encrypt_Kernel( size_t inputSize) { // Index calculations int tid = threadIdx.y*blockDim.x + threadIdx.x; //local id int x = blockIdx.x * blockDim.x + threadIdx.x; //global x id int y = blockIdx.y * blockDim.y + threadIdx.y; //global y id int w = blockDim.x * gridDim.x; //width of the grid int global_tid = y*w + x; //global id int blockSize = blockDim.x * blockDim.y; uint32_t w1,w2,w3,w4,s1,s2,s3,s4; int ROUNDS = 11; // store the T-boxes and sbox in shared memory. __shared__ uint32_t sm_te1[256], sm_te2[256], sm_te3[256], sm_te4[256]; __shared__ uint8_t sm_sbox[256]; // Loading shared memory. 
256 elements are needed int elemPerThread = 256/blockSize; if ( !elemPerThread && tid<256) { //load dev_sm_te1, dev_sm_te2, dev_sm_te3, dev_sm_te4 and // sm_sbox to share memory variables sm_te1, sm_te2, //sm_te3, sm_te4 and sm_sbox; sm_te1[tid] = tid; sm_te2[tid] = tid; sm_te3[tid] = tid; sm_te4[tid] = tid; sm_sbox[tid] = tid; } else { for(int i=0; i<elemPerThread; i++) { sm_te1[tid*elemPerThread + i] = tid*(elemPerThread+1) + i; sm_te2[tid*elemPerThread + i] = tid*(elemPerThread+1) + i; sm_te3[tid*elemPerThread + i] = tid*(elemPerThread+1) + i; sm_te4[tid*elemPerThread + i] = tid*(elemPerThread+1) + i; sm_sbox[tid*elemPerThread + i] = tid*(elemPerThread+1) + i; } int modEPT = 256%blockSize; //256 is not a multiple of blockSize if(!modEPT && (tid == blockSize-1)) { for(int i=0; i<modEPT; i++) { sm_te1[tid*(elemPerThread+1) + i] = tid*(elemPerThread+1) + i; sm_te2[tid*(elemPerThread+1) + i] = tid*(elemPerThread+1) + i; sm_te3[tid*(elemPerThread+1) + i] = tid*(elemPerThread+1) + i; sm_te4[tid*(elemPerThread+1) + i] = tid*(elemPerThread+1) + i; sm_sbox[tid*(elemPerThread+1) + i] = tid*(elemPerThread+1) + i; } } } __syncthreads(); // Each thread treat 16 bytes. if(global_tid < inputSize/16) { //load the cipher blocks, all the global memory transactions are //coalesced. The original plain text load from files, due to the read //procedure reverse the byte order of the 32-bit words, So a reverse //process was necessary. w1 = 4*global_tid; w2 = 4*global_tid+1; w3 = 4*global_tid+2; w4 = 4*global_tid+3; // First round AddRoundKey: ex-or with round key //w1 ^= const_expkey[0]; //w2 ^= const_expkey[1]; //w3 ^= const_expkey[2]; //w4 ^= const_expkey[3]; // Round transformation: a set of table lookups operations. 
#pragma unroll for (int i = 1; i < 10; i++) { s1 = (sm_te4[(w4 >> 24)] ^ sm_te3[(w3 >> 16) & 0xFF] ^ sm_te2[(w2 >> 8) & 0xFF] ^ sm_te1[w1 & 0xFF]); s2 = (sm_te4[(w1 >> 24)] ^ sm_te3[(w4 >> 16) & 0xFF] ^ sm_te2[(w3 >> 8) & 0xFF] ^ sm_te1[w2 & 0xFF]); s3 = (sm_te4[(w2 >> 24)] ^ sm_te3[(w1 >> 16) & 0xFF] ^ sm_te2[(w4 >> 8) & 0xFF] ^ sm_te1[w3 & 0xFF]); s4 = (sm_te4[(w3 >> 24)] ^ sm_te3[(w2 >> 16) & 0xFF] ^ sm_te2[(w1 >> 8) & 0xFF] ^ sm_te1[w4 & 0xFF]); w1 = s1 ^ const_expkey[i * 4]; w2 = s2 ^ const_expkey[i * 4 + 1]; w3 = s3 ^ const_expkey[i * 4 + 2]; w4 = s4 ^ const_expkey[i * 4 + 3]; } // The final round doesnt include the MixColumns s1 = (uint32_t)(sm_sbox[ w1 & 0xFF]); s1 |= (uint32_t)(sm_sbox[(w2 >> 8) & 0xFF]) << 8; s1 |= (uint32_t)(sm_sbox[(w3 >> 16) & 0xFF]) << 16; s1 |= (uint32_t)(sm_sbox[(w4 >> 24) ]) << 24; //SubBytes and ShiftRows s1 ^= const_expkey[(ROUNDS - 1) * 4]; //AddRoundKey s2 = (uint32_t)(sm_sbox[ w2 & 0xFF]); s2 |= (uint32_t)(sm_sbox[(w3 >> 8) & 0xFF]) << 8; s2 |= (uint32_t)(sm_sbox[(w4 >> 16) & 0xFF]) << 16; s2 |= (uint32_t)(sm_sbox[(w1 >> 24) ]) << 24; //SubBytes and ShiftRows s2 ^= const_expkey[(ROUNDS - 1) * 4 + 1]; //AddRoundKey s3 = (uint32_t)(sm_sbox[ w3 & 0xFF]); s3 |= (uint32_t)(sm_sbox[(w4 >> 8) & 0xFF]) << 8; s3 |= (uint32_t)(sm_sbox[(w1 >> 16) & 0xFF]) << 16; s3 |= (uint32_t)(sm_sbox[(w2 >> 24) ]) << 24; //SubBytes and ShiftRows s3 ^= const_expkey[(ROUNDS - 1) * 4 + 2]; //AddRoundKey s4 = (uint32_t)(sm_sbox[ w4 & 0xFF]); s4 |= (uint32_t)(sm_sbox[(w1 >> 8) & 0xFF]) << 8; s4 |= (uint32_t)(sm_sbox[(w2 >> 16) & 0xFF]) << 16; s4 |= (uint32_t)(sm_sbox[(w3 >> 24) ]) << 24; //SubBytes and ShiftRows s4 ^= const_expkey[(ROUNDS - 1) * 4 + 3]; //AddRoundKey } } int main(int argc, char * argv[]) { /////////////////////////////////////////////////////////////// // command line arguments /////////////////////////////////////////////////////////////// char * filename; int threadNum = 512; // Threads per block. This is a recommanded number. 
int blockNum = 0; // Number of blocks in the grid std::cout << std::endl << "********************************************************************" ; std::cout << std::endl << "****** CUDA Tests ******" ; std::cout << std::endl << "****** Memory Bottleneck ******" ; std::cout << std::endl << "********************************************************************" << std::endl << std::endl; if (argc > 1){ for( int n=1 ; n<argc ; n=n+2 ) { if((strcmp(argv[n],"-filename") == 0) && (n+1<argc)) { filename = argv[n+1]; } else if((strcmp(argv[n],"-threadNum") == 0) && (n+1<argc)) { threadNum = atoi(argv[n+1]); if(threadNum ==0) { printf("\n threadNum must be a non-null value.\n"); exit(1); } } else if((strcmp(argv[n],"-blockNum") == 0) && (n+1<argc)) { blockNum = atoi(argv[n+1]); } else if((strcmp(argv[n],"-help") == 0)) { std::cout << " This is a CUDA test program." << std::endl; std::cout << " \"-options value\" availables are:" << std::endl; std::cout << " -filename, the file path to encrypt or decrypt." << std::endl; std::cout << " -threadNum to set the number of threads per block. Default recommended value is 512." << std::endl; std::cout << " -blockNum to set the number of blocks in the grid. Default value is 0 and will create enough blocks taking into account the input file size and the threadNum argument." << std::endl; std::cout << " The order between options is not important." << std::endl << std::endl; exit(0); } else { std::cout << std::endl << "Argument " << argv[n] << " does not correspond to any valid arguments. Type -help for details about valid command line arguments." <<std::endl; exit(1); } } } else { std::cout << std::endl << std::endl << "Not enough arguments. Type -help option in the command line for further explanations." 
<< std::endl; exit(1); } std::cout << " threadNum = " << threadNum << std::endl; std::cout << " blockNum = " << blockNum << std::endl; std::cout << " Filename = " << filename << std::endl << std::endl; // ***Inputdata file to encrypt/decrypt*** //Checking for the size of the file int filesize; filesize = fsize(filename); uint8_t *inputData, *outputData; inputData = (uint8_t*)malloc((filesize)*sizeof(uint8_t)); outputData = (uint8_t*)malloc((filesize)*sizeof(uint8_t)); //Opening the file FILE * inputFile; int result; inputFile = fopen(filename,"rb"); if (inputFile == NULL) { perror ("Error opening file"); exit(1); } result = fread (inputData, sizeof(uint8_t), filesize, inputFile); if(result != filesize) { perror("Reading error from the input file"); exit(1); } fclose(inputFile); if(!blockNum) { blockNum = 1+filesize/(threadNum); } std::cout << " Gridsize in term of block: " << blockNum << std::endl; //Device vectors declarations and allocations uint32 * devInput, * devOutput; hipMalloc( (void **) &devInput , filesize*sizeof(uint8_t)); hipMalloc( (void **) &devOutput , filesize*sizeof(uint8_t)); //To record the device time execution hipEvent_t startDevice, stopDevice; checkCudaErrors(hipEventCreate(&startDevice)); checkCudaErrors(hipEventCreate(&stopDevice)); //checkCudaErrors(hipEventRecord(startHost, NULL)); hipMemcpy(devInput, inputData, filesize*sizeof(uint8_t), hipMemcpyHostToDevice); //Warm Up for(int i=0; i < 1000 ; i++) { hipLaunchKernelGGL(( encrypt_Kernel), dim3(blockNum),dim3(threadNum), 0, 0, filesize); } checkCudaErrors(hipEventRecord(startDevice, NULL)); for(int j=0; j<1000; j++){ //for benchmarking hipLaunchKernelGGL(( encrypt_Kernel), dim3(blockNum),dim3(threadNum), 0, 0, filesize); } checkCudaErrors(hipEventRecord(stopDevice, NULL)); //Copy results from host memory to device memory hipMemcpy(outputData, devOutput, filesize*sizeof(uint8_t), hipMemcpyDeviceToHost); checkCudaErrors(hipEventSynchronize(stopDevice)); //Time calculation float Devmsec = 
0.0f; checkCudaErrors(hipEventElapsedTime(&Devmsec, startDevice, stopDevice)); Devmsec /= 1000; double throughput = 1.0e-9f*8*filesize/(Devmsec*1.0e-3f); printf("\n GPU processing time: %f (ms)", Devmsec); printf("\n GPU throughput: %f (Gbps)\n", throughput); //Free host memory free(inputData); free(outputData); // Free device memory hipFree(devInput); hipFree(devOutput); }
42e3eaff3016cd1b395e9f57c5461f0252625793.cu
#include <cuda_runtime_api.h> #include <iostream> #include "utils.h" typedef unsigned int uint32; __global__ static void memoryTransfer( uint32* dev_input, uint32* dev_output, size_t inputSize) { // Index calculations int x = blockIdx.x * blockDim.x + threadIdx.x; //global x id int y = blockIdx.y * blockDim.y + threadIdx.y; //global y id int xwidth = blockDim.x * gridDim.x; //X width of the grid int global_tid = y*xwidth + x; uint32 w1, w2, w3, w4; if(global_tid < inputSize/16) { w1 = dev_input[4*global_tid]; w2 = dev_input[4*global_tid+1]; w3 = dev_input[4*global_tid+2]; w4 = dev_input[4*global_tid+3]; dev_output[4*global_tid ] = w1; dev_output[4*global_tid+1] = w2; dev_output[4*global_tid+2] = w3; dev_output[4*global_tid+3] = w4; } } __global__ static void encrypt_Kernel( size_t inputSize) { // Index calculations int tid = threadIdx.y*blockDim.x + threadIdx.x; //local id int x = blockIdx.x * blockDim.x + threadIdx.x; //global x id int y = blockIdx.y * blockDim.y + threadIdx.y; //global y id int w = blockDim.x * gridDim.x; //width of the grid int global_tid = y*w + x; //global id int blockSize = blockDim.x * blockDim.y; uint32_t w1,w2,w3,w4,s1,s2,s3,s4; int ROUNDS = 11; // store the T-boxes and sbox in shared memory. __shared__ uint32_t sm_te1[256], sm_te2[256], sm_te3[256], sm_te4[256]; __shared__ uint8_t sm_sbox[256]; // Loading shared memory. 
256 elements are needed int elemPerThread = 256/blockSize; if ( !elemPerThread && tid<256) { //load dev_sm_te1, dev_sm_te2, dev_sm_te3, dev_sm_te4 and // sm_sbox to share memory variables sm_te1, sm_te2, //sm_te3, sm_te4 and sm_sbox; sm_te1[tid] = tid; sm_te2[tid] = tid; sm_te3[tid] = tid; sm_te4[tid] = tid; sm_sbox[tid] = tid; } else { for(int i=0; i<elemPerThread; i++) { sm_te1[tid*elemPerThread + i] = tid*(elemPerThread+1) + i; sm_te2[tid*elemPerThread + i] = tid*(elemPerThread+1) + i; sm_te3[tid*elemPerThread + i] = tid*(elemPerThread+1) + i; sm_te4[tid*elemPerThread + i] = tid*(elemPerThread+1) + i; sm_sbox[tid*elemPerThread + i] = tid*(elemPerThread+1) + i; } int modEPT = 256%blockSize; //256 is not a multiple of blockSize if(!modEPT && (tid == blockSize-1)) { for(int i=0; i<modEPT; i++) { sm_te1[tid*(elemPerThread+1) + i] = tid*(elemPerThread+1) + i; sm_te2[tid*(elemPerThread+1) + i] = tid*(elemPerThread+1) + i; sm_te3[tid*(elemPerThread+1) + i] = tid*(elemPerThread+1) + i; sm_te4[tid*(elemPerThread+1) + i] = tid*(elemPerThread+1) + i; sm_sbox[tid*(elemPerThread+1) + i] = tid*(elemPerThread+1) + i; } } } __syncthreads(); // Each thread treat 16 bytes. if(global_tid < inputSize/16) { //load the cipher blocks, all the global memory transactions are //coalesced. The original plain text load from files, due to the read //procedure reverse the byte order of the 32-bit words, So a reverse //process was necessary. w1 = 4*global_tid; w2 = 4*global_tid+1; w3 = 4*global_tid+2; w4 = 4*global_tid+3; // First round AddRoundKey: ex-or with round key //w1 ^= const_expkey[0]; //w2 ^= const_expkey[1]; //w3 ^= const_expkey[2]; //w4 ^= const_expkey[3]; // Round transformation: a set of table lookups operations. 
#pragma unroll for (int i = 1; i < 10; i++) { s1 = (sm_te4[(w4 >> 24)] ^ sm_te3[(w3 >> 16) & 0xFF] ^ sm_te2[(w2 >> 8) & 0xFF] ^ sm_te1[w1 & 0xFF]); s2 = (sm_te4[(w1 >> 24)] ^ sm_te3[(w4 >> 16) & 0xFF] ^ sm_te2[(w3 >> 8) & 0xFF] ^ sm_te1[w2 & 0xFF]); s3 = (sm_te4[(w2 >> 24)] ^ sm_te3[(w1 >> 16) & 0xFF] ^ sm_te2[(w4 >> 8) & 0xFF] ^ sm_te1[w3 & 0xFF]); s4 = (sm_te4[(w3 >> 24)] ^ sm_te3[(w2 >> 16) & 0xFF] ^ sm_te2[(w1 >> 8) & 0xFF] ^ sm_te1[w4 & 0xFF]); w1 = s1 ^ const_expkey[i * 4]; w2 = s2 ^ const_expkey[i * 4 + 1]; w3 = s3 ^ const_expkey[i * 4 + 2]; w4 = s4 ^ const_expkey[i * 4 + 3]; } // The final round doesn’t include the MixColumns s1 = (uint32_t)(sm_sbox[ w1 & 0xFF]); s1 |= (uint32_t)(sm_sbox[(w2 >> 8) & 0xFF]) << 8; s1 |= (uint32_t)(sm_sbox[(w3 >> 16) & 0xFF]) << 16; s1 |= (uint32_t)(sm_sbox[(w4 >> 24) ]) << 24; //SubBytes and ShiftRows s1 ^= const_expkey[(ROUNDS - 1) * 4]; //AddRoundKey s2 = (uint32_t)(sm_sbox[ w2 & 0xFF]); s2 |= (uint32_t)(sm_sbox[(w3 >> 8) & 0xFF]) << 8; s2 |= (uint32_t)(sm_sbox[(w4 >> 16) & 0xFF]) << 16; s2 |= (uint32_t)(sm_sbox[(w1 >> 24) ]) << 24; //SubBytes and ShiftRows s2 ^= const_expkey[(ROUNDS - 1) * 4 + 1]; //AddRoundKey s3 = (uint32_t)(sm_sbox[ w3 & 0xFF]); s3 |= (uint32_t)(sm_sbox[(w4 >> 8) & 0xFF]) << 8; s3 |= (uint32_t)(sm_sbox[(w1 >> 16) & 0xFF]) << 16; s3 |= (uint32_t)(sm_sbox[(w2 >> 24) ]) << 24; //SubBytes and ShiftRows s3 ^= const_expkey[(ROUNDS - 1) * 4 + 2]; //AddRoundKey s4 = (uint32_t)(sm_sbox[ w4 & 0xFF]); s4 |= (uint32_t)(sm_sbox[(w1 >> 8) & 0xFF]) << 8; s4 |= (uint32_t)(sm_sbox[(w2 >> 16) & 0xFF]) << 16; s4 |= (uint32_t)(sm_sbox[(w3 >> 24) ]) << 24; //SubBytes and ShiftRows s4 ^= const_expkey[(ROUNDS - 1) * 4 + 3]; //AddRoundKey } } int main(int argc, char * argv[]) { /////////////////////////////////////////////////////////////// // command line arguments /////////////////////////////////////////////////////////////// char * filename; int threadNum = 512; // Threads per block. This is a recommanded number. 
int blockNum = 0; // Number of blocks in the grid std::cout << std::endl << "********************************************************************" ; std::cout << std::endl << "****** CUDA Tests ******" ; std::cout << std::endl << "****** Memory Bottleneck ******" ; std::cout << std::endl << "********************************************************************" << std::endl << std::endl; if (argc > 1){ for( int n=1 ; n<argc ; n=n+2 ) { if((strcmp(argv[n],"-filename") == 0) && (n+1<argc)) { filename = argv[n+1]; } else if((strcmp(argv[n],"-threadNum") == 0) && (n+1<argc)) { threadNum = atoi(argv[n+1]); if(threadNum ==0) { printf("\n threadNum must be a non-null value.\n"); exit(1); } } else if((strcmp(argv[n],"-blockNum") == 0) && (n+1<argc)) { blockNum = atoi(argv[n+1]); } else if((strcmp(argv[n],"-help") == 0)) { std::cout << " This is a CUDA test program." << std::endl; std::cout << " \"-options value\" availables are:" << std::endl; std::cout << " -filename, the file path to encrypt or decrypt." << std::endl; std::cout << " -threadNum to set the number of threads per block. Default recommended value is 512." << std::endl; std::cout << " -blockNum to set the number of blocks in the grid. Default value is 0 and will create enough blocks taking into account the input file size and the threadNum argument." << std::endl; std::cout << " The order between options is not important." << std::endl << std::endl; exit(0); } else { std::cout << std::endl << "Argument " << argv[n] << " does not correspond to any valid arguments. Type -help for details about valid command line arguments." <<std::endl; exit(1); } } } else { std::cout << std::endl << std::endl << "Not enough arguments. Type -help option in the command line for further explanations." 
<< std::endl; exit(1); } std::cout << " threadNum = " << threadNum << std::endl; std::cout << " blockNum = " << blockNum << std::endl; std::cout << " Filename = " << filename << std::endl << std::endl; // ***Inputdata file to encrypt/decrypt*** //Checking for the size of the file int filesize; filesize = fsize(filename); uint8_t *inputData, *outputData; inputData = (uint8_t*)malloc((filesize)*sizeof(uint8_t)); outputData = (uint8_t*)malloc((filesize)*sizeof(uint8_t)); //Opening the file FILE * inputFile; int result; inputFile = fopen(filename,"rb"); if (inputFile == NULL) { perror ("Error opening file"); exit(1); } result = fread (inputData, sizeof(uint8_t), filesize, inputFile); if(result != filesize) { perror("Reading error from the input file"); exit(1); } fclose(inputFile); if(!blockNum) { blockNum = 1+filesize/(threadNum); } std::cout << " Gridsize in term of block: " << blockNum << std::endl; //Device vectors declarations and allocations uint32 * devInput, * devOutput; cudaMalloc( (void **) &devInput , filesize*sizeof(uint8_t)); cudaMalloc( (void **) &devOutput , filesize*sizeof(uint8_t)); //To record the device time execution cudaEvent_t startDevice, stopDevice; checkCudaErrors(cudaEventCreate(&startDevice)); checkCudaErrors(cudaEventCreate(&stopDevice)); //checkCudaErrors(cudaEventRecord(startHost, NULL)); cudaMemcpy(devInput, inputData, filesize*sizeof(uint8_t), cudaMemcpyHostToDevice); //Warm Up for(int i=0; i < 1000 ; i++) { encrypt_Kernel<<<blockNum,threadNum>>>(filesize); } checkCudaErrors(cudaEventRecord(startDevice, NULL)); for(int j=0; j<1000; j++){ //for benchmarking encrypt_Kernel<<<blockNum,threadNum>>>(filesize); } checkCudaErrors(cudaEventRecord(stopDevice, NULL)); //Copy results from host memory to device memory cudaMemcpy(outputData, devOutput, filesize*sizeof(uint8_t), cudaMemcpyDeviceToHost); checkCudaErrors(cudaEventSynchronize(stopDevice)); //Time calculation float Devmsec = 0.0f; checkCudaErrors(cudaEventElapsedTime(&Devmsec, 
startDevice, stopDevice)); Devmsec /= 1000; double throughput = 1.0e-9f*8*filesize/(Devmsec*1.0e-3f); printf("\n GPU processing time: %f (ms)", Devmsec); printf("\n GPU throughput: %f (Gbps)\n", throughput); //Free host memory free(inputData); free(outputData); // Free device memory cudaFree(devInput); cudaFree(devOutput); }
8273551fe4a67f58985f31eabaf9fe00048ea264.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #include<stdlib.h> __global__ void print_from_gpu(void) { printf("Hello World! from thread [%d,%d] \ From device\n", threadIdx.x,blockIdx.x); } int main(void) { printf("Hello World from host!\n"); hipLaunchKernelGGL(( print_from_gpu), dim3(1),dim3(1), 0, 0, ); hipDeviceSynchronize(); return 0; }
8273551fe4a67f58985f31eabaf9fe00048ea264.cu
#include<stdio.h> #include<stdlib.h> __global__ void print_from_gpu(void) { printf("Hello World! from thread [%d,%d] \ From device\n", threadIdx.x,blockIdx.x); } int main(void) { printf("Hello World from host!\n"); print_from_gpu<<<1,1>>>(); cudaDeviceSynchronize(); return 0; }
1b02430b9fbb94ad39ab13ce03a2ddcc7cb87605.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <iostream> #define size 15 using namespace std; __global__ void callOperation(int *a, int *b, int *res, int x, int n) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < n) { res[tid] = a[tid] - (b[tid] * x); } } __global__ void callOperationSharedStatic(int *a, int *b, int *res, int x, int n) { int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid >= n) { return; } __shared__ int s_a[size], s_b[size], s_res[size]; __shared__ int s_x; s_x = x; s_a[tid] = a[tid]; s_b[tid] = b[tid]; s_res[tid] = s_a[tid] - (s_b[tid] * s_x); res[tid] = s_res[tid]; } __global__ void callOperationSharedDynamic(int *a, int *b, int *res, int x, int n) { int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid >= n) { return; } extern __shared__ int arrays[]; __shared__ int s_x; int *s_a = arrays; int *s_b = &s_a[n]; int *s_res = &s_b[n]; s_x = x; s_a[tid] = a[tid]; s_b[tid] = b[tid]; s_res[tid] = s_a[tid] - (s_b[tid] * s_x); res[tid] = s_res[tid]; } int main() { int *a, *b, *res; int x = 10; int *d_a, *d_b, *d_res; a = (int*)malloc(size * sizeof(int)); b = (int*)malloc(size * sizeof(int)); res = (int*)malloc(size * sizeof(int)); for (int i = 0; i < size; i++) { a[i] = i*size; b[i] = -i; } cout << "\n\nNiz A:" << endl; for (int i = 0; i < size; i++) { cout << a[i] << "\t"; } cout << "\n\nNiz B:" << endl; for (int i = 0; i < size; i++) { cout << b[i] << "\t"; } cout << "\n\nSkalar je:" << x << endl; hipMalloc(&d_a, size * sizeof(int)); hipMalloc(&d_b, size * sizeof(int)); hipMalloc(&d_res, size * sizeof(int)); hipMemcpy(d_a, a, size * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_b, b, size * sizeof(int), hipMemcpyHostToDevice); //callOperation << <size / 256 + 1, 256 >> > (d_a, d_b, d_res, x, size); //callOperationSharedStatic << <size / 256 + 1, 256 >> > (d_a, d_b, d_res, x, size); callOperationSharedDynamic << <size / 256 
+ 1, 256, size * sizeof(int) + size * sizeof(int) + size * sizeof(int) >> > (d_a, d_b, d_res, x, size); hipMemcpy(res, d_res, size * sizeof(int), hipMemcpyDeviceToHost); cout << "\n\nNiz Rez:" << endl; for (int i = 0; i < size; i++) { cout << res[i] << "\t"; } hipFree(d_a); hipFree(d_b); hipFree(d_res); free(a); free(b); free(res); hipDeviceReset(); cout << endl; system("PAUSE"); return 0; }
1b02430b9fbb94ad39ab13ce03a2ddcc7cb87605.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <iostream> #define size 15 using namespace std; __global__ void callOperation(int *a, int *b, int *res, int x, int n) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < n) { res[tid] = a[tid] - (b[tid] * x); } } __global__ void callOperationSharedStatic(int *a, int *b, int *res, int x, int n) { int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid >= n) { return; } __shared__ int s_a[size], s_b[size], s_res[size]; __shared__ int s_x; s_x = x; s_a[tid] = a[tid]; s_b[tid] = b[tid]; s_res[tid] = s_a[tid] - (s_b[tid] * s_x); res[tid] = s_res[tid]; } __global__ void callOperationSharedDynamic(int *a, int *b, int *res, int x, int n) { int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid >= n) { return; } extern __shared__ int arrays[]; __shared__ int s_x; int *s_a = arrays; int *s_b = &s_a[n]; int *s_res = &s_b[n]; s_x = x; s_a[tid] = a[tid]; s_b[tid] = b[tid]; s_res[tid] = s_a[tid] - (s_b[tid] * s_x); res[tid] = s_res[tid]; } int main() { int *a, *b, *res; int x = 10; int *d_a, *d_b, *d_res; a = (int*)malloc(size * sizeof(int)); b = (int*)malloc(size * sizeof(int)); res = (int*)malloc(size * sizeof(int)); for (int i = 0; i < size; i++) { a[i] = i*size; b[i] = -i; } cout << "\n\nNiz A:" << endl; for (int i = 0; i < size; i++) { cout << a[i] << "\t"; } cout << "\n\nNiz B:" << endl; for (int i = 0; i < size; i++) { cout << b[i] << "\t"; } cout << "\n\nSkalar je:" << x << endl; cudaMalloc(&d_a, size * sizeof(int)); cudaMalloc(&d_b, size * sizeof(int)); cudaMalloc(&d_res, size * sizeof(int)); cudaMemcpy(d_a, a, size * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_b, b, size * sizeof(int), cudaMemcpyHostToDevice); //callOperation << <size / 256 + 1, 256 >> > (d_a, d_b, d_res, x, size); //callOperationSharedStatic << <size / 256 + 1, 256 >> > (d_a, d_b, d_res, x, size); callOperationSharedDynamic << <size / 256 + 1, 256, size * sizeof(int) + size * sizeof(int) + 
size * sizeof(int) >> > (d_a, d_b, d_res, x, size); cudaMemcpy(res, d_res, size * sizeof(int), cudaMemcpyDeviceToHost); cout << "\n\nNiz Rez:" << endl; for (int i = 0; i < size; i++) { cout << res[i] << "\t"; } cudaFree(d_a); cudaFree(d_b); cudaFree(d_res); free(a); free(b); free(res); cudaDeviceReset(); cout << endl; system("PAUSE"); return 0; }
1d745e3b4c8189b049b1fc491ce9515809f9417d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * ParallelPrunningDaat.cu * * Created on: 08/12/2017 * Author: roussian */ #include "ParallelPrunningDaat.cuh" #include "DeviceFunctions.cuh" #include "UnityTest.cuh" #include <stdio.h> __device__ volatile float globalThreshold = 0.0; __device__ volatile float globalThresholdBatch[500]; __device__ int globalCount=0; __global__ void mergeTopkLists_v3(float *dTopkScoreList, int *iTopkDocList, int iTopk, int iMergeNumber, int iSkipTopkBetweenMerges, int iSkipTopkBetweenBlocks, int iTotalElementos){ // if(blockIdx.x != 74) // return; //Peguei o doc idblock 4 skipBlock 32 skipMerges 16 na posio 18559 ! // if(iSkipTopkBetweenBlocks == 4 && iSkipTopkBetweenMerges == 2) // if(blockIdx.x != 87) // return; __shared__ documentTopkList documentTopkSharedList_1; __shared__ documentTopkList documentTopkSharedList_2; __shared__ documentTopkList documentTopkSharedList_Partial; __shared__ short int halfNumberTopk;// = iTopk >> 1; __shared__ short int iTopkPosition;// = iTopk - 1; // Comea no ndice 0 e vai iTopk - 1 __shared__ short int halfPositionNumberTopk;// = iTopkPosition >> 1; if(THREAD_MASTER){ halfNumberTopk = iTopk >> 1; }else if(THREAD_MASTER_2){ iTopkPosition = iTopk - 1; halfPositionNumberTopk = iTopkPosition >> 1; } int warpIndex = threadIdx.x >> 5; int threadWarpId = threadIdx.x - (warpIndex << 5); // (threadIdx.x & 0x1f); //threadIdx.x % 32; int isOdd = warpIndex & 1; //Verifica se o IdWarp mpar int numberThreadsInList = ((blockDim.x >> 6) << 5); // (#Block/Tamanho da Warp--2) / 2; ---> isso pq metade do # de warps trabalham sobre uma lista warpIndex = warpIndex >> 1; //Isso pois as warps so divididas por impar e par. 
Ento, se o idWarp 5, ento o novo id 2 __syncthreads(); // int proportion = iTopk / blockDim.x; //K mltiplo do numero de threads por bloco int offset = iTopkPosition; //a Posio que cada thread ir inserir o seu elemento //A posio das threads nas listas --- half + (pos. da warp * #threads dentro da warp) + id int indexInMemShared = halfNumberTopk + ( warpIndex << 5) + threadWarpId;//(iTopk >> 1) + ((warpId >> 1) << 5) + threadWarpId;///half + (pos. da warp * #threads na warp) + id float score_1, score_2; float *ownScorePtr, *workListPtr; int *ownDocId; int position; int index_1, index_2, indexLocal; // int isEndPart; //Obtm a posio inicial que a thread ir inserir na lista final. offset -= (iTopkPosition - indexInMemShared ) << 1;//A multiplicao por 2 por causa das duas listas // if(blockIdx.x == 203) // printf("Oi!\n"); __syncthreads(); //As listas esto alinhadas em uma lista, por isso que o indice tem que seguir para //a prxima parte no processada referente ao bloco //Um merge pega 2 listas ou 1 lista + Resultado anterior index_1 = blockIdx.x * iTopk * iSkipTopkBetweenBlocks + threadIdx.x;//blockIdx.x * iTopk * (iMergeNumber + 1) * iSkipBetweenMerge + threadIdx.x; index_2 = index_1 + iTopk * iSkipTopkBetweenMerges;//index_1 + iTopk * iSkipTopkBetweenMerges; // isEndPart = 0;//index_2 > totalElements; indexLocal = threadIdx.x; //O nmero de threads por bloco pode ser menor que K while(indexLocal < iTopk){ documentTopkSharedList_1.id[indexLocal] = iTopkDocList[index_1]; documentTopkSharedList_1.score[indexLocal] = dTopkScoreList[index_1]; // if(iTopkDocList[index_1] == 46517642) // printf("Peguei o doc idblock %d skipBlock %d skipMerges %d na posio %d !\n", // blockIdx.x, iSkipTopkBetweenBlocks,iSkipTopkBetweenMerges,index_1); index_1 += blockDim.x; indexLocal += blockDim.x; } // if(THREAD_MASTER && blockIdx.x == 0 && iSkipTopkBetweenBlocks >= 2048) // printf("idblock %d skipBlock %d skipMerges %d na posio inicial %d %d!\n", // blockIdx.x, 
iSkipTopkBetweenBlocks,iSkipTopkBetweenMerges,blockIdx.x * iTopk * iSkipTopkBetweenBlocks, // blockIdx.x * iTopk * iSkipTopkBetweenBlocks + iTopk * iSkipTopkBetweenMerges); __syncthreads(); // if(THREAD_MASTER && blockIdx.x == 0){ // printf("First List - "); // for (int i = 0; i < iTopk; ++i) { // printf(" %.2f ", documentTopkSharedList_1.score[i]); // } // printf("\n"); // } for (int globalRound = 0; globalRound < iMergeNumber; ++globalRound) { //O nmero de threads por bloco pode ser menor que K indexLocal = threadIdx.x; while(indexLocal < iTopk){ if(index_2 >= iTotalElementos || index_2 < 0){ documentTopkSharedList_2.id[indexLocal] = 0; documentTopkSharedList_2.score[indexLocal] = 0; }else{ documentTopkSharedList_2.id[indexLocal] = iTopkDocList[index_2]; documentTopkSharedList_2.score[indexLocal] = dTopkScoreList[index_2]; // if(iTopkDocList[index_2] == 46517642) // printf("Peguei o doc idblock %d skipBlock %d skipMerges %d na posio %d !\n", // blockIdx.x, iSkipTopkBetweenBlocks,iSkipTopkBetweenMerges,index_2); } index_2 += blockDim.x; indexLocal += blockDim.x; } __syncthreads(); if(!isOdd){//As threads das Warps com ids par trabalham sobre os maiores elementos da mesma posio. do {//Esse bloco de instrues trabalha somente com dados que esto na memria compartilhada score_1 = documentTopkSharedList_1.score[indexInMemShared]; score_2 = documentTopkSharedList_2.score[indexInMemShared]; //Escolhe o maior elemento de uma mesma posio e a lista, a que tiver o menor elemento, que ir pecorrer. 
if(score_1 >= score_2){ ownScorePtr = &score_1; ownDocId = &documentTopkSharedList_1.id[indexInMemShared]; // if(*ownDocId == 46517642){// && blockIdx.x == 5 // printf("1 - blockId %d threadId %d\n", blockIdx.x, threadIdx.x); // } workListPtr = documentTopkSharedList_2.score; //A lista de trabalho sempre a lista do menor elemento } else{ ownScorePtr = &score_2; ownDocId = &documentTopkSharedList_2.id[indexInMemShared]; // if(*ownDocId == 46517642){// && blockIdx.x == 5){ // printf("1.1 - blockId %d threadId %d\n", blockIdx.x, threadIdx.x); // } workListPtr = documentTopkSharedList_1.score; } if(score_1 != score_2){ //Busca atualizar o offset, i.e., procura o 1 elemento maior position = indexInMemShared;//Define a posio incio para fazer as comparaes (//Se workscore est em A[i] e A[i] < B[i], ento A[i] < B[i + (1,2,3...)]) while( (position+1 < iTopk) && (*ownScorePtr > workListPtr[position+1]) ){ offset++; //Ao encontrar um elemento menor, ele ir aumentar a posio que ir inserir o seu elemento position++; } } //Insere os maiores elementos das listas, i.e., os elementos mais a direita da lista dos top-k documentTopkSharedList_Partial.score[offset] = *ownScorePtr; documentTopkSharedList_Partial.id[offset] = *ownDocId; //Redefine as variveis para inicializar outro bloco de dados que est na memria compartilhada indexInMemShared += numberThreadsInList; offset = iTopkPosition - ((iTopkPosition - indexInMemShared ) << 1); //Reinicia o offset } while (indexInMemShared < iTopk); }else{ int count; //Quantos elementos ir buscar; float *ownScoreListPtr; do{//Esse bloco de instruo trabalha somente com dados que esto na memria compartilhada offset--;// o menor elemento entre dois elementos (mesmo ndice) score_1 = documentTopkSharedList_1.score[indexInMemShared]; score_2 = documentTopkSharedList_2.score[indexInMemShared]; //Escolhe o menor elemento de uma mesma posio e a lista, a que tiver o maior elemento, que ir pecorrer. 
if(score_1 < score_2){ ownScorePtr = &score_1; ownDocId = &documentTopkSharedList_1.id[indexInMemShared]; // if(*ownDocId == 46517642){ // printf("2 - blockId %d threadId %d\n", blockIdx.x, threadIdx.x); // } workListPtr = documentTopkSharedList_2.score; ownScoreListPtr = documentTopkSharedList_1.score; } else{//Entra igual(se for igual, entao o score_2 selecionado) ou menor ownScorePtr = &score_2; ownDocId = &documentTopkSharedList_2.id[indexInMemShared]; // if(*ownDocId == 46517642){ // printf("2.1 - blockId %d threadId %d\n", blockIdx.x, threadIdx.x); // } workListPtr = documentTopkSharedList_1.score; ownScoreListPtr = documentTopkSharedList_2.score; } //Duas possibilidades podem ocorrer: (1) O elemento adquirido est entre os k maiores elementos //(2) o elemento no est entre os k maiore elementos if(*ownScorePtr >= workListPtr[halfPositionNumberTopk]){//Compara-se com o elemento que est na metade//if(*ownScorePtr > workListPtr[iTopkPosition >> 1]){//Compara-se com o elemento que est na metade position = indexInMemShared;// - 1; while( (position - 1 > 0) && (*ownScorePtr < workListPtr[position-1]) ){ offset--; position--; } documentTopkSharedList_Partial.score[offset] = *ownScorePtr; documentTopkSharedList_Partial.id[offset] = *ownDocId; }else{ offset -= indexInMemShared - halfNumberTopk;//(iTopk >> 1); Subtrai da metade do nmero das posies e no do ndice mx, pois j ouve uma subtrao do conjunto dos maiores elementos count = halfPositionNumberTopk - offset; //Quantos elementos ir buscar; float *aux; int posWork, posOwn; int *docIdOwn, *docIdWork; if(ownScoreListPtr[iTopkPosition] >= workListPtr[halfPositionNumberTopk] ){ ownScorePtr = &ownScoreListPtr[iTopkPosition]; posOwn = iTopkPosition; posWork = halfPositionNumberTopk; if(ownScoreListPtr == documentTopkSharedList_2.score){ docIdOwn = documentTopkSharedList_2.id; docIdWork = documentTopkSharedList_1.id; }else{ docIdOwn = documentTopkSharedList_1.id; docIdWork = documentTopkSharedList_2.id; } }else{ ownScorePtr 
= &workListPtr[halfPositionNumberTopk];
                aux = ownScoreListPtr;
                ownScoreListPtr = workListPtr;
                workListPtr = aux;
                posOwn = halfPositionNumberTopk;
                posWork = iTopkPosition;
                if(workListPtr == documentTopkSharedList_2.score){
                    docIdWork = documentTopkSharedList_2.id;
                    docIdOwn = documentTopkSharedList_1.id;
                }else{
                    docIdWork = documentTopkSharedList_1.id;
                    docIdOwn = documentTopkSharedList_2.id;
                }
            }
            // Two-pointer merge walking both sorted score lists backwards until
            // `count` elements have been consumed; the survivor is this thread's element.
            while(count > 0){
                while((workListPtr[posWork] <= ownScoreListPtr[posOwn]) && (count > 0)){
                    posOwn--;
                    count--;
                }
                // posOwn++;
                if(count == 0){
                    ownScorePtr = &ownScoreListPtr[posOwn];
                    ownDocId = &docIdOwn[posOwn];
                }else{
                    while((ownScoreListPtr[posOwn] <= workListPtr[posWork]) && count > 0){
                        posWork--;
                        count--;
                    }
                    // posWork++;
                    if(count == 0){
                        ownScorePtr = &workListPtr[posWork];
                        ownDocId = &docIdWork[posWork];
                    }
                }
            }
            documentTopkSharedList_Partial.score[offset] = *ownScorePtr;
            documentTopkSharedList_Partial.id[offset] = *ownDocId;
        }
        indexInMemShared += numberThreadsInList;
        offset = iTopkPosition - ((iTopkPosition - indexInMemShared ) << 1); // reset the offset
        } while(indexInMemShared < iTopk);
    }//IF-ELSE ODD
    __syncthreads();
    // Copy the merged partial result back into list 1 for the next merge round.
    indexLocal = threadIdx.x;
    while(indexLocal < iTopk){
        documentTopkSharedList_1.id[indexLocal] = documentTopkSharedList_Partial.id[indexLocal];
        documentTopkSharedList_1.score[indexLocal] = documentTopkSharedList_Partial.score[indexLocal];
        indexLocal += blockDim.x;
    }
    // -1 because of the advance the threads already performed toward the next
    // top-k document block in the last loop iteration.
    index_2 += iTopk * (iSkipTopkBetweenMerges - 1);
    indexInMemShared = halfNumberTopk + ( warpIndex << 5) + threadWarpId;
    offset = iTopkPosition - ((iTopkPosition - indexInMemShared ) << 1); // reset the offset
//        checkMerge_Sorting_Documents(documentTopkSharedList_Partial, iSkipTopkBetweenMerges, iSkipTopkBetweenBlocks, iTopk);
    }
    __syncthreads();
    // Write the block's merged top-k list (non-zero scores only) back to global memory.
    index_1 = blockIdx.x * iTopk * iSkipTopkBetweenBlocks + threadIdx.x;
    indexLocal = threadIdx.x;
    while(indexLocal < iTopk){
//        if(isEndPart)
//            break;
//
//        if(documentTopkSharedList_Partial.id[indexLocal] == 46517642)
//            printf("Entregando o doc idblock %d skipBlock %d skipMerges %d em %d!\n",
//                    blockIdx.x, iSkipTopkBetweenBlocks,iSkipTopkBetweenMerges,index_1);
        if(documentTopkSharedList_Partial.score[indexLocal] != 0.0){
            iTopkDocList[index_1] = documentTopkSharedList_Partial.id[indexLocal];
            dTopkScoreList[index_1] = documentTopkSharedList_Partial.score[indexLocal];
        }
        indexLocal += blockDim.x;
        index_1 += blockDim.x;
    }
//    __syncthreads();
//
//    if(THREAD_MASTER && blockIdx.x == 0){
//        printf("Final List - ");
//        for (int i = 0; i < iTopk; ++i) {
//            printf(" %.2f ", documentTopkSharedList_Partial.score[i]);
//        }
//        printf("\n");
//    }
}

// WAND top-k matching over a fixed partition of the posting lists.
// One block processes `iGlobalRoundNumber` consecutive slices of every term's
// posting list (DOC_QUANTITY_IN_MEMORY entries per slice, staged in shared memory).
// NOTE(review): assumes blockDim.x >= iTermNumber and that the first iTermNumber
// lanes fit in one warp (fullScore_3_1 / advancePivoTermFinger_4 are warp-scoped) — confirm.
__global__ void matchWandParallel_FIXED_2(const int* iDocIdList, const unsigned short int* iFreqList,
        const float *dUBlist, const float *dIdfList, const int *iDocLenghtList,
        const short int iTermNumber, int *iTopkDocListGlobal, float *dTopkScoreListGlobal,
        const float dAverageDocumentLength, const int iGlobalRoundNumber,// const int iBlockRoundNumber,
        const short int iTopK, const float iInitialThreshold,const int* d_iDocNumberByTermList){
    int count=0;                          // loop-iteration counter (instrumentation)
    __shared__ pivot sharedPivot;
    __shared__ finger fingers[TERM_NUMBER];
    __shared__ postingList postingLists[TERM_NUMBER];
    __shared__ documentTopkList documentTopk;
    __shared__ unsigned int iOrderedTermSharedList[TERM_NUMBER];
    __shared__ int iGlobalInitialPosition;
    __shared__ float score;
    __shared__ bool isValidCandidate;
    int positionInitialInTermPostingList;
    float thresholdLocal = iInitialThreshold;
    int globalIndex;// = iInitialPositionGlobal + threadIdx.x;
    int localIndex;
    if(THREAD_MASTER){
        iGlobalInitialPosition = blockDim.x * blockIdx.x * iGlobalRoundNumber;
        documentTopk.padding = iTopK;
    }
    if(thresholdLocal < globalThreshold)
        thresholdLocal = globalThreshold;
    // Initialize the top-k score/document lists.
    // Assumes Top_K is a multiple of the block size.
    for (localIndex = threadIdx.x; localIndex < iTopK; localIndex += blockDim.x) {
        documentTopk.id[localIndex] = -1;
        documentTopk.score[localIndex] = 0.0;
    }
    __syncthreads();
    for (int globalRound = 0; globalRound < iGlobalRoundNumber; ++globalRound) {
        // Stage this round's slice of every term's posting list into shared memory.
        positionInitialInTermPostingList = 0;
        for (int termIndex = 0; termIndex < iTermNumber; ++termIndex) {
            localIndex = threadIdx.x;
            globalIndex = positionInitialInTermPostingList + iGlobalInitialPosition + localIndex;
            while(localIndex < DOC_QUANTITY_IN_MEMORY){//(globalIndex < d_iDocNumberByTermList[termIndex] && localIndex < DOC_QUANTITY_IN_MEMORY){
                if(globalIndex < d_iDocNumberByTermList[termIndex] + positionInitialInTermPostingList){
                    postingLists[termIndex].docId[localIndex] = iDocIdList[globalIndex];//[positionInitialInTermPostingList + globalIndex];
                    postingLists[termIndex].freq[localIndex] = iFreqList[globalIndex];
                    postingLists[termIndex].docLenght[localIndex] = iDocLenghtList[globalIndex];
                } else{
                    postingLists[termIndex].docId[localIndex] = NO_MORE_DOC;  // padding sentinel
                }
                localIndex += blockDim.x;
                globalIndex += blockDim.x;
            }
            if(THREAD_MASTER){
                fingers[termIndex].docId = postingLists[termIndex].docId[0];
                fingers[termIndex].position = (fingers[termIndex].docId == NO_MORE_DOC) ? NO_VALID_POSITION : 0 ;
            }
            positionInitialInTermPostingList += d_iDocNumberByTermList[termIndex];
        }
        __syncthreads();
//        if(fingers[0].docId == 16563866)
//            printf("Oi!");
        //Sort the terms in non decreasing order of DID
        sortingTerms_2(fingers, iOrderedTermSharedList, iTermNumber);
//        __syncthreads();
        //Select term pivot
        if(THREAD_MASTER){
            selectTermPivot_2(&sharedPivot,iOrderedTermSharedList,fingers,dUBlist,iTermNumber,thresholdLocal);
        }else if(THREAD_MASTER_2){
            score = 0.0;
        }
        __syncthreads();
        int padding;
        int threadIdInWarp = (threadIdx.x & 0x1f);
        // idWarp selects which warp advances the pivot fingers: warp 1 when the
        // block has several warps, otherwise the single warp (labelled 1).
        int idWarp = ((blockDim.x >> 5) == 1 ) ? 1 : threadIdx.x >> 5;
        // Main WAND loop: evaluate/advance until the pivot runs off the term list.
        while((sharedPivot.positionInOrderedList < iTermNumber) && (sharedPivot.idTerm < iTermNumber)){
            if(THREAD_MASTER)
                isValidCandidate = (fingers[sharedPivot.idTerm].docId == fingers[iOrderedTermSharedList[0]].docId);
            count++;
            padding = documentTopk.padding;
            __syncthreads();
            if(isValidCandidate){
                // Full evaluation of the candidate document.
                if(threadIdx.x < iTermNumber){
                    fullScore_3_1(&score, fingers[sharedPivot.idTerm].docId, iOrderedTermSharedList,
                            fingers,postingLists, dIdfList, dAverageDocumentLength);
                }
                __syncthreads();
//                if(padding != 0 || thresholdLocal < score){
                if(thresholdLocal < score){
                    thresholdLocal = managerMinValue_v5(&documentTopk, fingers[sharedPivot.idTerm].docId,score,padding);
//                    thresholdLocal = documentTopk.score[0];
                }
                if(idWarp == 1 && threadIdInWarp < iTermNumber ){
                    advancePivoTermFinger_4(sharedPivot,fingers, postingLists,DOC_QUANTITY_IN_MEMORY,threadIdInWarp);
                }
            } else{
                // Candidate invalid: skip the predecessor terms up to the pivot document.
                advanceDocIdOfPredecessorTerm_4(postingLists, iOrderedTermSharedList,
                        fingers,sharedPivot,fingers[sharedPivot.idTerm].docId, DOC_QUANTITY_IN_MEMORY);
            }
//            if(fingers[0].docId == 16563866)
//                printf("Oi!");
            __syncthreads();
            sortingTerms_2(fingers, iOrderedTermSharedList, iTermNumber);
//            __syncthreads();
            //Select term sharedPivot
            if(THREAD_MASTER){
                selectTermPivot_2(&sharedPivot,iOrderedTermSharedList,fingers,dUBlist,iTermNumber,thresholdLocal);
            }else if(THREAD_MASTER_2){
                score = 0.0;
            }
            __syncthreads();
        }
        if(THREAD_MASTER){
            iGlobalInitialPosition += DOC_QUANTITY_IN_MEMORY;
        }
        // Optional sharing of the pruning threshold across blocks via global memory.
        if (SHAREDTHESHOLD == 1){//SHARED_READ
            if(THREAD_MASTER && documentTopk.padding == 0 && thresholdLocal > globalThreshold){
//                atomicMax(&globalThreshold,thresholdLocal);
                globalThreshold = thresholdLocal;
            }
            if(thresholdLocal < globalThreshold){
                thresholdLocal = globalThreshold;
            }
        }else if (SHAREDTHESHOLD == 2){ //TSHARED_WRITEREAD
            if(THREAD_MASTER && (documentTopk.padding < (iTopK >> 1)) && thresholdLocal > globalThreshold){
//                atomicMaxD(&globalThreshold,thresholdLocal);
                globalThreshold = thresholdLocal;
            }
            if((documentTopk.padding < (iTopK >> 1)))
                if(thresholdLocal < globalThreshold){
                    thresholdLocal = globalThreshold;
                }
        }
        __syncthreads();
    }
    sortLocalTopkDocAndStoreInGlobal(dTopkScoreListGlobal,iTopkDocListGlobal,iTopK,&documentTopk);
    if(THREAD_MASTER)
        atomicAdd(&globalCount,count);
    if(THREAD_MASTER)
        printf("-----%d----", globalCount);
}

// WAND matching, one block per query (blockIdx.x indexes queries), caching a
// sliding window of each term's posting list in shared memory (postings/positionInShared).
__global__ void matchWandParallel_VARIABLE_Batch_Block_3(const int* iDocIdList, const unsigned short int* iFreqList,
        const float *dUBlistGlobal, const float *dIdfListGlobal, const int *iDocLenghtList,
        const short int* iTermNumberByQuery, int *iTopkDocListGlobal, float *dTopkScoreListGlobal,
        const float dAverageDocumentLengthGlobal, const short int iTopK, const float iInitialThreshold,
        const int* iQueryTerms, const long long* ptrInitPostingList, int* ptrQueryPositions,
        int *iDocNumberByTermListGlobal){
    __shared__ int queryPosition;
    __shared__ pivot sharedPivot;
    __shared__ finger fingers[TERM_NUMBER];
    __shared__ documentTopkList documentTopk;
    __shared__ postingList2 postings[TERM_NUMBER];
    __shared__ int positionInShared[TERM_NUMBER];
    __shared__ unsigned int iOrderedTermSharedList[TERM_NUMBER];
    __shared__ long long iSharedPositionInitialInList[TERM_NUMBER];
    __shared__ float dUBlist[TERM_NUMBER];
    __shared__ float dIdfList[TERM_NUMBER];
    __shared__ float dAverageDocumentLength;
    __shared__ int iDocNumberByTermList[TERM_NUMBER];
    __shared__ float score;
    __shared__ bool isValidCandidate;
    __shared__ int docCurrent;
    __shared__ limitDocId limitDoc;
    __shared__ short int iTermNumber;
    // Fix: `count` was uninitialized (undefined behavior on `count++` and in the
    // debug printf below). Sibling kernel matchWandParallel_FIXED_2 initializes it to 0.
    int count = 0;
    int padding;
    float thresholdLocal;// = iInitialThreshold;
    thresholdLocal = iInitialThreshold;
    int globalIndex;// = iInitialPositionGlobal + threadIdx.x;
    int localIndex;
//    int count = 0;
    if(THREAD_MASTER){
        documentTopk.padding = iTopK;
        dAverageDocumentLength = dAverageDocumentLengthGlobal;
        limitDoc.secondMaxDocId = -1;
        iTermNumber = iTermNumberByQuery[blockIdx.x];
    }
    __syncthreads();
    // One lane per query term loads that term's metadata into shared memory.
    // (queryPosition is written by several lanes with the same value — benign race.)
    if(threadIdx.x < iTermNumber){
        queryPosition = ptrQueryPositions[blockIdx.x];
        int idTerm = iQueryTerms[queryPosition + threadIdx.x];
        iDocNumberByTermList[threadIdx.x] = iDocNumberByTermListGlobal[idTerm];
        dUBlist[threadIdx.x] = dUBlistGlobal[idTerm]*1.0;//[threadIdx.x];
        dIdfList[threadIdx.x] = dIdfListGlobal[idTerm];//[threadIdx.x];
        iSharedPositionInitialInList[threadIdx.x] = ptrInitPostingList[idTerm];
        positionInShared[threadIdx.x] = -1;
    }
    // Initialize the top-k score/document lists.
    // Assumes Top_K is a multiple of the block size.
    #pragma unroll 4
    for (localIndex = threadIdx.x; localIndex < iTopK; localIndex += blockDim.x) {
        documentTopk.id[localIndex] = -1;
        documentTopk.score[localIndex] = 0.0;
    }
    // Compute the max/min doc ids and point each finger at its list head.
    if(threadIdx.x < iTermNumber){
        int docAmount = iDocNumberByTermList[threadIdx.x];
        globalIndex = 0;
        int maxDoc;
        if(THREAD_MASTER)
            limitDoc.minDocId = 0;
        globalIndex = docAmount;
        maxDoc = iDocIdList[iSharedPositionInitialInList[threadIdx.x] + globalIndex - 1];
        atomicMax(&(limitDoc.secondMaxDocId), maxDoc);
        fingers[threadIdx.x].docId = iDocIdList[iSharedPositionInitialInList[threadIdx.x]];
        fingers[threadIdx.x].position = iSharedPositionInitialInList[threadIdx.x];
    }
//    __syncthreads();
    // Prefetch the window after each finger into shared memory.
    int pos;
    for (int termId = 0; termId < iTermNumber; ++termId) {
        for (int localIndex = threadIdx.x; localIndex < DOC_QUANTITY_IN_MEMORY; localIndex+=blockDim.x) {
            pos = fingers[termId].position+localIndex+1;
            if(pos < iSharedPositionInitialInList[termId] + iDocNumberByTermList[termId]){
                postings[termId].docId[localIndex] = iDocIdList[pos];
            }else{
                postings[termId].docId[localIndex] = NO_MORE_DOC;
            }
        }
    }
    sortingTerms_2(fingers, iOrderedTermSharedList, iTermNumber);
    if(THREAD_MASTER){
        selectTermPivot_No_SharedMemory(&sharedPivot,iOrderedTermSharedList,fingers,dUBlist,iTermNumber,thresholdLocal);
        docCurrent = (sharedPivot.idTerm != NO_PIVOT_TERM) ? fingers[sharedPivot.idTerm].docId : NO_MORE_DOC;
        score = 0.0;
    }
    __syncthreads();
    while((sharedPivot.positionInOrderedList < iTermNumber) && (sharedPivot.idTerm < iTermNumber)){
        // All threads write the same value — benign shared-memory race.
        isValidCandidate = (fingers[sharedPivot.idTerm].docId == fingers[iOrderedTermSharedList[0]].docId);
        count++;
//        if(count == 2559)
//            printf("Oi");
        if(isValidCandidate){
            // Full evaluation: each of the first iTermNumber lanes scores one term,
            // then a warp shuffle reduces the partial scores.
            if(threadIdx.x < iTermNumber){
                int termId = iOrderedTermSharedList[threadIdx.x];
                float scoreL = 0.0;
                if(fingers[termId].docId == fingers[sharedPivot.idTerm].docId){
                    scoreL = scoreTf_Idf(iFreqList[fingers[termId].position],
                            iDocLenghtList[fingers[termId].position],
                            dIdfList[termId],dAverageDocumentLength,1.0);
                }
                float aux = 0;
                for (int i = 0; i < TERM_NUMBER; ++i) {
                    aux += __shfl_sync(0xFFFFFFFF,scoreL,i);
                }
                if(THREAD_MASTER)
                    score = aux;
//                atomicAdd(&score,scoreL);
            }
            padding = documentTopk.padding;
            __syncthreads();
            /* If the heap is not full the candidate is inserted into the heap.
               If the heap is full and the new score is larger than the minimum score
               in the heap, the new document is inserted into the heap, replacing the
               one with the minimum score. */
            if(padding != 0 || thresholdLocal < score ){
                thresholdLocal = managerMinValue_v5(&documentTopk, fingers[sharedPivot.idTerm].docId, score,padding);
            }
            // Advance every finger that sits on the just-scored document using the
            // shared-memory window.
            for (int idTerm = 0; idTerm < iTermNumber; ++idTerm) {
                if(THREAD_MASTER && positionInShared[idTerm] == -1)
                    fingers[idTerm].position++;
                if(docCurrent == fingers[idTerm].docId){
                    fingers[idTerm].docId = NO_MORE_DOC;
                    int docIdLocal, localIndex=0;
                    for (localIndex = threadIdx.x + positionInShared[idTerm]; localIndex < DOC_QUANTITY_IN_MEMORY; localIndex+=blockDim.x) {
                        docIdLocal = postings[idTerm].docId[localIndex];
                        if(docIdLocal > docCurrent && docIdLocal != NO_MORE_DOC){
                            if(localIndex == 0 || (postings[idTerm].docId[localIndex-1] <= docCurrent)){
                                fingers[idTerm].docId = docIdLocal;
                                fingers[idTerm].position += localIndex - positionInShared[idTerm];
                                positionInShared[idTerm] = localIndex;
                            }
                            break;
                        }
                    }
                }
            }
        }
        else{
            // Skip predecessor terms forward to the pivot document.
            int idTerm;
            for (int j = 0; j < sharedPivot.positionInOrderedList; ++j) {
                // NOTE(review): sibling kernels index by `j` here
                // (iOrderedTermSharedList[j]); using positionInOrderedList makes every
                // iteration touch the same term — verify intent.
                idTerm = iOrderedTermSharedList[sharedPivot.positionInOrderedList];
                // NOTE(review): siblings break on fingers[idTerm] vs the pivot doc;
                // breaking on fingers[threadIdx.x] is per-thread divergent — verify.
                if(docCurrent == fingers[threadIdx.x].docId)
                    break;
                fingers[idTerm].docId = NO_MORE_DOC;
                if(THREAD_MASTER && positionInShared[idTerm] == -1)
                    fingers[idTerm].position++;
                int docIdLocal, localIndex=0;
                for (localIndex = threadIdx.x+positionInShared[idTerm]; localIndex < DOC_QUANTITY_IN_MEMORY; localIndex+=blockDim.x) {
                    docIdLocal = postings[idTerm].docId[localIndex];
                    if(docIdLocal >= docCurrent && docIdLocal != NO_MORE_DOC){
                        if(localIndex == 0 || (postings[idTerm].docId[localIndex-1] < docCurrent)){
                            fingers[idTerm].docId = docIdLocal;
                            fingers[idTerm].position += localIndex - positionInShared[idTerm];
                            positionInShared[idTerm] = localIndex;
                        }
                        break;
                    }
                }
            }
        }
        __syncthreads();
        // Refill the shared window for any term that ran past its cached slice.
        for (int termId = 0; termId < iTermNumber; ++termId) {
            if(fingers[termId].docId == NO_MORE_DOC && fingers[termId].position != NO_VALID_POSITION){
                int pos, localIndex;
                for (localIndex = threadIdx.x; localIndex < DOC_QUANTITY_IN_MEMORY; localIndex+=blockDim.x) {
                    pos = fingers[termId].position+localIndex+1;
                    if(pos < iSharedPositionInitialInList[termId] + iDocNumberByTermList[termId]){
                        postings[termId].docId[localIndex] = iDocIdList[pos];
                    }else{
                        postings[termId].docId[localIndex] = NO_MORE_DOC;
                    }
                }
                if(THREAD_MASTER && postings[termId].docId[0] == NO_MORE_DOC)
                    fingers[termId].position = NO_VALID_POSITION;
                else{
                    fingers[termId].docId = postings[termId].docId[0];
                    positionInShared[termId] = -1;
                    fingers[termId].position = pos;
                }
            }
        }
        __syncthreads();
        //Sort the terms in non decreasing order of DID
        sortingTerms_2(fingers, iOrderedTermSharedList, iTermNumber);
        //Select term pivot
        if(THREAD_MASTER){
            selectTermPivot_No_SharedMemory(&sharedPivot,iOrderedTermSharedList,fingers,dUBlist,iTermNumber,thresholdLocal);
            docCurrent = (sharedPivot.idTerm != NO_PIVOT_TERM) ? fingers[sharedPivot.idTerm].docId : NO_MORE_DOC;
            score = 0.0;
        }
        __syncthreads();
    }
    if(blockIdx.x==499 && THREAD_MASTER)
        printf("-----%d %d----", blockIdx.x, count);
    sortLocalTopkDocAndStoreInGlobal((float*)&(dTopkScoreListGlobal[blockIdx.x*iTopK]),(int*)&(iTopkDocListGlobal[blockIdx.x*iTopK]),iTopK,&documentTopk);
//    if(THREAD_MASTER)
////        atomicAdd(&globalCount,count);
////
//    if
}

// WAND matching, one block per query, reading posting lists straight from global
// memory (no shared window). Queries are visited in the order given by iOrderQueryList.
__global__ void matchWandParallel_VARIABLE_Batch_Block_Test(const int* iDocIdList, const unsigned short int* iFreqList,
        const float *dUBlistGlobal, const float *dIdfListGlobal, const int *iDocLenghtList,
        const short int* iTermNumberByQuery, int *iTopkDocListGlobal, float *dTopkScoreListGlobal,
        const float dAverageDocumentLengthGlobal, const short int iTopK, const float iInitialThreshold,
        const int* iQueryTerms, const long long* ptrInitPostingList, int* ptrQueryPositions,
        int *iDocNumberByTermListGlobal, const int* iOrderQueryList){
    __shared__ int queryPosition;
    __shared__ pivot sharedPivot;
    __shared__ finger fingers[TERM_NUMBER];
    __shared__ documentTopkList documentTopk;
    __shared__ unsigned int iOrderedTermSharedList[TERM_NUMBER];
    __shared__ long long iSharedPositionInitialInList[TERM_NUMBER];
    __shared__ float dUBlist[TERM_NUMBER];
    __shared__ float dIdfList[TERM_NUMBER];
    __shared__ float dAverageDocumentLength;
    __shared__ int iDocNumberByTermList[TERM_NUMBER];
    __shared__ float score;
    __shared__ bool isValidCandidate;
    __shared__ int docCurrent;
    __shared__ limitDocId limitDoc;
    __shared__ short int iTermNumber;
    int padding;
    float thresholdLocal;// = iInitialThreshold;
    thresholdLocal = iInitialThreshold;
    int globalIndex;// = iInitialPositionGlobal + threadIdx.x;
    int localIndex;
//    int count = 0;
    if(THREAD_MASTER){
        documentTopk.padding = iTopK;
        dAverageDocumentLength = dAverageDocumentLengthGlobal;
//    }else if(THREAD_MASTER_2){
//        limitDoc.minDocId = -1;
        limitDoc.secondMaxDocId = -1;
        iTermNumber = iTermNumberByQuery[blockIdx.x];
    }
    __syncthreads();
    // One lane per query term loads that term's metadata into shared memory.
    if(threadIdx.x < iTermNumber){
        queryPosition = ptrQueryPositions[iOrderQueryList[blockIdx.x]];
        int idTerm = iQueryTerms[queryPosition + threadIdx.x];
        iDocNumberByTermList[threadIdx.x] = iDocNumberByTermListGlobal[idTerm];
        dUBlist[threadIdx.x] = dUBlistGlobal[idTerm];//[threadIdx.x];
        dIdfList[threadIdx.x] = dIdfListGlobal[idTerm];//[threadIdx.x];
//        printf(" %.2f ",dUBlist[threadIdx.x]);
        iSharedPositionInitialInList[threadIdx.x] = ptrInitPostingList[idTerm];
    }
    // Initialize the top-k score/document lists.
    // Assumes Top_K is a multiple of the block size.
    #pragma unroll 4
    for (localIndex = threadIdx.x; localIndex < iTopK; localIndex += blockDim.x) {
        documentTopk.id[localIndex] = -1;
        documentTopk.score[localIndex] = 0.0;
    }
    // Compute the max/min doc ids and point each finger at its list head.
    if(threadIdx.x < iTermNumber){
        int docAmount = iDocNumberByTermList[threadIdx.x];
//        fingers[threadIdx.x].final = 0;
//        limitDoc.extraPosition[threadIdx.x] = 0;
        globalIndex = 0;
        int maxDoc;
        if(THREAD_MASTER)
            limitDoc.minDocId = 0;
        globalIndex = docAmount-1;
        maxDoc = iDocIdList[iSharedPositionInitialInList[threadIdx.x] + globalIndex];
        atomicMax(&(limitDoc.secondMaxDocId), maxDoc);
        fingers[threadIdx.x].docId = iDocIdList[iSharedPositionInitialInList[threadIdx.x]];
        fingers[threadIdx.x].position = iSharedPositionInitialInList[threadIdx.x];
    }
    __syncthreads();
    sortingTerms_2(fingers, iOrderedTermSharedList, iTermNumber);
    if(THREAD_MASTER){
        selectTermPivot_No_SharedMemory(&sharedPivot,iOrderedTermSharedList,fingers,dUBlist,iTermNumber,thresholdLocal);
        docCurrent = (sharedPivot.idTerm != NO_PIVOT_TERM) ? fingers[sharedPivot.idTerm].docId : NO_MORE_DOC;
//    }else if(THREAD_MASTER_2){
        score = 0.0;
    }
    __syncthreads();
    while((sharedPivot.positionInOrderedList < iTermNumber) && (sharedPivot.idTerm < iTermNumber)){
        // All threads write the same value — benign shared-memory race.
        isValidCandidate = (fingers[sharedPivot.idTerm].docId == fingers[iOrderedTermSharedList[0]].docId);
//        count++;
//
//        if(fingers[sharedPivot.idTerm].docId == 38182)
//            printf("Oi");
        if(isValidCandidate){
            // Full evaluation via per-lane partial scores + warp shuffle reduction.
            if(threadIdx.x < iTermNumber){
                int termId = iOrderedTermSharedList[threadIdx.x];
                float scoreL = 0.0;
                if(fingers[termId].docId == fingers[sharedPivot.idTerm].docId){
                    scoreL = scoreTf_Idf(iFreqList[fingers[termId].position],
                            iDocLenghtList[fingers[termId].position],
                            dIdfList[termId],dAverageDocumentLength,1.0);
                }
                float aux = 0;
                for (int i = 0; i < TERM_NUMBER; ++i) {
                    aux += __shfl_sync(0xFFFFFFFF,scoreL,i);
                }
                if(THREAD_MASTER)
                    score = aux;
//                atomicAdd(&score,scoreL);
            }
            padding = documentTopk.padding;
            __syncthreads();
            /* If the heap is not full the candidate is inserted into the heap.
               If the heap is full and the new score is larger than the minimum score
               in the heap, the new document is inserted into the heap, replacing the
               one with the minimum score. */
            if(padding != 0 || thresholdLocal < score ){
                thresholdLocal = managerMinValue_v5(&documentTopk, fingers[sharedPivot.idTerm].docId, score,padding);
            }
            // Each lane advances its own finger past the scored pivot document.
            if(threadIdx.x < iTermNumber ){
                int docPivot = fingers[sharedPivot.idTerm].docId;
                if(fingers[threadIdx.x].docId == docPivot){
                    fingers[threadIdx.x].position++;
                    if(fingers[threadIdx.x].position >= (iDocNumberByTermList[threadIdx.x]+iSharedPositionInitialInList[threadIdx.x])){// not valid
                        fingers[threadIdx.x].docId = NO_MORE_DOC;
                        fingers[threadIdx.x].position = NO_VALID_POSITION;
                    }else{
                        fingers[threadIdx.x].docId = iDocIdList[fingers[threadIdx.x].position];
//                        if(fingers[threadIdx.x].docId > limitDoc.secondMaxDocId){
//                            fingers[threadIdx.x].docId = NO_MORE_DOC;
//                            fingers[threadIdx.x].position = NO_VALID_POSITION;
//                        }
                    }
                }
            }
        }
        else{
            // Block-parallel galloping: all threads scan a predecessor term's posting
            // list for the first doc id >= pivot, then reduce the minimum per warp.
            int pivotDoc = docCurrent;
            long long position;
            int docLocal;
            int idTerm;
            for (int j = 0; j < sharedPivot.positionInOrderedList; ++j) {
                idTerm = iOrderedTermSharedList[j];
                if(fingers[idTerm].docId == fingers[sharedPivot.idTerm].docId)// until reaching a finger that points at the pivot document
                    break;
                fingers[idTerm].docId = NO_MORE_DOC;
                position = fingers[idTerm].position + 1 + threadIdx.x;
                docLocal = -1;
                while(position < (iSharedPositionInitialInList[idTerm]+iDocNumberByTermList[idTerm]) && docLocal < pivotDoc){
                    docLocal = iDocIdList[position];
                    position += blockDim.x;
                }
                position -= blockDim.x;
                if(docLocal < pivotDoc || position >= (iSharedPositionInitialInList[idTerm]+iDocNumberByTermList[idTerm])){
                    docLocal = NO_MORE_DOC;
                    position = NO_VALID_POSITION;
                }
//                atomicMin(&(fingers[idTerm].docId) , docLocal);
                int docNeighbor, docAux = docLocal;
                for (int i = 16; i >= 1; i /= 2) {
                    docNeighbor = __shfl_down_sync(0xFFFFFFFF,docAux, i);
                    if(docNeighbor < docAux)
                        docAux = docNeighbor;
                }
                if( ((threadIdx.x & 0x1f) == 0)){
                    atomicMin(&(fingers[idTerm].docId) , docAux);
                }
                __syncthreads();
                if(fingers[idTerm].docId == docLocal){
                    fingers[idTerm].position = position;
                }
            }
        }
        __syncthreads();
        //Sort the terms in non decreasing order of DID
        sortingTerms_2(fingers, iOrderedTermSharedList, iTermNumber);
        //Select term pivot
        if(THREAD_MASTER){
            selectTermPivot_No_SharedMemory(&sharedPivot,iOrderedTermSharedList,fingers,dUBlist,iTermNumber,thresholdLocal);
            docCurrent = (sharedPivot.idTerm != NO_PIVOT_TERM) ? fingers[sharedPivot.idTerm].docId : NO_MORE_DOC;
            score = 0.0;
        }
        __syncthreads();
    }
//    sortLocalTopkDocAndStoreInGlobal((float*)&(dTopkScoreListGlobal[blockIdx.x*iTopK]),(int*)&(iTopkDocListGlobal[blockIdx.x*iTopK]),iTopK,&documentTopk);
    // Deliver this query's (unsorted) top-k heap to global memory.
    int gIndex = blockIdx.x * iTopK + threadIdx.x;
    for (int localIndex = threadIdx.x; localIndex < iTopK; localIndex+=blockDim.x) {
        dTopkScoreListGlobal[gIndex] = documentTopk.score[localIndex];
        iTopkDocListGlobal[gIndex] = documentTopk.id[localIndex];
        gIndex+=blockDim.x;
    }
}

// Same algorithm as matchWandParallel_VARIABLE_Batch_Block_Test, but queries are
// processed in natural order (blockIdx.x directly indexes ptrQueryPositions).
__global__ void matchWandParallel_VARIABLE_Batch_Block_2(const int* iDocIdList, const unsigned short int* iFreqList,
        const float *dUBlistGlobal, const float *dIdfListGlobal, const int *iDocLenghtList,
        const short int* iTermNumberByQuery, int *iTopkDocListGlobal, float *dTopkScoreListGlobal,
        const float dAverageDocumentLengthGlobal, const short int iTopK, const float iInitialThreshold,
        const int* iQueryTerms, const long long* ptrInitPostingList, int* ptrQueryPositions,
        int *iDocNumberByTermListGlobal){
//
//    if(blockIdx.x!=1)
//        return;
    __shared__ int queryPosition;
    __shared__ pivot sharedPivot;
    __shared__ finger fingers[TERM_NUMBER];
    __shared__ documentTopkList documentTopk;
    __shared__ unsigned int iOrderedTermSharedList[TERM_NUMBER];
    __shared__ long long iSharedPositionInitialInList[TERM_NUMBER];
    __shared__ float dUBlist[TERM_NUMBER];
    __shared__ float dIdfList[TERM_NUMBER];
    __shared__ float dAverageDocumentLength;
    __shared__ int iDocNumberByTermList[TERM_NUMBER];
    __shared__ float score;
    __shared__ bool isValidCandidate;
    __shared__ int docCurrent;
    __shared__ limitDocId limitDoc;
    __shared__ short int iTermNumber;
    int padding;
    float thresholdLocal;// = iInitialThreshold;
    thresholdLocal = iInitialThreshold;
    int globalIndex;// = iInitialPositionGlobal + threadIdx.x;
    int localIndex;
//    int count = 0;
    if(THREAD_MASTER){
        documentTopk.padding = iTopK;
        dAverageDocumentLength = dAverageDocumentLengthGlobal;
//    }else if(THREAD_MASTER_2){
//        limitDoc.minDocId = -1;
        limitDoc.secondMaxDocId = -1;
        iTermNumber = iTermNumberByQuery[blockIdx.x];
    }
    __syncthreads();
    // One lane per query term loads that term's metadata into shared memory.
    if(threadIdx.x < iTermNumber){
        queryPosition = ptrQueryPositions[blockIdx.x];
        int idTerm = iQueryTerms[queryPosition + threadIdx.x];
        iDocNumberByTermList[threadIdx.x] = iDocNumberByTermListGlobal[idTerm];
        dUBlist[threadIdx.x] = dUBlistGlobal[idTerm];//[threadIdx.x];
        dIdfList[threadIdx.x] = dIdfListGlobal[idTerm];//[threadIdx.x];
//        printf(" %.2f ",dUBlist[threadIdx.x]);
        iSharedPositionInitialInList[threadIdx.x] = ptrInitPostingList[idTerm];
    }
    // Initialize the top-k score/document lists.
    // Assumes Top_K is a multiple of the block size.
    #pragma unroll 4
    for (localIndex = threadIdx.x; localIndex < iTopK; localIndex += blockDim.x) {
        documentTopk.id[localIndex] = -1;
        documentTopk.score[localIndex] = 0.0;
    }
    // Compute the max/min doc ids and point each finger at its list head.
    if(threadIdx.x < iTermNumber){
        int docAmount = iDocNumberByTermList[threadIdx.x];
//        fingers[threadIdx.x].final = 0;
//        limitDoc.extraPosition[threadIdx.x] = 0;
        globalIndex = 0;
        int maxDoc;
        if(THREAD_MASTER)
            limitDoc.minDocId = 0;
        globalIndex = docAmount-1;
        maxDoc = iDocIdList[iSharedPositionInitialInList[threadIdx.x] + globalIndex];
        atomicMax(&(limitDoc.secondMaxDocId), maxDoc);
        fingers[threadIdx.x].docId = iDocIdList[iSharedPositionInitialInList[threadIdx.x]];
        fingers[threadIdx.x].position = iSharedPositionInitialInList[threadIdx.x];
    }
    __syncthreads();
    sortingTerms_2(fingers, iOrderedTermSharedList, iTermNumber);
    if(THREAD_MASTER){
        selectTermPivot_No_SharedMemory(&sharedPivot,iOrderedTermSharedList,fingers,dUBlist,iTermNumber,thresholdLocal);
        docCurrent = (sharedPivot.idTerm != NO_PIVOT_TERM) ? fingers[sharedPivot.idTerm].docId : NO_MORE_DOC;
//    }else if(THREAD_MASTER_2){
        score = 0.0;
    }
    __syncthreads();
    while((sharedPivot.positionInOrderedList < iTermNumber) && (sharedPivot.idTerm < iTermNumber)){
        // All threads write the same value — benign shared-memory race.
        isValidCandidate = (fingers[sharedPivot.idTerm].docId == fingers[iOrderedTermSharedList[0]].docId);
//        count++;
//
//        if(fingers[sharedPivot.idTerm].docId == 38182)
//            printf("Oi");
        if(isValidCandidate){
            // Full evaluation via per-lane partial scores + warp shuffle reduction.
            if(threadIdx.x < iTermNumber){
                int termId = iOrderedTermSharedList[threadIdx.x];
                float scoreL = 0.0;
                if(fingers[termId].docId == fingers[sharedPivot.idTerm].docId){
                    scoreL = scoreTf_Idf(iFreqList[fingers[termId].position],
                            iDocLenghtList[fingers[termId].position],
                            dIdfList[termId],dAverageDocumentLength,1.0);
                }
                float aux = 0;
                for (int i = 0; i < TERM_NUMBER; ++i) {
                    aux += __shfl_sync(0xFFFFFFFF,scoreL,i);
                }
                if(THREAD_MASTER)
                    score = aux;
//                atomicAdd(&score,scoreL);
            }
            padding = documentTopk.padding;
            __syncthreads();
            /* If the heap is not full the candidate is inserted into the heap.
               If the heap is full and the new score is larger than the minimum score
               in the heap, the new document is inserted into the heap, replacing the
               one with the minimum score. */
            if(padding != 0 || thresholdLocal < score ){
                thresholdLocal = managerMinValue_v5(&documentTopk, fingers[sharedPivot.idTerm].docId, score,padding);
            }
            // Each lane advances its own finger past the scored pivot document.
            if(threadIdx.x < iTermNumber ){
                int docPivot = fingers[sharedPivot.idTerm].docId;
                if(fingers[threadIdx.x].docId == docPivot){
                    fingers[threadIdx.x].position++;
                    if(fingers[threadIdx.x].position >= (iDocNumberByTermList[threadIdx.x]+iSharedPositionInitialInList[threadIdx.x])){// not valid
                        fingers[threadIdx.x].docId = NO_MORE_DOC;
                        fingers[threadIdx.x].position = NO_VALID_POSITION;
                    }else{
                        fingers[threadIdx.x].docId = iDocIdList[fingers[threadIdx.x].position];
//                        if(fingers[threadIdx.x].docId > limitDoc.secondMaxDocId){
//                            fingers[threadIdx.x].docId = NO_MORE_DOC;
//                            fingers[threadIdx.x].position = NO_VALID_POSITION;
//                        }
                    }
                }
            }
        }
        else{
            // Block-parallel galloping for predecessor terms, minimum reduced per warp.
            int pivotDoc = docCurrent;
            long long position;
            int docLocal;
            int idTerm;
            for (int j = 0; j < sharedPivot.positionInOrderedList; ++j) {
                idTerm = iOrderedTermSharedList[j];
                if(fingers[idTerm].docId == fingers[sharedPivot.idTerm].docId)// until reaching a finger that points at the pivot document
                    break;
                fingers[idTerm].docId = NO_MORE_DOC;
                position = fingers[idTerm].position + 1 + threadIdx.x;
                docLocal = -1;
                while(position < (iSharedPositionInitialInList[idTerm]+iDocNumberByTermList[idTerm]) && docLocal < pivotDoc){
                    docLocal = iDocIdList[position];
                    position += blockDim.x;
                }
                position -= blockDim.x;
                if(docLocal < pivotDoc || position >= (iSharedPositionInitialInList[idTerm]+iDocNumberByTermList[idTerm])){
                    docLocal = NO_MORE_DOC;
                    position = NO_VALID_POSITION;
                }
//                atomicMin(&(fingers[idTerm].docId) , docLocal);
                int docNeighbor, docAux = docLocal;
                for (int i = 16; i >= 1; i /= 2) {
                    docNeighbor = __shfl_down_sync(0xFFFFFFFF,docAux, i);
                    if(docNeighbor < docAux)
                        docAux = docNeighbor;
                }
                if( ((threadIdx.x & 0x1f) == 0)){
                    atomicMin(&(fingers[idTerm].docId) , docAux);
                }
                __syncthreads();
                if(fingers[idTerm].docId == docLocal){
                    fingers[idTerm].position = position;
                }
            }
        }
        __syncthreads();
        //Sort the terms in non decreasing order of DID
        sortingTerms_2(fingers, iOrderedTermSharedList, iTermNumber);
        //Select term pivot
        if(THREAD_MASTER){
            selectTermPivot_No_SharedMemory(&sharedPivot,iOrderedTermSharedList,fingers,dUBlist,iTermNumber,thresholdLocal);
            docCurrent = (sharedPivot.idTerm != NO_PIVOT_TERM) ? fingers[sharedPivot.idTerm].docId : NO_MORE_DOC;
            score = 0.0;
        }
        __syncthreads();
    }
//    sortLocalTopkDocAndStoreInGlobal((float*)&(dTopkScoreListGlobal[blockIdx.x*iTopK]),(int*)&(iTopkDocListGlobal[blockIdx.x*iTopK]),iTopK,&documentTopk);
    // Deliver this query's (unsorted) top-k heap to global memory.
    int gIndex = blockIdx.x * iTopK + threadIdx.x;
    for (int localIndex = threadIdx.x; localIndex < iTopK; localIndex+=blockDim.x) {
        dTopkScoreListGlobal[gIndex] = documentTopk.score[localIndex];
        iTopkDocListGlobal[gIndex] = documentTopk.id[localIndex];
        gIndex+=blockDim.x;
    }
//    if(THREAD_MASTER)
////        atomicAdd(&globalCount,count);
//
//    if(THREAD_MASTER)
//        printf("-----%d %d----", blockIdx.x, count);
}

// Batched per-block WAND variant with shared-memory posting windows.
// NOTE(review): the leading `blockIdx.x != 4999` early-return pins execution to a
// single block — this looks like debug scaffolding left enabled; confirm before use.
__global__ void matchWandParallel_VARIABLE_Batch_Block(const int* iDocIdList, const unsigned short int* iFreqList,
        const float *dUBlist, const float *dIdfList, const int *iDocLenghtList,
        const short int *iTermNumberByQuery, int *iTopkDocListGlobal, float *dTopkScoreListGlobal,
        const float dAverageDocumentLength, const int iBlockRoundNumber, const int iGlobalRoundNumber,
        const short int iTopK, const float iInitialThreshold, const int* iQueryTerms,
        const long long* ptrPostingPositions, int* ptrQueryPositions, int *iDocNumberByTermList){
    if(blockIdx.x != 4999)
        return;
    __shared__ short int iTermNumber;
    __shared__ pivot sharedPivot;
    __shared__ finger fingers[TERM_NUMBER];
    __shared__ postingList postingLists[TERM_NUMBER];
    __shared__ documentTopkList documentTopk;
    __shared__ long long ptrPostingPositionShared[TERM_NUMBER];
    __shared__ int iDocNumberByTermListShared[TERM_NUMBER];
    __shared__ int queryPosition;
    __shared__ unsigned int iOrderedTermSharedList[TERM_NUMBER];
//    __shared__ int iGlobalInitialPositionInList;
    __shared__ unsigned short int iElementQuantityByBlock;
    __shared__ float score;
    __shared__ bool isValidCandidate;
//    __shared__ short int needSearchDocRange[TERM_NUMBER];
    __shared__ limitDocId limitDoc;
    float thresholdLocal;// = iInitialThreshold;
    thresholdLocal = iInitialThreshold;
    int globalIndex = 0;// = iInitialPositionGlobal + threadIdx.x;
    int localIndex;
//    long long positionInitialInTermPostingList;//int positionInitialInTermPostingList;
    if(thresholdLocal < globalThreshold)
        thresholdLocal = globalThreshold;
    if(THREAD_MASTER){
        documentTopk.padding = iTopK;
        // Fix: was iTermNumberByQuery[blockDim.x] — the per-query term-count array is
        // indexed by query (blockIdx.x) in every sibling kernel; blockDim.x is the
        // thread count and would read the wrong (or out-of-range) entry.
        iTermNumber = iTermNumberByQuery[blockIdx.x];
    }else if(THREAD_MASTER_2){
        iElementQuantityByBlock = DOC_QUANTITY_IN_MEMORY;//iBlockRoundNumber * DOC_QUANTITY_IN_MEMORY;
//        iGlobalInitialPositionInList = 0;//iElementQuantityByBlock * blockIdx.x * iGlobalRoundNumber;
    }
    // Initialize the top-k score/document lists.
    // Assumes Top_K is a multiple of the block size.
    for (localIndex = threadIdx.x; localIndex < iTopK; localIndex += blockDim.x) {
        documentTopk.id[localIndex] = -1;
        documentTopk.score[localIndex] = 0.0;
    }
//    if(THREAD_MASTER) documentTopk.padding = iTopK;
    __syncthreads();
    // Compute the max/min doc ids.
    if(threadIdx.x < iTermNumber){
        limitDoc.extraPosition[threadIdx.x] = 0;
        // Fix: was ptrQueryPositions[blockDim.x] — query positions are indexed by
        // query id (blockIdx.x) in every sibling kernel.
        queryPosition = ptrQueryPositions[blockIdx.x];
        iDocNumberByTermListShared[threadIdx.x] = iDocNumberByTermList[iQueryTerms[queryPosition + threadIdx.x]];
        ptrPostingPositionShared[threadIdx.x] = ptrPostingPositions[iQueryTerms[queryPosition+threadIdx.x]];
        int docAmount = iDocNumberByTermListShared[threadIdx.x];//iDocNumberByTermList[threadIdx.x];
//        globalIndex = iGlobalInitialPositionInList;
        int aux, maxDoc;
        int maxNeighbor;
        if(THREAD_MASTER)
            limitDoc.minDocId = 0;
//        int isTail = globalIndex < docAmount;
//        globalIndex += iElementQuantityByBlock * iGlobalRoundNumber;
//        isTail &= globalIndex >= docAmount;
        globalIndex = docAmount - 1;
//        int isTail = iElementQuantityByBlock >= docAmount;
//
//        if(isTail){
//            globalIndex = iGlobalInitialPositionInList + (docAmount - iGlobalInitialPositionInList - 1);
//        }
//        maxDoc = (isTail || globalIndex < docAmount) ? iDocIdList[ptrPostingPositionShared[threadIdx.x] + globalIndex] - 1 :
//                -1;
        maxDoc = iDocIdList[ptrPostingPositionShared[threadIdx.x] + globalIndex];
        aux = maxDoc;
        // Warp-shuffle maximum over the first iTermNumber lanes.
        for (int i = 1; i < iTermNumber; ++i) {
            maxNeighbor = __shfl_sync(0xFFFFFFFF,aux,i);
            if(maxNeighbor > maxDoc)
                maxDoc = maxNeighbor;
        }
        if(THREAD_MASTER)
            limitDoc.secondMaxDocId = maxDoc;
    }
//    __syncthreads();
//
//    // Search document range;
//    for (int internTermId = 0; internTermId < iTermNumber; ++internTermId) {
//        if(needSearchDocRange[internTermId])
//            searchRangeOfDocs_batch(iDocIdList,postingLists, internTermId, iGlobalInitialPositionInList,
//                    &limitDoc,iElementQuantityByBlock,iGlobalRoundNumber,
//                    iDocNumberByTermListShared[internTermId], ptrPostingPositionShared[internTermId]);
//    }
    __syncthreads();
    // Fill shared memory with the head of every term's posting list.
//    positionInitialInTermPostingList = 0;
    int docLocal, docAmount;
    for (int termId = 0; termId < iTermNumber; ++termId) {
//        globalIndex = iGlobalInitialPositionInList + limitDoc.extraPosition[termId] + threadIdx.x;
        globalIndex = threadIdx.x;
        docAmount = iDocNumberByTermListShared[termId];
        docLocal = -1;
        for (localIndex = threadIdx.x; localIndex < iElementQuantityByBlock; localIndex+=blockDim.x) {
            docLocal = (globalIndex < docAmount) ? iDocIdList[ptrPostingPositionShared[termId] + globalIndex] : NO_MORE_DOC;
            // NOTE(review): when globalIndex == docAmount this falls through and reads
            // docLenght/freq one past the end of the list — `>=` looks intended; confirm.
            if(globalIndex > docAmount){
                postingLists[termId].docId[localIndex] = NO_MORE_DOC;
                fingers[termId].final = 1;
                break;
            }
            postingLists[termId].docId[localIndex] = docLocal;
            postingLists[termId].docLenght[localIndex] = iDocLenghtList[ptrPostingPositionShared[termId] + globalIndex];
            postingLists[termId].freq[localIndex] = iFreqList[ptrPostingPositionShared[termId] + globalIndex];
            globalIndex += blockDim.x;
        }
//        positionInitialInTermPostingList += iDocNumberByTermList[termId];
    }
//    __syncthreads();
    if(threadIdx.x < iTermNumber){
        fingers[threadIdx.x].docId = postingLists[threadIdx.x].docId[0];
        fingers[threadIdx.x].position = (fingers[threadIdx.x].docId == NO_MORE_DOC) ? NO_VALID_POSITION : 0;
        // NOTE(review): fingers[].final may be read here before ever being written
        // (it is only set inside the fill loop's tail branch) — confirm initialization.
        fingers[threadIdx.x].final = (fingers[threadIdx.x].final == 1) ? 1 : 0;
    }
    __syncthreads();
    __shared__ int docCurrent;
    sortingTerms_2(fingers, iOrderedTermSharedList, iTermNumber);
//    __syncthreads();
    if(THREAD_MASTER){
        selectTermPivot_2(&sharedPivot,iOrderedTermSharedList,fingers,dUBlist,iTermNumber,thresholdLocal);
        docCurrent = (sharedPivot.idTerm != NO_PIVOT_TERM) ? fingers[sharedPivot.idTerm].docId : NO_MORE_DOC;
    }else if(THREAD_MASTER_2){
        score = 0.0;
    }
    int padding;
    int threadIdInWarp = (threadIdx.x & 0x1f);
    int idWarp = ((blockDim.x >> 5) == 1 ) ? 1 : threadIdx.x >> 5;
    __syncthreads();
    while((sharedPivot.positionInOrderedList < iTermNumber) && (sharedPivot.idTerm < iTermNumber)){
        if(THREAD_MASTER)
            isValidCandidate = (fingers[sharedPivot.idTerm].docId == fingers[iOrderedTermSharedList[0]].docId);
        __syncthreads();
        if(isValidCandidate){
            if(threadIdx.x < iTermNumber){
                fullScore_3_1(&score, fingers[sharedPivot.idTerm].docId, iOrderedTermSharedList,
                        fingers,postingLists, dIdfList, dAverageDocumentLength);
            }
            padding = documentTopk.padding;
            __syncthreads();
            if(thresholdLocal < score){
                thresholdLocal = managerMinValue_v5(&documentTopk, docCurrent, score,padding);
            }
            if(idWarp == 1 && threadIdInWarp < iTermNumber ){
                advancePivoTermFinger_4(sharedPivot,fingers, postingLists,iElementQuantityByBlock,threadIdInWarp);
            }
        }
        else{
            advanceDocIdOfPredecessorTerm_4(postingLists, iOrderedTermSharedList,
                    fingers,sharedPivot,fingers[sharedPivot.idTerm].docId, iElementQuantityByBlock);
        }
        __syncthreads();
        // Refill exhausted shared windows from global memory.
        for (int termId = 0; termId < iTermNumber; ++termId) {
            if(fingers[termId].docId == NO_MORE_DOC && fingers[termId].final == 0){
//                searchMoreDocs_batch(iDocIdList,iFreqList,iDocLenghtList,postingLists,
//                        termId,iGlobalInitialPositionInList,&limitDoc,
//                        iElementQuantityByBlock,&(fingers[termId]),docCurrent,
//                        iDocNumberByTermListShared[termId],ptrPostingPositionShared[termId]);
                searchMoreDocs_batch(iDocIdList,iFreqList,iDocLenghtList,postingLists,
                        termId, 0, &limitDoc,
                        iElementQuantityByBlock,&(fingers[termId]),docCurrent,
                        iDocNumberByTermListShared[termId],ptrPostingPositionShared[termId]);
//
////
//                if(THREAD_MASTER && (documentTopk.padding < (iTopK >> 1)) && thresholdLocal > thresholdGlobal){
////                if(THREAD_MASTER && documentTopk.padding == 0 && thresholdLocal > thresholdGlobal){
//                    atomicMaxD(&thresholdGlobal,thresholdLocal);
//                }
//
//                if((documentTopk.padding < (iTopK >> 1)))
//                    if(thresholdLocal < thresholdGlobal){
//                        thresholdLocal = thresholdGlobal;
//                    }
            }
        }
//        __syncthreads(); // maybe unnecessary
        //Sort the terms in non decreasing order of DID
        sortingTerms_2(fingers, iOrderedTermSharedList, iTermNumber);
//        __syncthreads(); // maybe unnecessary
        //Select term pivot
        if(THREAD_MASTER){
            selectTermPivot_2(&sharedPivot,iOrderedTermSharedList,fingers,dUBlist,iTermNumber,thresholdLocal);
            docCurrent = (sharedPivot.idTerm != NO_PIVOT_TERM) ? fingers[sharedPivot.idTerm].docId : NO_MORE_DOC;
            score = 0.0;
        }
        __syncthreads();
    }
//    if(THREAD_MASTER){
//        int max = iTopK - documentTopk.padding;
//        int i = 0;
//        while(i > max){
//            if(2*i+2 < max)
//                if(documentTopk.score[i] > documentTopk.score[2*i+2])
//                    printf("ERRADO!!!\n");
//
//            if(2*i+1 < max)
//                if(documentTopk.score[i] > documentTopk.score[2*i+1])
//                    printf("ERRADO!!!\n");
//
//            i++;
//        }}
//    __syncthreads();
    sortLocalTopkDocAndStoreInGlobal_BLOCK(dTopkScoreListGlobal,iTopkDocListGlobal,iTopK,&documentTopk);
//    globalIndex = iTopK * blockIdx.x + threadIdx.x + documentTopk.padding;
//    for (localIndex = threadIdx.x; localIndex < (iTopK - documentTopk.padding) ; localIndex += blockDim.x) {
//        iTopkDocListGlobal[globalIndex] = documentTopk.id[localIndex];
//        dTopkScoreListGlobal[globalIndex] = documentTopk.score[localIndex];
//        globalIndex += blockDim.x;
//    }
//    __syncthreads();
}

// Batched WAND variant for a single query (idQuery); continues past this chunk.
__global__ void matchWandParallel_BATCH(const int* iDocIdList, const unsigned short int* iFreqList,
        const float *dUBlist, const float *dIdfList, const int *iDocLenghtList,
        const short int iTermNumber, int *iTopkDocListGlobal, float *dTopkScoreListGlobal,
        const float dAverageDocumentLength, const int iBlockRoundNumber, const int iGlobalRoundNumber,
        const short int iTopK, const float iInitialThreshold, const int* iQueryTerms,
        const long long* ptrInitPostingList, int* ptrQueryPositions, int idQuery,int *iDocNumberByTermList){
    __shared__ pivot sharedPivot;
    __shared__ finger fingers[TERM_NUMBER];
    __shared__ postingList postingLists[TERM_NUMBER];
    __shared__ documentTopkList documentTopk;
    __shared__ long long ptrInitPostingListShared[TERM_NUMBER];
    __shared__ int iDocNumberByTermListShared[TERM_NUMBER];
    __shared__ int queryPosition;
    __shared__ unsigned int iOrderedTermSharedList[TERM_NUMBER];
    __shared__ int iGlobalInitialPositionInList;
    __shared__ unsigned short int iElementQuantityByBlock;
    __shared__ float score;
    __shared__ bool isValidCandidate;
    __shared__ short int needSearchDocRange[TERM_NUMBER];
    __shared__ limitDocId limitDoc;
    float thresholdLocal;// = iInitialThreshold;
    thresholdLocal = iInitialThreshold;
    int globalIndex;// = iInitialPositionGlobal + threadIdx.x;
    int localIndex;
//    long long positionInitialInTermPostingList;//int positionInitialInTermPostingList;
    if(thresholdLocal < globalThreshold)
        thresholdLocal = globalThreshold;
    if(THREAD_MASTER){
        documentTopk.padding = iTopK;
    }else if(THREAD_MASTER_2){
        iElementQuantityByBlock = DOC_QUANTITY_IN_MEMORY;//iBlockRoundNumber * DOC_QUANTITY_IN_MEMORY;
        iGlobalInitialPositionInList = iElementQuantityByBlock * blockIdx.x * iGlobalRoundNumber;
    }
#ifdef DEBUG
    if(THREAD_MASTER_2)
        if(iGlobalInitialPositionInList < 0)
            printf("Opa!!!!");
#endif
    // Initialize the top-k score/document lists.
    // Assumes Top_K is a multiple of the block size.
    for (localIndex = threadIdx.x; localIndex < iTopK; localIndex += blockDim.x) {
        documentTopk.id[localIndex] = -1;
        documentTopk.score[localIndex] = 0.0;
    }
//    if(THREAD_MASTER) documentTopk.padding = iTopK;
//    __syncthreads();
    // Compute the max/min doc ids.
    if(threadIdx.x < iTermNumber){
        limitDoc.extraPosition[threadIdx.x] = 0;
        queryPosition = ptrQueryPositions[idQuery];
        iDocNumberByTermListShared[threadIdx.x] = iDocNumberByTermList[iQueryTerms[queryPosition + threadIdx.x]];
        ptrInitPostingListShared[threadIdx.x] = ptrInitPostingList[iQueryTerms[queryPosition + threadIdx.x]];
        int docAmount = iDocNumberByTermListShared[threadIdx.x];//iDocNumberByTermList[threadIdx.x];
        globalIndex = iGlobalInitialPositionInList;
//        positionInitialInTermPostingList = 0;
//
//        for (int i = 0; i < threadIdx.x; ++i) {
//            positionInitialInTermPostingList += 
iDocNumberByTermList[iQueryTerms[i]];//iDocNumberByTermList[i]; // } // positionInitialInTermPostingList = ptrPostingPositionShared[threadIdx.x]; int aux, maxDoc; int maxNeighbor; if(blockIdx.x != 0){ int maxDoc = (globalIndex < docAmount) ? iDocIdList[ptrInitPostingListShared[threadIdx.x] + globalIndex - 1] : -1; maxDoc++; aux = maxDoc; atomicMax(&limitDoc.minDocId, maxDoc); // for (int i = 1; i < iTermNumber; ++i) { // maxNeighbor = __shfl(aux,i); // if(maxNeighbor > maxDoc) // maxDoc = maxNeighbor; // } // // if(THREAD_MASTER) limitDoc.minDocId = maxDoc; //atomicExch(&(limitDoc.minDocId), maxDoc); if(aux < limitDoc.minDocId && aux != 0){ needSearchDocRange[threadIdx.x] = 1; limitDoc.extraPosition[threadIdx.x] = NO_MORE_DOC; } }else if(THREAD_MASTER) limitDoc.minDocId = 0; int isTail = globalIndex < docAmount; globalIndex += iElementQuantityByBlock * iGlobalRoundNumber; isTail &= globalIndex >= docAmount; if(isTail){ globalIndex = iGlobalInitialPositionInList + (docAmount - iGlobalInitialPositionInList - 1); } maxDoc = (isTail || globalIndex < docAmount) ? 
iDocIdList[ptrInitPostingListShared[threadIdx.x] + globalIndex] : -1; aux = maxDoc; for (int i = 1; i < iTermNumber; ++i) { maxNeighbor = __shfl_down_sync(0xFFFFFFFF,aux,i); if(maxNeighbor > maxDoc) maxDoc = maxNeighbor; } if(THREAD_MASTER) limitDoc.secondMaxDocId = maxDoc; } __syncthreads(); //Busca faixa de documentos; for (int internTermId = 0; internTermId < iTermNumber; ++internTermId) { if(needSearchDocRange[internTermId]) searchRangeOfDocs_batch(iDocIdList,postingLists, internTermId, iGlobalInitialPositionInList, &limitDoc,iElementQuantityByBlock,iGlobalRoundNumber, iDocNumberByTermListShared[internTermId], ptrInitPostingListShared[internTermId]); } __syncthreads(); //Preenche a memria compartilhada // positionInitialInTermPostingList = 0; int docLocal, docAmount; for (int termId = 0; termId < iTermNumber; ++termId) { globalIndex = iGlobalInitialPositionInList + limitDoc.extraPosition[termId] + threadIdx.x; docAmount = iDocNumberByTermListShared[termId]; docLocal = -1; for (localIndex = threadIdx.x; localIndex < iElementQuantityByBlock; localIndex+=blockDim.x) { docLocal = (globalIndex < docAmount) ? iDocIdList[ptrInitPostingListShared[termId] + globalIndex] : NO_MORE_DOC; if(docLocal > limitDoc.secondMaxDocId || globalIndex > docAmount){ postingLists[termId].docId[localIndex] = NO_MORE_DOC; fingers[termId].final = 1; break; } postingLists[termId].docId[localIndex] = docLocal; postingLists[termId].docLenght[localIndex] = iDocLenghtList[ptrInitPostingListShared[termId] + globalIndex]; postingLists[termId].freq[localIndex] = iFreqList[ptrInitPostingListShared[termId] + globalIndex]; globalIndex += blockDim.x; } // positionInitialInTermPostingList += iDocNumberByTermList[termId]; } // __syncthreads(); if(threadIdx.x < iTermNumber){ fingers[threadIdx.x].docId = postingLists[threadIdx.x].docId[0]; fingers[threadIdx.x].position = (fingers[threadIdx.x].docId == NO_MORE_DOC) ? 
NO_VALID_POSITION : 0; fingers[threadIdx.x].final = 0; } __syncthreads(); __shared__ int docCurrent; sortingTerms_2(fingers, iOrderedTermSharedList, iTermNumber); // __syncthreads(); if(THREAD_MASTER){ selectTermPivot_2(&sharedPivot,iOrderedTermSharedList,fingers,dUBlist,iTermNumber,thresholdLocal); docCurrent = (sharedPivot.idTerm != NO_PIVOT_TERM) ? fingers[sharedPivot.idTerm].docId : NO_MORE_DOC; }else if(THREAD_MASTER_2){ score = 0.0; } int padding; int threadIdInWarp = (threadIdx.x & 0x1f); int idWarp = ((blockDim.x >> 5) == 1 ) ? 1 : threadIdx.x >> 5; __syncthreads(); while((sharedPivot.positionInOrderedList < iTermNumber) && (sharedPivot.idTerm < iTermNumber)){ if(THREAD_MASTER) isValidCandidate = (fingers[sharedPivot.idTerm].docId == fingers[iOrderedTermSharedList[0]].docId); __syncthreads(); if(isValidCandidate){ if(threadIdx.x < iTermNumber){ fullScore_3_1(&score, fingers[sharedPivot.idTerm].docId, iOrderedTermSharedList, fingers,postingLists, dIdfList, dAverageDocumentLength); } padding = documentTopk.padding; __syncthreads(); if(thresholdLocal < score){ thresholdLocal = managerMinValue_v5(&documentTopk, docCurrent, score,padding); } if(idWarp == 1 && threadIdInWarp < iTermNumber ){ advancePivoTermFinger_4(sharedPivot,fingers, postingLists,iElementQuantityByBlock,threadIdInWarp); } } else{ advanceDocIdOfPredecessorTerm_4(postingLists, iOrderedTermSharedList, fingers,sharedPivot,fingers[sharedPivot.idTerm].docId, iElementQuantityByBlock); } __syncthreads(); for (int termId = 0; termId < iTermNumber; ++termId) { if(fingers[termId].docId == NO_MORE_DOC && fingers[termId].final == 0){ searchMoreDocs_batch(iDocIdList,iFreqList,iDocLenghtList,postingLists, termId,iGlobalInitialPositionInList,&limitDoc, iElementQuantityByBlock,&(fingers[termId]),docCurrent, iDocNumberByTermListShared[termId],ptrInitPostingListShared[termId]); // // ////// // if(THREAD_MASTER && (documentTopk.padding < (iTopK >> 1)) && thresholdLocal > thresholdGlobal){ //// if(THREAD_MASTER && 
// (continuation of a commented-out shared-global-threshold experiment)
//        documentTopk.padding == 0 && thresholdLocal > thresholdGlobal){
//            atomicMaxD(&thresholdGlobal,thresholdLocal);
//        }
//
//        if((documentTopk.padding < (iTopK >> 1)))
//            if(thresholdLocal < thresholdGlobal){
//                thresholdLocal = thresholdGlobal;
//            }
            }
        }
        // __syncthreads(); // maybe not needed
        // Sort the terms in non-decreasing order of DID
        sortingTerms_2(fingers, iOrderedTermSharedList, iTermNumber);
        // __syncthreads(); // maybe not needed
        // Select the pivot term
        if(THREAD_MASTER){
            selectTermPivot_2(&sharedPivot,iOrderedTermSharedList,fingers,dUBlist,iTermNumber,thresholdLocal);
            docCurrent = (sharedPivot.idTerm != NO_PIVOT_TERM) ? fingers[sharedPivot.idTerm].docId : NO_MORE_DOC;
            score = 0.0;
        }
        __syncthreads();
    }

    // Debug check of the top-k heap ordering (disabled):
    // if(THREAD_MASTER){
    //     int max = iTopK - documentTopk.padding;
    //     int i = 0;
    //     while(i > max){
    //         if(2*i+2 < max)
    //             if(documentTopk.score[i] > documentTopk.score[2*i+2])
    //                 printf("ERRADO!!!\n");
    //
    //         if(2*i+1 < max)
    //             if(documentTopk.score[i] > documentTopk.score[2*i+1])
    //                 printf("ERRADO!!!\n");
    //
    //         i++;
    //     }}
    //
    // __syncthreads();

    sortLocalTopkDocAndStoreInGlobal(dTopkScoreListGlobal,iTopkDocListGlobal,iTopK,&documentTopk);

    // globalIndex = iTopK * blockIdx.x + threadIdx.x + documentTopk.padding;
    // for (localIndex = threadIdx.x; localIndex < (iTopK - documentTopk.padding) ; localIndex += blockDim.x) {
    //     iTopkDocListGlobal[globalIndex] = documentTopk.id[localIndex];
    //     dTopkScoreListGlobal[globalIndex] = documentTopk.score[localIndex];
    //     globalIndex += blockDim.x;
    // }
    // __syncthreads();
}

// Pre-pass for the block-partitioned WAND kernels.
// For this block's slice of every term's posting list (slice = docIdNumberByBlock
// docIds starting at docIdNumberByBlock * blockIdx.x), it computes:
//   - extraPositions[iTermNumber*blockIdx.x + t]: per-term extra start offset so
//     that each term's cursor begins at the first docId >= the block-wide minimum
//     start docId (sharedMinDoc);
//   - docMaxList[blockIdx.x]: the maximum last-docId bound of the slice.
// Both arrays are consumed by matchWandParallel_VARIABLE_3_Teste, which reads
// extraPositions[blockIdx.x*iTermNumber + threadIdx.x] and docMaxList[blockIdx.x].
// Expects a 1D launch with blockDim.x >= iTermNumber (one thread per term for the
// per-term section) and iTermNumber <= TERM_NUMBER.
//
// NOTE(review): sharedMinDoc, sharedMaxDoc and sharedExtraPositions[] are used as
// atomicMax/atomicMin targets without any visible initialization — shared memory
// is uninitialized, so this looks like it relies on an init pass that does not
// exist in this kernel; confirm or add explicit initialization + __syncthreads().
__global__ void preProcessingWand(const int* iDocIdList, const short int iTermNumber,
        const int* iDocNumberByTermList, const int* iInitialPositionPostingList,
        const int docIdNumberByBlock, int* extraPositions, int* docMaxList){
    __shared__ int iGlobalInitialPositionInList;  // start offset of this block's slice
    int globalIndex;// = iInitialPositionGlobal + threadIdx.x;
    // int positionInitialInTermPostingList;
    __shared__ int sharedMinDoc;                         // block-wide minimum start docId
    __shared__ int sharedMaxDoc;                         // block-wide maximum end docId bound
    __shared__ int sharedExtraPositions[TERM_NUMBER];    // per-term start-offset corrections
    // NOTE(review): sharedInitialDocId is written only in the blockIdx.x != 0 branch
    // below but read unconditionally in the per-term loop — for block 0 this reads
    // uninitialized shared memory; verify intended.
    __shared__ int sharedInitialDocId[TERM_NUMBER];
    __shared__ int sharedDocNumberByList[TERM_NUMBER];   // per-term posting-list lengths

    if(THREAD_MASTER){
        iGlobalInitialPositionInList = docIdNumberByBlock * blockIdx.x;
    }
    // __syncthreads();
    // NOTE(review): the barrier above is commented out, so threads other than the
    // master may read iGlobalInitialPositionInList below before it is written —
    // race unless blockDim.x <= warpSize is guaranteed; confirm.
    if(threadIdx.x < iTermNumber){
        // One thread per term: establish this term's slice-start docId and
        // contribute to the block-wide min/max docId bounds.
        sharedDocNumberByList[threadIdx.x] = iDocNumberByTermList[threadIdx.x];
        int docAmount = sharedDocNumberByList[threadIdx.x];
        globalIndex = iGlobalInitialPositionInList;
        // positionInitialInTermPostingList = iInitialPositionPostingList[threadIdx.x];
        int maxDoc;
        // int aux, maxDoc;
        // int maxNeighbor;
        if(blockIdx.x != 0){
            // docId just before this block's slice, +1 => first docId this block owns.
            // (The inner declaration shadows the outer maxDoc on purpose or by
            // accident — the outer one is reassigned below either way.)
            int maxDoc = (globalIndex < docAmount) ? iDocIdList[iInitialPositionPostingList[threadIdx.x] + globalIndex - 1] : -1;
            maxDoc++;
            sharedInitialDocId[threadIdx.x] = maxDoc;
            // aux = maxDoc;
            // for (int i = iTermNumber-1; i > 0; --i) {
            //     maxNeighbor = __shfl(aux,i);
            //     if(maxNeighbor > maxDoc) maxDoc = maxNeighbor;
            // }
            atomicMax(&sharedMinDoc, maxDoc);
            // if(THREAD_MASTER) sharedMinDoc = maxDoc;
            // if(aux < limitDoc.minDocId && aux != 0)
            //     needSearchDocRange[threadIdx.x] = 1;
        }else
            sharedMinDoc = 0;  // block 0 starts at the beginning of every list

        // Locate the last in-range docId of this term's slice (clamped to the
        // list tail when the slice runs past the end).
        int isTail = globalIndex < docAmount;
        globalIndex += docIdNumberByBlock;
        isTail &= globalIndex >= docAmount;
        if(isTail){
            globalIndex = iGlobalInitialPositionInList + (docAmount - iGlobalInitialPositionInList - 1);
        }
        maxDoc = (isTail || globalIndex < docAmount) ? (iDocIdList[iInitialPositionPostingList[threadIdx.x] + globalIndex]-1) : -1;
        // aux = maxDoc;
        // for (int i = 1; i < iTermNumber; ++i) {
        //     maxNeighbor = __shfl(aux,i);
        //     if(maxNeighbor > maxDoc)
        //         maxDoc = maxNeighbor;
        // }
        // if(THREAD_MASTER) sharedMaxDoc = maxDoc;
        atomicMax(&sharedMaxDoc, maxDoc);
    }
    __syncthreads();

    // For every term whose slice starts below the block-wide minimum docId,
    // scan forward (whole block cooperating, stride blockDim.x) for the first
    // posting with docId >= sharedMinDoc and record its offset.
    for (int iTerm = 0; iTerm < iTermNumber; ++iTerm) {
        if(sharedInitialDocId[iTerm] < sharedMinDoc){
            globalIndex = iInitialPositionPostingList[iTerm] + iGlobalInitialPositionInList + threadIdx.x;
            int docLocal = -1;
            // NOTE(review): the bound compares a *global* array index against the
            // per-list document COUNT sharedDocNumberByList[iTerm]; the analogous
            // kernels bound by listStart + count — looks off by the list's base
            // offset, TODO confirm.
            while (docLocal < sharedMinDoc && globalIndex < sharedDocNumberByList[iTerm]){
                docLocal = iDocIdList[globalIndex];
                globalIndex += blockDim.x;
            }
            globalIndex -= blockDim.x;  // step back to the position actually read
            long long int initialPosition;
            if(docLocal < sharedMinDoc)// if not found in range
                initialPosition = NO_VALID_POSITION;
            else
                initialPosition = globalIndex - iGlobalInitialPositionInList - iInitialPositionPostingList[iTerm];

            // Warp-level min-reduction of the candidate offsets; lane 0 of each
            // warp publishes its warp's minimum.
            int positionNeighbor;
            for (int i = 16; i >= 1; i /= 2) {
                positionNeighbor = __shfl_down_sync(0xFFFFFFFF,initialPosition, i);
                if(positionNeighbor < initialPosition)
                    initialPosition = positionNeighbor;
            }
            // NOTE(review): not-found positions are set to NO_VALID_POSITION above
            // but filtered here against NO_MORE_DOC — if those constants differ,
            // sentinel values can leak into the atomicMin; confirm.
            if( ((threadIdx.x & 0x1f) == 0) && initialPosition != NO_MORE_DOC){
                atomicMin(&sharedExtraPositions[iTerm] , initialPosition);
            }
            //__syncthreads();
            // if(THREAD_MASTER){
            //     globalIndex = iGlobalInitialPositionInList + iElementQuantityByBlock * roundGlobalNumber + threadIdx.x;
            //     globalIndex += limitDoc->extraPosition[termId];
            //
            //     if(globalIndex < iDocNumberByTermList[termId]){
            //         if(limitDoc->secondMaxDocId < iDocIdList[positionInListGlobal + globalIndex] -1)
            //             limitDoc->secondMaxDocId = iDocIdList[positionInListGlobal + globalIndex]-1;
            //     }
            // }
        }
        // NOTE(review): this store sits INSIDE the per-term loop, so it executes
        // iTermNumber times and, on early iterations, reads sharedExtraPositions
        // entries that later iterations have not yet computed; there is also no
        // __syncthreads() between the warp-leader atomicMin above and this read.
        // It looks like it was meant to follow the loop (after a barrier) —
        // verify the brace placement against the original source.
        if(threadIdx.x < iTermNumber){
            extraPositions[iTermNumber*blockIdx.x + threadIdx.x] = sharedExtraPositions[threadIdx.x];
            if(THREAD_MASTER)
                docMaxList[blockIdx.x] = sharedMaxDoc;
        }
    }
}

__global__ void matchWandParallel_VARIABLE_3_Teste(const int* iDocIdList,
        const unsigned short int* iFreqList, const float
*dUBlist, const float *dIdfList, const int *iDocLenghtList, const short int iTermNumber, int *iTopkDocListGlobal, float *dTopkScoreListGlobal, const float dAverageDocumentLength, const int iGlobalRoundNumber, const short int iTopK, const float iInitialThreshold, const int* d_iDocNumberByTermList, const int* extraPositions, const int* docMaxList){ __shared__ pivot sharedPivot; __shared__ finger fingers[TERM_NUMBER]; __shared__ postingList postingLists[TERM_NUMBER]; __shared__ documentTopkList documentTopk; __shared__ unsigned int iOrderedTermSharedList[TERM_NUMBER]; __shared__ int iGlobalInitialPositionInList; __shared__ unsigned short int iElementQuantityByBlock; __shared__ float score; __shared__ bool isValidCandidate; // __shared__ short int needSearchDocRange[TERM_NUMBER]; __shared__ limitDocId limitDoc; // int count = 0; float thresholdLocal;// = iInitialThreshold; thresholdLocal = iInitialThreshold; int globalIndex;// = iInitialPositionGlobal + threadIdx.x; int localIndex; int positionInitialInTermPostingList; if(thresholdLocal < globalThreshold) thresholdLocal = globalThreshold; if(threadIdx.x < iTermNumber){ limitDoc.extraPosition[threadIdx.x] = extraPositions[blockIdx.x*iTermNumber + threadIdx.x]; fingers[threadIdx.x].final = 0; } if(THREAD_MASTER){ documentTopk.padding = iTopK; limitDoc.secondMaxDocId = docMaxList[blockIdx.x]; }else if(THREAD_MASTER_2){ iElementQuantityByBlock = DOC_QUANTITY_IN_MEMORY;//iBlockRoundNumber * DOC_QUANTITY_IN_MEMORY; iGlobalInitialPositionInList = iElementQuantityByBlock * blockIdx.x * iGlobalRoundNumber; } //Inicializa a lista de Score e Documentos dos Topk //Considero que o Top_K seja um nmero mltiplo do tamanho do bloco for (localIndex = threadIdx.x; localIndex < iTopK; localIndex += blockDim.x) { documentTopk.id[localIndex] = -1; documentTopk.score[localIndex] = 0.0; } __syncthreads(); //Preenche a memria compartilhada positionInitialInTermPostingList = 0; int docLocal, docAmount; for (int termId = 0; termId < iTermNumber; 
++termId) { globalIndex = iGlobalInitialPositionInList + limitDoc.extraPosition[termId] + threadIdx.x; docAmount = d_iDocNumberByTermList[termId]; docLocal = -1; for (localIndex = threadIdx.x; localIndex < iElementQuantityByBlock; localIndex+=blockDim.x) { docLocal = (globalIndex < docAmount) ? iDocIdList[positionInitialInTermPostingList + globalIndex] : NO_MORE_DOC; if(docLocal > limitDoc.secondMaxDocId || globalIndex > docAmount){ postingLists[termId].docId[localIndex] = NO_MORE_DOC; fingers[termId].final = 1; break; } postingLists[termId].docId[localIndex] = docLocal; postingLists[termId].docLenght[localIndex] = iDocLenghtList[positionInitialInTermPostingList + globalIndex]; postingLists[termId].freq[localIndex] = iFreqList[positionInitialInTermPostingList + globalIndex]; globalIndex += blockDim.x; } positionInitialInTermPostingList += d_iDocNumberByTermList[termId]; } // __syncthreads(); if(threadIdx.x < iTermNumber){ fingers[threadIdx.x].docId = postingLists[threadIdx.x].docId[0]; fingers[threadIdx.x].position = (fingers[threadIdx.x].docId == NO_MORE_DOC) ? NO_VALID_POSITION : 0; // fingers[threadIdx.x].final = 0; } __syncthreads(); __shared__ int docCurrent; sortingTerms_2(fingers, iOrderedTermSharedList, iTermNumber); // __syncthreads(); if(THREAD_MASTER){ selectTermPivot_2(&sharedPivot,iOrderedTermSharedList,fingers,dUBlist,iTermNumber,thresholdLocal); docCurrent = (sharedPivot.idTerm != NO_PIVOT_TERM) ? fingers[sharedPivot.idTerm].docId : NO_MORE_DOC; }else if(THREAD_MASTER_2){ score = 0.0; } int padding; int threadIdInWarp = (threadIdx.x & 0x1f); int idWarp = ((blockDim.x >> 5) == 1 ) ? 
1 : threadIdx.x >> 5; __syncthreads(); while((sharedPivot.positionInOrderedList < iTermNumber) && (sharedPivot.idTerm < iTermNumber)){ if(THREAD_MASTER){ isValidCandidate = (fingers[sharedPivot.idTerm].docId == fingers[iOrderedTermSharedList[0]].docId); } __syncthreads(); if(isValidCandidate){ if(threadIdx.x < iTermNumber){ fullScore_3_1(&score, fingers[sharedPivot.idTerm].docId, iOrderedTermSharedList, fingers,postingLists, dIdfList, dAverageDocumentLength); } padding = documentTopk.padding; __syncthreads(); /* If the heap is not full the candidate is inserted into the heap. If the heap is full and the new score is larger than the minimum score in the heap, the new document is inserted into the heap, replacing the one with the minimum score. */ if(padding != 0 || thresholdLocal < score ){ thresholdLocal = managerMinValue_v5(&documentTopk, fingers[sharedPivot.idTerm].docId, score,padding); } if(idWarp == 1 && threadIdInWarp < iTermNumber){ advancePivoTermFinger_4(sharedPivot,fingers, postingLists,iElementQuantityByBlock,threadIdInWarp); } } else{ advanceDocIdOfPredecessorTerm_4(postingLists, iOrderedTermSharedList, fingers,sharedPivot,fingers[sharedPivot.idTerm].docId, iElementQuantityByBlock); } __syncthreads(); for (int termId = 0; termId < iTermNumber; ++termId) { if(fingers[termId].docId == NO_MORE_DOC && fingers[termId].final == 0){ searchMoreDocs(iDocIdList,iFreqList,iDocLenghtList,postingLists, termId,iGlobalInitialPositionInList, &limitDoc,iElementQuantityByBlock, &(fingers[termId]),docCurrent,d_iDocNumberByTermList); if (SHAREDTHESHOLD == 1){//SHARED_READ if(THREAD_MASTER && documentTopk.padding == 0 && thresholdLocal > globalThreshold){ // atomicMaxD(&globalThreshold,thresholdLocal); globalThreshold = thresholdLocal; // atomicMax((unsigned long long int*)&globalThreshold,(unsigned long long int)thresholdLocal); } if(thresholdLocal < globalThreshold){ thresholdLocal = globalThreshold; } }else if (SHAREDTHESHOLD == 2){ //TSHARED_WRITEREAD if(THREAD_MASTER 
&& (documentTopk.padding < (iTopK >> 1)) && thresholdLocal > globalThreshold){ // globalThreshold,thresholdLocal); globalThreshold = thresholdLocal; // atomicMax((unsigned long long int*)&globalThreshold,(unsigned long long int)thresholdLocal); } if((documentTopk.padding < (iTopK >> 1))) if(thresholdLocal < globalThreshold){ thresholdLocal = globalThreshold; } } } } //Sort the terms in non decreasing order of DID sortingTerms_2(fingers, iOrderedTermSharedList, iTermNumber); //Select term pivot if(THREAD_MASTER){ selectTermPivot_2(&sharedPivot,iOrderedTermSharedList,fingers,dUBlist,iTermNumber,thresholdLocal); docCurrent = (sharedPivot.idTerm != NO_PIVOT_TERM) ? fingers[sharedPivot.idTerm].docId : NO_MORE_DOC; score = 0.0; } __syncthreads(); } sortLocalTopkDocAndStoreInGlobal(dTopkScoreListGlobal,iTopkDocListGlobal,iTopK,&documentTopk); if(THREAD_MASTER && thresholdLocal > globalThreshold){ atomicMax((unsigned long long int*)&globalThreshold,(unsigned long long int)thresholdLocal); } // if(threadIdx.x == 0) // printf("---------%d----------",count); } __global__ void matchWandParallel_BATCH_2(const int* iDocIdList, const unsigned short int* iFreqList, const float *dUBlistGlobal, const float *dIdfListGlobal, const int *iDocLenghtList, const int iTermNumber, int *iTopkDocListGlobal, float *dTopkScoreListGlobal, const float dAverageDocumentLengthGlobal, const int iBlockRoundNumber, const int iGlobalRoundNumber, const short int iTopK, const float iInitialThreshold, const int* iQueryTerms, const long long* ptrInitPostingList, int* ptrQueryPositions, int idQuery,int *iDocNumberByTermListGlobal){ // // if(idQuery != 18 || blockIdx.x != 0) // return; __shared__ int queryPosition; __shared__ pivot sharedPivot; __shared__ finger fingers[TERM_NUMBER]; __shared__ documentTopkList documentTopk; __shared__ unsigned int iOrderedTermSharedList[TERM_NUMBER]; __shared__ long long iSharedPositionInitialInList[TERM_NUMBER]; __shared__ float dUBlist[TERM_NUMBER]; __shared__ float 
dIdfList[TERM_NUMBER]; __shared__ float dAverageDocumentLength; __shared__ int iDocNumberByTermList[TERM_NUMBER]; __shared__ int iGlobalInitialPositionInList; __shared__ float score; __shared__ bool isValidCandidate; __shared__ int docCurrent; __shared__ limitDocId limitDoc; int padding; float thresholdLocal;// = iInitialThreshold; thresholdLocal = iInitialThreshold; int globalIndex;// = iInitialPositionGlobal + threadIdx.x; int localIndex; // long long positionInitialInTermPostingList; if(thresholdLocal < globalThresholdBatch[idQuery]) thresholdLocal = globalThresholdBatch[idQuery]; // int count = 0; if(THREAD_MASTER){ documentTopk.padding = iTopK; dAverageDocumentLength = dAverageDocumentLengthGlobal; // }else if(THREAD_MASTER_2){ limitDoc.minDocId = 0; limitDoc.secondMaxDocId = 0; iGlobalInitialPositionInList = DOC_QUANTITY_IN_MEMORY * blockIdx.x * iGlobalRoundNumber; } if(threadIdx.x < iTermNumber){ queryPosition = ptrQueryPositions[idQuery]; int idTerm = iQueryTerms[queryPosition + threadIdx.x]; fingers[threadIdx.x].docId = NO_MORE_DOC; fingers[threadIdx.x].position = NO_VALID_POSITION; iDocNumberByTermList[threadIdx.x] = iDocNumberByTermListGlobal[idTerm]; dUBlist[threadIdx.x] = dUBlistGlobal[idTerm];//[threadIdx.x]; dIdfList[threadIdx.x] = dIdfListGlobal[idTerm];//[threadIdx.x]; iSharedPositionInitialInList[threadIdx.x] = ptrInitPostingList[idTerm]; } //Inicializa a lista de Score e Documentos dos Topk //Considero que o Top_K seja um nmero mltiplo do tamanho do bloco for (localIndex = threadIdx.x; localIndex < iTopK; localIndex += blockDim.x) { documentTopk.id[localIndex] = -1; documentTopk.score[localIndex] = 0.0; } //Define o max e o min if(threadIdx.x < iTermNumber){ int docAmount = iDocNumberByTermList[threadIdx.x]; fingers[threadIdx.x].final = 0; limitDoc.extraPosition[threadIdx.x] = 0; globalIndex = iGlobalInitialPositionInList; int maxDoc; if(blockIdx.x != 0){ maxDoc = (globalIndex < docAmount) ? 
iDocIdList[iSharedPositionInitialInList[threadIdx.x] + globalIndex - 1] : -1; maxDoc++; atomicMax(&(limitDoc.minDocId), maxDoc); }else{ if(THREAD_MASTER) limitDoc.minDocId = 0; } int isTail = globalIndex < docAmount; globalIndex = globalIndex + DOC_QUANTITY_IN_MEMORY * iGlobalRoundNumber; isTail = isTail && globalIndex >= docAmount; if(isTail){ globalIndex = iGlobalInitialPositionInList + (docAmount - iGlobalInitialPositionInList - 1); } maxDoc = ( (isTail || (globalIndex < docAmount)) ? (iDocIdList[iSharedPositionInitialInList[threadIdx.x] + globalIndex-1]) : -1); atomicMax(&(limitDoc.secondMaxDocId), maxDoc); } __syncthreads(); long long pos; int docLocal; for (int idTerm = 0; idTerm < iTermNumber; ++idTerm) { pos = iSharedPositionInitialInList[idTerm] + iGlobalInitialPositionInList + threadIdx.x; docLocal = -1; while(pos < (iSharedPositionInitialInList[idTerm]+iDocNumberByTermList[idTerm]) && docLocal < limitDoc.minDocId && docLocal <= limitDoc.secondMaxDocId ){ docLocal = iDocIdList[pos]; pos += blockDim.x; } docLocal = ((docLocal != -1) && (docLocal >= limitDoc.minDocId && docLocal <= limitDoc.secondMaxDocId)) ? docLocal : NO_MORE_DOC; pos = (docLocal != NO_MORE_DOC) ? pos-blockDim.x : NO_VALID_POSITION; atomicMin(&(fingers[idTerm].docId) , docLocal); __syncthreads(); if(fingers[idTerm].docId == docLocal){ fingers[idTerm].position = pos; } } __syncthreads(); sortingTerms_2(fingers, iOrderedTermSharedList, iTermNumber); if(THREAD_MASTER){ selectTermPivot_No_SharedMemory(&sharedPivot,iOrderedTermSharedList,fingers,dUBlist,iTermNumber,thresholdLocal); docCurrent = (sharedPivot.idTerm != NO_PIVOT_TERM) ? 
fingers[sharedPivot.idTerm].docId : NO_MORE_DOC; // }else if(THREAD_MASTER_2){ score = 0.0; } __syncthreads(); while((sharedPivot.positionInOrderedList < iTermNumber) && (sharedPivot.idTerm < iTermNumber)){ isValidCandidate = (fingers[sharedPivot.idTerm].docId == fingers[iOrderedTermSharedList[0]].docId); // count++; if(isValidCandidate){ if(threadIdx.x < iTermNumber){ int termId = iOrderedTermSharedList[threadIdx.x]; float scoreL = 0.0; if(fingers[termId].docId == fingers[sharedPivot.idTerm].docId){ scoreL = scoreTf_Idf(iFreqList[fingers[termId].position], iDocLenghtList[fingers[termId].position], dIdfList[termId],dAverageDocumentLength,1.0); } float aux = 0; for (int i = 0; i < TERM_NUMBER; ++i) { aux += __shfl_sync(0xFFFFFFFF,scoreL,i); } if(THREAD_MASTER) score = aux; // atomicAdd(&score,scoreL); } padding = documentTopk.padding; __syncthreads(); /* If the heap is not full the candidate is inserted into the heap. If the heap is full and the new score is larger than the minimum score in the heap, the new document is inserted into the heap, replacing the one with the minimum score. 
*/ if(padding != 0 || thresholdLocal < score ){ thresholdLocal = managerMinValue_v5(&documentTopk, fingers[sharedPivot.idTerm].docId, score,padding); } if(threadIdx.x < iTermNumber ){ int docPivot = fingers[sharedPivot.idTerm].docId; if(fingers[threadIdx.x].docId == docPivot){ fingers[threadIdx.x].position++; if(fingers[threadIdx.x].position >= (iDocNumberByTermList[threadIdx.x]+iSharedPositionInitialInList[threadIdx.x])){//No Vlido fingers[threadIdx.x].docId = NO_MORE_DOC; fingers[threadIdx.x].position = NO_VALID_POSITION; }else{ fingers[threadIdx.x].docId = iDocIdList[fingers[threadIdx.x].position]; if(fingers[threadIdx.x].docId > limitDoc.secondMaxDocId){ fingers[threadIdx.x].docId = NO_MORE_DOC; fingers[threadIdx.x].position = NO_VALID_POSITION; } } } } } else{ int pivotDoc = docCurrent; int position; int docLocal; int idTerm; for (int j = 0; j < sharedPivot.positionInOrderedList; ++j) { idTerm = iOrderedTermSharedList[j]; if(fingers[idTerm].docId == fingers[sharedPivot.idTerm].docId)//At alcanar um finger q aponte a um documento pivo break; fingers[idTerm].docId = NO_MORE_DOC; position = fingers[idTerm].position + 1 + threadIdx.x; docLocal = -1; while(position < (iSharedPositionInitialInList[idTerm]+iDocNumberByTermList[idTerm]) && docLocal < pivotDoc && docLocal <= limitDoc.secondMaxDocId){ docLocal = iDocIdList[position]; position += blockDim.x; } docLocal = (docLocal >= pivotDoc && docLocal <= limitDoc.secondMaxDocId) ? docLocal : NO_MORE_DOC; position = (docLocal != NO_MORE_DOC) ? 
position-blockDim.x : NO_VALID_POSITION; __syncthreads(); atomicMin(&(fingers[idTerm].docId) , docLocal); __syncthreads(); if(fingers[idTerm].docId == docLocal){ fingers[idTerm].position = position; } } } __syncthreads(); //Sort the terms in non decreasing order of DID sortingTerms_2(fingers, iOrderedTermSharedList, iTermNumber); //Select term pivot if(THREAD_MASTER){ selectTermPivot_No_SharedMemory(&sharedPivot,iOrderedTermSharedList,fingers,dUBlist,iTermNumber,thresholdLocal); docCurrent = (sharedPivot.idTerm != NO_PIVOT_TERM) ? fingers[sharedPivot.idTerm].docId : NO_MORE_DOC; score = 0.0; } if (SHAREDTHESHOLD == 1){//SHARED_READ if(THREAD_MASTER && documentTopk.padding == 0 && thresholdLocal > globalThresholdBatch[idQuery]){ // atomicMaxD(&globalThreshold,thresholdLocal); // atomicMax((unsigned long long int*)&(globalThresholdBatch[idQuery]),(unsigned long long int)thresholdLocal); // atomicMaxD((volatile double*)&(globalThresholdBatch[idQuery]),thresholdLocal); globalThresholdBatch[idQuery] = thresholdLocal; } if(thresholdLocal < globalThresholdBatch[idQuery]){ thresholdLocal = globalThresholdBatch[idQuery]; } }else if (SHAREDTHESHOLD == 2){ //TSHARED_WRITEREAD if(THREAD_MASTER && (documentTopk.padding < (iTopK >> 1)) && thresholdLocal > globalThresholdBatch[idQuery]){ // atomicMax((unsigned long long int*)&(globalThresholdBatch[idQuery]),(unsigned long long int)thresholdLocal); // atomicMaxD(((volatile double*)&(globalThresholdBatch[idQuery])),thresholdLocal); globalThresholdBatch[idQuery] = thresholdLocal; } if((documentTopk.padding < (iTopK >> 1))) if(thresholdLocal < globalThresholdBatch[idQuery]){ thresholdLocal = globalThresholdBatch[idQuery]; } } __syncthreads(); } sortLocalTopkDocAndStoreInGlobal(dTopkScoreListGlobal,iTopkDocListGlobal,iTopK,&documentTopk); // if(thresholdLocal > globalThreshold) // globalThreshold = thresholdLocal; // if(THREAD_MASTER) //// atomicAdd(&globalCount,count); // // if(THREAD_MASTER && idQuery == 0) // printf("-----%d 
// %d----", blockIdx.x, count);
}

// Block-parallel WAND-style top-k document retrieval, shared-memory
// posting-buffer variant: each block walks its own slice of every term's
// posting list, stages chunks of postings in the shared "postings" buffers,
// maintains a local top-k min-heap (documentTopk) and finally writes its
// sorted local top-k to the global result arrays.
//
// Inverted index is passed as flat arrays, one entry per posting, term by term:
//   iDocIdList / iFreqList / iDocLenghtList - doc ids, term frequencies and
//       document lengths of every posting.
//   dUBlistGlobal / dIdfListGlobal          - per-term score upper bounds / idf.
//   iDocNumberByTermListGlobal              - posting count per term.
//   iTermNumber  - number of query terms (the __shfl_* reductions below assume
//                  the per-term threads fit in one warp - TODO confirm).
//   iTopkDocListGlobal / dTopkScoreListGlobal - per-block top-k output.
//   iGlobalRoundNumber / iTopK / iInitialThreshold - rounds per block, heap
//       size (assumed multiple of blockDim.x), initial pruning threshold.
// Uses file-scope device state/macros defined elsewhere: globalThreshold,
// THREAD_MASTER, TERM_NUMBER, DOCS_TEST, DOC_QUANTITY_IN_MEMORY, SHAREDTHESHOLD,
// NO_MORE_DOC, NO_VALID_POSITION, NO_PIVOT_TERM.
__global__ void matchWandParallel_VARIABLE_4_2(const int* iDocIdList, const unsigned short int* iFreqList,
        const float *dUBlistGlobal, const float *dIdfListGlobal, const int *iDocLenghtList,
        const short int iTermNumber, int *iTopkDocListGlobal, float *dTopkScoreListGlobal,
        const float dAverageDocumentLengthGlobal, const int iGlobalRoundNumber, const short int iTopK,
        const float iInitialThreshold, const int* iDocNumberByTermListGlobal){
//    if(blockIdx.x != 720)
//        return;
    // Per-block state: current pivot, one cursor ("finger") per term, the local
    // top-k heap, and shared-memory staging buffers for posting-list chunks.
    __shared__ pivot sharedPivot;
    __shared__ finger fingers[TERM_NUMBER];
    __shared__ documentTopkList documentTopk;
    __shared__ postingList2 postings[TERM_NUMBER];
    __shared__ unsigned int iOrderedTermSharedList[TERM_NUMBER];    // term ids sorted by current doc id
    __shared__ long long iSharedPositionInitialInList[TERM_NUMBER]; // start offset of each term's postings
    __shared__ float dUBlist[TERM_NUMBER];
    __shared__ float dIdfList[TERM_NUMBER];
    __shared__ float dAverageDocumentLength;
    __shared__ int iDocNumberByTermList[TERM_NUMBER];
    __shared__ int iGlobalInitialPositionInList;
    __shared__ float score;
    __shared__ bool isValidCandidate;
    __shared__ int docCurrent;
    __shared__ limitDocId limitDoc;
//    int count = iTopK;
//    __shared__ int paddingInShared;
    int padding;
    float thresholdLocal = iInitialThreshold;
    thresholdLocal = iInitialThreshold;    // NOTE(review): redundant re-assignment kept as-is
    int globalIndex;// = iInitialPositionGlobal + threadIdx.x;
    int localIndex;
    int positionInitialInTermPostingList;
    // Start from the best pruning threshold published so far by other blocks.
    if(thresholdLocal < globalThreshold)
        thresholdLocal = globalThreshold;
    if(THREAD_MASTER){
        documentTopk.padding = iTopK;    // number of free heap slots
        dAverageDocumentLength = dAverageDocumentLengthGlobal;
//    }else if(THREAD_MASTER_2){
        iGlobalInitialPositionInList = DOC_QUANTITY_IN_MEMORY * blockIdx.x * iGlobalRoundNumber;
        limitDoc.minDocId = 0;
        limitDoc.secondMaxDocId = 0;
    }
    // One thread per query term copies that term's metadata into shared memory.
    if(threadIdx.x < iTermNumber){
//        paddingInShared=0;
        fingers[threadIdx.x].docId = NO_MORE_DOC;
        fingers[threadIdx.x].position = NO_VALID_POSITION;
        iDocNumberByTermList[threadIdx.x] = iDocNumberByTermListGlobal[threadIdx.x];
        dUBlist[threadIdx.x] = dUBlistGlobal[threadIdx.x];
        dIdfList[threadIdx.x] = dIdfListGlobal[threadIdx.x];
    }
    // Initialize the top-k score/document lists.
    // Assumes Top_K is a multiple of the block size.
    for (localIndex = threadIdx.x; localIndex < iTopK; localIndex += blockDim.x) {
        documentTopk.id[localIndex] = -1;
        documentTopk.score[localIndex] = 0.0;
    }
    // Determine the doc-id range [minDocId, secondMaxDocId] this block owns.
    if(threadIdx.x < iTermNumber){
        int docAmount = iDocNumberByTermList[threadIdx.x];
        fingers[threadIdx.x].final = 0;
        limitDoc.extraPosition[threadIdx.x] = 0;
        globalIndex = iGlobalInitialPositionInList;
        positionInitialInTermPostingList = 0;
        for (int i = 0; i < threadIdx.x; ++i) {
            positionInitialInTermPostingList += iDocNumberByTermList[i];
        }
        iSharedPositionInitialInList[threadIdx.x] = positionInitialInTermPostingList;
        int maxDoc;
        if(blockIdx.x != 0){
            // Lower bound: one past the last doc id handled by the previous block.
            maxDoc = (globalIndex < docAmount) ? iDocIdList[positionInitialInTermPostingList + globalIndex - 1] : -1;
            maxDoc++;
            atomicMax(&(limitDoc.minDocId), maxDoc);
        }else{
            if(THREAD_MASTER)
                limitDoc.minDocId = 0;
        }
        int isTail = globalIndex < docAmount;
        globalIndex += DOC_QUANTITY_IN_MEMORY * iGlobalRoundNumber - 1;
        isTail = (isTail && globalIndex >= docAmount);
        if(isTail){
            globalIndex = iGlobalInitialPositionInList + (docAmount - iGlobalInitialPositionInList - 1);
        }
        maxDoc = (isTail || globalIndex < docAmount) ? iDocIdList[positionInitialInTermPostingList + globalIndex] : -1;
        atomicMax(&(limitDoc.secondMaxDocId), maxDoc);
    }
    __syncthreads();
    long long pos;
//    int docLocal;
    // Position each term's finger on its first doc id inside this block's range
    // (cooperative strided scan, then a warp-shuffle min reduction so only one
    // atomic per warp touches the shared finger).
    for (int idTerm = 0; idTerm < iTermNumber; ++idTerm) {
        pos = iSharedPositionInitialInList[idTerm] + iGlobalInitialPositionInList + threadIdx.x;
        int docLocal = -1;
        while(pos < (iSharedPositionInitialInList[idTerm]+iDocNumberByTermList[idTerm]) && docLocal < limitDoc.minDocId
                && docLocal <= limitDoc.secondMaxDocId ){
            docLocal = iDocIdList[pos];
            pos += blockDim.x;
        }
        docLocal = ( (docLocal != -1) && (docLocal >= limitDoc.minDocId && docLocal <= limitDoc.secondMaxDocId)) ? docLocal : NO_MORE_DOC;
        pos = (docLocal != NO_MORE_DOC) ? pos-blockDim.x : NO_VALID_POSITION;
//        atomicMin(&(fingers[idTerm].docId) , docLocal);
        int docNeighbor, docAux = docLocal;
        for (int i = 16; i >= 1; i /= 2) {
            docNeighbor = __shfl_down_sync(0xFFFFFFFF,docAux, i);
            if(docNeighbor < docAux)
                docAux = docNeighbor;
        }
        if( ((threadIdx.x & 0x1f) == 0)){
            // One atomic per warp carrying the warp-local minimum.
            atomicMin(&(fingers[idTerm].docId) , docAux);
        }
        __syncthreads();
        if(fingers[idTerm].docId == docLocal){
            fingers[idTerm].position = pos;
        }
    }
    __syncthreads();
    // Stage the first chunk of each term's postings into shared memory.
    for (int termId = 0; termId < iTermNumber; ++termId) {
        if(fingers[termId].position != NO_VALID_POSITION){
            long long gIndex = fingers[termId].position + threadIdx.x;
            for (int localIndex = threadIdx.x; localIndex < DOCS_TEST; localIndex+=blockDim.x) {
                if(gIndex < (iSharedPositionInitialInList[termId]+iDocNumberByTermList[termId])
                        && (iDocIdList[gIndex] <= limitDoc.secondMaxDocId) ){
                    postings[termId].docId[localIndex] = iDocIdList[gIndex];
                    postings[termId].freq[localIndex] = iFreqList[gIndex];
                    postings[termId].docLenght[localIndex] = iDocLenghtList[gIndex];
                    if(localIndex == 0)
                        postings[termId].positionInShared = 0;
                }
                else{
                    postings[termId].docId[localIndex] = NO_MORE_DOC;
                    if(localIndex == 0)
                        postings[termId].positionInShared = NO_VALID_POSITION;
                }
                gIndex += blockDim.x;
            }
        }
        else{
            postings[termId].positionInShared = NO_VALID_POSITION;
        }
    }
    sortingTerms_2(fingers, iOrderedTermSharedList, iTermNumber);
    if(THREAD_MASTER){
        selectTermPivot_No_SharedMemory(&sharedPivot,iOrderedTermSharedList,fingers,dUBlist,iTermNumber,thresholdLocal);
        docCurrent = (sharedPivot.idTerm != NO_PIVOT_TERM) ? fingers[sharedPivot.idTerm].docId : NO_MORE_DOC;
        score = 0.0;
    }
    __syncthreads();
    // Main WAND loop: score aligned pivot candidates, otherwise skip lagging
    // fingers forward; runs until no pivot term remains.
    while((sharedPivot.positionInOrderedList < iTermNumber) && (sharedPivot.idTerm < iTermNumber)){
        isValidCandidate = (fingers[sharedPivot.idTerm].docId == fingers[iOrderedTermSharedList[0]].docId);
//        count++;
//        if(fingers[sharedPivot.idTerm].docId==33769946 && THREAD_MASTER)
//            printf("blockId.x %d!!!\n",blockIdx.x);
        if(isValidCandidate){
            // Pivot document is fully aligned: compute its score.
            if(threadIdx.x < iTermNumber){
                int termId = iOrderedTermSharedList[threadIdx.x];
                float scoreL = 0.0;
                if(fingers[termId].docId == fingers[sharedPivot.idTerm].docId){
                    scoreL = scoreTf_Idf(postings[termId].freq[postings[termId].positionInShared],
                            postings[termId].docLenght[postings[termId].positionInShared],
                            dIdfList[termId],dAverageDocumentLength,1.0);
                }
                // Warp-shuffle sum of the per-term partial scores.
                float aux = 0;
                for (int i = 0; i < TERM_NUMBER; ++i) {
                    aux += __shfl_sync(0xFFFFFFFF,scoreL,i);
                }
                if(THREAD_MASTER)
                    score = aux;
//                atomicAdd(&score,scoreL);
            }
            padding = documentTopk.padding;
            __syncthreads();
            /* If the heap is not full the candidate is inserted into the heap.
               If the heap is full and the new score is larger than the minimum
               score in the heap, the new document is inserted into the heap,
               replacing the one with the minimum score. */
            if(padding != 0 || thresholdLocal < score ){
//                if(THREAD_MASTER && fingers[sharedPivot.idTerm].docId==6364669)//&& score == 3.53512168))//40920063
//                    printf("blockIdx.x %d\n",blockIdx.x);
                thresholdLocal = managerMinValue_v5(&documentTopk, fingers[sharedPivot.idTerm].docId, score,padding);
//                if(count != 0) count--;
            }
//            float test = checkMinHeapProperty(documentTopk,score,fingers[sharedPivot.idTerm].docId,iTopK);
//            if(count != documentTopk.padding){
//                printf("Padding error! count %d | padding %d | blockIdx %d | docId %d\n",count, documentTopk.padding, blockIdx.x, fingers[sharedPivot.idTerm].docId);
//            }
//
//            int result = __syncthreads_or(test != 0.0);
//            if(THREAD_MASTER && result != 0){
//                printf("Oi\n");
//                return;
//            }
            // Advance every finger that sits on the just-scored document.
            if(threadIdx.x < iTermNumber ){
                int docPivot = fingers[sharedPivot.idTerm].docId;
                int posInShared;
                if(fingers[threadIdx.x].docId == docPivot){
                    fingers[threadIdx.x].position++;
                    postings[threadIdx.x].positionInShared++;
                    posInShared = postings[threadIdx.x].positionInShared;
                    if(posInShared >= DOCS_TEST || postings[threadIdx.x].docId[posInShared] == NO_MORE_DOC){
                        fingers[threadIdx.x].docId = NO_MORE_DOC;
                        if(docPivot == docCurrent)
                            atomicInc((unsigned int*)(&docCurrent),docCurrent);
                    }else{
                        fingers[threadIdx.x].docId = postings[threadIdx.x].docId[posInShared];
                    }
                }
            }
        }
        else{
            // Pivot not aligned: skip the lagging fingers forward to the pivot
            // doc id inside the shared posting buffers.
            int pivotDoc = docCurrent;
            int position;
            int docLocal;
            int idTerm;
//            __syncthreads();
            for (int j = 0; j < sharedPivot.positionInOrderedList; ++j) {
                idTerm = iOrderedTermSharedList[j];
                if(fingers[idTerm].docId == pivotDoc)//until we reach a finger that already points at a pivot document
                    break;
                position = postings[idTerm].positionInShared + 1 + threadIdx.x;
                fingers[idTerm].docId = NO_MORE_DOC;
                if(position < DOCS_TEST)
                    docLocal = postings[idTerm].docId[position];
                else
                    docLocal = NO_MORE_DOC;
                while( (position < DOCS_TEST) && (docLocal < pivotDoc) ){
                    docLocal = postings[idTerm].docId[position];
                    position += blockDim.x;
                }
                docLocal = (docLocal > pivotDoc) ? docLocal : NO_MORE_DOC;
                position = (docLocal != NO_MORE_DOC) ? position-blockDim.x : DOCS_TEST;
//                __syncthreads();
                int docNeighbor, docAux = docLocal;
                for (int i = 16; i >= 1; i /= 2) {
                    docNeighbor = __shfl_down_sync(0xFFFFFFFF,docAux, i);
                    if(docNeighbor < docAux)
                        docAux = docNeighbor;
                }
                if( ((threadIdx.x & 0x1f) == 0)){
                    atomicMin(&(fingers[idTerm].docId) , docAux);
                }
                __syncthreads();
                if(fingers[idTerm].docId == docLocal){
                    if(position != DOCS_TEST){
                        fingers[idTerm].position += (position-postings[idTerm].positionInShared);
                        postings[idTerm].positionInShared += threadIdx.x + 1;
                    } else {
                        postings[idTerm].positionInShared = DOCS_TEST;
//                        fingers[idTerm].position = NO_VALID_POSITION;
                    }
                }
            }
        }
        // Refill any exhausted shared-memory posting buffers from global memory,
        // skipping postings below docCurrent via a ballot/popc warp count.
        for (int termId = 0; termId < iTermNumber; ++termId) {
            long long gIndex;
            int count=0,isValid=0, docLocal, isOutRange=0;
            if(postings[termId].positionInShared >= DOCS_TEST && postings[termId].positionInShared != NO_VALID_POSITION){
                gIndex = fingers[termId].position + threadIdx.x;
                for (int localIndex = threadIdx.x; localIndex < DOCS_TEST; localIndex+=blockDim.x) {
                    count=0;isValid=0;isOutRange=0;
                    do{
                        isOutRange = gIndex >= (iSharedPositionInitialInList[termId]+iDocNumberByTermList[termId]);
                        docLocal = (!isOutRange) ? iDocIdList[gIndex] : NO_MORE_DOC;
                        isOutRange = isOutRange || (docLocal > limitDoc.secondMaxDocId);
                        isValid = isOutRange || (docLocal >= docCurrent);
//                        count = __syncthreads_count(!isValid);
                        count = __ballot_sync(0xFFFFFFFF,!isValid);
                        count = __popc(count);
//                        if((threadIdx.x & 0x1f) == 0){
//                            atomicAdd(&paddingInShared,count);
//                        }
//                        __syncthreads();
//                        count = paddingInShared;
                        gIndex += count;
                        if(localIndex == 0)
                            fingers[termId].position += count;
                    }while(count != 0);
                    if(!isOutRange){
                        postings[termId].docId[localIndex] = docLocal;
                        postings[termId].freq[localIndex] = iFreqList[gIndex];
                        postings[termId].docLenght[localIndex] = iDocLenghtList[gIndex];
                        if(localIndex == 0)
                            postings[termId].positionInShared = 0;
                    }
                    else{
                        postings[termId].docId[localIndex] = NO_MORE_DOC;
                        if(localIndex == 0)
                            postings[termId].positionInShared = NO_VALID_POSITION;
                    }
                    gIndex += blockDim.x;
                }
                if(threadIdx.x == 0){
                    fingers[termId].docId = postings[termId].docId[0];
                }
//                paddingInShared=0;
//                __syncthreads();
            }
        }
        __syncthreads();
        //Sort the terms in non decreasing order of DID
        sortingTerms_2(fingers, iOrderedTermSharedList, iTermNumber);
        //Select term pivot
        if(THREAD_MASTER){
            selectTermPivot_No_SharedMemory(&sharedPivot,iOrderedTermSharedList,fingers,dUBlist,iTermNumber,thresholdLocal);
            docCurrent = (sharedPivot.idTerm != NO_PIVOT_TERM) ?
            fingers[sharedPivot.idTerm].docId : NO_MORE_DOC;
            score = 0.0;
        }
        __syncthreads();
        // Optionally propagate the pruning threshold across blocks; the policy
        // is chosen by the compile-time SHAREDTHESHOLD switch.
        if (SHAREDTHESHOLD == 1){//SHARED_READ
            if(THREAD_MASTER && documentTopk.padding == 0 && thresholdLocal > globalThreshold){
//                atomicMax((unsigned long long int*)&globalThreshold,(unsigned long long int)thresholdLocal);
                globalThreshold=thresholdLocal;
            }
            if(thresholdLocal < globalThreshold){
                thresholdLocal = globalThreshold;
            }
        }else if (SHAREDTHESHOLD == 2){ //TSHARED_WRITEREAD
            if(THREAD_MASTER && (documentTopk.padding < (iTopK >> 1)) && thresholdLocal > globalThreshold){
//                atomicMax((unsigned long long int*)&globalThreshold,(unsigned long long int)thresholdLocal);
                globalThreshold=thresholdLocal;
            }
            if((documentTopk.padding < (iTopK >> 1)))
                if(thresholdLocal < globalThreshold){
                    thresholdLocal = globalThreshold;
                }
        }
    }//End of the WAND loop - pivot = NO_MORE_DOC
//    for (int i = blockIdx.x*iTopK+threadIdx.x; i < blockIdx.x*iTopK; i+= blockDim.x) {
//        printf("---%d %d---",blockIdx.x,iTopkDocListGlobal[i]);
//    }
    // Publish this block's sorted top-k into the global output arrays.
    sortLocalTopkDocAndStoreInGlobal(dTopkScoreListGlobal,iTopkDocListGlobal,iTopK,&documentTopk);
//    __syncthreads();
//    float test = checkSorting(documentTopk, dTopkScoreListGlobal, iTopkDocListGlobal, iTopK);
//
//    int result = __syncthreads_or(test != 0.0);
//    if(THREAD_MASTER && result != 0){
//        printf("Oi no Sorting!\n");
//        return;
//    }
//    if(thresholdLocal > globalThreshold)
//        thresholdGlobal = thresholdLocal;
//    if(THREAD_MASTER)
//        atomicAdd(&globalCount,count);
//////
//    if(THREAD_MASTER)
//        printf("-----%d----", globalCount);
}

// Block-parallel WAND-style top-k retrieval variant that reads postings
// directly from global memory (no shared-memory posting buffer - compare
// matchWandParallel_VARIABLE_4_2 above). Same flat-index parameter layout as
// that kernel; per-block results are written via
// sortLocalTopkDocAndStoreInGlobal(). Relies on the same file-scope device
// state/macros: globalThreshold, THREAD_MASTER, TERM_NUMBER,
// DOC_QUANTITY_IN_MEMORY, SHAREDTHESHOLD, NO_MORE_DOC, NO_VALID_POSITION,
// NO_PIVOT_TERM.
__global__ void matchWandParallel_VARIABLE_4(const int* iDocIdList, const unsigned short int* iFreqList,
        const float *dUBlistGlobal, const float *dIdfListGlobal, const int *iDocLenghtList,
        const short int iTermNumber, int *iTopkDocListGlobal, float *dTopkScoreListGlobal,
        const float dAverageDocumentLengthGlobal, const int iGlobalRoundNumber, const short int iTopK,
        const float iInitialThreshold, const int* iDocNumberByTermListGlobal){
//    if(blockIdx.x != 1104)
//        return;
    // Per-block state: pivot, per-term cursors ("fingers"), local top-k heap.
    __shared__ pivot sharedPivot;
    __shared__ finger fingers[TERM_NUMBER];
    __shared__ documentTopkList documentTopk;
    __shared__ unsigned int iOrderedTermSharedList[TERM_NUMBER];    // term ids sorted by current doc id
    __shared__ long long iSharedPositionInitialInList[TERM_NUMBER]; // start offset of each term's postings
    __shared__ float dUBlist[TERM_NUMBER];
    __shared__ float dIdfList[TERM_NUMBER];
    __shared__ float dAverageDocumentLength;
    __shared__ int iDocNumberByTermList[TERM_NUMBER];
    __shared__ int iGlobalInitialPositionInList;
    __shared__ float score;
    __shared__ bool isValidCandidate;
    __shared__ int docCurrent;
    __shared__ limitDocId limitDoc;
//    int count = iTopK;
    int padding;
    float thresholdLocal = iInitialThreshold;
    thresholdLocal = iInitialThreshold;    // NOTE(review): redundant re-assignment kept as-is
    int globalIndex;// = iInitialPositionGlobal + threadIdx.x;
    int localIndex;
    int positionInitialInTermPostingList;
    // Start from the best pruning threshold published so far by other blocks.
    if(thresholdLocal < globalThreshold)
        thresholdLocal = globalThreshold;
    if(THREAD_MASTER){
        documentTopk.padding = iTopK;    // number of free heap slots
        dAverageDocumentLength = dAverageDocumentLengthGlobal;
//    }else if(THREAD_MASTER_2){
        iGlobalInitialPositionInList = DOC_QUANTITY_IN_MEMORY * blockIdx.x * iGlobalRoundNumber;
        limitDoc.minDocId = 0;
        limitDoc.secondMaxDocId = 0;
    }
    // One thread per query term copies that term's metadata into shared memory.
    if(threadIdx.x < iTermNumber){
        fingers[threadIdx.x].docId = NO_MORE_DOC;
        fingers[threadIdx.x].position = NO_VALID_POSITION;
        iDocNumberByTermList[threadIdx.x] = iDocNumberByTermListGlobal[threadIdx.x];
        dUBlist[threadIdx.x] = dUBlistGlobal[threadIdx.x];
        dIdfList[threadIdx.x] = dIdfListGlobal[threadIdx.x];
    }
    // Initialize the top-k score/document lists.
    // Assumes Top_K is a multiple of the block size.
    for (localIndex = threadIdx.x; localIndex < iTopK; localIndex += blockDim.x) {
        documentTopk.id[localIndex] = -1;
        documentTopk.score[localIndex] = 0.0;
    }
//    __syncthreads();
//    if(blockIdx.x == 83 && THREAD_MASTER)
//        printf("Oi! \n");
    // Determine the doc-id range [minDocId, secondMaxDocId] this block owns.
    if(threadIdx.x < iTermNumber){
        int docAmount = iDocNumberByTermList[threadIdx.x];
        fingers[threadIdx.x].final = 0;
        limitDoc.extraPosition[threadIdx.x] = 0;
        globalIndex = iGlobalInitialPositionInList;
        positionInitialInTermPostingList = 0;
        for (int i = 0; i < threadIdx.x; ++i) {
            positionInitialInTermPostingList += iDocNumberByTermList[i];
        }
        iSharedPositionInitialInList[threadIdx.x] = positionInitialInTermPostingList;
        int maxDoc;
        if(blockIdx.x != 0){
            // Lower bound: one past the last doc id handled by the previous block.
            maxDoc = (globalIndex < docAmount) ? iDocIdList[positionInitialInTermPostingList + globalIndex - 1] : -1;
            maxDoc++;
            atomicMax(&(limitDoc.minDocId), maxDoc);
        }else{
            if(THREAD_MASTER)
                limitDoc.minDocId = 0;
        }
        int isTail = globalIndex < docAmount;
        globalIndex += DOC_QUANTITY_IN_MEMORY * iGlobalRoundNumber - 1;
        isTail = (isTail && globalIndex >= docAmount);
        if(isTail){
            globalIndex = iGlobalInitialPositionInList + (docAmount - iGlobalInitialPositionInList - 1);
        }
        maxDoc = (isTail || globalIndex < docAmount) ? iDocIdList[positionInitialInTermPostingList + globalIndex] : -1;
        atomicMax(&(limitDoc.secondMaxDocId), maxDoc);
    }
    __syncthreads();
    long long pos;
    int docLocal;
    // Position each term's finger on its first doc id inside this block's range.
    for (int idTerm = 0; idTerm < iTermNumber; ++idTerm) {
        pos = iSharedPositionInitialInList[idTerm] + iGlobalInitialPositionInList + threadIdx.x;
        docLocal = -1;
        while(pos < (iSharedPositionInitialInList[idTerm]+iDocNumberByTermList[idTerm]) && docLocal < limitDoc.minDocId
                && docLocal <= limitDoc.secondMaxDocId ){
            docLocal = iDocIdList[pos];
            pos += blockDim.x;
        }
        docLocal = ( (docLocal != -1) && (docLocal >= limitDoc.minDocId && docLocal <= limitDoc.secondMaxDocId)) ? docLocal : NO_MORE_DOC;
        pos = (docLocal != NO_MORE_DOC) ? pos-blockDim.x : NO_VALID_POSITION;
        atomicMin(&(fingers[idTerm].docId) , docLocal);
        __syncthreads();
        if(fingers[idTerm].docId == docLocal){
            fingers[idTerm].position = pos;
        }
    }
    sortingTerms_2(fingers, iOrderedTermSharedList, iTermNumber);
    if(THREAD_MASTER){
        selectTermPivot_No_SharedMemory(&sharedPivot,iOrderedTermSharedList,fingers,dUBlist,iTermNumber,thresholdLocal);
        docCurrent = (sharedPivot.idTerm != NO_PIVOT_TERM) ? fingers[sharedPivot.idTerm].docId : NO_MORE_DOC;
//    }else if(THREAD_MASTER_2){
        score = 0.0;
    }
    __syncthreads();
    // Main WAND loop: score aligned pivot candidates, otherwise skip lagging
    // fingers forward; runs until no pivot term remains.
    while((sharedPivot.positionInOrderedList < iTermNumber) && (sharedPivot.idTerm < iTermNumber)){
        isValidCandidate = (fingers[sharedPivot.idTerm].docId == fingers[iOrderedTermSharedList[0]].docId);
//        count++;
        if(isValidCandidate){
            // Pivot document is fully aligned: compute its score straight from
            // the global posting arrays at each finger's position.
            if(threadIdx.x < iTermNumber){
                int termId = iOrderedTermSharedList[threadIdx.x];
                float scoreL = 0.0;
                if(fingers[termId].docId == fingers[sharedPivot.idTerm].docId){
                    scoreL = scoreTf_Idf(iFreqList[fingers[termId].position],
                            iDocLenghtList[fingers[termId].position],
                            dIdfList[termId],dAverageDocumentLength,1.0);
                }
                // Warp-shuffle sum of the per-term partial scores.
                float aux = 0;
                for (int i = 0; i < TERM_NUMBER; ++i) {
                    aux += __shfl_sync(0xFFFFFFFF,scoreL,i);
                }
                if(THREAD_MASTER)
                    score = aux;
//                atomicAdd(&score,scoreL);
            }
            padding = documentTopk.padding;
            __syncthreads();
            /* If the heap is not full the candidate is inserted into the heap.
               If the heap is full and the new score is larger than the minimum
               score in the heap, the new document is inserted into the heap,
               replacing the one with the minimum score. */
            if(padding != 0 || thresholdLocal < score ){
                // NOTE(review): leftover live debug trace gated on a hard-coded
                // doc id - looks like it should be removed or commented out.
                if(THREAD_MASTER && fingers[sharedPivot.idTerm].docId==46517642)//&& score == 3.53512168))//40920063
                    printf("blockIdx.x %d\n",blockIdx.x);
                thresholdLocal = managerMinValue_v5(&documentTopk, fingers[sharedPivot.idTerm].docId, score,padding);
//                if(count != 0) count--;
            }
//            float test = checkMinHeapProperty(documentTopk,score,fingers[sharedPivot.idTerm].docId,iTopK);
//            if(count != documentTopk.padding){
//                printf("Padding error! count %d | padding %d | blockIdx %d | docId %d\n",count, documentTopk.padding, blockIdx.x, fingers[sharedPivot.idTerm].docId);
//            }
//
//            int result = __syncthreads_or(test != 0.0);
//            if(THREAD_MASTER && result != 0){
//                printf("Oi\n");
//                return;
//            }
            // Advance every finger that sits on the just-scored document.
            if(threadIdx.x < iTermNumber ){
                int docPivot = fingers[sharedPivot.idTerm].docId;
                if(fingers[threadIdx.x].docId == docPivot){
                    fingers[threadIdx.x].position++;
                    if(fingers[threadIdx.x].position >= (iDocNumberByTermList[threadIdx.x]+iSharedPositionInitialInList[threadIdx.x])){//not valid
                        fingers[threadIdx.x].docId = NO_MORE_DOC;
                        fingers[threadIdx.x].position = NO_VALID_POSITION;
                    }else{
                        fingers[threadIdx.x].docId = iDocIdList[fingers[threadIdx.x].position];
                        if(fingers[threadIdx.x].docId > limitDoc.secondMaxDocId){
                            fingers[threadIdx.x].docId = NO_MORE_DOC;
                            fingers[threadIdx.x].position = NO_VALID_POSITION;
                        }
                    }
                }
            }
        }
        else{
            // Pivot not aligned: skip the lagging fingers forward to the pivot
            // doc id directly in global memory.
            int pivotDoc = docCurrent;
            long long position;
            int docLocal;
            int idTerm;
            for (int j = 0; j < sharedPivot.positionInOrderedList; ++j) {
                idTerm = iOrderedTermSharedList[j];
                if(fingers[idTerm].docId == fingers[sharedPivot.idTerm].docId)//until we reach a finger that already points at a pivot document
                    break;
                fingers[idTerm].docId = NO_MORE_DOC;
                position = fingers[idTerm].position + 1 + threadIdx.x;
                docLocal = -1;
                while(position < (iSharedPositionInitialInList[idTerm]+iDocNumberByTermList[idTerm]) && docLocal < pivotDoc
                        && docLocal <= limitDoc.secondMaxDocId){
                    docLocal = iDocIdList[position];
                    position += blockDim.x;
                }
                docLocal = (docLocal >= pivotDoc && docLocal <= limitDoc.secondMaxDocId) ? docLocal : NO_MORE_DOC;
                position = (docLocal != NO_MORE_DOC) ? position-blockDim.x : NO_VALID_POSITION;
                __syncthreads();
                atomicMin(&(fingers[idTerm].docId) , docLocal);
                __syncthreads();
                if(fingers[idTerm].docId == docLocal){
                    fingers[idTerm].position = position;
                }
            }
        }
        __syncthreads();
        //Sort the terms in non decreasing order of DID
        sortingTerms_2(fingers, iOrderedTermSharedList, iTermNumber);
        //Select term pivot
        if(THREAD_MASTER){
            selectTermPivot_No_SharedMemory(&sharedPivot,iOrderedTermSharedList,fingers,dUBlist,iTermNumber,thresholdLocal);
            docCurrent = (sharedPivot.idTerm != NO_PIVOT_TERM) ? fingers[sharedPivot.idTerm].docId : NO_MORE_DOC;
            score = 0.0;
        }
        __syncthreads();
        // Optional cross-block threshold sharing (see SHAREDTHESHOLD above).
        if (SHAREDTHESHOLD == 1){//SHARED_READ
            if(THREAD_MASTER && documentTopk.padding == 0 && thresholdLocal > globalThreshold){
//                atomicMaxD(&globalThreshold,thresholdLocal);
//                atomicMax((unsigned long long int*)&globalThreshold,(unsigned long long int)thresholdLocal);
                globalThreshold = thresholdLocal;
            }
            if(thresholdLocal < globalThreshold){
                thresholdLocal = globalThreshold;
            }
        }else if (SHAREDTHESHOLD == 2){ //TSHARED_WRITEREAD
            if(THREAD_MASTER && (documentTopk.padding < (iTopK >> 1)) && thresholdLocal > globalThreshold){
//                atomicMax((unsigned long long int*)&globalThreshold,(unsigned long long int)thresholdLocal);
//                atomicMaxD(&globalThreshold,thresholdLocal);
                globalThreshold = thresholdLocal;
            }
            if((documentTopk.padding < (iTopK >> 1)))
                if(thresholdLocal < globalThreshold){
                    thresholdLocal = globalThreshold;
                }
        }
    }
//    for (int i = blockIdx.x*iTopK+threadIdx.x; i < blockIdx.x*iTopK; i+= blockDim.x) {
//        printf("---%d %d---",blockIdx.x,iTopkDocListGlobal[i]);
//    }
    // Publish this block's sorted top-k into the global output arrays.
    sortLocalTopkDocAndStoreInGlobal(dTopkScoreListGlobal,iTopkDocListGlobal,iTopK,&documentTopk);
//    float test = checkSorting(documentTopk, dTopkScoreListGlobal, iTopkDocListGlobal, iTopK);
//
//    int result = __syncthreads_or(test != 0.0);
//    if(THREAD_MASTER && result != 0){
//        printf("Oi no
//        Sorting!\n");
//        return;
//    }
//    if(thresholdLocal > globalThreshold)
//        thresholdGlobal = thresholdLocal;
//    if(THREAD_MASTER)
//        atomicAdd(&globalCount,count);
//////
//    if(THREAD_MASTER)
//        printf("-----%d----", globalCount);
}

// Block-parallel WAND-style top-k retrieval, fixed-partition variant: each
// block owns a fixed posting-index range per term (finalPositions) instead of
// a doc-id range, and reads postings directly from global memory. Same flat
// inverted-index parameter layout as the matchWandParallel_VARIABLE_* kernels;
// per-block results are written via sortLocalTopkDocAndStoreInGlobal().
// Relies on the same file-scope device state/macros: globalThreshold,
// THREAD_MASTER, TERM_NUMBER, DOC_QUANTITY_IN_MEMORY, SHAREDTHESHOLD,
// NO_MORE_DOC, NO_VALID_POSITION, NO_PIVOT_TERM.
__global__ void matchWandParallel_FIXED_3(const int* iDocIdList, const unsigned short int* iFreqList,
        const float *dUBlistGlobal, const float *dIdfListGlobal, const int *iDocLenghtList,
        const short int iTermNumber, int *iTopkDocListGlobal, float *dTopkScoreListGlobal,
        const float dAverageDocumentLengthGlobal, const int iGlobalRoundNumber, const short int iTopK,
        const float iInitialThreshold, const int* iDocNumberByTermListGlobal){
//    if(blockIdx.x != 0)
//        return;
    // Per-block state: pivot, per-term cursors ("fingers"), local top-k heap,
    // and the fixed end position of each term's slice.
    __shared__ pivot sharedPivot;
    __shared__ finger fingers[TERM_NUMBER];
    __shared__ documentTopkList documentTopk;
    __shared__ unsigned int iOrderedTermSharedList[TERM_NUMBER];    // term ids sorted by current doc id
//    __shared__ long long int iSharedPositionInitialInList[TERM_NUMBER];
    __shared__ float dUBlist[TERM_NUMBER];
    __shared__ float dIdfList[TERM_NUMBER];
    __shared__ float dAverageDocumentLength;
    __shared__ int iDocNumberByTermList[TERM_NUMBER];
    __shared__ int iGlobalInitialPositionInList;
    __shared__ float score;
    __shared__ bool isValidCandidate;
    __shared__ int docCurrent;
    __shared__ long long finalPositions[TERM_NUMBER];    // exclusive end of each term's slice
//    int count =0;
    int padding;
    float thresholdLocal = iInitialThreshold;
    thresholdLocal = iInitialThreshold;    // NOTE(review): redundant re-assignment kept as-is
    int globalIndex;// = iInitialPositionGlobal + threadIdx.x;
    int localIndex;
    long long int positionInitialInTermPostingList;
    // Start from the best pruning threshold published so far by other blocks.
    if(thresholdLocal < globalThreshold)
        thresholdLocal = globalThreshold;
    if(THREAD_MASTER){
        documentTopk.padding = iTopK;    // number of free heap slots
        dAverageDocumentLength = dAverageDocumentLengthGlobal;
        iGlobalInitialPositionInList = DOC_QUANTITY_IN_MEMORY * blockIdx.x * iGlobalRoundNumber;
    }
    // One thread per query term: load metadata and set its start/end positions.
    // NOTE(review): iGlobalInitialPositionInList is written by thread 0 above
    // and read here without an intervening __syncthreads() - confirm intended.
    if(threadIdx.x < iTermNumber){
        iDocNumberByTermList[threadIdx.x] = iDocNumberByTermListGlobal[threadIdx.x];
        dUBlist[threadIdx.x] = dUBlistGlobal[threadIdx.x];
        dIdfList[threadIdx.x] = dIdfListGlobal[threadIdx.x];
        globalIndex = iGlobalInitialPositionInList;
        positionInitialInTermPostingList = 0;
        for (int i = 0; i < threadIdx.x; ++i) {
            positionInitialInTermPostingList += iDocNumberByTermList[i];
        }
//        iSharedPositionInitialInList[threadIdx.x] = positionInitialInTermPostingList;
        fingers[threadIdx.x].position = positionInitialInTermPostingList + globalIndex;
        if(fingers[threadIdx.x].position < (positionInitialInTermPostingList+iDocNumberByTermList[threadIdx.x])){
            fingers[threadIdx.x].docId = iDocIdList[fingers[threadIdx.x].position];
        }else{
            fingers[threadIdx.x].position = NO_VALID_POSITION;
            fingers[threadIdx.x].docId = NO_PIVOT_TERM;
        }
        // Clamp the slice end to the term's posting-list length.
        finalPositions[threadIdx.x] = positionInitialInTermPostingList + globalIndex + DOC_QUANTITY_IN_MEMORY * iGlobalRoundNumber;
        if(finalPositions[threadIdx.x] >= (positionInitialInTermPostingList+iDocNumberByTermList[threadIdx.x]))
            finalPositions[threadIdx.x] = positionInitialInTermPostingList+iDocNumberByTermList[threadIdx.x];
    }
    // Initialize the top-k score/document lists.
    // Assumes Top_K is a multiple of the block size.
    for (localIndex = threadIdx.x; localIndex < iTopK; localIndex += blockDim.x) {
        documentTopk.id[localIndex] = -1;
        documentTopk.score[localIndex] = 0.0;
    }
    __syncthreads();
    sortingTerms_2(fingers, iOrderedTermSharedList, iTermNumber);
    if(THREAD_MASTER){
        selectTermPivot_No_SharedMemory(&sharedPivot,iOrderedTermSharedList,fingers,dUBlist,iTermNumber,thresholdLocal);
        docCurrent = (sharedPivot.idTerm != NO_PIVOT_TERM) ? fingers[sharedPivot.idTerm].docId : NO_MORE_DOC;
//    }else if(THREAD_MASTER_2){
        score = 0.0;
    }
    __syncthreads();
    // Main WAND loop: score aligned pivot candidates, otherwise skip lagging
    // fingers forward; runs until no pivot term remains.
    while((sharedPivot.positionInOrderedList < iTermNumber) && (sharedPivot.idTerm < iTermNumber)){
        isValidCandidate = (fingers[sharedPivot.idTerm].docId == fingers[iOrderedTermSharedList[0]].docId);
//        count++;
        if(isValidCandidate){
            // Pivot document is fully aligned: compute its score.
            if(threadIdx.x < iTermNumber){
                int termId = iOrderedTermSharedList[threadIdx.x];
                float scoreL = 0.0;
                if(fingers[termId].docId == fingers[sharedPivot.idTerm].docId){
                    // NOTE(review): last argument is 1.1 here but 1.0 in the
                    // VARIABLE_* kernels - confirm which is intended.
                    scoreL = scoreTf_Idf(iFreqList[fingers[termId].position],
                            iDocLenghtList[fingers[termId].position],
                            dIdfList[termId],dAverageDocumentLength,1.1);
                }
                // Warp-shuffle sum of the per-term partial scores.
                float aux = 0;
                for (int i = 0; i < TERM_NUMBER; ++i) {
                    aux += __shfl_sync(0xFFFFFFFF,scoreL,i);
                }
                if(THREAD_MASTER)
                    score = aux;
//                atomicAdd(&score,scoreL);
            }
            padding = documentTopk.padding;
            __syncthreads();
            /* If the heap is not full the candidate is inserted into the heap.
               If the heap is full and the new score is larger than the minimum
               score in the heap, the new document is inserted into the heap,
               replacing the one with the minimum score. */
            if(padding != 0 || thresholdLocal < score ){
                thresholdLocal = managerMinValue_v5(&documentTopk, fingers[sharedPivot.idTerm].docId, score,padding);
            }
            // Advance every finger that sits on the just-scored document.
            if(threadIdx.x < iTermNumber ){
                int docPivot = fingers[sharedPivot.idTerm].docId;
                if(fingers[threadIdx.x].docId == docPivot){
                    fingers[threadIdx.x].position++;
                    if(fingers[threadIdx.x].position >= finalPositions[threadIdx.x] ){//not valid
                        fingers[threadIdx.x].docId = NO_MORE_DOC;
                        fingers[threadIdx.x].position = NO_VALID_POSITION;
                    }else{
                        fingers[threadIdx.x].docId = iDocIdList[fingers[threadIdx.x].position];
                    }
                }
            }
        }
        else{
            // Pivot not aligned: skip the lagging fingers forward to the pivot
            // doc id within each term's fixed slice.
            int pivotDoc = docCurrent;
            long long int position;
            int docLocal;
            int idTerm;
            for (int j = 0; j < sharedPivot.positionInOrderedList; ++j) {
                idTerm = iOrderedTermSharedList[j];
                if(fingers[idTerm].docId == fingers[sharedPivot.idTerm].docId)//until we reach a finger that already points at a pivot document
                    break;
                fingers[idTerm].docId = NO_MORE_DOC;
                position = fingers[idTerm].position + 1 + threadIdx.x;
                docLocal = -1;
                while(position < finalPositions[idTerm] && docLocal < pivotDoc){
                    docLocal = iDocIdList[position];
                    position += blockDim.x;
                }
                position -= blockDim.x;
                if((docLocal < pivotDoc || position >= finalPositions[idTerm])){
                    docLocal = NO_MORE_DOC;
                    position = NO_VALID_POSITION;
                }
                __syncthreads();
                atomicMin(&(fingers[idTerm].docId) , docLocal);
                __syncthreads();
                if(fingers[idTerm].docId == docLocal){
                    fingers[idTerm].position = position;
                }
            }
        }
        __syncthreads();
        //Sort the terms in non decreasing order of DID
        sortingTerms_2(fingers, iOrderedTermSharedList, iTermNumber);
        //Select term pivot
        if(THREAD_MASTER){
            selectTermPivot_No_SharedMemory(&sharedPivot,iOrderedTermSharedList,fingers,dUBlist,iTermNumber,thresholdLocal);
            docCurrent = (sharedPivot.idTerm != NO_PIVOT_TERM) ? fingers[sharedPivot.idTerm].docId : NO_MORE_DOC;
            score = 0.0;
        }
        __syncthreads();
        // Optional cross-block threshold sharing (SHAREDTHESHOLD policy).
        if (SHAREDTHESHOLD == 1){//SHARED_READ
            if(THREAD_MASTER && documentTopk.padding == 0 && thresholdLocal > globalThreshold){
//                atomicMaxD(&globalThreshold,thresholdLocal);
//                atomicMax((unsigned long long int*)&globalThreshold,(unsigned long long int)thresholdLocal);
                globalThreshold = thresholdLocal;
            }
            if(thresholdLocal < globalThreshold){
                thresholdLocal = globalThreshold;
            }
        }else if (SHAREDTHESHOLD == 2){ //TSHARED_WRITEREAD
            if(THREAD_MASTER && (documentTopk.padding < (iTopK >> 1)) && thresholdLocal > globalThreshold){
//                atomicMax((unsigned long long int*)&globalThreshold,(unsigned long long int)thresholdLocal);
//                atomicMaxD(&globalThreshold,thresholdLocal);
                globalThreshold = thresholdLocal;
            }
            if((documentTopk.padding < (iTopK >> 1)))
                if(thresholdLocal < globalThreshold){
                    thresholdLocal = globalThreshold;
                }
        }
    }
    // Publish this block's sorted top-k into the global output arrays.
    sortLocalTopkDocAndStoreInGlobal(dTopkScoreListGlobal,iTopkDocListGlobal,iTopK,&documentTopk);
//    if(thresholdLocal > globalThreshold)
//        globalThreshold = thresholdLocal;
//    if(THREAD_MASTER)
//        atomicAdd(&globalCount,count);
////
//    if(THREAD_MASTER)
//        printf("-----%d----", globalCount);
}

// Opening of matchWandParallel_VARIABLE_3 - the kernel body continues past
// this chunk of the file.
__global__ void matchWandParallel_VARIABLE_3(const int* iDocIdList, const unsigned short int* iFreqList,
        const float *dUBlist, const float *dIdfList, const int *iDocLenghtList,
        const short int iTermNumber, int *iTopkDocListGlobal, float *dTopkScoreListGlobal,
        const float dAverageDocumentLength, const int iGlobalRoundNumber, const short int iTopK,
        const float iInitialThreshold, const int* iDocNumberByTermList){
//    if(blockIdx.x != 1687)
//        return;
//    int count = 0;
    __shared__ pivot sharedPivot;
    __shared__ finger fingers[TERM_NUMBER];
    __shared__ postingList postingLists[TERM_NUMBER];
    __shared__ documentTopkList documentTopk;
    __shared__ unsigned int iOrderedTermSharedList[TERM_NUMBER];
//    __shared__ float dUBlist[TERM_NUMBER];
//    __shared__ int iDocNumberByTermList[TERM_NUMBER];
    __shared__
int iGlobalInitialPositionInList; __shared__ unsigned short int iElementQuantityByBlock; __shared__ float score; __shared__ bool isValidCandidate; __shared__ int docCurrent; __shared__ short int needSearchDocRange[TERM_NUMBER]; __shared__ limitDocId limitDoc; float thresholdLocal;// = iInitialThreshold; // int count = 0; thresholdLocal = iInitialThreshold; int globalIndex;// = iInitialPositionGlobal + threadIdx.x; int localIndex; int positionInitialInTermPostingList; if(thresholdLocal < globalThreshold) thresholdLocal = globalThreshold; if(THREAD_MASTER){ documentTopk.padding = iTopK; }else if(THREAD_MASTER_2){ iElementQuantityByBlock = DOC_QUANTITY_IN_MEMORY;//iBlockRoundNumber * DOC_QUANTITY_IN_MEMORY; iGlobalInitialPositionInList = iElementQuantityByBlock * blockIdx.x * iGlobalRoundNumber; } //Inicializa a lista de Score e Documentos dos Topk //Considero que o Top_K seja um nmero mltiplo do tamanho do bloco for (localIndex = threadIdx.x; localIndex < iTopK; localIndex += blockDim.x) { documentTopk.id[localIndex] = -1; documentTopk.score[localIndex] = 0.0; } // if(THREAD_MASTER) documentTopk.padding = iTopK; __syncthreads(); //Define o max e o min if(threadIdx.x < iTermNumber){ // iDocNumberByTermList[threadIdx.x] = globalDocNumberByTermList[threadIdx.x]; fingers[threadIdx.x].final = 0; limitDoc.extraPosition[threadIdx.x] = 0; // dUBlist[threadIdx.x] = dUBlistGlobal[blockIdx.x * iTermNumber + threadIdx.x]; int docAmount = iDocNumberByTermList[threadIdx.x]; globalIndex = iGlobalInitialPositionInList; positionInitialInTermPostingList = 0; for (int i = 0; i < threadIdx.x; ++i) { positionInitialInTermPostingList += iDocNumberByTermList[i]; } // if(threadIdx.x == 0 && blockIdx.x == 1687){ // printf("Oi"); // } int aux, maxDoc; int maxNeighbor; if(blockIdx.x != 0){ int maxDoc = (globalIndex < docAmount) ? 
iDocIdList[positionInitialInTermPostingList + globalIndex - 1] : -1; maxDoc++; aux = maxDoc; // atomicMax(&limitDoc.minDocId, maxDoc); // __syncwarp(0xFFFFFFFF); for (int i = 1; i < iTermNumber; ++i) { maxNeighbor = __shfl_sync(0xFFFFFFFF,aux,i); if(maxNeighbor > maxDoc) maxDoc = maxNeighbor; } // if(THREAD_MASTER) { limitDoc.minDocId = maxDoc; //atomicExch(&(limitDoc.minDocId), maxDoc); } __syncwarp(0xFFFFFFFF); if(aux < limitDoc.minDocId && aux != 0) needSearchDocRange[threadIdx.x] = 1; }else if(THREAD_MASTER) limitDoc.minDocId = 0; int isTail = globalIndex < docAmount; globalIndex += iElementQuantityByBlock * iGlobalRoundNumber; isTail &= globalIndex >= docAmount; if(isTail){ globalIndex = iGlobalInitialPositionInList + (docAmount - iGlobalInitialPositionInList - 1); } maxDoc = (isTail || globalIndex < docAmount) ? iDocIdList[positionInitialInTermPostingList + globalIndex] - 1 : -1; aux = maxDoc; for (int i = 1; i < iTermNumber; ++i) { maxNeighbor = __shfl_sync(0xFFFFFFFF,aux,i); if(maxNeighbor > maxDoc) maxDoc = maxNeighbor; } if(THREAD_MASTER) limitDoc.secondMaxDocId = maxDoc; } __syncthreads(); //Busca faixa de documentos; for (int termId = 0; termId < iTermNumber; ++termId) { if(needSearchDocRange[termId]) searchRangeOfDocs(iDocIdList,postingLists, termId, iGlobalInitialPositionInList, &limitDoc, iElementQuantityByBlock,iGlobalRoundNumber,iDocNumberByTermList); } __syncthreads(); //Preenche a memria compartilhada positionInitialInTermPostingList = 0; int docLocal, docAmount; for (int termId = 0; termId < iTermNumber; ++termId) { globalIndex = iGlobalInitialPositionInList + limitDoc.extraPosition[termId] + threadIdx.x; docAmount = iDocNumberByTermList[termId]; docLocal = -1; for (localIndex = threadIdx.x; localIndex < iElementQuantityByBlock; localIndex+=blockDim.x) { docLocal = (globalIndex < docAmount) ? 
iDocIdList[positionInitialInTermPostingList + globalIndex] : NO_MORE_DOC; if(docLocal > limitDoc.secondMaxDocId || globalIndex > docAmount){ postingLists[termId].docId[localIndex] = NO_MORE_DOC; fingers[termId].final = 1; break; } postingLists[termId].docId[localIndex] = docLocal; postingLists[termId].docLenght[localIndex] = iDocLenghtList[positionInitialInTermPostingList + globalIndex]; postingLists[termId].freq[localIndex] = iFreqList[positionInitialInTermPostingList + globalIndex]; globalIndex += blockDim.x; } positionInitialInTermPostingList += iDocNumberByTermList[termId]; } if(threadIdx.x < iTermNumber){ fingers[threadIdx.x].docId = postingLists[threadIdx.x].docId[0]; fingers[threadIdx.x].position = (fingers[threadIdx.x].docId == NO_MORE_DOC) ? NO_VALID_POSITION : 0; // fingers[threadIdx.x].final = 0 | fingers[threadIdx.x].final; } // if(threadIdx.x == 0 && blockIdx.x == 3430){ // printf("Oi"); // } __syncthreads(); // __shared__ int docCurrent; sortingTerms_2(fingers, iOrderedTermSharedList, iTermNumber); // __syncthreads(); if(THREAD_MASTER){ selectTermPivot_2(&sharedPivot,iOrderedTermSharedList,fingers,dUBlist,iTermNumber,thresholdLocal); docCurrent = (sharedPivot.idTerm != NO_PIVOT_TERM) ? fingers[sharedPivot.idTerm].docId : NO_MORE_DOC; }else if(THREAD_MASTER_2){ score = 0.0; } int padding; int threadIdInWarp = (threadIdx.x & 0x1f); int idWarp = ((blockDim.x >> 5) == 1 ) ? 1 : threadIdx.x >> 5; __syncthreads(); while((sharedPivot.positionInOrderedList < iTermNumber) && (sharedPivot.idTerm < iTermNumber)){ if(THREAD_MASTER){ isValidCandidate = (fingers[sharedPivot.idTerm].docId == fingers[iOrderedTermSharedList[0]].docId); } __syncthreads(); if(isValidCandidate){ if(threadIdx.x < iTermNumber){ fullScore_3_1(&score, fingers[sharedPivot.idTerm].docId, iOrderedTermSharedList, fingers,postingLists, dIdfList, dAverageDocumentLength); } padding = documentTopk.padding; __syncthreads(); /* If the heap is not full the candidate is inserted into the heap. 
If the heap is full and the new score is larger than the minimum score in the heap, the new document is inserted into the heap, replacing the one with the minimum score. */ if(padding != 0 || thresholdLocal < score ){ thresholdLocal = managerMinValue_v5(&documentTopk, fingers[sharedPivot.idTerm].docId, score,padding); } if(idWarp == 1 && threadIdInWarp < iTermNumber ){ advancePivoTermFinger_4(sharedPivot,fingers, postingLists,iElementQuantityByBlock,threadIdInWarp); } } else{ advanceDocIdOfPredecessorTerm_4(postingLists, iOrderedTermSharedList, fingers,sharedPivot,fingers[sharedPivot.idTerm].docId, iElementQuantityByBlock); } __syncthreads(); for (int termId = 0; termId < iTermNumber; ++termId) { if(fingers[termId].docId == NO_MORE_DOC && fingers[termId].final == 0){ // if(termId == 0) count++; // // if(blockIdx.x == 27 && count == 48 && THREAD_MASTER) // printf("Oi!"); searchMoreDocs(iDocIdList,iFreqList,iDocLenghtList,postingLists, termId,iGlobalInitialPositionInList, &limitDoc,iElementQuantityByBlock, &(fingers[termId]),docCurrent,iDocNumberByTermList); //#endif if (SHAREDTHESHOLD == 1){//SHARED_READ if(THREAD_MASTER && documentTopk.padding == 0 && thresholdLocal > globalThreshold){ // atomicMaxD(&globalThreshold,thresholdLocal); atomicMax((unsigned long long int*)&globalThreshold,(unsigned long long int)thresholdLocal); } if(thresholdLocal < globalThreshold){ thresholdLocal = globalThreshold; } }else if (SHAREDTHESHOLD == 2){ //TSHARED_WRITEREAD if(THREAD_MASTER && (documentTopk.padding < (iTopK >> 1)) && thresholdLocal > globalThreshold){ atomicMax((unsigned long long int*)&globalThreshold,(unsigned long long int)thresholdLocal); // atomicMaxD(&globalThreshold,thresholdLocal); } if((documentTopk.padding < (iTopK >> 1))) if(thresholdLocal < globalThreshold){ thresholdLocal = globalThreshold; } } //#endif } } // __syncthreads();//Talvez no precise //Sort the terms in non decreasing order of DID sortingTerms_2(fingers, iOrderedTermSharedList, iTermNumber); // 
__syncthreads();//Talvez no precise //Select term pivot if(THREAD_MASTER){ selectTermPivot_2(&sharedPivot,iOrderedTermSharedList,fingers,dUBlist,iTermNumber,thresholdLocal); docCurrent = (sharedPivot.idTerm != NO_PIVOT_TERM) ? fingers[sharedPivot.idTerm].docId : NO_MORE_DOC; score = 0.0; } __syncthreads(); } // if(threadIdx.x == 0){// && blockIdx.x == 1687){ // printf("----%d %d----",blockIdx.x,count); // } sortLocalTopkDocAndStoreInGlobal(dTopkScoreListGlobal,iTopkDocListGlobal,iTopK,&documentTopk); // globalIndex = iTopK * blockIdx.x + threadIdx.x + documentTopk.padding; // for (localIndex = threadIdx.x; localIndex < (iTopK - documentTopk.padding) ; localIndex += blockDim.x) { // iTopkDocListGlobal[globalIndex] = documentTopk.id[localIndex]; // dTopkScoreListGlobal[globalIndex] = documentTopk.score[localIndex]; // globalIndex += blockDim.x; // } // __syncthreads(); if(THREAD_MASTER && thresholdLocal > globalThreshold){ atomicMax((unsigned long long int*)&globalThreshold,(unsigned long long int)thresholdLocal); } }
1d745e3b4c8189b049b1fc491ce9515809f9417d.cu
/* * ParallelPrunningDaat.cu * * Created on: 08/12/2017 * Author: roussian */ #include "ParallelPrunningDaat.cuh" #include "DeviceFunctions.cuh" #include "UnityTest.cuh" #include <stdio.h> __device__ volatile float globalThreshold = 0.0; __device__ volatile float globalThresholdBatch[500]; __device__ int globalCount=0; __global__ void mergeTopkLists_v3(float *dTopkScoreList, int *iTopkDocList, int iTopk, int iMergeNumber, int iSkipTopkBetweenMerges, int iSkipTopkBetweenBlocks, int iTotalElementos){ // if(blockIdx.x != 74) // return; //Peguei o doc idblock 4 skipBlock 32 skipMerges 16 na posição 18559 ! // if(iSkipTopkBetweenBlocks == 4 && iSkipTopkBetweenMerges == 2) // if(blockIdx.x != 87) // return; __shared__ documentTopkList documentTopkSharedList_1; __shared__ documentTopkList documentTopkSharedList_2; __shared__ documentTopkList documentTopkSharedList_Partial; __shared__ short int halfNumberTopk;// = iTopk >> 1; __shared__ short int iTopkPosition;// = iTopk - 1; // Começa no índice 0 e vai iTopk - 1 __shared__ short int halfPositionNumberTopk;// = iTopkPosition >> 1; if(THREAD_MASTER){ halfNumberTopk = iTopk >> 1; }else if(THREAD_MASTER_2){ iTopkPosition = iTopk - 1; halfPositionNumberTopk = iTopkPosition >> 1; } int warpIndex = threadIdx.x >> 5; int threadWarpId = threadIdx.x - (warpIndex << 5); // (threadIdx.x & 0x1f); //threadIdx.x % 32; int isOdd = warpIndex & 1; //Verifica se o IdWarp é ímpar int numberThreadsInList = ((blockDim.x >> 6) << 5); // (#Block/Tamanho da Warp--2⁵) / 2; ---> isso pq metade do # de warps trabalham sobre uma lista warpIndex = warpIndex >> 1; //Isso pois as warps são divididas por impar e par. Então, se o idWarp é 5, então o novo id é 2 __syncthreads(); // int proportion = iTopk / blockDim.x; //K é múltiplo do numero de threads por bloco int offset = iTopkPosition; //a Posição que cada thread irá inserir o seu elemento //A posição das threads nas listas --- half + (pos. 
da warp * #threads dentro da warp) + id int indexInMemShared = halfNumberTopk + ( warpIndex << 5) + threadWarpId;//(iTopk >> 1) + ((warpId >> 1) << 5) + threadWarpId;///half + (pos. da warp * #threads na warp) + id float score_1, score_2; float *ownScorePtr, *workListPtr; int *ownDocId; int position; int index_1, index_2, indexLocal; // int isEndPart; //Obtém a posição inicial que a thread irá inserir na lista final. offset -= (iTopkPosition - indexInMemShared ) << 1;//A multiplicação por 2 é por causa das duas listas // if(blockIdx.x == 203) // printf("Oi!\n"); __syncthreads(); //As listas estão alinhadas em uma lista, por isso que o indice tem que seguir para //a próxima parte não processada referente ao bloco //Um merge pega 2 listas ou 1 lista + Resultado anterior index_1 = blockIdx.x * iTopk * iSkipTopkBetweenBlocks + threadIdx.x;//blockIdx.x * iTopk * (iMergeNumber + 1) * iSkipBetweenMerge + threadIdx.x; index_2 = index_1 + iTopk * iSkipTopkBetweenMerges;//index_1 + iTopk * iSkipTopkBetweenMerges; // isEndPart = 0;//index_2 > totalElements; indexLocal = threadIdx.x; //O número de threads por bloco pode ser menor que K while(indexLocal < iTopk){ documentTopkSharedList_1.id[indexLocal] = iTopkDocList[index_1]; documentTopkSharedList_1.score[indexLocal] = dTopkScoreList[index_1]; // if(iTopkDocList[index_1] == 46517642) // printf("Peguei o doc idblock %d skipBlock %d skipMerges %d na posição %d !\n", // blockIdx.x, iSkipTopkBetweenBlocks,iSkipTopkBetweenMerges,index_1); index_1 += blockDim.x; indexLocal += blockDim.x; } // if(THREAD_MASTER && blockIdx.x == 0 && iSkipTopkBetweenBlocks >= 2048) // printf("idblock %d skipBlock %d skipMerges %d na posição inicial %d %d!\n", // blockIdx.x, iSkipTopkBetweenBlocks,iSkipTopkBetweenMerges,blockIdx.x * iTopk * iSkipTopkBetweenBlocks, // blockIdx.x * iTopk * iSkipTopkBetweenBlocks + iTopk * iSkipTopkBetweenMerges); __syncthreads(); // if(THREAD_MASTER && blockIdx.x == 0){ // printf("First List - "); // for (int i = 0; i < 
iTopk; ++i) { // printf(" %.2f ", documentTopkSharedList_1.score[i]); // } // printf("\n"); // } for (int globalRound = 0; globalRound < iMergeNumber; ++globalRound) { //O número de threads por bloco pode ser menor que K indexLocal = threadIdx.x; while(indexLocal < iTopk){ if(index_2 >= iTotalElementos || index_2 < 0){ documentTopkSharedList_2.id[indexLocal] = 0; documentTopkSharedList_2.score[indexLocal] = 0; }else{ documentTopkSharedList_2.id[indexLocal] = iTopkDocList[index_2]; documentTopkSharedList_2.score[indexLocal] = dTopkScoreList[index_2]; // if(iTopkDocList[index_2] == 46517642) // printf("Peguei o doc idblock %d skipBlock %d skipMerges %d na posição %d !\n", // blockIdx.x, iSkipTopkBetweenBlocks,iSkipTopkBetweenMerges,index_2); } index_2 += blockDim.x; indexLocal += blockDim.x; } __syncthreads(); if(!isOdd){//As threads das Warps com ids par trabalham sobre os maiores elementos da mesma posição. do {//Esse bloco de instruções trabalha somente com dados que estão na memória compartilhada score_1 = documentTopkSharedList_1.score[indexInMemShared]; score_2 = documentTopkSharedList_2.score[indexInMemShared]; //Escolhe o maior elemento de uma mesma posição e a lista, a que tiver o menor elemento, que irá pecorrer. 
if(score_1 >= score_2){ ownScorePtr = &score_1; ownDocId = &documentTopkSharedList_1.id[indexInMemShared]; // if(*ownDocId == 46517642){// && blockIdx.x == 5 // printf("1 - blockId %d threadId %d\n", blockIdx.x, threadIdx.x); // } workListPtr = documentTopkSharedList_2.score; //A lista de trabalho sempre é a lista do menor elemento } else{ ownScorePtr = &score_2; ownDocId = &documentTopkSharedList_2.id[indexInMemShared]; // if(*ownDocId == 46517642){// && blockIdx.x == 5){ // printf("1.1 - blockId %d threadId %d\n", blockIdx.x, threadIdx.x); // } workListPtr = documentTopkSharedList_1.score; } if(score_1 != score_2){ //Busca atualizar o offset, i.e., procura o 1º elemento maior position = indexInMemShared;//Define a posição início para fazer as comparações (//Se workscore está em A[i] e A[i] < B[i], então A[i] < B[i + (1,2,3...)]) while( (position+1 < iTopk) && (*ownScorePtr > workListPtr[position+1]) ){ offset++; //Ao encontrar um elemento menor, ele irá aumentar a posição que irá inserir o seu elemento position++; } } //Insere os maiores elementos das listas, i.e., os elementos mais a direita da lista dos top-k documentTopkSharedList_Partial.score[offset] = *ownScorePtr; documentTopkSharedList_Partial.id[offset] = *ownDocId; //Redefine as variáveis para inicializar outro bloco de dados que está na memória compartilhada indexInMemShared += numberThreadsInList; offset = iTopkPosition - ((iTopkPosition - indexInMemShared ) << 1); //Reinicia o offset } while (indexInMemShared < iTopk); }else{ int count; //Quantos elementos irá buscar; float *ownScoreListPtr; do{//Esse bloco de instrução trabalha somente com dados que estão na memória compartilhada offset--;//É o menor elemento entre dois elementos (mesmo índice) score_1 = documentTopkSharedList_1.score[indexInMemShared]; score_2 = documentTopkSharedList_2.score[indexInMemShared]; //Escolhe o menor elemento de uma mesma posição e a lista, a que tiver o maior elemento, que irá pecorrer. 
if(score_1 < score_2){ ownScorePtr = &score_1; ownDocId = &documentTopkSharedList_1.id[indexInMemShared]; // if(*ownDocId == 46517642){ // printf("2 - blockId %d threadId %d\n", blockIdx.x, threadIdx.x); // } workListPtr = documentTopkSharedList_2.score; ownScoreListPtr = documentTopkSharedList_1.score; } else{//Entra igual(se for igual, entao o score_2 é selecionado) ou menor ownScorePtr = &score_2; ownDocId = &documentTopkSharedList_2.id[indexInMemShared]; // if(*ownDocId == 46517642){ // printf("2.1 - blockId %d threadId %d\n", blockIdx.x, threadIdx.x); // } workListPtr = documentTopkSharedList_1.score; ownScoreListPtr = documentTopkSharedList_2.score; } //Duas possibilidades podem ocorrer: (1) O elemento adquirido está entre os k maiores elementos //(2) o elemento não está entre os k maiore elementos if(*ownScorePtr >= workListPtr[halfPositionNumberTopk]){//Compara-se com o elemento que está na metade//if(*ownScorePtr > workListPtr[iTopkPosition >> 1]){//Compara-se com o elemento que está na metade position = indexInMemShared;// - 1; while( (position - 1 > 0) && (*ownScorePtr < workListPtr[position-1]) ){ offset--; position--; } documentTopkSharedList_Partial.score[offset] = *ownScorePtr; documentTopkSharedList_Partial.id[offset] = *ownDocId; }else{ offset -= indexInMemShared - halfNumberTopk;//(iTopk >> 1); Subtrai da metade do número das posições e não do índice máx, pois já ouve uma subtração do conjunto dos maiores elementos count = halfPositionNumberTopk - offset; //Quantos elementos irá buscar; float *aux; int posWork, posOwn; int *docIdOwn, *docIdWork; if(ownScoreListPtr[iTopkPosition] >= workListPtr[halfPositionNumberTopk] ){ ownScorePtr = &ownScoreListPtr[iTopkPosition]; posOwn = iTopkPosition; posWork = halfPositionNumberTopk; if(ownScoreListPtr == documentTopkSharedList_2.score){ docIdOwn = documentTopkSharedList_2.id; docIdWork = documentTopkSharedList_1.id; }else{ docIdOwn = documentTopkSharedList_1.id; docIdWork = documentTopkSharedList_2.id; } 
}else{ ownScorePtr = &workListPtr[halfPositionNumberTopk]; aux = ownScoreListPtr; ownScoreListPtr = workListPtr; workListPtr = aux; posOwn = halfPositionNumberTopk; posWork = iTopkPosition; if(workListPtr == documentTopkSharedList_2.score){ docIdWork = documentTopkSharedList_2.id; docIdOwn = documentTopkSharedList_1.id; }else{ docIdWork = documentTopkSharedList_1.id; docIdOwn = documentTopkSharedList_2.id; } } while(count > 0){ while((workListPtr[posWork] <= ownScoreListPtr[posOwn]) && (count > 0)){ posOwn--; count--; } // posOwn++; if(count == 0){ ownScorePtr = &ownScoreListPtr[posOwn]; ownDocId = &docIdOwn[posOwn]; }else{ while((ownScoreListPtr[posOwn] <= workListPtr[posWork]) && count > 0){ posWork--; count--; } // posWork++; if(count == 0){ ownScorePtr = &workListPtr[posWork]; ownDocId = &docIdWork[posWork]; } } } documentTopkSharedList_Partial.score[offset] = *ownScorePtr; documentTopkSharedList_Partial.id[offset] = *ownDocId; } indexInMemShared += numberThreadsInList; offset = iTopkPosition - ((iTopkPosition - indexInMemShared ) << 1); //Reinicia o offset } while(indexInMemShared < iTopk); }//IF-ELSE ODD __syncthreads(); indexLocal = threadIdx.x; while(indexLocal < iTopk){ documentTopkSharedList_1.id[indexLocal] = documentTopkSharedList_Partial.id[indexLocal]; documentTopkSharedList_1.score[indexLocal] = documentTopkSharedList_Partial.score[indexLocal]; indexLocal += blockDim.x; } // -1 por causa do avanço realizado pelas threads para o próximo bloco de topk documentos no último loop index_2 += iTopk * (iSkipTopkBetweenMerges - 1); indexInMemShared = halfNumberTopk + ( warpIndex << 5) + threadWarpId; offset = iTopkPosition - ((iTopkPosition - indexInMemShared ) << 1); //Reinicia o offset // checkMerge_Sorting_Documents(documentTopkSharedList_Partial, iSkipTopkBetweenMerges, iSkipTopkBetweenBlocks, iTopk); } __syncthreads(); index_1 = blockIdx.x * iTopk * iSkipTopkBetweenBlocks + threadIdx.x; indexLocal = threadIdx.x; while(indexLocal < iTopk){ // 
if(isEndPart) // break; // if(documentTopkSharedList_Partial.id[indexLocal] == 46517642) // printf("Entregando o doc idblock %d skipBlock %d skipMerges %d em %d!\n", // blockIdx.x, iSkipTopkBetweenBlocks,iSkipTopkBetweenMerges,index_1); if(documentTopkSharedList_Partial.score[indexLocal] != 0.0){ iTopkDocList[index_1] = documentTopkSharedList_Partial.id[indexLocal]; dTopkScoreList[index_1] = documentTopkSharedList_Partial.score[indexLocal]; } indexLocal += blockDim.x; index_1 += blockDim.x; } // __syncthreads(); // // if(THREAD_MASTER && blockIdx.x == 0){ // printf("Final List - "); // for (int i = 0; i < iTopk; ++i) { // printf(" %.2f ", documentTopkSharedList_Partial.score[i]); // } // printf("\n"); // } } __global__ void matchWandParallel_FIXED_2(const int* iDocIdList, const unsigned short int* iFreqList, const float *dUBlist, const float *dIdfList, const int *iDocLenghtList, const short int iTermNumber, int *iTopkDocListGlobal, float *dTopkScoreListGlobal, const float dAverageDocumentLength, const int iGlobalRoundNumber,// const int iBlockRoundNumber, const short int iTopK, const float iInitialThreshold,const int* d_iDocNumberByTermList){ int count=0; __shared__ pivot sharedPivot; __shared__ finger fingers[TERM_NUMBER]; __shared__ postingList postingLists[TERM_NUMBER]; __shared__ documentTopkList documentTopk; __shared__ unsigned int iOrderedTermSharedList[TERM_NUMBER]; __shared__ int iGlobalInitialPosition; __shared__ float score; __shared__ bool isValidCandidate; int positionInitialInTermPostingList; float thresholdLocal = iInitialThreshold; int globalIndex;// = iInitialPositionGlobal + threadIdx.x; int localIndex; if(THREAD_MASTER){ iGlobalInitialPosition = blockDim.x * blockIdx.x * iGlobalRoundNumber; documentTopk.padding = iTopK; } if(thresholdLocal < globalThreshold) thresholdLocal = globalThreshold; //Inicializa a lista de Score e Documentos dos Topk //Considero que o Top_K seja um número múltiplo do tamanho do bloco for (localIndex = threadIdx.x; 
localIndex < iTopK; localIndex += blockDim.x) { documentTopk.id[localIndex] = -1; documentTopk.score[localIndex] = 0.0; } __syncthreads(); for (int globalRound = 0; globalRound < iGlobalRoundNumber; ++globalRound) { positionInitialInTermPostingList = 0; for (int termIndex = 0; termIndex < iTermNumber; ++termIndex) { localIndex = threadIdx.x; globalIndex = positionInitialInTermPostingList + iGlobalInitialPosition + localIndex; while(localIndex < DOC_QUANTITY_IN_MEMORY){//(globalIndex < d_iDocNumberByTermList[termIndex] && localIndex < DOC_QUANTITY_IN_MEMORY){ if(globalIndex < d_iDocNumberByTermList[termIndex] + positionInitialInTermPostingList){ postingLists[termIndex].docId[localIndex] = iDocIdList[globalIndex];//[positionInitialInTermPostingList + globalIndex]; postingLists[termIndex].freq[localIndex] = iFreqList[globalIndex]; postingLists[termIndex].docLenght[localIndex] = iDocLenghtList[globalIndex]; } else{ postingLists[termIndex].docId[localIndex] = NO_MORE_DOC; } localIndex += blockDim.x; globalIndex += blockDim.x; } if(THREAD_MASTER){ fingers[termIndex].docId = postingLists[termIndex].docId[0]; fingers[termIndex].position = (fingers[termIndex].docId == NO_MORE_DOC) ? NO_VALID_POSITION : 0 ; } positionInitialInTermPostingList += d_iDocNumberByTermList[termIndex]; } __syncthreads(); // if(fingers[0].docId == 16563866) // printf("Oi!"); //Sort the terms in non decreasing order of DID sortingTerms_2(fingers, iOrderedTermSharedList, iTermNumber); // __syncthreads(); //Select term pivot if(THREAD_MASTER){ selectTermPivot_2(&sharedPivot,iOrderedTermSharedList,fingers,dUBlist,iTermNumber,thresholdLocal); }else if(THREAD_MASTER_2){ score = 0.0; } __syncthreads(); int padding; int threadIdInWarp = (threadIdx.x & 0x1f); int idWarp = ((blockDim.x >> 5) == 1 ) ? 
1 : threadIdx.x >> 5; while((sharedPivot.positionInOrderedList < iTermNumber) && (sharedPivot.idTerm < iTermNumber)){ if(THREAD_MASTER) isValidCandidate = (fingers[sharedPivot.idTerm].docId == fingers[iOrderedTermSharedList[0]].docId); count++; padding = documentTopk.padding; __syncthreads(); if(isValidCandidate){ //Avaliação Completa if(threadIdx.x < iTermNumber){ fullScore_3_1(&score, fingers[sharedPivot.idTerm].docId, iOrderedTermSharedList, fingers,postingLists, dIdfList, dAverageDocumentLength); } __syncthreads(); // if(padding != 0 || thresholdLocal < score){ if(thresholdLocal < score){ thresholdLocal = managerMinValue_v5(&documentTopk, fingers[sharedPivot.idTerm].docId,score,padding); // thresholdLocal = documentTopk.score[0]; } if(idWarp == 1 && threadIdInWarp < iTermNumber ){ advancePivoTermFinger_4(sharedPivot,fingers, postingLists,DOC_QUANTITY_IN_MEMORY,threadIdInWarp); } } else{ advanceDocIdOfPredecessorTerm_4(postingLists, iOrderedTermSharedList, fingers,sharedPivot,fingers[sharedPivot.idTerm].docId, DOC_QUANTITY_IN_MEMORY); } // if(fingers[0].docId == 16563866) // printf("Oi!"); __syncthreads(); sortingTerms_2(fingers, iOrderedTermSharedList, iTermNumber); // __syncthreads(); //Select term sharedPivot if(THREAD_MASTER){ selectTermPivot_2(&sharedPivot,iOrderedTermSharedList,fingers,dUBlist,iTermNumber,thresholdLocal); }else if(THREAD_MASTER_2){ score = 0.0; } __syncthreads(); } if(THREAD_MASTER){ iGlobalInitialPosition += DOC_QUANTITY_IN_MEMORY; } if (SHAREDTHESHOLD == 1){//SHARED_READ if(THREAD_MASTER && documentTopk.padding == 0 && thresholdLocal > globalThreshold){ // atomicMax(&globalThreshold,thresholdLocal); globalThreshold = thresholdLocal; } if(thresholdLocal < globalThreshold){ thresholdLocal = globalThreshold; } }else if (SHAREDTHESHOLD == 2){ //TSHARED_WRITEREAD if(THREAD_MASTER && (documentTopk.padding < (iTopK >> 1)) && thresholdLocal > globalThreshold){ // atomicMaxD(&globalThreshold,thresholdLocal); globalThreshold = thresholdLocal; } 
// ---- tail of the preceding kernel (its definition begins before this chunk) ----
if((documentTopk.padding < (iTopK >> 1)))
    if(thresholdLocal < globalThreshold){
        thresholdLocal = globalThreshold;
    }
    }
    __syncthreads();
    }
    // Write this block's local top-k out to the global result arrays.
    sortLocalTopkDocAndStoreInGlobal(dTopkScoreListGlobal,iTopkDocListGlobal,iTopK,&documentTopk);
    if(THREAD_MASTER)
        atomicAdd(&globalCount,count);
    if(THREAD_MASTER)
        printf("-----%d----", globalCount); // NOTE(review): debug print left in the kernel
}

// WAND-style top-k retrieval; one thread block per query (blockIdx.x indexes the query).
// A window of DOC_QUANTITY_IN_MEMORY doc ids per term is staged into the shared
// `postings` buffers; per-term fingers advance over the window and it is refilled from
// global memory when exhausted. Results for a query are written at offset
// blockIdx.x*iTopK of the output arrays.
// NOTE(review): the score reduction below uses __shfl_sync over lanes 0..TERM_NUMBER-1,
// which presumes the participating term threads share one warp (TERM_NUMBER <= 32) — confirm.
__global__ void matchWandParallel_VARIABLE_Batch_Block_3(const int* iDocIdList, const unsigned short int* iFreqList,
        const float *dUBlistGlobal, const float *dIdfListGlobal, const int *iDocLenghtList,
        const short int* iTermNumberByQuery, int *iTopkDocListGlobal, float *dTopkScoreListGlobal,
        const float dAverageDocumentLengthGlobal, const short int iTopK, const float iInitialThreshold,
        const int* iQueryTerms, const long long* ptrInitPostingList, int* ptrQueryPositions,int *iDocNumberByTermListGlobal){
    __shared__ int queryPosition;                                   // start of this query's terms in iQueryTerms
    __shared__ pivot sharedPivot;                                   // current WAND pivot (term id + rank in ordered list)
    __shared__ finger fingers[TERM_NUMBER];                         // per-term cursor into its posting list
    __shared__ documentTopkList documentTopk;                       // block-local top-k heap
    __shared__ postingList2 postings[TERM_NUMBER];                  // staged window of doc ids per term
    __shared__ int positionInShared[TERM_NUMBER];                   // finger offset inside the staged window (-1 = none yet)
    __shared__ unsigned int iOrderedTermSharedList[TERM_NUMBER];    // term ids sorted by current doc id
    __shared__ long long iSharedPositionInitialInList[TERM_NUMBER]; // start offset of each term's posting list
    __shared__ float dUBlist[TERM_NUMBER];                          // per-term score upper bounds
    __shared__ float dIdfList[TERM_NUMBER];                         // per-term idf values
    __shared__ float dAverageDocumentLength;
    __shared__ int iDocNumberByTermList[TERM_NUMBER];               // posting-list lengths
    __shared__ float score;
    __shared__ bool isValidCandidate;
    __shared__ int docCurrent;                                      // current pivot document id
    __shared__ limitDocId limitDoc;
    __shared__ short int iTermNumber;                               // number of terms in this query
    // NOTE(review): `count` is incremented in the main loop and printed at the end
    // but never initialized — the printed value starts from garbage.
    int count;
    int padding;
    float thresholdLocal;// = iInitialThreshold;
    thresholdLocal = iInitialThreshold;
    int globalIndex;// = iInitialPositionGlobal + threadIdx.x;
    int localIndex;
//    int count = 0;
    if(THREAD_MASTER){
        documentTopk.padding = iTopK;
        dAverageDocumentLength = dAverageDocumentLengthGlobal;
        limitDoc.secondMaxDocId = -1;
        iTermNumber = iTermNumberByQuery[blockIdx.x];
    }
    __syncthreads();
    // One thread per query term loads that term's metadata into shared memory.
    if(threadIdx.x < iTermNumber){
        queryPosition = ptrQueryPositions[blockIdx.x];
        int idTerm = iQueryTerms[queryPosition + threadIdx.x];
        iDocNumberByTermList[threadIdx.x] = iDocNumberByTermListGlobal[idTerm];
        dUBlist[threadIdx.x] = dUBlistGlobal[idTerm]*1.0;//[threadIdx.x];
        dIdfList[threadIdx.x] = dIdfListGlobal[idTerm];//[threadIdx.x];
        iSharedPositionInitialInList[threadIdx.x] = ptrInitPostingList[idTerm];
        positionInShared[threadIdx.x] = -1;
    }
    // Initialize the top-k score and document lists.
    // Assumes iTopK is a multiple of the block size.
#pragma unroll 4
    for (localIndex = threadIdx.x; localIndex < iTopK; localIndex += blockDim.x) {
        documentTopk.id[localIndex] = -1;
        documentTopk.score[localIndex] = 0.0;
    }
    // Determine the max/min document ids and place each finger at its list head.
    if(threadIdx.x < iTermNumber){
        int docAmount = iDocNumberByTermList[threadIdx.x];
        globalIndex = 0;
        int maxDoc;
        if(THREAD_MASTER)
            limitDoc.minDocId = 0;
        globalIndex = docAmount;
        maxDoc = iDocIdList[iSharedPositionInitialInList[threadIdx.x] + globalIndex - 1];
        atomicMax(&(limitDoc.secondMaxDocId), maxDoc);
        fingers[threadIdx.x].docId = iDocIdList[iSharedPositionInitialInList[threadIdx.x]];
        fingers[threadIdx.x].position = iSharedPositionInitialInList[threadIdx.x];
    }
//    __syncthreads();
    // NOTE(review): the barrier above is commented out, yet the loop below reads
    // fingers[] written by other threads just before — possible race; confirm.
    int pos;
    // Stage the first window of doc ids for every term into shared memory.
    for (int termId = 0; termId < iTermNumber; ++termId) {
        for (int localIndex = threadIdx.x; localIndex < DOC_QUANTITY_IN_MEMORY; localIndex+=blockDim.x) {
            pos = fingers[termId].position+localIndex+1;
            if(pos < iSharedPositionInitialInList[termId] + iDocNumberByTermList[termId]){
                postings[termId].docId[localIndex] = iDocIdList[pos];
            }else{
                postings[termId].docId[localIndex] = NO_MORE_DOC;
            }
        }
    }
    sortingTerms_2(fingers, iOrderedTermSharedList, iTermNumber);
    if(THREAD_MASTER){
        selectTermPivot_No_SharedMemory(&sharedPivot,iOrderedTermSharedList,fingers,dUBlist,iTermNumber,thresholdLocal);
        docCurrent = (sharedPivot.idTerm != NO_PIVOT_TERM) ? fingers[sharedPivot.idTerm].docId : NO_MORE_DOC;
        score = 0.0;
    }
    __syncthreads();
    // Main WAND loop: runs while a valid pivot exists.
    while((sharedPivot.positionInOrderedList < iTermNumber) && (sharedPivot.idTerm < iTermNumber)){
        // The pivot is a scoring candidate when the first term already points at it.
        isValidCandidate = (fingers[sharedPivot.idTerm].docId == fingers[iOrderedTermSharedList[0]].docId);
        count++;
//        if(count == 2559)
//            printf("Oi");
        if(isValidCandidate){
            if(threadIdx.x < iTermNumber){
                int termId = iOrderedTermSharedList[threadIdx.x];
                float scoreL = 0.0;
                if(fingers[termId].docId == fingers[sharedPivot.idTerm].docId){
                    scoreL = scoreTf_Idf(iFreqList[fingers[termId].position], iDocLenghtList[fingers[termId].position],
                            dIdfList[termId],dAverageDocumentLength,1.0);
                }
                // Warp-shuffle sum of the per-term partial scores.
                float aux = 0;
                for (int i = 0; i < TERM_NUMBER; ++i) {
                    aux += __shfl_sync(0xFFFFFFFF,scoreL,i);
                }
                if(THREAD_MASTER)
                    score = aux;
//                atomicAdd(&score,scoreL);
            }
            padding = documentTopk.padding;
            __syncthreads();
            /* If the heap is not full the candidate is inserted into the heap. If the heap
               is full and the new score is larger than the minimum score in the heap, the
               new document is inserted into the heap, replacing the one with the minimum
               score. */
            if(padding != 0 || thresholdLocal < score ){
                thresholdLocal = managerMinValue_v5(&documentTopk, fingers[sharedPivot.idTerm].docId, score,padding);
            }
            // Advance every finger currently sitting on the scored document.
            for (int idTerm = 0; idTerm < iTermNumber; ++idTerm) {
                if(THREAD_MASTER && positionInShared[idTerm] == -1)
                    fingers[idTerm].position++;
                if(docCurrent == fingers[idTerm].docId){
                    fingers[idTerm].docId = NO_MORE_DOC;
                    int docIdLocal, localIndex=0;
                    for (localIndex = threadIdx.x + positionInShared[idTerm]; localIndex < DOC_QUANTITY_IN_MEMORY; localIndex+=blockDim.x) {
                        docIdLocal = postings[idTerm].docId[localIndex];
                        if(docIdLocal > docCurrent && docIdLocal != NO_MORE_DOC){
                            // Exactly one thread sees the first id beyond docCurrent.
                            if(localIndex == 0 || (postings[idTerm].docId[localIndex-1] <= docCurrent)){
                                fingers[idTerm].docId = docIdLocal;
                                fingers[idTerm].position += localIndex - positionInShared[idTerm];
                                positionInShared[idTerm] = localIndex;
                            }
                            break;
                        }
                    }
                }
            }
        }
        else{
            // Pivot not yet a candidate: skip the preceding terms forward to it.
            int idTerm;
            for (int j = 0; j < sharedPivot.positionInOrderedList; ++j) {
                // NOTE(review): indexes with positionInOrderedList, not j — every
                // iteration selects the same term; the sibling kernels below use
                // iOrderedTermSharedList[j]. Confirm intended.
                idTerm = iOrderedTermSharedList[sharedPivot.positionInOrderedList];
                // NOTE(review): thread-dependent break (fingers[threadIdx.x]) in a loop
                // the whole block executes — sibling kernels compare fingers[idTerm]
                // against the pivot's finger instead. Confirm.
                if(docCurrent == fingers[threadIdx.x].docId)
                    break;
                fingers[idTerm].docId = NO_MORE_DOC;
                if(THREAD_MASTER && positionInShared[idTerm] == -1)
                    fingers[idTerm].position++;
                int docIdLocal, localIndex=0;
                for (localIndex = threadIdx.x+positionInShared[idTerm]; localIndex < DOC_QUANTITY_IN_MEMORY; localIndex+=blockDim.x) {
                    docIdLocal = postings[idTerm].docId[localIndex];
                    if(docIdLocal >= docCurrent && docIdLocal != NO_MORE_DOC){
                        if(localIndex == 0 || (postings[idTerm].docId[localIndex-1] < docCurrent)){
                            fingers[idTerm].docId = docIdLocal;
                            fingers[idTerm].position += localIndex - positionInShared[idTerm];
                            positionInShared[idTerm] = localIndex;
                        }
                        break;
                    }
                }
            }
        }
        __syncthreads();
        // Refill the shared window of any term that ran past its staged doc ids.
        for (int termId = 0; termId < iTermNumber; ++termId) {
            if(fingers[termId].docId == NO_MORE_DOC && fingers[termId].position != NO_VALID_POSITION){
                int pos, localIndex;
                for (localIndex = threadIdx.x; localIndex < DOC_QUANTITY_IN_MEMORY; localIndex+=blockDim.x) {
                    pos = fingers[termId].position+localIndex+1;
                    if(pos < iSharedPositionInitialInList[termId] + iDocNumberByTermList[termId]){
                        postings[termId].docId[localIndex] = iDocIdList[pos];
                    }else{
                        postings[termId].docId[localIndex] = NO_MORE_DOC;
                    }
                }
                if(THREAD_MASTER && postings[termId].docId[0] == NO_MORE_DOC)
                    fingers[termId].position = NO_VALID_POSITION;
                else{
                    fingers[termId].docId = postings[termId].docId[0];
                    positionInShared[termId] = -1;
                    // NOTE(review): `pos` is the last index each thread touched, so its
                    // value differs per thread, yet all threads write this shared finger.
                    fingers[termId].position = pos;
                }
            }
        }
        __syncthreads();
        //Sort the terms in non decreasing order of DID
        sortingTerms_2(fingers, iOrderedTermSharedList, iTermNumber);
        //Select term pivot
        if(THREAD_MASTER){
            selectTermPivot_No_SharedMemory(&sharedPivot,iOrderedTermSharedList,fingers,dUBlist,iTermNumber,thresholdLocal);
            docCurrent = (sharedPivot.idTerm != NO_PIVOT_TERM) ? fingers[sharedPivot.idTerm].docId : NO_MORE_DOC;
            score = 0.0;
        }
        __syncthreads();
    }
    if(blockIdx.x==499 && THREAD_MASTER)
        printf("-----%d %d----", blockIdx.x, count); // NOTE(review): debug print left in the kernel
    sortLocalTopkDocAndStoreInGlobal((float*)&(dTopkScoreListGlobal[blockIdx.x*iTopK]),(int*)&(iTopkDocListGlobal[blockIdx.x*iTopK]),iTopK,&documentTopk);
//    if(THREAD_MASTER)
////        atomicAdd(&globalCount,count);
////
//    if
}

// Variant of the kernel above: queries are processed in the order given by
// iOrderQueryList (blockIdx.x is mapped through it), and fingers are advanced by
// scanning global memory directly instead of via a staged shared window.
__global__ void matchWandParallel_VARIABLE_Batch_Block_Test(const int* iDocIdList, const unsigned short int* iFreqList,
        const float *dUBlistGlobal, const float *dIdfListGlobal, const int *iDocLenghtList,
        const short int* iTermNumberByQuery, int *iTopkDocListGlobal, float *dTopkScoreListGlobal,
        const float dAverageDocumentLengthGlobal, const short int iTopK, const float iInitialThreshold,
        const int* iQueryTerms, const long long* ptrInitPostingList, int* ptrQueryPositions,int *iDocNumberByTermListGlobal,
        const int* iOrderQueryList){
    __shared__ int queryPosition;
    __shared__ pivot sharedPivot;
    __shared__ finger fingers[TERM_NUMBER];
    __shared__ documentTopkList documentTopk;
    __shared__ unsigned int iOrderedTermSharedList[TERM_NUMBER];
    __shared__ long long iSharedPositionInitialInList[TERM_NUMBER];
    __shared__ float dUBlist[TERM_NUMBER];
    __shared__ float
    dIdfList[TERM_NUMBER];
    __shared__ float dAverageDocumentLength;
    __shared__ int iDocNumberByTermList[TERM_NUMBER];
    __shared__ float score;
    __shared__ bool isValidCandidate;
    __shared__ int docCurrent;
    __shared__ limitDocId limitDoc;
    __shared__ short int iTermNumber;
    int padding;
    float thresholdLocal;// = iInitialThreshold;
    thresholdLocal = iInitialThreshold;
    int globalIndex;// = iInitialPositionGlobal + threadIdx.x;
    int localIndex;
//    int count = 0;
    if(THREAD_MASTER){
        documentTopk.padding = iTopK;
        dAverageDocumentLength = dAverageDocumentLengthGlobal;
//    }else if(THREAD_MASTER_2){
//        limitDoc.minDocId = -1;
        limitDoc.secondMaxDocId = -1;
        iTermNumber = iTermNumberByQuery[blockIdx.x];
    }
    __syncthreads();
    // One thread per term loads metadata; the query index goes through iOrderQueryList.
    if(threadIdx.x < iTermNumber){
        queryPosition = ptrQueryPositions[iOrderQueryList[blockIdx.x]];
        int idTerm = iQueryTerms[queryPosition + threadIdx.x];
        iDocNumberByTermList[threadIdx.x] = iDocNumberByTermListGlobal[idTerm];
        dUBlist[threadIdx.x] = dUBlistGlobal[idTerm];//[threadIdx.x];
        dIdfList[threadIdx.x] = dIdfListGlobal[idTerm];//[threadIdx.x];
//        printf(" %.2f ",dUBlist[threadIdx.x]);
        iSharedPositionInitialInList[threadIdx.x] = ptrInitPostingList[idTerm];
    }
    // Initialize the top-k score and document lists.
    // Assumes iTopK is a multiple of the block size.
#pragma unroll 4
    for (localIndex = threadIdx.x; localIndex < iTopK; localIndex += blockDim.x) {
        documentTopk.id[localIndex] = -1;
        documentTopk.score[localIndex] = 0.0;
    }
    // Determine the max/min document ids; each finger starts at its list head.
    if(threadIdx.x < iTermNumber){
        int docAmount = iDocNumberByTermList[threadIdx.x];
//        fingers[threadIdx.x].final = 0;
//        limitDoc.extraPosition[threadIdx.x] = 0;
        globalIndex = 0;
        int maxDoc;
        if(THREAD_MASTER)
            limitDoc.minDocId = 0;
        globalIndex = docAmount-1;
        maxDoc = iDocIdList[iSharedPositionInitialInList[threadIdx.x] + globalIndex];
        atomicMax(&(limitDoc.secondMaxDocId), maxDoc);
        fingers[threadIdx.x].docId = iDocIdList[iSharedPositionInitialInList[threadIdx.x]];
        fingers[threadIdx.x].position = iSharedPositionInitialInList[threadIdx.x];
    }
    __syncthreads();
    sortingTerms_2(fingers, iOrderedTermSharedList, iTermNumber);
    if(THREAD_MASTER){
        selectTermPivot_No_SharedMemory(&sharedPivot,iOrderedTermSharedList,fingers,dUBlist,iTermNumber,thresholdLocal);
        docCurrent = (sharedPivot.idTerm != NO_PIVOT_TERM) ? fingers[sharedPivot.idTerm].docId : NO_MORE_DOC;
//    }else if(THREAD_MASTER_2){
        score = 0.0;
    }
    __syncthreads();
    // Main WAND loop: runs while a valid pivot exists.
    while((sharedPivot.positionInOrderedList < iTermNumber) && (sharedPivot.idTerm < iTermNumber)){
        isValidCandidate = (fingers[sharedPivot.idTerm].docId == fingers[iOrderedTermSharedList[0]].docId);
//        count++;
//
//        if(fingers[sharedPivot.idTerm].docId == 38182)
//            printf("Oi");
        if(isValidCandidate){
            if(threadIdx.x < iTermNumber){
                int termId = iOrderedTermSharedList[threadIdx.x];
                float scoreL = 0.0;
                if(fingers[termId].docId == fingers[sharedPivot.idTerm].docId){
                    scoreL = scoreTf_Idf(iFreqList[fingers[termId].position], iDocLenghtList[fingers[termId].position],
                            dIdfList[termId],dAverageDocumentLength,1.0);
                }
                // Warp-shuffle sum of the per-term partial scores.
                float aux = 0;
                for (int i = 0; i < TERM_NUMBER; ++i) {
                    aux += __shfl_sync(0xFFFFFFFF,scoreL,i);
                }
                if(THREAD_MASTER)
                    score = aux;
//                atomicAdd(&score,scoreL);
            }
            padding = documentTopk.padding;
            __syncthreads();
            /* If the heap is not full the candidate is inserted into the heap. If the heap
               is full and the new score is larger than the minimum score in the heap, the
               new document is inserted into the heap, replacing the one with the minimum
               score. */
            if(padding != 0 || thresholdLocal < score ){
                thresholdLocal = managerMinValue_v5(&documentTopk, fingers[sharedPivot.idTerm].docId, score,padding);
            }
            // Each term thread advances its own finger past the scored document.
            if(threadIdx.x < iTermNumber ){
                int docPivot = fingers[sharedPivot.idTerm].docId;
                if(fingers[threadIdx.x].docId == docPivot){
                    fingers[threadIdx.x].position++;
                    if(fingers[threadIdx.x].position >= (iDocNumberByTermList[threadIdx.x]+iSharedPositionInitialInList[threadIdx.x])){// not valid
                        fingers[threadIdx.x].docId = NO_MORE_DOC;
                        fingers[threadIdx.x].position = NO_VALID_POSITION;
                    }else{
                        fingers[threadIdx.x].docId = iDocIdList[fingers[threadIdx.x].position];
//                        if(fingers[threadIdx.x].docId > limitDoc.secondMaxDocId){
//                            fingers[threadIdx.x].docId = NO_MORE_DOC;
//                            fingers[threadIdx.x].position = NO_VALID_POSITION;
//                        }
                    }
                }
            }
        }
        else{
            // Skip each preceding term forward to the pivot document with a strided,
            // block-wide scan of global memory.
            int pivotDoc = docCurrent;
            long long position;
            int docLocal;
            int idTerm;
            for (int j = 0; j < sharedPivot.positionInOrderedList; ++j) {
                idTerm = iOrderedTermSharedList[j];
                if(fingers[idTerm].docId == fingers[sharedPivot.idTerm].docId)// until reaching a finger that already points at the pivot document
                    break;
                fingers[idTerm].docId = NO_MORE_DOC;
                position = fingers[idTerm].position + 1 + threadIdx.x;
                docLocal = -1;
                while(position < (iSharedPositionInitialInList[idTerm]+iDocNumberByTermList[idTerm]) && docLocal < pivotDoc){
                    docLocal = iDocIdList[position];
                    position += blockDim.x;
                }
                position -= blockDim.x;
                if(docLocal < pivotDoc || position >= (iSharedPositionInitialInList[idTerm]+iDocNumberByTermList[idTerm])){
                    docLocal = NO_MORE_DOC;
                    position = NO_VALID_POSITION;
                }
//                atomicMin(&(fingers[idTerm].docId) , docLocal);
                // Warp-reduce the candidates, then one atomicMin per warp leader.
                int docNeighbor, docAux = docLocal;
                for (int i = 16; i >= 1; i /= 2) {
                    docNeighbor = __shfl_down_sync(0xFFFFFFFF,docAux, i);
                    if(docNeighbor < docAux)
                        docAux = docNeighbor;
                }
                if( ((threadIdx.x & 0x1f) == 0)){
                    atomicMin(&(fingers[idTerm].docId) , docAux);
                }
                __syncthreads();
                // The thread holding the winning doc id records its position.
                if(fingers[idTerm].docId == docLocal){
                    fingers[idTerm].position = position;
                }
            }
        }
        __syncthreads();
        //Sort the terms in non decreasing order of DID
        sortingTerms_2(fingers, iOrderedTermSharedList, iTermNumber);
        //Select term pivot
        if(THREAD_MASTER){
            selectTermPivot_No_SharedMemory(&sharedPivot,iOrderedTermSharedList,fingers,dUBlist,iTermNumber,thresholdLocal);
            docCurrent = (sharedPivot.idTerm != NO_PIVOT_TERM) ? fingers[sharedPivot.idTerm].docId : NO_MORE_DOC;
            score = 0.0;
        }
        __syncthreads();
    }
//    sortLocalTopkDocAndStoreInGlobal((float*)&(dTopkScoreListGlobal[blockIdx.x*iTopK]),(int*)&(iTopkDocListGlobal[blockIdx.x*iTopK]),iTopK,&documentTopk);
    // Unsorted copy-out of the block-local top-k to this query's output slice.
    int gIndex = blockIdx.x * iTopK + threadIdx.x;
    for (int localIndex = threadIdx.x; localIndex < iTopK; localIndex+=blockDim.x) {
        dTopkScoreListGlobal[gIndex] = documentTopk.score[localIndex];
        iTopkDocListGlobal[gIndex] = documentTopk.id[localIndex];
        gIndex+=blockDim.x;
    }
}

// Same algorithm as the _Test variant above, but queries map directly to blockIdx.x
// (no reordering list parameter).
__global__ void matchWandParallel_VARIABLE_Batch_Block_2(const int* iDocIdList, const unsigned short int* iFreqList,
        const float *dUBlistGlobal, const float *dIdfListGlobal, const int *iDocLenghtList,
        const short int* iTermNumberByQuery, int *iTopkDocListGlobal, float *dTopkScoreListGlobal,
        const float dAverageDocumentLengthGlobal, const short int iTopK, const float iInitialThreshold,
        const int* iQueryTerms, const long long* ptrInitPostingList, int* ptrQueryPositions,int *iDocNumberByTermListGlobal){
//
//    if(blockIdx.x!=1)
//        return;
    __shared__ int queryPosition;
    __shared__ pivot sharedPivot;
    __shared__ finger fingers[TERM_NUMBER];
    __shared__ documentTopkList documentTopk;
    __shared__ unsigned int iOrderedTermSharedList[TERM_NUMBER];
    __shared__ long long iSharedPositionInitialInList[TERM_NUMBER];
    __shared__ float dUBlist[TERM_NUMBER];
    __shared__ float dIdfList[TERM_NUMBER];
    __shared__ float dAverageDocumentLength;
    __shared__ int iDocNumberByTermList[TERM_NUMBER];
    __shared__ float score;
    __shared__ bool isValidCandidate;
    __shared__ int docCurrent;
    __shared__ limitDocId limitDoc;
    __shared__ short int iTermNumber;
    int padding;
    float thresholdLocal;// = iInitialThreshold;
    thresholdLocal = iInitialThreshold;
    int globalIndex;// = iInitialPositionGlobal + threadIdx.x;
    int localIndex;
//    int count = 0;
    if(THREAD_MASTER){
        documentTopk.padding = iTopK;
        dAverageDocumentLength = dAverageDocumentLengthGlobal;
//    }else if(THREAD_MASTER_2){
//        limitDoc.minDocId = -1;
        limitDoc.secondMaxDocId = -1;
        iTermNumber = iTermNumberByQuery[blockIdx.x];
    }
    __syncthreads();
    // One thread per query term loads that term's metadata into shared memory.
    if(threadIdx.x < iTermNumber){
        queryPosition = ptrQueryPositions[blockIdx.x];
        int idTerm = iQueryTerms[queryPosition + threadIdx.x];
        iDocNumberByTermList[threadIdx.x] = iDocNumberByTermListGlobal[idTerm];
        dUBlist[threadIdx.x] = dUBlistGlobal[idTerm];//[threadIdx.x];
        dIdfList[threadIdx.x] = dIdfListGlobal[idTerm];//[threadIdx.x];
//        printf(" %.2f ",dUBlist[threadIdx.x]);
        iSharedPositionInitialInList[threadIdx.x] = ptrInitPostingList[idTerm];
    }
    // Initialize the top-k score and document lists.
    // Assumes iTopK is a multiple of the block size.
#pragma unroll 4
    for (localIndex = threadIdx.x; localIndex < iTopK; localIndex += blockDim.x) {
        documentTopk.id[localIndex] = -1;
        documentTopk.score[localIndex] = 0.0;
    }
    // Determine the max/min document ids; each finger starts at its list head.
    if(threadIdx.x < iTermNumber){
        int docAmount = iDocNumberByTermList[threadIdx.x];
//        fingers[threadIdx.x].final = 0;
//        limitDoc.extraPosition[threadIdx.x] = 0;
        globalIndex = 0;
        int maxDoc;
        if(THREAD_MASTER)
            limitDoc.minDocId = 0;
        globalIndex = docAmount-1;
        maxDoc = iDocIdList[iSharedPositionInitialInList[threadIdx.x] + globalIndex];
        atomicMax(&(limitDoc.secondMaxDocId), maxDoc);
        fingers[threadIdx.x].docId = iDocIdList[iSharedPositionInitialInList[threadIdx.x]];
        fingers[threadIdx.x].position = iSharedPositionInitialInList[threadIdx.x];
    }
    __syncthreads();
    sortingTerms_2(fingers, iOrderedTermSharedList, iTermNumber);
    if(THREAD_MASTER){
        selectTermPivot_No_SharedMemory(&sharedPivot,iOrderedTermSharedList,fingers,dUBlist,iTermNumber,thresholdLocal);
        docCurrent = (sharedPivot.idTerm != NO_PIVOT_TERM) ? fingers[sharedPivot.idTerm].docId : NO_MORE_DOC;
//    }else if(THREAD_MASTER_2){
        score = 0.0;
    }
    __syncthreads();
    // Main WAND loop: runs while a valid pivot exists.
    while((sharedPivot.positionInOrderedList < iTermNumber) && (sharedPivot.idTerm < iTermNumber)){
        isValidCandidate = (fingers[sharedPivot.idTerm].docId == fingers[iOrderedTermSharedList[0]].docId);
//        count++;
//
//        if(fingers[sharedPivot.idTerm].docId == 38182)
//            printf("Oi");
        if(isValidCandidate){
            if(threadIdx.x < iTermNumber){
                int termId = iOrderedTermSharedList[threadIdx.x];
                float scoreL = 0.0;
                if(fingers[termId].docId == fingers[sharedPivot.idTerm].docId){
                    scoreL = scoreTf_Idf(iFreqList[fingers[termId].position], iDocLenghtList[fingers[termId].position],
                            dIdfList[termId],dAverageDocumentLength,1.0);
                }
                // Warp-shuffle sum of the per-term partial scores.
                float aux = 0;
                for (int i = 0; i < TERM_NUMBER; ++i) {
                    aux += __shfl_sync(0xFFFFFFFF,scoreL,i);
                }
                if(THREAD_MASTER)
                    score = aux;
//                atomicAdd(&score,scoreL);
            }
            padding = documentTopk.padding;
            __syncthreads();
            /* If the heap is not full the candidate is inserted into the heap. If the heap
               is full and the new score is larger than the minimum score in the heap, the
               new document is inserted into the heap, replacing the one with the minimum
               score. */
            if(padding != 0 || thresholdLocal < score ){
                thresholdLocal = managerMinValue_v5(&documentTopk, fingers[sharedPivot.idTerm].docId, score,padding);
            }
            // Each term thread advances its own finger past the scored document.
            if(threadIdx.x < iTermNumber ){
                int docPivot = fingers[sharedPivot.idTerm].docId;
                if(fingers[threadIdx.x].docId == docPivot){
                    fingers[threadIdx.x].position++;
                    if(fingers[threadIdx.x].position >= (iDocNumberByTermList[threadIdx.x]+iSharedPositionInitialInList[threadIdx.x])){// not valid
                        fingers[threadIdx.x].docId = NO_MORE_DOC;
                        fingers[threadIdx.x].position = NO_VALID_POSITION;
                    }else{
                        fingers[threadIdx.x].docId = iDocIdList[fingers[threadIdx.x].position];
//                        if(fingers[threadIdx.x].docId > limitDoc.secondMaxDocId){
//                            fingers[threadIdx.x].docId = NO_MORE_DOC;
//                            fingers[threadIdx.x].position = NO_VALID_POSITION;
//                        }
                    }
                }
            }
        }
        else{
            // Skip each preceding term forward to the pivot document with a strided,
            // block-wide scan of global memory.
            int pivotDoc = docCurrent;
            long long position;
            int docLocal;
            int idTerm;
            for (int j = 0; j < sharedPivot.positionInOrderedList; ++j) {
                idTerm = iOrderedTermSharedList[j];
                if(fingers[idTerm].docId == fingers[sharedPivot.idTerm].docId)// until reaching a finger that already points at the pivot document
                    break;
                fingers[idTerm].docId = NO_MORE_DOC;
                position = fingers[idTerm].position + 1 + threadIdx.x;
                docLocal = -1;
                while(position < (iSharedPositionInitialInList[idTerm]+iDocNumberByTermList[idTerm]) && docLocal < pivotDoc){
                    docLocal = iDocIdList[position];
                    position += blockDim.x;
                }
                position -= blockDim.x;
                if(docLocal < pivotDoc || position >= (iSharedPositionInitialInList[idTerm]+iDocNumberByTermList[idTerm])){
                    docLocal = NO_MORE_DOC;
                    position = NO_VALID_POSITION;
                }
//                atomicMin(&(fingers[idTerm].docId) , docLocal);
                // Warp-reduce the candidates, then one atomicMin per warp leader.
                int docNeighbor, docAux = docLocal;
                for (int i = 16; i >= 1; i /= 2) {
                    docNeighbor = __shfl_down_sync(0xFFFFFFFF,docAux, i);
                    if(docNeighbor < docAux)
                        docAux = docNeighbor;
                }
                if( ((threadIdx.x & 0x1f) == 0)){
                    atomicMin(&(fingers[idTerm].docId) , docAux);
                }
                __syncthreads();
                // The thread holding the winning doc id records its position.
                if(fingers[idTerm].docId == docLocal){
                    fingers[idTerm].position = position;
                }
            }
        }
        __syncthreads();
        //Sort the terms in non decreasing order of DID
        sortingTerms_2(fingers, iOrderedTermSharedList, iTermNumber);
        //Select term pivot
        if(THREAD_MASTER){
            selectTermPivot_No_SharedMemory(&sharedPivot,iOrderedTermSharedList,fingers,dUBlist,iTermNumber,thresholdLocal);
            docCurrent = (sharedPivot.idTerm != NO_PIVOT_TERM) ? fingers[sharedPivot.idTerm].docId : NO_MORE_DOC;
            score = 0.0;
        }
        __syncthreads();
    }
//    sortLocalTopkDocAndStoreInGlobal((float*)&(dTopkScoreListGlobal[blockIdx.x*iTopK]),(int*)&(iTopkDocListGlobal[blockIdx.x*iTopK]),iTopK,&documentTopk);
    // Unsorted copy-out of the block-local top-k to this query's output slice.
    int gIndex = blockIdx.x * iTopK + threadIdx.x;
    for (int localIndex = threadIdx.x; localIndex < iTopK; localIndex+=blockDim.x) {
        dTopkScoreListGlobal[gIndex] = documentTopk.score[localIndex];
        iTopkDocListGlobal[gIndex] = documentTopk.id[localIndex];
        gIndex+=blockDim.x;
    }
//    if(THREAD_MASTER)
////        atomicAdd(&globalCount,count);
//
//    if(THREAD_MASTER)
//        printf("-----%d %d----", blockIdx.x, count);
}

// Older variant that stages doc ids, lengths and frequencies into shared postingList
// windows. Currently hard-wired to run only for blockIdx.x == 4999.
__global__ void matchWandParallel_VARIABLE_Batch_Block(const int* iDocIdList, const unsigned short int* iFreqList,
        const float *dUBlist, const float *dIdfList, const int *iDocLenghtList,
        const short int *iTermNumberByQuery, int *iTopkDocListGlobal, float *dTopkScoreListGlobal,
        const float dAverageDocumentLength, const int iBlockRoundNumber, const int iGlobalRoundNumber,
        const short int iTopK, const float iInitialThreshold, const int* iQueryTerms,
        const long long* ptrPostingPositions, int* ptrQueryPositions, int *iDocNumberByTermList){
    // NOTE(review): debug guard — every block except 4999 exits immediately.
    if(blockIdx.x != 4999)
        return;
    __shared__ short int iTermNumber;
    __shared__ pivot sharedPivot;
    __shared__ finger fingers[TERM_NUMBER];
    __shared__ postingList postingLists[TERM_NUMBER];
    __shared__ documentTopkList documentTopk;
    __shared__ long long ptrPostingPositionShared[TERM_NUMBER];
    __shared__ int iDocNumberByTermListShared[TERM_NUMBER];
    __shared__ int queryPosition;
    __shared__ unsigned int iOrderedTermSharedList[TERM_NUMBER];
//    __shared__ int iGlobalInitialPositionInList;
    __shared__ unsigned short int
    iElementQuantityByBlock;
    __shared__ float score;
    __shared__ bool isValidCandidate;
//    __shared__ short int needSearchDocRange[TERM_NUMBER];
    __shared__ limitDocId limitDoc;
    float thresholdLocal;// = iInitialThreshold;
    thresholdLocal = iInitialThreshold;
    int globalIndex = 0;// = iInitialPositionGlobal + threadIdx.x;
    int localIndex;
//    long long positionInitialInTermPostingList;//int positionInitialInTermPostingList;
    // Start from the best threshold published globally so far.
    if(thresholdLocal < globalThreshold)
        thresholdLocal = globalThreshold;
    if(THREAD_MASTER){
        documentTopk.padding = iTopK;
        // NOTE(review): indexes iTermNumberByQuery with blockDim.x — the sibling
        // kernels use blockIdx.x here; looks like a typo, confirm.
        iTermNumber = iTermNumberByQuery[blockDim.x];
    }else if(THREAD_MASTER_2){
        iElementQuantityByBlock = DOC_QUANTITY_IN_MEMORY;//iBlockRoundNumber * DOC_QUANTITY_IN_MEMORY;
//        iGlobalInitialPositionInList = 0;//iElementQuantityByBlock * blockIdx.x * iGlobalRoundNumber;
    }
    // Initialize the top-k score and document lists.
    // Assumes iTopK is a multiple of the block size.
    for (localIndex = threadIdx.x; localIndex < iTopK; localIndex += blockDim.x) {
        documentTopk.id[localIndex] = -1;
        documentTopk.score[localIndex] = 0.0;
    }
//    if(THREAD_MASTER) documentTopk.padding = iTopK;
    __syncthreads();
    // Determine the max/min document ids.
    if(threadIdx.x < iTermNumber){
        limitDoc.extraPosition[threadIdx.x] = 0;
        // NOTE(review): blockDim.x again where sibling kernels use blockIdx.x/idQuery — confirm.
        queryPosition = ptrQueryPositions[blockDim.x];
        iDocNumberByTermListShared[threadIdx.x] = iDocNumberByTermList[iQueryTerms[queryPosition + threadIdx.x]];
        ptrPostingPositionShared[threadIdx.x] = ptrPostingPositions[iQueryTerms[queryPosition+threadIdx.x]];
        int docAmount = iDocNumberByTermListShared[threadIdx.x];//iDocNumberByTermList[threadIdx.x];
//        globalIndex = iGlobalInitialPositionInList;
        int aux, maxDoc;
        int maxNeighbor;
        if(THREAD_MASTER)
            limitDoc.minDocId = 0;
//        int isTail = globalIndex < docAmount;
//        globalIndex += iElementQuantityByBlock * iGlobalRoundNumber;
//        isTail &= globalIndex >= docAmount;
        globalIndex = docAmount - 1;
//        int isTail = iElementQuantityByBlock >= docAmount;
//
//        if(isTail){
//            globalIndex = iGlobalInitialPositionInList + (docAmount - iGlobalInitialPositionInList - 1);
//        }
//        maxDoc = (isTail || globalIndex < docAmount) ? iDocIdList[ptrPostingPositionShared[threadIdx.x] + globalIndex] - 1 :
//                -1;
        maxDoc = iDocIdList[ptrPostingPositionShared[threadIdx.x] + globalIndex];
        aux = maxDoc;
        // Warp-shuffle max over the term threads.
        for (int i = 1; i < iTermNumber; ++i) {
            maxNeighbor = __shfl_sync(0xFFFFFFFF,aux,i);
            if(maxNeighbor > maxDoc)
                maxDoc = maxNeighbor;
        }
        if(THREAD_MASTER)
            limitDoc.secondMaxDocId = maxDoc;
    }
//    __syncthreads();
//
//    //Search the document range;
//    for (int internTermId = 0; internTermId < iTermNumber; ++internTermId) {
//        if(needSearchDocRange[internTermId])
//            searchRangeOfDocs_batch(iDocIdList,postingLists, internTermId, iGlobalInitialPositionInList,
//                    &limitDoc,iElementQuantityByBlock,iGlobalRoundNumber,
//                    iDocNumberByTermListShared[internTermId], ptrPostingPositionShared[internTermId]);
//    }
    __syncthreads();
    // Fill the shared posting-list windows (doc id, doc length, frequency).
//    positionInitialInTermPostingList = 0;
    int docLocal, docAmount;
    for (int termId = 0; termId < iTermNumber; ++termId) {
//        globalIndex = iGlobalInitialPositionInList + limitDoc.extraPosition[termId] + threadIdx.x;
        globalIndex = threadIdx.x;
        docAmount = iDocNumberByTermListShared[termId];
        docLocal = -1;
        for (localIndex = threadIdx.x; localIndex < iElementQuantityByBlock; localIndex+=blockDim.x) {
            docLocal = (globalIndex < docAmount) ? iDocIdList[ptrPostingPositionShared[termId] + globalIndex] : NO_MORE_DOC;
            // NOTE(review): `>` lets globalIndex == docAmount through (docLocal is
            // already NO_MORE_DOC then); confirm whether `>=` was intended.
            if(globalIndex > docAmount){
                postingLists[termId].docId[localIndex] = NO_MORE_DOC;
                fingers[termId].final = 1;
                break;
            }
            postingLists[termId].docId[localIndex] = docLocal;
            postingLists[termId].docLenght[localIndex] = iDocLenghtList[ptrPostingPositionShared[termId] + globalIndex];
            postingLists[termId].freq[localIndex] = iFreqList[ptrPostingPositionShared[termId] + globalIndex];
            globalIndex += blockDim.x;
        }
//        positionInitialInTermPostingList += iDocNumberByTermList[termId];
    }
//    __syncthreads();
    if(threadIdx.x < iTermNumber){
        fingers[threadIdx.x].docId = postingLists[threadIdx.x].docId[0];
        fingers[threadIdx.x].position = (fingers[threadIdx.x].docId == NO_MORE_DOC) ? NO_VALID_POSITION : 0;
        fingers[threadIdx.x].final = (fingers[threadIdx.x].final == 1) ? 1 : 0;
    }
    __syncthreads();
    __shared__ int docCurrent;
    sortingTerms_2(fingers, iOrderedTermSharedList, iTermNumber);
//    __syncthreads();
    if(THREAD_MASTER){
        selectTermPivot_2(&sharedPivot,iOrderedTermSharedList,fingers,dUBlist,iTermNumber,thresholdLocal);
        docCurrent = (sharedPivot.idTerm != NO_PIVOT_TERM) ? fingers[sharedPivot.idTerm].docId : NO_MORE_DOC;
    }else if(THREAD_MASTER_2){
        score = 0.0;
    }
    int padding;
    int threadIdInWarp = (threadIdx.x & 0x1f);
    // idWarp is forced to 1 for single-warp blocks; otherwise it is the warp index,
    // so the `idWarp == 1` check below selects warp 1 (or the sole warp).
    int idWarp = ((blockDim.x >> 5) == 1 ) ? 1 : threadIdx.x >> 5;
    __syncthreads();
    // Main WAND loop: runs while a valid pivot exists.
    while((sharedPivot.positionInOrderedList < iTermNumber) && (sharedPivot.idTerm < iTermNumber)){
        if(THREAD_MASTER)
            isValidCandidate = (fingers[sharedPivot.idTerm].docId == fingers[iOrderedTermSharedList[0]].docId);
        __syncthreads();
        if(isValidCandidate){
            if(threadIdx.x < iTermNumber){
                fullScore_3_1(&score, fingers[sharedPivot.idTerm].docId, iOrderedTermSharedList, fingers,postingLists, dIdfList, dAverageDocumentLength);
            }
            padding = documentTopk.padding;
            __syncthreads();
            if(thresholdLocal < score){
                thresholdLocal = managerMinValue_v5(&documentTopk, docCurrent, score,padding);
            }
            if(idWarp == 1 && threadIdInWarp < iTermNumber ){
                advancePivoTermFinger_4(sharedPivot,fingers, postingLists,iElementQuantityByBlock,threadIdInWarp);
            }
        }
        else{
            advanceDocIdOfPredecessorTerm_4(postingLists, iOrderedTermSharedList, fingers,sharedPivot,fingers[sharedPivot.idTerm].docId, iElementQuantityByBlock);
        }
        __syncthreads();
        // Refill exhausted windows from global memory.
        for (int termId = 0; termId < iTermNumber; ++termId) {
            if(fingers[termId].docId == NO_MORE_DOC && fingers[termId].final == 0){
//                searchMoreDocs_batch(iDocIdList,iFreqList,iDocLenghtList,postingLists,
//                        termId,iGlobalInitialPositionInList,&limitDoc,
//                        iElementQuantityByBlock,&(fingers[termId]),docCurrent,
//                        iDocNumberByTermListShared[termId],ptrPostingPositionShared[termId]);
                searchMoreDocs_batch(iDocIdList,iFreqList,iDocLenghtList,postingLists, termId, 0, &limitDoc,
                        iElementQuantityByBlock,&(fingers[termId]),docCurrent,
                        iDocNumberByTermListShared[termId],ptrPostingPositionShared[termId]);
//
////
//                if(THREAD_MASTER && (documentTopk.padding < (iTopK >> 1)) && thresholdLocal > thresholdGlobal){
////                if(THREAD_MASTER && documentTopk.padding == 0 && thresholdLocal > thresholdGlobal){
//                    atomicMaxD(&thresholdGlobal,thresholdLocal);
//                }
//
//                if((documentTopk.padding < (iTopK >> 1)))
//                    if(thresholdLocal < thresholdGlobal){
//                        thresholdLocal = thresholdGlobal;
//                    }
            }
        }
//        __syncthreads();// maybe unnecessary
        //Sort the terms in non decreasing order of DID
        sortingTerms_2(fingers, iOrderedTermSharedList, iTermNumber);
//        __syncthreads();// maybe unnecessary
        //Select term pivot
        if(THREAD_MASTER){
            selectTermPivot_2(&sharedPivot,iOrderedTermSharedList,fingers,dUBlist,iTermNumber,thresholdLocal);
            docCurrent = (sharedPivot.idTerm != NO_PIVOT_TERM) ? fingers[sharedPivot.idTerm].docId : NO_MORE_DOC;
            score = 0.0;
        }
        __syncthreads();
    }
//    if(THREAD_MASTER){
//        int max = iTopK - documentTopk.padding;
//        int i = 0;
//        while(i > max){
//            if(2*i+2 < max)
//                if(documentTopk.score[i] > documentTopk.score[2*i+2])
//                    printf("ERRADO!!!\n");
//
//            if(2*i+1 < max)
//                if(documentTopk.score[i] > documentTopk.score[2*i+1])
//                    printf("ERRADO!!!\n");
//
//            i++;
//        }}
//    __syncthreads();
    // Sort the block-local top-k and store it in the global result arrays.
    sortLocalTopkDocAndStoreInGlobal_BLOCK(dTopkScoreListGlobal,iTopkDocListGlobal,iTopK,&documentTopk);
//    globalIndex = iTopK * blockIdx.x + threadIdx.x + documentTopk.padding;
//    for (localIndex = threadIdx.x; localIndex < (iTopK - documentTopk.padding) ; localIndex += blockDim.x) {
//        iTopkDocListGlobal[globalIndex] = documentTopk.id[localIndex];
//        dTopkScoreListGlobal[globalIndex] = documentTopk.score[localIndex];
//        globalIndex += blockDim.x;
//    }
//    __syncthreads();
}

// Single-query batch variant: idQuery selects the query and blocks partition the
// posting lists (see iGlobalRoundNumber usage below). Definition continues past
// this chunk.
__global__ void matchWandParallel_BATCH(const int* iDocIdList, const unsigned short int* iFreqList,
        const float *dUBlist, const float *dIdfList, const int *iDocLenghtList,
        const short int iTermNumber, int *iTopkDocListGlobal, float *dTopkScoreListGlobal,
        const float dAverageDocumentLength, const int iBlockRoundNumber, const int iGlobalRoundNumber,
        const short int iTopK, const float iInitialThreshold, const int* iQueryTerms,
        const long long* ptrInitPostingList, int* ptrQueryPositions, int idQuery,int *iDocNumberByTermList){
    __shared__ pivot sharedPivot;
    __shared__ finger fingers[TERM_NUMBER];
    __shared__ postingList postingLists[TERM_NUMBER];
    __shared__ documentTopkList documentTopk;
    __shared__ long long ptrInitPostingListShared[TERM_NUMBER];
    __shared__ int
iDocNumberByTermListShared[TERM_NUMBER]; __shared__ int queryPosition; __shared__ unsigned int iOrderedTermSharedList[TERM_NUMBER]; __shared__ int iGlobalInitialPositionInList; __shared__ unsigned short int iElementQuantityByBlock; __shared__ float score; __shared__ bool isValidCandidate; __shared__ short int needSearchDocRange[TERM_NUMBER]; __shared__ limitDocId limitDoc; float thresholdLocal;// = iInitialThreshold; thresholdLocal = iInitialThreshold; int globalIndex;// = iInitialPositionGlobal + threadIdx.x; int localIndex; // long long positionInitialInTermPostingList;//int positionInitialInTermPostingList; if(thresholdLocal < globalThreshold) thresholdLocal = globalThreshold; if(THREAD_MASTER){ documentTopk.padding = iTopK; }else if(THREAD_MASTER_2){ iElementQuantityByBlock = DOC_QUANTITY_IN_MEMORY;//iBlockRoundNumber * DOC_QUANTITY_IN_MEMORY; iGlobalInitialPositionInList = iElementQuantityByBlock * blockIdx.x * iGlobalRoundNumber; } #ifdef DEBUG if(THREAD_MASTER_2) if(iGlobalInitialPositionInList < 0) printf("Opa!!!!"); #endif //Inicializa a lista de Score e Documentos dos Topk //Considero que o Top_K seja um número múltiplo do tamanho do bloco for (localIndex = threadIdx.x; localIndex < iTopK; localIndex += blockDim.x) { documentTopk.id[localIndex] = -1; documentTopk.score[localIndex] = 0.0; } // if(THREAD_MASTER) documentTopk.padding = iTopK; // __syncthreads(); //Define o max e o min if(threadIdx.x < iTermNumber){ limitDoc.extraPosition[threadIdx.x] = 0; queryPosition = ptrQueryPositions[idQuery]; iDocNumberByTermListShared[threadIdx.x] = iDocNumberByTermList[iQueryTerms[queryPosition + threadIdx.x]]; ptrInitPostingListShared[threadIdx.x] = ptrInitPostingList[iQueryTerms[queryPosition + threadIdx.x]]; int docAmount = iDocNumberByTermListShared[threadIdx.x];//iDocNumberByTermList[threadIdx.x]; globalIndex = iGlobalInitialPositionInList; // positionInitialInTermPostingList = 0; // // for (int i = 0; i < threadIdx.x; ++i) { // positionInitialInTermPostingList 
+= iDocNumberByTermList[iQueryTerms[i]];//iDocNumberByTermList[i]; // } // positionInitialInTermPostingList = ptrPostingPositionShared[threadIdx.x]; int aux, maxDoc; int maxNeighbor; if(blockIdx.x != 0){ int maxDoc = (globalIndex < docAmount) ? iDocIdList[ptrInitPostingListShared[threadIdx.x] + globalIndex - 1] : -1; maxDoc++; aux = maxDoc; atomicMax(&limitDoc.minDocId, maxDoc); // for (int i = 1; i < iTermNumber; ++i) { // maxNeighbor = __shfl(aux,i); // if(maxNeighbor > maxDoc) // maxDoc = maxNeighbor; // } // // if(THREAD_MASTER) limitDoc.minDocId = maxDoc; //atomicExch(&(limitDoc.minDocId), maxDoc); if(aux < limitDoc.minDocId && aux != 0){ needSearchDocRange[threadIdx.x] = 1; limitDoc.extraPosition[threadIdx.x] = NO_MORE_DOC; } }else if(THREAD_MASTER) limitDoc.minDocId = 0; int isTail = globalIndex < docAmount; globalIndex += iElementQuantityByBlock * iGlobalRoundNumber; isTail &= globalIndex >= docAmount; if(isTail){ globalIndex = iGlobalInitialPositionInList + (docAmount - iGlobalInitialPositionInList - 1); } maxDoc = (isTail || globalIndex < docAmount) ? 
iDocIdList[ptrInitPostingListShared[threadIdx.x] + globalIndex] : -1; aux = maxDoc; for (int i = 1; i < iTermNumber; ++i) { maxNeighbor = __shfl_down_sync(0xFFFFFFFF,aux,i); if(maxNeighbor > maxDoc) maxDoc = maxNeighbor; } if(THREAD_MASTER) limitDoc.secondMaxDocId = maxDoc; } __syncthreads(); //Busca faixa de documentos; for (int internTermId = 0; internTermId < iTermNumber; ++internTermId) { if(needSearchDocRange[internTermId]) searchRangeOfDocs_batch(iDocIdList,postingLists, internTermId, iGlobalInitialPositionInList, &limitDoc,iElementQuantityByBlock,iGlobalRoundNumber, iDocNumberByTermListShared[internTermId], ptrInitPostingListShared[internTermId]); } __syncthreads(); //Preenche a memória compartilhada // positionInitialInTermPostingList = 0; int docLocal, docAmount; for (int termId = 0; termId < iTermNumber; ++termId) { globalIndex = iGlobalInitialPositionInList + limitDoc.extraPosition[termId] + threadIdx.x; docAmount = iDocNumberByTermListShared[termId]; docLocal = -1; for (localIndex = threadIdx.x; localIndex < iElementQuantityByBlock; localIndex+=blockDim.x) { docLocal = (globalIndex < docAmount) ? iDocIdList[ptrInitPostingListShared[termId] + globalIndex] : NO_MORE_DOC; if(docLocal > limitDoc.secondMaxDocId || globalIndex > docAmount){ postingLists[termId].docId[localIndex] = NO_MORE_DOC; fingers[termId].final = 1; break; } postingLists[termId].docId[localIndex] = docLocal; postingLists[termId].docLenght[localIndex] = iDocLenghtList[ptrInitPostingListShared[termId] + globalIndex]; postingLists[termId].freq[localIndex] = iFreqList[ptrInitPostingListShared[termId] + globalIndex]; globalIndex += blockDim.x; } // positionInitialInTermPostingList += iDocNumberByTermList[termId]; } // __syncthreads(); if(threadIdx.x < iTermNumber){ fingers[threadIdx.x].docId = postingLists[threadIdx.x].docId[0]; fingers[threadIdx.x].position = (fingers[threadIdx.x].docId == NO_MORE_DOC) ? 
NO_VALID_POSITION : 0; fingers[threadIdx.x].final = 0; } __syncthreads(); __shared__ int docCurrent; sortingTerms_2(fingers, iOrderedTermSharedList, iTermNumber); // __syncthreads(); if(THREAD_MASTER){ selectTermPivot_2(&sharedPivot,iOrderedTermSharedList,fingers,dUBlist,iTermNumber,thresholdLocal); docCurrent = (sharedPivot.idTerm != NO_PIVOT_TERM) ? fingers[sharedPivot.idTerm].docId : NO_MORE_DOC; }else if(THREAD_MASTER_2){ score = 0.0; } int padding; int threadIdInWarp = (threadIdx.x & 0x1f); int idWarp = ((blockDim.x >> 5) == 1 ) ? 1 : threadIdx.x >> 5; __syncthreads(); while((sharedPivot.positionInOrderedList < iTermNumber) && (sharedPivot.idTerm < iTermNumber)){ if(THREAD_MASTER) isValidCandidate = (fingers[sharedPivot.idTerm].docId == fingers[iOrderedTermSharedList[0]].docId); __syncthreads(); if(isValidCandidate){ if(threadIdx.x < iTermNumber){ fullScore_3_1(&score, fingers[sharedPivot.idTerm].docId, iOrderedTermSharedList, fingers,postingLists, dIdfList, dAverageDocumentLength); } padding = documentTopk.padding; __syncthreads(); if(thresholdLocal < score){ thresholdLocal = managerMinValue_v5(&documentTopk, docCurrent, score,padding); } if(idWarp == 1 && threadIdInWarp < iTermNumber ){ advancePivoTermFinger_4(sharedPivot,fingers, postingLists,iElementQuantityByBlock,threadIdInWarp); } } else{ advanceDocIdOfPredecessorTerm_4(postingLists, iOrderedTermSharedList, fingers,sharedPivot,fingers[sharedPivot.idTerm].docId, iElementQuantityByBlock); } __syncthreads(); for (int termId = 0; termId < iTermNumber; ++termId) { if(fingers[termId].docId == NO_MORE_DOC && fingers[termId].final == 0){ searchMoreDocs_batch(iDocIdList,iFreqList,iDocLenghtList,postingLists, termId,iGlobalInitialPositionInList,&limitDoc, iElementQuantityByBlock,&(fingers[termId]),docCurrent, iDocNumberByTermListShared[termId],ptrInitPostingListShared[termId]); // // ////// // if(THREAD_MASTER && (documentTopk.padding < (iTopK >> 1)) && thresholdLocal > thresholdGlobal){ //// if(THREAD_MASTER && 
// documentTopk.padding == 0 && thresholdLocal > thresholdGlobal){
//                    atomicMaxD(&thresholdGlobal,thresholdLocal);
//                }
//
//                if((documentTopk.padding < (iTopK >> 1)))
//                    if(thresholdLocal < thresholdGlobal){
//                        thresholdLocal = thresholdGlobal;
//                    }
            }
        }

//        __syncthreads(); // maybe not needed

        // Sort the terms in non decreasing order of DID
        sortingTerms_2(fingers, iOrderedTermSharedList, iTermNumber);

//        __syncthreads(); // maybe not needed

        // Select term pivot
        if(THREAD_MASTER){
            selectTermPivot_2(&sharedPivot,iOrderedTermSharedList,fingers,dUBlist,iTermNumber,thresholdLocal);
            docCurrent = (sharedPivot.idTerm != NO_PIVOT_TERM) ? fingers[sharedPivot.idTerm].docId : NO_MORE_DOC;
            score = 0.0;
        }
        __syncthreads();
    }

    // (debug min-heap-order check kept from the original, disabled)
//    if(THREAD_MASTER){
//        int max = iTopK - documentTopk.padding;
//        int i = 0;
//        while(i > max){
//            if(2*i+2 < max)
//                if(documentTopk.score[i] > documentTopk.score[2*i+2])
//                    printf("ERRADO!!!\n");
//
//            if(2*i+1 < max)
//                if(documentTopk.score[i] > documentTopk.score[2*i+1])
//                    printf("ERRADO!!!\n");
//
//            i++;
//        }}
//
//    __syncthreads();

    sortLocalTopkDocAndStoreInGlobal(dTopkScoreListGlobal,iTopkDocListGlobal,iTopK,&documentTopk);

    // (manual top-k copy-out kept from the original, disabled)
//    globalIndex = iTopK * blockIdx.x + threadIdx.x + documentTopk.padding;
//    for (localIndex = threadIdx.x; localIndex < (iTopK - documentTopk.padding) ; localIndex += blockDim.x) {
//        iTopkDocListGlobal[globalIndex] = documentTopk.id[localIndex];
//        dTopkScoreListGlobal[globalIndex] = documentTopk.score[localIndex];
//        globalIndex += blockDim.x;
//    }
//    __syncthreads();
}

/*
 * Preprocessing pass for the partitioned WAND matcher.
 *
 * For the slice of the posting lists owned by this thread block it computes:
 *   - extraPositions[blockIdx.x * iTermNumber + term]: how many postings at the
 *     head of each term's slice can be skipped because their doc ids lie below
 *     the block-wide minimum doc id (0 when no skip applies / none was found);
 *   - docMaxList[blockIdx.x]: the largest doc id the block has to consider.
 *
 * Assumes blockDim.x is a multiple of 32 (warp shuffle reduction below) and
 * blockDim.x >= iTermNumber -- TODO confirm against the launch site.
 */
__global__ void preProcessingWand(const int* iDocIdList, const short int iTermNumber,
        const int* iDocNumberByTermList, const int* iInitialPositionPostingList,
        const int docIdNumberByBlock, int* extraPositions, int* docMaxList){

    __shared__ int iGlobalInitialPositionInList;
    int globalIndex;
    __shared__ int sharedMinDoc;
    __shared__ int sharedMaxDoc;
    __shared__ int sharedExtraPositions[TERM_NUMBER];
    __shared__ int sharedInitialDocId[TERM_NUMBER];
    __shared__ int sharedDocNumberByList[TERM_NUMBER];

    // FIX: the shared accumulators below are updated with atomicMax/atomicMin,
    // so they must be initialised first (they were previously used while
    // uninitialised, producing garbage skip offsets and max doc ids).
    if(THREAD_MASTER){
        iGlobalInitialPositionInList = docIdNumberByBlock * blockIdx.x;
        sharedMinDoc = 0;
        sharedMaxDoc = -1;
    }
    if(threadIdx.x < iTermNumber){
        sharedExtraPositions[threadIdx.x] = 0x7FFFFFFF; // INT_MAX sentinel: "no skip position found yet"
        sharedInitialDocId[threadIdx.x] = 0;            // FIX: block 0 never wrote it but every block reads it below
    }
    // FIX: this barrier was commented out, but every thread reads
    // iGlobalInitialPositionInList (and the state initialised above) next.
    __syncthreads();

    if(threadIdx.x < iTermNumber){
        sharedDocNumberByList[threadIdx.x] = iDocNumberByTermList[threadIdx.x];
        int docAmount = sharedDocNumberByList[threadIdx.x];
        globalIndex = iGlobalInitialPositionInList;
        int maxDoc;
        if(blockIdx.x != 0){
            // The first doc id of this block's slice is one past the last doc
            // id of the previous slice; the maximum of these over all terms is
            // the smallest doc id every term list must advance to.
            maxDoc = (globalIndex < docAmount) ? iDocIdList[iInitialPositionPostingList[threadIdx.x] + globalIndex - 1] : -1;
            maxDoc++;
            sharedInitialDocId[threadIdx.x] = maxDoc;
            atomicMax(&sharedMinDoc, maxDoc);
        }
        // (block 0 starts at doc id 0: sharedMinDoc is already 0)

        // Find the last posting of this block's slice (or the list tail).
        int isTail = globalIndex < docAmount;
        globalIndex += docIdNumberByBlock;
        isTail &= globalIndex >= docAmount;
        if(isTail){
            globalIndex = iGlobalInitialPositionInList + (docAmount - iGlobalInitialPositionInList - 1);
        }
        maxDoc = (isTail || globalIndex < docAmount) ? (iDocIdList[iInitialPositionPostingList[threadIdx.x] + globalIndex]-1) : -1;
        atomicMax(&sharedMaxDoc, maxDoc);
    }
    __syncthreads();

    // For every term whose slice starts below the block-wide minimum doc id,
    // scan forward (block-strided) for the first posting >= sharedMinDoc and
    // keep the smallest such offset relative to the slice start.
    for (int iTerm = 0; iTerm < iTermNumber; ++iTerm) {
        if(sharedInitialDocId[iTerm] < sharedMinDoc){
            globalIndex = iInitialPositionPostingList[iTerm] + iGlobalInitialPositionInList + threadIdx.x;
            int docLocal = -1;
            // FIX: globalIndex is an absolute index into iDocIdList, so the scan
            // bound must include the list's base offset (it was previously
            // compared against the bare per-term document count, matching
            // neither the index nor the analogous loop in the batch kernel).
            while (docLocal < sharedMinDoc && globalIndex < iInitialPositionPostingList[iTerm] + sharedDocNumberByList[iTerm]){
                docLocal = iDocIdList[globalIndex];
                globalIndex += blockDim.x;
            }
            globalIndex -= blockDim.x;
            long long int initialPosition;
            if(docLocal < sharedMinDoc) // nothing >= sharedMinDoc was found by this thread
                initialPosition = NO_VALID_POSITION;
            else
                initialPosition = globalIndex - iGlobalInitialPositionInList - iInitialPositionPostingList[iTerm];
            // Warp-level min-reduction of the candidate offsets.
            // FIX: the neighbour value is kept as long long (it was truncated
            // through an int, corrupting offsets past 2^31).
            long long positionNeighbor;
            for (int i = 16; i >= 1; i /= 2) {
                positionNeighbor = __shfl_down_sync(0xFFFFFFFF, initialPosition, i);
                if(positionNeighbor < initialPosition)
                    initialPosition = positionNeighbor;
            }
            // NOTE(review): the original guarded on NO_MORE_DOC although the
            // sentinel assigned above is NO_VALID_POSITION -- the guard is kept
            // as-is; confirm the intended sentinel values are interchangeable.
            if( ((threadIdx.x & 0x1f) == 0) && initialPosition != NO_MORE_DOC){
                atomicMin(&sharedExtraPositions[iTerm], (int)initialPosition);
            }
        }
    }

    // FIX: the write-back was inside the term loop, so it could publish a
    // term's offset before that term's atomicMin updates had completed (and it
    // redundantly rewrote every slot once per term). Publish once, after a
    // barrier, instead.
    __syncthreads();
    if(threadIdx.x < iTermNumber){
        int extra = sharedExtraPositions[threadIdx.x];
        // Terms that needed no skip, or where no qualifying posting was found,
        // report offset 0 (consumers treat extraPosition as a plain offset).
        extraPositions[iTermNumber*blockIdx.x + threadIdx.x] = (extra == 0x7FFFFFFF) ? 0 : extra;
        if(THREAD_MASTER)
            docMaxList[blockIdx.x] = sharedMaxDoc;
    }
}

/*
 * Single-query WAND matcher that consumes the per-block skip offsets
 * (extraPositions) and per-block maximum doc ids (docMaxList) produced by
 * preProcessingWand.
 */
__global__ void matchWandParallel_VARIABLE_3_Teste(const int* iDocIdList, const unsigned short int* iFreqList, const float
*dUBlist, const float *dIdfList, const int *iDocLenghtList, const short int iTermNumber,
        int *iTopkDocListGlobal, float *dTopkScoreListGlobal, const float dAverageDocumentLength,
        const int iGlobalRoundNumber, const short int iTopK, const float iInitialThreshold,
        const int* d_iDocNumberByTermList, const int* extraPositions, const int* docMaxList){

    __shared__ pivot sharedPivot;
    __shared__ finger fingers[TERM_NUMBER];
    __shared__ postingList postingLists[TERM_NUMBER];
    __shared__ documentTopkList documentTopk;
    __shared__ unsigned int iOrderedTermSharedList[TERM_NUMBER];
    __shared__ int iGlobalInitialPositionInList;
    __shared__ unsigned short int iElementQuantityByBlock;
    __shared__ float score;
    __shared__ bool isValidCandidate;
//    __shared__ short int needSearchDocRange[TERM_NUMBER];
    __shared__ limitDocId limitDoc;

//    int count = 0;
    float thresholdLocal;
    thresholdLocal = iInitialThreshold;
    int globalIndex;
    int localIndex;
    int positionInitialInTermPostingList;

    if(thresholdLocal < globalThreshold)
        thresholdLocal = globalThreshold;

    // Per-term skip offsets and the block's doc-id ceiling come precomputed
    // from preProcessingWand.
    if(threadIdx.x < iTermNumber){
        limitDoc.extraPosition[threadIdx.x] = extraPositions[blockIdx.x*iTermNumber + threadIdx.x];
        fingers[threadIdx.x].final = 0;
    }
    if(THREAD_MASTER){
        documentTopk.padding = iTopK;
        limitDoc.secondMaxDocId = docMaxList[blockIdx.x];
    }else if(THREAD_MASTER_2){
        iElementQuantityByBlock = DOC_QUANTITY_IN_MEMORY;
        iGlobalInitialPositionInList = iElementQuantityByBlock * blockIdx.x * iGlobalRoundNumber;
    }

    // Initialise the top-k score/document list.
    // iTopK is assumed to be a multiple of the block size.
    for (localIndex = threadIdx.x; localIndex < iTopK; localIndex += blockDim.x) {
        documentTopk.id[localIndex] = -1;
        documentTopk.score[localIndex] = 0.0;
    }
    __syncthreads();

    // Stage this block's slice of every term's posting list in shared memory.
    positionInitialInTermPostingList = 0;
    int docLocal, docAmount;
    for (int termId = 0; termId < iTermNumber; ++termId) {
        globalIndex = iGlobalInitialPositionInList + limitDoc.extraPosition[termId] + threadIdx.x;
        docAmount = d_iDocNumberByTermList[termId];
        docLocal = -1;
        for (localIndex = threadIdx.x; localIndex < iElementQuantityByBlock; localIndex+=blockDim.x) {
            docLocal = (globalIndex < docAmount) ? iDocIdList[positionInitialInTermPostingList + globalIndex] : NO_MORE_DOC;
            if(docLocal > limitDoc.secondMaxDocId || globalIndex > docAmount){
                postingLists[termId].docId[localIndex] = NO_MORE_DOC;
                fingers[termId].final = 1;
                break;
            }
            postingLists[termId].docId[localIndex] = docLocal;
            postingLists[termId].docLenght[localIndex] = iDocLenghtList[positionInitialInTermPostingList + globalIndex];
            postingLists[termId].freq[localIndex] = iFreqList[positionInitialInTermPostingList + globalIndex];
            globalIndex += blockDim.x;
        }
        positionInitialInTermPostingList += d_iDocNumberByTermList[termId];
    }

//    __syncthreads();
    if(threadIdx.x < iTermNumber){
        fingers[threadIdx.x].docId = postingLists[threadIdx.x].docId[0];
        fingers[threadIdx.x].position = (fingers[threadIdx.x].docId == NO_MORE_DOC) ? NO_VALID_POSITION : 0;
//        fingers[threadIdx.x].final = 0;
    }
    __syncthreads();

    __shared__ int docCurrent;
    sortingTerms_2(fingers, iOrderedTermSharedList, iTermNumber);
//    __syncthreads();
    if(THREAD_MASTER){
        selectTermPivot_2(&sharedPivot,iOrderedTermSharedList,fingers,dUBlist,iTermNumber,thresholdLocal);
        docCurrent = (sharedPivot.idTerm != NO_PIVOT_TERM) ? fingers[sharedPivot.idTerm].docId : NO_MORE_DOC;
    }else if(THREAD_MASTER_2){
        score = 0.0;
    }

    int padding;
    int threadIdInWarp = (threadIdx.x & 0x1f);
    int idWarp = ((blockDim.x >> 5) == 1 ) ? 1 : threadIdx.x >> 5;
    __syncthreads();

    // Main WAND loop: terminates once no pivot term can beat the threshold.
    while((sharedPivot.positionInOrderedList < iTermNumber) && (sharedPivot.idTerm < iTermNumber)){
        if(THREAD_MASTER){
            isValidCandidate = (fingers[sharedPivot.idTerm].docId == fingers[iOrderedTermSharedList[0]].docId);
        }
        __syncthreads();

        if(isValidCandidate){
            if(threadIdx.x < iTermNumber){
                fullScore_3_1(&score, fingers[sharedPivot.idTerm].docId, iOrderedTermSharedList, fingers,postingLists, dIdfList, dAverageDocumentLength);
            }
            padding = documentTopk.padding;
            __syncthreads();
            /* If the heap is not full the candidate is inserted into the heap.
               If the heap is full and the new score is larger than the minimum
               score in the heap, the new document is inserted into the heap,
               replacing the one with the minimum score. */
            if(padding != 0 || thresholdLocal < score ){
                thresholdLocal = managerMinValue_v5(&documentTopk, fingers[sharedPivot.idTerm].docId, score,padding);
            }
            if(idWarp == 1 && threadIdInWarp < iTermNumber){
                advancePivoTermFinger_4(sharedPivot,fingers, postingLists,iElementQuantityByBlock,threadIdInWarp);
            }
        }
        else{
            advanceDocIdOfPredecessorTerm_4(postingLists, iOrderedTermSharedList, fingers,sharedPivot,fingers[sharedPivot.idTerm].docId, iElementQuantityByBlock);
        }
        __syncthreads();

        // Refill any exhausted (but not finished) term list, and propagate the
        // shared threshold according to the compile-time sharing policy.
        for (int termId = 0; termId < iTermNumber; ++termId) {
            if(fingers[termId].docId == NO_MORE_DOC && fingers[termId].final == 0){
                searchMoreDocs(iDocIdList,iFreqList,iDocLenghtList,postingLists, termId,iGlobalInitialPositionInList, &limitDoc,iElementQuantityByBlock, &(fingers[termId]),docCurrent,d_iDocNumberByTermList);
                if (SHAREDTHESHOLD == 1){ // SHARED_READ
                    if(THREAD_MASTER && documentTopk.padding == 0 && thresholdLocal > globalThreshold){
//                        atomicMaxD(&globalThreshold,thresholdLocal);
                        globalThreshold = thresholdLocal;
//                        atomicMax((unsigned long long int*)&globalThreshold,(unsigned long long int)thresholdLocal);
                    }
                    if(thresholdLocal < globalThreshold){
                        thresholdLocal = globalThreshold;
                    }
                }else if (SHAREDTHESHOLD == 2){ // TSHARED_WRITEREAD
                    if(THREAD_MASTER && (documentTopk.padding < (iTopK >> 1)) && thresholdLocal > globalThreshold){
//                        globalThreshold,thresholdLocal);
                        globalThreshold = thresholdLocal;
//                        atomicMax((unsigned long long int*)&globalThreshold,(unsigned long long int)thresholdLocal);
                    }
                    if((documentTopk.padding < (iTopK >> 1)))
                        if(thresholdLocal < globalThreshold){
                            thresholdLocal = globalThreshold;
                        }
                }
            }
        }

        // Sort the terms in non decreasing order of DID
        sortingTerms_2(fingers, iOrderedTermSharedList, iTermNumber);
        // Select term pivot
        if(THREAD_MASTER){
            selectTermPivot_2(&sharedPivot,iOrderedTermSharedList,fingers,dUBlist,iTermNumber,thresholdLocal);
            docCurrent = (sharedPivot.idTerm != NO_PIVOT_TERM) ? fingers[sharedPivot.idTerm].docId : NO_MORE_DOC;
            score = 0.0;
        }
        __syncthreads();
    }

    sortLocalTopkDocAndStoreInGlobal(dTopkScoreListGlobal,iTopkDocListGlobal,iTopK,&documentTopk);
    if(THREAD_MASTER && thresholdLocal > globalThreshold){
        atomicMax((unsigned long long int*)&globalThreshold,(unsigned long long int)thresholdLocal);
    }
//    if(threadIdx.x == 0)
//        printf("---------%d----------",count);
}

/*
 * Batched (multi-query) WAND matcher: resolves its term ids for query idQuery
 * through iQueryTerms/ptrQueryPositions and keeps a per-query shared threshold
 * in globalThresholdBatch[idQuery].
 */
__global__ void matchWandParallel_BATCH_2(const int* iDocIdList, const unsigned short int* iFreqList,
        const float *dUBlistGlobal, const float *dIdfListGlobal, const int *iDocLenghtList,
        const int iTermNumber, int *iTopkDocListGlobal, float *dTopkScoreListGlobal,
        const float dAverageDocumentLengthGlobal, const int iBlockRoundNumber, const int iGlobalRoundNumber,
        const short int iTopK, const float iInitialThreshold, const int* iQueryTerms,
        const long long* ptrInitPostingList, int* ptrQueryPositions, int idQuery,int *iDocNumberByTermListGlobal){

//    if(idQuery != 18 || blockIdx.x != 0)
//        return;

    __shared__ int queryPosition;
    __shared__ pivot sharedPivot;
    __shared__ finger fingers[TERM_NUMBER];
    __shared__ documentTopkList documentTopk;
    __shared__ unsigned int iOrderedTermSharedList[TERM_NUMBER];
    __shared__ long long iSharedPositionInitialInList[TERM_NUMBER];
    __shared__ float dUBlist[TERM_NUMBER];
    __shared__ float
dIdfList[TERM_NUMBER];
    __shared__ float dAverageDocumentLength;
    __shared__ int iDocNumberByTermList[TERM_NUMBER];
    __shared__ int iGlobalInitialPositionInList;
    __shared__ float score;
    __shared__ bool isValidCandidate;
    __shared__ int docCurrent;
    __shared__ limitDocId limitDoc;

    int padding;
    float thresholdLocal;
    thresholdLocal = iInitialThreshold;
    int globalIndex;
    int localIndex;
//    long long positionInitialInTermPostingList;

    if(thresholdLocal < globalThresholdBatch[idQuery])
        thresholdLocal = globalThresholdBatch[idQuery];
//    int count = 0;

    if(THREAD_MASTER){
        documentTopk.padding = iTopK;
        dAverageDocumentLength = dAverageDocumentLengthGlobal;
//    }else if(THREAD_MASTER_2){
        limitDoc.minDocId = 0;
        limitDoc.secondMaxDocId = 0;
        iGlobalInitialPositionInList = DOC_QUANTITY_IN_MEMORY * blockIdx.x * iGlobalRoundNumber;
    }

    // Resolve this query's terms and pull their per-term metadata into shared
    // memory.
    if(threadIdx.x < iTermNumber){
        queryPosition = ptrQueryPositions[idQuery];
        int idTerm = iQueryTerms[queryPosition + threadIdx.x];
        fingers[threadIdx.x].docId = NO_MORE_DOC;
        fingers[threadIdx.x].position = NO_VALID_POSITION;
        iDocNumberByTermList[threadIdx.x] = iDocNumberByTermListGlobal[idTerm];
        dUBlist[threadIdx.x] = dUBlistGlobal[idTerm];
        dIdfList[threadIdx.x] = dIdfListGlobal[idTerm];
        iSharedPositionInitialInList[threadIdx.x] = ptrInitPostingList[idTerm];
    }

    // Initialise the top-k score/document list.
    // iTopK is assumed to be a multiple of the block size.
    for (localIndex = threadIdx.x; localIndex < iTopK; localIndex += blockDim.x) {
        documentTopk.id[localIndex] = -1;
        documentTopk.score[localIndex] = 0.0;
    }

    // Establish the block's doc-id range (minimum and maximum).
    if(threadIdx.x < iTermNumber){
        int docAmount = iDocNumberByTermList[threadIdx.x];
        fingers[threadIdx.x].final = 0;
        limitDoc.extraPosition[threadIdx.x] = 0;
        globalIndex = iGlobalInitialPositionInList;
        int maxDoc;
        if(blockIdx.x != 0){
            maxDoc = (globalIndex < docAmount) ? iDocIdList[iSharedPositionInitialInList[threadIdx.x] + globalIndex - 1] : -1;
            maxDoc++;
            atomicMax(&(limitDoc.minDocId), maxDoc);
        }else{
            if(THREAD_MASTER)
                limitDoc.minDocId = 0;
        }
        int isTail = globalIndex < docAmount;
        globalIndex = globalIndex + DOC_QUANTITY_IN_MEMORY * iGlobalRoundNumber;
        isTail = isTail && globalIndex >= docAmount;
        if(isTail){
            globalIndex = iGlobalInitialPositionInList + (docAmount - iGlobalInitialPositionInList - 1);
        }
        maxDoc = ( (isTail || (globalIndex < docAmount)) ? (iDocIdList[iSharedPositionInitialInList[threadIdx.x] + globalIndex-1]) : -1);
        atomicMax(&(limitDoc.secondMaxDocId), maxDoc);
    }
    __syncthreads();

    // Position every term's finger on its first document inside the range.
    long long pos;
    int docLocal;
    for (int idTerm = 0; idTerm < iTermNumber; ++idTerm) {
        pos = iSharedPositionInitialInList[idTerm] + iGlobalInitialPositionInList + threadIdx.x;
        docLocal = -1;
        while(pos < (iSharedPositionInitialInList[idTerm]+iDocNumberByTermList[idTerm]) && docLocal < limitDoc.minDocId && docLocal <= limitDoc.secondMaxDocId ){
            docLocal = iDocIdList[pos];
            pos += blockDim.x;
        }
        docLocal = ((docLocal != -1) && (docLocal >= limitDoc.minDocId && docLocal <= limitDoc.secondMaxDocId)) ? docLocal : NO_MORE_DOC;
        pos = (docLocal != NO_MORE_DOC) ? pos-blockDim.x : NO_VALID_POSITION;
        atomicMin(&(fingers[idTerm].docId) , docLocal);
        __syncthreads();
        if(fingers[idTerm].docId == docLocal){
            fingers[idTerm].position = pos;
        }
    }
    __syncthreads();

    sortingTerms_2(fingers, iOrderedTermSharedList, iTermNumber);
    if(THREAD_MASTER){
        selectTermPivot_No_SharedMemory(&sharedPivot,iOrderedTermSharedList,fingers,dUBlist,iTermNumber,thresholdLocal);
        docCurrent = (sharedPivot.idTerm != NO_PIVOT_TERM) ? fingers[sharedPivot.idTerm].docId : NO_MORE_DOC;
//    }else if(THREAD_MASTER_2){
        score = 0.0;
    }
    __syncthreads();

    // Main WAND loop over candidate pivots.
    while((sharedPivot.positionInOrderedList < iTermNumber) && (sharedPivot.idTerm < iTermNumber)){
        isValidCandidate = (fingers[sharedPivot.idTerm].docId == fingers[iOrderedTermSharedList[0]].docId);
//        count++;
        if(isValidCandidate){
            if(threadIdx.x < iTermNumber){
                int termId = iOrderedTermSharedList[threadIdx.x];
                float scoreL = 0.0;
                if(fingers[termId].docId == fingers[sharedPivot.idTerm].docId){
                    scoreL = scoreTf_Idf(iFreqList[fingers[termId].position], iDocLenghtList[fingers[termId].position], dIdfList[termId],dAverageDocumentLength,1.0);
                }
                // Sum the per-term partial scores across the warp.
                float aux = 0;
                for (int i = 0; i < TERM_NUMBER; ++i) {
                    aux += __shfl_sync(0xFFFFFFFF,scoreL,i);
                }
                if(THREAD_MASTER)
                    score = aux;
//                atomicAdd(&score,scoreL);
            }
            padding = documentTopk.padding;
            __syncthreads();
            /* If the heap is not full the candidate is inserted into the heap.
               If the heap is full and the new score is larger than the minimum
               score in the heap, the new document is inserted into the heap,
               replacing the one with the minimum score. */
            if(padding != 0 || thresholdLocal < score ){
                thresholdLocal = managerMinValue_v5(&documentTopk, fingers[sharedPivot.idTerm].docId, score,padding);
            }
            // Advance every finger that sat on the scored pivot document.
            if(threadIdx.x < iTermNumber ){
                int docPivot = fingers[sharedPivot.idTerm].docId;
                if(fingers[threadIdx.x].docId == docPivot){
                    fingers[threadIdx.x].position++;
                    if(fingers[threadIdx.x].position >= (iDocNumberByTermList[threadIdx.x]+iSharedPositionInitialInList[threadIdx.x])){ // not valid
                        fingers[threadIdx.x].docId = NO_MORE_DOC;
                        fingers[threadIdx.x].position = NO_VALID_POSITION;
                    }else{
                        fingers[threadIdx.x].docId = iDocIdList[fingers[threadIdx.x].position];
                        if(fingers[threadIdx.x].docId > limitDoc.secondMaxDocId){
                            fingers[threadIdx.x].docId = NO_MORE_DOC;
                            fingers[threadIdx.x].position = NO_VALID_POSITION;
                        }
                    }
                }
            }
        }
        else{
            // Skip the predecessor terms forward to the pivot document.
            int pivotDoc = docCurrent;
            int position;
            int docLocal;
            int idTerm;
            for (int j = 0; j < sharedPivot.positionInOrderedList; ++j) {
                idTerm = iOrderedTermSharedList[j];
                if(fingers[idTerm].docId == fingers[sharedPivot.idTerm].docId) // stop once a finger points at the pivot document
                    break;
                fingers[idTerm].docId = NO_MORE_DOC;
                position = fingers[idTerm].position + 1 + threadIdx.x;
                docLocal = -1;
                while(position < (iSharedPositionInitialInList[idTerm]+iDocNumberByTermList[idTerm]) && docLocal < pivotDoc && docLocal <= limitDoc.secondMaxDocId){
                    docLocal = iDocIdList[position];
                    position += blockDim.x;
                }
                docLocal = (docLocal >= pivotDoc && docLocal <= limitDoc.secondMaxDocId) ? docLocal : NO_MORE_DOC;
                position = (docLocal != NO_MORE_DOC) ? position-blockDim.x : NO_VALID_POSITION;
                __syncthreads();
                atomicMin(&(fingers[idTerm].docId) , docLocal);
                __syncthreads();
                if(fingers[idTerm].docId == docLocal){
                    fingers[idTerm].position = position;
                }
            }
        }
        __syncthreads();

        // Sort the terms in non decreasing order of DID
        sortingTerms_2(fingers, iOrderedTermSharedList, iTermNumber);
        // Select term pivot
        if(THREAD_MASTER){
            selectTermPivot_No_SharedMemory(&sharedPivot,iOrderedTermSharedList,fingers,dUBlist,iTermNumber,thresholdLocal);
            docCurrent = (sharedPivot.idTerm != NO_PIVOT_TERM) ? fingers[sharedPivot.idTerm].docId : NO_MORE_DOC;
            score = 0.0;
        }

        // Per-query shared-threshold propagation (compile-time policy).
        if (SHAREDTHESHOLD == 1){ // SHARED_READ
            if(THREAD_MASTER && documentTopk.padding == 0 && thresholdLocal > globalThresholdBatch[idQuery]){
//                atomicMaxD(&globalThreshold,thresholdLocal);
//                atomicMax((unsigned long long int*)&(globalThresholdBatch[idQuery]),(unsigned long long int)thresholdLocal);
//                atomicMaxD((volatile double*)&(globalThresholdBatch[idQuery]),thresholdLocal);
                globalThresholdBatch[idQuery] = thresholdLocal;
            }
            if(thresholdLocal < globalThresholdBatch[idQuery]){
                thresholdLocal = globalThresholdBatch[idQuery];
            }
        }else if (SHAREDTHESHOLD == 2){ // TSHARED_WRITEREAD
            if(THREAD_MASTER && (documentTopk.padding < (iTopK >> 1)) && thresholdLocal > globalThresholdBatch[idQuery]){
//                atomicMax((unsigned long long int*)&(globalThresholdBatch[idQuery]),(unsigned long long int)thresholdLocal);
//                atomicMaxD(((volatile double*)&(globalThresholdBatch[idQuery])),thresholdLocal);
                globalThresholdBatch[idQuery] = thresholdLocal;
            }
            if((documentTopk.padding < (iTopK >> 1)))
                if(thresholdLocal < globalThresholdBatch[idQuery]){
                    thresholdLocal = globalThresholdBatch[idQuery];
                }
        }
        __syncthreads();
    }

    sortLocalTopkDocAndStoreInGlobal(dTopkScoreListGlobal,iTopkDocListGlobal,iTopK,&documentTopk);

//    if(thresholdLocal > globalThreshold)
//        globalThreshold = thresholdLocal;
//    if(THREAD_MASTER)
////        atomicAdd(&globalCount,count);
//
//    if(THREAD_MASTER && idQuery == 0)
//        printf("-----%d
%d----", blockIdx.x, count); } __global__ void matchWandParallel_VARIABLE_4_2(const int* iDocIdList, const unsigned short int* iFreqList, const float *dUBlistGlobal, const float *dIdfListGlobal, const int *iDocLenghtList, const short int iTermNumber, int *iTopkDocListGlobal, float *dTopkScoreListGlobal, const float dAverageDocumentLengthGlobal, const int iGlobalRoundNumber, const short int iTopK, const float iInitialThreshold, const int* iDocNumberByTermListGlobal){ // if(blockIdx.x != 720) // return; __shared__ pivot sharedPivot; __shared__ finger fingers[TERM_NUMBER]; __shared__ documentTopkList documentTopk; __shared__ postingList2 postings[TERM_NUMBER]; __shared__ unsigned int iOrderedTermSharedList[TERM_NUMBER]; __shared__ long long iSharedPositionInitialInList[TERM_NUMBER]; __shared__ float dUBlist[TERM_NUMBER]; __shared__ float dIdfList[TERM_NUMBER]; __shared__ float dAverageDocumentLength; __shared__ int iDocNumberByTermList[TERM_NUMBER]; __shared__ int iGlobalInitialPositionInList; __shared__ float score; __shared__ bool isValidCandidate; __shared__ int docCurrent; __shared__ limitDocId limitDoc; // int count = iTopK; // __shared__ int paddingInShared; int padding; float thresholdLocal = iInitialThreshold; thresholdLocal = iInitialThreshold; int globalIndex;// = iInitialPositionGlobal + threadIdx.x; int localIndex; int positionInitialInTermPostingList; if(thresholdLocal < globalThreshold) thresholdLocal = globalThreshold; if(THREAD_MASTER){ documentTopk.padding = iTopK; dAverageDocumentLength = dAverageDocumentLengthGlobal; // }else if(THREAD_MASTER_2){ iGlobalInitialPositionInList = DOC_QUANTITY_IN_MEMORY * blockIdx.x * iGlobalRoundNumber; limitDoc.minDocId = 0; limitDoc.secondMaxDocId = 0; } if(threadIdx.x < iTermNumber){ // paddingInShared=0; fingers[threadIdx.x].docId = NO_MORE_DOC; fingers[threadIdx.x].position = NO_VALID_POSITION; iDocNumberByTermList[threadIdx.x] = iDocNumberByTermListGlobal[threadIdx.x]; dUBlist[threadIdx.x] = 
dUBlistGlobal[threadIdx.x]; dIdfList[threadIdx.x] = dIdfListGlobal[threadIdx.x]; } //Inicializa a lista de Score e Documentos dos Topk //Considero que o Top_K seja um número múltiplo do tamanho do bloco for (localIndex = threadIdx.x; localIndex < iTopK; localIndex += blockDim.x) { documentTopk.id[localIndex] = -1; documentTopk.score[localIndex] = 0.0; } //Define o max e o min if(threadIdx.x < iTermNumber){ int docAmount = iDocNumberByTermList[threadIdx.x]; fingers[threadIdx.x].final = 0; limitDoc.extraPosition[threadIdx.x] = 0; globalIndex = iGlobalInitialPositionInList; positionInitialInTermPostingList = 0; for (int i = 0; i < threadIdx.x; ++i) { positionInitialInTermPostingList += iDocNumberByTermList[i]; } iSharedPositionInitialInList[threadIdx.x] = positionInitialInTermPostingList; int maxDoc; if(blockIdx.x != 0){ maxDoc = (globalIndex < docAmount) ? iDocIdList[positionInitialInTermPostingList + globalIndex - 1] : -1; maxDoc++; atomicMax(&(limitDoc.minDocId), maxDoc); }else{ if(THREAD_MASTER) limitDoc.minDocId = 0; } int isTail = globalIndex < docAmount; globalIndex += DOC_QUANTITY_IN_MEMORY * iGlobalRoundNumber - 1; isTail = (isTail && globalIndex >= docAmount); if(isTail){ globalIndex = iGlobalInitialPositionInList + (docAmount - iGlobalInitialPositionInList - 1); } maxDoc = (isTail || globalIndex < docAmount) ? iDocIdList[positionInitialInTermPostingList + globalIndex] : -1; atomicMax(&(limitDoc.secondMaxDocId), maxDoc); } __syncthreads(); long long pos; // int docLocal; for (int idTerm = 0; idTerm < iTermNumber; ++idTerm) { pos = iSharedPositionInitialInList[idTerm] + iGlobalInitialPositionInList + threadIdx.x; int docLocal = -1; while(pos < (iSharedPositionInitialInList[idTerm]+iDocNumberByTermList[idTerm]) && docLocal < limitDoc.minDocId && docLocal <= limitDoc.secondMaxDocId ){ docLocal = iDocIdList[pos]; pos += blockDim.x; } docLocal = ( (docLocal != -1) && (docLocal >= limitDoc.minDocId && docLocal <= limitDoc.secondMaxDocId)) ? 
docLocal : NO_MORE_DOC; pos = (docLocal != NO_MORE_DOC) ? pos-blockDim.x : NO_VALID_POSITION; // atomicMin(&(fingers[idTerm].docId) , docLocal); int docNeighbor, docAux = docLocal; for (int i = 16; i >= 1; i /= 2) { docNeighbor = __shfl_down_sync(0xFFFFFFFF,docAux, i); if(docNeighbor < docAux) docAux = docNeighbor; } if( ((threadIdx.x & 0x1f) == 0)){ atomicMin(&(fingers[idTerm].docId) , docAux); } __syncthreads(); if(fingers[idTerm].docId == docLocal){ fingers[idTerm].position = pos; } } __syncthreads(); for (int termId = 0; termId < iTermNumber; ++termId) { if(fingers[termId].position != NO_VALID_POSITION){ long long gIndex = fingers[termId].position + threadIdx.x; for (int localIndex = threadIdx.x; localIndex < DOCS_TEST; localIndex+=blockDim.x) { if(gIndex < (iSharedPositionInitialInList[termId]+iDocNumberByTermList[termId]) && (iDocIdList[gIndex] <= limitDoc.secondMaxDocId) ){ postings[termId].docId[localIndex] = iDocIdList[gIndex]; postings[termId].freq[localIndex] = iFreqList[gIndex]; postings[termId].docLenght[localIndex] = iDocLenghtList[gIndex]; if(localIndex == 0) postings[termId].positionInShared = 0; } else{ postings[termId].docId[localIndex] = NO_MORE_DOC; if(localIndex == 0) postings[termId].positionInShared = NO_VALID_POSITION; } gIndex += blockDim.x; } } else{ postings[termId].positionInShared = NO_VALID_POSITION; } } sortingTerms_2(fingers, iOrderedTermSharedList, iTermNumber); if(THREAD_MASTER){ selectTermPivot_No_SharedMemory(&sharedPivot,iOrderedTermSharedList,fingers,dUBlist,iTermNumber,thresholdLocal); docCurrent = (sharedPivot.idTerm != NO_PIVOT_TERM) ? 
fingers[sharedPivot.idTerm].docId : NO_MORE_DOC; score = 0.0; } __syncthreads(); while((sharedPivot.positionInOrderedList < iTermNumber) && (sharedPivot.idTerm < iTermNumber)){ isValidCandidate = (fingers[sharedPivot.idTerm].docId == fingers[iOrderedTermSharedList[0]].docId); // count++; // if(fingers[sharedPivot.idTerm].docId==33769946 && THREAD_MASTER) // printf("blockId.x %d!!!\n",blockIdx.x); if(isValidCandidate){ if(threadIdx.x < iTermNumber){ int termId = iOrderedTermSharedList[threadIdx.x]; float scoreL = 0.0; if(fingers[termId].docId == fingers[sharedPivot.idTerm].docId){ scoreL = scoreTf_Idf(postings[termId].freq[postings[termId].positionInShared], postings[termId].docLenght[postings[termId].positionInShared], dIdfList[termId],dAverageDocumentLength,1.0); } float aux = 0; for (int i = 0; i < TERM_NUMBER; ++i) { aux += __shfl_sync(0xFFFFFFFF,scoreL,i); } if(THREAD_MASTER) score = aux; // atomicAdd(&score,scoreL); } padding = documentTopk.padding; __syncthreads(); /* If the heap is not full the candidate is inserted into the heap. If the heap is full and the new score is larger than the minimum score in the heap, the new document is inserted into the heap, replacing the one with the minimum score. */ if(padding != 0 || thresholdLocal < score ){ // if(THREAD_MASTER && fingers[sharedPivot.idTerm].docId==6364669)//&& score == 3.53512168))//40920063 // printf("blockIdx.x %d\n",blockIdx.x); thresholdLocal = managerMinValue_v5(&documentTopk, fingers[sharedPivot.idTerm].docId, score,padding); // if(count != 0) count--; } // float test = checkMinHeapProperty(documentTopk,score,fingers[sharedPivot.idTerm].docId,iTopK); // if(count != documentTopk.padding){ // printf("Padding error! 
count %d | padding %d | blockIdx %d | docId %d\n",count, documentTopk.padding, blockIdx.x, fingers[sharedPivot.idTerm].docId); // } // // int result = __syncthreads_or(test != 0.0); // if(THREAD_MASTER && result != 0){ // printf("Oi\n"); // return; // } if(threadIdx.x < iTermNumber ){ int docPivot = fingers[sharedPivot.idTerm].docId; int posInShared; if(fingers[threadIdx.x].docId == docPivot){ fingers[threadIdx.x].position++; postings[threadIdx.x].positionInShared++; posInShared = postings[threadIdx.x].positionInShared; if(posInShared >= DOCS_TEST || postings[threadIdx.x].docId[posInShared] == NO_MORE_DOC){ fingers[threadIdx.x].docId = NO_MORE_DOC; if(docPivot == docCurrent) atomicInc((unsigned int*)(&docCurrent),docCurrent); }else{ fingers[threadIdx.x].docId = postings[threadIdx.x].docId[posInShared]; } } } } else{ int pivotDoc = docCurrent; int position; int docLocal; int idTerm; // __syncthreads(); for (int j = 0; j < sharedPivot.positionInOrderedList; ++j) { idTerm = iOrderedTermSharedList[j]; if(fingers[idTerm].docId == pivotDoc)//Até alcançar um finger q aponte a um documento pivo break; position = postings[idTerm].positionInShared + 1 + threadIdx.x; fingers[idTerm].docId = NO_MORE_DOC; if(position < DOCS_TEST) docLocal = postings[idTerm].docId[position]; else docLocal = NO_MORE_DOC; while( (position < DOCS_TEST) && (docLocal < pivotDoc) ){ docLocal = postings[idTerm].docId[position]; position += blockDim.x; } docLocal = (docLocal > pivotDoc) ? docLocal : NO_MORE_DOC; position = (docLocal != NO_MORE_DOC) ? 
position-blockDim.x : DOCS_TEST; // __syncthreads(); int docNeighbor, docAux = docLocal; for (int i = 16; i >= 1; i /= 2) { docNeighbor = __shfl_down_sync(0xFFFFFFFF,docAux, i); if(docNeighbor < docAux) docAux = docNeighbor; } if( ((threadIdx.x & 0x1f) == 0)){ atomicMin(&(fingers[idTerm].docId) , docAux); } __syncthreads(); if(fingers[idTerm].docId == docLocal){ if(position != DOCS_TEST){ fingers[idTerm].position += (position-postings[idTerm].positionInShared); postings[idTerm].positionInShared += threadIdx.x + 1; } else { postings[idTerm].positionInShared = DOCS_TEST; // fingers[idTerm].position = NO_VALID_POSITION; } } } } for (int termId = 0; termId < iTermNumber; ++termId) { long long gIndex; int count=0,isValid=0, docLocal, isOutRange=0; if(postings[termId].positionInShared >= DOCS_TEST && postings[termId].positionInShared != NO_VALID_POSITION){ gIndex = fingers[termId].position + threadIdx.x; for (int localIndex = threadIdx.x; localIndex < DOCS_TEST; localIndex+=blockDim.x) { count=0;isValid=0;isOutRange=0; do{ isOutRange = gIndex >= (iSharedPositionInitialInList[termId]+iDocNumberByTermList[termId]); docLocal = (!isOutRange) ? 
iDocIdList[gIndex] : NO_MORE_DOC; isOutRange = isOutRange || (docLocal > limitDoc.secondMaxDocId); isValid = isOutRange || (docLocal >= docCurrent); // count = __syncthreads_count(!isValid); count = __ballot_sync(0xFFFFFFFF,!isValid); count = __popc(count); // if((threadIdx.x & 0x1f) == 0){ // atomicAdd(&paddingInShared,count); // } // __syncthreads(); // count = paddingInShared; gIndex += count; if(localIndex == 0) fingers[termId].position += count; }while(count != 0); if(!isOutRange){ postings[termId].docId[localIndex] = docLocal; postings[termId].freq[localIndex] = iFreqList[gIndex]; postings[termId].docLenght[localIndex] = iDocLenghtList[gIndex]; if(localIndex == 0) postings[termId].positionInShared = 0; } else{ postings[termId].docId[localIndex] = NO_MORE_DOC; if(localIndex == 0) postings[termId].positionInShared = NO_VALID_POSITION; } gIndex += blockDim.x; } if(threadIdx.x == 0){ fingers[termId].docId = postings[termId].docId[0]; } // paddingInShared=0; // __syncthreads(); } } __syncthreads(); //Sort the terms in non decreasing order of DID sortingTerms_2(fingers, iOrderedTermSharedList, iTermNumber); //Select term pivot if(THREAD_MASTER){ selectTermPivot_No_SharedMemory(&sharedPivot,iOrderedTermSharedList,fingers,dUBlist,iTermNumber,thresholdLocal); docCurrent = (sharedPivot.idTerm != NO_PIVOT_TERM) ? 
            fingers[sharedPivot.idTerm].docId : NO_MORE_DOC;
            score = 0.0;
        }
        __syncthreads();
        // Cross-block threshold sharing policy (compile-time switch SHAREDTHESHOLD):
        // 1 = publish the local threshold only once the local heap is full;
        // 2 = publish once the heap is at least half full, and read it back too.
        if (SHAREDTHESHOLD == 1){//SHARED_READ
            if(THREAD_MASTER && documentTopk.padding == 0 && thresholdLocal > globalThreshold){
                // atomicMax((unsigned long long int*)&globalThreshold,(unsigned long long int)thresholdLocal);
                globalThreshold=thresholdLocal;
            }
            if(thresholdLocal < globalThreshold){
                thresholdLocal = globalThreshold;
            }
        }else if (SHAREDTHESHOLD == 2){ //TSHARED_WRITEREAD
            if(THREAD_MASTER && (documentTopk.padding < (iTopK >> 1)) && thresholdLocal > globalThreshold){
                // atomicMax((unsigned long long int*)&globalThreshold,(unsigned long long int)thresholdLocal);
                globalThreshold=thresholdLocal;
            }
            if((documentTopk.padding < (iTopK >> 1)))
                if(thresholdLocal < globalThreshold){
                    thresholdLocal = globalThreshold;
                }
        }
    }//End of WAND - pivot = NO_MORE_DOC
    // for (int i = blockIdx.x*iTopK+threadIdx.x; i < blockIdx.x*iTopK; i+= blockDim.x) {
    // printf("---%d %d---",blockIdx.x,iTopkDocListGlobal[i]);
    // }
    // Sort this block's local top-k heap and write it to the global result arrays.
    sortLocalTopkDocAndStoreInGlobal(dTopkScoreListGlobal,iTopkDocListGlobal,iTopK,&documentTopk);
    // __syncthreads();
    // float test = checkSorting(documentTopk, dTopkScoreListGlobal, iTopkDocListGlobal, iTopK);
    //
    // int result = __syncthreads_or(test != 0.0);
    // if(THREAD_MASTER && result != 0){
    // printf("Oi no Sorting!\n");
    // return;
    // }
    // if(thresholdLocal > globalThreshold)
    // thresholdGlobal = thresholdLocal;
    // if(THREAD_MASTER)
    // atomicAdd(&globalCount,count);
    //////
    // if(THREAD_MASTER)
    // printf("-----%d----", globalCount);
}

// Block-parallel WAND top-k query evaluation, "VARIABLE_4" variant: postings are
// read straight from global memory (no shared-memory posting cache). One thread
// block scans its slice of every term's posting list, maintains a block-local
// min-heap of top-k candidates (documentTopk) and writes the sorted block result
// to iTopkDocListGlobal / dTopkScoreListGlobal.
// NOTE(review): per-term partial scores are combined with __shfl_sync over lanes
// 0..TERM_NUMBER-1, so this assumes iTermNumber <= TERM_NUMBER <= 32 -- confirm.
__global__ void matchWandParallel_VARIABLE_4(const int* iDocIdList, const unsigned short int* iFreqList,
        const float *dUBlistGlobal, const float *dIdfListGlobal, const int *iDocLenghtList,
        const short int iTermNumber, int *iTopkDocListGlobal, float *dTopkScoreListGlobal,
        const float dAverageDocumentLengthGlobal, const int iGlobalRoundNumber, const short int iTopK,
        const float iInitialThreshold, const int* iDocNumberByTermListGlobal){
    // if(blockIdx.x != 1104)
    // return;
    __shared__ pivot sharedPivot;
    __shared__ finger fingers[TERM_NUMBER];
    __shared__ documentTopkList documentTopk;
    __shared__ unsigned int iOrderedTermSharedList[TERM_NUMBER];
    __shared__ long long iSharedPositionInitialInList[TERM_NUMBER];
    __shared__ float dUBlist[TERM_NUMBER];
    __shared__ float dIdfList[TERM_NUMBER];
    __shared__ float dAverageDocumentLength;
    __shared__ int iDocNumberByTermList[TERM_NUMBER];
    __shared__ int iGlobalInitialPositionInList;
    __shared__ float score;
    __shared__ bool isValidCandidate;
    __shared__ int docCurrent;
    __shared__ limitDocId limitDoc;
    // int count = iTopK;
    int padding;
    float thresholdLocal = iInitialThreshold;
    thresholdLocal = iInitialThreshold; // NOTE(review): redundant re-assignment.
    int globalIndex;// = iInitialPositionGlobal + threadIdx.x;
    int localIndex;
    int positionInitialInTermPostingList;
    if(thresholdLocal < globalThreshold)
        thresholdLocal = globalThreshold;
    if(THREAD_MASTER){
        documentTopk.padding = iTopK;
        dAverageDocumentLength = dAverageDocumentLengthGlobal;
        // }else if(THREAD_MASTER_2){
        iGlobalInitialPositionInList = DOC_QUANTITY_IN_MEMORY * blockIdx.x * iGlobalRoundNumber;
        limitDoc.minDocId = 0;
        limitDoc.secondMaxDocId = 0;
    }
    if(threadIdx.x < iTermNumber){
        fingers[threadIdx.x].docId = NO_MORE_DOC;
        fingers[threadIdx.x].position = NO_VALID_POSITION;
        iDocNumberByTermList[threadIdx.x] = iDocNumberByTermListGlobal[threadIdx.x];
        dUBlist[threadIdx.x] = dUBlistGlobal[threadIdx.x];
        dIdfList[threadIdx.x] = dIdfListGlobal[threadIdx.x];
    }
    //Initialize the top-k score and document lists
    //Assumes Top_K is a multiple of the block size
    for (localIndex = threadIdx.x; localIndex < iTopK; localIndex += blockDim.x) {
        documentTopk.id[localIndex] = -1;
        documentTopk.score[localIndex] = 0.0;
    }
    // __syncthreads();
    // if(blockIdx.x == 83 && THREAD_MASTER)
    // printf("Oi! \n");
    //Determine the max and min doc-id limits of this block's slice
    if(threadIdx.x < iTermNumber){
        int docAmount = iDocNumberByTermList[threadIdx.x];
        fingers[threadIdx.x].final = 0;
        limitDoc.extraPosition[threadIdx.x] = 0;
        globalIndex = iGlobalInitialPositionInList;
        positionInitialInTermPostingList = 0;
        // Prefix sum over term list sizes gives this term's start offset.
        for (int i = 0; i < threadIdx.x; ++i) {
            positionInitialInTermPostingList += iDocNumberByTermList[i];
        }
        iSharedPositionInitialInList[threadIdx.x] = positionInitialInTermPostingList;
        int maxDoc;
        if(blockIdx.x != 0){
            maxDoc = (globalIndex < docAmount) ? iDocIdList[positionInitialInTermPostingList + globalIndex - 1] : -1;
            maxDoc++;
            atomicMax(&(limitDoc.minDocId), maxDoc);
        }else{
            if(THREAD_MASTER)
                limitDoc.minDocId = 0;
        }
        int isTail = globalIndex < docAmount;
        globalIndex += DOC_QUANTITY_IN_MEMORY * iGlobalRoundNumber - 1;
        isTail = (isTail && globalIndex >= docAmount);
        if(isTail){
            globalIndex = iGlobalInitialPositionInList + (docAmount - iGlobalInitialPositionInList - 1);
        }
        maxDoc = (isTail || globalIndex < docAmount) ? iDocIdList[positionInitialInTermPostingList + globalIndex] : -1;
        atomicMax(&(limitDoc.secondMaxDocId), maxDoc);
    }
    __syncthreads();
    long long pos;
    int docLocal;
    // Position each term's finger on the first doc-id inside [minDocId, secondMaxDocId].
    for (int idTerm = 0; idTerm < iTermNumber; ++idTerm) {
        pos = iSharedPositionInitialInList[idTerm] + iGlobalInitialPositionInList + threadIdx.x;
        docLocal = -1;
        while(pos < (iSharedPositionInitialInList[idTerm]+iDocNumberByTermList[idTerm]) && docLocal < limitDoc.minDocId && docLocal <= limitDoc.secondMaxDocId ){
            docLocal = iDocIdList[pos];
            pos += blockDim.x;
        }
        docLocal = ( (docLocal != -1) && (docLocal >= limitDoc.minDocId && docLocal <= limitDoc.secondMaxDocId)) ? docLocal : NO_MORE_DOC;
        pos = (docLocal != NO_MORE_DOC) ? pos-blockDim.x : NO_VALID_POSITION;
        atomicMin(&(fingers[idTerm].docId) , docLocal);
        __syncthreads();
        // Only the thread holding the winning (minimum) doc id records its position.
        if(fingers[idTerm].docId == docLocal){
            fingers[idTerm].position = pos;
        }
    }
    sortingTerms_2(fingers, iOrderedTermSharedList, iTermNumber);
    if(THREAD_MASTER){
        selectTermPivot_No_SharedMemory(&sharedPivot,iOrderedTermSharedList,fingers,dUBlist,iTermNumber,thresholdLocal);
        docCurrent = (sharedPivot.idTerm != NO_PIVOT_TERM) ? fingers[sharedPivot.idTerm].docId : NO_MORE_DOC;
        // }else if(THREAD_MASTER_2){
        score = 0.0;
    }
    __syncthreads();
    // Main WAND loop: iterate until no viable pivot term remains.
    while((sharedPivot.positionInOrderedList < iTermNumber) && (sharedPivot.idTerm < iTermNumber)){
        isValidCandidate = (fingers[sharedPivot.idTerm].docId == fingers[iOrderedTermSharedList[0]].docId);
        // count++;
        if(isValidCandidate){
            if(threadIdx.x < iTermNumber){
                int termId = iOrderedTermSharedList[threadIdx.x];
                float scoreL = 0.0;
                if(fingers[termId].docId == fingers[sharedPivot.idTerm].docId){
                    scoreL = scoreTf_Idf(iFreqList[fingers[termId].position], iDocLenghtList[fingers[termId].position], dIdfList[termId],dAverageDocumentLength,1.0);
                }
                // Warp-shuffle sum of the per-term partial scores.
                float aux = 0;
                for (int i = 0; i < TERM_NUMBER; ++i) {
                    aux += __shfl_sync(0xFFFFFFFF,scoreL,i);
                }
                if(THREAD_MASTER)
                    score = aux;
                // atomicAdd(&score,scoreL);
            }
            padding = documentTopk.padding;
            __syncthreads();
            /* If the heap is not full the candidate is inserted into the heap.
               If the heap is full and the new score is larger than the minimum score
               in the heap, the new document is inserted into the heap, replacing the
               one with the minimum score. */
            if(padding != 0 || thresholdLocal < score ){
                // NOTE(review): leftover live debug output keyed to one hard-coded doc id.
                if(THREAD_MASTER && fingers[sharedPivot.idTerm].docId==46517642)//&& score == 3.53512168))//40920063
                    printf("blockIdx.x %d\n",blockIdx.x);
                thresholdLocal = managerMinValue_v5(&documentTopk, fingers[sharedPivot.idTerm].docId, score,padding);
                // if(count != 0) count--;
            }
            // float test = checkMinHeapProperty(documentTopk,score,fingers[sharedPivot.idTerm].docId,iTopK);
            // if(count != documentTopk.padding){
            // printf("Padding error! count %d | padding %d | blockIdx %d | docId %d\n",count, documentTopk.padding, blockIdx.x, fingers[sharedPivot.idTerm].docId);
            // }
            //
            // int result = __syncthreads_or(test != 0.0);
            // if(THREAD_MASTER && result != 0){
            // printf("Oi\n");
            // return;
            // }
            // Advance every finger currently parked on the pivot document.
            if(threadIdx.x < iTermNumber ){
                int docPivot = fingers[sharedPivot.idTerm].docId;
                if(fingers[threadIdx.x].docId == docPivot){
                    fingers[threadIdx.x].position++;
                    if(fingers[threadIdx.x].position >= (iDocNumberByTermList[threadIdx.x]+iSharedPositionInitialInList[threadIdx.x])){//Not valid
                        fingers[threadIdx.x].docId = NO_MORE_DOC;
                        fingers[threadIdx.x].position = NO_VALID_POSITION;
                    }else{
                        fingers[threadIdx.x].docId = iDocIdList[fingers[threadIdx.x].position];
                        if(fingers[threadIdx.x].docId > limitDoc.secondMaxDocId){
                            fingers[threadIdx.x].docId = NO_MORE_DOC;
                            fingers[threadIdx.x].position = NO_VALID_POSITION;
                        }
                    }
                }
            }
        }
        else{
            // Pivot not aligned with the smallest finger: block-cooperatively skip
            // the predecessor terms forward to the pivot document.
            int pivotDoc = docCurrent;
            long long position;
            int docLocal;
            int idTerm;
            for (int j = 0; j < sharedPivot.positionInOrderedList; ++j) {
                idTerm = iOrderedTermSharedList[j];
                if(fingers[idTerm].docId == fingers[sharedPivot.idTerm].docId)//Until reaching a finger that points to a pivot document
                    break;
                fingers[idTerm].docId = NO_MORE_DOC;
                position = fingers[idTerm].position + 1 + threadIdx.x;
                docLocal = -1;
                while(position < (iSharedPositionInitialInList[idTerm]+iDocNumberByTermList[idTerm]) && docLocal < pivotDoc && docLocal <= limitDoc.secondMaxDocId){
                    docLocal = iDocIdList[position];
                    position += blockDim.x;
                }
                docLocal = (docLocal >= pivotDoc && docLocal <= limitDoc.secondMaxDocId) ? docLocal : NO_MORE_DOC;
                position = (docLocal != NO_MORE_DOC) ? position-blockDim.x : NO_VALID_POSITION;
                __syncthreads();
                atomicMin(&(fingers[idTerm].docId) , docLocal);
                __syncthreads();
                if(fingers[idTerm].docId == docLocal){
                    fingers[idTerm].position = position;
                }
            }
        }
        __syncthreads();
        //Sort the terms in non decreasing order of DID
        sortingTerms_2(fingers, iOrderedTermSharedList, iTermNumber);
        //Select term pivot
        if(THREAD_MASTER){
            selectTermPivot_No_SharedMemory(&sharedPivot,iOrderedTermSharedList,fingers,dUBlist,iTermNumber,thresholdLocal);
            docCurrent = (sharedPivot.idTerm != NO_PIVOT_TERM) ? fingers[sharedPivot.idTerm].docId : NO_MORE_DOC;
            score = 0.0;
        }
        __syncthreads();
        // Cross-block threshold sharing (see note at top of file section).
        if (SHAREDTHESHOLD == 1){//SHARED_READ
            if(THREAD_MASTER && documentTopk.padding == 0 && thresholdLocal > globalThreshold){
                // atomicMaxD(&globalThreshold,thresholdLocal);
                // atomicMax((unsigned long long int*)&globalThreshold,(unsigned long long int)thresholdLocal);
                globalThreshold = thresholdLocal;
            }
            if(thresholdLocal < globalThreshold){
                thresholdLocal = globalThreshold;
            }
        }else if (SHAREDTHESHOLD == 2){ //TSHARED_WRITEREAD
            if(THREAD_MASTER && (documentTopk.padding < (iTopK >> 1)) && thresholdLocal > globalThreshold){
                // atomicMax((unsigned long long int*)&globalThreshold,(unsigned long long int)thresholdLocal);
                // atomicMaxD(&globalThreshold,thresholdLocal);
                globalThreshold = thresholdLocal;
            }
            if((documentTopk.padding < (iTopK >> 1)))
                if(thresholdLocal < globalThreshold){
                    thresholdLocal = globalThreshold;
                }
        }
    }
    // for (int i = blockIdx.x*iTopK+threadIdx.x; i < blockIdx.x*iTopK; i+= blockDim.x) {
    // printf("---%d %d---",blockIdx.x,iTopkDocListGlobal[i]);
    // }
    // Sort this block's local top-k heap and write it to the global result arrays.
    sortLocalTopkDocAndStoreInGlobal(dTopkScoreListGlobal,iTopkDocListGlobal,iTopK,&documentTopk);
    // __syncthreads();
    // float test = checkSorting(documentTopk, dTopkScoreListGlobal, iTopkDocListGlobal, iTopK);
    //
    // int result = __syncthreads_or(test != 0.0);
    // if(THREAD_MASTER && result != 0){
    // printf("Oi no Sorting!\n");
    // return;
    // }
    // if(thresholdLocal > globalThreshold)
    // thresholdGlobal = thresholdLocal;
    // if(THREAD_MASTER)
    // atomicAdd(&globalCount,count);
    //////
    // if(THREAD_MASTER)
    // printf("-----%d----", globalCount);
}

// Block-parallel WAND top-k, "FIXED_3" variant: each block scans a fixed-size
// slice [start, finalPositions[term]) of every posting list straight from
// global memory; no doc-id range limits (limitDoc) are computed.
// NOTE(review): scoreTf_Idf is called with 1.1 here vs 1.0 in the VARIABLE_4
// variant -- confirm the differing constant is intended.
__global__ void matchWandParallel_FIXED_3(const int* iDocIdList, const unsigned short int* iFreqList,
        const float *dUBlistGlobal, const float *dIdfListGlobal, const int *iDocLenghtList,
        const short int iTermNumber, int *iTopkDocListGlobal, float *dTopkScoreListGlobal,
        const float dAverageDocumentLengthGlobal, const int iGlobalRoundNumber, const short int iTopK,
        const float iInitialThreshold, const int* iDocNumberByTermListGlobal){
    // if(blockIdx.x != 0)
    // return;
    __shared__ pivot sharedPivot;
    __shared__ finger fingers[TERM_NUMBER];
    __shared__ documentTopkList documentTopk;
    __shared__ unsigned int iOrderedTermSharedList[TERM_NUMBER];
    // __shared__ long long int iSharedPositionInitialInList[TERM_NUMBER];
    __shared__ float dUBlist[TERM_NUMBER];
    __shared__ float dIdfList[TERM_NUMBER];
    __shared__ float dAverageDocumentLength;
    __shared__ int iDocNumberByTermList[TERM_NUMBER];
    __shared__ int iGlobalInitialPositionInList;
    __shared__ float score;
    __shared__ bool isValidCandidate;
    __shared__ int docCurrent;
    __shared__ long long finalPositions[TERM_NUMBER];
    // int count =0;
    int padding;
    float thresholdLocal = iInitialThreshold;
    thresholdLocal = iInitialThreshold; // NOTE(review): redundant re-assignment.
    int globalIndex;// = iInitialPositionGlobal + threadIdx.x;
    int localIndex;
    long long int positionInitialInTermPostingList;
    if(thresholdLocal < globalThreshold)
        thresholdLocal = globalThreshold;
    if(THREAD_MASTER){
        documentTopk.padding = iTopK;
        dAverageDocumentLength = dAverageDocumentLengthGlobal;
        iGlobalInitialPositionInList = DOC_QUANTITY_IN_MEMORY * blockIdx.x * iGlobalRoundNumber;
    }
    if(threadIdx.x < iTermNumber){
        iDocNumberByTermList[threadIdx.x] = iDocNumberByTermListGlobal[threadIdx.x];
        dUBlist[threadIdx.x] = dUBlistGlobal[threadIdx.x];
        dIdfList[threadIdx.x] = dIdfListGlobal[threadIdx.x];
        globalIndex = iGlobalInitialPositionInList;
        positionInitialInTermPostingList = 0;
        // Prefix sum over term list sizes gives this term's start offset.
        for (int i = 0; i < threadIdx.x; ++i) {
            positionInitialInTermPostingList += iDocNumberByTermList[i];
        }
        // iSharedPositionInitialInList[threadIdx.x] = positionInitialInTermPostingList;
        fingers[threadIdx.x].position = positionInitialInTermPostingList + globalIndex;
        if(fingers[threadIdx.x].position < (positionInitialInTermPostingList+iDocNumberByTermList[threadIdx.x])){
            fingers[threadIdx.x].docId = iDocIdList[fingers[threadIdx.x].position];
        }else{
            fingers[threadIdx.x].position = NO_VALID_POSITION;
            fingers[threadIdx.x].docId = NO_PIVOT_TERM;
        }
        // End of this block's slice for the term, clamped to the list's end.
        finalPositions[threadIdx.x] = positionInitialInTermPostingList + globalIndex + DOC_QUANTITY_IN_MEMORY * iGlobalRoundNumber;
        if(finalPositions[threadIdx.x] >= (positionInitialInTermPostingList+iDocNumberByTermList[threadIdx.x]))
            finalPositions[threadIdx.x] = positionInitialInTermPostingList+iDocNumberByTermList[threadIdx.x];
    }
    //Initialize the top-k score and document lists
    //Assumes Top_K is a multiple of the block size
    for (localIndex = threadIdx.x; localIndex < iTopK; localIndex += blockDim.x) {
        documentTopk.id[localIndex] = -1;
        documentTopk.score[localIndex] = 0.0;
    }
    __syncthreads();
    sortingTerms_2(fingers, iOrderedTermSharedList, iTermNumber);
    if(THREAD_MASTER){
        selectTermPivot_No_SharedMemory(&sharedPivot,iOrderedTermSharedList,fingers,dUBlist,iTermNumber,thresholdLocal);
        docCurrent = (sharedPivot.idTerm != NO_PIVOT_TERM) ?
            fingers[sharedPivot.idTerm].docId : NO_MORE_DOC;
        // }else if(THREAD_MASTER_2){
        score = 0.0;
    }
    __syncthreads();
    // Main WAND loop.
    while((sharedPivot.positionInOrderedList < iTermNumber) && (sharedPivot.idTerm < iTermNumber)){
        isValidCandidate = (fingers[sharedPivot.idTerm].docId == fingers[iOrderedTermSharedList[0]].docId);
        // count++;
        if(isValidCandidate){
            if(threadIdx.x < iTermNumber){
                int termId = iOrderedTermSharedList[threadIdx.x];
                float scoreL = 0.0;
                if(fingers[termId].docId == fingers[sharedPivot.idTerm].docId){
                    scoreL = scoreTf_Idf(iFreqList[fingers[termId].position], iDocLenghtList[fingers[termId].position], dIdfList[termId],dAverageDocumentLength,1.1);
                }
                // Warp-shuffle sum of the per-term partial scores.
                float aux = 0;
                for (int i = 0; i < TERM_NUMBER; ++i) {
                    aux += __shfl_sync(0xFFFFFFFF,scoreL,i);
                }
                if(THREAD_MASTER)
                    score = aux;
                // atomicAdd(&score,scoreL);
            }
            padding = documentTopk.padding;
            __syncthreads();
            /* If the heap is not full the candidate is inserted into the heap.
               If the heap is full and the new score is larger than the minimum score
               in the heap, the new document is inserted into the heap, replacing the
               one with the minimum score. */
            if(padding != 0 || thresholdLocal < score ){
                thresholdLocal = managerMinValue_v5(&documentTopk, fingers[sharedPivot.idTerm].docId, score,padding);
            }
            // Advance every finger currently parked on the pivot document.
            if(threadIdx.x < iTermNumber ){
                int docPivot = fingers[sharedPivot.idTerm].docId;
                if(fingers[threadIdx.x].docId == docPivot){
                    fingers[threadIdx.x].position++;
                    if(fingers[threadIdx.x].position >= finalPositions[threadIdx.x] ){//Not valid
                        fingers[threadIdx.x].docId = NO_MORE_DOC;
                        fingers[threadIdx.x].position = NO_VALID_POSITION;
                    }else{
                        fingers[threadIdx.x].docId = iDocIdList[fingers[threadIdx.x].position];
                    }
                }
            }
        }
        else{
            // Block-cooperatively skip predecessor terms forward to the pivot doc.
            int pivotDoc = docCurrent;
            long long int position;
            int docLocal;
            int idTerm;
            for (int j = 0; j < sharedPivot.positionInOrderedList; ++j) {
                idTerm = iOrderedTermSharedList[j];
                if(fingers[idTerm].docId == fingers[sharedPivot.idTerm].docId)//Until reaching a finger that points to a pivot document
                    break;
                fingers[idTerm].docId = NO_MORE_DOC;
                position = fingers[idTerm].position + 1 + threadIdx.x;
                docLocal = -1;
                while(position < finalPositions[idTerm] && docLocal < pivotDoc){
                    docLocal = iDocIdList[position];
                    position += blockDim.x;
                }
                position -= blockDim.x;
                if((docLocal < pivotDoc || position >= finalPositions[idTerm])){
                    docLocal = NO_MORE_DOC;
                    position = NO_VALID_POSITION;
                }
                __syncthreads();
                atomicMin(&(fingers[idTerm].docId) , docLocal);
                __syncthreads();
                if(fingers[idTerm].docId == docLocal){
                    fingers[idTerm].position = position;
                }
            }
        }
        __syncthreads();
        //Sort the terms in non decreasing order of DID
        sortingTerms_2(fingers, iOrderedTermSharedList, iTermNumber);
        //Select term pivot
        if(THREAD_MASTER){
            selectTermPivot_No_SharedMemory(&sharedPivot,iOrderedTermSharedList,fingers,dUBlist,iTermNumber,thresholdLocal);
            docCurrent = (sharedPivot.idTerm != NO_PIVOT_TERM) ?
                fingers[sharedPivot.idTerm].docId : NO_MORE_DOC;
            score = 0.0;
        }
        __syncthreads();
        // Cross-block threshold sharing policy (compile-time switch SHAREDTHESHOLD).
        if (SHAREDTHESHOLD == 1){//SHARED_READ
            if(THREAD_MASTER && documentTopk.padding == 0 && thresholdLocal > globalThreshold){
                // atomicMaxD(&globalThreshold,thresholdLocal);
                // atomicMax((unsigned long long int*)&globalThreshold,(unsigned long long int)thresholdLocal);
                globalThreshold = thresholdLocal;
            }
            if(thresholdLocal < globalThreshold){
                thresholdLocal = globalThreshold;
            }
        }else if (SHAREDTHESHOLD == 2){ //TSHARED_WRITEREAD
            if(THREAD_MASTER && (documentTopk.padding < (iTopK >> 1)) && thresholdLocal > globalThreshold){
                // atomicMax((unsigned long long int*)&globalThreshold,(unsigned long long int)thresholdLocal);
                // atomicMaxD(&globalThreshold,thresholdLocal);
                globalThreshold = thresholdLocal;
            }
            if((documentTopk.padding < (iTopK >> 1)))
                if(thresholdLocal < globalThreshold){
                    thresholdLocal = globalThreshold;
                }
        }
    }
    // Sort this block's local top-k heap and write it to the global result arrays.
    sortLocalTopkDocAndStoreInGlobal(dTopkScoreListGlobal,iTopkDocListGlobal,iTopK,&documentTopk);
    // if(thresholdLocal > globalThreshold)
    // globalThreshold = thresholdLocal;
    // if(THREAD_MASTER)
    // atomicAdd(&globalCount,count);
    ////
    // if(THREAD_MASTER)
    // printf("-----%d----", globalCount);
}

// Block-parallel WAND top-k, "VARIABLE_3" variant: postings for each term are
// cached in shared memory (postingLists) in chunks of DOC_QUANTITY_IN_MEMORY
// elements and refilled on demand via searchMoreDocs. dUBlist/dIdfList/
// iDocNumberByTermList arrive as global-memory parameters (the commented-out
// __shared__ copies were abandoned).
__global__ void matchWandParallel_VARIABLE_3(const int* iDocIdList, const unsigned short int* iFreqList,
        const float *dUBlist, const float *dIdfList, const int *iDocLenghtList,
        const short int iTermNumber, int *iTopkDocListGlobal, float *dTopkScoreListGlobal,
        const float dAverageDocumentLength, const int iGlobalRoundNumber, const short int iTopK,
        const float iInitialThreshold, const int* iDocNumberByTermList){
    // if(blockIdx.x != 1687)
    // return;
    // int count = 0;
    __shared__ pivot sharedPivot;
    __shared__ finger fingers[TERM_NUMBER];
    __shared__ postingList postingLists[TERM_NUMBER];
    __shared__ documentTopkList documentTopk;
    __shared__ unsigned int iOrderedTermSharedList[TERM_NUMBER];
    // __shared__ float dUBlist[TERM_NUMBER];
    // __shared__ int iDocNumberByTermList[TERM_NUMBER];
    __shared__ int iGlobalInitialPositionInList;
    __shared__ unsigned short int iElementQuantityByBlock;
    __shared__ float score;
    __shared__ bool isValidCandidate;
    __shared__ int docCurrent;
    __shared__ short int needSearchDocRange[TERM_NUMBER];
    __shared__ limitDocId limitDoc;
    float thresholdLocal;// = iInitialThreshold;
    // int count = 0;
    thresholdLocal = iInitialThreshold;
    int globalIndex;// = iInitialPositionGlobal + threadIdx.x;
    int localIndex;
    int positionInitialInTermPostingList;
    if(thresholdLocal < globalThreshold)
        thresholdLocal = globalThreshold;
    if(THREAD_MASTER){
        documentTopk.padding = iTopK;
    }else if(THREAD_MASTER_2){
        iElementQuantityByBlock = DOC_QUANTITY_IN_MEMORY;//iBlockRoundNumber * DOC_QUANTITY_IN_MEMORY;
        iGlobalInitialPositionInList = iElementQuantityByBlock * blockIdx.x * iGlobalRoundNumber;
    }
    //Initialize the top-k score and document lists
    //Assumes Top_K is a multiple of the block size
    for (localIndex = threadIdx.x; localIndex < iTopK; localIndex += blockDim.x) {
        documentTopk.id[localIndex] = -1;
        documentTopk.score[localIndex] = 0.0;
    }
    // if(THREAD_MASTER) documentTopk.padding = iTopK;
    __syncthreads();
    //Determine the max and min doc-id limits of this block's slice
    if(threadIdx.x < iTermNumber){
        // iDocNumberByTermList[threadIdx.x] = globalDocNumberByTermList[threadIdx.x];
        fingers[threadIdx.x].final = 0;
        limitDoc.extraPosition[threadIdx.x] = 0;
        // dUBlist[threadIdx.x] = dUBlistGlobal[blockIdx.x * iTermNumber + threadIdx.x];
        int docAmount = iDocNumberByTermList[threadIdx.x];
        globalIndex = iGlobalInitialPositionInList;
        positionInitialInTermPostingList = 0;
        for (int i = 0; i < threadIdx.x; ++i) {
            positionInitialInTermPostingList += iDocNumberByTermList[i];
        }
        // if(threadIdx.x == 0 && blockIdx.x == 1687){
        // printf("Oi");
        // }
        int aux, maxDoc;
        int maxNeighbor;
        if(blockIdx.x != 0){
            // NOTE(review): this inner declaration shadows the outer maxDoc above.
            int maxDoc = (globalIndex < docAmount) ? iDocIdList[positionInitialInTermPostingList + globalIndex - 1] : -1;
            maxDoc++;
            aux = maxDoc;
            // atomicMax(&limitDoc.minDocId, maxDoc);
            // __syncwarp(0xFFFFFFFF);
            // Warp-wide max over lanes 1..iTermNumber-1 via shuffles.
            for (int i = 1; i < iTermNumber; ++i) {
                maxNeighbor = __shfl_sync(0xFFFFFFFF,aux,i);
                if(maxNeighbor > maxDoc)
                    maxDoc = maxNeighbor;
            }
            // if(THREAD_MASTER)
            {
                limitDoc.minDocId = maxDoc; //atomicExch(&(limitDoc.minDocId), maxDoc);
            }
            __syncwarp(0xFFFFFFFF);
            if(aux < limitDoc.minDocId && aux != 0)
                needSearchDocRange[threadIdx.x] = 1;
        }else if(THREAD_MASTER)
            limitDoc.minDocId = 0;
        int isTail = globalIndex < docAmount;
        globalIndex += iElementQuantityByBlock * iGlobalRoundNumber;
        isTail &= globalIndex >= docAmount;
        if(isTail){
            globalIndex = iGlobalInitialPositionInList + (docAmount - iGlobalInitialPositionInList - 1);
        }
        maxDoc = (isTail || globalIndex < docAmount) ? iDocIdList[positionInitialInTermPostingList + globalIndex] - 1 : -1;
        aux = maxDoc;
        for (int i = 1; i < iTermNumber; ++i) {
            maxNeighbor = __shfl_sync(0xFFFFFFFF,aux,i);
            if(maxNeighbor > maxDoc)
                maxDoc = maxNeighbor;
        }
        if(THREAD_MASTER)
            limitDoc.secondMaxDocId = maxDoc;
    }
    __syncthreads();
    //Search the document range;
    for (int termId = 0; termId < iTermNumber; ++termId) {
        if(needSearchDocRange[termId])
            searchRangeOfDocs(iDocIdList,postingLists, termId, iGlobalInitialPositionInList, &limitDoc, iElementQuantityByBlock,iGlobalRoundNumber,iDocNumberByTermList);
    }
    __syncthreads();
    //Fill the shared memory posting cache
    positionInitialInTermPostingList = 0;
    int docLocal, docAmount;
    for (int termId = 0; termId < iTermNumber; ++termId) {
        globalIndex = iGlobalInitialPositionInList + limitDoc.extraPosition[termId] + threadIdx.x;
        docAmount = iDocNumberByTermList[termId];
        docLocal = -1;
        for (localIndex = threadIdx.x; localIndex < iElementQuantityByBlock; localIndex+=blockDim.x) {
            docLocal = (globalIndex < docAmount) ? iDocIdList[positionInitialInTermPostingList + globalIndex] : NO_MORE_DOC;
            if(docLocal > limitDoc.secondMaxDocId || globalIndex > docAmount){
                postingLists[termId].docId[localIndex] = NO_MORE_DOC;
                fingers[termId].final = 1;
                break;
            }
            postingLists[termId].docId[localIndex] = docLocal;
            postingLists[termId].docLenght[localIndex] = iDocLenghtList[positionInitialInTermPostingList + globalIndex];
            postingLists[termId].freq[localIndex] = iFreqList[positionInitialInTermPostingList + globalIndex];
            globalIndex += blockDim.x;
        }
        positionInitialInTermPostingList += iDocNumberByTermList[termId];
    }
    if(threadIdx.x < iTermNumber){
        fingers[threadIdx.x].docId = postingLists[threadIdx.x].docId[0];
        fingers[threadIdx.x].position = (fingers[threadIdx.x].docId == NO_MORE_DOC) ? NO_VALID_POSITION : 0;
        // fingers[threadIdx.x].final = 0 | fingers[threadIdx.x].final;
    }
    // if(threadIdx.x == 0 && blockIdx.x == 3430){
    // printf("Oi");
    // }
    __syncthreads();
    // __shared__ int docCurrent;
    sortingTerms_2(fingers, iOrderedTermSharedList, iTermNumber);
    // __syncthreads();
    if(THREAD_MASTER){
        selectTermPivot_2(&sharedPivot,iOrderedTermSharedList,fingers,dUBlist,iTermNumber,thresholdLocal);
        docCurrent = (sharedPivot.idTerm != NO_PIVOT_TERM) ? fingers[sharedPivot.idTerm].docId : NO_MORE_DOC;
    }else if(THREAD_MASTER_2){
        score = 0.0;
    }
    int padding;
    int threadIdInWarp = (threadIdx.x & 0x1f);
    int idWarp = ((blockDim.x >> 5) == 1 ) ? 1 : threadIdx.x >> 5;
    __syncthreads();
    // Main WAND loop.
    while((sharedPivot.positionInOrderedList < iTermNumber) && (sharedPivot.idTerm < iTermNumber)){
        if(THREAD_MASTER){
            isValidCandidate = (fingers[sharedPivot.idTerm].docId == fingers[iOrderedTermSharedList[0]].docId);
        }
        __syncthreads();
        if(isValidCandidate){
            if(threadIdx.x < iTermNumber){
                fullScore_3_1(&score, fingers[sharedPivot.idTerm].docId, iOrderedTermSharedList, fingers,postingLists, dIdfList, dAverageDocumentLength);
            }
            padding = documentTopk.padding;
            __syncthreads();
            /* If the heap is not full the candidate is inserted into the heap.
               If the heap is full and the new score is larger than the minimum score
               in the heap, the new document is inserted into the heap, replacing the
               one with the minimum score. */
            if(padding != 0 || thresholdLocal < score ){
                thresholdLocal = managerMinValue_v5(&documentTopk, fingers[sharedPivot.idTerm].docId, score,padding);
            }
            if(idWarp == 1 && threadIdInWarp < iTermNumber ){
                advancePivoTermFinger_4(sharedPivot,fingers, postingLists,iElementQuantityByBlock,threadIdInWarp);
            }
        }
        else{
            advanceDocIdOfPredecessorTerm_4(postingLists, iOrderedTermSharedList, fingers,sharedPivot,fingers[sharedPivot.idTerm].docId, iElementQuantityByBlock);
        }
        __syncthreads();
        // Refill the shared cache for any term whose cached chunk is exhausted.
        for (int termId = 0; termId < iTermNumber; ++termId) {
            if(fingers[termId].docId == NO_MORE_DOC && fingers[termId].final == 0){
                // if(termId == 0) count++;
                //
                // if(blockIdx.x == 27 && count == 48 && THREAD_MASTER)
                // printf("Oi!");
                searchMoreDocs(iDocIdList,iFreqList,iDocLenghtList,postingLists, termId,iGlobalInitialPositionInList, &limitDoc,iElementQuantityByBlock, &(fingers[termId]),docCurrent,iDocNumberByTermList);
                //#endif
                // Cross-block threshold sharing policy (compile-time switch SHAREDTHESHOLD).
                if (SHAREDTHESHOLD == 1){//SHARED_READ
                    if(THREAD_MASTER && documentTopk.padding == 0 && thresholdLocal > globalThreshold){
                        // atomicMaxD(&globalThreshold,thresholdLocal);
                        atomicMax((unsigned long long int*)&globalThreshold,(unsigned long long int)thresholdLocal);
                    }
                    if(thresholdLocal < globalThreshold){
                        thresholdLocal = globalThreshold;
                    }
                }else if (SHAREDTHESHOLD == 2){ //TSHARED_WRITEREAD
                    if(THREAD_MASTER && (documentTopk.padding < (iTopK >> 1)) && thresholdLocal > globalThreshold){
                        atomicMax((unsigned long long int*)&globalThreshold,(unsigned long long int)thresholdLocal);
                        // atomicMaxD(&globalThreshold,thresholdLocal);
                    }
                    if((documentTopk.padding < (iTopK >> 1)))
                        if(thresholdLocal < globalThreshold){
                            thresholdLocal = globalThreshold;
                        }
                }
                //#endif
            }
        }
        // __syncthreads();//Maybe not needed
        //Sort the terms in non decreasing order of DID
        sortingTerms_2(fingers, iOrderedTermSharedList, iTermNumber);
        // __syncthreads();//Maybe not needed
        //Select term pivot
        if(THREAD_MASTER){
            selectTermPivot_2(&sharedPivot,iOrderedTermSharedList,fingers,dUBlist,iTermNumber,thresholdLocal);
            docCurrent = (sharedPivot.idTerm != NO_PIVOT_TERM) ? fingers[sharedPivot.idTerm].docId : NO_MORE_DOC;
            score = 0.0;
        }
        __syncthreads();
    }
    // if(threadIdx.x == 0){// && blockIdx.x == 1687){
    // printf("----%d %d----",blockIdx.x,count);
    // }
    // Sort this block's local top-k heap and write it to the global result arrays.
    sortLocalTopkDocAndStoreInGlobal(dTopkScoreListGlobal,iTopkDocListGlobal,iTopK,&documentTopk);
    // globalIndex = iTopK * blockIdx.x + threadIdx.x + documentTopk.padding;
    // for (localIndex = threadIdx.x; localIndex < (iTopK - documentTopk.padding) ; localIndex += blockDim.x) {
    // iTopkDocListGlobal[globalIndex] = documentTopk.id[localIndex];
    // dTopkScoreListGlobal[globalIndex] = documentTopk.score[localIndex];
    // globalIndex += blockDim.x;
    // }
    // __syncthreads();
    // Publish this block's final threshold to the global threshold.
    if(THREAD_MASTER && thresholdLocal > globalThreshold){
        atomicMax((unsigned long long int*)&globalThreshold,(unsigned long long int)thresholdLocal);
    }
}
63ea9fd9630d8cdbf58d54b37f7ecc70397bbc1e.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <string> #include <iostream> #include <fstream> #include <math.h> #include <stdlib.h> #include <limits> #include <ctime> #include <string> #include <hip/hip_runtime.h> #include <math.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> using namespace std; #define CHECK(call) \ { \ const hipError_t error = call; \ if (error != hipSuccess) \ { \ cout << "Error: "<<__FILE__<< " : "<<__LINE__ << endl; \ cout << "code: "<<error << ", reason: " <<hipGetErrorString(error)<<endl; \ exit(1); \ } \ } #define TILE_WIDTH 16 // Compute C = A * B __global__ void matrixMultiply(float * A, float * B, float * C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { __shared__ float ds_M[TILE_WIDTH][TILE_WIDTH]; __shared__ float ds_N[TILE_WIDTH][TILE_WIDTH]; int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int Row = by * TILE_WIDTH + ty; int Col = bx * TILE_WIDTH + tx; float fSum = 0; for (int m = 0; m < (numAColumns-1)/TILE_WIDTH+1; ++m) { if (Row < numARows && m*TILE_WIDTH+tx < numAColumns) { ds_M[ty][tx] = A[Row*numAColumns + m*TILE_WIDTH+tx]; } else { ds_M[ty][tx] = 0; } if (Col < numBColumns && m*TILE_WIDTH+ty < numBRows) { ds_N[ty][tx] = B[(m*TILE_WIDTH+ty)*numBColumns+Col]; } else { ds_N[ty][tx] = 0; } __syncthreads(); for (int k = 0; k < TILE_WIDTH; ++k) { fSum += ds_M[ty][k] * ds_N[k][tx]; } __syncthreads(); } if (Row < numCRows && Col < numCColumns) { C[Row*numCColumns+Col] = fSum; } } __host__ void gpuMultShared(float *h_A, float *h_B, float *h_C, const int A_rows, const int A_cols,const int B_rows,const int B_cols) { float *d_A, *d_B, *d_C; int C_rows,C_cols; const int Matrix_A_SizeInBytes = A_rows*A_cols*sizeof(float); const int Matrix_B_SizeInBytes = A_cols*B_cols*sizeof(float); const int Matrix_C_SizeInBytes = A_rows*B_cols*sizeof(float); C_rows = A_rows; C_cols = B_cols; hipEvent_t 
kernel_start; hipEvent_t kernel_stop; CHECK(hipEventCreate(&kernel_start)); CHECK(hipEventCreate(&kernel_stop)); //Allocate device memory on the global memory CHECK(hipMalloc((void**)&d_A, Matrix_A_SizeInBytes)); CHECK(hipMalloc((void**)&d_B, Matrix_B_SizeInBytes)); CHECK(hipMalloc((void**)&d_C, Matrix_C_SizeInBytes)); //transfer data from CPU Memory to GPU Memory CHECK(hipMemcpy(d_A, h_A, Matrix_A_SizeInBytes, hipMemcpyHostToDevice)); CHECK(hipMemcpy(d_B, h_B, Matrix_B_SizeInBytes, hipMemcpyHostToDevice)); dim3 dimGrid((C_cols-1)/TILE_WIDTH+1, (C_rows-1)/TILE_WIDTH+1, 1); dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); //Launch the GPU Kernel here CHECK(hipEventRecord(kernel_start)); hipLaunchKernelGGL(( matrixMultiply), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C, A_rows, A_cols, B_rows, B_cols, C_rows, C_cols); hipDeviceSynchronize(); CHECK(hipEventRecord(kernel_stop)); CHECK(hipMemcpy(h_C, d_C, Matrix_C_SizeInBytes, hipMemcpyDeviceToHost)); CHECK(hipFree(d_A)); CHECK(hipFree(d_B)); CHECK(hipFree(d_C)); CHECK(hipEventDestroy(kernel_start)); CHECK(hipEventDestroy(kernel_stop)); }
63ea9fd9630d8cdbf58d54b37f7ecc70397bbc1e.cu
#include <stdio.h> #include <string> #include <iostream> #include <fstream> #include <math.h> #include <stdlib.h> #include <limits> #include <ctime> #include <string> #include <cuda.h> #include <math.h> #include <cuda_runtime.h> #include <cuda_runtime_api.h> using namespace std; #define CHECK(call) \ { \ const cudaError_t error = call; \ if (error != cudaSuccess) \ { \ cout << "Error: "<<__FILE__<< " : "<<__LINE__ << endl; \ cout << "code: "<<error << ", reason: " <<cudaGetErrorString(error)<<endl; \ exit(1); \ } \ } #define TILE_WIDTH 16 // Compute C = A * B __global__ void matrixMultiply(float * A, float * B, float * C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { __shared__ float ds_M[TILE_WIDTH][TILE_WIDTH]; __shared__ float ds_N[TILE_WIDTH][TILE_WIDTH]; int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int Row = by * TILE_WIDTH + ty; int Col = bx * TILE_WIDTH + tx; float fSum = 0; for (int m = 0; m < (numAColumns-1)/TILE_WIDTH+1; ++m) { if (Row < numARows && m*TILE_WIDTH+tx < numAColumns) { ds_M[ty][tx] = A[Row*numAColumns + m*TILE_WIDTH+tx]; } else { ds_M[ty][tx] = 0; } if (Col < numBColumns && m*TILE_WIDTH+ty < numBRows) { ds_N[ty][tx] = B[(m*TILE_WIDTH+ty)*numBColumns+Col]; } else { ds_N[ty][tx] = 0; } __syncthreads(); for (int k = 0; k < TILE_WIDTH; ++k) { fSum += ds_M[ty][k] * ds_N[k][tx]; } __syncthreads(); } if (Row < numCRows && Col < numCColumns) { C[Row*numCColumns+Col] = fSum; } } __host__ void gpuMultShared(float *h_A, float *h_B, float *h_C, const int A_rows, const int A_cols,const int B_rows,const int B_cols) { float *d_A, *d_B, *d_C; int C_rows,C_cols; const int Matrix_A_SizeInBytes = A_rows*A_cols*sizeof(float); const int Matrix_B_SizeInBytes = A_cols*B_cols*sizeof(float); const int Matrix_C_SizeInBytes = A_rows*B_cols*sizeof(float); C_rows = A_rows; C_cols = B_cols; cudaEvent_t kernel_start; cudaEvent_t kernel_stop; CHECK(cudaEventCreate(&kernel_start)); 
CHECK(cudaEventCreate(&kernel_stop)); //Allocate device memory on the global memory CHECK(cudaMalloc((void**)&d_A, Matrix_A_SizeInBytes)); CHECK(cudaMalloc((void**)&d_B, Matrix_B_SizeInBytes)); CHECK(cudaMalloc((void**)&d_C, Matrix_C_SizeInBytes)); //transfer data from CPU Memory to GPU Memory CHECK(cudaMemcpy(d_A, h_A, Matrix_A_SizeInBytes, cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(d_B, h_B, Matrix_B_SizeInBytes, cudaMemcpyHostToDevice)); dim3 dimGrid((C_cols-1)/TILE_WIDTH+1, (C_rows-1)/TILE_WIDTH+1, 1); dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); //Launch the GPU Kernel here CHECK(cudaEventRecord(kernel_start)); matrixMultiply<<<dimGrid, dimBlock>>>(d_A, d_B, d_C, A_rows, A_cols, B_rows, B_cols, C_rows, C_cols); cudaThreadSynchronize(); CHECK(cudaEventRecord(kernel_stop)); CHECK(cudaMemcpy(h_C, d_C, Matrix_C_SizeInBytes, cudaMemcpyDeviceToHost)); CHECK(cudaFree(d_A)); CHECK(cudaFree(d_B)); CHECK(cudaFree(d_C)); CHECK(cudaEventDestroy(kernel_start)); CHECK(cudaEventDestroy(kernel_stop)); }
639d3aed38ee9dddafc0ec4f27d43de48f551c37.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" long long get_time() { struct timeval tv; gettimeofday(&tv, NULL); return (tv.tv_sec * 1000000) + tv.tv_usec; } #include "newhalf.hpp" #include <hip/hip_fp16.h> #include "half_operator_overload.cuh" __global__ void hotspotOpt1(__half *p, __half* tIn, __half *tOut, __half sdc, int nx, int ny, int nz, __half ce_t, __half cw_t, __half cn_t, __half cs_t, __half ct_t, __half cb_t, __half cc_t) { __half ce = ce_t; __half cw = cw_t; __half cn = cn_t; __half cs = cs_t; __half ct = ct_t; __half cb = cb_t; __half cc = cc_t; __half amb_temp = __float2half(80.0); int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; //add in for performance measurement for(int run = 0; run <100; run++){ int c = i + j * nx; int xy = nx * ny; int W = (i == 0) ? c : c - 1; int E = (i == nx-1) ? c : c + 1; int N = (j == 0) ? c : c - nx; int S = (j == ny-1) ? c : c + nx; __half temp1, temp2, temp3; temp1 = temp2 = tIn[c]; temp3 = tIn[c+xy]; tOut[c] = cc * temp2 + cw * tIn[W] + ce * tIn[E] + cs * tIn[S] + cn * tIn[N] + cb * temp1 + ct * temp3 + sdc * p[c] + ct * amb_temp; c += xy; W += xy; E += xy; N += xy; S += xy; for (int k = 1; k < nz-1; ++k) { temp1 = temp2; temp2 = temp3; temp3 = tIn[c+xy]; tOut[c] = cc * temp2 + cw * tIn[W] + ce * tIn[E] + cs * tIn[S] + cn * tIn[N] + cb * temp1 + ct * temp3 + sdc * p[c] + ct * amb_temp; c += xy; W += xy; E += xy; N += xy; S += xy; } temp1 = temp2; temp2 = temp3; tOut[c] = cc * temp2 + cw * tIn[W] + ce * tIn[E] + cs * tIn[S] + cn * tIn[N] + cb * temp1 + ct * temp3 + sdc * p[c] + ct * amb_temp; } //endof loop return; } void hotspot_opt1(float *p, float *tIn, float *tOut, int nx, int ny, int nz, float Cap, float Rx, float Ry, float Rz, float dt, int numiter) { float ce, cw, cn, cs, ct, cb, cc; float stepDivCap = dt / Cap; half_float::half* p_half; half_float::half* tIn_half; half_float::half* tOut_half; size_t half_size = sizeof(half)* 
nx * ny * nz; p_half = (half_float::half*)malloc(half_size); tIn_half =(half_float::half*)malloc(half_size); tOut_half = (half_float::half*)malloc(half_size); ce = cw =stepDivCap/ Rx; cn = cs =stepDivCap/ Ry; ct = cb =stepDivCap/ Rz; cc = 1.0 - (2.0*ce + 2.0*cn + 3.0*ct); size_t s = sizeof(half) * nx * ny * nz; for(int i = 0; i < nx * ny * nz; i++ ){ p_half[i] = p[i]; tIn_half[i] = tIn[i]; } __half *tIn_d, *tOut_d, *p_d; printf ("val: %f %f %f %f %f %f %f %f \n",stepDivCap,ce,cw,cn,ct,cb,cc); hipMalloc((void**)&p_d,s); hipMalloc((void**)&tIn_d,s); hipMalloc((void**)&tOut_d,s); hipMemcpy(tIn_d, tIn_half, s, hipMemcpyHostToDevice); hipMemcpy(p_d, p_half, s, hipMemcpyHostToDevice); hipFuncSetCacheConfig(hotspotOpt1, hipFuncCachePreferL1); dim3 block_dim(64, 4, 1); dim3 grid_dim(nx / 64, ny / 4, 1); half_float::half stepDivCap_half = half_float::half(stepDivCap); __half stdc = *(__half*)&(stepDivCap_half); half_float::half ce_half = half_float::half(ce); __half ce_dev = *(__half*)&ce_half; half_float::half cw_half = half_float::half(cw); __half cw_dev = *(__half*)&cw_half; half_float::half cn_half = half_float::half(cn); __half cn_dev = *(__half*)&cn_half; half_float::half cs_half = half_float::half(cs); __half cs_dev = *(__half*)&cs_half; half_float::half ct_half = half_float::half(ct); __half ct_dev = *(__half*)&ct_half; half_float::half cb_half = half_float::half(cb); __half cb_dev = *(__half*)&cb_half; half_float::half cc_half = half_float::half(cc); __half cc_dev = *(__half*)&cc_half; long long start = get_time(); for (int i = 0; i < numiter; ++i) { hipLaunchKernelGGL(( hotspotOpt1), dim3(grid_dim), dim3(block_dim), 0, 0, p_d, tIn_d, tOut_d, stdc, nx, ny, nz, ce_dev, cw_dev, cn_dev, cs_dev, ct_dev, cb_dev, cc_dev); __half *t = tIn_d; tIn_d = tOut_d; tOut_d = t; } hipDeviceSynchronize(); long long stop = get_time(); float time = (float)((stop - start)/(1000.0 * 1000.0)); printf("Time: %.3f (s)\n",time); hipMemcpy(tOut_half, tOut_d, s, hipMemcpyDeviceToHost); 
for(int i = 0; i < nx * ny * nz; i++ ){ tOut [i] = float(tOut_half[i]); } hipFree(p_d); hipFree(tIn_d); hipFree(tOut_d); return; }
639d3aed38ee9dddafc0ec4f27d43de48f551c37.cu
long long get_time() { struct timeval tv; gettimeofday(&tv, NULL); return (tv.tv_sec * 1000000) + tv.tv_usec; } #include "newhalf.hpp" #include <cuda_fp16.h> #include "half_operator_overload.cuh" __global__ void hotspotOpt1(__half *p, __half* tIn, __half *tOut, __half sdc, int nx, int ny, int nz, __half ce_t, __half cw_t, __half cn_t, __half cs_t, __half ct_t, __half cb_t, __half cc_t) { __half ce = ce_t; __half cw = cw_t; __half cn = cn_t; __half cs = cs_t; __half ct = ct_t; __half cb = cb_t; __half cc = cc_t; __half amb_temp = __float2half(80.0); int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; //add in for performance measurement for(int run = 0; run <100; run++){ int c = i + j * nx; int xy = nx * ny; int W = (i == 0) ? c : c - 1; int E = (i == nx-1) ? c : c + 1; int N = (j == 0) ? c : c - nx; int S = (j == ny-1) ? c : c + nx; __half temp1, temp2, temp3; temp1 = temp2 = tIn[c]; temp3 = tIn[c+xy]; tOut[c] = cc * temp2 + cw * tIn[W] + ce * tIn[E] + cs * tIn[S] + cn * tIn[N] + cb * temp1 + ct * temp3 + sdc * p[c] + ct * amb_temp; c += xy; W += xy; E += xy; N += xy; S += xy; for (int k = 1; k < nz-1; ++k) { temp1 = temp2; temp2 = temp3; temp3 = tIn[c+xy]; tOut[c] = cc * temp2 + cw * tIn[W] + ce * tIn[E] + cs * tIn[S] + cn * tIn[N] + cb * temp1 + ct * temp3 + sdc * p[c] + ct * amb_temp; c += xy; W += xy; E += xy; N += xy; S += xy; } temp1 = temp2; temp2 = temp3; tOut[c] = cc * temp2 + cw * tIn[W] + ce * tIn[E] + cs * tIn[S] + cn * tIn[N] + cb * temp1 + ct * temp3 + sdc * p[c] + ct * amb_temp; } //endof loop return; } void hotspot_opt1(float *p, float *tIn, float *tOut, int nx, int ny, int nz, float Cap, float Rx, float Ry, float Rz, float dt, int numiter) { float ce, cw, cn, cs, ct, cb, cc; float stepDivCap = dt / Cap; half_float::half* p_half; half_float::half* tIn_half; half_float::half* tOut_half; size_t half_size = sizeof(half)* nx * ny * nz; p_half = (half_float::half*)malloc(half_size); tIn_half 
=(half_float::half*)malloc(half_size); tOut_half = (half_float::half*)malloc(half_size); ce = cw =stepDivCap/ Rx; cn = cs =stepDivCap/ Ry; ct = cb =stepDivCap/ Rz; cc = 1.0 - (2.0*ce + 2.0*cn + 3.0*ct); size_t s = sizeof(half) * nx * ny * nz; for(int i = 0; i < nx * ny * nz; i++ ){ p_half[i] = p[i]; tIn_half[i] = tIn[i]; } __half *tIn_d, *tOut_d, *p_d; printf ("val: %f %f %f %f %f %f %f %f \n",stepDivCap,ce,cw,cn,ct,cb,cc); cudaMalloc((void**)&p_d,s); cudaMalloc((void**)&tIn_d,s); cudaMalloc((void**)&tOut_d,s); cudaMemcpy(tIn_d, tIn_half, s, cudaMemcpyHostToDevice); cudaMemcpy(p_d, p_half, s, cudaMemcpyHostToDevice); cudaFuncSetCacheConfig(hotspotOpt1, cudaFuncCachePreferL1); dim3 block_dim(64, 4, 1); dim3 grid_dim(nx / 64, ny / 4, 1); half_float::half stepDivCap_half = half_float::half(stepDivCap); __half stdc = *(__half*)&(stepDivCap_half); half_float::half ce_half = half_float::half(ce); __half ce_dev = *(__half*)&ce_half; half_float::half cw_half = half_float::half(cw); __half cw_dev = *(__half*)&cw_half; half_float::half cn_half = half_float::half(cn); __half cn_dev = *(__half*)&cn_half; half_float::half cs_half = half_float::half(cs); __half cs_dev = *(__half*)&cs_half; half_float::half ct_half = half_float::half(ct); __half ct_dev = *(__half*)&ct_half; half_float::half cb_half = half_float::half(cb); __half cb_dev = *(__half*)&cb_half; half_float::half cc_half = half_float::half(cc); __half cc_dev = *(__half*)&cc_half; long long start = get_time(); for (int i = 0; i < numiter; ++i) { hotspotOpt1<<<grid_dim, block_dim>>> (p_d, tIn_d, tOut_d, stdc, nx, ny, nz, ce_dev, cw_dev, cn_dev, cs_dev, ct_dev, cb_dev, cc_dev); __half *t = tIn_d; tIn_d = tOut_d; tOut_d = t; } cudaDeviceSynchronize(); long long stop = get_time(); float time = (float)((stop - start)/(1000.0 * 1000.0)); printf("Time: %.3f (s)\n",time); cudaMemcpy(tOut_half, tOut_d, s, cudaMemcpyDeviceToHost); for(int i = 0; i < nx * ny * nz; i++ ){ tOut [i] = float(tOut_half[i]); } cudaFree(p_d); 
cudaFree(tIn_d); cudaFree(tOut_d); return; }
a850152cd9e0087547bbc1f0d0ad9f3be855537a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #if GOOGLE_CUDA #define EIGEN_USE_GPU #include<cassert> __device__ inline void swapf(float & a, float & b) { a += b ; b = a - b; a -= b; } __device__ inline void swap(int & a, int & b) { a += b ; b = a - b; a -= b; } __global__ void KnnKernel( int b, const int n,const int dim,const float * xyz,const int k,float* tmpd,int* tmpi,float * result,int * result_i) { float* dist = tmpd + ( blockIdx.x + blockIdx.y*gridDim.x )*n; int* idx = tmpi + ( blockIdx.x + blockIdx.y*gridDim.x )*n; for ( int bi = blockIdx.x ; bi < b ; bi += gridDim.x ) { for ( int i = blockIdx.y ; i < n ; i += gridDim.y ) { for ( int j = threadIdx.x ; j < n ; j += blockDim.x ) { if( i == j ){ dist[j] = 0; idx[j] = j; continue; } dist[j] = 0.0; for ( int dimi = 0 ; dimi < dim ; ++dimi ) { float dif = xyz[(bi*n+i)*dim+dimi] - xyz[(bi*n+j)*dim+dimi]; dist[j] += dif*dif; } idx[j] = j; } __syncthreads(); //odd-even sort int pownum = int(log2(float(n))); if ( n != pow(2, pownum) ) { for ( int cnt = 0 ; cnt < ( n + 1 ) / 2 ; ++cnt ) { for ( int j = 2*threadIdx.x + 1 ; j < n ; j += 2*blockDim.x ) { if ( dist[j] < dist[ j - 1 ] ) { swapf(dist[j], dist[j-1]); swap(idx[j], idx[j-1]); } } __syncthreads(); for ( int j = 2*threadIdx.x + 2 ; j < n ; j += 2*blockDim.x ) { if ( dist[j] < dist[ j - 1 ] ) { swapf(dist[j], dist[j-1]); swap(idx[j], idx[j-1]); } } __syncthreads(); } }else{ //Bitonic Sort for (unsigned int t = 2; t <= n ; t *= 2) { // Bitonic merge: for (unsigned int j = t / 2; j>0; j /= 2) { for (unsigned int tid = threadIdx.x ; tid < n ; tid += blockDim.x ) { unsigned int ixj = tid ^ j; if (ixj > tid) { if ((tid & t) == 0) { if (dist[tid] > dist[ixj]) { swapf(dist[tid], dist[ixj]); swap(idx[tid], idx[ixj]); } } else { if (dist[tid] < dist[ixj]) { swapf(dist[tid], dist[ixj]); swap(idx[tid], idx[ixj]); } } } } __syncthreads(); } } } __syncthreads(); //copy result for ( int j = threadIdx.x ; j < k ; j += blockDim.x ) { 
result[(bi*n+i)*k+j] = dist[j]; result_i[ ((bi*n+i)*k+j)*2+0 ] = bi; result_i[ ((bi*n+i)*k+j)*2+1 ] = idx[j]; } } } } void KnnKernelLauncher(const int b,const int subn, const int n,const int dim,const float * xyz,const int k,float* tmpd,int* tmpi,float * result,int * result_i){ hipLaunchKernelGGL(( KnnKernel), dim3(dim3(b,subn,1)),dim3(512), 0, 0, b,n,dim,xyz,k,tmpd,tmpi,result,result_i); } #endif
a850152cd9e0087547bbc1f0d0ad9f3be855537a.cu
#if GOOGLE_CUDA #define EIGEN_USE_GPU #include<cassert> __device__ inline void swapf(float & a, float & b) { a += b ; b = a - b; a -= b; } __device__ inline void swap(int & a, int & b) { a += b ; b = a - b; a -= b; } __global__ void KnnKernel( int b, const int n,const int dim,const float * xyz,const int k,float* tmpd,int* tmpi,float * result,int * result_i) { float* dist = tmpd + ( blockIdx.x + blockIdx.y*gridDim.x )*n; int* idx = tmpi + ( blockIdx.x + blockIdx.y*gridDim.x )*n; for ( int bi = blockIdx.x ; bi < b ; bi += gridDim.x ) { for ( int i = blockIdx.y ; i < n ; i += gridDim.y ) { for ( int j = threadIdx.x ; j < n ; j += blockDim.x ) { if( i == j ){ dist[j] = 0; idx[j] = j; continue; } dist[j] = 0.0; for ( int dimi = 0 ; dimi < dim ; ++dimi ) { float dif = xyz[(bi*n+i)*dim+dimi] - xyz[(bi*n+j)*dim+dimi]; dist[j] += dif*dif; } idx[j] = j; } __syncthreads(); //odd-even sort int pownum = int(log2(float(n))); if ( n != pow(2, pownum) ) { for ( int cnt = 0 ; cnt < ( n + 1 ) / 2 ; ++cnt ) { for ( int j = 2*threadIdx.x + 1 ; j < n ; j += 2*blockDim.x ) { if ( dist[j] < dist[ j - 1 ] ) { swapf(dist[j], dist[j-1]); swap(idx[j], idx[j-1]); } } __syncthreads(); for ( int j = 2*threadIdx.x + 2 ; j < n ; j += 2*blockDim.x ) { if ( dist[j] < dist[ j - 1 ] ) { swapf(dist[j], dist[j-1]); swap(idx[j], idx[j-1]); } } __syncthreads(); } }else{ //Bitonic Sort for (unsigned int t = 2; t <= n ; t *= 2) { // Bitonic merge: for (unsigned int j = t / 2; j>0; j /= 2) { for (unsigned int tid = threadIdx.x ; tid < n ; tid += blockDim.x ) { unsigned int ixj = tid ^ j; if (ixj > tid) { if ((tid & t) == 0) { if (dist[tid] > dist[ixj]) { swapf(dist[tid], dist[ixj]); swap(idx[tid], idx[ixj]); } } else { if (dist[tid] < dist[ixj]) { swapf(dist[tid], dist[ixj]); swap(idx[tid], idx[ixj]); } } } } __syncthreads(); } } } __syncthreads(); //copy result for ( int j = threadIdx.x ; j < k ; j += blockDim.x ) { result[(bi*n+i)*k+j] = dist[j]; result_i[ ((bi*n+i)*k+j)*2+0 ] = bi; result_i[ 
((bi*n+i)*k+j)*2+1 ] = idx[j]; } } } } void KnnKernelLauncher(const int b,const int subn, const int n,const int dim,const float * xyz,const int k,float* tmpd,int* tmpi,float * result,int * result_i){ KnnKernel<<<dim3(b,subn,1),512>>>(b,n,dim,xyz,k,tmpd,tmpi,result,result_i); } #endif
abf45d26baf2741d9500348b7dbab96b41d8db18.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file sequence_mask.cu * \brief * \author Sebastian Bodenstein */ #include "./sequence_mask-inl.h" namespace mshadow { namespace cuda { //////////////////////////////////////////////////////////////////////////////// // Cross-Entropy loss template<int n_bits, typename DType> __global__ void SequenceMaskKernel(Tensor<gpu, 3, DType> dst, const Tensor<gpu, 1, DType> lengths, DType value) { const index_t smax = dst.size(0); const index_t bmax = lengths.size(1); const index_t nmax = dst.size(2); unsigned int batch = threadIdx.x + blockIdx.x * blockDim.x; // early return if out of bounds if (batch >= bmax) return; // loop over batches for (index_t s = lengths[batch]; s < smax; ++s) for (index_t r = 0; r < nmax; ++r) dst[s][batch][r] = value; } //////////////////////////////////////////////////////////////////////////////// template<typename DType> inline void SequenceMask(const Tensor<gpu, 3, DType> &dst, const Tensor<gpu, 1, DType> &lengths, DType value) { dim3 dimBlock(kBaseThreadNum); dim3 dimGrid(dst.size(1)); CheckLaunchParam(dimGrid, dimBlock, "SequenceMask"); hipStream_t stream = 
Stream<gpu>::GetStream(dst.stream_); hipLaunchKernelGGL(( SequenceMaskKernel<kBaseThreadBits, DType>), dim3(dimGrid), dim3(dimBlock), 0, stream, dst, lengths, value); } //////////////////////////////////////////////////////////////////////////////// } // namespace cuda template<typename DType> inline void SequenceMask(Tensor<gpu, 3, DType> dst, const Tensor<gpu, 1, DType> &lengths, DType value) { cuda::SequenceMask(dst, lengths, value); } } // namespace mshadow //////////////////////////////////////////////////////////////////////////////// namespace mxnet { namespace op { template <> Operator *CreateOp<gpu>(SequenceMaskParam param, int dtype) { Operator *op = NULL; MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { op = new SequenceMaskOp<gpu, DType>(param); }) return op; } } // namespace op } // namespace mxnet
abf45d26baf2741d9500348b7dbab96b41d8db18.cu
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file sequence_mask.cu * \brief * \author Sebastian Bodenstein */ #include "./sequence_mask-inl.h" namespace mshadow { namespace cuda { //////////////////////////////////////////////////////////////////////////////// // Cross-Entropy loss template<int n_bits, typename DType> __global__ void SequenceMaskKernel(Tensor<gpu, 3, DType> dst, const Tensor<gpu, 1, DType> lengths, DType value) { const index_t smax = dst.size(0); const index_t bmax = lengths.size(1); const index_t nmax = dst.size(2); unsigned int batch = threadIdx.x + blockIdx.x * blockDim.x; // early return if out of bounds if (batch >= bmax) return; // loop over batches for (index_t s = lengths[batch]; s < smax; ++s) for (index_t r = 0; r < nmax; ++r) dst[s][batch][r] = value; } //////////////////////////////////////////////////////////////////////////////// template<typename DType> inline void SequenceMask(const Tensor<gpu, 3, DType> &dst, const Tensor<gpu, 1, DType> &lengths, DType value) { dim3 dimBlock(kBaseThreadNum); dim3 dimGrid(dst.size(1)); CheckLaunchParam(dimGrid, dimBlock, "SequenceMask"); cudaStream_t stream = Stream<gpu>::GetStream(dst.stream_); SequenceMaskKernel<kBaseThreadBits, DType><<<dimGrid, dimBlock, 0, 
stream>>>(dst, lengths, value); } //////////////////////////////////////////////////////////////////////////////// } // namespace cuda template<typename DType> inline void SequenceMask(Tensor<gpu, 3, DType> dst, const Tensor<gpu, 1, DType> &lengths, DType value) { cuda::SequenceMask(dst, lengths, value); } } // namespace mshadow //////////////////////////////////////////////////////////////////////////////// namespace mxnet { namespace op { template <> Operator *CreateOp<gpu>(SequenceMaskParam param, int dtype) { Operator *op = NULL; MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { op = new SequenceMaskOp<gpu, DType>(param); }) return op; } } // namespace op } // namespace mxnet
bd2fc3f6a99a3d50c70fa31dc0145ebcf351fafd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef WITH_CUDA #include "core/context_cuda.h" #include "utils/op_kernel.h" namespace dragon { namespace kernel { /*! SigmoidCrossEntropy <T = float32, Device = CUDA> */ template <typename T> __global__ void _SigmoidCrossEntropy( const int count, const T* logits, const T* targets, T* losses, int* flags) { CUDA_1D_KERNEL_LOOP(idx, count) { if (targets[idx] < 0) { losses[idx] = flags[idx] = 0; } else { losses[idx] = log(1 + exp(logits[idx] - 2 * logits[idx] * (logits[idx] >= 0)) ) + logits[idx] * ((logits[idx] >= 0) - targets[idx]); flags[idx] = 1; } } } template <> void SigmoidCrossEntropy<float, CUDAContext>( const int count, const float* logits, const float* targets, float* losses, int* flags, CUDAContext* ctx) { _SigmoidCrossEntropy<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> > (count, logits, targets, losses, flags); } /*! SigmoidCrossEntropyGrad <T = float32, Device = CUDA> */ template <typename T> __global__ void _SigmoidCrossEntropyGrad( const int count, const T* logits, const T* targets, T* dlogits, int* flags) { CUDA_1D_KERNEL_LOOP(idx, count) { if (targets[idx] < 0) { dlogits[idx] = flags[idx] = 0; } else { dlogits[idx] = 1 / (1 + exp(-logits[idx])) - targets[idx]; flags[idx] = 1; } } } template <> void SigmoidCrossEntropyGrad<float, CUDAContext>( const int count, const float* logits, const float* targets, float* dlogits, int* flags, CUDAContext* ctx) { _SigmoidCrossEntropyGrad<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> > (count, logits, targets, dlogits, flags); } } // namespace kernel } // namepsace dragon #endif // WITH_CUDA
bd2fc3f6a99a3d50c70fa31dc0145ebcf351fafd.cu
#ifdef WITH_CUDA #include "core/context_cuda.h" #include "utils/op_kernel.h" namespace dragon { namespace kernel { /*! SigmoidCrossEntropy <T = float32, Device = CUDA> */ template <typename T> __global__ void _SigmoidCrossEntropy( const int count, const T* logits, const T* targets, T* losses, int* flags) { CUDA_1D_KERNEL_LOOP(idx, count) { if (targets[idx] < 0) { losses[idx] = flags[idx] = 0; } else { losses[idx] = log(1 + exp(logits[idx] - 2 * logits[idx] * (logits[idx] >= 0)) ) + logits[idx] * ((logits[idx] >= 0) - targets[idx]); flags[idx] = 1; } } } template <> void SigmoidCrossEntropy<float, CUDAContext>( const int count, const float* logits, const float* targets, float* losses, int* flags, CUDAContext* ctx) { _SigmoidCrossEntropy<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> > (count, logits, targets, losses, flags); } /*! SigmoidCrossEntropyGrad <T = float32, Device = CUDA> */ template <typename T> __global__ void _SigmoidCrossEntropyGrad( const int count, const T* logits, const T* targets, T* dlogits, int* flags) { CUDA_1D_KERNEL_LOOP(idx, count) { if (targets[idx] < 0) { dlogits[idx] = flags[idx] = 0; } else { dlogits[idx] = 1 / (1 + exp(-logits[idx])) - targets[idx]; flags[idx] = 1; } } } template <> void SigmoidCrossEntropyGrad<float, CUDAContext>( const int count, const float* logits, const float* targets, float* dlogits, int* flags, CUDAContext* ctx) { _SigmoidCrossEntropyGrad<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> > (count, logits, targets, dlogits, flags); } } // namespace kernel } // namepsace dragon #endif // WITH_CUDA
615a21bbd0544ca859477c12c17efcdede30be11.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hiprand/hiprand.h" #include "rocblas.h" // extern "C" { #include "avgpool_layer.h" #include "hip/hip_runtime.h" // } __global__ void forward_avgpool_layer_kernel(int n, int w, int h, int c, real_device *input, real_device *output) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(id >= n) return; int k = id % c; id /= c; int b = id; int i; int out_index = (k + c*b); output[out_index] = 0; for(i = 0; i < w*h; ++i){ int in_index = i + h*w*(k + b*c); output[out_index] += input[in_index]; } output[out_index] /= w*h; } __global__ void backward_avgpool_layer_kernel(int n, int w, int h, int c, real_device *in_delta, real_device *out_delta) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(id >= n) return; int k = id % c; id /= c; int b = id; int i; int out_index = (k + c*b); for(i = 0; i < w*h; ++i){ int in_index = i + h*w*(k + b*c); in_delta[in_index] += out_delta[out_index] / CAST_DEV(w*h); } } void forward_avgpool_layer_gpu(avgpool_layer layer, network net) { size_t n = layer.c*layer.batch; hipLaunchKernelGGL(( forward_avgpool_layer_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, layer.w, layer.h, layer.c, (real_device*)net.input_gpu, (real_device*)layer.output_gpu); check_error(hipPeekAtLastError()); } void backward_avgpool_layer_gpu(avgpool_layer layer, network net) { size_t n = layer.c*layer.batch; hipLaunchKernelGGL(( backward_avgpool_layer_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, layer.w, layer.h, layer.c, (real_device*)net.delta_gpu, (real_device*)layer.delta_gpu); check_error(hipPeekAtLastError()); }
615a21bbd0544ca859477c12c17efcdede30be11.cu
#include "cuda_runtime.h" #include "curand.h" #include "cublas_v2.h" // extern "C" { #include "avgpool_layer.h" #include "cuda.h" // } __global__ void forward_avgpool_layer_kernel(int n, int w, int h, int c, real_device *input, real_device *output) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(id >= n) return; int k = id % c; id /= c; int b = id; int i; int out_index = (k + c*b); output[out_index] = 0; for(i = 0; i < w*h; ++i){ int in_index = i + h*w*(k + b*c); output[out_index] += input[in_index]; } output[out_index] /= w*h; } __global__ void backward_avgpool_layer_kernel(int n, int w, int h, int c, real_device *in_delta, real_device *out_delta) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(id >= n) return; int k = id % c; id /= c; int b = id; int i; int out_index = (k + c*b); for(i = 0; i < w*h; ++i){ int in_index = i + h*w*(k + b*c); in_delta[in_index] += out_delta[out_index] / CAST_DEV(w*h); } } void forward_avgpool_layer_gpu(avgpool_layer layer, network net) { size_t n = layer.c*layer.batch; forward_avgpool_layer_kernel<<<cuda_gridsize(n), BLOCK>>>(n, layer.w, layer.h, layer.c, (real_device*)net.input_gpu, (real_device*)layer.output_gpu); check_error(cudaPeekAtLastError()); } void backward_avgpool_layer_gpu(avgpool_layer layer, network net) { size_t n = layer.c*layer.batch; backward_avgpool_layer_kernel<<<cuda_gridsize(n), BLOCK>>>(n, layer.w, layer.h, layer.c, (real_device*)net.delta_gpu, (real_device*)layer.delta_gpu); check_error(cudaPeekAtLastError()); }
c5cd04eed342d2ed31c0313f3f2e9a7a69c860a9.hip
// !!! This is a file automatically generated by hipify!!! /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file ndarray_function.cu * \brief GPU Implementation of ndarray function. */ // this will be invoked by nvcc and compile GPU version #include <hipcub/hipcub.hpp> #include <dmlc/logging.h> #include "../operator/tensor/elemwise_binary_op-inl.h" #include "../operator/tensor/elemwise_sum.h" #include "../operator/tensor/indexing_op.h" #include "../operator/tensor/init_op.h" #include "../operator/tensor/util/tensor_util-inl.h" #include "../operator/tensor/util/tensor_util-inl.cuh" #include "../common/cuda_utils.h" #include "./ndarray_function.h" #include "./ndarray_function-inl.h" #include "./ndarray_function-inl.cuh" namespace mxnet { namespace ndarray { template<> void Copy<cpu, gpu>(const TBlob &from, TBlob *to, Context from_ctx, Context to_ctx, RunContext ctx) { CHECK_EQ(to->type_flag_, from.type_flag_) << "Source and target must have the same data type when copying across devices."; MSHADOW_TYPE_SWITCH_WITH_BOOL(to->type_flag_, DType, { mshadow::Copy(to->FlatTo1D<gpu, DType>(), from.FlatTo1D<cpu, DType>(), ctx.get_stream<gpu>()); }); } template<> void Copy<gpu, cpu>(const TBlob &from, TBlob *to, Context from_ctx, Context to_ctx, 
RunContext ctx) { CHECK_EQ(to->type_flag_, from.type_flag_) << "Source and target must have the same data type when copying across devices."; MSHADOW_TYPE_SWITCH_WITH_BOOL(to->type_flag_, DType, { mshadow::Copy(to->FlatTo1D<cpu, DType>(), from.FlatTo1D<gpu, DType>(), ctx.get_stream<gpu>()); }); } template<> void Copy<gpu, gpu>(const TBlob &from, TBlob *to, Context from_ctx, Context to_ctx, RunContext ctx) { if (from_ctx.dev_id == to_ctx.dev_id) { mshadow::Stream<gpu>* s = ctx.get_stream<gpu>(); MSHADOW_TYPE_SWITCH_WITH_BOOL(to->type_flag_, DType, { if (to->type_flag_ == from.type_flag_) { mshadow::Copy(to->FlatTo1D<gpu, DType>(s), from.FlatTo1D<gpu, DType>(s), s); } else { MSHADOW_TYPE_SWITCH(from.type_flag_, SrcDType, { to->FlatTo1D<gpu, DType>(s) = mshadow::expr::tcast<DType>(from.FlatTo1D<gpu, SrcDType>(s)); }) } }) } else { CHECK(from.CheckContiguous() && to->CheckContiguous()) << "copy across only support continugous memory"; CHECK_EQ(to->type_flag_, from.type_flag_) << "Source and target must have the same data type when copying across devices."; mshadow::Stream<gpu> *s = ctx.get_stream<gpu>(); CHECK(s != NULL) << "need stream in GPU context"; hipMemcpyPeerAsync(to->dptr_, to_ctx.dev_id, from.dptr_, from_ctx.dev_id, from.shape_.Size() * mshadow::mshadow_sizeof(to->type_flag_), s->stream_); } } /*! * \brief GPU impl of elemwise sum for rowsparse tensors. 
*/ void ElementwiseSumRspImpl(mshadow::Stream<gpu>* s, const Resource& rsc, const std::vector<NDArray>& nds, NDArray* out) { using namespace mxnet::op; using namespace rowsparse; using nnvm::dim_t; CHECK_EQ(out->storage_type(), kRowSparseStorage) << "Expected rowsparse storage_type (" << out->storage_type() << " given)"; int init = 0; for (const auto& nd : nds) { if (nd.storage_initialized()) { init++; break; } } if (init == 0) { FillZerosRspImpl(s, *out); return; } const dim_t num_rows = out->shape()[0]; const dim_t row_length = out->shape().ProdShape(1, out->shape().ndim()); MSHADOW_TYPE_SWITCH(out->dtype(), DType, { // data type MSHADOW_IDX_TYPE_SWITCH(out->aux_type(kIdx), IType, { // row_idx type // Allocate temporary storage for row_flg array and cub's prefix sum operation IType* row_flg = NULL; void* d_temp_storage = NULL; size_t temp_storage_bytes = 0; hipcub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, row_flg, row_flg, num_rows, mshadow::Stream<gpu>::GetStream(s)); mshadow::Tensor<gpu, 1, char> workspace = rsc .get_space_typed<gpu, 1, char>(mshadow::Shape1(num_rows * sizeof(IType) + temp_storage_bytes), s); row_flg = reinterpret_cast<IType*>(workspace.dptr_); d_temp_storage = workspace.dptr_ + num_rows*sizeof(IType); // Mark row_flg array with 0 for zero rows and 1 for non-zero rows dim_t num_threads = num_rows; mxnet_op::Kernel<mxnet_op::set_zero, gpu>::Launch(s, num_threads, row_flg); for (const auto& nd : nds) { if (nd.storage_initialized()) { const IType* nd_row_idx = nd.aux_data(kIdx).dptr<IType>(); const dim_t nd_nnr = nd.storage_shape()[0]; num_threads = nd_nnr; mxnet_op::Kernel<MarkRspRowFlgKernel, gpu>::Launch(s, num_threads, row_flg, nd_row_idx, nd_nnr); } } // Compute inclusive prefix sum over row_flg hipcub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, row_flg, row_flg, num_rows, mshadow::Stream<gpu>::GetStream(s)); // Get total number of output non-zero rows from GPU and allocate out data and row_idx dim_t 
nnr_out = 0; CUDA_CALL(hipMemcpy(&nnr_out, &row_flg[num_rows-1], sizeof(dim_t), hipMemcpyDeviceToHost)); out->CheckAndAlloc({mshadow::Shape1(nnr_out)}); IType* out_row_idx = out->aux_data(kIdx).dptr<IType>(); DType* out_data = out->data().dptr<DType>(); // Fill row_idx array of output using row_flg num_threads = num_rows; mxnet_op::Kernel<FillRspRowIdxKernel, gpu>::Launch(s, num_threads, out_row_idx, row_flg, num_rows); // Perform elementwise addition, writing to output data num_threads = nnr_out * row_length; mxnet_op::Kernel<mxnet_op::set_zero, gpu>::Launch(s, num_threads, out_data); for (const auto& nd : nds) { if (nd.storage_initialized()) { const IType* nd_row_idx = nd.aux_data(kIdx).dptr<IType>(); const DType* nd_data = nd.data().dptr<DType>(); const dim_t nd_nnr = nd.storage_shape()[0]; num_threads = nd_nnr * row_length; mxnet_op::Kernel<ElementWiseRspAdditionKernel, gpu>::Launch(s, num_threads, out_data, row_flg, nd_row_idx, nd_data, nd_nnr, row_length); } } }); }); } void ElementwiseSumDnsCsrDnsImpl(mshadow::Stream<gpu>* s, const Resource& rsc, const std::vector<NDArray>& nds, NDArray* out) { using namespace mxnet::op; using namespace mxnet::op::mxnet_op; const TBlob& out_data = out->data(); MSHADOW_TYPE_SWITCH(out->dtype(), DType, { // data type Kernel<Sum, gpu>::Launch( s, out_data.Size(), out_data.dptr<DType>(), kWriteTo, nds[0].data().dptr<DType>(), nds[2].data().dptr<DType>()); const TBlob& csr_data = nds[1].data(); const TBlob& csr_indices = nds[1].aux_data(csr::kIdx); const TBlob& csr_indptr = nds[1].aux_data(csr::kIndPtr); const nnvm::dim_t num_rows = nds[1].shape()[0]; const nnvm::dim_t num_cols = nds[1].shape()[1]; MSHADOW_IDX_TYPE_SWITCH(csr_indices.type_flag_, IType, { // indices type MSHADOW_IDX_TYPE_SWITCH(csr_indptr.type_flag_, CType, { // indptr type if (nds[1].storage_initialized()) { Kernel<ElemwiseDnsCsrDnsWarpKernel<kWriteTo, mshadow_op::plus>, gpu>::Launch( s, kWarpSize * num_rows, out_data.dptr<DType>(), out_data.dptr<DType>(), 
csr_data.dptr<DType>(), csr_indices.dptr<IType>(), csr_indptr.dptr<CType>(), num_rows, num_cols); } }); }); }); } void ElementwiseSumContainsDnsImpl(mshadow::Stream<gpu>* s, const Resource& rsc, const std::vector<NDArray>& nds, NDArray* out) { using namespace mxnet::op; using namespace mxnet::op::mxnet_op; const TBlob& out_data = out->data(); MSHADOW_TYPE_SWITCH(out->dtype(), DType, { // data type for (size_t i = 0; i < nds.size(); ++i) { const NDArray& nd = nds[i]; const nnvm::dim_t num_rows = nd.shape()[0]; const nnvm::dim_t num_cols = nd.shape()[1]; const TBlob& nd_data = nd.data(); if (i == 0) { if (nd.storage_type() == kDefaultStorage) { Kernel<op_with_req<mshadow_op::identity, kWriteTo>, gpu>::Launch( s, out_data.Size(), out_data.dptr<DType>(), nd_data.dptr<DType>()); continue; } else { Kernel<set_zero, gpu>::Launch(s, out_data.Size(), out_data.dptr<DType>()); } } switch (nd.storage_type()) { case kDefaultStorage: { Kernel<op_with_req<mshadow_op::plus, kWriteTo>, gpu>::Launch( s, out_data.Size(), out_data.dptr<DType>(), out_data.dptr<DType>(), nd_data.dptr<DType>()); break; } case kCSRStorage: { const TBlob& nd_indices = nd.aux_data(csr::kIdx); const TBlob& nd_indptr = nd.aux_data(csr::kIndPtr); MSHADOW_IDX_TYPE_SWITCH(nd_indices.type_flag_, IType, { // indices type MSHADOW_IDX_TYPE_SWITCH(nd_indptr.type_flag_, CType, { // indptr type if (nd.storage_initialized()) { Kernel<ElemwiseDnsCsrDnsWarpKernel<kWriteTo, mshadow_op::plus>, gpu>::Launch( s, kWarpSize * num_rows, out_data.dptr<DType>(), out_data.dptr<DType>(), nd_data.dptr<DType>(), nd_indices.dptr<IType>(), nd_indptr.dptr<CType>(), num_rows, num_cols); } }); }); break; } case kRowSparseStorage: { const TBlob& nd_indices = nd.aux_data(rowsparse::kIdx); MSHADOW_IDX_TYPE_SWITCH(nd_indices.type_flag_, IType, { // indices type if (nd.storage_initialized()) { const nnvm::dim_t nz_rows = nd_indices.Size(); Kernel<ElemwiseDnsRspDnsKernel<kWriteTo, mshadow_op::plus>, gpu>::Launch( s, nz_rows * num_cols, 
out_data.dptr<DType>(), out_data.dptr<DType>(), nd_data.dptr<DType>(), nd_indices.dptr<IType>(), num_rows, nz_rows, num_cols); } }); break; } default: LOG(FATAL) << "unknown storage type " << nd.storage_type() << "encountered..."; } } }); } /*! * \brief Parallel gpu impl of elemwise sum for sparse tensors. * Currently only support row sparse sum. */ template<> void ElementwiseSum<gpu>(mshadow::Stream<gpu>* s, const Resource& rsc, const std::vector<NDArray>& nds, NDArray* out) { if (nds.empty()) return; if (common::ContainsOnlyStorage(nds, kRowSparseStorage)) { ElementwiseSumRspImpl(s, rsc, nds, out); } else if (nds.size() == 3U && nds[0].storage_type() == kDefaultStorage && nds[1].storage_type() == kCSRStorage && nds[2].storage_type() == kDefaultStorage && out->storage_type() == kDefaultStorage) { ElementwiseSumDnsCsrDnsImpl(s, rsc, nds, out); } else if (nds.size() > 4U && common::ContainsStorageType(nds, kDefaultStorage) && out->storage_type() == kDefaultStorage) { ElementwiseSumContainsDnsImpl(s, rsc, nds, out); } else { LOG(FATAL) << "ElementwiseSum<gpu> has not been implemented for storage_type = << " << nds[0].storage_type(); } } template<> void Eval<gpu>(mshadow::Stream<gpu> *s, const real_t val, const NDArray& dst) { NDArray temp = dst; const NDArrayStorageType stype = temp.storage_type(); if (stype == kRowSparseStorage) { SetValueRspImpl(s, val, &temp); } else { LOG(FATAL) << "Not implemented for storage type" << stype; } } } // namespace ndarray } // namespace mxnet
c5cd04eed342d2ed31c0313f3f2e9a7a69c860a9.cu
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file ndarray_function.cu * \brief GPU Implementation of ndarray function. */ // this will be invoked by nvcc and compile GPU version #include <cub/cub.cuh> #include <dmlc/logging.h> #include "../operator/tensor/elemwise_binary_op-inl.h" #include "../operator/tensor/elemwise_sum.h" #include "../operator/tensor/indexing_op.h" #include "../operator/tensor/init_op.h" #include "../operator/tensor/util/tensor_util-inl.h" #include "../operator/tensor/util/tensor_util-inl.cuh" #include "../common/cuda_utils.h" #include "./ndarray_function.h" #include "./ndarray_function-inl.h" #include "./ndarray_function-inl.cuh" namespace mxnet { namespace ndarray { template<> void Copy<cpu, gpu>(const TBlob &from, TBlob *to, Context from_ctx, Context to_ctx, RunContext ctx) { CHECK_EQ(to->type_flag_, from.type_flag_) << "Source and target must have the same data type when copying across devices."; MSHADOW_TYPE_SWITCH_WITH_BOOL(to->type_flag_, DType, { mshadow::Copy(to->FlatTo1D<gpu, DType>(), from.FlatTo1D<cpu, DType>(), ctx.get_stream<gpu>()); }); } template<> void Copy<gpu, cpu>(const TBlob &from, TBlob *to, Context from_ctx, Context to_ctx, RunContext ctx) { CHECK_EQ(to->type_flag_, from.type_flag_) << 
"Source and target must have the same data type when copying across devices."; MSHADOW_TYPE_SWITCH_WITH_BOOL(to->type_flag_, DType, { mshadow::Copy(to->FlatTo1D<cpu, DType>(), from.FlatTo1D<gpu, DType>(), ctx.get_stream<gpu>()); }); } template<> void Copy<gpu, gpu>(const TBlob &from, TBlob *to, Context from_ctx, Context to_ctx, RunContext ctx) { if (from_ctx.dev_id == to_ctx.dev_id) { mshadow::Stream<gpu>* s = ctx.get_stream<gpu>(); MSHADOW_TYPE_SWITCH_WITH_BOOL(to->type_flag_, DType, { if (to->type_flag_ == from.type_flag_) { mshadow::Copy(to->FlatTo1D<gpu, DType>(s), from.FlatTo1D<gpu, DType>(s), s); } else { MSHADOW_TYPE_SWITCH(from.type_flag_, SrcDType, { to->FlatTo1D<gpu, DType>(s) = mshadow::expr::tcast<DType>(from.FlatTo1D<gpu, SrcDType>(s)); }) } }) } else { CHECK(from.CheckContiguous() && to->CheckContiguous()) << "copy across only support continugous memory"; CHECK_EQ(to->type_flag_, from.type_flag_) << "Source and target must have the same data type when copying across devices."; mshadow::Stream<gpu> *s = ctx.get_stream<gpu>(); CHECK(s != NULL) << "need stream in GPU context"; cudaMemcpyPeerAsync(to->dptr_, to_ctx.dev_id, from.dptr_, from_ctx.dev_id, from.shape_.Size() * mshadow::mshadow_sizeof(to->type_flag_), s->stream_); } } /*! * \brief GPU impl of elemwise sum for rowsparse tensors. 
*/ void ElementwiseSumRspImpl(mshadow::Stream<gpu>* s, const Resource& rsc, const std::vector<NDArray>& nds, NDArray* out) { using namespace mxnet::op; using namespace rowsparse; using nnvm::dim_t; CHECK_EQ(out->storage_type(), kRowSparseStorage) << "Expected rowsparse storage_type (" << out->storage_type() << " given)"; int init = 0; for (const auto& nd : nds) { if (nd.storage_initialized()) { init++; break; } } if (init == 0) { FillZerosRspImpl(s, *out); return; } const dim_t num_rows = out->shape()[0]; const dim_t row_length = out->shape().ProdShape(1, out->shape().ndim()); MSHADOW_TYPE_SWITCH(out->dtype(), DType, { // data type MSHADOW_IDX_TYPE_SWITCH(out->aux_type(kIdx), IType, { // row_idx type // Allocate temporary storage for row_flg array and cub's prefix sum operation IType* row_flg = NULL; void* d_temp_storage = NULL; size_t temp_storage_bytes = 0; cub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, row_flg, row_flg, num_rows, mshadow::Stream<gpu>::GetStream(s)); mshadow::Tensor<gpu, 1, char> workspace = rsc .get_space_typed<gpu, 1, char>(mshadow::Shape1(num_rows * sizeof(IType) + temp_storage_bytes), s); row_flg = reinterpret_cast<IType*>(workspace.dptr_); d_temp_storage = workspace.dptr_ + num_rows*sizeof(IType); // Mark row_flg array with 0 for zero rows and 1 for non-zero rows dim_t num_threads = num_rows; mxnet_op::Kernel<mxnet_op::set_zero, gpu>::Launch(s, num_threads, row_flg); for (const auto& nd : nds) { if (nd.storage_initialized()) { const IType* nd_row_idx = nd.aux_data(kIdx).dptr<IType>(); const dim_t nd_nnr = nd.storage_shape()[0]; num_threads = nd_nnr; mxnet_op::Kernel<MarkRspRowFlgKernel, gpu>::Launch(s, num_threads, row_flg, nd_row_idx, nd_nnr); } } // Compute inclusive prefix sum over row_flg cub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, row_flg, row_flg, num_rows, mshadow::Stream<gpu>::GetStream(s)); // Get total number of output non-zero rows from GPU and allocate out data and row_idx dim_t nnr_out = 
0; CUDA_CALL(cudaMemcpy(&nnr_out, &row_flg[num_rows-1], sizeof(dim_t), cudaMemcpyDeviceToHost)); out->CheckAndAlloc({mshadow::Shape1(nnr_out)}); IType* out_row_idx = out->aux_data(kIdx).dptr<IType>(); DType* out_data = out->data().dptr<DType>(); // Fill row_idx array of output using row_flg num_threads = num_rows; mxnet_op::Kernel<FillRspRowIdxKernel, gpu>::Launch(s, num_threads, out_row_idx, row_flg, num_rows); // Perform elementwise addition, writing to output data num_threads = nnr_out * row_length; mxnet_op::Kernel<mxnet_op::set_zero, gpu>::Launch(s, num_threads, out_data); for (const auto& nd : nds) { if (nd.storage_initialized()) { const IType* nd_row_idx = nd.aux_data(kIdx).dptr<IType>(); const DType* nd_data = nd.data().dptr<DType>(); const dim_t nd_nnr = nd.storage_shape()[0]; num_threads = nd_nnr * row_length; mxnet_op::Kernel<ElementWiseRspAdditionKernel, gpu>::Launch(s, num_threads, out_data, row_flg, nd_row_idx, nd_data, nd_nnr, row_length); } } }); }); } void ElementwiseSumDnsCsrDnsImpl(mshadow::Stream<gpu>* s, const Resource& rsc, const std::vector<NDArray>& nds, NDArray* out) { using namespace mxnet::op; using namespace mxnet::op::mxnet_op; const TBlob& out_data = out->data(); MSHADOW_TYPE_SWITCH(out->dtype(), DType, { // data type Kernel<Sum, gpu>::Launch( s, out_data.Size(), out_data.dptr<DType>(), kWriteTo, nds[0].data().dptr<DType>(), nds[2].data().dptr<DType>()); const TBlob& csr_data = nds[1].data(); const TBlob& csr_indices = nds[1].aux_data(csr::kIdx); const TBlob& csr_indptr = nds[1].aux_data(csr::kIndPtr); const nnvm::dim_t num_rows = nds[1].shape()[0]; const nnvm::dim_t num_cols = nds[1].shape()[1]; MSHADOW_IDX_TYPE_SWITCH(csr_indices.type_flag_, IType, { // indices type MSHADOW_IDX_TYPE_SWITCH(csr_indptr.type_flag_, CType, { // indptr type if (nds[1].storage_initialized()) { Kernel<ElemwiseDnsCsrDnsWarpKernel<kWriteTo, mshadow_op::plus>, gpu>::Launch( s, kWarpSize * num_rows, out_data.dptr<DType>(), out_data.dptr<DType>(), 
csr_data.dptr<DType>(), csr_indices.dptr<IType>(), csr_indptr.dptr<CType>(), num_rows, num_cols); } }); }); }); } void ElementwiseSumContainsDnsImpl(mshadow::Stream<gpu>* s, const Resource& rsc, const std::vector<NDArray>& nds, NDArray* out) { using namespace mxnet::op; using namespace mxnet::op::mxnet_op; const TBlob& out_data = out->data(); MSHADOW_TYPE_SWITCH(out->dtype(), DType, { // data type for (size_t i = 0; i < nds.size(); ++i) { const NDArray& nd = nds[i]; const nnvm::dim_t num_rows = nd.shape()[0]; const nnvm::dim_t num_cols = nd.shape()[1]; const TBlob& nd_data = nd.data(); if (i == 0) { if (nd.storage_type() == kDefaultStorage) { Kernel<op_with_req<mshadow_op::identity, kWriteTo>, gpu>::Launch( s, out_data.Size(), out_data.dptr<DType>(), nd_data.dptr<DType>()); continue; } else { Kernel<set_zero, gpu>::Launch(s, out_data.Size(), out_data.dptr<DType>()); } } switch (nd.storage_type()) { case kDefaultStorage: { Kernel<op_with_req<mshadow_op::plus, kWriteTo>, gpu>::Launch( s, out_data.Size(), out_data.dptr<DType>(), out_data.dptr<DType>(), nd_data.dptr<DType>()); break; } case kCSRStorage: { const TBlob& nd_indices = nd.aux_data(csr::kIdx); const TBlob& nd_indptr = nd.aux_data(csr::kIndPtr); MSHADOW_IDX_TYPE_SWITCH(nd_indices.type_flag_, IType, { // indices type MSHADOW_IDX_TYPE_SWITCH(nd_indptr.type_flag_, CType, { // indptr type if (nd.storage_initialized()) { Kernel<ElemwiseDnsCsrDnsWarpKernel<kWriteTo, mshadow_op::plus>, gpu>::Launch( s, kWarpSize * num_rows, out_data.dptr<DType>(), out_data.dptr<DType>(), nd_data.dptr<DType>(), nd_indices.dptr<IType>(), nd_indptr.dptr<CType>(), num_rows, num_cols); } }); }); break; } case kRowSparseStorage: { const TBlob& nd_indices = nd.aux_data(rowsparse::kIdx); MSHADOW_IDX_TYPE_SWITCH(nd_indices.type_flag_, IType, { // indices type if (nd.storage_initialized()) { const nnvm::dim_t nz_rows = nd_indices.Size(); Kernel<ElemwiseDnsRspDnsKernel<kWriteTo, mshadow_op::plus>, gpu>::Launch( s, nz_rows * num_cols, 
out_data.dptr<DType>(), out_data.dptr<DType>(), nd_data.dptr<DType>(), nd_indices.dptr<IType>(), num_rows, nz_rows, num_cols); } }); break; } default: LOG(FATAL) << "unknown storage type " << nd.storage_type() << "encountered..."; } } }); } /*! * \brief Parallel gpu impl of elemwise sum for sparse tensors. * Currently only support row sparse sum. */ template<> void ElementwiseSum<gpu>(mshadow::Stream<gpu>* s, const Resource& rsc, const std::vector<NDArray>& nds, NDArray* out) { if (nds.empty()) return; if (common::ContainsOnlyStorage(nds, kRowSparseStorage)) { ElementwiseSumRspImpl(s, rsc, nds, out); } else if (nds.size() == 3U && nds[0].storage_type() == kDefaultStorage && nds[1].storage_type() == kCSRStorage && nds[2].storage_type() == kDefaultStorage && out->storage_type() == kDefaultStorage) { ElementwiseSumDnsCsrDnsImpl(s, rsc, nds, out); } else if (nds.size() > 4U && common::ContainsStorageType(nds, kDefaultStorage) && out->storage_type() == kDefaultStorage) { ElementwiseSumContainsDnsImpl(s, rsc, nds, out); } else { LOG(FATAL) << "ElementwiseSum<gpu> has not been implemented for storage_type = << " << nds[0].storage_type(); } } template<> void Eval<gpu>(mshadow::Stream<gpu> *s, const real_t val, const NDArray& dst) { NDArray temp = dst; const NDArrayStorageType stype = temp.storage_type(); if (stype == kRowSparseStorage) { SetValueRspImpl(s, val, &temp); } else { LOG(FATAL) << "Not implemented for storage type" << stype; } } } // namespace ndarray } // namespace mxnet
c39b251dfe6ddc9e7433732f750b04bbfa457b2e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #ifdef INFINITY /* INFINITY is supported */ #endif float **A, **D, *d2; //Table A distance, D minimum distance,d2 tempTable 1-d void makeAdjacency(int n, float p, int w); //kernel __global__ void calc(float *d_D, int n, int k){ int i = blockIdx.x * blockDim.x + threadIdx.x; //We find i & j in the Grid of threads int j = blockIdx.y * blockDim.y + threadIdx.y; if (d_D[i + j*n] > d_D[i + k*n] + d_D[k + j*n]) d_D[i + j*n] = d_D[i + k*n] + d_D[k + j*n]; //Every thread calculates its proper value } int main(int argc, char **argv){ int N, w; float p; N = atoi(argv[1]); //Read the console inputs p = atof(argv[2]); w = atoi(argv[3]); int n = pow(2, N); makeAdjacency(n, p, w); //Initialize table A clock_t start = clock(); //First time measurement int i, j, k; D = (float **)malloc(n*sizeof(float *)); //Allocation for table D for (i = 0; i<n; i++) D[i] = (float *)malloc(n*sizeof(float)); for (i = 0; i<n; i++){ //Initial values for D for (j = 0; j<n; j++){ D[i][j] = INFINITY; if ((!isinf(A[i][j])) && A[i][j] != 0) { D[i][j] = A[i][j]; } if (A[i][j] == 0) D[i][j] = 0; } } d2 = (float *)malloc(n*n*sizeof(float)); //Pass the values into the subtable d2 int index = 0; for (j = 0; j<n; j++){ for (i = 0; i<n; i++){ d2[index++] = D[i][j]; } } int gridx = pow(2, N - 4), gridy = pow(2, N - 4); //Dimensions of grid int blockx = pow(2, 4), blocky = pow(2, 4); dim3 dimGrid(gridx, gridy); dim3 dimBlock(blockx, blocky); int size = n*n*sizeof(float); float *d_D; hipMalloc((void**)&d_D, size); //Allocation of device Table hipMemcpy(d_D, d2, size, hipMemcpyHostToDevice); //Memory transfer from host to device for (k = 0; k<n; k++){ calc << <dimGrid, dimBlock >> >(d_D, n, k); //Run kernel for each k } hipMemcpy(d2, d_D, size, hipMemcpyDeviceToHost); //Pass values from device to host hipFree(d_D); index = 0; for (j = 0; j<n; j++){ for (i 
= 0; i<n; i++){ D[i][j] = d2[index++]; //Pass the values to the 2-d Table of min distance D[i][j] } } clock_t end = clock(); float seconds = (float)(end - start) / CLOCKS_PER_SEC; printf("Elapsed wall time = %f sec\n", seconds); //Elapsed time exit(0); } void makeAdjacency(int n, float p, int w){ //Set initial values to node distances int i, j; A = (float **)malloc(n*sizeof(float *)); for (i = 0; i<n; i++) A[i] = (float *)malloc(n*sizeof(float)); srand(time(NULL)); for (i = 0; i<n; i++){ for (j = 0; j<n; j++){ if (((float)rand() / (RAND_MAX)) > p) { A[i][j] = INFINITY; } else A[i][j] = ((float)rand() / (RAND_MAX)) * w; } A[i][i] = 0; } }
c39b251dfe6ddc9e7433732f750b04bbfa457b2e.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #ifdef INFINITY /* INFINITY is supported */ #endif float **A, **D, *d2; //Table A distance, D minimum distance,d2 tempTable 1-d void makeAdjacency(int n, float p, int w); //kernel __global__ void calc(float *d_D, int n, int k){ int i = blockIdx.x * blockDim.x + threadIdx.x; //We find i & j in the Grid of threads int j = blockIdx.y * blockDim.y + threadIdx.y; if (d_D[i + j*n] > d_D[i + k*n] + d_D[k + j*n]) d_D[i + j*n] = d_D[i + k*n] + d_D[k + j*n]; //Every thread calculates its proper value } int main(int argc, char **argv){ int N, w; float p; N = atoi(argv[1]); //Read the console inputs p = atof(argv[2]); w = atoi(argv[3]); int n = pow(2, N); makeAdjacency(n, p, w); //Initialize table A clock_t start = clock(); //First time measurement int i, j, k; D = (float **)malloc(n*sizeof(float *)); //Allocation for table D for (i = 0; i<n; i++) D[i] = (float *)malloc(n*sizeof(float)); for (i = 0; i<n; i++){ //Initial values for D for (j = 0; j<n; j++){ D[i][j] = INFINITY; if ((!isinf(A[i][j])) && A[i][j] != 0) { D[i][j] = A[i][j]; } if (A[i][j] == 0) D[i][j] = 0; } } d2 = (float *)malloc(n*n*sizeof(float)); //Pass the values into the subtable d2 int index = 0; for (j = 0; j<n; j++){ for (i = 0; i<n; i++){ d2[index++] = D[i][j]; } } int gridx = pow(2, N - 4), gridy = pow(2, N - 4); //Dimensions of grid int blockx = pow(2, 4), blocky = pow(2, 4); dim3 dimGrid(gridx, gridy); dim3 dimBlock(blockx, blocky); int size = n*n*sizeof(float); float *d_D; cudaMalloc((void**)&d_D, size); //Allocation of device Table cudaMemcpy(d_D, d2, size, cudaMemcpyHostToDevice); //Memory transfer from host to device for (k = 0; k<n; k++){ calc << <dimGrid, dimBlock >> >(d_D, n, k); //Run kernel for each k } cudaMemcpy(d2, d_D, size, cudaMemcpyDeviceToHost); //Pass values from device to host cudaFree(d_D); index = 0; for (j = 0; j<n; j++){ for (i = 0; i<n; i++){ D[i][j] = d2[index++]; //Pass the values to the 2-d Table of min 
distance D[i][j] } } clock_t end = clock(); float seconds = (float)(end - start) / CLOCKS_PER_SEC; printf("Elapsed wall time = %f sec\n", seconds); //Elapsed time exit(0); } void makeAdjacency(int n, float p, int w){ //Set initial values to node distances int i, j; A = (float **)malloc(n*sizeof(float *)); for (i = 0; i<n; i++) A[i] = (float *)malloc(n*sizeof(float)); srand(time(NULL)); for (i = 0; i<n; i++){ for (j = 0; j<n; j++){ if (((float)rand() / (RAND_MAX)) > p) { A[i][j] = INFINITY; } else A[i][j] = ((float)rand() / (RAND_MAX)) * w; } A[i][i] = 0; } }
279f9b50e433b92a31a883a9af38b2ee676e10bf.hip
// !!! This is a file automatically generated by hipify!!! #include <vector> #include "caffe/layers/base_data_layer.hpp" namespace caffe { template <typename Dtype> void BasePrefetchingDataLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { Batch<Dtype>* batch = prefetch_full_.pop("Data layer prefetch queue empty"); // Reshape to loaded data. top[0]->ReshapeLike(batch->data_); // Copy the data caffe_copy(batch->data_.count(), batch->data_.gpu_data(), top[0]->mutable_gpu_data()); if (this->output_labels_) { // Reshape to loaded labels. top[1]->ReshapeLike(batch->label_); // Copy the labels. caffe_copy(batch->label_.count(), batch->label_.gpu_data(), top[1]->mutable_gpu_data()); } // Ensure the copy is synchronous wrt the host, so that the next batch isn't // copied in meanwhile. CUDA_CHECK(hipStreamSynchronize(hipStreamDefault)); prefetch_free_.push(batch); } template <typename Dtype> void ReidPrefetchingDataLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { ReidBatch<Dtype>* batch = this->prefetch_full_.pop("Data layer prefetch queue empty"); // Reshape to loaded data. CHECK_EQ(top[0]->count(), batch->data_.count()*2); top[0]->Reshape(batch->data_.num()*2, batch->data_.channels(), batch->data_.height(), batch->data_.width()); // Copy the data caffe_copy(batch->data_.count(), batch->data_.gpu_data(), top[0]->mutable_gpu_data()); caffe_copy(batch->datap_.count(), batch->datap_.gpu_data(), top[0]->mutable_gpu_data()+batch->data_.count()); DLOG(INFO) << "Prefetch copied"; if (this->output_labels_) { // Reshape to loaded labels. vector<int> shape = batch->label_.shape(); CHECK_LT(shape.size(), 2); CHECK_EQ(top[1]->count(), batch->label_.count()*2); shape[0] *= 2; top[1]->Reshape(shape); // Copy the labels. 
caffe_copy(batch->label_.count(), batch->label_.gpu_data(), top[1]->mutable_gpu_data()); caffe_copy(batch->labelp_.count(), batch->labelp_.gpu_data(), top[1]->mutable_gpu_data()+batch->label_.count()); } // Ensure the copy is synchronous wrt the host, so that the next batch isn't // copied in meanwhile. CUDA_CHECK(hipStreamSynchronize(hipStreamDefault)); prefetch_free_.push(batch); } INSTANTIATE_LAYER_GPU_FORWARD(BasePrefetchingDataLayer); INSTANTIATE_LAYER_GPU_FORWARD(ReidPrefetchingDataLayer); } // namespace caffe
279f9b50e433b92a31a883a9af38b2ee676e10bf.cu
#include <vector> #include "caffe/layers/base_data_layer.hpp" namespace caffe { template <typename Dtype> void BasePrefetchingDataLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { Batch<Dtype>* batch = prefetch_full_.pop("Data layer prefetch queue empty"); // Reshape to loaded data. top[0]->ReshapeLike(batch->data_); // Copy the data caffe_copy(batch->data_.count(), batch->data_.gpu_data(), top[0]->mutable_gpu_data()); if (this->output_labels_) { // Reshape to loaded labels. top[1]->ReshapeLike(batch->label_); // Copy the labels. caffe_copy(batch->label_.count(), batch->label_.gpu_data(), top[1]->mutable_gpu_data()); } // Ensure the copy is synchronous wrt the host, so that the next batch isn't // copied in meanwhile. CUDA_CHECK(cudaStreamSynchronize(cudaStreamDefault)); prefetch_free_.push(batch); } template <typename Dtype> void ReidPrefetchingDataLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { ReidBatch<Dtype>* batch = this->prefetch_full_.pop("Data layer prefetch queue empty"); // Reshape to loaded data. CHECK_EQ(top[0]->count(), batch->data_.count()*2); top[0]->Reshape(batch->data_.num()*2, batch->data_.channels(), batch->data_.height(), batch->data_.width()); // Copy the data caffe_copy(batch->data_.count(), batch->data_.gpu_data(), top[0]->mutable_gpu_data()); caffe_copy(batch->datap_.count(), batch->datap_.gpu_data(), top[0]->mutable_gpu_data()+batch->data_.count()); DLOG(INFO) << "Prefetch copied"; if (this->output_labels_) { // Reshape to loaded labels. vector<int> shape = batch->label_.shape(); CHECK_LT(shape.size(), 2); CHECK_EQ(top[1]->count(), batch->label_.count()*2); shape[0] *= 2; top[1]->Reshape(shape); // Copy the labels. 
caffe_copy(batch->label_.count(), batch->label_.gpu_data(), top[1]->mutable_gpu_data()); caffe_copy(batch->labelp_.count(), batch->labelp_.gpu_data(), top[1]->mutable_gpu_data()+batch->label_.count()); } // Ensure the copy is synchronous wrt the host, so that the next batch isn't // copied in meanwhile. CUDA_CHECK(cudaStreamSynchronize(cudaStreamDefault)); prefetch_free_.push(batch); } INSTANTIATE_LAYER_GPU_FORWARD(BasePrefetchingDataLayer); INSTANTIATE_LAYER_GPU_FORWARD(ReidPrefetchingDataLayer); } // namespace caffe
feb88e2155ab456ed28af08cb81e12b6e637da95.hip
// !!! This is a file automatically generated by hipify!!! #include <THH/THHStorageCopy.h> #include <THH/THHGeneral.h> #include <TH/THHalf.h> #include <THH/THHTensorCopy.h> #include <THH/THHTensor.hpp> #include <THH/THHStorage.hpp> #include <THH/generic/THHStorageCopy.hip> #include <THH/THHGenerateAllTypes.h> #include <THH/generic/THHStorageCopy.hip> #include <THH/THHGenerateBoolType.h> #include <THH/generic/THHStorageCopy.hip> #include <THH/THHGenerateBFloat16Type.h>
feb88e2155ab456ed28af08cb81e12b6e637da95.cu
#include <THC/THCStorageCopy.h> #include <THC/THCGeneral.h> #include <TH/THHalf.h> #include <THC/THCTensorCopy.h> #include <THC/THCTensor.hpp> #include <THC/THCStorage.hpp> #include <THC/generic/THCStorageCopy.cu> #include <THC/THCGenerateAllTypes.h> #include <THC/generic/THCStorageCopy.cu> #include <THC/THCGenerateBoolType.h> #include <THC/generic/THCStorageCopy.cu> #include <THC/THCGenerateBFloat16Type.h>
51a98363f9eea8d59d0c3973877a24c2cf7974ea.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // includes, project #include <cutil.h> // includes, kernels #include <demosaic_kernel.cu> //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest(int argc, char** argv); void randomInit(float*, int); void printDiff(float*, float*, int, int); extern "C" void computeGold(float*, const float*, unsigned int, unsigned int); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char** argv) { runTest(argc, argv); CUT_EXIT(argc, argv); } //////////////////////////////////////////////////////////////////////////////// //! Run a simple test for CUDA //////////////////////////////////////////////////////////////////////////////// void runTest(int argc, char** argv) { CUT_DEVICE_INIT(argc, argv); // set seed for rand() srand(2006); // allocate host memory for matrices A unsigned int size_A = WIDTH_A * HEIGHT_A; unsigned int mem_size_A = sizeof(float) * size_A; float* h_A = (float*) malloc(mem_size_A); // initialize host memory randomInit(h_A, size_A); for (int i = 0; i < WIDTH_A; i++) { for (int j = 0; j < HEIGHT_A; j++) { if (i < 15 || j < 15 || i > WIDTH_A - 2 || j > HEIGHT_A - 2) { h_A[j * WIDTH_A + i] = 0.0f; } } } // allocate device memory float* d_A; CUDA_SAFE_CALL(hipMalloc((void**) &d_A, mem_size_A)); // copy host memory to device CUDA_SAFE_CALL(hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice)); // allocate device memory for result unsigned int size_C = WIDTH_C * HEIGHT_C; unsigned int mem_size_C = sizeof(float) * size_C; // allocate host memory for the result float* h_C = (float*) malloc(mem_size_C); // create and start timer unsigned int timer = 0; // compute reference 
solution float* reference = (float*) malloc(mem_size_C); computeGold(reference, h_A, WIDTH_A, HEIGHT_A); CUTBoolean res; { free(h_C); h_C = (float*) malloc(mem_size_C); float* d_C; CUDA_SAFE_CALL(hipMalloc((void**) &d_C, mem_size_C)); // setup execution parameters dim3 threads(16, 16); dim3 grid(WIDTH_C / threads.x, HEIGHT_C / threads.y); CUT_SAFE_CALL(cutCreateTimer(&timer)); hipDeviceSynchronize(); CUT_SAFE_CALL(cutStartTimer(timer)); // execute the kernel hipLaunchKernelGGL(( demosaic_naive), dim3(grid), dim3(threads) , 0, 0, d_A, d_C, WIDTH_A); // stop and destroy timer hipDeviceSynchronize(); CUT_SAFE_CALL(cutStopTimer(timer)); // check if kernel execution generated and error CUT_CHECK_ERROR("Kernel execution failed"); // copy result from device to host CUDA_SAFE_CALL(hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost)); printf("demosaic_naive Processing time: %f (ms), %f Gflops \n", cutGetTimerValue(timer), 2000.0 * 0 / cutGetTimerValue(timer) / 1024 / 1024 / 1024); CUT_SAFE_CALL(cutDeleteTimer(timer)); CUDA_SAFE_CALL(hipFree(d_C)); } // check result res = cutCompareL2fe(reference, h_C, size_C, 1e-6f); printf("Test %s \n", (1 == res) ? 
"PASSED" : "FAILED"); { free(h_C); h_C = (float*) malloc(mem_size_C); float* d_C; CUDA_SAFE_CALL(hipMalloc((void**) &d_C, mem_size_C)); // setup execution parameters dim3 threads(32, 1); dim3 grid(WIDTH_C / threads.x, WIDTH_C / (1)); CUT_SAFE_CALL(cutCreateTimer(&timer)); hipDeviceSynchronize(); CUT_SAFE_CALL(cutStartTimer(timer)); // execute the kernel hipLaunchKernelGGL(( demosaic_coalesced), dim3(grid), dim3(threads) , 0, 0, d_A, d_C, WIDTH_A); // stop and destroy timer hipDeviceSynchronize(); CUT_SAFE_CALL(cutStopTimer(timer)); // check if kernel execution generated and error CUT_CHECK_ERROR("Kernel execution failed"); // copy result from device to host CUDA_SAFE_CALL(hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost)); printf("demosaic_coalesced Processing time: %f (ms), %f Gflops \n", cutGetTimerValue(timer), 2000.0 * 0 / cutGetTimerValue(timer) / 1024 / 1024 / 1024); CUT_SAFE_CALL(cutDeleteTimer(timer)); CUDA_SAFE_CALL(hipFree(d_C)); } // check result res = cutCompareL2fe(reference, h_C, size_C, 1e-6f); printf("Test %s \n", (1 == res) ? 
"PASSED" : "FAILED"); { free(h_C); h_C = (float*) malloc(mem_size_C); float* d_C; CUDA_SAFE_CALL(hipMalloc((void**) &d_C, mem_size_C)); // setup execution parameters dim3 threads(256, 1); dim3 grid(WIDTH_C / threads.x, WIDTH_C / (32)); CUT_SAFE_CALL(cutCreateTimer(&timer)); hipDeviceSynchronize(); CUT_SAFE_CALL(cutStartTimer(timer)); // execute the kernel hipLaunchKernelGGL(( demosaic_opt), dim3(grid), dim3(threads) , 0, 0, d_A, d_C, WIDTH_A); // stop and destroy timer hipDeviceSynchronize(); CUT_SAFE_CALL(cutStopTimer(timer)); // check if kernel execution generated and error CUT_CHECK_ERROR("Kernel execution failed"); // copy result from device to host CUDA_SAFE_CALL(hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost)); printf("demosaic_opt Processing time: %f (ms), %f Gflops \n", cutGetTimerValue(timer), 2000.0 * 0 / cutGetTimerValue(timer) / 1024 / 1024 / 1024); CUT_SAFE_CALL(cutDeleteTimer(timer)); CUDA_SAFE_CALL(hipFree(d_C)); } // check result res = cutCompareL2fe(reference, h_C, size_C, 1e-6f); printf("Test %s \n", (1 == res) ? "PASSED" : "FAILED"); // clean up memory free(h_A); free(h_C); free(reference); CUDA_SAFE_CALL(hipFree(d_A)); } // Allocates a matrix with random float entries. void randomInit(float* data, int size) { for (int i = 0; i < size; ++i) data[i] = rand() / (float) RAND_MAX; } void printDiff(float *data1, float *data2, int width, int height) { int i, j, k; int error_count = 0; for (j = 0; j < height; j++) { for (i = 0; i < width; i++) { k = j * width + i; if (data1[k] != data2[k]) { printf("diff(%d,%d) CPU=%4.4f, GPU=%4.4f n", i, j, data1[k], data2[k]); error_count++; } } } printf(" nTotal Errors = %d n", error_count); }
51a98363f9eea8d59d0c3973877a24c2cf7974ea.cu
// includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // includes, project #include <cutil.h> // includes, kernels #include <demosaic_kernel.cu> //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest(int argc, char** argv); void randomInit(float*, int); void printDiff(float*, float*, int, int); extern "C" void computeGold(float*, const float*, unsigned int, unsigned int); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char** argv) { runTest(argc, argv); CUT_EXIT(argc, argv); } //////////////////////////////////////////////////////////////////////////////// //! Run a simple test for CUDA //////////////////////////////////////////////////////////////////////////////// void runTest(int argc, char** argv) { CUT_DEVICE_INIT(argc, argv); // set seed for rand() srand(2006); // allocate host memory for matrices A unsigned int size_A = WIDTH_A * HEIGHT_A; unsigned int mem_size_A = sizeof(float) * size_A; float* h_A = (float*) malloc(mem_size_A); // initialize host memory randomInit(h_A, size_A); for (int i = 0; i < WIDTH_A; i++) { for (int j = 0; j < HEIGHT_A; j++) { if (i < 15 || j < 15 || i > WIDTH_A - 2 || j > HEIGHT_A - 2) { h_A[j * WIDTH_A + i] = 0.0f; } } } // allocate device memory float* d_A; CUDA_SAFE_CALL(cudaMalloc((void**) &d_A, mem_size_A)); // copy host memory to device CUDA_SAFE_CALL(cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice)); // allocate device memory for result unsigned int size_C = WIDTH_C * HEIGHT_C; unsigned int mem_size_C = sizeof(float) * size_C; // allocate host memory for the result float* h_C = (float*) malloc(mem_size_C); // create and start timer unsigned int timer = 0; // compute reference solution float* reference = (float*) malloc(mem_size_C); computeGold(reference, h_A, 
WIDTH_A, HEIGHT_A); CUTBoolean res; { free(h_C); h_C = (float*) malloc(mem_size_C); float* d_C; CUDA_SAFE_CALL(cudaMalloc((void**) &d_C, mem_size_C)); // setup execution parameters dim3 threads(16, 16); dim3 grid(WIDTH_C / threads.x, HEIGHT_C / threads.y); CUT_SAFE_CALL(cutCreateTimer(&timer)); cudaThreadSynchronize(); CUT_SAFE_CALL(cutStartTimer(timer)); // execute the kernel demosaic_naive<<< grid, threads >>>(d_A, d_C, WIDTH_A); // stop and destroy timer cudaThreadSynchronize(); CUT_SAFE_CALL(cutStopTimer(timer)); // check if kernel execution generated and error CUT_CHECK_ERROR("Kernel execution failed"); // copy result from device to host CUDA_SAFE_CALL(cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost)); printf("demosaic_naive Processing time: %f (ms), %f Gflops \n", cutGetTimerValue(timer), 2000.0 * 0 / cutGetTimerValue(timer) / 1024 / 1024 / 1024); CUT_SAFE_CALL(cutDeleteTimer(timer)); CUDA_SAFE_CALL(cudaFree(d_C)); } // check result res = cutCompareL2fe(reference, h_C, size_C, 1e-6f); printf("Test %s \n", (1 == res) ? 
"PASSED" : "FAILED"); { free(h_C); h_C = (float*) malloc(mem_size_C); float* d_C; CUDA_SAFE_CALL(cudaMalloc((void**) &d_C, mem_size_C)); // setup execution parameters dim3 threads(32, 1); dim3 grid(WIDTH_C / threads.x, WIDTH_C / (1)); CUT_SAFE_CALL(cutCreateTimer(&timer)); cudaThreadSynchronize(); CUT_SAFE_CALL(cutStartTimer(timer)); // execute the kernel demosaic_coalesced<<< grid, threads >>>(d_A, d_C, WIDTH_A); // stop and destroy timer cudaThreadSynchronize(); CUT_SAFE_CALL(cutStopTimer(timer)); // check if kernel execution generated and error CUT_CHECK_ERROR("Kernel execution failed"); // copy result from device to host CUDA_SAFE_CALL(cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost)); printf("demosaic_coalesced Processing time: %f (ms), %f Gflops \n", cutGetTimerValue(timer), 2000.0 * 0 / cutGetTimerValue(timer) / 1024 / 1024 / 1024); CUT_SAFE_CALL(cutDeleteTimer(timer)); CUDA_SAFE_CALL(cudaFree(d_C)); } // check result res = cutCompareL2fe(reference, h_C, size_C, 1e-6f); printf("Test %s \n", (1 == res) ? 
"PASSED" : "FAILED"); { free(h_C); h_C = (float*) malloc(mem_size_C); float* d_C; CUDA_SAFE_CALL(cudaMalloc((void**) &d_C, mem_size_C)); // setup execution parameters dim3 threads(256, 1); dim3 grid(WIDTH_C / threads.x, WIDTH_C / (32)); CUT_SAFE_CALL(cutCreateTimer(&timer)); cudaThreadSynchronize(); CUT_SAFE_CALL(cutStartTimer(timer)); // execute the kernel demosaic_opt<<< grid, threads >>>(d_A, d_C, WIDTH_A); // stop and destroy timer cudaThreadSynchronize(); CUT_SAFE_CALL(cutStopTimer(timer)); // check if kernel execution generated and error CUT_CHECK_ERROR("Kernel execution failed"); // copy result from device to host CUDA_SAFE_CALL(cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost)); printf("demosaic_opt Processing time: %f (ms), %f Gflops \n", cutGetTimerValue(timer), 2000.0 * 0 / cutGetTimerValue(timer) / 1024 / 1024 / 1024); CUT_SAFE_CALL(cutDeleteTimer(timer)); CUDA_SAFE_CALL(cudaFree(d_C)); } // check result res = cutCompareL2fe(reference, h_C, size_C, 1e-6f); printf("Test %s \n", (1 == res) ? "PASSED" : "FAILED"); // clean up memory free(h_A); free(h_C); free(reference); CUDA_SAFE_CALL(cudaFree(d_A)); } // Allocates a matrix with random float entries. void randomInit(float* data, int size) { for (int i = 0; i < size; ++i) data[i] = rand() / (float) RAND_MAX; } void printDiff(float *data1, float *data2, int width, int height) { int i, j, k; int error_count = 0; for (j = 0; j < height; j++) { for (i = 0; i < width; i++) { k = j * width + i; if (data1[k] != data2[k]) { printf("diff(%d,%d) CPU=%4.4f, GPU=%4.4f n", i, j, data1[k], data2[k]); error_count++; } } } printf(" nTotal Errors = %d n", error_count); }
82fcc3ec4be67e324532007242fd2e2e321a2be8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<cuda.h> #include<stdio.h> int main(void) { void MatrixMultiplication(float *, float *, float *, int,int,int); const int k =5 ; const int m=4,n=3; float M[k*n], N[n*m], P[k*m]; for(int i = 0; i < (k*n) ; i++) { M[i] = 6; } for(int i = 0; i < (n*m) ; i++) { //M[i] = 6; N[i] = 6; // P[i] = 0; } for(int i = 0; i < (k*m) ; i++) { //M[i] = 6; // N[i] = 6; P[i] = 0; } MatrixMultiplication(M, N, P,m,n,k); for(int i = 0; i < (k*m) ; i++) { printf("%f \n", P[i]); } int quit; scanf("%d",&quit); return 0; } //Matrix multiplication kernel - thread specification __global__ void MatrixMulKernel(float *Md, float *Nd, float *Pd, int N,int M) { //2D Thread ID int tx = threadIdx.x; int ty = threadIdx.y; printf("%d %d\n",tx,ty); //Pvalue stores the Pd element that is computed by the thread float Pvalue = 0; for(int k = 0; k <N ; ++k) { float Mdelement = Md[tx*N + k]; float Ndelement = Nd[k*M + ty]; Pvalue += (Mdelement*Ndelement); } Pd[tx*M + ty] = Pvalue; } void MatrixMultiplication(float *M, float *N, float *P, int m,int n,int k) { //int size = Width*Width*sizeof(float); float *Md, *Nd, *Pd; //Transfer M and N to device memory hipMalloc((void**)&Md, k*n*sizeof(float)); hipMemcpy(Md,M,k*n*sizeof(float),hipMemcpyHostToDevice); hipMalloc((void**)&Nd, n*m*sizeof(float)); hipMemcpy(Nd,N,n*m*sizeof(float),hipMemcpyHostToDevice); //Allocate P on the device hipMalloc((void**)&Pd,k*m*sizeof(float)); //Setup the execution configuration dim3 dimBlock(k,m); dim3 dimGrid(1,1); //Launch the device computation threads! hipLaunchKernelGGL(( MatrixMulKernel), dim3(dimGrid),dim3(dimBlock), 0, 0, Md,Nd,Pd,n,m); //Transfer P from device to host hipMemcpy(P,Pd,m*k*sizeof(float),hipMemcpyDeviceToHost); //Free device matrices hipFree(Md); hipFree(Nd); hipFree(Pd); }
82fcc3ec4be67e324532007242fd2e2e321a2be8.cu
#include<cuda.h> #include<stdio.h> int main(void) { void MatrixMultiplication(float *, float *, float *, int,int,int); const int k =5 ; const int m=4,n=3; float M[k*n], N[n*m], P[k*m]; for(int i = 0; i < (k*n) ; i++) { M[i] = 6; } for(int i = 0; i < (n*m) ; i++) { //M[i] = 6; N[i] = 6; // P[i] = 0; } for(int i = 0; i < (k*m) ; i++) { //M[i] = 6; // N[i] = 6; P[i] = 0; } MatrixMultiplication(M, N, P,m,n,k); for(int i = 0; i < (k*m) ; i++) { printf("%f \n", P[i]); } int quit; scanf("%d",&quit); return 0; } //Matrix multiplication kernel - thread specification __global__ void MatrixMulKernel(float *Md, float *Nd, float *Pd, int N,int M) { //2D Thread ID int tx = threadIdx.x; int ty = threadIdx.y; printf("%d %d\n",tx,ty); //Pvalue stores the Pd element that is computed by the thread float Pvalue = 0; for(int k = 0; k <N ; ++k) { float Mdelement = Md[tx*N + k]; float Ndelement = Nd[k*M + ty]; Pvalue += (Mdelement*Ndelement); } Pd[tx*M + ty] = Pvalue; } void MatrixMultiplication(float *M, float *N, float *P, int m,int n,int k) { //int size = Width*Width*sizeof(float); float *Md, *Nd, *Pd; //Transfer M and N to device memory cudaMalloc((void**)&Md, k*n*sizeof(float)); cudaMemcpy(Md,M,k*n*sizeof(float),cudaMemcpyHostToDevice); cudaMalloc((void**)&Nd, n*m*sizeof(float)); cudaMemcpy(Nd,N,n*m*sizeof(float),cudaMemcpyHostToDevice); //Allocate P on the device cudaMalloc((void**)&Pd,k*m*sizeof(float)); //Setup the execution configuration dim3 dimBlock(k,m); dim3 dimGrid(1,1); //Launch the device computation threads! MatrixMulKernel<<<dimGrid,dimBlock>>>(Md,Nd,Pd,n,m); //Transfer P from device to host cudaMemcpy(P,Pd,m*k*sizeof(float),cudaMemcpyDeviceToHost); //Free device matrices cudaFree(Md); cudaFree(Nd); cudaFree(Pd); }
d63314a618d79171356dc9818f5a4f622d9f4e9f.hip
// !!! This is a file automatically generated by hipify!!! #include <cusparse_v2.h> #include <stdio.h> #include <stdlib.h> #include <ctime> #ifdef WINDOWS #include <direct.h> #define GetCurrentDir _getcwd #else #include <unistd.h> #define GetCurrentDir getcwd #endif extern "C" { #include "component/libraries/mmio.h" } /////////////////////////////////////////// #include <boost/numeric/ublas/triangular.hpp> #include <boost/numeric/ublas/vector.hpp> #include <boost/numeric/ublas/vector_proxy.hpp> #include <boost/numeric/ublas/matrix_sparse.hpp> #include <boost/numeric/ublas/operation_sparse.hpp> #include <boost/numeric/ublas/lu.hpp> #include "viennacl/scalar.hpp" #include "viennacl/vector.hpp" #include "viennacl/tools/timer.hpp" #include "viennacl/coordinate_matrix.hpp" #include "viennacl/compressed_matrix.hpp" #include "viennacl/ell_matrix.hpp" #include "viennacl/hyb_matrix.hpp" #include "viennacl/sliced_ell_matrix.hpp" #include "viennacl/linalg/prod.hpp" #include "viennacl/linalg/norm_2.hpp" #include "viennacl/io/matrix_market.hpp" /////////////////////////////////////////// struct MyCSRMat { int * I; // ROW INDICES OF NZ int * J; // COLUMN INDICES OF NZ double * val; // VALUES OF NZ int nz; // NON-ZERO int M; // ROW int N; // COLUMN }myMat1,myMat2,myMat3; // error check macros #define CUSPARSE_CHECK(x) {hipsparseStatus_t _c=x; if (_c != HIPSPARSE_STATUS_SUCCESS) {printf("cusparse fail: %d, line: %d\n", (int)_c, __LINE__); exit(-1);}} #define cudaCheckErrors(msg) \ do { \ hipError_t __err = hipGetLastError(); \ if (__err != hipSuccess) { \ fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \ msg, hipGetErrorString(__err), \ __FILE__, __LINE__); \ fprintf(stderr, "*** FAILED - ABORTING\n"); \ exit(1); \ } \ } while (0) char* appendCharToCharArray(char* array, char a) { size_t len = strlen(array); char* ret = new char[len+2]; strcpy(ret, array); ret[len] = a; ret[len+1] = '\0'; return ret; } MyCSRMat initMatrix(MyCSRMat myMat, const char * myMatName) { int ret_code; 
MM_typecode matcode; FILE *f; int nz; int M, N; int i, *K, *I, *J; double *val; char buff[FILENAME_MAX]; GetCurrentDir( buff, FILENAME_MAX ); const char string[] = "/examples/testdata/"; strcat(buff,string); strcat(buff,myMatName); printf("matrix "); printf(myMatName); printf(" read at : \n"); printf(buff); printf("\n"); if ((f = fopen(buff, "r")) == NULL) exit(1); if (mm_read_banner(f, &matcode) != 0) { printf("Could not process Matrix Market banner.\n"); exit(1); } /* This is how one can screen matrix types if their application */ /* only supports a subset of the Matrix Market data types. */ if (mm_is_complex(matcode) && mm_is_matrix(matcode) && mm_is_sparse(matcode) ) { printf("Sorry, this application does not support "); printf("Market Market type: [%s]\n", mm_typecode_to_str(matcode)); exit(1); } /* find out size of sparse matrix .... */ if ((ret_code = mm_read_mtx_crd_size(f, &M, &N, &nz)) !=0) exit(1); /* reseve memory for matrices */ K = (int *) malloc(nz * sizeof(int)); I = (int *) malloc(nz+1 * sizeof(int)); // +1 because we put the number of nz in the end J = (int *) malloc(nz * sizeof(int)); val = (double *) malloc(nz * sizeof(double)); /* NOTE: when reading in doubles, ANSI C requires the use of the "l" */ /* specifier as in "%lg", "%lf", "%le", otherwise errors will occur */ /* (ANSI C X3.159-1989, Sec. 4.9.6.2, p. 
136 lines 13-15) */ for (i=0; i<nz; i++) { fscanf(f, "%d %d %lg\n", &K[i], &J[i], &val[i]); K[i]; /* adjust from 1-based to 0-based */ J[i]; } I = K; I[nz] = M; //printf("I[nz] : %d\n",I[nz]); //printf("I[nz-1] : %d\n",I[nz-1]); //printf("J[nz-1] : %d\n",J[nz-1]); //printf("val[nz-1] : %20.19g\n",val[nz-1]); if (f !=stdin) fclose(f); /************************/ /* now write out matrix */ /************************/ mm_write_banner(stdout, matcode); mm_write_mtx_crd_size(stdout, M, N, nz); //for (i=0; i<nz; i++) // fprintf(stdout, "%d %d %20.19g\n", I[i], J[i], val[i]); // myMat myMat.I = I; myMat.J = J; myMat.M = M; myMat.N = N; myMat.nz = nz; myMat.val = val; return myMat; } int compute(MyCSRMat myMat1, MyCSRMat myMat2) { bool test = false; // TRUE => exemple | False => loaded matrix int N = 50000; // matrix generation and validation depends on these relationships: int SCL = 2; int K = N; int M = SCL*N; // A: MxK B: KxN C: MxN std::clock_t start; double duration, computeT; hipsparseStatus_t stat; hipsparseHandle_t hndl; hipsparseMatDescr_t descrA, descrB, descrC; int *csrRowPtrA, *csrRowPtrB, *csrRowPtrC, *csrColIndA, *csrColIndB, *csrColIndC; int *h_csrRowPtrA, *h_csrRowPtrB, *h_csrRowPtrC, *h_csrColIndA, *h_csrColIndB, *h_csrColIndC; float *csrValA, *csrValB, *csrValC, *h_csrValA, *h_csrValB, *h_csrValC; double *h_csrValCd; int nnzA, nnzB, nnzC; // number of non-zero int m,n,k; m = M; n = N; k = K; if (test){ /////////////////////////// STEP 1 //////////////////////////////////// /* -------------------------------------------------------------------------- */ /* generate A, B=2I */ /* A: |1.0 0.0 0.0 ...| |1.0 0.0 0.0 ...| |0.0 1.0 0.0 ...| |0.0 1.0 0.0 ...| |0.0 0.0 1.0 ...| |0.0 0.0 1.0 ...| ... B: |2.0 0.0 0.0 ...| |0.0 2.0 0.0 ...| |0.0 0.0 2.0 ...| ... 
*/ /* -------------------------------------------------------------------------- */ start = std::clock(); nnzA = m; nnzB = n; h_csrRowPtrA = (int *)malloc((nnzA+1)*sizeof(int)); h_csrColIndA = (int *)malloc(nnzA*sizeof(int)); h_csrValA = (float *)malloc(nnzA*sizeof(float)); h_csrRowPtrB = (int *)malloc((nnzB+1)*sizeof(int)); h_csrColIndB = (int *)malloc(nnzB*sizeof(int)); h_csrValB = (float *)malloc(nnzB*sizeof(float)); duration = (( std::clock() - start ) / (double) CLOCKS_PER_SEC) * 1000; printf("Host Malloc : %f ms\n", duration); //------------------------------------------------------------------------------ start = std::clock(); if ((h_csrRowPtrA == NULL) || (h_csrRowPtrB == NULL) || (h_csrColIndA == NULL) || (h_csrColIndB == NULL) || (h_csrValA == NULL) || (h_csrValB == NULL)) {printf("malloc fail\n"); return -1;} for (int i = 0; i < nnzA; i++){ h_csrValA[i] = 1.0f; h_csrRowPtrA[i] = i; h_csrColIndA[i] = i/SCL; if (i < nnzB){ h_csrValB[i] = 2.0f; h_csrRowPtrB[i] = i; h_csrColIndB[i] = i;} } h_csrRowPtrA[nnzA] = nnzA; h_csrRowPtrB[nnzB] = nnzB; duration = (( std::clock() - start ) / (double) CLOCKS_PER_SEC) * 1000; printf("CSR Matrix Generation : %f ms\n", duration); } else{ nnzA = myMat1.nz; nnzB = myMat2.nz; } /////////////////////////// STEP 2 //////////////////////////////////// /* -------------------------------------------------------------------------- */ /* Allocate memory on the device */ /* and return a ptr of its memory emplacement */ /* -------------------------------------------------------------------------- */ if (test){ start = std::clock(); hipMalloc(&csrRowPtrA, (m+1)*sizeof(int)); duration = (( std::clock() - start ) / (double) CLOCKS_PER_SEC) * 1000; printf("First hipMalloc : %f ms\n", duration); //------------------------------------------------------------------------------ start = std::clock(); hipMalloc(&csrColIndA, nnzA*sizeof(int)); hipMalloc(&csrValA, nnzA*sizeof(float)); hipMalloc(&csrRowPtrB, (nnzB+1)*sizeof(int)); 
hipMalloc(&csrColIndB, nnzB*sizeof(int)); hipMalloc(&csrValB, nnzB*sizeof(float)); duration = (( std::clock() - start ) / (double) CLOCKS_PER_SEC) * 1000; printf("hipMalloc csrRowPtrB|csrColIndA/B|csrValA/B : %f ms\n", duration); } else { start = std::clock(); hipMalloc(&csrRowPtrA, (myMat1.nz+1)*sizeof(int)); duration = (( std::clock() - start ) / (double) CLOCKS_PER_SEC) * 1000; printf("hipMalloc csrRowPtrA : %f ms\n", duration); //------------------------------------------------------------------------------ start = std::clock(); hipMalloc(&csrColIndA, myMat1.nz*sizeof(int)); hipMalloc(&csrValA, myMat1.nz*sizeof(double)); hipMalloc(&csrRowPtrB, (myMat2.nz+1)*sizeof(int)); hipMalloc(&csrColIndB, myMat2.nz*sizeof(int)); hipMalloc(&csrValB, myMat2.nz*sizeof(double)); duration = (( std::clock() - start ) / (double) CLOCKS_PER_SEC) * 1000; printf("hipMalloc csrRowPtrB|csrColIndA/B|csrValA/B : %f ms\n", duration); } /////////////////////////// STEP 3 //////////////////////////////////// /* -------------------------------------------------------------------------- */ /* Copy the data from the Host (CPU) */ /* to the device (GPU) */ /* -------------------------------------------------------------------------- */ start = std::clock(); computeT = start; if (test){ cudaCheckErrors("hipMalloc fail"); hipMemcpy(csrRowPtrA, h_csrRowPtrA, (nnzA+1)*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(csrColIndA, h_csrColIndA, nnzA*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(csrValA, h_csrValA, nnzA*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(csrRowPtrB, h_csrRowPtrB, (nnzB+1)*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(csrColIndB, h_csrColIndB, nnzB*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(csrValB, h_csrValB, nnzB*sizeof(float), hipMemcpyHostToDevice); cudaCheckErrors("hipMemcpy fail"); } else{ cudaCheckErrors("hipMalloc fail"); hipMemcpy(csrRowPtrA, myMat1.I, (myMat1.nz+1)*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(csrColIndA, myMat1.J, myMat1.nz*sizeof(int), 
hipMemcpyHostToDevice); hipMemcpy(csrValA, myMat1.val, myMat1.nz*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(csrRowPtrB, myMat2.I, (myMat2.nz+1)*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(csrColIndB, myMat2.J, myMat2.nz*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(csrValB, myMat2.val, myMat2.nz*sizeof(double), hipMemcpyHostToDevice); cudaCheckErrors("hipMemcpy fail"); //printf(myMat2.I); } duration = (( std::clock() - start ) / (double) CLOCKS_PER_SEC) * 1000; printf("Copy Data from Host to Device : %f ms\n", duration); /////////////////////////// STEP 4 //////////////////////////////////// /* -------------------------------------------------------------------------- */ /* set cusparse matrix types */ /* ????? */ /* -------------------------------------------------------------------------- */ start = std::clock(); CUSPARSE_CHECK(hipsparseCreate(&hndl)); duration = (( std::clock() - start ) / (double) CLOCKS_PER_SEC) * 1000; printf("hipsparseCreate(&hndl) : %f ms\n", duration); //------------------------------------------------------------------------------ start = std::clock(); stat = hipsparseCreateMatDescr(&descrA); CUSPARSE_CHECK(stat); stat = hipsparseCreateMatDescr(&descrB); CUSPARSE_CHECK(stat); stat = hipsparseCreateMatDescr(&descrC); CUSPARSE_CHECK(stat); duration = (( std::clock() - start ) / (double) CLOCKS_PER_SEC) * 1000; printf("hipsparseCreateMatDescr(&descrA/B/C) : %f ms\n", duration); //------------------------------------------------------------------------------ start = std::clock(); stat = hipsparseSetMatType(descrA, HIPSPARSE_MATRIX_TYPE_GENERAL); CUSPARSE_CHECK(stat); stat = hipsparseSetMatType(descrB, HIPSPARSE_MATRIX_TYPE_GENERAL); CUSPARSE_CHECK(stat); stat = hipsparseSetMatType(descrC, HIPSPARSE_MATRIX_TYPE_GENERAL); CUSPARSE_CHECK(stat); stat = hipsparseSetMatIndexBase(descrA, HIPSPARSE_INDEX_BASE_ZERO); CUSPARSE_CHECK(stat); stat = hipsparseSetMatIndexBase(descrB, HIPSPARSE_INDEX_BASE_ZERO); CUSPARSE_CHECK(stat); stat = 
hipsparseSetMatIndexBase(descrC, HIPSPARSE_INDEX_BASE_ZERO); CUSPARSE_CHECK(stat); hipsparseOperation_t transA = HIPSPARSE_OPERATION_NON_TRANSPOSE; hipsparseOperation_t transB = HIPSPARSE_OPERATION_NON_TRANSPOSE; duration = (( std::clock() - start ) / (double) CLOCKS_PER_SEC) * 1000; printf("Set cusparse matrix types : %f ms\n", duration); /////////////////////////// STEP 5 //////////////////////////////////// /* -------------------------------------------------------------------------- */ /* Determine csrRowPtrC */ /* & */ /* the total number of nonzero elements */ /* */ /* -------------------------------------------------------------------------- */ start = std::clock(); // figure out size of C int baseC; if (test){ // nnzTotalDevHostPtr points to host memory int *nnzTotalDevHostPtr = &nnzC; stat = hipsparseSetPointerMode(hndl, HIPSPARSE_POINTER_MODE_HOST); CUSPARSE_CHECK(stat); hipMalloc((void**)&csrRowPtrC, sizeof(int)*(m+1)); cudaCheckErrors("hipMalloc fail"); //------------------------------------------------------------------------------ // ???? stat = hipsparseXcsrgemmNnz(hndl, transA, transB, m, n, k, descrA, nnzA, csrRowPtrA, csrColIndA, descrB, nnzB, csrRowPtrB, csrColIndB, descrC, csrRowPtrC, nnzTotalDevHostPtr ); CUSPARSE_CHECK(stat); //------------------------------------------------------------------------------ // ???? 
if (NULL != nnzTotalDevHostPtr){ nnzC = *nnzTotalDevHostPtr;} else{ hipMemcpy(&nnzC, csrRowPtrC+m, sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(&baseC, csrRowPtrC, sizeof(int), hipMemcpyDeviceToHost); cudaCheckErrors("hipMemcpy fail"); nnzC -= baseC;} hipMalloc((void**)&csrColIndC, sizeof(int)*nnzC); hipMalloc((void**)&csrValC, sizeof(float)*nnzC); cudaCheckErrors("hipMalloc fail"); } else{ // nnzTotalDevHostPtr points to host memory int *nnzTotalDevHostPtr = &nnzC; printf("INFO 1 NNZC: %i\n",nnzC); stat = hipsparseSetPointerMode(hndl, HIPSPARSE_POINTER_MODE_HOST); CUSPARSE_CHECK(stat); hipMalloc((void**)&csrRowPtrC, sizeof(int)*(myMat1.M+1)); cudaCheckErrors("hipMalloc fail"); //------------------------------------------------------------------------------ // Determine csrRowPtrC stat = hipsparseXcsrgemmNnz(hndl, transA, transB, myMat1.M, myMat2.N, myMat1.N, descrA, nnzA, csrRowPtrA, csrColIndA, descrB, nnzB, csrRowPtrB, csrColIndB, descrC, csrRowPtrC, nnzTotalDevHostPtr ); CUSPARSE_CHECK(stat); //------------------------------------------------------------------------------ // Gathers nnzC if (NULL != nnzTotalDevHostPtr){ nnzC = *nnzTotalDevHostPtr;} else { hipMemcpy(&nnzC, csrRowPtrC+myMat1.M, sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(&baseC, csrRowPtrC, sizeof(int), hipMemcpyDeviceToHost); cudaCheckErrors("hipMemcpy fail"); nnzC -= baseC; } hipMalloc((void**)&csrColIndC, sizeof(int)*nnzC); hipMalloc((void**)&csrValC, sizeof(double)*nnzC); cudaCheckErrors("hipMalloc fail"); printf("INFO 2 NNZC: %i\n",nnzC); } duration = (( std::clock() - start ) / (double) CLOCKS_PER_SEC) * 1000; printf("nnzTotalDevHostPtr points to host memory : %f ms\n", duration); /////////////////////////// STEP 6 //////////////////////////////////// /* -------------------------------------------------------------------------- */ /* Perform multiplication C = A*B */ /* */ /* -------------------------------------------------------------------------- */ start = std::clock(); if (test){ 
stat = hipsparseScsrgemm(hndl, transA, transB, m, n, k, descrA, nnzA, csrValA, csrRowPtrA, csrColIndA, descrB, nnzB, csrValB, csrRowPtrB, csrColIndB, descrC, csrValC, csrRowPtrC, csrColIndC); CUSPARSE_CHECK(stat); } else{ stat = hipsparseScsrgemm(hndl, transA, transB, myMat1.M, myMat2.N, myMat1.N, descrA, nnzA, csrValA, csrRowPtrA, csrColIndA, descrB, nnzB, csrValB, csrRowPtrB, csrColIndB, descrC, csrValC, csrRowPtrC, csrColIndC); CUSPARSE_CHECK(stat); } duration = (( std::clock() - start ) / (double) CLOCKS_PER_SEC) * 1000; printf("GPU calculation time : %f ms\n", duration); /////////////////////////// STEP 7 //////////////////////////////////// /* -------------------------------------------------------------------------- */ /* Copy result (C) back to host */ /* & test & validate it */ /* -------------------------------------------------------------------------- */ start = std::clock(); if (test){ // copy result (C) back to host h_csrRowPtrC = (int *)malloc((m+1)*sizeof(int)); h_csrColIndC = (int *)malloc(nnzC *sizeof(int)); h_csrValC = (float *)malloc(nnzC *sizeof(float)); if ((h_csrRowPtrC == NULL) || (h_csrColIndC == NULL) || (h_csrValC == NULL)) {printf("malloc fail\n"); return -1;} hipMemcpy(h_csrRowPtrC, csrRowPtrC, (m+1)*sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(h_csrColIndC, csrColIndC, nnzC*sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(h_csrValC, csrValC, nnzC*sizeof(float), hipMemcpyDeviceToHost); cudaCheckErrors("hipMemcpy fail"); duration = (( std::clock() - start ) / (double) CLOCKS_PER_SEC) * 1000; printf("Copy GPU to CPU : %f ms\n", duration); //------------------------------------------------------------------------------ // check result, C = 2A if (nnzC != m) {printf("invalid matrix size C: %d, should be: %d\n", nnzC, m); return -1;} for (int i = 0; i < m; i++){ if (h_csrRowPtrA[i] != h_csrRowPtrC[i]) {printf("A/C row ptr mismatch at %d, A: %d, C: %d\n", i, h_csrRowPtrA[i], h_csrRowPtrC[i]); return -1;} if (h_csrColIndA[i] != 
h_csrColIndC[i]) {printf("A/C col ind mismatch at %d, A: %d, C: %d\n", i, h_csrColIndA[i], h_csrColIndC[i]); return -1;} if ((h_csrValA[i]*2.0f) != h_csrValC[i]) {printf("A/C value mismatch at %d, A: %f, C: %f\n", i, h_csrValA[i]*2.0f, h_csrValC[i]); return -1;} } } else{ // copy result (C) back to host h_csrRowPtrC = (int *)malloc((myMat1.M+1)*sizeof(int)); h_csrColIndC = (int *)malloc(nnzC *sizeof(int)); h_csrValCd = (double *)malloc(nnzC *sizeof(double)); if ((h_csrRowPtrC == NULL) || (h_csrColIndC == NULL) || (h_csrValCd == NULL)) {printf("malloc fail\n"); return -1;} hipMemcpy(h_csrRowPtrC, csrRowPtrC, (myMat1.M+1)*sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(h_csrColIndC, csrColIndC, nnzC*sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(h_csrValCd, csrValC, nnzC*sizeof(double), hipMemcpyDeviceToHost); cudaCheckErrors("hipMemcpy fail"); duration = (( std::clock() - start ) / (double) CLOCKS_PER_SEC) * 1000; printf("Copy GPU to CPU : %f ms\n", duration); //------------------------------------------------------------------------------ //if (nnzC != myMat1.M) {printf("invalid matrix size C: %d, should be: %d\n", nnzC, myMat1.M); return -1;} } duration = (( std::clock() - computeT ) / (double) CLOCKS_PER_SEC) * 1000; printf("===========================================================\n"); printf("RESULT PRODUCT INFO NZ: %i\n",nnzC); printf("REAL TIME TO COMPUTE : %f ms\n", duration); myMat3.I = h_csrRowPtrC; myMat3.J = h_csrColIndC; myMat3.M = myMat1.M+1; myMat3.N = nnzC; myMat3.nz = nnzC; myMat3.val = h_csrValCd; // for (int i=0; i<myMat3.nz; i++) // fprintf(stdout, "%d %d %d\n", myMat3.I[i], myMat3.J[i], myMat3.val[i]); return 0; } inline double exec_time_ms(double time_sc){ return time_sc*1000; } // perform sparse-matrix multiplication C=AxB int main(int argc, char *argv[]){ // int ret_code; // MM_typecode matcode; // FILE *f; // int nz; // int M, N; // int i, *K, *I, *J; // double *val; // if (argc < 3) // { // fprintf(stderr, "Usage: %s 
[martix-market-filename]\n", argv[0]); // exit(1); // } // myMat1 = initMatrix(myMat1,argv[1]); // myMat2 = initMatrix(myMat2,argv[2]); // //printf("TEST :\n"); // //printf("I[nz] : %d\n",myMat1.I[myMat1.nz]); // //printf("I[nz-1] : %d\n",myMat1.I[myMat1.nz-1]); // //printf("J[nz-1] : %d\n",myMat1.J[myMat1.nz-1]); // //printf("val[nz-1] : %20.19g\n",myMat1.val[myMat1.nz-1]); // //printf("I[nz] : %d\n",myMat2.I[myMat2.nz]); // //printf("I[nz-1] : %d\n",myMat2.I[myMat2.nz-1]); // //printf("J[nz-1] : %d\n",myMat2.J[myMat2.nz-1]); // //printf("val[nz-1] : %20.19g\n",myMat2.val[myMat2.nz-1]); // std::clock_t start; // double duration; // printf("---------- COMPUTE K * K ---------------------\n\n"); // start = std::clock(); // compute(myMat1,myMat2); // duration = (( std::clock() - start ) / (double) CLOCKS_PER_SEC) * 1000; // printf("COMPLETE PROCESS 1 TIME : %f ms\n", duration); // printf("\n-----------------------------------------------------------\n\n"); // start = std::clock(); // compute(myMat1,myMat2); // duration = (( std::clock() - start ) / (double) CLOCKS_PER_SEC) * 1000; // printf("COMPLETE PROCESS 2 TIME : %f ms\n", duration); // return 0; /////////////////////////////////////////// /////////////////////////////////////////// char buff[FILENAME_MAX]; GetCurrentDir( buff, FILENAME_MAX ); std::string testdata_dir(buff); testdata_dir.append("/examples/testdata"); std::string myMatrix1 = testdata_dir; std::string myMatrix2 = testdata_dir; myMatrix1.append("/matKeig.mtx"); myMatrix2.append("/matJ1eig.mtx"); bool info = true; int benchmarkNbrRun = 100; typedef double ScalarType; viennacl::tools::timer timer; double exec_time_read,exec_time_read_ublas,exec_time_copy,exec_time; int sizeK = 46659; //20000;// int sizeJ = 31; //30;// /////////////////////////// STEP 1 //////////////////////////////////// /* -------------------------------------------------------------------------- */ /* Init all the variables */ /* 
-------------------------------------------------------------------------- */ //////////////////////////////////////////////////////////////////////////////// /// VIENNACL SPARSE MATRIX viennacl::compressed_matrix<ScalarType> vcl_compressed_JtKJ; viennacl::compressed_matrix<ScalarType> vcl_compressed_K; viennacl::compressed_matrix<ScalarType> vcl_compressed_KJ(sizeK,sizeJ); viennacl::compressed_matrix<ScalarType> vcl_compressed_J; viennacl::compressed_matrix<ScalarType> vcl_compressed_Jt; //////////////////////////////////////////////////////////////////////////////// /// UBLAS SPARSE MATRIX boost::numeric::ublas::compressed_matrix<ScalarType> ublas_K(sizeK,sizeK); boost::numeric::ublas::compressed_matrix<ScalarType> ublas_J(sizeK,sizeJ); boost::numeric::ublas::compressed_matrix<ScalarType> ublas_Jt(sizeJ,sizeK); /////////////////////////// STEP 2 //////////////////////////////////// /* -------------------------------------------------------------------------- */ /* Read from file the sparse matrix */ /* (in the Matrix Market format) */ /* -------------------------------------------------------------------------- */ //////////////////////////////////////////////////////////////////////////////// /// WITH UBLAS timer.start(); if (!viennacl::io::read_matrix_market_file(ublas_K, myMatrix1)) { std::cout << "Error reading Matrix file" << std::endl; return 0; } //unsigned int cg_mat_size = cg_mat.size(); std::cout << "done reading K" << std::endl; if (!viennacl::io::read_matrix_market_file(ublas_J, myMatrix2)) { std::cout << "Error reading Matrix file" << std::endl; return 0; } //unsigned int cg_mat_size = cg_mat.size(); std::cout << "done reading J" << std::endl; exec_time_read_ublas = timer.get(); /////////////////////////// STEP 3 //////////////////////////////////// /* -------------------------------------------------------------------------- */ /* Copy matrix from CPU to GPU */ /* -------------------------------------------------------------------------- */ 
timer.start(); // With UBlas Matrix ublas_Jt = boost::numeric::ublas::trans(ublas_J); viennacl::copy(ublas_J, vcl_compressed_J); viennacl::copy(ublas_Jt, vcl_compressed_Jt); timer.start(); viennacl::copy(ublas_K, vcl_compressed_K); exec_time_copy = timer.get(); if (info) { std::cout << "\n" <<" ublas_K (rows,cols) : "<< ublas_K.size1() << " " << ublas_K.size2() << std::endl; std::cout << " ublas_J (rows,cols) : "<< ublas_J.size1() << " " << ublas_J.size2() << std::endl; std::cout << " ublas_Jt (rows,cols) : "<< ublas_Jt.size1() << " " << ublas_Jt.size2() << std::endl; std::cout << " ----------------------------------------------\n" << std::endl; std::cout << " Time to copy CPU->GPU : " << exec_time_ms(exec_time_copy) << " ms\n" << std::endl; } /////////////////////////// STEP 4 //////////////////////////////////// /* -------------------------------------------------------------------------- */ /* Matrix operations */ /* Jt * K J */ /* -------------------------------------------------------------------------- */ // On GPU with ViennaCL Compressed Matrix std::cout << " ------- Jt*K*J product on GPU ----------" << std::endl; viennacl::backend::finish(); timer.start(); for (int runs=0; runs<benchmarkNbrRun; ++runs) { vcl_compressed_KJ = viennacl::linalg::prod(vcl_compressed_K,vcl_compressed_J); vcl_compressed_JtKJ = viennacl::linalg::prod(vcl_compressed_Jt,vcl_compressed_KJ); } viennacl::backend::finish(); exec_time = timer.get(); std::cout << " GPU time align1: " << exec_time_ms(exec_time) << " ms\n"<< std::endl; return 0; }
d63314a618d79171356dc9818f5a4f622d9f4e9f.cu
#include <cusparse_v2.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>   // strlen/strcpy/strcat (previously pulled in transitively)
#include <ctime>
#include <string>
#include <iostream>

#ifdef WINDOWS
#include <direct.h>
#define GetCurrentDir _getcwd
#else
#include <unistd.h>
#define GetCurrentDir getcwd
#endif

extern "C" {
#include "component/libraries/mmio.h"
}

///////////////////////////////////////////
#include <boost/numeric/ublas/triangular.hpp>
#include <boost/numeric/ublas/vector.hpp>
#include <boost/numeric/ublas/vector_proxy.hpp>
#include <boost/numeric/ublas/matrix_sparse.hpp>
#include <boost/numeric/ublas/operation_sparse.hpp>
#include <boost/numeric/ublas/lu.hpp>

#include "viennacl/scalar.hpp"
#include "viennacl/vector.hpp"
#include "viennacl/tools/timer.hpp"
#include "viennacl/coordinate_matrix.hpp"
#include "viennacl/compressed_matrix.hpp"
#include "viennacl/ell_matrix.hpp"
#include "viennacl/hyb_matrix.hpp"
#include "viennacl/sliced_ell_matrix.hpp"
#include "viennacl/linalg/prod.hpp"
#include "viennacl/linalg/norm_2.hpp"
#include "viennacl/io/matrix_market.hpp"
///////////////////////////////////////////

// Minimal sparse-matrix container filled from a Matrix Market file.
// NOTE(review): compute() feeds 'I' directly to cuSPARSE as a CSR row
// pointer, but initMatrix() fills it with per-nonzero row indices (COO
// style, with I[nz] = M appended).  The two layouts are not equivalent --
// confirm which one is actually intended.
struct MyCSRMat {
    int    *I;   // row entry per nonzero (length nz+1; I[nz] holds M)
    int    *J;   // column index per nonzero
    double *val; // value per nonzero
    int     nz;  // number of nonzeros
    int     M;   // number of rows
    int     N;   // number of columns
} myMat1, myMat2, myMat3;

// error check macros
#define CUSPARSE_CHECK(x) {cusparseStatus_t _c=x; if (_c != CUSPARSE_STATUS_SUCCESS) {printf("cusparse fail: %d, line: %d\n", (int)_c, __LINE__); exit(-1);}}

#define cudaCheckErrors(msg) \
    do { \
        cudaError_t __err = cudaGetLastError(); \
        if (__err != cudaSuccess) { \
            fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
                    msg, cudaGetErrorString(__err), \
                    __FILE__, __LINE__); \
            fprintf(stderr, "*** FAILED - ABORTING\n"); \
            exit(1); \
        } \
    } while (0)

// Returns a freshly new[]-allocated copy of 'array' with 'a' appended.
// The caller owns the returned buffer (delete[]).
char* appendCharToCharArray(char* array, char a)
{
    size_t len = strlen(array);
    char* ret = new char[len + 2];
    strcpy(ret, array);
    ret[len] = a;
    ret[len + 1] = '\0';
    return ret;
}

// Reads the Matrix Market file <cwd>/examples/testdata/<myMatName> into
// myMat (by value in, by value out).  Exits the process on any I/O or
// format error, matching the original behaviour.
MyCSRMat initMatrix(MyCSRMat myMat, const char * myMatName)
{
    int ret_code;
    MM_typecode matcode;
    FILE *f;
    int nz;
    int M, N;
    int i, *K, *I, *J;
    double *val;

    char buff[FILENAME_MAX];
    GetCurrentDir(buff, FILENAME_MAX);
    const char string[] = "/examples/testdata/";
    strcat(buff, string);
    strcat(buff, myMatName);
    // Print through "%s": passing user-controlled text as the format string
    // (printf(buff)) is a format-string bug if the path contains '%'.
    printf("matrix %s read at : \n%s\n", myMatName, buff);

    if ((f = fopen(buff, "r")) == NULL)
        exit(1);

    if (mm_read_banner(f, &matcode) != 0)
    {
        printf("Could not process Matrix Market banner.\n");
        exit(1);
    }

    /* This is how one can screen matrix types if their application  */
    /* only supports a subset of the Matrix Market data types.       */
    if (mm_is_complex(matcode) && mm_is_matrix(matcode) && mm_is_sparse(matcode))
    {
        printf("Sorry, this application does not support ");
        printf("Market Market type: [%s]\n", mm_typecode_to_str(matcode));
        exit(1);
    }

    /* find out size of sparse matrix .... */
    if ((ret_code = mm_read_mtx_crd_size(f, &M, &N, &nz)) != 0)
        exit(1);

    /* Reserve memory for the matrix.  K gets nz+1 ENTRIES because I[nz] = M
       is stored one past the last nonzero below.  This fixes two bugs in the
       original code:
         - "malloc(nz+1 * sizeof(int))" allocated nz*4+1 bytes (precedence),
         - K itself only held nz ints, so "I[nz] = M" wrote out of bounds,
       and it drops the separate allocation for I that was immediately leaked
       by "I = K;". */
    K   = (int *)    malloc((nz + 1) * sizeof(int));
    J   = (int *)    malloc(nz * sizeof(int));
    val = (double *) malloc(nz * sizeof(double));
    if (K == NULL || J == NULL || val == NULL)
    {
        printf("malloc fail\n");
        exit(1);
    }

    /* NOTE: when reading in doubles, ANSI C requires the use of the "l"  */
    /* specifier as in "%lg", "%lf", "%le", otherwise errors will occur   */
    /* (ANSI C X3.159-1989, Sec. 4.9.6.2, p. 136 lines 13-15)             */
    for (i = 0; i < nz; i++)
    {
        if (fscanf(f, "%d %d %lg\n", &K[i], &J[i], &val[i]) != 3)
        {
            printf("unexpected end of matrix data\n");
            exit(1);
        }
        // NOTE(review): Matrix Market indices are 1-based, and compute()
        // configures CUSPARSE_INDEX_BASE_ZERO, yet the original statements
        // here ("K[i]; J[i];") were no-ops -- no base adjustment happens.
        // Behaviour is intentionally kept unchanged; confirm intended base.
    }
    I = K;      // reuse the row array in place
    I[nz] = M;  // sentinel: row count stored one past the last entry

    if (f != stdin)
        fclose(f);

    /************************/
    /* now write out matrix */
    /************************/
    mm_write_banner(stdout, matcode);
    mm_write_mtx_crd_size(stdout, M, N, nz);

    myMat.I   = I;
    myMat.J   = J;
    myMat.M   = M;
    myMat.N   = N;
    myMat.nz  = nz;
    myMat.val = val;
    return myMat;
}

// Computes C = A * B with the legacy cuSPARSE csrgemm API and leaves host
// copies of the result in the global myMat3.
//   test == true  : synthetic single-precision matrices A (MxK) and B = 2I
//                   are generated, multiplied and validated (C must equal 2A).
//   test == false : the double-precision matrices myMat1/myMat2 are used.
// Returns 0 on success, -1 on host allocation/validation failure; CUDA and
// cuSPARSE errors abort the process via the check macros.
int compute(MyCSRMat myMat1, MyCSRMat myMat2)
{
    bool test = false; // TRUE => example | FALSE => loaded matrices

    // Synthetic-problem sizes; generation and validation depend on these
    // relationships:
    int N = 50000;
    int SCL = 2;
    int K = N;
    int M = SCL * N; // A: MxK  B: KxN  C: MxN

    std::clock_t start;
    double duration, computeT;

    cusparseStatus_t stat;
    cusparseHandle_t hndl;
    cusparseMatDescr_t descrA, descrB, descrC;

    int *csrRowPtrA = NULL, *csrRowPtrB = NULL, *csrRowPtrC = NULL;
    int *csrColIndA = NULL, *csrColIndB = NULL, *csrColIndC = NULL;
    int *h_csrRowPtrA = NULL, *h_csrRowPtrB = NULL, *h_csrRowPtrC = NULL;
    int *h_csrColIndA = NULL, *h_csrColIndB = NULL, *h_csrColIndC = NULL;
    // Single-precision buffers: used only by the synthetic test path.
    float *csrValA = NULL, *csrValB = NULL, *csrValC = NULL;
    float *h_csrValA = NULL, *h_csrValB = NULL, *h_csrValC = NULL;
    // Double-precision buffers: used by the loaded-matrix path.  The original
    // code stored double data in float* buffers and then called
    // cusparseScsrgemm on it, reinterpreting doubles as floats (garbage);
    // this version keeps the types consistent and calls cusparseDcsrgemm.
    double *csrValAd = NULL, *csrValBd = NULL, *csrValCd = NULL;
    double *h_csrValCd = NULL; // NULL-init: the test path never fills it
    int nnzA, nnzB, nnzC = 0;  // 0-init: printed before being computed below
    int m, n, k;
    m = M; n = N; k = K;

    if (test) {
        /////////////////////////// STEP 1 ////////////////////////////////////
        /* generate A (MxK, ones on a 2:1 diagonal pattern) and B = 2I (KxN) */
        start = std::clock();
        nnzA = m;
        nnzB = n;
        h_csrRowPtrA = (int *)  malloc((nnzA + 1) * sizeof(int));
        h_csrColIndA = (int *)  malloc(nnzA * sizeof(int));
        h_csrValA    = (float *)malloc(nnzA * sizeof(float));
        h_csrRowPtrB = (int *)  malloc((nnzB + 1) * sizeof(int));
        h_csrColIndB = (int *)  malloc(nnzB * sizeof(int));
        h_csrValB    = (float *)malloc(nnzB * sizeof(float));
        duration = ((std::clock() - start) / (double)CLOCKS_PER_SEC) * 1000;
        printf("Host Malloc : %f ms\n", duration);
        //----------------------------------------------------------------------
        start = std::clock();
        if ((h_csrRowPtrA == NULL) || (h_csrRowPtrB == NULL) ||
            (h_csrColIndA == NULL) || (h_csrColIndB == NULL) ||
            (h_csrValA == NULL)    || (h_csrValB == NULL))
        { printf("malloc fail\n"); return -1; }
        for (int i = 0; i < nnzA; i++) {
            h_csrValA[i]    = 1.0f;
            h_csrRowPtrA[i] = i;
            h_csrColIndA[i] = i / SCL;
            if (i < nnzB) {
                h_csrValB[i]    = 2.0f;
                h_csrRowPtrB[i] = i;
                h_csrColIndB[i] = i;
            }
        }
        h_csrRowPtrA[nnzA] = nnzA;
        h_csrRowPtrB[nnzB] = nnzB;
        duration = ((std::clock() - start) / (double)CLOCKS_PER_SEC) * 1000;
        printf("CSR Matrix Generation : %f ms\n", duration);
    }
    else {
        nnzA = myMat1.nz;
        nnzB = myMat2.nz;
    }

    /////////////////////////// STEP 2 ////////////////////////////////////
    /* Allocate device memory */
    if (test) {
        start = std::clock();
        cudaMalloc(&csrRowPtrA, (m + 1) * sizeof(int));
        duration = ((std::clock() - start) / (double)CLOCKS_PER_SEC) * 1000;
        printf("First cudaMalloc : %f ms\n", duration);
        //----------------------------------------------------------------------
        start = std::clock();
        cudaMalloc(&csrColIndA, nnzA * sizeof(int));
        cudaMalloc(&csrValA, nnzA * sizeof(float));
        cudaMalloc(&csrRowPtrB, (nnzB + 1) * sizeof(int));
        cudaMalloc(&csrColIndB, nnzB * sizeof(int));
        cudaMalloc(&csrValB, nnzB * sizeof(float));
        duration = ((std::clock() - start) / (double)CLOCKS_PER_SEC) * 1000;
        printf("cudaMalloc csrRowPtrB|csrColIndA/B|csrValA/B : %f ms\n", duration);
    }
    else {
        start = std::clock();
        // NOTE(review): the "row pointer" buffers are sized from nz+1 rather
        // than the row count (M+1).  Kept as-is because it matches the
        // I-array layout produced by initMatrix() -- see the struct note.
        cudaMalloc(&csrRowPtrA, (myMat1.nz + 1) * sizeof(int));
        duration = ((std::clock() - start) / (double)CLOCKS_PER_SEC) * 1000;
        printf("cudaMalloc csrRowPtrA : %f ms\n", duration);
        //----------------------------------------------------------------------
        start = std::clock();
        cudaMalloc(&csrColIndA, myMat1.nz * sizeof(int));
        cudaMalloc(&csrValAd, myMat1.nz * sizeof(double));
        cudaMalloc(&csrRowPtrB, (myMat2.nz + 1) * sizeof(int));
        cudaMalloc(&csrColIndB, myMat2.nz * sizeof(int));
        cudaMalloc(&csrValBd, myMat2.nz * sizeof(double));
        duration = ((std::clock() - start) / (double)CLOCKS_PER_SEC) * 1000;
        printf("cudaMalloc csrRowPtrB|csrColIndA/B|csrValA/B : %f ms\n", duration);
    }

    /////////////////////////// STEP 3 ////////////////////////////////////
    /* Copy the data from the host (CPU) to the device (GPU) */
    start = std::clock();
    computeT = start;
    if (test) {
        cudaCheckErrors("cudaMalloc fail");
        cudaMemcpy(csrRowPtrA, h_csrRowPtrA, (nnzA + 1) * sizeof(int), cudaMemcpyHostToDevice);
        cudaMemcpy(csrColIndA, h_csrColIndA, nnzA * sizeof(int), cudaMemcpyHostToDevice);
        cudaMemcpy(csrValA, h_csrValA, nnzA * sizeof(float), cudaMemcpyHostToDevice);
        cudaMemcpy(csrRowPtrB, h_csrRowPtrB, (nnzB + 1) * sizeof(int), cudaMemcpyHostToDevice);
        cudaMemcpy(csrColIndB, h_csrColIndB, nnzB * sizeof(int), cudaMemcpyHostToDevice);
        cudaMemcpy(csrValB, h_csrValB, nnzB * sizeof(float), cudaMemcpyHostToDevice);
        cudaCheckErrors("cudaMemcpy fail");
    }
    else {
        cudaCheckErrors("cudaMalloc fail");
        cudaMemcpy(csrRowPtrA, myMat1.I, (myMat1.nz + 1) * sizeof(int), cudaMemcpyHostToDevice);
        cudaMemcpy(csrColIndA, myMat1.J, myMat1.nz * sizeof(int), cudaMemcpyHostToDevice);
        cudaMemcpy(csrValAd, myMat1.val, myMat1.nz * sizeof(double), cudaMemcpyHostToDevice);
        cudaMemcpy(csrRowPtrB, myMat2.I, (myMat2.nz + 1) * sizeof(int), cudaMemcpyHostToDevice);
        cudaMemcpy(csrColIndB, myMat2.J, myMat2.nz * sizeof(int), cudaMemcpyHostToDevice);
        cudaMemcpy(csrValBd, myMat2.val, myMat2.nz * sizeof(double), cudaMemcpyHostToDevice);
        cudaCheckErrors("cudaMemcpy fail");
    }
    duration = ((std::clock() - start) / (double)CLOCKS_PER_SEC) * 1000;
    printf("Copy Data from Host to Device : %f ms\n", duration);

    /////////////////////////// STEP 4 ////////////////////////////////////
    /* Create the cuSPARSE handle and set matrix descriptors:
       general matrices, zero-based indexing, no transposition. */
    start = std::clock();
    CUSPARSE_CHECK(cusparseCreate(&hndl));
    duration = ((std::clock() - start) / (double)CLOCKS_PER_SEC) * 1000;
    printf("cusparseCreate(&hndl) : %f ms\n", duration);
    //----------------------------------------------------------------------
    start = std::clock();
    stat = cusparseCreateMatDescr(&descrA); CUSPARSE_CHECK(stat);
    stat = cusparseCreateMatDescr(&descrB); CUSPARSE_CHECK(stat);
    stat = cusparseCreateMatDescr(&descrC); CUSPARSE_CHECK(stat);
    duration = ((std::clock() - start) / (double)CLOCKS_PER_SEC) * 1000;
    printf("cusparseCreateMatDescr(&descrA/B/C) : %f ms\n", duration);
    //----------------------------------------------------------------------
    start = std::clock();
    stat = cusparseSetMatType(descrA, CUSPARSE_MATRIX_TYPE_GENERAL); CUSPARSE_CHECK(stat);
    stat = cusparseSetMatType(descrB, CUSPARSE_MATRIX_TYPE_GENERAL); CUSPARSE_CHECK(stat);
    stat = cusparseSetMatType(descrC, CUSPARSE_MATRIX_TYPE_GENERAL); CUSPARSE_CHECK(stat);
    stat = cusparseSetMatIndexBase(descrA, CUSPARSE_INDEX_BASE_ZERO); CUSPARSE_CHECK(stat);
    stat = cusparseSetMatIndexBase(descrB, CUSPARSE_INDEX_BASE_ZERO); CUSPARSE_CHECK(stat);
    stat = cusparseSetMatIndexBase(descrC, CUSPARSE_INDEX_BASE_ZERO); CUSPARSE_CHECK(stat);
    cusparseOperation_t transA = CUSPARSE_OPERATION_NON_TRANSPOSE;
    cusparseOperation_t transB = CUSPARSE_OPERATION_NON_TRANSPOSE;
    duration = ((std::clock() - start) / (double)CLOCKS_PER_SEC) * 1000;
    printf("Set cusparse matrix types : %f ms\n", duration);

    /////////////////////////// STEP 5 ////////////////////////////////////
    /* Determine csrRowPtrC and the total number of nonzero elements of C. */
    start = std::clock();
    int baseC;
    if (test) {
        // nnzTotalDevHostPtr points to host memory
        int *nnzTotalDevHostPtr = &nnzC;
        stat = cusparseSetPointerMode(hndl, CUSPARSE_POINTER_MODE_HOST);
        CUSPARSE_CHECK(stat);
        cudaMalloc((void**)&csrRowPtrC, sizeof(int) * (m + 1));
        cudaCheckErrors("cudaMalloc fail");
        stat = cusparseXcsrgemmNnz(hndl, transA, transB, m, n, k,
                                   descrA, nnzA, csrRowPtrA, csrColIndA,
                                   descrB, nnzB, csrRowPtrB, csrColIndB,
                                   descrC, csrRowPtrC, nnzTotalDevHostPtr);
        CUSPARSE_CHECK(stat);
        // In host pointer mode nnzC is returned directly; otherwise derive it
        // from the last row pointer minus the index base.
        if (NULL != nnzTotalDevHostPtr) {
            nnzC = *nnzTotalDevHostPtr;
        } else {
            cudaMemcpy(&nnzC, csrRowPtrC + m, sizeof(int), cudaMemcpyDeviceToHost);
            cudaMemcpy(&baseC, csrRowPtrC, sizeof(int), cudaMemcpyDeviceToHost);
            cudaCheckErrors("cudaMemcpy fail");
            nnzC -= baseC;
        }
        cudaMalloc((void**)&csrColIndC, sizeof(int) * nnzC);
        cudaMalloc((void**)&csrValC, sizeof(float) * nnzC);
        cudaCheckErrors("cudaMalloc fail");
    }
    else {
        // nnzTotalDevHostPtr points to host memory
        int *nnzTotalDevHostPtr = &nnzC;
        printf("INFO 1 NNZC: %i\n", nnzC);
        stat = cusparseSetPointerMode(hndl, CUSPARSE_POINTER_MODE_HOST);
        CUSPARSE_CHECK(stat);
        cudaMalloc((void**)&csrRowPtrC, sizeof(int) * (myMat1.M + 1));
        cudaCheckErrors("cudaMalloc fail");
        // Determine csrRowPtrC
        stat = cusparseXcsrgemmNnz(hndl, transA, transB, myMat1.M, myMat2.N, myMat1.N,
                                   descrA, nnzA, csrRowPtrA, csrColIndA,
                                   descrB, nnzB, csrRowPtrB, csrColIndB,
                                   descrC, csrRowPtrC, nnzTotalDevHostPtr);
        CUSPARSE_CHECK(stat);
        // Gathers nnzC
        if (NULL != nnzTotalDevHostPtr) {
            nnzC = *nnzTotalDevHostPtr;
        } else {
            cudaMemcpy(&nnzC, csrRowPtrC + myMat1.M, sizeof(int), cudaMemcpyDeviceToHost);
            cudaMemcpy(&baseC, csrRowPtrC, sizeof(int), cudaMemcpyDeviceToHost);
            cudaCheckErrors("cudaMemcpy fail");
            nnzC -= baseC;
        }
        cudaMalloc((void**)&csrColIndC, sizeof(int) * nnzC);
        cudaMalloc((void**)&csrValCd, sizeof(double) * nnzC);
        cudaCheckErrors("cudaMalloc fail");
        printf("INFO 2 NNZC: %i\n", nnzC);
    }
    duration = ((std::clock() - start) / (double)CLOCKS_PER_SEC) * 1000;
    printf("nnzTotalDevHostPtr points to host memory : %f ms\n", duration);

    /////////////////////////// STEP 6 ////////////////////////////////////
    /* Perform multiplication C = A*B (float for the synthetic test,
       double -- cusparseDcsrgemm -- for the loaded matrices). */
    start = std::clock();
    if (test) {
        stat = cusparseScsrgemm(hndl, transA, transB, m, n, k,
                                descrA, nnzA, csrValA, csrRowPtrA, csrColIndA,
                                descrB, nnzB, csrValB, csrRowPtrB, csrColIndB,
                                descrC, csrValC, csrRowPtrC, csrColIndC);
        CUSPARSE_CHECK(stat);
    }
    else {
        stat = cusparseDcsrgemm(hndl, transA, transB, myMat1.M, myMat2.N, myMat1.N,
                                descrA, nnzA, csrValAd, csrRowPtrA, csrColIndA,
                                descrB, nnzB, csrValBd, csrRowPtrB, csrColIndB,
                                descrC, csrValCd, csrRowPtrC, csrColIndC);
        CUSPARSE_CHECK(stat);
    }
    duration = ((std::clock() - start) / (double)CLOCKS_PER_SEC) * 1000;
    printf("GPU calculation time : %f ms\n", duration);

    /////////////////////////// STEP 7 ////////////////////////////////////
    /* Copy result (C) back to host and, in test mode, validate it. */
    start = std::clock();
    if (test) {
        // copy result (C) back to host
        h_csrRowPtrC = (int *)  malloc((m + 1) * sizeof(int));
        h_csrColIndC = (int *)  malloc(nnzC * sizeof(int));
        h_csrValC    = (float *)malloc(nnzC * sizeof(float));
        if ((h_csrRowPtrC == NULL) || (h_csrColIndC == NULL) || (h_csrValC == NULL))
        { printf("malloc fail\n"); return -1; }
        cudaMemcpy(h_csrRowPtrC, csrRowPtrC, (m + 1) * sizeof(int), cudaMemcpyDeviceToHost);
        cudaMemcpy(h_csrColIndC, csrColIndC, nnzC * sizeof(int), cudaMemcpyDeviceToHost);
        cudaMemcpy(h_csrValC, csrValC, nnzC * sizeof(float), cudaMemcpyDeviceToHost);
        cudaCheckErrors("cudaMemcpy fail");
        duration = ((std::clock() - start) / (double)CLOCKS_PER_SEC) * 1000;
        printf("Copy GPU to CPU : %f ms\n", duration);
        //----------------------------------------------------------------------
        // check result, C = 2A
        if (nnzC != m) { printf("invalid matrix size C: %d, should be: %d\n", nnzC, m); return -1; }
        for (int i = 0; i < m; i++) {
            if (h_csrRowPtrA[i] != h_csrRowPtrC[i])
            { printf("A/C row ptr mismatch at %d, A: %d, C: %d\n", i, h_csrRowPtrA[i], h_csrRowPtrC[i]); return -1; }
            if (h_csrColIndA[i] != h_csrColIndC[i])
            { printf("A/C col ind mismatch at %d, A: %d, C: %d\n", i, h_csrColIndA[i], h_csrColIndC[i]); return -1; }
            if ((h_csrValA[i] * 2.0f) != h_csrValC[i])
            { printf("A/C value mismatch at %d, A: %f, C: %f\n", i, h_csrValA[i] * 2.0f, h_csrValC[i]); return -1; }
        }
    }
    else {
        // copy result (C) back to host
        h_csrRowPtrC = (int *)   malloc((myMat1.M + 1) * sizeof(int));
        h_csrColIndC = (int *)   malloc(nnzC * sizeof(int));
        h_csrValCd   = (double *)malloc(nnzC * sizeof(double));
        if ((h_csrRowPtrC == NULL) || (h_csrColIndC == NULL) || (h_csrValCd == NULL))
        { printf("malloc fail\n"); return -1; }
        cudaMemcpy(h_csrRowPtrC, csrRowPtrC, (myMat1.M + 1) * sizeof(int), cudaMemcpyDeviceToHost);
        cudaMemcpy(h_csrColIndC, csrColIndC, nnzC * sizeof(int), cudaMemcpyDeviceToHost);
        cudaMemcpy(h_csrValCd, csrValCd, nnzC * sizeof(double), cudaMemcpyDeviceToHost);
        cudaCheckErrors("cudaMemcpy fail");
        duration = ((std::clock() - start) / (double)CLOCKS_PER_SEC) * 1000;
        printf("Copy GPU to CPU : %f ms\n", duration);
    }
    duration = ((std::clock() - computeT) / (double)CLOCKS_PER_SEC) * 1000;
    printf("===========================================================\n");
    printf("RESULT PRODUCT INFO NZ: %i\n", nnzC);
    printf("REAL TIME TO COMPUTE : %f ms\n", duration);

    // Hand the host-side result to the global output matrix.
    // NOTE(review): M is set to myMat1.M+1 and N to nnzC in the original --
    // kept unchanged, but both look like row-pointer bookkeeping, not true
    // matrix dimensions.  In test mode 'val' is NULL (float result lives in
    // h_csrValC); the original left it uninitialized there.
    myMat3.I   = h_csrRowPtrC;
    myMat3.J   = h_csrColIndC;
    myMat3.M   = myMat1.M + 1;
    myMat3.N   = nnzC;
    myMat3.nz  = nnzC;
    myMat3.val = h_csrValCd;

    // Release device resources (the original leaked all of them; cudaFree on
    // a NULL pointer is a documented no-op, so unconditional frees are safe).
    cudaFree(csrRowPtrA); cudaFree(csrColIndA); cudaFree(csrValA); cudaFree(csrValAd);
    cudaFree(csrRowPtrB); cudaFree(csrColIndB); cudaFree(csrValB); cudaFree(csrValBd);
    cudaFree(csrRowPtrC); cudaFree(csrColIndC); cudaFree(csrValC); cudaFree(csrValCd);
    cusparseDestroyMatDescr(descrA);
    cusparseDestroyMatDescr(descrB);
    cusparseDestroyMatDescr(descrC);
    cusparseDestroy(hndl);
    return 0;
}

// Converts seconds to milliseconds.
inline double exec_time_ms(double time_sc)
{
    return time_sc * 1000;
}

// Benchmarks the sparse triple product Jt * (K * J) on the GPU with ViennaCL:
// reads K and J from Matrix Market files under <cwd>/examples/testdata,
// copies them to the device and times 'benchmarkNbrRun' product iterations.
// (A large block of commented-out driver code for initMatrix()/compute()
// was removed from the original for clarity.)
int main(int argc, char *argv[])
{
    (void)argc; (void)argv; // file paths are currently hard-coded below

    char buff[FILENAME_MAX];
    GetCurrentDir(buff, FILENAME_MAX);
    std::string testdata_dir(buff);
    testdata_dir.append("/examples/testdata");
    std::string myMatrix1 = testdata_dir;
    std::string myMatrix2 = testdata_dir;
    myMatrix1.append("/matKeig.mtx");
    myMatrix2.append("/matJ1eig.mtx");

    bool info = true;
    int benchmarkNbrRun = 100;

    typedef double ScalarType;
    viennacl::tools::timer timer;
    double exec_time_copy, exec_time;
    int sizeK = 46659; // rows/cols of K   //20000;//
    int sizeJ = 31;    // cols of J        //30;//

    /////////////////////////// STEP 1 ////////////////////////////////////
    /* Init all the matrices (GPU and CPU side). */
    viennacl::compressed_matrix<ScalarType> vcl_compressed_JtKJ;
    viennacl::compressed_matrix<ScalarType> vcl_compressed_K;
    viennacl::compressed_matrix<ScalarType> vcl_compressed_KJ(sizeK, sizeJ);
    viennacl::compressed_matrix<ScalarType> vcl_compressed_J;
    viennacl::compressed_matrix<ScalarType> vcl_compressed_Jt;

    boost::numeric::ublas::compressed_matrix<ScalarType> ublas_K(sizeK, sizeK);
    boost::numeric::ublas::compressed_matrix<ScalarType> ublas_J(sizeK, sizeJ);
    boost::numeric::ublas::compressed_matrix<ScalarType> ublas_Jt(sizeJ, sizeK);

    /////////////////////////// STEP 2 ////////////////////////////////////
    /* Read the sparse matrices (Matrix Market format) into uBLAS storage. */
    if (!viennacl::io::read_matrix_market_file(ublas_K, myMatrix1))
    {
        std::cout << "Error reading Matrix file" << std::endl;
        return 0;
    }
    std::cout << "done reading K" << std::endl;
    if (!viennacl::io::read_matrix_market_file(ublas_J, myMatrix2))
    {
        std::cout << "Error reading Matrix file" << std::endl;
        return 0;
    }
    std::cout << "done reading J" << std::endl;

    /////////////////////////// STEP 3 ////////////////////////////////////
    /* Copy matrices from CPU to GPU (only the copy of K is timed,
       matching the original's second timer.start()). */
    ublas_Jt = boost::numeric::ublas::trans(ublas_J);
    viennacl::copy(ublas_J, vcl_compressed_J);
    viennacl::copy(ublas_Jt, vcl_compressed_Jt);
    timer.start();
    viennacl::copy(ublas_K, vcl_compressed_K);
    exec_time_copy = timer.get();

    if (info)
    {
        std::cout << "\n" << " ublas_K (rows,cols) : " << ublas_K.size1() << " " << ublas_K.size2() << std::endl;
        std::cout << " ublas_J (rows,cols) : " << ublas_J.size1() << " " << ublas_J.size2() << std::endl;
        std::cout << " ublas_Jt (rows,cols) : " << ublas_Jt.size1() << " " << ublas_Jt.size2() << std::endl;
        std::cout << " ----------------------------------------------\n" << std::endl;
        std::cout << " Time to copy CPU->GPU : " << exec_time_ms(exec_time_copy) << " ms\n" << std::endl;
    }

    /////////////////////////// STEP 4 ////////////////////////////////////
    /* Matrix operations: Jt * K * J, repeated benchmarkNbrRun times.
       backend::finish() fences the device so the timer brackets real work. */
    std::cout << " ------- Jt*K*J product on GPU ----------" << std::endl;
    viennacl::backend::finish();
    timer.start();
    for (int runs = 0; runs < benchmarkNbrRun; ++runs)
    {
        vcl_compressed_KJ   = viennacl::linalg::prod(vcl_compressed_K, vcl_compressed_J);
        vcl_compressed_JtKJ = viennacl::linalg::prod(vcl_compressed_Jt, vcl_compressed_KJ);
    }
    viennacl::backend::finish();
    exec_time = timer.get();
    std::cout << " GPU time align1: " << exec_time_ms(exec_time) << " ms\n" << std::endl;

    return 0;
}
d66ea9a86ed768d9c0743be3a7deb9317db9c28f.hip
// !!! This is a file automatically generated by hipify!!! /* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @generated from sparse/blas/zgeisai_8.cu, normal z -> s, Sun Nov 20 20:20:43 2016 */ #include "magmasparse_internal.h" #define PRECISION_s #define REAL #define BLOCKSIZE 8 #define WARP_SIZE 8 #define WRP 8 #define WRQ 4 #include <hip/hip_runtime.h> // for TORCH_HIP_VERSION #if (TORCH_HIP_VERSION >= 7000) __device__ void strsv_lower_8kernel_general(float *dA, float *dB, int *sizes) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB[ 2 ]; float rA[ 2 ]; int n; int k; int N = sizes[j]; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. #pragma unroll for (n = 0; n < 2; n++) rB[n] = dB[n*WARP_SIZE+idn]; // Triangular solve in regs. #pragma unroll for (k = 0; k < N; k++) { #pragma unroll for (n = 0; n < 2; n++) rA[n] = dA[k*WARP_SIZE+n*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB[k/WARP_SIZE] /= rA[k/WARP_SIZE]; float top = __shfl(rB[k/WARP_SIZE], k%WARP_SIZE); #pragma unroll for (n = 0; n < 2; n++) if (n*WARP_SIZE+idn > k) rB[n] -= (top*rA[n]); } // Drop B to dev mem. #pragma unroll for (n = 0; n < 2; n++) if (n*WARP_SIZE+idn < N) dB[n*WARP_SIZE+idn] = rB[n]; #endif } __device__ void strsv_upper_8kernel_general(float *dA, float *dB, int *sizes) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB[ 2 ]; float rA[ 2 ]; int n; int N = sizes[j]; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. #pragma unroll for (n = 0; n < 2; n++) rB[n] = dB[n*WARP_SIZE+idn]; // Triangular solve in regs. 
#pragma unroll for (int k = N-1; k > -1; k--) { #pragma unroll for (n = 0; n < 2; n++) rA[n] = dA[k*WARP_SIZE+n*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB[k/WARP_SIZE] /= rA[k/WARP_SIZE]; float top = __shfl(rB[k/WARP_SIZE], k%WARP_SIZE); #pragma unroll for (n = 0; n < 2; n++) if (n*WARP_SIZE+idn < k) rB[n] -= (top*rA[n]); } // Drop B to dev mem. #pragma unroll for (n = 0; n < 2; n++) if (n*WARP_SIZE+idn < N) dB[n*WARP_SIZE+idn] = rB[n]; #endif } __device__ void strsv_lower_8kernel_1(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 1; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = __shfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_8kernel_2(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 2; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = __shfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_8kernel_3(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. 
#pragma unroll for (int k = 0; k < 3; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = __shfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_8kernel_4(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 4; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = __shfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_8kernel_5(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 5; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = __shfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_8kernel_6(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 6; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = __shfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __device__ void strsv_lower_8kernel_7(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 7; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = __shfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_8kernel_8(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 8; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = __shfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __global__ void strsv_lower_8kernel_switch(float *dA, float *dB, int *sizes, int num_rows ) { int j = blockIdx.y * gridDim.x + blockIdx.x; if (j < num_rows) { int N = sizes[j]; switch( N ) { case 1: strsv_lower_8kernel_1( dA, dB ); break; case 2: strsv_lower_8kernel_2( dA, dB ); break; case 3: strsv_lower_8kernel_3( dA, dB ); break; case 4: strsv_lower_8kernel_4( dA, dB ); break; case 5: strsv_lower_8kernel_5( dA, dB ); break; case 6: strsv_lower_8kernel_6( dA, dB ); break; case 7: strsv_lower_8kernel_7( dA, dB ); break; case 8: strsv_lower_8kernel_8( dA, dB ); break; default: strsv_lower_8kernel_general( dA, dB, sizes ); break; } } } __device__ void strsv_upper_8kernel_1(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 1-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = __shfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_8kernel_2(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 2-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = __shfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __device__ void strsv_upper_8kernel_3(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 3-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = __shfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_8kernel_4(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 4-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = __shfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_8kernel_5(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 5-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = __shfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_8kernel_6(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. 
#pragma unroll for (int k = 6-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = __shfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_8kernel_7(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 7-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = __shfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_8kernel_8(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 8-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = __shfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __global__ void strsv_upper_8kernel_switch(float *dA, float *dB, int *sizes, int num_rows ) { int j = blockIdx.y * gridDim.x + blockIdx.x; if (j < num_rows) { int N = sizes[j]; switch( N ) { case 1: strsv_upper_8kernel_1( dA, dB ); break; case 2: strsv_upper_8kernel_2( dA, dB ); break; case 3: strsv_upper_8kernel_3( dA, dB ); break; case 4: strsv_upper_8kernel_4( dA, dB ); break; case 5: strsv_upper_8kernel_5( dA, dB ); break; case 6: strsv_upper_8kernel_6( dA, dB ); break; case 7: strsv_upper_8kernel_7( dA, dB ); break; case 8: strsv_upper_8kernel_8( dA, dB ); break; default: strsv_upper_8kernel_general( dA, dB, sizes ); break; } } } // initialize arrays with zero __global__ void magma_sgpumemzero_8kernel( float * d, int n, int dim_x, int dim_y ) { int i = blockIdx.y * gridDim.x + blockIdx.x; int idx = threadIdx.x; if( i >= n ){ return; } if( idx >= dim_x ){ return; } for( int j=0; j<dim_y; j++) d[ i*dim_x*dim_y + j*dim_y + idx ] = MAGMA_S_MAKE( 0.0, 0.0 ); } __global__ void magma_slocations_lower_8kernel( magma_int_t n, magma_index_t *row, magma_index_t *col, float *val, magma_index_t *sizes, magma_index_t *locations, float *trisystems, float *rhs ) { int i = threadIdx.x; int j = blockIdx.y * gridDim.x + blockIdx.x; if( j >= n ){ return; } int start = row[j]; int end = row[j+1]; int count = end-start; if( i == 0 ){ sizes[j] = count; rhs[ j*WARP_SIZE ] = MAGMA_S_ONE; } if ( i<count ){ locations[ j*WARP_SIZE + i ] = col[ row[j]+i ]; } }// kernel __global__ void magma_slocations_trunc_lower_8kernel( magma_int_t n, magma_index_t *row, magma_index_t *col, float *val, magma_index_t *sizes, magma_index_t *locations, float *trisystems, float *rhs ) { int i = threadIdx.x; int j = blockIdx.y * gridDim.x + blockIdx.x; if( j >= n ){ return; } int start = row[j]; int end = row[j+1]; int count = end-start; // normal case if( count <= BLOCKSIZE ){ // normal case if( i == 0 ){ sizes[j] = count; rhs[ j*WARP_SIZE ] = MAGMA_S_ONE; } if ( i<count ){ locations[ 
j*WARP_SIZE + i ] = col[ row[j]+i ]; } } else { // truncate in this row to the blocksize, // take only the 8 elements close to the main diagonal into account count = BLOCKSIZE; if (i == 0) { sizes[j] = count; rhs[ j*WARP_SIZE ] = MAGMA_S_ONE; } locations[ j*WARP_SIZE + i ] = col[ row[j+1]-BLOCKSIZE+i ]; } }// kernel __global__ void magma_slocations_upper_8kernel( magma_int_t n, magma_index_t *row, magma_index_t *col, float *val, magma_index_t *sizes, magma_index_t *locations, float *trisystems, float *rhs ) { int i = threadIdx.x; int j = blockIdx.y * gridDim.x + blockIdx.x; if( j >= n ){ return; } int start = row[j]; int end = row[j+1]; int count = end-start; if( i == 0 ){ sizes[j] = count; rhs[ j*WARP_SIZE+count-1 ] = MAGMA_S_ONE; } if ( i<count ){ locations[ j*WARP_SIZE + i ] = col[ row[j]+i ]; } }// kernel __global__ void magma_slocations_trunc_upper_8kernel( magma_int_t n, magma_index_t *row, magma_index_t *col, float *val, magma_index_t *sizes, magma_index_t *locations, float *trisystems, float *rhs ) { int i = threadIdx.x; int j = blockIdx.y * gridDim.x + blockIdx.x; if( j >= n ){ return; } int start = row[j]; int end = row[j+1]; int count = end-start; // normal case if( count <= BLOCKSIZE ){ // normal case if( i == 0 ){ sizes[j] = count; rhs[ j*WARP_SIZE+count-1 ] = MAGMA_S_ONE; } if ( i<count ){ locations[ j*WARP_SIZE + i ] = col[ row[j]+i ]; } } else { // truncate in this row to the blocksize, // take only the 8 elements close to the main diagonal into account count = BLOCKSIZE; if (i == 0) { sizes[j] = count; rhs[ j*WARP_SIZE+count-1 ] = MAGMA_S_ONE; } locations[ j*WARP_SIZE + i ] = col[ row[j]+i ]; } }// kernel __global__ void magma_sfilltrisystems_8kernel( magma_int_t offset, magma_int_t limit, magma_index_t *row, magma_index_t *col, float *val, magma_index_t *sizes, magma_index_t *locations, float *trisystems, float *rhs ) { int i = (blockDim.x * blockIdx.x + threadIdx.x)+offset; int ii = (blockDim.x * blockIdx.x + threadIdx.x); if ( ii>=limit ){ 
return; } //if ( i<offset ){ // return; //} for( int j=0; j<sizes[ i ]; j++ ){// no need for first int k = row[ locations[ j+i*WARP_SIZE ] ]; int l = i*WARP_SIZE; int idx = 0; while( k < row[ locations[ j+i*WARP_SIZE ]+1 ] && l < (i+1)*WARP_SIZE ){ // stop once this column is done if( locations[ l ] == col[k] ){ //match // int loc = i*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx; trisystems[ ii*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx ] = val[ k ]; k++; l++; idx++; } else if( col[k] < locations[ l ] ){// need to check next element k++; } else { // element does not exist, i.e. l < LC.col[k] // printf("increment l\n"); l++; // check next elment in the sparsity pattern idx++; // leave this element equal zero } } } }// kernel __global__ void magma_sbackinsert_8kernel( magma_int_t n, magma_index_t *row, magma_index_t *col, float *val, magma_index_t *sizes, float *rhs ) { int i = threadIdx.x; int j = blockIdx.y * gridDim.x + blockIdx.x; int end = sizes[j]; if( j >= n ){ return; } if ( i>=end ){ return; } val[row[j]+i] = rhs[j*WARP_SIZE+i]; }// kernel #endif /** Purpose ------- This routine is designet to combine all kernels into one. Arguments --------- @param[in] uplotype magma_uplo_t lower or upper triangular @param[in] transtype magma_trans_t possibility for transposed matrix @param[in] diagtype magma_diag_t unit diagonal or not @param[in] L magma_s_matrix triangular factor for which the ISAI matrix is computed. Col-Major CSR storage. @param[in,out] M magma_s_matrix* SPAI preconditioner CSR col-major @param[out] sizes magma_int_t* Number of Elements that are replaced. @param[out] locations magma_int_t* Array indicating the locations. @param[out] trisystems float* trisystems @param[out] rhs float* right-hand sides @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_saux ********************************************************************/ extern "C" magma_int_t magma_sisaigenerator_8_gpu( magma_uplo_t uplotype, magma_trans_t transtype, magma_diag_t diagtype, magma_s_matrix L, magma_s_matrix *M, magma_index_t *sizes, magma_index_t *locations, float *trisystems, float *rhs, magma_queue_t queue ) { magma_int_t info = 0; #if (TORCH_HIP_VERSION >= 7000) magma_int_t arch = magma_getdevice_arch(); hipDeviceSetCacheConfig( hipFuncCachePreferL1 ); // routine 1 int r1bs1 = WARP_SIZE; int r1bs2 = 1; int r1dg1 = min( int( sqrt( float( M->num_rows ))), 65535 ); int r1dg2 = min(magma_ceildiv( M->num_rows, r1dg1 ), 65535); int r1dg3 = magma_ceildiv( M->num_rows, r1dg1*r1dg2 ); dim3 r1block( r1bs1, r1bs2, 1 ); dim3 r1grid( r1dg1, r1dg2, r1dg3 ); int r2bs1 = WARP_SIZE; int r2bs2 = 1; int r2dg1 = magma_ceildiv( L.num_rows, r2bs1 ); int r2dg2 = 1; int r2dg3 = 1; dim3 r2block( r2bs1, r2bs2, 1 ); dim3 r2grid( r2dg1, r2dg2, r2dg3 ); int r3bs1 = WARP_SIZE; int r3bs2 = 1; int r3dg1 = magma_ceildiv( 32000, r2bs1 ); int r3dg2 = 1; int r3dg3 = 1; dim3 r3block( r3bs1, r3bs2, 1 ); dim3 r3grid( r3dg1, r3dg2, r3dg3 ); int recursive = magma_ceildiv( M->num_rows, 32000 ); if (arch >= 300) { hipLaunchKernelGGL(( magma_sgpumemzero_8kernel), dim3(r1grid), dim3(r1block), 0, queue->cuda_stream() , rhs, L.num_rows, WARP_SIZE, 1); if (uplotype == MagmaLower) { hipLaunchKernelGGL(( magma_slocations_lower_8kernel), dim3(r1grid), dim3(r1block), 0, queue->cuda_stream() , M->num_rows, M->drow, M->dcol, M->dval, sizes, locations, trisystems, rhs ); } else { hipLaunchKernelGGL(( magma_slocations_upper_8kernel), dim3(r1grid), dim3(r1block), 0, queue->cuda_stream() , M->num_rows, M->drow, M->dcol, M->dval, sizes, locations, trisystems, rhs ); } // chunk it recursively into batches of 800 for( int z=0; z<recursive; z++ ){ int limit = min(32000, L.num_rows-32000*z); hipLaunchKernelGGL(( magma_sgpumemzero_8kernel), dim3(r1grid), dim3(r1block), 0, 
queue->cuda_stream() , trisystems, limit, WARP_SIZE, WARP_SIZE ); hipLaunchKernelGGL(( magma_sfilltrisystems_8kernel), dim3(r3grid), dim3(r3block), 0, queue->cuda_stream() , 32000*z, limit, L.drow, L.dcol, L.dval, sizes, locations, trisystems, rhs ); // routine 2 if (uplotype == MagmaLower) { hipLaunchKernelGGL(( strsv_lower_8kernel_switch), dim3(r1grid), dim3(r1block), 0, queue->cuda_stream() , trisystems, rhs+32000*8*z, sizes+32000*z, limit ); } else { hipLaunchKernelGGL(( strsv_upper_8kernel_switch), dim3(r1grid), dim3(r1block), 0, queue->cuda_stream() , trisystems, rhs+32000*8*z, sizes+32000*z, limit ); } } // routine 3 hipLaunchKernelGGL(( magma_sbackinsert_8kernel), dim3(r1grid), dim3(r1block), 0, queue->cuda_stream() , M->num_rows, M->drow, M->dcol, M->dval, sizes, rhs ); } else { info = MAGMA_ERR_NOT_SUPPORTED; } #else // CUDA < 7000 printf( "%% error: ISAI preconditioner requires CUDA > 7.0.\n" ); info = MAGMA_ERR_NOT_SUPPORTED; #endif return info; }
d66ea9a86ed768d9c0743be3a7deb9317db9c28f.cu
/* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @generated from sparse/blas/zgeisai_8.cu, normal z -> s, Sun Nov 20 20:20:43 2016 */ #include "magmasparse_internal.h" #define PRECISION_s #define REAL #define BLOCKSIZE 8 #define WARP_SIZE 8 #define WRP 8 #define WRQ 4 #include <cuda.h> // for CUDA_VERSION #if (CUDA_VERSION >= 7000) __device__ void strsv_lower_8kernel_general(float *dA, float *dB, int *sizes) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB[ 2 ]; float rA[ 2 ]; int n; int k; int N = sizes[j]; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. #pragma unroll for (n = 0; n < 2; n++) rB[n] = dB[n*WARP_SIZE+idn]; // Triangular solve in regs. #pragma unroll for (k = 0; k < N; k++) { #pragma unroll for (n = 0; n < 2; n++) rA[n] = dA[k*WARP_SIZE+n*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB[k/WARP_SIZE] /= rA[k/WARP_SIZE]; float top = __shfl(rB[k/WARP_SIZE], k%WARP_SIZE); #pragma unroll for (n = 0; n < 2; n++) if (n*WARP_SIZE+idn > k) rB[n] -= (top*rA[n]); } // Drop B to dev mem. #pragma unroll for (n = 0; n < 2; n++) if (n*WARP_SIZE+idn < N) dB[n*WARP_SIZE+idn] = rB[n]; #endif } __device__ void strsv_upper_8kernel_general(float *dA, float *dB, int *sizes) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB[ 2 ]; float rA[ 2 ]; int n; int N = sizes[j]; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. #pragma unroll for (n = 0; n < 2; n++) rB[n] = dB[n*WARP_SIZE+idn]; // Triangular solve in regs. 
#pragma unroll for (int k = N-1; k > -1; k--) { #pragma unroll for (n = 0; n < 2; n++) rA[n] = dA[k*WARP_SIZE+n*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB[k/WARP_SIZE] /= rA[k/WARP_SIZE]; float top = __shfl(rB[k/WARP_SIZE], k%WARP_SIZE); #pragma unroll for (n = 0; n < 2; n++) if (n*WARP_SIZE+idn < k) rB[n] -= (top*rA[n]); } // Drop B to dev mem. #pragma unroll for (n = 0; n < 2; n++) if (n*WARP_SIZE+idn < N) dB[n*WARP_SIZE+idn] = rB[n]; #endif } __device__ void strsv_lower_8kernel_1(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 1; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = __shfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_8kernel_2(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 2; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = __shfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_8kernel_3(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. 
#pragma unroll for (int k = 0; k < 3; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = __shfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_8kernel_4(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 4; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = __shfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_8kernel_5(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 5; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = __shfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_8kernel_6(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 6; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = __shfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __device__ void strsv_lower_8kernel_7(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 7; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = __shfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_8kernel_8(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 8; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = __shfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __global__ void strsv_lower_8kernel_switch(float *dA, float *dB, int *sizes, int num_rows ) { int j = blockIdx.y * gridDim.x + blockIdx.x; if (j < num_rows) { int N = sizes[j]; switch( N ) { case 1: strsv_lower_8kernel_1( dA, dB ); break; case 2: strsv_lower_8kernel_2( dA, dB ); break; case 3: strsv_lower_8kernel_3( dA, dB ); break; case 4: strsv_lower_8kernel_4( dA, dB ); break; case 5: strsv_lower_8kernel_5( dA, dB ); break; case 6: strsv_lower_8kernel_6( dA, dB ); break; case 7: strsv_lower_8kernel_7( dA, dB ); break; case 8: strsv_lower_8kernel_8( dA, dB ); break; default: strsv_lower_8kernel_general( dA, dB, sizes ); break; } } } __device__ void strsv_upper_8kernel_1(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 1-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = __shfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_8kernel_2(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 2-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = __shfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __device__ void strsv_upper_8kernel_3(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 3-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = __shfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_8kernel_4(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 4-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = __shfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_8kernel_5(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 5-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = __shfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_8kernel_6(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. 
#pragma unroll for (int k = 6-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = __shfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_8kernel_7(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 7-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = __shfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_8kernel_8(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 8-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = __shfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __global__ void strsv_upper_8kernel_switch(float *dA, float *dB, int *sizes, int num_rows ) { int j = blockIdx.y * gridDim.x + blockIdx.x; if (j < num_rows) { int N = sizes[j]; switch( N ) { case 1: strsv_upper_8kernel_1( dA, dB ); break; case 2: strsv_upper_8kernel_2( dA, dB ); break; case 3: strsv_upper_8kernel_3( dA, dB ); break; case 4: strsv_upper_8kernel_4( dA, dB ); break; case 5: strsv_upper_8kernel_5( dA, dB ); break; case 6: strsv_upper_8kernel_6( dA, dB ); break; case 7: strsv_upper_8kernel_7( dA, dB ); break; case 8: strsv_upper_8kernel_8( dA, dB ); break; default: strsv_upper_8kernel_general( dA, dB, sizes ); break; } } } // initialize arrays with zero __global__ void magma_sgpumemzero_8kernel( float * d, int n, int dim_x, int dim_y ) { int i = blockIdx.y * gridDim.x + blockIdx.x; int idx = threadIdx.x; if( i >= n ){ return; } if( idx >= dim_x ){ return; } for( int j=0; j<dim_y; j++) d[ i*dim_x*dim_y + j*dim_y + idx ] = MAGMA_S_MAKE( 0.0, 0.0 ); } __global__ void magma_slocations_lower_8kernel( magma_int_t n, magma_index_t *row, magma_index_t *col, float *val, magma_index_t *sizes, magma_index_t *locations, float *trisystems, float *rhs ) { int i = threadIdx.x; int j = blockIdx.y * gridDim.x + blockIdx.x; if( j >= n ){ return; } int start = row[j]; int end = row[j+1]; int count = end-start; if( i == 0 ){ sizes[j] = count; rhs[ j*WARP_SIZE ] = MAGMA_S_ONE; } if ( i<count ){ locations[ j*WARP_SIZE + i ] = col[ row[j]+i ]; } }// kernel __global__ void magma_slocations_trunc_lower_8kernel( magma_int_t n, magma_index_t *row, magma_index_t *col, float *val, magma_index_t *sizes, magma_index_t *locations, float *trisystems, float *rhs ) { int i = threadIdx.x; int j = blockIdx.y * gridDim.x + blockIdx.x; if( j >= n ){ return; } int start = row[j]; int end = row[j+1]; int count = end-start; // normal case if( count <= BLOCKSIZE ){ // normal case if( i == 0 ){ sizes[j] = count; rhs[ j*WARP_SIZE ] = MAGMA_S_ONE; } if ( i<count ){ locations[ 
j*WARP_SIZE + i ] = col[ row[j]+i ]; } } else { // truncate in this row to the blocksize, // take only the 8 elements close to the main diagonal into account count = BLOCKSIZE; if (i == 0) { sizes[j] = count; rhs[ j*WARP_SIZE ] = MAGMA_S_ONE; } locations[ j*WARP_SIZE + i ] = col[ row[j+1]-BLOCKSIZE+i ]; } }// kernel __global__ void magma_slocations_upper_8kernel( magma_int_t n, magma_index_t *row, magma_index_t *col, float *val, magma_index_t *sizes, magma_index_t *locations, float *trisystems, float *rhs ) { int i = threadIdx.x; int j = blockIdx.y * gridDim.x + blockIdx.x; if( j >= n ){ return; } int start = row[j]; int end = row[j+1]; int count = end-start; if( i == 0 ){ sizes[j] = count; rhs[ j*WARP_SIZE+count-1 ] = MAGMA_S_ONE; } if ( i<count ){ locations[ j*WARP_SIZE + i ] = col[ row[j]+i ]; } }// kernel __global__ void magma_slocations_trunc_upper_8kernel( magma_int_t n, magma_index_t *row, magma_index_t *col, float *val, magma_index_t *sizes, magma_index_t *locations, float *trisystems, float *rhs ) { int i = threadIdx.x; int j = blockIdx.y * gridDim.x + blockIdx.x; if( j >= n ){ return; } int start = row[j]; int end = row[j+1]; int count = end-start; // normal case if( count <= BLOCKSIZE ){ // normal case if( i == 0 ){ sizes[j] = count; rhs[ j*WARP_SIZE+count-1 ] = MAGMA_S_ONE; } if ( i<count ){ locations[ j*WARP_SIZE + i ] = col[ row[j]+i ]; } } else { // truncate in this row to the blocksize, // take only the 8 elements close to the main diagonal into account count = BLOCKSIZE; if (i == 0) { sizes[j] = count; rhs[ j*WARP_SIZE+count-1 ] = MAGMA_S_ONE; } locations[ j*WARP_SIZE + i ] = col[ row[j]+i ]; } }// kernel __global__ void magma_sfilltrisystems_8kernel( magma_int_t offset, magma_int_t limit, magma_index_t *row, magma_index_t *col, float *val, magma_index_t *sizes, magma_index_t *locations, float *trisystems, float *rhs ) { int i = (blockDim.x * blockIdx.x + threadIdx.x)+offset; int ii = (blockDim.x * blockIdx.x + threadIdx.x); if ( ii>=limit ){ 
return; } //if ( i<offset ){ // return; //} for( int j=0; j<sizes[ i ]; j++ ){// no need for first int k = row[ locations[ j+i*WARP_SIZE ] ]; int l = i*WARP_SIZE; int idx = 0; while( k < row[ locations[ j+i*WARP_SIZE ]+1 ] && l < (i+1)*WARP_SIZE ){ // stop once this column is done if( locations[ l ] == col[k] ){ //match // int loc = i*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx; trisystems[ ii*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx ] = val[ k ]; k++; l++; idx++; } else if( col[k] < locations[ l ] ){// need to check next element k++; } else { // element does not exist, i.e. l < LC.col[k] // printf("increment l\n"); l++; // check next elment in the sparsity pattern idx++; // leave this element equal zero } } } }// kernel __global__ void magma_sbackinsert_8kernel( magma_int_t n, magma_index_t *row, magma_index_t *col, float *val, magma_index_t *sizes, float *rhs ) { int i = threadIdx.x; int j = blockIdx.y * gridDim.x + blockIdx.x; int end = sizes[j]; if( j >= n ){ return; } if ( i>=end ){ return; } val[row[j]+i] = rhs[j*WARP_SIZE+i]; }// kernel #endif /** Purpose ------- This routine is designet to combine all kernels into one. Arguments --------- @param[in] uplotype magma_uplo_t lower or upper triangular @param[in] transtype magma_trans_t possibility for transposed matrix @param[in] diagtype magma_diag_t unit diagonal or not @param[in] L magma_s_matrix triangular factor for which the ISAI matrix is computed. Col-Major CSR storage. @param[in,out] M magma_s_matrix* SPAI preconditioner CSR col-major @param[out] sizes magma_int_t* Number of Elements that are replaced. @param[out] locations magma_int_t* Array indicating the locations. @param[out] trisystems float* trisystems @param[out] rhs float* right-hand sides @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_saux ********************************************************************/ extern "C" magma_int_t magma_sisaigenerator_8_gpu( magma_uplo_t uplotype, magma_trans_t transtype, magma_diag_t diagtype, magma_s_matrix L, magma_s_matrix *M, magma_index_t *sizes, magma_index_t *locations, float *trisystems, float *rhs, magma_queue_t queue ) { magma_int_t info = 0; #if (CUDA_VERSION >= 7000) magma_int_t arch = magma_getdevice_arch(); cudaDeviceSetCacheConfig( cudaFuncCachePreferL1 ); // routine 1 int r1bs1 = WARP_SIZE; int r1bs2 = 1; int r1dg1 = min( int( sqrt( float( M->num_rows ))), 65535 ); int r1dg2 = min(magma_ceildiv( M->num_rows, r1dg1 ), 65535); int r1dg3 = magma_ceildiv( M->num_rows, r1dg1*r1dg2 ); dim3 r1block( r1bs1, r1bs2, 1 ); dim3 r1grid( r1dg1, r1dg2, r1dg3 ); int r2bs1 = WARP_SIZE; int r2bs2 = 1; int r2dg1 = magma_ceildiv( L.num_rows, r2bs1 ); int r2dg2 = 1; int r2dg3 = 1; dim3 r2block( r2bs1, r2bs2, 1 ); dim3 r2grid( r2dg1, r2dg2, r2dg3 ); int r3bs1 = WARP_SIZE; int r3bs2 = 1; int r3dg1 = magma_ceildiv( 32000, r2bs1 ); int r3dg2 = 1; int r3dg3 = 1; dim3 r3block( r3bs1, r3bs2, 1 ); dim3 r3grid( r3dg1, r3dg2, r3dg3 ); int recursive = magma_ceildiv( M->num_rows, 32000 ); if (arch >= 300) { magma_sgpumemzero_8kernel<<< r1grid, r1block, 0, queue->cuda_stream() >>>( rhs, L.num_rows, WARP_SIZE, 1); if (uplotype == MagmaLower) { magma_slocations_lower_8kernel<<< r1grid, r1block, 0, queue->cuda_stream() >>>( M->num_rows, M->drow, M->dcol, M->dval, sizes, locations, trisystems, rhs ); } else { magma_slocations_upper_8kernel<<< r1grid, r1block, 0, queue->cuda_stream() >>>( M->num_rows, M->drow, M->dcol, M->dval, sizes, locations, trisystems, rhs ); } // chunk it recursively into batches of 800 for( int z=0; z<recursive; z++ ){ int limit = min(32000, L.num_rows-32000*z); magma_sgpumemzero_8kernel<<< r1grid, r1block, 0, queue->cuda_stream() >>>( trisystems, limit, WARP_SIZE, WARP_SIZE ); magma_sfilltrisystems_8kernel<<< r3grid, r3block, 0, 
queue->cuda_stream() >>>( 32000*z, limit, L.drow, L.dcol, L.dval, sizes, locations, trisystems, rhs ); // routine 2 if (uplotype == MagmaLower) { strsv_lower_8kernel_switch<<< r1grid, r1block, 0, queue->cuda_stream() >>>( trisystems, rhs+32000*8*z, sizes+32000*z, limit ); } else { strsv_upper_8kernel_switch<<< r1grid, r1block, 0, queue->cuda_stream() >>>( trisystems, rhs+32000*8*z, sizes+32000*z, limit ); } } // routine 3 magma_sbackinsert_8kernel<<< r1grid, r1block, 0, queue->cuda_stream() >>>( M->num_rows, M->drow, M->dcol, M->dval, sizes, rhs ); } else { info = MAGMA_ERR_NOT_SUPPORTED; } #else // CUDA < 7000 printf( "%% error: ISAI preconditioner requires CUDA > 7.0.\n" ); info = MAGMA_ERR_NOT_SUPPORTED; #endif return info; }
79b28df7960c96c61bf2fab3912fec7c441a0d05.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <unittest/unittest.h> #include <thrust/copy.h> #include <thrust/sequence.h> #include <thrust/execution_policy.h> template<typename T> struct is_even { __host__ __device__ bool operator()(T x) { return (static_cast<unsigned int>(x) & 1) == 0; } }; template<typename T> struct mod_3 { __host__ __device__ unsigned int operator()(T x) { return static_cast<unsigned int>(x) % 3; } }; template<typename ExecutionPolicy, typename Iterator1, typename Iterator2, typename Predicate, typename Iterator3> __global__ void copy_if_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Iterator2 result1, Predicate pred, Iterator3 result2) { *result2 = thrust::copy_if(exec, first, last, result1, pred); } template<typename ExecutionPolicy> void TestCopyIfDevice(ExecutionPolicy exec) { size_t n = 1000; thrust::host_vector<int> h_data = unittest::random_integers<int>(n); thrust::device_vector<int> d_data = h_data; typename thrust::host_vector<int>::iterator h_new_end; typename thrust::device_vector<int>::iterator d_new_end; thrust::device_vector< typename thrust::device_vector<int>::iterator > d_new_end_vec(1); // test with Predicate that returns a bool { thrust::host_vector<int> h_result(n); thrust::device_vector<int> d_result(n); h_new_end = thrust::copy_if(h_data.begin(), h_data.end(), h_result.begin(), is_even<int>()); hipLaunchKernelGGL(( copy_if_kernel), dim3(1),dim3(1), 0, 0, exec, d_data.begin(), d_data.end(), d_result.begin(), is_even<int>(), d_new_end_vec.begin()); hipError_t const err = hipDeviceSynchronize(); ASSERT_EQUAL(hipSuccess, err); d_new_end = d_new_end_vec[0]; h_result.resize(h_new_end - h_result.begin()); d_result.resize(d_new_end - d_result.begin()); ASSERT_EQUAL(h_result, d_result); } // test with Predicate that returns a non-bool { thrust::host_vector<int> h_result(n); thrust::device_vector<int> d_result(n); h_new_end = thrust::copy_if(h_data.begin(), 
h_data.end(), h_result.begin(), mod_3<int>()); hipLaunchKernelGGL(( copy_if_kernel), dim3(1),dim3(1), 0, 0, exec, d_data.begin(), d_data.end(), d_result.begin(), mod_3<int>(), d_new_end_vec.begin()); hipError_t const err = hipDeviceSynchronize(); ASSERT_EQUAL(hipSuccess, err); d_new_end = d_new_end_vec[0]; h_result.resize(h_new_end - h_result.begin()); d_result.resize(d_new_end - d_result.begin()); ASSERT_EQUAL(h_result, d_result); } } void TestCopyIfDeviceSeq() { TestCopyIfDevice(thrust::seq); } DECLARE_UNITTEST(TestCopyIfDeviceSeq); void TestCopyIfDeviceDevice() { TestCopyIfDevice(thrust::device); } DECLARE_UNITTEST(TestCopyIfDeviceDevice); void TestCopyIfCudaStreams() { typedef thrust::device_vector<int> Vector; Vector data(5); data[0] = 1; data[1] = 2; data[2] = 1; data[3] = 3; data[4] = 2; Vector result(5); hipStream_t s; hipStreamCreate(&s); Vector::iterator end = thrust::copy_if(thrust::hip::par.on(s), data.begin(), data.end(), result.begin(), is_even<int>()); ASSERT_EQUAL(end - result.begin(), 2); ASSERT_EQUAL(result[0], 2); ASSERT_EQUAL(result[1], 2); hipStreamDestroy(s); } DECLARE_UNITTEST(TestCopyIfCudaStreams); template<typename ExecutionPolicy, typename Iterator1, typename Iterator2, typename Iterator3, typename Predicate, typename Iterator4> __global__ void copy_if_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Iterator2 stencil_first, Iterator3 result1, Predicate pred, Iterator4 result2) { *result2 = thrust::copy_if(exec, first, last, stencil_first, result1, pred); } template<typename ExecutionPolicy> void TestCopyIfStencilDevice(ExecutionPolicy exec) { size_t n = 1000; thrust::host_vector<int> h_data(n); thrust::sequence(h_data.begin(), h_data.end()); thrust::device_vector<int> d_data(n); thrust::sequence(d_data.begin(), d_data.end()); thrust::host_vector<int> h_stencil = unittest::random_integers<int>(n); thrust::device_vector<int> d_stencil = unittest::random_integers<int>(n); thrust::host_vector<int> h_result(n); 
thrust::device_vector<int> d_result(n); typename thrust::host_vector<int>::iterator h_new_end; typename thrust::device_vector<int>::iterator d_new_end; thrust::device_vector< typename thrust::device_vector<int>::iterator > d_new_end_vec(1); // test with Predicate that returns a bool { thrust::host_vector<int> h_result(n); thrust::device_vector<int> d_result(n); h_new_end = thrust::copy_if(h_data.begin(), h_data.end(), h_result.begin(), is_even<int>()); hipLaunchKernelGGL(( copy_if_kernel), dim3(1),dim3(1), 0, 0, exec, d_data.begin(), d_data.end(), d_result.begin(), is_even<int>(), d_new_end_vec.begin()); hipError_t const err = hipDeviceSynchronize(); ASSERT_EQUAL(hipSuccess, err); d_new_end = d_new_end_vec[0]; h_result.resize(h_new_end - h_result.begin()); d_result.resize(d_new_end - d_result.begin()); ASSERT_EQUAL(h_result, d_result); } // test with Predicate that returns a non-bool { thrust::host_vector<int> h_result(n); thrust::device_vector<int> d_result(n); h_new_end = thrust::copy_if(h_data.begin(), h_data.end(), h_result.begin(), mod_3<int>()); hipLaunchKernelGGL(( copy_if_kernel), dim3(1),dim3(1), 0, 0, exec, d_data.begin(), d_data.end(), d_result.begin(), mod_3<int>(), d_new_end_vec.begin()); hipError_t const err = hipDeviceSynchronize(); ASSERT_EQUAL(hipSuccess, err); d_new_end = d_new_end_vec[0]; h_result.resize(h_new_end - h_result.begin()); d_result.resize(d_new_end - d_result.begin()); ASSERT_EQUAL(h_result, d_result); } } void TestCopyIfStencilDeviceSeq() { TestCopyIfStencilDevice(thrust::seq); } DECLARE_UNITTEST(TestCopyIfStencilDeviceSeq); void TestCopyIfStencilDeviceDevice() { TestCopyIfStencilDevice(thrust::device); } DECLARE_UNITTEST(TestCopyIfStencilDeviceDevice); void TestCopyIfStencilCudaStreams() { typedef thrust::device_vector<int> Vector; typedef Vector::value_type T; Vector data(5); data[0] = 1; data[1] = 2; data[2] = 1; data[3] = 3; data[4] = 2; Vector result(5); Vector stencil(5); stencil[0] = 0; stencil[1] = 1; stencil[2] = 0; 
stencil[3] = 0; stencil[4] = 1; hipStream_t s; hipStreamCreate(&s); Vector::iterator end = thrust::copy_if(thrust::hip::par.on(s), data.begin(), data.end(), stencil.begin(), result.begin(), thrust::identity<T>()); ASSERT_EQUAL(end - result.begin(), 2); ASSERT_EQUAL(result[0], 2); ASSERT_EQUAL(result[1], 2); hipStreamDestroy(s); } DECLARE_UNITTEST(TestCopyIfStencilCudaStreams);
79b28df7960c96c61bf2fab3912fec7c441a0d05.cu
#include <unittest/unittest.h> #include <thrust/copy.h> #include <thrust/sequence.h> #include <thrust/execution_policy.h> template<typename T> struct is_even { __host__ __device__ bool operator()(T x) { return (static_cast<unsigned int>(x) & 1) == 0; } }; template<typename T> struct mod_3 { __host__ __device__ unsigned int operator()(T x) { return static_cast<unsigned int>(x) % 3; } }; template<typename ExecutionPolicy, typename Iterator1, typename Iterator2, typename Predicate, typename Iterator3> __global__ void copy_if_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Iterator2 result1, Predicate pred, Iterator3 result2) { *result2 = thrust::copy_if(exec, first, last, result1, pred); } template<typename ExecutionPolicy> void TestCopyIfDevice(ExecutionPolicy exec) { size_t n = 1000; thrust::host_vector<int> h_data = unittest::random_integers<int>(n); thrust::device_vector<int> d_data = h_data; typename thrust::host_vector<int>::iterator h_new_end; typename thrust::device_vector<int>::iterator d_new_end; thrust::device_vector< typename thrust::device_vector<int>::iterator > d_new_end_vec(1); // test with Predicate that returns a bool { thrust::host_vector<int> h_result(n); thrust::device_vector<int> d_result(n); h_new_end = thrust::copy_if(h_data.begin(), h_data.end(), h_result.begin(), is_even<int>()); copy_if_kernel<<<1,1>>>(exec, d_data.begin(), d_data.end(), d_result.begin(), is_even<int>(), d_new_end_vec.begin()); cudaError_t const err = cudaDeviceSynchronize(); ASSERT_EQUAL(cudaSuccess, err); d_new_end = d_new_end_vec[0]; h_result.resize(h_new_end - h_result.begin()); d_result.resize(d_new_end - d_result.begin()); ASSERT_EQUAL(h_result, d_result); } // test with Predicate that returns a non-bool { thrust::host_vector<int> h_result(n); thrust::device_vector<int> d_result(n); h_new_end = thrust::copy_if(h_data.begin(), h_data.end(), h_result.begin(), mod_3<int>()); copy_if_kernel<<<1,1>>>(exec, d_data.begin(), d_data.end(), d_result.begin(), 
mod_3<int>(), d_new_end_vec.begin()); cudaError_t const err = cudaDeviceSynchronize(); ASSERT_EQUAL(cudaSuccess, err); d_new_end = d_new_end_vec[0]; h_result.resize(h_new_end - h_result.begin()); d_result.resize(d_new_end - d_result.begin()); ASSERT_EQUAL(h_result, d_result); } } void TestCopyIfDeviceSeq() { TestCopyIfDevice(thrust::seq); } DECLARE_UNITTEST(TestCopyIfDeviceSeq); void TestCopyIfDeviceDevice() { TestCopyIfDevice(thrust::device); } DECLARE_UNITTEST(TestCopyIfDeviceDevice); void TestCopyIfCudaStreams() { typedef thrust::device_vector<int> Vector; Vector data(5); data[0] = 1; data[1] = 2; data[2] = 1; data[3] = 3; data[4] = 2; Vector result(5); cudaStream_t s; cudaStreamCreate(&s); Vector::iterator end = thrust::copy_if(thrust::cuda::par.on(s), data.begin(), data.end(), result.begin(), is_even<int>()); ASSERT_EQUAL(end - result.begin(), 2); ASSERT_EQUAL(result[0], 2); ASSERT_EQUAL(result[1], 2); cudaStreamDestroy(s); } DECLARE_UNITTEST(TestCopyIfCudaStreams); template<typename ExecutionPolicy, typename Iterator1, typename Iterator2, typename Iterator3, typename Predicate, typename Iterator4> __global__ void copy_if_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Iterator2 stencil_first, Iterator3 result1, Predicate pred, Iterator4 result2) { *result2 = thrust::copy_if(exec, first, last, stencil_first, result1, pred); } template<typename ExecutionPolicy> void TestCopyIfStencilDevice(ExecutionPolicy exec) { size_t n = 1000; thrust::host_vector<int> h_data(n); thrust::sequence(h_data.begin(), h_data.end()); thrust::device_vector<int> d_data(n); thrust::sequence(d_data.begin(), d_data.end()); thrust::host_vector<int> h_stencil = unittest::random_integers<int>(n); thrust::device_vector<int> d_stencil = unittest::random_integers<int>(n); thrust::host_vector<int> h_result(n); thrust::device_vector<int> d_result(n); typename thrust::host_vector<int>::iterator h_new_end; typename thrust::device_vector<int>::iterator d_new_end; 
thrust::device_vector< typename thrust::device_vector<int>::iterator > d_new_end_vec(1); // test with Predicate that returns a bool { thrust::host_vector<int> h_result(n); thrust::device_vector<int> d_result(n); h_new_end = thrust::copy_if(h_data.begin(), h_data.end(), h_result.begin(), is_even<int>()); copy_if_kernel<<<1,1>>>(exec, d_data.begin(), d_data.end(), d_result.begin(), is_even<int>(), d_new_end_vec.begin()); cudaError_t const err = cudaDeviceSynchronize(); ASSERT_EQUAL(cudaSuccess, err); d_new_end = d_new_end_vec[0]; h_result.resize(h_new_end - h_result.begin()); d_result.resize(d_new_end - d_result.begin()); ASSERT_EQUAL(h_result, d_result); } // test with Predicate that returns a non-bool { thrust::host_vector<int> h_result(n); thrust::device_vector<int> d_result(n); h_new_end = thrust::copy_if(h_data.begin(), h_data.end(), h_result.begin(), mod_3<int>()); copy_if_kernel<<<1,1>>>(exec, d_data.begin(), d_data.end(), d_result.begin(), mod_3<int>(), d_new_end_vec.begin()); cudaError_t const err = cudaDeviceSynchronize(); ASSERT_EQUAL(cudaSuccess, err); d_new_end = d_new_end_vec[0]; h_result.resize(h_new_end - h_result.begin()); d_result.resize(d_new_end - d_result.begin()); ASSERT_EQUAL(h_result, d_result); } } void TestCopyIfStencilDeviceSeq() { TestCopyIfStencilDevice(thrust::seq); } DECLARE_UNITTEST(TestCopyIfStencilDeviceSeq); void TestCopyIfStencilDeviceDevice() { TestCopyIfStencilDevice(thrust::device); } DECLARE_UNITTEST(TestCopyIfStencilDeviceDevice); void TestCopyIfStencilCudaStreams() { typedef thrust::device_vector<int> Vector; typedef Vector::value_type T; Vector data(5); data[0] = 1; data[1] = 2; data[2] = 1; data[3] = 3; data[4] = 2; Vector result(5); Vector stencil(5); stencil[0] = 0; stencil[1] = 1; stencil[2] = 0; stencil[3] = 0; stencil[4] = 1; cudaStream_t s; cudaStreamCreate(&s); Vector::iterator end = thrust::copy_if(thrust::cuda::par.on(s), data.begin(), data.end(), stencil.begin(), result.begin(), thrust::identity<T>()); 
ASSERT_EQUAL(end - result.begin(), 2); ASSERT_EQUAL(result[0], 2); ASSERT_EQUAL(result[1], 2); cudaStreamDestroy(s); } DECLARE_UNITTEST(TestCopyIfStencilCudaStreams);
d95afc3b0ef0b2e65ce48df528ae12c33eb07765.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * author: ck * 09.04.2011 * devised from CUDA's matrixMul.cu */ // Utilities and system includes #include <shrUtils.h> #include "cutil_inline.h" #include <iostream> // Thread block size #define BLOCK_SIZE 16 // Tensor .* operation. Multiply corresponding entries of tensors A,B of same size // Store the result in tensor C // C = A .* B : // specify dimensions common to all tensors (A,B,C) size_t crdlies[] = {2,3,2,2}; size_t nDims = 4; // includes, kernels #include <tensorMul_kernel.cu> static char *sSDKsample = "tensorMul"; //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest(int argc, char** argv); void randomInit(float*, int); void incrementalInit(float*, int); void printDiff(float*, float*, int, int, int, float); //extern "C" //void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char** argv) { printf("[ %s ]\n", sSDKsample); shrSetLogFileName ("tensorMul.txt"); shrLog("%s Starting...\n\n", argv[0]); runTest(argc, argv); shrEXIT(argc, (const char**)argv); } //////////////////////////////////////////////////////////////////////////////// //! 
Run a simple test for CUDA //////////////////////////////////////////////////////////////////////////////// void runTest(int argc, char** argv) { if(shrCheckCmdLineFlag(argc, (const char**)argv, "device")) { cutilDeviceInit(argc, argv); } else { hipSetDevice(cutGetMaxGflopsDeviceId()); } int devID; hipDeviceProp_t props; // get number of SMs on this GPU cutilSafeCall(hipGetDevice(&devID)); cutilSafeCall(hipGetDeviceProperties(&props, devID)); printf("Device %d: \"%s\" with Compute %d.%d capability\n", devID, props.name, props.major, props.minor); // set seed for rand() srand(2011); //int tmp=1; //shrGetCmdLineArgumenti(argc, (const char**)argv, "sizemult", &tmp); std::cout << "sizes:"; for (int i=0; i<nDims; i++){ std::cout << " " << crdlies[i] ; } std::cout << "\n\n"; //shrLog("\nUsing Matrix Sizes: A(%u x %u), B(%u x %u), C(%u x %u)\n\n", // WA, HA, WB, HB, WC, HC); size_t h_strides[nDims]; size_t total_size = 1; for (size_t i=0; i< nDims; i++) { h_strides[i] = crdlies[i]>1 ? total_size : 0; total_size *= crdlies[i]; }; std::cout << "total size " << total_size << std::endl; // allocate host memory for matrices A and B unsigned int size_A = total_size; unsigned int mem_size_A = sizeof(float) * size_A; float* h_A = (float*)malloc(mem_size_A); unsigned int size_B = total_size; unsigned int mem_size_B = sizeof(float) * size_B; float* h_B = (float*)malloc(mem_size_B); // initialize host memory incrementalInit(h_A, size_A); incrementalInit(h_B, size_B); std::cout << "h_strides:\n"; for (int i=0; i<nDims; i++){ std::cout << h_strides[i] << std::endl; } // allocate device memory //size_t pitch_A; float* d_A; cutilSafeCall(hipMalloc((void**) &d_A, mem_size_A)); //cutilSafeCall( hipMallocPitch(&d_A, &pitch_A, WA*sizeof(float), HA) ); //size_t pitch_B; float* d_B; cutilSafeCall(hipMalloc((void**) &d_B, mem_size_B)); //cutilSafeCall( hipMallocPitch(&d_B, &pitch_B, WB*sizeof(float), HB) ); size_t* d_strides; cutilSafeCall(hipMalloc((void**) &d_strides, nDims*sizeof(size_t))); 
// copy host memory to device cutilSafeCall(hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice) ); cutilSafeCall(hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice) ); cutilSafeCall(hipMemcpy(d_strides, h_strides, nDims*sizeof(size_t), hipMemcpyHostToDevice) ); // allocate device memory for result unsigned int size_C = total_size; unsigned int mem_size_C = sizeof(float) * size_C; //size_t pitch_C; float* d_C; cutilSafeCall(hipMalloc((void**) &d_C, mem_size_C)); //cutilSafeCall( hipMallocPitch(&d_C, &pitch_C, WC*sizeof(float), HC) ); // allocate host memory for the result float* h_C = (float*) malloc(mem_size_C); // setup execution parameters //dim3 threads(BLOCK_SIZE, BLOCK_SIZE); //dim3 grid(WC / threads.x, HC / threads.y); int blocks=BLOCK_SIZE; int threads=512; // kernel warmup hipLaunchKernelGGL(( tensorMul), dim3(blocks), dim3(threads) , 0, 0, d_C, d_A, d_B, d_strides, nDims, total_size); hipDeviceSynchronize(); // create and start timer shrLog("Run Kernels...\n\n"); unsigned int timer = 0; cutilCheckError(cutCreateTimer(&timer)); cutilCheckError(cutStartTimer(timer)); // execute the kernel int nIter = 30; for (int j = 0; j < nIter; j++) { hipLaunchKernelGGL(( tensorMul), dim3(blocks), dim3(threads) , 0, 0, d_C, d_A, d_B, d_strides, nDims, total_size); } // check if kernel execution generated and error cutilCheckMsg("Kernel execution failed"); hipDeviceSynchronize(); // stop and destroy timer cutilCheckError(cutStopTimer(timer)); double dSeconds = cutGetTimerValue(timer)/((double)nIter * 1000.0); double dNumOps = 2.0 * total_size; double gflops = 1.0e-9 * dNumOps/dSeconds; //Log througput, etc shrLogEx(LOGBOTH | MASTER, 0, "tensorMul, Throughput = %.4f GFlop/s, Time = %.5f s, Size = %.0f Ops, NumDevsUsed = %d, Workgroup = %u\n", gflops, dSeconds, dNumOps, 1, threads); cutilCheckError(cutDeleteTimer(timer)); // copy result from device to host cutilSafeCall(hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost) ); std::cout << std::endl << std::endl << 
"input A:" << std::endl; for (int i=0; i<total_size ; i++){ std::cout << i << "\t" << h_A[i] << std::endl; } std::cout << std::endl << std::endl << "input B:" << std::endl; for (int i=0; i<total_size ; i++){ std::cout << i << "\t" << h_B[i] << std::endl; } std::cout << std::endl << std::endl << "output C:" << std::endl; for (int i=0; i<total_size ; i++){ std::cout << i << "\t" << h_C[i] << std::endl; } // compute reference solution /* shrLog("\nCheck against Host computation...\n\n"); float* reference = (float*)malloc(mem_size_C); computeGold(reference, h_A, h_B, uiHA, uiWA, uiWB); // check result shrBOOL res = shrCompareL2fe(reference, h_C, size_C, 1.0e-6f); if (res != shrTRUE) { printDiff(reference, h_C, uiWC, uiHC, 100, 1.0e-5f); } shrLog("%s \n\n", (shrTRUE == res) ? "PASSED" : "FAILED"); */ // clean up memory free(h_A); free(h_B); free(h_C); //free(reference); cutilSafeCall(hipFree(d_A)); cutilSafeCall(hipFree(d_B)); cutilSafeCall(hipFree(d_C)); hipDeviceReset(); } // Allocates a matrix with random float entries. void randomInit(float* data, int size) { for (int i = 0; i < size; ++i) data[i] = rand() / (float)RAND_MAX; } void incrementalInit(float*data, int size) { for (int i=1 ; i<=size; i++){ data[i] = i; } } void printDiff(float *data1, float *data2, int width, int height, int iListLength, float fListTol) { shrLog("Listing first %d Differences > %.6f...\n", iListLength, fListTol); int i,j,k; int error_count=0; for (j = 0; j < height; j++) { if (error_count < iListLength) { shrLog("\n Row %d:\n", j); } for (i = 0; i < width; i++) { k = j * width + i; float fDiff = fabs(data1[k] - data2[k]); if (fDiff > fListTol) { if (error_count < iListLength) { shrLog(" Loc(%d,%d)\tCPU=%.5f\tGPU=%.5f\tDiff=%.6f\n", i, j, data1[k], data2[k], fDiff); } error_count++; } } } shrLog(" \n Total Errors = %d\n\n", error_count); }
d95afc3b0ef0b2e65ce48df528ae12c33eb07765.cu
/* * author: ck * 09.04.2011 * devised from CUDA's matrixMul.cu */ // Utilities and system includes #include <shrUtils.h> #include "cutil_inline.h" #include <iostream> // Thread block size #define BLOCK_SIZE 16 // Tensor .* operation. Multiply corresponding entries of tensors A,B of same size // Store the result in tensor C // C = A .* B : // specify dimensions common to all tensors (A,B,C) size_t crdlies[] = {2,3,2,2}; size_t nDims = 4; // includes, kernels #include <tensorMul_kernel.cu> static char *sSDKsample = "tensorMul"; //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest(int argc, char** argv); void randomInit(float*, int); void incrementalInit(float*, int); void printDiff(float*, float*, int, int, int, float); //extern "C" //void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char** argv) { printf("[ %s ]\n", sSDKsample); shrSetLogFileName ("tensorMul.txt"); shrLog("%s Starting...\n\n", argv[0]); runTest(argc, argv); shrEXIT(argc, (const char**)argv); } //////////////////////////////////////////////////////////////////////////////// //! 
Run a simple test for CUDA //////////////////////////////////////////////////////////////////////////////// void runTest(int argc, char** argv) { if(shrCheckCmdLineFlag(argc, (const char**)argv, "device")) { cutilDeviceInit(argc, argv); } else { cudaSetDevice(cutGetMaxGflopsDeviceId()); } int devID; cudaDeviceProp props; // get number of SMs on this GPU cutilSafeCall(cudaGetDevice(&devID)); cutilSafeCall(cudaGetDeviceProperties(&props, devID)); printf("Device %d: \"%s\" with Compute %d.%d capability\n", devID, props.name, props.major, props.minor); // set seed for rand() srand(2011); //int tmp=1; //shrGetCmdLineArgumenti(argc, (const char**)argv, "sizemult", &tmp); std::cout << "sizes:"; for (int i=0; i<nDims; i++){ std::cout << " " << crdlies[i] ; } std::cout << "\n\n"; //shrLog("\nUsing Matrix Sizes: A(%u x %u), B(%u x %u), C(%u x %u)\n\n", // WA, HA, WB, HB, WC, HC); size_t h_strides[nDims]; size_t total_size = 1; for (size_t i=0; i< nDims; i++) { h_strides[i] = crdlies[i]>1 ? total_size : 0; total_size *= crdlies[i]; }; std::cout << "total size " << total_size << std::endl; // allocate host memory for matrices A and B unsigned int size_A = total_size; unsigned int mem_size_A = sizeof(float) * size_A; float* h_A = (float*)malloc(mem_size_A); unsigned int size_B = total_size; unsigned int mem_size_B = sizeof(float) * size_B; float* h_B = (float*)malloc(mem_size_B); // initialize host memory incrementalInit(h_A, size_A); incrementalInit(h_B, size_B); std::cout << "h_strides:\n"; for (int i=0; i<nDims; i++){ std::cout << h_strides[i] << std::endl; } // allocate device memory //size_t pitch_A; float* d_A; cutilSafeCall(cudaMalloc((void**) &d_A, mem_size_A)); //cutilSafeCall( cudaMallocPitch(&d_A, &pitch_A, WA*sizeof(float), HA) ); //size_t pitch_B; float* d_B; cutilSafeCall(cudaMalloc((void**) &d_B, mem_size_B)); //cutilSafeCall( cudaMallocPitch(&d_B, &pitch_B, WB*sizeof(float), HB) ); size_t* d_strides; cutilSafeCall(cudaMalloc((void**) &d_strides, 
nDims*sizeof(size_t))); // copy host memory to device cutilSafeCall(cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice) ); cutilSafeCall(cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice) ); cutilSafeCall(cudaMemcpy(d_strides, h_strides, nDims*sizeof(size_t), cudaMemcpyHostToDevice) ); // allocate device memory for result unsigned int size_C = total_size; unsigned int mem_size_C = sizeof(float) * size_C; //size_t pitch_C; float* d_C; cutilSafeCall(cudaMalloc((void**) &d_C, mem_size_C)); //cutilSafeCall( cudaMallocPitch(&d_C, &pitch_C, WC*sizeof(float), HC) ); // allocate host memory for the result float* h_C = (float*) malloc(mem_size_C); // setup execution parameters //dim3 threads(BLOCK_SIZE, BLOCK_SIZE); //dim3 grid(WC / threads.x, HC / threads.y); int blocks=BLOCK_SIZE; int threads=512; // kernel warmup tensorMul<<< blocks, threads >>>(d_C, d_A, d_B, d_strides, nDims, total_size); cudaThreadSynchronize(); // create and start timer shrLog("Run Kernels...\n\n"); unsigned int timer = 0; cutilCheckError(cutCreateTimer(&timer)); cutilCheckError(cutStartTimer(timer)); // execute the kernel int nIter = 30; for (int j = 0; j < nIter; j++) { tensorMul<<< blocks, threads >>>(d_C, d_A, d_B, d_strides, nDims, total_size); } // check if kernel execution generated and error cutilCheckMsg("Kernel execution failed"); cudaThreadSynchronize(); // stop and destroy timer cutilCheckError(cutStopTimer(timer)); double dSeconds = cutGetTimerValue(timer)/((double)nIter * 1000.0); double dNumOps = 2.0 * total_size; double gflops = 1.0e-9 * dNumOps/dSeconds; //Log througput, etc shrLogEx(LOGBOTH | MASTER, 0, "tensorMul, Throughput = %.4f GFlop/s, Time = %.5f s, Size = %.0f Ops, NumDevsUsed = %d, Workgroup = %u\n", gflops, dSeconds, dNumOps, 1, threads); cutilCheckError(cutDeleteTimer(timer)); // copy result from device to host cutilSafeCall(cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost) ); std::cout << std::endl << std::endl << "input A:" << std::endl; for (int i=0; 
i<total_size ; i++){ std::cout << i << "\t" << h_A[i] << std::endl; } std::cout << std::endl << std::endl << "input B:" << std::endl; for (int i=0; i<total_size ; i++){ std::cout << i << "\t" << h_B[i] << std::endl; } std::cout << std::endl << std::endl << "output C:" << std::endl; for (int i=0; i<total_size ; i++){ std::cout << i << "\t" << h_C[i] << std::endl; } // compute reference solution /* shrLog("\nCheck against Host computation...\n\n"); float* reference = (float*)malloc(mem_size_C); computeGold(reference, h_A, h_B, uiHA, uiWA, uiWB); // check result shrBOOL res = shrCompareL2fe(reference, h_C, size_C, 1.0e-6f); if (res != shrTRUE) { printDiff(reference, h_C, uiWC, uiHC, 100, 1.0e-5f); } shrLog("%s \n\n", (shrTRUE == res) ? "PASSED" : "FAILED"); */ // clean up memory free(h_A); free(h_B); free(h_C); //free(reference); cutilSafeCall(cudaFree(d_A)); cutilSafeCall(cudaFree(d_B)); cutilSafeCall(cudaFree(d_C)); cudaThreadExit(); } // Allocates a matrix with random float entries. void randomInit(float* data, int size) { for (int i = 0; i < size; ++i) data[i] = rand() / (float)RAND_MAX; } void incrementalInit(float*data, int size) { for (int i=1 ; i<=size; i++){ data[i] = i; } } void printDiff(float *data1, float *data2, int width, int height, int iListLength, float fListTol) { shrLog("Listing first %d Differences > %.6f...\n", iListLength, fListTol); int i,j,k; int error_count=0; for (j = 0; j < height; j++) { if (error_count < iListLength) { shrLog("\n Row %d:\n", j); } for (i = 0; i < width; i++) { k = j * width + i; float fDiff = fabs(data1[k] - data2[k]); if (fDiff > fListTol) { if (error_count < iListLength) { shrLog(" Loc(%d,%d)\tCPU=%.5f\tGPU=%.5f\tDiff=%.6f\n", i, j, data1[k], data2[k], fDiff); } error_count++; } } } shrLog(" \n Total Errors = %d\n\n", error_count); }
2220be55f15074b42c4e9ac55aefe0985acd0ecc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "optimizer.h" namespace seq2seq { void Optimzer::update(Blob *param) { if (_optimizer_type == OPTIMIZER_TYPE::SGD) { Sgd(param->device_w, param->device_g, param->size()); } else if (_optimizer_type == OPTIMIZER_TYPE::SGDM) { Sgd_momentum(param->device_w, param->device_g, param->device_m, param->size()); } else if (_optimizer_type == OPTIMIZER_TYPE::NESTROV) { Nestrov(param->device_w, param->device_g, param->device_m, param->size()); } else if (_optimizer_type == OPTIMIZER_TYPE::ADAM) { Adam(param->device_w, param->device_g, param->device_m, param->device_v, param->size()); } } void Optimzer::Sgd(float *w, float *grad, int size) { // w = _lr * grad + w float lr = - _lr; cublasErrCheck(hipblasSaxpy(GlobalAssets::instance()->cublasHandle(), size, &lr, grad, 1, w, 1)); } void Optimzer::Sgd_momentum(float *w, float *g, float *m, int size) { // moment = beta * moment + grad const float beta = 0.9; cublasErrCheck(hipblasSaxpy(GlobalAssets::instance()->cublasHandle(), size, &beta, m, 1, g, 1)); // w = _lr * moment + w cublasErrCheck(hipblasSaxpy(GlobalAssets::instance()->cublasHandle(), size, &_lr, m, 1, w, 1)); } __global__ void nestrov_update_kernel(float *w, float *g, float *m, int N, const float beta, const float lr) { CUDA_KERNEL_LOOP(i, N) { const float mi = m[i]; float mi_new = lr * g[i] + beta * m[i]; float ng = (1 + beta) * mi_new + beta * mi; w[i] += ng; } } void nestrov_update(float *w, float *g, float *m, int N, const float beta, const float lr) { const dim3 blockSize(CUDA_NUM_THREADS, 1, 1); const dim3 gridSize(GET_BLOCKS(N), 1, 1); hipLaunchKernelGGL(( nestrov_update_kernel), dim3(gridSize), dim3(blockSize), 0, 0, w, g, m, N, beta, lr); } void Optimzer::Nestrov(float *w, float *g, float *m, int size) { const float beta = 0.9; const float lr = - _lr; nestrov_update(w, g, m, size, beta, lr); } __global__ void adam_update_kernel(float *w, float *g, float *m, float 
*v, int N, float beta1, float beta2, float correction, float eps, const float lr) { CUDA_KERNEL_LOOP(i, N) { float gi = g[i]; float mi = m[i] = m[i] * beta1 + gi * (1 - beta1); float vi = v[i] = v[i] * beta2 + gi * gi * (1 - beta2); float ng = lr * correction * mi / (sqrt(vi) + eps); w[i] += ng; } } void adam_update(float *w, float *g, float *m, float *v, int N, float beta1, float beta2, float correction, float eps, const float lr) { const dim3 blockSize(CUDA_NUM_THREADS, 1, 1); const dim3 gridSize(GET_BLOCKS(N), 1, 1); hipLaunchKernelGGL(( adam_update_kernel), dim3(gridSize), dim3(blockSize), 0, 0, w, g, m, v, N, beta1, beta2, correction, eps, lr); } void Optimzer::Adam(float *w, float *g, float *m, float *v, int size) { const float eps = 1e-8, beta1 = 0.9, beta2 = 0.999; const float correction = sqrt(1. - pow(beta2, _t)) / (1. - pow(beta1, _t)); adam_update(w, g, m, v, size, beta1, beta2, correction, eps, _lr); } } // namespace seq2seq
2220be55f15074b42c4e9ac55aefe0985acd0ecc.cu
#include "optimizer.h"

namespace seq2seq {

// Route the update of one parameter blob to the optimizer configured on
// this instance. w = weights, g = gradient, m = first moment / momentum,
// v = second moment (Adam only); all buffers live in device memory.
void Optimzer::update(Blob *param) {
    if (_optimizer_type == OPTIMIZER_TYPE::SGD) {
        Sgd(param->device_w, param->device_g, param->size());
    } else if (_optimizer_type == OPTIMIZER_TYPE::SGDM) {
        Sgd_momentum(param->device_w, param->device_g, param->device_m,
                     param->size());
    } else if (_optimizer_type == OPTIMIZER_TYPE::NESTROV) {
        Nestrov(param->device_w, param->device_g, param->device_m,
                param->size());
    } else if (_optimizer_type == OPTIMIZER_TYPE::ADAM) {
        Adam(param->device_w, param->device_g, param->device_m,
             param->device_v, param->size());
    }
}

// Vanilla SGD: w -= _lr * grad, as a single axpy with alpha = -_lr.
void Optimzer::Sgd(float *w, float *grad, int size) {
    float lr = - _lr;
    cublasErrCheck(cublasSaxpy(GlobalAssets::instance()->cublasHandle(),
                               size, &lr, grad, 1, w, 1));
}

// Fused element-wise kernel for SGD with momentum:
//   m = beta * m + g;  w += lr * m   (caller passes lr = -_lr)
__global__ void sgdm_update_kernel(float *w, float *g, float *m, int N,
                                   const float beta, const float lr) {
    CUDA_KERNEL_LOOP(i, N) {
        float mi = m[i] = beta * m[i] + g[i];
        w[i] += lr * mi;
    }
}

// SGD with momentum. Bug fixes vs. the previous version:
//  * the momentum buffer m is now actually updated (the old code axpy-ed
//    beta*m into g, leaving m untouched forever);
//  * the step uses -_lr, descending like Sgd()/Nestrov() (the old code
//    added +_lr * m);
//  * the gradient buffer g is no longer clobbered as a side effect.
void Optimzer::Sgd_momentum(float *w, float *g, float *m, int size) {
    const float beta = 0.9f;
    const float lr = - _lr;
    const dim3 blockSize(CUDA_NUM_THREADS, 1, 1);
    const dim3 gridSize(GET_BLOCKS(size), 1, 1);
    sgdm_update_kernel<<<gridSize, blockSize>>>(w, g, m, size, beta, lr);
}

// Nesterov momentum (Caffe-style NAG):
//   m_new = lr * g + beta * m          (lr = -_lr, negated by the caller)
//   w    += (1 + beta) * m_new - beta * m
// Bug fixes vs. the previous version: the updated momentum is persisted
// (m[i] was computed but never stored), and the look-ahead term subtracts
// beta * m_old instead of adding it, matching the standard NAG update.
__global__ void nestrov_update_kernel(float *w, float *g, float *m, int N,
                                      const float beta, const float lr) {
    CUDA_KERNEL_LOOP(i, N) {
        const float mi = m[i];
        float mi_new = lr * g[i] + beta * mi;
        m[i] = mi_new;
        w[i] += (1.0f + beta) * mi_new - beta * mi;
    }
}

void nestrov_update(float *w, float *g, float *m, int N, const float beta,
                    const float lr) {
    const dim3 blockSize(CUDA_NUM_THREADS, 1, 1);
    const dim3 gridSize(GET_BLOCKS(N), 1, 1);
    nestrov_update_kernel<<<gridSize, blockSize>>>(w, g, m, N, beta, lr);
}

void Optimzer::Nestrov(float *w, float *g, float *m, int size) {
    const float beta = 0.9f;
    const float lr = - _lr;
    nestrov_update(w, g, m, size, beta, lr);
}

// One Adam step per element:
//   m = beta1 * m + (1 - beta1) * g
//   v = beta2 * v + (1 - beta2) * g^2
//   w += lr * correction * m / (sqrt(v) + eps)   (lr = -_lr)
__global__ void adam_update_kernel(float *w, float *g, float *m, float *v,
                                   int N, float beta1, float beta2,
                                   float correction, float eps,
                                   const float lr) {
    CUDA_KERNEL_LOOP(i, N) {
        float gi = g[i];
        float mi = m[i] = m[i] * beta1 + gi * (1.0f - beta1);
        float vi = v[i] = v[i] * beta2 + gi * gi * (1.0f - beta2);
        // sqrtf keeps the math in fp32 (bare sqrt promotes to double)
        w[i] += lr * correction * mi / (sqrtf(vi) + eps);
    }
}

void adam_update(float *w, float *g, float *m, float *v, int N, float beta1,
                 float beta2, float correction, float eps, const float lr) {
    const dim3 blockSize(CUDA_NUM_THREADS, 1, 1);
    const dim3 gridSize(GET_BLOCKS(N), 1, 1);
    adam_update_kernel<<<gridSize, blockSize>>>(w, g, m, v, N, beta1, beta2, correction, eps, lr);
}

void Optimzer::Adam(float *w, float *g, float *m, float *v, int size) {
    const float eps = 1e-8f, beta1 = 0.9f, beta2 = 0.999f;
    // Standard Adam bias correction; assumes the step counter _t >= 1.
    const float correction = sqrt(1. - pow(beta2, _t)) / (1. - pow(beta1, _t));
    // Bug fix: pass -_lr so Adam descends, consistent with Sgd()/Nestrov()
    // (the old code added +_lr * m, stepping up the gradient).
    adam_update(w, g, m, v, size, beta1, beta2, correction, eps, - _lr);
}
} // namespace seq2seq
5cdc704865d357530d6b4f45582492d64ce6559e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #define N (1024*2048) #define THREADS_PER_BLOCK 512 __global__ void add(int *a, int *b, int *c, int n) { int index = threadIdx.x + blockIdx.x * blockDim.x; c[index] = a[index]+b[index]; } int main(void) { int NrBlocks = N/THREADS_PER_BLOCK; printf("Launching kernel with:\n"); printf("\t NrBlocks: %d\n", NrBlocks); printf("\t THREADS_PER_BLOCK: %d\n", THREADS_PER_BLOCK); int *a, *b, *c; // host copies of a, b, c int *d_a, *d_b, *d_c; // device copies of a, b, c int size = N * sizeof(int); // Alloc space for device copies of a, b, c hipMalloc((void **)&d_a, size); hipMalloc((void **)&d_b, size); hipMalloc((void **)&d_c, size); // Alloc space for host copies of a, b, c and setup input values printf("Preparing vectors of size %d\n", N); a = (int *)malloc(size); b = (int *)malloc(size); c = (int *)malloc(size); for (int i=0; i<N; i++) { a[i] = 1; b[i] = 4; c[i] = 0; } // Copy inputs to device hipMemcpy(d_a, a, size, hipMemcpyHostToDevice); hipMemcpy(d_b, b, size, hipMemcpyHostToDevice); // Launch add() kernel on GPU with several // blocks and THREADS_PER_BLOCK many threads // per block hipLaunchKernelGGL(( add), dim3(NrBlocks),dim3(THREADS_PER_BLOCK), 0, 0, d_a, d_b, d_c, N); // Copy result back to host hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost); printf("\nFirst 10 elements of resulting vector are:\n"); for (int i=0; i<10; i++) { printf("%d ", c[i]); } printf("\nLast 10 elements of resulting vector are:\n"); for (int i=N-10; i<N; i++) { printf("%d ", c[i]); } printf("\n"); // Cleanup free(a); free(b); free(c); hipFree(d_a); hipFree(d_b); hipFree(d_c); return 0; }
5cdc704865d357530d6b4f45582492d64ce6559e.cu
#include <stdio.h>

#define N (1024*2048)
#define THREADS_PER_BLOCK 512

// Element-wise vector add: c[i] = a[i] + b[i] for i in [0, n).
// 1-D launch; the bounds check guards the grid tail when n is not a
// multiple of the block size (previously the `n` parameter was ignored).
__global__ void add(int *a, int *b, int *c, int n) {
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if (index < n) {
        c[index] = a[index] + b[index];
    }
}

int main(void) {
    // Ceil-division so a trailing partial block is still launched when N is
    // not a multiple of THREADS_PER_BLOCK (here it divides evenly: 4096).
    int NrBlocks = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
    printf("Launching kernel with:\n");
    printf("\t NrBlocks: %d\n", NrBlocks);
    printf("\t THREADS_PER_BLOCK: %d\n", THREADS_PER_BLOCK);

    int *a, *b, *c;        // host copies of a, b, c
    int *d_a, *d_b, *d_c;  // device copies of a, b, c
    int size = N * sizeof(int);

    // Alloc space for device copies of a, b, c
    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    cudaMalloc((void **)&d_c, size);

    // Alloc space for host copies of a, b, c and setup input values
    printf("Preparing vectors of size %d\n", N);
    a = (int *)malloc(size);
    b = (int *)malloc(size);
    c = (int *)malloc(size);
    for (int i = 0; i < N; i++) {
        a[i] = 1;
        b[i] = 4;
        c[i] = 0;
    }

    // Copy inputs to device
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);

    // Launch add() kernel on GPU with several blocks and
    // THREADS_PER_BLOCK many threads per block
    add<<<NrBlocks, THREADS_PER_BLOCK>>>(d_a, d_b, d_c, N);

    // Copy result back to host (the blocking memcpy synchronizes with the
    // kernel before c is read below)
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);

    printf("\nFirst 10 elements of resulting vector are:\n");
    for (int i = 0; i < 10; i++) {
        printf("%d ", c[i]);
    }
    printf("\nLast 10 elements of resulting vector are:\n");
    for (int i = N - 10; i < N; i++) {
        printf("%d ", c[i]);
    }
    printf("\n");

    // Cleanup
    free(a);
    free(b);
    free(c);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
6a6a66c1486dc8eb10ed60578381242aba4d3b76.hip
// !!! This is a file automatically generated by hipify!!! /* * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <iostream> #include <hip/hip_runtime_api.h> namespace nvinfer1 { namespace plugin { __global__ void generateVoxels_kernel( int max_num_points, float *points, unsigned int* points_size, float min_x_range, float max_x_range, float min_y_range, float max_y_range, float min_z_range, float max_z_range, float pillar_x_size, float pillar_y_size, float pillar_z_size, int grid_y_size, int grid_x_size, int num_point_values, int max_points_per_voxel, unsigned int *mask, float *voxels) { int point_idx = blockIdx.x * blockDim.x + threadIdx.x; int batch_idx = point_idx / max_num_points; int point_idx_in_frame = point_idx % max_num_points; if(point_idx_in_frame >= points_size[batch_idx]) return; float px = points[num_point_values * point_idx]; float py = points[num_point_values * point_idx + 1]; float pz = points[num_point_values * point_idx + 2]; float pw = points[num_point_values * point_idx + 3]; float pt; if (num_point_values == 5) { pt = points[num_point_values * point_idx + 4]; } if(px<min_x_range||px>=max_x_range || py<min_y_range||py>=max_y_range || pz<min_z_range||pz>=max_z_range) return; int voxel_idx = floorf((px - min_x_range)/pillar_x_size); int voxel_idy = floorf((py - min_y_range)/pillar_y_size); unsigned int voxel_index 
= (batch_idx * grid_y_size + voxel_idy) * grid_x_size + voxel_idx; unsigned int point_id = atomicAdd(&(mask[voxel_index]), 1); if(point_id >= max_points_per_voxel) return; float *address = voxels + (voxel_index*max_points_per_voxel + point_id)*num_point_values; atomicExch(address+0, px); atomicExch(address+1, py); atomicExch(address+2, pz); atomicExch(address+3, pw); if (num_point_values == 5) { atomicExch(address+4, pt); } } __global__ void generateBaseFeatures_kernel( int batch_size, unsigned int *mask, float *voxels, int grid_y_size, int grid_x_size, unsigned int *pillar_num, int max_pillar_num, int max_points_per_voxel, int num_point_values, float *voxel_features, unsigned int *voxel_num_points, unsigned int *coords) { int voxel_id = blockIdx.x * blockDim.x + threadIdx.x; int voxel_idx = voxel_id % grid_x_size; int voxel_idy = (voxel_id / grid_x_size) % grid_y_size; int batch_id = voxel_id / (grid_y_size * grid_x_size); if (batch_id >= batch_size) return; unsigned int count = mask[voxel_id]; if( !(count>0) ) return; count = count<max_points_per_voxel?count:max_points_per_voxel; int current_pillarId = 0; current_pillarId = atomicAdd(pillar_num + batch_id, 1); voxel_num_points[batch_id * grid_y_size * grid_x_size + current_pillarId] = count; int4 coord = {0, 0, voxel_idy, voxel_idx}; ((int4*)coords)[batch_id * max_pillar_num + current_pillarId] = coord; for (int i=0; i<count; i++){ int inIndex = voxel_id*max_points_per_voxel + i; int outIndex = (batch_id * grid_x_size * grid_y_size + current_pillarId)*max_points_per_voxel + i; if (num_point_values == 4) { ((float4*)voxel_features)[outIndex] = ((float4*)voxels)[inIndex]; } else if (num_point_values == 5) { for(int k=0; k<5;k++) voxel_features[5 * outIndex + k] = voxels[5 * inIndex + k]; } } } void generateVoxels_launch( int batch_size, int max_num_points, float *points, unsigned int* points_size, float min_x_range, float max_x_range, float min_y_range, float max_y_range, float min_z_range, float max_z_range, float 
pillar_x_size, float pillar_y_size, float pillar_z_size, int grid_y_size, int grid_x_size, int num_point_values, int max_points_per_voxel, unsigned int *mask, float *voxels, hipStream_t stream) { int threadNum = 256; dim3 blocks((batch_size * max_num_points + threadNum - 1) / threadNum); dim3 threads(threadNum); hipLaunchKernelGGL(( generateVoxels_kernel), dim3(blocks), dim3(threads), 0, stream, max_num_points, points, points_size, min_x_range, max_x_range, min_y_range, max_y_range, min_z_range, max_z_range, pillar_x_size, pillar_y_size, pillar_z_size, grid_y_size, grid_x_size, num_point_values, max_points_per_voxel, mask, voxels); } void generateBaseFeatures_launch( int batch_size, unsigned int *mask, float *voxels, int grid_y_size, int grid_x_size, unsigned int *pillar_num, int max_pillar_num, int max_points_per_voxel, int num_point_values, float *voxel_features, unsigned int *voxel_num_points, unsigned int *coords, hipStream_t stream) { int blockSize = 1024; dim3 threads(blockSize); dim3 blocks((batch_size * grid_y_size * grid_x_size + blockSize - 1) / blockSize); hipLaunchKernelGGL(( generateBaseFeatures_kernel), dim3(blocks), dim3(threads), 0, stream, batch_size, mask, voxels, grid_y_size, grid_x_size, pillar_num, max_pillar_num, max_points_per_voxel, num_point_values, voxel_features, voxel_num_points, coords ); } __global__ void generateFeatures_kernel( int batch_size, int dense_pillar_num, float* voxel_features, unsigned int* voxel_num_points, unsigned int* coords, unsigned int *params, float voxel_x, float voxel_y, float voxel_z, float range_min_x, float range_min_y, float range_min_z, unsigned int voxel_features_size, unsigned int max_points, unsigned int max_voxels, float* features) { int warp_size = max_points; int pillar_idx = blockIdx.x * 4 + threadIdx.x/warp_size; int point_idx = threadIdx.x % warp_size; // In case the actual number of points is less than warp_size // E.g., warp_size=32, max_points=20 if (point_idx >= max_points) return; int batch_idx 
= pillar_idx / max_voxels; if (batch_idx >= batch_size) return; int pillar_idx_in_frame = pillar_idx % max_voxels; int dense_pillar_idx = pillar_idx_in_frame + dense_pillar_num * batch_idx; int pillar_idx_inBlock = threadIdx.x/warp_size; // Limit number of voxels to max_voxels unsigned int num_pillars = params[batch_idx] > max_voxels ? max_voxels : params[batch_idx]; // Update max_voxel to actual number if (pillar_idx_in_frame == 0 && point_idx == 0) { params[batch_idx] = num_pillars; } if (pillar_idx_in_frame >= num_pillars) return; //load src __shared__ float pillarSM[4][64][5]; // up to 64 points per pillar __shared__ float4 pillarSumSM[4]; //4*4 __shared__ int4 cordsSM[4]; //4*4 __shared__ int pointsNumSM[4]; //4 __shared__ float pillarOutSM[4][64][11]; // up to 11 features per point if (point_idx == 0) { pointsNumSM[pillar_idx_inBlock] = voxel_num_points[dense_pillar_idx]; cordsSM[pillar_idx_inBlock] = ((int4*)coords)[dense_pillar_idx]; pillarSumSM[pillar_idx_inBlock] = {0,0,0,0}; } for(int k=0; k<5; k++) { pillarSM[pillar_idx_inBlock][point_idx][k] = voxel_features[5 * (dense_pillar_idx*max_points + point_idx) + k]; } __syncthreads(); //calculate sm if (point_idx < pointsNumSM[pillar_idx_inBlock]) { atomicAdd(&(pillarSumSM[pillar_idx_inBlock].x), pillarSM[pillar_idx_inBlock][point_idx][0]); atomicAdd(&(pillarSumSM[pillar_idx_inBlock].y), pillarSM[pillar_idx_inBlock][point_idx][1]); atomicAdd(&(pillarSumSM[pillar_idx_inBlock].z), pillarSM[pillar_idx_inBlock][point_idx][2]); } __syncthreads(); //feature-mean float4 mean; float validPoints = pointsNumSM[pillar_idx_inBlock]; mean.x = pillarSumSM[pillar_idx_inBlock].x / validPoints; mean.y = pillarSumSM[pillar_idx_inBlock].y / validPoints; mean.z = pillarSumSM[pillar_idx_inBlock].z / validPoints; mean.x = pillarSM[pillar_idx_inBlock][point_idx][0] - mean.x; mean.y = pillarSM[pillar_idx_inBlock][point_idx][1] - mean.y; mean.z = pillarSM[pillar_idx_inBlock][point_idx][2] - mean.z; //calculate offset float x_offset = 
voxel_x / 2.0f + cordsSM[pillar_idx_inBlock].w * voxel_x + range_min_x; float y_offset = voxel_y / 2.0f + cordsSM[pillar_idx_inBlock].z * voxel_y + range_min_y; float z_offset = voxel_z / 2.0f + cordsSM[pillar_idx_inBlock].y * voxel_z + range_min_z; //feature-offset float4 center; center.x = pillarSM[pillar_idx_inBlock][point_idx][0] - x_offset; center.y = pillarSM[pillar_idx_inBlock][point_idx][1] - y_offset; center.z = pillarSM[pillar_idx_inBlock][point_idx][2] - z_offset; //store output if (point_idx < pointsNumSM[pillar_idx_inBlock]) { for(int k=0; k<5; k++) pillarOutSM[pillar_idx_inBlock][point_idx][k] = pillarSM[pillar_idx_inBlock][point_idx][k]; pillarOutSM[pillar_idx_inBlock][point_idx][5] = mean.x; pillarOutSM[pillar_idx_inBlock][point_idx][5 + 1] = mean.y; pillarOutSM[pillar_idx_inBlock][point_idx][5 + 2] = mean.z; pillarOutSM[pillar_idx_inBlock][point_idx][5 + 3] = center.x; pillarOutSM[pillar_idx_inBlock][point_idx][5 + 4] = center.y; if (5 + 5 < voxel_features_size) pillarOutSM[pillar_idx_inBlock][point_idx][warp_size + 5] = center.z; } else { for (int k = 0; k < voxel_features_size; k++) pillarOutSM[pillar_idx_inBlock][point_idx][k] = 0; } __syncthreads(); for(int i = 0; i < voxel_features_size; i ++) { int outputSMId = pillar_idx_inBlock*64*11 + point_idx * 11 + i; int outputId = pillar_idx*max_points*voxel_features_size + point_idx * voxel_features_size + i; features[outputId] = ((float*)pillarOutSM)[outputSMId] ; } } __global__ void generateFeatures_kernel_4x( int batch_size, int dense_pillar_num, float* voxel_features, unsigned int* voxel_num_points, unsigned int* coords, unsigned int *params, float voxel_x, float voxel_y, float voxel_z, float range_min_x, float range_min_y, float range_min_z, unsigned int voxel_features_size, unsigned int max_points, unsigned int max_voxels, float* features) { int warp_size = max_points; int pillar_idx = blockIdx.x * 4 + threadIdx.x / warp_size; int point_idx = threadIdx.x % warp_size; // In case the actual 
number of points is less than warp_size // E.g., warp_size=32, max_points=20 if (point_idx >= max_points) return; int batch_idx = pillar_idx / max_voxels; if (batch_idx >= batch_size) return; int pillar_idx_in_frame = pillar_idx % max_voxels; int dense_pillar_idx = pillar_idx_in_frame + dense_pillar_num * batch_idx; int pillar_idx_inBlock = threadIdx.x / warp_size; // Limit number of voxels to max_voxels unsigned int num_pillars = params[batch_idx] > max_voxels ? max_voxels : params[batch_idx]; // Update max_voxel to actual number if (pillar_idx_in_frame == 0 && point_idx == 0) { params[batch_idx] = num_pillars; } if (pillar_idx_in_frame >= num_pillars) return; //load src __shared__ float4 pillarSM[4][64]; // up to 64 points per pillar __shared__ float4 pillarSumSM[4]; //4*4 __shared__ int4 cordsSM[4]; //4*4 __shared__ int pointsNumSM[4]; //4 __shared__ float pillarOutSM[4][64][11]; // up to 11 output features per point if (point_idx == 0) { pointsNumSM[pillar_idx_inBlock] = voxel_num_points[dense_pillar_idx]; cordsSM[pillar_idx_inBlock] = ((int4*)coords)[pillar_idx]; pillarSumSM[pillar_idx_inBlock] = {0,0,0,0}; } pillarSM[pillar_idx_inBlock][point_idx] = ((float4*)voxel_features)[dense_pillar_idx*max_points + point_idx]; __syncthreads(); //calculate sm if (point_idx < pointsNumSM[pillar_idx_inBlock]) { atomicAdd(&(pillarSumSM[pillar_idx_inBlock].x), pillarSM[pillar_idx_inBlock][point_idx].x); atomicAdd(&(pillarSumSM[pillar_idx_inBlock].y), pillarSM[pillar_idx_inBlock][point_idx].y); atomicAdd(&(pillarSumSM[pillar_idx_inBlock].z), pillarSM[pillar_idx_inBlock][point_idx].z); } __syncthreads(); //feature-mean float4 mean; float validPoints = pointsNumSM[pillar_idx_inBlock]; mean.x = pillarSumSM[pillar_idx_inBlock].x / validPoints; mean.y = pillarSumSM[pillar_idx_inBlock].y / validPoints; mean.z = pillarSumSM[pillar_idx_inBlock].z / validPoints; mean.x = pillarSM[pillar_idx_inBlock][point_idx].x - mean.x; mean.y = pillarSM[pillar_idx_inBlock][point_idx].y - mean.y; 
mean.z = pillarSM[pillar_idx_inBlock][point_idx].z - mean.z; //calculate offset float x_offset = voxel_x / 2.0f + cordsSM[pillar_idx_inBlock].w * voxel_x + range_min_x; float y_offset = voxel_y / 2.0f + cordsSM[pillar_idx_inBlock].z * voxel_y + range_min_y; float z_offset = voxel_z / 2.0f + cordsSM[pillar_idx_inBlock].y * voxel_z + range_min_z; //feature-offset float4 center; center.x = pillarSM[pillar_idx_inBlock][point_idx].x - x_offset; center.y = pillarSM[pillar_idx_inBlock][point_idx].y - y_offset; center.z = pillarSM[pillar_idx_inBlock][point_idx].z - z_offset; //store output if (point_idx < pointsNumSM[pillar_idx_inBlock]) { pillarOutSM[pillar_idx_inBlock][point_idx][0] = pillarSM[pillar_idx_inBlock][point_idx].x; pillarOutSM[pillar_idx_inBlock][point_idx][1] = pillarSM[pillar_idx_inBlock][point_idx].y; pillarOutSM[pillar_idx_inBlock][point_idx][2] = pillarSM[pillar_idx_inBlock][point_idx].z; pillarOutSM[pillar_idx_inBlock][point_idx][3] = pillarSM[pillar_idx_inBlock][point_idx].w; pillarOutSM[pillar_idx_inBlock][point_idx][4] = mean.x; pillarOutSM[pillar_idx_inBlock][point_idx][5] = mean.y; pillarOutSM[pillar_idx_inBlock][point_idx][6] = mean.z; pillarOutSM[pillar_idx_inBlock][point_idx][7] = center.x; pillarOutSM[pillar_idx_inBlock][point_idx][8] = center.y; pillarOutSM[pillar_idx_inBlock][point_idx][9] = center.z; } else { pillarOutSM[pillar_idx_inBlock][point_idx][0] = 0; pillarOutSM[pillar_idx_inBlock][point_idx][1] = 0; pillarOutSM[pillar_idx_inBlock][point_idx][2] = 0; pillarOutSM[pillar_idx_inBlock][point_idx][3] = 0; pillarOutSM[pillar_idx_inBlock][point_idx][4] = 0; pillarOutSM[pillar_idx_inBlock][point_idx][5] = 0; pillarOutSM[pillar_idx_inBlock][point_idx][6] = 0; pillarOutSM[pillar_idx_inBlock][point_idx][7] = 0; pillarOutSM[pillar_idx_inBlock][point_idx][8] = 0; pillarOutSM[pillar_idx_inBlock][point_idx][9] = 0; } __syncthreads(); for(int i = 0; i < voxel_features_size; i ++) { int outputSMId = pillar_idx_inBlock*64*11 + point_idx * 11 + i; int 
outputId = pillar_idx*max_points*voxel_features_size + point_idx * voxel_features_size + i; features[outputId] = ((float*)pillarOutSM)[outputSMId] ; } } int generateFeatures_launch( int batch_size, int dense_pillar_num, float* voxel_features, unsigned int* voxel_num_points, unsigned int* coords, unsigned int *params, float voxel_x, float voxel_y, float voxel_z, float range_min_x, float range_min_y, float range_min_z, unsigned int voxel_features_size, unsigned int max_points, unsigned int max_voxels, unsigned int num_point_values, float* features, hipStream_t stream) { unsigned int warp_size = max_points; dim3 blocks((batch_size * max_voxels + 3) / 4); dim3 threads(4*warp_size); if (num_point_values == 4) { hipLaunchKernelGGL(( generateFeatures_kernel_4x), dim3(blocks), dim3(threads), 0, stream, batch_size, dense_pillar_num, voxel_features, voxel_num_points, coords, params, voxel_x, voxel_y, voxel_z, range_min_x, range_min_y, range_min_z, voxel_features_size, max_points, max_voxels, features); } else { hipLaunchKernelGGL(( generateFeatures_kernel), dim3(blocks), dim3(threads), 0, stream, batch_size, dense_pillar_num, voxel_features, voxel_num_points, coords, params, voxel_x, voxel_y, voxel_z, range_min_x, range_min_y, range_min_z, voxel_features_size, max_points, max_voxels, features); } auto err = hipGetLastError(); if (hipSuccess != err) { fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err)); exit(-1); } return err; } } // namespace plugin } // namespace nvinfer1
6a6a66c1486dc8eb10ed60578381242aba4d3b76.cu
/*
 * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <cuda_runtime_api.h>

namespace nvinfer1
{
namespace plugin
{

// Scatter raw points into a dense per-batch voxel grid.
// 1-D launch over batch_size * max_num_points threads. `mask` holds the
// per-voxel point count (may over-count past max_points_per_voxel; clamped
// by the consumer), `voxels` holds up to max_points_per_voxel points per
// voxel, num_point_values (4 or 5) floats each.
__global__ void generateVoxels_kernel(
    int max_num_points, float *points, unsigned int *points_size,
    float min_x_range, float max_x_range,
    float min_y_range, float max_y_range,
    float min_z_range, float max_z_range,
    float pillar_x_size, float pillar_y_size, float pillar_z_size,
    int grid_y_size, int grid_x_size,
    int num_point_values, int max_points_per_voxel,
    unsigned int *mask, float *voxels)
{
  int point_idx = blockIdx.x * blockDim.x + threadIdx.x;
  int batch_idx = point_idx / max_num_points;
  int point_idx_in_frame = point_idx % max_num_points;
  if (point_idx_in_frame >= points_size[batch_idx]) return;

  float px = points[num_point_values * point_idx];
  float py = points[num_point_values * point_idx + 1];
  float pz = points[num_point_values * point_idx + 2];
  float pw = points[num_point_values * point_idx + 3];
  float pt;
  if (num_point_values == 5)
  {
    pt = points[num_point_values * point_idx + 4];
  }

  // Drop points outside the configured detection range.
  if (px < min_x_range || px >= max_x_range
      || py < min_y_range || py >= max_y_range
      || pz < min_z_range || pz >= max_z_range) return;

  int voxel_idx = floorf((px - min_x_range) / pillar_x_size);
  int voxel_idy = floorf((py - min_y_range) / pillar_y_size);
  unsigned int voxel_index = (batch_idx * grid_y_size + voxel_idy) * grid_x_size + voxel_idx;

  // Claim a slot in this voxel; overflowing points are silently dropped.
  unsigned int point_id = atomicAdd(&(mask[voxel_index]), 1);
  if (point_id >= max_points_per_voxel) return;

  float *address = voxels + (voxel_index * max_points_per_voxel + point_id) * num_point_values;
  atomicExch(address + 0, px);
  atomicExch(address + 1, py);
  atomicExch(address + 2, pz);
  atomicExch(address + 3, pw);
  if (num_point_values == 5)
  {
    atomicExch(address + 4, pt);
  }
}

// Compact the dense voxel grid into a per-batch pillar list.
// One thread per voxel; pillars are appended per batch by atomicAdd on
// pillar_num, so pillar order is nondeterministic.
__global__ void generateBaseFeatures_kernel(
    int batch_size, unsigned int *mask, float *voxels,
    int grid_y_size, int grid_x_size,
    unsigned int *pillar_num, int max_pillar_num,
    int max_points_per_voxel, int num_point_values,
    float *voxel_features, unsigned int *voxel_num_points, unsigned int *coords)
{
  int voxel_id = blockIdx.x * blockDim.x + threadIdx.x;
  int voxel_idx = voxel_id % grid_x_size;
  int voxel_idy = (voxel_id / grid_x_size) % grid_y_size;
  int batch_id = voxel_id / (grid_y_size * grid_x_size);
  if (batch_id >= batch_size) return;

  unsigned int count = mask[voxel_id];
  if (!(count > 0)) return;
  // mask may over-count (see generateVoxels_kernel); clamp to capacity.
  count = count < max_points_per_voxel ? count : max_points_per_voxel;

  int current_pillarId = 0;
  current_pillarId = atomicAdd(pillar_num + batch_id, 1);

  // NOTE(review): voxel_num_points/voxel_features use a grid_y*grid_x batch
  // stride while coords uses max_pillar_num — this is only consistent if the
  // consumer's dense_pillar_num equals grid_y_size*grid_x_size; confirm.
  voxel_num_points[batch_id * grid_y_size * grid_x_size + current_pillarId] = count;

  int4 coord = {0, 0, voxel_idy, voxel_idx};
  ((int4*)coords)[batch_id * max_pillar_num + current_pillarId] = coord;

  for (int i = 0; i < count; i++)
  {
    int inIndex = voxel_id * max_points_per_voxel + i;
    int outIndex = (batch_id * grid_x_size * grid_y_size + current_pillarId) * max_points_per_voxel + i;
    if (num_point_values == 4)
    {
      ((float4*)voxel_features)[outIndex] = ((float4*)voxels)[inIndex];
    }
    else if (num_point_values == 5)
    {
      for (int k = 0; k < 5; k++)
        voxel_features[5 * outIndex + k] = voxels[5 * inIndex + k];
    }
  }
}

// Host wrapper: one thread per (batch, point).
void generateVoxels_launch(
    int batch_size, int max_num_points, float *points, unsigned int *points_size,
    float min_x_range, float max_x_range,
    float min_y_range, float max_y_range,
    float min_z_range, float max_z_range,
    float pillar_x_size, float pillar_y_size, float pillar_z_size,
    int grid_y_size, int grid_x_size,
    int num_point_values, int max_points_per_voxel,
    unsigned int *mask, float *voxels,
    cudaStream_t stream)
{
  int threadNum = 256;
  dim3 blocks((batch_size * max_num_points + threadNum - 1) / threadNum);
  dim3 threads(threadNum);
  generateVoxels_kernel<<<blocks, threads, 0, stream>>>(
      max_num_points, points, points_size,
      min_x_range, max_x_range, min_y_range, max_y_range,
      min_z_range, max_z_range,
      pillar_x_size, pillar_y_size, pillar_z_size,
      grid_y_size, grid_x_size,
      num_point_values, max_points_per_voxel,
      mask, voxels);
}

// Host wrapper: one thread per voxel cell.
void generateBaseFeatures_launch(
    int batch_size, unsigned int *mask, float *voxels,
    int grid_y_size, int grid_x_size,
    unsigned int *pillar_num, int max_pillar_num,
    int max_points_per_voxel, int num_point_values,
    float *voxel_features, unsigned int *voxel_num_points, unsigned int *coords,
    cudaStream_t stream)
{
  int blockSize = 1024;
  dim3 threads(blockSize);
  dim3 blocks((batch_size * grid_y_size * grid_x_size + blockSize - 1) / blockSize);
  generateBaseFeatures_kernel<<<blocks, threads, 0, stream>>>(
      batch_size, mask, voxels, grid_y_size, grid_x_size,
      pillar_num, max_pillar_num, max_points_per_voxel, num_point_values,
      voxel_features, voxel_num_points, coords);
}

// Build per-point pillar features for the 5-value (x,y,z,w,t) path.
// Launch: each block covers 4 pillars with max_points threads per pillar
// (blockDim.x == 4 * max_points); requires max_points <= 64 — the shared
// tiles are sized [4][64]. Output per point: 5 raw + 3 (offset from pillar
// centroid) + up to 3 (offset from voxel center) = up to 11 features.
__global__ void generateFeatures_kernel(
    int batch_size, int dense_pillar_num,
    float *voxel_features, unsigned int *voxel_num_points, unsigned int *coords,
    unsigned int *params,
    float voxel_x, float voxel_y, float voxel_z,
    float range_min_x, float range_min_y, float range_min_z,
    unsigned int voxel_features_size, unsigned int max_points,
    unsigned int max_voxels, float *features)
{
  int warp_size = max_points;
  int pillar_idx = blockIdx.x * 4 + threadIdx.x / warp_size;
  int point_idx = threadIdx.x % warp_size;
  // In case the actual number of points is less than warp_size
  // E.g., warp_size=32, max_points=20
  if (point_idx >= max_points) return;
  int batch_idx = pillar_idx / max_voxels;
  if (batch_idx >= batch_size) return;
  int pillar_idx_in_frame = pillar_idx % max_voxels;
  int dense_pillar_idx = pillar_idx_in_frame + dense_pillar_num * batch_idx;
  int pillar_idx_inBlock = threadIdx.x / warp_size;

  // Limit number of voxels to max_voxels
  unsigned int num_pillars = params[batch_idx] > max_voxels ? max_voxels : params[batch_idx];
  // Update max_voxel to actual number
  if (pillar_idx_in_frame == 0 && point_idx == 0)
  {
    params[batch_idx] = num_pillars;
  }
  // NOTE(review): threads that return here never reach the __syncthreads()
  // below, so the barriers are hit by a subset of the block. Inherited
  // behavior; restructuring would change the kernel's shape.
  if (pillar_idx_in_frame >= num_pillars) return;

  //load src
  __shared__ float pillarSM[4][64][5];      // up to 64 points per pillar
  __shared__ float4 pillarSumSM[4];         // per-pillar xyz sums
  __shared__ int4 cordsSM[4];               // per-pillar grid coordinates
  __shared__ int pointsNumSM[4];            // valid point count per pillar
  __shared__ float pillarOutSM[4][64][11];  // up to 11 features per point
  if (point_idx == 0)
  {
    pointsNumSM[pillar_idx_inBlock] = voxel_num_points[dense_pillar_idx];
    // NOTE(review): the 4-value kernel reads coords[pillar_idx] here while
    // this one uses dense_pillar_idx; they agree only when
    // dense_pillar_num == max_voxels == max_pillar_num — confirm intent.
    cordsSM[pillar_idx_inBlock] = ((int4*)coords)[dense_pillar_idx];
    pillarSumSM[pillar_idx_inBlock] = {0, 0, 0, 0};
  }
  for (int k = 0; k < 5; k++)
  {
    pillarSM[pillar_idx_inBlock][point_idx][k] =
        voxel_features[5 * (dense_pillar_idx * max_points + point_idx) + k];
  }
  __syncthreads();

  //calculate sm
  if (point_idx < pointsNumSM[pillar_idx_inBlock])
  {
    atomicAdd(&(pillarSumSM[pillar_idx_inBlock].x), pillarSM[pillar_idx_inBlock][point_idx][0]);
    atomicAdd(&(pillarSumSM[pillar_idx_inBlock].y), pillarSM[pillar_idx_inBlock][point_idx][1]);
    atomicAdd(&(pillarSumSM[pillar_idx_inBlock].z), pillarSM[pillar_idx_inBlock][point_idx][2]);
  }
  __syncthreads();

  //feature-mean: offset of each point from the pillar centroid
  float4 mean;
  float validPoints = pointsNumSM[pillar_idx_inBlock];
  mean.x = pillarSumSM[pillar_idx_inBlock].x / validPoints;
  mean.y = pillarSumSM[pillar_idx_inBlock].y / validPoints;
  mean.z = pillarSumSM[pillar_idx_inBlock].z / validPoints;
  mean.x = pillarSM[pillar_idx_inBlock][point_idx][0] - mean.x;
  mean.y = pillarSM[pillar_idx_inBlock][point_idx][1] - mean.y;
  mean.z = pillarSM[pillar_idx_inBlock][point_idx][2] - mean.z;

  //calculate offset: voxel center in world coordinates
  float x_offset = voxel_x / 2.0f + cordsSM[pillar_idx_inBlock].w * voxel_x + range_min_x;
  float y_offset = voxel_y / 2.0f + cordsSM[pillar_idx_inBlock].z * voxel_y + range_min_y;
  float z_offset = voxel_z / 2.0f + cordsSM[pillar_idx_inBlock].y * voxel_z + range_min_z;

  //feature-offset: offset of each point from the voxel center
  float4 center;
  center.x = pillarSM[pillar_idx_inBlock][point_idx][0] - x_offset;
  center.y = pillarSM[pillar_idx_inBlock][point_idx][1] - y_offset;
  center.z = pillarSM[pillar_idx_inBlock][point_idx][2] - z_offset;

  //store output
  if (point_idx < pointsNumSM[pillar_idx_inBlock])
  {
    for (int k = 0; k < 5; k++)
      pillarOutSM[pillar_idx_inBlock][point_idx][k] = pillarSM[pillar_idx_inBlock][point_idx][k];
    pillarOutSM[pillar_idx_inBlock][point_idx][5] = mean.x;
    pillarOutSM[pillar_idx_inBlock][point_idx][5 + 1] = mean.y;
    pillarOutSM[pillar_idx_inBlock][point_idx][5 + 2] = mean.z;
    pillarOutSM[pillar_idx_inBlock][point_idx][5 + 3] = center.x;
    pillarOutSM[pillar_idx_inBlock][point_idx][5 + 4] = center.y;
    if (5 + 5 < voxel_features_size)
      // Fix: center.z belongs in slot 10 (5 raw + 3 mean + 2). The previous
      // index [warp_size + 5] overran the [11]-wide tile for max_points > 6.
      pillarOutSM[pillar_idx_inBlock][point_idx][5 + 5] = center.z;
  }
  else
  {
    for (int k = 0; k < voxel_features_size; k++)
      pillarOutSM[pillar_idx_inBlock][point_idx][k] = 0;
  }
  __syncthreads();

  for (int i = 0; i < voxel_features_size; i++)
  {
    int outputSMId = pillar_idx_inBlock * 64 * 11 + point_idx * 11 + i;
    int outputId = pillar_idx * max_points * voxel_features_size + point_idx * voxel_features_size + i;
    features[outputId] = ((float*)pillarOutSM)[outputSMId];
  }
}

// 4-value (x,y,z,w) fast path: same structure as generateFeatures_kernel
// but each point is loaded as one float4. Output per point: 4 raw +
// 3 mean-offset + 3 center-offset = 10 features.
__global__ void generateFeatures_kernel_4x(
    int batch_size, int dense_pillar_num,
    float *voxel_features, unsigned int *voxel_num_points, unsigned int *coords,
    unsigned int *params,
    float voxel_x, float voxel_y, float voxel_z,
    float range_min_x, float range_min_y, float range_min_z,
    unsigned int voxel_features_size, unsigned int max_points,
    unsigned int max_voxels, float *features)
{
  int warp_size = max_points;
  int pillar_idx = blockIdx.x * 4 + threadIdx.x / warp_size;
  int point_idx = threadIdx.x % warp_size;
  // In case the actual number of points is less than warp_size
  // E.g., warp_size=32, max_points=20
  if (point_idx >= max_points) return;
  int batch_idx = pillar_idx / max_voxels;
  if (batch_idx >= batch_size) return;
  int pillar_idx_in_frame = pillar_idx % max_voxels;
  int dense_pillar_idx = pillar_idx_in_frame + dense_pillar_num * batch_idx;
  int pillar_idx_inBlock = threadIdx.x / warp_size;

  // Limit number of voxels to max_voxels
  unsigned int num_pillars = params[batch_idx] > max_voxels ? max_voxels : params[batch_idx];
  // Update max_voxel to actual number
  if (pillar_idx_in_frame == 0 && point_idx == 0)
  {
    params[batch_idx] = num_pillars;
  }
  if (pillar_idx_in_frame >= num_pillars) return;

  //load src
  __shared__ float4 pillarSM[4][64];        // up to 64 points per pillar
  __shared__ float4 pillarSumSM[4];         // per-pillar xyz sums
  __shared__ int4 cordsSM[4];               // per-pillar grid coordinates
  __shared__ int pointsNumSM[4];            // valid point count per pillar
  __shared__ float pillarOutSM[4][64][11];  // up to 11 output features per point
  if (point_idx == 0)
  {
    pointsNumSM[pillar_idx_inBlock] = voxel_num_points[dense_pillar_idx];
    // NOTE(review): indexes coords with pillar_idx while the 5-value kernel
    // uses dense_pillar_idx; the two agree only when
    // dense_pillar_num == max_voxels == max_pillar_num — confirm intent.
    cordsSM[pillar_idx_inBlock] = ((int4*)coords)[pillar_idx];
    pillarSumSM[pillar_idx_inBlock] = {0, 0, 0, 0};
  }
  pillarSM[pillar_idx_inBlock][point_idx] =
      ((float4*)voxel_features)[dense_pillar_idx * max_points + point_idx];
  __syncthreads();

  //calculate sm
  if (point_idx < pointsNumSM[pillar_idx_inBlock])
  {
    atomicAdd(&(pillarSumSM[pillar_idx_inBlock].x), pillarSM[pillar_idx_inBlock][point_idx].x);
    atomicAdd(&(pillarSumSM[pillar_idx_inBlock].y), pillarSM[pillar_idx_inBlock][point_idx].y);
    atomicAdd(&(pillarSumSM[pillar_idx_inBlock].z), pillarSM[pillar_idx_inBlock][point_idx].z);
  }
  __syncthreads();

  //feature-mean
  float4 mean;
  float validPoints = pointsNumSM[pillar_idx_inBlock];
  mean.x = pillarSumSM[pillar_idx_inBlock].x / validPoints;
  mean.y = pillarSumSM[pillar_idx_inBlock].y / validPoints;
  mean.z = pillarSumSM[pillar_idx_inBlock].z / validPoints;
  mean.x = pillarSM[pillar_idx_inBlock][point_idx].x - mean.x;
  mean.y = pillarSM[pillar_idx_inBlock][point_idx].y - mean.y;
  mean.z = pillarSM[pillar_idx_inBlock][point_idx].z - mean.z;

  //calculate offset
  float x_offset = voxel_x / 2.0f + cordsSM[pillar_idx_inBlock].w * voxel_x + range_min_x;
  float y_offset = voxel_y / 2.0f + cordsSM[pillar_idx_inBlock].z * voxel_y + range_min_y;
  float z_offset = voxel_z / 2.0f + cordsSM[pillar_idx_inBlock].y * voxel_z + range_min_z;

  //feature-offset
  float4 center;
  center.x = pillarSM[pillar_idx_inBlock][point_idx].x - x_offset;
  center.y = pillarSM[pillar_idx_inBlock][point_idx].y - y_offset;
  center.z = pillarSM[pillar_idx_inBlock][point_idx].z - z_offset;

  //store output
  if (point_idx < pointsNumSM[pillar_idx_inBlock])
  {
    pillarOutSM[pillar_idx_inBlock][point_idx][0] = pillarSM[pillar_idx_inBlock][point_idx].x;
    pillarOutSM[pillar_idx_inBlock][point_idx][1] = pillarSM[pillar_idx_inBlock][point_idx].y;
    pillarOutSM[pillar_idx_inBlock][point_idx][2] = pillarSM[pillar_idx_inBlock][point_idx].z;
    pillarOutSM[pillar_idx_inBlock][point_idx][3] = pillarSM[pillar_idx_inBlock][point_idx].w;
    pillarOutSM[pillar_idx_inBlock][point_idx][4] = mean.x;
    pillarOutSM[pillar_idx_inBlock][point_idx][5] = mean.y;
    pillarOutSM[pillar_idx_inBlock][point_idx][6] = mean.z;
    pillarOutSM[pillar_idx_inBlock][point_idx][7] = center.x;
    pillarOutSM[pillar_idx_inBlock][point_idx][8] = center.y;
    pillarOutSM[pillar_idx_inBlock][point_idx][9] = center.z;
  }
  else
  {
    pillarOutSM[pillar_idx_inBlock][point_idx][0] = 0;
    pillarOutSM[pillar_idx_inBlock][point_idx][1] = 0;
    pillarOutSM[pillar_idx_inBlock][point_idx][2] = 0;
    pillarOutSM[pillar_idx_inBlock][point_idx][3] = 0;
    pillarOutSM[pillar_idx_inBlock][point_idx][4] = 0;
    pillarOutSM[pillar_idx_inBlock][point_idx][5] = 0;
    pillarOutSM[pillar_idx_inBlock][point_idx][6] = 0;
    pillarOutSM[pillar_idx_inBlock][point_idx][7] = 0;
    pillarOutSM[pillar_idx_inBlock][point_idx][8] = 0;
    pillarOutSM[pillar_idx_inBlock][point_idx][9] = 0;
  }
  __syncthreads();

  for (int i = 0; i < voxel_features_size; i++)
  {
    int outputSMId = pillar_idx_inBlock * 64 * 11 + point_idx * 11 + i;
    int outputId = pillar_idx * max_points * voxel_features_size + point_idx * voxel_features_size + i;
    features[outputId] = ((float*)pillarOutSM)[outputSMId];
  }
}

// Dispatch the per-point feature kernel (4 pillars per block, max_points
// threads per pillar; requires max_points <= 64). Returns the last CUDA
// error code (exits the process on failure).
int generateFeatures_launch(
    int batch_size, int dense_pillar_num,
    float *voxel_features, unsigned int *voxel_num_points, unsigned int *coords,
    unsigned int *params,
    float voxel_x, float voxel_y, float voxel_z,
    float range_min_x, float range_min_y, float range_min_z,
    unsigned int voxel_features_size, unsigned int max_points,
    unsigned int max_voxels, unsigned int num_point_values,
    float *features, cudaStream_t stream)
{
  unsigned int warp_size = max_points;
  dim3 blocks((batch_size * max_voxels + 3) / 4);
  dim3 threads(4 * warp_size);
  if (num_point_values == 4)
  {
    generateFeatures_kernel_4x<<<blocks, threads, 0, stream>>>(
        batch_size, dense_pillar_num, voxel_features, voxel_num_points, coords, params,
        voxel_x, voxel_y, voxel_z, range_min_x, range_min_y, range_min_z,
        voxel_features_size, max_points, max_voxels, features);
  }
  else
  {
    generateFeatures_kernel<<<blocks, threads, 0, stream>>>(
        batch_size, dense_pillar_num, voxel_features, voxel_num_points, coords, params,
        voxel_x, voxel_y, voxel_z, range_min_x, range_min_y, range_min_z,
        voxel_features_size, max_points, max_voxels, features);
  }
  auto err = cudaGetLastError();
  if (cudaSuccess != err)
  {
    fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
    exit(-1);
  }
  return err;
}

} // namespace plugin
} // namespace nvinfer1
510c5b6c688b4cd7c721cfb4dd861c99184d518f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" // For __syncthreads to not be not found anymore #ifndef __HIPCC__ #define __HIPCC__ #endif #include <cstdlib> #include <cstdint> #include <iostream> #include <opencv2/opencv.hpp> #include <opencv2/core/core.hpp> #include <opencv2/imgcodecs.hpp> #include <opencv2/highgui/highgui.hpp> #include <hip/device_functions.h> using namespace std; using namespace cv; const int max_threads_per_block = 1024; const int number_of_bins = 256; #define gpu_error_check(ans) { gpu_assert((ans), __FILE__, __LINE__); } inline void gpu_assert(const hipError_t code, const char* file, const int line, const bool abort = true) { if (code != hipSuccess) { cerr << "GPU_assert: " << hipGetErrorString(code) << " " << file << " " << line << ".\n"; if (abort) { exit(code); } } } Mat read_image(const string image_path) { Mat image = imread(image_path, IMREAD_GRAYSCALE); if (image.empty()) { cerr << "The provided image at path " << image_path << " could not be read\n"; exit(-2); } return image; } __global__ void histogram(const uint8_t* image, uint32_t* histogram) { const uint32_t index = blockDim.x * blockIdx.x + threadIdx.x; atomicAdd(&histogram[image[index]], 1); } // Naive implementation of inclusive scan algorithm based on the exclusive scan presented in // https://www.eecs.umich.edu/courses/eecs570/hw/parprefix.pdf // The above example doesn't work when adapted because of a __syncthreads() issue? // It uses a continuous double buffer, so a single memory location with twice the number of slots as the input. // The complexity of this is O(N*logN) whereas the trivial CPU single threaded version is O(N). // It handles only arrays smaller than max number of threads per 1 block. 
__global__ void scan(uint32_t* output, const uint32_t* const input, const uint32_t n, const uint32_t offset) { const unsigned thread_index = threadIdx.x; if (thread_index >= offset) { output[thread_index] = input[thread_index] + input[thread_index - offset]; } else { output[thread_index] = input[thread_index]; } } __global__ void equalize_image(const uint8_t* original_image, const size_t number_of_pixels, const uint32_t* cdf, uint8_t* equalized_image) { const int index = blockDim.x * blockIdx.x + threadIdx.x; equalized_image[index] = cdf[original_image[index]] * (number_of_bins - 1) / number_of_pixels; } int get_elapsed_time(float time_in_milliseconds) { return (int)(time_in_milliseconds * 1000); } // Also see https://www.mygreatlearning.com/blog/histogram-equalization-explained/#Algorithm int main(int argc, char** argv) { if (argc < 2) { fprintf(stderr, "An image file path is needed!\n"); exit(-1); } string image_path(argv[1]); Mat image = read_image(image_path); if (!image.isContinuous()) { std::cerr << "Image is not read but stitched together so it is not continuous\n"; exit(-3); } size_t number_of_pixels = image.total(); uint8_t* host_image = (uint8_t*)malloc(number_of_pixels * sizeof(uint8_t)); memcpy_s(host_image, number_of_pixels * sizeof(uint8_t), image.data, number_of_pixels * sizeof(uint8_t)); // Time transfer to GPU hipEvent_t start_transfer, end_transfer; hipEventCreate(&start_transfer); hipEventCreate(&end_transfer); hipEventRecord(start_transfer); // Copy image to GPU uint8_t* dev_image = nullptr; gpu_error_check(hipMalloc(&dev_image, number_of_pixels * sizeof(uint8_t))); gpu_error_check(hipMemcpy(dev_image, host_image, number_of_pixels * sizeof(uint8_t), hipMemcpyHostToDevice)); hipEventRecord(end_transfer, 0); hipEventSynchronize(end_transfer); float transfer_elapsed_time; hipEventElapsedTime(&transfer_elapsed_time, start_transfer, end_transfer); hipEventDestroy(start_transfer); hipEventDestroy(end_transfer); cout << "The time to transfer image to 
GPU: " << get_elapsed_time(transfer_elapsed_time) << " microseconds\n"; // Time histogram equalization hipEvent_t start_histogram_equalization, end_histogram_equalization; hipEventCreate(&start_histogram_equalization); hipEventCreate(&end_histogram_equalization); hipEventRecord(start_histogram_equalization); // Initialize histogram on device uint32_t* dev_histogram = nullptr; gpu_error_check(hipMalloc(&dev_histogram, number_of_bins * sizeof(uint32_t))); gpu_error_check(hipMemset(dev_histogram, 0, number_of_bins * sizeof(uint32_t))); // Compute histogram of image using a naive kernel hipEvent_t start_histogram, end_histogram; hipEventCreate(&start_histogram); hipEventCreate(&end_histogram); hipEventRecord(start_histogram); hipLaunchKernelGGL(( histogram), dim3(number_of_pixels / max_threads_per_block), dim3(max_threads_per_block), 0, 0, dev_image, dev_histogram); gpu_error_check(hipGetLastError()); gpu_error_check(hipDeviceSynchronize()); hipEventRecord(end_histogram, 0); hipEventSynchronize(end_histogram); float histogram_elapsed_time; hipEventElapsedTime(&histogram_elapsed_time, start_histogram, end_histogram); hipEventDestroy(start_histogram); hipEventDestroy(end_histogram); cout << "The time to compute histogram: " << get_elapsed_time(histogram_elapsed_time) << " microseconds\n"; // Compute the cumulative distribution function using a naive uint32_t* dev_cdf = nullptr; gpu_error_check(hipMalloc(&dev_cdf, number_of_bins * sizeof(uint32_t))); gpu_error_check(hipMemset(dev_cdf, 0, number_of_bins * sizeof(uint32_t))); // Compute the cumulative distribution function uint32_t* temp = nullptr; gpu_error_check(hipMalloc(&temp, number_of_bins * sizeof(uint32_t))); gpu_error_check(hipMemcpy(temp, dev_histogram, number_of_bins * sizeof(uint32_t), hipMemcpyDeviceToDevice)); for (uint32_t offset = 1; offset < number_of_bins; offset *= 2) { hipLaunchKernelGGL(( scan), dim3(1), dim3(number_of_bins), 2 * number_of_bins * sizeof(int32_t), 0, dev_cdf, temp, number_of_bins, 
offset); gpu_error_check(hipGetLastError()); gpu_error_check(hipDeviceSynchronize()); if (offset * 2 < number_of_bins) { std::swap(temp, dev_cdf); } } // Compute the new image values uint8_t* dev_equalized_image = nullptr; gpu_error_check(hipMalloc(&dev_equalized_image, number_of_pixels * sizeof(uint8_t))); hipLaunchKernelGGL(( equalize_image), dim3(number_of_pixels / max_threads_per_block), dim3(max_threads_per_block), 0, 0, dev_image, number_of_pixels, dev_cdf, dev_equalized_image); gpu_error_check(hipGetLastError()); gpu_error_check(hipDeviceSynchronize()); hipEventRecord(end_histogram_equalization, 0); hipEventSynchronize(end_histogram_equalization); float histogram_equalization_elapsed_time; hipEventElapsedTime(&histogram_equalization_elapsed_time, start_histogram_equalization, end_histogram_equalization); hipEventDestroy(start_histogram_equalization); hipEventDestroy(end_histogram_equalization); cout << "The time to equalize the histogram on the GPU for the input image: " << get_elapsed_time(histogram_equalization_elapsed_time) << " microseconds\n"; // Time the transfer back to the CPU hipEvent_t start_transfer_back, end_transfer_back; hipEventCreate(&start_transfer_back); hipEventCreate(&end_transfer_back); hipEventRecord(start_transfer_back); // Copy the equalized image back to the CPU uint8_t* host_equalized_image = nullptr; host_equalized_image = (uint8_t*)malloc(number_of_pixels * sizeof(uint8_t)); gpu_error_check( hipMemcpy(host_equalized_image, dev_equalized_image, number_of_pixels * sizeof(uint8_t), hipMemcpyDeviceToHost )); hipEventRecord(end_transfer_back, 0); hipEventSynchronize(end_transfer_back); float transfer_back_elapsed_time; hipEventElapsedTime(&transfer_back_elapsed_time, start_transfer_back, end_transfer_back); hipEventDestroy(start_transfer_back); hipEventDestroy(end_transfer_back); cout << "The time to transfer the equalized image back to CPU: " << get_elapsed_time(transfer_back_elapsed_time) << " microseconds\n"; cout << "The total time 
(without loading initial image from filesystem and displaying them at the end): " << get_elapsed_time(transfer_elapsed_time + histogram_equalization_elapsed_time + transfer_back_elapsed_time) << " microseconds\n"; // Create and image for displaying Mat equalized_image = Mat(image.rows, image.cols, CV_8UC1, host_equalized_image); // imshow("Original image", image); // imshow("Equalized image", equalized_image); // waitKey(0); // Free all memory free(host_image); free(host_equalized_image); hipFree(dev_image); hipFree(dev_histogram); hipFree(dev_cdf); hipFree(dev_equalized_image); return 0; }
510c5b6c688b4cd7c721cfb4dd861c99184d518f.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" // For __syncthreads to not be not found anymore #ifndef __CUDACC__ #define __CUDACC__ #endif #include <cstdlib> #include <cstdint> #include <iostream> #include <opencv2/opencv.hpp> #include <opencv2/core/core.hpp> #include <opencv2/imgcodecs.hpp> #include <opencv2/highgui/highgui.hpp> #include <device_functions.h> using namespace std; using namespace cv; const int max_threads_per_block = 1024; const int number_of_bins = 256; #define gpu_error_check(ans) { gpu_assert((ans), __FILE__, __LINE__); } inline void gpu_assert(const cudaError_t code, const char* file, const int line, const bool abort = true) { if (code != cudaSuccess) { cerr << "GPU_assert: " << cudaGetErrorString(code) << " " << file << " " << line << ".\n"; if (abort) { exit(code); } } } Mat read_image(const string image_path) { Mat image = imread(image_path, IMREAD_GRAYSCALE); if (image.empty()) { cerr << "The provided image at path " << image_path << " could not be read\n"; exit(-2); } return image; } __global__ void histogram(const uint8_t* image, uint32_t* histogram) { const uint32_t index = blockDim.x * blockIdx.x + threadIdx.x; atomicAdd(&histogram[image[index]], 1); } // Naive implementation of inclusive scan algorithm based on the exclusive scan presented in // https://www.eecs.umich.edu/courses/eecs570/hw/parprefix.pdf // The above example doesn't work when adapted because of a __syncthreads() issue? // It uses a continuous double buffer, so a single memory location with twice the number of slots as the input. // The complexity of this is O(N*logN) whereas the trivial CPU single threaded version is O(N). // It handles only arrays smaller than max number of threads per 1 block. 
__global__ void scan(uint32_t* output, const uint32_t* const input, const uint32_t n, const uint32_t offset) { const unsigned thread_index = threadIdx.x; if (thread_index >= offset) { output[thread_index] = input[thread_index] + input[thread_index - offset]; } else { output[thread_index] = input[thread_index]; } } __global__ void equalize_image(const uint8_t* original_image, const size_t number_of_pixels, const uint32_t* cdf, uint8_t* equalized_image) { const int index = blockDim.x * blockIdx.x + threadIdx.x; equalized_image[index] = cdf[original_image[index]] * (number_of_bins - 1) / number_of_pixels; } int get_elapsed_time(float time_in_milliseconds) { return (int)(time_in_milliseconds * 1000); } // Also see https://www.mygreatlearning.com/blog/histogram-equalization-explained/#Algorithm int main(int argc, char** argv) { if (argc < 2) { fprintf(stderr, "An image file path is needed!\n"); exit(-1); } string image_path(argv[1]); Mat image = read_image(image_path); if (!image.isContinuous()) { std::cerr << "Image is not read but stitched together so it is not continuous\n"; exit(-3); } size_t number_of_pixels = image.total(); uint8_t* host_image = (uint8_t*)malloc(number_of_pixels * sizeof(uint8_t)); memcpy_s(host_image, number_of_pixels * sizeof(uint8_t), image.data, number_of_pixels * sizeof(uint8_t)); // Time transfer to GPU cudaEvent_t start_transfer, end_transfer; cudaEventCreate(&start_transfer); cudaEventCreate(&end_transfer); cudaEventRecord(start_transfer); // Copy image to GPU uint8_t* dev_image = nullptr; gpu_error_check(cudaMalloc(&dev_image, number_of_pixels * sizeof(uint8_t))); gpu_error_check(cudaMemcpy(dev_image, host_image, number_of_pixels * sizeof(uint8_t), cudaMemcpyHostToDevice)); cudaEventRecord(end_transfer, 0); cudaEventSynchronize(end_transfer); float transfer_elapsed_time; cudaEventElapsedTime(&transfer_elapsed_time, start_transfer, end_transfer); cudaEventDestroy(start_transfer); cudaEventDestroy(end_transfer); cout << "The time to 
transfer image to GPU: " << get_elapsed_time(transfer_elapsed_time) << " microseconds\n"; // Time histogram equalization cudaEvent_t start_histogram_equalization, end_histogram_equalization; cudaEventCreate(&start_histogram_equalization); cudaEventCreate(&end_histogram_equalization); cudaEventRecord(start_histogram_equalization); // Initialize histogram on device uint32_t* dev_histogram = nullptr; gpu_error_check(cudaMalloc(&dev_histogram, number_of_bins * sizeof(uint32_t))); gpu_error_check(cudaMemset(dev_histogram, 0, number_of_bins * sizeof(uint32_t))); // Compute histogram of image using a naive kernel cudaEvent_t start_histogram, end_histogram; cudaEventCreate(&start_histogram); cudaEventCreate(&end_histogram); cudaEventRecord(start_histogram); histogram<<<number_of_pixels / max_threads_per_block, max_threads_per_block>>>(dev_image, dev_histogram); gpu_error_check(cudaGetLastError()); gpu_error_check(cudaDeviceSynchronize()); cudaEventRecord(end_histogram, 0); cudaEventSynchronize(end_histogram); float histogram_elapsed_time; cudaEventElapsedTime(&histogram_elapsed_time, start_histogram, end_histogram); cudaEventDestroy(start_histogram); cudaEventDestroy(end_histogram); cout << "The time to compute histogram: " << get_elapsed_time(histogram_elapsed_time) << " microseconds\n"; // Compute the cumulative distribution function using a naive uint32_t* dev_cdf = nullptr; gpu_error_check(cudaMalloc(&dev_cdf, number_of_bins * sizeof(uint32_t))); gpu_error_check(cudaMemset(dev_cdf, 0, number_of_bins * sizeof(uint32_t))); // Compute the cumulative distribution function uint32_t* temp = nullptr; gpu_error_check(cudaMalloc(&temp, number_of_bins * sizeof(uint32_t))); gpu_error_check(cudaMemcpy(temp, dev_histogram, number_of_bins * sizeof(uint32_t), cudaMemcpyDeviceToDevice)); for (uint32_t offset = 1; offset < number_of_bins; offset *= 2) { scan<<<1, number_of_bins, 2 * number_of_bins * sizeof(int32_t)>>>(dev_cdf, temp, number_of_bins, offset); 
gpu_error_check(cudaGetLastError()); gpu_error_check(cudaDeviceSynchronize()); if (offset * 2 < number_of_bins) { std::swap(temp, dev_cdf); } } // Compute the new image values uint8_t* dev_equalized_image = nullptr; gpu_error_check(cudaMalloc(&dev_equalized_image, number_of_pixels * sizeof(uint8_t))); equalize_image<<<number_of_pixels / max_threads_per_block, max_threads_per_block>>>( dev_image, number_of_pixels, dev_cdf, dev_equalized_image); gpu_error_check(cudaGetLastError()); gpu_error_check(cudaDeviceSynchronize()); cudaEventRecord(end_histogram_equalization, 0); cudaEventSynchronize(end_histogram_equalization); float histogram_equalization_elapsed_time; cudaEventElapsedTime(&histogram_equalization_elapsed_time, start_histogram_equalization, end_histogram_equalization); cudaEventDestroy(start_histogram_equalization); cudaEventDestroy(end_histogram_equalization); cout << "The time to equalize the histogram on the GPU for the input image: " << get_elapsed_time(histogram_equalization_elapsed_time) << " microseconds\n"; // Time the transfer back to the CPU cudaEvent_t start_transfer_back, end_transfer_back; cudaEventCreate(&start_transfer_back); cudaEventCreate(&end_transfer_back); cudaEventRecord(start_transfer_back); // Copy the equalized image back to the CPU uint8_t* host_equalized_image = nullptr; host_equalized_image = (uint8_t*)malloc(number_of_pixels * sizeof(uint8_t)); gpu_error_check( cudaMemcpy(host_equalized_image, dev_equalized_image, number_of_pixels * sizeof(uint8_t), cudaMemcpyDeviceToHost )); cudaEventRecord(end_transfer_back, 0); cudaEventSynchronize(end_transfer_back); float transfer_back_elapsed_time; cudaEventElapsedTime(&transfer_back_elapsed_time, start_transfer_back, end_transfer_back); cudaEventDestroy(start_transfer_back); cudaEventDestroy(end_transfer_back); cout << "The time to transfer the equalized image back to CPU: " << get_elapsed_time(transfer_back_elapsed_time) << " microseconds\n"; cout << "The total time (without loading 
initial image from filesystem and displaying them at the end): " << get_elapsed_time(transfer_elapsed_time + histogram_equalization_elapsed_time + transfer_back_elapsed_time) << " microseconds\n"; // Create and image for displaying Mat equalized_image = Mat(image.rows, image.cols, CV_8UC1, host_equalized_image); // imshow("Original image", image); // imshow("Equalized image", equalized_image); // waitKey(0); // Free all memory free(host_image); free(host_equalized_image); cudaFree(dev_image); cudaFree(dev_histogram); cudaFree(dev_cdf); cudaFree(dev_equalized_image); return 0; }
d6fa7195fd1b711f31a1e5cbd97ae9fa4a2068c4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.0.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date February 2016 @precisions normal z -> s d c */ #include "magma_internal.h" #define NB 64 // adds x += r --and-- // copies r = b // each thread does one index, x[i] and r[i] __global__ void zaxpycp_kernel( int m, magmaDoubleComplex *r, magmaDoubleComplex *x, const magmaDoubleComplex *b) { const int i = threadIdx.x + blockIdx.x*NB; if ( i < m ) { x[i] = MAGMA_Z_ADD( x[i], r[i] ); r[i] = b[i]; } } // ---------------------------------------------------------------------- // adds x += r --and-- // copies r = b extern "C" void magmablas_zaxpycp_q( magma_int_t m, magmaDoubleComplex_ptr r, magmaDoubleComplex_ptr x, magmaDoubleComplex_const_ptr b, magma_queue_t queue ) { dim3 threads( NB ); dim3 grid( magma_ceildiv( m, NB ) ); hipLaunchKernelGGL(( zaxpycp_kernel) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, r, x, b ); }
d6fa7195fd1b711f31a1e5cbd97ae9fa4a2068c4.cu
/* -- MAGMA (version 2.0.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date February 2016 @precisions normal z -> s d c */ #include "magma_internal.h" #define NB 64 // adds x += r --and-- // copies r = b // each thread does one index, x[i] and r[i] __global__ void zaxpycp_kernel( int m, magmaDoubleComplex *r, magmaDoubleComplex *x, const magmaDoubleComplex *b) { const int i = threadIdx.x + blockIdx.x*NB; if ( i < m ) { x[i] = MAGMA_Z_ADD( x[i], r[i] ); r[i] = b[i]; } } // ---------------------------------------------------------------------- // adds x += r --and-- // copies r = b extern "C" void magmablas_zaxpycp_q( magma_int_t m, magmaDoubleComplex_ptr r, magmaDoubleComplex_ptr x, magmaDoubleComplex_const_ptr b, magma_queue_t queue ) { dim3 threads( NB ); dim3 grid( magma_ceildiv( m, NB ) ); zaxpycp_kernel <<< grid, threads, 0, queue->cuda_stream() >>> ( m, r, x, b ); }
6b7563a996bc3bfdd5e4a80769f0307ebbfcfead.hip
// !!! This is a file automatically generated by hipify!!! /* * ExcuteConstraint.cu * * *************** Notice *************** * Auto Generated By ATPC on:2018-01-14 17:53:17 * Author: ZhangHui * */ #include <iostream> #include "./../ConstraintParser/ConstraintParameter.cuh" #include "./../model/Coodinate.cuh" #include "./../model/Interval.cuh" #include "./../model/Priority.cuh" #include "./../model/FullCoveredInfo.cuh" #include "./../model/PredictValue.cuh" #include "./../model/PredictValueWithOne.cuh" #include "./../solver/type.h" #include "./../solver/ATG.h" #include "./../solver/PCATG.h" #include "./../solver/ConstantValue.h" #include "ExcuteConstraint.cuh" #include "HardwareStrategy.cuh" #include "ParallelATG.cuh" #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "device_functions.hpp" #include "math_functions.h" #include <stdio.h> using namespace std; /* * * */ /* * * 1 * 2if * * : * getRuntimeValue_i_jij * */ /* * 0CUDA * */ __device__ void getRuntimeValue_0_0(FloatType x , FloatType y , FloatType z , FloatType w , FloatType t , FloatType v , Coodinate* res ) { res->y = ( x-y+tanf(v) ) - ( (z+t)/(w+t) ); res->isCovered = (bool)(res->y > 0.f); res->isValid = (bool)(isfinite(res->y)); return ; } /* * 1CUDA * */ __device__ void getRuntimeValue_0_1(FloatType x , FloatType y , FloatType z , FloatType w , FloatType t , FloatType v , Coodinate* res ) { res->y = ( sqrtf(x-t) ) - ( z/y ); res->isCovered = (bool)(res->y > 0.f); res->isValid = (bool)(isfinite(res->y)); return ; } /* * 2CUDA * */ __device__ void getRuntimeValue_0_2(FloatType x , FloatType y , FloatType z , FloatType w , FloatType t , FloatType v , Coodinate* res ) { res->y = ( x*y ) - ( 0 ); res->isCovered = (bool)(res->y > 0.f); res->isValid = (bool)(isfinite(res->y)); return ; } /* * 3CUDA * */ __device__ void getRuntimeValue_0_3(FloatType x , FloatType y , FloatType z , FloatType w , FloatType t , FloatType v , Coodinate* res ) { res->y = ( t+w+z ) - ( 0 ); res->isCovered = 
(bool)(res->y > 0.f); res->isValid = (bool)(isfinite(res->y)); return ; } /* * 4CUDA * */ __device__ void getRuntimeValue_0_4(FloatType x , FloatType y , FloatType z , FloatType w , FloatType t , FloatType v , Coodinate* res ) { res->y = ( x*y ) - ( t+w+z ); res->isCovered = (bool)(res->y > 0.f); res->isValid = (bool)(isfinite(res->y)); return ; } /* * 5CUDA * */ __device__ void getRuntimeValue_0_5(FloatType x , FloatType y , FloatType z , FloatType w , FloatType t , FloatType v , Coodinate* res ) { res->y = ( powf(y,t)*cosf(v) ) - ( z*2+w*3+x*7 ); res->isCovered = (bool)(res->y > 0.f); res->isValid = (bool)(isfinite(res->y)); return ; } /* * 6CUDA * */ __device__ void getRuntimeValue_0_6(FloatType x , FloatType y , FloatType z , FloatType w , FloatType t , FloatType v , Coodinate* res ) { res->y = ( z+w ) - ( x+y ); res->isCovered = (bool)(res->y > 0.f); res->isValid = (bool)(isfinite(res->y)); return ; } /* * 7CUDA * */ __device__ void getRuntimeValue_0_7(FloatType x , FloatType y , FloatType z , FloatType w , FloatType t , FloatType v , Coodinate* res ) { res->y = ( x/y ) - ( w ); res->isCovered = (bool)(res->y > 0.f); res->isValid = (bool)(isfinite(res->y)); return ; } /* * 8CUDA * */ __device__ void getRuntimeValue_0_8(FloatType x , FloatType y , FloatType z , FloatType w , FloatType t , FloatType v , Coodinate* res ) { res->y = ( x ) - ( w+y-z ); res->isCovered = (bool)(res->y > 0.f); res->isValid = (bool)(isfinite(res->y)); return ; } /* * 9CUDA * */ __device__ void getRuntimeValue_0_9(FloatType x , FloatType y , FloatType z , FloatType w , FloatType t , FloatType v , Coodinate* res ) { res->y = ( sqrtf(w*y*z) ) - ( log10f(t*x) ); res->isCovered = (bool)(res->y > 0.f); res->isValid = (bool)(isfinite(res->y)); return ; } /* * 10CUDA * */ __device__ void getRuntimeValue_0_10(FloatType x , FloatType y , FloatType z , FloatType w , FloatType t , FloatType v , Coodinate* res ) { res->y = ( x*cosf(t+y) ) - ( logf(w*z*3) ); res->isCovered = (bool)(res->y > 0.f); 
res->isValid = (bool)(isfinite(res->y)); return ; } /* * 11CUDA * */ __device__ void getRuntimeValue_0_11(FloatType x , FloatType y , FloatType z , FloatType w , FloatType t , FloatType v , Coodinate* res ) { res->y = ( cosf(t)*sinf(v) ) - ( cosf(y) ); res->isCovered = (bool)(res->y > 0.f); res->isValid = (bool)(isfinite(res->y)); return ; } /* * 12CUDA * */ __device__ void getRuntimeValue_0_12(FloatType x , FloatType y , FloatType z , FloatType w , FloatType t , FloatType v , Coodinate* res ) { res->y = ( cosf(x*y)+cosf(z*w)+cosf(t*v) ) - ( sinf(x*y)+sinf(z*w)+sinf(t*v) ); res->isCovered = (bool)(res->y > 0.f); res->isValid = (bool)(isfinite(res->y)); return ; } /* * nm m*n * */ /* * 00 * */ __global__ void calaConstraint_0_0_0(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_0(dev_predictArray[i+base].x , dev_parameter[1] , dev_parameter[2] , dev_parameter[3] , dev_parameter[4] , dev_parameter[5] , dev_predictArray + i + base); } } /* * 01 * */ __global__ void calaConstraint_0_0_1(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_0(dev_parameter[0] , dev_predictArray[i+base].x , dev_parameter[2] , dev_parameter[3] , dev_parameter[4] , dev_parameter[5] , dev_predictArray + i + base); } } /* * 02 * */ __global__ void calaConstraint_0_0_2(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_0(dev_parameter[0] , dev_parameter[1] , dev_predictArray[i+base].x , dev_parameter[3] , dev_parameter[4] , dev_parameter[5] , dev_predictArray + i + base); } } /* * 03 * */ __global__ void calaConstraint_0_0_3(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if 
(i < Size) { getRuntimeValue_0_0(dev_parameter[0] , dev_parameter[1] , dev_parameter[2] , dev_predictArray[i+base].x , dev_parameter[4] , dev_parameter[5] , dev_predictArray + i + base); } }
/*
 * Generated constraint kernels calaConstraint_0_<R>_<C>:
 *   - <R> (0..12) selects the evaluator getRuntimeValue_0_<R> (defined earlier
 *     in this file; its exact semantics are not visible here).
 *   - <C> (0..5) selects which of the six scalar arguments is fed from the
 *     per-thread candidate value dev_predictArray[i + base].x instead of the
 *     fixed dev_parameter[<C>].
 * Each kernel processes one element per thread using a flat 1-D index guarded
 * by `i < Size`, and passes a pointer to the element's own slot
 * (dev_predictArray + i + base) as the last argument, so the evaluator can
 * store its result in place.
 *
 * The original file spelled out every (R, C) combination by hand. The
 * preprocessor table below generates identical definitions, which keeps all
 * combinations consistent by construction and makes a wrong argument slot
 * impossible to introduce in a single kernel.
 */

/* Argument list with slot <C> replaced by the candidate value. `i` and `base`
   resolve inside the kernel body where the macro is expanded. */
#define ATG_CALA_ARGS_0 dev_predictArray[i + base].x, dev_parameter[1], dev_parameter[2], dev_parameter[3], dev_parameter[4], dev_parameter[5]
#define ATG_CALA_ARGS_1 dev_parameter[0], dev_predictArray[i + base].x, dev_parameter[2], dev_parameter[3], dev_parameter[4], dev_parameter[5]
#define ATG_CALA_ARGS_2 dev_parameter[0], dev_parameter[1], dev_predictArray[i + base].x, dev_parameter[3], dev_parameter[4], dev_parameter[5]
#define ATG_CALA_ARGS_3 dev_parameter[0], dev_parameter[1], dev_parameter[2], dev_predictArray[i + base].x, dev_parameter[4], dev_parameter[5]
#define ATG_CALA_ARGS_4 dev_parameter[0], dev_parameter[1], dev_parameter[2], dev_parameter[3], dev_predictArray[i + base].x, dev_parameter[5]
#define ATG_CALA_ARGS_5 dev_parameter[0], dev_parameter[1], dev_parameter[2], dev_parameter[3], dev_parameter[4], dev_predictArray[i + base].x

/* Defines __global__ calaConstraint_0_<R>_<C>, forwarding to
   getRuntimeValue_0_<R> with argument slot <C> taken from the predict array. */
#define ATG_DEFINE_CALA_CONSTRAINT(R, C)                                      \
__global__ void calaConstraint_0_##R##_##C(Coodinate* dev_predictArray,       \
                                           FloatType* dev_parameter,          \
                                           const int base, const int Size) {  \
  int i = threadIdx.x + blockIdx.x * blockDim.x;                              \
  if (i < Size) {                                                             \
    getRuntimeValue_0_##R(ATG_CALA_ARGS_##C, dev_predictArray + i + base);    \
  }                                                                           \
}

/* constraint 0 (combinations 0_0..0_3 are defined above this chunk) */
ATG_DEFINE_CALA_CONSTRAINT(0, 4)
ATG_DEFINE_CALA_CONSTRAINT(0, 5)
/* constraint 1 */
ATG_DEFINE_CALA_CONSTRAINT(1, 0)
ATG_DEFINE_CALA_CONSTRAINT(1, 1)
ATG_DEFINE_CALA_CONSTRAINT(1, 2)
ATG_DEFINE_CALA_CONSTRAINT(1, 3)
ATG_DEFINE_CALA_CONSTRAINT(1, 4)
ATG_DEFINE_CALA_CONSTRAINT(1, 5)
/* constraint 2 */
ATG_DEFINE_CALA_CONSTRAINT(2, 0)
ATG_DEFINE_CALA_CONSTRAINT(2, 1)
ATG_DEFINE_CALA_CONSTRAINT(2, 2)
ATG_DEFINE_CALA_CONSTRAINT(2, 3)
ATG_DEFINE_CALA_CONSTRAINT(2, 4)
ATG_DEFINE_CALA_CONSTRAINT(2, 5)
/* constraint 3 */
ATG_DEFINE_CALA_CONSTRAINT(3, 0)
ATG_DEFINE_CALA_CONSTRAINT(3, 1)
ATG_DEFINE_CALA_CONSTRAINT(3, 2)
ATG_DEFINE_CALA_CONSTRAINT(3, 3)
ATG_DEFINE_CALA_CONSTRAINT(3, 4)
ATG_DEFINE_CALA_CONSTRAINT(3, 5)
/* constraint 4 */
ATG_DEFINE_CALA_CONSTRAINT(4, 0)
ATG_DEFINE_CALA_CONSTRAINT(4, 1)
ATG_DEFINE_CALA_CONSTRAINT(4, 2)
ATG_DEFINE_CALA_CONSTRAINT(4, 3)
ATG_DEFINE_CALA_CONSTRAINT(4, 4)
ATG_DEFINE_CALA_CONSTRAINT(4, 5)
/* constraint 5 */
ATG_DEFINE_CALA_CONSTRAINT(5, 0)
ATG_DEFINE_CALA_CONSTRAINT(5, 1)
ATG_DEFINE_CALA_CONSTRAINT(5, 2)
ATG_DEFINE_CALA_CONSTRAINT(5, 3)
ATG_DEFINE_CALA_CONSTRAINT(5, 4)
ATG_DEFINE_CALA_CONSTRAINT(5, 5)
/* constraint 6 */
ATG_DEFINE_CALA_CONSTRAINT(6, 0)
ATG_DEFINE_CALA_CONSTRAINT(6, 1)
ATG_DEFINE_CALA_CONSTRAINT(6, 2)
ATG_DEFINE_CALA_CONSTRAINT(6, 3)
ATG_DEFINE_CALA_CONSTRAINT(6, 4)
ATG_DEFINE_CALA_CONSTRAINT(6, 5)
/* constraint 7 */
ATG_DEFINE_CALA_CONSTRAINT(7, 0)
ATG_DEFINE_CALA_CONSTRAINT(7, 1)
ATG_DEFINE_CALA_CONSTRAINT(7, 2)
ATG_DEFINE_CALA_CONSTRAINT(7, 3)
ATG_DEFINE_CALA_CONSTRAINT(7, 4)
ATG_DEFINE_CALA_CONSTRAINT(7, 5)
/* constraint 8 */
ATG_DEFINE_CALA_CONSTRAINT(8, 0)
ATG_DEFINE_CALA_CONSTRAINT(8, 1)
ATG_DEFINE_CALA_CONSTRAINT(8, 2)
ATG_DEFINE_CALA_CONSTRAINT(8, 3)
ATG_DEFINE_CALA_CONSTRAINT(8, 4)
ATG_DEFINE_CALA_CONSTRAINT(8, 5)
/* constraint 9 */
ATG_DEFINE_CALA_CONSTRAINT(9, 0)
ATG_DEFINE_CALA_CONSTRAINT(9, 1)
ATG_DEFINE_CALA_CONSTRAINT(9, 2)
ATG_DEFINE_CALA_CONSTRAINT(9, 3)
ATG_DEFINE_CALA_CONSTRAINT(9, 4)
ATG_DEFINE_CALA_CONSTRAINT(9, 5)
/* constraint 10 */
ATG_DEFINE_CALA_CONSTRAINT(10, 0)
ATG_DEFINE_CALA_CONSTRAINT(10, 1)
ATG_DEFINE_CALA_CONSTRAINT(10, 2)
ATG_DEFINE_CALA_CONSTRAINT(10, 3)
ATG_DEFINE_CALA_CONSTRAINT(10, 4)
ATG_DEFINE_CALA_CONSTRAINT(10, 5)
/* constraint 11 */
ATG_DEFINE_CALA_CONSTRAINT(11, 0)
ATG_DEFINE_CALA_CONSTRAINT(11, 1)
ATG_DEFINE_CALA_CONSTRAINT(11, 2)
ATG_DEFINE_CALA_CONSTRAINT(11, 3)
ATG_DEFINE_CALA_CONSTRAINT(11, 4)
ATG_DEFINE_CALA_CONSTRAINT(11, 5)
/* constraint 12 (combination 12_5 is completed on the following line) */
ATG_DEFINE_CALA_CONSTRAINT(12, 0)
ATG_DEFINE_CALA_CONSTRAINT(12, 1)
ATG_DEFINE_CALA_CONSTRAINT(12, 2)
ATG_DEFINE_CALA_CONSTRAINT(12, 3)
ATG_DEFINE_CALA_CONSTRAINT(12, 4)

/* The generator macros are local to this table; do not leak them. */
#undef ATG_DEFINE_CALA_CONSTRAINT
#undef ATG_CALA_ARGS_0
#undef ATG_CALA_ARGS_1
#undef ATG_CALA_ARGS_2
#undef ATG_CALA_ARGS_3
#undef ATG_CALA_ARGS_4
#undef ATG_CALA_ARGS_5

/* * 125 * */
__global__ void calaConstraint_0_12_5(Coodinate* dev_predictArray, FloatType* dev_parameter, const int base, const int Size) {
  int i = threadIdx.x + blockIdx.x * blockDim.x;
  if (i < Size) {
    getRuntimeValue_0_12(dev_parameter[0] , dev_parameter[1] , dev_parameter[2] , dev_parameter[3] ,
dev_parameter[4] , dev_predictArray[i+base].x , dev_predictArray + i + base); } } /* * ,CUDA * */ void calaRuntimeValue(int paraIndex,Coodinate* dev_predictArray,FloatType* dev_parameter,const int row,const int col) { Block res = HardwareStrategy::getHardwareStrategy(col); // if(paraIndex == 0) { hipLaunchKernelGGL(( calaConstraint_0_0_0), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[0], dev_predictArray,dev_parameter,0*col,col); hipLaunchKernelGGL(( calaConstraint_0_1_0), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[1], dev_predictArray,dev_parameter,1*col,col); hipLaunchKernelGGL(( calaConstraint_0_2_0), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[2], dev_predictArray,dev_parameter,2*col,col); hipLaunchKernelGGL(( calaConstraint_0_3_0), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[3], dev_predictArray,dev_parameter,3*col,col); hipLaunchKernelGGL(( calaConstraint_0_4_0), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[4], dev_predictArray,dev_parameter,4*col,col); hipLaunchKernelGGL(( calaConstraint_0_5_0), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[5], dev_predictArray,dev_parameter,5*col,col); hipLaunchKernelGGL(( calaConstraint_0_6_0), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[6], dev_predictArray,dev_parameter,6*col,col); hipLaunchKernelGGL(( calaConstraint_0_7_0), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[7], dev_predictArray,dev_parameter,7*col,col); hipLaunchKernelGGL(( calaConstraint_0_8_0), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[8], dev_predictArray,dev_parameter,8*col,col); hipLaunchKernelGGL(( calaConstraint_0_9_0), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[9], dev_predictArray,dev_parameter,9*col,col); hipLaunchKernelGGL(( calaConstraint_0_10_0), 
dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[10], dev_predictArray,dev_parameter,10*col,col); hipLaunchKernelGGL(( calaConstraint_0_11_0), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[11], dev_predictArray,dev_parameter,11*col,col); hipLaunchKernelGGL(( calaConstraint_0_12_0), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[12], dev_predictArray,dev_parameter,12*col,col); } else if(paraIndex == 1) { hipLaunchKernelGGL(( calaConstraint_0_0_1), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[0], dev_predictArray,dev_parameter,0*col,col); hipLaunchKernelGGL(( calaConstraint_0_1_1), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[1], dev_predictArray,dev_parameter,1*col,col); hipLaunchKernelGGL(( calaConstraint_0_2_1), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[2], dev_predictArray,dev_parameter,2*col,col); hipLaunchKernelGGL(( calaConstraint_0_3_1), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[3], dev_predictArray,dev_parameter,3*col,col); hipLaunchKernelGGL(( calaConstraint_0_4_1), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[4], dev_predictArray,dev_parameter,4*col,col); hipLaunchKernelGGL(( calaConstraint_0_5_1), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[5], dev_predictArray,dev_parameter,5*col,col); hipLaunchKernelGGL(( calaConstraint_0_6_1), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[6], dev_predictArray,dev_parameter,6*col,col); hipLaunchKernelGGL(( calaConstraint_0_7_1), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[7], dev_predictArray,dev_parameter,7*col,col); hipLaunchKernelGGL(( calaConstraint_0_8_1), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[8], dev_predictArray,dev_parameter,8*col,col); hipLaunchKernelGGL(( 
calaConstraint_0_9_1), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[9], dev_predictArray,dev_parameter,9*col,col); hipLaunchKernelGGL(( calaConstraint_0_10_1), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[10], dev_predictArray,dev_parameter,10*col,col); hipLaunchKernelGGL(( calaConstraint_0_11_1), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[11], dev_predictArray,dev_parameter,11*col,col); hipLaunchKernelGGL(( calaConstraint_0_12_1), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[12], dev_predictArray,dev_parameter,12*col,col); } else if(paraIndex == 2) { hipLaunchKernelGGL(( calaConstraint_0_0_2), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[0], dev_predictArray,dev_parameter,0*col,col); hipLaunchKernelGGL(( calaConstraint_0_1_2), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[1], dev_predictArray,dev_parameter,1*col,col); hipLaunchKernelGGL(( calaConstraint_0_2_2), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[2], dev_predictArray,dev_parameter,2*col,col); hipLaunchKernelGGL(( calaConstraint_0_3_2), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[3], dev_predictArray,dev_parameter,3*col,col); hipLaunchKernelGGL(( calaConstraint_0_4_2), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[4], dev_predictArray,dev_parameter,4*col,col); hipLaunchKernelGGL(( calaConstraint_0_5_2), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[5], dev_predictArray,dev_parameter,5*col,col); hipLaunchKernelGGL(( calaConstraint_0_6_2), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[6], dev_predictArray,dev_parameter,6*col,col); hipLaunchKernelGGL(( calaConstraint_0_7_2), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[7], dev_predictArray,dev_parameter,7*col,col); 
hipLaunchKernelGGL(( calaConstraint_0_8_2), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[8], dev_predictArray,dev_parameter,8*col,col); hipLaunchKernelGGL(( calaConstraint_0_9_2), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[9], dev_predictArray,dev_parameter,9*col,col); hipLaunchKernelGGL(( calaConstraint_0_10_2), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[10], dev_predictArray,dev_parameter,10*col,col); hipLaunchKernelGGL(( calaConstraint_0_11_2), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[11], dev_predictArray,dev_parameter,11*col,col); hipLaunchKernelGGL(( calaConstraint_0_12_2), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[12], dev_predictArray,dev_parameter,12*col,col); } else if(paraIndex == 3) { hipLaunchKernelGGL(( calaConstraint_0_0_3), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[0], dev_predictArray,dev_parameter,0*col,col); hipLaunchKernelGGL(( calaConstraint_0_1_3), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[1], dev_predictArray,dev_parameter,1*col,col); hipLaunchKernelGGL(( calaConstraint_0_2_3), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[2], dev_predictArray,dev_parameter,2*col,col); hipLaunchKernelGGL(( calaConstraint_0_3_3), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[3], dev_predictArray,dev_parameter,3*col,col); hipLaunchKernelGGL(( calaConstraint_0_4_3), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[4], dev_predictArray,dev_parameter,4*col,col); hipLaunchKernelGGL(( calaConstraint_0_5_3), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[5], dev_predictArray,dev_parameter,5*col,col); hipLaunchKernelGGL(( calaConstraint_0_6_3), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[6], 
dev_predictArray,dev_parameter,6*col,col); hipLaunchKernelGGL(( calaConstraint_0_7_3), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[7], dev_predictArray,dev_parameter,7*col,col); hipLaunchKernelGGL(( calaConstraint_0_8_3), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[8], dev_predictArray,dev_parameter,8*col,col); hipLaunchKernelGGL(( calaConstraint_0_9_3), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[9], dev_predictArray,dev_parameter,9*col,col); hipLaunchKernelGGL(( calaConstraint_0_10_3), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[10], dev_predictArray,dev_parameter,10*col,col); hipLaunchKernelGGL(( calaConstraint_0_11_3), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[11], dev_predictArray,dev_parameter,11*col,col); hipLaunchKernelGGL(( calaConstraint_0_12_3), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[12], dev_predictArray,dev_parameter,12*col,col); } else if(paraIndex == 4) { hipLaunchKernelGGL(( calaConstraint_0_0_4), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[0], dev_predictArray,dev_parameter,0*col,col); hipLaunchKernelGGL(( calaConstraint_0_1_4), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[1], dev_predictArray,dev_parameter,1*col,col); hipLaunchKernelGGL(( calaConstraint_0_2_4), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[2], dev_predictArray,dev_parameter,2*col,col); hipLaunchKernelGGL(( calaConstraint_0_3_4), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[3], dev_predictArray,dev_parameter,3*col,col); hipLaunchKernelGGL(( calaConstraint_0_4_4), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[4], dev_predictArray,dev_parameter,4*col,col); hipLaunchKernelGGL(( calaConstraint_0_5_4), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , 
ParallelATG::stream[5], dev_predictArray,dev_parameter,5*col,col); hipLaunchKernelGGL(( calaConstraint_0_6_4), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[6], dev_predictArray,dev_parameter,6*col,col); hipLaunchKernelGGL(( calaConstraint_0_7_4), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[7], dev_predictArray,dev_parameter,7*col,col); hipLaunchKernelGGL(( calaConstraint_0_8_4), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[8], dev_predictArray,dev_parameter,8*col,col); hipLaunchKernelGGL(( calaConstraint_0_9_4), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[9], dev_predictArray,dev_parameter,9*col,col); hipLaunchKernelGGL(( calaConstraint_0_10_4), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[10], dev_predictArray,dev_parameter,10*col,col); hipLaunchKernelGGL(( calaConstraint_0_11_4), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[11], dev_predictArray,dev_parameter,11*col,col); hipLaunchKernelGGL(( calaConstraint_0_12_4), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[12], dev_predictArray,dev_parameter,12*col,col); } else if(paraIndex == 5) { hipLaunchKernelGGL(( calaConstraint_0_0_5), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[0], dev_predictArray,dev_parameter,0*col,col); hipLaunchKernelGGL(( calaConstraint_0_1_5), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[1], dev_predictArray,dev_parameter,1*col,col); hipLaunchKernelGGL(( calaConstraint_0_2_5), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[2], dev_predictArray,dev_parameter,2*col,col); hipLaunchKernelGGL(( calaConstraint_0_3_5), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[3], dev_predictArray,dev_parameter,3*col,col); hipLaunchKernelGGL(( calaConstraint_0_4_5), dim3(res.NumOfBlock) , 
dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[4], dev_predictArray,dev_parameter,4*col,col); hipLaunchKernelGGL(( calaConstraint_0_5_5), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[5], dev_predictArray,dev_parameter,5*col,col); hipLaunchKernelGGL(( calaConstraint_0_6_5), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[6], dev_predictArray,dev_parameter,6*col,col); hipLaunchKernelGGL(( calaConstraint_0_7_5), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[7], dev_predictArray,dev_parameter,7*col,col); hipLaunchKernelGGL(( calaConstraint_0_8_5), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[8], dev_predictArray,dev_parameter,8*col,col); hipLaunchKernelGGL(( calaConstraint_0_9_5), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[9], dev_predictArray,dev_parameter,9*col,col); hipLaunchKernelGGL(( calaConstraint_0_10_5), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[10], dev_predictArray,dev_parameter,10*col,col); hipLaunchKernelGGL(( calaConstraint_0_11_5), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[11], dev_predictArray,dev_parameter,11*col,col); hipLaunchKernelGGL(( calaConstraint_0_12_5), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[12], dev_predictArray,dev_parameter,12*col,col); } else { cout<<"************ You Should Never Get Here. 
In Function Of: void calaRuntimeValue(int paraIndex,Coodinate* dev_predictArray,FloatType* dev_parameter,const int row,const int col)"<<endl; } // ParallelATG::synStream(); } /* * * */ __global__ void calaFeasibleSolution(FullCoveredInfo* dev_coveredInfo,Coodinate* dev_predictArray,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { dev_coveredInfo[i].index = i; dev_coveredInfo[i].isCovered = dev_predictArray[i + 0*Size].isCovered && dev_predictArray[i + 1*Size].isCovered && dev_predictArray[i + 2*Size].isCovered && dev_predictArray[i + 3*Size].isCovered && dev_predictArray[i + 4*Size].isCovered && dev_predictArray[i + 5*Size].isCovered && dev_predictArray[i + 6*Size].isCovered && dev_predictArray[i + 7*Size].isCovered && dev_predictArray[i + 8*Size].isCovered && dev_predictArray[i + 9*Size].isCovered && dev_predictArray[i + 10*Size].isCovered && dev_predictArray[i + 11*Size].isCovered && dev_predictArray[i + 12*Size].isCovered; dev_coveredInfo[i].isVaild = dev_predictArray[i + 0*Size].isValid && dev_predictArray[i + 1*Size].isValid && dev_predictArray[i + 2*Size].isValid && dev_predictArray[i + 3*Size].isValid && dev_predictArray[i + 4*Size].isValid && dev_predictArray[i + 5*Size].isValid && dev_predictArray[i + 6*Size].isValid && dev_predictArray[i + 7*Size].isValid && dev_predictArray[i + 8*Size].isValid && dev_predictArray[i + 9*Size].isValid && dev_predictArray[i + 10*Size].isValid && dev_predictArray[i + 11*Size].isValid && dev_predictArray[i + 12*Size].isValid; dev_coveredInfo[i].vaildNum = (int)(dev_coveredInfo[i].isVaild == true); } } /* * * */ __global__ void calaFinalIntervel(Interval* dev_finalIntervel,Interval* dev_interval,const int calaArraySize) { int i = threadIdx.x + blockIdx.x*blockDim.x; bool condition = (i>=1) && (i<calaArraySize); if(condition) { Interval* a0 = dev_interval + i + calaArraySize * 0; Interval* a1 = dev_interval + i + calaArraySize * 1; Interval* a2 = dev_interval + i + calaArraySize * 2; Interval* a3 = 
dev_interval + i + calaArraySize * 3; Interval* a4 = dev_interval + i + calaArraySize * 4; Interval* a5 = dev_interval + i + calaArraySize * 5; Interval* a6 = dev_interval + i + calaArraySize * 6; Interval* a7 = dev_interval + i + calaArraySize * 7; Interval* a8 = dev_interval + i + calaArraySize * 8; Interval* a9 = dev_interval + i + calaArraySize * 9; Interval* a10 = dev_interval + i + calaArraySize * 10; Interval* a11 = dev_interval + i + calaArraySize * 11; Interval* a12 = dev_interval + i + calaArraySize * 12; FloatType left = a0->left; left = fmaxf( left , a1->left); left = fmaxf( left , a2->left); left = fmaxf( left , a3->left); left = fmaxf( left , a4->left); left = fmaxf( left , a5->left); left = fmaxf( left , a6->left); left = fmaxf( left , a7->left); left = fmaxf( left , a8->left); left = fmaxf( left , a9->left); left = fmaxf( left , a10->left); left = fmaxf( left , a11->left); left = fmaxf( left , a12->left); FloatType right = a0->right; right = fminf( right , a1->right); right = fminf( right , a2->right); right = fminf( right , a3->right); right = fminf( right , a4->right); right = fminf( right , a5->right); right = fminf( right , a6->right); right = fminf( right , a7->right); right = fminf( right , a8->right); right = fminf( right , a9->right); right = fminf( right , a10->right); right = fminf( right , a11->right); right = fminf( right , a12->right); bool hasIntervel = a0->hasIntervel && a1->hasIntervel && a2->hasIntervel && a3->hasIntervel && a4->hasIntervel && a5->hasIntervel && a6->hasIntervel && a7->hasIntervel && a8->hasIntervel && a9->hasIntervel && a10->hasIntervel && a11->hasIntervel && a12->hasIntervel; dev_finalIntervel[i].left = left; dev_finalIntervel[i].right = right; dev_finalIntervel[i].hasIntervel = hasIntervel; // //printf("(%f , %f ) (%f , %f ) (%f , %f ) Final %d (%f , %f)\n",a1->left,a1->right,a2->left,a2->right,a3->left,a3->right,hasIntervel,left,right); } } /* * predct * */ __global__ void generatePredictMat(Coodinate* 
dev_predictArray,PredictValueWithOne* dev_finalAllPredictValue,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if( i < Size ) { dev_predictArray[i + 0*Size].x = dev_finalAllPredictValue[i].value; dev_predictArray[i + 1*Size].x = dev_finalAllPredictValue[i].value; dev_predictArray[i + 2*Size].x = dev_finalAllPredictValue[i].value; dev_predictArray[i + 3*Size].x = dev_finalAllPredictValue[i].value; dev_predictArray[i + 4*Size].x = dev_finalAllPredictValue[i].value; dev_predictArray[i + 5*Size].x = dev_finalAllPredictValue[i].value; dev_predictArray[i + 6*Size].x = dev_finalAllPredictValue[i].value; dev_predictArray[i + 7*Size].x = dev_finalAllPredictValue[i].value; dev_predictArray[i + 8*Size].x = dev_finalAllPredictValue[i].value; dev_predictArray[i + 9*Size].x = dev_finalAllPredictValue[i].value; dev_predictArray[i + 10*Size].x = dev_finalAllPredictValue[i].value; dev_predictArray[i + 11*Size].x = dev_finalAllPredictValue[i].value; dev_predictArray[i + 12*Size].x = dev_finalAllPredictValue[i].value; } } /* * * */ __global__ void calaPriority(Priority* dev_priority,Coodinate* dev_calaArray,const int row,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if( i < Size ) { FloatType pri = 0.0; Coodinate* a0 = dev_calaArray + i + 0 * Size; Coodinate* a1 = dev_calaArray + i + 1 * Size; Coodinate* a2 = dev_calaArray + i + 2 * Size; Coodinate* a3 = dev_calaArray + i + 3 * Size; Coodinate* a4 = dev_calaArray + i + 4 * Size; Coodinate* a5 = dev_calaArray + i + 5 * Size; Coodinate* a6 = dev_calaArray + i + 6 * Size; Coodinate* a7 = dev_calaArray + i + 7 * Size; Coodinate* a8 = dev_calaArray + i + 8 * Size; Coodinate* a9 = dev_calaArray + i + 9 * Size; Coodinate* a10 = dev_calaArray + i + 10 * Size; Coodinate* a11 = dev_calaArray + i + 11 * Size; Coodinate* a12 = dev_calaArray + i + 12 * Size; if(a0->isCovered==true) pri = pri + 1.f; else if(a0->isValid==true) pri = pri + 1.f/(1.f+fabsf(a0->y)); if(a1->isCovered==true) pri = pri + 1.f; else 
if(a1->isValid==true) pri = pri + 1.f/(1.f+fabsf(a1->y)); if(a2->isCovered==true) pri = pri + 1.f; else if(a2->isValid==true) pri = pri + 1.f/(1.f+fabsf(a2->y)); if(a3->isCovered==true) pri = pri + 1.f; else if(a3->isValid==true) pri = pri + 1.f/(1.f+fabsf(a3->y)); if(a4->isCovered==true) pri = pri + 1.f; else if(a4->isValid==true) pri = pri + 1.f/(1.f+fabsf(a4->y)); if(a5->isCovered==true) pri = pri + 1.f; else if(a5->isValid==true) pri = pri + 1.f/(1.f+fabsf(a5->y)); if(a6->isCovered==true) pri = pri + 1.f; else if(a6->isValid==true) pri = pri + 1.f/(1.f+fabsf(a6->y)); if(a7->isCovered==true) pri = pri + 1.f; else if(a7->isValid==true) pri = pri + 1.f/(1.f+fabsf(a7->y)); if(a8->isCovered==true) pri = pri + 1.f; else if(a8->isValid==true) pri = pri + 1.f/(1.f+fabsf(a8->y)); if(a9->isCovered==true) pri = pri + 1.f; else if(a9->isValid==true) pri = pri + 1.f/(1.f+fabsf(a9->y)); if(a10->isCovered==true) pri = pri + 1.f; else if(a10->isValid==true) pri = pri + 1.f/(1.f+fabsf(a10->y)); if(a11->isCovered==true) pri = pri + 1.f; else if(a11->isValid==true) pri = pri + 1.f/(1.f+fabsf(a11->y)); if(a12->isCovered==true) pri = pri + 1.f; else if(a12->isValid==true) pri = pri + 1.f/(1.f+fabsf(a12->y)); dev_priority[i].priority = pri / (FloatType)row; dev_priority[i].x = a0->x; // bool isOne = (a0->x == a1->x) && (a1->x == a2->x) && (a2->x == a3->x) && (a3->x == a4->x) && (a4->x == a5->x) && (a5->x == a6->x) && (a6->x == a7->x) && (a7->x == a8->x) && (a8->x == a9->x) && (a9->x == a10->x) && (a10->x == a11->x) && (a11->x == a12->x); bool isCovered = a0->isCovered && a1->isCovered && a2->isCovered && a3->isCovered && a4->isCovered && a5->isCovered && a6->isCovered && a7->isCovered && a8->isCovered && a9->isCovered && a10->isCovered && a11->isCovered && a12->isCovered; bool isValid= a0->isValid && a1->isValid && a2->isValid && a3->isValid && a4->isValid && a5->isValid && a6->isValid && a7->isValid && a8->isValid && a9->isValid && a10->isValid && a11->isValid && a12->isValid; 
if(isCovered == true) { printf("Cala Prioruty Wrong,index:%d: (%f,%f,%d,%d) , (%f,%f,%d,%d) , (%f,%f,%d,%d) , (%f,%f,%d,%d) , (%f,%f,%d,%d) , (%f,%f,%d,%d) , (%f,%f,%d,%d) , (%f,%f,%d,%d) , (%f,%f,%d,%d) , (%f,%f,%d,%d) , (%f,%f,%d,%d) , (%f,%f,%d,%d) , (%f,%f,%d,%d) , isOne:%d , isCovered:%d , isValid:%d \n",i,a0->x,a0->y,a0->isCovered,a0->isValid,a1->x,a1->y,a1->isCovered,a1->isValid,a2->x,a2->y,a2->isCovered,a2->isValid,a3->x,a3->y,a3->isCovered,a3->isValid,a4->x,a4->y,a4->isCovered,a4->isValid,a5->x,a5->y,a5->isCovered,a5->isValid,a6->x,a6->y,a6->isCovered,a6->isValid,a7->x,a7->y,a7->isCovered,a7->isValid,a8->x,a8->y,a8->isCovered,a8->isValid,a9->x,a9->y,a9->isCovered,a9->isValid,a10->x,a10->y,a10->isCovered,a10->isValid,a11->x,a11->y,a11->isCovered,a11->isValid,a12->x,a12->y,a12->isCovered,a12->isValid,isOne,isCovered,isValid); } } }
6b7563a996bc3bfdd5e4a80769f0307ebbfcfead.cu
/* * ExcuteConstraint.cu * * *************** Notice *************** * Auto Generated By ATPC on:2018-01-14 17:53:17 * Author: ZhangHui * */ #include <iostream> #include "./../ConstraintParser/ConstraintParameter.cuh" #include "./../model/Coodinate.cuh" #include "./../model/Interval.cuh" #include "./../model/Priority.cuh" #include "./../model/FullCoveredInfo.cuh" #include "./../model/PredictValue.cuh" #include "./../model/PredictValueWithOne.cuh" #include "./../solver/type.h" #include "./../solver/ATG.h" #include "./../solver/PCATG.h" #include "./../solver/ConstantValue.h" #include "ExcuteConstraint.cuh" #include "HardwareStrategy.cuh" #include "ParallelATG.cuh" #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "device_functions.hpp" #include "math_functions.h" #include <stdio.h> using namespace std; /* * 这个文件是函数获取运行时刻的各种函数的声明 * */ /* * 注意这里计算有两部的计算优化: * 1)在计算运行时刻值的时候,顺便把子约束满足情况计算了 * 2)计算子约束的满足情况的时候没有使用if等判断分支结构, * 同时使用到已经计算好的运行时刻值去减少复杂的浮点数计算过程 * 这里采用的编码函数命名编码规则是这样的: * getRuntimeValue_i_j表示计算第i个析取范式的第j个约束的运行时刻值 * */ /* * 第0个约束的CUDA计算函数 * */ __device__ void getRuntimeValue_0_0(FloatType x , FloatType y , FloatType z , FloatType w , FloatType t , FloatType v , Coodinate* res ) { res->y = ( x-y+tanf(v) ) - ( (z+t)/(w+t) ); res->isCovered = (bool)(res->y > 0.f); res->isValid = (bool)(isfinite(res->y)); return ; } /* * 第1个约束的CUDA计算函数 * */ __device__ void getRuntimeValue_0_1(FloatType x , FloatType y , FloatType z , FloatType w , FloatType t , FloatType v , Coodinate* res ) { res->y = ( sqrtf(x-t) ) - ( z/y ); res->isCovered = (bool)(res->y > 0.f); res->isValid = (bool)(isfinite(res->y)); return ; } /* * 第2个约束的CUDA计算函数 * */ __device__ void getRuntimeValue_0_2(FloatType x , FloatType y , FloatType z , FloatType w , FloatType t , FloatType v , Coodinate* res ) { res->y = ( x*y ) - ( 0 ); res->isCovered = (bool)(res->y > 0.f); res->isValid = (bool)(isfinite(res->y)); return ; } /* * 第3个约束的CUDA计算函数 * */ __device__ void getRuntimeValue_0_3(FloatType x , 
FloatType y , FloatType z , FloatType w , FloatType t , FloatType v , Coodinate* res ) { res->y = ( t+w+z ) - ( 0 ); res->isCovered = (bool)(res->y > 0.f); res->isValid = (bool)(isfinite(res->y)); return ; } /* * 第4个约束的CUDA计算函数 * */ __device__ void getRuntimeValue_0_4(FloatType x , FloatType y , FloatType z , FloatType w , FloatType t , FloatType v , Coodinate* res ) { res->y = ( x*y ) - ( t+w+z ); res->isCovered = (bool)(res->y > 0.f); res->isValid = (bool)(isfinite(res->y)); return ; } /* * 第5个约束的CUDA计算函数 * */ __device__ void getRuntimeValue_0_5(FloatType x , FloatType y , FloatType z , FloatType w , FloatType t , FloatType v , Coodinate* res ) { res->y = ( powf(y,t)*cosf(v) ) - ( z*2+w*3+x*7 ); res->isCovered = (bool)(res->y > 0.f); res->isValid = (bool)(isfinite(res->y)); return ; } /* * 第6个约束的CUDA计算函数 * */ __device__ void getRuntimeValue_0_6(FloatType x , FloatType y , FloatType z , FloatType w , FloatType t , FloatType v , Coodinate* res ) { res->y = ( z+w ) - ( x+y ); res->isCovered = (bool)(res->y > 0.f); res->isValid = (bool)(isfinite(res->y)); return ; } /* * 第7个约束的CUDA计算函数 * */ __device__ void getRuntimeValue_0_7(FloatType x , FloatType y , FloatType z , FloatType w , FloatType t , FloatType v , Coodinate* res ) { res->y = ( x/y ) - ( w ); res->isCovered = (bool)(res->y > 0.f); res->isValid = (bool)(isfinite(res->y)); return ; } /* * 第8个约束的CUDA计算函数 * */ __device__ void getRuntimeValue_0_8(FloatType x , FloatType y , FloatType z , FloatType w , FloatType t , FloatType v , Coodinate* res ) { res->y = ( x ) - ( w+y-z ); res->isCovered = (bool)(res->y > 0.f); res->isValid = (bool)(isfinite(res->y)); return ; } /* * 第9个约束的CUDA计算函数 * */ __device__ void getRuntimeValue_0_9(FloatType x , FloatType y , FloatType z , FloatType w , FloatType t , FloatType v , Coodinate* res ) { res->y = ( sqrtf(w*y*z) ) - ( log10f(t*x) ); res->isCovered = (bool)(res->y > 0.f); res->isValid = (bool)(isfinite(res->y)); return ; } /* * 第10个约束的CUDA计算函数 * */ __device__ void 
getRuntimeValue_0_10(FloatType x , FloatType y , FloatType z , FloatType w , FloatType t , FloatType v , Coodinate* res ) { res->y = ( x*cosf(t+y) ) - ( logf(w*z*3) ); res->isCovered = (bool)(res->y > 0.f); res->isValid = (bool)(isfinite(res->y)); return ; } /* * 第11个约束的CUDA计算函数 * */ __device__ void getRuntimeValue_0_11(FloatType x , FloatType y , FloatType z , FloatType w , FloatType t , FloatType v , Coodinate* res ) { res->y = ( cosf(t)*sinf(v) ) - ( cosf(y) ); res->isCovered = (bool)(res->y > 0.f); res->isValid = (bool)(isfinite(res->y)); return ; } /* * 第12个约束的CUDA计算函数 * */ __device__ void getRuntimeValue_0_12(FloatType x , FloatType y , FloatType z , FloatType w , FloatType t , FloatType v , Coodinate* res ) { res->y = ( cosf(x*y)+cosf(z*w)+cosf(t*v) ) - ( sinf(x*y)+sinf(z*w)+sinf(t*v) ); res->isCovered = (bool)(res->y > 0.f); res->isValid = (bool)(isfinite(res->y)); return ; } /* * 下面是所有的 约束 在 各个搜索方向 的获取运行时刻值的关系(假如n个约束m个搜索方向,那么一共 m*n 个函数) * */ /* * 第0个约束在第0个搜索方向的执行 * */ __global__ void calaConstraint_0_0_0(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_0(dev_predictArray[i+base].x , dev_parameter[1] , dev_parameter[2] , dev_parameter[3] , dev_parameter[4] , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第0个约束在第1个搜索方向的执行 * */ __global__ void calaConstraint_0_0_1(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_0(dev_parameter[0] , dev_predictArray[i+base].x , dev_parameter[2] , dev_parameter[3] , dev_parameter[4] , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第0个约束在第2个搜索方向的执行 * */ __global__ void calaConstraint_0_0_2(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_0(dev_parameter[0] , 
dev_parameter[1] , dev_predictArray[i+base].x , dev_parameter[3] , dev_parameter[4] , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第0个约束在第3个搜索方向的执行 * */ __global__ void calaConstraint_0_0_3(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_0(dev_parameter[0] , dev_parameter[1] , dev_parameter[2] , dev_predictArray[i+base].x , dev_parameter[4] , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第0个约束在第4个搜索方向的执行 * */ __global__ void calaConstraint_0_0_4(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_0(dev_parameter[0] , dev_parameter[1] , dev_parameter[2] , dev_parameter[3] , dev_predictArray[i+base].x , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第0个约束在第5个搜索方向的执行 * */ __global__ void calaConstraint_0_0_5(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_0(dev_parameter[0] , dev_parameter[1] , dev_parameter[2] , dev_parameter[3] , dev_parameter[4] , dev_predictArray[i+base].x , dev_predictArray + i + base); } } /* * 第1个约束在第0个搜索方向的执行 * */ __global__ void calaConstraint_0_1_0(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_1(dev_predictArray[i+base].x , dev_parameter[1] , dev_parameter[2] , dev_parameter[3] , dev_parameter[4] , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第1个约束在第1个搜索方向的执行 * */ __global__ void calaConstraint_0_1_1(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_1(dev_parameter[0] , dev_predictArray[i+base].x , dev_parameter[2] , 
dev_parameter[3] , dev_parameter[4] , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第1个约束在第2个搜索方向的执行 * */ __global__ void calaConstraint_0_1_2(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_1(dev_parameter[0] , dev_parameter[1] , dev_predictArray[i+base].x , dev_parameter[3] , dev_parameter[4] , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第1个约束在第3个搜索方向的执行 * */ __global__ void calaConstraint_0_1_3(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_1(dev_parameter[0] , dev_parameter[1] , dev_parameter[2] , dev_predictArray[i+base].x , dev_parameter[4] , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第1个约束在第4个搜索方向的执行 * */ __global__ void calaConstraint_0_1_4(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_1(dev_parameter[0] , dev_parameter[1] , dev_parameter[2] , dev_parameter[3] , dev_predictArray[i+base].x , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第1个约束在第5个搜索方向的执行 * */ __global__ void calaConstraint_0_1_5(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_1(dev_parameter[0] , dev_parameter[1] , dev_parameter[2] , dev_parameter[3] , dev_parameter[4] , dev_predictArray[i+base].x , dev_predictArray + i + base); } } /* * 第2个约束在第0个搜索方向的执行 * */ __global__ void calaConstraint_0_2_0(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_2(dev_predictArray[i+base].x , dev_parameter[1] , dev_parameter[2] , dev_parameter[3] , dev_parameter[4] , dev_parameter[5] 
, dev_predictArray + i + base); } } /* * 第2个约束在第1个搜索方向的执行 * */ __global__ void calaConstraint_0_2_1(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_2(dev_parameter[0] , dev_predictArray[i+base].x , dev_parameter[2] , dev_parameter[3] , dev_parameter[4] , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第2个约束在第2个搜索方向的执行 * */ __global__ void calaConstraint_0_2_2(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_2(dev_parameter[0] , dev_parameter[1] , dev_predictArray[i+base].x , dev_parameter[3] , dev_parameter[4] , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第2个约束在第3个搜索方向的执行 * */ __global__ void calaConstraint_0_2_3(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_2(dev_parameter[0] , dev_parameter[1] , dev_parameter[2] , dev_predictArray[i+base].x , dev_parameter[4] , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第2个约束在第4个搜索方向的执行 * */ __global__ void calaConstraint_0_2_4(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_2(dev_parameter[0] , dev_parameter[1] , dev_parameter[2] , dev_parameter[3] , dev_predictArray[i+base].x , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第2个约束在第5个搜索方向的执行 * */ __global__ void calaConstraint_0_2_5(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_2(dev_parameter[0] , dev_parameter[1] , dev_parameter[2] , dev_parameter[3] , dev_parameter[4] , dev_predictArray[i+base].x , dev_predictArray + i + base); } } /* * 
第3个约束在第0个搜索方向的执行 * */ __global__ void calaConstraint_0_3_0(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_3(dev_predictArray[i+base].x , dev_parameter[1] , dev_parameter[2] , dev_parameter[3] , dev_parameter[4] , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第3个约束在第1个搜索方向的执行 * */ __global__ void calaConstraint_0_3_1(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_3(dev_parameter[0] , dev_predictArray[i+base].x , dev_parameter[2] , dev_parameter[3] , dev_parameter[4] , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第3个约束在第2个搜索方向的执行 * */ __global__ void calaConstraint_0_3_2(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_3(dev_parameter[0] , dev_parameter[1] , dev_predictArray[i+base].x , dev_parameter[3] , dev_parameter[4] , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第3个约束在第3个搜索方向的执行 * */ __global__ void calaConstraint_0_3_3(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_3(dev_parameter[0] , dev_parameter[1] , dev_parameter[2] , dev_predictArray[i+base].x , dev_parameter[4] , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第3个约束在第4个搜索方向的执行 * */ __global__ void calaConstraint_0_3_4(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_3(dev_parameter[0] , dev_parameter[1] , dev_parameter[2] , dev_parameter[3] , dev_predictArray[i+base].x , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第3个约束在第5个搜索方向的执行 * */ __global__ void 
calaConstraint_0_3_5(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_3(dev_parameter[0] , dev_parameter[1] , dev_parameter[2] , dev_parameter[3] , dev_parameter[4] , dev_predictArray[i+base].x , dev_predictArray + i + base); } } /* * 第4个约束在第0个搜索方向的执行 * */ __global__ void calaConstraint_0_4_0(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_4(dev_predictArray[i+base].x , dev_parameter[1] , dev_parameter[2] , dev_parameter[3] , dev_parameter[4] , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第4个约束在第1个搜索方向的执行 * */ __global__ void calaConstraint_0_4_1(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_4(dev_parameter[0] , dev_predictArray[i+base].x , dev_parameter[2] , dev_parameter[3] , dev_parameter[4] , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第4个约束在第2个搜索方向的执行 * */ __global__ void calaConstraint_0_4_2(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_4(dev_parameter[0] , dev_parameter[1] , dev_predictArray[i+base].x , dev_parameter[3] , dev_parameter[4] , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第4个约束在第3个搜索方向的执行 * */ __global__ void calaConstraint_0_4_3(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_4(dev_parameter[0] , dev_parameter[1] , dev_parameter[2] , dev_predictArray[i+base].x , dev_parameter[4] , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第4个约束在第4个搜索方向的执行 * */ __global__ void calaConstraint_0_4_4(Coodinate* 
dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_4(dev_parameter[0] , dev_parameter[1] , dev_parameter[2] , dev_parameter[3] , dev_predictArray[i+base].x , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第4个约束在第5个搜索方向的执行 * */ __global__ void calaConstraint_0_4_5(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_4(dev_parameter[0] , dev_parameter[1] , dev_parameter[2] , dev_parameter[3] , dev_parameter[4] , dev_predictArray[i+base].x , dev_predictArray + i + base); } } /* * 第5个约束在第0个搜索方向的执行 * */ __global__ void calaConstraint_0_5_0(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_5(dev_predictArray[i+base].x , dev_parameter[1] , dev_parameter[2] , dev_parameter[3] , dev_parameter[4] , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第5个约束在第1个搜索方向的执行 * */ __global__ void calaConstraint_0_5_1(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_5(dev_parameter[0] , dev_predictArray[i+base].x , dev_parameter[2] , dev_parameter[3] , dev_parameter[4] , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第5个约束在第2个搜索方向的执行 * */ __global__ void calaConstraint_0_5_2(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_5(dev_parameter[0] , dev_parameter[1] , dev_predictArray[i+base].x , dev_parameter[3] , dev_parameter[4] , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第5个约束在第3个搜索方向的执行 * */ __global__ void calaConstraint_0_5_3(Coodinate* dev_predictArray,FloatType* dev_parameter,const int 
base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_5(dev_parameter[0] , dev_parameter[1] , dev_parameter[2] , dev_predictArray[i+base].x , dev_parameter[4] , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第5个约束在第4个搜索方向的执行 * */ __global__ void calaConstraint_0_5_4(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_5(dev_parameter[0] , dev_parameter[1] , dev_parameter[2] , dev_parameter[3] , dev_predictArray[i+base].x , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第5个约束在第5个搜索方向的执行 * */ __global__ void calaConstraint_0_5_5(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_5(dev_parameter[0] , dev_parameter[1] , dev_parameter[2] , dev_parameter[3] , dev_parameter[4] , dev_predictArray[i+base].x , dev_predictArray + i + base); } } /* * 第6个约束在第0个搜索方向的执行 * */ __global__ void calaConstraint_0_6_0(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_6(dev_predictArray[i+base].x , dev_parameter[1] , dev_parameter[2] , dev_parameter[3] , dev_parameter[4] , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第6个约束在第1个搜索方向的执行 * */ __global__ void calaConstraint_0_6_1(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_6(dev_parameter[0] , dev_predictArray[i+base].x , dev_parameter[2] , dev_parameter[3] , dev_parameter[4] , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第6个约束在第2个搜索方向的执行 * */ __global__ void calaConstraint_0_6_2(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + 
blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_6(dev_parameter[0] , dev_parameter[1] , dev_predictArray[i+base].x , dev_parameter[3] , dev_parameter[4] , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第6个约束在第3个搜索方向的执行 * */ __global__ void calaConstraint_0_6_3(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_6(dev_parameter[0] , dev_parameter[1] , dev_parameter[2] , dev_predictArray[i+base].x , dev_parameter[4] , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第6个约束在第4个搜索方向的执行 * */ __global__ void calaConstraint_0_6_4(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_6(dev_parameter[0] , dev_parameter[1] , dev_parameter[2] , dev_parameter[3] , dev_predictArray[i+base].x , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第6个约束在第5个搜索方向的执行 * */ __global__ void calaConstraint_0_6_5(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_6(dev_parameter[0] , dev_parameter[1] , dev_parameter[2] , dev_parameter[3] , dev_parameter[4] , dev_predictArray[i+base].x , dev_predictArray + i + base); } } /* * 第7个约束在第0个搜索方向的执行 * */ __global__ void calaConstraint_0_7_0(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_7(dev_predictArray[i+base].x , dev_parameter[1] , dev_parameter[2] , dev_parameter[3] , dev_parameter[4] , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第7个约束在第1个搜索方向的执行 * */ __global__ void calaConstraint_0_7_1(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { 
getRuntimeValue_0_7(dev_parameter[0] , dev_predictArray[i+base].x , dev_parameter[2] , dev_parameter[3] , dev_parameter[4] , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第7个约束在第2个搜索方向的执行 * */ __global__ void calaConstraint_0_7_2(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_7(dev_parameter[0] , dev_parameter[1] , dev_predictArray[i+base].x , dev_parameter[3] , dev_parameter[4] , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第7个约束在第3个搜索方向的执行 * */ __global__ void calaConstraint_0_7_3(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_7(dev_parameter[0] , dev_parameter[1] , dev_parameter[2] , dev_predictArray[i+base].x , dev_parameter[4] , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第7个约束在第4个搜索方向的执行 * */ __global__ void calaConstraint_0_7_4(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_7(dev_parameter[0] , dev_parameter[1] , dev_parameter[2] , dev_parameter[3] , dev_predictArray[i+base].x , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第7个约束在第5个搜索方向的执行 * */ __global__ void calaConstraint_0_7_5(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_7(dev_parameter[0] , dev_parameter[1] , dev_parameter[2] , dev_parameter[3] , dev_parameter[4] , dev_predictArray[i+base].x , dev_predictArray + i + base); } } /* * 第8个约束在第0个搜索方向的执行 * */ __global__ void calaConstraint_0_8_0(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_8(dev_predictArray[i+base].x , 
dev_parameter[1] , dev_parameter[2] , dev_parameter[3] , dev_parameter[4] , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第8个约束在第1个搜索方向的执行 * */ __global__ void calaConstraint_0_8_1(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_8(dev_parameter[0] , dev_predictArray[i+base].x , dev_parameter[2] , dev_parameter[3] , dev_parameter[4] , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第8个约束在第2个搜索方向的执行 * */ __global__ void calaConstraint_0_8_2(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_8(dev_parameter[0] , dev_parameter[1] , dev_predictArray[i+base].x , dev_parameter[3] , dev_parameter[4] , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第8个约束在第3个搜索方向的执行 * */ __global__ void calaConstraint_0_8_3(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_8(dev_parameter[0] , dev_parameter[1] , dev_parameter[2] , dev_predictArray[i+base].x , dev_parameter[4] , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第8个约束在第4个搜索方向的执行 * */ __global__ void calaConstraint_0_8_4(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_8(dev_parameter[0] , dev_parameter[1] , dev_parameter[2] , dev_parameter[3] , dev_predictArray[i+base].x , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第8个约束在第5个搜索方向的执行 * */ __global__ void calaConstraint_0_8_5(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_8(dev_parameter[0] , dev_parameter[1] , dev_parameter[2] , dev_parameter[3] , 
dev_parameter[4] , dev_predictArray[i+base].x , dev_predictArray + i + base); } } /* * 第9个约束在第0个搜索方向的执行 * */ __global__ void calaConstraint_0_9_0(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_9(dev_predictArray[i+base].x , dev_parameter[1] , dev_parameter[2] , dev_parameter[3] , dev_parameter[4] , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第9个约束在第1个搜索方向的执行 * */ __global__ void calaConstraint_0_9_1(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_9(dev_parameter[0] , dev_predictArray[i+base].x , dev_parameter[2] , dev_parameter[3] , dev_parameter[4] , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第9个约束在第2个搜索方向的执行 * */ __global__ void calaConstraint_0_9_2(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_9(dev_parameter[0] , dev_parameter[1] , dev_predictArray[i+base].x , dev_parameter[3] , dev_parameter[4] , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第9个约束在第3个搜索方向的执行 * */ __global__ void calaConstraint_0_9_3(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_9(dev_parameter[0] , dev_parameter[1] , dev_parameter[2] , dev_predictArray[i+base].x , dev_parameter[4] , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第9个约束在第4个搜索方向的执行 * */ __global__ void calaConstraint_0_9_4(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_9(dev_parameter[0] , dev_parameter[1] , dev_parameter[2] , dev_parameter[3] , dev_predictArray[i+base].x , dev_parameter[5] , 
dev_predictArray + i + base); } } /* * 第9个约束在第5个搜索方向的执行 * */ __global__ void calaConstraint_0_9_5(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_9(dev_parameter[0] , dev_parameter[1] , dev_parameter[2] , dev_parameter[3] , dev_parameter[4] , dev_predictArray[i+base].x , dev_predictArray + i + base); } } /* * 第10个约束在第0个搜索方向的执行 * */ __global__ void calaConstraint_0_10_0(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_10(dev_predictArray[i+base].x , dev_parameter[1] , dev_parameter[2] , dev_parameter[3] , dev_parameter[4] , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第10个约束在第1个搜索方向的执行 * */ __global__ void calaConstraint_0_10_1(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_10(dev_parameter[0] , dev_predictArray[i+base].x , dev_parameter[2] , dev_parameter[3] , dev_parameter[4] , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第10个约束在第2个搜索方向的执行 * */ __global__ void calaConstraint_0_10_2(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_10(dev_parameter[0] , dev_parameter[1] , dev_predictArray[i+base].x , dev_parameter[3] , dev_parameter[4] , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第10个约束在第3个搜索方向的执行 * */ __global__ void calaConstraint_0_10_3(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_10(dev_parameter[0] , dev_parameter[1] , dev_parameter[2] , dev_predictArray[i+base].x , dev_parameter[4] , dev_parameter[5] , dev_predictArray + i + base); } } /* * 
第10个约束在第4个搜索方向的执行 * */ __global__ void calaConstraint_0_10_4(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_10(dev_parameter[0] , dev_parameter[1] , dev_parameter[2] , dev_parameter[3] , dev_predictArray[i+base].x , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第10个约束在第5个搜索方向的执行 * */ __global__ void calaConstraint_0_10_5(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_10(dev_parameter[0] , dev_parameter[1] , dev_parameter[2] , dev_parameter[3] , dev_parameter[4] , dev_predictArray[i+base].x , dev_predictArray + i + base); } } /* * 第11个约束在第0个搜索方向的执行 * */ __global__ void calaConstraint_0_11_0(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_11(dev_predictArray[i+base].x , dev_parameter[1] , dev_parameter[2] , dev_parameter[3] , dev_parameter[4] , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第11个约束在第1个搜索方向的执行 * */ __global__ void calaConstraint_0_11_1(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_11(dev_parameter[0] , dev_predictArray[i+base].x , dev_parameter[2] , dev_parameter[3] , dev_parameter[4] , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第11个约束在第2个搜索方向的执行 * */ __global__ void calaConstraint_0_11_2(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_11(dev_parameter[0] , dev_parameter[1] , dev_predictArray[i+base].x , dev_parameter[3] , dev_parameter[4] , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第11个约束在第3个搜索方向的执行 * */ __global__ void 
calaConstraint_0_11_3(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_11(dev_parameter[0] , dev_parameter[1] , dev_parameter[2] , dev_predictArray[i+base].x , dev_parameter[4] , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第11个约束在第4个搜索方向的执行 * */ __global__ void calaConstraint_0_11_4(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_11(dev_parameter[0] , dev_parameter[1] , dev_parameter[2] , dev_parameter[3] , dev_predictArray[i+base].x , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第11个约束在第5个搜索方向的执行 * */ __global__ void calaConstraint_0_11_5(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_11(dev_parameter[0] , dev_parameter[1] , dev_parameter[2] , dev_parameter[3] , dev_parameter[4] , dev_predictArray[i+base].x , dev_predictArray + i + base); } } /* * 第12个约束在第0个搜索方向的执行 * */ __global__ void calaConstraint_0_12_0(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_12(dev_predictArray[i+base].x , dev_parameter[1] , dev_parameter[2] , dev_parameter[3] , dev_parameter[4] , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第12个约束在第1个搜索方向的执行 * */ __global__ void calaConstraint_0_12_1(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_12(dev_parameter[0] , dev_predictArray[i+base].x , dev_parameter[2] , dev_parameter[3] , dev_parameter[4] , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第12个约束在第2个搜索方向的执行 * */ __global__ void calaConstraint_0_12_2(Coodinate* 
dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_12(dev_parameter[0] , dev_parameter[1] , dev_predictArray[i+base].x , dev_parameter[3] , dev_parameter[4] , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第12个约束在第3个搜索方向的执行 * */ __global__ void calaConstraint_0_12_3(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_12(dev_parameter[0] , dev_parameter[1] , dev_parameter[2] , dev_predictArray[i+base].x , dev_parameter[4] , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第12个约束在第4个搜索方向的执行 * */ __global__ void calaConstraint_0_12_4(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_12(dev_parameter[0] , dev_parameter[1] , dev_parameter[2] , dev_parameter[3] , dev_predictArray[i+base].x , dev_parameter[5] , dev_predictArray + i + base); } } /* * 第12个约束在第5个搜索方向的执行 * */ __global__ void calaConstraint_0_12_5(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { getRuntimeValue_0_12(dev_parameter[0] , dev_parameter[1] , dev_parameter[2] , dev_parameter[3] , dev_parameter[4] , dev_predictArray[i+base].x , dev_predictArray + i + base); } } /* * 复合约束的并行计算模块,注意,这个函数还可以使用CUDA提供的流加速计算 * */ void calaRuntimeValue(int paraIndex,Coodinate* dev_predictArray,FloatType* dev_parameter,const int row,const int col) { Block res = HardwareStrategy::getHardwareStrategy(col); //根据不同的搜索方向做判断 if(paraIndex == 0) { calaConstraint_0_0_0<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[0]>>>(dev_predictArray,dev_parameter,0*col,col); calaConstraint_0_1_0<<<res.NumOfBlock , res.ThreadPreBlock , 0 , 
ParallelATG::stream[1]>>>(dev_predictArray,dev_parameter,1*col,col); calaConstraint_0_2_0<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[2]>>>(dev_predictArray,dev_parameter,2*col,col); calaConstraint_0_3_0<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[3]>>>(dev_predictArray,dev_parameter,3*col,col); calaConstraint_0_4_0<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[4]>>>(dev_predictArray,dev_parameter,4*col,col); calaConstraint_0_5_0<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[5]>>>(dev_predictArray,dev_parameter,5*col,col); calaConstraint_0_6_0<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[6]>>>(dev_predictArray,dev_parameter,6*col,col); calaConstraint_0_7_0<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[7]>>>(dev_predictArray,dev_parameter,7*col,col); calaConstraint_0_8_0<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[8]>>>(dev_predictArray,dev_parameter,8*col,col); calaConstraint_0_9_0<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[9]>>>(dev_predictArray,dev_parameter,9*col,col); calaConstraint_0_10_0<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[10]>>>(dev_predictArray,dev_parameter,10*col,col); calaConstraint_0_11_0<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[11]>>>(dev_predictArray,dev_parameter,11*col,col); calaConstraint_0_12_0<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[12]>>>(dev_predictArray,dev_parameter,12*col,col); } else if(paraIndex == 1) { calaConstraint_0_0_1<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[0]>>>(dev_predictArray,dev_parameter,0*col,col); calaConstraint_0_1_1<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[1]>>>(dev_predictArray,dev_parameter,1*col,col); calaConstraint_0_2_1<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[2]>>>(dev_predictArray,dev_parameter,2*col,col); 
calaConstraint_0_3_1<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[3]>>>(dev_predictArray,dev_parameter,3*col,col); calaConstraint_0_4_1<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[4]>>>(dev_predictArray,dev_parameter,4*col,col); calaConstraint_0_5_1<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[5]>>>(dev_predictArray,dev_parameter,5*col,col); calaConstraint_0_6_1<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[6]>>>(dev_predictArray,dev_parameter,6*col,col); calaConstraint_0_7_1<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[7]>>>(dev_predictArray,dev_parameter,7*col,col); calaConstraint_0_8_1<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[8]>>>(dev_predictArray,dev_parameter,8*col,col); calaConstraint_0_9_1<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[9]>>>(dev_predictArray,dev_parameter,9*col,col); calaConstraint_0_10_1<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[10]>>>(dev_predictArray,dev_parameter,10*col,col); calaConstraint_0_11_1<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[11]>>>(dev_predictArray,dev_parameter,11*col,col); calaConstraint_0_12_1<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[12]>>>(dev_predictArray,dev_parameter,12*col,col); } else if(paraIndex == 2) { calaConstraint_0_0_2<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[0]>>>(dev_predictArray,dev_parameter,0*col,col); calaConstraint_0_1_2<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[1]>>>(dev_predictArray,dev_parameter,1*col,col); calaConstraint_0_2_2<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[2]>>>(dev_predictArray,dev_parameter,2*col,col); calaConstraint_0_3_2<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[3]>>>(dev_predictArray,dev_parameter,3*col,col); calaConstraint_0_4_2<<<res.NumOfBlock , res.ThreadPreBlock , 0 , 
ParallelATG::stream[4]>>>(dev_predictArray,dev_parameter,4*col,col); calaConstraint_0_5_2<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[5]>>>(dev_predictArray,dev_parameter,5*col,col); calaConstraint_0_6_2<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[6]>>>(dev_predictArray,dev_parameter,6*col,col); calaConstraint_0_7_2<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[7]>>>(dev_predictArray,dev_parameter,7*col,col); calaConstraint_0_8_2<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[8]>>>(dev_predictArray,dev_parameter,8*col,col); calaConstraint_0_9_2<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[9]>>>(dev_predictArray,dev_parameter,9*col,col); calaConstraint_0_10_2<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[10]>>>(dev_predictArray,dev_parameter,10*col,col); calaConstraint_0_11_2<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[11]>>>(dev_predictArray,dev_parameter,11*col,col); calaConstraint_0_12_2<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[12]>>>(dev_predictArray,dev_parameter,12*col,col); } else if(paraIndex == 3) { calaConstraint_0_0_3<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[0]>>>(dev_predictArray,dev_parameter,0*col,col); calaConstraint_0_1_3<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[1]>>>(dev_predictArray,dev_parameter,1*col,col); calaConstraint_0_2_3<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[2]>>>(dev_predictArray,dev_parameter,2*col,col); calaConstraint_0_3_3<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[3]>>>(dev_predictArray,dev_parameter,3*col,col); calaConstraint_0_4_3<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[4]>>>(dev_predictArray,dev_parameter,4*col,col); calaConstraint_0_5_3<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[5]>>>(dev_predictArray,dev_parameter,5*col,col); 
calaConstraint_0_6_3<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[6]>>>(dev_predictArray,dev_parameter,6*col,col); calaConstraint_0_7_3<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[7]>>>(dev_predictArray,dev_parameter,7*col,col); calaConstraint_0_8_3<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[8]>>>(dev_predictArray,dev_parameter,8*col,col); calaConstraint_0_9_3<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[9]>>>(dev_predictArray,dev_parameter,9*col,col); calaConstraint_0_10_3<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[10]>>>(dev_predictArray,dev_parameter,10*col,col); calaConstraint_0_11_3<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[11]>>>(dev_predictArray,dev_parameter,11*col,col); calaConstraint_0_12_3<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[12]>>>(dev_predictArray,dev_parameter,12*col,col); } else if(paraIndex == 4) { calaConstraint_0_0_4<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[0]>>>(dev_predictArray,dev_parameter,0*col,col); calaConstraint_0_1_4<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[1]>>>(dev_predictArray,dev_parameter,1*col,col); calaConstraint_0_2_4<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[2]>>>(dev_predictArray,dev_parameter,2*col,col); calaConstraint_0_3_4<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[3]>>>(dev_predictArray,dev_parameter,3*col,col); calaConstraint_0_4_4<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[4]>>>(dev_predictArray,dev_parameter,4*col,col); calaConstraint_0_5_4<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[5]>>>(dev_predictArray,dev_parameter,5*col,col); calaConstraint_0_6_4<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[6]>>>(dev_predictArray,dev_parameter,6*col,col); calaConstraint_0_7_4<<<res.NumOfBlock , res.ThreadPreBlock , 0 , 
ParallelATG::stream[7]>>>(dev_predictArray,dev_parameter,7*col,col); calaConstraint_0_8_4<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[8]>>>(dev_predictArray,dev_parameter,8*col,col); calaConstraint_0_9_4<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[9]>>>(dev_predictArray,dev_parameter,9*col,col); calaConstraint_0_10_4<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[10]>>>(dev_predictArray,dev_parameter,10*col,col); calaConstraint_0_11_4<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[11]>>>(dev_predictArray,dev_parameter,11*col,col); calaConstraint_0_12_4<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[12]>>>(dev_predictArray,dev_parameter,12*col,col); } else if(paraIndex == 5) { calaConstraint_0_0_5<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[0]>>>(dev_predictArray,dev_parameter,0*col,col); calaConstraint_0_1_5<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[1]>>>(dev_predictArray,dev_parameter,1*col,col); calaConstraint_0_2_5<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[2]>>>(dev_predictArray,dev_parameter,2*col,col); calaConstraint_0_3_5<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[3]>>>(dev_predictArray,dev_parameter,3*col,col); calaConstraint_0_4_5<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[4]>>>(dev_predictArray,dev_parameter,4*col,col); calaConstraint_0_5_5<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[5]>>>(dev_predictArray,dev_parameter,5*col,col); calaConstraint_0_6_5<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[6]>>>(dev_predictArray,dev_parameter,6*col,col); calaConstraint_0_7_5<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[7]>>>(dev_predictArray,dev_parameter,7*col,col); calaConstraint_0_8_5<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[8]>>>(dev_predictArray,dev_parameter,8*col,col); 
calaConstraint_0_9_5<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[9]>>>(dev_predictArray,dev_parameter,9*col,col); calaConstraint_0_10_5<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[10]>>>(dev_predictArray,dev_parameter,10*col,col); calaConstraint_0_11_5<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[11]>>>(dev_predictArray,dev_parameter,11*col,col); calaConstraint_0_12_5<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[12]>>>(dev_predictArray,dev_parameter,12*col,col); } else { cout<<"************ You Should Never Get Here. In Function Of: void calaRuntimeValue(int paraIndex,Coodinate* dev_predictArray,FloatType* dev_parameter,const int row,const int col)"<<endl; } //下面是流并行计算部分的同步 ParallelATG::synStream(); } /* * 这个核函数是为了寻找可行解 * */ __global__ void calaFeasibleSolution(FullCoveredInfo* dev_coveredInfo,Coodinate* dev_predictArray,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < Size) { dev_coveredInfo[i].index = i; dev_coveredInfo[i].isCovered = dev_predictArray[i + 0*Size].isCovered && dev_predictArray[i + 1*Size].isCovered && dev_predictArray[i + 2*Size].isCovered && dev_predictArray[i + 3*Size].isCovered && dev_predictArray[i + 4*Size].isCovered && dev_predictArray[i + 5*Size].isCovered && dev_predictArray[i + 6*Size].isCovered && dev_predictArray[i + 7*Size].isCovered && dev_predictArray[i + 8*Size].isCovered && dev_predictArray[i + 9*Size].isCovered && dev_predictArray[i + 10*Size].isCovered && dev_predictArray[i + 11*Size].isCovered && dev_predictArray[i + 12*Size].isCovered; dev_coveredInfo[i].isVaild = dev_predictArray[i + 0*Size].isValid && dev_predictArray[i + 1*Size].isValid && dev_predictArray[i + 2*Size].isValid && dev_predictArray[i + 3*Size].isValid && dev_predictArray[i + 4*Size].isValid && dev_predictArray[i + 5*Size].isValid && dev_predictArray[i + 6*Size].isValid && dev_predictArray[i + 7*Size].isValid && dev_predictArray[i + 8*Size].isValid && 
dev_predictArray[i + 9*Size].isValid && dev_predictArray[i + 10*Size].isValid && dev_predictArray[i + 11*Size].isValid && dev_predictArray[i + 12*Size].isValid; dev_coveredInfo[i].vaildNum = (int)(dev_coveredInfo[i].isVaild == true); } } /* * 就是区间交运算的计算 * */ __global__ void calaFinalIntervel(Interval* dev_finalIntervel,Interval* dev_interval,const int calaArraySize) { int i = threadIdx.x + blockIdx.x*blockDim.x; bool condition = (i>=1) && (i<calaArraySize); if(condition) { Interval* a0 = dev_interval + i + calaArraySize * 0; Interval* a1 = dev_interval + i + calaArraySize * 1; Interval* a2 = dev_interval + i + calaArraySize * 2; Interval* a3 = dev_interval + i + calaArraySize * 3; Interval* a4 = dev_interval + i + calaArraySize * 4; Interval* a5 = dev_interval + i + calaArraySize * 5; Interval* a6 = dev_interval + i + calaArraySize * 6; Interval* a7 = dev_interval + i + calaArraySize * 7; Interval* a8 = dev_interval + i + calaArraySize * 8; Interval* a9 = dev_interval + i + calaArraySize * 9; Interval* a10 = dev_interval + i + calaArraySize * 10; Interval* a11 = dev_interval + i + calaArraySize * 11; Interval* a12 = dev_interval + i + calaArraySize * 12; FloatType left = a0->left; left = fmaxf( left , a1->left); left = fmaxf( left , a2->left); left = fmaxf( left , a3->left); left = fmaxf( left , a4->left); left = fmaxf( left , a5->left); left = fmaxf( left , a6->left); left = fmaxf( left , a7->left); left = fmaxf( left , a8->left); left = fmaxf( left , a9->left); left = fmaxf( left , a10->left); left = fmaxf( left , a11->left); left = fmaxf( left , a12->left); FloatType right = a0->right; right = fminf( right , a1->right); right = fminf( right , a2->right); right = fminf( right , a3->right); right = fminf( right , a4->right); right = fminf( right , a5->right); right = fminf( right , a6->right); right = fminf( right , a7->right); right = fminf( right , a8->right); right = fminf( right , a9->right); right = fminf( right , a10->right); right = fminf( right , 
a11->right); right = fminf( right , a12->right); bool hasIntervel = a0->hasIntervel && a1->hasIntervel && a2->hasIntervel && a3->hasIntervel && a4->hasIntervel && a5->hasIntervel && a6->hasIntervel && a7->hasIntervel && a8->hasIntervel && a9->hasIntervel && a10->hasIntervel && a11->hasIntervel && a12->hasIntervel; dev_finalIntervel[i].left = left; dev_finalIntervel[i].right = right; dev_finalIntervel[i].hasIntervel = hasIntervel; //这里可以自行添加打印获取区间交运算的结果 //printf("(%f , %f ) (%f , %f ) (%f , %f ) Final %d (%f , %f)\n",a1->left,a1->right,a2->left,a2->right,a3->left,a3->right,hasIntervel,left,right); } } /* * 根据预测的序列生成predct矩阵 * */ __global__ void generatePredictMat(Coodinate* dev_predictArray,PredictValueWithOne* dev_finalAllPredictValue,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if( i < Size ) { dev_predictArray[i + 0*Size].x = dev_finalAllPredictValue[i].value; dev_predictArray[i + 1*Size].x = dev_finalAllPredictValue[i].value; dev_predictArray[i + 2*Size].x = dev_finalAllPredictValue[i].value; dev_predictArray[i + 3*Size].x = dev_finalAllPredictValue[i].value; dev_predictArray[i + 4*Size].x = dev_finalAllPredictValue[i].value; dev_predictArray[i + 5*Size].x = dev_finalAllPredictValue[i].value; dev_predictArray[i + 6*Size].x = dev_finalAllPredictValue[i].value; dev_predictArray[i + 7*Size].x = dev_finalAllPredictValue[i].value; dev_predictArray[i + 8*Size].x = dev_finalAllPredictValue[i].value; dev_predictArray[i + 9*Size].x = dev_finalAllPredictValue[i].value; dev_predictArray[i + 10*Size].x = dev_finalAllPredictValue[i].value; dev_predictArray[i + 11*Size].x = dev_finalAllPredictValue[i].value; dev_predictArray[i + 12*Size].x = dev_finalAllPredictValue[i].value; } } /* * 并行计算所有的预测解向量的优先级 * */ __global__ void calaPriority(Priority* dev_priority,Coodinate* dev_calaArray,const int row,const int Size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if( i < Size ) { FloatType pri = 0.0; Coodinate* a0 = dev_calaArray + i + 0 * Size; Coodinate* a1 = 
dev_calaArray + i + 1 * Size; Coodinate* a2 = dev_calaArray + i + 2 * Size; Coodinate* a3 = dev_calaArray + i + 3 * Size; Coodinate* a4 = dev_calaArray + i + 4 * Size; Coodinate* a5 = dev_calaArray + i + 5 * Size; Coodinate* a6 = dev_calaArray + i + 6 * Size; Coodinate* a7 = dev_calaArray + i + 7 * Size; Coodinate* a8 = dev_calaArray + i + 8 * Size; Coodinate* a9 = dev_calaArray + i + 9 * Size; Coodinate* a10 = dev_calaArray + i + 10 * Size; Coodinate* a11 = dev_calaArray + i + 11 * Size; Coodinate* a12 = dev_calaArray + i + 12 * Size; if(a0->isCovered==true) pri = pri + 1.f; else if(a0->isValid==true) pri = pri + 1.f/(1.f+fabsf(a0->y)); if(a1->isCovered==true) pri = pri + 1.f; else if(a1->isValid==true) pri = pri + 1.f/(1.f+fabsf(a1->y)); if(a2->isCovered==true) pri = pri + 1.f; else if(a2->isValid==true) pri = pri + 1.f/(1.f+fabsf(a2->y)); if(a3->isCovered==true) pri = pri + 1.f; else if(a3->isValid==true) pri = pri + 1.f/(1.f+fabsf(a3->y)); if(a4->isCovered==true) pri = pri + 1.f; else if(a4->isValid==true) pri = pri + 1.f/(1.f+fabsf(a4->y)); if(a5->isCovered==true) pri = pri + 1.f; else if(a5->isValid==true) pri = pri + 1.f/(1.f+fabsf(a5->y)); if(a6->isCovered==true) pri = pri + 1.f; else if(a6->isValid==true) pri = pri + 1.f/(1.f+fabsf(a6->y)); if(a7->isCovered==true) pri = pri + 1.f; else if(a7->isValid==true) pri = pri + 1.f/(1.f+fabsf(a7->y)); if(a8->isCovered==true) pri = pri + 1.f; else if(a8->isValid==true) pri = pri + 1.f/(1.f+fabsf(a8->y)); if(a9->isCovered==true) pri = pri + 1.f; else if(a9->isValid==true) pri = pri + 1.f/(1.f+fabsf(a9->y)); if(a10->isCovered==true) pri = pri + 1.f; else if(a10->isValid==true) pri = pri + 1.f/(1.f+fabsf(a10->y)); if(a11->isCovered==true) pri = pri + 1.f; else if(a11->isValid==true) pri = pri + 1.f/(1.f+fabsf(a11->y)); if(a12->isCovered==true) pri = pri + 1.f; else if(a12->isValid==true) pri = pri + 1.f/(1.f+fabsf(a12->y)); dev_priority[i].priority = pri / (FloatType)row; dev_priority[i].x = a0->x; //下面是测试代码 bool isOne 
= (a0->x == a1->x) && (a1->x == a2->x) && (a2->x == a3->x) && (a3->x == a4->x) && (a4->x == a5->x) && (a5->x == a6->x) && (a6->x == a7->x) && (a7->x == a8->x) && (a8->x == a9->x) && (a9->x == a10->x) && (a10->x == a11->x) && (a11->x == a12->x); bool isCovered = a0->isCovered && a1->isCovered && a2->isCovered && a3->isCovered && a4->isCovered && a5->isCovered && a6->isCovered && a7->isCovered && a8->isCovered && a9->isCovered && a10->isCovered && a11->isCovered && a12->isCovered; bool isValid= a0->isValid && a1->isValid && a2->isValid && a3->isValid && a4->isValid && a5->isValid && a6->isValid && a7->isValid && a8->isValid && a9->isValid && a10->isValid && a11->isValid && a12->isValid; if(isCovered == true) { printf("Cala Prioruty Wrong,index:%d: (%f,%f,%d,%d) , (%f,%f,%d,%d) , (%f,%f,%d,%d) , (%f,%f,%d,%d) , (%f,%f,%d,%d) , (%f,%f,%d,%d) , (%f,%f,%d,%d) , (%f,%f,%d,%d) , (%f,%f,%d,%d) , (%f,%f,%d,%d) , (%f,%f,%d,%d) , (%f,%f,%d,%d) , (%f,%f,%d,%d) , isOne:%d , isCovered:%d , isValid:%d \n",i,a0->x,a0->y,a0->isCovered,a0->isValid,a1->x,a1->y,a1->isCovered,a1->isValid,a2->x,a2->y,a2->isCovered,a2->isValid,a3->x,a3->y,a3->isCovered,a3->isValid,a4->x,a4->y,a4->isCovered,a4->isValid,a5->x,a5->y,a5->isCovered,a5->isValid,a6->x,a6->y,a6->isCovered,a6->isValid,a7->x,a7->y,a7->isCovered,a7->isValid,a8->x,a8->y,a8->isCovered,a8->isValid,a9->x,a9->y,a9->isCovered,a9->isValid,a10->x,a10->y,a10->isCovered,a10->isValid,a11->x,a11->y,a11->isCovered,a11->isValid,a12->x,a12->y,a12->isCovered,a12->isValid,isOne,isCovered,isValid); } } }
2050445ada6935495ae1e48de18030d14faef06b.hip
// !!! This is a file automatically generated by hipify!!!
/**
 * Angle Between Two Vectors A and B
 *
 * Author: Gulsum Gudukbay
 * Date: 23 December 2017
 *
 */
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>

// double precision atomic add function
// taken from https://devtalk.nvidia.com/default/topic/763119/atomic-add-operation/
// CAS-loop emulation of a double-precision atomicAdd (works on devices
// without native double atomicAdd support).
__device__ double atomicAdd2(double* address, double val)
{
    unsigned long long int* address_as_ull = (unsigned long long int*)address;
    unsigned long long int old = *address_as_ull, assumed;
    do {
        assumed = old;
        old = atomicCAS(address_as_ull, assumed,
                        __double_as_longlong(val + __longlong_as_double(assumed)));
    } while (assumed != old);
    return __longlong_as_double(old);
}

/**
 * Accumulates this thread's slice of the dot product A.B into
 * result[blockIdx.x]. Each thread owns `width_thread` consecutive elements
 * starting at width_thread * global_thread_id; indices past numElements are
 * skipped. result must be zero-filled before the launch, because the
 * per-block partial is built with atomic adds.
 */
__device__ void dot_product(const double *A, const double *B, int numElements,
                            int blockSize, int width_thread, double *result)
{
    int start = width_thread * (blockIdx.x * blockSize + threadIdx.x);
    double sum = 0.0;
    for (int i = start; i < start + width_thread; i++) {
        if (i < numElements)
            sum += A[i] * B[i];
    }
    atomicAdd2(&result[blockIdx.x], sum);
}

/**
 * Accumulates this thread's slice of sum(A[i]^2) into result[blockIdx.x].
 * Same slicing and zero-initialization contract as dot_product().
 */
__device__ void mag_squared(const double *A, int numElements, int blockSize,
                            int width_thread, double *result)
{
    int start = width_thread * (blockIdx.x * blockSize + threadIdx.x);
    double sum = 0.0;
    // sum all elements squared in the block
    for (int i = start; i < start + width_thread; i++) {
        if (i < numElements)
            sum += pow(A[i], 2);
    }
    atomicAdd2(&result[blockIdx.x], sum);
}

/**
 * Computes per-block partial sums of |A|^2, |B|^2 and A.B; the host reduces
 * the partials and applies acos(dot / (|A|*|B|)).
 * Each thread processes width_thread+1 elements (see launch-config comment
 * in main). mag1, mag2 and dot_prod MUST be zeroed before the launch.
 */
__global__ void find_angle(const double *A, const double *B, int numElements,
                           int blockSize, int width_thread,
                           double *mag1, double *mag2, double *dot_prod)
{
    mag_squared(A, numElements, blockSize, width_thread + 1, mag1);
    mag_squared(B, numElements, blockSize, width_thread + 1, mag2);
    dot_product(A, B, numElements, blockSize, width_thread + 1, dot_prod);
    __syncthreads();
}

// Reference CPU implementation; returns the angle in radians.
double findAngleCPU(const double *A, const double *B, int numElements)
{
    double res = 0.0;
    double dot_prod = 0.0;
    double mag1 = 0.0;
    double mag2 = 0.0;
    for (int i = 0; i < numElements; i++) {
        dot_prod += A[i] * B[i];
        mag1 += pow(A[i], 2);
        mag2 += pow(B[i], 2);
    }
    mag1 = sqrt(mag1);
    mag2 = sqrt(mag2);
    res = acos(dot_prod / (mag1 * mag2));
    return res;
}

/**
 * Usage: prog <numElements> <blockSize> [inputFile]
 * Without a file, A and B are filled with pseudo-random values; with a file,
 * it supplies N followed by the elements of A then the elements of B.
 * Computes the angle on CPU and GPU and prints both (in degrees).
 */
int main(int argc, char *argv[])
{
    srand (58);
    /*FILE* in = fopen("input.txt", "w+");
    fprintf(in, "%f\n", (float)1000000);
    for( int i = 0; i < 2000000; i++)
    {
        fprintf(in, "%f\n", (float)(rand() / (RAND_MAX / 100)));
    }
    fclose(in);
    */
    // BUG FIX: guard against missing arguments before touching argv[1]/argv[2]
    // (previously dereferenced unconditionally).
    if (argc < 3) {
        fprintf(stderr, "usage: %s <numElements> <blockSize> [inputFile]\n", argv[0]);
        return -1;
    }
    int N, blockSize, threadElts;
    double *A, *B, *d_A, *d_B;
    double *dot_prod, *mag1, *mag2;
    double *h_dot_prod, *h_mag1, *h_mag2;
    char* filename;
    dot_prod = NULL;
    mag1 = NULL;
    mag2 = NULL;
    threadElts = 256;
    N = atoi(argv[1]);
    blockSize = atoi(argv[2]);
    if(argc == 4)
        filename = argv[3];

    hipEvent_t start4, stop4;
    hipEventCreate(&start4);
    hipEventCreate(&stop4);
    hipEventRecord(start4);
    if(argc == 3)
    {
        A = (double*)malloc(N * sizeof(double));
        B = (double*)malloc(N * sizeof(double));
        //fill in the arrays with random numbers
        for(int i = 0; i < N; i++)
        {
            A[i] = rand() / (RAND_MAX / 100);
        }
        for(int i = 0; i < N; i++)
        {
            B[i] = rand() / (RAND_MAX / 100);
        }
    }
    else
    {
        FILE * file;
        int i;
        float tmp;
        if ((file = fopen(filename, "r+")) == NULL)
        {
            printf("ERROR: file open failed\n");
            return -1;
        }
        // first value in the file is the element count
        fscanf(file,"%f", &tmp);
        N = (int)tmp;
        printf("%f\n", tmp);
        A = (double*)malloc(N * sizeof(double));
        B = (double*)malloc(N * sizeof(double));
        for(i = 0; i < N; i++)
        {
            fscanf(file,"%f", &tmp);
            A[i] = tmp;
        }
        for(i = 0; i < N; i++)
        {
            fscanf(file,"%f", &tmp);
            B[i] = tmp;
        }
        fclose(file);
    }
    hipEventRecord(stop4);
    hipEventSynchronize(stop4);
    float milliseconds4 = 0;
    hipEventElapsedTime(&milliseconds4, start4, stop4);
    printf("Time for the array generation: %f ms\n", milliseconds4);

    // +1 block keeps full coverage: each thread handles threadElts+1 elements
    // inside the kernel, and the integer division here truncates.
    int no_of_blocks = (int)ceil( N / blockSize / threadElts)+1;
    printf("\nInfo\n______________________________________________________\n");
    printf("Number of elements: %d\n", N);
    printf("Number of threads per block: %d\n", blockSize);
    printf("Number of blocks will be created: %d\n", no_of_blocks);
    printf("\nTime\n______________________________________________________\n");

    h_dot_prod = (double*)malloc(no_of_blocks * sizeof(double));
    h_mag1 = (double*)malloc(no_of_blocks * sizeof(double));
    h_mag2 = (double*)malloc(no_of_blocks * sizeof(double));
    double dot_product, magnitude1, magnitude2;
    dot_product = 0.0;
    magnitude1 = 0.0;
    magnitude2 = 0.0;

    //Compute angle on CPU
    hipEvent_t start3, stop3;
    hipEventCreate(&start3);
    hipEventCreate(&stop3);
    hipEventRecord(start3);
    float cpu_result = (float)((180.0 / M_PI)*findAngleCPU(A, B, N));
    hipEventRecord(stop3);
    hipEventSynchronize(stop3);
    float milliseconds3 = 0;
    hipEventElapsedTime(&milliseconds3, start3, stop3);
    printf("Time for the CPU function: %f ms\n", milliseconds3);

    hipMalloc(&d_A, N * sizeof(double));
    hipMalloc(&d_B, N * sizeof(double));
    hipMalloc(&dot_prod, no_of_blocks*sizeof(double));
    hipMalloc(&mag1, no_of_blocks*sizeof(double));
    hipMalloc(&mag2, no_of_blocks*sizeof(double));
    // BUG FIX: hipMalloc does not zero memory, and the kernel builds these
    // partial sums with atomic adds — they must start from 0 or the results
    // are accumulated on top of garbage.
    hipMemset(dot_prod, 0, no_of_blocks*sizeof(double));
    hipMemset(mag1, 0, no_of_blocks*sizeof(double));
    hipMemset(mag2, 0, no_of_blocks*sizeof(double));

    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start);
    hipMemcpy(d_A, A, N*sizeof(double), hipMemcpyHostToDevice);
    hipMemcpy(d_B, B, N*sizeof(double), hipMemcpyHostToDevice);
    hipEventRecord(stop);
    hipEventSynchronize(stop);
    float milliseconds = 0;
    hipEventElapsedTime(&milliseconds, start, stop);
    printf("Time for Host to Device transfer: %f ms\n", milliseconds);

    //KERNEL
    hipEvent_t start2, stop2;
    float milliseconds2 = 0;
    hipEventCreate(&start2);
    hipEventCreate(&stop2);
    hipEventRecord(start2);
    hipLaunchKernelGGL(( find_angle), dim3(no_of_blocks), dim3(blockSize), 0, 0, d_A, d_B, N, blockSize, threadElts, mag1, mag2, dot_prod);
    hipDeviceSynchronize();
    hipEventRecord(stop2);
    hipEventSynchronize(stop2);
    hipEventElapsedTime(&milliseconds2, start2, stop2);
    printf("Time for the kernel execution: %f ms\n", milliseconds2);

    hipError_t error = hipGetLastError();
    if (error != hipSuccess)
    {
        fprintf(stderr, "ERROR: %s \n", hipGetErrorString(error));
    }

    hipEvent_t start5, stop5;
    float milliseconds5 = 0;
    hipEventCreate(&start5);
    hipEventCreate(&stop5);
    hipEventRecord(start5);
    hipMemcpy(h_dot_prod, dot_prod, no_of_blocks*sizeof(double), hipMemcpyDeviceToHost);
    hipMemcpy(h_mag1, mag1, no_of_blocks*sizeof(double), hipMemcpyDeviceToHost);
    hipMemcpy(h_mag2, mag2, no_of_blocks*sizeof(double), hipMemcpyDeviceToHost);
    hipEventRecord(stop5);
    hipEventSynchronize(stop5);
    hipEventElapsedTime(&milliseconds5, start5, stop5);
    printf("Time for the Device to Host transfer: %f ms\n", milliseconds5);
    printf("Total execution time for GPU: %f ms\n", milliseconds5 + milliseconds2 + milliseconds);

    // Host-side reduction of the per-block partial sums.
    for(int i = 0; i < no_of_blocks; i++)
    {
        magnitude1 += h_mag1[i];
        magnitude2 += h_mag2[i];
        dot_product += h_dot_prod[i];
    }
    magnitude1 = sqrt(magnitude1);
    magnitude2 = sqrt(magnitude2);
    //printf("magnitude1: %.2f, magnitude2: %.2f, dot_product: %.2f\n", (float)magnitude1, (float)magnitude2, (float)dot_product);
    double result = acos(dot_product/(magnitude1*magnitude2));
    printf("\nResults\n____________________________________________________\n");
    printf("CPU result: %f\n", cpu_result);
    printf("GPU result: %f\n\n", (float)((180.0 / M_PI)*result));

    hipFree(d_A);
    hipFree(d_B);
    hipFree(dot_prod);
    hipFree(mag1);
    hipFree(mag2);
    // Release host buffers (previously leaked).
    free(A);
    free(B);
    free(h_dot_prod);
    free(h_mag1);
    free(h_mag2);
    return 0;
}
2050445ada6935495ae1e48de18030d14faef06b.cu
/**
 * Angle Between Two Vectors A and B
 *
 * Author: Gulsum Gudukbay
 * Date: 23 December 2017
 *
 */
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime_api.h>

// double precision atomic add function
// taken from https://devtalk.nvidia.com/default/topic/763119/atomic-add-operation/
// CAS-loop emulation of a double-precision atomicAdd (works on devices
// without native double atomicAdd support, i.e. pre-SM60).
__device__ double atomicAdd2(double* address, double val)
{
    unsigned long long int* address_as_ull = (unsigned long long int*)address;
    unsigned long long int old = *address_as_ull, assumed;
    do {
        assumed = old;
        old = atomicCAS(address_as_ull, assumed,
                        __double_as_longlong(val + __longlong_as_double(assumed)));
    } while (assumed != old);
    return __longlong_as_double(old);
}

/**
 * Accumulates this thread's slice of the dot product A.B into
 * result[blockIdx.x]. Each thread owns `width_thread` consecutive elements
 * starting at width_thread * global_thread_id; indices past numElements are
 * skipped. result must be zero-filled before the launch, because the
 * per-block partial is built with atomic adds.
 */
__device__ void dot_product(const double *A, const double *B, int numElements,
                            int blockSize, int width_thread, double *result)
{
    int start = width_thread * (blockIdx.x * blockSize + threadIdx.x);
    double sum = 0.0;
    for (int i = start; i < start + width_thread; i++) {
        if (i < numElements)
            sum += A[i] * B[i];
    }
    atomicAdd2(&result[blockIdx.x], sum);
}

/**
 * Accumulates this thread's slice of sum(A[i]^2) into result[blockIdx.x].
 * Same slicing and zero-initialization contract as dot_product().
 */
__device__ void mag_squared(const double *A, int numElements, int blockSize,
                            int width_thread, double *result)
{
    int start = width_thread * (blockIdx.x * blockSize + threadIdx.x);
    double sum = 0.0;
    // sum all elements squared in the block
    for (int i = start; i < start + width_thread; i++) {
        if (i < numElements)
            sum += pow(A[i], 2);
    }
    atomicAdd2(&result[blockIdx.x], sum);
}

/**
 * Computes per-block partial sums of |A|^2, |B|^2 and A.B; the host reduces
 * the partials and applies acos(dot / (|A|*|B|)).
 * Each thread processes width_thread+1 elements (see launch-config comment
 * in main). mag1, mag2 and dot_prod MUST be zeroed before the launch.
 */
__global__ void find_angle(const double *A, const double *B, int numElements,
                           int blockSize, int width_thread,
                           double *mag1, double *mag2, double *dot_prod)
{
    mag_squared(A, numElements, blockSize, width_thread + 1, mag1);
    mag_squared(B, numElements, blockSize, width_thread + 1, mag2);
    dot_product(A, B, numElements, blockSize, width_thread + 1, dot_prod);
    __syncthreads();
}

// Reference CPU implementation; returns the angle in radians.
double findAngleCPU(const double *A, const double *B, int numElements)
{
    double res = 0.0;
    double dot_prod = 0.0;
    double mag1 = 0.0;
    double mag2 = 0.0;
    for (int i = 0; i < numElements; i++) {
        dot_prod += A[i] * B[i];
        mag1 += pow(A[i], 2);
        mag2 += pow(B[i], 2);
    }
    mag1 = sqrt(mag1);
    mag2 = sqrt(mag2);
    res = acos(dot_prod / (mag1 * mag2));
    return res;
}

/**
 * Usage: prog <numElements> <blockSize> [inputFile]
 * Without a file, A and B are filled with pseudo-random values; with a file,
 * it supplies N followed by the elements of A then the elements of B.
 * Computes the angle on CPU and GPU and prints both (in degrees).
 */
int main(int argc, char *argv[])
{
    srand (58);
    /*FILE* in = fopen("input.txt", "w+");
    fprintf(in, "%f\n", (float)1000000);
    for( int i = 0; i < 2000000; i++)
    {
        fprintf(in, "%f\n", (float)(rand() / (RAND_MAX / 100)));
    }
    fclose(in);
    */
    // BUG FIX: guard against missing arguments before touching argv[1]/argv[2]
    // (previously dereferenced unconditionally).
    if (argc < 3) {
        fprintf(stderr, "usage: %s <numElements> <blockSize> [inputFile]\n", argv[0]);
        return -1;
    }
    int N, blockSize, threadElts;
    double *A, *B, *d_A, *d_B;
    double *dot_prod, *mag1, *mag2;
    double *h_dot_prod, *h_mag1, *h_mag2;
    char* filename;
    dot_prod = NULL;
    mag1 = NULL;
    mag2 = NULL;
    threadElts = 256;
    N = atoi(argv[1]);
    blockSize = atoi(argv[2]);
    if(argc == 4)
        filename = argv[3];

    cudaEvent_t start4, stop4;
    cudaEventCreate(&start4);
    cudaEventCreate(&stop4);
    cudaEventRecord(start4);
    if(argc == 3)
    {
        A = (double*)malloc(N * sizeof(double));
        B = (double*)malloc(N * sizeof(double));
        //fill in the arrays with random numbers
        for(int i = 0; i < N; i++)
        {
            A[i] = rand() / (RAND_MAX / 100);
        }
        for(int i = 0; i < N; i++)
        {
            B[i] = rand() / (RAND_MAX / 100);
        }
    }
    else
    {
        FILE * file;
        int i;
        float tmp;
        if ((file = fopen(filename, "r+")) == NULL)
        {
            printf("ERROR: file open failed\n");
            return -1;
        }
        // first value in the file is the element count
        fscanf(file,"%f", &tmp);
        N = (int)tmp;
        printf("%f\n", tmp);
        A = (double*)malloc(N * sizeof(double));
        B = (double*)malloc(N * sizeof(double));
        for(i = 0; i < N; i++)
        {
            fscanf(file,"%f", &tmp);
            A[i] = tmp;
        }
        for(i = 0; i < N; i++)
        {
            fscanf(file,"%f", &tmp);
            B[i] = tmp;
        }
        fclose(file);
    }
    cudaEventRecord(stop4);
    cudaEventSynchronize(stop4);
    float milliseconds4 = 0;
    cudaEventElapsedTime(&milliseconds4, start4, stop4);
    printf("Time for the array generation: %f ms\n", milliseconds4);

    // +1 block keeps full coverage: each thread handles threadElts+1 elements
    // inside the kernel, and the integer division here truncates.
    int no_of_blocks = (int)ceil( N / blockSize / threadElts)+1;
    printf("\nInfo\n______________________________________________________\n");
    printf("Number of elements: %d\n", N);
    printf("Number of threads per block: %d\n", blockSize);
    printf("Number of blocks will be created: %d\n", no_of_blocks);
    printf("\nTime\n______________________________________________________\n");

    h_dot_prod = (double*)malloc(no_of_blocks * sizeof(double));
    h_mag1 = (double*)malloc(no_of_blocks * sizeof(double));
    h_mag2 = (double*)malloc(no_of_blocks * sizeof(double));
    double dot_product, magnitude1, magnitude2;
    dot_product = 0.0;
    magnitude1 = 0.0;
    magnitude2 = 0.0;

    //Compute angle on CPU
    cudaEvent_t start3, stop3;
    cudaEventCreate(&start3);
    cudaEventCreate(&stop3);
    cudaEventRecord(start3);
    float cpu_result = (float)((180.0 / M_PI)*findAngleCPU(A, B, N));
    cudaEventRecord(stop3);
    cudaEventSynchronize(stop3);
    float milliseconds3 = 0;
    cudaEventElapsedTime(&milliseconds3, start3, stop3);
    printf("Time for the CPU function: %f ms\n", milliseconds3);

    cudaMalloc(&d_A, N * sizeof(double));
    cudaMalloc(&d_B, N * sizeof(double));
    cudaMalloc(&dot_prod, no_of_blocks*sizeof(double));
    cudaMalloc(&mag1, no_of_blocks*sizeof(double));
    cudaMalloc(&mag2, no_of_blocks*sizeof(double));
    // BUG FIX: cudaMalloc does not zero memory, and the kernel builds these
    // partial sums with atomic adds — they must start from 0 or the results
    // are accumulated on top of garbage.
    cudaMemset(dot_prod, 0, no_of_blocks*sizeof(double));
    cudaMemset(mag1, 0, no_of_blocks*sizeof(double));
    cudaMemset(mag2, 0, no_of_blocks*sizeof(double));

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    cudaMemcpy(d_A, A, N*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, N*sizeof(double), cudaMemcpyHostToDevice);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("Time for Host to Device transfer: %f ms\n", milliseconds);

    //KERNEL
    cudaEvent_t start2, stop2;
    float milliseconds2 = 0;
    cudaEventCreate(&start2);
    cudaEventCreate(&stop2);
    cudaEventRecord(start2);
    find_angle<<<no_of_blocks, blockSize>>>(d_A, d_B, N, blockSize, threadElts, mag1, mag2, dot_prod);
    cudaDeviceSynchronize();
    cudaEventRecord(stop2);
    cudaEventSynchronize(stop2);
    cudaEventElapsedTime(&milliseconds2, start2, stop2);
    printf("Time for the kernel execution: %f ms\n", milliseconds2);

    cudaError_t error = cudaGetLastError();
    if (error != cudaSuccess)
    {
        fprintf(stderr, "ERROR: %s \n", cudaGetErrorString(error));
    }

    cudaEvent_t start5, stop5;
    float milliseconds5 = 0;
    cudaEventCreate(&start5);
    cudaEventCreate(&stop5);
    cudaEventRecord(start5);
    cudaMemcpy(h_dot_prod, dot_prod, no_of_blocks*sizeof(double), cudaMemcpyDeviceToHost);
    cudaMemcpy(h_mag1, mag1, no_of_blocks*sizeof(double), cudaMemcpyDeviceToHost);
    cudaMemcpy(h_mag2, mag2, no_of_blocks*sizeof(double), cudaMemcpyDeviceToHost);
    cudaEventRecord(stop5);
    cudaEventSynchronize(stop5);
    cudaEventElapsedTime(&milliseconds5, start5, stop5);
    printf("Time for the Device to Host transfer: %f ms\n", milliseconds5);
    printf("Total execution time for GPU: %f ms\n", milliseconds5 + milliseconds2 + milliseconds);

    // Host-side reduction of the per-block partial sums.
    for(int i = 0; i < no_of_blocks; i++)
    {
        magnitude1 += h_mag1[i];
        magnitude2 += h_mag2[i];
        dot_product += h_dot_prod[i];
    }
    magnitude1 = sqrt(magnitude1);
    magnitude2 = sqrt(magnitude2);
    //printf("magnitude1: %.2f, magnitude2: %.2f, dot_product: %.2f\n", (float)magnitude1, (float)magnitude2, (float)dot_product);
    double result = acos(dot_product/(magnitude1*magnitude2));
    printf("\nResults\n____________________________________________________\n");
    printf("CPU result: %f\n", cpu_result);
    printf("GPU result: %f\n\n", (float)((180.0 / M_PI)*result));

    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(dot_prod);
    cudaFree(mag1);
    cudaFree(mag2);
    // Release host buffers (previously leaked).
    free(A);
    free(B);
    free(h_dot_prod);
    free(h_mag1);
    free(h_mag2);
    return 0;
}
5cc2da628e2f600be14e96507c10bd6a8104a2ee.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include "firewall_kernel.h"
#include <gpu_packet.h>

extern struct portTreeNode *dev_srcPortTree;
extern struct portTreeNode *dev_desPortTree;
extern struct trieAddrNode *dev_srcAddrTrie;
extern struct trieAddrNode *dev_desAddrTrie;
extern unsigned int *dev_protocolHash;

/*
 * Packet-filter classification kernel (grid-stride loop, one packet per
 * iteration). Each packet is matched against five rule dimensions — source
 * address trie, destination address trie, protocol hash, source port
 * interval tree, destination port interval tree — and the 128-bit rule
 * bitmask of matches is written to res[4*tid .. 4*tid+3] (four 32-bit words
 * per packet). Each dimension produces its own bitmask; the final result is
 * the AND of all five.
 *
 * NOTE(review): trie/tree node layouts come from firewall_kernel.h (not
 * visible here); child index 0 is treated as "no child", which implies node 0
 * is the root and can never be referenced as a child.
 */
extern "C" __global__ void firewall_gpu(struct trieAddrNode *srcAddrTrie, struct trieAddrNode *desAddrTrie, unsigned int *protocolHash, struct portTreeNode *srcPortTree, struct portTreeNode *desPortTree, unsigned int *res, gpu_packet_t **pkts, int pcktCount) {
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    for (; tid < pcktCount; tid += gridDim.x * blockDim.x) {
        int idx = 0;
        // 4 result words per packet
        int resid = tid << 2;
        res[resid] = 0;
        res[resid+1] = 0;
        res[resid+2] = 0;
        res[resid+3] = 0;
        //***********
        //for srcAddr
        // Walk the source-address binary trie bit-by-bit (MSB first),
        // OR-ing in the matchRules mask of every node on the path.
        for (int i = 1; i <= 32; i++) {
            unsigned int tmp = pkts[tid]->src_addr;
            tmp = tmp >> (32-i);
            res[resid] = (res[resid] | srcAddrTrie[idx].matchRules[0]);
            res[resid+1] = (res[resid+1] | srcAddrTrie[idx].matchRules[1]);
            res[resid+2] = (res[resid+2] | srcAddrTrie[idx].matchRules[2]);
            res[resid+3] = (res[resid+3] | srcAddrTrie[idx].matchRules[3]);
            // branch on the current address bit; stop at a missing child
            if ((tmp % 2) == 0) {
                if (srcAddrTrie[idx].leftChild != 0) {
                    idx = srcAddrTrie[idx].leftChild;
                } else {
                    break;
                }
            } else {
                if (srcAddrTrie[idx].rightChild != 0) {
                    idx = srcAddrTrie[idx].rightChild;
                } else {
                    break;
                }
            }
        }
        //***********
        //for desAddr
        // Same walk over the destination-address trie, into a local mask,
        // then AND-ed into the running result.
        idx = 0;
        int resDesAddr[4] = {0};
        for (int i = 1; i <= 32; i++) {
            unsigned int tmp = pkts[tid]->dst_addr;
            tmp = tmp >> (32-i);
            resDesAddr[0] = (resDesAddr[0] | desAddrTrie[idx].matchRules[0]);
            resDesAddr[1] = (resDesAddr[1] | desAddrTrie[idx].matchRules[1]);
            resDesAddr[2] = (resDesAddr[2] | desAddrTrie[idx].matchRules[2]);
            resDesAddr[3] = (resDesAddr[3] | desAddrTrie[idx].matchRules[3]);
            if ((tmp % 2) == 0) {
                if (desAddrTrie[idx].leftChild != 0) {
                    idx = desAddrTrie[idx].leftChild;
                } else {
                    break;
                }
            } else {
                if (desAddrTrie[idx].rightChild != 0) {
                    idx = desAddrTrie[idx].rightChild;
                } else {
                    break;
                }
            }
        }
        res[resid] = (res[resid] & resDesAddr[0]);
        res[resid+1] = (res[resid+1] & resDesAddr[1]);
        res[resid+2] = (res[resid+2] & resDesAddr[2]);
        res[resid+3] = (res[resid+3] & resDesAddr[3]);
        //************
        //for protocol
        // protocolHash appears to hold 4 result words per protocol id,
        // laid out in stride-4 planes — TODO confirm against the host setup.
        res[resid] = (res[resid] & protocolHash[pkts[tid]->proto_id]);
        res[resid+1] = (res[resid+1] & protocolHash[pkts[tid]->proto_id+4]);
        res[resid+2] = (res[resid+2] & protocolHash[pkts[tid]->proto_id+8]);
        res[resid+3] = (res[resid+3] & protocolHash[pkts[tid]->proto_id+12]);
        //************
        //for src port
        // BFS over the source-port interval tree using a fixed-size local
        // queue of node indices (RULESIZE from firewall_kernel.h).
        int srcPortQueue[RULESIZE] = {0};
        int headSrc = -1;
        int tailSrc = 0;
        //queue size = tailSrc-headSrc.
        // NOTE(review): the initial queue entry is srcPortTree[0].endPort,
        // not index 0 — presumably node 0 is a sentinel whose endPort stores
        // the root index; verify against the host-side tree builder.
        srcPortQueue[tailSrc++] = srcPortTree[0].endPort;
        headSrc++;
        int resSrcPort[4] = {0};
        while((tailSrc-headSrc) > 0) {
            //when size > 0, same as queue is not empty,
            //same as there are node to be deal with.
            //headSrc is the node we are dealing with.
            if (pkts[tid]->src_port > srcPortTree[srcPortQueue[headSrc]].max) {
                // port exceeds the subtree's max endpoint: prune
                headSrc++;
            } else if (pkts[tid]->src_port < srcPortTree[srcPortQueue[headSrc]].startPort) {
                // port left of this interval: only the left subtree can match
                if (srcPortTree[srcPortQueue[headSrc]].leftChild != 0) {
                    srcPortQueue[tailSrc++] = srcPortTree[srcPortQueue[headSrc]].leftChild;
                }
                headSrc++;
            } else if (pkts[tid]->src_port <= srcPortTree[srcPortQueue[headSrc]].endPort) {
                // port inside [startPort, endPort]: record rules, search both sides
                resSrcPort[0] = resSrcPort[0] | srcPortTree[srcPortQueue[headSrc]].matchRules[0];
                resSrcPort[1] = resSrcPort[1] | srcPortTree[srcPortQueue[headSrc]].matchRules[1];
                resSrcPort[2] = resSrcPort[2] | srcPortTree[srcPortQueue[headSrc]].matchRules[2];
                resSrcPort[3] = resSrcPort[3] | srcPortTree[srcPortQueue[headSrc]].matchRules[3];
                if (srcPortTree[srcPortQueue[headSrc]].leftChild != 0) {
                    srcPortQueue[tailSrc++] = srcPortTree[srcPortQueue[headSrc]].leftChild;
                }
                if (srcPortTree[srcPortQueue[headSrc]].rightChild != 0) {
                    srcPortQueue[tailSrc++] = srcPortTree[srcPortQueue[headSrc]].rightChild;
                }
                headSrc++;
            } else {
                // port past this interval but within subtree max: search both sides
                if (srcPortTree[srcPortQueue[headSrc]].leftChild != 0) {
                    srcPortQueue[tailSrc++] = srcPortTree[srcPortQueue[headSrc]].leftChild;
                }
                if (srcPortTree[srcPortQueue[headSrc]].rightChild != 0) {
                    srcPortQueue[tailSrc++] = srcPortTree[srcPortQueue[headSrc]].rightChild;
                }
                headSrc++;
            }
        }
        res[resid] = (res[resid] & resSrcPort[0]);
        res[resid+1] = (res[resid+1] & resSrcPort[1]);
        res[resid+2] = (res[resid+2] & resSrcPort[2]);
        res[resid+3] = (res[resid+3] & resSrcPort[3]);
        //************
        //for des port
        // Identical BFS over the destination-port interval tree.
        int desPortQueue[RULESIZE] = {0};
        int headDes = -1;
        int tailDes = 0;
        //queue size = tailDes-headDes.
        desPortQueue[tailDes++] = desPortTree[0].endPort;
        headDes++;
        int resDesPort[4] = {0};
        while((tailDes-headDes) > 0) {
            //when size > 0, same as queue is not empty,
            //same as there are node to be deal with.
            //headDes is the node we are dealing with.
            if (pkts[tid]->dst_port > desPortTree[desPortQueue[headDes]].max) {
                headDes++;
            } else if (pkts[tid]->dst_port < desPortTree[desPortQueue[headDes]].startPort) {
                if (desPortTree[desPortQueue[headDes]].leftChild != 0) {
                    desPortQueue[tailDes++] = desPortTree[desPortQueue[headDes]].leftChild;
                }
                headDes++;
            } else if (pkts[tid]->dst_port <= desPortTree[desPortQueue[headDes]].endPort) {
                resDesPort[0] = resDesPort[0] | desPortTree[desPortQueue[headDes]].matchRules[0];
                resDesPort[1] = resDesPort[1] | desPortTree[desPortQueue[headDes]].matchRules[1];
                resDesPort[2] = resDesPort[2] | desPortTree[desPortQueue[headDes]].matchRules[2];
                resDesPort[3] = resDesPort[3] | desPortTree[desPortQueue[headDes]].matchRules[3];
                if (desPortTree[desPortQueue[headDes]].leftChild != 0) {
                    desPortQueue[tailDes++] = desPortTree[desPortQueue[headDes]].leftChild;
                }
                if (desPortTree[desPortQueue[headDes]].rightChild != 0) {
                    desPortQueue[tailDes++] = desPortTree[desPortQueue[headDes]].rightChild;
                }
                headDes++;
            } else {
                if (desPortTree[desPortQueue[headDes]].leftChild != 0) {
                    desPortQueue[tailDes++] = desPortTree[desPortQueue[headDes]].leftChild;
                }
                if (desPortTree[desPortQueue[headDes]].rightChild != 0) {
                    desPortQueue[tailDes++] = desPortTree[desPortQueue[headDes]].rightChild;
                }
                headDes++;
            }
        }
        res[resid] = (res[resid] & resDesPort[0]);
        res[resid+1] = (res[resid+1] & resDesPort[1]);
        res[resid+2] = (res[resid+2] & resDesPort[2]);
        res[resid+3] = (res[resid+3] & resDesPort[3]);
    }
}
5cc2da628e2f600be14e96507c10bd6a8104a2ee.cu
#include <stdio.h>
#include <cuda_runtime.h>
#include "firewall_kernel.h"
#include <gpu_packet.h>

extern struct portTreeNode *dev_srcPortTree;
extern struct portTreeNode *dev_desPortTree;
extern struct trieAddrNode *dev_srcAddrTrie;
extern struct trieAddrNode *dev_desAddrTrie;
extern unsigned int *dev_protocolHash;

/*
 * Packet-filter classification kernel (grid-stride loop, one packet per
 * iteration). Each packet is matched against five rule dimensions — source
 * address trie, destination address trie, protocol hash, source port
 * interval tree, destination port interval tree — and the 128-bit rule
 * bitmask of matches is written to res[4*tid .. 4*tid+3] (four 32-bit words
 * per packet). Each dimension produces its own bitmask; the final result is
 * the AND of all five.
 *
 * NOTE(review): trie/tree node layouts come from firewall_kernel.h (not
 * visible here); child index 0 is treated as "no child", which implies node 0
 * is the root and can never be referenced as a child.
 */
extern "C" __global__ void firewall_gpu(struct trieAddrNode *srcAddrTrie, struct trieAddrNode *desAddrTrie, unsigned int *protocolHash, struct portTreeNode *srcPortTree, struct portTreeNode *desPortTree, unsigned int *res, gpu_packet_t **pkts, int pcktCount) {
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    for (; tid < pcktCount; tid += gridDim.x * blockDim.x) {
        int idx = 0;
        // 4 result words per packet
        int resid = tid << 2;
        res[resid] = 0;
        res[resid+1] = 0;
        res[resid+2] = 0;
        res[resid+3] = 0;
        //***********
        //for srcAddr
        // Walk the source-address binary trie bit-by-bit (MSB first),
        // OR-ing in the matchRules mask of every node on the path.
        for (int i = 1; i <= 32; i++) {
            unsigned int tmp = pkts[tid]->src_addr;
            tmp = tmp >> (32-i);
            res[resid] = (res[resid] | srcAddrTrie[idx].matchRules[0]);
            res[resid+1] = (res[resid+1] | srcAddrTrie[idx].matchRules[1]);
            res[resid+2] = (res[resid+2] | srcAddrTrie[idx].matchRules[2]);
            res[resid+3] = (res[resid+3] | srcAddrTrie[idx].matchRules[3]);
            // branch on the current address bit; stop at a missing child
            if ((tmp % 2) == 0) {
                if (srcAddrTrie[idx].leftChild != 0) {
                    idx = srcAddrTrie[idx].leftChild;
                } else {
                    break;
                }
            } else {
                if (srcAddrTrie[idx].rightChild != 0) {
                    idx = srcAddrTrie[idx].rightChild;
                } else {
                    break;
                }
            }
        }
        //***********
        //for desAddr
        // Same walk over the destination-address trie, into a local mask,
        // then AND-ed into the running result.
        idx = 0;
        int resDesAddr[4] = {0};
        for (int i = 1; i <= 32; i++) {
            unsigned int tmp = pkts[tid]->dst_addr;
            tmp = tmp >> (32-i);
            resDesAddr[0] = (resDesAddr[0] | desAddrTrie[idx].matchRules[0]);
            resDesAddr[1] = (resDesAddr[1] | desAddrTrie[idx].matchRules[1]);
            resDesAddr[2] = (resDesAddr[2] | desAddrTrie[idx].matchRules[2]);
            resDesAddr[3] = (resDesAddr[3] | desAddrTrie[idx].matchRules[3]);
            if ((tmp % 2) == 0) {
                if (desAddrTrie[idx].leftChild != 0) {
                    idx = desAddrTrie[idx].leftChild;
                } else {
                    break;
                }
            } else {
                if (desAddrTrie[idx].rightChild != 0) {
                    idx = desAddrTrie[idx].rightChild;
                } else {
                    break;
                }
            }
        }
        res[resid] = (res[resid] & resDesAddr[0]);
        res[resid+1] = (res[resid+1] & resDesAddr[1]);
        res[resid+2] = (res[resid+2] & resDesAddr[2]);
        res[resid+3] = (res[resid+3] & resDesAddr[3]);
        //************
        //for protocol
        // protocolHash appears to hold 4 result words per protocol id,
        // laid out in stride-4 planes — TODO confirm against the host setup.
        res[resid] = (res[resid] & protocolHash[pkts[tid]->proto_id]);
        res[resid+1] = (res[resid+1] & protocolHash[pkts[tid]->proto_id+4]);
        res[resid+2] = (res[resid+2] & protocolHash[pkts[tid]->proto_id+8]);
        res[resid+3] = (res[resid+3] & protocolHash[pkts[tid]->proto_id+12]);
        //************
        //for src port
        // BFS over the source-port interval tree using a fixed-size local
        // queue of node indices (RULESIZE from firewall_kernel.h).
        int srcPortQueue[RULESIZE] = {0};
        int headSrc = -1;
        int tailSrc = 0;
        //queue size = tailSrc-headSrc.
        // NOTE(review): the initial queue entry is srcPortTree[0].endPort,
        // not index 0 — presumably node 0 is a sentinel whose endPort stores
        // the root index; verify against the host-side tree builder.
        srcPortQueue[tailSrc++] = srcPortTree[0].endPort;
        headSrc++;
        int resSrcPort[4] = {0};
        while((tailSrc-headSrc) > 0) {
            //when size > 0, same as queue is not empty,
            //same as there are node to be deal with.
            //headSrc is the node we are dealing with.
            if (pkts[tid]->src_port > srcPortTree[srcPortQueue[headSrc]].max) {
                // port exceeds the subtree's max endpoint: prune
                headSrc++;
            } else if (pkts[tid]->src_port < srcPortTree[srcPortQueue[headSrc]].startPort) {
                // port left of this interval: only the left subtree can match
                if (srcPortTree[srcPortQueue[headSrc]].leftChild != 0) {
                    srcPortQueue[tailSrc++] = srcPortTree[srcPortQueue[headSrc]].leftChild;
                }
                headSrc++;
            } else if (pkts[tid]->src_port <= srcPortTree[srcPortQueue[headSrc]].endPort) {
                // port inside [startPort, endPort]: record rules, search both sides
                resSrcPort[0] = resSrcPort[0] | srcPortTree[srcPortQueue[headSrc]].matchRules[0];
                resSrcPort[1] = resSrcPort[1] | srcPortTree[srcPortQueue[headSrc]].matchRules[1];
                resSrcPort[2] = resSrcPort[2] | srcPortTree[srcPortQueue[headSrc]].matchRules[2];
                resSrcPort[3] = resSrcPort[3] | srcPortTree[srcPortQueue[headSrc]].matchRules[3];
                if (srcPortTree[srcPortQueue[headSrc]].leftChild != 0) {
                    srcPortQueue[tailSrc++] = srcPortTree[srcPortQueue[headSrc]].leftChild;
                }
                if (srcPortTree[srcPortQueue[headSrc]].rightChild != 0) {
                    srcPortQueue[tailSrc++] = srcPortTree[srcPortQueue[headSrc]].rightChild;
                }
                headSrc++;
            } else {
                // port past this interval but within subtree max: search both sides
                if (srcPortTree[srcPortQueue[headSrc]].leftChild != 0) {
                    srcPortQueue[tailSrc++] = srcPortTree[srcPortQueue[headSrc]].leftChild;
                }
                if (srcPortTree[srcPortQueue[headSrc]].rightChild != 0) {
                    srcPortQueue[tailSrc++] = srcPortTree[srcPortQueue[headSrc]].rightChild;
                }
                headSrc++;
            }
        }
        res[resid] = (res[resid] & resSrcPort[0]);
        res[resid+1] = (res[resid+1] & resSrcPort[1]);
        res[resid+2] = (res[resid+2] & resSrcPort[2]);
        res[resid+3] = (res[resid+3] & resSrcPort[3]);
        //************
        //for des port
        // Identical BFS over the destination-port interval tree.
        int desPortQueue[RULESIZE] = {0};
        int headDes = -1;
        int tailDes = 0;
        //queue size = tailDes-headDes.
        desPortQueue[tailDes++] = desPortTree[0].endPort;
        headDes++;
        int resDesPort[4] = {0};
        while((tailDes-headDes) > 0) {
            //when size > 0, same as queue is not empty,
            //same as there are node to be deal with.
            //headDes is the node we are dealing with.
            if (pkts[tid]->dst_port > desPortTree[desPortQueue[headDes]].max) {
                headDes++;
            } else if (pkts[tid]->dst_port < desPortTree[desPortQueue[headDes]].startPort) {
                if (desPortTree[desPortQueue[headDes]].leftChild != 0) {
                    desPortQueue[tailDes++] = desPortTree[desPortQueue[headDes]].leftChild;
                }
                headDes++;
            } else if (pkts[tid]->dst_port <= desPortTree[desPortQueue[headDes]].endPort) {
                resDesPort[0] = resDesPort[0] | desPortTree[desPortQueue[headDes]].matchRules[0];
                resDesPort[1] = resDesPort[1] | desPortTree[desPortQueue[headDes]].matchRules[1];
                resDesPort[2] = resDesPort[2] | desPortTree[desPortQueue[headDes]].matchRules[2];
                resDesPort[3] = resDesPort[3] | desPortTree[desPortQueue[headDes]].matchRules[3];
                if (desPortTree[desPortQueue[headDes]].leftChild != 0) {
                    desPortQueue[tailDes++] = desPortTree[desPortQueue[headDes]].leftChild;
                }
                if (desPortTree[desPortQueue[headDes]].rightChild != 0) {
                    desPortQueue[tailDes++] = desPortTree[desPortQueue[headDes]].rightChild;
                }
                headDes++;
            } else {
                if (desPortTree[desPortQueue[headDes]].leftChild != 0) {
                    desPortQueue[tailDes++] = desPortTree[desPortQueue[headDes]].leftChild;
                }
                if (desPortTree[desPortQueue[headDes]].rightChild != 0) {
                    desPortQueue[tailDes++] = desPortTree[desPortQueue[headDes]].rightChild;
                }
                headDes++;
            }
        }
        res[resid] = (res[resid] & resDesPort[0]);
        res[resid+1] = (res[resid+1] & resDesPort[1]);
        res[resid+2] = (res[resid+2] & resDesPort[2]);
        res[resid+3] = (res[resid+3] & resDesPort[3]);
    }
}
9951cd4667868d8fd93b2680211865ea00360912.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * @brief Implementation of the math_utils.h file * * @file math_utils.cu * @author David Chan * @date 2018-04-04 * Copyright (c) 2018, Regents of the University of California */ #include "util/math_utils.h" void tsnecuda::util::GaussianNormalizeDeviceVector(hipblasHandle_t &handle, thrust::device_vector<float> &d_points, const int num_points, const int num_dims) { // Compute the means auto d_means = tsnecuda::util::ReduceMean(handle, d_points, num_points, num_dims, 0); // Zero-Center tsnecuda::util::BroadcastMatrixVector(d_points, d_means, num_points, num_dims, thrust::minus<float>(), 1, 1.f); // Compute the standard deviation thrust::device_vector<float> squared_vals(d_points.size()); tsnecuda::util::SquareDeviceVector(squared_vals, d_points); auto norm_sum_of_squares = tsnecuda::util::ReduceAlpha(handle, squared_vals, num_points, num_dims, 1.f / (num_points - 1), 0); thrust::device_vector<float> standard_deviation(norm_sum_of_squares.size()); tsnecuda::util::SqrtDeviceVector(standard_deviation, norm_sum_of_squares); // Normalize the values tsnecuda::util::BroadcastMatrixVector(d_points, standard_deviation, num_points, num_dims, thrust::divides<float>(), 1, 1.f); } void tsnecuda::util::SquareDeviceVector(thrust::device_vector<float> &d_out, const thrust::device_vector<float> &d_input) { thrust::transform(d_input.begin(), d_input.end(), d_out.begin(), tsnecuda::util::FunctionalSquare()); } void tsnecuda::util::SqrtDeviceVector(thrust::device_vector<float> &d_out, const thrust::device_vector<float> &d_input) { thrust::transform(d_input.begin(), d_input.end(), d_out.begin(), tsnecuda::util::FunctionalSqrt()); } float tsnecuda::util::L2NormDeviceVector( const thrust::device_vector<float> &d_vector) { return std::sqrt(thrust::transform_reduce(d_vector.begin(), d_vector.end(), tsnecuda::util::FunctionalSquare(), 0.0f, thrust::plus<float>())); } bool 
tsnecuda::util::AnyNanOrInfDeviceVector( const thrust::device_vector<float> &d_vector) { return thrust::transform_reduce(d_vector.begin(), d_vector.end(), tsnecuda::util::FunctionalNanOrInf(), 0, thrust::plus<bool>()); } void tsnecuda::util::MaxNormalizeDeviceVector( thrust::device_vector<float> &d_vector) { float max_val = thrust::transform_reduce(d_vector.begin(), d_vector.end(), tsnecuda::util::FunctionalAbs(), 0.0f, thrust::maximum<float>()); thrust::constant_iterator<float> division_iterator(max_val); thrust::transform(d_vector.begin(), d_vector.end(), division_iterator, d_vector.begin(), thrust::divides<float>()); } // Needs to compute pij = pj|i + pi|j / 2n // void tsnecuda::util::SymmetrizeMatrix(hipsparseHandle_t &handle, // thrust::device_vector<float> &d_symmetrized_values, // thrust::device_vector<int32_t> &d_symmetrized_rowptr, // thrust::device_vector<int32_t> &d_symmetrized_colind, // thrust::device_vector<float> &d_values, // thrust::device_vector<int32_t> &d_indices, // const float magnitude_factor, // const int num_points, // const int num_neighbors) // { // // Allocate memory // int32_t *csr_row_ptr_a = nullptr; // hipMalloc(reinterpret_cast<void **>(&csr_row_ptr_a), // (num_points + 1) * sizeof(int32_t)); // int32_t *csr_column_ptr_a = thrust::raw_pointer_cast(d_indices.data()); // float *csr_values_a = thrust::raw_pointer_cast(d_values.data()); // // Copy the data // thrust::device_vector<int> d_vector_memory(csr_row_ptr_a, // csr_row_ptr_a + num_points + 1); // thrust::sequence(d_vector_memory.begin(), d_vector_memory.end(), // 0, static_cast<int32_t>(num_neighbors)); // thrust::copy(d_vector_memory.begin(), d_vector_memory.end(), csr_row_ptr_a); // hipDeviceSynchronize(); // // Initialize the matrix descriptor // hipsparseMatDescr_t matrix_descriptor; // hipsparseCreateMatDescr(&matrix_descriptor); // hipsparseSetMatType(matrix_descriptor, HIPSPARSE_MATRIX_TYPE_GENERAL); // hipsparseSetMatIndexBase(matrix_descriptor, 
HIPSPARSE_INDEX_BASE_ZERO); // // Sort the matrix properly // size_t permutation_buffer_byte_size = 0; // void *permutation_buffer = NULL; // int32_t *permutation = NULL; // // step 1: Allocate memory buffer // hipsparseXcsrsort_bufferSizeExt(handle, num_points, num_points, // num_points * num_neighbors, csr_row_ptr_a, // csr_column_ptr_a, &permutation_buffer_byte_size); // hipDeviceSynchronize(); // hipMalloc(&permutation_buffer, // sizeof(char) * permutation_buffer_byte_size); // // step 2: Setup permutation vector permutation to be the identity // hipMalloc(reinterpret_cast<void **>(&permutation), // sizeof(int32_t) * num_points * num_neighbors); // hipsparseCreateIdentityPermutation(handle, num_points * num_neighbors, // permutation); // hipDeviceSynchronize(); // // step 3: Sort CSR format // hipsparseXcsrsort(handle, num_points, num_points, // num_points * num_neighbors, matrix_descriptor, csr_row_ptr_a, // csr_column_ptr_a, permutation, permutation_buffer); // hipDeviceSynchronize(); // // step 4: Gather sorted csr_values // float *csr_values_a_sorted = nullptr; // hipMalloc(reinterpret_cast<void **>(&csr_values_a_sorted), // (num_points * num_neighbors) * sizeof(float)); // hipsparseSgthr(handle, num_points * num_neighbors, csr_values_a, // csr_values_a_sorted, permutation, HIPSPARSE_INDEX_BASE_ZERO); // hipDeviceSynchronize(); // // Free some memory // hipFree(permutation_buffer); // hipFree(permutation); // csr_values_a = csr_values_a_sorted; // // We need A^T, so we do a csr2csc() call // int32_t *csc_row_ptr_at = nullptr; // hipMalloc(reinterpret_cast<void **>(&csc_row_ptr_at), // (num_points * num_neighbors) * sizeof(int32_t)); // int32_t *csc_column_ptr_at = nullptr; // hipMalloc(reinterpret_cast<void **>(&csc_column_ptr_at), // (num_points + 1) * sizeof(int32_t)); // float *csc_values_at = nullptr; // hipMalloc(reinterpret_cast<void **>(&csc_values_at), // (num_points * num_neighbors) * sizeof(float)); // // TODO: Compute the CSR2CSC buffer // // Do 
the transpose operation // hipsparseScsr2csc(handle, num_points, num_points, // num_neighbors * num_points, csr_values_a, csr_row_ptr_a, // csr_column_ptr_a, csc_values_at, csc_row_ptr_at, // csc_column_ptr_at, HIPSPARSE_ACTION_NUMERIC, // CUSPARSE_INDEX_BASEa_ZERO); // hipDeviceSynchronize(); // // Now compute the output size of the matrix // int32_t base_C, num_nonzeros_C; // int32_t symmetrized_num_nonzeros = -1; // hipsparseSetPointerMode(handle, HIPSPARSE_POINTER_MODE_HOST); // d_symmetrized_rowptr.resize(num_points + 1); // hipsparseXcsrgeamNnz(handle, num_points, num_points, // matrix_descriptor, num_points * num_neighbors, csr_row_ptr_a, // csr_column_ptr_a, // matrix_descriptor, num_points * num_neighbors, csc_column_ptr_at, // csc_row_ptr_at, // matrix_descriptor, // thrust::raw_pointer_cast(d_symmetrized_rowptr.data()), // &symmetrized_num_nonzeros); // hipDeviceSynchronize(); // // Do some useful checking... // if (-1 != symmetrized_num_nonzeros) // { // num_nonzeros_C = symmetrized_num_nonzeros; // } // else // { // hipMemcpy(&num_nonzeros_C, // thrust::raw_pointer_cast(d_symmetrized_rowptr.data()) + // num_points, // sizeof(int32_t), hipMemcpyDeviceToHost); // hipMemcpy(&base_C, // thrust::raw_pointer_cast(d_symmetrized_rowptr.data()), // sizeof(int), hipMemcpyDeviceToHost); // } // // Allocate memory for the new summed array // d_symmetrized_colind.resize(num_nonzeros_C); // d_symmetrized_values.resize(num_nonzeros_C); // // Sum the arrays // float kAlpha = 1.0f / (2.0f * num_points); // float kBeta = 1.0f / (2.0f * num_points); // hipsparseScsrgeam(handle, num_points, num_points, // &kAlpha, matrix_descriptor, num_points * num_neighbors, // csr_values_a, csr_row_ptr_a, csr_column_ptr_a, // &kBeta, matrix_descriptor, num_points * num_neighbors, // csc_values_at, csc_column_ptr_at, csc_row_ptr_at, // matrix_descriptor, // thrust::raw_pointer_cast(d_symmetrized_values.data()), // thrust::raw_pointer_cast(d_symmetrized_rowptr.data()), // 
thrust::raw_pointer_cast(d_symmetrized_colind.data())); // hipDeviceSynchronize(); // // Free the memory we were using... // hipFree(csr_values_a); // hipFree(csc_values_at); // hipFree(csr_row_ptr_a); // hipFree(csc_column_ptr_at); // hipFree(csc_row_ptr_at); // } __global__ void tsnecuda::util::Csr2CooKernel(volatile int *__restrict__ coo_indices, const int *__restrict__ pij_row_ptr, const int *__restrict__ pij_col_ind, const int num_points, const int num_nonzero) { register int TID, i, j, start, end; TID = threadIdx.x + blockIdx.x * blockDim.x; if (TID >= num_nonzero) return; start = 0; end = num_points + 1; i = (num_points + 1) >> 1; while (end - start > 1) { j = pij_row_ptr[i]; end = (j > TID) ? i : end; start = (j <= TID) ? i : start; i = (start + end) >> 1; } j = pij_col_ind[TID]; coo_indices[2 * TID] = i; coo_indices[2 * TID + 1] = j; } void tsnecuda::util::Csr2Coo(tsnecuda::GpuOptions &gpu_opt, thrust::device_vector<int> &coo_indices, thrust::device_vector<int> &pij_row_ptr, thrust::device_vector<int> &pij_col_ind, const int num_points, const int num_nonzero) { const int num_threads = 1024; const int num_blocks = iDivUp(num_nonzero, num_threads); hipLaunchKernelGGL(( tsnecuda::util::Csr2CooKernel), dim3(num_blocks), dim3(num_threads), 0, 0, thrust::raw_pointer_cast(coo_indices.data()), thrust::raw_pointer_cast(pij_row_ptr.data()), thrust::raw_pointer_cast(pij_col_ind.data()), num_points, num_nonzero); GpuErrorCheck(hipDeviceSynchronize()); } __global__ void syv2k( float *const __restrict__ pij, const float *const __restrict__ pij_non_sym, const int *const __restrict__ pij_indices, const int num_points, const int num_neighbors) { register int TID, i, j, jend; register float pij_acc; TID = threadIdx.x + blockIdx.x * blockDim.x; if (TID >= num_points * num_neighbors) return; i = TID / num_neighbors; j = pij_indices[TID]; pij_acc = pij_non_sym[TID]; jend = (j + 1) * num_neighbors; for (register int jidx = j * num_neighbors; jidx < jend; jidx++) pij_acc += 
pij_indices[jidx] == i ? pij_non_sym[jidx] : 0.0f; pij[TID] = pij_acc / (2.0 * num_points); } void tsnecuda::util::SymmetrizeMatrixV2(thrust::device_vector<float> &pij_symmetrized, thrust::device_vector<float> &pij_unsymmetrized, thrust::device_vector<int32_t> &pij_indices, const int num_points, const int num_neighbors) { const int num_threads = 1024; const int num_blocks = iDivUp(num_points * num_neighbors, num_threads); hipLaunchKernelGGL(( syv2k), dim3(num_blocks), dim3(num_threads), 0, 0, thrust::raw_pointer_cast(pij_symmetrized.data()), thrust::raw_pointer_cast(pij_unsymmetrized.data()), thrust::raw_pointer_cast(pij_indices.data()), num_points, num_neighbors); GpuErrorCheck(hipDeviceSynchronize()); }
9951cd4667868d8fd93b2680211865ea00360912.cu
/** * @brief Implementation of the math_utils.h file * * @file math_utils.cu * @author David Chan * @date 2018-04-04 * Copyright (c) 2018, Regents of the University of California */ #include "util/math_utils.h" void tsnecuda::util::GaussianNormalizeDeviceVector(cublasHandle_t &handle, thrust::device_vector<float> &d_points, const int num_points, const int num_dims) { // Compute the means auto d_means = tsnecuda::util::ReduceMean(handle, d_points, num_points, num_dims, 0); // Zero-Center tsnecuda::util::BroadcastMatrixVector(d_points, d_means, num_points, num_dims, thrust::minus<float>(), 1, 1.f); // Compute the standard deviation thrust::device_vector<float> squared_vals(d_points.size()); tsnecuda::util::SquareDeviceVector(squared_vals, d_points); auto norm_sum_of_squares = tsnecuda::util::ReduceAlpha(handle, squared_vals, num_points, num_dims, 1.f / (num_points - 1), 0); thrust::device_vector<float> standard_deviation(norm_sum_of_squares.size()); tsnecuda::util::SqrtDeviceVector(standard_deviation, norm_sum_of_squares); // Normalize the values tsnecuda::util::BroadcastMatrixVector(d_points, standard_deviation, num_points, num_dims, thrust::divides<float>(), 1, 1.f); } void tsnecuda::util::SquareDeviceVector(thrust::device_vector<float> &d_out, const thrust::device_vector<float> &d_input) { thrust::transform(d_input.begin(), d_input.end(), d_out.begin(), tsnecuda::util::FunctionalSquare()); } void tsnecuda::util::SqrtDeviceVector(thrust::device_vector<float> &d_out, const thrust::device_vector<float> &d_input) { thrust::transform(d_input.begin(), d_input.end(), d_out.begin(), tsnecuda::util::FunctionalSqrt()); } float tsnecuda::util::L2NormDeviceVector( const thrust::device_vector<float> &d_vector) { return std::sqrt(thrust::transform_reduce(d_vector.begin(), d_vector.end(), tsnecuda::util::FunctionalSquare(), 0.0f, thrust::plus<float>())); } bool tsnecuda::util::AnyNanOrInfDeviceVector( const thrust::device_vector<float> &d_vector) { return 
thrust::transform_reduce(d_vector.begin(), d_vector.end(), tsnecuda::util::FunctionalNanOrInf(), 0, thrust::plus<bool>()); } void tsnecuda::util::MaxNormalizeDeviceVector( thrust::device_vector<float> &d_vector) { float max_val = thrust::transform_reduce(d_vector.begin(), d_vector.end(), tsnecuda::util::FunctionalAbs(), 0.0f, thrust::maximum<float>()); thrust::constant_iterator<float> division_iterator(max_val); thrust::transform(d_vector.begin(), d_vector.end(), division_iterator, d_vector.begin(), thrust::divides<float>()); } // Needs to compute pij = pj|i + pi|j / 2n // void tsnecuda::util::SymmetrizeMatrix(cusparseHandle_t &handle, // thrust::device_vector<float> &d_symmetrized_values, // thrust::device_vector<int32_t> &d_symmetrized_rowptr, // thrust::device_vector<int32_t> &d_symmetrized_colind, // thrust::device_vector<float> &d_values, // thrust::device_vector<int32_t> &d_indices, // const float magnitude_factor, // const int num_points, // const int num_neighbors) // { // // Allocate memory // int32_t *csr_row_ptr_a = nullptr; // cudaMalloc(reinterpret_cast<void **>(&csr_row_ptr_a), // (num_points + 1) * sizeof(int32_t)); // int32_t *csr_column_ptr_a = thrust::raw_pointer_cast(d_indices.data()); // float *csr_values_a = thrust::raw_pointer_cast(d_values.data()); // // Copy the data // thrust::device_vector<int> d_vector_memory(csr_row_ptr_a, // csr_row_ptr_a + num_points + 1); // thrust::sequence(d_vector_memory.begin(), d_vector_memory.end(), // 0, static_cast<int32_t>(num_neighbors)); // thrust::copy(d_vector_memory.begin(), d_vector_memory.end(), csr_row_ptr_a); // cudaDeviceSynchronize(); // // Initialize the matrix descriptor // cusparseMatDescr_t matrix_descriptor; // cusparseCreateMatDescr(&matrix_descriptor); // cusparseSetMatType(matrix_descriptor, CUSPARSE_MATRIX_TYPE_GENERAL); // cusparseSetMatIndexBase(matrix_descriptor, CUSPARSE_INDEX_BASE_ZERO); // // Sort the matrix properly // size_t permutation_buffer_byte_size = 0; // void 
*permutation_buffer = NULL; // int32_t *permutation = NULL; // // step 1: Allocate memory buffer // cusparseXcsrsort_bufferSizeExt(handle, num_points, num_points, // num_points * num_neighbors, csr_row_ptr_a, // csr_column_ptr_a, &permutation_buffer_byte_size); // cudaDeviceSynchronize(); // cudaMalloc(&permutation_buffer, // sizeof(char) * permutation_buffer_byte_size); // // step 2: Setup permutation vector permutation to be the identity // cudaMalloc(reinterpret_cast<void **>(&permutation), // sizeof(int32_t) * num_points * num_neighbors); // cusparseCreateIdentityPermutation(handle, num_points * num_neighbors, // permutation); // cudaDeviceSynchronize(); // // step 3: Sort CSR format // cusparseXcsrsort(handle, num_points, num_points, // num_points * num_neighbors, matrix_descriptor, csr_row_ptr_a, // csr_column_ptr_a, permutation, permutation_buffer); // cudaDeviceSynchronize(); // // step 4: Gather sorted csr_values // float *csr_values_a_sorted = nullptr; // cudaMalloc(reinterpret_cast<void **>(&csr_values_a_sorted), // (num_points * num_neighbors) * sizeof(float)); // cusparseSgthr(handle, num_points * num_neighbors, csr_values_a, // csr_values_a_sorted, permutation, CUSPARSE_INDEX_BASE_ZERO); // cudaDeviceSynchronize(); // // Free some memory // cudaFree(permutation_buffer); // cudaFree(permutation); // csr_values_a = csr_values_a_sorted; // // We need A^T, so we do a csr2csc() call // int32_t *csc_row_ptr_at = nullptr; // cudaMalloc(reinterpret_cast<void **>(&csc_row_ptr_at), // (num_points * num_neighbors) * sizeof(int32_t)); // int32_t *csc_column_ptr_at = nullptr; // cudaMalloc(reinterpret_cast<void **>(&csc_column_ptr_at), // (num_points + 1) * sizeof(int32_t)); // float *csc_values_at = nullptr; // cudaMalloc(reinterpret_cast<void **>(&csc_values_at), // (num_points * num_neighbors) * sizeof(float)); // // TODO: Compute the CSR2CSC buffer // // Do the transpose operation // cusparseScsr2csc(handle, num_points, num_points, // num_neighbors * 
num_points, csr_values_a, csr_row_ptr_a, // csr_column_ptr_a, csc_values_at, csc_row_ptr_at, // csc_column_ptr_at, CUSPARSE_ACTION_NUMERIC, // CUSPARSE_INDEX_BASEa_ZERO); // cudaDeviceSynchronize(); // // Now compute the output size of the matrix // int32_t base_C, num_nonzeros_C; // int32_t symmetrized_num_nonzeros = -1; // cusparseSetPointerMode(handle, CUSPARSE_POINTER_MODE_HOST); // d_symmetrized_rowptr.resize(num_points + 1); // cusparseXcsrgeamNnz(handle, num_points, num_points, // matrix_descriptor, num_points * num_neighbors, csr_row_ptr_a, // csr_column_ptr_a, // matrix_descriptor, num_points * num_neighbors, csc_column_ptr_at, // csc_row_ptr_at, // matrix_descriptor, // thrust::raw_pointer_cast(d_symmetrized_rowptr.data()), // &symmetrized_num_nonzeros); // cudaDeviceSynchronize(); // // Do some useful checking... // if (-1 != symmetrized_num_nonzeros) // { // num_nonzeros_C = symmetrized_num_nonzeros; // } // else // { // cudaMemcpy(&num_nonzeros_C, // thrust::raw_pointer_cast(d_symmetrized_rowptr.data()) + // num_points, // sizeof(int32_t), cudaMemcpyDeviceToHost); // cudaMemcpy(&base_C, // thrust::raw_pointer_cast(d_symmetrized_rowptr.data()), // sizeof(int), cudaMemcpyDeviceToHost); // } // // Allocate memory for the new summed array // d_symmetrized_colind.resize(num_nonzeros_C); // d_symmetrized_values.resize(num_nonzeros_C); // // Sum the arrays // float kAlpha = 1.0f / (2.0f * num_points); // float kBeta = 1.0f / (2.0f * num_points); // cusparseScsrgeam(handle, num_points, num_points, // &kAlpha, matrix_descriptor, num_points * num_neighbors, // csr_values_a, csr_row_ptr_a, csr_column_ptr_a, // &kBeta, matrix_descriptor, num_points * num_neighbors, // csc_values_at, csc_column_ptr_at, csc_row_ptr_at, // matrix_descriptor, // thrust::raw_pointer_cast(d_symmetrized_values.data()), // thrust::raw_pointer_cast(d_symmetrized_rowptr.data()), // thrust::raw_pointer_cast(d_symmetrized_colind.data())); // cudaDeviceSynchronize(); // // Free the memory we 
were using... // cudaFree(csr_values_a); // cudaFree(csc_values_at); // cudaFree(csr_row_ptr_a); // cudaFree(csc_column_ptr_at); // cudaFree(csc_row_ptr_at); // } __global__ void tsnecuda::util::Csr2CooKernel(volatile int *__restrict__ coo_indices, const int *__restrict__ pij_row_ptr, const int *__restrict__ pij_col_ind, const int num_points, const int num_nonzero) { register int TID, i, j, start, end; TID = threadIdx.x + blockIdx.x * blockDim.x; if (TID >= num_nonzero) return; start = 0; end = num_points + 1; i = (num_points + 1) >> 1; while (end - start > 1) { j = pij_row_ptr[i]; end = (j > TID) ? i : end; start = (j <= TID) ? i : start; i = (start + end) >> 1; } j = pij_col_ind[TID]; coo_indices[2 * TID] = i; coo_indices[2 * TID + 1] = j; } void tsnecuda::util::Csr2Coo(tsnecuda::GpuOptions &gpu_opt, thrust::device_vector<int> &coo_indices, thrust::device_vector<int> &pij_row_ptr, thrust::device_vector<int> &pij_col_ind, const int num_points, const int num_nonzero) { const int num_threads = 1024; const int num_blocks = iDivUp(num_nonzero, num_threads); tsnecuda::util::Csr2CooKernel<<<num_blocks, num_threads>>>(thrust::raw_pointer_cast(coo_indices.data()), thrust::raw_pointer_cast(pij_row_ptr.data()), thrust::raw_pointer_cast(pij_col_ind.data()), num_points, num_nonzero); GpuErrorCheck(cudaDeviceSynchronize()); } __global__ void syv2k( float *const __restrict__ pij, const float *const __restrict__ pij_non_sym, const int *const __restrict__ pij_indices, const int num_points, const int num_neighbors) { register int TID, i, j, jend; register float pij_acc; TID = threadIdx.x + blockIdx.x * blockDim.x; if (TID >= num_points * num_neighbors) return; i = TID / num_neighbors; j = pij_indices[TID]; pij_acc = pij_non_sym[TID]; jend = (j + 1) * num_neighbors; for (register int jidx = j * num_neighbors; jidx < jend; jidx++) pij_acc += pij_indices[jidx] == i ? 
pij_non_sym[jidx] : 0.0f; pij[TID] = pij_acc / (2.0 * num_points); } void tsnecuda::util::SymmetrizeMatrixV2(thrust::device_vector<float> &pij_symmetrized, thrust::device_vector<float> &pij_unsymmetrized, thrust::device_vector<int32_t> &pij_indices, const int num_points, const int num_neighbors) { const int num_threads = 1024; const int num_blocks = iDivUp(num_points * num_neighbors, num_threads); syv2k<<<num_blocks, num_threads>>>(thrust::raw_pointer_cast(pij_symmetrized.data()), thrust::raw_pointer_cast(pij_unsymmetrized.data()), thrust::raw_pointer_cast(pij_indices.data()), num_points, num_neighbors); GpuErrorCheck(cudaDeviceSynchronize()); }
dbb7c2bc2591773630e6564bbb62ad3cc08d6c54.hip
// !!! This is a file automatically generated by hipify!!! /* Collatz code for CS 4380 / CS 5351 Copyright (c) 2019 Texas State University. All rights reserved. Redistribution in source or binary form, with or without modification, is *not* permitted. Use in source and binary forms, with or without modification, is only permitted for academic use in CS 4380 or CS 5351 at Texas State University. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
Author: Martin Burtscher */ #include <cstdio> #include <hip/hip_runtime.h> static const int ThreadsPerBlock = 512; static int* d_maxlen; static __global__ void collatz(const long start, const long stop, int* const maxlen) { // todo: process odd values from start (assume start to be odd) to stop (inclusively if stop is odd) with one thread per value (based on code from previous project) //global index const long i = blockIdx.x * (long)blockDim.x; if(start+2*i<stop){ long val = start+2*i; int len = 1; while (val != 1) { len++; if ((val % 2) == 0) { val = val / 2; // even } else { val = 3 * val + 1; // odd } } //condition for potential raw error if(len > *maxlen)atomicMax(maxlen, len); } } void GPU_Init() { int maxlen = 0; if (hipSuccess != hipMalloc((void **)&d_maxlen, sizeof(int))) {fprintf(stderr, "ERROR: could not allocate memory\n"); exit(-1);} if (hipSuccess != hipMemcpy(d_maxlen, &maxlen, sizeof(int), hipMemcpyHostToDevice)) {fprintf(stderr, "ERROR: copying to device failed\n"); exit(-1);} } void GPU_Exec(const long start, const long stop) { if (start <= stop) { hipLaunchKernelGGL(( collatz), dim3(((stop - start + 2) / 2 + ThreadsPerBlock - 1) / ThreadsPerBlock), dim3(ThreadsPerBlock), 0, 0, start, stop, d_maxlen); } } int GPU_Fini() { int maxlen; // todo: copy the result from the device to the host and free the device memory if (hipSuccess != hipMemcpy(&maxlen, d_maxlen, sizeof(int), hipMemcpyDeviceToHost)) {fprintf(stderr, "ERROR: copying from device failed\n"); exit(-1);} hipFree(d_maxlen); return maxlen; }
dbb7c2bc2591773630e6564bbb62ad3cc08d6c54.cu
/* Collatz code for CS 4380 / CS 5351 Copyright (c) 2019 Texas State University. All rights reserved. Redistribution in source or binary form, with or without modification, is *not* permitted. Use in source and binary forms, with or without modification, is only permitted for academic use in CS 4380 or CS 5351 at Texas State University. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
Author: Martin Burtscher */ #include <cstdio> #include <cuda.h> static const int ThreadsPerBlock = 512; static int* d_maxlen; static __global__ void collatz(const long start, const long stop, int* const maxlen) { // todo: process odd values from start (assume start to be odd) to stop (inclusively if stop is odd) with one thread per value (based on code from previous project) //global index const long i = blockIdx.x * (long)blockDim.x; if(start+2*i<stop){ long val = start+2*i; int len = 1; while (val != 1) { len++; if ((val % 2) == 0) { val = val / 2; // even } else { val = 3 * val + 1; // odd } } //condition for potential raw error if(len > *maxlen)atomicMax(maxlen, len); } } void GPU_Init() { int maxlen = 0; if (cudaSuccess != cudaMalloc((void **)&d_maxlen, sizeof(int))) {fprintf(stderr, "ERROR: could not allocate memory\n"); exit(-1);} if (cudaSuccess != cudaMemcpy(d_maxlen, &maxlen, sizeof(int), cudaMemcpyHostToDevice)) {fprintf(stderr, "ERROR: copying to device failed\n"); exit(-1);} } void GPU_Exec(const long start, const long stop) { if (start <= stop) { collatz<<<((stop - start + 2) / 2 + ThreadsPerBlock - 1) / ThreadsPerBlock, ThreadsPerBlock>>>(start, stop, d_maxlen); } } int GPU_Fini() { int maxlen; // todo: copy the result from the device to the host and free the device memory if (cudaSuccess != cudaMemcpy(&maxlen, d_maxlen, sizeof(int), cudaMemcpyDeviceToHost)) {fprintf(stderr, "ERROR: copying from device failed\n"); exit(-1);} cudaFree(d_maxlen); return maxlen; }
25dee2bc501488afeb54988792fdad3b053821b9.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _COPY_KERNEL_H_
#define _COPY_KERNEL_H_

#include <stdio.h>

#define CHECK_BANK_CONFLICTS 0
#if CHECK_BANK_CONFLICTS
#define AS(i, j) \
  cutilBankChecker((reinterpret_cast<float *>(&As[0][0])), (block_size * i + j))
#define BS(i, j) \
  cutilBankChecker((reinterpret_cast<float *>(&Bs[0][0])), (block_size * i + j))
#else
#define AS(i, j) As[i][j]
#define BS(i, j) Bs[i][j]
#endif

#define ELEM_SIZE 2048

// Interleaves two separate field arrays (SoA) into one AoS destination:
//   d_dst[2*i]     = h_src_A[i]
//   d_dst[2*i + 1] = h_src_B[i]
// One thread handles one element pair. elem_size is currently unused and is
// kept only for interface compatibility with the other copy kernels.
// NOTE(review): elem_count is treated as the total number of floats in d_dst
// (consistent with copykernelAoS below, whose live code indexes the source at
// dst_idx/2 + elem_count/2) -- TODO confirm against callers.
// Fix: added a tail bounds guard; the original kernel had no guard at all, so
// any grid larger than the data wrote out of bounds.
template <int block_size, typename size_type>
__device__ void copykernelAoS_shared(float *h_src_A, float *h_src_B,
                                     float *d_dst, size_type elem_size,
                                     size_type elem_count) {
  (void)elem_size;  // unused; see note above

  // Flat global thread id over a 2D grid of 2D blocks.
  size_type bdx = blockDim.x;
  size_type bdy = blockDim.y;
  size_type tid = (blockIdx.x + blockIdx.y * gridDim.x) * (bdx * bdy) +
                  threadIdx.y * bdx + threadIdx.x;

  // Each thread writes one interleaved (A, B) pair.
  size_type dst_idx = tid * 2;
  if (dst_idx + 1 >= elem_count) {
    return;  // tail guard: the grid may be over-provisioned
  }
  d_dst[dst_idx] = h_src_A[tid];
  d_dst[dst_idx + 1] = h_src_B[tid];
}

// Copies a two-field source laid out as two contiguous half-arrays
// (field A in h_src[0, elem_count/2), field B in h_src[elem_count/2,
// elem_count)) into an interleaved destination: even destination slots come
// from the first half, odd slots from the second half. One thread per
// destination float. elem_size is unused.
// Fix: added a bounds guard (the original had none).
// TODO: calculate bandwidth!!
template <int block_size, typename size_type>
__device__ void copykernelAoS(float *h_src, float *d_dst, size_type elem_size,
                              size_type elem_count) {
  (void)elem_size;  // unused

  size_type bdx = blockDim.x;
  size_type bdy = blockDim.y;
  size_type dst_idx = (blockIdx.x + blockIdx.y * gridDim.x) * (bdx * bdy) +
                      threadIdx.y * bdx + threadIdx.x;
  if (dst_idx >= elem_count) {
    return;  // tail guard
  }
  if (dst_idx % 2 == 0) {
    d_dst[dst_idx] = h_src[dst_idx / 2];
  } else {
    d_dst[dst_idx] = h_src[dst_idx / 2 + elem_count / 2];
  }
}

// Straight element-wise copy: d_dst[i] = h_src[i], one thread per element.
// elem_size is unused. Fix: added a bounds guard (the original had none).
// TODO: calculate the bandwidth.
template <int block_size, typename size_type>
__device__ void copykernel(float *h_src, float *d_dst, size_type elem_size,
                           size_type elem_count) {
  (void)elem_size;  // unused

  size_type bdx = blockDim.x;
  size_type bdy = blockDim.y;
  size_type idx = (blockIdx.x + blockIdx.y * gridDim.x) * (bdx * bdy) +
                  threadIdx.y * bdx + threadIdx.x;
  if (idx < elem_count) {
    d_dst[idx] = h_src[idx];
  }
}

////////////////////////////////////////////////////////////////////////////////
//! Matrix multiplication on the device: C = A * B
//! wA is A's width and wB is B's width
//! Expects a 2D launch with block_size x block_size thread blocks.
////////////////////////////////////////////////////////////////////////////////
template <int block_size, typename size_type>
__device__ void matrixMul(float *C, float *A, float *B, size_type wA,
                          size_type wB) {
  // Block index
  size_type bx = blockIdx.x;
  size_type by = blockIdx.y;

  // Thread index
  size_type tx = threadIdx.x;
  size_type ty = threadIdx.y;

  // Index of the first sub-matrix of A processed by the block
  size_type aBegin = wA * block_size * by;

  // Index of the last sub-matrix of A processed by the block
  size_type aEnd = aBegin + wA - 1;

  // Step size used to iterate through the sub-matrices of A
  size_type aStep = block_size;

  // Index of the first sub-matrix of B processed by the block
  size_type bBegin = block_size * bx;

  // Step size used to iterate through the sub-matrices of B
  size_type bStep = block_size * wB;

  // Csub is used to store the element of the block sub-matrix
  // that is computed by the thread
  float Csub = 0;

  // Loop over all the sub-matrices of A and B
  // required to compute the block sub-matrix
  for (size_type a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) {
    // Declaration of the shared memory array As used to
    // store the sub-matrix of A
    __shared__ float As[block_size][block_size];

    // Declaration of the shared memory array Bs used to
    // store the sub-matrix of B
    __shared__ float Bs[block_size][block_size];

    // Load the matrices from device memory to shared memory;
    // each thread loads one element of each matrix
    AS(ty, tx) = A[a + wA * ty + tx];
    BS(ty, tx) = B[b + wB * ty + tx];

    // Synchronize to make sure the matrices are loaded
    __syncthreads();

    // Multiply the two matrices together;
    // each thread computes one element of the block sub-matrix
#pragma unroll
    for (size_type k = 0; k < block_size; ++k) Csub += AS(ty, k) * BS(k, tx);

    // Synchronize to make sure that the preceding computation is done before
    // loading two new sub-matrices of A and B in the next iteration
    __syncthreads();
  }

  // Write the block sub-matrix to device memory;
  // each thread writes one element
  size_type c = wB * block_size * by + block_size * bx;
  C[c + wB * ty + tx] = Csub;
}

// C wrappers around our template kernels.
// extern "C" __global__ void matrixMul_bs16_32bit(float *C, float *A, float *B,
//                                                 int wA, int wB) {
//   matrixMul<16, int>(C, A, B, wA, wB);
// }
// extern "C" __global__ void matrixMul_bs16_64bit(float *C, float *A, float *B,
//                                                 size_t wA, size_t wB) {
//   matrixMul<16, size_t>(C, A, B, wA, wB);
// }
extern "C" __global__ void copykernel32_32bit(float *h_src, float *d_dst,
                                              int wA, int wB) {
  copykernel<32, int>(h_src, d_dst, wA, wB);
}
extern "C" __global__ void copykernelAoS_shared32_32bit(float *h_src_A,
                                                        float *h_src_B,
                                                        float *d_dst, int wA,
                                                        int wB) {
  copykernelAoS_shared<32, int>(h_src_A, h_src_B, d_dst, wA, wB);
}
extern "C" __global__ void copykernelAoS32_32bit(float *h_src, float *d_dst,
                                                 int wA, int wB) {
  copykernelAoS<32, int>(h_src, d_dst, wA, wB);
}
extern "C" __global__ void matrixMul_bs32_32bit(float *C, float *A, float *B,
                                                int wA, int wB) {
  matrixMul<32, int>(C, A, B, wA, wB);
}
// extern "C" __global__ void matrixMul_bs32_64bit(float *C, float *A, float *B,
//                                                 size_t wA, size_t wB) {
//   matrixMul<32, size_t>(C, A, B, wA, wB);
// }

#endif  // #ifndef _COPY_KERNEL_H_
25dee2bc501488afeb54988792fdad3b053821b9.cu
#ifndef _COPY_KERNEL_H_
#define _COPY_KERNEL_H_

#include <stdio.h>

#define CHECK_BANK_CONFLICTS 0
#if CHECK_BANK_CONFLICTS
#define AS(i, j) \
  cutilBankChecker((reinterpret_cast<float *>(&As[0][0])), (block_size * i + j))
#define BS(i, j) \
  cutilBankChecker((reinterpret_cast<float *>(&Bs[0][0])), (block_size * i + j))
#else
#define AS(i, j) As[i][j]
#define BS(i, j) Bs[i][j]
#endif

#define ELEM_SIZE 2048

// Interleaves two separate field arrays (SoA) into one AoS destination:
//   d_dst[2*i]     = h_src_A[i]
//   d_dst[2*i + 1] = h_src_B[i]
// One thread handles one element pair. elem_size is currently unused and is
// kept only for interface compatibility with the other copy kernels.
// NOTE(review): elem_count is treated as the total number of floats in d_dst
// (consistent with copykernelAoS below, whose live code indexes the source at
// dst_idx/2 + elem_count/2) -- TODO confirm against callers.
// Fix: added a tail bounds guard; the original kernel had no guard at all, so
// any grid larger than the data wrote out of bounds.
template <int block_size, typename size_type>
__device__ void copykernelAoS_shared(float *h_src_A, float *h_src_B,
                                     float *d_dst, size_type elem_size,
                                     size_type elem_count) {
  (void)elem_size;  // unused; see note above

  // Flat global thread id over a 2D grid of 2D blocks.
  size_type bdx = blockDim.x;
  size_type bdy = blockDim.y;
  size_type tid = (blockIdx.x + blockIdx.y * gridDim.x) * (bdx * bdy) +
                  threadIdx.y * bdx + threadIdx.x;

  // Each thread writes one interleaved (A, B) pair.
  size_type dst_idx = tid * 2;
  if (dst_idx + 1 >= elem_count) {
    return;  // tail guard: the grid may be over-provisioned
  }
  d_dst[dst_idx] = h_src_A[tid];
  d_dst[dst_idx + 1] = h_src_B[tid];
}

// Copies a two-field source laid out as two contiguous half-arrays
// (field A in h_src[0, elem_count/2), field B in h_src[elem_count/2,
// elem_count)) into an interleaved destination: even destination slots come
// from the first half, odd slots from the second half. One thread per
// destination float. elem_size is unused.
// Fix: added a bounds guard (the original had none).
// TODO: calculate bandwidth!!
template <int block_size, typename size_type>
__device__ void copykernelAoS(float *h_src, float *d_dst, size_type elem_size,
                              size_type elem_count) {
  (void)elem_size;  // unused

  size_type bdx = blockDim.x;
  size_type bdy = blockDim.y;
  size_type dst_idx = (blockIdx.x + blockIdx.y * gridDim.x) * (bdx * bdy) +
                      threadIdx.y * bdx + threadIdx.x;
  if (dst_idx >= elem_count) {
    return;  // tail guard
  }
  if (dst_idx % 2 == 0) {
    d_dst[dst_idx] = h_src[dst_idx / 2];
  } else {
    d_dst[dst_idx] = h_src[dst_idx / 2 + elem_count / 2];
  }
}

// Straight element-wise copy: d_dst[i] = h_src[i], one thread per element.
// elem_size is unused. Fix: added a bounds guard (the original had none).
// TODO: calculate the bandwidth.
template <int block_size, typename size_type>
__device__ void copykernel(float *h_src, float *d_dst, size_type elem_size,
                           size_type elem_count) {
  (void)elem_size;  // unused

  size_type bdx = blockDim.x;
  size_type bdy = blockDim.y;
  size_type idx = (blockIdx.x + blockIdx.y * gridDim.x) * (bdx * bdy) +
                  threadIdx.y * bdx + threadIdx.x;
  if (idx < elem_count) {
    d_dst[idx] = h_src[idx];
  }
}

////////////////////////////////////////////////////////////////////////////////
//! Matrix multiplication on the device: C = A * B
//! wA is A's width and wB is B's width
//! Expects a 2D launch with block_size x block_size thread blocks.
////////////////////////////////////////////////////////////////////////////////
template <int block_size, typename size_type>
__device__ void matrixMul(float *C, float *A, float *B, size_type wA,
                          size_type wB) {
  // Block index
  size_type bx = blockIdx.x;
  size_type by = blockIdx.y;

  // Thread index
  size_type tx = threadIdx.x;
  size_type ty = threadIdx.y;

  // Index of the first sub-matrix of A processed by the block
  size_type aBegin = wA * block_size * by;

  // Index of the last sub-matrix of A processed by the block
  size_type aEnd = aBegin + wA - 1;

  // Step size used to iterate through the sub-matrices of A
  size_type aStep = block_size;

  // Index of the first sub-matrix of B processed by the block
  size_type bBegin = block_size * bx;

  // Step size used to iterate through the sub-matrices of B
  size_type bStep = block_size * wB;

  // Csub is used to store the element of the block sub-matrix
  // that is computed by the thread
  float Csub = 0;

  // Loop over all the sub-matrices of A and B
  // required to compute the block sub-matrix
  for (size_type a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) {
    // Declaration of the shared memory array As used to
    // store the sub-matrix of A
    __shared__ float As[block_size][block_size];

    // Declaration of the shared memory array Bs used to
    // store the sub-matrix of B
    __shared__ float Bs[block_size][block_size];

    // Load the matrices from device memory to shared memory;
    // each thread loads one element of each matrix
    AS(ty, tx) = A[a + wA * ty + tx];
    BS(ty, tx) = B[b + wB * ty + tx];

    // Synchronize to make sure the matrices are loaded
    __syncthreads();

    // Multiply the two matrices together;
    // each thread computes one element of the block sub-matrix
#pragma unroll
    for (size_type k = 0; k < block_size; ++k) Csub += AS(ty, k) * BS(k, tx);

    // Synchronize to make sure that the preceding computation is done before
    // loading two new sub-matrices of A and B in the next iteration
    __syncthreads();
  }

  // Write the block sub-matrix to device memory;
  // each thread writes one element
  size_type c = wB * block_size * by + block_size * bx;
  C[c + wB * ty + tx] = Csub;
}

// C wrappers around our template kernels.
// extern "C" __global__ void matrixMul_bs16_32bit(float *C, float *A, float *B,
//                                                 int wA, int wB) {
//   matrixMul<16, int>(C, A, B, wA, wB);
// }
// extern "C" __global__ void matrixMul_bs16_64bit(float *C, float *A, float *B,
//                                                 size_t wA, size_t wB) {
//   matrixMul<16, size_t>(C, A, B, wA, wB);
// }
extern "C" __global__ void copykernel32_32bit(float *h_src, float *d_dst,
                                              int wA, int wB) {
  copykernel<32, int>(h_src, d_dst, wA, wB);
}
extern "C" __global__ void copykernelAoS_shared32_32bit(float *h_src_A,
                                                        float *h_src_B,
                                                        float *d_dst, int wA,
                                                        int wB) {
  copykernelAoS_shared<32, int>(h_src_A, h_src_B, d_dst, wA, wB);
}
extern "C" __global__ void copykernelAoS32_32bit(float *h_src, float *d_dst,
                                                 int wA, int wB) {
  copykernelAoS<32, int>(h_src, d_dst, wA, wB);
}
extern "C" __global__ void matrixMul_bs32_32bit(float *C, float *A, float *B,
                                                int wA, int wB) {
  matrixMul<32, int>(C, A, B, wA, wB);
}
// extern "C" __global__ void matrixMul_bs32_64bit(float *C, float *A, float *B,
//                                                 size_t wA, size_t wB) {
//   matrixMul<32, size_t>(C, A, B, wA, wB);
// }

#endif  // #ifndef _COPY_KERNEL_H_
a1e8735a4cfb80574ae4f6c2a92b977faa66f75e.hip
// !!! This is a file automatically generated by hipify!!! #include <algorithm> #include <atomic> #include <cstdlib> #include <string> #include <unordered_map> #include "caffe2/core/THCCachingAllocator_gpu.h" #include "hipcub/hipcub.hpp" // Needed to be included first to check the CAFFE2_USE_CUDNN macros. #include "caffe2/core/macros.h" #include "caffe2/core/asan.h" #include "caffe2/core/blob_stats.h" #ifdef CAFFE2_USE_CUDNN #include "caffe2/core/common_cudnn.h" #endif // CAFFE2_USE_CUDNN #include "caffe2/core/context_gpu.h" #include "caffe2/core/init.h" #include "caffe2/core/logging.h" #include "caffe2/core/tensor.h" #include "caffe2/utils/string_utils.h" CAFFE2_DEFINE_string( caffe2_cuda_memory_pool, "", "Sets the memory pool used by caffe2. Possible values are " "none, cnmem, thc and cub."); // For description of CUB caching allocator configuration, see // https://nvlabs.github.io/cub/structcub_1_1_caching_device_allocator.html CAFFE2_DEFINE_int(caffe2_cub_bin_growth, 8, "If using cub as the memory allocator, sets the growth of bins " "used by the cub pool."); CAFFE2_DEFINE_int(caffe2_cub_min_bin, 3, "If using cub as the memory allocator, sets the min number of " "bins."); CAFFE2_DEFINE_int(caffe2_cub_max_bin, 10, "If using cub as the memory allocator, sets the max number of " "bins."); CAFFE2_DEFINE_int(caffe2_cub_max_managed_mb, 10 * 1024, "If using cub as the memory allocators, sets the maximum amount " "of memory managed in gigabytes"); CAFFE2_DEFINE_bool( caffe2_cub_print_allocation_events, false, "If true CachingDeviceAllocator will print allocation and deallocation " "events to stdout."); CAFFE2_DEFINE_bool( caffe2_gpu_memory_tracking, false, "If set, logs changes in GPU memory allocations"); CAFFE2_DEFINE_int( caffe2_gpu_memory_report_interval_mb, 128, "The threshold in MB on how frequently to report memory changes"); namespace caffe2 { ThreadLocalCUDAObjects& CUDAContext::getCudaObjects() { static thread_local ThreadLocalCUDAObjects cuda_objects_; 
return cuda_objects_; } // TODO(jiayq): these variables shouldn't be currently accessed during static // initialization. We should consider moving them to a Mayer's singleton to // be totally safe against SIOF. // Static global variables for setting up the memory pool. CudaMemoryPoolType g_cuda_memory_pool_type; std::unique_ptr<hipcub::CachingDeviceAllocator> g_cub_allocator; std::unique_ptr<THCCachingAllocator> g_thc_allocator; // an unordered map that holds the map from the cuda memory pointer to the // device id that it is allocated from. This is used in the cuda memory pool // cases, where we need the device id to carry out the deletion. // Note(jiayq): an alternate approach is to use cudaGetPointerAttributes, but // that is usually quite slow. We might want to benchmark the speed difference // though. // Note(jiayq): another alternate approach is to augment the Tensor class that // would allow one to record the device id. However, this does not address any // non-tensor allocation and deallocation. // Ideally, a memory pool should already have the device id information, as // long as we are using UVA (as of CUDA 5 and later) so the addresses are // unique. static std::unordered_map<void*, uint8_t> g_cuda_device_affiliation; // Data structures for optional memory tracking. Access to these structures // is garded by the CUDAContext::mutex. static std::unordered_map<void*, long> g_size_map; static std::vector<long> g_total_by_gpu_map(CAFFE2_COMPILE_TIME_MAX_GPUS, 0); static std::vector<long> g_max_by_gpu_map(CAFFE2_COMPILE_TIME_MAX_GPUS, 0); static long g_total_mem = 0; static long g_last_rep = 0; CudaMemoryPoolType GetCudaMemoryPoolType() { return g_cuda_memory_pool_type; } /////////////////////////////////////////////////////////////////////////////// // A wrapper to allow us to lazily initialize all cuda environments that Caffe // uses. 
This gets done the first time a caffe2::CUDAContext::New() gets called // which is probably the decisive indication that this caffe2 run is going to // use GPUs. We avoid cuda initialization with core/init.h functionalities so // that we have minimal resource impact in case we will need to run multiple // caffe2 instances on a GPU machine. /////////////////////////////////////////////////////////////////////////////// static void Caffe2InitializeCuda() { // If the current run does not have any cuda devices, do nothing. if (!HasCudaGPU()) { VLOG(1) << "No cuda gpu present. Skipping."; return; } // Check if the number of GPUs matches the expected compile-time max number // of GPUs. CAFFE_ENFORCE_LE( NumCudaDevices(), CAFFE2_COMPILE_TIME_MAX_GPUS, "Number of CUDA devices on the machine is larger than the compiled " "max number of gpus expected (", CAFFE2_COMPILE_TIME_MAX_GPUS, "). Increase that and recompile the caffe binary."); for (int i = 0; i < NumCudaDevices(); ++i) { DeviceGuard g(i); // Enable peer access. const int peer_group = i / CAFFE2_CUDA_MAX_PEER_SIZE; const int peer_start = peer_group * CAFFE2_CUDA_MAX_PEER_SIZE; const int peer_end = ::min( NumCudaDevices(), (peer_group + 1) * CAFFE2_CUDA_MAX_PEER_SIZE); VLOG(1) << "Enabling peer access within group #" << peer_group << ", from gpuid " << peer_start << " to " << peer_end - 1 << ", for gpuid " << i << "."; for (int j = peer_start; j < peer_end; ++j) { if (i == j) continue; int can_access; CUDA_ENFORCE(hipDeviceCanAccessPeer(&can_access, i, j)); if (can_access) { VLOG(1) << "Enabling peer access from " << i << " to " << j; // Note: just for future reference, the 0 here is not a gpu id, it is // a reserved flag for hipDeviceEnablePeerAccess that should always be // zero currently. 
CUDA_ENFORCE(hipDeviceEnablePeerAccess(j, 0)); } } } #ifdef CAFFE2_USE_CUDNN // Check the versions of cuDNN that were compiled and linked with are compatible CheckCuDNNVersions(); #endif // CAFFE2_USE_CUDNN } static void SetUpCub() { VLOG(1) << "Setting up cub memory pool."; // Sets up the cub memory pool try { g_cub_allocator.reset(new hipcub::CachingDeviceAllocator( FLAGS_caffe2_cub_bin_growth, FLAGS_caffe2_cub_min_bin, FLAGS_caffe2_cub_max_bin, size_t(FLAGS_caffe2_cub_max_managed_mb) * 1024L * 1024L, false, FLAGS_caffe2_cub_print_allocation_events)); } catch (...) { CAFFE_THROW("Some error happened at cub initialization."); } VLOG(1) << "Done setting up cub memory pool."; } static void Caffe2SetCUDAMemoryPool() { if (FLAGS_caffe2_cuda_memory_pool == "" || FLAGS_caffe2_cuda_memory_pool == "none") { g_cuda_memory_pool_type = CudaMemoryPoolType::NONE; } else if (FLAGS_caffe2_cuda_memory_pool == "cnmem") { CAFFE_THROW("CNMEM is no longer used by Caffe2. Use cub instead. " "This error message may go away in the future."); } else if (FLAGS_caffe2_cuda_memory_pool == "cub") { // Sets up cub. g_cuda_memory_pool_type = CudaMemoryPoolType::CUB; SetUpCub(); } else if (FLAGS_caffe2_cuda_memory_pool == "thc") { g_cuda_memory_pool_type = CudaMemoryPoolType::THC; g_thc_allocator.reset(new THCCachingAllocator()); } else { CAFFE_THROW("Unrecognized cuda memory pool type: ", FLAGS_caffe2_cuda_memory_pool); } } // An initialization function that sets the CPU side to use pinned cpu // allocator. void Caffe2UsePinnedCPUAllocator() { #if CAFFE2_ASAN_ENABLED // Note(jiayq): for more details, see // https://github.com/google/sanitizers/issues/629 LOG(WARNING) << "There are known issues between address sanitizer and " "hipHostMalloc. As a result, caffe2 will not enable pinned " "memory allocation in asan mode. If you are expecting any " "behavior that depends on asan, be advised that it is not " "turned on."; #else if (!HasCudaGPU()) { VLOG(1) << "No GPU present. 
I won't use pinned allocator then."; return; } VLOG(1) << "Caffe2 gpu: setting CPUAllocator to PinnedCPUAllocator."; SetCPUAllocator(new PinnedCPUAllocator()); #endif } // Caffe2CudaInitializerHelper is a minimal struct whose sole purpose is to // detect the first hint that this Caffe2 run is going to use GPU: either // CUDAContext is initialized or CUDAContext::New is called. It then runs // all the related cuda initialization functions. namespace { struct Caffe2CudaInitializerHelper { Caffe2CudaInitializerHelper() { // We cannot use bool because nvcc changes bool to __nv_bool which does // not have a std::atomic instantiation. static std::atomic<char> first_call(1); if (first_call.fetch_and((char)0)) { Caffe2InitializeCuda(); Caffe2SetCUDAMemoryPool(); Caffe2UsePinnedCPUAllocator(); } } }; } // namespace /** * A utility function to rectify the gpu id. If the context specifies the * gpu id to be -1, it means that we will just use the current gpu id when * the function is being called. */ static inline int RectifyGPUID(const int gpu_id) { return gpu_id == -1 ? CaffeCudaGetDevice() : gpu_id; } CUDAContext::CUDAContext(const int gpu_id) : gpu_id_(RectifyGPUID(gpu_id)), random_seed_(RandomNumberSeed()) { static Caffe2CudaInitializerHelper g_cuda_initializer_; } CUDAContext::CUDAContext(const DeviceOption& option) : gpu_id_( option.has_cuda_gpu_id() ? RectifyGPUID(option.cuda_gpu_id()) : CaffeCudaGetDevice()), random_seed_( option.has_random_seed() ? 
option.random_seed() : RandomNumberSeed()) { static Caffe2CudaInitializerHelper g_cuda_initializer_; DCHECK_EQ(option.device_type(), CUDA); } // shared mutex to lock out alloc / free during NCCL launches std::mutex& CUDAContext::mutex() { static std::mutex m; return m; } std::vector<long> CUDAContext::TotalMemoryByGpu() { std::lock_guard<std::mutex> lock(CUDAContext::mutex()); CAFFE_ENFORCE( FLAGS_caffe2_gpu_memory_tracking, "Pass --caffe2_gpu_memory_tracking to enable memory stats"); return g_total_by_gpu_map; } std::vector<long> CUDAContext::MaxMemoryByGpu() { std::lock_guard<std::mutex> lock(CUDAContext::mutex()); CAFFE_ENFORCE( FLAGS_caffe2_gpu_memory_tracking, "Pass --caffe2_gpu_memory_tracking to enable memory stats"); return g_max_by_gpu_map; } namespace { void TrackMemoryAlloc(size_t nbytes) { int this_gpu = CaffeCudaGetDevice(); g_total_by_gpu_map[this_gpu] += nbytes; g_max_by_gpu_map[this_gpu] = max(g_max_by_gpu_map[this_gpu], g_total_by_gpu_map[this_gpu]); g_total_mem += nbytes; if (g_total_mem - g_last_rep > FLAGS_caffe2_gpu_memory_report_interval_mb * 1024 * 1024) { for (int gpu = 0; gpu < g_total_by_gpu_map.size(); gpu++) { long t = g_total_by_gpu_map[gpu]; long max_t = g_max_by_gpu_map[gpu]; if (max_t > 0) { if (max_t != t) { LOG(INFO) << "GPU " << gpu << ": " << t / 1024 / 1024 << " MB" << " (max: " << max_t / 1024 / 1024 << " MB)"; } else { LOG(INFO) << "GPU " << gpu << ": " << t / 1024 / 1024 << " MB"; } } } LOG(INFO) << "Total: " << g_total_mem / 1024 / 1024 << " MB"; g_last_rep = g_total_mem; } } } std::pair<void*, MemoryDeleter> CUDAStaticContext::New(size_t nbytes) const { // Lock the mutex std::lock_guard<std::mutex> lock(CUDAContext::mutex()); // A one-time caffe2 cuda initializer. 
static Caffe2CudaInitializerHelper g_cuda_initializer_; void* ptr = nullptr; if (FLAGS_caffe2_gpu_memory_tracking) { TrackMemoryAlloc(nbytes); } switch (g_cuda_memory_pool_type) { case CudaMemoryPoolType::NONE: CUDA_ENFORCE(hipMalloc(&ptr, nbytes)); if (FLAGS_caffe2_gpu_memory_tracking) { g_size_map[ptr] = nbytes; g_cuda_device_affiliation[ptr] = CaffeCudaGetDevice(); } return {ptr, Delete}; case CudaMemoryPoolType::CUB: CUDA_ENFORCE(g_cub_allocator->DeviceAllocate(&ptr, nbytes)); g_cuda_device_affiliation[ptr] = CaffeCudaGetDevice(); VLOG(2) << "CUB allocating pointer " << ptr << " on device " << CaffeCudaGetDevice(); if (FLAGS_caffe2_gpu_memory_tracking) { g_size_map[ptr] = nbytes; } return {ptr, Delete}; case CudaMemoryPoolType::THC: CUDA_ENFORCE(g_thc_allocator->Alloc(&ptr, nbytes, 0 /* stream */)); if (FLAGS_caffe2_gpu_memory_tracking) { g_size_map[ptr] = nbytes; g_cuda_device_affiliation[ptr] = CaffeCudaGetDevice(); } return {ptr, Delete}; } return {nullptr, Delete}; } void CUDAStaticContext::Delete(void* ptr) { // lock the mutex std::lock_guard<std::mutex> lock(CUDAContext::mutex()); if (FLAGS_caffe2_gpu_memory_tracking) { auto sz_it = g_size_map.find(ptr); DCHECK(sz_it != g_size_map.end()); auto aff_it = g_cuda_device_affiliation.find(ptr); DCHECK(aff_it != g_cuda_device_affiliation.end()); g_total_mem -= sz_it->second; g_total_by_gpu_map[aff_it->second] -= sz_it->second; g_size_map.erase(sz_it); } switch (g_cuda_memory_pool_type) { case CudaMemoryPoolType::NONE: { // If memory pool is not set up, use simple hipFree. hipError_t error = hipFree(ptr); // For some reason, in Python runtime we sometimes delete a data pointer // after the cuda runtime exits - this is odd but is probably caused by // a static workspace that pycaffe2 uses, and the destruction got // entangled in some race condition. Anyway, since cuda runtime is exiting // anyway, we will not need to worry about memory leak, so we basically // ignore it. 
This is definitely not ideal but works for now. if (error != hipSuccess && error != hipErrorDeinitialized) { LOG(FATAL) << "Error at: " << __FILE__ << ":" << __LINE__ << ": " << hipGetErrorString(error); } if (FLAGS_caffe2_gpu_memory_tracking) { g_cuda_device_affiliation.erase(g_cuda_device_affiliation.find(ptr)); } break; } case CudaMemoryPoolType::CUB: { auto it = g_cuda_device_affiliation.find(ptr); DCHECK(it != g_cuda_device_affiliation.end()); VLOG(2) << "CUB freeing pointer " << ptr << " on device " << it->second; CUDA_ENFORCE(g_cub_allocator->DeviceFree(it->second, ptr)); g_cuda_device_affiliation.erase(it); break; } case CudaMemoryPoolType::THC: { CUDA_ENFORCE(g_thc_allocator->Free(ptr)); if (FLAGS_caffe2_gpu_memory_tracking) { g_cuda_device_affiliation.erase(g_cuda_device_affiliation.find(ptr)); } break; } } } BaseStaticContext* GetCUDAStaticContext() { static CUDAStaticContext context; return &context; } REGISTER_STATIC_CONTEXT(CUDA, GetCUDAStaticContext()); } // namespace caffe2
a1e8735a4cfb80574ae4f6c2a92b977faa66f75e.cu
#include <algorithm> #include <atomic> #include <cstdlib> #include <string> #include <unordered_map> #include "caffe2/core/THCCachingAllocator_gpu.h" #include "cub/util_allocator.cuh" // Needed to be included first to check the CAFFE2_USE_CUDNN macros. #include "caffe2/core/macros.h" #include "caffe2/core/asan.h" #include "caffe2/core/blob_stats.h" #ifdef CAFFE2_USE_CUDNN #include "caffe2/core/common_cudnn.h" #endif // CAFFE2_USE_CUDNN #include "caffe2/core/context_gpu.h" #include "caffe2/core/init.h" #include "caffe2/core/logging.h" #include "caffe2/core/tensor.h" #include "caffe2/utils/string_utils.h" CAFFE2_DEFINE_string( caffe2_cuda_memory_pool, "", "Sets the memory pool used by caffe2. Possible values are " "none, cnmem, thc and cub."); // For description of CUB caching allocator configuration, see // https://nvlabs.github.io/cub/structcub_1_1_caching_device_allocator.html CAFFE2_DEFINE_int(caffe2_cub_bin_growth, 8, "If using cub as the memory allocator, sets the growth of bins " "used by the cub pool."); CAFFE2_DEFINE_int(caffe2_cub_min_bin, 3, "If using cub as the memory allocator, sets the min number of " "bins."); CAFFE2_DEFINE_int(caffe2_cub_max_bin, 10, "If using cub as the memory allocator, sets the max number of " "bins."); CAFFE2_DEFINE_int(caffe2_cub_max_managed_mb, 10 * 1024, "If using cub as the memory allocators, sets the maximum amount " "of memory managed in gigabytes"); CAFFE2_DEFINE_bool( caffe2_cub_print_allocation_events, false, "If true CachingDeviceAllocator will print allocation and deallocation " "events to stdout."); CAFFE2_DEFINE_bool( caffe2_gpu_memory_tracking, false, "If set, logs changes in GPU memory allocations"); CAFFE2_DEFINE_int( caffe2_gpu_memory_report_interval_mb, 128, "The threshold in MB on how frequently to report memory changes"); namespace caffe2 { ThreadLocalCUDAObjects& CUDAContext::getCudaObjects() { static thread_local ThreadLocalCUDAObjects cuda_objects_; return cuda_objects_; } // TODO(jiayq): these variables 
shouldn't be currently accessed during static // initialization. We should consider moving them to a Mayer's singleton to // be totally safe against SIOF. // Static global variables for setting up the memory pool. CudaMemoryPoolType g_cuda_memory_pool_type; std::unique_ptr<cub::CachingDeviceAllocator> g_cub_allocator; std::unique_ptr<THCCachingAllocator> g_thc_allocator; // an unordered map that holds the map from the cuda memory pointer to the // device id that it is allocated from. This is used in the cuda memory pool // cases, where we need the device id to carry out the deletion. // Note(jiayq): an alternate approach is to use cudaGetPointerAttributes, but // that is usually quite slow. We might want to benchmark the speed difference // though. // Note(jiayq): another alternate approach is to augment the Tensor class that // would allow one to record the device id. However, this does not address any // non-tensor allocation and deallocation. // Ideally, a memory pool should already have the device id information, as // long as we are using UVA (as of CUDA 5 and later) so the addresses are // unique. static std::unordered_map<void*, uint8_t> g_cuda_device_affiliation; // Data structures for optional memory tracking. Access to these structures // is garded by the CUDAContext::mutex. static std::unordered_map<void*, long> g_size_map; static std::vector<long> g_total_by_gpu_map(CAFFE2_COMPILE_TIME_MAX_GPUS, 0); static std::vector<long> g_max_by_gpu_map(CAFFE2_COMPILE_TIME_MAX_GPUS, 0); static long g_total_mem = 0; static long g_last_rep = 0; CudaMemoryPoolType GetCudaMemoryPoolType() { return g_cuda_memory_pool_type; } /////////////////////////////////////////////////////////////////////////////// // A wrapper to allow us to lazily initialize all cuda environments that Caffe // uses. This gets done the first time a caffe2::CUDAContext::New() gets called // which is probably the decisive indication that this caffe2 run is going to // use GPUs. 
We avoid cuda initialization with core/init.h functionalities so // that we have minimal resource impact in case we will need to run multiple // caffe2 instances on a GPU machine. /////////////////////////////////////////////////////////////////////////////// static void Caffe2InitializeCuda() { // If the current run does not have any cuda devices, do nothing. if (!HasCudaGPU()) { VLOG(1) << "No cuda gpu present. Skipping."; return; } // Check if the number of GPUs matches the expected compile-time max number // of GPUs. CAFFE_ENFORCE_LE( NumCudaDevices(), CAFFE2_COMPILE_TIME_MAX_GPUS, "Number of CUDA devices on the machine is larger than the compiled " "max number of gpus expected (", CAFFE2_COMPILE_TIME_MAX_GPUS, "). Increase that and recompile the caffe binary."); for (int i = 0; i < NumCudaDevices(); ++i) { DeviceGuard g(i); // Enable peer access. const int peer_group = i / CAFFE2_CUDA_MAX_PEER_SIZE; const int peer_start = peer_group * CAFFE2_CUDA_MAX_PEER_SIZE; const int peer_end = std::min( NumCudaDevices(), (peer_group + 1) * CAFFE2_CUDA_MAX_PEER_SIZE); VLOG(1) << "Enabling peer access within group #" << peer_group << ", from gpuid " << peer_start << " to " << peer_end - 1 << ", for gpuid " << i << "."; for (int j = peer_start; j < peer_end; ++j) { if (i == j) continue; int can_access; CUDA_ENFORCE(cudaDeviceCanAccessPeer(&can_access, i, j)); if (can_access) { VLOG(1) << "Enabling peer access from " << i << " to " << j; // Note: just for future reference, the 0 here is not a gpu id, it is // a reserved flag for cudaDeviceEnablePeerAccess that should always be // zero currently. 
CUDA_ENFORCE(cudaDeviceEnablePeerAccess(j, 0)); } } } #ifdef CAFFE2_USE_CUDNN // Check the versions of cuDNN that were compiled and linked with are compatible CheckCuDNNVersions(); #endif // CAFFE2_USE_CUDNN } static void SetUpCub() { VLOG(1) << "Setting up cub memory pool."; // Sets up the cub memory pool try { g_cub_allocator.reset(new cub::CachingDeviceAllocator( FLAGS_caffe2_cub_bin_growth, FLAGS_caffe2_cub_min_bin, FLAGS_caffe2_cub_max_bin, size_t(FLAGS_caffe2_cub_max_managed_mb) * 1024L * 1024L, false, FLAGS_caffe2_cub_print_allocation_events)); } catch (...) { CAFFE_THROW("Some error happened at cub initialization."); } VLOG(1) << "Done setting up cub memory pool."; } static void Caffe2SetCUDAMemoryPool() { if (FLAGS_caffe2_cuda_memory_pool == "" || FLAGS_caffe2_cuda_memory_pool == "none") { g_cuda_memory_pool_type = CudaMemoryPoolType::NONE; } else if (FLAGS_caffe2_cuda_memory_pool == "cnmem") { CAFFE_THROW("CNMEM is no longer used by Caffe2. Use cub instead. " "This error message may go away in the future."); } else if (FLAGS_caffe2_cuda_memory_pool == "cub") { // Sets up cub. g_cuda_memory_pool_type = CudaMemoryPoolType::CUB; SetUpCub(); } else if (FLAGS_caffe2_cuda_memory_pool == "thc") { g_cuda_memory_pool_type = CudaMemoryPoolType::THC; g_thc_allocator.reset(new THCCachingAllocator()); } else { CAFFE_THROW("Unrecognized cuda memory pool type: ", FLAGS_caffe2_cuda_memory_pool); } } // An initialization function that sets the CPU side to use pinned cpu // allocator. void Caffe2UsePinnedCPUAllocator() { #if CAFFE2_ASAN_ENABLED // Note(jiayq): for more details, see // https://github.com/google/sanitizers/issues/629 LOG(WARNING) << "There are known issues between address sanitizer and " "cudaMallocHost. As a result, caffe2 will not enable pinned " "memory allocation in asan mode. If you are expecting any " "behavior that depends on asan, be advised that it is not " "turned on."; #else if (!HasCudaGPU()) { VLOG(1) << "No GPU present. 
I won't use pinned allocator then."; return; } VLOG(1) << "Caffe2 gpu: setting CPUAllocator to PinnedCPUAllocator."; SetCPUAllocator(new PinnedCPUAllocator()); #endif } // Caffe2CudaInitializerHelper is a minimal struct whose sole purpose is to // detect the first hint that this Caffe2 run is going to use GPU: either // CUDAContext is initialized or CUDAContext::New is called. It then runs // all the related cuda initialization functions. namespace { struct Caffe2CudaInitializerHelper { Caffe2CudaInitializerHelper() { // We cannot use bool because nvcc changes bool to __nv_bool which does // not have a std::atomic instantiation. static std::atomic<char> first_call(1); if (first_call.fetch_and((char)0)) { Caffe2InitializeCuda(); Caffe2SetCUDAMemoryPool(); Caffe2UsePinnedCPUAllocator(); } } }; } // namespace /** * A utility function to rectify the gpu id. If the context specifies the * gpu id to be -1, it means that we will just use the current gpu id when * the function is being called. */ static inline int RectifyGPUID(const int gpu_id) { return gpu_id == -1 ? CaffeCudaGetDevice() : gpu_id; } CUDAContext::CUDAContext(const int gpu_id) : gpu_id_(RectifyGPUID(gpu_id)), random_seed_(RandomNumberSeed()) { static Caffe2CudaInitializerHelper g_cuda_initializer_; } CUDAContext::CUDAContext(const DeviceOption& option) : gpu_id_( option.has_cuda_gpu_id() ? RectifyGPUID(option.cuda_gpu_id()) : CaffeCudaGetDevice()), random_seed_( option.has_random_seed() ? 
option.random_seed() : RandomNumberSeed()) { static Caffe2CudaInitializerHelper g_cuda_initializer_; DCHECK_EQ(option.device_type(), CUDA); } // shared mutex to lock out alloc / free during NCCL launches std::mutex& CUDAContext::mutex() { static std::mutex m; return m; } std::vector<long> CUDAContext::TotalMemoryByGpu() { std::lock_guard<std::mutex> lock(CUDAContext::mutex()); CAFFE_ENFORCE( FLAGS_caffe2_gpu_memory_tracking, "Pass --caffe2_gpu_memory_tracking to enable memory stats"); return g_total_by_gpu_map; } std::vector<long> CUDAContext::MaxMemoryByGpu() { std::lock_guard<std::mutex> lock(CUDAContext::mutex()); CAFFE_ENFORCE( FLAGS_caffe2_gpu_memory_tracking, "Pass --caffe2_gpu_memory_tracking to enable memory stats"); return g_max_by_gpu_map; } namespace { void TrackMemoryAlloc(size_t nbytes) { int this_gpu = CaffeCudaGetDevice(); g_total_by_gpu_map[this_gpu] += nbytes; g_max_by_gpu_map[this_gpu] = max(g_max_by_gpu_map[this_gpu], g_total_by_gpu_map[this_gpu]); g_total_mem += nbytes; if (g_total_mem - g_last_rep > FLAGS_caffe2_gpu_memory_report_interval_mb * 1024 * 1024) { for (int gpu = 0; gpu < g_total_by_gpu_map.size(); gpu++) { long t = g_total_by_gpu_map[gpu]; long max_t = g_max_by_gpu_map[gpu]; if (max_t > 0) { if (max_t != t) { LOG(INFO) << "GPU " << gpu << ": " << t / 1024 / 1024 << " MB" << " (max: " << max_t / 1024 / 1024 << " MB)"; } else { LOG(INFO) << "GPU " << gpu << ": " << t / 1024 / 1024 << " MB"; } } } LOG(INFO) << "Total: " << g_total_mem / 1024 / 1024 << " MB"; g_last_rep = g_total_mem; } } } std::pair<void*, MemoryDeleter> CUDAStaticContext::New(size_t nbytes) const { // Lock the mutex std::lock_guard<std::mutex> lock(CUDAContext::mutex()); // A one-time caffe2 cuda initializer. 
static Caffe2CudaInitializerHelper g_cuda_initializer_; void* ptr = nullptr; if (FLAGS_caffe2_gpu_memory_tracking) { TrackMemoryAlloc(nbytes); } switch (g_cuda_memory_pool_type) { case CudaMemoryPoolType::NONE: CUDA_ENFORCE(cudaMalloc(&ptr, nbytes)); if (FLAGS_caffe2_gpu_memory_tracking) { g_size_map[ptr] = nbytes; g_cuda_device_affiliation[ptr] = CaffeCudaGetDevice(); } return {ptr, Delete}; case CudaMemoryPoolType::CUB: CUDA_ENFORCE(g_cub_allocator->DeviceAllocate(&ptr, nbytes)); g_cuda_device_affiliation[ptr] = CaffeCudaGetDevice(); VLOG(2) << "CUB allocating pointer " << ptr << " on device " << CaffeCudaGetDevice(); if (FLAGS_caffe2_gpu_memory_tracking) { g_size_map[ptr] = nbytes; } return {ptr, Delete}; case CudaMemoryPoolType::THC: CUDA_ENFORCE(g_thc_allocator->Alloc(&ptr, nbytes, 0 /* stream */)); if (FLAGS_caffe2_gpu_memory_tracking) { g_size_map[ptr] = nbytes; g_cuda_device_affiliation[ptr] = CaffeCudaGetDevice(); } return {ptr, Delete}; } return {nullptr, Delete}; } void CUDAStaticContext::Delete(void* ptr) { // lock the mutex std::lock_guard<std::mutex> lock(CUDAContext::mutex()); if (FLAGS_caffe2_gpu_memory_tracking) { auto sz_it = g_size_map.find(ptr); DCHECK(sz_it != g_size_map.end()); auto aff_it = g_cuda_device_affiliation.find(ptr); DCHECK(aff_it != g_cuda_device_affiliation.end()); g_total_mem -= sz_it->second; g_total_by_gpu_map[aff_it->second] -= sz_it->second; g_size_map.erase(sz_it); } switch (g_cuda_memory_pool_type) { case CudaMemoryPoolType::NONE: { // If memory pool is not set up, use simple cudaFree. cudaError_t error = cudaFree(ptr); // For some reason, in Python runtime we sometimes delete a data pointer // after the cuda runtime exits - this is odd but is probably caused by // a static workspace that pycaffe2 uses, and the destruction got // entangled in some race condition. Anyway, since cuda runtime is exiting // anyway, we will not need to worry about memory leak, so we basically // ignore it. 
This is definitely not ideal but works for now. if (error != cudaSuccess && error != cudaErrorCudartUnloading) { LOG(FATAL) << "Error at: " << __FILE__ << ":" << __LINE__ << ": " << cudaGetErrorString(error); } if (FLAGS_caffe2_gpu_memory_tracking) { g_cuda_device_affiliation.erase(g_cuda_device_affiliation.find(ptr)); } break; } case CudaMemoryPoolType::CUB: { auto it = g_cuda_device_affiliation.find(ptr); DCHECK(it != g_cuda_device_affiliation.end()); VLOG(2) << "CUB freeing pointer " << ptr << " on device " << it->second; CUDA_ENFORCE(g_cub_allocator->DeviceFree(it->second, ptr)); g_cuda_device_affiliation.erase(it); break; } case CudaMemoryPoolType::THC: { CUDA_ENFORCE(g_thc_allocator->Free(ptr)); if (FLAGS_caffe2_gpu_memory_tracking) { g_cuda_device_affiliation.erase(g_cuda_device_affiliation.find(ptr)); } break; } } } BaseStaticContext* GetCUDAStaticContext() { static CUDAStaticContext context; return &context; } REGISTER_STATIC_CONTEXT(CUDA, GetCUDAStaticContext()); } // namespace caffe2
ef8a2f936386f6fc30814e77007f224434f3b6c7.hip
// !!! This is a file automatically generated by hipify!!! #include "common/common_types.h" #include "common/common_utils.h" #include "common/sanity_check.h" #include "common/device_intrinsics.h" #include "math/DenseGaussian.h" #include "math/DenseLDLT.h" #include "pcg_solver/solver_configs.h" #include "pcg_solver/block6x6_pcg_weber.h" #include <device_launch_parameters.h> #include <hip/hip_runtime_api.h> #include <hipcub/hipcub.hpp> #include <iostream> namespace surfelwarp { namespace device { /** * \brief Perform parallel matrix inverse on 6x6 psd matrix array * \tparam num_threads Each thread process a matrix * \param A Input matrix array, will not be touched * \param A_inversed The output matrix array * \param num_matrix */ template <int num_threads = 64> __global__ void matrix6x6InverseKernel( const float* A, float* A_inversed, int num_matrix ) { //Load the matrix into the shared memory __shared__ float factored_matrix[36 * num_threads]; __shared__ float inversed_matrix[36 * num_threads]; //The input matrix pointer for this block const int blk_matrix_offset = 36 * blockDim.x * blockIdx.x; const float* A_this_blk = A + blk_matrix_offset; //Cooperative loading for (auto k = 0; k < 36; k++) { // There are 36 x num_threads float need to be loaded if(blk_matrix_offset + k * num_threads + threadIdx.x < num_matrix * 36) factored_matrix[k * num_threads + threadIdx.x] = A_this_blk[k * num_threads + threadIdx.x]; //Each thread loads one element } //Sync here __syncthreads(); //Call the Gaussian inversion float* A_this_thread = &(factored_matrix[36 * threadIdx.x]); float* A_inv_this_thread = &(inversed_matrix[36 * threadIdx.x]); DenseGaussian<6>::Inverse(A_this_thread, A_inv_this_thread); //Sync again __syncthreads(); //Cooperative storing float* A_inv_this_blk = A_inversed + blk_matrix_offset; #pragma unroll for (auto k = 0; k < 36; k++) { // There are 36 x num_threads float need to be loaded if (blk_matrix_offset + k * num_threads + threadIdx.x < num_matrix * 36) 
A_inv_this_blk[k * num_threads + threadIdx.x] = inversed_matrix[k * num_threads + threadIdx.x]; //Each thread stores one element } } __device__ float nu_old_blk6x6; __device__ float nu_new_blk6x6; __device__ float reduce_partials_blk6x6[max_reduce_blocks]; //The maximum number of blocks to perform reduce for dot(a, b) /** * \brief r <- b; s <- inv_diag_blks * b; mu_new <- dot(r, s) * \tparam num_warps The FIXED number of warps in this kernel, for reduction * \param b * \param inv_diag_blks * \param r * \param s */ template <int num_warps = reduce_block_warps> __global__ void block6x6InitKernel( const PtrSz<const float> b, const PtrSz<const float> inv_diag_blks, PtrSz<float> r, PtrSz<float> s, PtrSz<float> x ) { //r <- b; s <- inv_diag_blks * b; const int idx = threadIdx.x + blockIdx.x * blockDim.x; //The dot product from this row for mu_new <- dot(r, s) float dot_this_row = 0.0f; if (idx < b.size) { const int blk_idx = idx / 6; //Perform the block matrix vector product float s_row = 0.0f; for (auto j = 0; j < 6; j++) { const float mat_value = inv_diag_blks[6 * idx + j]; const float b_value = b[6 * blk_idx + j]; s_row += mat_value * b_value; } const float r_row = b[idx]; dot_this_row = s_row * r_row; //Store the value to s and r s[idx] = s_row; r[idx] = r_row; x[idx] = 0.0f; } //Warp reduction on dot_this_row const int warp_id = threadIdx.x >> 5; const int lane_id = threadIdx.x & 31; float scanned_dot = dot_this_row; scanned_dot = warp_scan(scanned_dot); //Store the reduced warp_dot to shared memory for block scan __shared__ float warp_dot[num_warps]; if (lane_id == 31) warp_dot[warp_id] = scanned_dot; //Perform reduct on the warp_dot // __syncthreads(); if (warp_id == 0) { float warp_dot_reduce = 0.0f; if (lane_id < num_warps) warp_dot_reduce = warp_dot[lane_id]; //Do warp scan again warp_dot_reduce = warp_scan(warp_dot_reduce); //Store to global memory if (lane_id == 31) reduce_partials_blk6x6[blockIdx.x] = warp_dot_reduce; } } __global__ void 
block6x6ReducePartialKernel() { float sum = 0.0f; if (threadIdx.x < num_reduce_blocks_6x6) { sum = reduce_partials_blk6x6[threadIdx.x]; } sum = warp_scan(sum); if (threadIdx.x == 31) { nu_new_blk6x6 = sum; // nu_new <- dot(r, s) } } /* nu_old <- nu_new; q <- A s; alpha <- nu_new / dot(q, s); */ template<int num_warps = reduce_block_warps> __global__ void block6x6PCGKernel_0( const PtrSz<const float> A_data, const PtrSz<const int> A_colptr, const PtrSz<const int> A_rowptr, const PtrSz<const float> s, PtrSz<float> q ) { const int idx = threadIdx.x + blockIdx.x * blockDim.x; if(idx == 0){ nu_old_blk6x6 = nu_new_blk6x6; } const int warp_id = threadIdx.x >> 5; const int lane_id = threadIdx.x & 31; float dot_this_row = 0; //Perform a sparse matrix-vector product if(idx < s.size) { int begin = A_rowptr[idx]; const int end = A_rowptr[idx + bin_size]; int column_offset = (begin - lane_id) / 6 + lane_id; float sp_mv = 0.0f; while (begin < end) { const int colume = A_colptr[column_offset]; for(auto j = 0; j < 6; j++){ float mat_data = A_data[begin]; float s_data = colume >= 0 ? 
s[colume + j] : 0; sp_mv += mat_data * s_data; begin += bin_size; } //Increase the column index column_offset += bin_size; } //The value of this row q[idx] = sp_mv; dot_this_row = sp_mv * s[idx]; } //Perform warp scan float scanned_dot = dot_this_row; scanned_dot = warp_scan(scanned_dot); //Store the reduced warp_dot to shared memory for block scan __shared__ float warp_dot[num_warps]; if (lane_id == 31) warp_dot[warp_id] = scanned_dot; //Perform reduct on the warp_dot __syncthreads(); //MAYBE NEEDED if (warp_id == 0) { float warp_dot_reduce = 0.0f; if (lane_id < num_warps) warp_dot_reduce = warp_dot[lane_id]; //Do warp scan again warp_dot_reduce = warp_scan(warp_dot_reduce); //Store to global memory if (lane_id == 31) reduce_partials_blk6x6[blockIdx.x] = warp_dot_reduce; } } template<int num_warps = reduce_block_warps> __global__ void block6x6PCGKernel_0( const PtrSz<const float> A_data, const PtrSz<const int> A_colptr, const PtrSz<const int> A_rowptr, hipTextureObject_t s, PtrSz<float> q ) { const int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx == 0) { nu_old_blk6x6 = nu_new_blk6x6; } const int warp_id = threadIdx.x >> 5; const int lane_id = threadIdx.x & 31; float dot_this_row = 0; //Perform a sparse matrix-vector product if (idx < q.size) { int begin = A_rowptr[idx]; const int end = A_rowptr[idx + bin_size]; int column_offset = (begin - lane_id) / 6 + lane_id; float sp_mv = 0.0f; while (begin < end) { const int colume = A_colptr[column_offset]; for (auto j = 0; j < 6; j++) { const float mat_data = A_data[begin]; const float s_data = (colume >= 0) ? 
fetch1DLinear<float>(s, colume + j) : 0.0f; sp_mv += mat_data * s_data; begin += bin_size; } //Increase the column index column_offset += bin_size; } //The value of this row q[idx] = sp_mv; dot_this_row = sp_mv * fetch1DLinear<float>(s, idx); } //Perform warp scan float scanned_dot = dot_this_row; scanned_dot = warp_scan(scanned_dot); //Store the reduced warp_dot to shared memory for block scan __shared__ float warp_dot[num_warps]; if (lane_id == 31) warp_dot[warp_id] = scanned_dot; //Perform reduct on the warp_dot __syncthreads(); //MAYBE NEEDED if (warp_id == 0) { float warp_dot_reduce = 0.0f; if (lane_id < num_warps) warp_dot_reduce = warp_dot[lane_id]; //Do warp scan again warp_dot_reduce = warp_scan(warp_dot_reduce); //Store to global memory if (lane_id == 31) reduce_partials_blk6x6[blockIdx.x] = warp_dot_reduce; } } /** * \brief alpha <- nu_new / dot(q, s); x <- x + alpha * s; * t <- r - alpha * q; p <- M_inv*t; nu_new <- dot(t, p) * \tparam num_warps The FIXED number of warps in this kernel */ template<int num_warps = reduce_block_warps> __global__ void block6x6PCGKernel_1( const PtrSz<const float> s, const PtrSz<const float> r, const PtrSz<const float> q, const PtrSz<const float> inv_diag_blks, PtrSz<float> x, PtrSz<float> t, PtrSz<float> p ) { //Each block performs a reduction for alpha = dot(q, s) __shared__ float alpha; const int warp_id = threadIdx.x >> 5; const int lane_id = threadIdx.x & 31; float scanned_dot; //Perform reduction on warp_0 if (warp_id == 0) { scanned_dot = 0.0f; if (lane_id < num_reduce_blocks_6x6) { scanned_dot = reduce_partials_blk6x6[lane_id]; } scanned_dot = warp_scan(scanned_dot); if (lane_id == 31) { alpha = nu_new_blk6x6 / scanned_dot; } } //Do sync to broadcast alpha __syncthreads(); const float alpha_thread = alpha; //float alpha_thread = alpha; const int idx = threadIdx.x + blockDim.x * blockIdx.x; float dot_this_row = 0.0f; if (idx < x.size) { const int blk_idx = idx / 6; //Block matrix vector product float p_row = 0.0; 
float mat_value, r_value; for(auto j = 0; j < 6; j++) { mat_value = inv_diag_blks[6 * idx + j]; r_value = r[6 * blk_idx + j] - alpha_thread * q[6 * blk_idx + j]; p_row += mat_value * r_value; } p[idx] = p_row; //p <- M_inv * r //float r_row = r[idx]; //float q_row = q[idx]; const float r_row_new = r[idx] - alpha_thread * q[idx]; t[idx] = r_row_new; // t <- r - alpha * q x[idx] += alpha_thread * s[idx]; // x <- x + alpha s dot_this_row = p_row * r_row_new; } //Perform in block reduction on dot(q, s) scanned_dot = dot_this_row; scanned_dot = warp_scan(scanned_dot); //Store the reduced warp_dot to shared memory for block scan __shared__ float warp_dot[num_warps]; if (lane_id == 31) warp_dot[warp_id] = scanned_dot; __syncthreads(); //MAYBE NEEDED if (warp_id == 0) { float warp_dot_reduce = 0.0f; if (lane_id < num_warps) { warp_dot_reduce = warp_dot[lane_id]; } //Do warp scan again warp_dot_reduce = warp_scan(warp_dot_reduce); //Store to global memory if (lane_id == 31) reduce_partials_blk6x6[blockIdx.x] = warp_dot_reduce; } } /** * \brief nu_new <- dot(t, p); beta <- nu_new/nu_old; s <- p + beta s */ __global__ void block6x6PCGKernel_2( const PtrSz<const float> p, PtrSz<float> s ) { //Each block perform a reduce to compute beta __shared__ float beta; const int warp_id = threadIdx.x >> 5; const int lane_id = threadIdx.x & 31; if(warp_id == 0) { float dot_reduce = 0.0f; if(lane_id < num_reduce_blocks_6x6) { dot_reduce = reduce_partials_blk6x6[lane_id]; } dot_reduce = warp_scan(dot_reduce); if(lane_id == 31) { if(blockIdx.x == 0) nu_new_blk6x6 = dot_reduce; beta = dot_reduce / nu_old_blk6x6; //Debug code: seems correct //printf("Beta from device %f \n", beta); } } //Do sync to broadcast the value of beta __syncthreads(); const float beta_thread = beta; const auto idx = threadIdx.x + blockDim.x * blockIdx.x; if(idx < p.size) { s[idx] = p[idx] + beta_thread * s[idx]; } } /** * \brief nu_new <- dot(t, p); beta <- nu_new/nu_old; s <- p + beta s */ template<int num_warps = 
reduce_block_warps> __global__ void block6x6PCGKernelAlphaBeta( const PtrSz<const float> r, const PtrSz<const float> q, const PtrSz<const float> inv_diag_blks, PtrSz<float> s, PtrSz<float> x, PtrSz<float> t, PtrSz<float> p ) { //Each block performs a reduction for alpha = dot(q, s) __shared__ float alpha; __shared__ float beta; const int warp_id = threadIdx.x >> 5; const int lane_id = threadIdx.x & 31; float scanned_dot; //Perform reduction on warp_0 if (warp_id == 0) { scanned_dot = 0.0f; if (lane_id < num_reduce_blocks_6x6) { scanned_dot = reduce_partials_blk6x6[lane_id]; } scanned_dot = warp_scan(scanned_dot); if (lane_id == 31) { alpha = nu_new_blk6x6 / scanned_dot; } } //Do sync to broadcast alpha __syncthreads(); const float alpha_thread = alpha; const int idx = threadIdx.x + blockDim.x * blockIdx.x; float dot_this_row = 0.0f; if (idx < x.size) { const int blk_idx = idx / 6; //Block matrix vector product float p_row = 0.0; float mat_value, r_value; for(auto j = 0; j < 6; j++) { mat_value = inv_diag_blks[6 * idx + j]; r_value = r[6 * blk_idx + j] - alpha_thread * q[6 * blk_idx + j]; p_row += mat_value * r_value; } p[idx] = p_row; //p <- M_inv * r const float r_row_new = r[idx] - alpha_thread * q[idx]; t[idx] = r_row_new; // t <- r - alpha * q x[idx] += alpha_thread * s[idx]; // x <- x + alpha s dot_this_row = p_row * r_row_new; } //Perform in block reduction on dot(q, s) scanned_dot = dot_this_row; scanned_dot = warp_scan(scanned_dot); //Store the reduced warp_dot to shared memory for block scan __shared__ float warp_dot[num_warps]; if (lane_id == 31) warp_dot[warp_id] = scanned_dot; if (warp_id == 0) { float warp_dot_reduce = 0.0f; float dot_reduce = 0.0f; if (lane_id < num_warps) { warp_dot_reduce = warp_dot[lane_id]; } if(lane_id < num_reduce_blocks_6x6) { dot_reduce = reduce_partials_blk6x6[lane_id]; } //Do warp scan again warp_dot_reduce = warp_scan(warp_dot_reduce); dot_reduce = warp_scan(dot_reduce); //Store to global memory if (lane_id == 31) { 
reduce_partials_blk6x6[blockIdx.x] = warp_dot_reduce; if(blockIdx.x == 0) nu_new_blk6x6 = dot_reduce; beta = dot_reduce / nu_old_blk6x6; } } //Do sync to broadcast the value of beta __syncthreads(); const float beta_thread = beta; if(idx < p.size) { s[idx] = p[idx] + beta_thread * s[idx]; } } }; /* End of namespace device */ }; /* End of namespace surfelwarp */ void surfelwarp::block6x6_pcg_weber( const DeviceArray<float>& diag_blks, const DeviceArray<float>& A_data, const DeviceArray<int>& A_colptr, const DeviceArray<int>& A_rowptr, const DeviceArray<float>& b, DeviceArray<float>& x_buffer, DeviceArray<float>& inv_diag_blk_buffer, DeviceArray<float>& p_buffer, DeviceArray<float>& q_buffer, DeviceArray<float>& r_buffer, DeviceArray<float>& s_buffer, DeviceArray<float>& t_buffer, DeviceArray<float>& valid_x, int max_iters, hipStream_t stream ) { //Correct the size of array size_t N = b.size(); DeviceArray<float> inv_diag_blks = DeviceArray<float>(inv_diag_blk_buffer.ptr(), diag_blks.size()); valid_x = DeviceArray<float>(x_buffer.ptr(), N); DeviceArray<float> p = DeviceArray<float>(p_buffer.ptr(), N); DeviceArray<float> q = DeviceArray<float>(q_buffer.ptr(), N); DeviceArray<float> r = DeviceArray<float>(r_buffer.ptr(), N); DeviceArray<float> s = DeviceArray<float>(s_buffer.ptr(), N); DeviceArray<float> t = DeviceArray<float>(t_buffer.ptr(), N); //Compute the inverse of diag blocks for pre-conditioning cudaSafeCall(hipMemsetAsync(valid_x.ptr(), 0, sizeof(float) * valid_x.size(), stream)); block6x6_diag_inverse(diag_blks, inv_diag_blks, N / 6, stream); //The init kernel block6x6_init_kernel(b, inv_diag_blks, r, s, valid_x, stream); //The main loop for(auto i = 0; i < max_iters; i++) { block6x6_pcg_kernel_0(A_data, A_colptr, A_rowptr, s, q, stream); // block6x6_pcg_kernel_alphabeta(r, q, inv_diag_blks,s, valid_x, t, p, stream); block6x6_pcg_kernel_1(s, r, q, inv_diag_blks, valid_x, t, p, stream); block6x6_pcg_kernel_2(p, s, stream); r.swap(t); } //Sync and check error 
#if defined(CUDA_DEBUG_SYNC_CHECK) cudaSafeCall(hipStreamSynchronize(stream)); cudaSafeCall(hipGetLastError()); #endif } // Ignore, only used in checks and debug void surfelwarp::block6x6_pcg_weber( const DeviceArray<float>& diag_blks, const DeviceArray<float>& A_data, const DeviceArray<int>& A_colptr, const DeviceArray<int>& A_rowptr, const DeviceArray<float>& b, DeviceArray<float>& x_buffer, DeviceArray<float>& inv_diag_blk_buffer, DeviceArray<float>& p_buffer, DeviceArray<float>& q_buffer, DeviceArray<float>& r_buffer, DeviceArray<float>& s_buffer, hipTextureObject_t s_texture, DeviceArray<float>& t_buffer, DeviceArray<float>& valid_x, int max_iters, hipStream_t stream ) { //Correct the size of array size_t N = b.size(); DeviceArray<float> inv_diag_blks = DeviceArray<float>(inv_diag_blk_buffer.ptr(), diag_blks.size()); valid_x = DeviceArray<float>(x_buffer.ptr(), N); DeviceArray<float> p = DeviceArray<float>(p_buffer.ptr(), N); DeviceArray<float> q = DeviceArray<float>(q_buffer.ptr(), N); DeviceArray<float> r = DeviceArray<float>(r_buffer.ptr(), N); DeviceArray<float> s = DeviceArray<float>(s_buffer.ptr(), N); DeviceArray<float> t = DeviceArray<float>(t_buffer.ptr(), N); //Compute the inverse of diag blocks for pre-conditioning block6x6_diag_inverse(diag_blks, inv_diag_blks, N / 6, stream); //The init kernel block6x6_init_kernel(b, inv_diag_blks, r, s, valid_x, stream); //The main loop for (auto i = 0; i < max_iters; i++) { block6x6_pcg_kernel_0(A_data, A_colptr, A_rowptr, s_texture, q, stream); block6x6_pcg_kernel_1(s, r, q, inv_diag_blks, valid_x, t, p, stream); block6x6_pcg_kernel_2(p, s, stream); r.swap(t); } //Sync and check error #if defined(CUDA_DEBUG_SYNC_CHECK) cudaSafeCall(hipStreamSynchronize(stream)); cudaSafeCall(hipGetLastError()); #endif } void surfelwarp::block6x6_diag_inverse(const float * A, float * A_inversed, int num_matrix, hipStream_t stream) { const int threads_per_blk = 64; dim3 blk(threads_per_blk); dim3 grid(divUp(num_matrix, blk.x)); 
hipLaunchKernelGGL(( device::matrix6x6InverseKernel<threads_per_blk>), dim3(grid), dim3(blk), 0, stream, A, A_inversed, num_matrix); //Sync and check error #if defined(CUDA_DEBUG_SYNC_CHECK) cudaSafeCall(hipStreamSynchronize(stream)); cudaSafeCall(hipGetLastError()); #endif } /* r <- b; s <- inv_diag_blks; mu_new <- dot(r, s) */ void surfelwarp::block6x6_init_kernel( const DeviceArray<float>& b, const DeviceArray<float>& inv_diag_blks, DeviceArray<float>& r, DeviceArray<float>& s, DeviceArray<float>& x, hipStream_t stream ) { dim3 blk(reduce_block_threads); //dim3 grid(divUp(b.size(), blk.x)); dim3 grid(num_reduce_blocks_6x6); hipLaunchKernelGGL(( device::block6x6InitKernel), dim3(grid), dim3(blk), 0, stream, b, inv_diag_blks, r, s, x); //Perform a reduction on the global memory dim3 reduce_blk(32); dim3 reduce_grid(1); hipLaunchKernelGGL(( device::block6x6ReducePartialKernel), dim3(reduce_grid), dim3(reduce_blk), 0, stream, ); //Sync and check error #if defined(CUDA_DEBUG_SYNC_CHECK) cudaSafeCall(hipStreamSynchronize(stream)); cudaSafeCall(hipGetLastError()); #endif } /* nu_old <- nu_new; q <- A s; alpha <- nu_old / dot(q, s); */ void surfelwarp::block6x6_pcg_kernel_0( const DeviceArray<float> &A_data, const DeviceArray<int> &A_colptr, const DeviceArray<int> &A_rowptr, const DeviceArray<float> &s, DeviceArray<float> &q, hipStream_t stream ) { dim3 blk(reduce_block_threads); //dim3 grid(divUp(s.size(), blk.x)); dim3 grid(num_reduce_blocks_6x6); hipLaunchKernelGGL(( device::block6x6PCGKernel_0), dim3(grid), dim3(blk), 0, stream, A_data, A_colptr, A_rowptr, s, q); //Sync and check error #if defined(CUDA_DEBUG_SYNC_CHECK) cudaSafeCall(hipStreamSynchronize(stream)); cudaSafeCall(hipGetLastError()); #endif } void surfelwarp::block6x6_pcg_kernel_0( const DeviceArray<float>& A_data, const DeviceArray<int>& A_colptr, const DeviceArray<int>& A_rowptr, hipTextureObject_t s, DeviceArray<float>& q, hipStream_t stream ) { dim3 blk(reduce_block_threads); //dim3 
grid(divUp(s.size(), blk.x)); dim3 grid(num_reduce_blocks_6x6); hipLaunchKernelGGL(( device::block6x6PCGKernel_0), dim3(grid), dim3(blk), 0, stream, A_data, A_colptr, A_rowptr, s, q); //Sync and check error #if defined(CUDA_DEBUG_SYNC_CHECK) cudaSafeCall(hipStreamSynchronize(stream)); cudaSafeCall(hipGetLastError()); #endif } /* alpha <- nu_new / dot(q, s); x <- x + alpha * s; * t <- r - alpha * q; p <- M_inv*t; nu_new <- dot(t, p) */ void surfelwarp::block6x6_pcg_kernel_1( const DeviceArray<float>& s, const DeviceArray<float>& r, const DeviceArray<float>& q, const DeviceArray<float>& inv_diag_blks, DeviceArray<float>& x, DeviceArray<float>& t, DeviceArray<float>& p, hipStream_t stream ) { dim3 blk(reduce_block_threads); dim3 grid(num_reduce_blocks_6x6); hipLaunchKernelGGL(( device::block6x6PCGKernel_1), dim3(grid), dim3(blk), 0, stream, s, r, q, inv_diag_blks, x, t, p); } /* alpha <- nu_new / dot(q, s); x <- x + alpha * s; * t <- r - alpha * q; p <- M_inv*t; nu_new <- dot(t, p) */ void surfelwarp::block6x6_pcg_kernel_alphabeta( const DeviceArray<float>& r, const DeviceArray<float>& q, const DeviceArray<float>& inv_diag_blks, DeviceArray<float>& s, DeviceArray<float>& x, DeviceArray<float>& t, DeviceArray<float>& p, hipStream_t stream ) { dim3 blk(reduce_block_threads); dim3 grid(divUp(s.size(), blk.x)); // dim3 grid(num_reduce_blocks_6x6); hipLaunchKernelGGL(( device::block6x6PCGKernelAlphaBeta), dim3(grid), dim3(blk), 0, stream, r, q, inv_diag_blks, s, x, t, p); } void surfelwarp::block6x6_pcg_kernel_2( const DeviceArray<float>& p, DeviceArray<float>& s, hipStream_t stream ) { dim3 blk(256); dim3 grid(divUp(s.size(), blk.x)); hipLaunchKernelGGL(( device::block6x6PCGKernel_2), dim3(grid), dim3(blk), 0, stream, p, s); } /** Below are the checking subroutines defined for 6x6 pcg solver */ void surfelwarp::checkBlock6x6Init( const std::vector<float> &b, const std::vector<float> &inv_diags, std::vector<float>& h_r, std::vector<float>& h_s ) { //Prepare the data 
DeviceArray<float> b_dev, d_inv_diags, r, s, x; b_dev.upload(b); d_inv_diags.upload(inv_diags); r.create(b_dev.size()); s.create(b_dev.size()); x.create(b_dev.size()); //Call the function block6x6_init_kernel(b_dev, d_inv_diags, r, s, x); //Check the value of dot product // hipDeviceSynchronize(); r.download(h_r); s.download(h_s); float dot_value = 0; for(auto i = 0;i < h_s.size();i++){ dot_value += h_r[i] * h_s[i]; } //Frist check r == b assert(h_r.size() == b.size()); for(auto i = 0; i < b.size(); i++) { assert(std::abs(h_r[i] - b[i]) < 1e-4); } //Check s = inv_diag * b for(auto row = 0; row < b.size(); row++) { int blk_idx = row / 6; int inblk_offset = row % 6; int diag_offset = 36 * blk_idx; int diag_start_idx = diag_offset + 6 * inblk_offset; float s_row = 0.0f; for(auto j = 0; j < 6; j++) { s_row += inv_diags[diag_start_idx + j] * b[6 * blk_idx + j]; } assert(std::abs(s_row - h_s[row]) < 1e-4); } //Compare it with device value float dot_device; hipMemcpyFromSymbol(&dot_device, device::nu_new_blk6x6, sizeof(float), 0, hipMemcpyDeviceToHost); if(std::abs((dot_device - dot_value) / dot_value) > 1e-6) { std::cout << "Relative err in init kernel dot product " << std::abs((dot_device - dot_value) / dot_value) << std::endl; } } void surfelwarp::checkBlock6x6Init( const std::vector<float> &b, const std::vector<float> &inv_diags ) { std::vector<float> r, s; checkBlock6x6Init(b, inv_diags, r, s); } void surfelwarp::checkBlock6x6Kernel_0( const std::vector<float> &A_data, const std::vector<int> &A_rowptr, const std::vector<int> &A_colptr, const std::vector<float> &s, //Output for later checking std::vector<float>& q_device ) { //Prepare the data DeviceArray<float> d_A_data, s_dev, q_dev; DeviceArray<int> d_A_rowptr, d_A_colptr; d_A_data.upload(A_data); s_dev.upload(s); q_dev.create(s.size()); d_A_colptr.upload(A_colptr); d_A_rowptr.upload(A_rowptr); //Call device function block6x6_pcg_kernel_0(d_A_data, d_A_colptr, d_A_rowptr, s_dev, q_dev); //Perform matrix vector 
product on host const auto matrix_size = s.size(); std::vector<float> q_host; hostEigenSpMV(A_data, A_rowptr, A_colptr, matrix_size, s, q_host); //Check q = A s q_device.clear(); q_dev.download(q_device); float maximum_relative_err = 0.0f; assert(q_device.size() == q_host.size()); for(auto i = 0; i < q_host.size(); i++) { float host_value = q_host[i]; float device_value = q_device[i]; if(std::abs(host_value - device_value) > 1e-4) { if(std::abs((host_value - device_value) / host_value) > maximum_relative_err) { maximum_relative_err = std::abs((host_value - device_value) / host_value); } } } std::cout << "The maximum relative error in SpMV " << maximum_relative_err << std::endl; //Next check the value of dot product float dev_dot_reduce[max_reduce_blocks]; hipMemcpyFromSymbol(dev_dot_reduce, device::reduce_partials_blk6x6, sizeof(float) * max_reduce_blocks, 0, hipMemcpyDeviceToHost); float dev_dot = 0.0f; for(auto j = 0; j < num_reduce_blocks_6x6; j++) { dev_dot += dev_dot_reduce[j]; } //Compute the dot prodcut at host float h_dot = 0.0f; for(auto j = 0; j < q_host.size(); j++) { h_dot += q_host[j] * s[j]; } assert(std::abs((h_dot - dev_dot) / dev_dot) < 1e-4); } void surfelwarp::checkBlock6x6Kernel_1( const std::vector<float> &s, const std::vector<float> &r, const std::vector<float> &q, const std::vector<float> &inv_diag_blks, std::vector<float> &x, std::vector<float> &t, std::vector<float> &p ) { //Prepare data for input DeviceArray<float> s_dev, r_dev, q_dev, inv_diag_blks_dev, x_dev, t_dev, p_dev; s_dev.upload(s); r_dev.upload(r); q_dev.upload(q); inv_diag_blks_dev.upload(inv_diag_blks); x_dev.upload(x); t_dev.create(x_dev.size()); p_dev.create(x_dev.size()); //Compute dot product on host float dev_dot_reduce[max_reduce_blocks]; hipMemcpyFromSymbol(dev_dot_reduce, device::reduce_partials_blk6x6, sizeof(float) * max_reduce_blocks, 0, hipMemcpyDeviceToHost); float dev_dot = 0.0f; for (auto j = 0; j < num_reduce_blocks_6x6; j++) { dev_dot += dev_dot_reduce[j]; } 
float dot_s_q = 0.0f; for (int j = 0; j < q.size(); j++) { dot_s_q += q[j] * s[j]; } assert(std::abs((dot_s_q - dev_dot) / dev_dot) < 1e-4); //Download nu to compute alpha float nu_old_host, nu_new_host; hipMemcpyFromSymbol(&nu_old_host, device::nu_old_blk6x6, sizeof(float), 0, hipMemcpyDeviceToHost); hipMemcpyFromSymbol(&nu_new_host, device::nu_new_blk6x6, sizeof(float), 0, hipMemcpyDeviceToHost); #if defined(CUDA_DEBUG_SYNC_CHECK) cudaSafeCall(hipDeviceSynchronize()); cudaSafeCall(hipGetLastError()); #endif assert(std::abs(nu_new_host - nu_old_host) < 1e-7); const float alpha = nu_old_host / dot_s_q; //The value of alpha is correct //std::cout << "Alpha from host " << alpha << std::endl; //Invoke the device version function block6x6_pcg_kernel_1(s_dev, r_dev, q_dev, inv_diag_blks_dev, x_dev, t_dev, p_dev); #if defined(CUDA_DEBUG_SYNC_CHECK) cudaSafeCall(hipDeviceSynchronize()); cudaSafeCall(hipGetLastError()); #endif //Check x <- x + alpha * s for (auto i = 0; i < x.size(); i++) { x[i] += alpha * s[i]; } std::vector<float> h_x_dev; x_dev.download(h_x_dev); assert(s.size() == x.size()); auto max_relative_err = maxRelativeError(h_x_dev, x); if(max_relative_err > 1e-5) { std::cout << "Max relative err for x <- x + alpha s is " << max_relative_err << std::endl; } //Check t <- r - alpha * q; t.resize(s.size()); std::vector<float> h_t_dev; t_dev.download(h_t_dev); for(auto j = 0;j < t.size(); j++) { t[j] = r[j] - alpha * q[j]; assert(std::abs(t[j] - h_t_dev[j]) < 1e-4); } //Check p <- M_inv*t; std::vector<float> h_p_dev; p_dev.download(h_p_dev); p.resize(x.size()); for (auto row = 0; row < t.size(); row++) { int blk_idx = row / 6; int inblk_offset = row % 6; int diag_offset = 36 * blk_idx; int diag_start_idx = diag_offset + 6 * inblk_offset; float p_row = 0.0f; for (auto j = 0; j < 6; j++) { p_row += inv_diag_blks[diag_start_idx + j] * t[6 * blk_idx + j]; } p[row] = p_row; } max_relative_err = maxRelativeError(h_p_dev, p, 1e-5); if(max_relative_err > 1e-5) { std::cout 
<< "Relative error for p <- Minv t " << max_relative_err << std::endl; } //Check for nu_new <- dot(t, p) float dot_t_p = 0.0f; for(auto j = 0; j < p.size(); j++) { //dot_t_p += h_t_dev[j] * p[j]; dot_t_p += t[j] * p[j]; } //Download the result to host hipMemcpyFromSymbol(dev_dot_reduce, device::reduce_partials_blk6x6, sizeof(float) * max_reduce_blocks, 0, hipMemcpyDeviceToHost); dev_dot = 0.0f; for (auto j = 0; j < num_reduce_blocks_6x6; j++) { dev_dot += dev_dot_reduce[j]; } //Compare it assert(std::abs((dev_dot - dot_t_p) / dot_t_p) < 1e-4); } void surfelwarp::checkBlock6x6Kernel_2( const std::vector<float> &p, std::vector<float> &s ) { //Prepare for device input DeviceArray<float> p_dev, s_dev; assert(s.size() == p.size()); p_dev.upload(p); s_dev.upload(s); //Compute the beta at host float parital_reduce[max_reduce_blocks]; float nu_old_host; hipMemcpyFromSymbol(&nu_old_host, device::nu_old_blk6x6, sizeof(float), 0, hipMemcpyDeviceToHost); hipMemcpyFromSymbol(parital_reduce, device::reduce_partials_blk6x6, sizeof(float) * max_reduce_blocks, 0, hipMemcpyDeviceToHost); #if defined(CUDA_DEBUG_SYNC_CHECK) cudaSafeCall(hipDeviceSynchronize()); cudaSafeCall(hipGetLastError()); #endif float nu_new_host = 0.0f; for(auto j = 0; j < num_reduce_blocks_6x6; j++) { nu_new_host += parital_reduce[j]; } float beta = nu_new_host / nu_old_host; //Debug code, seems correct //std::cout << "Beta on host " << beta << std::endl; //Invoke the kernel block6x6_pcg_kernel_2(p_dev, s_dev); //Download the nu_new from device float nu_new_device; hipMemcpyFromSymbol(&nu_new_device, device::nu_new_blk6x6, sizeof(float), 0, hipMemcpyDeviceToHost); #if defined(CUDA_DEBUG_SYNC_CHECK) cudaSafeCall(hipDeviceSynchronize()); cudaSafeCall(hipGetLastError()); #endif //Check that value: seems correct assert(std::abs((nu_new_host - nu_new_device) / nu_new_host) < 1e-4); //Check s <- p + beta s: seems correct std::vector<float> h_s_dev; s_dev.download(h_s_dev); for (auto i = 0; i < h_s_dev.size(); ++i) { 
s[i] = beta * s[i] + p[i]; } auto relative_err = maxRelativeError(s, h_s_dev); if(relative_err > 1e-4) { std::cout << "Max relative error in s <- p + beta s " << relative_err << std::endl; } }
ef8a2f936386f6fc30814e77007f224434f3b6c7.cu
#include "common/common_types.h" #include "common/common_utils.h" #include "common/sanity_check.h" #include "common/device_intrinsics.h" #include "math/DenseGaussian.h" #include "math/DenseLDLT.h" #include "pcg_solver/solver_configs.h" #include "pcg_solver/block6x6_pcg_weber.h" #include <device_launch_parameters.h> #include <cuda_runtime_api.h> #include <cub/cub.cuh> #include <iostream> namespace surfelwarp { namespace device { /** * \brief Perform parallel matrix inverse on 6x6 psd matrix array * \tparam num_threads Each thread process a matrix * \param A Input matrix array, will not be touched * \param A_inversed The output matrix array * \param num_matrix */ template <int num_threads = 64> __global__ void matrix6x6InverseKernel( const float* A, float* A_inversed, int num_matrix ) { //Load the matrix into the shared memory __shared__ float factored_matrix[36 * num_threads]; __shared__ float inversed_matrix[36 * num_threads]; //The input matrix pointer for this block const int blk_matrix_offset = 36 * blockDim.x * blockIdx.x; const float* A_this_blk = A + blk_matrix_offset; //Cooperative loading for (auto k = 0; k < 36; k++) { // There are 36 x num_threads float need to be loaded if(blk_matrix_offset + k * num_threads + threadIdx.x < num_matrix * 36) factored_matrix[k * num_threads + threadIdx.x] = A_this_blk[k * num_threads + threadIdx.x]; //Each thread loads one element } //Sync here __syncthreads(); //Call the Gaussian inversion float* A_this_thread = &(factored_matrix[36 * threadIdx.x]); float* A_inv_this_thread = &(inversed_matrix[36 * threadIdx.x]); DenseGaussian<6>::Inverse(A_this_thread, A_inv_this_thread); //Sync again __syncthreads(); //Cooperative storing float* A_inv_this_blk = A_inversed + blk_matrix_offset; #pragma unroll for (auto k = 0; k < 36; k++) { // There are 36 x num_threads float need to be loaded if (blk_matrix_offset + k * num_threads + threadIdx.x < num_matrix * 36) A_inv_this_blk[k * num_threads + threadIdx.x] = inversed_matrix[k * 
num_threads + threadIdx.x]; //Each thread stores one element } } __device__ float nu_old_blk6x6; __device__ float nu_new_blk6x6; __device__ float reduce_partials_blk6x6[max_reduce_blocks]; //The maximum number of blocks to perform reduce for dot(a, b) /** * \brief r <- b; s <- inv_diag_blks * b; mu_new <- dot(r, s) * \tparam num_warps The FIXED number of warps in this kernel, for reduction * \param b * \param inv_diag_blks * \param r * \param s */ template <int num_warps = reduce_block_warps> __global__ void block6x6InitKernel( const PtrSz<const float> b, const PtrSz<const float> inv_diag_blks, PtrSz<float> r, PtrSz<float> s, PtrSz<float> x ) { //r <- b; s <- inv_diag_blks * b; const int idx = threadIdx.x + blockIdx.x * blockDim.x; //The dot product from this row for mu_new <- dot(r, s) float dot_this_row = 0.0f; if (idx < b.size) { const int blk_idx = idx / 6; //Perform the block matrix vector product float s_row = 0.0f; for (auto j = 0; j < 6; j++) { const float mat_value = inv_diag_blks[6 * idx + j]; const float b_value = b[6 * blk_idx + j]; s_row += mat_value * b_value; } const float r_row = b[idx]; dot_this_row = s_row * r_row; //Store the value to s and r s[idx] = s_row; r[idx] = r_row; x[idx] = 0.0f; } //Warp reduction on dot_this_row const int warp_id = threadIdx.x >> 5; const int lane_id = threadIdx.x & 31; float scanned_dot = dot_this_row; scanned_dot = warp_scan(scanned_dot); //Store the reduced warp_dot to shared memory for block scan __shared__ float warp_dot[num_warps]; if (lane_id == 31) warp_dot[warp_id] = scanned_dot; //Perform reduct on the warp_dot // __syncthreads(); if (warp_id == 0) { float warp_dot_reduce = 0.0f; if (lane_id < num_warps) warp_dot_reduce = warp_dot[lane_id]; //Do warp scan again warp_dot_reduce = warp_scan(warp_dot_reduce); //Store to global memory if (lane_id == 31) reduce_partials_blk6x6[blockIdx.x] = warp_dot_reduce; } } __global__ void block6x6ReducePartialKernel() { float sum = 0.0f; if (threadIdx.x < 
num_reduce_blocks_6x6) { sum = reduce_partials_blk6x6[threadIdx.x]; } sum = warp_scan(sum); if (threadIdx.x == 31) { nu_new_blk6x6 = sum; // nu_new <- dot(r, s) } } /* nu_old <- nu_new; q <- A s; alpha <- nu_new / dot(q, s); */ template<int num_warps = reduce_block_warps> __global__ void block6x6PCGKernel_0( const PtrSz<const float> A_data, const PtrSz<const int> A_colptr, const PtrSz<const int> A_rowptr, const PtrSz<const float> s, PtrSz<float> q ) { const int idx = threadIdx.x + blockIdx.x * blockDim.x; if(idx == 0){ nu_old_blk6x6 = nu_new_blk6x6; } const int warp_id = threadIdx.x >> 5; const int lane_id = threadIdx.x & 31; float dot_this_row = 0; //Perform a sparse matrix-vector product if(idx < s.size) { int begin = A_rowptr[idx]; const int end = A_rowptr[idx + bin_size]; int column_offset = (begin - lane_id) / 6 + lane_id; float sp_mv = 0.0f; while (begin < end) { const int colume = A_colptr[column_offset]; for(auto j = 0; j < 6; j++){ float mat_data = A_data[begin]; float s_data = colume >= 0 ? 
s[colume + j] : 0; sp_mv += mat_data * s_data; begin += bin_size; } //Increase the column index column_offset += bin_size; } //The value of this row q[idx] = sp_mv; dot_this_row = sp_mv * s[idx]; } //Perform warp scan float scanned_dot = dot_this_row; scanned_dot = warp_scan(scanned_dot); //Store the reduced warp_dot to shared memory for block scan __shared__ float warp_dot[num_warps]; if (lane_id == 31) warp_dot[warp_id] = scanned_dot; //Perform reduct on the warp_dot __syncthreads(); //MAYBE NEEDED if (warp_id == 0) { float warp_dot_reduce = 0.0f; if (lane_id < num_warps) warp_dot_reduce = warp_dot[lane_id]; //Do warp scan again warp_dot_reduce = warp_scan(warp_dot_reduce); //Store to global memory if (lane_id == 31) reduce_partials_blk6x6[blockIdx.x] = warp_dot_reduce; } } template<int num_warps = reduce_block_warps> __global__ void block6x6PCGKernel_0( const PtrSz<const float> A_data, const PtrSz<const int> A_colptr, const PtrSz<const int> A_rowptr, cudaTextureObject_t s, PtrSz<float> q ) { const int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx == 0) { nu_old_blk6x6 = nu_new_blk6x6; } const int warp_id = threadIdx.x >> 5; const int lane_id = threadIdx.x & 31; float dot_this_row = 0; //Perform a sparse matrix-vector product if (idx < q.size) { int begin = A_rowptr[idx]; const int end = A_rowptr[idx + bin_size]; int column_offset = (begin - lane_id) / 6 + lane_id; float sp_mv = 0.0f; while (begin < end) { const int colume = A_colptr[column_offset]; for (auto j = 0; j < 6; j++) { const float mat_data = A_data[begin]; const float s_data = (colume >= 0) ? 
fetch1DLinear<float>(s, colume + j) : 0.0f; sp_mv += mat_data * s_data; begin += bin_size; } //Increase the column index column_offset += bin_size; } //The value of this row q[idx] = sp_mv; dot_this_row = sp_mv * fetch1DLinear<float>(s, idx); } //Perform warp scan float scanned_dot = dot_this_row; scanned_dot = warp_scan(scanned_dot); //Store the reduced warp_dot to shared memory for block scan __shared__ float warp_dot[num_warps]; if (lane_id == 31) warp_dot[warp_id] = scanned_dot; //Perform reduct on the warp_dot __syncthreads(); //MAYBE NEEDED if (warp_id == 0) { float warp_dot_reduce = 0.0f; if (lane_id < num_warps) warp_dot_reduce = warp_dot[lane_id]; //Do warp scan again warp_dot_reduce = warp_scan(warp_dot_reduce); //Store to global memory if (lane_id == 31) reduce_partials_blk6x6[blockIdx.x] = warp_dot_reduce; } } /** * \brief alpha <- nu_new / dot(q, s); x <- x + alpha * s; * t <- r - alpha * q; p <- M_inv*t; nu_new <- dot(t, p) * \tparam num_warps The FIXED number of warps in this kernel */ template<int num_warps = reduce_block_warps> __global__ void block6x6PCGKernel_1( const PtrSz<const float> s, const PtrSz<const float> r, const PtrSz<const float> q, const PtrSz<const float> inv_diag_blks, PtrSz<float> x, PtrSz<float> t, PtrSz<float> p ) { //Each block performs a reduction for alpha = dot(q, s) __shared__ float alpha; const int warp_id = threadIdx.x >> 5; const int lane_id = threadIdx.x & 31; float scanned_dot; //Perform reduction on warp_0 if (warp_id == 0) { scanned_dot = 0.0f; if (lane_id < num_reduce_blocks_6x6) { scanned_dot = reduce_partials_blk6x6[lane_id]; } scanned_dot = warp_scan(scanned_dot); if (lane_id == 31) { alpha = nu_new_blk6x6 / scanned_dot; } } //Do sync to broadcast alpha __syncthreads(); const float alpha_thread = alpha; //float alpha_thread = alpha; const int idx = threadIdx.x + blockDim.x * blockIdx.x; float dot_this_row = 0.0f; if (idx < x.size) { const int blk_idx = idx / 6; //Block matrix vector product float p_row = 0.0; 
float mat_value, r_value; for(auto j = 0; j < 6; j++) { mat_value = inv_diag_blks[6 * idx + j]; r_value = r[6 * blk_idx + j] - alpha_thread * q[6 * blk_idx + j]; p_row += mat_value * r_value; } p[idx] = p_row; //p <- M_inv * r //float r_row = r[idx]; //float q_row = q[idx]; const float r_row_new = r[idx] - alpha_thread * q[idx]; t[idx] = r_row_new; // t <- r - alpha * q x[idx] += alpha_thread * s[idx]; // x <- x + alpha s dot_this_row = p_row * r_row_new; } //Perform in block reduction on dot(q, s) scanned_dot = dot_this_row; scanned_dot = warp_scan(scanned_dot); //Store the reduced warp_dot to shared memory for block scan __shared__ float warp_dot[num_warps]; if (lane_id == 31) warp_dot[warp_id] = scanned_dot; __syncthreads(); //MAYBE NEEDED if (warp_id == 0) { float warp_dot_reduce = 0.0f; if (lane_id < num_warps) { warp_dot_reduce = warp_dot[lane_id]; } //Do warp scan again warp_dot_reduce = warp_scan(warp_dot_reduce); //Store to global memory if (lane_id == 31) reduce_partials_blk6x6[blockIdx.x] = warp_dot_reduce; } } /** * \brief nu_new <- dot(t, p); beta <- nu_new/nu_old; s <- p + beta s */ __global__ void block6x6PCGKernel_2( const PtrSz<const float> p, PtrSz<float> s ) { //Each block perform a reduce to compute beta __shared__ float beta; const int warp_id = threadIdx.x >> 5; const int lane_id = threadIdx.x & 31; if(warp_id == 0) { float dot_reduce = 0.0f; if(lane_id < num_reduce_blocks_6x6) { dot_reduce = reduce_partials_blk6x6[lane_id]; } dot_reduce = warp_scan(dot_reduce); if(lane_id == 31) { if(blockIdx.x == 0) nu_new_blk6x6 = dot_reduce; beta = dot_reduce / nu_old_blk6x6; //Debug code: seems correct //printf("Beta from device %f \n", beta); } } //Do sync to broadcast the value of beta __syncthreads(); const float beta_thread = beta; const auto idx = threadIdx.x + blockDim.x * blockIdx.x; if(idx < p.size) { s[idx] = p[idx] + beta_thread * s[idx]; } } /** * \brief nu_new <- dot(t, p); beta <- nu_new/nu_old; s <- p + beta s */ template<int num_warps = 
reduce_block_warps> __global__ void block6x6PCGKernelAlphaBeta( const PtrSz<const float> r, const PtrSz<const float> q, const PtrSz<const float> inv_diag_blks, PtrSz<float> s, PtrSz<float> x, PtrSz<float> t, PtrSz<float> p ) { //Each block performs a reduction for alpha = dot(q, s) __shared__ float alpha; __shared__ float beta; const int warp_id = threadIdx.x >> 5; const int lane_id = threadIdx.x & 31; float scanned_dot; //Perform reduction on warp_0 if (warp_id == 0) { scanned_dot = 0.0f; if (lane_id < num_reduce_blocks_6x6) { scanned_dot = reduce_partials_blk6x6[lane_id]; } scanned_dot = warp_scan(scanned_dot); if (lane_id == 31) { alpha = nu_new_blk6x6 / scanned_dot; } } //Do sync to broadcast alpha __syncthreads(); const float alpha_thread = alpha; const int idx = threadIdx.x + blockDim.x * blockIdx.x; float dot_this_row = 0.0f; if (idx < x.size) { const int blk_idx = idx / 6; //Block matrix vector product float p_row = 0.0; float mat_value, r_value; for(auto j = 0; j < 6; j++) { mat_value = inv_diag_blks[6 * idx + j]; r_value = r[6 * blk_idx + j] - alpha_thread * q[6 * blk_idx + j]; p_row += mat_value * r_value; } p[idx] = p_row; //p <- M_inv * r const float r_row_new = r[idx] - alpha_thread * q[idx]; t[idx] = r_row_new; // t <- r - alpha * q x[idx] += alpha_thread * s[idx]; // x <- x + alpha s dot_this_row = p_row * r_row_new; } //Perform in block reduction on dot(q, s) scanned_dot = dot_this_row; scanned_dot = warp_scan(scanned_dot); //Store the reduced warp_dot to shared memory for block scan __shared__ float warp_dot[num_warps]; if (lane_id == 31) warp_dot[warp_id] = scanned_dot; if (warp_id == 0) { float warp_dot_reduce = 0.0f; float dot_reduce = 0.0f; if (lane_id < num_warps) { warp_dot_reduce = warp_dot[lane_id]; } if(lane_id < num_reduce_blocks_6x6) { dot_reduce = reduce_partials_blk6x6[lane_id]; } //Do warp scan again warp_dot_reduce = warp_scan(warp_dot_reduce); dot_reduce = warp_scan(dot_reduce); //Store to global memory if (lane_id == 31) { 
reduce_partials_blk6x6[blockIdx.x] = warp_dot_reduce; if(blockIdx.x == 0) nu_new_blk6x6 = dot_reduce; beta = dot_reduce / nu_old_blk6x6; } } //Do sync to broadcast the value of beta __syncthreads(); const float beta_thread = beta; if(idx < p.size) { s[idx] = p[idx] + beta_thread * s[idx]; } } }; /* End of namespace device */ }; /* End of namespace surfelwarp */ void surfelwarp::block6x6_pcg_weber( const DeviceArray<float>& diag_blks, const DeviceArray<float>& A_data, const DeviceArray<int>& A_colptr, const DeviceArray<int>& A_rowptr, const DeviceArray<float>& b, DeviceArray<float>& x_buffer, DeviceArray<float>& inv_diag_blk_buffer, DeviceArray<float>& p_buffer, DeviceArray<float>& q_buffer, DeviceArray<float>& r_buffer, DeviceArray<float>& s_buffer, DeviceArray<float>& t_buffer, DeviceArray<float>& valid_x, int max_iters, cudaStream_t stream ) { //Correct the size of array size_t N = b.size(); DeviceArray<float> inv_diag_blks = DeviceArray<float>(inv_diag_blk_buffer.ptr(), diag_blks.size()); valid_x = DeviceArray<float>(x_buffer.ptr(), N); DeviceArray<float> p = DeviceArray<float>(p_buffer.ptr(), N); DeviceArray<float> q = DeviceArray<float>(q_buffer.ptr(), N); DeviceArray<float> r = DeviceArray<float>(r_buffer.ptr(), N); DeviceArray<float> s = DeviceArray<float>(s_buffer.ptr(), N); DeviceArray<float> t = DeviceArray<float>(t_buffer.ptr(), N); //Compute the inverse of diag blocks for pre-conditioning cudaSafeCall(cudaMemsetAsync(valid_x.ptr(), 0, sizeof(float) * valid_x.size(), stream)); block6x6_diag_inverse(diag_blks, inv_diag_blks, N / 6, stream); //The init kernel block6x6_init_kernel(b, inv_diag_blks, r, s, valid_x, stream); //The main loop for(auto i = 0; i < max_iters; i++) { block6x6_pcg_kernel_0(A_data, A_colptr, A_rowptr, s, q, stream); // block6x6_pcg_kernel_alphabeta(r, q, inv_diag_blks,s, valid_x, t, p, stream); block6x6_pcg_kernel_1(s, r, q, inv_diag_blks, valid_x, t, p, stream); block6x6_pcg_kernel_2(p, s, stream); r.swap(t); } //Sync and check error 
#if defined(CUDA_DEBUG_SYNC_CHECK) cudaSafeCall(cudaStreamSynchronize(stream)); cudaSafeCall(cudaGetLastError()); #endif } // Ignore, only used in checks and debug void surfelwarp::block6x6_pcg_weber( const DeviceArray<float>& diag_blks, const DeviceArray<float>& A_data, const DeviceArray<int>& A_colptr, const DeviceArray<int>& A_rowptr, const DeviceArray<float>& b, DeviceArray<float>& x_buffer, DeviceArray<float>& inv_diag_blk_buffer, DeviceArray<float>& p_buffer, DeviceArray<float>& q_buffer, DeviceArray<float>& r_buffer, DeviceArray<float>& s_buffer, cudaTextureObject_t s_texture, DeviceArray<float>& t_buffer, DeviceArray<float>& valid_x, int max_iters, cudaStream_t stream ) { //Correct the size of array size_t N = b.size(); DeviceArray<float> inv_diag_blks = DeviceArray<float>(inv_diag_blk_buffer.ptr(), diag_blks.size()); valid_x = DeviceArray<float>(x_buffer.ptr(), N); DeviceArray<float> p = DeviceArray<float>(p_buffer.ptr(), N); DeviceArray<float> q = DeviceArray<float>(q_buffer.ptr(), N); DeviceArray<float> r = DeviceArray<float>(r_buffer.ptr(), N); DeviceArray<float> s = DeviceArray<float>(s_buffer.ptr(), N); DeviceArray<float> t = DeviceArray<float>(t_buffer.ptr(), N); //Compute the inverse of diag blocks for pre-conditioning block6x6_diag_inverse(diag_blks, inv_diag_blks, N / 6, stream); //The init kernel block6x6_init_kernel(b, inv_diag_blks, r, s, valid_x, stream); //The main loop for (auto i = 0; i < max_iters; i++) { block6x6_pcg_kernel_0(A_data, A_colptr, A_rowptr, s_texture, q, stream); block6x6_pcg_kernel_1(s, r, q, inv_diag_blks, valid_x, t, p, stream); block6x6_pcg_kernel_2(p, s, stream); r.swap(t); } //Sync and check error #if defined(CUDA_DEBUG_SYNC_CHECK) cudaSafeCall(cudaStreamSynchronize(stream)); cudaSafeCall(cudaGetLastError()); #endif } void surfelwarp::block6x6_diag_inverse(const float * A, float * A_inversed, int num_matrix, cudaStream_t stream) { const int threads_per_blk = 64; dim3 blk(threads_per_blk); dim3 grid(divUp(num_matrix, 
blk.x)); device::matrix6x6InverseKernel<threads_per_blk><<<grid, blk, 0, stream>>>(A, A_inversed, num_matrix); //Sync and check error #if defined(CUDA_DEBUG_SYNC_CHECK) cudaSafeCall(cudaStreamSynchronize(stream)); cudaSafeCall(cudaGetLastError()); #endif } /* r <- b; s <- inv_diag_blks; mu_new <- dot(r, s) */ void surfelwarp::block6x6_init_kernel( const DeviceArray<float>& b, const DeviceArray<float>& inv_diag_blks, DeviceArray<float>& r, DeviceArray<float>& s, DeviceArray<float>& x, cudaStream_t stream ) { dim3 blk(reduce_block_threads); //dim3 grid(divUp(b.size(), blk.x)); dim3 grid(num_reduce_blocks_6x6); device::block6x6InitKernel<<<grid, blk, 0, stream>>>(b, inv_diag_blks, r, s, x); //Perform a reduction on the global memory dim3 reduce_blk(32); dim3 reduce_grid(1); device::block6x6ReducePartialKernel<<<reduce_grid, reduce_blk, 0, stream>>>(); //Sync and check error #if defined(CUDA_DEBUG_SYNC_CHECK) cudaSafeCall(cudaStreamSynchronize(stream)); cudaSafeCall(cudaGetLastError()); #endif } /* nu_old <- nu_new; q <- A s; alpha <- nu_old / dot(q, s); */ void surfelwarp::block6x6_pcg_kernel_0( const DeviceArray<float> &A_data, const DeviceArray<int> &A_colptr, const DeviceArray<int> &A_rowptr, const DeviceArray<float> &s, DeviceArray<float> &q, cudaStream_t stream ) { dim3 blk(reduce_block_threads); //dim3 grid(divUp(s.size(), blk.x)); dim3 grid(num_reduce_blocks_6x6); device::block6x6PCGKernel_0<<<grid, blk, 0, stream>>>(A_data, A_colptr, A_rowptr, s, q); //Sync and check error #if defined(CUDA_DEBUG_SYNC_CHECK) cudaSafeCall(cudaStreamSynchronize(stream)); cudaSafeCall(cudaGetLastError()); #endif } void surfelwarp::block6x6_pcg_kernel_0( const DeviceArray<float>& A_data, const DeviceArray<int>& A_colptr, const DeviceArray<int>& A_rowptr, cudaTextureObject_t s, DeviceArray<float>& q, cudaStream_t stream ) { dim3 blk(reduce_block_threads); //dim3 grid(divUp(s.size(), blk.x)); dim3 grid(num_reduce_blocks_6x6); device::block6x6PCGKernel_0<<<grid, blk, 0, 
stream>>>(A_data, A_colptr, A_rowptr, s, q); //Sync and check error #if defined(CUDA_DEBUG_SYNC_CHECK) cudaSafeCall(cudaStreamSynchronize(stream)); cudaSafeCall(cudaGetLastError()); #endif } /* alpha <- nu_new / dot(q, s); x <- x + alpha * s; * t <- r - alpha * q; p <- M_inv*t; nu_new <- dot(t, p) */ void surfelwarp::block6x6_pcg_kernel_1( const DeviceArray<float>& s, const DeviceArray<float>& r, const DeviceArray<float>& q, const DeviceArray<float>& inv_diag_blks, DeviceArray<float>& x, DeviceArray<float>& t, DeviceArray<float>& p, cudaStream_t stream ) { dim3 blk(reduce_block_threads); dim3 grid(num_reduce_blocks_6x6); device::block6x6PCGKernel_1<<<grid, blk, 0, stream>>>(s, r, q, inv_diag_blks, x, t, p); } /* alpha <- nu_new / dot(q, s); x <- x + alpha * s; * t <- r - alpha * q; p <- M_inv*t; nu_new <- dot(t, p) */ void surfelwarp::block6x6_pcg_kernel_alphabeta( const DeviceArray<float>& r, const DeviceArray<float>& q, const DeviceArray<float>& inv_diag_blks, DeviceArray<float>& s, DeviceArray<float>& x, DeviceArray<float>& t, DeviceArray<float>& p, cudaStream_t stream ) { dim3 blk(reduce_block_threads); dim3 grid(divUp(s.size(), blk.x)); // dim3 grid(num_reduce_blocks_6x6); device::block6x6PCGKernelAlphaBeta<<<grid, blk, 0, stream>>>(r, q, inv_diag_blks, s, x, t, p); } void surfelwarp::block6x6_pcg_kernel_2( const DeviceArray<float>& p, DeviceArray<float>& s, cudaStream_t stream ) { dim3 blk(256); dim3 grid(divUp(s.size(), blk.x)); device::block6x6PCGKernel_2<<<grid, blk, 0, stream>>>(p, s); } /** Below are the checking subroutines defined for 6x6 pcg solver */ void surfelwarp::checkBlock6x6Init( const std::vector<float> &b, const std::vector<float> &inv_diags, std::vector<float>& h_r, std::vector<float>& h_s ) { //Prepare the data DeviceArray<float> b_dev, d_inv_diags, r, s, x; b_dev.upload(b); d_inv_diags.upload(inv_diags); r.create(b_dev.size()); s.create(b_dev.size()); x.create(b_dev.size()); //Call the function block6x6_init_kernel(b_dev, d_inv_diags, r, 
s, x); //Check the value of dot product // cudaDeviceSynchronize(); r.download(h_r); s.download(h_s); float dot_value = 0; for(auto i = 0;i < h_s.size();i++){ dot_value += h_r[i] * h_s[i]; } //Frist check r == b assert(h_r.size() == b.size()); for(auto i = 0; i < b.size(); i++) { assert(std::abs(h_r[i] - b[i]) < 1e-4); } //Check s = inv_diag * b for(auto row = 0; row < b.size(); row++) { int blk_idx = row / 6; int inblk_offset = row % 6; int diag_offset = 36 * blk_idx; int diag_start_idx = diag_offset + 6 * inblk_offset; float s_row = 0.0f; for(auto j = 0; j < 6; j++) { s_row += inv_diags[diag_start_idx + j] * b[6 * blk_idx + j]; } assert(std::abs(s_row - h_s[row]) < 1e-4); } //Compare it with device value float dot_device; cudaMemcpyFromSymbol(&dot_device, device::nu_new_blk6x6, sizeof(float), 0, cudaMemcpyDeviceToHost); if(std::abs((dot_device - dot_value) / dot_value) > 1e-6) { std::cout << "Relative err in init kernel dot product " << std::abs((dot_device - dot_value) / dot_value) << std::endl; } } void surfelwarp::checkBlock6x6Init( const std::vector<float> &b, const std::vector<float> &inv_diags ) { std::vector<float> r, s; checkBlock6x6Init(b, inv_diags, r, s); } void surfelwarp::checkBlock6x6Kernel_0( const std::vector<float> &A_data, const std::vector<int> &A_rowptr, const std::vector<int> &A_colptr, const std::vector<float> &s, //Output for later checking std::vector<float>& q_device ) { //Prepare the data DeviceArray<float> d_A_data, s_dev, q_dev; DeviceArray<int> d_A_rowptr, d_A_colptr; d_A_data.upload(A_data); s_dev.upload(s); q_dev.create(s.size()); d_A_colptr.upload(A_colptr); d_A_rowptr.upload(A_rowptr); //Call device function block6x6_pcg_kernel_0(d_A_data, d_A_colptr, d_A_rowptr, s_dev, q_dev); //Perform matrix vector product on host const auto matrix_size = s.size(); std::vector<float> q_host; hostEigenSpMV(A_data, A_rowptr, A_colptr, matrix_size, s, q_host); //Check q = A s q_device.clear(); q_dev.download(q_device); float maximum_relative_err = 
0.0f; assert(q_device.size() == q_host.size()); for(auto i = 0; i < q_host.size(); i++) { float host_value = q_host[i]; float device_value = q_device[i]; if(std::abs(host_value - device_value) > 1e-4) { if(std::abs((host_value - device_value) / host_value) > maximum_relative_err) { maximum_relative_err = std::abs((host_value - device_value) / host_value); } } } std::cout << "The maximum relative error in SpMV " << maximum_relative_err << std::endl; //Next check the value of dot product float dev_dot_reduce[max_reduce_blocks]; cudaMemcpyFromSymbol(dev_dot_reduce, device::reduce_partials_blk6x6, sizeof(float) * max_reduce_blocks, 0, cudaMemcpyDeviceToHost); float dev_dot = 0.0f; for(auto j = 0; j < num_reduce_blocks_6x6; j++) { dev_dot += dev_dot_reduce[j]; } //Compute the dot prodcut at host float h_dot = 0.0f; for(auto j = 0; j < q_host.size(); j++) { h_dot += q_host[j] * s[j]; } assert(std::abs((h_dot - dev_dot) / dev_dot) < 1e-4); } void surfelwarp::checkBlock6x6Kernel_1( const std::vector<float> &s, const std::vector<float> &r, const std::vector<float> &q, const std::vector<float> &inv_diag_blks, std::vector<float> &x, std::vector<float> &t, std::vector<float> &p ) { //Prepare data for input DeviceArray<float> s_dev, r_dev, q_dev, inv_diag_blks_dev, x_dev, t_dev, p_dev; s_dev.upload(s); r_dev.upload(r); q_dev.upload(q); inv_diag_blks_dev.upload(inv_diag_blks); x_dev.upload(x); t_dev.create(x_dev.size()); p_dev.create(x_dev.size()); //Compute dot product on host float dev_dot_reduce[max_reduce_blocks]; cudaMemcpyFromSymbol(dev_dot_reduce, device::reduce_partials_blk6x6, sizeof(float) * max_reduce_blocks, 0, cudaMemcpyDeviceToHost); float dev_dot = 0.0f; for (auto j = 0; j < num_reduce_blocks_6x6; j++) { dev_dot += dev_dot_reduce[j]; } float dot_s_q = 0.0f; for (int j = 0; j < q.size(); j++) { dot_s_q += q[j] * s[j]; } assert(std::abs((dot_s_q - dev_dot) / dev_dot) < 1e-4); //Download nu to compute alpha float nu_old_host, nu_new_host; 
cudaMemcpyFromSymbol(&nu_old_host, device::nu_old_blk6x6, sizeof(float), 0, cudaMemcpyDeviceToHost); cudaMemcpyFromSymbol(&nu_new_host, device::nu_new_blk6x6, sizeof(float), 0, cudaMemcpyDeviceToHost); #if defined(CUDA_DEBUG_SYNC_CHECK) cudaSafeCall(cudaDeviceSynchronize()); cudaSafeCall(cudaGetLastError()); #endif assert(std::abs(nu_new_host - nu_old_host) < 1e-7); const float alpha = nu_old_host / dot_s_q; //The value of alpha is correct //std::cout << "Alpha from host " << alpha << std::endl; //Invoke the device version function block6x6_pcg_kernel_1(s_dev, r_dev, q_dev, inv_diag_blks_dev, x_dev, t_dev, p_dev); #if defined(CUDA_DEBUG_SYNC_CHECK) cudaSafeCall(cudaDeviceSynchronize()); cudaSafeCall(cudaGetLastError()); #endif //Check x <- x + alpha * s for (auto i = 0; i < x.size(); i++) { x[i] += alpha * s[i]; } std::vector<float> h_x_dev; x_dev.download(h_x_dev); assert(s.size() == x.size()); auto max_relative_err = maxRelativeError(h_x_dev, x); if(max_relative_err > 1e-5) { std::cout << "Max relative err for x <- x + alpha s is " << max_relative_err << std::endl; } //Check t <- r - alpha * q; t.resize(s.size()); std::vector<float> h_t_dev; t_dev.download(h_t_dev); for(auto j = 0;j < t.size(); j++) { t[j] = r[j] - alpha * q[j]; assert(std::abs(t[j] - h_t_dev[j]) < 1e-4); } //Check p <- M_inv*t; std::vector<float> h_p_dev; p_dev.download(h_p_dev); p.resize(x.size()); for (auto row = 0; row < t.size(); row++) { int blk_idx = row / 6; int inblk_offset = row % 6; int diag_offset = 36 * blk_idx; int diag_start_idx = diag_offset + 6 * inblk_offset; float p_row = 0.0f; for (auto j = 0; j < 6; j++) { p_row += inv_diag_blks[diag_start_idx + j] * t[6 * blk_idx + j]; } p[row] = p_row; } max_relative_err = maxRelativeError(h_p_dev, p, 1e-5); if(max_relative_err > 1e-5) { std::cout << "Relative error for p <- Minv t " << max_relative_err << std::endl; } //Check for nu_new <- dot(t, p) float dot_t_p = 0.0f; for(auto j = 0; j < p.size(); j++) { //dot_t_p += h_t_dev[j] * p[j]; 
dot_t_p += t[j] * p[j]; } //Download the result to host cudaMemcpyFromSymbol(dev_dot_reduce, device::reduce_partials_blk6x6, sizeof(float) * max_reduce_blocks, 0, cudaMemcpyDeviceToHost); dev_dot = 0.0f; for (auto j = 0; j < num_reduce_blocks_6x6; j++) { dev_dot += dev_dot_reduce[j]; } //Compare it assert(std::abs((dev_dot - dot_t_p) / dot_t_p) < 1e-4); } void surfelwarp::checkBlock6x6Kernel_2( const std::vector<float> &p, std::vector<float> &s ) { //Prepare for device input DeviceArray<float> p_dev, s_dev; assert(s.size() == p.size()); p_dev.upload(p); s_dev.upload(s); //Compute the beta at host float parital_reduce[max_reduce_blocks]; float nu_old_host; cudaMemcpyFromSymbol(&nu_old_host, device::nu_old_blk6x6, sizeof(float), 0, cudaMemcpyDeviceToHost); cudaMemcpyFromSymbol(parital_reduce, device::reduce_partials_blk6x6, sizeof(float) * max_reduce_blocks, 0, cudaMemcpyDeviceToHost); #if defined(CUDA_DEBUG_SYNC_CHECK) cudaSafeCall(cudaDeviceSynchronize()); cudaSafeCall(cudaGetLastError()); #endif float nu_new_host = 0.0f; for(auto j = 0; j < num_reduce_blocks_6x6; j++) { nu_new_host += parital_reduce[j]; } float beta = nu_new_host / nu_old_host; //Debug code, seems correct //std::cout << "Beta on host " << beta << std::endl; //Invoke the kernel block6x6_pcg_kernel_2(p_dev, s_dev); //Download the nu_new from device float nu_new_device; cudaMemcpyFromSymbol(&nu_new_device, device::nu_new_blk6x6, sizeof(float), 0, cudaMemcpyDeviceToHost); #if defined(CUDA_DEBUG_SYNC_CHECK) cudaSafeCall(cudaDeviceSynchronize()); cudaSafeCall(cudaGetLastError()); #endif //Check that value: seems correct assert(std::abs((nu_new_host - nu_new_device) / nu_new_host) < 1e-4); //Check s <- p + beta s: seems correct std::vector<float> h_s_dev; s_dev.download(h_s_dev); for (auto i = 0; i < h_s_dev.size(); ++i) { s[i] = beta * s[i] + p[i]; } auto relative_err = maxRelativeError(s, h_s_dev); if(relative_err > 1e-4) { std::cout << "Max relative error in s <- p + beta s " << relative_err << 
std::endl; } }
9281ede1944b25f7bbb7aa3709c89fd5eaf58bef.hip
// !!! This is a file automatically generated by hipify!!! /** * \file rational_approximant.cu * \brief The generic initialization file for poles/hosts for RA based evaulation of the matrix exponential * * \author Nicholas Curtis * \date 03/09/2015 * * Contains initialization and declaration of RA */ //cf #include <hip/hip_complex.h> #include "header_hip.cuh" extern "C" { #include "cf.h" } #include "solver_options.cuh" #include "gpu_macros.cuh" __device__ __constant__ hipDoubleComplex poles[N_RA]; __device__ __constant__ hipDoubleComplex res[N_RA]; /** * \brief get poles and residues for rational approximant to matrix exponential */ void find_poles_and_residuals() { // get poles and residues for rational approximant to matrix exponential double *poles_r = (double *) calloc (N_RA, sizeof(double)); double *poles_i = (double *) calloc (N_RA, sizeof(double)); double *res_r = (double *) calloc (N_RA, sizeof(double)); double *res_i = (double *) calloc (N_RA, sizeof(double)); cf (N_RA, poles_r, poles_i, res_r, res_i); hipDoubleComplex polesHost[N_RA]; hipDoubleComplex resHost[N_RA]; for (int i = 0; i < N_RA; ++i) { polesHost[i] = make_cuDoubleComplex(poles_r[i], poles_i[i]); resHost[i] = make_cuDoubleComplex(res_r[i], res_i[i]); } // free memory free (poles_r); free (poles_i); free (res_r); free (res_i); //copy to GPU memory cudaErrorCheck( hipMemcpyToSymbol (poles, polesHost, N_RA * sizeof(hipDoubleComplex), 0, hipMemcpyHostToDevice) ); cudaErrorCheck( hipMemcpyToSymbol (res, resHost, N_RA * sizeof(hipDoubleComplex), 0, hipMemcpyHostToDevice) ); }
9281ede1944b25f7bbb7aa3709c89fd5eaf58bef.cu
/** * \file rational_approximant.cu * \brief The generic initialization file for poles/hosts for RA based evaulation of the matrix exponential * * \author Nicholas Curtis * \date 03/09/2015 * * Contains initialization and declaration of RA */ //cf #include <cuComplex.h> #include "header.cuh" extern "C" { #include "cf.h" } #include "solver_options.cuh" #include "gpu_macros.cuh" __device__ __constant__ cuDoubleComplex poles[N_RA]; __device__ __constant__ cuDoubleComplex res[N_RA]; /** * \brief get poles and residues for rational approximant to matrix exponential */ void find_poles_and_residuals() { // get poles and residues for rational approximant to matrix exponential double *poles_r = (double *) calloc (N_RA, sizeof(double)); double *poles_i = (double *) calloc (N_RA, sizeof(double)); double *res_r = (double *) calloc (N_RA, sizeof(double)); double *res_i = (double *) calloc (N_RA, sizeof(double)); cf (N_RA, poles_r, poles_i, res_r, res_i); cuDoubleComplex polesHost[N_RA]; cuDoubleComplex resHost[N_RA]; for (int i = 0; i < N_RA; ++i) { polesHost[i] = make_cuDoubleComplex(poles_r[i], poles_i[i]); resHost[i] = make_cuDoubleComplex(res_r[i], res_i[i]); } // free memory free (poles_r); free (poles_i); free (res_r); free (res_i); //copy to GPU memory cudaErrorCheck( cudaMemcpyToSymbol (poles, polesHost, N_RA * sizeof(cuDoubleComplex), 0, cudaMemcpyHostToDevice) ); cudaErrorCheck( cudaMemcpyToSymbol (res, resHost, N_RA * sizeof(cuDoubleComplex), 0, cudaMemcpyHostToDevice) ); }
392055559769cefaa46f6d5336d8b712f11586c8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <random> using namespace std; // Matrices are stored in row-major order: // M(row, column) = *(M.elements + row * M.width + col) typedef struct { int width; int height; float * elements; } Matrix; // Thread block size #define BLOCK_SIZE 16 // Forward declaration of the matrix multiplication kernel __global__ void MatMulKernel(const Matrix, const Matrix, Matrix); // Forward declaration of sequential CPU function void sequential_cpu(Matrix A, Matrix B, Matrix C); // Matrix multiplication - Host code // Matrix dimensions are assumed to be multiples of BLOCK_SIZE void MatMu(const Matrix A, const Matrix B, Matrix C) { // Load A and B to device memory Matrix d_A; d_A.width = A.width; d_A.height = A.height; size_t size = A.width * A.height * sizeof(float); hipMalloc(&d_A.elements, size); hipMemcpy(d_A.elements, A.elements, size, hipMemcpyHostToDevice); Matrix d_B; d_B.width = B.width; d_B.height = B.height; size = B.width * B.height * sizeof(float); hipMalloc(&d_B.elements, size); hipMemcpy(d_B.elements, B.elements, size, hipMemcpyHostToDevice); // Allocate C in device memory Matrix d_C; d_C.width = C.width; d_C.height = C.height; size = C.width * C.height * sizeof(float); hipMalloc(&d_C.elements, size); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); // Invoke kernel dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid(B.width/dimBlock.x, A.height/dimBlock.y); hipEventRecord(start); hipLaunchKernelGGL(( MatMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C); hipEventRecord(stop); // Read C from device memory hipMemcpy(C.elements, d_C.elements, size, hipMemcpyDeviceToHost); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); cout << "Kernel call took " << milliseconds << " milliseconds" << endl; // Free device memory hipFree(d_A.elements); hipFree(d_B.elements); 
hipFree(d_C.elements); /* hipEventRecord(start); sequential_cpu(A, B, C); hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds, start, stop); cout << "Sequential CPU function call took " << milliseconds << " milliseconds" << endl; */ } // Matrix multiplication kernel called by MatMul() __global__ void MatMulKernel(Matrix A, Matrix B, Matrix C) { // Each thread computes one element of C // by accumulating results into Cvalue float Cvalue = 0; int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; for (int e = 0; e < A.width; ++e) Cvalue += A.elements[row * A.width + e] * B.elements[e * B.width + col]; C.elements[row * C.width + col] = Cvalue; } // Sequential CPU version is given for comparison void sequential_cpu(Matrix A, Matrix B, Matrix C) { for(int i = 0; i < C.height; ++i) { for(int j = 0; j < C.width; ++j) { C.elements[i*C.width + j] = 0; for(int ac = 0; ac < A.width; ++ac) { for(int br = 0; br < B.height; ++br) { C.elements[i*C.width + j] += A.elements[i*A.width + ac]*B.elements[j + br*B.width]; } } } } } int main() { int n; size_t size; std::default_random_engine generator; std::uniform_real_distribution<float> distribution(-1.0,1.0); Matrix A; A.width = BLOCK_SIZE*150; A.height = BLOCK_SIZE*100; n = A.width * A.height; size = n * sizeof(float); A.elements = (float*)malloc(size); for(int i = 0; i < n; ++i) A.elements[i] = distribution(generator); Matrix B; B.width = BLOCK_SIZE*200; B.height = A.width; n = B.width * B.height; size = n * sizeof(float); B.elements = (float*)malloc(size); for(int i = 0; i < n; ++i) B.elements[i] = distribution(generator); Matrix C; C.width = B.width; C.height = A.height; n = C.width * C.height; size = n * sizeof(float); C.elements = (float*)malloc(size); for(int i = 0; i < 5; ++i) { printf("i=%d\n",i); MatMu(A, B, C); } }
392055559769cefaa46f6d5336d8b712f11586c8.cu
#include <iostream> #include <random> using namespace std; // Matrices are stored in row-major order: // M(row, column) = *(M.elements + row * M.width + col) typedef struct { int width; int height; float * elements; } Matrix; // Thread block size #define BLOCK_SIZE 16 // Forward declaration of the matrix multiplication kernel __global__ void MatMulKernel(const Matrix, const Matrix, Matrix); // Forward declaration of sequential CPU function void sequential_cpu(Matrix A, Matrix B, Matrix C); // Matrix multiplication - Host code // Matrix dimensions are assumed to be multiples of BLOCK_SIZE void MatMu(const Matrix A, const Matrix B, Matrix C) { // Load A and B to device memory Matrix d_A; d_A.width = A.width; d_A.height = A.height; size_t size = A.width * A.height * sizeof(float); cudaMalloc(&d_A.elements, size); cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice); Matrix d_B; d_B.width = B.width; d_B.height = B.height; size = B.width * B.height * sizeof(float); cudaMalloc(&d_B.elements, size); cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice); // Allocate C in device memory Matrix d_C; d_C.width = C.width; d_C.height = C.height; size = C.width * C.height * sizeof(float); cudaMalloc(&d_C.elements, size); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); // Invoke kernel dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid(B.width/dimBlock.x, A.height/dimBlock.y); cudaEventRecord(start); MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C); cudaEventRecord(stop); // Read C from device memory cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); cout << "Kernel call took " << milliseconds << " milliseconds" << endl; // Free device memory cudaFree(d_A.elements); cudaFree(d_B.elements); cudaFree(d_C.elements); /* cudaEventRecord(start); sequential_cpu(A, B, C); cudaEventRecord(stop); 
cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); cout << "Sequential CPU function call took " << milliseconds << " milliseconds" << endl; */ } // Matrix multiplication kernel called by MatMul() __global__ void MatMulKernel(Matrix A, Matrix B, Matrix C) { // Each thread computes one element of C // by accumulating results into Cvalue float Cvalue = 0; int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; for (int e = 0; e < A.width; ++e) Cvalue += A.elements[row * A.width + e] * B.elements[e * B.width + col]; C.elements[row * C.width + col] = Cvalue; } // Sequential CPU version is given for comparison void sequential_cpu(Matrix A, Matrix B, Matrix C) { for(int i = 0; i < C.height; ++i) { for(int j = 0; j < C.width; ++j) { C.elements[i*C.width + j] = 0; for(int ac = 0; ac < A.width; ++ac) { for(int br = 0; br < B.height; ++br) { C.elements[i*C.width + j] += A.elements[i*A.width + ac]*B.elements[j + br*B.width]; } } } } } int main() { int n; size_t size; std::default_random_engine generator; std::uniform_real_distribution<float> distribution(-1.0,1.0); Matrix A; A.width = BLOCK_SIZE*150; A.height = BLOCK_SIZE*100; n = A.width * A.height; size = n * sizeof(float); A.elements = (float*)malloc(size); for(int i = 0; i < n; ++i) A.elements[i] = distribution(generator); Matrix B; B.width = BLOCK_SIZE*200; B.height = A.width; n = B.width * B.height; size = n * sizeof(float); B.elements = (float*)malloc(size); for(int i = 0; i < n; ++i) B.elements[i] = distribution(generator); Matrix C; C.width = B.width; C.height = A.height; n = C.width * C.height; size = n * sizeof(float); C.elements = (float*)malloc(size); for(int i = 0; i < 5; ++i) { printf("i=%d\n",i); MatMu(A, B, C); } }
8978df62d724664f41e10c7d56dfb8cc78b202c0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define CUB_HALF_OPTIMIZATION 1 #include <benchmark/benchmark.h> #include <type_traits> #include <utility> #include "init/init.hpp" #include "reduction/args.hpp" #include "utils/utils.hpp" #include "kernel_hip.cuh" #include <hipcub/hipcub.hpp> using namespace wmma_reduction; template <size_t BASE_SEGMENT_SIZE, size_t SEGMENT_SIZE, int WARPS_PER_BLOCK> void tryCUDA_WMMA_FULL_REDUCTION_2KERS(benchmark::State &state) { const int BLOCK_DIM = WARPS_PER_BLOCK * WARP_SIZE; const size_t num_elements = state.range(0); const size_t num_segments = (num_elements + SEGMENT_SIZE - 1) / SEGMENT_SIZE; if (num_elements % SEGMENT_SIZE) { state.SkipWithError("num_elements must be multiples of SEGMENT_SIZE"); return; } half *d_in_fp16 = nullptr; half *d_out = nullptr; half *d_temp_out = nullptr; PRINT_IF_ERROR(hipMalloc(&d_in_fp16, num_elements * sizeof(half))); PRINT_IF_ERROR(hipMalloc(&d_out, 1 * sizeof(half))); PRINT_IF_ERROR(hipMalloc(&d_temp_out, num_segments * sizeof(half))); cuda_memory_set(d_in_fp16, 0.001f, num_elements); dim3 gridDim, blockDim; blockDim.x = BLOCK_DIM; gridDim.x = (num_segments + WARPS_PER_BLOCK - 1) / WARPS_PER_BLOCK; if (gridDim.x >= CUDA_MAX_GRID_SIZE) { state.SkipWithError( fmt::format("gridDim.x={} is greater than CUDA_MAX_GRID_SIZE", gridDim.x) .c_str()); return; } void *d_temp_storage = NULL; size_t temp_storage_bytes = 0; PRINT_IF_ERROR(hipcub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_temp_out, d_out, num_segments)); PRINT_IF_ERROR(hipMalloc(&d_temp_storage, temp_storage_bytes)); hipEvent_t start, stop; PRINT_IF_ERROR(hipEventCreate(&start)); PRINT_IF_ERROR(hipEventCreate(&stop)); defer(hipEventDestroy(start)); defer(hipEventDestroy(stop)); try { for (auto _ : state) { PRINT_IF_ERROR(hipEventRecord(start)); switch (BASE_SEGMENT_SIZE) { case 16: hipLaunchKernelGGL(( compute_wmma_segmented_reduction_16n<SEGMENT_SIZE, WARPS_PER_BLOCK, BLOCK_DIM>) , 
dim3(gridDim), dim3(blockDim), 0, 0, d_in_fp16, d_temp_out, num_segments); break; case 256: hipLaunchKernelGGL(( compute_wmma_segmented_reduction_256n<half, SEGMENT_SIZE, WARPS_PER_BLOCK, BLOCK_DIM>) , dim3(gridDim), dim3(blockDim), 0, 0, d_in_fp16, d_temp_out, num_segments); break; default: static_assert(true, "only 16 and 256 base segment sizes are support"); } #if 1 hipcub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_temp_out, d_out, num_segments); #else hipLaunchKernelGGL(( compute_wmma_warp_reduction<1, BLOCK_DIM>) , dim3(1), dim3(WARP_SIZE), 0, 0, d_temp_out, d_out, 1, num_segments); #endif PRINT_IF_ERROR(hipEventRecord(stop)); PRINT_IF_ERROR(hipEventSynchronize(stop)); state.PauseTiming(); float msecTotal = 0.0f; PRINT_IF_ERROR(hipEventElapsedTime(&msecTotal, start, stop)); state.SetIterationTime(msecTotal / 1000); state.ResumeTiming(); } state.counters.insert({{"num_elements", num_elements}, {"num_segments", num_segments}, {"segment_size", SEGMENT_SIZE}, {"warps_per_block", WARPS_PER_BLOCK}, {"flops", {state.iterations() * 1.0 * num_elements, benchmark::Counter::kAvgThreadsRate}}}); #if 0 half h_out; PRINT_IF_ERROR( hipMemcpy(&h_out, d_out, 1 * sizeof(half), hipMemcpyDeviceToHost)); int errors = 0; float correct_sum = 0; for (int i = 0; i < num_elements; i++) { correct_sum += h_in[i]; } if (fabs(half_to_float(h_out) - correct_sum) > 0.001) { errors++; printf("Expected Reuction = %f, got h_out = %f\n", correct_sum, half_to_float(h_out)); } if (errors > 0) { printf("CUDA_WMMA_FULL_REDUCTION does not agree with SEQUENTIAL! %d " "errors!\n", errors); } else { printf("Results verified: they agree.\n\n"); } #endif hipFree(d_in_fp16); hipFree(d_out); hipFree(d_temp_out); hipFree(d_temp_storage); } catch (...) 
{ hipFree(d_in_fp16); hipFree(d_out); hipFree(d_temp_out); hipFree(d_temp_storage); hipDeviceReset(); const auto p = std::current_exception(); std::rethrow_exception(p); } } template <size_t BASE_SEGMENT_SIZE, size_t SEGMENT_SIZE, int WARPS_PER_BLOCK> void CUDA_WMMA_FULL_REDUCTION_2KERS(benchmark::State &state) { hipDeviceReset(); try { tryCUDA_WMMA_FULL_REDUCTION_2KERS<BASE_SEGMENT_SIZE, SEGMENT_SIZE, WARPS_PER_BLOCK>( state); } catch (const std::exception &e) { state.SkipWithError(e.what()); } catch (const std::string &e) { state.SkipWithError(e.c_str()); } catch (...) { state.SkipWithError("unknown exception"); } } template <size_t SEGMENT_SIZE, int WARPS_PER_BLOCK> void CUDA_WMMA_FULL_REDUCTION_2KERS_BASE_16(benchmark::State &state) { CUDA_WMMA_FULL_REDUCTION_2KERS<16, SEGMENT_SIZE, WARPS_PER_BLOCK>(state); } template <size_t SEGMENT_SIZE, int WARPS_PER_BLOCK> void CUDA_WMMA_FULL_REDUCTION_2KERS_BASE_256(benchmark::State &state) { CUDA_WMMA_FULL_REDUCTION_2KERS<256, SEGMENT_SIZE, WARPS_PER_BLOCK>(state); } #define BENCHMARK_REDUCTION0(SEGMENT_SIZE, WARPS_PER_BLOCK) \ BENCHMARK_TEMPLATE(CUDA_WMMA_FULL_REDUCTION_2KERS_BASE_16, SEGMENT_SIZE, \ WARPS_PER_BLOCK) \ ->ARGS() \ ->UseManualTime(); \ BENCHMARK_TEMPLATE(CUDA_WMMA_FULL_REDUCTION_2KERS_BASE_256, SEGMENT_SIZE, \ WARPS_PER_BLOCK) \ ->ARGS() \ ->UseManualTime() #define BENCHMARK_REDUCTION(SEGMENT_SIZE) \ BENCHMARK_REDUCTION0(SEGMENT_SIZE, 1); \ BENCHMARK_REDUCTION0(SEGMENT_SIZE, 2); \ BENCHMARK_REDUCTION0(SEGMENT_SIZE, 4); \ BENCHMARK_REDUCTION0(SEGMENT_SIZE, 8); \ BENCHMARK_REDUCTION0(SEGMENT_SIZE, 16) BENCHMARK_REDUCTION(256); BENCHMARK_REDUCTION(2 * 256); BENCHMARK_REDUCTION(4 * 256); BENCHMARK_REDUCTION(8 * 256); BENCHMARK_REDUCTION(16 * 256); // BENCHMARK_REDUCTION(32 * 256); // BENCHMARK_REDUCTION(64 * 256); /* BENCHMARK_REDUCTION(128 * 256); */ /* BENCHMARK_REDUCTION(256 * 256); */ /* BENCHMARK_REDUCTION(512 * 256); */ /* BENCHMARK_REDUCTION(1024 * 256); */
8978df62d724664f41e10c7d56dfb8cc78b202c0.cu
#define CUB_HALF_OPTIMIZATION 1 #include <benchmark/benchmark.h> #include <type_traits> #include <utility> #include "init/init.hpp" #include "reduction/args.hpp" #include "utils/utils.hpp" #include "kernel.cuh" #include <cub/cub.cuh> using namespace wmma_reduction; template <size_t BASE_SEGMENT_SIZE, size_t SEGMENT_SIZE, int WARPS_PER_BLOCK> void tryCUDA_WMMA_FULL_REDUCTION_2KERS(benchmark::State &state) { const int BLOCK_DIM = WARPS_PER_BLOCK * WARP_SIZE; const size_t num_elements = state.range(0); const size_t num_segments = (num_elements + SEGMENT_SIZE - 1) / SEGMENT_SIZE; if (num_elements % SEGMENT_SIZE) { state.SkipWithError("num_elements must be multiples of SEGMENT_SIZE"); return; } half *d_in_fp16 = nullptr; half *d_out = nullptr; half *d_temp_out = nullptr; PRINT_IF_ERROR(cudaMalloc(&d_in_fp16, num_elements * sizeof(half))); PRINT_IF_ERROR(cudaMalloc(&d_out, 1 * sizeof(half))); PRINT_IF_ERROR(cudaMalloc(&d_temp_out, num_segments * sizeof(half))); cuda_memory_set(d_in_fp16, 0.001f, num_elements); dim3 gridDim, blockDim; blockDim.x = BLOCK_DIM; gridDim.x = (num_segments + WARPS_PER_BLOCK - 1) / WARPS_PER_BLOCK; if (gridDim.x >= CUDA_MAX_GRID_SIZE) { state.SkipWithError( fmt::format("gridDim.x={} is greater than CUDA_MAX_GRID_SIZE", gridDim.x) .c_str()); return; } void *d_temp_storage = NULL; size_t temp_storage_bytes = 0; PRINT_IF_ERROR(cub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_temp_out, d_out, num_segments)); PRINT_IF_ERROR(cudaMalloc(&d_temp_storage, temp_storage_bytes)); cudaEvent_t start, stop; PRINT_IF_ERROR(cudaEventCreate(&start)); PRINT_IF_ERROR(cudaEventCreate(&stop)); defer(cudaEventDestroy(start)); defer(cudaEventDestroy(stop)); try { for (auto _ : state) { PRINT_IF_ERROR(cudaEventRecord(start)); switch (BASE_SEGMENT_SIZE) { case 16: compute_wmma_segmented_reduction_16n<SEGMENT_SIZE, WARPS_PER_BLOCK, BLOCK_DIM> <<<gridDim, blockDim>>>(d_in_fp16, d_temp_out, num_segments); break; case 256: 
compute_wmma_segmented_reduction_256n<half, SEGMENT_SIZE, WARPS_PER_BLOCK, BLOCK_DIM> <<<gridDim, blockDim>>>(d_in_fp16, d_temp_out, num_segments); break; default: static_assert(true, "only 16 and 256 base segment sizes are support"); } #if 1 cub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_temp_out, d_out, num_segments); #else compute_wmma_warp_reduction<1, BLOCK_DIM> <<<1, WARP_SIZE>>>(d_temp_out, d_out, 1, num_segments); #endif PRINT_IF_ERROR(cudaEventRecord(stop)); PRINT_IF_ERROR(cudaEventSynchronize(stop)); state.PauseTiming(); float msecTotal = 0.0f; PRINT_IF_ERROR(cudaEventElapsedTime(&msecTotal, start, stop)); state.SetIterationTime(msecTotal / 1000); state.ResumeTiming(); } state.counters.insert({{"num_elements", num_elements}, {"num_segments", num_segments}, {"segment_size", SEGMENT_SIZE}, {"warps_per_block", WARPS_PER_BLOCK}, {"flops", {state.iterations() * 1.0 * num_elements, benchmark::Counter::kAvgThreadsRate}}}); #if 0 half h_out; PRINT_IF_ERROR( cudaMemcpy(&h_out, d_out, 1 * sizeof(half), cudaMemcpyDeviceToHost)); int errors = 0; float correct_sum = 0; for (int i = 0; i < num_elements; i++) { correct_sum += h_in[i]; } if (fabs(half_to_float(h_out) - correct_sum) > 0.001) { errors++; printf("Expected Reuction = %f, got h_out = %f\n", correct_sum, half_to_float(h_out)); } if (errors > 0) { printf("CUDA_WMMA_FULL_REDUCTION does not agree with SEQUENTIAL! %d " "errors!\n", errors); } else { printf("Results verified: they agree.\n\n"); } #endif cudaFree(d_in_fp16); cudaFree(d_out); cudaFree(d_temp_out); cudaFree(d_temp_storage); } catch (...) 
{ cudaFree(d_in_fp16); cudaFree(d_out); cudaFree(d_temp_out); cudaFree(d_temp_storage); cudaDeviceReset(); const auto p = std::current_exception(); std::rethrow_exception(p); } } template <size_t BASE_SEGMENT_SIZE, size_t SEGMENT_SIZE, int WARPS_PER_BLOCK> void CUDA_WMMA_FULL_REDUCTION_2KERS(benchmark::State &state) { cudaDeviceReset(); try { tryCUDA_WMMA_FULL_REDUCTION_2KERS<BASE_SEGMENT_SIZE, SEGMENT_SIZE, WARPS_PER_BLOCK>( state); } catch (const std::exception &e) { state.SkipWithError(e.what()); } catch (const std::string &e) { state.SkipWithError(e.c_str()); } catch (...) { state.SkipWithError("unknown exception"); } } template <size_t SEGMENT_SIZE, int WARPS_PER_BLOCK> void CUDA_WMMA_FULL_REDUCTION_2KERS_BASE_16(benchmark::State &state) { CUDA_WMMA_FULL_REDUCTION_2KERS<16, SEGMENT_SIZE, WARPS_PER_BLOCK>(state); } template <size_t SEGMENT_SIZE, int WARPS_PER_BLOCK> void CUDA_WMMA_FULL_REDUCTION_2KERS_BASE_256(benchmark::State &state) { CUDA_WMMA_FULL_REDUCTION_2KERS<256, SEGMENT_SIZE, WARPS_PER_BLOCK>(state); } #define BENCHMARK_REDUCTION0(SEGMENT_SIZE, WARPS_PER_BLOCK) \ BENCHMARK_TEMPLATE(CUDA_WMMA_FULL_REDUCTION_2KERS_BASE_16, SEGMENT_SIZE, \ WARPS_PER_BLOCK) \ ->ARGS() \ ->UseManualTime(); \ BENCHMARK_TEMPLATE(CUDA_WMMA_FULL_REDUCTION_2KERS_BASE_256, SEGMENT_SIZE, \ WARPS_PER_BLOCK) \ ->ARGS() \ ->UseManualTime() #define BENCHMARK_REDUCTION(SEGMENT_SIZE) \ BENCHMARK_REDUCTION0(SEGMENT_SIZE, 1); \ BENCHMARK_REDUCTION0(SEGMENT_SIZE, 2); \ BENCHMARK_REDUCTION0(SEGMENT_SIZE, 4); \ BENCHMARK_REDUCTION0(SEGMENT_SIZE, 8); \ BENCHMARK_REDUCTION0(SEGMENT_SIZE, 16) BENCHMARK_REDUCTION(256); BENCHMARK_REDUCTION(2 * 256); BENCHMARK_REDUCTION(4 * 256); BENCHMARK_REDUCTION(8 * 256); BENCHMARK_REDUCTION(16 * 256); // BENCHMARK_REDUCTION(32 * 256); // BENCHMARK_REDUCTION(64 * 256); /* BENCHMARK_REDUCTION(128 * 256); */ /* BENCHMARK_REDUCTION(256 * 256); */ /* BENCHMARK_REDUCTION(512 * 256); */ /* BENCHMARK_REDUCTION(1024 * 256); */
a2bceff43508d4200ed86bb4ce62c384a2ec5ff0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdlib> #include <cstdio> #include <cstring> #include <hiprand/hiprand_kernel.h> #include <unistd.h> #include "debug.h" #include "types.h" #include "EasyBMP.h" #define ENABLE_CHECK // Round a / b to nearest higher integer value inline int iDivUp(int a, int b) { return (a % b != 0) ? (a / b + 1) : (a / b); } // Align a to nearest higher multiple of b inline int iAlignUp(int a, int b) { return (a % b != 0) ? (a - a % b + b) : a; } static void generate_scene(t_sphere * spheres, int n_spheres, t_light * lights, int n_lights) { int n_random_coord = n_spheres * 3 + n_lights * 3; int n_random_rad = n_spheres; int n_random_colors = n_spheres * 3; size_t n = n_random_coord + n_random_rad + n_random_colors; hiprandGenerator_t gen; float *devData, *hostData; hostData = (float *)calloc(n, sizeof(float)); if (!hostData) { fprintf(stderr, "Malloc error, exiting\n"); exit(-1); } CUDA_CALL( hipMalloc((void **)&devData, n*sizeof(float)) ); CURAND_CALL( hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT) ); CURAND_CALL( hiprandSetPseudoRandomGeneratorSeed(gen, (unsigned long long)time(NULL)) ); CURAND_CALL( hiprandGenerateUniform(gen, devData, n) ); CUDA_CALL( hipMemcpy(hostData, devData, n * sizeof(float), hipMemcpyDeviceToHost) ); float x_pos = 0.9f; float y_pos = BOX_SIZE / 5.0; for (int i = 0; i < n_spheres; i++) { spheres[i].center.x = x_pos; spheres[i].center.y = y_pos; x_pos += BOX_SIZE / (n_spheres / 2.0); if ( x_pos > BOX_SIZE - 0.9) { x_pos = 0.9f; y_pos = BOX_SIZE / 2.5 ; } } int j = 0; for (int i = 0; i < n_spheres; i++) { spheres[i].center.x += 2.0 * (hostData[j++] - 0.5); spheres[i].center.y += 2.0 * (hostData[j++] - 0.5); spheres[i].center.z = hostData[j++] * BOX_SIZE_Z + DISTANCE; spheres[i].radius = hostData[j++] * RADIUS_MAX + RADIUS_MIN; spheres[i].red = hostData[j++] / (DEPTH_MAX - 3); spheres[i].green = hostData[j++] / (DEPTH_MAX - 3); spheres[i].blue = 
hostData[j++] / (DEPTH_MAX - 3); } for (int i = 0; i < n_lights; i++) { lights[i].x = (hostData[j++] - 0.5) * BOX_SIZE * 6; lights[i].y = (hostData[j++] - 0.5) * BOX_SIZE * 6; lights[i].z = hostData[j++] * DISTANCE/2.0; } CURAND_CALL( hiprandDestroyGenerator(gen) ); CUDA_CALL( hipFree(devData) ); free(hostData); } __global__ void kernel( unsigned char * dev_image_red, unsigned char * dev_image_blue, unsigned char * dev_image_green, int height, int width, t_sphere * spheres, int n_spheres, t_light * lights, int n_lights); static void ray_trace( unsigned char * pR, unsigned char * pG, unsigned char * pB, int height, int width, int n_spheres, int n_lights, char** values) { //#define STACK_INCREASE #ifdef STACK_INCREASE size_t stack=0; CUDA_CALL( hipDeviceGetLimit(&stack, hipLimitStackSize) ); printf ("Cuda stack size %ld \n", stack); stack = 1536; printf ("Setting cuda stack size to %ld \n", stack); CUDA_CALL( hipDeviceSetLimit(hipLimitStackSize, stack) ); #endif //hipDeviceSetCacheConfig(hipFuncCachePreferL1); //hipDeviceSetCacheConfig(hipFuncCachePreferShared); t_sphere * spheres = (t_sphere *) malloc (sizeof(t_sphere) * n_spheres); t_light * lights = (t_light *) malloc (sizeof(t_light) * n_lights); if (lights == NULL || spheres == NULL) { fprintf(stderr, "Malloc error, exiting\n"); exit(-1); } if (!values) generate_scene(spheres, n_spheres, lights, n_lights); else { // Parse scene from the command line. 
char** value = values; value++; // skip n_spheres for (int i = 0; i < n_spheres; i++) { spheres[i].center.x = atof(*(value++)); spheres[i].center.y = atof(*(value++)); spheres[i].center.z = atof(*(value++)); spheres[i].radius = atof(*(value++)); spheres[i].red = atof(*(value++)); spheres[i].green = atof(*(value++)); spheres[i].blue = atof(*(value++)); } value++; // skip n_lights for (int i = 0; i < n_lights; i++) { lights[i].x = atof(*(value++)); lights[i].y = atof(*(value++)); lights[i].z = atof(*(value++)); } } #ifdef DEBUG print_spheres(spheres, n_spheres); print_lights(lights, n_lights); #endif t_sphere * dev_spheres; t_light * dev_lights; CUDA_CALL( hipMalloc((void **)&dev_spheres, sizeof(t_sphere) * n_spheres ) ); CUDA_CALL( hipMalloc((void **)&dev_lights, sizeof(t_light) * n_lights ) ); CUDA_CALL( hipMemcpy(dev_spheres, spheres, sizeof(t_sphere) * n_spheres, hipMemcpyHostToDevice) ); CUDA_CALL( hipMemcpy(dev_lights, lights, sizeof(t_light) * n_lights, hipMemcpyHostToDevice) ); unsigned char * dev_image_red; unsigned char * dev_image_green; unsigned char * dev_image_blue; CUDA_CALL( hipMalloc((void **)&dev_image_red, height * width *sizeof(unsigned char)) ); CUDA_CALL( hipMalloc((void **)&dev_image_green, height * width *sizeof(unsigned char)) ); CUDA_CALL( hipMalloc((void **)&dev_image_blue, height * width *sizeof(unsigned char)) ); CUDA_CALL( hipMemset(dev_image_red, 0, height * width *sizeof(unsigned char)) ); CUDA_CALL( hipMemset(dev_image_green, 0, height * width *sizeof(unsigned char)) ); CUDA_CALL( hipMemset(dev_image_blue, 0, height * width *sizeof(unsigned char)) ); dim3 block(BLOCK_SIZE_X, BLOCK_SIZE_Y, 1); dim3 grid(iDivUp(width, block.x), iDivUp(height, block.y), 1); #ifdef DEBUG printf ("Running kernel with block.x = %d block.y = %d \n", block.x, block.y); printf ("Running kernel with grid.x = %d grid.y = %d \n", grid.x, grid.y); #endif hipLaunchKernelGGL(( kernel), dim3(grid),dim3(block), 0, 0, dev_image_red, dev_image_blue, dev_image_green, 
height, width, dev_spheres, n_spheres, dev_lights, n_lights); CUDA_CALL( hipGetLastError() ); CUDA_CALL( hipMemcpy(pR, dev_image_red, height * width *sizeof(unsigned char), hipMemcpyDeviceToHost) ); CUDA_CALL( hipMemcpy(pB, dev_image_blue, height * width *sizeof(unsigned char), hipMemcpyDeviceToHost) ); CUDA_CALL( hipMemcpy(pG, dev_image_green,height * width *sizeof(unsigned char), hipMemcpyDeviceToHost) ); CUDA_CALL( hipFree(dev_image_red) ); CUDA_CALL( hipFree(dev_image_green) ); CUDA_CALL( hipFree(dev_image_blue) ); CUDA_CALL( hipFree(dev_spheres) ); CUDA_CALL( hipFree(dev_lights) ); free (spheres); free (lights); } int main( int argc, char* argv[] ) { bool randomScene = false; if (argc > 1) if ((std::string)argv[1] == "random") randomScene = true; if ((randomScene && (argc != 7)) || (argc == 1)) { printf("Usage: %s random <n_spheres> <n_lights> <width> <height> <bmp_filename> \n", argv[0]); return -1; } int n_spheres, n_lights, width, height; char* filename; if (randomScene) { n_spheres = atoi(argv[2]); n_lights = atoi(argv[3]); width = atoi(argv[4]); height = atoi(argv[5]); filename = argv[6]; } else { char** arg = &argv[1]; bool failed = true; do { if ((size_t)arg - (size_t)argv >= sizeof(char*) * argc) break; n_spheres = atoi(*(arg++)); arg += n_spheres * 7; if ((size_t)arg - (size_t)argv >= sizeof(char*) * argc) break; n_lights = atoi(*(arg++)); arg += n_lights * 3; if ((size_t)arg - (size_t)argv >= sizeof(char*) * argc) break; width = atoi(*(arg++)); if ((size_t)arg - (size_t)argv >= sizeof(char*) * argc) break; height = atoi(*(arg++)); if ((size_t)arg - (size_t)argv >= sizeof(char*) * argc) break; filename = *arg; failed = false; } while (0); if (failed) { fprintf(stderr, "Parsing failed: not enough arguments\n"); exit(1); } } #ifdef ENABLE_CHECK if (n_spheres < 5 || n_spheres > 10) { printf ("n_spheres is out of range [5:10]\n"); return -1; } #endif #ifdef ENABLE_CHECK if (n_lights < 1 || n_lights > 2) { printf ("n_lights is out of range [1:2]\n"); 
return -1; } #endif #ifdef ENABLE_CHECK if (width < 800 || width > 1920) { printf ("width is out of range [800:1920]\n"); return -1; } #endif #ifdef ENABLE_CHECK if (height < 600 || height > 1080) { printf ("height is out of range [600:1080]\n"); return -1; } #endif #ifdef DEBUG printf ("Picture size is width = %d height = %d \n", width, height); #endif hipEvent_t start = 0, stop = 0; CUDA_CALL (hipEventCreate (&start) ); CUDA_CALL (hipEventCreate (&stop) ); CUDA_CALL( hipEventRecord (start, 0) ); unsigned char * pR = (unsigned char *) malloc( height*width ); unsigned char * pG = (unsigned char *) malloc( height*width ); unsigned char * pB = (unsigned char *) malloc( height*width ); if ( pR == NULL || pG == NULL || pB == NULL) { fprintf(stderr, "Malloc error, exiting\n"); return -1; } // Pass down spheres/lights, if specific scene is given in command line. char** values = NULL; if (!randomScene) values = &argv[1]; ray_trace(pR, pG, pB, height, width, n_spheres, n_lights, values); CUDA_CALL( hipEventRecord (stop, 0) ); CUDA_CALL( hipEventSynchronize(stop) ); float gpuTime = 0.0f; CUDA_CALL( hipEventElapsedTime (&gpuTime, start, stop) ); printf("CUDA ray tracing time: %.2f milliseconds\n", gpuTime); CUDA_CALL( hipEventDestroy (start) ); CUDA_CALL( hipEventDestroy (stop) ); BMP AnImage; AnImage.SetSize(width, height); for (int i = 0; i < width; i++) { for (int j = 0; j < height; j++) { RGBApixel pixel; pixel.Red = pR [ j * width + i ]; pixel.Green = pG [ j * width + i ]; pixel.Blue = pB [ j * width + i ]; AnImage.SetPixel( i , j , pixel ); } } AnImage.WriteToFile(filename); free(pR); free(pG); free(pB); return 0; }
a2bceff43508d4200ed86bb4ce62c384a2ec5ff0.cu
#include <cstdlib> #include <cstdio> #include <cstring> #include <curand_kernel.h> #include <unistd.h> #include "debug.h" #include "types.h" #include "EasyBMP.h" #define ENABLE_CHECK // Round a / b to nearest higher integer value inline int iDivUp(int a, int b) { return (a % b != 0) ? (a / b + 1) : (a / b); } // Align a to nearest higher multiple of b inline int iAlignUp(int a, int b) { return (a % b != 0) ? (a - a % b + b) : a; } static void generate_scene(t_sphere * spheres, int n_spheres, t_light * lights, int n_lights) { int n_random_coord = n_spheres * 3 + n_lights * 3; int n_random_rad = n_spheres; int n_random_colors = n_spheres * 3; size_t n = n_random_coord + n_random_rad + n_random_colors; curandGenerator_t gen; float *devData, *hostData; hostData = (float *)calloc(n, sizeof(float)); if (!hostData) { fprintf(stderr, "Malloc error, exiting\n"); exit(-1); } CUDA_CALL( cudaMalloc((void **)&devData, n*sizeof(float)) ); CURAND_CALL( curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT) ); CURAND_CALL( curandSetPseudoRandomGeneratorSeed(gen, (unsigned long long)time(NULL)) ); CURAND_CALL( curandGenerateUniform(gen, devData, n) ); CUDA_CALL( cudaMemcpy(hostData, devData, n * sizeof(float), cudaMemcpyDeviceToHost) ); float x_pos = 0.9f; float y_pos = BOX_SIZE / 5.0; for (int i = 0; i < n_spheres; i++) { spheres[i].center.x = x_pos; spheres[i].center.y = y_pos; x_pos += BOX_SIZE / (n_spheres / 2.0); if ( x_pos > BOX_SIZE - 0.9) { x_pos = 0.9f; y_pos = BOX_SIZE / 2.5 ; } } int j = 0; for (int i = 0; i < n_spheres; i++) { spheres[i].center.x += 2.0 * (hostData[j++] - 0.5); spheres[i].center.y += 2.0 * (hostData[j++] - 0.5); spheres[i].center.z = hostData[j++] * BOX_SIZE_Z + DISTANCE; spheres[i].radius = hostData[j++] * RADIUS_MAX + RADIUS_MIN; spheres[i].red = hostData[j++] / (DEPTH_MAX - 3); spheres[i].green = hostData[j++] / (DEPTH_MAX - 3); spheres[i].blue = hostData[j++] / (DEPTH_MAX - 3); } for (int i = 0; i < n_lights; i++) { lights[i].x = (hostData[j++] - 
0.5) * BOX_SIZE * 6; lights[i].y = (hostData[j++] - 0.5) * BOX_SIZE * 6; lights[i].z = hostData[j++] * DISTANCE/2.0; } CURAND_CALL( curandDestroyGenerator(gen) ); CUDA_CALL( cudaFree(devData) ); free(hostData); } __global__ void kernel( unsigned char * dev_image_red, unsigned char * dev_image_blue, unsigned char * dev_image_green, int height, int width, t_sphere * spheres, int n_spheres, t_light * lights, int n_lights); static void ray_trace( unsigned char * pR, unsigned char * pG, unsigned char * pB, int height, int width, int n_spheres, int n_lights, char** values) { //#define STACK_INCREASE #ifdef STACK_INCREASE size_t stack=0; CUDA_CALL( cudaDeviceGetLimit(&stack, cudaLimitStackSize) ); printf ("Cuda stack size %ld \n", stack); stack = 1536; printf ("Setting cuda stack size to %ld \n", stack); CUDA_CALL( cudaDeviceSetLimit(cudaLimitStackSize, stack) ); #endif //cudaDeviceSetCacheConfig(cudaFuncCachePreferL1); //cudaDeviceSetCacheConfig(cudaFuncCachePreferShared); t_sphere * spheres = (t_sphere *) malloc (sizeof(t_sphere) * n_spheres); t_light * lights = (t_light *) malloc (sizeof(t_light) * n_lights); if (lights == NULL || spheres == NULL) { fprintf(stderr, "Malloc error, exiting\n"); exit(-1); } if (!values) generate_scene(spheres, n_spheres, lights, n_lights); else { // Parse scene from the command line. 
char** value = values; value++; // skip n_spheres for (int i = 0; i < n_spheres; i++) { spheres[i].center.x = atof(*(value++)); spheres[i].center.y = atof(*(value++)); spheres[i].center.z = atof(*(value++)); spheres[i].radius = atof(*(value++)); spheres[i].red = atof(*(value++)); spheres[i].green = atof(*(value++)); spheres[i].blue = atof(*(value++)); } value++; // skip n_lights for (int i = 0; i < n_lights; i++) { lights[i].x = atof(*(value++)); lights[i].y = atof(*(value++)); lights[i].z = atof(*(value++)); } } #ifdef DEBUG print_spheres(spheres, n_spheres); print_lights(lights, n_lights); #endif t_sphere * dev_spheres; t_light * dev_lights; CUDA_CALL( cudaMalloc((void **)&dev_spheres, sizeof(t_sphere) * n_spheres ) ); CUDA_CALL( cudaMalloc((void **)&dev_lights, sizeof(t_light) * n_lights ) ); CUDA_CALL( cudaMemcpy(dev_spheres, spheres, sizeof(t_sphere) * n_spheres, cudaMemcpyHostToDevice) ); CUDA_CALL( cudaMemcpy(dev_lights, lights, sizeof(t_light) * n_lights, cudaMemcpyHostToDevice) ); unsigned char * dev_image_red; unsigned char * dev_image_green; unsigned char * dev_image_blue; CUDA_CALL( cudaMalloc((void **)&dev_image_red, height * width *sizeof(unsigned char)) ); CUDA_CALL( cudaMalloc((void **)&dev_image_green, height * width *sizeof(unsigned char)) ); CUDA_CALL( cudaMalloc((void **)&dev_image_blue, height * width *sizeof(unsigned char)) ); CUDA_CALL( cudaMemset(dev_image_red, 0, height * width *sizeof(unsigned char)) ); CUDA_CALL( cudaMemset(dev_image_green, 0, height * width *sizeof(unsigned char)) ); CUDA_CALL( cudaMemset(dev_image_blue, 0, height * width *sizeof(unsigned char)) ); dim3 block(BLOCK_SIZE_X, BLOCK_SIZE_Y, 1); dim3 grid(iDivUp(width, block.x), iDivUp(height, block.y), 1); #ifdef DEBUG printf ("Running kernel with block.x = %d block.y = %d \n", block.x, block.y); printf ("Running kernel with grid.x = %d grid.y = %d \n", grid.x, grid.y); #endif kernel<<<grid,block>>>(dev_image_red, dev_image_blue, dev_image_green, height, width, dev_spheres, 
n_spheres, dev_lights, n_lights); CUDA_CALL( cudaGetLastError() ); CUDA_CALL( cudaMemcpy(pR, dev_image_red, height * width *sizeof(unsigned char), cudaMemcpyDeviceToHost) ); CUDA_CALL( cudaMemcpy(pB, dev_image_blue, height * width *sizeof(unsigned char), cudaMemcpyDeviceToHost) ); CUDA_CALL( cudaMemcpy(pG, dev_image_green,height * width *sizeof(unsigned char), cudaMemcpyDeviceToHost) ); CUDA_CALL( cudaFree(dev_image_red) ); CUDA_CALL( cudaFree(dev_image_green) ); CUDA_CALL( cudaFree(dev_image_blue) ); CUDA_CALL( cudaFree(dev_spheres) ); CUDA_CALL( cudaFree(dev_lights) ); free (spheres); free (lights); } int main( int argc, char* argv[] ) { bool randomScene = false; if (argc > 1) if ((std::string)argv[1] == "random") randomScene = true; if ((randomScene && (argc != 7)) || (argc == 1)) { printf("Usage: %s random <n_spheres> <n_lights> <width> <height> <bmp_filename> \n", argv[0]); return -1; } int n_spheres, n_lights, width, height; char* filename; if (randomScene) { n_spheres = atoi(argv[2]); n_lights = atoi(argv[3]); width = atoi(argv[4]); height = atoi(argv[5]); filename = argv[6]; } else { char** arg = &argv[1]; bool failed = true; do { if ((size_t)arg - (size_t)argv >= sizeof(char*) * argc) break; n_spheres = atoi(*(arg++)); arg += n_spheres * 7; if ((size_t)arg - (size_t)argv >= sizeof(char*) * argc) break; n_lights = atoi(*(arg++)); arg += n_lights * 3; if ((size_t)arg - (size_t)argv >= sizeof(char*) * argc) break; width = atoi(*(arg++)); if ((size_t)arg - (size_t)argv >= sizeof(char*) * argc) break; height = atoi(*(arg++)); if ((size_t)arg - (size_t)argv >= sizeof(char*) * argc) break; filename = *arg; failed = false; } while (0); if (failed) { fprintf(stderr, "Parsing failed: not enough arguments\n"); exit(1); } } #ifdef ENABLE_CHECK if (n_spheres < 5 || n_spheres > 10) { printf ("n_spheres is out of range [5:10]\n"); return -1; } #endif #ifdef ENABLE_CHECK if (n_lights < 1 || n_lights > 2) { printf ("n_lights is out of range [1:2]\n"); return -1; } #endif 
#ifdef ENABLE_CHECK if (width < 800 || width > 1920) { printf ("width is out of range [800:1920]\n"); return -1; } #endif #ifdef ENABLE_CHECK if (height < 600 || height > 1080) { printf ("height is out of range [600:1080]\n"); return -1; } #endif #ifdef DEBUG printf ("Picture size is width = %d height = %d \n", width, height); #endif cudaEvent_t start = 0, stop = 0; CUDA_CALL (cudaEventCreate (&start) ); CUDA_CALL (cudaEventCreate (&stop) ); CUDA_CALL( cudaEventRecord (start, 0) ); unsigned char * pR = (unsigned char *) malloc( height*width ); unsigned char * pG = (unsigned char *) malloc( height*width ); unsigned char * pB = (unsigned char *) malloc( height*width ); if ( pR == NULL || pG == NULL || pB == NULL) { fprintf(stderr, "Malloc error, exiting\n"); return -1; } // Pass down spheres/lights, if specific scene is given in command line. char** values = NULL; if (!randomScene) values = &argv[1]; ray_trace(pR, pG, pB, height, width, n_spheres, n_lights, values); CUDA_CALL( cudaEventRecord (stop, 0) ); CUDA_CALL( cudaEventSynchronize(stop) ); float gpuTime = 0.0f; CUDA_CALL( cudaEventElapsedTime (&gpuTime, start, stop) ); printf("CUDA ray tracing time: %.2f milliseconds\n", gpuTime); CUDA_CALL( cudaEventDestroy (start) ); CUDA_CALL( cudaEventDestroy (stop) ); BMP AnImage; AnImage.SetSize(width, height); for (int i = 0; i < width; i++) { for (int j = 0; j < height; j++) { RGBApixel pixel; pixel.Red = pR [ j * width + i ]; pixel.Green = pG [ j * width + i ]; pixel.Blue = pB [ j * width + i ]; AnImage.SetPixel( i , j , pixel ); } } AnImage.WriteToFile(filename); free(pR); free(pG); free(pB); return 0; }
3d22cddf20546653542628f340f3dc650d1d74bd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "internal.h" // ind const uint32 IND_NTB = 512; #define devVecInd(exp) \ uint32 i = blockIdx.x * IND_NTB + threadIdx.x; \ if (i < n) { exp; } #define mtVecInd(devFunc, ...) \ hipGetLastError(); \ hipLaunchKernelGGL(( devFunc), dim3(divCeil(pX->nx, IND_NTB)), dim3(IND_NTB), 0, stream, \ pX->p, pX->nx, __VA_ARGS__); \ syncCheck(stream, pCode); __global__ void devVecAddScalar(float *x, uint32 n, float *y, float a) { devVecInd(y[i] = x[i] + a); } __export void mtVecAddScalar(MtTensor *pX, MtTensor *pY, float *pA, hipStream_t stream, int *pCode) { mtVecInd(devVecAddScalar, pY->p, *pA); } __global__ void devVecMulScalar(float *x, uint32 n, float *y, float a) { devVecInd(y[i] = a * x[i]); } __export void mtVecMulScalar(MtTensor *pX, MtTensor *pY, float *pA, hipStream_t stream, int *pCode) { mtVecInd(devVecMulScalar, pY->p, *pA); } __global__ void devVecMulAddScalar(float *x, uint32 n, float *y, float a, float b) { devVecInd(y[i] = a * x[i] + b); } __export void mtVecMulAddScalar(MtTensor *pX, MtTensor *pY, float *pA, float *pB, hipStream_t stream, int *pCode) { mtVecInd(devVecMulAddScalar, pY->p, *pA, *pB); } __global__ void devVecPowScalar(float *x, uint32 n, float *y, float a) { devVecInd(y[i] = __powf(x[i], a)); } __export void mtVecPowScalar(MtTensor *pX, MtTensor *pY, float *pA, hipStream_t stream, int *pCode) { mtVecInd(devVecPowScalar, pY->p, *pA); } __global__ void devVecPowMulScalar(float *x, uint32 n, float *y, float a, float b) { devVecInd(y[i] = __powf(x[i], a) * b); } __export void mtVecPowMulScalar(MtTensor *pX, MtTensor *pY, float *pA, float *pB, hipStream_t stream, int *pCode) { mtVecInd(devVecPowMulScalar, pY->p, *pA, *pB); } __global__ void devVecAddVec(float *x, uint32 n, float *y, float *z) { devVecInd(z[i] = x[i] + y[i]); } __export void mtVecAddVec(MtTensor *pX, MtTensor *pY, MtTensor *pZ, hipStream_t stream, int *pCode) { mtVecInd(devVecAddVec, pY->p, 
pZ->p); } __global__ void devVecSubVec(float *x, uint32 n, float *y, float *z) { devVecInd(z[i] = x[i] - y[i]); } __export void mtVecSubVec(MtTensor *pX, MtTensor *pY, MtTensor *pZ, hipStream_t stream, int *pCode) { mtVecInd(devVecSubVec, pY->p, pZ->p); } __global__ void devVecPatchMulVec(float *x, uint32 n, float *y, float *z) { devVecInd(z[i] = x[i] * y[i]); } __export void mtVecPatchMulVec(MtTensor *pX, MtTensor *pY, MtTensor *pZ, hipStream_t stream, int *pCode) { mtVecInd(devVecPatchMulVec, pY->p, pZ->p); } // acc const uint32 ACC_NTB = 512; const uint32 ACC_NPT = 8; __export void mtNewVecAccBuffer(MtTensor *pTen, buffer *pBuf, int *pCode) { newBuffer(divCeil(pTen->nx, ACC_NTB * ACC_NPT), pBuf, pCode); } #define devVecAcc(rtInit, rbExp, ...) \ uint32 itb = threadIdx.x; \ uint32 i = blockIdx.x * ACC_NTB * ACC_NPT + itb; \ float rt = (rtInit); \ for (uint32 j = 0; j < ACC_NPT && i < n; j++, i += ACC_NTB) { __VA_ARGS__; } \ __shared__ float rb[ACC_NTB]; \ rb[itb] = rt; \ __syncthreads(); \ for (uint32 i = ACC_NTB >> 1; i != 0; i >>= 1) { \ if (itb < i) { rbExp; } \ __syncthreads(); \ } \ if (0 == itb) r[blockIdx.x] = rb[0]; #define mtVecAcc(devFunc, rInit, rExp, ...) 
\ float *rg = (float *)buf; \ uint32 nb = divCeil(pX->nx, ACC_NTB * ACC_NPT); \ hipGetLastError(); \ hipLaunchKernelGGL(( devFunc), dim3(nb), dim3(ACC_NTB), 0, stream, pX->p, pX->nx, __VA_ARGS__); \ syncCheck(stream, pCode); \ float r = (rInit); \ for (uint32 i = 0; i < nb; i++) { rExp; } \ *pRes = r; __global__ void devVecSum(float *x, uint32 n, float *r) { devVecAcc(0, rb[itb] += rb[itb + i], rt += x[i]); } __export void mtVecSum(MtTensor *pX, buffer buf, hipStream_t stream, float *pRes, int *pCode) { mtVecAcc(devVecSum, 0, r += rg[i], rg); } __global__ void devVecSquareSum(float *x, uint32 n, float *r) { float e; devVecAcc(0, rb[itb] += rb[itb + i], e = x[i], rt += e * e); } __export void mtVecSquareSum(MtTensor *pX, buffer buf, hipStream_t stream, float *pRes, int *pCode) { mtVecAcc(devVecSquareSum, 0, r += rg[i], rg); } __global__ void devVecMin(float *x, uint32 n, float *r) { devVecAcc(NAN, rb[itb] = fminf(rb[itb], rb[itb + i]), rt = fminf(x[i], rt)); } __export void mtVecMin(MtTensor *pX, buffer buf, hipStream_t stream, float *pRes, int *pCode) { mtVecAcc(devVecMin, NAN, r = fminf(rg[i], r), rg); } __global__ void devVecMax(float *x, uint32 n, float *r) { devVecAcc(NAN, rb[itb] = fmaxf(rb[itb], rb[itb + i]), rt = fmaxf(x[i], rt)); } __export void mtVecMax(MtTensor *pX, buffer buf, hipStream_t stream, float *pRes, int *pCode) { mtVecAcc(devVecMax, NAN, r = fmaxf(rg[i], r), rg); } __global__ void devVecDot(float *x, uint32 n, float *y, float *r) { devVecAcc(0, rb[itb] += rb[itb + i], rt += x[i] * y[i]); } __export void mtVecDot(MtTensor *pX, MtTensor *pY, buffer buf, hipStream_t stream, float *pRes, int *pCode) { mtVecAcc(devVecDot, 0, r += rg[i], pY->p, rg); } __global__ void devVecSumSquareSum(float *x, uint32 n, float *y, float *r) { float e; devVecAcc(0, rb[itb] += rb[itb + i], e = x[i] + y[i], rt += e * e); } __export void mtVecSumSquareSum(MtTensor *pX, MtTensor *pY, buffer buf, hipStream_t stream, float *pRes, int *pCode) { mtVecAcc(devVecSumSquareSum, 
0, r += rg[i], pY->p, rg); } __global__ void devVecDiffSquareSum(float *x, uint32 n, float *y, float *r) { float e; devVecAcc(0, rb[itb] += rb[itb + i], e = x[i] - y[i], rt += e * e); } __export void mtVecDiffSquareSum(MtTensor *pX, MtTensor *pY, buffer buf, hipStream_t stream, float *pRes, int *pCode) { mtVecAcc(devVecDiffSquareSum, 0, r += rg[i], pY->p, rg); }
3d22cddf20546653542628f340f3dc650d1d74bd.cu
#include "internal.h" // ind const uint32 IND_NTB = 512; #define devVecInd(exp) \ uint32 i = blockIdx.x * IND_NTB + threadIdx.x; \ if (i < n) { exp; } #define mtVecInd(devFunc, ...) \ cudaGetLastError(); \ devFunc<<<divCeil(pX->nx, IND_NTB), IND_NTB, 0, stream>>>( \ pX->p, pX->nx, __VA_ARGS__); \ syncCheck(stream, pCode); __global__ void devVecAddScalar(float *x, uint32 n, float *y, float a) { devVecInd(y[i] = x[i] + a); } __export void mtVecAddScalar(MtTensor *pX, MtTensor *pY, float *pA, cudaStream_t stream, int *pCode) { mtVecInd(devVecAddScalar, pY->p, *pA); } __global__ void devVecMulScalar(float *x, uint32 n, float *y, float a) { devVecInd(y[i] = a * x[i]); } __export void mtVecMulScalar(MtTensor *pX, MtTensor *pY, float *pA, cudaStream_t stream, int *pCode) { mtVecInd(devVecMulScalar, pY->p, *pA); } __global__ void devVecMulAddScalar(float *x, uint32 n, float *y, float a, float b) { devVecInd(y[i] = a * x[i] + b); } __export void mtVecMulAddScalar(MtTensor *pX, MtTensor *pY, float *pA, float *pB, cudaStream_t stream, int *pCode) { mtVecInd(devVecMulAddScalar, pY->p, *pA, *pB); } __global__ void devVecPowScalar(float *x, uint32 n, float *y, float a) { devVecInd(y[i] = __powf(x[i], a)); } __export void mtVecPowScalar(MtTensor *pX, MtTensor *pY, float *pA, cudaStream_t stream, int *pCode) { mtVecInd(devVecPowScalar, pY->p, *pA); } __global__ void devVecPowMulScalar(float *x, uint32 n, float *y, float a, float b) { devVecInd(y[i] = __powf(x[i], a) * b); } __export void mtVecPowMulScalar(MtTensor *pX, MtTensor *pY, float *pA, float *pB, cudaStream_t stream, int *pCode) { mtVecInd(devVecPowMulScalar, pY->p, *pA, *pB); } __global__ void devVecAddVec(float *x, uint32 n, float *y, float *z) { devVecInd(z[i] = x[i] + y[i]); } __export void mtVecAddVec(MtTensor *pX, MtTensor *pY, MtTensor *pZ, cudaStream_t stream, int *pCode) { mtVecInd(devVecAddVec, pY->p, pZ->p); } __global__ void devVecSubVec(float *x, uint32 n, float *y, float *z) { devVecInd(z[i] = x[i] - y[i]); } 
__export void mtVecSubVec(MtTensor *pX, MtTensor *pY, MtTensor *pZ, cudaStream_t stream, int *pCode) { mtVecInd(devVecSubVec, pY->p, pZ->p); } __global__ void devVecPatchMulVec(float *x, uint32 n, float *y, float *z) { devVecInd(z[i] = x[i] * y[i]); } __export void mtVecPatchMulVec(MtTensor *pX, MtTensor *pY, MtTensor *pZ, cudaStream_t stream, int *pCode) { mtVecInd(devVecPatchMulVec, pY->p, pZ->p); } // acc const uint32 ACC_NTB = 512; const uint32 ACC_NPT = 8; __export void mtNewVecAccBuffer(MtTensor *pTen, buffer *pBuf, int *pCode) { newBuffer(divCeil(pTen->nx, ACC_NTB * ACC_NPT), pBuf, pCode); } #define devVecAcc(rtInit, rbExp, ...) \ uint32 itb = threadIdx.x; \ uint32 i = blockIdx.x * ACC_NTB * ACC_NPT + itb; \ float rt = (rtInit); \ for (uint32 j = 0; j < ACC_NPT && i < n; j++, i += ACC_NTB) { __VA_ARGS__; } \ __shared__ float rb[ACC_NTB]; \ rb[itb] = rt; \ __syncthreads(); \ for (uint32 i = ACC_NTB >> 1; i != 0; i >>= 1) { \ if (itb < i) { rbExp; } \ __syncthreads(); \ } \ if (0 == itb) r[blockIdx.x] = rb[0]; #define mtVecAcc(devFunc, rInit, rExp, ...) 
\ float *rg = (float *)buf; \ uint32 nb = divCeil(pX->nx, ACC_NTB * ACC_NPT); \ cudaGetLastError(); \ devFunc<<<nb, ACC_NTB, 0, stream>>>(pX->p, pX->nx, __VA_ARGS__); \ syncCheck(stream, pCode); \ float r = (rInit); \ for (uint32 i = 0; i < nb; i++) { rExp; } \ *pRes = r; __global__ void devVecSum(float *x, uint32 n, float *r) { devVecAcc(0, rb[itb] += rb[itb + i], rt += x[i]); } __export void mtVecSum(MtTensor *pX, buffer buf, cudaStream_t stream, float *pRes, int *pCode) { mtVecAcc(devVecSum, 0, r += rg[i], rg); } __global__ void devVecSquareSum(float *x, uint32 n, float *r) { float e; devVecAcc(0, rb[itb] += rb[itb + i], e = x[i], rt += e * e); } __export void mtVecSquareSum(MtTensor *pX, buffer buf, cudaStream_t stream, float *pRes, int *pCode) { mtVecAcc(devVecSquareSum, 0, r += rg[i], rg); } __global__ void devVecMin(float *x, uint32 n, float *r) { devVecAcc(NAN, rb[itb] = fminf(rb[itb], rb[itb + i]), rt = fminf(x[i], rt)); } __export void mtVecMin(MtTensor *pX, buffer buf, cudaStream_t stream, float *pRes, int *pCode) { mtVecAcc(devVecMin, NAN, r = fminf(rg[i], r), rg); } __global__ void devVecMax(float *x, uint32 n, float *r) { devVecAcc(NAN, rb[itb] = fmaxf(rb[itb], rb[itb + i]), rt = fmaxf(x[i], rt)); } __export void mtVecMax(MtTensor *pX, buffer buf, cudaStream_t stream, float *pRes, int *pCode) { mtVecAcc(devVecMax, NAN, r = fmaxf(rg[i], r), rg); } __global__ void devVecDot(float *x, uint32 n, float *y, float *r) { devVecAcc(0, rb[itb] += rb[itb + i], rt += x[i] * y[i]); } __export void mtVecDot(MtTensor *pX, MtTensor *pY, buffer buf, cudaStream_t stream, float *pRes, int *pCode) { mtVecAcc(devVecDot, 0, r += rg[i], pY->p, rg); } __global__ void devVecSumSquareSum(float *x, uint32 n, float *y, float *r) { float e; devVecAcc(0, rb[itb] += rb[itb + i], e = x[i] + y[i], rt += e * e); } __export void mtVecSumSquareSum(MtTensor *pX, MtTensor *pY, buffer buf, cudaStream_t stream, float *pRes, int *pCode) { mtVecAcc(devVecSumSquareSum, 0, r += rg[i], pY->p, 
rg); } __global__ void devVecDiffSquareSum(float *x, uint32 n, float *y, float *r) { float e; devVecAcc(0, rb[itb] += rb[itb + i], e = x[i] - y[i], rt += e * e); } __export void mtVecDiffSquareSum(MtTensor *pX, MtTensor *pY, buffer buf, cudaStream_t stream, float *pRes, int *pCode) { mtVecAcc(devVecDiffSquareSum, 0, r += rg[i], pY->p, rg); }
9cd6a9474aa11d3884e8c2db5f00be7850e0ea29.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* College: University of Massachusetts Lowell EECE 7110:High-Performance Comp. on GPUs Semester: Spring 2018 Student : 01639617 Project : Assignment_2 Professor : Dr.Hang Liu Due date: 2/12/2017 Authors : Sai Sri Devesh Kadambari */ #include <stdio.h> #include <stdlib.h> #include <assert.h> #include <time.h> using namespace std; #define zero 0 __global__ void gpu_matrix_mult(int *a,int *b, int *c, int m, int n, int k) { __shared__ int smem[250]; int row = blockIdx.x ; int tid = blockIdx.x * blockDim.x + threadIdx.x; int step = m/gridDim.x; //step=80 int index_begin = row * step; int index_end= (row+ 1) * step; //Block 0= 0->80 float f=(blockDim.x)/2;int k_b; __syncthreads(); //wait until all the threads in the block reach this point for(int i=(index_begin);i<index_end;i++) //Row=0->80 { smem[tid] =a[i * (blockDim.x)+tid] *b[tid]; //save multiplication value into the smem buffer __syncthreads(); //wait until all the threads reach this point for(int j=((blockDim.x)/2);j>0;j=ceilf(f)) //i=250/2 is 125->62.5(63)->(63-1)->(21)->20->(10)->(5)->(3)->(2)->(1) { k_b=2*f; if(((k_b)%2!=0) && (threadIdx.x == (j-1))) { smem[threadIdx.x -1]+=smem[threadIdx.x]; j=j-1; f=j; } if(threadIdx.x < j) { int temp =smem[threadIdx.x]+smem[threadIdx.x + j]; smem[threadIdx.x]=temp; } __syncthreads(); f=f/2; } c[i]=smem[zero]; } } int main(int argc, char const *argv[]) { int m, n, k; printf("please type in m=A_rows n=A_columns and k=B_columns \n"); scanf("%d %d %d", &m, &n, &k); // allocate memory in host RAM, h_cc is used to store CPU result int *h_a, *h_b, *h_c;// *h_cc; hipHostMalloc((void **) &h_a, sizeof(int)*m*n); hipHostMalloc((void **) &h_b, sizeof(int)*n*k); hipHostMalloc((void **) &h_c, sizeof(int)*m*k); //hipHostMalloc((void **) &h_cc, sizeof(int)*m*k); for (int i = 0; i < m; ++i) { for (int j = 0; j < n; ++j) { h_a[i * n + j] = rand() % 1024; //h_a[row_variable*Max_column + column_variable] } 
} for (int i = 0; i < n; ++i) { for (int j = 0; j < k; ++j) { h_b[i * k + j] = rand() % 1024; } } float gpu_elapsed_time_ms; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); clock_t t; t = clock(); int *d_a, *d_b, *d_c; hipMalloc((void **) &d_a, sizeof(int)*m*n); hipMalloc((void **) &d_b, sizeof(int)*n*k); hipMalloc((void **) &d_c, sizeof(int)*m*k); hipMemcpy(d_a, h_a, sizeof(int)*m*n, hipMemcpyHostToDevice); hipMemcpy(d_b, h_b, sizeof(int)*n*k, hipMemcpyHostToDevice); //unsigned int grid_rows = (m + BLOCK_SIZE - 1) / BLOCK_SIZE; //unsigned int grid_cols = (k + BLOCK_SIZE - 1) / BLOCK_SIZE; dim3 dimGrid(128); dim3 dimBlock(256); hipLaunchKernelGGL(( gpu_matrix_mult), dim3(dimGrid), dim3(dimBlock), 0, 0, d_a, d_b, d_c, m, n, k); hipMemcpy(h_c, d_c, sizeof(int)*m*k, hipMemcpyDeviceToHost); hipDeviceSynchronize(); hipEventRecord(stop, 0); hipEventSynchronize(stop); t = clock()-t; double time_taken = ((double)t)/CLOCKS_PER_SEC; hipEventElapsedTime(&gpu_elapsed_time_ms, start, stop); printf("Time elapsed on matrix multiplication of %dx%d . %dx%d on CPU: %lf ms.\n\n", m, n, n, k, (time_taken/1000)); hipFree(d_a); hipFree(d_b); hipFree(d_c); hipHostFree(h_a); hipHostFree(h_b); hipHostFree(h_c); //hipHostFree(h_cc); return 0; }
9cd6a9474aa11d3884e8c2db5f00be7850e0ea29.cu
/* College: University of Massachusetts Lowell EECE 7110:High-Performance Comp. on GPUs Semester: Spring 2018 Student : 01639617 Project : Assignment_2 Professor : Dr.Hang Liu Due date: 2/12/2017 Authors : Sai Sri Devesh Kadambari */ #include <stdio.h> #include <stdlib.h> #include <assert.h> #include <time.h> using namespace std; #define zero 0 __global__ void gpu_matrix_mult(int *a,int *b, int *c, int m, int n, int k) { __shared__ int smem[250]; int row = blockIdx.x ; int tid = blockIdx.x * blockDim.x + threadIdx.x; int step = m/gridDim.x; //step=80 int index_begin = row * step; int index_end= (row+ 1) * step; //Block 0= 0->80 float f=(blockDim.x)/2;int k_b; __syncthreads(); //wait until all the threads in the block reach this point for(int i=(index_begin);i<index_end;i++) //Row=0->80 { smem[tid] =a[i * (blockDim.x)+tid] *b[tid]; //save multiplication value into the smem buffer __syncthreads(); //wait until all the threads reach this point for(int j=((blockDim.x)/2);j>0;j=ceilf(f)) //i=250/2 is 125->62.5(63)->(63-1)->(21)->20->(10)->(5)->(3)->(2)->(1) { k_b=2*f; if(((k_b)%2!=0) && (threadIdx.x == (j-1))) { smem[threadIdx.x -1]+=smem[threadIdx.x]; j=j-1; f=j; } if(threadIdx.x < j) { int temp =smem[threadIdx.x]+smem[threadIdx.x + j]; smem[threadIdx.x]=temp; } __syncthreads(); f=f/2; } c[i]=smem[zero]; } } int main(int argc, char const *argv[]) { int m, n, k; printf("please type in m=A_rows n=A_columns and k=B_columns \n"); scanf("%d %d %d", &m, &n, &k); // allocate memory in host RAM, h_cc is used to store CPU result int *h_a, *h_b, *h_c;// *h_cc; cudaMallocHost((void **) &h_a, sizeof(int)*m*n); cudaMallocHost((void **) &h_b, sizeof(int)*n*k); cudaMallocHost((void **) &h_c, sizeof(int)*m*k); //cudaMallocHost((void **) &h_cc, sizeof(int)*m*k); for (int i = 0; i < m; ++i) { for (int j = 0; j < n; ++j) { h_a[i * n + j] = rand() % 1024; //h_a[row_variable*Max_column + column_variable] } } for (int i = 0; i < n; ++i) { for (int j = 0; j < k; ++j) { h_b[i * k + j] = 
rand() % 1024; } } float gpu_elapsed_time_ms; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); clock_t t; t = clock(); int *d_a, *d_b, *d_c; cudaMalloc((void **) &d_a, sizeof(int)*m*n); cudaMalloc((void **) &d_b, sizeof(int)*n*k); cudaMalloc((void **) &d_c, sizeof(int)*m*k); cudaMemcpy(d_a, h_a, sizeof(int)*m*n, cudaMemcpyHostToDevice); cudaMemcpy(d_b, h_b, sizeof(int)*n*k, cudaMemcpyHostToDevice); //unsigned int grid_rows = (m + BLOCK_SIZE - 1) / BLOCK_SIZE; //unsigned int grid_cols = (k + BLOCK_SIZE - 1) / BLOCK_SIZE; dim3 dimGrid(128); dim3 dimBlock(256); gpu_matrix_mult<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, m, n, k); cudaMemcpy(h_c, d_c, sizeof(int)*m*k, cudaMemcpyDeviceToHost); cudaThreadSynchronize(); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); t = clock()-t; double time_taken = ((double)t)/CLOCKS_PER_SEC; cudaEventElapsedTime(&gpu_elapsed_time_ms, start, stop); printf("Time elapsed on matrix multiplication of %dx%d . %dx%d on CPU: %lf ms.\n\n", m, n, n, k, (time_taken/1000)); cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); cudaFreeHost(h_a); cudaFreeHost(h_b); cudaFreeHost(h_c); //cudaFreeHost(h_cc); return 0; }
c010999ac8e3e4afd94c95010fc408ad49c3e0bb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/affine_grid_op.h" #include "paddle/fluid/platform/device/gpu/gpu_device_function.h" #include "paddle/fluid/platform/device/gpu/gpu_info.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; template <typename T> __global__ void LinspaceKernel(T start, T step, int64_t size, T* out) { CUDA_KERNEL_LOOP(index, size) { out[index] = start + step * index; } } template <typename T> struct Linspace<paddle::platform::CUDADeviceContext, T> { void operator()(T start, T end, int count, bool align_corners, framework::Tensor* numbers, const framework::ExecutionContext& ctx) { T* number_data = numbers->mutable_data<T>({count}, ctx.GetPlace()); T slice = (end - start) / (T)(count - 1); if (!align_corners) { slice = (end - start) / (T)count; start *= (T)(count - 1) / (T)count; } auto stream = ctx.cuda_device_context().stream(); int block = 512; int grid = (count + block - 1) / block; hipLaunchKernelGGL(( LinspaceKernel<T>), dim3(grid), dim3(block), 0, stream, start, slice, count, number_data); } }; template <typename T> __global__ void affine_grid_kernel(const int count, int n, int out_h, int out_w, T h_start, T w_start, T h_step, T w_step, const T* theta, // 
N, 2, 3 T* output) { CUDA_KERNEL_LOOP(index, count) { int w = index % out_w; int h = (index / out_w) % out_h; int n = index / (out_w * out_h); T h_coor = h_step * static_cast<T>(h) + static_cast<T>(h_start); T w_coor = w_step * static_cast<T>(w) + static_cast<T>(w_start); int theta_offset = n * 6; // 2 * 3; // affine from (h_coor, w_coor) to (x, y) output[index * 2] = theta[theta_offset] * w_coor + theta[theta_offset + 1] * h_coor + theta[theta_offset + 2]; output[index * 2 + 1] = theta[theta_offset + 3] * w_coor + theta[theta_offset + 4] * h_coor + theta[theta_offset + 5]; } } template <typename T> __global__ void affine_grid_grad_kernel(const int count, int n, int out_h, int out_w, T h_start, T w_start, T h_step, T w_step, const T* out_grad, // N, H, W, 2 T* theta_grad) { // N, 2, 3 CUDA_KERNEL_LOOP(index, count) { int w = index % out_w; int h = (index / out_w) % out_h; int n = index / (out_w * out_h); T h_coor = h_step * static_cast<T>(h) + static_cast<T>(h_start); T w_coor = w_step * static_cast<T>(w) + static_cast<T>(w_start); int theta_offset = n * 6; // 2 * 3; T out_grad_x = out_grad[index * 2]; platform::CudaAtomicAdd(theta_grad + theta_offset, out_grad_x * w_coor); platform::CudaAtomicAdd(theta_grad + theta_offset + 1, out_grad_x * h_coor); platform::CudaAtomicAdd(theta_grad + theta_offset + 2, out_grad_x); T out_grad_y = out_grad[index * 2 + 1]; platform::CudaAtomicAdd(theta_grad + theta_offset + 3, out_grad_y * w_coor); platform::CudaAtomicAdd(theta_grad + theta_offset + 4, out_grad_y * h_coor); platform::CudaAtomicAdd(theta_grad + theta_offset + 5, out_grad_y); } } template <typename T> class AffineGridOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* theta = ctx.Input<Tensor>("Theta"); int n = theta->dims()[0]; auto size_attr = ctx.Attr<std::vector<int>>("output_shape"); auto align_corners = ctx.Attr<bool>("align_corners"); int h = 0; int w = 0; if (size_attr.size() == 0) 
{ auto* output_shape = ctx.Input<Tensor>("OutputShape"); Tensor h_sizes; framework::TensorCopy(*output_shape, platform::CPUPlace(), &h_sizes); const int* h_size_data = h_sizes.data<int>(); h = h_size_data[2]; w = h_size_data[3]; } else { h = size_attr[2]; w = size_attr[3]; } auto* output = ctx.Output<Tensor>("Output"); T* out_data = output->mutable_data<T>({n, h, w, 2}, ctx.GetPlace()); T h_step; T w_step; T h_start = -1; T w_start = -1; if (align_corners) { h_step = static_cast<T>(2) / static_cast<T>(h - 1); w_step = static_cast<T>(2) / static_cast<T>(w - 1); } else { h_step = static_cast<T>(2) / static_cast<T>(h); w_step = static_cast<T>(2) / static_cast<T>(w); h_start *= static_cast<T>(h - 1) / static_cast<T>(h); w_start *= static_cast<T>(w - 1) / static_cast<T>(w); } const int count = n * h * w; int block = 512; int grid = (count + block - 1) / block; auto cu_stream = ctx.cuda_device_context().stream(); hipLaunchKernelGGL(( affine_grid_kernel), dim3(grid), dim3(block), 0, cu_stream, count, n, h, w, h_start, w_start, h_step, w_step, theta->data<T>(), // N, 2, 3 out_data); } }; template <typename T> class AffineGridGradOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto output_grad = ctx.Input<Tensor>(framework::GradVarName("Output")); auto theta_grad = ctx.Output<Tensor>(framework::GradVarName("Theta")); int n = output_grad->dims()[0]; auto size_attr = ctx.Attr<std::vector<int>>("output_shape"); auto align_corners = ctx.Attr<bool>("align_corners"); int h = 0; int w = 0; if (size_attr.size() == 0) { auto* output_shape = ctx.Input<Tensor>("OutputShape"); Tensor h_sizes; framework::TensorCopy(*output_shape, platform::CPUPlace(), &h_sizes); const int* h_size_data = h_sizes.data<int>(); h = h_size_data[2]; w = h_size_data[3]; } else { h = size_attr[2]; w = size_attr[3]; } T* theta_grad_data = theta_grad->mutable_data<T>({n, 2, 3}, ctx.GetPlace()); 
math::SetConstant<paddle::platform::CUDADeviceContext, T>()( ctx.cuda_device_context(), theta_grad, static_cast<T>(0)); T h_step; T w_step; T h_start = -1; T w_start = -1; if (align_corners) { h_step = static_cast<T>(2) / static_cast<T>(h - 1); w_step = static_cast<T>(2) / static_cast<T>(w - 1); } else { h_step = static_cast<T>(2) / static_cast<T>(h); w_step = static_cast<T>(2) / static_cast<T>(w); h_start *= static_cast<T>(h - 1) / static_cast<T>(h); w_start *= static_cast<T>(w - 1) / static_cast<T>(w); } const int count = n * h * w; VLOG(3) << "count: " << count << "; h_step: " << h_step << "; w_step: " << w_step << "; h_start: " << h_start << "; w_start: " << w_start; int block = 512; int grid = (count + block - 1) / block; auto cu_stream = ctx.cuda_device_context().stream(); hipLaunchKernelGGL(( affine_grid_grad_kernel), dim3(grid), dim3(block), 0, cu_stream, count, n, h, w, h_start, w_start, h_step, w_step, output_grad->data<T>(), theta_grad_data); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL(affine_grid, ops::AffineGridOpCUDAKernel<float>, ops::AffineGridOpCUDAKernel<double>); REGISTER_OP_CUDA_KERNEL(affine_grid_grad, ops::AffineGridGradOpCUDAKernel<float>, ops::AffineGridGradOpCUDAKernel<double>);
c010999ac8e3e4afd94c95010fc408ad49c3e0bb.cu
/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/affine_grid_op.h" #include "paddle/fluid/platform/device/gpu/gpu_device_function.h" #include "paddle/fluid/platform/device/gpu/gpu_info.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; template <typename T> __global__ void LinspaceKernel(T start, T step, int64_t size, T* out) { CUDA_KERNEL_LOOP(index, size) { out[index] = start + step * index; } } template <typename T> struct Linspace<paddle::platform::CUDADeviceContext, T> { void operator()(T start, T end, int count, bool align_corners, framework::Tensor* numbers, const framework::ExecutionContext& ctx) { T* number_data = numbers->mutable_data<T>({count}, ctx.GetPlace()); T slice = (end - start) / (T)(count - 1); if (!align_corners) { slice = (end - start) / (T)count; start *= (T)(count - 1) / (T)count; } auto stream = ctx.cuda_device_context().stream(); int block = 512; int grid = (count + block - 1) / block; LinspaceKernel<T><<<grid, block, 0, stream>>>(start, slice, count, number_data); } }; template <typename T> __global__ void affine_grid_kernel(const int count, int n, int out_h, int out_w, T h_start, T w_start, T h_step, T w_step, const T* theta, // N, 2, 3 T* output) { CUDA_KERNEL_LOOP(index, count) { int w = index % out_w; int h = (index / out_w) % out_h; int n = 
index / (out_w * out_h); T h_coor = h_step * static_cast<T>(h) + static_cast<T>(h_start); T w_coor = w_step * static_cast<T>(w) + static_cast<T>(w_start); int theta_offset = n * 6; // 2 * 3; // affine from (h_coor, w_coor) to (x, y) output[index * 2] = theta[theta_offset] * w_coor + theta[theta_offset + 1] * h_coor + theta[theta_offset + 2]; output[index * 2 + 1] = theta[theta_offset + 3] * w_coor + theta[theta_offset + 4] * h_coor + theta[theta_offset + 5]; } } template <typename T> __global__ void affine_grid_grad_kernel(const int count, int n, int out_h, int out_w, T h_start, T w_start, T h_step, T w_step, const T* out_grad, // N, H, W, 2 T* theta_grad) { // N, 2, 3 CUDA_KERNEL_LOOP(index, count) { int w = index % out_w; int h = (index / out_w) % out_h; int n = index / (out_w * out_h); T h_coor = h_step * static_cast<T>(h) + static_cast<T>(h_start); T w_coor = w_step * static_cast<T>(w) + static_cast<T>(w_start); int theta_offset = n * 6; // 2 * 3; T out_grad_x = out_grad[index * 2]; platform::CudaAtomicAdd(theta_grad + theta_offset, out_grad_x * w_coor); platform::CudaAtomicAdd(theta_grad + theta_offset + 1, out_grad_x * h_coor); platform::CudaAtomicAdd(theta_grad + theta_offset + 2, out_grad_x); T out_grad_y = out_grad[index * 2 + 1]; platform::CudaAtomicAdd(theta_grad + theta_offset + 3, out_grad_y * w_coor); platform::CudaAtomicAdd(theta_grad + theta_offset + 4, out_grad_y * h_coor); platform::CudaAtomicAdd(theta_grad + theta_offset + 5, out_grad_y); } } template <typename T> class AffineGridOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* theta = ctx.Input<Tensor>("Theta"); int n = theta->dims()[0]; auto size_attr = ctx.Attr<std::vector<int>>("output_shape"); auto align_corners = ctx.Attr<bool>("align_corners"); int h = 0; int w = 0; if (size_attr.size() == 0) { auto* output_shape = ctx.Input<Tensor>("OutputShape"); Tensor h_sizes; framework::TensorCopy(*output_shape, 
platform::CPUPlace(), &h_sizes); const int* h_size_data = h_sizes.data<int>(); h = h_size_data[2]; w = h_size_data[3]; } else { h = size_attr[2]; w = size_attr[3]; } auto* output = ctx.Output<Tensor>("Output"); T* out_data = output->mutable_data<T>({n, h, w, 2}, ctx.GetPlace()); T h_step; T w_step; T h_start = -1; T w_start = -1; if (align_corners) { h_step = static_cast<T>(2) / static_cast<T>(h - 1); w_step = static_cast<T>(2) / static_cast<T>(w - 1); } else { h_step = static_cast<T>(2) / static_cast<T>(h); w_step = static_cast<T>(2) / static_cast<T>(w); h_start *= static_cast<T>(h - 1) / static_cast<T>(h); w_start *= static_cast<T>(w - 1) / static_cast<T>(w); } const int count = n * h * w; int block = 512; int grid = (count + block - 1) / block; auto cu_stream = ctx.cuda_device_context().stream(); affine_grid_kernel<<<grid, block, 0, cu_stream>>>( count, n, h, w, h_start, w_start, h_step, w_step, theta->data<T>(), // N, 2, 3 out_data); } }; template <typename T> class AffineGridGradOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto output_grad = ctx.Input<Tensor>(framework::GradVarName("Output")); auto theta_grad = ctx.Output<Tensor>(framework::GradVarName("Theta")); int n = output_grad->dims()[0]; auto size_attr = ctx.Attr<std::vector<int>>("output_shape"); auto align_corners = ctx.Attr<bool>("align_corners"); int h = 0; int w = 0; if (size_attr.size() == 0) { auto* output_shape = ctx.Input<Tensor>("OutputShape"); Tensor h_sizes; framework::TensorCopy(*output_shape, platform::CPUPlace(), &h_sizes); const int* h_size_data = h_sizes.data<int>(); h = h_size_data[2]; w = h_size_data[3]; } else { h = size_attr[2]; w = size_attr[3]; } T* theta_grad_data = theta_grad->mutable_data<T>({n, 2, 3}, ctx.GetPlace()); math::SetConstant<paddle::platform::CUDADeviceContext, T>()( ctx.cuda_device_context(), theta_grad, static_cast<T>(0)); T h_step; T w_step; T h_start = -1; T w_start = -1; if 
(align_corners) { h_step = static_cast<T>(2) / static_cast<T>(h - 1); w_step = static_cast<T>(2) / static_cast<T>(w - 1); } else { h_step = static_cast<T>(2) / static_cast<T>(h); w_step = static_cast<T>(2) / static_cast<T>(w); h_start *= static_cast<T>(h - 1) / static_cast<T>(h); w_start *= static_cast<T>(w - 1) / static_cast<T>(w); } const int count = n * h * w; VLOG(3) << "count: " << count << "; h_step: " << h_step << "; w_step: " << w_step << "; h_start: " << h_start << "; w_start: " << w_start; int block = 512; int grid = (count + block - 1) / block; auto cu_stream = ctx.cuda_device_context().stream(); affine_grid_grad_kernel<<<grid, block, 0, cu_stream>>>( count, n, h, w, h_start, w_start, h_step, w_step, output_grad->data<T>(), theta_grad_data); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL(affine_grid, ops::AffineGridOpCUDAKernel<float>, ops::AffineGridOpCUDAKernel<double>); REGISTER_OP_CUDA_KERNEL(affine_grid_grad, ops::AffineGridGradOpCUDAKernel<float>, ops::AffineGridGradOpCUDAKernel<double>);
b86a360404c2aa985c1c4cc70db0495732d77821.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __device__ float LCG_random_float(unsigned int * seed) { const unsigned int m = 2147483648; const unsigned int a = 26757677; const unsigned int c = 1; *seed = (a * (*seed) + c) % m; return (float) (*seed) / (float) m; } __device__ void LCG_random_init(unsigned int * seed) { const unsigned int m = 2147483648; const unsigned int a = 26757677; const unsigned int c = 1; *seed = (a * (*seed) + c) % m; } __global__ void setupKernel(unsigned int* state) { int idx = blockIdx.x*blockDim.x + threadIdx.x; for (int i = 0; i < idx; i++) LCG_random_init(&state[idx]); } __device__ void decrypt(const int* encrypted, const int* key, int* decrypted) { int columns[KEY_LENGTH][SECTION_CONSTANT+1]; int offset = 0; int colLength[KEY_LENGTH]; for (int j=0; j<KEY_LENGTH; ++j) { colLength[j] = ENCRYPTEDLEN / KEY_LENGTH; if (j < ENCRYPTEDLEN % KEY_LENGTH) colLength[j]++; } for (int keyPos=0; keyPos < KEY_LENGTH; ++keyPos) { offset = 0; for (int i=0; i<KEY_LENGTH; ++i) if (key[i] < key[keyPos]) offset += colLength[i]; for (int j=0; j<colLength[keyPos]; ++j) columns[key[keyPos]][j] = encrypted[offset+j]; } for (int j=0; j<ENCRYPTEDLEN; ++j) decrypted[j] = columns[key[j % KEY_LENGTH]][j / KEY_LENGTH]; } __device__ void swapElements(int *key, int posLeft, int posRight) { if (posLeft != posRight) { key[posLeft] -= key[posRight]; key[posRight] += key[posLeft]; key[posLeft] = key[posRight] - key[posLeft]; } } __device__ void swapBlock(int *key, int posLeft, int posRight, int length) { for (int i=0; i<length; i++) swapElements(key, (posLeft+i)%KEY_LENGTH, (posRight+i)%KEY_LENGTH); } __global__ void decode(const float *__restrict d_scores, const int *__restrict d_encrypted, unsigned int*__restrict globalState, int *__restrict d_decrypted) { __shared__ float shared_scores[ALPHABET*ALPHABET]; int key[KEY_LENGTH]; int localDecrypted[ENCRYPTEDLEN]; int bestLocalDecrypted[ENCRYPTEDLEN]; int leftLetter = 0; int 
rightLetter = 0; int backupKey[KEY_LENGTH]; int shiftHelper[KEY_LENGTH]; int blockStart, blockEnd; int l,f,t,t0,n,ff,tt; float tempScore = 0.f; float bestScore = CAP; int j = 0, jj = 0; int idx = blockIdx.x*blockDim.x + threadIdx.x; unsigned int localState = globalState[idx]; if (threadIdx.x == 0) { for (j=0; j<ALPHABET;++j) for (jj=0; jj<ALPHABET; ++jj) shared_scores[j*ALPHABET + jj] = d_scores[j*ALPHABET + jj]; } __syncthreads(); for (j=0; j<KEY_LENGTH; ++j) key[j]=j; for (j=0; j<KEY_LENGTH; ++j) { swapElements(key, j, LCG_random_float(&localState)*KEY_LENGTH); } for (int cycles=0; cycles<CLIMBINGS; ++cycles) { for (j=0; j<KEY_LENGTH;j++) backupKey[j] = key[j]; tempScore = 0.f; int branch = LCG_random_float(&localState)*100; if (branch < HEUR_THRESHOLD_OP1) { for (j=0; j<1+LCG_random_float(&localState)*OP1_HOP; j++) { leftLetter = LCG_random_float(&localState)*KEY_LENGTH; rightLetter = LCG_random_float(&localState)*KEY_LENGTH; swapElements(key, leftLetter, rightLetter); } } else if (branch < HEUR_THRESHOLD_OP2) { for (j=0; j< 1+LCG_random_float(&localState)*OP2_HOP;j++) { blockStart = LCG_random_float(&localState)*KEY_LENGTH; blockEnd = LCG_random_float(&localState)*KEY_LENGTH; swapBlock(key, blockStart, blockEnd, 1+LCG_random_float(&localState)*(abs((blockStart-blockEnd))-1)); } } else { l = 1 + LCG_random_float(&localState)*(KEY_LENGTH-2); f = LCG_random_float(&localState)*(KEY_LENGTH-1); t = (f+1+(LCG_random_float(&localState)*(KEY_LENGTH-2))); t = t % KEY_LENGTH; for (j=0; j< KEY_LENGTH;j++) shiftHelper[j] = key[j]; t0 = (t-f+KEY_LENGTH) % KEY_LENGTH; n = (t0+l) % KEY_LENGTH; for (j=0; j<n;j++) { ff = (f+j) % KEY_LENGTH; tt = (((t0+j)%n)+f)%KEY_LENGTH; key[tt] = shiftHelper[ff]; } } decrypt(d_encrypted, key, localDecrypted); for (j=0; j<ENCRYPTEDLEN-1; ++j) { tempScore += shared_scores[ALPHABET*localDecrypted[j] + localDecrypted[j+1]]; } if (tempScore < bestScore) { bestScore = tempScore; for (j=0; j<ENCRYPTEDLEN; ++j) { bestLocalDecrypted[j] = 
localDecrypted[j]; } } else { for (j=0; j<KEY_LENGTH;j++) key[j] = backupKey[j]; } } for (j=0; j<ENCRYPTEDLEN; ++j) d_decrypted[idx*ENCRYPTEDLEN+j] = bestLocalDecrypted[j]; }
b86a360404c2aa985c1c4cc70db0495732d77821.cu
__device__ float LCG_random_float(unsigned int * seed) { const unsigned int m = 2147483648; const unsigned int a = 26757677; const unsigned int c = 1; *seed = (a * (*seed) + c) % m; return (float) (*seed) / (float) m; } __device__ void LCG_random_init(unsigned int * seed) { const unsigned int m = 2147483648; const unsigned int a = 26757677; const unsigned int c = 1; *seed = (a * (*seed) + c) % m; } __global__ void setupKernel(unsigned int* state) { int idx = blockIdx.x*blockDim.x + threadIdx.x; for (int i = 0; i < idx; i++) LCG_random_init(&state[idx]); } __device__ void decrypt(const int* encrypted, const int* key, int* decrypted) { int columns[KEY_LENGTH][SECTION_CONSTANT+1]; int offset = 0; int colLength[KEY_LENGTH]; for (int j=0; j<KEY_LENGTH; ++j) { colLength[j] = ENCRYPTEDLEN / KEY_LENGTH; if (j < ENCRYPTEDLEN % KEY_LENGTH) colLength[j]++; } for (int keyPos=0; keyPos < KEY_LENGTH; ++keyPos) { offset = 0; for (int i=0; i<KEY_LENGTH; ++i) if (key[i] < key[keyPos]) offset += colLength[i]; for (int j=0; j<colLength[keyPos]; ++j) columns[key[keyPos]][j] = encrypted[offset+j]; } for (int j=0; j<ENCRYPTEDLEN; ++j) decrypted[j] = columns[key[j % KEY_LENGTH]][j / KEY_LENGTH]; } __device__ void swapElements(int *key, int posLeft, int posRight) { if (posLeft != posRight) { key[posLeft] -= key[posRight]; key[posRight] += key[posLeft]; key[posLeft] = key[posRight] - key[posLeft]; } } __device__ void swapBlock(int *key, int posLeft, int posRight, int length) { for (int i=0; i<length; i++) swapElements(key, (posLeft+i)%KEY_LENGTH, (posRight+i)%KEY_LENGTH); } __global__ void decode(const float *__restrict d_scores, const int *__restrict d_encrypted, unsigned int*__restrict globalState, int *__restrict d_decrypted) { __shared__ float shared_scores[ALPHABET*ALPHABET]; int key[KEY_LENGTH]; int localDecrypted[ENCRYPTEDLEN]; int bestLocalDecrypted[ENCRYPTEDLEN]; int leftLetter = 0; int rightLetter = 0; int backupKey[KEY_LENGTH]; int shiftHelper[KEY_LENGTH]; int blockStart, 
blockEnd; int l,f,t,t0,n,ff,tt; float tempScore = 0.f; float bestScore = CAP; int j = 0, jj = 0; int idx = blockIdx.x*blockDim.x + threadIdx.x; unsigned int localState = globalState[idx]; if (threadIdx.x == 0) { for (j=0; j<ALPHABET;++j) for (jj=0; jj<ALPHABET; ++jj) shared_scores[j*ALPHABET + jj] = d_scores[j*ALPHABET + jj]; } __syncthreads(); for (j=0; j<KEY_LENGTH; ++j) key[j]=j; for (j=0; j<KEY_LENGTH; ++j) { swapElements(key, j, LCG_random_float(&localState)*KEY_LENGTH); } for (int cycles=0; cycles<CLIMBINGS; ++cycles) { for (j=0; j<KEY_LENGTH;j++) backupKey[j] = key[j]; tempScore = 0.f; int branch = LCG_random_float(&localState)*100; if (branch < HEUR_THRESHOLD_OP1) { for (j=0; j<1+LCG_random_float(&localState)*OP1_HOP; j++) { leftLetter = LCG_random_float(&localState)*KEY_LENGTH; rightLetter = LCG_random_float(&localState)*KEY_LENGTH; swapElements(key, leftLetter, rightLetter); } } else if (branch < HEUR_THRESHOLD_OP2) { for (j=0; j< 1+LCG_random_float(&localState)*OP2_HOP;j++) { blockStart = LCG_random_float(&localState)*KEY_LENGTH; blockEnd = LCG_random_float(&localState)*KEY_LENGTH; swapBlock(key, blockStart, blockEnd, 1+LCG_random_float(&localState)*(abs((blockStart-blockEnd))-1)); } } else { l = 1 + LCG_random_float(&localState)*(KEY_LENGTH-2); f = LCG_random_float(&localState)*(KEY_LENGTH-1); t = (f+1+(LCG_random_float(&localState)*(KEY_LENGTH-2))); t = t % KEY_LENGTH; for (j=0; j< KEY_LENGTH;j++) shiftHelper[j] = key[j]; t0 = (t-f+KEY_LENGTH) % KEY_LENGTH; n = (t0+l) % KEY_LENGTH; for (j=0; j<n;j++) { ff = (f+j) % KEY_LENGTH; tt = (((t0+j)%n)+f)%KEY_LENGTH; key[tt] = shiftHelper[ff]; } } decrypt(d_encrypted, key, localDecrypted); for (j=0; j<ENCRYPTEDLEN-1; ++j) { tempScore += shared_scores[ALPHABET*localDecrypted[j] + localDecrypted[j+1]]; } if (tempScore < bestScore) { bestScore = tempScore; for (j=0; j<ENCRYPTEDLEN; ++j) { bestLocalDecrypted[j] = localDecrypted[j]; } } else { for (j=0; j<KEY_LENGTH;j++) key[j] = backupKey[j]; } } for (j=0; 
j<ENCRYPTEDLEN; ++j) d_decrypted[idx*ENCRYPTEDLEN+j] = bestLocalDecrypted[j]; }
8e520d1034a4efcadc86224e38e1e8f41e320127.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2017 Sony Corporation. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <nbla/cuda/array/cuda_array.hpp> #include <nbla/cuda/function/mean.hpp> #include <nbla/cuda/math.hpp> #include <nbla/cuda/utils/block_reduce.cuh> namespace nbla { template <typename T> __global__ void kernel_reduce_per_block(const int N, const T *x, T *buff, T scale = 1) { typedef typename CudaTypeForceFloat<T>::type AccT; AccT thread_data = 0; NBLA_CUDA_KERNEL_LOOP(i, N) { thread_data += (AccT)x[i]; } thread_data = blockReduceSum(thread_data); if (threadIdx.x == 0) { buff[blockIdx.x] = thread_data * scale; } } template <typename T> void MeanCuda<T>::forward_impl_reduce(const T *x_, T *y_, int outer_size, int reduction_size) { const Tc *x = reinterpret_cast<const Tc *>(x_); Tc *y = reinterpret_cast<Tc *>(y_); cuda_set_device(this->device_); if (outer_size == 1) { if (reduction_size >= 1024) { int blocks = min(NBLA_CUDA_GET_BLOCKS(reduction_size), /*max blocks*/ 1024); shared_ptr<CudaCachedArray> arr_buff = make_shared<CudaCachedArray>(blocks, get_dtype<Tc>(), this->ctx_); Tc *buff = arr_buff->pointer<Tc>(); hipLaunchKernelGGL(( kernel_reduce_per_block), dim3(blocks), dim3(NBLA_CUDA_NUM_THREADS), 0, 0, reduction_size, x, buff); NBLA_CUDA_KERNEL_CHECK(); hipLaunchKernelGGL(( kernel_reduce_per_block<Tc>), dim3(1), dim3(1024), 0, 0, blocks, buff, y, (T)(1. 
/ reduction_size)); NBLA_CUDA_KERNEL_CHECK(); } else { hipLaunchKernelGGL(( kernel_reduce_per_block<Tc>), dim3(1), dim3(1024), 0, 0, reduction_size, x, y, (T)(1. / reduction_size)); NBLA_CUDA_KERNEL_CHECK(); } return; } const Tc *ones = static_cast<const Tc *>(SingletonManager::get<NNabla>()->ones( reduction_size, get_dtype<Tc>(), this->ctx_)); cuda_gemv<Tc>(this->device_, y, x, reduction_size, outer_size, true, ones, reduction_size, 1. / reduction_size, 0); } template <typename T, bool accum> __global__ void kernel_reduce_mean_backward(const int num, T *dx, const T *dy, T scale) { NBLA_CUDA_KERNEL_LOOP(idx, num) { dx[idx] = (accum ? dx[idx] : (T)0) + scale * (*dy); } } template <typename T> void MeanCuda<T>::backward_impl_reduce(const T *dy_, T *dx_, int outer_size, int reduction_size, bool accum) { const Tc *dy = reinterpret_cast<const Tc *>(dy_); Tc *dx = reinterpret_cast<Tc *>(dx_); cuda_set_device(this->device_); if (outer_size == 1) { if (accum) { NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_reduce_mean_backward<Tc, true>), reduction_size, dx, dy, (T)(1. / reduction_size)); } else { NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_reduce_mean_backward<Tc, false>), reduction_size, dx, dy, (T)(1. / reduction_size)); } return; } const Tc *ones = static_cast<const Tc *>(SingletonManager::get<NNabla>()->ones( reduction_size, get_dtype<Tc>(), this->ctx_)); cuda_gemm<Tc>(this->device_, dx, true, dy, outer_size, 1, false, ones, 1, reduction_size, false, 1. / reduction_size, accum ? 1 : 0); } }
8e520d1034a4efcadc86224e38e1e8f41e320127.cu
// Copyright (c) 2017 Sony Corporation. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <nbla/cuda/array/cuda_array.hpp> #include <nbla/cuda/function/mean.hpp> #include <nbla/cuda/math.hpp> #include <nbla/cuda/utils/block_reduce.cuh> namespace nbla { template <typename T> __global__ void kernel_reduce_per_block(const int N, const T *x, T *buff, T scale = 1) { typedef typename CudaTypeForceFloat<T>::type AccT; AccT thread_data = 0; NBLA_CUDA_KERNEL_LOOP(i, N) { thread_data += (AccT)x[i]; } thread_data = blockReduceSum(thread_data); if (threadIdx.x == 0) { buff[blockIdx.x] = thread_data * scale; } } template <typename T> void MeanCuda<T>::forward_impl_reduce(const T *x_, T *y_, int outer_size, int reduction_size) { const Tc *x = reinterpret_cast<const Tc *>(x_); Tc *y = reinterpret_cast<Tc *>(y_); cuda_set_device(this->device_); if (outer_size == 1) { if (reduction_size >= 1024) { int blocks = min(NBLA_CUDA_GET_BLOCKS(reduction_size), /*max blocks*/ 1024); shared_ptr<CudaCachedArray> arr_buff = make_shared<CudaCachedArray>(blocks, get_dtype<Tc>(), this->ctx_); Tc *buff = arr_buff->pointer<Tc>(); kernel_reduce_per_block<<<blocks, NBLA_CUDA_NUM_THREADS>>>(reduction_size, x, buff); NBLA_CUDA_KERNEL_CHECK(); kernel_reduce_per_block<Tc><<<1, 1024>>>(blocks, buff, y, (T)(1. / reduction_size)); NBLA_CUDA_KERNEL_CHECK(); } else { kernel_reduce_per_block<Tc><<<1, 1024>>>(reduction_size, x, y, (T)(1. 
/ reduction_size)); NBLA_CUDA_KERNEL_CHECK(); } return; } const Tc *ones = static_cast<const Tc *>(SingletonManager::get<NNabla>()->ones( reduction_size, get_dtype<Tc>(), this->ctx_)); cuda_gemv<Tc>(this->device_, y, x, reduction_size, outer_size, true, ones, reduction_size, 1. / reduction_size, 0); } template <typename T, bool accum> __global__ void kernel_reduce_mean_backward(const int num, T *dx, const T *dy, T scale) { NBLA_CUDA_KERNEL_LOOP(idx, num) { dx[idx] = (accum ? dx[idx] : (T)0) + scale * (*dy); } } template <typename T> void MeanCuda<T>::backward_impl_reduce(const T *dy_, T *dx_, int outer_size, int reduction_size, bool accum) { const Tc *dy = reinterpret_cast<const Tc *>(dy_); Tc *dx = reinterpret_cast<Tc *>(dx_); cuda_set_device(this->device_); if (outer_size == 1) { if (accum) { NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_reduce_mean_backward<Tc, true>), reduction_size, dx, dy, (T)(1. / reduction_size)); } else { NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_reduce_mean_backward<Tc, false>), reduction_size, dx, dy, (T)(1. / reduction_size)); } return; } const Tc *ones = static_cast<const Tc *>(SingletonManager::get<NNabla>()->ones( reduction_size, get_dtype<Tc>(), this->ctx_)); cuda_gemm<Tc>(this->device_, dx, true, dy, outer_size, 1, false, ones, 1, reduction_size, false, 1. / reduction_size, accum ? 1 : 0); } }
649d04fc769ef9baf3e9b50424e7f5c30deb2f27.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include "gpu.h" //extended propagation data residence in GPU device __device__ float * g_ex_Vx0_in; __device__ float * g_ex_Vz0_in; __device__ float * g_ex_Vy0_in; __device__ float * g_ex_sigmaxx0_in; __device__ float * g_ex_sigmazz0_in; __device__ float * g_ex_sigmayy0_in; __device__ float * g_ex_sigmaxy0_in; __device__ float * g_ex_sigmaxz0_in; __device__ float * g_ex_sigmayz0_in; //Time step +2 __device__ float * g_ex_Vx0_in1; __device__ float * g_ex_Vz0_in1; __device__ float * g_ex_Vy0_in1; __device__ float * g_ex_sigmaxx0_in1; __device__ float * g_ex_sigmazz0_in1; __device__ float * g_ex_sigmayy0_in1; __device__ float * g_ex_sigmaxy0_in1; __device__ float * g_ex_sigmaxz0_in1; __device__ float * g_ex_sigmayz0_in1; //time step 0 and output __device__ float * g_ex_Vx0_out; __device__ float * g_ex_Vz0_out; __device__ float * g_ex_Vy0_out; __device__ float * g_ex_sigmaxx0_out; __device__ float * g_ex_sigmazz0_out; __device__ float * g_ex_sigmayy0_out; __device__ float * g_ex_sigmaxy0_out; __device__ float * g_ex_sigmaxz0_out; __device__ float * g_ex_sigmayz0_out; //expaned arrays to store different Operators __device__ float *g_ex_m2; __device__ float *g_ex_m3; __device__ float *g_ex_m2m3; __device__ float *g_ex_m1_x; __device__ float *g_ex_m1_z; __device__ float *g_ex_m1_y; __device__ float *g_tmp; __global__ void rtm_gpu_kernel(int ny, int nz, int nx, float *g_ex_Vy0_in, float * g_ex_Vx0_in, float * g_ex_Vz0_in, float * g_ex_sigmayy0_in, float *g_ex_sigmaxx0_in, float * g_ex_sigmazz0_in, float * g_ex_sigmaxy0_in, float * g_ex_sigmaxz0_in, float * g_ex_sigmayz0_in,//(nz, nx, nt) float *g_ex_Vy0_in1, float * g_ex_Vx0_in1, float * g_ex_Vz0_in1, float * g_ex_sigmayy0_in1, float *g_ex_sigmaxx0_in1, float * g_ex_sigmazz0_in1, float * g_ex_sigmaxy0_in1, float * g_ex_sigmaxz0_in1, float * g_ex_sigmayz0_in1,//(nz, nx, nt) float *g_ex_Vy0_out, float * g_ex_Vx0_out, 
float * g_ex_Vz0_out, float * g_ex_sigmayy0_out, float *g_ex_sigmaxx0_out, float * g_ex_sigmazz0_out, float * g_ex_sigmaxy0_out, float * g_ex_sigmaxz0_out, float * g_ex_sigmayz0_out,//(nz, nx, nt) const float * __restrict__ g_ex_m1_y, const float * __restrict__ g_ex_m1_x, const float * __restrict__ g_ex_m1_z, const float * __restrict__ g_ex_m2, const float * __restrict__ g_ex_m3, const float * __restrict__ g_ex_m2m3);//(nz+10, nx+10) extern "C" void rtm_gpu_init(int ny, int nz, int nx) { //set cuda devices and put all data onto gpu memory hipError_t cuda_ret; hipError_t err; //Set Device cuda_ret = hipSetDevice(1); if(cuda_ret != hipSuccess){ fprintf(stderr, "Failed to Set The cuda Device !\n"); exit(0); } else{ fprintf(stderr, "GPU Device Set ====> OK\n"); } // data init //Time step +1 hipMalloc(&g_ex_Vx0_in, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); hipMalloc(&g_ex_Vz0_in, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); hipMalloc(&g_ex_Vy0_in, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); hipMalloc(&g_ex_sigmaxx0_in, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); hipMalloc(&g_ex_sigmazz0_in, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); hipMalloc(&g_ex_sigmayy0_in, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); hipMalloc(&g_ex_sigmaxy0_in, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); hipMalloc(&g_ex_sigmaxz0_in, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); hipMalloc(&g_ex_sigmayz0_in, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); //Time step +2 hipMalloc(&g_ex_Vx0_in1, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); hipMalloc(&g_ex_Vz0_in1, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); hipMalloc(&g_ex_Vy0_in1, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); hipMalloc(&g_ex_sigmaxx0_in1, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); hipMalloc(&g_ex_sigmazz0_in1, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); hipMalloc(&g_ex_sigmayy0_in1, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); hipMalloc(&g_ex_sigmaxy0_in1, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); hipMalloc(&g_ex_sigmaxz0_in1, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); hipMalloc(&g_ex_sigmayz0_in1, 
sizeof(float)*(ny+10)*(nx+10)*(nz+10)); //time step 0 and output hipMalloc(&g_ex_Vx0_out, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); hipMalloc(&g_ex_Vz0_out, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); hipMalloc(&g_ex_Vy0_out, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); hipMalloc(&g_ex_sigmaxx0_out, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); hipMalloc(&g_ex_sigmazz0_out, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); hipMalloc(&g_ex_sigmayy0_out, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); hipMalloc(&g_ex_sigmaxy0_out, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); hipMalloc(&g_ex_sigmaxz0_out, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); hipMalloc(&g_ex_sigmayz0_out, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); //expaned arrays to store different Operators hipMalloc(&g_ex_m2, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); hipMalloc(&g_ex_m3, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); hipMalloc(&g_ex_m2m3, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); hipMalloc(&g_ex_m1_x, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); hipMalloc(&g_ex_m1_y, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); hipMalloc(&g_ex_m1_z, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); hipFuncSetCacheConfig(rtm_gpu_kernel,hipFuncCachePreferShared); err = hipGetLastError(); if(hipSuccess != err){ fprintf(stderr, "Cuda error6: %s.\n", hipGetErrorString(err)); exit(0); }else{ fprintf(stderr,"GPU Data Init ====> OK\n"); } // data copy } extern "C" void rtm_gpu_copy_in(int ny, int nz, int nx, float *ex_Vy0_in, float * ex_Vx0_in, float * ex_Vz0_in, float * ex_sigmayy0_in, float *ex_sigmaxx0_in, float * ex_sigmazz0_in, float * ex_sigmaxy0_in, float * ex_sigmaxz0_in, float * ex_sigmayz0_in,//(nz, nx, nt) float *ex_Vy0_in1, float * ex_Vx0_in1, float * ex_Vz0_in1, float * ex_sigmayy0_in1, float *ex_sigmaxx0_in1, float * ex_sigmazz0_in1, float * ex_sigmaxy0_in1, float * ex_sigmaxz0_in1, float * ex_sigmayz0_in1,//(nz, nx, nt) float *ex_Vy0_out, float * ex_Vx0_out, float * ex_Vz0_out, float * ex_sigmayy0_out, float *ex_sigmaxx0_out, float * ex_sigmazz0_out, float * ex_sigmaxy0_out, 
float * ex_sigmaxz0_out, float * ex_sigmayz0_out,//(nz, nx, nt) float * ex_m1_y, float * ex_m1_x, float * ex_m1_z, float * ex_m2, float * ex_m3, float * ex_m2m3)//(nz+10, nx+10) { hipError_t err; // data copy hipMemcpy(g_ex_Vy0_in, ex_Vy0_in, sizeof(float)*(nx+10)*(nz+10)*(ny+10), hipMemcpyHostToDevice); hipMemcpy(g_ex_Vx0_in, ex_Vx0_in, sizeof(float)*(nx+10)*(nz+10)*(ny+10), hipMemcpyHostToDevice); hipMemcpy(g_ex_Vz0_in, ex_Vz0_in, sizeof(float)*(nx+10)*(nz+10)*(ny+10), hipMemcpyHostToDevice); hipMemcpy(g_ex_sigmaxx0_in, ex_sigmaxx0_in, sizeof(float)*(nx+10)*(nz+10)*(ny+10), hipMemcpyHostToDevice); hipMemcpy(g_ex_sigmayy0_in, ex_sigmayy0_in, sizeof(float)*(nx+10)*(nz+10)*(ny+10), hipMemcpyHostToDevice); hipMemcpy(g_ex_sigmaxy0_in, ex_sigmaxy0_in, sizeof(float)*(nx+10)*(nz+10)*(ny+10), hipMemcpyHostToDevice); hipMemcpy(g_ex_sigmayz0_in, ex_sigmayz0_in, sizeof(float)*(nx+10)*(nz+10)*(ny+10), hipMemcpyHostToDevice); hipMemcpy(g_ex_sigmaxz0_in, ex_sigmaxz0_in, sizeof(float)*(nx+10)*(nz+10)*(ny+10), hipMemcpyHostToDevice); hipMemcpy(g_ex_sigmazz0_in, ex_sigmazz0_in, sizeof(float)*(nx+10)*(nz+10)*(ny+10), hipMemcpyHostToDevice); hipMemcpy(g_ex_Vy0_in1, ex_Vy0_in1, sizeof(float)*(nx+10)*(nz+10)*(ny+10), hipMemcpyHostToDevice); hipMemcpy(g_ex_Vx0_in1, ex_Vx0_in1, sizeof(float)*(nx+10)*(nz+10)*(ny+10), hipMemcpyHostToDevice); hipMemcpy(g_ex_Vz0_in1, ex_Vz0_in1, sizeof(float)*(nx+10)*(nz+10)*(ny+10), hipMemcpyHostToDevice); hipMemcpy(g_ex_sigmaxx0_in1, ex_sigmaxx0_in1, sizeof(float)*(nx+10)*(nz+10)*(ny+10), hipMemcpyHostToDevice); hipMemcpy(g_ex_sigmayy0_in1, ex_sigmayy0_in1, sizeof(float)*(nx+10)*(nz+10)*(ny+10), hipMemcpyHostToDevice); hipMemcpy(g_ex_sigmaxy0_in1, ex_sigmaxy0_in1, sizeof(float)*(nx+10)*(nz+10)*(ny+10), hipMemcpyHostToDevice); hipMemcpy(g_ex_sigmayz0_in1, ex_sigmayz0_in1, sizeof(float)*(nx+10)*(nz+10)*(ny+10), hipMemcpyHostToDevice); hipMemcpy(g_ex_sigmaxz0_in1, ex_sigmaxz0_in1, sizeof(float)*(nx+10)*(nz+10)*(ny+10), hipMemcpyHostToDevice); 
hipMemcpy(g_ex_sigmazz0_in1, ex_sigmazz0_in1, sizeof(float)*(nx+10)*(nz+10)*(ny+10), hipMemcpyHostToDevice); hipMemcpy(g_ex_Vy0_out, ex_Vy0_out, sizeof(float)*(nx+10)*(nz+10)*(ny+10), hipMemcpyHostToDevice); hipMemcpy(g_ex_Vx0_out, ex_Vx0_out, sizeof(float)*(nx+10)*(nz+10)*(ny+10), hipMemcpyHostToDevice); hipMemcpy(g_ex_Vz0_out, ex_Vz0_out, sizeof(float)*(nx+10)*(nz+10)*(ny+10), hipMemcpyHostToDevice); hipMemcpy(g_ex_sigmaxx0_out, ex_sigmaxx0_out, sizeof(float)*(nx+10)*(nz+10)*(ny+10), hipMemcpyHostToDevice); hipMemcpy(g_ex_sigmayy0_out, ex_sigmayy0_out, sizeof(float)*(nx+10)*(nz+10)*(ny+10), hipMemcpyHostToDevice); hipMemcpy(g_ex_sigmaxy0_out, ex_sigmaxy0_out, sizeof(float)*(nx+10)*(nz+10)*(ny+10), hipMemcpyHostToDevice); hipMemcpy(g_ex_sigmayz0_out, ex_sigmayz0_out, sizeof(float)*(nx+10)*(nz+10)*(ny+10), hipMemcpyHostToDevice); hipMemcpy(g_ex_sigmaxz0_out, ex_sigmaxz0_out, sizeof(float)*(nx+10)*(nz+10)*(ny+10), hipMemcpyHostToDevice); hipMemcpy(g_ex_sigmazz0_out, ex_sigmazz0_out, sizeof(float)*(nx+10)*(nz+10)*(ny+10), hipMemcpyHostToDevice); hipMemcpy(g_ex_m1_y, ex_m1_y, sizeof(float)*(ny+10)*(nx+10)*(nz+10), hipMemcpyHostToDevice); hipMemcpy(g_ex_m1_x, ex_m1_x, sizeof(float)*(ny+10)*(nx+10)*(nz+10), hipMemcpyHostToDevice); hipMemcpy(g_ex_m1_z, ex_m1_z, sizeof(float)*(ny+10)*(nx+10)*(nz+10), hipMemcpyHostToDevice); hipMemcpy(g_ex_m2, ex_m2, sizeof(float)*(ny+10)*(nx+10)*(nz+10), hipMemcpyHostToDevice); hipMemcpy(g_ex_m3, ex_m3, sizeof(float)*(ny+10)*(nx+10)*(nz+10), hipMemcpyHostToDevice); hipMemcpy(g_ex_m2m3, ex_m2m3, sizeof(float)*(ny+10)*(nx+10)*(nz+10), hipMemcpyHostToDevice); err = hipGetLastError(); if(hipSuccess != err){ fprintf(stderr, "Cuda error2: %s.\n", hipGetErrorString(err)); exit(0); }else{ fprintf(stderr,"Data Copy To GPU ====> OK\n"); } } extern "C" void rtm_gpu_copy_out(int ny, int nz, int nx, float *ex_Vy0_out, float * ex_Vx0_out, float * ex_Vz0_out, float * ex_sigmayy0_out, float *ex_sigmaxx0_out, float * ex_sigmazz0_out, float * 
ex_sigmaxy0_out, float * ex_sigmaxz0_out, float * ex_sigmayz0_out)//(nz, nx, nt) { hipError_t err; // data copy back from GPU mem hipMemcpy(ex_Vy0_out, g_ex_Vy0_out, sizeof(float)*(nx+10)*(nz+10)*(ny+10), hipMemcpyDeviceToHost); hipMemcpy(ex_Vx0_out, g_ex_Vx0_out, sizeof(float)*(nx+10)*(nz+10)*(ny+10), hipMemcpyDeviceToHost); hipMemcpy(ex_Vz0_out, g_ex_Vz0_out, sizeof(float)*(nx+10)*(nz+10)*(ny+10), hipMemcpyDeviceToHost); hipMemcpy(ex_sigmaxx0_out, g_ex_sigmaxx0_out, sizeof(float)*(nx+10)*(nz+10)*(ny+10), hipMemcpyDeviceToHost); hipMemcpy(ex_sigmayy0_out, g_ex_sigmayy0_out, sizeof(float)*(nx+10)*(nz+10)*(ny+10), hipMemcpyDeviceToHost); hipMemcpy(ex_sigmaxy0_out, g_ex_sigmaxy0_out, sizeof(float)*(nx+10)*(nz+10)*(ny+10), hipMemcpyDeviceToHost); hipMemcpy(ex_sigmaxz0_out, g_ex_sigmaxz0_out, sizeof(float)*(nx+10)*(nz+10)*(ny+10), hipMemcpyDeviceToHost); hipMemcpy(ex_sigmayz0_out, g_ex_sigmayz0_out, sizeof(float)*(nx+10)*(nz+10)*(ny+10), hipMemcpyDeviceToHost); hipMemcpy(ex_sigmazz0_out, g_ex_sigmazz0_out, sizeof(float)*(nx+10)*(nz+10)*(ny+10), hipMemcpyDeviceToHost); //hipMemcpy(sigmazz0, g_sigmazz0, sizeof(float)*nx*nz*nt, hipMemcpyDeviceToHost); err = hipGetLastError(); if(hipSuccess != err){ fprintf(stderr, "Cuda error3: %s.\n", hipGetErrorString(err)); exit(0); }else{ fprintf(stderr,"Data Copy To CPU ====> OK\n"); } } extern "C" void rtm_gpu_final() { //release GPU memory space hipError_t err; hipFree(g_ex_Vx0_in); hipFree(g_ex_Vz0_in); hipFree(g_ex_Vy0_in); hipFree(g_ex_sigmaxx0_in); hipFree(g_ex_sigmazz0_in); hipFree(g_ex_sigmayy0_in); hipFree(g_ex_sigmaxy0_in); hipFree(g_ex_sigmaxz0_in); hipFree(g_ex_sigmayz0_in); //Time step +2 hipFree(g_ex_Vx0_in1); hipFree(g_ex_Vz0_in1); hipFree(g_ex_Vy0_in1); hipFree(g_ex_sigmaxx0_in1); hipFree(g_ex_sigmazz0_in1); hipFree(g_ex_sigmayy0_in1); hipFree(g_ex_sigmaxy0_in1); hipFree(g_ex_sigmaxz0_in1); hipFree(g_ex_sigmayz0_in1); //time step 0 and output hipFree(g_ex_Vx0_out); hipFree(g_ex_Vz0_out); hipFree(g_ex_Vy0_out); 
hipFree(g_ex_sigmaxx0_out); hipFree(g_ex_sigmazz0_out); hipFree(g_ex_sigmayy0_out); hipFree(g_ex_sigmaxy0_out); hipFree(g_ex_sigmaxz0_out); hipFree(g_ex_sigmayz0_out); //expaned arrays to store different Operators hipFree(g_ex_m2); hipFree(g_ex_m3); hipFree(g_ex_m2m3); hipFree(g_ex_m1_x); hipFree(g_ex_m1_y); hipFree(g_ex_m1_z); err = hipGetLastError(); if(hipSuccess != err){ fprintf(stderr, "Cuda error4: %s.\n", hipGetErrorString(err)); exit(0); }else{ fprintf(stderr,"GPU Mem Released ====> OK\n"); } } void rtm_gpu_change_pointer(){ fprintf(stderr, "GPU pointer changed\n"); g_tmp = g_ex_Vx0_out; g_ex_Vx0_out = g_ex_Vx0_in; g_ex_Vx0_in = g_tmp; g_tmp = g_ex_Vx0_out; g_ex_Vx0_out = g_ex_Vx0_in1; g_ex_Vx0_in1 = g_tmp; g_tmp = g_ex_Vz0_out; g_ex_Vz0_out = g_ex_Vz0_in; g_ex_Vz0_in = g_tmp; g_tmp = g_ex_Vz0_out; g_ex_Vz0_out = g_ex_Vz0_in1; g_ex_Vz0_in1 = g_tmp; g_tmp = g_ex_Vy0_out; g_ex_Vy0_out = g_ex_Vy0_in; g_ex_Vy0_in = g_tmp; g_tmp = g_ex_Vy0_out; g_ex_Vy0_out = g_ex_Vy0_in1; g_ex_Vy0_in1 = g_tmp; g_tmp = g_ex_sigmaxx0_out; g_ex_sigmaxx0_out = g_ex_sigmaxx0_in; g_ex_sigmaxx0_in = g_tmp; g_tmp = g_ex_sigmaxx0_out; g_ex_sigmaxx0_out = g_ex_sigmaxx0_in1; g_ex_sigmaxx0_in1 = g_tmp; g_tmp = g_ex_sigmazz0_out; g_ex_sigmazz0_out = g_ex_sigmazz0_in; g_ex_sigmazz0_in = g_tmp; g_tmp = g_ex_sigmazz0_out; g_ex_sigmazz0_out = g_ex_sigmazz0_in1; g_ex_sigmazz0_in1 = g_tmp; g_tmp = g_ex_sigmayy0_out; g_ex_sigmayy0_out = g_ex_sigmayy0_in; g_ex_sigmayy0_in = g_tmp; g_tmp = g_ex_sigmayy0_out; g_ex_sigmayy0_out = g_ex_sigmayy0_in1; g_ex_sigmayy0_in1 = g_tmp; g_tmp = g_ex_sigmaxy0_out; g_ex_sigmaxy0_out = g_ex_sigmaxy0_in; g_ex_sigmaxy0_in = g_tmp; g_tmp = g_ex_sigmaxy0_out; g_ex_sigmaxy0_out = g_ex_sigmaxy0_in1; g_ex_sigmaxy0_in1 = g_tmp; g_tmp = g_ex_sigmaxz0_out; g_ex_sigmaxz0_out = g_ex_sigmaxz0_in; g_ex_sigmaxz0_in = g_tmp; g_tmp = g_ex_sigmaxz0_out; g_ex_sigmaxz0_out = g_ex_sigmaxz0_in1; g_ex_sigmaxz0_in1 = g_tmp; g_tmp = g_ex_sigmayz0_out; g_ex_sigmayz0_out = g_ex_sigmayz0_in; 
g_ex_sigmayz0_in = g_tmp; g_tmp = g_ex_sigmayz0_out; g_ex_sigmayz0_out = g_ex_sigmayz0_in1; g_ex_sigmayz0_in1 = g_tmp; } __global__ void rtm_gpu_kernel(int ny, int nz, int nx, float *g_ex_Vy0_in, float * g_ex_Vx0_in, float * g_ex_Vz0_in, float * g_ex_sigmayy0_in, float *g_ex_sigmaxx0_in, float * g_ex_sigmazz0_in, float * g_ex_sigmaxy0_in, float * g_ex_sigmaxz0_in, float * g_ex_sigmayz0_in,//(nz, nx, nt) float *g_ex_Vy0_in1, float * g_ex_Vx0_in1, float * g_ex_Vz0_in1, float * g_ex_sigmayy0_in1, float *g_ex_sigmaxx0_in1, float * g_ex_sigmazz0_in1, float * g_ex_sigmaxy0_in1, float * g_ex_sigmaxz0_in1, float * g_ex_sigmayz0_in1,//(nz, nx, nt) float *g_ex_Vy0_out, float * g_ex_Vx0_out, float * g_ex_Vz0_out, float * g_ex_sigmayy0_out, float *g_ex_sigmaxx0_out, float * g_ex_sigmazz0_out, float * g_ex_sigmaxy0_out, float * g_ex_sigmaxz0_out, float * g_ex_sigmayz0_out,//(nz, nx, nt) const float * __restrict__ g_ex_m1_y, const float * __restrict__ g_ex_m1_x, const float * __restrict__ g_ex_m1_z, const float * __restrict__ g_ex_m2, const float * __restrict__ g_ex_m3, const float * __restrict__ g_ex_m2m3)//(nz+10, nx+10) { float c1=35.0/294912.0,c2=-405.0/229376.0,c3=567.0/40960.0,c4=-735.0/8192.0,c5=19845.0/16384.0; //GPU thread index int iz, ix, iy; iz = blockIdx.x*blockDim.x + threadIdx.x; ix = blockIdx.y*blockDim.y + threadIdx.y; iy = blockIdx.z*blockDim.z + threadIdx.z; //gt = it; g_ex_Vx0_out[n3d_index_ex(iz,ix ,iy)] = g_ex_Vx0_out[n3d_index_ex(iz,ix ,iy)] + g_ex_Vx0_in1[n3d_index_ex(iz, ix, iy)] + g_ex_m2m3[n3d_index_ex(iz,ix-5, iy)]*c1*g_ex_sigmaxx0_in[n3d_index_ex(iz,ix-5,iy)] + g_ex_m2m3[n3d_index_ex(iz,ix-4, iy)]*c2*g_ex_sigmaxx0_in[n3d_index_ex(iz,ix-4,iy)] + g_ex_m2m3[n3d_index_ex(iz,ix-3, iy)]*c3*g_ex_sigmaxx0_in[n3d_index_ex(iz,ix-3,iy)] + g_ex_m2m3[n3d_index_ex(iz,ix-2, iy)]*c4*g_ex_sigmaxx0_in[n3d_index_ex(iz,ix-2,iy)] + g_ex_m2m3[n3d_index_ex(iz,ix-1, iy)]*c5*g_ex_sigmaxx0_in[n3d_index_ex(iz,ix-1,iy)] - g_ex_m2m3[n3d_index_ex(iz,ix, iy)] 
*c5*g_ex_sigmaxx0_in[n3d_index_ex(iz,ix,iy)] - g_ex_m2m3[n3d_index_ex(iz,ix+1, iy)]*c4*g_ex_sigmaxx0_in[n3d_index_ex(iz,ix+1,iy)] - g_ex_m2m3[n3d_index_ex(iz,ix+2, iy)]*c3*g_ex_sigmaxx0_in[n3d_index_ex(iz,ix+2,iy)] - g_ex_m2m3[n3d_index_ex(iz,ix+3, iy)]*c2*g_ex_sigmaxx0_in[n3d_index_ex(iz,ix+3,iy)] - g_ex_m2m3[n3d_index_ex(iz,ix+4, iy)]*c1*g_ex_sigmaxx0_in[n3d_index_ex(iz,ix+4,iy)] + g_ex_m2[n3d_index_ex(iz,ix-5, iy)]*c1*g_ex_sigmayy0_in[n3d_index_ex(iz,ix-5,iy)] + g_ex_m2[n3d_index_ex(iz,ix-4, iy)]*c2*g_ex_sigmayy0_in[n3d_index_ex(iz,ix-4,iy)] + g_ex_m2[n3d_index_ex(iz,ix-3, iy)]*c3*g_ex_sigmayy0_in[n3d_index_ex(iz,ix-3,iy)] + g_ex_m2[n3d_index_ex(iz,ix-2, iy)]*c4*g_ex_sigmayy0_in[n3d_index_ex(iz,ix-2,iy)] + g_ex_m2[n3d_index_ex(iz,ix-1, iy)]*c5*g_ex_sigmayy0_in[n3d_index_ex(iz,ix-1,iy)] - g_ex_m2[n3d_index_ex(iz, ix, iy)]*c5*g_ex_sigmayy0_in[n3d_index_ex(iz,ix,iy)] - g_ex_m2[n3d_index_ex(iz,ix+1, iy)]*c4*g_ex_sigmayy0_in[n3d_index_ex(iz,ix+1,iy)] - g_ex_m2[n3d_index_ex(iz,ix+2, iy)]*c3*g_ex_sigmayy0_in[n3d_index_ex(iz,ix+2,iy)] - g_ex_m2[n3d_index_ex(iz,ix+3, iy)]*c2*g_ex_sigmayy0_in[n3d_index_ex(iz,ix+3,iy)] - g_ex_m2[n3d_index_ex(iz,ix+4, iy)]*c1*g_ex_sigmayy0_in[n3d_index_ex(iz,ix+4,iy)] + g_ex_m2[n3d_index_ex(iz,ix-5, iy)]*c1*g_ex_sigmazz0_in[n3d_index_ex(iz,ix-5,iy)] + g_ex_m2[n3d_index_ex(iz,ix-4, iy)]*c2*g_ex_sigmazz0_in[n3d_index_ex(iz,ix-4,iy)] + g_ex_m2[n3d_index_ex(iz,ix-3, iy)]*c3*g_ex_sigmazz0_in[n3d_index_ex(iz,ix-3,iy)] + g_ex_m2[n3d_index_ex(iz,ix-2, iy)]*c4*g_ex_sigmazz0_in[n3d_index_ex(iz,ix-2,iy)] + g_ex_m2[n3d_index_ex(iz,ix-1, iy)]*c5*g_ex_sigmazz0_in[n3d_index_ex(iz,ix-1,iy)] - g_ex_m2[n3d_index_ex(iz, ix, iy)]*c5*g_ex_sigmazz0_in[n3d_index_ex(iz,ix,iy)] - g_ex_m2[n3d_index_ex(iz,ix+1, iy)]*c4*g_ex_sigmazz0_in[n3d_index_ex(iz,ix+1,iy)] - g_ex_m2[n3d_index_ex(iz,ix+2, iy)]*c3*g_ex_sigmazz0_in[n3d_index_ex(iz,ix+2,iy)] - g_ex_m2[n3d_index_ex(iz,ix+3, iy)]*c2*g_ex_sigmazz0_in[n3d_index_ex(iz,ix+3,iy)] - g_ex_m2[n3d_index_ex(iz,ix+4, 
iy)]*c1*g_ex_sigmazz0_in[n3d_index_ex(iz,ix+4,iy)] + g_ex_m3[n3d_index_ex(iz,ix, iy-4)]*c1*g_ex_sigmaxy0_in[n3d_index_ex(iz,ix,iy-4)] + g_ex_m3[n3d_index_ex(iz,ix, iy-3)]*c2*g_ex_sigmaxy0_in[n3d_index_ex(iz,ix,iy-3)] + g_ex_m3[n3d_index_ex(iz,ix, iy-2)]*c3*g_ex_sigmaxy0_in[n3d_index_ex(iz,ix,iy-2)] + g_ex_m3[n3d_index_ex(iz,ix, iy-1)]*c4*g_ex_sigmaxy0_in[n3d_index_ex(iz,ix,iy-1)] + g_ex_m3[n3d_index_ex(iz,ix, iy)] *c5*g_ex_sigmaxy0_in[n3d_index_ex(iz,ix,iy)] - g_ex_m3[n3d_index_ex(iz,ix, iy+1)]*c5*g_ex_sigmaxy0_in[n3d_index_ex(iz,ix,iy+1)] - g_ex_m3[n3d_index_ex(iz,ix, iy+2)]*c4*g_ex_sigmaxy0_in[n3d_index_ex(iz,ix,iy+2)] - g_ex_m3[n3d_index_ex(iz,ix, iy+3)]*c3*g_ex_sigmaxy0_in[n3d_index_ex(iz,ix,iy+3)] - g_ex_m3[n3d_index_ex(iz,ix, iy+4)]*c2*g_ex_sigmaxy0_in[n3d_index_ex(iz,ix,iy+4)] - g_ex_m3[n3d_index_ex(iz,ix, iy+5)]*c1*g_ex_sigmaxy0_in[n3d_index_ex(iz,ix,iy+5)] + g_ex_m3[n3d_index_ex(iz-4,ix, iy)]*c1*g_ex_sigmaxz0_in[n3d_index_ex(iz-4,ix,iy)] + g_ex_m3[n3d_index_ex(iz-3,ix, iy)]*c2*g_ex_sigmaxz0_in[n3d_index_ex(iz-3,ix,iy)] + g_ex_m3[n3d_index_ex(iz-2,ix, iy)]*c3*g_ex_sigmaxz0_in[n3d_index_ex(iz-2,ix,iy)] + g_ex_m3[n3d_index_ex(iz-1,ix, iy)]*c4*g_ex_sigmaxz0_in[n3d_index_ex(iz-1,ix,iy)] + g_ex_m3[n3d_index_ex(iz, ix, iy)]*c5*g_ex_sigmaxz0_in[n3d_index_ex(iz,ix,iy)] - g_ex_m3[n3d_index_ex(iz+1,ix, iy)]*c5*g_ex_sigmaxz0_in[n3d_index_ex(iz+1,ix,iy)] - g_ex_m3[n3d_index_ex(iz+2,ix, iy)]*c4*g_ex_sigmaxz0_in[n3d_index_ex(iz+2,ix,iy)] - g_ex_m3[n3d_index_ex(iz+3,ix, iy)]*c3*g_ex_sigmaxz0_in[n3d_index_ex(iz+3,ix,iy)] - g_ex_m3[n3d_index_ex(iz+4,ix, iy)]*c2*g_ex_sigmaxz0_in[n3d_index_ex(iz+4,ix,iy)] - g_ex_m3[n3d_index_ex(iz+5,ix, iy)]*c1*g_ex_sigmaxz0_in[n3d_index_ex(iz+5,ix,iy)] ; g_ex_Vy0_out[n3d_index_ex(iz,ix ,iy)] = g_ex_Vy0_out[n3d_index_ex(iz,ix ,iy)] + g_ex_Vy0_in1[n3d_index_ex(iz, ix, iy)] + g_ex_m2m3[n3d_index_ex(iz,ix, iy-5)]*c1*g_ex_sigmayy0_in[n3d_index_ex(iz,ix,iy-5)] + g_ex_m2m3[n3d_index_ex(iz,ix, iy-4)]*c2*g_ex_sigmayy0_in[n3d_index_ex(iz,ix,iy-4)] + 
g_ex_m2m3[n3d_index_ex(iz,ix, iy-3)]*c3*g_ex_sigmayy0_in[n3d_index_ex(iz,ix,iy-3)] + g_ex_m2m3[n3d_index_ex(iz,ix, iy-2)]*c4*g_ex_sigmayy0_in[n3d_index_ex(iz,ix,iy-2)] + g_ex_m2m3[n3d_index_ex(iz,ix, iy-1)]*c5*g_ex_sigmayy0_in[n3d_index_ex(iz,ix,iy-1)] - g_ex_m2m3[n3d_index_ex(iz,ix, iy)] *c5*g_ex_sigmayy0_in[n3d_index_ex(iz,ix,iy)] - g_ex_m2m3[n3d_index_ex(iz,ix, iy+1)]*c4*g_ex_sigmayy0_in[n3d_index_ex(iz,ix,iy+1)] - g_ex_m2m3[n3d_index_ex(iz,ix, iy+2)]*c3*g_ex_sigmayy0_in[n3d_index_ex(iz,ix,iy+2)] - g_ex_m2m3[n3d_index_ex(iz,ix, iy+3)]*c2*g_ex_sigmayy0_in[n3d_index_ex(iz,ix,iy+3)] - g_ex_m2m3[n3d_index_ex(iz,ix, iy+4)]*c1*g_ex_sigmayy0_in[n3d_index_ex(iz,ix,iy+4)] + g_ex_m2[n3d_index_ex(iz,ix, iy-5)]*c1*g_ex_sigmazz0_in[n3d_index_ex(iz,ix,iy-5)] + g_ex_m2[n3d_index_ex(iz,ix, iy-4)]*c2*g_ex_sigmazz0_in[n3d_index_ex(iz,ix,iy-4)] + g_ex_m2[n3d_index_ex(iz,ix, iy-3)]*c3*g_ex_sigmazz0_in[n3d_index_ex(iz,ix,iy-3)] + g_ex_m2[n3d_index_ex(iz,ix, iy-2)]*c4*g_ex_sigmazz0_in[n3d_index_ex(iz,ix,iy-2)] + g_ex_m2[n3d_index_ex(iz,ix, iy-1)]*c5*g_ex_sigmazz0_in[n3d_index_ex(iz,ix,iy-1)] - g_ex_m2[n3d_index_ex(iz,ix, iy)] *c5*g_ex_sigmazz0_in[n3d_index_ex(iz,ix,iy)] - g_ex_m2[n3d_index_ex(iz,ix, iy+1)]*c4*g_ex_sigmazz0_in[n3d_index_ex(iz,ix,iy+1)] - g_ex_m2[n3d_index_ex(iz,ix, iy+2)]*c3*g_ex_sigmazz0_in[n3d_index_ex(iz,ix,iy+2)] - g_ex_m2[n3d_index_ex(iz,ix, iy+3)]*c2*g_ex_sigmazz0_in[n3d_index_ex(iz,ix,iy+3)] - g_ex_m2[n3d_index_ex(iz,ix, iy+4)]*c1*g_ex_sigmazz0_in[n3d_index_ex(iz,ix,iy+4)] + g_ex_m2[n3d_index_ex(iz,ix, iy-5)]*c1*g_ex_sigmaxx0_in[n3d_index_ex(iz,ix,iy-5)] + g_ex_m2[n3d_index_ex(iz,ix, iy-4)]*c2*g_ex_sigmaxx0_in[n3d_index_ex(iz,ix,iy-4)] + g_ex_m2[n3d_index_ex(iz,ix, iy-3)]*c3*g_ex_sigmaxx0_in[n3d_index_ex(iz,ix,iy-3)] + g_ex_m2[n3d_index_ex(iz,ix, iy-2)]*c4*g_ex_sigmaxx0_in[n3d_index_ex(iz,ix,iy-2)] + g_ex_m2[n3d_index_ex(iz,ix, iy-1)]*c5*g_ex_sigmaxx0_in[n3d_index_ex(iz,ix,iy-1)] - g_ex_m2[n3d_index_ex(iz,ix, iy)] *c5*g_ex_sigmaxx0_in[n3d_index_ex(iz,ix,iy)] - 
g_ex_m2[n3d_index_ex(iz,ix, iy+1)]*c4*g_ex_sigmaxx0_in[n3d_index_ex(iz,ix,iy+1)] - g_ex_m2[n3d_index_ex(iz,ix, iy+2)]*c3*g_ex_sigmaxx0_in[n3d_index_ex(iz,ix,iy+2)] - g_ex_m2[n3d_index_ex(iz,ix, iy+3)]*c2*g_ex_sigmaxx0_in[n3d_index_ex(iz,ix,iy+3)] - g_ex_m2[n3d_index_ex(iz,ix, iy+4)]*c1*g_ex_sigmaxx0_in[n3d_index_ex(iz,ix,iy+4)] + g_ex_m3[n3d_index_ex(iz-4,ix, iy)]*c1*g_ex_sigmayz0_in[n3d_index_ex(iz-4,ix,iy)] + g_ex_m3[n3d_index_ex(iz-3,ix, iy)]*c2*g_ex_sigmayz0_in[n3d_index_ex(iz-3,ix,iy)] + g_ex_m3[n3d_index_ex(iz-2,ix, iy)]*c3*g_ex_sigmayz0_in[n3d_index_ex(iz-2,ix,iy)] + g_ex_m3[n3d_index_ex(iz-1,ix, iy)]*c4*g_ex_sigmayz0_in[n3d_index_ex(iz-1,ix,iy)] + g_ex_m3[n3d_index_ex(iz,ix, iy)] *c5*g_ex_sigmayz0_in[n3d_index_ex(iz,ix,iy)] - g_ex_m3[n3d_index_ex(iz+1,ix, iy)]*c5*g_ex_sigmayz0_in[n3d_index_ex(iz+1,ix,iy)] - g_ex_m3[n3d_index_ex(iz+2,ix, iy)]*c4*g_ex_sigmayz0_in[n3d_index_ex(iz+2,ix,iy)] - g_ex_m3[n3d_index_ex(iz+3,ix, iy)]*c3*g_ex_sigmayz0_in[n3d_index_ex(iz+3,ix,iy)] - g_ex_m3[n3d_index_ex(iz+4,ix, iy)]*c2*g_ex_sigmayz0_in[n3d_index_ex(iz+4,ix,iy)] - g_ex_m3[n3d_index_ex(iz+5,ix, iy)]*c1*g_ex_sigmayz0_in[n3d_index_ex(iz+5,ix,iy)] + g_ex_m3[n3d_index_ex(iz,ix-4, iy)]*c1*g_ex_sigmaxy0_in[n3d_index_ex(iz,ix-4,iy)] + g_ex_m3[n3d_index_ex(iz,ix-3, iy)]*c2*g_ex_sigmaxy0_in[n3d_index_ex(iz,ix-3,iy)] + g_ex_m3[n3d_index_ex(iz,ix-2, iy)]*c3*g_ex_sigmaxy0_in[n3d_index_ex(iz,ix-2,iy)] + g_ex_m3[n3d_index_ex(iz,ix-1, iy)]*c4*g_ex_sigmaxy0_in[n3d_index_ex(iz,ix-1,iy)] + g_ex_m3[n3d_index_ex(iz,ix, iy)] *c5*g_ex_sigmaxy0_in[n3d_index_ex(iz,ix,iy)] - g_ex_m3[n3d_index_ex(iz,ix+1, iy)]*c5*g_ex_sigmaxy0_in[n3d_index_ex(iz,ix+1,iy)] - g_ex_m3[n3d_index_ex(iz,ix+2, iy)]*c4*g_ex_sigmaxy0_in[n3d_index_ex(iz,ix+2,iy)] - g_ex_m3[n3d_index_ex(iz,ix+3, iy)]*c3*g_ex_sigmaxy0_in[n3d_index_ex(iz,ix+3,iy)] - g_ex_m3[n3d_index_ex(iz,ix+4, iy)]*c2*g_ex_sigmaxy0_in[n3d_index_ex(iz,ix+4,iy)] - g_ex_m3[n3d_index_ex(iz,ix+5, iy)]*c1*g_ex_sigmaxy0_in[n3d_index_ex(iz,ix+5,iy)] ; 
g_ex_Vz0_out[n3d_index_ex(iz,ix ,iy)] = g_ex_Vz0_out[n3d_index_ex(iz,ix ,iy)] + g_ex_Vz0_in1[n3d_index_ex(iz, ix, iy)] + g_ex_m2m3[n3d_index_ex(iz-5,ix, iy)]*c1*g_ex_sigmazz0_in[n3d_index_ex(iz-5,ix,iy)] + g_ex_m2m3[n3d_index_ex(iz-4,ix, iy)]*c2*g_ex_sigmazz0_in[n3d_index_ex(iz-4,ix,iy)] + g_ex_m2m3[n3d_index_ex(iz-3,ix, iy)]*c3*g_ex_sigmazz0_in[n3d_index_ex(iz-3,ix,iy)] + g_ex_m2m3[n3d_index_ex(iz-2,ix, iy)]*c4*g_ex_sigmazz0_in[n3d_index_ex(iz-2,ix,iy)] + g_ex_m2m3[n3d_index_ex(iz-1,ix, iy)]*c5*g_ex_sigmazz0_in[n3d_index_ex(iz-1,ix,iy)] - g_ex_m2m3[n3d_index_ex(iz,ix, iy)] *c5*g_ex_sigmazz0_in[n3d_index_ex(iz,ix,iy)] - g_ex_m2m3[n3d_index_ex(iz+1,ix, iy)]*c4*g_ex_sigmazz0_in[n3d_index_ex(iz+1,ix,iy)] - g_ex_m2m3[n3d_index_ex(iz+2,ix, iy)]*c3*g_ex_sigmazz0_in[n3d_index_ex(iz+2,ix,iy)] - g_ex_m2m3[n3d_index_ex(iz+3,ix, iy)]*c2*g_ex_sigmazz0_in[n3d_index_ex(iz+3,ix,iy)] - g_ex_m2m3[n3d_index_ex(iz+4,ix, iy)]*c1*g_ex_sigmazz0_in[n3d_index_ex(iz+4,ix,iy)] + g_ex_m2[n3d_index_ex(iz-5,ix, iy)]*c1*g_ex_sigmaxx0_in[n3d_index_ex(iz-5,ix,iy)] + g_ex_m2[n3d_index_ex(iz-4,ix, iy)]*c2*g_ex_sigmaxx0_in[n3d_index_ex(iz-4,ix,iy)] + g_ex_m2[n3d_index_ex(iz-3,ix, iy)]*c3*g_ex_sigmaxx0_in[n3d_index_ex(iz-3,ix,iy)] + g_ex_m2[n3d_index_ex(iz-2,ix, iy)]*c4*g_ex_sigmaxx0_in[n3d_index_ex(iz-2,ix,iy)] + g_ex_m2[n3d_index_ex(iz-1,ix, iy)]*c5*g_ex_sigmaxx0_in[n3d_index_ex(iz-1,ix,iy)] - g_ex_m2[n3d_index_ex(iz,ix, iy)] *c5*g_ex_sigmaxx0_in[n3d_index_ex(iz,ix,iy)] - g_ex_m2[n3d_index_ex(iz+1,ix, iy)]*c4*g_ex_sigmaxx0_in[n3d_index_ex(iz+1,ix,iy)] - g_ex_m2[n3d_index_ex(iz+2,ix, iy)]*c3*g_ex_sigmaxx0_in[n3d_index_ex(iz+2,ix,iy)] - g_ex_m2[n3d_index_ex(iz+3,ix, iy)]*c2*g_ex_sigmaxx0_in[n3d_index_ex(iz+3,ix,iy)] - g_ex_m2[n3d_index_ex(iz+4,ix, iy)]*c1*g_ex_sigmaxx0_in[n3d_index_ex(iz+4,ix,iy)] + g_ex_m2[n3d_index_ex(iz-5,ix, iy)]*c1*g_ex_sigmayy0_in[n3d_index_ex(iz-5,ix,iy)] + g_ex_m2[n3d_index_ex(iz-4,ix, iy)]*c2*g_ex_sigmayy0_in[n3d_index_ex(iz-4,ix,iy)] + g_ex_m2[n3d_index_ex(iz-3,ix, 
iy)]*c3*g_ex_sigmayy0_in[n3d_index_ex(iz-3,ix,iy)] + g_ex_m2[n3d_index_ex(iz-2,ix, iy)]*c4*g_ex_sigmayy0_in[n3d_index_ex(iz-2,ix,iy)] + g_ex_m2[n3d_index_ex(iz-1,ix, iy)]*c5*g_ex_sigmayy0_in[n3d_index_ex(iz-1,ix,iy)] - g_ex_m2[n3d_index_ex(iz,ix, iy)] *c5*g_ex_sigmayy0_in[n3d_index_ex(iz,ix,iy)] - g_ex_m2[n3d_index_ex(iz+1,ix, iy)]*c4*g_ex_sigmayy0_in[n3d_index_ex(iz+1,ix,iy)] - g_ex_m2[n3d_index_ex(iz+2,ix, iy)]*c3*g_ex_sigmayy0_in[n3d_index_ex(iz+2,ix,iy)] - g_ex_m2[n3d_index_ex(iz+3,ix, iy)]*c2*g_ex_sigmayy0_in[n3d_index_ex(iz+3,ix,iy)] - g_ex_m2[n3d_index_ex(iz+4,ix, iy)]*c1*g_ex_sigmayy0_in[n3d_index_ex(iz+4,ix,iy)] + g_ex_m3[n3d_index_ex(iz,ix, iy-4)]*c1*g_ex_sigmayz0_in[n3d_index_ex(iz,ix,iy-4)] + g_ex_m3[n3d_index_ex(iz,ix, iy-3)]*c2*g_ex_sigmayz0_in[n3d_index_ex(iz,ix,iy-3)] + g_ex_m3[n3d_index_ex(iz,ix, iy-2)]*c3*g_ex_sigmayz0_in[n3d_index_ex(iz,ix,iy-2)] + g_ex_m3[n3d_index_ex(iz,ix, iy-1)]*c4*g_ex_sigmayz0_in[n3d_index_ex(iz,ix,iy-1)] + g_ex_m3[n3d_index_ex(iz,ix, iy)] *c5*g_ex_sigmayz0_in[n3d_index_ex(iz,ix,iy)] - g_ex_m3[n3d_index_ex(iz,ix, iy+1)]*c5*g_ex_sigmayz0_in[n3d_index_ex(iz,ix,iy+1)] - g_ex_m3[n3d_index_ex(iz,ix, iy+2)]*c4*g_ex_sigmayz0_in[n3d_index_ex(iz,ix,iy+2)] - g_ex_m3[n3d_index_ex(iz,ix, iy+3)]*c3*g_ex_sigmayz0_in[n3d_index_ex(iz,ix,iy+3)] - g_ex_m3[n3d_index_ex(iz,ix, iy+4)]*c2*g_ex_sigmayz0_in[n3d_index_ex(iz,ix,iy+4)] - g_ex_m3[n3d_index_ex(iz,ix, iy+5)]*c1*g_ex_sigmayz0_in[n3d_index_ex(iz,ix,iy+5)] + g_ex_m3[n3d_index_ex(iz,ix-4, iy)]*c1*g_ex_sigmaxz0_in[n3d_index_ex(iz,ix-4,iy)] + g_ex_m3[n3d_index_ex(iz,ix-3, iy)]*c2*g_ex_sigmaxz0_in[n3d_index_ex(iz,ix-3,iy)] + g_ex_m3[n3d_index_ex(iz,ix-2, iy)]*c3*g_ex_sigmaxz0_in[n3d_index_ex(iz,ix-2,iy)] + g_ex_m3[n3d_index_ex(iz,ix-1, iy)]*c4*g_ex_sigmaxz0_in[n3d_index_ex(iz,ix-1,iy)] + g_ex_m3[n3d_index_ex(iz,ix, iy)] *c5*g_ex_sigmaxz0_in[n3d_index_ex(iz,ix,iy)] - g_ex_m3[n3d_index_ex(iz,ix+1, iy)]*c5*g_ex_sigmaxz0_in[n3d_index_ex(iz,ix+1,iy)] - g_ex_m3[n3d_index_ex(iz,ix+2, 
iy)]*c4*g_ex_sigmaxz0_in[n3d_index_ex(iz,ix+2,iy)] - g_ex_m3[n3d_index_ex(iz,ix+3, iy)]*c3*g_ex_sigmaxz0_in[n3d_index_ex(iz,ix+3,iy)] - g_ex_m3[n3d_index_ex(iz,ix+4, iy)]*c2*g_ex_sigmaxz0_in[n3d_index_ex(iz,ix+4,iy)] - g_ex_m3[n3d_index_ex(iz,ix+5, iy)]*c1*g_ex_sigmaxz0_in[n3d_index_ex(iz,ix+5,iy)] ; g_ex_sigmaxx0_out[n3d_index_ex(iz,ix ,iy)] = g_ex_sigmaxx0_out[n3d_index_ex(iz,ix , iy)] + g_ex_sigmaxx0_in1[n3d_index_ex(iz,ix , iy)] + g_ex_m1_x[n3d_index_ex(iz,ix-4, iy)]*c1*g_ex_Vx0_in[n3d_index_ex(iz,ix-4,iy)] + g_ex_m1_x[n3d_index_ex(iz,ix-3, iy)]*c2*g_ex_Vx0_in[n3d_index_ex(iz,ix-3,iy)] + g_ex_m1_x[n3d_index_ex(iz,ix-2, iy)]*c3*g_ex_Vx0_in[n3d_index_ex(iz,ix-2,iy)] + g_ex_m1_x[n3d_index_ex(iz,ix-1, iy)]*c4*g_ex_Vx0_in[n3d_index_ex(iz,ix-1,iy)] + g_ex_m1_x[n3d_index_ex(iz,ix, iy)] *c5*g_ex_Vx0_in[n3d_index_ex(iz,ix,iy)] - g_ex_m1_x[n3d_index_ex(iz,ix+1, iy)]*c5*g_ex_Vx0_in[n3d_index_ex(iz,ix+1,iy)] - g_ex_m1_x[n3d_index_ex(iz,ix+2, iy)]*c4*g_ex_Vx0_in[n3d_index_ex(iz,ix+2,iy)] - g_ex_m1_x[n3d_index_ex(iz,ix+3, iy)]*c3*g_ex_Vx0_in[n3d_index_ex(iz,ix+3,iy)] - g_ex_m1_x[n3d_index_ex(iz,ix+4, iy)]*c2*g_ex_Vx0_in[n3d_index_ex(iz,ix+4,iy)] - g_ex_m1_x[n3d_index_ex(iz,ix+5, iy)]*c1*g_ex_Vx0_in[n3d_index_ex(iz,ix+5,iy)] ; g_ex_sigmayy0_out[n3d_index_ex(iz,ix ,iy)] = g_ex_sigmayy0_out[n3d_index_ex(iz,ix , iy)] + g_ex_sigmayy0_in1[n3d_index_ex(iz,ix , iy)] + g_ex_m1_y[n3d_index_ex(iz,ix, iy-4)]*c1*g_ex_Vy0_in[n3d_index_ex(iz,ix,iy-4)] + g_ex_m1_y[n3d_index_ex(iz,ix, iy-3)]*c2*g_ex_Vy0_in[n3d_index_ex(iz,ix,iy-3)] + g_ex_m1_y[n3d_index_ex(iz,ix, iy-2)]*c3*g_ex_Vy0_in[n3d_index_ex(iz,ix,iy-2)] + g_ex_m1_y[n3d_index_ex(iz,ix, iy-1)]*c4*g_ex_Vy0_in[n3d_index_ex(iz,ix,iy-1)] + g_ex_m1_y[n3d_index_ex(iz,ix, iy)] *c5*g_ex_Vy0_in[n3d_index_ex(iz,ix,iy)] - g_ex_m1_y[n3d_index_ex(iz,ix, iy+1)]*c5*g_ex_Vy0_in[n3d_index_ex(iz,ix,iy+1)] - g_ex_m1_y[n3d_index_ex(iz,ix, iy+2)]*c4*g_ex_Vy0_in[n3d_index_ex(iz,ix,iy+2)] - g_ex_m1_y[n3d_index_ex(iz,ix, 
iy+3)]*c3*g_ex_Vy0_in[n3d_index_ex(iz,ix,iy+3)] - g_ex_m1_y[n3d_index_ex(iz,ix, iy+4)]*c2*g_ex_Vy0_in[n3d_index_ex(iz,ix,iy+4)] - g_ex_m1_y[n3d_index_ex(iz,ix, iy+5)]*c1*g_ex_Vy0_in[n3d_index_ex(iz,ix,iy+5)] ; g_ex_sigmazz0_out[n3d_index_ex(iz,ix ,iy)] = g_ex_sigmazz0_out[n3d_index_ex(iz,ix , iy)] + g_ex_sigmazz0_in1[n3d_index_ex(iz,ix , iy)] + g_ex_m1_z[n3d_index_ex(iz-4,ix, iy)]*c1*g_ex_Vz0_in[n3d_index_ex(iz-4,ix,iy)] + g_ex_m1_z[n3d_index_ex(iz-3,ix, iy)]*c2*g_ex_Vz0_in[n3d_index_ex(iz-3,ix,iy)] + g_ex_m1_z[n3d_index_ex(iz-2,ix, iy)]*c3*g_ex_Vz0_in[n3d_index_ex(iz-2,ix,iy)] + g_ex_m1_z[n3d_index_ex(iz-1,ix, iy)]*c4*g_ex_Vz0_in[n3d_index_ex(iz-1,ix,iy)] + g_ex_m1_z[n3d_index_ex(iz,ix, iy)] *c5*g_ex_Vz0_in[n3d_index_ex(iz,ix,iy)] - g_ex_m1_z[n3d_index_ex(iz+1,ix, iy)]*c5*g_ex_Vz0_in[n3d_index_ex(iz+1,ix,iy)] - g_ex_m1_z[n3d_index_ex(iz+2,ix, iy)]*c4*g_ex_Vz0_in[n3d_index_ex(iz+2,ix,iy)] - g_ex_m1_z[n3d_index_ex(iz+3,ix, iy)]*c3*g_ex_Vz0_in[n3d_index_ex(iz+3,ix,iy)] - g_ex_m1_z[n3d_index_ex(iz+4,ix, iy)]*c2*g_ex_Vz0_in[n3d_index_ex(iz+4,ix,iy)] - g_ex_m1_z[n3d_index_ex(iz+5,ix, iy)]*c1*g_ex_Vz0_in[n3d_index_ex(iz+5,ix,iy)] ; g_ex_sigmaxy0_out[n3d_index_ex(iz,ix ,iy)] = g_ex_sigmaxy0_out[n3d_index_ex(iz,ix , iy)] + g_ex_sigmaxy0_in1[n3d_index_ex(iz,ix , iy)] + g_ex_m1_y[n3d_index_ex(iz,ix-4, iy)]*c1*g_ex_Vy0_in[n3d_index_ex(iz,ix-4,iy)] + g_ex_m1_y[n3d_index_ex(iz,ix-3, iy)]*c2*g_ex_Vy0_in[n3d_index_ex(iz,ix-3,iy)] + g_ex_m1_y[n3d_index_ex(iz,ix-2, iy)]*c3*g_ex_Vy0_in[n3d_index_ex(iz,ix-2,iy)] + g_ex_m1_y[n3d_index_ex(iz,ix-1, iy)]*c4*g_ex_Vy0_in[n3d_index_ex(iz,ix-1,iy)] + g_ex_m1_y[n3d_index_ex(iz,ix, iy)] *c5*g_ex_Vy0_in[n3d_index_ex(iz,ix,iy)] - g_ex_m1_y[n3d_index_ex(iz,ix+1, iy)]*c5*g_ex_Vy0_in[n3d_index_ex(iz,ix+1,iy)] - g_ex_m1_y[n3d_index_ex(iz,ix+2, iy)]*c4*g_ex_Vy0_in[n3d_index_ex(iz,ix+2,iy)] - g_ex_m1_y[n3d_index_ex(iz,ix+3, iy)]*c3*g_ex_Vy0_in[n3d_index_ex(iz,ix+3,iy)] - g_ex_m1_y[n3d_index_ex(iz,ix+4, iy)]*c2*g_ex_Vy0_in[n3d_index_ex(iz,ix+4,iy)] - 
g_ex_m1_y[n3d_index_ex(iz,ix+5, iy)]*c1*g_ex_Vy0_in[n3d_index_ex(iz,ix+5,iy)] + g_ex_m1_x[n3d_index_ex(iz,ix, iy-4)]*c1*g_ex_Vx0_in[n3d_index_ex(iz,ix,iy-4)] + g_ex_m1_x[n3d_index_ex(iz,ix, iy-3)]*c2*g_ex_Vx0_in[n3d_index_ex(iz,ix,iy-3)] + g_ex_m1_x[n3d_index_ex(iz,ix, iy-2)]*c3*g_ex_Vx0_in[n3d_index_ex(iz,ix,iy-2)] + g_ex_m1_x[n3d_index_ex(iz,ix, iy-1)]*c4*g_ex_Vx0_in[n3d_index_ex(iz,ix,iy-1)] + g_ex_m1_x[n3d_index_ex(iz,ix, iy)] *c5*g_ex_Vx0_in[n3d_index_ex(iz,ix,iy)] - g_ex_m1_x[n3d_index_ex(iz,ix, iy+1)]*c5*g_ex_Vx0_in[n3d_index_ex(iz,ix,iy+1)] - g_ex_m1_x[n3d_index_ex(iz,ix, iy+2)]*c4*g_ex_Vx0_in[n3d_index_ex(iz,ix,iy+2)] - g_ex_m1_x[n3d_index_ex(iz,ix, iy+3)]*c3*g_ex_Vx0_in[n3d_index_ex(iz,ix,iy+3)] - g_ex_m1_x[n3d_index_ex(iz,ix, iy+4)]*c2*g_ex_Vx0_in[n3d_index_ex(iz,ix,iy+4)] - g_ex_m1_x[n3d_index_ex(iz,ix, iy+5)]*c1*g_ex_Vx0_in[n3d_index_ex(iz,ix,iy+5)] ; g_ex_sigmaxz0_out[n3d_index_ex(iz,ix ,iy)] = g_ex_sigmaxz0_out[n3d_index_ex(iz,ix , iy)] + g_ex_sigmaxz0_in1[n3d_index_ex(iz,ix , iy)] + g_ex_m1_x[n3d_index_ex(iz-4,ix, iy)]*c1*g_ex_Vx0_in[n3d_index_ex(iz-4,ix,iy)] + g_ex_m1_x[n3d_index_ex(iz-3,ix, iy)]*c2*g_ex_Vx0_in[n3d_index_ex(iz-3,ix,iy)] + g_ex_m1_x[n3d_index_ex(iz-2,ix, iy)]*c3*g_ex_Vx0_in[n3d_index_ex(iz-2,ix,iy)] + g_ex_m1_x[n3d_index_ex(iz-1,ix, iy)]*c4*g_ex_Vx0_in[n3d_index_ex(iz-1,ix,iy)] + g_ex_m1_x[n3d_index_ex(iz,ix, iy)] *c5*g_ex_Vx0_in[n3d_index_ex(iz,ix,iy)] - g_ex_m1_x[n3d_index_ex(iz+1,ix, iy)]*c5*g_ex_Vx0_in[n3d_index_ex(iz+1,ix,iy)] - g_ex_m1_x[n3d_index_ex(iz+2,ix, iy)]*c4*g_ex_Vx0_in[n3d_index_ex(iz+2,ix,iy)] - g_ex_m1_x[n3d_index_ex(iz+3,ix, iy)]*c3*g_ex_Vx0_in[n3d_index_ex(iz+3,ix,iy)] - g_ex_m1_x[n3d_index_ex(iz+4,ix, iy)]*c2*g_ex_Vx0_in[n3d_index_ex(iz+4,ix,iy)] - g_ex_m1_x[n3d_index_ex(iz+5,ix, iy)]*c1*g_ex_Vx0_in[n3d_index_ex(iz+5,ix,iy)] + g_ex_m1_z[n3d_index_ex(iz,ix-4, iy)]*c1*g_ex_Vz0_in[n3d_index_ex(iz,ix-4,iy)] + g_ex_m1_z[n3d_index_ex(iz,ix-3, iy)]*c2*g_ex_Vz0_in[n3d_index_ex(iz,ix-3,iy)] + 
g_ex_m1_z[n3d_index_ex(iz,ix-2, iy)]*c3*g_ex_Vz0_in[n3d_index_ex(iz,ix-2,iy)] + g_ex_m1_z[n3d_index_ex(iz,ix-1, iy)]*c4*g_ex_Vz0_in[n3d_index_ex(iz,ix-1,iy)] + g_ex_m1_z[n3d_index_ex(iz,ix, iy)] *c5*g_ex_Vz0_in[n3d_index_ex(iz,ix,iy)] - g_ex_m1_z[n3d_index_ex(iz,ix+1, iy)]*c5*g_ex_Vz0_in[n3d_index_ex(iz,ix+1,iy)] - g_ex_m1_z[n3d_index_ex(iz,ix+2, iy)]*c4*g_ex_Vz0_in[n3d_index_ex(iz,ix+2,iy)] - g_ex_m1_z[n3d_index_ex(iz,ix+3, iy)]*c3*g_ex_Vz0_in[n3d_index_ex(iz,ix+3,iy)] - g_ex_m1_z[n3d_index_ex(iz,ix+4, iy)]*c2*g_ex_Vz0_in[n3d_index_ex(iz,ix+4,iy)] - g_ex_m1_z[n3d_index_ex(iz,ix+5, iy)]*c1*g_ex_Vz0_in[n3d_index_ex(iz,ix+5,iy)] ; g_ex_sigmayz0_out[n3d_index_ex(iz,ix ,iy)] = g_ex_sigmayz0_out[n3d_index_ex(iz,ix , iy)] + g_ex_sigmayz0_in1[n3d_index_ex(iz,ix , iy)] + g_ex_m1_y[n3d_index_ex(iz-4,ix, iy)]*c1*g_ex_Vy0_in[n3d_index_ex(iz-4,ix,iy)] + g_ex_m1_y[n3d_index_ex(iz-3,ix, iy)]*c2*g_ex_Vy0_in[n3d_index_ex(iz-3,ix,iy)] + g_ex_m1_y[n3d_index_ex(iz-2,ix, iy)]*c3*g_ex_Vy0_in[n3d_index_ex(iz-2,ix,iy)] + g_ex_m1_y[n3d_index_ex(iz-1,ix, iy)]*c4*g_ex_Vy0_in[n3d_index_ex(iz-1,ix,iy)] + g_ex_m1_y[n3d_index_ex(iz,ix, iy)] *c5*g_ex_Vy0_in[n3d_index_ex(iz,ix,iy)] - g_ex_m1_y[n3d_index_ex(iz+1,ix, iy)]*c5*g_ex_Vy0_in[n3d_index_ex(iz+1,ix,iy)] - g_ex_m1_y[n3d_index_ex(iz+2,ix, iy)]*c4*g_ex_Vy0_in[n3d_index_ex(iz+2,ix,iy)] - g_ex_m1_y[n3d_index_ex(iz+3,ix, iy)]*c3*g_ex_Vy0_in[n3d_index_ex(iz+3,ix,iy)] - g_ex_m1_y[n3d_index_ex(iz+4,ix, iy)]*c2*g_ex_Vy0_in[n3d_index_ex(iz+4,ix,iy)] - g_ex_m1_y[n3d_index_ex(iz+5,ix, iy)]*c1*g_ex_Vy0_in[n3d_index_ex(iz+5,ix,iy)] + g_ex_m1_z[n3d_index_ex(iz,ix, iy-4)]*c1*g_ex_Vz0_in[n3d_index_ex(iz,ix,iy-4)] + g_ex_m1_z[n3d_index_ex(iz,ix, iy-3)]*c2*g_ex_Vz0_in[n3d_index_ex(iz,ix,iy-3)] + g_ex_m1_z[n3d_index_ex(iz,ix, iy-2)]*c3*g_ex_Vz0_in[n3d_index_ex(iz,ix,iy-2)] + g_ex_m1_z[n3d_index_ex(iz,ix, iy-1)]*c4*g_ex_Vz0_in[n3d_index_ex(iz,ix,iy-1)] + g_ex_m1_z[n3d_index_ex(iz,ix, iy)] *c5*g_ex_Vz0_in[n3d_index_ex(iz,ix,iy)] - g_ex_m1_z[n3d_index_ex(iz,ix, 
iy+1)]*c5*g_ex_Vz0_in[n3d_index_ex(iz,ix,iy+1)] - g_ex_m1_z[n3d_index_ex(iz,ix, iy+2)]*c4*g_ex_Vz0_in[n3d_index_ex(iz,ix,iy+2)] - g_ex_m1_z[n3d_index_ex(iz,ix, iy+3)]*c3*g_ex_Vz0_in[n3d_index_ex(iz,ix,iy+3)] - g_ex_m1_z[n3d_index_ex(iz,ix, iy+4)]*c2*g_ex_Vz0_in[n3d_index_ex(iz,ix,iy+4)] - g_ex_m1_z[n3d_index_ex(iz,ix, iy+5)]*c1*g_ex_Vz0_in[n3d_index_ex(iz,ix,iy+5)] ; } extern "C" void rtm_gpu_func(int ny, int nz, int nx, float *ex_Vy0_in, float * ex_Vx0_in, float * ex_Vz0_in, float * ex_sigmayy0_in, float *ex_sigmaxx0_in, float * ex_sigmazz0_in, float * ex_sigmaxy0_in, float * ex_sigmaxz0_in, float * ex_sigmayz0_in,//(nz, nx, nt) float *ex_Vy0_in1, float * ex_Vx0_in1, float * ex_Vz0_in1, float * ex_sigmayy0_in1, float *ex_sigmaxx0_in1, float * ex_sigmazz0_in1, float * ex_sigmaxy0_in1, float * ex_sigmaxz0_in1, float * ex_sigmayz0_in1,//(nz, nx, nt) float *ex_Vy0_out, float * ex_Vx0_out, float * ex_Vz0_out, float * ex_sigmayy0_out, float *ex_sigmaxx0_out, float * ex_sigmazz0_out, float * ex_sigmaxy0_out, float * ex_sigmaxz0_out, float * ex_sigmayz0_out,//(nz, nx, nt) float * ex_m1_y, float * ex_m1_x,float * ex_m1_z,float * ex_m2, float * ex_m3, float * ex_m2m3,//)//(nz+10,nx+10) float * debug, float * gpu_kernel_time) { hipError_t err; hipEvent_t start1, start2, start3, stop1, stop2, stop3; float elapsedTime1 = 0.0f; float elapsedTime2 = 0.0f; float elapsedTime3 = 0.0f; int g_it; hipEventCreate(&start1); hipEventCreate(&start2); hipEventCreate(&start3); hipEventCreate(&stop1); hipEventCreate(&stop2); hipEventCreate(&stop3); //time record //data copy in hipEventRecord(start1, 0); rtm_gpu_copy_in(ny, nz, nx, ex_Vy0_in, ex_Vx0_in, ex_Vz0_in, ex_sigmayy0_in, ex_sigmaxx0_in, ex_sigmazz0_in, ex_sigmaxy0_in, ex_sigmaxz0_in, ex_sigmayz0_in, ex_Vy0_in1, ex_Vx0_in1, ex_Vz0_in1, ex_sigmayy0_in1, ex_sigmaxx0_in1, ex_sigmazz0_in1, ex_sigmaxy0_in1, ex_sigmaxz0_in1, ex_sigmayz0_in1, ex_Vy0_out, ex_Vx0_out, ex_Vz0_out, ex_sigmayy0_out, ex_sigmaxx0_out, ex_sigmazz0_out, 
ex_sigmaxy0_out, ex_sigmaxz0_out, ex_sigmayz0_out, ex_m1_y, ex_m1_x, ex_m1_z, ex_m2, ex_m3, ex_m2m3); hipEventRecord(stop1, 0); err = hipGetLastError(); if(hipSuccess != err){ fprintf(stderr, "Cuda error5: %s.\n", hipGetErrorString(err)); exit(0); } //RTM computing dim3 dimGrid(nz/TZ, nx/TX, ny/TY); dim3 dimBlock(TZ, TX, TY); hipEventRecord(start2, 0); fprintf(stderr,"GPU Computing ... ...(NZ=%d, NX=%d, NY=%d, TZ=%d, TX=%d, TY=%d)\n", nz, nx, ny, TZ, TX, TY); for(g_it = 0; g_it < Steps_write_back; g_it++){ fprintf(stderr, "Step %d\n", g_it); hipLaunchKernelGGL(( rtm_gpu_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, ny, nz, nx, g_ex_Vy0_in, g_ex_Vx0_in, g_ex_Vz0_in, g_ex_sigmayy0_in, g_ex_sigmaxx0_in, g_ex_sigmazz0_in, g_ex_sigmaxy0_in, g_ex_sigmaxz0_in, g_ex_sigmayz0_in, g_ex_Vy0_in1, g_ex_Vx0_in1, g_ex_Vz0_in1, g_ex_sigmayy0_in1, g_ex_sigmaxx0_in1, g_ex_sigmazz0_in1, g_ex_sigmaxy0_in1, g_ex_sigmaxz0_in1, g_ex_sigmayz0_in1, g_ex_Vy0_out, g_ex_Vx0_out, g_ex_Vz0_out, g_ex_sigmayy0_out, g_ex_sigmaxx0_out, g_ex_sigmazz0_out, g_ex_sigmaxy0_out, g_ex_sigmaxz0_out, g_ex_sigmayz0_out, g_ex_m1_y, g_ex_m1_x, g_ex_m1_z, g_ex_m2, g_ex_m3, g_ex_m2m3); //hipDeviceSynchronize(); err = hipGetLastError(); if(hipSuccess != err){ fprintf(stderr, "Cuda error2: %s.\n", hipGetErrorString(err)); exit(0); } if(g_it<Steps_write_back-1) rtm_gpu_change_pointer(); } hipEventRecord(stop2, 0); //data copy out hipEventRecord(start3, 0); rtm_gpu_copy_out(ny, nz, nx, ex_Vy0_out, ex_Vx0_out, ex_Vz0_out, ex_sigmayy0_out, ex_sigmaxx0_out, ex_sigmazz0_out, ex_sigmaxy0_out, ex_sigmaxz0_out, ex_sigmayz0_out); hipEventRecord(stop3, 0); err = hipGetLastError(); if(hipSuccess != err){ fprintf(stderr, "Cuda error3: %s.\n", hipGetErrorString(err)); } //hipEventRecord(stop, 0); hipEventSynchronize(stop1); hipEventSynchronize(stop2); hipEventSynchronize(stop3); hipEventElapsedTime(&elapsedTime1, start1, stop1); hipEventElapsedTime(&elapsedTime2, start2, stop2); hipEventElapsedTime(&elapsedTime3, start3, 
stop3); gpu_kernel_time[0] = (float)(elapsedTime1/1000.); gpu_kernel_time[1] = (float)(elapsedTime2/1000.); gpu_kernel_time[2] = (float)(elapsedTime3/1000.); fprintf(stderr, "GPU copy in Time: %.4f\n", (float)elapsedTime1/1000.); fprintf(stderr, "GPU Comput. Time: %.4f\n", (float)elapsedTime2/1000.); fprintf(stderr, "GPU copy ot Time: %.4f\n", (float)elapsedTime3/1000.); } __global__ void rtm_gpu_kernel_all_shared(int it,int nt, int nz, int nx, float * g_ex_Vx0, float * g_ex_Vz0, float * g_ex_sigmaxx0, float * g_ex_sigmazz0, float * g_ex_sigmaxz0, //(nz, nx, nt) float * g_ex_m1_x,float * g_ex_m1_z,float * g_ex_aux_m2_c, float * g_ex_aux_m3_c, float * g_ex_aux_m2m3_c)//(nz+10, nx+10) { float c1=35.0/294912.0,c2=-405.0/229376.0,c3=567.0/40960.0,c4=-735.0/8192.0,c5=19845.0/16384.0; //GPU thread index int iz, ix; iz = blockIdx.x*blockDim.x + threadIdx.x; ix = blockIdx.y*blockDim.y + threadIdx.y; //gt = it; __shared__ float sh_ex_aux_m2m3_c[(TZ+10)*(TX+10)]; __shared__ float sh_ex_aux_m2_c[(TZ+10)*(TX+10)]; __shared__ float sh_ex_aux_m3_c[(TZ+10)*(TX+10)]; __shared__ float sh_ex_m1_x[(TZ+10)*(TX+10)]; __shared__ float sh_ex_m1_z[(TZ+10)*(TX+10)]; __shared__ float sh_ex_Vx0[(TZ+10)*(TX+10)]; __shared__ float sh_ex_Vz0[(TZ+10)*(TX+10)]; __shared__ float sh_ex_sigmaxx0[(TZ+10)*(TX+10)]; __shared__ float sh_ex_sigmazz0[(TZ+10)*(TX+10)]; __shared__ float sh_ex_sigmaxz0[(TZ+10)*(TX+10)]; //sh_ex_aux_m2m3_c[threadIdx][]; sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x,threadIdx.y)] = g_ex_aux_m2m3_c[index_ex(iz,ix)]; sh_ex_aux_m2_c[index_blk_ex(threadIdx.x,threadIdx.y)] = g_ex_aux_m2_c[index_ex(iz,ix)]; sh_ex_aux_m3_c[index_blk_ex(threadIdx.x,threadIdx.y)] = g_ex_aux_m3_c[index_ex(iz,ix)]; sh_ex_m1_x[index_blk_ex(threadIdx.x,threadIdx.y)] = g_ex_m1_x[index_ex(iz,ix)]; sh_ex_m1_z[index_blk_ex(threadIdx.x,threadIdx.y)] = g_ex_m1_z[index_ex(iz,ix)]; sh_ex_Vx0[index_blk_ex(threadIdx.x,threadIdx.y)] = g_ex_Vx0[index3d_ex(iz,ix,it+1)]; sh_ex_Vz0[index_blk_ex(threadIdx.x,threadIdx.y)] = 
g_ex_Vz0[index3d_ex(iz,ix,it+1)]; sh_ex_sigmaxx0[index_blk_ex(threadIdx.x,threadIdx.y)] = g_ex_sigmaxx0[index3d_ex(iz,ix,it+1)]; sh_ex_sigmazz0[index_blk_ex(threadIdx.x,threadIdx.y)] = g_ex_sigmazz0[index3d_ex(iz,ix,it+1)]; sh_ex_sigmaxz0[index_blk_ex(threadIdx.x,threadIdx.y)] = g_ex_sigmaxz0[index3d_ex(iz,ix,it+1)]; if(threadIdx.x<5){ sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x-5,threadIdx.y)] = g_ex_aux_m2m3_c[index_ex(iz-5,ix)]; sh_ex_aux_m2_c[index_blk_ex(threadIdx.x-5,threadIdx.y)] = g_ex_aux_m2_c[index_ex(iz-5,ix)]; sh_ex_aux_m3_c[index_blk_ex(threadIdx.x-5,threadIdx.y)] = g_ex_aux_m3_c[index_ex(iz-5,ix)]; sh_ex_m1_x[index_blk_ex(threadIdx.x-5,threadIdx.y)] = g_ex_m1_x[index_ex(iz-5,ix)]; sh_ex_m1_z[index_blk_ex(threadIdx.x-5,threadIdx.y)] = g_ex_m1_z[index_ex(iz-5,ix)]; sh_ex_Vx0[index_blk_ex(threadIdx.x-5,threadIdx.y)] = g_ex_Vx0[index3d_ex(iz-5,ix,it+1)]; sh_ex_Vz0[index_blk_ex(threadIdx.x-5,threadIdx.y)] = g_ex_Vz0[index3d_ex(iz-5,ix,it+1)]; sh_ex_sigmaxx0[index_blk_ex(threadIdx.x-5,threadIdx.y)] = g_ex_sigmaxx0[index3d_ex(iz-5,ix,it+1)]; sh_ex_sigmazz0[index_blk_ex(threadIdx.x-5,threadIdx.y)] = g_ex_sigmazz0[index3d_ex(iz-5,ix,it+1)]; sh_ex_sigmaxz0[index_blk_ex(threadIdx.x-5,threadIdx.y)] = g_ex_sigmaxz0[index3d_ex(iz-5,ix,it+1)]; } if(threadIdx.x>=TZ-5){ sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x+5,threadIdx.y)] = g_ex_aux_m2m3_c[index_ex(iz+5,ix)]; sh_ex_aux_m2_c[index_blk_ex(threadIdx.x+5,threadIdx.y)] = g_ex_aux_m2_c[index_ex(iz+5,ix)]; sh_ex_aux_m3_c[index_blk_ex(threadIdx.x+5,threadIdx.y)] = g_ex_aux_m3_c[index_ex(iz+5,ix)]; sh_ex_m1_x[index_blk_ex(threadIdx.x+5,threadIdx.y)] = g_ex_m1_x[index_ex(iz+5,ix)]; sh_ex_m1_z[index_blk_ex(threadIdx.x+5,threadIdx.y)] = g_ex_m1_z[index_ex(iz+5,ix)]; sh_ex_Vx0[index_blk_ex(threadIdx.x+5,threadIdx.y)] = g_ex_Vx0[index3d_ex(iz+5,ix,it+1)]; sh_ex_Vz0[index_blk_ex(threadIdx.x+5,threadIdx.y)] = g_ex_Vz0[index3d_ex(iz+5,ix,it+1)]; sh_ex_sigmaxx0[index_blk_ex(threadIdx.x+5,threadIdx.y)] = 
g_ex_sigmaxx0[index3d_ex(iz+5,ix,it+1)]; sh_ex_sigmazz0[index_blk_ex(threadIdx.x+5,threadIdx.y)] = g_ex_sigmazz0[index3d_ex(iz+5,ix,it+1)]; sh_ex_sigmaxz0[index_blk_ex(threadIdx.x+5,threadIdx.y)] = g_ex_sigmaxz0[index3d_ex(iz+5,ix,it+1)]; } if(threadIdx.y<5){ sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x,threadIdx.y-5)] = g_ex_aux_m2m3_c[index_ex(iz,ix-5)]; sh_ex_aux_m2_c[index_blk_ex(threadIdx.x,threadIdx.y-5)] = g_ex_aux_m2_c[index_ex(iz,ix-5)]; sh_ex_aux_m3_c[index_blk_ex(threadIdx.x,threadIdx.y-5)] = g_ex_aux_m3_c[index_ex(iz,ix-5)]; sh_ex_m1_x[index_blk_ex(threadIdx.x,threadIdx.y-5)] = g_ex_m1_x[index_ex(iz,ix-5)]; sh_ex_m1_z[index_blk_ex(threadIdx.x,threadIdx.y-5)] = g_ex_m1_z[index_ex(iz,ix-5)]; sh_ex_Vx0[index_blk_ex(threadIdx.x,threadIdx.y-5)] = g_ex_Vx0[index3d_ex(iz,ix-5,it+1)]; sh_ex_Vz0[index_blk_ex(threadIdx.x,threadIdx.y-5)] = g_ex_Vz0[index3d_ex(iz,ix-5,it+1)]; sh_ex_sigmaxx0[index_blk_ex(threadIdx.x,threadIdx.y-5)] = g_ex_sigmaxx0[index3d_ex(iz,ix-5,it+1)]; sh_ex_sigmazz0[index_blk_ex(threadIdx.x,threadIdx.y-5)] = g_ex_sigmazz0[index3d_ex(iz,ix-5,it+1)]; sh_ex_sigmaxz0[index_blk_ex(threadIdx.x,threadIdx.y-5)] = g_ex_sigmaxz0[index3d_ex(iz,ix-5,it+1)]; } if(threadIdx.y>=TX-5){ sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x,threadIdx.y+5)] = g_ex_aux_m2m3_c[index_ex(iz,ix+5)]; sh_ex_aux_m2_c[index_blk_ex(threadIdx.x,threadIdx.y+5)] = g_ex_aux_m2_c[index_ex(iz,ix+5)]; sh_ex_aux_m3_c[index_blk_ex(threadIdx.x,threadIdx.y+5)] = g_ex_aux_m3_c[index_ex(iz,ix+5)]; sh_ex_m1_x[index_blk_ex(threadIdx.x,threadIdx.y+5)] = g_ex_m1_x[index_ex(iz,ix+5)]; sh_ex_m1_z[index_blk_ex(threadIdx.x,threadIdx.y+5)] = g_ex_m1_z[index_ex(iz,ix+5)]; sh_ex_Vx0[index_blk_ex(threadIdx.x,threadIdx.y+5)] = g_ex_Vx0[index3d_ex(iz,ix+5,it+1)]; sh_ex_Vz0[index_blk_ex(threadIdx.x,threadIdx.y+5)] = g_ex_Vz0[index3d_ex(iz,ix+5,it+1)]; sh_ex_sigmaxx0[index_blk_ex(threadIdx.x,threadIdx.y+5)] = g_ex_sigmaxx0[index3d_ex(iz,ix+5,it+1)]; sh_ex_sigmazz0[index_blk_ex(threadIdx.x,threadIdx.y+5)] = 
g_ex_sigmazz0[index3d_ex(iz,ix+5,it+1)]; sh_ex_sigmaxz0[index_blk_ex(threadIdx.x,threadIdx.y+5)] = g_ex_sigmaxz0[index3d_ex(iz,ix+5,it+1)]; } if(threadIdx.x <5 && threadIdx.y <5){ sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x-5,threadIdx.y-5)] = g_ex_aux_m2m3_c[index_ex(iz-5,ix-5)]; sh_ex_aux_m2_c[index_blk_ex(threadIdx.x-5,threadIdx.y-5)] = g_ex_aux_m2_c[index_ex(iz-5,ix-5)]; sh_ex_aux_m3_c[index_blk_ex(threadIdx.x-5,threadIdx.y-5)] = g_ex_aux_m3_c[index_ex(iz-5,ix-5)]; sh_ex_m1_x[index_blk_ex(threadIdx.x-5,threadIdx.y-5)] = g_ex_m1_x[index_ex(iz-5,ix-5)]; sh_ex_m1_z[index_blk_ex(threadIdx.x-5,threadIdx.y-5)] = g_ex_m1_z[index_ex(iz-5,ix-5)]; sh_ex_Vx0[index_blk_ex(threadIdx.x-5,threadIdx.y-5)] = g_ex_Vx0[index3d_ex(iz-5,ix-5,it+1)]; sh_ex_Vz0[index_blk_ex(threadIdx.x-5,threadIdx.y-5)] = g_ex_Vz0[index3d_ex(iz-5,ix-5,it+1)]; sh_ex_sigmaxx0[index_blk_ex(threadIdx.x-5,threadIdx.y-5)] = g_ex_sigmaxx0[index3d_ex(iz-5,ix-5,it+1)]; sh_ex_sigmazz0[index_blk_ex(threadIdx.x-5,threadIdx.y-5)] = g_ex_sigmazz0[index3d_ex(iz-5,ix-5,it+1)]; sh_ex_sigmaxz0[index_blk_ex(threadIdx.x-5,threadIdx.y-5)] = g_ex_sigmaxz0[index3d_ex(iz-5,ix-5,it+1)]; } if(threadIdx.x >= 5+TZ && threadIdx.y >= 5+TX){ sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x+5,threadIdx.y+5)] = g_ex_aux_m2m3_c[index_ex(iz+5,ix+5)]; sh_ex_aux_m2_c[index_blk_ex(threadIdx.x+5,threadIdx.y+5)] = g_ex_aux_m2_c[index_ex(iz+5,ix+5)]; sh_ex_aux_m3_c[index_blk_ex(threadIdx.x+5,threadIdx.y+5)] = g_ex_aux_m3_c[index_ex(iz+5,ix+5)]; sh_ex_m1_x[index_blk_ex(threadIdx.x+5,threadIdx.y+5)] = g_ex_m1_x[index_ex(iz+5,ix+5)]; sh_ex_m1_z[index_blk_ex(threadIdx.x+5,threadIdx.y+5)] = g_ex_m1_z[index_ex(iz+5,ix+5)]; sh_ex_Vx0[index_blk_ex(threadIdx.x+5,threadIdx.y+5)] = g_ex_Vx0[index3d_ex(iz+5,ix+5,it+1)]; sh_ex_Vz0[index_blk_ex(threadIdx.x+5,threadIdx.y+5)] = g_ex_Vz0[index3d_ex(iz+5,ix+5,it+1)]; sh_ex_sigmaxx0[index_blk_ex(threadIdx.x+5,threadIdx.y+5)] = g_ex_sigmaxx0[index3d_ex(iz+5,ix+5,it+1)]; 
sh_ex_sigmazz0[index_blk_ex(threadIdx.x+5,threadIdx.y+5)] = g_ex_sigmazz0[index3d_ex(iz+5,ix+5,it+1)]; sh_ex_sigmaxz0[index_blk_ex(threadIdx.x+5,threadIdx.y+5)] = g_ex_sigmaxz0[index3d_ex(iz+5,ix+5,it+1)]; } if(threadIdx.x >= TZ+5 && threadIdx.y <5){ sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x+5,threadIdx.y-5)] = g_ex_aux_m2m3_c[index_ex(iz+5,ix-5)]; sh_ex_aux_m2_c[index_blk_ex(threadIdx.x+5,threadIdx.y-5)] = g_ex_aux_m2_c[index_ex(iz+5,ix-5)]; sh_ex_aux_m3_c[index_blk_ex(threadIdx.x+5,threadIdx.y-5)] = g_ex_aux_m3_c[index_ex(iz+5,ix-5)]; sh_ex_m1_x[index_blk_ex(threadIdx.x+5,threadIdx.y-5)] = g_ex_m1_x[index_ex(iz+5,ix-5)]; sh_ex_m1_z[index_blk_ex(threadIdx.x+5,threadIdx.y-5)] = g_ex_m1_z[index_ex(iz+5,ix-5)]; sh_ex_Vx0[index_blk_ex(threadIdx.x+5,threadIdx.y-5)] = g_ex_Vx0[index3d_ex(iz+5,ix-5,it+1)]; sh_ex_Vz0[index_blk_ex(threadIdx.x+5,threadIdx.y-5)] = g_ex_Vz0[index3d_ex(iz+5,ix-5,it+1)]; sh_ex_sigmaxx0[index_blk_ex(threadIdx.x+5,threadIdx.y-5)] = g_ex_sigmaxx0[index3d_ex(iz+5,ix-5,it+1)]; sh_ex_sigmazz0[index_blk_ex(threadIdx.x+5,threadIdx.y-5)] = g_ex_sigmazz0[index3d_ex(iz+5,ix-5,it+1)]; sh_ex_sigmaxz0[index_blk_ex(threadIdx.x+5,threadIdx.y-5)] = g_ex_sigmaxz0[index3d_ex(iz+5,ix-5,it+1)]; } if(threadIdx.x <5 && threadIdx.y >= TX-5){ sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x-5,threadIdx.y+5)] = g_ex_aux_m2m3_c[index_ex(iz-5,ix+5)]; sh_ex_aux_m2_c[index_blk_ex(threadIdx.x-5,threadIdx.y+5)] = g_ex_aux_m2_c[index_ex(iz-5,ix+5)]; sh_ex_aux_m3_c[index_blk_ex(threadIdx.x-5,threadIdx.y+5)] = g_ex_aux_m3_c[index_ex(iz-5,ix+5)]; sh_ex_m1_x[index_blk_ex(threadIdx.x-5,threadIdx.y+5)] = g_ex_m1_x[index_ex(iz-5,ix+5)]; sh_ex_m1_z[index_blk_ex(threadIdx.x-5,threadIdx.y+5)] = g_ex_m1_z[index_ex(iz-5,ix+5)]; sh_ex_Vx0[index_blk_ex(threadIdx.x-5,threadIdx.y+5)] = g_ex_Vx0[index3d_ex(iz-5,ix+5,it+1)]; sh_ex_Vz0[index_blk_ex(threadIdx.x-5,threadIdx.y+5)] = g_ex_Vz0[index3d_ex(iz-5,ix+5,it+1)]; sh_ex_sigmaxx0[index_blk_ex(threadIdx.x-5,threadIdx.y+5)] = 
g_ex_sigmaxx0[index3d_ex(iz-5,ix+5,it+1)]; sh_ex_sigmazz0[index_blk_ex(threadIdx.x-5,threadIdx.y+5)] = g_ex_sigmazz0[index3d_ex(iz-5,ix+5,it+1)]; sh_ex_sigmaxz0[index_blk_ex(threadIdx.x-5,threadIdx.y+5)] = g_ex_sigmaxz0[index3d_ex(iz-5,ix+5,it+1)]; } __syncthreads(); g_ex_Vx0[index3d_ex(iz,ix ,it)] = g_ex_Vx0[index3d_ex(iz,ix ,it)] + g_ex_Vx0[index3d_ex(iz, ix, it+2)] + sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x,threadIdx.y-5)]*c1*sh_ex_sigmaxx0[index_blk_ex(threadIdx.x,threadIdx.y-5)] + sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x,threadIdx.y-4)]*c2*sh_ex_sigmaxx0[index_blk_ex(threadIdx.x,threadIdx.y-4)] + sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x,threadIdx.y-3)]*c3*sh_ex_sigmaxx0[index_blk_ex(threadIdx.x,threadIdx.y-3)] + sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x,threadIdx.y-2)]*c4*sh_ex_sigmaxx0[index_blk_ex(threadIdx.x,threadIdx.y-2)] + sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x,threadIdx.y-1)]*c5*sh_ex_sigmaxx0[index_blk_ex(threadIdx.x,threadIdx.y-1)] - sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x,threadIdx.y)] *c5*sh_ex_sigmaxx0[index_blk_ex(threadIdx.x,threadIdx.y)] - sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x,threadIdx.y+1)]*c4*sh_ex_sigmaxx0[index_blk_ex(threadIdx.x,threadIdx.y+1)] - sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x,threadIdx.y+2)]*c3*sh_ex_sigmaxx0[index_blk_ex(threadIdx.x,threadIdx.y+2)] - sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x,threadIdx.y+3)]*c2*sh_ex_sigmaxx0[index_blk_ex(threadIdx.x,threadIdx.y+3)] - sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x,threadIdx.y+4)]*c1*sh_ex_sigmaxx0[index_blk_ex(threadIdx.x,threadIdx.y+4)] + sh_ex_aux_m2_c[index_blk_ex(threadIdx.x,threadIdx.y-5)]*c1*sh_ex_sigmazz0[index_blk_ex(threadIdx.x,threadIdx.y-5)] + sh_ex_aux_m2_c[index_blk_ex(threadIdx.x,threadIdx.y-4)]*c2*sh_ex_sigmazz0[index_blk_ex(threadIdx.x,threadIdx.y-4)] + sh_ex_aux_m2_c[index_blk_ex(threadIdx.x,threadIdx.y-3)]*c3*sh_ex_sigmazz0[index_blk_ex(threadIdx.x,threadIdx.y-3)] + 
sh_ex_aux_m2_c[index_blk_ex(threadIdx.x,threadIdx.y-2)]*c4*sh_ex_sigmazz0[index_blk_ex(threadIdx.x,threadIdx.y-2)] + sh_ex_aux_m2_c[index_blk_ex(threadIdx.x,threadIdx.y-1)]*c5*sh_ex_sigmazz0[index_blk_ex(threadIdx.x,threadIdx.y-1)] - sh_ex_aux_m2_c[index_blk_ex(threadIdx.x,threadIdx.y)] *c5*sh_ex_sigmazz0[index_blk_ex(threadIdx.x,threadIdx.y)] - sh_ex_aux_m2_c[index_blk_ex(threadIdx.x,threadIdx.y+1)]*c4*sh_ex_sigmazz0[index_blk_ex(threadIdx.x,threadIdx.y+1)] - sh_ex_aux_m2_c[index_blk_ex(threadIdx.x,threadIdx.y+2)]*c3*sh_ex_sigmazz0[index_blk_ex(threadIdx.x,threadIdx.y+2)] - sh_ex_aux_m2_c[index_blk_ex(threadIdx.x,threadIdx.y+3)]*c2*sh_ex_sigmazz0[index_blk_ex(threadIdx.x,threadIdx.y+3)] - sh_ex_aux_m2_c[index_blk_ex(threadIdx.x,threadIdx.y+4)]*c1*sh_ex_sigmazz0[index_blk_ex(threadIdx.x,threadIdx.y+4)] + sh_ex_aux_m3_c[index_blk_ex(threadIdx.x-4,threadIdx.y)]*c1*sh_ex_sigmaxz0[index_blk_ex(threadIdx.x-4,threadIdx.y)] + sh_ex_aux_m3_c[index_blk_ex(threadIdx.x-3,threadIdx.y)]*c2*sh_ex_sigmaxz0[index_blk_ex(threadIdx.x-3,threadIdx.y)] + sh_ex_aux_m3_c[index_blk_ex(threadIdx.x-2,threadIdx.y)]*c3*sh_ex_sigmaxz0[index_blk_ex(threadIdx.x-2,threadIdx.y)] + sh_ex_aux_m3_c[index_blk_ex(threadIdx.x-1,threadIdx.y)]*c4*sh_ex_sigmaxz0[index_blk_ex(threadIdx.x-1,threadIdx.y)] + sh_ex_aux_m3_c[index_blk_ex(threadIdx.x, threadIdx.y)] *c5*sh_ex_sigmaxz0[index_blk_ex(threadIdx.x,threadIdx.y)] - sh_ex_aux_m3_c[index_blk_ex(threadIdx.x+1,threadIdx.y)]*c5*sh_ex_sigmaxz0[index_blk_ex(threadIdx.x+1,threadIdx.y)] - sh_ex_aux_m3_c[index_blk_ex(threadIdx.x+2,threadIdx.y)]*c4*sh_ex_sigmaxz0[index_blk_ex(threadIdx.x+2,threadIdx.y)] - sh_ex_aux_m3_c[index_blk_ex(threadIdx.x+3,threadIdx.y)]*c3*sh_ex_sigmaxz0[index_blk_ex(threadIdx.x+3,threadIdx.y)] - sh_ex_aux_m3_c[index_blk_ex(threadIdx.x+4,threadIdx.y)]*c2*sh_ex_sigmaxz0[index_blk_ex(threadIdx.x+4,threadIdx.y)] - sh_ex_aux_m3_c[index_blk_ex(threadIdx.x+5,threadIdx.y)]*c1*sh_ex_sigmaxz0[index_blk_ex(threadIdx.x+5,threadIdx.y)] ; 
__syncthreads(); g_ex_Vz0[index3d_ex(iz,ix ,it)] = g_ex_Vz0[index3d_ex(iz,ix, it)] + g_ex_Vz0[index3d_ex(iz,ix ,it+2)] + sh_ex_aux_m2_c[index_blk_ex(threadIdx.x-5,threadIdx.y)]*c1*sh_ex_sigmaxx0[index_blk_ex(threadIdx.x-5,threadIdx.y)] + sh_ex_aux_m2_c[index_blk_ex(threadIdx.x-4,threadIdx.y)]*c2*sh_ex_sigmaxx0[index_blk_ex(threadIdx.x-4,threadIdx.y)] + sh_ex_aux_m2_c[index_blk_ex(threadIdx.x-3,threadIdx.y)]*c3*sh_ex_sigmaxx0[index_blk_ex(threadIdx.x-3,threadIdx.y)] + sh_ex_aux_m2_c[index_blk_ex(threadIdx.x-2,threadIdx.y)]*c4*sh_ex_sigmaxx0[index_blk_ex(threadIdx.x-2,threadIdx.y)] + sh_ex_aux_m2_c[index_blk_ex(threadIdx.x-1,threadIdx.y)]*c5*sh_ex_sigmaxx0[index_blk_ex(threadIdx.x-1,threadIdx.y)] - sh_ex_aux_m2_c[index_blk_ex(threadIdx.x, threadIdx.y)] *c5*sh_ex_sigmaxx0[index_blk_ex(threadIdx.x,threadIdx.y)] - sh_ex_aux_m2_c[index_blk_ex(threadIdx.x+1,threadIdx.y)]*c4*sh_ex_sigmaxx0[index_blk_ex(threadIdx.x+1,threadIdx.y)] - sh_ex_aux_m2_c[index_blk_ex(threadIdx.x+2,threadIdx.y)]*c3*sh_ex_sigmaxx0[index_blk_ex(threadIdx.x+2,threadIdx.y)] - sh_ex_aux_m2_c[index_blk_ex(threadIdx.x+3,threadIdx.y)]*c2*sh_ex_sigmaxx0[index_blk_ex(threadIdx.x+3,threadIdx.y)] - sh_ex_aux_m2_c[index_blk_ex(threadIdx.x+4,threadIdx.y)]*c1*sh_ex_sigmaxx0[index_blk_ex(threadIdx.x+4,threadIdx.y)] + sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x-5,threadIdx.y)]*c1*sh_ex_sigmazz0[index_blk_ex(threadIdx.x-5,threadIdx.y)] + sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x-4,threadIdx.y)]*c2*sh_ex_sigmazz0[index_blk_ex(threadIdx.x-4,threadIdx.y)] + sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x-3,threadIdx.y)]*c3*sh_ex_sigmazz0[index_blk_ex(threadIdx.x-3,threadIdx.y)] + sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x-2,threadIdx.y)]*c4*sh_ex_sigmazz0[index_blk_ex(threadIdx.x-2,threadIdx.y)] + sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x-1,threadIdx.y)]*c5*sh_ex_sigmazz0[index_blk_ex(threadIdx.x-1,threadIdx.y)] - sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x, threadIdx.y)] *c5*sh_ex_sigmazz0[index_blk_ex(threadIdx.x,threadIdx.y)] 
- sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x+1,threadIdx.y)]*c4*sh_ex_sigmazz0[index_blk_ex(threadIdx.x+1,threadIdx.y)] - sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x+2,threadIdx.y)]*c3*sh_ex_sigmazz0[index_blk_ex(threadIdx.x+2,threadIdx.y)] - sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x+3,threadIdx.y)]*c2*sh_ex_sigmazz0[index_blk_ex(threadIdx.x+3,threadIdx.y)] - sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x+4,threadIdx.y)]*c1*sh_ex_sigmazz0[index_blk_ex(threadIdx.x+4,threadIdx.y)] + sh_ex_aux_m3_c[index_blk_ex(threadIdx.x,threadIdx.y-4)]*c1*sh_ex_sigmaxz0[index_blk_ex(threadIdx.x,threadIdx.y-4)] + sh_ex_aux_m3_c[index_blk_ex(threadIdx.x,threadIdx.y-3)]*c2*sh_ex_sigmaxz0[index_blk_ex(threadIdx.x,threadIdx.y-3)] + sh_ex_aux_m3_c[index_blk_ex(threadIdx.x,threadIdx.y-2)]*c3*sh_ex_sigmaxz0[index_blk_ex(threadIdx.x,threadIdx.y-2)] + sh_ex_aux_m3_c[index_blk_ex(threadIdx.x,threadIdx.y-1)]*c4*sh_ex_sigmaxz0[index_blk_ex(threadIdx.x,threadIdx.y-1)] + sh_ex_aux_m3_c[index_blk_ex(threadIdx.x,threadIdx.y)] *c5*sh_ex_sigmaxz0[index_blk_ex(threadIdx.x,threadIdx.y)] - sh_ex_aux_m3_c[index_blk_ex(threadIdx.x,threadIdx.y+1)]*c5*sh_ex_sigmaxz0[index_blk_ex(threadIdx.x,threadIdx.y+1)] - sh_ex_aux_m3_c[index_blk_ex(threadIdx.x,threadIdx.y+2)]*c4*sh_ex_sigmaxz0[index_blk_ex(threadIdx.x,threadIdx.y+2)] - sh_ex_aux_m3_c[index_blk_ex(threadIdx.x,threadIdx.y+3)]*c3*sh_ex_sigmaxz0[index_blk_ex(threadIdx.x,threadIdx.y+3)] - sh_ex_aux_m3_c[index_blk_ex(threadIdx.x,threadIdx.y+4)]*c2*sh_ex_sigmaxz0[index_blk_ex(threadIdx.x,threadIdx.y+4)] - sh_ex_aux_m3_c[index_blk_ex(threadIdx.x,threadIdx.y+5)]*c1*sh_ex_sigmaxz0[index_blk_ex(threadIdx.x,threadIdx.y+5)] ; g_ex_sigmaxx0[index3d_ex(iz,ix ,it)] = g_ex_sigmaxx0[index3d_ex(iz,ix ,it)] + g_ex_sigmaxx0[index3d_ex(iz,ix ,it+2)] + sh_ex_m1_x[index_blk_ex(threadIdx.x,threadIdx.y-4)]*c1*sh_ex_Vx0[index_blk_ex(threadIdx.x,threadIdx.y-4)] + sh_ex_m1_x[index_blk_ex(threadIdx.x,threadIdx.y-3)]*c2*sh_ex_Vx0[index_blk_ex(threadIdx.x,threadIdx.y-3)] + 
sh_ex_m1_x[index_blk_ex(threadIdx.x,threadIdx.y-2)]*c3*sh_ex_Vx0[index_blk_ex(threadIdx.x,threadIdx.y-2)] + sh_ex_m1_x[index_blk_ex(threadIdx.x,threadIdx.y-1)]*c4*sh_ex_Vx0[index_blk_ex(threadIdx.x,threadIdx.y-1)] + sh_ex_m1_x[index_blk_ex(threadIdx.x,threadIdx.y)] *c5*sh_ex_Vx0[index_blk_ex(threadIdx.x,threadIdx.y)] - sh_ex_m1_x[index_blk_ex(threadIdx.x,threadIdx.y+1)]*c5*sh_ex_Vx0[index_blk_ex(threadIdx.x,threadIdx.y+1)] - sh_ex_m1_x[index_blk_ex(threadIdx.x,threadIdx.y+2)]*c4*sh_ex_Vx0[index_blk_ex(threadIdx.x,threadIdx.y+2)] - sh_ex_m1_x[index_blk_ex(threadIdx.x,threadIdx.y+3)]*c3*sh_ex_Vx0[index_blk_ex(threadIdx.x,threadIdx.y+3)] - sh_ex_m1_x[index_blk_ex(threadIdx.x,threadIdx.y+4)]*c2*sh_ex_Vx0[index_blk_ex(threadIdx.x,threadIdx.y+4)] - sh_ex_m1_x[index_blk_ex(threadIdx.x,threadIdx.y+5)]*c1*sh_ex_Vx0[index_blk_ex(threadIdx.x,threadIdx.y+5)] ; __syncthreads(); g_ex_sigmazz0[index3d_ex(iz,ix ,it)] = g_ex_sigmazz0[index3d_ex(iz,ix ,it)] + g_ex_sigmazz0[index3d_ex(iz,ix ,it+2)] + sh_ex_m1_z[index_blk_ex(threadIdx.x-4,threadIdx.y)]*c1*sh_ex_Vz0[index_blk_ex(threadIdx.x-4,threadIdx.y)] + sh_ex_m1_z[index_blk_ex(threadIdx.x-3,threadIdx.y)]*c2*sh_ex_Vz0[index_blk_ex(threadIdx.x-3,threadIdx.y)] + sh_ex_m1_z[index_blk_ex(threadIdx.x-2,threadIdx.y)]*c3*sh_ex_Vz0[index_blk_ex(threadIdx.x-2,threadIdx.y)] + sh_ex_m1_z[index_blk_ex(threadIdx.x-1,threadIdx.y)]*c4*sh_ex_Vz0[index_blk_ex(threadIdx.x-1,threadIdx.y)] + sh_ex_m1_z[index_blk_ex(threadIdx.x, threadIdx.y)] *c5*sh_ex_Vz0[index_blk_ex(threadIdx.x,threadIdx.y)] - sh_ex_m1_z[index_blk_ex(threadIdx.x+1,threadIdx.y)]*c5*sh_ex_Vz0[index_blk_ex(threadIdx.x+1,threadIdx.y)] - sh_ex_m1_z[index_blk_ex(threadIdx.x+2,threadIdx.y)]*c4*sh_ex_Vz0[index_blk_ex(threadIdx.x+2,threadIdx.y)] - sh_ex_m1_z[index_blk_ex(threadIdx.x+3,threadIdx.y)]*c3*sh_ex_Vz0[index_blk_ex(threadIdx.x+3,threadIdx.y)] - sh_ex_m1_z[index_blk_ex(threadIdx.x+4,threadIdx.y)]*c2*sh_ex_Vz0[index_blk_ex(threadIdx.x+4,threadIdx.y)] - 
sh_ex_m1_z[index_blk_ex(threadIdx.x+5,threadIdx.y)]*c1*sh_ex_Vz0[index_blk_ex(threadIdx.x+5,threadIdx.y)] ; __syncthreads(); g_ex_sigmaxz0[index3d_ex(iz,ix ,it)] = g_ex_sigmaxz0[index3d_ex(iz,ix ,it)] + g_ex_sigmaxz0[index3d_ex(iz,ix ,it+2)] + sh_ex_m1_x[index_blk_ex(threadIdx.x-5,threadIdx.y)]*c1*sh_ex_Vx0[index_blk_ex(threadIdx.x-5,threadIdx.y)] + sh_ex_m1_x[index_blk_ex(threadIdx.x-4,threadIdx.y)]*c2*sh_ex_Vx0[index_blk_ex(threadIdx.x-4,threadIdx.y)] + sh_ex_m1_x[index_blk_ex(threadIdx.x-3,threadIdx.y)]*c3*sh_ex_Vx0[index_blk_ex(threadIdx.x-3,threadIdx.y)] + sh_ex_m1_x[index_blk_ex(threadIdx.x-2,threadIdx.y)]*c4*sh_ex_Vx0[index_blk_ex(threadIdx.x-2,threadIdx.y)] + sh_ex_m1_x[index_blk_ex(threadIdx.x-1,threadIdx.y)]*c5*sh_ex_Vx0[index_blk_ex(threadIdx.x-1,threadIdx.y)] - sh_ex_m1_x[index_blk_ex(threadIdx.x, threadIdx.y)] *c5*sh_ex_Vx0[index_blk_ex(threadIdx.x,threadIdx.y)] - sh_ex_m1_x[index_blk_ex(threadIdx.x+1,threadIdx.y)]*c4*sh_ex_Vx0[index_blk_ex(threadIdx.x+1,threadIdx.y)] - sh_ex_m1_x[index_blk_ex(threadIdx.x+2,threadIdx.y)]*c3*sh_ex_Vx0[index_blk_ex(threadIdx.x+2,threadIdx.y)] - sh_ex_m1_x[index_blk_ex(threadIdx.x+3,threadIdx.y)]*c2*sh_ex_Vx0[index_blk_ex(threadIdx.x+3,threadIdx.y)] - sh_ex_m1_x[index_blk_ex(threadIdx.x+4,threadIdx.y)]*c1*sh_ex_Vx0[index_blk_ex(threadIdx.x+4,threadIdx.y)] //; + sh_ex_m1_z[index_blk_ex(threadIdx.x,threadIdx.y-5)]*c1*sh_ex_Vz0[index_blk_ex(threadIdx.x,threadIdx.y-5)] + sh_ex_m1_z[index_blk_ex(threadIdx.x,threadIdx.y-4)]*c2*sh_ex_Vz0[index_blk_ex(threadIdx.x,threadIdx.y-4)] + sh_ex_m1_z[index_blk_ex(threadIdx.x,threadIdx.y-3)]*c3*sh_ex_Vz0[index_blk_ex(threadIdx.x,threadIdx.y-3)] + sh_ex_m1_z[index_blk_ex(threadIdx.x,threadIdx.y-2)]*c4*sh_ex_Vz0[index_blk_ex(threadIdx.x,threadIdx.y-2)] + sh_ex_m1_z[index_blk_ex(threadIdx.x,threadIdx.y-1)]*c5*sh_ex_Vz0[index_blk_ex(threadIdx.x,threadIdx.y-1)] - sh_ex_m1_z[index_blk_ex(threadIdx.x,threadIdx.y)] *c5*sh_ex_Vz0[index_blk_ex(threadIdx.x,threadIdx.y)] - 
sh_ex_m1_z[index_blk_ex(threadIdx.x,threadIdx.y+1)]*c4*sh_ex_Vz0[index_blk_ex(threadIdx.x,threadIdx.y+1)] - sh_ex_m1_z[index_blk_ex(threadIdx.x,threadIdx.y+2)]*c3*sh_ex_Vz0[index_blk_ex(threadIdx.x,threadIdx.y+2)] - sh_ex_m1_z[index_blk_ex(threadIdx.x,threadIdx.y+3)]*c2*sh_ex_Vz0[index_blk_ex(threadIdx.x,threadIdx.y+3)] - sh_ex_m1_z[index_blk_ex(threadIdx.x,threadIdx.y+4)]*c1*sh_ex_Vz0[index_blk_ex(threadIdx.x,threadIdx.y+4)] ; __syncthreads(); } __global__ void rtm_gpu_kernel_l1(int it,int nt, int nz, int nx, float * g_ex_Vx0, float * g_ex_Vz0, float * g_ex_sigmaxx0, float * g_ex_sigmazz0, float * g_ex_sigmaxz0, //(nz, nx, nt) float * g_ex_m1_x,float * g_ex_m1_z,float * g_ex_aux_m2_c, float * g_ex_aux_m3_c, float * g_ex_aux_m2m3_c)//(nz+10, nx+10) { float c1=35.0/294912.0,c2=-405.0/229376.0,c3=567.0/40960.0,c4=-735.0/8192.0,c5=19845.0/16384.0; //GPU thread index int iz, ix; iz = blockIdx.x*blockDim.x + threadIdx.x; ix = blockIdx.y*blockDim.y + threadIdx.y; //gt = it; g_ex_Vx0[index3d_ex(iz,ix ,it)] = g_ex_Vx0[index3d_ex(iz,ix ,it)] + g_ex_Vx0[index3d_ex(iz, ix, it+2)] + g_ex_aux_m2m3_c[index_ex(iz,ix-5)]*c1*g_ex_sigmaxx0[index3d_ex(iz,ix-5,it+1)] + g_ex_aux_m2m3_c[index_ex(iz,ix-4)]*c2*g_ex_sigmaxx0[index3d_ex(iz,ix-4,it+1)] + g_ex_aux_m2m3_c[index_ex(iz,ix-3)]*c3*g_ex_sigmaxx0[index3d_ex(iz,ix-3,it+1)] + g_ex_aux_m2m3_c[index_ex(iz,ix-2)]*c4*g_ex_sigmaxx0[index3d_ex(iz,ix-2,it+1)] + g_ex_aux_m2m3_c[index_ex(iz,ix-1)]*c5*g_ex_sigmaxx0[index3d_ex(iz,ix-1,it+1)] - g_ex_aux_m2m3_c[index_ex(iz,ix)] *c5*g_ex_sigmaxx0[index3d_ex(iz,ix,it+1)] - g_ex_aux_m2m3_c[index_ex(iz,ix+1)]*c4*g_ex_sigmaxx0[index3d_ex(iz,ix+1,it+1)] - g_ex_aux_m2m3_c[index_ex(iz,ix+2)]*c3*g_ex_sigmaxx0[index3d_ex(iz,ix+2,it+1)] - g_ex_aux_m2m3_c[index_ex(iz,ix+3)]*c2*g_ex_sigmaxx0[index3d_ex(iz,ix+3,it+1)] - g_ex_aux_m2m3_c[index_ex(iz,ix+4)]*c1*g_ex_sigmaxx0[index3d_ex(iz,ix+4,it+1)] + g_ex_aux_m2_c[index_ex(iz,ix-5)]*c1*g_ex_sigmazz0[index3d_ex(iz,ix-5,it+1)] + 
g_ex_aux_m2_c[index_ex(iz,ix-4)]*c2*g_ex_sigmazz0[index3d_ex(iz,ix-4,it+1)] + g_ex_aux_m2_c[index_ex(iz,ix-3)]*c3*g_ex_sigmazz0[index3d_ex(iz,ix-3,it+1)] + g_ex_aux_m2_c[index_ex(iz,ix-2)]*c4*g_ex_sigmazz0[index3d_ex(iz,ix-2,it+1)] + g_ex_aux_m2_c[index_ex(iz,ix-1)]*c5*g_ex_sigmazz0[index3d_ex(iz,ix-1,it+1)] - g_ex_aux_m2_c[index_ex(iz,ix)] *c5*g_ex_sigmazz0[index3d_ex(iz,ix,it+1)] - g_ex_aux_m2_c[index_ex(iz,ix+1)]*c4*g_ex_sigmazz0[index3d_ex(iz,ix+1,it+1)] - g_ex_aux_m2_c[index_ex(iz,ix+2)]*c3*g_ex_sigmazz0[index3d_ex(iz,ix+2,it+1)] - g_ex_aux_m2_c[index_ex(iz,ix+3)]*c2*g_ex_sigmazz0[index3d_ex(iz,ix+3,it+1)] - g_ex_aux_m2_c[index_ex(iz,ix+4)]*c1*g_ex_sigmazz0[index3d_ex(iz,ix+4,it+1)] + g_ex_aux_m3_c[index_ex(iz-4,ix)]*c1*g_ex_sigmaxz0[index3d_ex(iz-4,ix,it+1)] + g_ex_aux_m3_c[index_ex(iz-3,ix)]*c2*g_ex_sigmaxz0[index3d_ex(iz-3,ix,it+1)] + g_ex_aux_m3_c[index_ex(iz-2,ix)]*c3*g_ex_sigmaxz0[index3d_ex(iz-2,ix,it+1)] + g_ex_aux_m3_c[index_ex(iz-1,ix)]*c4*g_ex_sigmaxz0[index3d_ex(iz-1,ix,it+1)] + g_ex_aux_m3_c[index_ex(iz,ix)] *c5*g_ex_sigmaxz0[index3d_ex(iz,ix,it+1)] - g_ex_aux_m3_c[index_ex(iz+1,ix)]*c5*g_ex_sigmaxz0[index3d_ex(iz+1,ix,it+1)] - g_ex_aux_m3_c[index_ex(iz+2,ix)]*c4*g_ex_sigmaxz0[index3d_ex(iz+2,ix,it+1)] - g_ex_aux_m3_c[index_ex(iz+3,ix)]*c3*g_ex_sigmaxz0[index3d_ex(iz+3,ix,it+1)] - g_ex_aux_m3_c[index_ex(iz+4,ix)]*c2*g_ex_sigmaxz0[index3d_ex(iz+4,ix,it+1)] - g_ex_aux_m3_c[index_ex(iz+5,ix)]*c1*g_ex_sigmaxz0[index3d_ex(iz+5,ix,it+1)] ; g_ex_Vz0[index3d_ex(iz,ix ,it)] = g_ex_Vz0[index3d_ex(iz,ix ,it)] + g_ex_Vz0[index3d_ex(iz,ix ,it+2)] + g_ex_aux_m2_c[index_ex(iz-5,ix)]*c1*g_ex_sigmaxx0[index3d_ex(iz-5,ix,it+1)] + g_ex_aux_m2_c[index_ex(iz-4,ix)]*c2*g_ex_sigmaxx0[index3d_ex(iz-4,ix,it+1)] + g_ex_aux_m2_c[index_ex(iz-3,ix)]*c3*g_ex_sigmaxx0[index3d_ex(iz-3,ix,it+1)] + g_ex_aux_m2_c[index_ex(iz-2,ix)]*c4*g_ex_sigmaxx0[index3d_ex(iz-2,ix,it+1)] + g_ex_aux_m2_c[index_ex(iz-1,ix)]*c5*g_ex_sigmaxx0[index3d_ex(iz-1,ix,it+1)] - 
g_ex_aux_m2_c[index_ex(iz,ix)] *c5*g_ex_sigmaxx0[index3d_ex(iz,ix,it+1)] - g_ex_aux_m2_c[index_ex(iz+1,ix)]*c4*g_ex_sigmaxx0[index3d_ex(iz+1,ix,it+1)] - g_ex_aux_m2_c[index_ex(iz+2,ix)]*c3*g_ex_sigmaxx0[index3d_ex(iz+2,ix,it+1)] - g_ex_aux_m2_c[index_ex(iz+3,ix)]*c2*g_ex_sigmaxx0[index3d_ex(iz+3,ix,it+1)] - g_ex_aux_m2_c[index_ex(iz+4,ix)]*c1*g_ex_sigmaxx0[index3d_ex(iz+4,ix,it+1)] + g_ex_aux_m2m3_c[index_ex(iz-5,ix)]*c1*g_ex_sigmazz0[index3d_ex(iz-5,ix,it+1)] + g_ex_aux_m2m3_c[index_ex(iz-4,ix)]*c2*g_ex_sigmazz0[index3d_ex(iz-4,ix,it+1)] + g_ex_aux_m2m3_c[index_ex(iz-3,ix)]*c3*g_ex_sigmazz0[index3d_ex(iz-3,ix,it+1)] + g_ex_aux_m2m3_c[index_ex(iz-2,ix)]*c4*g_ex_sigmazz0[index3d_ex(iz-2,ix,it+1)] + g_ex_aux_m2m3_c[index_ex(iz-1,ix)]*c5*g_ex_sigmazz0[index3d_ex(iz-1,ix,it+1)] - g_ex_aux_m2m3_c[index_ex(iz,ix)] *c5*g_ex_sigmazz0[index3d_ex(iz,ix,it+1)] - g_ex_aux_m2m3_c[index_ex(iz+1,ix)]*c4*g_ex_sigmazz0[index3d_ex(iz+1,ix,it+1)] - g_ex_aux_m2m3_c[index_ex(iz+2,ix)]*c3*g_ex_sigmazz0[index3d_ex(iz+2,ix,it+1)] - g_ex_aux_m2m3_c[index_ex(iz+3,ix)]*c2*g_ex_sigmazz0[index3d_ex(iz+3,ix,it+1)] - g_ex_aux_m2m3_c[index_ex(iz+4,ix)]*c1*g_ex_sigmazz0[index3d_ex(iz+4,ix,it+1)] + g_ex_aux_m3_c[index_ex(iz,ix-4)]*c1*g_ex_sigmaxz0[index3d_ex(iz,ix-4,it+1)] + g_ex_aux_m3_c[index_ex(iz,ix-3)]*c2*g_ex_sigmaxz0[index3d_ex(iz,ix-3,it+1)] + g_ex_aux_m3_c[index_ex(iz,ix-2)]*c3*g_ex_sigmaxz0[index3d_ex(iz,ix-2,it+1)] + g_ex_aux_m3_c[index_ex(iz,ix-1)]*c4*g_ex_sigmaxz0[index3d_ex(iz,ix-1,it+1)] + g_ex_aux_m3_c[index_ex(iz,ix)] *c5*g_ex_sigmaxz0[index3d_ex(iz,ix,it+1)] - g_ex_aux_m3_c[index_ex(iz,ix+1)]*c5*g_ex_sigmaxz0[index3d_ex(iz,ix+1,it+1)] - g_ex_aux_m3_c[index_ex(iz,ix+2)]*c4*g_ex_sigmaxz0[index3d_ex(iz,ix+2,it+1)] - g_ex_aux_m3_c[index_ex(iz,ix+3)]*c3*g_ex_sigmaxz0[index3d_ex(iz,ix+3,it+1)] - g_ex_aux_m3_c[index_ex(iz,ix+4)]*c2*g_ex_sigmaxz0[index3d_ex(iz,ix+4,it+1)] - g_ex_aux_m3_c[index_ex(iz,ix+5)]*c1*g_ex_sigmaxz0[index3d_ex(iz,ix+5,it+1)] ; g_ex_sigmaxx0[index3d_ex(iz,ix ,it)] = 
g_ex_sigmaxx0[index3d_ex(iz,ix ,it)] + g_ex_sigmaxx0[index3d_ex(iz,ix ,it+2)] + g_ex_m1_x[index_ex(iz,ix-4)]*c1*g_ex_Vx0[index3d_ex(iz,ix-4,it+1)] + g_ex_m1_x[index_ex(iz,ix-3)]*c2*g_ex_Vx0[index3d_ex(iz,ix-3,it+1)] + g_ex_m1_x[index_ex(iz,ix-2)]*c3*g_ex_Vx0[index3d_ex(iz,ix-2,it+1)] + g_ex_m1_x[index_ex(iz,ix-1)]*c4*g_ex_Vx0[index3d_ex(iz,ix-1,it+1)] + g_ex_m1_x[index_ex(iz,ix)] *c5*g_ex_Vx0[index3d_ex(iz,ix,it+1)] - g_ex_m1_x[index_ex(iz,ix+1)]*c5*g_ex_Vx0[index3d_ex(iz,ix+1,it+1)] - g_ex_m1_x[index_ex(iz,ix+2)]*c4*g_ex_Vx0[index3d_ex(iz,ix+2,it+1)] - g_ex_m1_x[index_ex(iz,ix+3)]*c3*g_ex_Vx0[index3d_ex(iz,ix+3,it+1)] - g_ex_m1_x[index_ex(iz,ix+4)]*c2*g_ex_Vx0[index3d_ex(iz,ix+4,it+1)] - g_ex_m1_x[index_ex(iz,ix+5)]*c1*g_ex_Vx0[index3d_ex(iz,ix+5,it+1)] ; g_ex_sigmazz0[index3d_ex(iz,ix ,it)] = g_ex_sigmazz0[index3d_ex(iz,ix ,it)] + g_ex_sigmazz0[index3d_ex(iz,ix ,it+2)] + g_ex_m1_z[index_ex(iz-4,ix)]*c1*g_ex_Vz0[index3d_ex(iz-4,ix,it+1)] + g_ex_m1_z[index_ex(iz-3,ix)]*c2*g_ex_Vz0[index3d_ex(iz-3,ix,it+1)] + g_ex_m1_z[index_ex(iz-2,ix)]*c3*g_ex_Vz0[index3d_ex(iz-2,ix,it+1)] + g_ex_m1_z[index_ex(iz-1,ix)]*c4*g_ex_Vz0[index3d_ex(iz-1,ix,it+1)] + g_ex_m1_z[index_ex(iz,ix)] *c5*g_ex_Vz0[index3d_ex(iz,ix,it+1)] - g_ex_m1_z[index_ex(iz+1,ix)]*c5*g_ex_Vz0[index3d_ex(iz+1,ix,it+1)] - g_ex_m1_z[index_ex(iz+2,ix)]*c4*g_ex_Vz0[index3d_ex(iz+2,ix,it+1)] - g_ex_m1_z[index_ex(iz+3,ix)]*c3*g_ex_Vz0[index3d_ex(iz+3,ix,it+1)] - g_ex_m1_z[index_ex(iz+4,ix)]*c2*g_ex_Vz0[index3d_ex(iz+4,ix,it+1)] - g_ex_m1_z[index_ex(iz+5,ix)]*c1*g_ex_Vz0[index3d_ex(iz+5,ix,it+1)] ; g_ex_sigmaxz0[index3d_ex(iz,ix ,it)] = g_ex_sigmaxz0[index3d_ex(iz,ix ,it)] + g_ex_sigmaxz0[index3d_ex(iz,ix ,it+2)] + g_ex_m1_x[index_ex(iz-5,ix)]*c1*g_ex_Vx0[index3d_ex(iz-5,ix,it+1)] + g_ex_m1_x[index_ex(iz-4,ix)]*c2*g_ex_Vx0[index3d_ex(iz-4,ix,it+1)] + g_ex_m1_x[index_ex(iz-3,ix)]*c3*g_ex_Vx0[index3d_ex(iz-3,ix,it+1)] + g_ex_m1_x[index_ex(iz-2,ix)]*c4*g_ex_Vx0[index3d_ex(iz-2,ix,it+1)] + 
g_ex_m1_x[index_ex(iz-1,ix)]*c5*g_ex_Vx0[index3d_ex(iz-1,ix,it+1)] - g_ex_m1_x[index_ex(iz,ix)] *c5*g_ex_Vx0[index3d_ex(iz,ix,it+1)] - g_ex_m1_x[index_ex(iz+1,ix)]*c4*g_ex_Vx0[index3d_ex(iz+1,ix,it+1)] - g_ex_m1_x[index_ex(iz+2,ix)]*c3*g_ex_Vx0[index3d_ex(iz+2,ix,it+1)] - g_ex_m1_x[index_ex(iz+3,ix)]*c2*g_ex_Vx0[index3d_ex(iz+3,ix,it+1)] - g_ex_m1_x[index_ex(iz+4,ix)]*c1*g_ex_Vx0[index3d_ex(iz+4,ix,it+1)] //; + g_ex_m1_z[index_ex(iz,ix-5)]*c1*g_ex_Vz0[index3d_ex(iz,ix-5,it+1)] + g_ex_m1_z[index_ex(iz,ix-4)]*c2*g_ex_Vz0[index3d_ex(iz,ix-4,it+1)] + g_ex_m1_z[index_ex(iz,ix-3)]*c3*g_ex_Vz0[index3d_ex(iz,ix-3,it+1)] + g_ex_m1_z[index_ex(iz,ix-2)]*c4*g_ex_Vz0[index3d_ex(iz,ix-2,it+1)] + g_ex_m1_z[index_ex(iz,ix-1)]*c5*g_ex_Vz0[index3d_ex(iz,ix-1,it+1)] - g_ex_m1_z[index_ex(iz,ix)] *c5*g_ex_Vz0[index3d_ex(iz,ix,it+1)] - g_ex_m1_z[index_ex(iz,ix+1)]*c4*g_ex_Vz0[index3d_ex(iz,ix+1,it+1)] - g_ex_m1_z[index_ex(iz,ix+2)]*c3*g_ex_Vz0[index3d_ex(iz,ix+2,it+1)] - g_ex_m1_z[index_ex(iz,ix+3)]*c2*g_ex_Vz0[index3d_ex(iz,ix+3,it+1)] - g_ex_m1_z[index_ex(iz,ix+4)]*c1*g_ex_Vz0[index3d_ex(iz,ix+4,it+1)] ; }
649d04fc769ef9baf3e9b50424e7f5c30deb2f27.cu
#include <stdio.h> #include "gpu.h" //extended propagation data residence in GPU device __device__ float * g_ex_Vx0_in; __device__ float * g_ex_Vz0_in; __device__ float * g_ex_Vy0_in; __device__ float * g_ex_sigmaxx0_in; __device__ float * g_ex_sigmazz0_in; __device__ float * g_ex_sigmayy0_in; __device__ float * g_ex_sigmaxy0_in; __device__ float * g_ex_sigmaxz0_in; __device__ float * g_ex_sigmayz0_in; //Time step +2 __device__ float * g_ex_Vx0_in1; __device__ float * g_ex_Vz0_in1; __device__ float * g_ex_Vy0_in1; __device__ float * g_ex_sigmaxx0_in1; __device__ float * g_ex_sigmazz0_in1; __device__ float * g_ex_sigmayy0_in1; __device__ float * g_ex_sigmaxy0_in1; __device__ float * g_ex_sigmaxz0_in1; __device__ float * g_ex_sigmayz0_in1; //time step 0 and output __device__ float * g_ex_Vx0_out; __device__ float * g_ex_Vz0_out; __device__ float * g_ex_Vy0_out; __device__ float * g_ex_sigmaxx0_out; __device__ float * g_ex_sigmazz0_out; __device__ float * g_ex_sigmayy0_out; __device__ float * g_ex_sigmaxy0_out; __device__ float * g_ex_sigmaxz0_out; __device__ float * g_ex_sigmayz0_out; //expaned arrays to store different Operators __device__ float *g_ex_m2; __device__ float *g_ex_m3; __device__ float *g_ex_m2m3; __device__ float *g_ex_m1_x; __device__ float *g_ex_m1_z; __device__ float *g_ex_m1_y; __device__ float *g_tmp; __global__ void rtm_gpu_kernel(int ny, int nz, int nx, float *g_ex_Vy0_in, float * g_ex_Vx0_in, float * g_ex_Vz0_in, float * g_ex_sigmayy0_in, float *g_ex_sigmaxx0_in, float * g_ex_sigmazz0_in, float * g_ex_sigmaxy0_in, float * g_ex_sigmaxz0_in, float * g_ex_sigmayz0_in,//(nz, nx, nt) float *g_ex_Vy0_in1, float * g_ex_Vx0_in1, float * g_ex_Vz0_in1, float * g_ex_sigmayy0_in1, float *g_ex_sigmaxx0_in1, float * g_ex_sigmazz0_in1, float * g_ex_sigmaxy0_in1, float * g_ex_sigmaxz0_in1, float * g_ex_sigmayz0_in1,//(nz, nx, nt) float *g_ex_Vy0_out, float * g_ex_Vx0_out, float * g_ex_Vz0_out, float * g_ex_sigmayy0_out, float *g_ex_sigmaxx0_out, float * 
g_ex_sigmazz0_out, float * g_ex_sigmaxy0_out, float * g_ex_sigmaxz0_out, float * g_ex_sigmayz0_out,//(nz, nx, nt) const float * __restrict__ g_ex_m1_y, const float * __restrict__ g_ex_m1_x, const float * __restrict__ g_ex_m1_z, const float * __restrict__ g_ex_m2, const float * __restrict__ g_ex_m3, const float * __restrict__ g_ex_m2m3);//(nz+10, nx+10) extern "C" void rtm_gpu_init(int ny, int nz, int nx) { //set cuda devices and put all data onto gpu memory cudaError_t cuda_ret; cudaError_t err; //Set Device cuda_ret = cudaSetDevice(1); if(cuda_ret != cudaSuccess){ fprintf(stderr, "Failed to Set The cuda Device !\n"); exit(0); } else{ fprintf(stderr, "GPU Device Set ====> OK\n"); } // data init //Time step +1 cudaMalloc(&g_ex_Vx0_in, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); cudaMalloc(&g_ex_Vz0_in, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); cudaMalloc(&g_ex_Vy0_in, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); cudaMalloc(&g_ex_sigmaxx0_in, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); cudaMalloc(&g_ex_sigmazz0_in, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); cudaMalloc(&g_ex_sigmayy0_in, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); cudaMalloc(&g_ex_sigmaxy0_in, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); cudaMalloc(&g_ex_sigmaxz0_in, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); cudaMalloc(&g_ex_sigmayz0_in, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); //Time step +2 cudaMalloc(&g_ex_Vx0_in1, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); cudaMalloc(&g_ex_Vz0_in1, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); cudaMalloc(&g_ex_Vy0_in1, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); cudaMalloc(&g_ex_sigmaxx0_in1, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); cudaMalloc(&g_ex_sigmazz0_in1, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); cudaMalloc(&g_ex_sigmayy0_in1, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); cudaMalloc(&g_ex_sigmaxy0_in1, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); cudaMalloc(&g_ex_sigmaxz0_in1, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); cudaMalloc(&g_ex_sigmayz0_in1, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); //time step 0 and output 
cudaMalloc(&g_ex_Vx0_out, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); cudaMalloc(&g_ex_Vz0_out, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); cudaMalloc(&g_ex_Vy0_out, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); cudaMalloc(&g_ex_sigmaxx0_out, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); cudaMalloc(&g_ex_sigmazz0_out, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); cudaMalloc(&g_ex_sigmayy0_out, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); cudaMalloc(&g_ex_sigmaxy0_out, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); cudaMalloc(&g_ex_sigmaxz0_out, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); cudaMalloc(&g_ex_sigmayz0_out, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); //expaned arrays to store different Operators cudaMalloc(&g_ex_m2, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); cudaMalloc(&g_ex_m3, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); cudaMalloc(&g_ex_m2m3, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); cudaMalloc(&g_ex_m1_x, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); cudaMalloc(&g_ex_m1_y, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); cudaMalloc(&g_ex_m1_z, sizeof(float)*(ny+10)*(nx+10)*(nz+10)); cudaFuncSetCacheConfig(rtm_gpu_kernel,cudaFuncCachePreferShared); err = cudaGetLastError(); if(cudaSuccess != err){ fprintf(stderr, "Cuda error6: %s.\n", cudaGetErrorString(err)); exit(0); }else{ fprintf(stderr,"GPU Data Init ====> OK\n"); } // data copy } extern "C" void rtm_gpu_copy_in(int ny, int nz, int nx, float *ex_Vy0_in, float * ex_Vx0_in, float * ex_Vz0_in, float * ex_sigmayy0_in, float *ex_sigmaxx0_in, float * ex_sigmazz0_in, float * ex_sigmaxy0_in, float * ex_sigmaxz0_in, float * ex_sigmayz0_in,//(nz, nx, nt) float *ex_Vy0_in1, float * ex_Vx0_in1, float * ex_Vz0_in1, float * ex_sigmayy0_in1, float *ex_sigmaxx0_in1, float * ex_sigmazz0_in1, float * ex_sigmaxy0_in1, float * ex_sigmaxz0_in1, float * ex_sigmayz0_in1,//(nz, nx, nt) float *ex_Vy0_out, float * ex_Vx0_out, float * ex_Vz0_out, float * ex_sigmayy0_out, float *ex_sigmaxx0_out, float * ex_sigmazz0_out, float * ex_sigmaxy0_out, float * ex_sigmaxz0_out, float * 
ex_sigmayz0_out,//(nz, nx, nt) float * ex_m1_y, float * ex_m1_x, float * ex_m1_z, float * ex_m2, float * ex_m3, float * ex_m2m3)//(nz+10, nx+10) { cudaError_t err; // data copy cudaMemcpy(g_ex_Vy0_in, ex_Vy0_in, sizeof(float)*(nx+10)*(nz+10)*(ny+10), cudaMemcpyHostToDevice); cudaMemcpy(g_ex_Vx0_in, ex_Vx0_in, sizeof(float)*(nx+10)*(nz+10)*(ny+10), cudaMemcpyHostToDevice); cudaMemcpy(g_ex_Vz0_in, ex_Vz0_in, sizeof(float)*(nx+10)*(nz+10)*(ny+10), cudaMemcpyHostToDevice); cudaMemcpy(g_ex_sigmaxx0_in, ex_sigmaxx0_in, sizeof(float)*(nx+10)*(nz+10)*(ny+10), cudaMemcpyHostToDevice); cudaMemcpy(g_ex_sigmayy0_in, ex_sigmayy0_in, sizeof(float)*(nx+10)*(nz+10)*(ny+10), cudaMemcpyHostToDevice); cudaMemcpy(g_ex_sigmaxy0_in, ex_sigmaxy0_in, sizeof(float)*(nx+10)*(nz+10)*(ny+10), cudaMemcpyHostToDevice); cudaMemcpy(g_ex_sigmayz0_in, ex_sigmayz0_in, sizeof(float)*(nx+10)*(nz+10)*(ny+10), cudaMemcpyHostToDevice); cudaMemcpy(g_ex_sigmaxz0_in, ex_sigmaxz0_in, sizeof(float)*(nx+10)*(nz+10)*(ny+10), cudaMemcpyHostToDevice); cudaMemcpy(g_ex_sigmazz0_in, ex_sigmazz0_in, sizeof(float)*(nx+10)*(nz+10)*(ny+10), cudaMemcpyHostToDevice); cudaMemcpy(g_ex_Vy0_in1, ex_Vy0_in1, sizeof(float)*(nx+10)*(nz+10)*(ny+10), cudaMemcpyHostToDevice); cudaMemcpy(g_ex_Vx0_in1, ex_Vx0_in1, sizeof(float)*(nx+10)*(nz+10)*(ny+10), cudaMemcpyHostToDevice); cudaMemcpy(g_ex_Vz0_in1, ex_Vz0_in1, sizeof(float)*(nx+10)*(nz+10)*(ny+10), cudaMemcpyHostToDevice); cudaMemcpy(g_ex_sigmaxx0_in1, ex_sigmaxx0_in1, sizeof(float)*(nx+10)*(nz+10)*(ny+10), cudaMemcpyHostToDevice); cudaMemcpy(g_ex_sigmayy0_in1, ex_sigmayy0_in1, sizeof(float)*(nx+10)*(nz+10)*(ny+10), cudaMemcpyHostToDevice); cudaMemcpy(g_ex_sigmaxy0_in1, ex_sigmaxy0_in1, sizeof(float)*(nx+10)*(nz+10)*(ny+10), cudaMemcpyHostToDevice); cudaMemcpy(g_ex_sigmayz0_in1, ex_sigmayz0_in1, sizeof(float)*(nx+10)*(nz+10)*(ny+10), cudaMemcpyHostToDevice); cudaMemcpy(g_ex_sigmaxz0_in1, ex_sigmaxz0_in1, sizeof(float)*(nx+10)*(nz+10)*(ny+10), cudaMemcpyHostToDevice); 
cudaMemcpy(g_ex_sigmazz0_in1, ex_sigmazz0_in1, sizeof(float)*(nx+10)*(nz+10)*(ny+10), cudaMemcpyHostToDevice); cudaMemcpy(g_ex_Vy0_out, ex_Vy0_out, sizeof(float)*(nx+10)*(nz+10)*(ny+10), cudaMemcpyHostToDevice); cudaMemcpy(g_ex_Vx0_out, ex_Vx0_out, sizeof(float)*(nx+10)*(nz+10)*(ny+10), cudaMemcpyHostToDevice); cudaMemcpy(g_ex_Vz0_out, ex_Vz0_out, sizeof(float)*(nx+10)*(nz+10)*(ny+10), cudaMemcpyHostToDevice); cudaMemcpy(g_ex_sigmaxx0_out, ex_sigmaxx0_out, sizeof(float)*(nx+10)*(nz+10)*(ny+10), cudaMemcpyHostToDevice); cudaMemcpy(g_ex_sigmayy0_out, ex_sigmayy0_out, sizeof(float)*(nx+10)*(nz+10)*(ny+10), cudaMemcpyHostToDevice); cudaMemcpy(g_ex_sigmaxy0_out, ex_sigmaxy0_out, sizeof(float)*(nx+10)*(nz+10)*(ny+10), cudaMemcpyHostToDevice); cudaMemcpy(g_ex_sigmayz0_out, ex_sigmayz0_out, sizeof(float)*(nx+10)*(nz+10)*(ny+10), cudaMemcpyHostToDevice); cudaMemcpy(g_ex_sigmaxz0_out, ex_sigmaxz0_out, sizeof(float)*(nx+10)*(nz+10)*(ny+10), cudaMemcpyHostToDevice); cudaMemcpy(g_ex_sigmazz0_out, ex_sigmazz0_out, sizeof(float)*(nx+10)*(nz+10)*(ny+10), cudaMemcpyHostToDevice); cudaMemcpy(g_ex_m1_y, ex_m1_y, sizeof(float)*(ny+10)*(nx+10)*(nz+10), cudaMemcpyHostToDevice); cudaMemcpy(g_ex_m1_x, ex_m1_x, sizeof(float)*(ny+10)*(nx+10)*(nz+10), cudaMemcpyHostToDevice); cudaMemcpy(g_ex_m1_z, ex_m1_z, sizeof(float)*(ny+10)*(nx+10)*(nz+10), cudaMemcpyHostToDevice); cudaMemcpy(g_ex_m2, ex_m2, sizeof(float)*(ny+10)*(nx+10)*(nz+10), cudaMemcpyHostToDevice); cudaMemcpy(g_ex_m3, ex_m3, sizeof(float)*(ny+10)*(nx+10)*(nz+10), cudaMemcpyHostToDevice); cudaMemcpy(g_ex_m2m3, ex_m2m3, sizeof(float)*(ny+10)*(nx+10)*(nz+10), cudaMemcpyHostToDevice); err = cudaGetLastError(); if(cudaSuccess != err){ fprintf(stderr, "Cuda error2: %s.\n", cudaGetErrorString(err)); exit(0); }else{ fprintf(stderr,"Data Copy To GPU ====> OK\n"); } } extern "C" void rtm_gpu_copy_out(int ny, int nz, int nx, float *ex_Vy0_out, float * ex_Vx0_out, float * ex_Vz0_out, float * ex_sigmayy0_out, float *ex_sigmaxx0_out, float * 
ex_sigmazz0_out, float * ex_sigmaxy0_out, float * ex_sigmaxz0_out, float * ex_sigmayz0_out)//(nz, nx, nt) { cudaError_t err; // data copy back from GPU mem cudaMemcpy(ex_Vy0_out, g_ex_Vy0_out, sizeof(float)*(nx+10)*(nz+10)*(ny+10), cudaMemcpyDeviceToHost); cudaMemcpy(ex_Vx0_out, g_ex_Vx0_out, sizeof(float)*(nx+10)*(nz+10)*(ny+10), cudaMemcpyDeviceToHost); cudaMemcpy(ex_Vz0_out, g_ex_Vz0_out, sizeof(float)*(nx+10)*(nz+10)*(ny+10), cudaMemcpyDeviceToHost); cudaMemcpy(ex_sigmaxx0_out, g_ex_sigmaxx0_out, sizeof(float)*(nx+10)*(nz+10)*(ny+10), cudaMemcpyDeviceToHost); cudaMemcpy(ex_sigmayy0_out, g_ex_sigmayy0_out, sizeof(float)*(nx+10)*(nz+10)*(ny+10), cudaMemcpyDeviceToHost); cudaMemcpy(ex_sigmaxy0_out, g_ex_sigmaxy0_out, sizeof(float)*(nx+10)*(nz+10)*(ny+10), cudaMemcpyDeviceToHost); cudaMemcpy(ex_sigmaxz0_out, g_ex_sigmaxz0_out, sizeof(float)*(nx+10)*(nz+10)*(ny+10), cudaMemcpyDeviceToHost); cudaMemcpy(ex_sigmayz0_out, g_ex_sigmayz0_out, sizeof(float)*(nx+10)*(nz+10)*(ny+10), cudaMemcpyDeviceToHost); cudaMemcpy(ex_sigmazz0_out, g_ex_sigmazz0_out, sizeof(float)*(nx+10)*(nz+10)*(ny+10), cudaMemcpyDeviceToHost); //cudaMemcpy(sigmazz0, g_sigmazz0, sizeof(float)*nx*nz*nt, cudaMemcpyDeviceToHost); err = cudaGetLastError(); if(cudaSuccess != err){ fprintf(stderr, "Cuda error3: %s.\n", cudaGetErrorString(err)); exit(0); }else{ fprintf(stderr,"Data Copy To CPU ====> OK\n"); } } extern "C" void rtm_gpu_final() { //release GPU memory space cudaError_t err; cudaFree(g_ex_Vx0_in); cudaFree(g_ex_Vz0_in); cudaFree(g_ex_Vy0_in); cudaFree(g_ex_sigmaxx0_in); cudaFree(g_ex_sigmazz0_in); cudaFree(g_ex_sigmayy0_in); cudaFree(g_ex_sigmaxy0_in); cudaFree(g_ex_sigmaxz0_in); cudaFree(g_ex_sigmayz0_in); //Time step +2 cudaFree(g_ex_Vx0_in1); cudaFree(g_ex_Vz0_in1); cudaFree(g_ex_Vy0_in1); cudaFree(g_ex_sigmaxx0_in1); cudaFree(g_ex_sigmazz0_in1); cudaFree(g_ex_sigmayy0_in1); cudaFree(g_ex_sigmaxy0_in1); cudaFree(g_ex_sigmaxz0_in1); cudaFree(g_ex_sigmayz0_in1); //time step 0 and output 
cudaFree(g_ex_Vx0_out); cudaFree(g_ex_Vz0_out); cudaFree(g_ex_Vy0_out); cudaFree(g_ex_sigmaxx0_out); cudaFree(g_ex_sigmazz0_out); cudaFree(g_ex_sigmayy0_out); cudaFree(g_ex_sigmaxy0_out); cudaFree(g_ex_sigmaxz0_out); cudaFree(g_ex_sigmayz0_out); //expaned arrays to store different Operators cudaFree(g_ex_m2); cudaFree(g_ex_m3); cudaFree(g_ex_m2m3); cudaFree(g_ex_m1_x); cudaFree(g_ex_m1_y); cudaFree(g_ex_m1_z); err = cudaGetLastError(); if(cudaSuccess != err){ fprintf(stderr, "Cuda error4: %s.\n", cudaGetErrorString(err)); exit(0); }else{ fprintf(stderr,"GPU Mem Released ====> OK\n"); } } void rtm_gpu_change_pointer(){ fprintf(stderr, "GPU pointer changed\n"); g_tmp = g_ex_Vx0_out; g_ex_Vx0_out = g_ex_Vx0_in; g_ex_Vx0_in = g_tmp; g_tmp = g_ex_Vx0_out; g_ex_Vx0_out = g_ex_Vx0_in1; g_ex_Vx0_in1 = g_tmp; g_tmp = g_ex_Vz0_out; g_ex_Vz0_out = g_ex_Vz0_in; g_ex_Vz0_in = g_tmp; g_tmp = g_ex_Vz0_out; g_ex_Vz0_out = g_ex_Vz0_in1; g_ex_Vz0_in1 = g_tmp; g_tmp = g_ex_Vy0_out; g_ex_Vy0_out = g_ex_Vy0_in; g_ex_Vy0_in = g_tmp; g_tmp = g_ex_Vy0_out; g_ex_Vy0_out = g_ex_Vy0_in1; g_ex_Vy0_in1 = g_tmp; g_tmp = g_ex_sigmaxx0_out; g_ex_sigmaxx0_out = g_ex_sigmaxx0_in; g_ex_sigmaxx0_in = g_tmp; g_tmp = g_ex_sigmaxx0_out; g_ex_sigmaxx0_out = g_ex_sigmaxx0_in1; g_ex_sigmaxx0_in1 = g_tmp; g_tmp = g_ex_sigmazz0_out; g_ex_sigmazz0_out = g_ex_sigmazz0_in; g_ex_sigmazz0_in = g_tmp; g_tmp = g_ex_sigmazz0_out; g_ex_sigmazz0_out = g_ex_sigmazz0_in1; g_ex_sigmazz0_in1 = g_tmp; g_tmp = g_ex_sigmayy0_out; g_ex_sigmayy0_out = g_ex_sigmayy0_in; g_ex_sigmayy0_in = g_tmp; g_tmp = g_ex_sigmayy0_out; g_ex_sigmayy0_out = g_ex_sigmayy0_in1; g_ex_sigmayy0_in1 = g_tmp; g_tmp = g_ex_sigmaxy0_out; g_ex_sigmaxy0_out = g_ex_sigmaxy0_in; g_ex_sigmaxy0_in = g_tmp; g_tmp = g_ex_sigmaxy0_out; g_ex_sigmaxy0_out = g_ex_sigmaxy0_in1; g_ex_sigmaxy0_in1 = g_tmp; g_tmp = g_ex_sigmaxz0_out; g_ex_sigmaxz0_out = g_ex_sigmaxz0_in; g_ex_sigmaxz0_in = g_tmp; g_tmp = g_ex_sigmaxz0_out; g_ex_sigmaxz0_out = g_ex_sigmaxz0_in1; 
g_ex_sigmaxz0_in1 = g_tmp; g_tmp = g_ex_sigmayz0_out; g_ex_sigmayz0_out = g_ex_sigmayz0_in; g_ex_sigmayz0_in = g_tmp; g_tmp = g_ex_sigmayz0_out; g_ex_sigmayz0_out = g_ex_sigmayz0_in1; g_ex_sigmayz0_in1 = g_tmp; } __global__ void rtm_gpu_kernel(int ny, int nz, int nx, float *g_ex_Vy0_in, float * g_ex_Vx0_in, float * g_ex_Vz0_in, float * g_ex_sigmayy0_in, float *g_ex_sigmaxx0_in, float * g_ex_sigmazz0_in, float * g_ex_sigmaxy0_in, float * g_ex_sigmaxz0_in, float * g_ex_sigmayz0_in,//(nz, nx, nt) float *g_ex_Vy0_in1, float * g_ex_Vx0_in1, float * g_ex_Vz0_in1, float * g_ex_sigmayy0_in1, float *g_ex_sigmaxx0_in1, float * g_ex_sigmazz0_in1, float * g_ex_sigmaxy0_in1, float * g_ex_sigmaxz0_in1, float * g_ex_sigmayz0_in1,//(nz, nx, nt) float *g_ex_Vy0_out, float * g_ex_Vx0_out, float * g_ex_Vz0_out, float * g_ex_sigmayy0_out, float *g_ex_sigmaxx0_out, float * g_ex_sigmazz0_out, float * g_ex_sigmaxy0_out, float * g_ex_sigmaxz0_out, float * g_ex_sigmayz0_out,//(nz, nx, nt) const float * __restrict__ g_ex_m1_y, const float * __restrict__ g_ex_m1_x, const float * __restrict__ g_ex_m1_z, const float * __restrict__ g_ex_m2, const float * __restrict__ g_ex_m3, const float * __restrict__ g_ex_m2m3)//(nz+10, nx+10) { float c1=35.0/294912.0,c2=-405.0/229376.0,c3=567.0/40960.0,c4=-735.0/8192.0,c5=19845.0/16384.0; //GPU thread index int iz, ix, iy; iz = blockIdx.x*blockDim.x + threadIdx.x; ix = blockIdx.y*blockDim.y + threadIdx.y; iy = blockIdx.z*blockDim.z + threadIdx.z; //gt = it; g_ex_Vx0_out[n3d_index_ex(iz,ix ,iy)] = g_ex_Vx0_out[n3d_index_ex(iz,ix ,iy)] + g_ex_Vx0_in1[n3d_index_ex(iz, ix, iy)] + g_ex_m2m3[n3d_index_ex(iz,ix-5, iy)]*c1*g_ex_sigmaxx0_in[n3d_index_ex(iz,ix-5,iy)] + g_ex_m2m3[n3d_index_ex(iz,ix-4, iy)]*c2*g_ex_sigmaxx0_in[n3d_index_ex(iz,ix-4,iy)] + g_ex_m2m3[n3d_index_ex(iz,ix-3, iy)]*c3*g_ex_sigmaxx0_in[n3d_index_ex(iz,ix-3,iy)] + g_ex_m2m3[n3d_index_ex(iz,ix-2, iy)]*c4*g_ex_sigmaxx0_in[n3d_index_ex(iz,ix-2,iy)] + g_ex_m2m3[n3d_index_ex(iz,ix-1, 
iy)]*c5*g_ex_sigmaxx0_in[n3d_index_ex(iz,ix-1,iy)] - g_ex_m2m3[n3d_index_ex(iz,ix, iy)] *c5*g_ex_sigmaxx0_in[n3d_index_ex(iz,ix,iy)] - g_ex_m2m3[n3d_index_ex(iz,ix+1, iy)]*c4*g_ex_sigmaxx0_in[n3d_index_ex(iz,ix+1,iy)] - g_ex_m2m3[n3d_index_ex(iz,ix+2, iy)]*c3*g_ex_sigmaxx0_in[n3d_index_ex(iz,ix+2,iy)] - g_ex_m2m3[n3d_index_ex(iz,ix+3, iy)]*c2*g_ex_sigmaxx0_in[n3d_index_ex(iz,ix+3,iy)] - g_ex_m2m3[n3d_index_ex(iz,ix+4, iy)]*c1*g_ex_sigmaxx0_in[n3d_index_ex(iz,ix+4,iy)] + g_ex_m2[n3d_index_ex(iz,ix-5, iy)]*c1*g_ex_sigmayy0_in[n3d_index_ex(iz,ix-5,iy)] + g_ex_m2[n3d_index_ex(iz,ix-4, iy)]*c2*g_ex_sigmayy0_in[n3d_index_ex(iz,ix-4,iy)] + g_ex_m2[n3d_index_ex(iz,ix-3, iy)]*c3*g_ex_sigmayy0_in[n3d_index_ex(iz,ix-3,iy)] + g_ex_m2[n3d_index_ex(iz,ix-2, iy)]*c4*g_ex_sigmayy0_in[n3d_index_ex(iz,ix-2,iy)] + g_ex_m2[n3d_index_ex(iz,ix-1, iy)]*c5*g_ex_sigmayy0_in[n3d_index_ex(iz,ix-1,iy)] - g_ex_m2[n3d_index_ex(iz, ix, iy)]*c5*g_ex_sigmayy0_in[n3d_index_ex(iz,ix,iy)] - g_ex_m2[n3d_index_ex(iz,ix+1, iy)]*c4*g_ex_sigmayy0_in[n3d_index_ex(iz,ix+1,iy)] - g_ex_m2[n3d_index_ex(iz,ix+2, iy)]*c3*g_ex_sigmayy0_in[n3d_index_ex(iz,ix+2,iy)] - g_ex_m2[n3d_index_ex(iz,ix+3, iy)]*c2*g_ex_sigmayy0_in[n3d_index_ex(iz,ix+3,iy)] - g_ex_m2[n3d_index_ex(iz,ix+4, iy)]*c1*g_ex_sigmayy0_in[n3d_index_ex(iz,ix+4,iy)] + g_ex_m2[n3d_index_ex(iz,ix-5, iy)]*c1*g_ex_sigmazz0_in[n3d_index_ex(iz,ix-5,iy)] + g_ex_m2[n3d_index_ex(iz,ix-4, iy)]*c2*g_ex_sigmazz0_in[n3d_index_ex(iz,ix-4,iy)] + g_ex_m2[n3d_index_ex(iz,ix-3, iy)]*c3*g_ex_sigmazz0_in[n3d_index_ex(iz,ix-3,iy)] + g_ex_m2[n3d_index_ex(iz,ix-2, iy)]*c4*g_ex_sigmazz0_in[n3d_index_ex(iz,ix-2,iy)] + g_ex_m2[n3d_index_ex(iz,ix-1, iy)]*c5*g_ex_sigmazz0_in[n3d_index_ex(iz,ix-1,iy)] - g_ex_m2[n3d_index_ex(iz, ix, iy)]*c5*g_ex_sigmazz0_in[n3d_index_ex(iz,ix,iy)] - g_ex_m2[n3d_index_ex(iz,ix+1, iy)]*c4*g_ex_sigmazz0_in[n3d_index_ex(iz,ix+1,iy)] - g_ex_m2[n3d_index_ex(iz,ix+2, iy)]*c3*g_ex_sigmazz0_in[n3d_index_ex(iz,ix+2,iy)] - g_ex_m2[n3d_index_ex(iz,ix+3, 
iy)]*c2*g_ex_sigmazz0_in[n3d_index_ex(iz,ix+3,iy)] - g_ex_m2[n3d_index_ex(iz,ix+4, iy)]*c1*g_ex_sigmazz0_in[n3d_index_ex(iz,ix+4,iy)] + g_ex_m3[n3d_index_ex(iz,ix, iy-4)]*c1*g_ex_sigmaxy0_in[n3d_index_ex(iz,ix,iy-4)] + g_ex_m3[n3d_index_ex(iz,ix, iy-3)]*c2*g_ex_sigmaxy0_in[n3d_index_ex(iz,ix,iy-3)] + g_ex_m3[n3d_index_ex(iz,ix, iy-2)]*c3*g_ex_sigmaxy0_in[n3d_index_ex(iz,ix,iy-2)] + g_ex_m3[n3d_index_ex(iz,ix, iy-1)]*c4*g_ex_sigmaxy0_in[n3d_index_ex(iz,ix,iy-1)] + g_ex_m3[n3d_index_ex(iz,ix, iy)] *c5*g_ex_sigmaxy0_in[n3d_index_ex(iz,ix,iy)] - g_ex_m3[n3d_index_ex(iz,ix, iy+1)]*c5*g_ex_sigmaxy0_in[n3d_index_ex(iz,ix,iy+1)] - g_ex_m3[n3d_index_ex(iz,ix, iy+2)]*c4*g_ex_sigmaxy0_in[n3d_index_ex(iz,ix,iy+2)] - g_ex_m3[n3d_index_ex(iz,ix, iy+3)]*c3*g_ex_sigmaxy0_in[n3d_index_ex(iz,ix,iy+3)] - g_ex_m3[n3d_index_ex(iz,ix, iy+4)]*c2*g_ex_sigmaxy0_in[n3d_index_ex(iz,ix,iy+4)] - g_ex_m3[n3d_index_ex(iz,ix, iy+5)]*c1*g_ex_sigmaxy0_in[n3d_index_ex(iz,ix,iy+5)] + g_ex_m3[n3d_index_ex(iz-4,ix, iy)]*c1*g_ex_sigmaxz0_in[n3d_index_ex(iz-4,ix,iy)] + g_ex_m3[n3d_index_ex(iz-3,ix, iy)]*c2*g_ex_sigmaxz0_in[n3d_index_ex(iz-3,ix,iy)] + g_ex_m3[n3d_index_ex(iz-2,ix, iy)]*c3*g_ex_sigmaxz0_in[n3d_index_ex(iz-2,ix,iy)] + g_ex_m3[n3d_index_ex(iz-1,ix, iy)]*c4*g_ex_sigmaxz0_in[n3d_index_ex(iz-1,ix,iy)] + g_ex_m3[n3d_index_ex(iz, ix, iy)]*c5*g_ex_sigmaxz0_in[n3d_index_ex(iz,ix,iy)] - g_ex_m3[n3d_index_ex(iz+1,ix, iy)]*c5*g_ex_sigmaxz0_in[n3d_index_ex(iz+1,ix,iy)] - g_ex_m3[n3d_index_ex(iz+2,ix, iy)]*c4*g_ex_sigmaxz0_in[n3d_index_ex(iz+2,ix,iy)] - g_ex_m3[n3d_index_ex(iz+3,ix, iy)]*c3*g_ex_sigmaxz0_in[n3d_index_ex(iz+3,ix,iy)] - g_ex_m3[n3d_index_ex(iz+4,ix, iy)]*c2*g_ex_sigmaxz0_in[n3d_index_ex(iz+4,ix,iy)] - g_ex_m3[n3d_index_ex(iz+5,ix, iy)]*c1*g_ex_sigmaxz0_in[n3d_index_ex(iz+5,ix,iy)] ; g_ex_Vy0_out[n3d_index_ex(iz,ix ,iy)] = g_ex_Vy0_out[n3d_index_ex(iz,ix ,iy)] + g_ex_Vy0_in1[n3d_index_ex(iz, ix, iy)] + g_ex_m2m3[n3d_index_ex(iz,ix, iy-5)]*c1*g_ex_sigmayy0_in[n3d_index_ex(iz,ix,iy-5)] + 
g_ex_m2m3[n3d_index_ex(iz,ix, iy-4)]*c2*g_ex_sigmayy0_in[n3d_index_ex(iz,ix,iy-4)] + g_ex_m2m3[n3d_index_ex(iz,ix, iy-3)]*c3*g_ex_sigmayy0_in[n3d_index_ex(iz,ix,iy-3)] + g_ex_m2m3[n3d_index_ex(iz,ix, iy-2)]*c4*g_ex_sigmayy0_in[n3d_index_ex(iz,ix,iy-2)] + g_ex_m2m3[n3d_index_ex(iz,ix, iy-1)]*c5*g_ex_sigmayy0_in[n3d_index_ex(iz,ix,iy-1)] - g_ex_m2m3[n3d_index_ex(iz,ix, iy)] *c5*g_ex_sigmayy0_in[n3d_index_ex(iz,ix,iy)] - g_ex_m2m3[n3d_index_ex(iz,ix, iy+1)]*c4*g_ex_sigmayy0_in[n3d_index_ex(iz,ix,iy+1)] - g_ex_m2m3[n3d_index_ex(iz,ix, iy+2)]*c3*g_ex_sigmayy0_in[n3d_index_ex(iz,ix,iy+2)] - g_ex_m2m3[n3d_index_ex(iz,ix, iy+3)]*c2*g_ex_sigmayy0_in[n3d_index_ex(iz,ix,iy+3)] - g_ex_m2m3[n3d_index_ex(iz,ix, iy+4)]*c1*g_ex_sigmayy0_in[n3d_index_ex(iz,ix,iy+4)] + g_ex_m2[n3d_index_ex(iz,ix, iy-5)]*c1*g_ex_sigmazz0_in[n3d_index_ex(iz,ix,iy-5)] + g_ex_m2[n3d_index_ex(iz,ix, iy-4)]*c2*g_ex_sigmazz0_in[n3d_index_ex(iz,ix,iy-4)] + g_ex_m2[n3d_index_ex(iz,ix, iy-3)]*c3*g_ex_sigmazz0_in[n3d_index_ex(iz,ix,iy-3)] + g_ex_m2[n3d_index_ex(iz,ix, iy-2)]*c4*g_ex_sigmazz0_in[n3d_index_ex(iz,ix,iy-2)] + g_ex_m2[n3d_index_ex(iz,ix, iy-1)]*c5*g_ex_sigmazz0_in[n3d_index_ex(iz,ix,iy-1)] - g_ex_m2[n3d_index_ex(iz,ix, iy)] *c5*g_ex_sigmazz0_in[n3d_index_ex(iz,ix,iy)] - g_ex_m2[n3d_index_ex(iz,ix, iy+1)]*c4*g_ex_sigmazz0_in[n3d_index_ex(iz,ix,iy+1)] - g_ex_m2[n3d_index_ex(iz,ix, iy+2)]*c3*g_ex_sigmazz0_in[n3d_index_ex(iz,ix,iy+2)] - g_ex_m2[n3d_index_ex(iz,ix, iy+3)]*c2*g_ex_sigmazz0_in[n3d_index_ex(iz,ix,iy+3)] - g_ex_m2[n3d_index_ex(iz,ix, iy+4)]*c1*g_ex_sigmazz0_in[n3d_index_ex(iz,ix,iy+4)] + g_ex_m2[n3d_index_ex(iz,ix, iy-5)]*c1*g_ex_sigmaxx0_in[n3d_index_ex(iz,ix,iy-5)] + g_ex_m2[n3d_index_ex(iz,ix, iy-4)]*c2*g_ex_sigmaxx0_in[n3d_index_ex(iz,ix,iy-4)] + g_ex_m2[n3d_index_ex(iz,ix, iy-3)]*c3*g_ex_sigmaxx0_in[n3d_index_ex(iz,ix,iy-3)] + g_ex_m2[n3d_index_ex(iz,ix, iy-2)]*c4*g_ex_sigmaxx0_in[n3d_index_ex(iz,ix,iy-2)] + g_ex_m2[n3d_index_ex(iz,ix, 
iy-1)]*c5*g_ex_sigmaxx0_in[n3d_index_ex(iz,ix,iy-1)] - g_ex_m2[n3d_index_ex(iz,ix, iy)] *c5*g_ex_sigmaxx0_in[n3d_index_ex(iz,ix,iy)] - g_ex_m2[n3d_index_ex(iz,ix, iy+1)]*c4*g_ex_sigmaxx0_in[n3d_index_ex(iz,ix,iy+1)] - g_ex_m2[n3d_index_ex(iz,ix, iy+2)]*c3*g_ex_sigmaxx0_in[n3d_index_ex(iz,ix,iy+2)] - g_ex_m2[n3d_index_ex(iz,ix, iy+3)]*c2*g_ex_sigmaxx0_in[n3d_index_ex(iz,ix,iy+3)] - g_ex_m2[n3d_index_ex(iz,ix, iy+4)]*c1*g_ex_sigmaxx0_in[n3d_index_ex(iz,ix,iy+4)] + g_ex_m3[n3d_index_ex(iz-4,ix, iy)]*c1*g_ex_sigmayz0_in[n3d_index_ex(iz-4,ix,iy)] + g_ex_m3[n3d_index_ex(iz-3,ix, iy)]*c2*g_ex_sigmayz0_in[n3d_index_ex(iz-3,ix,iy)] + g_ex_m3[n3d_index_ex(iz-2,ix, iy)]*c3*g_ex_sigmayz0_in[n3d_index_ex(iz-2,ix,iy)] + g_ex_m3[n3d_index_ex(iz-1,ix, iy)]*c4*g_ex_sigmayz0_in[n3d_index_ex(iz-1,ix,iy)] + g_ex_m3[n3d_index_ex(iz,ix, iy)] *c5*g_ex_sigmayz0_in[n3d_index_ex(iz,ix,iy)] - g_ex_m3[n3d_index_ex(iz+1,ix, iy)]*c5*g_ex_sigmayz0_in[n3d_index_ex(iz+1,ix,iy)] - g_ex_m3[n3d_index_ex(iz+2,ix, iy)]*c4*g_ex_sigmayz0_in[n3d_index_ex(iz+2,ix,iy)] - g_ex_m3[n3d_index_ex(iz+3,ix, iy)]*c3*g_ex_sigmayz0_in[n3d_index_ex(iz+3,ix,iy)] - g_ex_m3[n3d_index_ex(iz+4,ix, iy)]*c2*g_ex_sigmayz0_in[n3d_index_ex(iz+4,ix,iy)] - g_ex_m3[n3d_index_ex(iz+5,ix, iy)]*c1*g_ex_sigmayz0_in[n3d_index_ex(iz+5,ix,iy)] + g_ex_m3[n3d_index_ex(iz,ix-4, iy)]*c1*g_ex_sigmaxy0_in[n3d_index_ex(iz,ix-4,iy)] + g_ex_m3[n3d_index_ex(iz,ix-3, iy)]*c2*g_ex_sigmaxy0_in[n3d_index_ex(iz,ix-3,iy)] + g_ex_m3[n3d_index_ex(iz,ix-2, iy)]*c3*g_ex_sigmaxy0_in[n3d_index_ex(iz,ix-2,iy)] + g_ex_m3[n3d_index_ex(iz,ix-1, iy)]*c4*g_ex_sigmaxy0_in[n3d_index_ex(iz,ix-1,iy)] + g_ex_m3[n3d_index_ex(iz,ix, iy)] *c5*g_ex_sigmaxy0_in[n3d_index_ex(iz,ix,iy)] - g_ex_m3[n3d_index_ex(iz,ix+1, iy)]*c5*g_ex_sigmaxy0_in[n3d_index_ex(iz,ix+1,iy)] - g_ex_m3[n3d_index_ex(iz,ix+2, iy)]*c4*g_ex_sigmaxy0_in[n3d_index_ex(iz,ix+2,iy)] - g_ex_m3[n3d_index_ex(iz,ix+3, iy)]*c3*g_ex_sigmaxy0_in[n3d_index_ex(iz,ix+3,iy)] - g_ex_m3[n3d_index_ex(iz,ix+4, 
iy)]*c2*g_ex_sigmaxy0_in[n3d_index_ex(iz,ix+4,iy)] - g_ex_m3[n3d_index_ex(iz,ix+5, iy)]*c1*g_ex_sigmaxy0_in[n3d_index_ex(iz,ix+5,iy)] ; g_ex_Vz0_out[n3d_index_ex(iz,ix ,iy)] = g_ex_Vz0_out[n3d_index_ex(iz,ix ,iy)] + g_ex_Vz0_in1[n3d_index_ex(iz, ix, iy)] + g_ex_m2m3[n3d_index_ex(iz-5,ix, iy)]*c1*g_ex_sigmazz0_in[n3d_index_ex(iz-5,ix,iy)] + g_ex_m2m3[n3d_index_ex(iz-4,ix, iy)]*c2*g_ex_sigmazz0_in[n3d_index_ex(iz-4,ix,iy)] + g_ex_m2m3[n3d_index_ex(iz-3,ix, iy)]*c3*g_ex_sigmazz0_in[n3d_index_ex(iz-3,ix,iy)] + g_ex_m2m3[n3d_index_ex(iz-2,ix, iy)]*c4*g_ex_sigmazz0_in[n3d_index_ex(iz-2,ix,iy)] + g_ex_m2m3[n3d_index_ex(iz-1,ix, iy)]*c5*g_ex_sigmazz0_in[n3d_index_ex(iz-1,ix,iy)] - g_ex_m2m3[n3d_index_ex(iz,ix, iy)] *c5*g_ex_sigmazz0_in[n3d_index_ex(iz,ix,iy)] - g_ex_m2m3[n3d_index_ex(iz+1,ix, iy)]*c4*g_ex_sigmazz0_in[n3d_index_ex(iz+1,ix,iy)] - g_ex_m2m3[n3d_index_ex(iz+2,ix, iy)]*c3*g_ex_sigmazz0_in[n3d_index_ex(iz+2,ix,iy)] - g_ex_m2m3[n3d_index_ex(iz+3,ix, iy)]*c2*g_ex_sigmazz0_in[n3d_index_ex(iz+3,ix,iy)] - g_ex_m2m3[n3d_index_ex(iz+4,ix, iy)]*c1*g_ex_sigmazz0_in[n3d_index_ex(iz+4,ix,iy)] + g_ex_m2[n3d_index_ex(iz-5,ix, iy)]*c1*g_ex_sigmaxx0_in[n3d_index_ex(iz-5,ix,iy)] + g_ex_m2[n3d_index_ex(iz-4,ix, iy)]*c2*g_ex_sigmaxx0_in[n3d_index_ex(iz-4,ix,iy)] + g_ex_m2[n3d_index_ex(iz-3,ix, iy)]*c3*g_ex_sigmaxx0_in[n3d_index_ex(iz-3,ix,iy)] + g_ex_m2[n3d_index_ex(iz-2,ix, iy)]*c4*g_ex_sigmaxx0_in[n3d_index_ex(iz-2,ix,iy)] + g_ex_m2[n3d_index_ex(iz-1,ix, iy)]*c5*g_ex_sigmaxx0_in[n3d_index_ex(iz-1,ix,iy)] - g_ex_m2[n3d_index_ex(iz,ix, iy)] *c5*g_ex_sigmaxx0_in[n3d_index_ex(iz,ix,iy)] - g_ex_m2[n3d_index_ex(iz+1,ix, iy)]*c4*g_ex_sigmaxx0_in[n3d_index_ex(iz+1,ix,iy)] - g_ex_m2[n3d_index_ex(iz+2,ix, iy)]*c3*g_ex_sigmaxx0_in[n3d_index_ex(iz+2,ix,iy)] - g_ex_m2[n3d_index_ex(iz+3,ix, iy)]*c2*g_ex_sigmaxx0_in[n3d_index_ex(iz+3,ix,iy)] - g_ex_m2[n3d_index_ex(iz+4,ix, iy)]*c1*g_ex_sigmaxx0_in[n3d_index_ex(iz+4,ix,iy)] + g_ex_m2[n3d_index_ex(iz-5,ix, 
iy)]*c1*g_ex_sigmayy0_in[n3d_index_ex(iz-5,ix,iy)] + g_ex_m2[n3d_index_ex(iz-4,ix, iy)]*c2*g_ex_sigmayy0_in[n3d_index_ex(iz-4,ix,iy)] + g_ex_m2[n3d_index_ex(iz-3,ix, iy)]*c3*g_ex_sigmayy0_in[n3d_index_ex(iz-3,ix,iy)] + g_ex_m2[n3d_index_ex(iz-2,ix, iy)]*c4*g_ex_sigmayy0_in[n3d_index_ex(iz-2,ix,iy)] + g_ex_m2[n3d_index_ex(iz-1,ix, iy)]*c5*g_ex_sigmayy0_in[n3d_index_ex(iz-1,ix,iy)] - g_ex_m2[n3d_index_ex(iz,ix, iy)] *c5*g_ex_sigmayy0_in[n3d_index_ex(iz,ix,iy)] - g_ex_m2[n3d_index_ex(iz+1,ix, iy)]*c4*g_ex_sigmayy0_in[n3d_index_ex(iz+1,ix,iy)] - g_ex_m2[n3d_index_ex(iz+2,ix, iy)]*c3*g_ex_sigmayy0_in[n3d_index_ex(iz+2,ix,iy)] - g_ex_m2[n3d_index_ex(iz+3,ix, iy)]*c2*g_ex_sigmayy0_in[n3d_index_ex(iz+3,ix,iy)] - g_ex_m2[n3d_index_ex(iz+4,ix, iy)]*c1*g_ex_sigmayy0_in[n3d_index_ex(iz+4,ix,iy)] + g_ex_m3[n3d_index_ex(iz,ix, iy-4)]*c1*g_ex_sigmayz0_in[n3d_index_ex(iz,ix,iy-4)] + g_ex_m3[n3d_index_ex(iz,ix, iy-3)]*c2*g_ex_sigmayz0_in[n3d_index_ex(iz,ix,iy-3)] + g_ex_m3[n3d_index_ex(iz,ix, iy-2)]*c3*g_ex_sigmayz0_in[n3d_index_ex(iz,ix,iy-2)] + g_ex_m3[n3d_index_ex(iz,ix, iy-1)]*c4*g_ex_sigmayz0_in[n3d_index_ex(iz,ix,iy-1)] + g_ex_m3[n3d_index_ex(iz,ix, iy)] *c5*g_ex_sigmayz0_in[n3d_index_ex(iz,ix,iy)] - g_ex_m3[n3d_index_ex(iz,ix, iy+1)]*c5*g_ex_sigmayz0_in[n3d_index_ex(iz,ix,iy+1)] - g_ex_m3[n3d_index_ex(iz,ix, iy+2)]*c4*g_ex_sigmayz0_in[n3d_index_ex(iz,ix,iy+2)] - g_ex_m3[n3d_index_ex(iz,ix, iy+3)]*c3*g_ex_sigmayz0_in[n3d_index_ex(iz,ix,iy+3)] - g_ex_m3[n3d_index_ex(iz,ix, iy+4)]*c2*g_ex_sigmayz0_in[n3d_index_ex(iz,ix,iy+4)] - g_ex_m3[n3d_index_ex(iz,ix, iy+5)]*c1*g_ex_sigmayz0_in[n3d_index_ex(iz,ix,iy+5)] + g_ex_m3[n3d_index_ex(iz,ix-4, iy)]*c1*g_ex_sigmaxz0_in[n3d_index_ex(iz,ix-4,iy)] + g_ex_m3[n3d_index_ex(iz,ix-3, iy)]*c2*g_ex_sigmaxz0_in[n3d_index_ex(iz,ix-3,iy)] + g_ex_m3[n3d_index_ex(iz,ix-2, iy)]*c3*g_ex_sigmaxz0_in[n3d_index_ex(iz,ix-2,iy)] + g_ex_m3[n3d_index_ex(iz,ix-1, iy)]*c4*g_ex_sigmaxz0_in[n3d_index_ex(iz,ix-1,iy)] + g_ex_m3[n3d_index_ex(iz,ix, iy)] 
*c5*g_ex_sigmaxz0_in[n3d_index_ex(iz,ix,iy)] - g_ex_m3[n3d_index_ex(iz,ix+1, iy)]*c5*g_ex_sigmaxz0_in[n3d_index_ex(iz,ix+1,iy)] - g_ex_m3[n3d_index_ex(iz,ix+2, iy)]*c4*g_ex_sigmaxz0_in[n3d_index_ex(iz,ix+2,iy)] - g_ex_m3[n3d_index_ex(iz,ix+3, iy)]*c3*g_ex_sigmaxz0_in[n3d_index_ex(iz,ix+3,iy)] - g_ex_m3[n3d_index_ex(iz,ix+4, iy)]*c2*g_ex_sigmaxz0_in[n3d_index_ex(iz,ix+4,iy)] - g_ex_m3[n3d_index_ex(iz,ix+5, iy)]*c1*g_ex_sigmaxz0_in[n3d_index_ex(iz,ix+5,iy)] ; g_ex_sigmaxx0_out[n3d_index_ex(iz,ix ,iy)] = g_ex_sigmaxx0_out[n3d_index_ex(iz,ix , iy)] + g_ex_sigmaxx0_in1[n3d_index_ex(iz,ix , iy)] + g_ex_m1_x[n3d_index_ex(iz,ix-4, iy)]*c1*g_ex_Vx0_in[n3d_index_ex(iz,ix-4,iy)] + g_ex_m1_x[n3d_index_ex(iz,ix-3, iy)]*c2*g_ex_Vx0_in[n3d_index_ex(iz,ix-3,iy)] + g_ex_m1_x[n3d_index_ex(iz,ix-2, iy)]*c3*g_ex_Vx0_in[n3d_index_ex(iz,ix-2,iy)] + g_ex_m1_x[n3d_index_ex(iz,ix-1, iy)]*c4*g_ex_Vx0_in[n3d_index_ex(iz,ix-1,iy)] + g_ex_m1_x[n3d_index_ex(iz,ix, iy)] *c5*g_ex_Vx0_in[n3d_index_ex(iz,ix,iy)] - g_ex_m1_x[n3d_index_ex(iz,ix+1, iy)]*c5*g_ex_Vx0_in[n3d_index_ex(iz,ix+1,iy)] - g_ex_m1_x[n3d_index_ex(iz,ix+2, iy)]*c4*g_ex_Vx0_in[n3d_index_ex(iz,ix+2,iy)] - g_ex_m1_x[n3d_index_ex(iz,ix+3, iy)]*c3*g_ex_Vx0_in[n3d_index_ex(iz,ix+3,iy)] - g_ex_m1_x[n3d_index_ex(iz,ix+4, iy)]*c2*g_ex_Vx0_in[n3d_index_ex(iz,ix+4,iy)] - g_ex_m1_x[n3d_index_ex(iz,ix+5, iy)]*c1*g_ex_Vx0_in[n3d_index_ex(iz,ix+5,iy)] ; g_ex_sigmayy0_out[n3d_index_ex(iz,ix ,iy)] = g_ex_sigmayy0_out[n3d_index_ex(iz,ix , iy)] + g_ex_sigmayy0_in1[n3d_index_ex(iz,ix , iy)] + g_ex_m1_y[n3d_index_ex(iz,ix, iy-4)]*c1*g_ex_Vy0_in[n3d_index_ex(iz,ix,iy-4)] + g_ex_m1_y[n3d_index_ex(iz,ix, iy-3)]*c2*g_ex_Vy0_in[n3d_index_ex(iz,ix,iy-3)] + g_ex_m1_y[n3d_index_ex(iz,ix, iy-2)]*c3*g_ex_Vy0_in[n3d_index_ex(iz,ix,iy-2)] + g_ex_m1_y[n3d_index_ex(iz,ix, iy-1)]*c4*g_ex_Vy0_in[n3d_index_ex(iz,ix,iy-1)] + g_ex_m1_y[n3d_index_ex(iz,ix, iy)] *c5*g_ex_Vy0_in[n3d_index_ex(iz,ix,iy)] - g_ex_m1_y[n3d_index_ex(iz,ix, 
iy+1)]*c5*g_ex_Vy0_in[n3d_index_ex(iz,ix,iy+1)] - g_ex_m1_y[n3d_index_ex(iz,ix, iy+2)]*c4*g_ex_Vy0_in[n3d_index_ex(iz,ix,iy+2)] - g_ex_m1_y[n3d_index_ex(iz,ix, iy+3)]*c3*g_ex_Vy0_in[n3d_index_ex(iz,ix,iy+3)] - g_ex_m1_y[n3d_index_ex(iz,ix, iy+4)]*c2*g_ex_Vy0_in[n3d_index_ex(iz,ix,iy+4)] - g_ex_m1_y[n3d_index_ex(iz,ix, iy+5)]*c1*g_ex_Vy0_in[n3d_index_ex(iz,ix,iy+5)] ; g_ex_sigmazz0_out[n3d_index_ex(iz,ix ,iy)] = g_ex_sigmazz0_out[n3d_index_ex(iz,ix , iy)] + g_ex_sigmazz0_in1[n3d_index_ex(iz,ix , iy)] + g_ex_m1_z[n3d_index_ex(iz-4,ix, iy)]*c1*g_ex_Vz0_in[n3d_index_ex(iz-4,ix,iy)] + g_ex_m1_z[n3d_index_ex(iz-3,ix, iy)]*c2*g_ex_Vz0_in[n3d_index_ex(iz-3,ix,iy)] + g_ex_m1_z[n3d_index_ex(iz-2,ix, iy)]*c3*g_ex_Vz0_in[n3d_index_ex(iz-2,ix,iy)] + g_ex_m1_z[n3d_index_ex(iz-1,ix, iy)]*c4*g_ex_Vz0_in[n3d_index_ex(iz-1,ix,iy)] + g_ex_m1_z[n3d_index_ex(iz,ix, iy)] *c5*g_ex_Vz0_in[n3d_index_ex(iz,ix,iy)] - g_ex_m1_z[n3d_index_ex(iz+1,ix, iy)]*c5*g_ex_Vz0_in[n3d_index_ex(iz+1,ix,iy)] - g_ex_m1_z[n3d_index_ex(iz+2,ix, iy)]*c4*g_ex_Vz0_in[n3d_index_ex(iz+2,ix,iy)] - g_ex_m1_z[n3d_index_ex(iz+3,ix, iy)]*c3*g_ex_Vz0_in[n3d_index_ex(iz+3,ix,iy)] - g_ex_m1_z[n3d_index_ex(iz+4,ix, iy)]*c2*g_ex_Vz0_in[n3d_index_ex(iz+4,ix,iy)] - g_ex_m1_z[n3d_index_ex(iz+5,ix, iy)]*c1*g_ex_Vz0_in[n3d_index_ex(iz+5,ix,iy)] ; g_ex_sigmaxy0_out[n3d_index_ex(iz,ix ,iy)] = g_ex_sigmaxy0_out[n3d_index_ex(iz,ix , iy)] + g_ex_sigmaxy0_in1[n3d_index_ex(iz,ix , iy)] + g_ex_m1_y[n3d_index_ex(iz,ix-4, iy)]*c1*g_ex_Vy0_in[n3d_index_ex(iz,ix-4,iy)] + g_ex_m1_y[n3d_index_ex(iz,ix-3, iy)]*c2*g_ex_Vy0_in[n3d_index_ex(iz,ix-3,iy)] + g_ex_m1_y[n3d_index_ex(iz,ix-2, iy)]*c3*g_ex_Vy0_in[n3d_index_ex(iz,ix-2,iy)] + g_ex_m1_y[n3d_index_ex(iz,ix-1, iy)]*c4*g_ex_Vy0_in[n3d_index_ex(iz,ix-1,iy)] + g_ex_m1_y[n3d_index_ex(iz,ix, iy)] *c5*g_ex_Vy0_in[n3d_index_ex(iz,ix,iy)] - g_ex_m1_y[n3d_index_ex(iz,ix+1, iy)]*c5*g_ex_Vy0_in[n3d_index_ex(iz,ix+1,iy)] - g_ex_m1_y[n3d_index_ex(iz,ix+2, iy)]*c4*g_ex_Vy0_in[n3d_index_ex(iz,ix+2,iy)] - 
g_ex_m1_y[n3d_index_ex(iz,ix+3, iy)]*c3*g_ex_Vy0_in[n3d_index_ex(iz,ix+3,iy)] - g_ex_m1_y[n3d_index_ex(iz,ix+4, iy)]*c2*g_ex_Vy0_in[n3d_index_ex(iz,ix+4,iy)] - g_ex_m1_y[n3d_index_ex(iz,ix+5, iy)]*c1*g_ex_Vy0_in[n3d_index_ex(iz,ix+5,iy)] + g_ex_m1_x[n3d_index_ex(iz,ix, iy-4)]*c1*g_ex_Vx0_in[n3d_index_ex(iz,ix,iy-4)] + g_ex_m1_x[n3d_index_ex(iz,ix, iy-3)]*c2*g_ex_Vx0_in[n3d_index_ex(iz,ix,iy-3)] + g_ex_m1_x[n3d_index_ex(iz,ix, iy-2)]*c3*g_ex_Vx0_in[n3d_index_ex(iz,ix,iy-2)] + g_ex_m1_x[n3d_index_ex(iz,ix, iy-1)]*c4*g_ex_Vx0_in[n3d_index_ex(iz,ix,iy-1)] + g_ex_m1_x[n3d_index_ex(iz,ix, iy)] *c5*g_ex_Vx0_in[n3d_index_ex(iz,ix,iy)] - g_ex_m1_x[n3d_index_ex(iz,ix, iy+1)]*c5*g_ex_Vx0_in[n3d_index_ex(iz,ix,iy+1)] - g_ex_m1_x[n3d_index_ex(iz,ix, iy+2)]*c4*g_ex_Vx0_in[n3d_index_ex(iz,ix,iy+2)] - g_ex_m1_x[n3d_index_ex(iz,ix, iy+3)]*c3*g_ex_Vx0_in[n3d_index_ex(iz,ix,iy+3)] - g_ex_m1_x[n3d_index_ex(iz,ix, iy+4)]*c2*g_ex_Vx0_in[n3d_index_ex(iz,ix,iy+4)] - g_ex_m1_x[n3d_index_ex(iz,ix, iy+5)]*c1*g_ex_Vx0_in[n3d_index_ex(iz,ix,iy+5)] ; g_ex_sigmaxz0_out[n3d_index_ex(iz,ix ,iy)] = g_ex_sigmaxz0_out[n3d_index_ex(iz,ix , iy)] + g_ex_sigmaxz0_in1[n3d_index_ex(iz,ix , iy)] + g_ex_m1_x[n3d_index_ex(iz-4,ix, iy)]*c1*g_ex_Vx0_in[n3d_index_ex(iz-4,ix,iy)] + g_ex_m1_x[n3d_index_ex(iz-3,ix, iy)]*c2*g_ex_Vx0_in[n3d_index_ex(iz-3,ix,iy)] + g_ex_m1_x[n3d_index_ex(iz-2,ix, iy)]*c3*g_ex_Vx0_in[n3d_index_ex(iz-2,ix,iy)] + g_ex_m1_x[n3d_index_ex(iz-1,ix, iy)]*c4*g_ex_Vx0_in[n3d_index_ex(iz-1,ix,iy)] + g_ex_m1_x[n3d_index_ex(iz,ix, iy)] *c5*g_ex_Vx0_in[n3d_index_ex(iz,ix,iy)] - g_ex_m1_x[n3d_index_ex(iz+1,ix, iy)]*c5*g_ex_Vx0_in[n3d_index_ex(iz+1,ix,iy)] - g_ex_m1_x[n3d_index_ex(iz+2,ix, iy)]*c4*g_ex_Vx0_in[n3d_index_ex(iz+2,ix,iy)] - g_ex_m1_x[n3d_index_ex(iz+3,ix, iy)]*c3*g_ex_Vx0_in[n3d_index_ex(iz+3,ix,iy)] - g_ex_m1_x[n3d_index_ex(iz+4,ix, iy)]*c2*g_ex_Vx0_in[n3d_index_ex(iz+4,ix,iy)] - g_ex_m1_x[n3d_index_ex(iz+5,ix, iy)]*c1*g_ex_Vx0_in[n3d_index_ex(iz+5,ix,iy)] + 
g_ex_m1_z[n3d_index_ex(iz,ix-4, iy)]*c1*g_ex_Vz0_in[n3d_index_ex(iz,ix-4,iy)] + g_ex_m1_z[n3d_index_ex(iz,ix-3, iy)]*c2*g_ex_Vz0_in[n3d_index_ex(iz,ix-3,iy)] + g_ex_m1_z[n3d_index_ex(iz,ix-2, iy)]*c3*g_ex_Vz0_in[n3d_index_ex(iz,ix-2,iy)] + g_ex_m1_z[n3d_index_ex(iz,ix-1, iy)]*c4*g_ex_Vz0_in[n3d_index_ex(iz,ix-1,iy)] + g_ex_m1_z[n3d_index_ex(iz,ix, iy)] *c5*g_ex_Vz0_in[n3d_index_ex(iz,ix,iy)] - g_ex_m1_z[n3d_index_ex(iz,ix+1, iy)]*c5*g_ex_Vz0_in[n3d_index_ex(iz,ix+1,iy)] - g_ex_m1_z[n3d_index_ex(iz,ix+2, iy)]*c4*g_ex_Vz0_in[n3d_index_ex(iz,ix+2,iy)] - g_ex_m1_z[n3d_index_ex(iz,ix+3, iy)]*c3*g_ex_Vz0_in[n3d_index_ex(iz,ix+3,iy)] - g_ex_m1_z[n3d_index_ex(iz,ix+4, iy)]*c2*g_ex_Vz0_in[n3d_index_ex(iz,ix+4,iy)] - g_ex_m1_z[n3d_index_ex(iz,ix+5, iy)]*c1*g_ex_Vz0_in[n3d_index_ex(iz,ix+5,iy)] ; g_ex_sigmayz0_out[n3d_index_ex(iz,ix ,iy)] = g_ex_sigmayz0_out[n3d_index_ex(iz,ix , iy)] + g_ex_sigmayz0_in1[n3d_index_ex(iz,ix , iy)] + g_ex_m1_y[n3d_index_ex(iz-4,ix, iy)]*c1*g_ex_Vy0_in[n3d_index_ex(iz-4,ix,iy)] + g_ex_m1_y[n3d_index_ex(iz-3,ix, iy)]*c2*g_ex_Vy0_in[n3d_index_ex(iz-3,ix,iy)] + g_ex_m1_y[n3d_index_ex(iz-2,ix, iy)]*c3*g_ex_Vy0_in[n3d_index_ex(iz-2,ix,iy)] + g_ex_m1_y[n3d_index_ex(iz-1,ix, iy)]*c4*g_ex_Vy0_in[n3d_index_ex(iz-1,ix,iy)] + g_ex_m1_y[n3d_index_ex(iz,ix, iy)] *c5*g_ex_Vy0_in[n3d_index_ex(iz,ix,iy)] - g_ex_m1_y[n3d_index_ex(iz+1,ix, iy)]*c5*g_ex_Vy0_in[n3d_index_ex(iz+1,ix,iy)] - g_ex_m1_y[n3d_index_ex(iz+2,ix, iy)]*c4*g_ex_Vy0_in[n3d_index_ex(iz+2,ix,iy)] - g_ex_m1_y[n3d_index_ex(iz+3,ix, iy)]*c3*g_ex_Vy0_in[n3d_index_ex(iz+3,ix,iy)] - g_ex_m1_y[n3d_index_ex(iz+4,ix, iy)]*c2*g_ex_Vy0_in[n3d_index_ex(iz+4,ix,iy)] - g_ex_m1_y[n3d_index_ex(iz+5,ix, iy)]*c1*g_ex_Vy0_in[n3d_index_ex(iz+5,ix,iy)] + g_ex_m1_z[n3d_index_ex(iz,ix, iy-4)]*c1*g_ex_Vz0_in[n3d_index_ex(iz,ix,iy-4)] + g_ex_m1_z[n3d_index_ex(iz,ix, iy-3)]*c2*g_ex_Vz0_in[n3d_index_ex(iz,ix,iy-3)] + g_ex_m1_z[n3d_index_ex(iz,ix, iy-2)]*c3*g_ex_Vz0_in[n3d_index_ex(iz,ix,iy-2)] + 
g_ex_m1_z[n3d_index_ex(iz,ix, iy-1)]*c4*g_ex_Vz0_in[n3d_index_ex(iz,ix,iy-1)] + g_ex_m1_z[n3d_index_ex(iz,ix, iy)] *c5*g_ex_Vz0_in[n3d_index_ex(iz,ix,iy)] - g_ex_m1_z[n3d_index_ex(iz,ix, iy+1)]*c5*g_ex_Vz0_in[n3d_index_ex(iz,ix,iy+1)] - g_ex_m1_z[n3d_index_ex(iz,ix, iy+2)]*c4*g_ex_Vz0_in[n3d_index_ex(iz,ix,iy+2)] - g_ex_m1_z[n3d_index_ex(iz,ix, iy+3)]*c3*g_ex_Vz0_in[n3d_index_ex(iz,ix,iy+3)] - g_ex_m1_z[n3d_index_ex(iz,ix, iy+4)]*c2*g_ex_Vz0_in[n3d_index_ex(iz,ix,iy+4)] - g_ex_m1_z[n3d_index_ex(iz,ix, iy+5)]*c1*g_ex_Vz0_in[n3d_index_ex(iz,ix,iy+5)] ; } extern "C" void rtm_gpu_func(int ny, int nz, int nx, float *ex_Vy0_in, float * ex_Vx0_in, float * ex_Vz0_in, float * ex_sigmayy0_in, float *ex_sigmaxx0_in, float * ex_sigmazz0_in, float * ex_sigmaxy0_in, float * ex_sigmaxz0_in, float * ex_sigmayz0_in,//(nz, nx, nt) float *ex_Vy0_in1, float * ex_Vx0_in1, float * ex_Vz0_in1, float * ex_sigmayy0_in1, float *ex_sigmaxx0_in1, float * ex_sigmazz0_in1, float * ex_sigmaxy0_in1, float * ex_sigmaxz0_in1, float * ex_sigmayz0_in1,//(nz, nx, nt) float *ex_Vy0_out, float * ex_Vx0_out, float * ex_Vz0_out, float * ex_sigmayy0_out, float *ex_sigmaxx0_out, float * ex_sigmazz0_out, float * ex_sigmaxy0_out, float * ex_sigmaxz0_out, float * ex_sigmayz0_out,//(nz, nx, nt) float * ex_m1_y, float * ex_m1_x,float * ex_m1_z,float * ex_m2, float * ex_m3, float * ex_m2m3,//)//(nz+10,nx+10) float * debug, float * gpu_kernel_time) { cudaError_t err; cudaEvent_t start1, start2, start3, stop1, stop2, stop3; float elapsedTime1 = 0.0f; float elapsedTime2 = 0.0f; float elapsedTime3 = 0.0f; int g_it; cudaEventCreate(&start1); cudaEventCreate(&start2); cudaEventCreate(&start3); cudaEventCreate(&stop1); cudaEventCreate(&stop2); cudaEventCreate(&stop3); //time record //data copy in cudaEventRecord(start1, 0); rtm_gpu_copy_in(ny, nz, nx, ex_Vy0_in, ex_Vx0_in, ex_Vz0_in, ex_sigmayy0_in, ex_sigmaxx0_in, ex_sigmazz0_in, ex_sigmaxy0_in, ex_sigmaxz0_in, ex_sigmayz0_in, ex_Vy0_in1, ex_Vx0_in1, ex_Vz0_in1, 
ex_sigmayy0_in1, ex_sigmaxx0_in1, ex_sigmazz0_in1, ex_sigmaxy0_in1, ex_sigmaxz0_in1, ex_sigmayz0_in1, ex_Vy0_out, ex_Vx0_out, ex_Vz0_out, ex_sigmayy0_out, ex_sigmaxx0_out, ex_sigmazz0_out, ex_sigmaxy0_out, ex_sigmaxz0_out, ex_sigmayz0_out, ex_m1_y, ex_m1_x, ex_m1_z, ex_m2, ex_m3, ex_m2m3); cudaEventRecord(stop1, 0); err = cudaGetLastError(); if(cudaSuccess != err){ fprintf(stderr, "Cuda error5: %s.\n", cudaGetErrorString(err)); exit(0); } //RTM computing dim3 dimGrid(nz/TZ, nx/TX, ny/TY); dim3 dimBlock(TZ, TX, TY); cudaEventRecord(start2, 0); fprintf(stderr,"GPU Computing ... ...(NZ=%d, NX=%d, NY=%d, TZ=%d, TX=%d, TY=%d)\n", nz, nx, ny, TZ, TX, TY); for(g_it = 0; g_it < Steps_write_back; g_it++){ fprintf(stderr, "Step %d\n", g_it); rtm_gpu_kernel<<<dimGrid, dimBlock>>>(ny, nz, nx, g_ex_Vy0_in, g_ex_Vx0_in, g_ex_Vz0_in, g_ex_sigmayy0_in, g_ex_sigmaxx0_in, g_ex_sigmazz0_in, g_ex_sigmaxy0_in, g_ex_sigmaxz0_in, g_ex_sigmayz0_in, g_ex_Vy0_in1, g_ex_Vx0_in1, g_ex_Vz0_in1, g_ex_sigmayy0_in1, g_ex_sigmaxx0_in1, g_ex_sigmazz0_in1, g_ex_sigmaxy0_in1, g_ex_sigmaxz0_in1, g_ex_sigmayz0_in1, g_ex_Vy0_out, g_ex_Vx0_out, g_ex_Vz0_out, g_ex_sigmayy0_out, g_ex_sigmaxx0_out, g_ex_sigmazz0_out, g_ex_sigmaxy0_out, g_ex_sigmaxz0_out, g_ex_sigmayz0_out, g_ex_m1_y, g_ex_m1_x, g_ex_m1_z, g_ex_m2, g_ex_m3, g_ex_m2m3); //cudaThreadSynchronize(); err = cudaGetLastError(); if(cudaSuccess != err){ fprintf(stderr, "Cuda error2: %s.\n", cudaGetErrorString(err)); exit(0); } if(g_it<Steps_write_back-1) rtm_gpu_change_pointer(); } cudaEventRecord(stop2, 0); //data copy out cudaEventRecord(start3, 0); rtm_gpu_copy_out(ny, nz, nx, ex_Vy0_out, ex_Vx0_out, ex_Vz0_out, ex_sigmayy0_out, ex_sigmaxx0_out, ex_sigmazz0_out, ex_sigmaxy0_out, ex_sigmaxz0_out, ex_sigmayz0_out); cudaEventRecord(stop3, 0); err = cudaGetLastError(); if(cudaSuccess != err){ fprintf(stderr, "Cuda error3: %s.\n", cudaGetErrorString(err)); } //cudaEventRecord(stop, 0); cudaEventSynchronize(stop1); cudaEventSynchronize(stop2); 
cudaEventSynchronize(stop3); cudaEventElapsedTime(&elapsedTime1, start1, stop1); cudaEventElapsedTime(&elapsedTime2, start2, stop2); cudaEventElapsedTime(&elapsedTime3, start3, stop3); gpu_kernel_time[0] = (float)(elapsedTime1/1000.); gpu_kernel_time[1] = (float)(elapsedTime2/1000.); gpu_kernel_time[2] = (float)(elapsedTime3/1000.); fprintf(stderr, "GPU copy in Time: %.4f\n", (float)elapsedTime1/1000.); fprintf(stderr, "GPU Comput. Time: %.4f\n", (float)elapsedTime2/1000.); fprintf(stderr, "GPU copy ot Time: %.4f\n", (float)elapsedTime3/1000.); } __global__ void rtm_gpu_kernel_all_shared(int it,int nt, int nz, int nx, float * g_ex_Vx0, float * g_ex_Vz0, float * g_ex_sigmaxx0, float * g_ex_sigmazz0, float * g_ex_sigmaxz0, //(nz, nx, nt) float * g_ex_m1_x,float * g_ex_m1_z,float * g_ex_aux_m2_c, float * g_ex_aux_m3_c, float * g_ex_aux_m2m3_c)//(nz+10, nx+10) { float c1=35.0/294912.0,c2=-405.0/229376.0,c3=567.0/40960.0,c4=-735.0/8192.0,c5=19845.0/16384.0; //GPU thread index int iz, ix; iz = blockIdx.x*blockDim.x + threadIdx.x; ix = blockIdx.y*blockDim.y + threadIdx.y; //gt = it; __shared__ float sh_ex_aux_m2m3_c[(TZ+10)*(TX+10)]; __shared__ float sh_ex_aux_m2_c[(TZ+10)*(TX+10)]; __shared__ float sh_ex_aux_m3_c[(TZ+10)*(TX+10)]; __shared__ float sh_ex_m1_x[(TZ+10)*(TX+10)]; __shared__ float sh_ex_m1_z[(TZ+10)*(TX+10)]; __shared__ float sh_ex_Vx0[(TZ+10)*(TX+10)]; __shared__ float sh_ex_Vz0[(TZ+10)*(TX+10)]; __shared__ float sh_ex_sigmaxx0[(TZ+10)*(TX+10)]; __shared__ float sh_ex_sigmazz0[(TZ+10)*(TX+10)]; __shared__ float sh_ex_sigmaxz0[(TZ+10)*(TX+10)]; //sh_ex_aux_m2m3_c[threadIdx][]; sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x,threadIdx.y)] = g_ex_aux_m2m3_c[index_ex(iz,ix)]; sh_ex_aux_m2_c[index_blk_ex(threadIdx.x,threadIdx.y)] = g_ex_aux_m2_c[index_ex(iz,ix)]; sh_ex_aux_m3_c[index_blk_ex(threadIdx.x,threadIdx.y)] = g_ex_aux_m3_c[index_ex(iz,ix)]; sh_ex_m1_x[index_blk_ex(threadIdx.x,threadIdx.y)] = g_ex_m1_x[index_ex(iz,ix)]; 
// --- centre loads (cont.): each thread caches its own (iz,ix) sample of the
// remaining model/wavefield tiles; wavefields are read at time slice it+1.
sh_ex_m1_z[index_blk_ex(threadIdx.x,threadIdx.y)] = g_ex_m1_z[index_ex(iz,ix)];
sh_ex_Vx0[index_blk_ex(threadIdx.x,threadIdx.y)] = g_ex_Vx0[index3d_ex(iz,ix,it+1)];
sh_ex_Vz0[index_blk_ex(threadIdx.x,threadIdx.y)] = g_ex_Vz0[index3d_ex(iz,ix,it+1)];
sh_ex_sigmaxx0[index_blk_ex(threadIdx.x,threadIdx.y)] = g_ex_sigmaxx0[index3d_ex(iz,ix,it+1)];
sh_ex_sigmazz0[index_blk_ex(threadIdx.x,threadIdx.y)] = g_ex_sigmazz0[index3d_ex(iz,ix,it+1)];
sh_ex_sigmaxz0[index_blk_ex(threadIdx.x,threadIdx.y)] = g_ex_sigmaxz0[index3d_ex(iz,ix,it+1)];
// --- halo loads: the 10th-order stencil reaches +/-5 points, so the first/last
// 5 thread rows/columns also fetch the 5-deep halo outside the tile.
// low-z halo
if(threadIdx.x<5){
sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x-5,threadIdx.y)] = g_ex_aux_m2m3_c[index_ex(iz-5,ix)];
sh_ex_aux_m2_c[index_blk_ex(threadIdx.x-5,threadIdx.y)] = g_ex_aux_m2_c[index_ex(iz-5,ix)];
sh_ex_aux_m3_c[index_blk_ex(threadIdx.x-5,threadIdx.y)] = g_ex_aux_m3_c[index_ex(iz-5,ix)];
sh_ex_m1_x[index_blk_ex(threadIdx.x-5,threadIdx.y)] = g_ex_m1_x[index_ex(iz-5,ix)];
sh_ex_m1_z[index_blk_ex(threadIdx.x-5,threadIdx.y)] = g_ex_m1_z[index_ex(iz-5,ix)];
sh_ex_Vx0[index_blk_ex(threadIdx.x-5,threadIdx.y)] = g_ex_Vx0[index3d_ex(iz-5,ix,it+1)];
sh_ex_Vz0[index_blk_ex(threadIdx.x-5,threadIdx.y)] = g_ex_Vz0[index3d_ex(iz-5,ix,it+1)];
sh_ex_sigmaxx0[index_blk_ex(threadIdx.x-5,threadIdx.y)] = g_ex_sigmaxx0[index3d_ex(iz-5,ix,it+1)];
sh_ex_sigmazz0[index_blk_ex(threadIdx.x-5,threadIdx.y)] = g_ex_sigmazz0[index3d_ex(iz-5,ix,it+1)];
sh_ex_sigmaxz0[index_blk_ex(threadIdx.x-5,threadIdx.y)] = g_ex_sigmaxz0[index3d_ex(iz-5,ix,it+1)];
}
// high-z halo
if(threadIdx.x>=TZ-5){
sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x+5,threadIdx.y)] = g_ex_aux_m2m3_c[index_ex(iz+5,ix)];
sh_ex_aux_m2_c[index_blk_ex(threadIdx.x+5,threadIdx.y)] = g_ex_aux_m2_c[index_ex(iz+5,ix)];
sh_ex_aux_m3_c[index_blk_ex(threadIdx.x+5,threadIdx.y)] = g_ex_aux_m3_c[index_ex(iz+5,ix)];
sh_ex_m1_x[index_blk_ex(threadIdx.x+5,threadIdx.y)] = g_ex_m1_x[index_ex(iz+5,ix)];
sh_ex_m1_z[index_blk_ex(threadIdx.x+5,threadIdx.y)] = g_ex_m1_z[index_ex(iz+5,ix)];
sh_ex_Vx0[index_blk_ex(threadIdx.x+5,threadIdx.y)] = g_ex_Vx0[index3d_ex(iz+5,ix,it+1)];
sh_ex_Vz0[index_blk_ex(threadIdx.x+5,threadIdx.y)] = g_ex_Vz0[index3d_ex(iz+5,ix,it+1)];
sh_ex_sigmaxx0[index_blk_ex(threadIdx.x+5,threadIdx.y)] = g_ex_sigmaxx0[index3d_ex(iz+5,ix,it+1)];
sh_ex_sigmazz0[index_blk_ex(threadIdx.x+5,threadIdx.y)] = g_ex_sigmazz0[index3d_ex(iz+5,ix,it+1)];
sh_ex_sigmaxz0[index_blk_ex(threadIdx.x+5,threadIdx.y)] = g_ex_sigmaxz0[index3d_ex(iz+5,ix,it+1)];
}
// low-x halo
if(threadIdx.y<5){
sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x,threadIdx.y-5)] = g_ex_aux_m2m3_c[index_ex(iz,ix-5)];
sh_ex_aux_m2_c[index_blk_ex(threadIdx.x,threadIdx.y-5)] = g_ex_aux_m2_c[index_ex(iz,ix-5)];
sh_ex_aux_m3_c[index_blk_ex(threadIdx.x,threadIdx.y-5)] = g_ex_aux_m3_c[index_ex(iz,ix-5)];
sh_ex_m1_x[index_blk_ex(threadIdx.x,threadIdx.y-5)] = g_ex_m1_x[index_ex(iz,ix-5)];
sh_ex_m1_z[index_blk_ex(threadIdx.x,threadIdx.y-5)] = g_ex_m1_z[index_ex(iz,ix-5)];
sh_ex_Vx0[index_blk_ex(threadIdx.x,threadIdx.y-5)] = g_ex_Vx0[index3d_ex(iz,ix-5,it+1)];
sh_ex_Vz0[index_blk_ex(threadIdx.x,threadIdx.y-5)] = g_ex_Vz0[index3d_ex(iz,ix-5,it+1)];
sh_ex_sigmaxx0[index_blk_ex(threadIdx.x,threadIdx.y-5)] = g_ex_sigmaxx0[index3d_ex(iz,ix-5,it+1)];
sh_ex_sigmazz0[index_blk_ex(threadIdx.x,threadIdx.y-5)] = g_ex_sigmazz0[index3d_ex(iz,ix-5,it+1)];
sh_ex_sigmaxz0[index_blk_ex(threadIdx.x,threadIdx.y-5)] = g_ex_sigmaxz0[index3d_ex(iz,ix-5,it+1)];
}
// high-x halo
if(threadIdx.y>=TX-5){
sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x,threadIdx.y+5)] = g_ex_aux_m2m3_c[index_ex(iz,ix+5)];
sh_ex_aux_m2_c[index_blk_ex(threadIdx.x,threadIdx.y+5)] = g_ex_aux_m2_c[index_ex(iz,ix+5)];
sh_ex_aux_m3_c[index_blk_ex(threadIdx.x,threadIdx.y+5)] = g_ex_aux_m3_c[index_ex(iz,ix+5)];
sh_ex_m1_x[index_blk_ex(threadIdx.x,threadIdx.y+5)] = g_ex_m1_x[index_ex(iz,ix+5)];
sh_ex_m1_z[index_blk_ex(threadIdx.x,threadIdx.y+5)] = g_ex_m1_z[index_ex(iz,ix+5)];
sh_ex_Vx0[index_blk_ex(threadIdx.x,threadIdx.y+5)] = g_ex_Vx0[index3d_ex(iz,ix+5,it+1)];
sh_ex_Vz0[index_blk_ex(threadIdx.x,threadIdx.y+5)] = g_ex_Vz0[index3d_ex(iz,ix+5,it+1)];
sh_ex_sigmaxx0[index_blk_ex(threadIdx.x,threadIdx.y+5)] = g_ex_sigmaxx0[index3d_ex(iz,ix+5,it+1)];
sh_ex_sigmazz0[index_blk_ex(threadIdx.x,threadIdx.y+5)] = g_ex_sigmazz0[index3d_ex(iz,ix+5,it+1)];
sh_ex_sigmaxz0[index_blk_ex(threadIdx.x,threadIdx.y+5)] = g_ex_sigmaxz0[index3d_ex(iz,ix+5,it+1)];
}
// --- corner halos. NOTE(review): the stencil sums below only use axis-aligned
// offsets (x +/- k with y fixed, or y +/- k with x fixed), so the corner tiles
// appear to be dead loads — confirm before removing.
if(threadIdx.x <5 && threadIdx.y <5){
sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x-5,threadIdx.y-5)] = g_ex_aux_m2m3_c[index_ex(iz-5,ix-5)];
sh_ex_aux_m2_c[index_blk_ex(threadIdx.x-5,threadIdx.y-5)] = g_ex_aux_m2_c[index_ex(iz-5,ix-5)];
sh_ex_aux_m3_c[index_blk_ex(threadIdx.x-5,threadIdx.y-5)] = g_ex_aux_m3_c[index_ex(iz-5,ix-5)];
sh_ex_m1_x[index_blk_ex(threadIdx.x-5,threadIdx.y-5)] = g_ex_m1_x[index_ex(iz-5,ix-5)];
sh_ex_m1_z[index_blk_ex(threadIdx.x-5,threadIdx.y-5)] = g_ex_m1_z[index_ex(iz-5,ix-5)];
sh_ex_Vx0[index_blk_ex(threadIdx.x-5,threadIdx.y-5)] = g_ex_Vx0[index3d_ex(iz-5,ix-5,it+1)];
sh_ex_Vz0[index_blk_ex(threadIdx.x-5,threadIdx.y-5)] = g_ex_Vz0[index3d_ex(iz-5,ix-5,it+1)];
sh_ex_sigmaxx0[index_blk_ex(threadIdx.x-5,threadIdx.y-5)] = g_ex_sigmaxx0[index3d_ex(iz-5,ix-5,it+1)];
sh_ex_sigmazz0[index_blk_ex(threadIdx.x-5,threadIdx.y-5)] = g_ex_sigmazz0[index3d_ex(iz-5,ix-5,it+1)];
sh_ex_sigmaxz0[index_blk_ex(threadIdx.x-5,threadIdx.y-5)] = g_ex_sigmaxz0[index3d_ex(iz-5,ix-5,it+1)];
}
// NOTE(review): threadIdx.x >= 5+TZ / threadIdx.y >= 5+TX can never hold
// (threadIdx.x < TZ, threadIdx.y < TX), so this corner load never executes;
// presumably TZ-5 / TX-5 were intended — harmless only if corners are unused.
if(threadIdx.x >= 5+TZ && threadIdx.y >= 5+TX){
sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x+5,threadIdx.y+5)] = g_ex_aux_m2m3_c[index_ex(iz+5,ix+5)];
sh_ex_aux_m2_c[index_blk_ex(threadIdx.x+5,threadIdx.y+5)] = g_ex_aux_m2_c[index_ex(iz+5,ix+5)];
sh_ex_aux_m3_c[index_blk_ex(threadIdx.x+5,threadIdx.y+5)] = g_ex_aux_m3_c[index_ex(iz+5,ix+5)];
sh_ex_m1_x[index_blk_ex(threadIdx.x+5,threadIdx.y+5)] = g_ex_m1_x[index_ex(iz+5,ix+5)];
sh_ex_m1_z[index_blk_ex(threadIdx.x+5,threadIdx.y+5)] = g_ex_m1_z[index_ex(iz+5,ix+5)];
sh_ex_Vx0[index_blk_ex(threadIdx.x+5,threadIdx.y+5)] = g_ex_Vx0[index3d_ex(iz+5,ix+5,it+1)];
sh_ex_Vz0[index_blk_ex(threadIdx.x+5,threadIdx.y+5)] = g_ex_Vz0[index3d_ex(iz+5,ix+5,it+1)];
sh_ex_sigmaxx0[index_blk_ex(threadIdx.x+5,threadIdx.y+5)] = g_ex_sigmaxx0[index3d_ex(iz+5,ix+5,it+1)];
sh_ex_sigmazz0[index_blk_ex(threadIdx.x+5,threadIdx.y+5)] = g_ex_sigmazz0[index3d_ex(iz+5,ix+5,it+1)];
sh_ex_sigmaxz0[index_blk_ex(threadIdx.x+5,threadIdx.y+5)] = g_ex_sigmaxz0[index3d_ex(iz+5,ix+5,it+1)];
}
// NOTE(review): threadIdx.x >= TZ+5 can never hold either (see above).
if(threadIdx.x >= TZ+5 && threadIdx.y <5){
sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x+5,threadIdx.y-5)] = g_ex_aux_m2m3_c[index_ex(iz+5,ix-5)];
sh_ex_aux_m2_c[index_blk_ex(threadIdx.x+5,threadIdx.y-5)] = g_ex_aux_m2_c[index_ex(iz+5,ix-5)];
sh_ex_aux_m3_c[index_blk_ex(threadIdx.x+5,threadIdx.y-5)] = g_ex_aux_m3_c[index_ex(iz+5,ix-5)];
sh_ex_m1_x[index_blk_ex(threadIdx.x+5,threadIdx.y-5)] = g_ex_m1_x[index_ex(iz+5,ix-5)];
sh_ex_m1_z[index_blk_ex(threadIdx.x+5,threadIdx.y-5)] = g_ex_m1_z[index_ex(iz+5,ix-5)];
sh_ex_Vx0[index_blk_ex(threadIdx.x+5,threadIdx.y-5)] = g_ex_Vx0[index3d_ex(iz+5,ix-5,it+1)];
sh_ex_Vz0[index_blk_ex(threadIdx.x+5,threadIdx.y-5)] = g_ex_Vz0[index3d_ex(iz+5,ix-5,it+1)];
sh_ex_sigmaxx0[index_blk_ex(threadIdx.x+5,threadIdx.y-5)] = g_ex_sigmaxx0[index3d_ex(iz+5,ix-5,it+1)];
sh_ex_sigmazz0[index_blk_ex(threadIdx.x+5,threadIdx.y-5)] = g_ex_sigmazz0[index3d_ex(iz+5,ix-5,it+1)];
sh_ex_sigmaxz0[index_blk_ex(threadIdx.x+5,threadIdx.y-5)] = g_ex_sigmaxz0[index3d_ex(iz+5,ix-5,it+1)];
}
if(threadIdx.x <5 && threadIdx.y >= TX-5){
sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x-5,threadIdx.y+5)] = g_ex_aux_m2m3_c[index_ex(iz-5,ix+5)];
sh_ex_aux_m2_c[index_blk_ex(threadIdx.x-5,threadIdx.y+5)] = g_ex_aux_m2_c[index_ex(iz-5,ix+5)];
sh_ex_aux_m3_c[index_blk_ex(threadIdx.x-5,threadIdx.y+5)] = g_ex_aux_m3_c[index_ex(iz-5,ix+5)];
sh_ex_m1_x[index_blk_ex(threadIdx.x-5,threadIdx.y+5)] = g_ex_m1_x[index_ex(iz-5,ix+5)];
sh_ex_m1_z[index_blk_ex(threadIdx.x-5,threadIdx.y+5)] = g_ex_m1_z[index_ex(iz-5,ix+5)];
sh_ex_Vx0[index_blk_ex(threadIdx.x-5,threadIdx.y+5)] = g_ex_Vx0[index3d_ex(iz-5,ix+5,it+1)];
sh_ex_Vz0[index_blk_ex(threadIdx.x-5,threadIdx.y+5)] = g_ex_Vz0[index3d_ex(iz-5,ix+5,it+1)];
sh_ex_sigmaxx0[index_blk_ex(threadIdx.x-5,threadIdx.y+5)] = g_ex_sigmaxx0[index3d_ex(iz-5,ix+5,it+1)];
sh_ex_sigmazz0[index_blk_ex(threadIdx.x-5,threadIdx.y+5)] = g_ex_sigmazz0[index3d_ex(iz-5,ix+5,it+1)];
sh_ex_sigmaxz0[index_blk_ex(threadIdx.x-5,threadIdx.y+5)] = g_ex_sigmaxz0[index3d_ex(iz-5,ix+5,it+1)];
}
// barrier: every tile (centre + halos) must be fully populated before any
// thread reads neighbours in the stencil sums below
__syncthreads();
// --- Vx update at time slice `it`: 10th-order adjoint staggered differences
// of sigmaxx (x-direction), sigmazz (x-direction) and sigmaxz (z-direction),
// weighted by the aux model tiles; also accumulates the it+2 slice.
g_ex_Vx0[index3d_ex(iz,ix ,it)] = g_ex_Vx0[index3d_ex(iz,ix ,it)] + g_ex_Vx0[index3d_ex(iz, ix, it+2)] +
sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x,threadIdx.y-5)]*c1*sh_ex_sigmaxx0[index_blk_ex(threadIdx.x,threadIdx.y-5)] +
sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x,threadIdx.y-4)]*c2*sh_ex_sigmaxx0[index_blk_ex(threadIdx.x,threadIdx.y-4)] +
sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x,threadIdx.y-3)]*c3*sh_ex_sigmaxx0[index_blk_ex(threadIdx.x,threadIdx.y-3)] +
sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x,threadIdx.y-2)]*c4*sh_ex_sigmaxx0[index_blk_ex(threadIdx.x,threadIdx.y-2)] +
sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x,threadIdx.y-1)]*c5*sh_ex_sigmaxx0[index_blk_ex(threadIdx.x,threadIdx.y-1)] -
sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x,threadIdx.y)] *c5*sh_ex_sigmaxx0[index_blk_ex(threadIdx.x,threadIdx.y)] -
sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x,threadIdx.y+1)]*c4*sh_ex_sigmaxx0[index_blk_ex(threadIdx.x,threadIdx.y+1)] -
sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x,threadIdx.y+2)]*c3*sh_ex_sigmaxx0[index_blk_ex(threadIdx.x,threadIdx.y+2)] -
sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x,threadIdx.y+3)]*c2*sh_ex_sigmaxx0[index_blk_ex(threadIdx.x,threadIdx.y+3)] -
sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x,threadIdx.y+4)]*c1*sh_ex_sigmaxx0[index_blk_ex(threadIdx.x,threadIdx.y+4)] +
sh_ex_aux_m2_c[index_blk_ex(threadIdx.x,threadIdx.y-5)]*c1*sh_ex_sigmazz0[index_blk_ex(threadIdx.x,threadIdx.y-5)] +
sh_ex_aux_m2_c[index_blk_ex(threadIdx.x,threadIdx.y-4)]*c2*sh_ex_sigmazz0[index_blk_ex(threadIdx.x,threadIdx.y-4)] +
sh_ex_aux_m2_c[index_blk_ex(threadIdx.x,threadIdx.y-3)]*c3*sh_ex_sigmazz0[index_blk_ex(threadIdx.x,threadIdx.y-3)] +
sh_ex_aux_m2_c[index_blk_ex(threadIdx.x,threadIdx.y-2)]*c4*sh_ex_sigmazz0[index_blk_ex(threadIdx.x,threadIdx.y-2)] +
sh_ex_aux_m2_c[index_blk_ex(threadIdx.x,threadIdx.y-1)]*c5*sh_ex_sigmazz0[index_blk_ex(threadIdx.x,threadIdx.y-1)] -
sh_ex_aux_m2_c[index_blk_ex(threadIdx.x,threadIdx.y)] *c5*sh_ex_sigmazz0[index_blk_ex(threadIdx.x,threadIdx.y)] -
sh_ex_aux_m2_c[index_blk_ex(threadIdx.x,threadIdx.y+1)]*c4*sh_ex_sigmazz0[index_blk_ex(threadIdx.x,threadIdx.y+1)] -
sh_ex_aux_m2_c[index_blk_ex(threadIdx.x,threadIdx.y+2)]*c3*sh_ex_sigmazz0[index_blk_ex(threadIdx.x,threadIdx.y+2)] -
sh_ex_aux_m2_c[index_blk_ex(threadIdx.x,threadIdx.y+3)]*c2*sh_ex_sigmazz0[index_blk_ex(threadIdx.x,threadIdx.y+3)] -
sh_ex_aux_m2_c[index_blk_ex(threadIdx.x,threadIdx.y+4)]*c1*sh_ex_sigmazz0[index_blk_ex(threadIdx.x,threadIdx.y+4)] +
sh_ex_aux_m3_c[index_blk_ex(threadIdx.x-4,threadIdx.y)]*c1*sh_ex_sigmaxz0[index_blk_ex(threadIdx.x-4,threadIdx.y)] +
sh_ex_aux_m3_c[index_blk_ex(threadIdx.x-3,threadIdx.y)]*c2*sh_ex_sigmaxz0[index_blk_ex(threadIdx.x-3,threadIdx.y)] +
sh_ex_aux_m3_c[index_blk_ex(threadIdx.x-2,threadIdx.y)]*c3*sh_ex_sigmaxz0[index_blk_ex(threadIdx.x-2,threadIdx.y)] +
sh_ex_aux_m3_c[index_blk_ex(threadIdx.x-1,threadIdx.y)]*c4*sh_ex_sigmaxz0[index_blk_ex(threadIdx.x-1,threadIdx.y)] +
sh_ex_aux_m3_c[index_blk_ex(threadIdx.x, threadIdx.y)] *c5*sh_ex_sigmaxz0[index_blk_ex(threadIdx.x,threadIdx.y)] -
sh_ex_aux_m3_c[index_blk_ex(threadIdx.x+1,threadIdx.y)]*c5*sh_ex_sigmaxz0[index_blk_ex(threadIdx.x+1,threadIdx.y)] -
sh_ex_aux_m3_c[index_blk_ex(threadIdx.x+2,threadIdx.y)]*c4*sh_ex_sigmaxz0[index_blk_ex(threadIdx.x+2,threadIdx.y)] -
sh_ex_aux_m3_c[index_blk_ex(threadIdx.x+3,threadIdx.y)]*c3*sh_ex_sigmaxz0[index_blk_ex(threadIdx.x+3,threadIdx.y)] -
sh_ex_aux_m3_c[index_blk_ex(threadIdx.x+4,threadIdx.y)]*c2*sh_ex_sigmaxz0[index_blk_ex(threadIdx.x+4,threadIdx.y)] -
sh_ex_aux_m3_c[index_blk_ex(threadIdx.x+5,threadIdx.y)]*c1*sh_ex_sigmaxz0[index_blk_ex(threadIdx.x+5,threadIdx.y)] ;
// barrier between updates; the shared tiles are read-only from here on, so
// this looks redundant — kept as-is (NOTE(review): confirm before removing)
__syncthreads();
// --- Vz update at slice `it`: z-differences of sigmaxx/sigmazz plus
// x-differences of sigmaxz
g_ex_Vz0[index3d_ex(iz,ix ,it)] = g_ex_Vz0[index3d_ex(iz,ix, it)] + g_ex_Vz0[index3d_ex(iz,ix ,it+2)] +
sh_ex_aux_m2_c[index_blk_ex(threadIdx.x-5,threadIdx.y)]*c1*sh_ex_sigmaxx0[index_blk_ex(threadIdx.x-5,threadIdx.y)] +
sh_ex_aux_m2_c[index_blk_ex(threadIdx.x-4,threadIdx.y)]*c2*sh_ex_sigmaxx0[index_blk_ex(threadIdx.x-4,threadIdx.y)] +
sh_ex_aux_m2_c[index_blk_ex(threadIdx.x-3,threadIdx.y)]*c3*sh_ex_sigmaxx0[index_blk_ex(threadIdx.x-3,threadIdx.y)] +
sh_ex_aux_m2_c[index_blk_ex(threadIdx.x-2,threadIdx.y)]*c4*sh_ex_sigmaxx0[index_blk_ex(threadIdx.x-2,threadIdx.y)] +
sh_ex_aux_m2_c[index_blk_ex(threadIdx.x-1,threadIdx.y)]*c5*sh_ex_sigmaxx0[index_blk_ex(threadIdx.x-1,threadIdx.y)] -
sh_ex_aux_m2_c[index_blk_ex(threadIdx.x, threadIdx.y)] *c5*sh_ex_sigmaxx0[index_blk_ex(threadIdx.x,threadIdx.y)] -
sh_ex_aux_m2_c[index_blk_ex(threadIdx.x+1,threadIdx.y)]*c4*sh_ex_sigmaxx0[index_blk_ex(threadIdx.x+1,threadIdx.y)] -
sh_ex_aux_m2_c[index_blk_ex(threadIdx.x+2,threadIdx.y)]*c3*sh_ex_sigmaxx0[index_blk_ex(threadIdx.x+2,threadIdx.y)] -
sh_ex_aux_m2_c[index_blk_ex(threadIdx.x+3,threadIdx.y)]*c2*sh_ex_sigmaxx0[index_blk_ex(threadIdx.x+3,threadIdx.y)] -
sh_ex_aux_m2_c[index_blk_ex(threadIdx.x+4,threadIdx.y)]*c1*sh_ex_sigmaxx0[index_blk_ex(threadIdx.x+4,threadIdx.y)] +
sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x-5,threadIdx.y)]*c1*sh_ex_sigmazz0[index_blk_ex(threadIdx.x-5,threadIdx.y)] +
sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x-4,threadIdx.y)]*c2*sh_ex_sigmazz0[index_blk_ex(threadIdx.x-4,threadIdx.y)] +
sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x-3,threadIdx.y)]*c3*sh_ex_sigmazz0[index_blk_ex(threadIdx.x-3,threadIdx.y)] +
sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x-2,threadIdx.y)]*c4*sh_ex_sigmazz0[index_blk_ex(threadIdx.x-2,threadIdx.y)] +
sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x-1,threadIdx.y)]*c5*sh_ex_sigmazz0[index_blk_ex(threadIdx.x-1,threadIdx.y)]
- sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x, threadIdx.y)] *c5*sh_ex_sigmazz0[index_blk_ex(threadIdx.x,threadIdx.y)] -
sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x+1,threadIdx.y)]*c4*sh_ex_sigmazz0[index_blk_ex(threadIdx.x+1,threadIdx.y)] -
sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x+2,threadIdx.y)]*c3*sh_ex_sigmazz0[index_blk_ex(threadIdx.x+2,threadIdx.y)] -
sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x+3,threadIdx.y)]*c2*sh_ex_sigmazz0[index_blk_ex(threadIdx.x+3,threadIdx.y)] -
sh_ex_aux_m2m3_c[index_blk_ex(threadIdx.x+4,threadIdx.y)]*c1*sh_ex_sigmazz0[index_blk_ex(threadIdx.x+4,threadIdx.y)] +
sh_ex_aux_m3_c[index_blk_ex(threadIdx.x,threadIdx.y-4)]*c1*sh_ex_sigmaxz0[index_blk_ex(threadIdx.x,threadIdx.y-4)] +
sh_ex_aux_m3_c[index_blk_ex(threadIdx.x,threadIdx.y-3)]*c2*sh_ex_sigmaxz0[index_blk_ex(threadIdx.x,threadIdx.y-3)] +
sh_ex_aux_m3_c[index_blk_ex(threadIdx.x,threadIdx.y-2)]*c3*sh_ex_sigmaxz0[index_blk_ex(threadIdx.x,threadIdx.y-2)] +
sh_ex_aux_m3_c[index_blk_ex(threadIdx.x,threadIdx.y-1)]*c4*sh_ex_sigmaxz0[index_blk_ex(threadIdx.x,threadIdx.y-1)] +
sh_ex_aux_m3_c[index_blk_ex(threadIdx.x,threadIdx.y)] *c5*sh_ex_sigmaxz0[index_blk_ex(threadIdx.x,threadIdx.y)] -
sh_ex_aux_m3_c[index_blk_ex(threadIdx.x,threadIdx.y+1)]*c5*sh_ex_sigmaxz0[index_blk_ex(threadIdx.x,threadIdx.y+1)] -
sh_ex_aux_m3_c[index_blk_ex(threadIdx.x,threadIdx.y+2)]*c4*sh_ex_sigmaxz0[index_blk_ex(threadIdx.x,threadIdx.y+2)] -
sh_ex_aux_m3_c[index_blk_ex(threadIdx.x,threadIdx.y+3)]*c3*sh_ex_sigmaxz0[index_blk_ex(threadIdx.x,threadIdx.y+3)] -
sh_ex_aux_m3_c[index_blk_ex(threadIdx.x,threadIdx.y+4)]*c2*sh_ex_sigmaxz0[index_blk_ex(threadIdx.x,threadIdx.y+4)] -
sh_ex_aux_m3_c[index_blk_ex(threadIdx.x,threadIdx.y+5)]*c1*sh_ex_sigmaxz0[index_blk_ex(threadIdx.x,threadIdx.y+5)] ;
// --- sigmaxx update at slice `it`: x-differences of Vx weighted by m1_x
g_ex_sigmaxx0[index3d_ex(iz,ix ,it)] = g_ex_sigmaxx0[index3d_ex(iz,ix ,it)] + g_ex_sigmaxx0[index3d_ex(iz,ix ,it+2)] +
sh_ex_m1_x[index_blk_ex(threadIdx.x,threadIdx.y-4)]*c1*sh_ex_Vx0[index_blk_ex(threadIdx.x,threadIdx.y-4)] +
sh_ex_m1_x[index_blk_ex(threadIdx.x,threadIdx.y-3)]*c2*sh_ex_Vx0[index_blk_ex(threadIdx.x,threadIdx.y-3)] +
sh_ex_m1_x[index_blk_ex(threadIdx.x,threadIdx.y-2)]*c3*sh_ex_Vx0[index_blk_ex(threadIdx.x,threadIdx.y-2)] +
sh_ex_m1_x[index_blk_ex(threadIdx.x,threadIdx.y-1)]*c4*sh_ex_Vx0[index_blk_ex(threadIdx.x,threadIdx.y-1)] +
sh_ex_m1_x[index_blk_ex(threadIdx.x,threadIdx.y)] *c5*sh_ex_Vx0[index_blk_ex(threadIdx.x,threadIdx.y)] -
sh_ex_m1_x[index_blk_ex(threadIdx.x,threadIdx.y+1)]*c5*sh_ex_Vx0[index_blk_ex(threadIdx.x,threadIdx.y+1)] -
sh_ex_m1_x[index_blk_ex(threadIdx.x,threadIdx.y+2)]*c4*sh_ex_Vx0[index_blk_ex(threadIdx.x,threadIdx.y+2)] -
sh_ex_m1_x[index_blk_ex(threadIdx.x,threadIdx.y+3)]*c3*sh_ex_Vx0[index_blk_ex(threadIdx.x,threadIdx.y+3)] -
sh_ex_m1_x[index_blk_ex(threadIdx.x,threadIdx.y+4)]*c2*sh_ex_Vx0[index_blk_ex(threadIdx.x,threadIdx.y+4)] -
sh_ex_m1_x[index_blk_ex(threadIdx.x,threadIdx.y+5)]*c1*sh_ex_Vx0[index_blk_ex(threadIdx.x,threadIdx.y+5)] ;
__syncthreads();
// --- sigmazz update at slice `it`: z-differences of Vz weighted by m1_z
g_ex_sigmazz0[index3d_ex(iz,ix ,it)] = g_ex_sigmazz0[index3d_ex(iz,ix ,it)] + g_ex_sigmazz0[index3d_ex(iz,ix ,it+2)] +
sh_ex_m1_z[index_blk_ex(threadIdx.x-4,threadIdx.y)]*c1*sh_ex_Vz0[index_blk_ex(threadIdx.x-4,threadIdx.y)] +
sh_ex_m1_z[index_blk_ex(threadIdx.x-3,threadIdx.y)]*c2*sh_ex_Vz0[index_blk_ex(threadIdx.x-3,threadIdx.y)] +
sh_ex_m1_z[index_blk_ex(threadIdx.x-2,threadIdx.y)]*c3*sh_ex_Vz0[index_blk_ex(threadIdx.x-2,threadIdx.y)] +
sh_ex_m1_z[index_blk_ex(threadIdx.x-1,threadIdx.y)]*c4*sh_ex_Vz0[index_blk_ex(threadIdx.x-1,threadIdx.y)] +
sh_ex_m1_z[index_blk_ex(threadIdx.x, threadIdx.y)] *c5*sh_ex_Vz0[index_blk_ex(threadIdx.x,threadIdx.y)] -
sh_ex_m1_z[index_blk_ex(threadIdx.x+1,threadIdx.y)]*c5*sh_ex_Vz0[index_blk_ex(threadIdx.x+1,threadIdx.y)] -
sh_ex_m1_z[index_blk_ex(threadIdx.x+2,threadIdx.y)]*c4*sh_ex_Vz0[index_blk_ex(threadIdx.x+2,threadIdx.y)] -
sh_ex_m1_z[index_blk_ex(threadIdx.x+3,threadIdx.y)]*c3*sh_ex_Vz0[index_blk_ex(threadIdx.x+3,threadIdx.y)] -
sh_ex_m1_z[index_blk_ex(threadIdx.x+4,threadIdx.y)]*c2*sh_ex_Vz0[index_blk_ex(threadIdx.x+4,threadIdx.y)] -
sh_ex_m1_z[index_blk_ex(threadIdx.x+5,threadIdx.y)]*c1*sh_ex_Vz0[index_blk_ex(threadIdx.x+5,threadIdx.y)] ;
__syncthreads();
// --- sigmaxz update at slice `it`: z-differences of Vx (m1_x) plus
// x-differences of Vz (m1_z); the statement continues past this block
g_ex_sigmaxz0[index3d_ex(iz,ix ,it)] = g_ex_sigmaxz0[index3d_ex(iz,ix ,it)] + g_ex_sigmaxz0[index3d_ex(iz,ix ,it+2)] +
sh_ex_m1_x[index_blk_ex(threadIdx.x-5,threadIdx.y)]*c1*sh_ex_Vx0[index_blk_ex(threadIdx.x-5,threadIdx.y)] +
sh_ex_m1_x[index_blk_ex(threadIdx.x-4,threadIdx.y)]*c2*sh_ex_Vx0[index_blk_ex(threadIdx.x-4,threadIdx.y)] +
sh_ex_m1_x[index_blk_ex(threadIdx.x-3,threadIdx.y)]*c3*sh_ex_Vx0[index_blk_ex(threadIdx.x-3,threadIdx.y)] +
sh_ex_m1_x[index_blk_ex(threadIdx.x-2,threadIdx.y)]*c4*sh_ex_Vx0[index_blk_ex(threadIdx.x-2,threadIdx.y)] +
sh_ex_m1_x[index_blk_ex(threadIdx.x-1,threadIdx.y)]*c5*sh_ex_Vx0[index_blk_ex(threadIdx.x-1,threadIdx.y)] -
sh_ex_m1_x[index_blk_ex(threadIdx.x, threadIdx.y)] *c5*sh_ex_Vx0[index_blk_ex(threadIdx.x,threadIdx.y)] -
sh_ex_m1_x[index_blk_ex(threadIdx.x+1,threadIdx.y)]*c4*sh_ex_Vx0[index_blk_ex(threadIdx.x+1,threadIdx.y)] -
sh_ex_m1_x[index_blk_ex(threadIdx.x+2,threadIdx.y)]*c3*sh_ex_Vx0[index_blk_ex(threadIdx.x+2,threadIdx.y)] -
sh_ex_m1_x[index_blk_ex(threadIdx.x+3,threadIdx.y)]*c2*sh_ex_Vx0[index_blk_ex(threadIdx.x+3,threadIdx.y)] -
sh_ex_m1_x[index_blk_ex(threadIdx.x+4,threadIdx.y)]*c1*sh_ex_Vx0[index_blk_ex(threadIdx.x+4,threadIdx.y)] //;
+ sh_ex_m1_z[index_blk_ex(threadIdx.x,threadIdx.y-5)]*c1*sh_ex_Vz0[index_blk_ex(threadIdx.x,threadIdx.y-5)] +
sh_ex_m1_z[index_blk_ex(threadIdx.x,threadIdx.y-4)]*c2*sh_ex_Vz0[index_blk_ex(threadIdx.x,threadIdx.y-4)] +
sh_ex_m1_z[index_blk_ex(threadIdx.x,threadIdx.y-3)]*c3*sh_ex_Vz0[index_blk_ex(threadIdx.x,threadIdx.y-3)] +
sh_ex_m1_z[index_blk_ex(threadIdx.x,threadIdx.y-2)]*c4*sh_ex_Vz0[index_blk_ex(threadIdx.x,threadIdx.y-2)] +
sh_ex_m1_z[index_blk_ex(threadIdx.x,threadIdx.y-1)]*c5*sh_ex_Vz0[index_blk_ex(threadIdx.x,threadIdx.y-1)] -
sh_ex_m1_z[index_blk_ex(threadIdx.x,threadIdx.y)] *c5*sh_ex_Vz0[index_blk_ex(threadIdx.x,threadIdx.y)] -
sh_ex_m1_z[index_blk_ex(threadIdx.x,threadIdx.y+1)]*c4*sh_ex_Vz0[index_blk_ex(threadIdx.x,threadIdx.y+1)] -
sh_ex_m1_z[index_blk_ex(threadIdx.x,threadIdx.y+2)]*c3*sh_ex_Vz0[index_blk_ex(threadIdx.x,threadIdx.y+2)] -
sh_ex_m1_z[index_blk_ex(threadIdx.x,threadIdx.y+3)]*c2*sh_ex_Vz0[index_blk_ex(threadIdx.x,threadIdx.y+3)] -
sh_ex_m1_z[index_blk_ex(threadIdx.x,threadIdx.y+4)]*c1*sh_ex_Vz0[index_blk_ex(threadIdx.x,threadIdx.y+4)] ;
__syncthreads();
}

/*
 * 2D RTM back-propagation step, global-memory-only variant (no shared tiles).
 *
 * Updates the five wavefields (Vx, Vz, sigmaxx, sigmazz, sigmaxz) at time
 * slice `it`, reading neighbour values from slice it+1 and accumulating the
 * it+2 slice, with 10th-order staggered-grid coefficients c1..c5.
 * One thread per (iz,ix); iz from blockIdx.x/threadIdx.x, ix from
 * blockIdx.y/threadIdx.y. Arrays are the "ex_" extended versions indexed via
 * index_ex / index3d_ex, so the +/-5 stencil reach stays in bounds.
 *
 * NOTE(review): no `if (iz < nz && ix < nx)` guard — assumes the launch grid
 * exactly covers (nz, nx); confirm against the launching host code.
 * `nt` is unused in this kernel body (presumably consumed by the index
 * macros or kept for signature uniformity — TODO confirm).
 */
__global__ void rtm_gpu_kernel_l1(int it,int nt, int nz, int nx,
float * g_ex_Vx0, float * g_ex_Vz0, float * g_ex_sigmaxx0, float * g_ex_sigmazz0, float * g_ex_sigmaxz0, //(nz, nx, nt)
float * g_ex_m1_x,float * g_ex_m1_z,float * g_ex_aux_m2_c, float * g_ex_aux_m3_c, float * g_ex_aux_m2m3_c)//(nz+10, nx+10)
{
// 10th-order staggered-grid FD coefficients (double-precision literals,
// converted once at initialisation of the float locals)
float c1=35.0/294912.0,c2=-405.0/229376.0,c3=567.0/40960.0,c4=-735.0/8192.0,c5=19845.0/16384.0;
//GPU thread index
int iz, ix;
iz = blockIdx.x*blockDim.x + threadIdx.x;
ix = blockIdx.y*blockDim.y + threadIdx.y;
//gt = it;
// --- Vx update: x-differences of sigmaxx (m2m3) and sigmazz (m2),
// z-differences of sigmaxz (m3)
g_ex_Vx0[index3d_ex(iz,ix ,it)] = g_ex_Vx0[index3d_ex(iz,ix ,it)] + g_ex_Vx0[index3d_ex(iz, ix, it+2)] +
g_ex_aux_m2m3_c[index_ex(iz,ix-5)]*c1*g_ex_sigmaxx0[index3d_ex(iz,ix-5,it+1)] +
g_ex_aux_m2m3_c[index_ex(iz,ix-4)]*c2*g_ex_sigmaxx0[index3d_ex(iz,ix-4,it+1)] +
g_ex_aux_m2m3_c[index_ex(iz,ix-3)]*c3*g_ex_sigmaxx0[index3d_ex(iz,ix-3,it+1)] +
g_ex_aux_m2m3_c[index_ex(iz,ix-2)]*c4*g_ex_sigmaxx0[index3d_ex(iz,ix-2,it+1)] +
g_ex_aux_m2m3_c[index_ex(iz,ix-1)]*c5*g_ex_sigmaxx0[index3d_ex(iz,ix-1,it+1)] -
g_ex_aux_m2m3_c[index_ex(iz,ix)] *c5*g_ex_sigmaxx0[index3d_ex(iz,ix,it+1)] -
g_ex_aux_m2m3_c[index_ex(iz,ix+1)]*c4*g_ex_sigmaxx0[index3d_ex(iz,ix+1,it+1)] -
g_ex_aux_m2m3_c[index_ex(iz,ix+2)]*c3*g_ex_sigmaxx0[index3d_ex(iz,ix+2,it+1)] -
g_ex_aux_m2m3_c[index_ex(iz,ix+3)]*c2*g_ex_sigmaxx0[index3d_ex(iz,ix+3,it+1)] -
g_ex_aux_m2m3_c[index_ex(iz,ix+4)]*c1*g_ex_sigmaxx0[index3d_ex(iz,ix+4,it+1)] +
g_ex_aux_m2_c[index_ex(iz,ix-5)]*c1*g_ex_sigmazz0[index3d_ex(iz,ix-5,it+1)] +
g_ex_aux_m2_c[index_ex(iz,ix-4)]*c2*g_ex_sigmazz0[index3d_ex(iz,ix-4,it+1)] +
g_ex_aux_m2_c[index_ex(iz,ix-3)]*c3*g_ex_sigmazz0[index3d_ex(iz,ix-3,it+1)] +
g_ex_aux_m2_c[index_ex(iz,ix-2)]*c4*g_ex_sigmazz0[index3d_ex(iz,ix-2,it+1)] +
g_ex_aux_m2_c[index_ex(iz,ix-1)]*c5*g_ex_sigmazz0[index3d_ex(iz,ix-1,it+1)] -
g_ex_aux_m2_c[index_ex(iz,ix)] *c5*g_ex_sigmazz0[index3d_ex(iz,ix,it+1)] -
g_ex_aux_m2_c[index_ex(iz,ix+1)]*c4*g_ex_sigmazz0[index3d_ex(iz,ix+1,it+1)] -
g_ex_aux_m2_c[index_ex(iz,ix+2)]*c3*g_ex_sigmazz0[index3d_ex(iz,ix+2,it+1)] -
g_ex_aux_m2_c[index_ex(iz,ix+3)]*c2*g_ex_sigmazz0[index3d_ex(iz,ix+3,it+1)] -
g_ex_aux_m2_c[index_ex(iz,ix+4)]*c1*g_ex_sigmazz0[index3d_ex(iz,ix+4,it+1)] +
g_ex_aux_m3_c[index_ex(iz-4,ix)]*c1*g_ex_sigmaxz0[index3d_ex(iz-4,ix,it+1)] +
g_ex_aux_m3_c[index_ex(iz-3,ix)]*c2*g_ex_sigmaxz0[index3d_ex(iz-3,ix,it+1)] +
g_ex_aux_m3_c[index_ex(iz-2,ix)]*c3*g_ex_sigmaxz0[index3d_ex(iz-2,ix,it+1)] +
g_ex_aux_m3_c[index_ex(iz-1,ix)]*c4*g_ex_sigmaxz0[index3d_ex(iz-1,ix,it+1)] +
g_ex_aux_m3_c[index_ex(iz,ix)] *c5*g_ex_sigmaxz0[index3d_ex(iz,ix,it+1)] -
g_ex_aux_m3_c[index_ex(iz+1,ix)]*c5*g_ex_sigmaxz0[index3d_ex(iz+1,ix,it+1)] -
g_ex_aux_m3_c[index_ex(iz+2,ix)]*c4*g_ex_sigmaxz0[index3d_ex(iz+2,ix,it+1)] -
g_ex_aux_m3_c[index_ex(iz+3,ix)]*c3*g_ex_sigmaxz0[index3d_ex(iz+3,ix,it+1)] -
g_ex_aux_m3_c[index_ex(iz+4,ix)]*c2*g_ex_sigmaxz0[index3d_ex(iz+4,ix,it+1)] -
g_ex_aux_m3_c[index_ex(iz+5,ix)]*c1*g_ex_sigmaxz0[index3d_ex(iz+5,ix,it+1)] ;
// --- Vz update: z-differences of sigmaxx (m2) and sigmazz (m2m3),
// x-differences of sigmaxz (m3)
g_ex_Vz0[index3d_ex(iz,ix ,it)] = g_ex_Vz0[index3d_ex(iz,ix ,it)] + g_ex_Vz0[index3d_ex(iz,ix ,it+2)] +
g_ex_aux_m2_c[index_ex(iz-5,ix)]*c1*g_ex_sigmaxx0[index3d_ex(iz-5,ix,it+1)] +
g_ex_aux_m2_c[index_ex(iz-4,ix)]*c2*g_ex_sigmaxx0[index3d_ex(iz-4,ix,it+1)] +
g_ex_aux_m2_c[index_ex(iz-3,ix)]*c3*g_ex_sigmaxx0[index3d_ex(iz-3,ix,it+1)] +
g_ex_aux_m2_c[index_ex(iz-2,ix)]*c4*g_ex_sigmaxx0[index3d_ex(iz-2,ix,it+1)] +
g_ex_aux_m2_c[index_ex(iz-1,ix)]*c5*g_ex_sigmaxx0[index3d_ex(iz-1,ix,it+1)] -
g_ex_aux_m2_c[index_ex(iz,ix)] *c5*g_ex_sigmaxx0[index3d_ex(iz,ix,it+1)] -
g_ex_aux_m2_c[index_ex(iz+1,ix)]*c4*g_ex_sigmaxx0[index3d_ex(iz+1,ix,it+1)] -
g_ex_aux_m2_c[index_ex(iz+2,ix)]*c3*g_ex_sigmaxx0[index3d_ex(iz+2,ix,it+1)] -
g_ex_aux_m2_c[index_ex(iz+3,ix)]*c2*g_ex_sigmaxx0[index3d_ex(iz+3,ix,it+1)] -
g_ex_aux_m2_c[index_ex(iz+4,ix)]*c1*g_ex_sigmaxx0[index3d_ex(iz+4,ix,it+1)] +
g_ex_aux_m2m3_c[index_ex(iz-5,ix)]*c1*g_ex_sigmazz0[index3d_ex(iz-5,ix,it+1)] +
g_ex_aux_m2m3_c[index_ex(iz-4,ix)]*c2*g_ex_sigmazz0[index3d_ex(iz-4,ix,it+1)] +
g_ex_aux_m2m3_c[index_ex(iz-3,ix)]*c3*g_ex_sigmazz0[index3d_ex(iz-3,ix,it+1)] +
g_ex_aux_m2m3_c[index_ex(iz-2,ix)]*c4*g_ex_sigmazz0[index3d_ex(iz-2,ix,it+1)] +
g_ex_aux_m2m3_c[index_ex(iz-1,ix)]*c5*g_ex_sigmazz0[index3d_ex(iz-1,ix,it+1)] -
g_ex_aux_m2m3_c[index_ex(iz,ix)] *c5*g_ex_sigmazz0[index3d_ex(iz,ix,it+1)] -
g_ex_aux_m2m3_c[index_ex(iz+1,ix)]*c4*g_ex_sigmazz0[index3d_ex(iz+1,ix,it+1)] -
g_ex_aux_m2m3_c[index_ex(iz+2,ix)]*c3*g_ex_sigmazz0[index3d_ex(iz+2,ix,it+1)] -
g_ex_aux_m2m3_c[index_ex(iz+3,ix)]*c2*g_ex_sigmazz0[index3d_ex(iz+3,ix,it+1)] -
g_ex_aux_m2m3_c[index_ex(iz+4,ix)]*c1*g_ex_sigmazz0[index3d_ex(iz+4,ix,it+1)] +
g_ex_aux_m3_c[index_ex(iz,ix-4)]*c1*g_ex_sigmaxz0[index3d_ex(iz,ix-4,it+1)] +
g_ex_aux_m3_c[index_ex(iz,ix-3)]*c2*g_ex_sigmaxz0[index3d_ex(iz,ix-3,it+1)] +
g_ex_aux_m3_c[index_ex(iz,ix-2)]*c3*g_ex_sigmaxz0[index3d_ex(iz,ix-2,it+1)] +
g_ex_aux_m3_c[index_ex(iz,ix-1)]*c4*g_ex_sigmaxz0[index3d_ex(iz,ix-1,it+1)] +
g_ex_aux_m3_c[index_ex(iz,ix)] *c5*g_ex_sigmaxz0[index3d_ex(iz,ix,it+1)] -
g_ex_aux_m3_c[index_ex(iz,ix+1)]*c5*g_ex_sigmaxz0[index3d_ex(iz,ix+1,it+1)] -
g_ex_aux_m3_c[index_ex(iz,ix+2)]*c4*g_ex_sigmaxz0[index3d_ex(iz,ix+2,it+1)] -
g_ex_aux_m3_c[index_ex(iz,ix+3)]*c3*g_ex_sigmaxz0[index3d_ex(iz,ix+3,it+1)] -
g_ex_aux_m3_c[index_ex(iz,ix+4)]*c2*g_ex_sigmaxz0[index3d_ex(iz,ix+4,it+1)] -
g_ex_aux_m3_c[index_ex(iz,ix+5)]*c1*g_ex_sigmaxz0[index3d_ex(iz,ix+5,it+1)] ;
// --- sigmaxx update: x-differences of Vx weighted by m1_x
g_ex_sigmaxx0[index3d_ex(iz,ix ,it)] = g_ex_sigmaxx0[index3d_ex(iz,ix ,it)] + g_ex_sigmaxx0[index3d_ex(iz,ix ,it+2)] +
g_ex_m1_x[index_ex(iz,ix-4)]*c1*g_ex_Vx0[index3d_ex(iz,ix-4,it+1)] +
g_ex_m1_x[index_ex(iz,ix-3)]*c2*g_ex_Vx0[index3d_ex(iz,ix-3,it+1)] +
g_ex_m1_x[index_ex(iz,ix-2)]*c3*g_ex_Vx0[index3d_ex(iz,ix-2,it+1)] +
g_ex_m1_x[index_ex(iz,ix-1)]*c4*g_ex_Vx0[index3d_ex(iz,ix-1,it+1)] +
g_ex_m1_x[index_ex(iz,ix)] *c5*g_ex_Vx0[index3d_ex(iz,ix,it+1)] -
g_ex_m1_x[index_ex(iz,ix+1)]*c5*g_ex_Vx0[index3d_ex(iz,ix+1,it+1)] -
g_ex_m1_x[index_ex(iz,ix+2)]*c4*g_ex_Vx0[index3d_ex(iz,ix+2,it+1)] -
g_ex_m1_x[index_ex(iz,ix+3)]*c3*g_ex_Vx0[index3d_ex(iz,ix+3,it+1)] -
g_ex_m1_x[index_ex(iz,ix+4)]*c2*g_ex_Vx0[index3d_ex(iz,ix+4,it+1)] -
g_ex_m1_x[index_ex(iz,ix+5)]*c1*g_ex_Vx0[index3d_ex(iz,ix+5,it+1)] ;
// --- sigmazz update: z-differences of Vz weighted by m1_z
g_ex_sigmazz0[index3d_ex(iz,ix ,it)] = g_ex_sigmazz0[index3d_ex(iz,ix ,it)] + g_ex_sigmazz0[index3d_ex(iz,ix ,it+2)] +
g_ex_m1_z[index_ex(iz-4,ix)]*c1*g_ex_Vz0[index3d_ex(iz-4,ix,it+1)] +
g_ex_m1_z[index_ex(iz-3,ix)]*c2*g_ex_Vz0[index3d_ex(iz-3,ix,it+1)] +
g_ex_m1_z[index_ex(iz-2,ix)]*c3*g_ex_Vz0[index3d_ex(iz-2,ix,it+1)] +
g_ex_m1_z[index_ex(iz-1,ix)]*c4*g_ex_Vz0[index3d_ex(iz-1,ix,it+1)] +
g_ex_m1_z[index_ex(iz,ix)] *c5*g_ex_Vz0[index3d_ex(iz,ix,it+1)] -
g_ex_m1_z[index_ex(iz+1,ix)]*c5*g_ex_Vz0[index3d_ex(iz+1,ix,it+1)] -
g_ex_m1_z[index_ex(iz+2,ix)]*c4*g_ex_Vz0[index3d_ex(iz+2,ix,it+1)] -
g_ex_m1_z[index_ex(iz+3,ix)]*c3*g_ex_Vz0[index3d_ex(iz+3,ix,it+1)] -
g_ex_m1_z[index_ex(iz+4,ix)]*c2*g_ex_Vz0[index3d_ex(iz+4,ix,it+1)] -
g_ex_m1_z[index_ex(iz+5,ix)]*c1*g_ex_Vz0[index3d_ex(iz+5,ix,it+1)] ;
// --- sigmaxz update: z-differences of Vx (m1_x) plus x-differences of Vz (m1_z)
g_ex_sigmaxz0[index3d_ex(iz,ix ,it)] = g_ex_sigmaxz0[index3d_ex(iz,ix ,it)] + g_ex_sigmaxz0[index3d_ex(iz,ix ,it+2)] +
g_ex_m1_x[index_ex(iz-5,ix)]*c1*g_ex_Vx0[index3d_ex(iz-5,ix,it+1)] +
g_ex_m1_x[index_ex(iz-4,ix)]*c2*g_ex_Vx0[index3d_ex(iz-4,ix,it+1)] +
g_ex_m1_x[index_ex(iz-3,ix)]*c3*g_ex_Vx0[index3d_ex(iz-3,ix,it+1)] +
g_ex_m1_x[index_ex(iz-2,ix)]*c4*g_ex_Vx0[index3d_ex(iz-2,ix,it+1)] +
g_ex_m1_x[index_ex(iz-1,ix)]*c5*g_ex_Vx0[index3d_ex(iz-1,ix,it+1)] -
g_ex_m1_x[index_ex(iz,ix)] *c5*g_ex_Vx0[index3d_ex(iz,ix,it+1)] -
g_ex_m1_x[index_ex(iz+1,ix)]*c4*g_ex_Vx0[index3d_ex(iz+1,ix,it+1)] -
g_ex_m1_x[index_ex(iz+2,ix)]*c3*g_ex_Vx0[index3d_ex(iz+2,ix,it+1)] -
g_ex_m1_x[index_ex(iz+3,ix)]*c2*g_ex_Vx0[index3d_ex(iz+3,ix,it+1)] -
g_ex_m1_x[index_ex(iz+4,ix)]*c1*g_ex_Vx0[index3d_ex(iz+4,ix,it+1)] //;
+ g_ex_m1_z[index_ex(iz,ix-5)]*c1*g_ex_Vz0[index3d_ex(iz,ix-5,it+1)] +
g_ex_m1_z[index_ex(iz,ix-4)]*c2*g_ex_Vz0[index3d_ex(iz,ix-4,it+1)] +
g_ex_m1_z[index_ex(iz,ix-3)]*c3*g_ex_Vz0[index3d_ex(iz,ix-3,it+1)] +
g_ex_m1_z[index_ex(iz,ix-2)]*c4*g_ex_Vz0[index3d_ex(iz,ix-2,it+1)] +
g_ex_m1_z[index_ex(iz,ix-1)]*c5*g_ex_Vz0[index3d_ex(iz,ix-1,it+1)] -
g_ex_m1_z[index_ex(iz,ix)] *c5*g_ex_Vz0[index3d_ex(iz,ix,it+1)] -
g_ex_m1_z[index_ex(iz,ix+1)]*c4*g_ex_Vz0[index3d_ex(iz,ix+1,it+1)] -
g_ex_m1_z[index_ex(iz,ix+2)]*c3*g_ex_Vz0[index3d_ex(iz,ix+2,it+1)] -
g_ex_m1_z[index_ex(iz,ix+3)]*c2*g_ex_Vz0[index3d_ex(iz,ix+3,it+1)] -
g_ex_m1_z[index_ex(iz,ix+4)]*c1*g_ex_Vz0[index3d_ex(iz,ix+4,it+1)] ;
}
2b5ef91f8a2b9a4518ff76e11d2efdfed03abe60.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * Copyright (C) 2020 Jaslo Ziska
 *
 * This file is part of cuda-wrapper.
 *
 * This software may be modified and distributed under the terms of the
 * 3-clause BSD license. See accompanying file LICENSE for details.
 */

#include <cuda_wrapper/cuda_wrapper.hpp>

// No-op kernel: exercises a parameterless launch through the cuda::function facade.
__global__ void __kernel_simple() {}

// Element-wise addition: d_c[i] = d_a[i] + d_b[i] for each launched thread.
// NOTE(review): there is no bounds guard, so the launch configuration must
// cover exactly the array length (grid * block == element count) — TODO confirm
// the callers guarantee this.
__global__ void __kernel_add(double const* d_a, double const* d_b, double* d_c)
{
    unsigned int gid = threadIdx.x + blockIdx.x * blockDim.x;
    d_c[gid] = d_a[gid] + d_b[gid];
}

// Host-side wrappers exposing the kernels through cuda::function.
// NOTE(review): identifiers beginning with a double underscore are reserved to
// the implementation in C++; consider renaming __kernel_* if this generated
// file is ever maintained by hand.
cuda::function<void ()> kernel_simple(__kernel_simple);
cuda::function<void (double const*, double const*, double*)> kernel_add(__kernel_add);
2b5ef91f8a2b9a4518ff76e11d2efdfed03abe60.cu
/*
 * Copyright (C) 2020 Jaslo Ziska
 *
 * This file is part of cuda-wrapper.
 *
 * This software may be modified and distributed under the terms of the
 * 3-clause BSD license. See accompanying file LICENSE for details.
 */

#include <cuda_wrapper/cuda_wrapper.hpp>

// No-op kernel: exercises a parameterless launch through the cuda::function facade.
__global__ void __kernel_simple() {}

// Element-wise addition: d_c[i] = d_a[i] + d_b[i] for each launched thread.
// NOTE(review): there is no bounds guard, so the launch configuration must
// cover exactly the array length (grid * block == element count) — TODO confirm
// the callers guarantee this.
__global__ void __kernel_add(double const* d_a, double const* d_b, double* d_c)
{
    unsigned int gid = threadIdx.x + blockIdx.x * blockDim.x;
    d_c[gid] = d_a[gid] + d_b[gid];
}

// Host-side wrappers exposing the kernels through cuda::function.
// NOTE(review): identifiers beginning with a double underscore are reserved to
// the implementation in C++; consider renaming __kernel_* if this file is ever
// maintained by hand.
cuda::function<void ()> kernel_simple(__kernel_simple);
cuda::function<void (double const*, double const*, double*)> kernel_add(__kernel_add);
3c110959f741afa8f7e372fb052e58f5bd440506.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
#include<cuda.h>
#include<math.h>
#define N 512

// In-place tree reduction of a[0..N) performed by a single block of N/2
// threads; the grand total ends up in a[0] and is published through o[0].
// Destroys the contents of a.  Must be launched as <<<1, N/2>>>.
__global__ void Sum (int *a,int *o)
{
    int tid = blockDim.x*blockIdx.x+threadIdx.x;
    for(int i = N/2; i > 0; i = i/2)
    {
        if(tid < i)
        {
            a[tid]+=a[tid+i];
        }
        // Barrier placed OUTSIDE the divergent branch so every thread of the
        // block reaches it; it orders each halving step against the next.
        // The original kernel had no barrier at all and raced across warps.
        __syncthreads();
    }
    if(tid == 0)   // single writer; previously all N/2 threads raced on o[0]
    {
        o[0] = a[0];
    }
}

// Replaces every element a[i] (i < N) with its squared deviation from avg.
// NOTE: avg is an integer (truncated) mean, so results carry a small bias
// when the true mean is not integral.
__global__ void standardDeviation(int *a,int avg)
{
    int tid = blockDim.x*blockIdx.x+threadIdx.x;
    if(tid<N)
    {
        a[tid] -= avg;
        a[tid] = a[tid]*a[tid];
    }
}

// Computes the population standard deviation of the values 1..N on the GPU:
// pass 1 sums the raw data for the mean, pass 2 sums squared deviations.
int main()
{
    int *h_a,*d_a,*o_a,*oh_a,*d_a1;
    int size = N*sizeof(int);

    h_a = (int *)malloc(size);
    oh_a = (int *)malloc(size);
    hipMalloc((void**)&d_a,size);
    hipMalloc((void**)&o_a,size);
    hipMalloc((void**)&d_a1,size);   // second copy: Sum destroys its input

    for(int i = 1; i <= N; i++)
    {
        h_a[i-1] = i;
    }

    hipMemcpy(d_a,h_a,size,hipMemcpyHostToDevice);
    hipMemcpy(d_a1,h_a,size,hipMemcpyHostToDevice);

    // Pass 1: total of the raw values -> arithmetic mean.
    hipLaunchKernelGGL(( Sum), dim3(1),dim3(N/2), 0, 0, d_a,o_a);
    hipDeviceSynchronize();
    hipMemcpy(oh_a,o_a,size,hipMemcpyDeviceToHost);
    int arithmetcMean = oh_a[0]/N;

    // Pass 2: squared deviations, then their total.
    hipLaunchKernelGGL(( standardDeviation), dim3(1),dim3(N), 0, 0, d_a1,arithmetcMean);
    hipLaunchKernelGGL(( Sum), dim3(1),dim3(N/2), 0, 0, d_a1,o_a);
    hipDeviceSynchronize();
    hipMemcpy(oh_a,o_a,size,hipMemcpyDeviceToHost);

    // Keep the variance in floating point: the original code divided two
    // ints first, silently truncating the fraction before the square root.
    double variance = (double)oh_a[0]/N;
    printf("Standard Deviation is - %.2f\n", sqrt(variance));

    hipFree(d_a);
    free(h_a);
    hipFree(o_a);
    free(oh_a);
    hipFree(d_a1);
    return 0;
}
3c110959f741afa8f7e372fb052e58f5bd440506.cu
#include<stdio.h>
#include<stdlib.h>
#include<cuda.h>
#include<math.h>
#define N 512

// In-place tree reduction of a[0..N) performed by a single block of N/2
// threads; the grand total ends up in a[0] and is published through o[0].
// Destroys the contents of a.  Must be launched as <<<1, N/2>>>.
__global__ void Sum (int *a,int *o)
{
    int tid = blockDim.x*blockIdx.x+threadIdx.x;
    for(int i = N/2; i > 0; i = i/2)
    {
        if(tid < i)
        {
            a[tid]+=a[tid+i];
        }
        // Barrier placed OUTSIDE the divergent branch so every thread of the
        // block reaches it; it orders each halving step against the next.
        // The original kernel had no barrier at all and raced across warps.
        __syncthreads();
    }
    if(tid == 0)   // single writer; previously all N/2 threads raced on o[0]
    {
        o[0] = a[0];
    }
}

// Replaces every element a[i] (i < N) with its squared deviation from avg.
// NOTE: avg is an integer (truncated) mean, so results carry a small bias
// when the true mean is not integral.
__global__ void standardDeviation(int *a,int avg)
{
    int tid = blockDim.x*blockIdx.x+threadIdx.x;
    if(tid<N)
    {
        a[tid] -= avg;
        a[tid] = a[tid]*a[tid];
    }
}

// Computes the population standard deviation of the values 1..N on the GPU:
// pass 1 sums the raw data for the mean, pass 2 sums squared deviations.
int main()
{
    int *h_a,*d_a,*o_a,*oh_a,*d_a1;
    int size = N*sizeof(int);

    h_a = (int *)malloc(size);
    oh_a = (int *)malloc(size);
    cudaMalloc((void**)&d_a,size);
    cudaMalloc((void**)&o_a,size);
    cudaMalloc((void**)&d_a1,size);   // second copy: Sum destroys its input

    for(int i = 1; i <= N; i++)
    {
        h_a[i-1] = i;
    }

    cudaMemcpy(d_a,h_a,size,cudaMemcpyHostToDevice);
    cudaMemcpy(d_a1,h_a,size,cudaMemcpyHostToDevice);

    // Pass 1: total of the raw values -> arithmetic mean.
    Sum<<<1,N/2>>>(d_a,o_a);
    cudaDeviceSynchronize();
    cudaMemcpy(oh_a,o_a,size,cudaMemcpyDeviceToHost);
    int arithmetcMean = oh_a[0]/N;

    // Pass 2: squared deviations, then their total.
    standardDeviation<<<1,N>>>(d_a1,arithmetcMean);
    Sum<<<1,N/2>>>(d_a1,o_a);
    cudaDeviceSynchronize();
    cudaMemcpy(oh_a,o_a,size,cudaMemcpyDeviceToHost);

    // Keep the variance in floating point: the original code divided two
    // ints first, silently truncating the fraction before the square root.
    double variance = (double)oh_a[0]/N;
    printf("Standard Deviation is - %.2f\n", sqrt(variance));

    cudaFree(d_a);
    free(h_a);
    cudaFree(o_a);
    free(oh_a);
    cudaFree(d_a1);
    return 0;
}
aa51871b0dcbdf7028a4fd947ecb9ddd2e5d0cfa.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use
this file except in compliance with the License. You may obtain a copy of the
License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License. */

#include "hl_base.h"
#include "hl_device_functions.cuh"
#include "paddle/legacy/utils/Logging.h"

// Per-sequence max pooling: one block per sequence; each thread strides over
// the dim columns, writing the column max to output and the row (instance)
// index where it occurred to index (for the backward pass).
__global__ void KeMaxSequenceForward(real* input,
                                     const int* sequence,
                                     real* output,
                                     int* index,
                                     int numSequences,
                                     int dim) {
  int dimIdx = threadIdx.x;
  int sequenceId = blockIdx.x;
  if (sequenceId >= numSequences) return;
  // sequence holds CSR-style start offsets; [start, end) are this sequence's rows.
  int start = sequence[sequenceId];
  int end = sequence[sequenceId + 1];
  for (int i = dimIdx; i < dim; i += blockDim.x) {
    real tmp = -HL_FLOAT_MAX;
    int tmpId = -1;
    for (int insId = start; insId < end; insId++) {
      if (tmp < input[insId * dim + i]) {
        tmp = input[insId * dim + i];
        tmpId = insId;
      }
    }
    output[sequenceId * dim + i] = tmp;
    index[sequenceId * dim + i] = tmpId;
  }
}

// Host launcher: 256 threads per sequence, one block per sequence.
void hl_max_sequence_forward(real* input,
                             const int* sequence,
                             real* output,
                             int* index,
                             int numSequences,
                             int dim) {
  CHECK_NOTNULL(input);
  CHECK_NOTNULL(sequence);
  CHECK_NOTNULL(output);
  CHECK_NOTNULL(index);
  dim3 threads(256, 1);
  dim3 grid(numSequences, 1);
  hipLaunchKernelGGL(( KeMaxSequenceForward), dim3(grid), dim3(threads), 0, STREAM_DEFAULT,
      input, sequence, output, index, numSequences, dim);
  CHECK_SYNC("hl_max_sequence_forward failed");
}

// Scatters output gradients back to the argmax rows recorded by the forward
// pass.  Plain += (no atomics): each (sequence, column) output cell has a
// single argmax row, and rows of different sequences are disjoint, so the
// written addresses do not collide.
__global__ void KeMaxSequenceBackward(
    real* outputGrad, int* index, real* inputGrad, int numSequences, int dim) {
  int idx = threadIdx.x + blockIdx.x * blockDim.x;
  int colIdx = idx % dim;
  if (idx < numSequences * dim) {
    int insId = index[idx];
    inputGrad[insId * dim + colIdx] += outputGrad[idx];
  }
}

// Host launcher for the max-pooling backward scatter; 128-thread blocks.
void hl_max_sequence_backward(
    real* outputGrad, int* index, real* inputGrad, int numSequences, int dim) {
  CHECK_NOTNULL(outputGrad);
  CHECK_NOTNULL(index);
  CHECK_NOTNULL(inputGrad);
  unsigned int blocks = (numSequences * dim + 128 - 1) / 128;
  dim3 threads(128, 1);
  dim3 grid(blocks, 1);
  hipLaunchKernelGGL(( KeMaxSequenceBackward), dim3(grid), dim3(threads), 0, STREAM_DEFAULT,
      outputGrad, index, inputGrad, numSequences, dim);
  CHECK_SYNC("hl_max_sequence_backward failed");
}

// Row gather/scatter between a lookup table and per-sample rows:
// AddRow == 0 accumulates table rows into output rows; otherwise output rows
// are atomically accumulated into the table (e.g. embedding gradient update).
// Rows whose id falls outside [0, tableSize) are silently skipped.
template <int blockDimX, int blockDimY, int gridDimX, bool AddRow>
__global__ void KeMatrixAddRows(real* output,
                                real* table,
                                int* ids,
                                int numSamples,
                                int tableSize,
                                int dim) {
  int idx = threadIdx.x;
  int idy = threadIdx.y;
  int sampleId = blockIdx.x + idy * gridDimX;
  while (sampleId < numSamples) {
    int tableId = ids[sampleId];
    if ((0 <= tableId) && (tableId < tableSize)) {
      real* outputData = output + sampleId * dim;
      real* tableData = table + tableId * dim;
      for (int i = idx; i < dim; i += blockDimX) {
        if (AddRow == 0) {
          outputData[i] += tableData[i];
        } else {
          paddle::paddleAtomicAdd(&tableData[i], outputData[i]);
        }
      }
    }
    sampleId += blockDimY * gridDimX;
  }
}

// Permutes rows between batch-major and sequence-major layouts according to
// batchIndex (seq2batch selects the direction, isAdd accumulate vs. copy).
template <int blockDimX, int blockDimY, int gridDimX, bool seq2batch, bool isAdd>
__global__ void KeSequence2Batch(real* batch,
                                 real* sequence,
                                 const int* batchIndex,
                                 int seqWidth,
                                 int batchCount) {
  int idx = threadIdx.x;
  int idy = threadIdx.y;
  int id = blockIdx.x + idy * gridDimX;
  while (id < batchCount) {
    int seqId = batchIndex[id];
    real* batchData = batch + id * seqWidth;
    real* seqData = sequence + seqId * seqWidth;
    for (int i = idx; i < seqWidth; i += blockDimX) {
      if (seq2batch) {
        if (isAdd) {
          batchData[i] += seqData[i];
        } else {
          batchData[i] = seqData[i];
        }
      } else {
        if (isAdd) {
          seqData[i] += batchData[i];
        } else {
          seqData[i] = batchData[i];
        }
      }
    }
    id += blockDimY * gridDimX;
  }
}

// Copy (no accumulation) between sequence and batch layouts.
void hl_sequence2batch_copy(real* batch,
                            real* sequence,
                            const int* batchIndex,
                            int seqWidth,
                            int batchCount,
                            bool seq2batch) {
  CHECK_NOTNULL(sequence);
  CHECK_NOTNULL(batch);
  CHECK_NOTNULL(batchIndex);
  dim3 threads(128, 8);
  dim3 grid(8, 1);
  if (seq2batch) {
    hipLaunchKernelGGL(( KeSequence2Batch<128, 8, 8, 1, 0>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT,
        batch, sequence, batchIndex, seqWidth, batchCount);
  } else {
    hipLaunchKernelGGL(( KeSequence2Batch<128, 8, 8, 0, 0>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT,
        batch, sequence, batchIndex, seqWidth, batchCount);
  }
  CHECK_SYNC("hl_sequence2batch_copy failed");
}

// Accumulating variant of hl_sequence2batch_copy (destination += source).
void hl_sequence2batch_add(real* batch,
                           real* sequence,
                           int* batchIndex,
                           int seqWidth,
                           int batchCount,
                           bool seq2batch) {
  CHECK_NOTNULL(sequence);
  CHECK_NOTNULL(batch);
  CHECK_NOTNULL(batchIndex);
  dim3 threads(128, 8);
  dim3 grid(8, 1);
  if (seq2batch) {
    hipLaunchKernelGGL(( KeSequence2Batch<128, 8, 8, 1, 1>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT,
        batch, sequence, batchIndex, seqWidth, batchCount);
  } else {
    hipLaunchKernelGGL(( KeSequence2Batch<128, 8, 8, 0, 1>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT,
        batch, sequence, batchIndex, seqWidth, batchCount);
  }
  CHECK_SYNC("hl_sequence2batch_add failed");
}

// Copies between a packed sequence layout and a zero-padded
// (maxSequenceLength x numSequences x sequenceWidth) batch layout; optionally
// scales by 1/length (normByTimes).  Padding slots are zero-filled when
// copying sequence -> batch.
template <bool normByTimes, bool seq2batch>
__global__ void KeSequence2BatchPadding(real* batch,
                                        real* sequence,
                                        const int* sequenceStartPositions,
                                        const size_t sequenceWidth,
                                        const size_t maxSequenceLength,
                                        const size_t numSequences) {
  int batchIdx = blockIdx.y;
  int sequenceStart = sequenceStartPositions[batchIdx];
  int sequenceLength = sequenceStartPositions[batchIdx + 1] - sequenceStart;
  int sequenceIdx = blockIdx.x * blockDim.y + threadIdx.y;
  int batchBaseIdx = (sequenceIdx * numSequences + batchIdx) * sequenceWidth;
  int sequenceBaseIdx = (sequenceStart + sequenceIdx) * sequenceWidth;
  real scale = normByTimes ? (1.0f / (real)sequenceLength) : 1.0f;
  if (sequenceIdx < sequenceLength) {
    if (seq2batch) {
      /* sequence -> batch */
      for (int i = threadIdx.x; i < sequenceWidth; i += blockDim.x) {
        batch[batchBaseIdx + i] = scale * sequence[sequenceBaseIdx + i];
      }
    } else {
      /* batch -> sequence */
      for (int i = threadIdx.x; i < sequenceWidth; i += blockDim.x) {
        sequence[sequenceBaseIdx + i] = scale * batch[batchBaseIdx + i];
      }
    }
  } else if (sequenceIdx < maxSequenceLength) {
    if (seq2batch) {
      /* sequence -> batch */
      for (int i = threadIdx.x; i < sequenceWidth; i += blockDim.x) {
        batch[batchBaseIdx + i] = 0;
      }
    }
  }
}

// Host launcher for the padded copy.  Fast path: a single unnormalized
// sequence is a straight device-to-device memcpy.
void hl_sequence2batch_copy_padding(real* batch,
                                    real* sequence,
                                    const int* sequenceStartPositions,
                                    const size_t sequenceWidth,
                                    const size_t maxSequenceLength,
                                    const size_t numSequences,
                                    bool normByTimes,
                                    bool seq2batch) {
  CHECK_NOTNULL(batch);
  CHECK_NOTNULL(sequence);
  CHECK_NOTNULL(sequenceStartPositions);

  if (!normByTimes && numSequences == 1) {
    size_t elementCount = maxSequenceLength * sequenceWidth;
    if (seq2batch) {
      /* sequence -> batch */
      hl_memcpy_device2device(batch, sequence, sizeof(real) * elementCount);
    } else {
      /* batch -> sequence */
      hl_memcpy_device2device(sequence, batch, sizeof(real) * elementCount);
    }
    return;
  }

  const int CUDA_BLOCK_SIZE = 512;

  /* At least use 32 threads to copy sequenceWidth elements,
     and at least 8 elements for each thread. */
  int blockDimX = ((((sequenceWidth + 7) >> 3) + 31) >> 5) << 5;
  blockDimX = (blockDimX < CUDA_BLOCK_SIZE) ? blockDimX : CUDA_BLOCK_SIZE;

  int blockDimY = CUDA_BLOCK_SIZE / blockDimX;
  dim3 threads(blockDimX, blockDimY);

  int gridDimX = (maxSequenceLength + blockDimY - 1) / blockDimY;
  int gridDimY = numSequences;
  dim3 grid(gridDimX, gridDimY);

  if (seq2batch) {
    /* sequence -> batch */
    if (normByTimes) {
      hipLaunchKernelGGL(( KeSequence2BatchPadding<1, 1>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT,
          batch,
          sequence,
          sequenceStartPositions,
          sequenceWidth,
          maxSequenceLength,
          numSequences);
    } else {
      hipLaunchKernelGGL(( KeSequence2BatchPadding<0, 1>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT,
          batch,
          sequence,
          sequenceStartPositions,
          sequenceWidth,
          maxSequenceLength,
          numSequences);
    }
  } else {
    /* batch -> sequence */
    if (normByTimes) {
      hipLaunchKernelGGL(( KeSequence2BatchPadding<1, 0>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT,
          batch,
          sequence,
          sequenceStartPositions,
          sequenceWidth,
          maxSequenceLength,
          numSequences);
    } else {
      hipLaunchKernelGGL(( KeSequence2BatchPadding<0, 0>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT,
          batch,
          sequence,
          sequenceStartPositions,
          sequenceWidth,
          maxSequenceLength,
          numSequences);
    }
  }
  CHECK_SYNC("hl_sequence2batch_copy_padding failed");
}

// Precision-matched reciprocal square root helpers for the avg kernels.
__device__ inline float my_rsqrt(float x) { return rsqrtf(x); }

__device__ inline double my_rsqrt(double x) { return rsqrt(x); }

// Per-sequence pooling accumulated into dst.
// mode semantics (from the ternary below): 1 = plain sum, 0 = average
// (sum / length), otherwise sum scaled by rsqrt(length).
__global__ void KeSequenceAvgForward(real* dst,
                                     real* src,
                                     const int* starts,
                                     int height,
                                     int width,
                                     const int mode) {
  int gid = blockIdx.x * blockDim.x + threadIdx.x;
  int row = gid / width;
  int col = gid % width;

  if (gid < height * width) {
    int start = starts[row];
    int end = starts[row + 1];
    int seqLength = end - start;
    if (seqLength == 0) return;  // empty sequence contributes nothing
    real sum = 0.0;
    for (int i = start; i < end; i++) {
      sum += src[i * width + col];
    }
    sum = mode == 1 ? sum
                    : (mode == 0 ? sum / seqLength
                                 : sum * my_rsqrt((real)seqLength));
    dst[gid] += sum;
  }
}

// Host launcher; validates mode in {0, 1, 2}.
void hl_sequence_avg_forward(real* dst,
                             real* src,
                             const int* starts,
                             int height,
                             int width,
                             const int mode) {
  CHECK_NOTNULL(dst);
  CHECK_NOTNULL(src);
  CHECK_NOTNULL(starts);

  int block = 512;
  int grid = DIVUP(width * height, 512);

  CHECK(mode == 0 || mode == 1 || mode == 2)
      << "mode error in hl_sequence_avg_forward!";

  hipLaunchKernelGGL(( KeSequenceAvgForward), dim3(grid), dim3(block), 0, STREAM_DEFAULT,
      dst, src, starts, height, width, mode);
  CHECK_SYNC("hl_sequence_avg_forward failed");
}

// Backward of the avg pooling: broadcasts each (row, col) gradient, scaled by
// the same mode-dependent factor as the forward pass, to all rows of the
// sequence.
__global__ void KeSequenceAvgBackward(real* dst,
                                      real* src,
                                      const int* starts,
                                      int height,
                                      int width,
                                      const int mode) {
  int gid = blockIdx.x * blockDim.x + threadIdx.x;
  int row = gid / width;
  int col = gid % width;

  if (gid < height * width) {
    int start = starts[row];
    int end = starts[row + 1];
    int seqLength = end - start;
    if (seqLength == 0) return;
    real grad = src[gid];
    grad = mode == 1 ? grad
                     : (mode == 0 ? grad / seqLength
                                  : grad * my_rsqrt((real)seqLength));
    for (int i = start; i < end; i++) {
      dst[i * width + col] += grad;
    }
  }
}

// Host launcher; validates mode in {0, 1, 2}.
void hl_sequence_avg_backward(real* dst,
                              real* src,
                              const int* starts,
                              int height,
                              int width,
                              const int mode) {
  CHECK_NOTNULL(dst);
  CHECK_NOTNULL(src);
  CHECK_NOTNULL(starts);

  int block = 512;
  int grid = DIVUP(width * height, 512);

  CHECK(mode == 0 || mode == 1 || mode == 2)
      << "mode error in hl_sequence_avg_backward!";

  hipLaunchKernelGGL(( KeSequenceAvgBackward), dim3(grid), dim3(block), 0, STREAM_DEFAULT,
      dst, src, starts, height, width, mode);
  CHECK_SYNC("hl_sequence_avg_backward failed");
}
aa51871b0dcbdf7028a4fd947ecb9ddd2e5d0cfa.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use
this file except in compliance with the License. You may obtain a copy of the
License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License. */

#include "hl_base.h"
#include "hl_device_functions.cuh"
#include "paddle/legacy/utils/Logging.h"

// Per-sequence max pooling: one block per sequence; each thread strides over
// the dim columns, writing the column max to output and the row (instance)
// index where it occurred to index (for the backward pass).
__global__ void KeMaxSequenceForward(real* input,
                                     const int* sequence,
                                     real* output,
                                     int* index,
                                     int numSequences,
                                     int dim) {
  int dimIdx = threadIdx.x;
  int sequenceId = blockIdx.x;
  if (sequenceId >= numSequences) return;
  // sequence holds CSR-style start offsets; [start, end) are this sequence's rows.
  int start = sequence[sequenceId];
  int end = sequence[sequenceId + 1];
  for (int i = dimIdx; i < dim; i += blockDim.x) {
    real tmp = -HL_FLOAT_MAX;
    int tmpId = -1;
    for (int insId = start; insId < end; insId++) {
      if (tmp < input[insId * dim + i]) {
        tmp = input[insId * dim + i];
        tmpId = insId;
      }
    }
    output[sequenceId * dim + i] = tmp;
    index[sequenceId * dim + i] = tmpId;
  }
}

// Host launcher: 256 threads per sequence, one block per sequence.
void hl_max_sequence_forward(real* input,
                             const int* sequence,
                             real* output,
                             int* index,
                             int numSequences,
                             int dim) {
  CHECK_NOTNULL(input);
  CHECK_NOTNULL(sequence);
  CHECK_NOTNULL(output);
  CHECK_NOTNULL(index);
  dim3 threads(256, 1);
  dim3 grid(numSequences, 1);
  KeMaxSequenceForward<<<grid, threads, 0, STREAM_DEFAULT>>>(
      input, sequence, output, index, numSequences, dim);
  CHECK_SYNC("hl_max_sequence_forward failed");
}

// Scatters output gradients back to the argmax rows recorded by the forward
// pass.  Plain += (no atomics): each (sequence, column) output cell has a
// single argmax row, and rows of different sequences are disjoint, so the
// written addresses do not collide.
__global__ void KeMaxSequenceBackward(
    real* outputGrad, int* index, real* inputGrad, int numSequences, int dim) {
  int idx = threadIdx.x + blockIdx.x * blockDim.x;
  int colIdx = idx % dim;
  if (idx < numSequences * dim) {
    int insId = index[idx];
    inputGrad[insId * dim + colIdx] += outputGrad[idx];
  }
}

// Host launcher for the max-pooling backward scatter; 128-thread blocks.
void hl_max_sequence_backward(
    real* outputGrad, int* index, real* inputGrad, int numSequences, int dim) {
  CHECK_NOTNULL(outputGrad);
  CHECK_NOTNULL(index);
  CHECK_NOTNULL(inputGrad);
  unsigned int blocks = (numSequences * dim + 128 - 1) / 128;
  dim3 threads(128, 1);
  dim3 grid(blocks, 1);
  KeMaxSequenceBackward<<<grid, threads, 0, STREAM_DEFAULT>>>(
      outputGrad, index, inputGrad, numSequences, dim);
  CHECK_SYNC("hl_max_sequence_backward failed");
}

// Row gather/scatter between a lookup table and per-sample rows:
// AddRow == 0 accumulates table rows into output rows; otherwise output rows
// are atomically accumulated into the table (e.g. embedding gradient update).
// Rows whose id falls outside [0, tableSize) are silently skipped.
template <int blockDimX, int blockDimY, int gridDimX, bool AddRow>
__global__ void KeMatrixAddRows(real* output,
                                real* table,
                                int* ids,
                                int numSamples,
                                int tableSize,
                                int dim) {
  int idx = threadIdx.x;
  int idy = threadIdx.y;
  int sampleId = blockIdx.x + idy * gridDimX;
  while (sampleId < numSamples) {
    int tableId = ids[sampleId];
    if ((0 <= tableId) && (tableId < tableSize)) {
      real* outputData = output + sampleId * dim;
      real* tableData = table + tableId * dim;
      for (int i = idx; i < dim; i += blockDimX) {
        if (AddRow == 0) {
          outputData[i] += tableData[i];
        } else {
          paddle::paddleAtomicAdd(&tableData[i], outputData[i]);
        }
      }
    }
    sampleId += blockDimY * gridDimX;
  }
}

// Permutes rows between batch-major and sequence-major layouts according to
// batchIndex (seq2batch selects the direction, isAdd accumulate vs. copy).
template <int blockDimX, int blockDimY, int gridDimX, bool seq2batch, bool isAdd>
__global__ void KeSequence2Batch(real* batch,
                                 real* sequence,
                                 const int* batchIndex,
                                 int seqWidth,
                                 int batchCount) {
  int idx = threadIdx.x;
  int idy = threadIdx.y;
  int id = blockIdx.x + idy * gridDimX;
  while (id < batchCount) {
    int seqId = batchIndex[id];
    real* batchData = batch + id * seqWidth;
    real* seqData = sequence + seqId * seqWidth;
    for (int i = idx; i < seqWidth; i += blockDimX) {
      if (seq2batch) {
        if (isAdd) {
          batchData[i] += seqData[i];
        } else {
          batchData[i] = seqData[i];
        }
      } else {
        if (isAdd) {
          seqData[i] += batchData[i];
        } else {
          seqData[i] = batchData[i];
        }
      }
    }
    id += blockDimY * gridDimX;
  }
}

// Copy (no accumulation) between sequence and batch layouts.
void hl_sequence2batch_copy(real* batch,
                            real* sequence,
                            const int* batchIndex,
                            int seqWidth,
                            int batchCount,
                            bool seq2batch) {
  CHECK_NOTNULL(sequence);
  CHECK_NOTNULL(batch);
  CHECK_NOTNULL(batchIndex);
  dim3 threads(128, 8);
  dim3 grid(8, 1);
  if (seq2batch) {
    KeSequence2Batch<128, 8, 8, 1, 0><<<grid, threads, 0, STREAM_DEFAULT>>>(
        batch, sequence, batchIndex, seqWidth, batchCount);
  } else {
    KeSequence2Batch<128, 8, 8, 0, 0><<<grid, threads, 0, STREAM_DEFAULT>>>(
        batch, sequence, batchIndex, seqWidth, batchCount);
  }
  CHECK_SYNC("hl_sequence2batch_copy failed");
}

// Accumulating variant of hl_sequence2batch_copy (destination += source).
void hl_sequence2batch_add(real* batch,
                           real* sequence,
                           int* batchIndex,
                           int seqWidth,
                           int batchCount,
                           bool seq2batch) {
  CHECK_NOTNULL(sequence);
  CHECK_NOTNULL(batch);
  CHECK_NOTNULL(batchIndex);
  dim3 threads(128, 8);
  dim3 grid(8, 1);
  if (seq2batch) {
    KeSequence2Batch<128, 8, 8, 1, 1><<<grid, threads, 0, STREAM_DEFAULT>>>(
        batch, sequence, batchIndex, seqWidth, batchCount);
  } else {
    KeSequence2Batch<128, 8, 8, 0, 1><<<grid, threads, 0, STREAM_DEFAULT>>>(
        batch, sequence, batchIndex, seqWidth, batchCount);
  }
  CHECK_SYNC("hl_sequence2batch_add failed");
}

// Copies between a packed sequence layout and a zero-padded
// (maxSequenceLength x numSequences x sequenceWidth) batch layout; optionally
// scales by 1/length (normByTimes).  Padding slots are zero-filled when
// copying sequence -> batch.
template <bool normByTimes, bool seq2batch>
__global__ void KeSequence2BatchPadding(real* batch,
                                        real* sequence,
                                        const int* sequenceStartPositions,
                                        const size_t sequenceWidth,
                                        const size_t maxSequenceLength,
                                        const size_t numSequences) {
  int batchIdx = blockIdx.y;
  int sequenceStart = sequenceStartPositions[batchIdx];
  int sequenceLength = sequenceStartPositions[batchIdx + 1] - sequenceStart;
  int sequenceIdx = blockIdx.x * blockDim.y + threadIdx.y;
  int batchBaseIdx = (sequenceIdx * numSequences + batchIdx) * sequenceWidth;
  int sequenceBaseIdx = (sequenceStart + sequenceIdx) * sequenceWidth;
  real scale = normByTimes ? (1.0f / (real)sequenceLength) : 1.0f;
  if (sequenceIdx < sequenceLength) {
    if (seq2batch) {
      /* sequence -> batch */
      for (int i = threadIdx.x; i < sequenceWidth; i += blockDim.x) {
        batch[batchBaseIdx + i] = scale * sequence[sequenceBaseIdx + i];
      }
    } else {
      /* batch -> sequence */
      for (int i = threadIdx.x; i < sequenceWidth; i += blockDim.x) {
        sequence[sequenceBaseIdx + i] = scale * batch[batchBaseIdx + i];
      }
    }
  } else if (sequenceIdx < maxSequenceLength) {
    if (seq2batch) {
      /* sequence -> batch */
      for (int i = threadIdx.x; i < sequenceWidth; i += blockDim.x) {
        batch[batchBaseIdx + i] = 0;
      }
    }
  }
}

// Host launcher for the padded copy.  Fast path: a single unnormalized
// sequence is a straight device-to-device memcpy.
void hl_sequence2batch_copy_padding(real* batch,
                                    real* sequence,
                                    const int* sequenceStartPositions,
                                    const size_t sequenceWidth,
                                    const size_t maxSequenceLength,
                                    const size_t numSequences,
                                    bool normByTimes,
                                    bool seq2batch) {
  CHECK_NOTNULL(batch);
  CHECK_NOTNULL(sequence);
  CHECK_NOTNULL(sequenceStartPositions);

  if (!normByTimes && numSequences == 1) {
    size_t elementCount = maxSequenceLength * sequenceWidth;
    if (seq2batch) {
      /* sequence -> batch */
      hl_memcpy_device2device(batch, sequence, sizeof(real) * elementCount);
    } else {
      /* batch -> sequence */
      hl_memcpy_device2device(sequence, batch, sizeof(real) * elementCount);
    }
    return;
  }

  const int CUDA_BLOCK_SIZE = 512;

  /* At least use 32 threads to copy sequenceWidth elements,
     and at least 8 elements for each thread. */
  int blockDimX = ((((sequenceWidth + 7) >> 3) + 31) >> 5) << 5;
  blockDimX = (blockDimX < CUDA_BLOCK_SIZE) ? blockDimX : CUDA_BLOCK_SIZE;

  int blockDimY = CUDA_BLOCK_SIZE / blockDimX;
  dim3 threads(blockDimX, blockDimY);

  int gridDimX = (maxSequenceLength + blockDimY - 1) / blockDimY;
  int gridDimY = numSequences;
  dim3 grid(gridDimX, gridDimY);

  if (seq2batch) {
    /* sequence -> batch */
    if (normByTimes) {
      KeSequence2BatchPadding<1, 1><<<grid, threads, 0, STREAM_DEFAULT>>>(
          batch,
          sequence,
          sequenceStartPositions,
          sequenceWidth,
          maxSequenceLength,
          numSequences);
    } else {
      KeSequence2BatchPadding<0, 1><<<grid, threads, 0, STREAM_DEFAULT>>>(
          batch,
          sequence,
          sequenceStartPositions,
          sequenceWidth,
          maxSequenceLength,
          numSequences);
    }
  } else {
    /* batch -> sequence */
    if (normByTimes) {
      KeSequence2BatchPadding<1, 0><<<grid, threads, 0, STREAM_DEFAULT>>>(
          batch,
          sequence,
          sequenceStartPositions,
          sequenceWidth,
          maxSequenceLength,
          numSequences);
    } else {
      KeSequence2BatchPadding<0, 0><<<grid, threads, 0, STREAM_DEFAULT>>>(
          batch,
          sequence,
          sequenceStartPositions,
          sequenceWidth,
          maxSequenceLength,
          numSequences);
    }
  }
  CHECK_SYNC("hl_sequence2batch_copy_padding failed");
}

// Precision-matched reciprocal square root helpers for the avg kernels.
__device__ inline float my_rsqrt(float x) { return rsqrtf(x); }

__device__ inline double my_rsqrt(double x) { return rsqrt(x); }

// Per-sequence pooling accumulated into dst.
// mode semantics (from the ternary below): 1 = plain sum, 0 = average
// (sum / length), otherwise sum scaled by rsqrt(length).
__global__ void KeSequenceAvgForward(real* dst,
                                     real* src,
                                     const int* starts,
                                     int height,
                                     int width,
                                     const int mode) {
  int gid = blockIdx.x * blockDim.x + threadIdx.x;
  int row = gid / width;
  int col = gid % width;

  if (gid < height * width) {
    int start = starts[row];
    int end = starts[row + 1];
    int seqLength = end - start;
    if (seqLength == 0) return;  // empty sequence contributes nothing
    real sum = 0.0;
    for (int i = start; i < end; i++) {
      sum += src[i * width + col];
    }
    sum = mode == 1 ? sum
                    : (mode == 0 ? sum / seqLength
                                 : sum * my_rsqrt((real)seqLength));
    dst[gid] += sum;
  }
}

// Host launcher; validates mode in {0, 1, 2}.
void hl_sequence_avg_forward(real* dst,
                             real* src,
                             const int* starts,
                             int height,
                             int width,
                             const int mode) {
  CHECK_NOTNULL(dst);
  CHECK_NOTNULL(src);
  CHECK_NOTNULL(starts);

  int block = 512;
  int grid = DIVUP(width * height, 512);

  CHECK(mode == 0 || mode == 1 || mode == 2)
      << "mode error in hl_sequence_avg_forward!";

  KeSequenceAvgForward<<<grid, block, 0, STREAM_DEFAULT>>>(
      dst, src, starts, height, width, mode);
  CHECK_SYNC("hl_sequence_avg_forward failed");
}

// Backward of the avg pooling: broadcasts each (row, col) gradient, scaled by
// the same mode-dependent factor as the forward pass, to all rows of the
// sequence.
__global__ void KeSequenceAvgBackward(real* dst,
                                      real* src,
                                      const int* starts,
                                      int height,
                                      int width,
                                      const int mode) {
  int gid = blockIdx.x * blockDim.x + threadIdx.x;
  int row = gid / width;
  int col = gid % width;

  if (gid < height * width) {
    int start = starts[row];
    int end = starts[row + 1];
    int seqLength = end - start;
    if (seqLength == 0) return;
    real grad = src[gid];
    grad = mode == 1 ? grad
                     : (mode == 0 ? grad / seqLength
                                  : grad * my_rsqrt((real)seqLength));
    for (int i = start; i < end; i++) {
      dst[i * width + col] += grad;
    }
  }
}

// Host launcher; validates mode in {0, 1, 2}.
void hl_sequence_avg_backward(real* dst,
                              real* src,
                              const int* starts,
                              int height,
                              int width,
                              const int mode) {
  CHECK_NOTNULL(dst);
  CHECK_NOTNULL(src);
  CHECK_NOTNULL(starts);

  int block = 512;
  int grid = DIVUP(width * height, 512);

  CHECK(mode == 0 || mode == 1 || mode == 2)
      << "mode error in hl_sequence_avg_backward!";

  KeSequenceAvgBackward<<<grid, block, 0, STREAM_DEFAULT>>>(
      dst, src, starts, height, width, mode);
  CHECK_SYNC("hl_sequence_avg_backward failed");
}
748e048632ca684ce99834fdb4de12d488b93590.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include "cuda_helpers.h" #include "../utils/dispatch.h" namespace tvdcn { namespace ops { namespace cuda { namespace { constexpr float threadsFraction = 1.0; template<typename scalar_t, typename index_t> __device__ __forceinline__ scalar_t sample( const at::GenericPackedTensorAccessor<scalar_t, 3, at::RestrictPtrTraits, index_t> input, const index_t b, const index_t c, const index_t width, const index_t x) { return (0 <= x && x < width) ? input[b][c][x] : static_cast<scalar_t>(0); } template<typename scalar_t, typename index_t> __device__ __forceinline__ scalar_t interpolate_sample( const at::GenericPackedTensorAccessor<scalar_t, 3, at::RestrictPtrTraits, index_t> input, const index_t b, const index_t c, const index_t width, const scalar_t x) { if (x <= -1 || width <= x) return 0; index_t x_l = floor(x); index_t x_h = x_l + 1; scalar_t dx_h = x - x_l; scalar_t dx_l = 1 - dx_h; bool valid_x_l = x_l >= 0; bool valid_x_h = x_h < width; scalar_t val = 0; if (valid_x_l) val += dx_l * input[b][c][x_l]; if (valid_x_h) val += dx_h * input[b][c][x_h]; return val; } template<typename scalar_t, typename index_t> __device__ __forceinline__ void insert( at::GenericPackedTensorAccessor<scalar_t, 3, at::RestrictPtrTraits, index_t> output, const index_t b, const index_t c, const index_t width, const index_t x, const scalar_t val) { if (0 <= x && x < width) gpuAtomicAdd(&output[b][c][x], val); } template<typename scalar_t, typename index_t> __device__ __forceinline__ void interpolate_insert( at::GenericPackedTensorAccessor<scalar_t, 3, at::RestrictPtrTraits, index_t> output, const index_t b, const index_t c, const index_t width, const scalar_t x, const scalar_t val) { index_t x_l = floor(x); index_t x_h = x_l + 1; scalar_t dx_h = x - x_l; scalar_t dx_l = 1 - dx_h; bool valid_x_l = 0 <= x_l && x_l < width; bool valid_x_h = 0 <= x_h && x_h < width; if (valid_x_l) 
gpuAtomicAdd(&output[b][c][x_l], dx_l * val); if (valid_x_h) gpuAtomicAdd(&output[b][c][x_h], dx_h * val); } template<typename scalar_t, typename index_t> __device__ __forceinline__ scalar_t coordinate_weight( const at::GenericPackedTensorAccessor<scalar_t, 3, at::RestrictPtrTraits, index_t> input, const index_t b, const index_t c, const index_t width, const scalar_t x) { index_t x_l = floor(x); index_t x_h = x_l + 1; scalar_t dx_h = 1; scalar_t dx_l = -1; bool valid_x_l = 0 <= x_l && x_l < width; bool valid_x_h = 0 <= x_h && x_h < width; scalar_t val = 0; if (valid_x_l) val += dx_l * input[b][c][x_l]; if (valid_x_h) val += dx_h * input[b][c][x_h]; return val; } } template<bool deformable, bool modulated, typename scalar_t, typename index_t> static __launch_bounds__(1024) __global__ void arr2col_kernel( const index_t n_kernels, const at::GenericPackedTensorAccessor<scalar_t, 3, at::RestrictPtrTraits, index_t> input, const at::GenericPackedTensorAccessor<scalar_t, 5, at::RestrictPtrTraits, index_t> offset, const at::GenericPackedTensorAccessor<scalar_t, 4, at::RestrictPtrTraits, index_t> mask, const index_t width, const index_t weight_w, const index_t stride_w, const index_t pad_w, const index_t dilation_w, const index_t out_w, const index_t in_channels, const index_t c_per_offset_group, const index_t c_per_mask_group, at::GenericPackedTensorAccessor<scalar_t, 4, at::RestrictPtrTraits, index_t> columns) { CUDA_1D_KERNEL_LOOP_T(index, n_kernels, index_t) { const index_t w = index % out_w; const index_t c = (index / out_w) % in_channels; const index_t b = index / (out_w * in_channels); const index_t offset_group_idx = c / c_per_offset_group; const index_t mask_group_idx = c / c_per_mask_group; for (index_t i = 0; i < weight_w; ++i) { const index_t x = (w * stride_w - pad_w) + i * dilation_w; scalar_t val, mask_val; if constexpr (deformable) val = interpolate_sample( input, b, c, width, x + offset[b][offset_group_idx][i][0][w]); else val = sample(input, b, c, width, 
x); if constexpr (modulated) mask_val = mask[b][mask_group_idx][i][w]; else mask_val = static_cast<scalar_t>(1); columns[c][i][b][w] = val * mask_val; } } } void arr2col( const at::Tensor &input, const at::Tensor &offset, const at::Tensor &mask, const int64_t in_channels, const int64_t width, const int64_t weight_w, const int64_t stride_w, const int64_t pad_w, const int64_t dilation_w, const int64_t out_w, const int64_t batch_sz, const int64_t offset_groups, const int64_t mask_groups, const bool deformable, const bool modulated, at::Tensor &columns) { at::hip::HIPGuardMasqueradingAsCUDA device_guard(input.get_device()); const int64_t n_kernels = (int64_t) batch_sz * in_channels * out_w; const int64_t c_per_offset_group = deformable ? in_channels / offset_groups : 1; const int64_t c_per_mask_group = modulated ? in_channels / mask_groups : 1; const unsigned int threads = GET_THREADS(threadsFraction); const unsigned int blocks = GET_BLOCKS(threads, n_kernels); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "arr2col", ([&] { TVDCN_DISPATCH_INDEX_TYPE2(n_kernels, columns.numel(), ([&] { auto columns_accessor = columns.generic_packed_accessor<scalar_t, 4, at::RestrictPtrTraits, index_t>(); TVDCN_DISPATCH_CONDITION2(deformable, modulated, ([&] { hipLaunchKernelGGL(( arr2col_kernel<deformable, modulated, scalar_t, index_t>), dim3(blocks), dim3(threads), 0, 0, n_kernels, input.generic_packed_accessor<scalar_t, 3, at::RestrictPtrTraits, index_t>(), offset.generic_packed_accessor<scalar_t, 5, at::RestrictPtrTraits, index_t>(), mask.generic_packed_accessor<scalar_t, 4, at::RestrictPtrTraits, index_t>(), width, weight_w, stride_w, pad_w, dilation_w, out_w, in_channels, c_per_offset_group, c_per_mask_group, columns_accessor); })); })); })); C10_HIP_KERNEL_LAUNCH_CHECK(); } template<bool deformable, bool modulated, typename scalar_t, typename index_t> static __launch_bounds__(1024) __global__ void col2arr_kernel( const index_t n_kernels, const 
at::GenericPackedTensorAccessor<scalar_t, 4, at::RestrictPtrTraits, index_t> columns, const at::GenericPackedTensorAccessor<scalar_t, 5, at::RestrictPtrTraits, index_t> offset, const at::GenericPackedTensorAccessor<scalar_t, 4, at::RestrictPtrTraits, index_t> mask, const index_t in_channels, const index_t width, const index_t weight_w, const index_t stride_w, const index_t pad_w, const index_t dilation_w, const index_t out_w, const index_t c_per_offset_group, const index_t c_per_mask_group, at::GenericPackedTensorAccessor<scalar_t, 3, at::RestrictPtrTraits, index_t> grad_input) { CUDA_1D_KERNEL_LOOP_T(index, n_kernels, index_t) { const index_t i = index % weight_w; const index_t w = (index / weight_w) % out_w; const index_t c = (index / (weight_w * out_w)) % in_channels; const index_t b = (index / (weight_w * out_w * in_channels)); const index_t offset_group_idx = c / c_per_offset_group; const index_t mask_group_idx = c / c_per_mask_group; const index_t x = (w * stride_w - pad_w) + i * dilation_w; scalar_t mask_val; if constexpr (modulated) mask_val = mask[b][mask_group_idx][i][w]; else mask_val = static_cast<scalar_t>(1); scalar_t val = columns[c][i][b][w] * mask_val; if constexpr (deformable) interpolate_insert( grad_input, b, c, width, x + offset[b][offset_group_idx][i][0][w], val); else insert(grad_input, b, c, width, x, val); } } void col2arr( const at::Tensor &columns, const at::Tensor &offset, const at::Tensor &mask, const int64_t in_channels, const int64_t width, const int64_t weight_w, const int64_t stride_w, const int64_t pad_w, const int64_t dilation_w, const int64_t out_w, const int64_t batch_sz, const int64_t offset_groups, const int64_t mask_groups, const bool deformable, const bool modulated, at::Tensor &grad_input) { at::hip::HIPGuardMasqueradingAsCUDA device_guard(columns.get_device()); const int64_t n_kernels = (int64_t) batch_sz * in_channels * out_w * weight_w; const int64_t c_per_offset_group = deformable ? 
in_channels / offset_groups : 1; const int64_t c_per_mask_group = modulated ? in_channels / mask_groups : 1; const unsigned int threads = GET_THREADS(threadsFraction); const unsigned int blocks = GET_BLOCKS(threads, n_kernels); AT_DISPATCH_FLOATING_TYPES_AND_HALF( columns.scalar_type(), "col2arr", ([&] { TVDCN_DISPATCH_INDEX_TYPE(n_kernels, ([&] { auto grad_input_accessor = grad_input.generic_packed_accessor<scalar_t, 3, at::RestrictPtrTraits, index_t>(); TVDCN_DISPATCH_CONDITION2(deformable, modulated, ([&] { hipLaunchKernelGGL(( col2arr_kernel<deformable, modulated, scalar_t, index_t>), dim3(blocks), dim3(threads), 0, 0, n_kernels, columns.generic_packed_accessor<scalar_t, 4, at::RestrictPtrTraits, index_t>(), offset.generic_packed_accessor<scalar_t, 5, at::RestrictPtrTraits, index_t>(), mask.generic_packed_accessor<scalar_t, 4, at::RestrictPtrTraits, index_t>(), in_channels, width, weight_w, stride_w, pad_w, dilation_w, out_w, c_per_offset_group, c_per_mask_group, grad_input_accessor); })); })); })); C10_HIP_KERNEL_LAUNCH_CHECK(); } template<bool modulated, typename scalar_t, typename index_t> static __launch_bounds__(1024) __global__ void deform_conv1d_compute_grad_offset_kernel( const index_t n_kernels, const at::GenericPackedTensorAccessor<scalar_t, 4, at::RestrictPtrTraits, index_t> columns, const at::GenericPackedTensorAccessor<scalar_t, 3, at::RestrictPtrTraits, index_t> input, const at::GenericPackedTensorAccessor<scalar_t, 5, at::RestrictPtrTraits, index_t> offset, const at::GenericPackedTensorAccessor<scalar_t, 4, at::RestrictPtrTraits, index_t> mask, const index_t width, const index_t weight_w, const index_t stride_w, const index_t pad_w, const index_t dilation_w, const index_t out_w, const index_t offset_groups, const index_t c_per_offset_group, const index_t c_per_mask_group, at::GenericPackedTensorAccessor<scalar_t, 5, at::RestrictPtrTraits, index_t> grad_offset) { CUDA_1D_KERNEL_LOOP_T(index, n_kernels, index_t) { const index_t i = index % 
weight_w; const index_t w = (index / weight_w) % out_w; const index_t g = (index / (weight_w * out_w)) % offset_groups; const index_t b = index / (weight_w * out_w * offset_groups); scalar_t grad_offset_val = 0; const index_t c_start = g * c_per_offset_group; const index_t c_end = c_start + c_per_offset_group; for (index_t c = c_start; c < c_end; ++c) { const index_t mask_group_idx = c / c_per_mask_group; const index_t x = (w * stride_w - pad_w) + i * dilation_w; scalar_t weight = coordinate_weight( input, b, c, width, x + offset[b][g][i][0][w]); scalar_t mask_val; if constexpr (modulated) mask_val = mask[b][mask_group_idx][i][w]; else mask_val = static_cast<scalar_t>(1); grad_offset_val += columns[c][i][b][w] * weight * mask_val; } grad_offset[b][g][i][0][w] = grad_offset_val; } } void deform_conv1d_compute_grad_offset( const at::Tensor &columns, const at::Tensor &input, const at::Tensor &offset, const at::Tensor &mask, const int64_t in_channels, const int64_t width, const int64_t weight_w, const int64_t stride_w, const int64_t pad_w, const int64_t dilation_w, const int64_t out_w, const int64_t batch_sz, const int64_t offset_groups, const int64_t mask_groups, const bool deformable, const bool modulated, at::Tensor &grad_offset) { if (!deformable) return; at::hip::HIPGuardMasqueradingAsCUDA device_guard(columns.get_device()); const int64_t n_kernels = (int64_t) batch_sz * offset_groups * out_w * weight_w; const int64_t c_per_offset_group = deformable ? in_channels / offset_groups : 1; const int64_t c_per_mask_group = modulated ? 
in_channels / mask_groups : 1; const unsigned int threads = GET_THREADS(threadsFraction); const unsigned int blocks = GET_BLOCKS(threads, n_kernels); AT_DISPATCH_FLOATING_TYPES_AND_HALF( columns.scalar_type(), "deform_conv1d_compute_grad_offset", ([&] { TVDCN_DISPATCH_INDEX_TYPE2(n_kernels, columns.numel(), ([&] { auto grad_offset_accessor = grad_offset.generic_packed_accessor<scalar_t, 5, at::RestrictPtrTraits, index_t>(); TVDCN_DISPATCH_CONDITION(modulated, ([&] { hipLaunchKernelGGL(( deform_conv1d_compute_grad_offset_kernel<modulated, scalar_t, index_t>), dim3(blocks), dim3(threads), 0, 0, n_kernels, columns.generic_packed_accessor<scalar_t, 4, at::RestrictPtrTraits, index_t>(), input.generic_packed_accessor<scalar_t, 3, at::RestrictPtrTraits, index_t>(), offset.generic_packed_accessor<scalar_t, 5, at::RestrictPtrTraits, index_t>(), mask.generic_packed_accessor<scalar_t, 4, at::RestrictPtrTraits, index_t>(), width, weight_w, stride_w, pad_w, dilation_w, out_w, offset_groups, c_per_offset_group, c_per_mask_group, grad_offset_accessor); })); })); })); C10_HIP_KERNEL_LAUNCH_CHECK(); } template<bool deformable, typename scalar_t, typename index_t> static __launch_bounds__(1024) __global__ void deform_conv1d_compute_grad_mask_kernel( const index_t n_kernels, const at::GenericPackedTensorAccessor<scalar_t, 4, at::RestrictPtrTraits, index_t> columns, const at::GenericPackedTensorAccessor<scalar_t, 3, at::RestrictPtrTraits, index_t> input, const at::GenericPackedTensorAccessor<scalar_t, 5, at::RestrictPtrTraits, index_t> offset, const index_t width, const index_t weight_w, const index_t stride_w, const index_t pad_w, const index_t dilation_w, const index_t out_w, const index_t mask_groups, const index_t c_per_offset_group, const index_t c_per_mask_group, at::GenericPackedTensorAccessor<scalar_t, 4, at::RestrictPtrTraits, index_t> grad_mask) { CUDA_1D_KERNEL_LOOP_T(index, n_kernels, index_t) { const index_t i = index % weight_w; const index_t w = (index / weight_w) % 
out_w; const index_t g = (index / (weight_w * out_w)) % mask_groups; const index_t b = index / (out_w * weight_w * mask_groups); scalar_t grad_mask_val = 0; const index_t c_start = g * c_per_mask_group; const index_t c_end = c_start + c_per_mask_group; for (index_t c = c_start; c < c_end; ++c) { const index_t offset_group_idx = c / c_per_offset_group; const index_t x = (w * stride_w - pad_w) + i * dilation_w; scalar_t val; if constexpr (deformable) val = interpolate_sample( input, b, c, width, x + offset[b][offset_group_idx][i][0][w]); else val = sample(input, b, c, width, x); grad_mask_val += columns[c][i][b][w] * val; } grad_mask[b][g][i][w] = grad_mask_val; } } void deform_conv1d_compute_grad_mask( const at::Tensor &columns, const at::Tensor &input, const at::Tensor &offset, const int64_t in_channels, const int64_t width, const int64_t weight_w, const int64_t stride_w, const int64_t pad_w, const int64_t dilation_w, const int64_t out_w, const int64_t batch_sz, const int64_t offset_groups, const int64_t mask_groups, const bool deformable, const bool modulated, at::Tensor &grad_mask) { if (!modulated) return; at::hip::HIPGuardMasqueradingAsCUDA device_guard(columns.get_device()); const int64_t n_kernels = (int64_t) batch_sz * mask_groups * out_w * weight_w; const int64_t c_per_offset_group = deformable ? in_channels / offset_groups : 1; const int64_t c_per_mask_group = modulated ? 
in_channels / mask_groups : 1; const unsigned int threads = GET_THREADS(threadsFraction); const unsigned int blocks = GET_BLOCKS(threads, n_kernels); AT_DISPATCH_FLOATING_TYPES_AND_HALF( columns.scalar_type(), "deform_conv1d_compute_grad_mask", ([&] { TVDCN_DISPATCH_INDEX_TYPE2(n_kernels, columns.numel(), ([&] { auto grad_mask_accessor = grad_mask.generic_packed_accessor<scalar_t, 4, at::RestrictPtrTraits, index_t>(); TVDCN_DISPATCH_CONDITION(deformable, ([&] { hipLaunchKernelGGL(( deform_conv1d_compute_grad_mask_kernel<deformable, scalar_t, index_t>), dim3(blocks), dim3(threads), 0, 0, n_kernels, columns.generic_packed_accessor<scalar_t, 4, at::RestrictPtrTraits, index_t>(), input.generic_packed_accessor<scalar_t, 3, at::RestrictPtrTraits, index_t>(), offset.generic_packed_accessor<scalar_t, 5, at::RestrictPtrTraits, index_t>(), width, weight_w, stride_w, pad_w, dilation_w, out_w, mask_groups, c_per_offset_group, c_per_mask_group, grad_mask_accessor); })); })); })); C10_HIP_KERNEL_LAUNCH_CHECK(); } } } }
748e048632ca684ce99834fdb4de12d488b93590.cu
#include <ATen/ATen.h> #include "cuda_helpers.h" #include "../utils/dispatch.h" namespace tvdcn { namespace ops { namespace cuda { namespace { constexpr float threadsFraction = 1.0; template<typename scalar_t, typename index_t> __device__ __forceinline__ scalar_t sample( const at::GenericPackedTensorAccessor<scalar_t, 3, at::RestrictPtrTraits, index_t> input, const index_t b, const index_t c, const index_t width, const index_t x) { return (0 <= x && x < width) ? input[b][c][x] : static_cast<scalar_t>(0); } template<typename scalar_t, typename index_t> __device__ __forceinline__ scalar_t interpolate_sample( const at::GenericPackedTensorAccessor<scalar_t, 3, at::RestrictPtrTraits, index_t> input, const index_t b, const index_t c, const index_t width, const scalar_t x) { if (x <= -1 || width <= x) return 0; index_t x_l = floor(x); index_t x_h = x_l + 1; scalar_t dx_h = x - x_l; scalar_t dx_l = 1 - dx_h; bool valid_x_l = x_l >= 0; bool valid_x_h = x_h < width; scalar_t val = 0; if (valid_x_l) val += dx_l * input[b][c][x_l]; if (valid_x_h) val += dx_h * input[b][c][x_h]; return val; } template<typename scalar_t, typename index_t> __device__ __forceinline__ void insert( at::GenericPackedTensorAccessor<scalar_t, 3, at::RestrictPtrTraits, index_t> output, const index_t b, const index_t c, const index_t width, const index_t x, const scalar_t val) { if (0 <= x && x < width) gpuAtomicAdd(&output[b][c][x], val); } template<typename scalar_t, typename index_t> __device__ __forceinline__ void interpolate_insert( at::GenericPackedTensorAccessor<scalar_t, 3, at::RestrictPtrTraits, index_t> output, const index_t b, const index_t c, const index_t width, const scalar_t x, const scalar_t val) { index_t x_l = floor(x); index_t x_h = x_l + 1; scalar_t dx_h = x - x_l; scalar_t dx_l = 1 - dx_h; bool valid_x_l = 0 <= x_l && x_l < width; bool valid_x_h = 0 <= x_h && x_h < width; if (valid_x_l) gpuAtomicAdd(&output[b][c][x_l], dx_l * val); if (valid_x_h) gpuAtomicAdd(&output[b][c][x_h], dx_h 
* val); } template<typename scalar_t, typename index_t> __device__ __forceinline__ scalar_t coordinate_weight( const at::GenericPackedTensorAccessor<scalar_t, 3, at::RestrictPtrTraits, index_t> input, const index_t b, const index_t c, const index_t width, const scalar_t x) { index_t x_l = floor(x); index_t x_h = x_l + 1; scalar_t dx_h = 1; scalar_t dx_l = -1; bool valid_x_l = 0 <= x_l && x_l < width; bool valid_x_h = 0 <= x_h && x_h < width; scalar_t val = 0; if (valid_x_l) val += dx_l * input[b][c][x_l]; if (valid_x_h) val += dx_h * input[b][c][x_h]; return val; } } template<bool deformable, bool modulated, typename scalar_t, typename index_t> static __launch_bounds__(1024) __global__ void arr2col_kernel( const index_t n_kernels, const at::GenericPackedTensorAccessor<scalar_t, 3, at::RestrictPtrTraits, index_t> input, const at::GenericPackedTensorAccessor<scalar_t, 5, at::RestrictPtrTraits, index_t> offset, const at::GenericPackedTensorAccessor<scalar_t, 4, at::RestrictPtrTraits, index_t> mask, const index_t width, const index_t weight_w, const index_t stride_w, const index_t pad_w, const index_t dilation_w, const index_t out_w, const index_t in_channels, const index_t c_per_offset_group, const index_t c_per_mask_group, at::GenericPackedTensorAccessor<scalar_t, 4, at::RestrictPtrTraits, index_t> columns) { CUDA_1D_KERNEL_LOOP_T(index, n_kernels, index_t) { const index_t w = index % out_w; const index_t c = (index / out_w) % in_channels; const index_t b = index / (out_w * in_channels); const index_t offset_group_idx = c / c_per_offset_group; const index_t mask_group_idx = c / c_per_mask_group; for (index_t i = 0; i < weight_w; ++i) { const index_t x = (w * stride_w - pad_w) + i * dilation_w; scalar_t val, mask_val; if constexpr (deformable) val = interpolate_sample( input, b, c, width, x + offset[b][offset_group_idx][i][0][w]); else val = sample(input, b, c, width, x); if constexpr (modulated) mask_val = mask[b][mask_group_idx][i][w]; else mask_val = 
static_cast<scalar_t>(1); columns[c][i][b][w] = val * mask_val; } } } void arr2col( const at::Tensor &input, const at::Tensor &offset, const at::Tensor &mask, const int64_t in_channels, const int64_t width, const int64_t weight_w, const int64_t stride_w, const int64_t pad_w, const int64_t dilation_w, const int64_t out_w, const int64_t batch_sz, const int64_t offset_groups, const int64_t mask_groups, const bool deformable, const bool modulated, at::Tensor &columns) { at::cuda::CUDAGuard device_guard(input.get_device()); const int64_t n_kernels = (int64_t) batch_sz * in_channels * out_w; const int64_t c_per_offset_group = deformable ? in_channels / offset_groups : 1; const int64_t c_per_mask_group = modulated ? in_channels / mask_groups : 1; const unsigned int threads = GET_THREADS(threadsFraction); const unsigned int blocks = GET_BLOCKS(threads, n_kernels); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "arr2col", ([&] { TVDCN_DISPATCH_INDEX_TYPE2(n_kernels, columns.numel(), ([&] { auto columns_accessor = columns.generic_packed_accessor<scalar_t, 4, at::RestrictPtrTraits, index_t>(); TVDCN_DISPATCH_CONDITION2(deformable, modulated, ([&] { arr2col_kernel<deformable, modulated, scalar_t, index_t><<<blocks, threads>>>( n_kernels, input.generic_packed_accessor<scalar_t, 3, at::RestrictPtrTraits, index_t>(), offset.generic_packed_accessor<scalar_t, 5, at::RestrictPtrTraits, index_t>(), mask.generic_packed_accessor<scalar_t, 4, at::RestrictPtrTraits, index_t>(), width, weight_w, stride_w, pad_w, dilation_w, out_w, in_channels, c_per_offset_group, c_per_mask_group, columns_accessor); })); })); })); C10_CUDA_KERNEL_LAUNCH_CHECK(); } template<bool deformable, bool modulated, typename scalar_t, typename index_t> static __launch_bounds__(1024) __global__ void col2arr_kernel( const index_t n_kernels, const at::GenericPackedTensorAccessor<scalar_t, 4, at::RestrictPtrTraits, index_t> columns, const at::GenericPackedTensorAccessor<scalar_t, 5, at::RestrictPtrTraits, 
index_t> offset, const at::GenericPackedTensorAccessor<scalar_t, 4, at::RestrictPtrTraits, index_t> mask, const index_t in_channels, const index_t width, const index_t weight_w, const index_t stride_w, const index_t pad_w, const index_t dilation_w, const index_t out_w, const index_t c_per_offset_group, const index_t c_per_mask_group, at::GenericPackedTensorAccessor<scalar_t, 3, at::RestrictPtrTraits, index_t> grad_input) { CUDA_1D_KERNEL_LOOP_T(index, n_kernels, index_t) { const index_t i = index % weight_w; const index_t w = (index / weight_w) % out_w; const index_t c = (index / (weight_w * out_w)) % in_channels; const index_t b = (index / (weight_w * out_w * in_channels)); const index_t offset_group_idx = c / c_per_offset_group; const index_t mask_group_idx = c / c_per_mask_group; const index_t x = (w * stride_w - pad_w) + i * dilation_w; scalar_t mask_val; if constexpr (modulated) mask_val = mask[b][mask_group_idx][i][w]; else mask_val = static_cast<scalar_t>(1); scalar_t val = columns[c][i][b][w] * mask_val; if constexpr (deformable) interpolate_insert( grad_input, b, c, width, x + offset[b][offset_group_idx][i][0][w], val); else insert(grad_input, b, c, width, x, val); } } void col2arr( const at::Tensor &columns, const at::Tensor &offset, const at::Tensor &mask, const int64_t in_channels, const int64_t width, const int64_t weight_w, const int64_t stride_w, const int64_t pad_w, const int64_t dilation_w, const int64_t out_w, const int64_t batch_sz, const int64_t offset_groups, const int64_t mask_groups, const bool deformable, const bool modulated, at::Tensor &grad_input) { at::cuda::CUDAGuard device_guard(columns.get_device()); const int64_t n_kernels = (int64_t) batch_sz * in_channels * out_w * weight_w; const int64_t c_per_offset_group = deformable ? in_channels / offset_groups : 1; const int64_t c_per_mask_group = modulated ? 
in_channels / mask_groups : 1; const unsigned int threads = GET_THREADS(threadsFraction); const unsigned int blocks = GET_BLOCKS(threads, n_kernels); AT_DISPATCH_FLOATING_TYPES_AND_HALF( columns.scalar_type(), "col2arr", ([&] { TVDCN_DISPATCH_INDEX_TYPE(n_kernels, ([&] { auto grad_input_accessor = grad_input.generic_packed_accessor<scalar_t, 3, at::RestrictPtrTraits, index_t>(); TVDCN_DISPATCH_CONDITION2(deformable, modulated, ([&] { col2arr_kernel<deformable, modulated, scalar_t, index_t><<<blocks, threads>>>( n_kernels, columns.generic_packed_accessor<scalar_t, 4, at::RestrictPtrTraits, index_t>(), offset.generic_packed_accessor<scalar_t, 5, at::RestrictPtrTraits, index_t>(), mask.generic_packed_accessor<scalar_t, 4, at::RestrictPtrTraits, index_t>(), in_channels, width, weight_w, stride_w, pad_w, dilation_w, out_w, c_per_offset_group, c_per_mask_group, grad_input_accessor); })); })); })); C10_CUDA_KERNEL_LAUNCH_CHECK(); } template<bool modulated, typename scalar_t, typename index_t> static __launch_bounds__(1024) __global__ void deform_conv1d_compute_grad_offset_kernel( const index_t n_kernels, const at::GenericPackedTensorAccessor<scalar_t, 4, at::RestrictPtrTraits, index_t> columns, const at::GenericPackedTensorAccessor<scalar_t, 3, at::RestrictPtrTraits, index_t> input, const at::GenericPackedTensorAccessor<scalar_t, 5, at::RestrictPtrTraits, index_t> offset, const at::GenericPackedTensorAccessor<scalar_t, 4, at::RestrictPtrTraits, index_t> mask, const index_t width, const index_t weight_w, const index_t stride_w, const index_t pad_w, const index_t dilation_w, const index_t out_w, const index_t offset_groups, const index_t c_per_offset_group, const index_t c_per_mask_group, at::GenericPackedTensorAccessor<scalar_t, 5, at::RestrictPtrTraits, index_t> grad_offset) { CUDA_1D_KERNEL_LOOP_T(index, n_kernels, index_t) { const index_t i = index % weight_w; const index_t w = (index / weight_w) % out_w; const index_t g = (index / (weight_w * out_w)) % offset_groups; 
const index_t b = index / (weight_w * out_w * offset_groups); scalar_t grad_offset_val = 0; const index_t c_start = g * c_per_offset_group; const index_t c_end = c_start + c_per_offset_group; for (index_t c = c_start; c < c_end; ++c) { const index_t mask_group_idx = c / c_per_mask_group; const index_t x = (w * stride_w - pad_w) + i * dilation_w; scalar_t weight = coordinate_weight( input, b, c, width, x + offset[b][g][i][0][w]); scalar_t mask_val; if constexpr (modulated) mask_val = mask[b][mask_group_idx][i][w]; else mask_val = static_cast<scalar_t>(1); grad_offset_val += columns[c][i][b][w] * weight * mask_val; } grad_offset[b][g][i][0][w] = grad_offset_val; } } void deform_conv1d_compute_grad_offset( const at::Tensor &columns, const at::Tensor &input, const at::Tensor &offset, const at::Tensor &mask, const int64_t in_channels, const int64_t width, const int64_t weight_w, const int64_t stride_w, const int64_t pad_w, const int64_t dilation_w, const int64_t out_w, const int64_t batch_sz, const int64_t offset_groups, const int64_t mask_groups, const bool deformable, const bool modulated, at::Tensor &grad_offset) { if (!deformable) return; at::cuda::CUDAGuard device_guard(columns.get_device()); const int64_t n_kernels = (int64_t) batch_sz * offset_groups * out_w * weight_w; const int64_t c_per_offset_group = deformable ? in_channels / offset_groups : 1; const int64_t c_per_mask_group = modulated ? 
in_channels / mask_groups : 1; const unsigned int threads = GET_THREADS(threadsFraction); const unsigned int blocks = GET_BLOCKS(threads, n_kernels); AT_DISPATCH_FLOATING_TYPES_AND_HALF( columns.scalar_type(), "deform_conv1d_compute_grad_offset", ([&] { TVDCN_DISPATCH_INDEX_TYPE2(n_kernels, columns.numel(), ([&] { auto grad_offset_accessor = grad_offset.generic_packed_accessor<scalar_t, 5, at::RestrictPtrTraits, index_t>(); TVDCN_DISPATCH_CONDITION(modulated, ([&] { deform_conv1d_compute_grad_offset_kernel<modulated, scalar_t, index_t><<<blocks, threads>>>( n_kernels, columns.generic_packed_accessor<scalar_t, 4, at::RestrictPtrTraits, index_t>(), input.generic_packed_accessor<scalar_t, 3, at::RestrictPtrTraits, index_t>(), offset.generic_packed_accessor<scalar_t, 5, at::RestrictPtrTraits, index_t>(), mask.generic_packed_accessor<scalar_t, 4, at::RestrictPtrTraits, index_t>(), width, weight_w, stride_w, pad_w, dilation_w, out_w, offset_groups, c_per_offset_group, c_per_mask_group, grad_offset_accessor); })); })); })); C10_CUDA_KERNEL_LAUNCH_CHECK(); } template<bool deformable, typename scalar_t, typename index_t> static __launch_bounds__(1024) __global__ void deform_conv1d_compute_grad_mask_kernel( const index_t n_kernels, const at::GenericPackedTensorAccessor<scalar_t, 4, at::RestrictPtrTraits, index_t> columns, const at::GenericPackedTensorAccessor<scalar_t, 3, at::RestrictPtrTraits, index_t> input, const at::GenericPackedTensorAccessor<scalar_t, 5, at::RestrictPtrTraits, index_t> offset, const index_t width, const index_t weight_w, const index_t stride_w, const index_t pad_w, const index_t dilation_w, const index_t out_w, const index_t mask_groups, const index_t c_per_offset_group, const index_t c_per_mask_group, at::GenericPackedTensorAccessor<scalar_t, 4, at::RestrictPtrTraits, index_t> grad_mask) { CUDA_1D_KERNEL_LOOP_T(index, n_kernels, index_t) { const index_t i = index % weight_w; const index_t w = (index / weight_w) % out_w; const index_t g = (index / 
(weight_w * out_w)) % mask_groups; const index_t b = index / (out_w * weight_w * mask_groups); scalar_t grad_mask_val = 0; const index_t c_start = g * c_per_mask_group; const index_t c_end = c_start + c_per_mask_group; for (index_t c = c_start; c < c_end; ++c) { const index_t offset_group_idx = c / c_per_offset_group; const index_t x = (w * stride_w - pad_w) + i * dilation_w; scalar_t val; if constexpr (deformable) val = interpolate_sample( input, b, c, width, x + offset[b][offset_group_idx][i][0][w]); else val = sample(input, b, c, width, x); grad_mask_val += columns[c][i][b][w] * val; } grad_mask[b][g][i][w] = grad_mask_val; } } void deform_conv1d_compute_grad_mask( const at::Tensor &columns, const at::Tensor &input, const at::Tensor &offset, const int64_t in_channels, const int64_t width, const int64_t weight_w, const int64_t stride_w, const int64_t pad_w, const int64_t dilation_w, const int64_t out_w, const int64_t batch_sz, const int64_t offset_groups, const int64_t mask_groups, const bool deformable, const bool modulated, at::Tensor &grad_mask) { if (!modulated) return; at::cuda::CUDAGuard device_guard(columns.get_device()); const int64_t n_kernels = (int64_t) batch_sz * mask_groups * out_w * weight_w; const int64_t c_per_offset_group = deformable ? in_channels / offset_groups : 1; const int64_t c_per_mask_group = modulated ? 
in_channels / mask_groups : 1; const unsigned int threads = GET_THREADS(threadsFraction); const unsigned int blocks = GET_BLOCKS(threads, n_kernels); AT_DISPATCH_FLOATING_TYPES_AND_HALF( columns.scalar_type(), "deform_conv1d_compute_grad_mask", ([&] { TVDCN_DISPATCH_INDEX_TYPE2(n_kernels, columns.numel(), ([&] { auto grad_mask_accessor = grad_mask.generic_packed_accessor<scalar_t, 4, at::RestrictPtrTraits, index_t>(); TVDCN_DISPATCH_CONDITION(deformable, ([&] { deform_conv1d_compute_grad_mask_kernel<deformable, scalar_t, index_t><<<blocks, threads>>>( n_kernels, columns.generic_packed_accessor<scalar_t, 4, at::RestrictPtrTraits, index_t>(), input.generic_packed_accessor<scalar_t, 3, at::RestrictPtrTraits, index_t>(), offset.generic_packed_accessor<scalar_t, 5, at::RestrictPtrTraits, index_t>(), width, weight_w, stride_w, pad_w, dilation_w, out_w, mask_groups, c_per_offset_group, c_per_mask_group, grad_mask_accessor); })); })); })); C10_CUDA_KERNEL_LAUNCH_CHECK(); } } } }
8e8cfd7d9cd8caec007de5657a2bd8a3b7a9d2ff.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #if GOOGLE_CUDA #include "ew_op_gpu.h" //#include <stdio.h> template <typename T, typename V, uint THREADS, uint WIDTH> __global__ void __launch_bounds__(THREADS) layer_norm_moments1_CN( V* P1, V* P2, const T* __restrict__ X, uint K, uint N) { // Stripe the reduction lines with tid and block_n uint tid = threadIdx.x; uint block_n = blockIdx.x; uint block_k = blockIdx.y; uint warps = THREADS / 32; uint lines = THREADS / WIDTH; uint line = tid / WIDTH; uint n = block_n*WIDTH + (tid % WIDTH); uint k = block_k * lines + line; uint kn = k*N + n; bool bn = n < N; uint inc_k = gridDim.y * lines; uint inc_kn = inc_k*N; V mean1, mean2; ew_zero(mean1); ew_zero(mean2); #pragma unroll 1 while (k < K) { V x = load(add_ptr_u(X, kn), 0, bn); mean1 = ew_add(mean1, x); mean2 = ew_add(mean2, ew_sqr(x)); kn += inc_kn; k += inc_k; } __shared__ V sMean1[THREADS]; __shared__ V sMean2[THREADS]; sMean1[tid] = mean1; sMean2[tid] = mean2; __syncthreads(); if (tid < 32) { for (uint i = 1; i < warps; i++) mean1 = ew_add(mean1, sMean1[tid + i*32]); // if the line width is less than a warp, reduce the lines within a warp for (int i = 16; i >= WIDTH; i >>= 1) mean1 = ew_warp_sum(mean1, i); // output a partial sums if (tid < WIDTH && bn) store(add_ptr_u(P1, block_k*N + n), mean1); } else if (tid < 64) { tid -= 32; mean2 = ew_add(mean2, sMean2[tid + 0*32]); for (uint i = 2; i < warps; i++) mean2 = ew_add(mean2, sMean2[tid + i*32]); // if the line width is less than a warp, reduce the lines within a warp for (int i = 16; i >= WIDTH; i >>= 1) mean2 = ew_warp_sum(mean2, i); // output a partial sums if (tid < WIDTH && bn) store(add_ptr_u(P2, block_k*N + n), mean2); } } // Reduce partial sums __global__ void __launch_bounds__(256) layer_norm_moments2_CN( float* Mean, float* Rstd, const float* __restrict__ P1, const float* __restrict__ P2, uint nPartials, uint N, float rcpK, float epsilon) { uint tid = threadIdx.x; 
uint bid = blockIdx.x; // load in 8 units of n wide to allow efficient transpose in L1 cache uint n = bid*8 + tid/32; uint k = tid & 31; uint kn = k*N + n; bool bn = n < N; // force compute outside of loop asm("mov.b32 %0, %0;" : "+r"(kn) : ); float mean1 = 0.0f, mean2 = 0.0f; // We should generally have #SMs * 2 partials. #pragma unroll 1 while (k < nPartials) { #if __CUDA_ARCH__ >= 700 const int UNROLL = 5; // 2*80 partials #else const int UNROLL = 4; // 2*56 partials #endif bool bnk[UNROLL]; bnk[0] = bn; for (int i = 1; i < UNROLL; i++) bnk[i] = bn && (k+32*i < nPartials); for (int i = 0; i < UNROLL; i++) { mean1 += load(add_ptr_u(P1, kn + N*32*i), 0, bnk[i]); mean2 += load(add_ptr_u(P2, kn + N*32*i), 0, bnk[i]); } kn += 32*UNROLL*N; k += 32*UNROLL; } for (uint i = 16; i > 0; i >>= 1) { mean1 += shfl_xor(mean1, i); mean2 += shfl_xor(mean2, i); } if (bn & (tid & 31) == 0) { // var = mean(x**2) - mean(x)**2 // rstd = 1/sqrt(var) mean1 *= rcpK; mean2 *= rcpK; float rstd = rsqrtf(precise_sub(mean2, ew_sqr(mean1)) + epsilon); store(add_ptr_u(Mean, n), mean1); store(add_ptr_u(Rstd, n), rstd); } } // xstdr = rcp(sqrt(xvar + epsilon)) // xhat = xmean * xstdr // y = xhat*g + b template <typename T, int UNROLL> __global__ void __launch_bounds__(32) layer_norm_CN( T* Y, const T* __restrict__ X, const float4* __restrict__ Mean, const float4* __restrict__ Rstd, const float* __restrict__ G, const float* __restrict__ B, int K, int N, int relu) { __shared__ float Gain[UNROLL*2]; __shared__ float Bias[UNROLL*2]; int tid = threadIdx.x; int idx_K = blockIdx.x * UNROLL*2; int idx_N = blockIdx.y * 16; // load gain/bias for this K-block int ki = idx_K + tid; if (tid < UNROLL*2 && ki < K) { Gain[tid] = G[ki]; Bias[tid] = B[ki]; } int tid16 = tid >> 4; int tid15 = tid & 15; int k = idx_K + tid16; int n = idx_N + tid15; bool bn = n < N; int xi = k*N + n; int inc = N * 2; float4 rstd = load(Rstd, n, bn); float4 mean = load(Mean, n, bn); #pragma unroll for (int j = 0; j < UNROLL; j++) { 
bool bnk = bn && k < K; float4 x = load(X, xi, bnk); float g = Gain[tid16]; float b = Bias[tid16]; // xhat = (x - mean) / sqrt(var + epsilon) // y = g * xhat + b float4 xhat = ew_mul(ew_sub(x, mean), rstd); float4 y = ew_add(ew_mul(xhat, g), b); if (relu) y = ew_relu(y); store(Y, y, xi, bnk); k += 2; tid16 += 2; xi += inc; } } template <typename T, typename V> bool LayerNormForward_CN(hipStream_t stream, int SMs, T* y, float* mean, float* rstd, float* p1, float* p2, const T* x, const float* g, const float* b, float epsilon, int K, int N, float rcpK, int relu) { const V* X = (const V*)x; const float4* Mean = (const float4*)mean; const float4* Rstd = (const float4*)rstd; float4* P1 = ( float4*)p1; float4* P2 = ( float4*)p2; uint gridN64 = (N >> 6) + ((N & 63) != 0); uint gridN8 = (N >> 3) + ((N & 7) != 0); uint gridK8 = (K >> 3) + ((K & 7) != 0); uint nPartials = gridN64 > 1 ? SMs : SMs*2; if (K <= 8*nPartials) hipLaunchKernelGGL(( layer_norm_moments1_CN<V,float4,128,16>), dim3(dim3(gridN64, nPartials)),dim3(128),0,stream, P1, P2, X, K, N>>2); else hipLaunchKernelGGL(( layer_norm_moments1_CN<V,float4,256,16>), dim3(dim3(gridN64, nPartials)),dim3(256),0,stream, P1, P2, X, K, N>>2); hipLaunchKernelGGL(( layer_norm_moments2_CN), dim3(gridN8),dim3(256),0,stream, mean, rstd, p1, p2, nPartials, N, rcpK, epsilon); hipLaunchKernelGGL(( layer_norm_CN<V,4>), dim3(dim3(gridK8, gridN64)),dim3(32), 0,stream, (V*)y, X, Mean, Rstd, g, b, K, N>>2, relu); return true; // TODO } template bool LayerNormForward_CN<float,float4>(hipStream_t stream, int SMs, float* y, float* mean, float* rstd, float* p1, float* p2, const float* x, const float* g, const float* b, float epsilon, int K, int N, float rcpK, int relu); template bool LayerNormForward_CN<ehalf,ehalf4>(hipStream_t stream, int SMs, ehalf* y, float* mean, float* rstd, float* p1, float* p2, const ehalf* x, const float* g, const float* b, float epsilon, int K, int N, float rcpK, int relu); template bool 
LayerNormForward_CN<bhalf,bhalf4>(hipStream_t stream, int SMs, bhalf* y, float* mean, float* rstd, float* p1, float* p2, const bhalf* x, const float* g, const float* b, float epsilon, int K, int N, float rcpK, int relu); // dg = sum(dy * xhat(x), axis=1) // db = sum(dy, axis=1) template <typename T> __global__ void __launch_bounds__(128) layer_norm_dg_db_CN( float* DG, float* DB, const T* __restrict__ DY, const T* __restrict__ X, const float* __restrict__ Gain, const float* __restrict__ Bias, const float4* __restrict__ Mean, const float4* __restrict__ Rstd, int K, int N, int relu) { __shared__ float gain[8]; __shared__ float bias[8]; int tid = threadIdx.x; int idx_K = blockIdx.x * 8; // load gain/bias for this K-block int ki = idx_K + tid; if (relu && tid < 8 && ki < K) { gain[tid] = Gain[ki]; bias[tid] = Bias[ki]; } int tid16 = tid >> 4; int tid15 = tid & 15; int k = idx_K + tid16; __syncthreads(); float dg = 0.0f, db = 0.0f; if (k < K) { int N4 = N >> 2; int xi = k*N4; X += xi; DY += xi; float4 dg4, db4; ew_zero(dg4); ew_zero(db4); for (int n = tid15; n < N4; n += 16) { float4 x = load(X, n); float4 dy = load(DY, n); float4 rstd = load(Rstd, n); float4 mean = load(Mean, n); // xhat = (x - mean) * rstd float4 xhat = ew_mul(ew_sub(x, mean), rstd); if (relu) { float g = gain[tid16]; float b = bias[tid16]; dy = ew_relu_grad(dy, ew_add(ew_mul(xhat, g), b)); } dg4 = ew_add(ew_mul(dy, xhat), dg4); db4 = ew_add(dy, db4); } dg = ew_sum(dg4); db = ew_sum(db4); } // reduce each half warp for (int i = 8; i > 0; i >>= 1) { dg += shfl_xor(dg, i); db += shfl_xor(db, i); } if (k < K && tid15 == 0) { DG[k] = dg; DB[k] = db; } } // dy = dy * g // sum1 = sum(xhat * dy, axis=0) // sum2 = sum(dy, axis=0) template <typename T, uint THREADS, uint WIDTH> __global__ void __launch_bounds__(THREADS) layer_norm_dx_sum1_CN( float4* P1, float4* P2, const T* __restrict__ DY, const T* __restrict__ X, const float* __restrict__ Gain, const float* __restrict__ Bias, const float4* __restrict__ 
Mean, const float4* __restrict__ Rstd, int K, int N, int relu) { // Stripe the reduction lines with tid and block_n uint tid = threadIdx.x; uint block_n = blockIdx.x; uint block_k = blockIdx.y; uint warps = THREADS / 32; uint lines = THREADS / WIDTH; uint line = tid / WIDTH; uint n = block_n*WIDTH + (tid % WIDTH); uint k = block_k * lines + line; uint kn = k*N + n; bool bn = n < N; uint inc_k = gridDim.y * lines; uint inc_kn = inc_k*N; float4 rstd = load(Rstd, n, bn); float4 mean = load(Mean, n, bn); float4 sum1, sum2; ew_zero(sum1); ew_zero(sum2); #pragma unroll 1 while (k < K) { float4 dy = load(add_ptr_u(DY, kn), 0, bn); float4 x = load(add_ptr_u(X, kn), 0, bn); float gain = load(add_ptr_u(Gain, k), 0, bn); float bias = load(add_ptr_u(Bias, k), 0, bn && relu != 0); float4 xhat = ew_mul(ew_sub(x, mean), rstd); if (relu != 0) dy = ew_relu_grad(dy, ew_add(ew_mul(xhat, gain), bias)); dy = ew_mul(dy, gain); sum1 = ew_add(sum1, ew_mul(dy, xhat)); sum2 = ew_add(sum2, dy); kn += inc_kn; k += inc_k; } __shared__ float4 sSum1[THREADS]; __shared__ float4 sSum2[THREADS]; sSum1[tid] = sum1; sSum2[tid] = sum2; __syncthreads(); if (tid < 32) { for (uint i = 1; i < warps; i++) sum1 = ew_add(sum1, sSum1[tid + i*32]); // if the line width is less than a warp, reduce the lines within a warp for (int i = 16; i >= WIDTH; i >>= 1) sum1 = ew_warp_sum(sum1, i); // output a partial sums if (tid < WIDTH && bn) store(add_ptr_u(P1, block_k*N + n), sum1); } else if (tid < 64) { tid -= 32; sum2 = ew_add(sum2, sSum2[tid + 0*32]); for (uint i = 2; i < warps; i++) sum2 = ew_add(sum2, sSum2[tid + i*32]); // if the line width is less than a warp, reduce the lines within a warp for (int i = 16; i >= WIDTH; i >>= 1) sum2 = ew_warp_sum(sum2, i); // output a partial sums if (tid < WIDTH && bn) store(add_ptr_u(P2, block_k*N + n), sum2); } } // Reduce partial sums __global__ void __launch_bounds__(256) layer_norm_dx_sum2_CN(float* Sum1, float* Sum2, uint nPartials, uint N) { uint tid = threadIdx.x; 
uint bid = blockIdx.x; // load in 8 units of n wide to allow efficient transpose in L1 cache uint n = bid*8 + tid/32; uint k = tid & 31; float* Sum = Sum1; if (n >= N) { n -= N; Sum = Sum2; } uint kn = k*N + n; bool bn = n < N; // force compute outside of loop asm("mov.b32 %0, %0;" : "+r"(kn) : ); float sum = 0.0f; // We should generally have #SMs * 2 partials. #pragma unroll 1 while (k < nPartials) { #if __CUDA_ARCH__ >= 700 const int UNROLL = 5; // 2*80 partials #else const int UNROLL = 4; // 2*56 partials #endif bool bnk[UNROLL]; bnk[0] = bn; for (int i = 1; i < UNROLL; i++) bnk[i] = bn && (k+32*i < nPartials); for (int i = 0; i < UNROLL; i++) sum += load(add_ptr_u((const float*)Sum, kn + N*32*i), 0, bnk[i]); kn += 32*UNROLL*N; k += 32*UNROLL; } for (uint i = 16; i > 0; i >>= 1) { sum += shfl_xor(sum, i); } if (bn & (tid & 31) == 0) store(add_ptr_u(Sum, n), sum); } // dy = dy * g // dx = (dy - ((xhat * sum1 + sum2) * rcpK)) * xstdr template <typename T, int UNROLL> __global__ void __launch_bounds__(32) layer_norm_dx_CN( T* DX, const T* __restrict__ DY, const T* __restrict__ X, const float* __restrict__ Gain, const float* __restrict__ Bias, const float4* __restrict__ Mean, const float4* __restrict__ Rstd, const float4* __restrict__ Sum1, const float4* __restrict__ Sum2, int K, int N, float rcpK, int relu) { __shared__ float gain[UNROLL*2]; __shared__ float bias[UNROLL*2]; int tid = threadIdx.x; int idx_K = blockIdx.x * UNROLL*2; int idx_N = blockIdx.y * 16; // load gain/bias for this K-block int ki = idx_K + tid; if (tid < UNROLL*2 && ki < K) { gain[tid] = Gain[ki]; bias[tid] = Bias[ki]; } int tid16 = tid >> 4; int tid15 = tid & 15; int k = idx_K + tid16; int n = idx_N + tid15; int N4 = N >> 2; bool bn = n < N4; int xi = k*N4 + n; int inc = N4 * 2; float4 rstd = load(Rstd, n, bn); float4 mean = load(Mean, n, bn); float4 sum1 = load(Sum1, n, bn); float4 sum2 = load(Sum2, n, bn); #pragma unroll 4 for (int j = 0; j < UNROLL; j++) { bool bnk = bn && k < K; float4 x = 
load( X, xi, bnk); float4 dy = load(DY, xi, bnk); float g = gain[tid16]; float b = bias[tid16]; float4 xhat = ew_mul(ew_sub(x, mean), rstd); if (relu) dy = ew_relu_grad(dy, ew_add(ew_mul(xhat, g), b)); dy = ew_mul(dy, g); // dx = (dy - ((xhat * sum1 + sum2) * rcpK)) * rstd; float4 dx = ew_mul(ew_sub(dy, ew_mul(ew_add(ew_mul(xhat, sum1), sum2), rcpK)), rstd); store(DX, dx, xi, bnk); k += 2; tid16 += 2; xi += inc; } } template <typename T, typename V> bool LayerNormBackward_CN(hipStream_t stream, int SMs, T* dx, float* dg, float* db, float* sum1, float* sum2, const T* dy, const T* x, const float* g, const float* b, const float* mean, const float* rstd, float epsilon, int K, int N, float rcpK, int relu) { uint gridN64 = (N >> 6) + ((N & 63) != 0); uint gridN8 = (N >> 3) + ((N & 7) != 0); uint gridK8 = (K >> 3) + ((K & 7) != 0); uint nPartials = gridN64 > 1 ? SMs : SMs*2; V* DX = ( V*)dx; const V* DY = (const V*)dy; const V* X = (const V*)x; const float4* Mean = (const float4*)mean; const float4* Rstd = (const float4*)rstd; const float4* Sum1 = (const float4*)sum1; const float4* Sum2 = (const float4*)sum2; hipLaunchKernelGGL(( layer_norm_dg_db_CN<V>), dim3(gridK8),dim3(128),0,stream, dg, db, DY, X, g, b, Mean, Rstd, K, N, relu); if (K <= 8*nPartials) hipLaunchKernelGGL(( layer_norm_dx_sum1_CN<V,128,16>), dim3(dim3(gridN64, nPartials)),dim3(128),0,stream, (float4*)sum1, (float4*)sum2, DY, X, g, b, Mean, Rstd, K, N>>2, relu); else hipLaunchKernelGGL(( layer_norm_dx_sum1_CN<V,256,16>), dim3(dim3(gridN64, nPartials)),dim3(256),0,stream, (float4*)sum1, (float4*)sum2, DY, X, g, b, Mean, Rstd, K, N>>2, relu); hipLaunchKernelGGL(( layer_norm_dx_sum2_CN), dim3(gridN8*2),dim3(256),0,stream, sum1, sum2, nPartials, N); hipLaunchKernelGGL(( layer_norm_dx_CN<V,4>), dim3(dim3(gridK8, gridN64)),dim3(32),0,stream, DX, DY, X, g, b, Mean, Rstd, Sum1, Sum2, K, N, rcpK, relu); return true; // TODO } template bool LayerNormBackward_CN<float,float4>(hipStream_t stream, int SMs, float* dx, 
float* dg, float* db, float* sum1, float* sum2, const float* dy, const float* x, const float* g, const float* b, const float* mean, const float* rstd, float epsilon, int K, int N, float rcpK, int relu); template bool LayerNormBackward_CN<ehalf,ehalf4>(hipStream_t stream, int SMs, ehalf* dx, float* dg, float* db, float* sum1, float* sum2, const ehalf* dy, const ehalf* x, const float* g, const float* b, const float* mean, const float* rstd, float epsilon, int K, int N, float rcpK, int relu); template bool LayerNormBackward_CN<bhalf,bhalf4>(hipStream_t stream, int SMs, bhalf* dx, float* dg, float* db, float* sum1, float* sum2, const bhalf* dy, const bhalf* x, const float* g, const float* b, const float* mean, const float* rstd, float epsilon, int K, int N, float rcpK, int relu); // Sparse Projection Code template <typename T, typename V, int SHFT> __global__ void __launch_bounds__(128) gather_scatter( T* Z, const T* __restrict__ X, const int* __restrict__ Lut, int K, int N) { int tid = threadIdx.x; int idx_K = blockIdx.x; int idx_N = blockIdx.y; int tidK = tid >> SHFT; int tidN = tid & ((1<<SHFT)-1); int zk = (idx_K << (7-SHFT)) + tidK; int n = (idx_N << SHFT) + tidN; if (zk < K && n < N) { int xk = load(Lut, zk); int zi = zk*N + n; int xi = xk*N + n; V x = load(X, xi, xk >= 0); store(Z, x, zi); } } template <typename T, typename V, int SHFT> __global__ void __launch_bounds__(128) scatter_add( T* Z, // large tensor const T* __restrict__ X, // large tensor const T* __restrict__ Y, // small tensor const int* __restrict__ Lut, int K, int N) { int tid = threadIdx.x; int idx_K = blockIdx.x; int idx_N = blockIdx.y; int tidK = tid >> SHFT; int tidN = tid & ((1<<SHFT)-1); int yk = (idx_K << (7-SHFT)) + tidK; int n = (idx_N << SHFT) + tidN; if (yk < K && n < N) { int xk = load(Lut, yk); int yi = yk*N + n; int xi = xk*N + n; V y = load(Y, yi); V x = load(X, xi); store(Z, ew_add(x, y), xi); } } template <typename T, typename V, int SHFT> __global__ void __launch_bounds__(128) 
scatter_mul( T* Z, // large tensor const T* __restrict__ X, // large tensor const T* __restrict__ Y, // small tensor const int* __restrict__ Lut, int K, int N) { int tid = threadIdx.x; int idx_K = blockIdx.x; int idx_N = blockIdx.y; int tidK = tid >> SHFT; int tidN = tid & ((1<<SHFT)-1); int xk = (idx_K << (7-SHFT)) + tidK; int n = (idx_N << SHFT) + tidN; if (xk < K && n < N) { int yk = load(Lut, xk); int xi = xk*N + n; int yi = yk*N + n; V x = load(X, xi); V y = load(Y, yi, yk >= 0); V z = yk >= 0 ? ew_mul(x, y) : x; // pass through if unmapped store(Z, z, xi); } } template <typename T, typename V, int SHFT> __global__ void __launch_bounds__(128) sparse_mul_grad( T* DX, // large tensor T* DY, // small tensor const T* __restrict__ DZ, // large tensor (same pointer as DX) const T* __restrict__ X, // large tensor const T* __restrict__ Y, // small tensor const int* __restrict__ Lut, int K, int N) { int tid = threadIdx.x; int idx_K = blockIdx.x; int idx_N = blockIdx.y; int tidK = tid >> SHFT; int tidN = tid & ((1<<SHFT)-1); int yk = (idx_K << (7-SHFT)) + tidK; int n = (idx_N << SHFT) + tidN; if (yk < K && n < N) { int xk = load(Lut, yk); int yi = yk*N + n; int xi = xk*N + n; V y = load(Y, yi); V x = load(X, xi); V dz = load(DZ, xi); store(DX, ew_mul(dz, y), xi); store(DY, ew_mul(dz, x), yi); } } #define OP_GAT 0 #define OP_SCT 1 #define OP_ADD 2 #define OP_MUL 3 template <typename T, typename V4, typename V8> bool SparseOp(hipStream_t stream, T* z, const T* x, const T* y, const int* lut, int op, int K, int N) { int gridN = (N >> 6) + ((N & 63) != 0); if (sizeof(T) == 2 && (N & 7) == 0) { V8* Z = ( V8*)z; const V8* X = (const V8*)x; const V8* Y = (const V8*)y; // blockK = 128 / 8 = 16 int gridK = (K >> 4) + ((K & 15) != 0); dim3 grid(gridK, gridN, 1); switch(op) { case OP_GAT:hipLaunchKernelGGL(( gather_scatter<V8,float8,3>), dim3(grid),dim3(128),0,stream, Z, X, lut, K, N>>3); break; case OP_SCT:hipLaunchKernelGGL(( gather_scatter<V8,float8,3>), 
dim3(grid),dim3(128),0,stream, Z, X, lut, K, N>>3); break; case OP_ADD: hipLaunchKernelGGL(( scatter_add<V8,float8,3>), dim3(grid),dim3(128),0,stream, Z, X, Y, lut, K, N>>3); break; case OP_MUL: hipLaunchKernelGGL(( scatter_mul<V8,float8,3>), dim3(grid),dim3(128),0,stream, Z, X, Y, lut, K, N>>3); break; } } else if ((N & 3) == 0) { V4* Z = ( V4*)z; const V4* X = (const V4*)x; const V4* Y = (const V4*)y; // blockK = 128 / 16 = 8 int gridK = (K >> 3) + ((K & 7) != 0); dim3 grid(gridK, gridN, 1); switch(op) { case OP_GAT:hipLaunchKernelGGL(( gather_scatter<V4,float4,4>), dim3(grid),dim3(128),0,stream, Z, X, lut, K, N>>2); break; case OP_SCT:hipLaunchKernelGGL(( gather_scatter<V4,float4,4>), dim3(grid),dim3(128),0,stream, Z, X, lut, K, N>>2); break; case OP_ADD: hipLaunchKernelGGL(( scatter_add<V4,float4,4>), dim3(grid),dim3(128),0,stream, Z, X, Y, lut, K, N>>2); break; case OP_MUL: hipLaunchKernelGGL(( scatter_mul<V4,float4,4>), dim3(grid),dim3(128),0,stream, Z, X, Y, lut, K, N>>2); break; } } return true; // TODO } template <typename T, typename V4, typename V8> bool SparseMulGrad(hipStream_t stream, T* dx, T* dy, const T* dz, const T* x, const T* y, const int* lut, int K, int N) { int gridN = (N >> 6) + ((N & 63) != 0); if (sizeof(T) == 2 && (N & 7) == 0) { V8* DX = ( V8*)dx; V8* DY = ( V8*)dy; const V8* DZ = (const V8*)dz; const V8* X = (const V8*)x; const V8* Y = (const V8*)y; // blockK = 128 / 8 = 16 int gridK = (K >> 4) + ((K & 15) != 0); dim3 grid(gridK, gridN, 1); hipLaunchKernelGGL(( sparse_mul_grad<V8,float8,3>), dim3(grid),dim3(128),0,stream, DX, DY, DZ, X, Y, lut, K, N>>3); } else if ((N & 3) == 0) { V4* DX = ( V4*)dx; V4* DY = ( V4*)dy; const V4* DZ = (const V4*)dz; const V4* X = (const V4*)x; const V4* Y = (const V4*)y; // blockK = 128 / 16 = 8 int gridK = (K >> 3) + ((K & 7) != 0); dim3 grid(gridK, gridN, 1); hipLaunchKernelGGL(( sparse_mul_grad<V4,float4,4>), dim3(grid),dim3(128),0,stream, DX, DY, DZ, X, Y, lut, K, N>>2); } return true; // TODO } 
template bool SparseOp<float,float4,float8>(hipStream_t stream, float* z, const float* x, const float* y, const int* lut, int op, int K, int N); template bool SparseOp<ehalf,ehalf4,ehalf8>(hipStream_t stream, ehalf* z, const ehalf* x, const ehalf* y, const int* lut, int op, int K, int N); template bool SparseOp<bhalf,bhalf4,bhalf8>(hipStream_t stream, bhalf* z, const bhalf* x, const bhalf* y, const int* lut, int op, int K, int N); template bool SparseMulGrad<float,float4,float8>(hipStream_t stream, float* dx, float* dy, const float* dz, const float* x, const float* y, const int* lut, int K, int N); template bool SparseMulGrad<ehalf,ehalf4,ehalf8>(hipStream_t stream, ehalf* dx, ehalf* dy, const ehalf* dz, const ehalf* x, const ehalf* y, const int* lut, int K, int N); template bool SparseMulGrad<bhalf,bhalf4,bhalf8>(hipStream_t stream, bhalf* dx, bhalf* dy, const bhalf* dz, const bhalf* x, const bhalf* y, const int* lut, int K, int N); #endif // GOOGLE_CUDA // cuobjdump -xelf blocksparse_ops.5.sm_60.cubin blocksparse_ops.so // cuobjdump -xelf blocksparse_ops.6.sm_61.cubin blocksparse_ops.so // nvdisasm -c -raw blocksparse_ops.5.sm_60.cubin > blocksparse_ops.5.sm_60.sass // nvdisasm -c -raw blocksparse_ops.6.sm_61.cubin > blocksparse_ops.6.sm_61.sass
8e8cfd7d9cd8caec007de5657a2bd8a3b7a9d2ff.cu
#if GOOGLE_CUDA #include "ew_op_gpu.h" //#include <stdio.h> template <typename T, typename V, uint THREADS, uint WIDTH> __global__ void __launch_bounds__(THREADS) layer_norm_moments1_CN( V* P1, V* P2, const T* __restrict__ X, uint K, uint N) { // Stripe the reduction lines with tid and block_n uint tid = threadIdx.x; uint block_n = blockIdx.x; uint block_k = blockIdx.y; uint warps = THREADS / 32; uint lines = THREADS / WIDTH; uint line = tid / WIDTH; uint n = block_n*WIDTH + (tid % WIDTH); uint k = block_k * lines + line; uint kn = k*N + n; bool bn = n < N; uint inc_k = gridDim.y * lines; uint inc_kn = inc_k*N; V mean1, mean2; ew_zero(mean1); ew_zero(mean2); #pragma unroll 1 while (k < K) { V x = load(add_ptr_u(X, kn), 0, bn); mean1 = ew_add(mean1, x); mean2 = ew_add(mean2, ew_sqr(x)); kn += inc_kn; k += inc_k; } __shared__ V sMean1[THREADS]; __shared__ V sMean2[THREADS]; sMean1[tid] = mean1; sMean2[tid] = mean2; __syncthreads(); if (tid < 32) { for (uint i = 1; i < warps; i++) mean1 = ew_add(mean1, sMean1[tid + i*32]); // if the line width is less than a warp, reduce the lines within a warp for (int i = 16; i >= WIDTH; i >>= 1) mean1 = ew_warp_sum(mean1, i); // output a partial sums if (tid < WIDTH && bn) store(add_ptr_u(P1, block_k*N + n), mean1); } else if (tid < 64) { tid -= 32; mean2 = ew_add(mean2, sMean2[tid + 0*32]); for (uint i = 2; i < warps; i++) mean2 = ew_add(mean2, sMean2[tid + i*32]); // if the line width is less than a warp, reduce the lines within a warp for (int i = 16; i >= WIDTH; i >>= 1) mean2 = ew_warp_sum(mean2, i); // output a partial sums if (tid < WIDTH && bn) store(add_ptr_u(P2, block_k*N + n), mean2); } } // Reduce partial sums __global__ void __launch_bounds__(256) layer_norm_moments2_CN( float* Mean, float* Rstd, const float* __restrict__ P1, const float* __restrict__ P2, uint nPartials, uint N, float rcpK, float epsilon) { uint tid = threadIdx.x; uint bid = blockIdx.x; // load in 8 units of n wide to allow efficient transpose in L1 
cache uint n = bid*8 + tid/32; uint k = tid & 31; uint kn = k*N + n; bool bn = n < N; // force compute outside of loop asm("mov.b32 %0, %0;" : "+r"(kn) : ); float mean1 = 0.0f, mean2 = 0.0f; // We should generally have #SMs * 2 partials. #pragma unroll 1 while (k < nPartials) { #if __CUDA_ARCH__ >= 700 const int UNROLL = 5; // 2*80 partials #else const int UNROLL = 4; // 2*56 partials #endif bool bnk[UNROLL]; bnk[0] = bn; for (int i = 1; i < UNROLL; i++) bnk[i] = bn && (k+32*i < nPartials); for (int i = 0; i < UNROLL; i++) { mean1 += load(add_ptr_u(P1, kn + N*32*i), 0, bnk[i]); mean2 += load(add_ptr_u(P2, kn + N*32*i), 0, bnk[i]); } kn += 32*UNROLL*N; k += 32*UNROLL; } for (uint i = 16; i > 0; i >>= 1) { mean1 += shfl_xor(mean1, i); mean2 += shfl_xor(mean2, i); } if (bn & (tid & 31) == 0) { // var = mean(x**2) - mean(x)**2 // rstd = 1/sqrt(var) mean1 *= rcpK; mean2 *= rcpK; float rstd = rsqrtf(precise_sub(mean2, ew_sqr(mean1)) + epsilon); store(add_ptr_u(Mean, n), mean1); store(add_ptr_u(Rstd, n), rstd); } } // xstdr = rcp(sqrt(xvar + epsilon)) // xhat = xmean * xstdr // y = xhat*g + b template <typename T, int UNROLL> __global__ void __launch_bounds__(32) layer_norm_CN( T* Y, const T* __restrict__ X, const float4* __restrict__ Mean, const float4* __restrict__ Rstd, const float* __restrict__ G, const float* __restrict__ B, int K, int N, int relu) { __shared__ float Gain[UNROLL*2]; __shared__ float Bias[UNROLL*2]; int tid = threadIdx.x; int idx_K = blockIdx.x * UNROLL*2; int idx_N = blockIdx.y * 16; // load gain/bias for this K-block int ki = idx_K + tid; if (tid < UNROLL*2 && ki < K) { Gain[tid] = G[ki]; Bias[tid] = B[ki]; } int tid16 = tid >> 4; int tid15 = tid & 15; int k = idx_K + tid16; int n = idx_N + tid15; bool bn = n < N; int xi = k*N + n; int inc = N * 2; float4 rstd = load(Rstd, n, bn); float4 mean = load(Mean, n, bn); #pragma unroll for (int j = 0; j < UNROLL; j++) { bool bnk = bn && k < K; float4 x = load(X, xi, bnk); float g = Gain[tid16]; float b = 
Bias[tid16]; // xhat = (x - mean) / sqrt(var + epsilon) // y = g * xhat + b float4 xhat = ew_mul(ew_sub(x, mean), rstd); float4 y = ew_add(ew_mul(xhat, g), b); if (relu) y = ew_relu(y); store(Y, y, xi, bnk); k += 2; tid16 += 2; xi += inc; } } template <typename T, typename V> bool LayerNormForward_CN(CUstream stream, int SMs, T* y, float* mean, float* rstd, float* p1, float* p2, const T* x, const float* g, const float* b, float epsilon, int K, int N, float rcpK, int relu) { const V* X = (const V*)x; const float4* Mean = (const float4*)mean; const float4* Rstd = (const float4*)rstd; float4* P1 = ( float4*)p1; float4* P2 = ( float4*)p2; uint gridN64 = (N >> 6) + ((N & 63) != 0); uint gridN8 = (N >> 3) + ((N & 7) != 0); uint gridK8 = (K >> 3) + ((K & 7) != 0); uint nPartials = gridN64 > 1 ? SMs : SMs*2; if (K <= 8*nPartials) layer_norm_moments1_CN<V,float4,128,16><<<dim3(gridN64, nPartials),128,0,stream>>>(P1, P2, X, K, N>>2); else layer_norm_moments1_CN<V,float4,256,16><<<dim3(gridN64, nPartials),256,0,stream>>>(P1, P2, X, K, N>>2); layer_norm_moments2_CN<<<gridN8,256,0,stream>>>(mean, rstd, p1, p2, nPartials, N, rcpK, epsilon); layer_norm_CN<V,4><<<dim3(gridK8, gridN64),32, 0,stream>>>((V*)y, X, Mean, Rstd, g, b, K, N>>2, relu); return true; // TODO } template bool LayerNormForward_CN<float,float4>(CUstream stream, int SMs, float* y, float* mean, float* rstd, float* p1, float* p2, const float* x, const float* g, const float* b, float epsilon, int K, int N, float rcpK, int relu); template bool LayerNormForward_CN<ehalf,ehalf4>(CUstream stream, int SMs, ehalf* y, float* mean, float* rstd, float* p1, float* p2, const ehalf* x, const float* g, const float* b, float epsilon, int K, int N, float rcpK, int relu); template bool LayerNormForward_CN<bhalf,bhalf4>(CUstream stream, int SMs, bhalf* y, float* mean, float* rstd, float* p1, float* p2, const bhalf* x, const float* g, const float* b, float epsilon, int K, int N, float rcpK, int relu); // dg = sum(dy * xhat(x), 
axis=1) // db = sum(dy, axis=1) template <typename T> __global__ void __launch_bounds__(128) layer_norm_dg_db_CN( float* DG, float* DB, const T* __restrict__ DY, const T* __restrict__ X, const float* __restrict__ Gain, const float* __restrict__ Bias, const float4* __restrict__ Mean, const float4* __restrict__ Rstd, int K, int N, int relu) { __shared__ float gain[8]; __shared__ float bias[8]; int tid = threadIdx.x; int idx_K = blockIdx.x * 8; // load gain/bias for this K-block int ki = idx_K + tid; if (relu && tid < 8 && ki < K) { gain[tid] = Gain[ki]; bias[tid] = Bias[ki]; } int tid16 = tid >> 4; int tid15 = tid & 15; int k = idx_K + tid16; __syncthreads(); float dg = 0.0f, db = 0.0f; if (k < K) { int N4 = N >> 2; int xi = k*N4; X += xi; DY += xi; float4 dg4, db4; ew_zero(dg4); ew_zero(db4); for (int n = tid15; n < N4; n += 16) { float4 x = load(X, n); float4 dy = load(DY, n); float4 rstd = load(Rstd, n); float4 mean = load(Mean, n); // xhat = (x - mean) * rstd float4 xhat = ew_mul(ew_sub(x, mean), rstd); if (relu) { float g = gain[tid16]; float b = bias[tid16]; dy = ew_relu_grad(dy, ew_add(ew_mul(xhat, g), b)); } dg4 = ew_add(ew_mul(dy, xhat), dg4); db4 = ew_add(dy, db4); } dg = ew_sum(dg4); db = ew_sum(db4); } // reduce each half warp for (int i = 8; i > 0; i >>= 1) { dg += shfl_xor(dg, i); db += shfl_xor(db, i); } if (k < K && tid15 == 0) { DG[k] = dg; DB[k] = db; } } // dy = dy * g // sum1 = sum(xhat * dy, axis=0) // sum2 = sum(dy, axis=0) template <typename T, uint THREADS, uint WIDTH> __global__ void __launch_bounds__(THREADS) layer_norm_dx_sum1_CN( float4* P1, float4* P2, const T* __restrict__ DY, const T* __restrict__ X, const float* __restrict__ Gain, const float* __restrict__ Bias, const float4* __restrict__ Mean, const float4* __restrict__ Rstd, int K, int N, int relu) { // Stripe the reduction lines with tid and block_n uint tid = threadIdx.x; uint block_n = blockIdx.x; uint block_k = blockIdx.y; uint warps = THREADS / 32; uint lines = THREADS / WIDTH; 
uint line = tid / WIDTH; uint n = block_n*WIDTH + (tid % WIDTH); uint k = block_k * lines + line; uint kn = k*N + n; bool bn = n < N; uint inc_k = gridDim.y * lines; uint inc_kn = inc_k*N; float4 rstd = load(Rstd, n, bn); float4 mean = load(Mean, n, bn); float4 sum1, sum2; ew_zero(sum1); ew_zero(sum2); #pragma unroll 1 while (k < K) { float4 dy = load(add_ptr_u(DY, kn), 0, bn); float4 x = load(add_ptr_u(X, kn), 0, bn); float gain = load(add_ptr_u(Gain, k), 0, bn); float bias = load(add_ptr_u(Bias, k), 0, bn && relu != 0); float4 xhat = ew_mul(ew_sub(x, mean), rstd); if (relu != 0) dy = ew_relu_grad(dy, ew_add(ew_mul(xhat, gain), bias)); dy = ew_mul(dy, gain); sum1 = ew_add(sum1, ew_mul(dy, xhat)); sum2 = ew_add(sum2, dy); kn += inc_kn; k += inc_k; } __shared__ float4 sSum1[THREADS]; __shared__ float4 sSum2[THREADS]; sSum1[tid] = sum1; sSum2[tid] = sum2; __syncthreads(); if (tid < 32) { for (uint i = 1; i < warps; i++) sum1 = ew_add(sum1, sSum1[tid + i*32]); // if the line width is less than a warp, reduce the lines within a warp for (int i = 16; i >= WIDTH; i >>= 1) sum1 = ew_warp_sum(sum1, i); // output a partial sums if (tid < WIDTH && bn) store(add_ptr_u(P1, block_k*N + n), sum1); } else if (tid < 64) { tid -= 32; sum2 = ew_add(sum2, sSum2[tid + 0*32]); for (uint i = 2; i < warps; i++) sum2 = ew_add(sum2, sSum2[tid + i*32]); // if the line width is less than a warp, reduce the lines within a warp for (int i = 16; i >= WIDTH; i >>= 1) sum2 = ew_warp_sum(sum2, i); // output a partial sums if (tid < WIDTH && bn) store(add_ptr_u(P2, block_k*N + n), sum2); } } // Reduce partial sums __global__ void __launch_bounds__(256) layer_norm_dx_sum2_CN(float* Sum1, float* Sum2, uint nPartials, uint N) { uint tid = threadIdx.x; uint bid = blockIdx.x; // load in 8 units of n wide to allow efficient transpose in L1 cache uint n = bid*8 + tid/32; uint k = tid & 31; float* Sum = Sum1; if (n >= N) { n -= N; Sum = Sum2; } uint kn = k*N + n; bool bn = n < N; // force compute outside 
of loop asm("mov.b32 %0, %0;" : "+r"(kn) : ); float sum = 0.0f; // We should generally have #SMs * 2 partials. #pragma unroll 1 while (k < nPartials) { #if __CUDA_ARCH__ >= 700 const int UNROLL = 5; // 2*80 partials #else const int UNROLL = 4; // 2*56 partials #endif bool bnk[UNROLL]; bnk[0] = bn; for (int i = 1; i < UNROLL; i++) bnk[i] = bn && (k+32*i < nPartials); for (int i = 0; i < UNROLL; i++) sum += load(add_ptr_u((const float*)Sum, kn + N*32*i), 0, bnk[i]); kn += 32*UNROLL*N; k += 32*UNROLL; } for (uint i = 16; i > 0; i >>= 1) { sum += shfl_xor(sum, i); } if (bn & (tid & 31) == 0) store(add_ptr_u(Sum, n), sum); } // dy = dy * g // dx = (dy - ((xhat * sum1 + sum2) * rcpK)) * xstdr template <typename T, int UNROLL> __global__ void __launch_bounds__(32) layer_norm_dx_CN( T* DX, const T* __restrict__ DY, const T* __restrict__ X, const float* __restrict__ Gain, const float* __restrict__ Bias, const float4* __restrict__ Mean, const float4* __restrict__ Rstd, const float4* __restrict__ Sum1, const float4* __restrict__ Sum2, int K, int N, float rcpK, int relu) { __shared__ float gain[UNROLL*2]; __shared__ float bias[UNROLL*2]; int tid = threadIdx.x; int idx_K = blockIdx.x * UNROLL*2; int idx_N = blockIdx.y * 16; // load gain/bias for this K-block int ki = idx_K + tid; if (tid < UNROLL*2 && ki < K) { gain[tid] = Gain[ki]; bias[tid] = Bias[ki]; } int tid16 = tid >> 4; int tid15 = tid & 15; int k = idx_K + tid16; int n = idx_N + tid15; int N4 = N >> 2; bool bn = n < N4; int xi = k*N4 + n; int inc = N4 * 2; float4 rstd = load(Rstd, n, bn); float4 mean = load(Mean, n, bn); float4 sum1 = load(Sum1, n, bn); float4 sum2 = load(Sum2, n, bn); #pragma unroll 4 for (int j = 0; j < UNROLL; j++) { bool bnk = bn && k < K; float4 x = load( X, xi, bnk); float4 dy = load(DY, xi, bnk); float g = gain[tid16]; float b = bias[tid16]; float4 xhat = ew_mul(ew_sub(x, mean), rstd); if (relu) dy = ew_relu_grad(dy, ew_add(ew_mul(xhat, g), b)); dy = ew_mul(dy, g); // dx = (dy - ((xhat * sum1 + 
sum2) * rcpK)) * rstd; float4 dx = ew_mul(ew_sub(dy, ew_mul(ew_add(ew_mul(xhat, sum1), sum2), rcpK)), rstd); store(DX, dx, xi, bnk); k += 2; tid16 += 2; xi += inc; } } template <typename T, typename V> bool LayerNormBackward_CN(CUstream stream, int SMs, T* dx, float* dg, float* db, float* sum1, float* sum2, const T* dy, const T* x, const float* g, const float* b, const float* mean, const float* rstd, float epsilon, int K, int N, float rcpK, int relu) { uint gridN64 = (N >> 6) + ((N & 63) != 0); uint gridN8 = (N >> 3) + ((N & 7) != 0); uint gridK8 = (K >> 3) + ((K & 7) != 0); uint nPartials = gridN64 > 1 ? SMs : SMs*2; V* DX = ( V*)dx; const V* DY = (const V*)dy; const V* X = (const V*)x; const float4* Mean = (const float4*)mean; const float4* Rstd = (const float4*)rstd; const float4* Sum1 = (const float4*)sum1; const float4* Sum2 = (const float4*)sum2; layer_norm_dg_db_CN<V><<<gridK8,128,0,stream>>>(dg, db, DY, X, g, b, Mean, Rstd, K, N, relu); if (K <= 8*nPartials) layer_norm_dx_sum1_CN<V,128,16><<<dim3(gridN64, nPartials),128,0,stream>>>((float4*)sum1, (float4*)sum2, DY, X, g, b, Mean, Rstd, K, N>>2, relu); else layer_norm_dx_sum1_CN<V,256,16><<<dim3(gridN64, nPartials),256,0,stream>>>((float4*)sum1, (float4*)sum2, DY, X, g, b, Mean, Rstd, K, N>>2, relu); layer_norm_dx_sum2_CN<<<gridN8*2,256,0,stream>>>(sum1, sum2, nPartials, N); layer_norm_dx_CN<V,4><<<dim3(gridK8, gridN64),32,0,stream>>>(DX, DY, X, g, b, Mean, Rstd, Sum1, Sum2, K, N, rcpK, relu); return true; // TODO } template bool LayerNormBackward_CN<float,float4>(CUstream stream, int SMs, float* dx, float* dg, float* db, float* sum1, float* sum2, const float* dy, const float* x, const float* g, const float* b, const float* mean, const float* rstd, float epsilon, int K, int N, float rcpK, int relu); template bool LayerNormBackward_CN<ehalf,ehalf4>(CUstream stream, int SMs, ehalf* dx, float* dg, float* db, float* sum1, float* sum2, const ehalf* dy, const ehalf* x, const float* g, const float* b, const float* 
mean, const float* rstd, float epsilon, int K, int N, float rcpK, int relu); template bool LayerNormBackward_CN<bhalf,bhalf4>(CUstream stream, int SMs, bhalf* dx, float* dg, float* db, float* sum1, float* sum2, const bhalf* dy, const bhalf* x, const float* g, const float* b, const float* mean, const float* rstd, float epsilon, int K, int N, float rcpK, int relu); // Sparse Projection Code template <typename T, typename V, int SHFT> __global__ void __launch_bounds__(128) gather_scatter( T* Z, const T* __restrict__ X, const int* __restrict__ Lut, int K, int N) { int tid = threadIdx.x; int idx_K = blockIdx.x; int idx_N = blockIdx.y; int tidK = tid >> SHFT; int tidN = tid & ((1<<SHFT)-1); int zk = (idx_K << (7-SHFT)) + tidK; int n = (idx_N << SHFT) + tidN; if (zk < K && n < N) { int xk = load(Lut, zk); int zi = zk*N + n; int xi = xk*N + n; V x = load(X, xi, xk >= 0); store(Z, x, zi); } } template <typename T, typename V, int SHFT> __global__ void __launch_bounds__(128) scatter_add( T* Z, // large tensor const T* __restrict__ X, // large tensor const T* __restrict__ Y, // small tensor const int* __restrict__ Lut, int K, int N) { int tid = threadIdx.x; int idx_K = blockIdx.x; int idx_N = blockIdx.y; int tidK = tid >> SHFT; int tidN = tid & ((1<<SHFT)-1); int yk = (idx_K << (7-SHFT)) + tidK; int n = (idx_N << SHFT) + tidN; if (yk < K && n < N) { int xk = load(Lut, yk); int yi = yk*N + n; int xi = xk*N + n; V y = load(Y, yi); V x = load(X, xi); store(Z, ew_add(x, y), xi); } } template <typename T, typename V, int SHFT> __global__ void __launch_bounds__(128) scatter_mul( T* Z, // large tensor const T* __restrict__ X, // large tensor const T* __restrict__ Y, // small tensor const int* __restrict__ Lut, int K, int N) { int tid = threadIdx.x; int idx_K = blockIdx.x; int idx_N = blockIdx.y; int tidK = tid >> SHFT; int tidN = tid & ((1<<SHFT)-1); int xk = (idx_K << (7-SHFT)) + tidK; int n = (idx_N << SHFT) + tidN; if (xk < K && n < N) { int yk = load(Lut, xk); int xi = xk*N + n; 
int yi = yk*N + n; V x = load(X, xi); V y = load(Y, yi, yk >= 0); V z = yk >= 0 ? ew_mul(x, y) : x; // pass through if unmapped store(Z, z, xi); } } template <typename T, typename V, int SHFT> __global__ void __launch_bounds__(128) sparse_mul_grad( T* DX, // large tensor T* DY, // small tensor const T* __restrict__ DZ, // large tensor (same pointer as DX) const T* __restrict__ X, // large tensor const T* __restrict__ Y, // small tensor const int* __restrict__ Lut, int K, int N) { int tid = threadIdx.x; int idx_K = blockIdx.x; int idx_N = blockIdx.y; int tidK = tid >> SHFT; int tidN = tid & ((1<<SHFT)-1); int yk = (idx_K << (7-SHFT)) + tidK; int n = (idx_N << SHFT) + tidN; if (yk < K && n < N) { int xk = load(Lut, yk); int yi = yk*N + n; int xi = xk*N + n; V y = load(Y, yi); V x = load(X, xi); V dz = load(DZ, xi); store(DX, ew_mul(dz, y), xi); store(DY, ew_mul(dz, x), yi); } } #define OP_GAT 0 #define OP_SCT 1 #define OP_ADD 2 #define OP_MUL 3 template <typename T, typename V4, typename V8> bool SparseOp(CUstream stream, T* z, const T* x, const T* y, const int* lut, int op, int K, int N) { int gridN = (N >> 6) + ((N & 63) != 0); if (sizeof(T) == 2 && (N & 7) == 0) { V8* Z = ( V8*)z; const V8* X = (const V8*)x; const V8* Y = (const V8*)y; // blockK = 128 / 8 = 16 int gridK = (K >> 4) + ((K & 15) != 0); dim3 grid(gridK, gridN, 1); switch(op) { case OP_GAT: gather_scatter<V8,float8,3><<<grid,128,0,stream>>>(Z, X, lut, K, N>>3); break; case OP_SCT: gather_scatter<V8,float8,3><<<grid,128,0,stream>>>(Z, X, lut, K, N>>3); break; case OP_ADD: scatter_add<V8,float8,3><<<grid,128,0,stream>>>(Z, X, Y, lut, K, N>>3); break; case OP_MUL: scatter_mul<V8,float8,3><<<grid,128,0,stream>>>(Z, X, Y, lut, K, N>>3); break; } } else if ((N & 3) == 0) { V4* Z = ( V4*)z; const V4* X = (const V4*)x; const V4* Y = (const V4*)y; // blockK = 128 / 16 = 8 int gridK = (K >> 3) + ((K & 7) != 0); dim3 grid(gridK, gridN, 1); switch(op) { case OP_GAT: 
gather_scatter<V4,float4,4><<<grid,128,0,stream>>>(Z, X, lut, K, N>>2); break; case OP_SCT: gather_scatter<V4,float4,4><<<grid,128,0,stream>>>(Z, X, lut, K, N>>2); break; case OP_ADD: scatter_add<V4,float4,4><<<grid,128,0,stream>>>(Z, X, Y, lut, K, N>>2); break; case OP_MUL: scatter_mul<V4,float4,4><<<grid,128,0,stream>>>(Z, X, Y, lut, K, N>>2); break; } } return true; // TODO } template <typename T, typename V4, typename V8> bool SparseMulGrad(CUstream stream, T* dx, T* dy, const T* dz, const T* x, const T* y, const int* lut, int K, int N) { int gridN = (N >> 6) + ((N & 63) != 0); if (sizeof(T) == 2 && (N & 7) == 0) { V8* DX = ( V8*)dx; V8* DY = ( V8*)dy; const V8* DZ = (const V8*)dz; const V8* X = (const V8*)x; const V8* Y = (const V8*)y; // blockK = 128 / 8 = 16 int gridK = (K >> 4) + ((K & 15) != 0); dim3 grid(gridK, gridN, 1); sparse_mul_grad<V8,float8,3><<<grid,128,0,stream>>>(DX, DY, DZ, X, Y, lut, K, N>>3); } else if ((N & 3) == 0) { V4* DX = ( V4*)dx; V4* DY = ( V4*)dy; const V4* DZ = (const V4*)dz; const V4* X = (const V4*)x; const V4* Y = (const V4*)y; // blockK = 128 / 16 = 8 int gridK = (K >> 3) + ((K & 7) != 0); dim3 grid(gridK, gridN, 1); sparse_mul_grad<V4,float4,4><<<grid,128,0,stream>>>(DX, DY, DZ, X, Y, lut, K, N>>2); } return true; // TODO } template bool SparseOp<float,float4,float8>(CUstream stream, float* z, const float* x, const float* y, const int* lut, int op, int K, int N); template bool SparseOp<ehalf,ehalf4,ehalf8>(CUstream stream, ehalf* z, const ehalf* x, const ehalf* y, const int* lut, int op, int K, int N); template bool SparseOp<bhalf,bhalf4,bhalf8>(CUstream stream, bhalf* z, const bhalf* x, const bhalf* y, const int* lut, int op, int K, int N); template bool SparseMulGrad<float,float4,float8>(CUstream stream, float* dx, float* dy, const float* dz, const float* x, const float* y, const int* lut, int K, int N); template bool SparseMulGrad<ehalf,ehalf4,ehalf8>(CUstream stream, ehalf* dx, ehalf* dy, const ehalf* dz, const ehalf* x, 
const ehalf* y, const int* lut, int K, int N); template bool SparseMulGrad<bhalf,bhalf4,bhalf8>(CUstream stream, bhalf* dx, bhalf* dy, const bhalf* dz, const bhalf* x, const bhalf* y, const int* lut, int K, int N); #endif // GOOGLE_CUDA // cuobjdump -xelf blocksparse_ops.5.sm_60.cubin blocksparse_ops.so // cuobjdump -xelf blocksparse_ops.6.sm_61.cubin blocksparse_ops.so // nvdisasm -c -raw blocksparse_ops.5.sm_60.cubin > blocksparse_ops.5.sm_60.sass // nvdisasm -c -raw blocksparse_ops.6.sm_61.cubin > blocksparse_ops.6.sm_61.sass
06149f8d7e3a972df70f4a38b95994d583bb5017.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel2_zvel_minus_2_back; int xdim0_update_halo_kernel2_zvel_minus_2_back_h = -1; __constant__ int ydim0_update_halo_kernel2_zvel_minus_2_back; int ydim0_update_halo_kernel2_zvel_minus_2_back_h = -1; __constant__ int xdim1_update_halo_kernel2_zvel_minus_2_back; int xdim1_update_halo_kernel2_zvel_minus_2_back_h = -1; __constant__ int ydim1_update_halo_kernel2_zvel_minus_2_back; int ydim1_update_halo_kernel2_zvel_minus_2_back_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x, y, z) \ (x + xdim0_update_halo_kernel2_zvel_minus_2_back * (y) + \ xdim0_update_halo_kernel2_zvel_minus_2_back * \ ydim0_update_halo_kernel2_zvel_minus_2_back * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_update_halo_kernel2_zvel_minus_2_back * (y) + \ xdim1_update_halo_kernel2_zvel_minus_2_back * \ ydim1_update_halo_kernel2_zvel_minus_2_back * (z)) // user function __device__ inline void update_halo_kernel2_zvel_minus_2_back_gpu(double *zvel0, double *zvel1, const int *fields) { if (fields[FIELD_ZVEL0] == 1) zvel0[OPS_ACC0(0, 0, 0)] = -zvel0[OPS_ACC0(0, 0, 2)]; if (fields[FIELD_ZVEL1] == 1) zvel1[OPS_ACC1(0, 0, 0)] = -zvel1[OPS_ACC1(0, 0, 2)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel2_zvel_minus_2_back( double *__restrict arg0, double *__restrict arg1, const int *__restrict arg2, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel2_zvel_minus_2_back + idx_z * 1 * 1 * xdim0_update_halo_kernel2_zvel_minus_2_back * ydim0_update_halo_kernel2_zvel_minus_2_back; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel2_zvel_minus_2_back + idx_z * 1 * 1 * xdim1_update_halo_kernel2_zvel_minus_2_back * 
ydim1_update_halo_kernel2_zvel_minus_2_back; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel2_zvel_minus_2_back_gpu(arg0, arg1, arg2); } } // host stub function void ops_par_loop_update_halo_kernel2_zvel_minus_2_back( char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { // Timing double t1, t2, c1, c2; ops_arg args[3] = {arg0, arg1, arg2}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 3, range, 102)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(102, "update_halo_kernel2_zvel_minus_2_back"); OPS_kernels[102].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel2_zvel_minus_2_back_h || ydim0 != ydim0_update_halo_kernel2_zvel_minus_2_back_h || xdim1 != xdim1_update_halo_kernel2_zvel_minus_2_back_h || ydim1 != ydim1_update_halo_kernel2_zvel_minus_2_back_h) { 
hipMemcpyToSymbol(xdim0_update_halo_kernel2_zvel_minus_2_back, &xdim0, sizeof(int)); xdim0_update_halo_kernel2_zvel_minus_2_back_h = xdim0; hipMemcpyToSymbol(ydim0_update_halo_kernel2_zvel_minus_2_back, &ydim0, sizeof(int)); ydim0_update_halo_kernel2_zvel_minus_2_back_h = ydim0; hipMemcpyToSymbol(xdim1_update_halo_kernel2_zvel_minus_2_back, &xdim1, sizeof(int)); xdim1_update_halo_kernel2_zvel_minus_2_back_h = xdim1; hipMemcpyToSymbol(ydim1_update_halo_kernel2_zvel_minus_2_back, &ydim1, sizeof(int)); ydim1_update_halo_kernel2_zvel_minus_2_back_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d = 0; d < NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[3]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) 
d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args, 3, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[102].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_update_halo_kernel2_zvel_minus_2_back), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[102].time += t1 - t2; } ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[102].mpi_time += t2 - t1; OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg1); } }
06149f8d7e3a972df70f4a38b95994d583bb5017.cu
// // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel2_zvel_minus_2_back; int xdim0_update_halo_kernel2_zvel_minus_2_back_h = -1; __constant__ int ydim0_update_halo_kernel2_zvel_minus_2_back; int ydim0_update_halo_kernel2_zvel_minus_2_back_h = -1; __constant__ int xdim1_update_halo_kernel2_zvel_minus_2_back; int xdim1_update_halo_kernel2_zvel_minus_2_back_h = -1; __constant__ int ydim1_update_halo_kernel2_zvel_minus_2_back; int ydim1_update_halo_kernel2_zvel_minus_2_back_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x, y, z) \ (x + xdim0_update_halo_kernel2_zvel_minus_2_back * (y) + \ xdim0_update_halo_kernel2_zvel_minus_2_back * \ ydim0_update_halo_kernel2_zvel_minus_2_back * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_update_halo_kernel2_zvel_minus_2_back * (y) + \ xdim1_update_halo_kernel2_zvel_minus_2_back * \ ydim1_update_halo_kernel2_zvel_minus_2_back * (z)) // user function __device__ inline void update_halo_kernel2_zvel_minus_2_back_gpu(double *zvel0, double *zvel1, const int *fields) { if (fields[FIELD_ZVEL0] == 1) zvel0[OPS_ACC0(0, 0, 0)] = -zvel0[OPS_ACC0(0, 0, 2)]; if (fields[FIELD_ZVEL1] == 1) zvel1[OPS_ACC1(0, 0, 0)] = -zvel1[OPS_ACC1(0, 0, 2)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel2_zvel_minus_2_back( double *__restrict arg0, double *__restrict arg1, const int *__restrict arg2, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel2_zvel_minus_2_back + idx_z * 1 * 1 * xdim0_update_halo_kernel2_zvel_minus_2_back * ydim0_update_halo_kernel2_zvel_minus_2_back; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel2_zvel_minus_2_back + idx_z * 1 * 1 * xdim1_update_halo_kernel2_zvel_minus_2_back * ydim1_update_halo_kernel2_zvel_minus_2_back; if (idx_x < size0 && idx_y < size1 && idx_z 
< size2) { update_halo_kernel2_zvel_minus_2_back_gpu(arg0, arg1, arg2); } } // host stub function void ops_par_loop_update_halo_kernel2_zvel_minus_2_back( char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { // Timing double t1, t2, c1, c2; ops_arg args[3] = {arg0, arg1, arg2}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 3, range, 102)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(102, "update_halo_kernel2_zvel_minus_2_back"); OPS_kernels[102].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel2_zvel_minus_2_back_h || ydim0 != ydim0_update_halo_kernel2_zvel_minus_2_back_h || xdim1 != xdim1_update_halo_kernel2_zvel_minus_2_back_h || ydim1 != ydim1_update_halo_kernel2_zvel_minus_2_back_h) { cudaMemcpyToSymbol(xdim0_update_halo_kernel2_zvel_minus_2_back, &xdim0, sizeof(int)); 
xdim0_update_halo_kernel2_zvel_minus_2_back_h = xdim0; cudaMemcpyToSymbol(ydim0_update_halo_kernel2_zvel_minus_2_back, &ydim0, sizeof(int)); ydim0_update_halo_kernel2_zvel_minus_2_back_h = ydim0; cudaMemcpyToSymbol(xdim1_update_halo_kernel2_zvel_minus_2_back, &xdim1, sizeof(int)); xdim1_update_halo_kernel2_zvel_minus_2_back_h = xdim1; cudaMemcpyToSymbol(ydim1_update_halo_kernel2_zvel_minus_2_back, &ydim1, sizeof(int)); ydim1_update_halo_kernel2_zvel_minus_2_back_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d = 0; d < NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[3]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * 
args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args, 3, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[102].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data ops_update_halo_kernel2_zvel_minus_2_back<<<grid, tblock>>>( (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[102].time += t1 - t2; } ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[102].mpi_time += t2 - t1; OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg1); } }
88982af0456eb70fc9ddc6f38f81854ded3f7da7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. 
// In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // //M*/ #include "opencv2/gpu/device/saturate_cast.hpp" #include "opencv2/gpu/device/transform.hpp" #include "opencv2/gpu/device/functional.hpp" namespace cv { namespace gpu { namespace device { template <typename T> struct shift_and_sizeof; template <> struct shift_and_sizeof<signed char> { enum { shift = 0 }; }; template <> struct shift_and_sizeof<unsigned char> { enum { shift = 0 }; }; template <> struct shift_and_sizeof<short> { enum { shift = 1 }; }; template <> struct shift_and_sizeof<unsigned short> { enum { shift = 1 }; }; template <> struct shift_and_sizeof<int> { enum { shift = 2 }; }; template <> struct shift_and_sizeof<float> { enum { shift = 2 }; }; template <> struct shift_and_sizeof<double> { enum { shift = 3 }; }; /////////////////////////////////////////////////////////////////////////// ////////////////////////////////// CopyTo ///////////////////////////////// /////////////////////////////////////////////////////////////////////////// template <typename T> void copyToWithMask(DevMem2Db src, DevMem2Db dst, int cn, DevMem2Db mask, bool colorMask, hipStream_t stream) { if (colorMask) cv::gpu::device::transform((DevMem2D_<T>)src, (DevMem2D_<T>)dst, identity<T>(), SingleMask(mask), stream); else cv::gpu::device::transform((DevMem2D_<T>)src, (DevMem2D_<T>)dst, identity<T>(), SingleMaskChannels(mask, cn), stream); } void copyToWithMask_gpu(DevMem2Db src, DevMem2Db dst, size_t elemSize1, int cn, DevMem2Db mask, bool colorMask, 
hipStream_t stream) { typedef void (*func_t)(DevMem2Db src, DevMem2Db dst, int cn, DevMem2Db mask, bool colorMask, hipStream_t stream); static func_t tab[] = { 0, copyToWithMask<unsigned char>, copyToWithMask<unsigned short>, 0, copyToWithMask<int>, 0, 0, 0, copyToWithMask<double> }; tab[elemSize1](src, dst, cn, mask, colorMask, stream); } /////////////////////////////////////////////////////////////////////////// ////////////////////////////////// SetTo ////////////////////////////////// /////////////////////////////////////////////////////////////////////////// __constant__ uchar scalar_8u[4]; __constant__ schar scalar_8s[4]; __constant__ ushort scalar_16u[4]; __constant__ short scalar_16s[4]; __constant__ int scalar_32s[4]; __constant__ float scalar_32f[4]; __constant__ double scalar_64f[4]; template <typename T> __device__ __forceinline__ T readScalar(int i); template <> __device__ __forceinline__ uchar readScalar<uchar>(int i) {return scalar_8u[i];} template <> __device__ __forceinline__ schar readScalar<schar>(int i) {return scalar_8s[i];} template <> __device__ __forceinline__ ushort readScalar<ushort>(int i) {return scalar_16u[i];} template <> __device__ __forceinline__ short readScalar<short>(int i) {return scalar_16s[i];} template <> __device__ __forceinline__ int readScalar<int>(int i) {return scalar_32s[i];} template <> __device__ __forceinline__ float readScalar<float>(int i) {return scalar_32f[i];} template <> __device__ __forceinline__ double readScalar<double>(int i) {return scalar_64f[i];} void writeScalar(const uchar* vals) { cudaSafeCall( hipMemcpyToSymbol(scalar_8u, vals, sizeof(uchar) * 4) ); } void writeScalar(const schar* vals) { cudaSafeCall( hipMemcpyToSymbol(scalar_8s, vals, sizeof(schar) * 4) ); } void writeScalar(const ushort* vals) { cudaSafeCall( hipMemcpyToSymbol(scalar_16u, vals, sizeof(ushort) * 4) ); } void writeScalar(const short* vals) { cudaSafeCall( hipMemcpyToSymbol(scalar_16s, vals, sizeof(short) * 4) ); } void 
writeScalar(const int* vals) { cudaSafeCall( hipMemcpyToSymbol(scalar_32s, vals, sizeof(int) * 4) ); } void writeScalar(const float* vals) { cudaSafeCall( hipMemcpyToSymbol(scalar_32f, vals, sizeof(float) * 4) ); } void writeScalar(const double* vals) { cudaSafeCall( hipMemcpyToSymbol(scalar_64f, vals, sizeof(double) * 4) ); } template<typename T> __global__ void set_to_without_mask(T* mat, int cols, int rows, size_t step, int channels) { size_t x = blockIdx.x * blockDim.x + threadIdx.x; size_t y = blockIdx.y * blockDim.y + threadIdx.y; if ((x < cols * channels ) && (y < rows)) { size_t idx = y * ( step >> shift_and_sizeof<T>::shift ) + x; mat[idx] = readScalar<T>(x % channels); } } template<typename T> __global__ void set_to_with_mask(T* mat, const uchar* mask, int cols, int rows, size_t step, int channels, size_t step_mask) { size_t x = blockIdx.x * blockDim.x + threadIdx.x; size_t y = blockIdx.y * blockDim.y + threadIdx.y; if ((x < cols * channels ) && (y < rows)) if (mask[y * step_mask + x / channels] != 0) { size_t idx = y * ( step >> shift_and_sizeof<T>::shift ) + x; mat[idx] = readScalar<T>(x % channels); } } template <typename T> void set_to_gpu(DevMem2Db mat, const T* scalar, DevMem2Db mask, int channels, hipStream_t stream) { writeScalar(scalar); dim3 threadsPerBlock(32, 8, 1); dim3 numBlocks (mat.cols * channels / threadsPerBlock.x + 1, mat.rows / threadsPerBlock.y + 1, 1); hipLaunchKernelGGL(( set_to_with_mask<T>), dim3(numBlocks), dim3(threadsPerBlock), 0, stream, (T*)mat.data, (uchar*)mask.data, mat.cols, mat.rows, mat.step, channels, mask.step); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall ( hipDeviceSynchronize() ); } template void set_to_gpu<uchar >(DevMem2Db mat, const uchar* scalar, DevMem2Db mask, int channels, hipStream_t stream); template void set_to_gpu<schar >(DevMem2Db mat, const schar* scalar, DevMem2Db mask, int channels, hipStream_t stream); template void set_to_gpu<ushort>(DevMem2Db mat, const ushort* scalar, 
DevMem2Db mask, int channels, hipStream_t stream); template void set_to_gpu<short >(DevMem2Db mat, const short* scalar, DevMem2Db mask, int channels, hipStream_t stream); template void set_to_gpu<int >(DevMem2Db mat, const int* scalar, DevMem2Db mask, int channels, hipStream_t stream); template void set_to_gpu<float >(DevMem2Db mat, const float* scalar, DevMem2Db mask, int channels, hipStream_t stream); template void set_to_gpu<double>(DevMem2Db mat, const double* scalar, DevMem2Db mask, int channels, hipStream_t stream); template <typename T> void set_to_gpu(DevMem2Db mat, const T* scalar, int channels, hipStream_t stream) { writeScalar(scalar); dim3 threadsPerBlock(32, 8, 1); dim3 numBlocks (mat.cols * channels / threadsPerBlock.x + 1, mat.rows / threadsPerBlock.y + 1, 1); hipLaunchKernelGGL(( set_to_without_mask<T>), dim3(numBlocks), dim3(threadsPerBlock), 0, stream, (T*)mat.data, mat.cols, mat.rows, mat.step, channels); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall ( hipDeviceSynchronize() ); } template void set_to_gpu<uchar >(DevMem2Db mat, const uchar* scalar, int channels, hipStream_t stream); template void set_to_gpu<schar >(DevMem2Db mat, const schar* scalar, int channels, hipStream_t stream); template void set_to_gpu<ushort>(DevMem2Db mat, const ushort* scalar, int channels, hipStream_t stream); template void set_to_gpu<short >(DevMem2Db mat, const short* scalar, int channels, hipStream_t stream); template void set_to_gpu<int >(DevMem2Db mat, const int* scalar, int channels, hipStream_t stream); template void set_to_gpu<float >(DevMem2Db mat, const float* scalar, int channels, hipStream_t stream); template void set_to_gpu<double>(DevMem2Db mat, const double* scalar, int channels, hipStream_t stream); /////////////////////////////////////////////////////////////////////////// //////////////////////////////// ConvertTo //////////////////////////////// /////////////////////////////////////////////////////////////////////////// template 
<typename T, typename D> struct Convertor : unary_function<T, D> { Convertor(double alpha_, double beta_) : alpha(alpha_), beta(beta_) {} __device__ __forceinline__ D operator()(const T& src) const { return saturate_cast<D>(alpha * src + beta); } double alpha, beta; }; namespace detail { template <size_t src_size, size_t dst_size, typename F> struct ConvertTraitsDispatcher : DefaultTransformFunctorTraits<F> { }; template <typename F> struct ConvertTraitsDispatcher<1, 1, F> : DefaultTransformFunctorTraits<F> { enum { smart_shift = 8 }; }; template <typename F> struct ConvertTraitsDispatcher<1, 2, F> : DefaultTransformFunctorTraits<F> { enum { smart_shift = 4 }; }; template <typename F> struct ConvertTraitsDispatcher<1, 4, F> : DefaultTransformFunctorTraits<F> { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; template <typename F> struct ConvertTraitsDispatcher<2, 2, F> : DefaultTransformFunctorTraits<F> { enum { smart_shift = 4 }; }; template <typename F> struct ConvertTraitsDispatcher<2, 4, F> : DefaultTransformFunctorTraits<F> { enum { smart_shift = 2 }; }; template <typename F> struct ConvertTraitsDispatcher<4, 2, F> : DefaultTransformFunctorTraits<F> { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; template <typename F> struct ConvertTraitsDispatcher<4, 4, F> : DefaultTransformFunctorTraits<F> { enum { smart_block_dim_y = 8 }; enum { smart_shift = 2 }; }; template <typename F> struct ConvertTraits : ConvertTraitsDispatcher<sizeof(typename F::argument_type), sizeof(typename F::result_type), F> { }; } template <typename T, typename D> struct TransformFunctorTraits< Convertor<T, D> > : detail::ConvertTraits< Convertor<T, D> > { }; template<typename T, typename D> void cvt_(DevMem2Db src, DevMem2Db dst, double alpha, double beta, hipStream_t stream) { cudaSafeCall( hipSetDoubleForDevice(&alpha) ); cudaSafeCall( hipSetDoubleForDevice(&beta) ); Convertor<T, D> op(alpha, beta); cv::gpu::device::transform((DevMem2D_<T>)src, (DevMem2D_<D>)dst, 
op, WithOutMask(), stream); } #if defined __clang__ # pragma clang diagnostic push # pragma clang diagnostic ignored "-Wmissing-declarations" #endif void convert_gpu(DevMem2Db src, int sdepth, DevMem2Db dst, int ddepth, double alpha, double beta, hipStream_t stream) { typedef void (*caller_t)(DevMem2Db src, DevMem2Db dst, double alpha, double beta, hipStream_t stream); static const caller_t tab[8][8] = { {cvt_<uchar, uchar>, cvt_<uchar, schar>, cvt_<uchar, ushort>, cvt_<uchar, short>, cvt_<uchar, int>, cvt_<uchar, float>, cvt_<uchar, double>, 0}, {cvt_<schar, uchar>, cvt_<schar, schar>, cvt_<schar, ushort>, cvt_<schar, short>, cvt_<schar, int>, cvt_<schar, float>, cvt_<schar, double>, 0}, {cvt_<ushort, uchar>, cvt_<ushort, schar>, cvt_<ushort, ushort>, cvt_<ushort, short>, cvt_<ushort, int>, cvt_<ushort, float>, cvt_<ushort, double>, 0}, {cvt_<short, uchar>, cvt_<short, schar>, cvt_<short, ushort>, cvt_<short, short>, cvt_<short, int>, cvt_<short, float>, cvt_<short, double>, 0}, {cvt_<int, uchar>, cvt_<int, schar>, cvt_<int, ushort>, cvt_<int, short>, cvt_<int, int>, cvt_<int, float>, cvt_<int, double>, 0}, {cvt_<float, uchar>, cvt_<float, schar>, cvt_<float, ushort>, cvt_<float, short>, cvt_<float, int>, cvt_<float, float>, cvt_<float, double>, 0}, {cvt_<double, uchar>, cvt_<double, schar>, cvt_<double, ushort>, cvt_<double, short>, cvt_<double, int>, cvt_<double, float>, cvt_<double, double>, 0}, {0,0,0,0,0,0,0,0} }; caller_t func = tab[sdepth][ddepth]; if (!func) cv::gpu::error("Unsupported convert operation", __FILE__, __LINE__, "convert_gpu"); func(src, dst, alpha, beta, stream); } #if defined __clang__ # pragma clang diagnostic pop #endif }}} // namespace cv { namespace gpu { namespace device
88982af0456eb70fc9ddc6f38f81854ded3f7da7.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. 
// In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // //M*/ #include "opencv2/gpu/device/saturate_cast.hpp" #include "opencv2/gpu/device/transform.hpp" #include "opencv2/gpu/device/functional.hpp" namespace cv { namespace gpu { namespace device { template <typename T> struct shift_and_sizeof; template <> struct shift_and_sizeof<signed char> { enum { shift = 0 }; }; template <> struct shift_and_sizeof<unsigned char> { enum { shift = 0 }; }; template <> struct shift_and_sizeof<short> { enum { shift = 1 }; }; template <> struct shift_and_sizeof<unsigned short> { enum { shift = 1 }; }; template <> struct shift_and_sizeof<int> { enum { shift = 2 }; }; template <> struct shift_and_sizeof<float> { enum { shift = 2 }; }; template <> struct shift_and_sizeof<double> { enum { shift = 3 }; }; /////////////////////////////////////////////////////////////////////////// ////////////////////////////////// CopyTo ///////////////////////////////// /////////////////////////////////////////////////////////////////////////// template <typename T> void copyToWithMask(DevMem2Db src, DevMem2Db dst, int cn, DevMem2Db mask, bool colorMask, cudaStream_t stream) { if (colorMask) cv::gpu::device::transform((DevMem2D_<T>)src, (DevMem2D_<T>)dst, identity<T>(), SingleMask(mask), stream); else cv::gpu::device::transform((DevMem2D_<T>)src, (DevMem2D_<T>)dst, identity<T>(), SingleMaskChannels(mask, cn), stream); } void copyToWithMask_gpu(DevMem2Db src, DevMem2Db dst, size_t elemSize1, int cn, DevMem2Db mask, bool 
colorMask, cudaStream_t stream) { typedef void (*func_t)(DevMem2Db src, DevMem2Db dst, int cn, DevMem2Db mask, bool colorMask, cudaStream_t stream); static func_t tab[] = { 0, copyToWithMask<unsigned char>, copyToWithMask<unsigned short>, 0, copyToWithMask<int>, 0, 0, 0, copyToWithMask<double> }; tab[elemSize1](src, dst, cn, mask, colorMask, stream); } /////////////////////////////////////////////////////////////////////////// ////////////////////////////////// SetTo ////////////////////////////////// /////////////////////////////////////////////////////////////////////////// __constant__ uchar scalar_8u[4]; __constant__ schar scalar_8s[4]; __constant__ ushort scalar_16u[4]; __constant__ short scalar_16s[4]; __constant__ int scalar_32s[4]; __constant__ float scalar_32f[4]; __constant__ double scalar_64f[4]; template <typename T> __device__ __forceinline__ T readScalar(int i); template <> __device__ __forceinline__ uchar readScalar<uchar>(int i) {return scalar_8u[i];} template <> __device__ __forceinline__ schar readScalar<schar>(int i) {return scalar_8s[i];} template <> __device__ __forceinline__ ushort readScalar<ushort>(int i) {return scalar_16u[i];} template <> __device__ __forceinline__ short readScalar<short>(int i) {return scalar_16s[i];} template <> __device__ __forceinline__ int readScalar<int>(int i) {return scalar_32s[i];} template <> __device__ __forceinline__ float readScalar<float>(int i) {return scalar_32f[i];} template <> __device__ __forceinline__ double readScalar<double>(int i) {return scalar_64f[i];} void writeScalar(const uchar* vals) { cudaSafeCall( cudaMemcpyToSymbol(scalar_8u, vals, sizeof(uchar) * 4) ); } void writeScalar(const schar* vals) { cudaSafeCall( cudaMemcpyToSymbol(scalar_8s, vals, sizeof(schar) * 4) ); } void writeScalar(const ushort* vals) { cudaSafeCall( cudaMemcpyToSymbol(scalar_16u, vals, sizeof(ushort) * 4) ); } void writeScalar(const short* vals) { cudaSafeCall( cudaMemcpyToSymbol(scalar_16s, vals, sizeof(short) * 4) ); } 
void writeScalar(const int* vals) { cudaSafeCall( cudaMemcpyToSymbol(scalar_32s, vals, sizeof(int) * 4) ); } void writeScalar(const float* vals) { cudaSafeCall( cudaMemcpyToSymbol(scalar_32f, vals, sizeof(float) * 4) ); } void writeScalar(const double* vals) { cudaSafeCall( cudaMemcpyToSymbol(scalar_64f, vals, sizeof(double) * 4) ); } template<typename T> __global__ void set_to_without_mask(T* mat, int cols, int rows, size_t step, int channels) { size_t x = blockIdx.x * blockDim.x + threadIdx.x; size_t y = blockIdx.y * blockDim.y + threadIdx.y; if ((x < cols * channels ) && (y < rows)) { size_t idx = y * ( step >> shift_and_sizeof<T>::shift ) + x; mat[idx] = readScalar<T>(x % channels); } } template<typename T> __global__ void set_to_with_mask(T* mat, const uchar* mask, int cols, int rows, size_t step, int channels, size_t step_mask) { size_t x = blockIdx.x * blockDim.x + threadIdx.x; size_t y = blockIdx.y * blockDim.y + threadIdx.y; if ((x < cols * channels ) && (y < rows)) if (mask[y * step_mask + x / channels] != 0) { size_t idx = y * ( step >> shift_and_sizeof<T>::shift ) + x; mat[idx] = readScalar<T>(x % channels); } } template <typename T> void set_to_gpu(DevMem2Db mat, const T* scalar, DevMem2Db mask, int channels, cudaStream_t stream) { writeScalar(scalar); dim3 threadsPerBlock(32, 8, 1); dim3 numBlocks (mat.cols * channels / threadsPerBlock.x + 1, mat.rows / threadsPerBlock.y + 1, 1); set_to_with_mask<T><<<numBlocks, threadsPerBlock, 0, stream>>>((T*)mat.data, (uchar*)mask.data, mat.cols, mat.rows, mat.step, channels, mask.step); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall ( cudaDeviceSynchronize() ); } template void set_to_gpu<uchar >(DevMem2Db mat, const uchar* scalar, DevMem2Db mask, int channels, cudaStream_t stream); template void set_to_gpu<schar >(DevMem2Db mat, const schar* scalar, DevMem2Db mask, int channels, cudaStream_t stream); template void set_to_gpu<ushort>(DevMem2Db mat, const ushort* scalar, DevMem2Db mask, int 
channels, cudaStream_t stream); template void set_to_gpu<short >(DevMem2Db mat, const short* scalar, DevMem2Db mask, int channels, cudaStream_t stream); template void set_to_gpu<int >(DevMem2Db mat, const int* scalar, DevMem2Db mask, int channels, cudaStream_t stream); template void set_to_gpu<float >(DevMem2Db mat, const float* scalar, DevMem2Db mask, int channels, cudaStream_t stream); template void set_to_gpu<double>(DevMem2Db mat, const double* scalar, DevMem2Db mask, int channels, cudaStream_t stream); template <typename T> void set_to_gpu(DevMem2Db mat, const T* scalar, int channels, cudaStream_t stream) { writeScalar(scalar); dim3 threadsPerBlock(32, 8, 1); dim3 numBlocks (mat.cols * channels / threadsPerBlock.x + 1, mat.rows / threadsPerBlock.y + 1, 1); set_to_without_mask<T><<<numBlocks, threadsPerBlock, 0, stream>>>((T*)mat.data, mat.cols, mat.rows, mat.step, channels); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall ( cudaDeviceSynchronize() ); } template void set_to_gpu<uchar >(DevMem2Db mat, const uchar* scalar, int channels, cudaStream_t stream); template void set_to_gpu<schar >(DevMem2Db mat, const schar* scalar, int channels, cudaStream_t stream); template void set_to_gpu<ushort>(DevMem2Db mat, const ushort* scalar, int channels, cudaStream_t stream); template void set_to_gpu<short >(DevMem2Db mat, const short* scalar, int channels, cudaStream_t stream); template void set_to_gpu<int >(DevMem2Db mat, const int* scalar, int channels, cudaStream_t stream); template void set_to_gpu<float >(DevMem2Db mat, const float* scalar, int channels, cudaStream_t stream); template void set_to_gpu<double>(DevMem2Db mat, const double* scalar, int channels, cudaStream_t stream); /////////////////////////////////////////////////////////////////////////// //////////////////////////////// ConvertTo //////////////////////////////// /////////////////////////////////////////////////////////////////////////// template <typename T, typename D> struct 
Convertor : unary_function<T, D> { Convertor(double alpha_, double beta_) : alpha(alpha_), beta(beta_) {} __device__ __forceinline__ D operator()(const T& src) const { return saturate_cast<D>(alpha * src + beta); } double alpha, beta; }; namespace detail { template <size_t src_size, size_t dst_size, typename F> struct ConvertTraitsDispatcher : DefaultTransformFunctorTraits<F> { }; template <typename F> struct ConvertTraitsDispatcher<1, 1, F> : DefaultTransformFunctorTraits<F> { enum { smart_shift = 8 }; }; template <typename F> struct ConvertTraitsDispatcher<1, 2, F> : DefaultTransformFunctorTraits<F> { enum { smart_shift = 4 }; }; template <typename F> struct ConvertTraitsDispatcher<1, 4, F> : DefaultTransformFunctorTraits<F> { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; template <typename F> struct ConvertTraitsDispatcher<2, 2, F> : DefaultTransformFunctorTraits<F> { enum { smart_shift = 4 }; }; template <typename F> struct ConvertTraitsDispatcher<2, 4, F> : DefaultTransformFunctorTraits<F> { enum { smart_shift = 2 }; }; template <typename F> struct ConvertTraitsDispatcher<4, 2, F> : DefaultTransformFunctorTraits<F> { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; template <typename F> struct ConvertTraitsDispatcher<4, 4, F> : DefaultTransformFunctorTraits<F> { enum { smart_block_dim_y = 8 }; enum { smart_shift = 2 }; }; template <typename F> struct ConvertTraits : ConvertTraitsDispatcher<sizeof(typename F::argument_type), sizeof(typename F::result_type), F> { }; } template <typename T, typename D> struct TransformFunctorTraits< Convertor<T, D> > : detail::ConvertTraits< Convertor<T, D> > { }; template<typename T, typename D> void cvt_(DevMem2Db src, DevMem2Db dst, double alpha, double beta, cudaStream_t stream) { cudaSafeCall( cudaSetDoubleForDevice(&alpha) ); cudaSafeCall( cudaSetDoubleForDevice(&beta) ); Convertor<T, D> op(alpha, beta); cv::gpu::device::transform((DevMem2D_<T>)src, (DevMem2D_<D>)dst, op, WithOutMask(), stream); } 
#if defined __clang__ # pragma clang diagnostic push # pragma clang diagnostic ignored "-Wmissing-declarations" #endif void convert_gpu(DevMem2Db src, int sdepth, DevMem2Db dst, int ddepth, double alpha, double beta, cudaStream_t stream) { typedef void (*caller_t)(DevMem2Db src, DevMem2Db dst, double alpha, double beta, cudaStream_t stream); static const caller_t tab[8][8] = { {cvt_<uchar, uchar>, cvt_<uchar, schar>, cvt_<uchar, ushort>, cvt_<uchar, short>, cvt_<uchar, int>, cvt_<uchar, float>, cvt_<uchar, double>, 0}, {cvt_<schar, uchar>, cvt_<schar, schar>, cvt_<schar, ushort>, cvt_<schar, short>, cvt_<schar, int>, cvt_<schar, float>, cvt_<schar, double>, 0}, {cvt_<ushort, uchar>, cvt_<ushort, schar>, cvt_<ushort, ushort>, cvt_<ushort, short>, cvt_<ushort, int>, cvt_<ushort, float>, cvt_<ushort, double>, 0}, {cvt_<short, uchar>, cvt_<short, schar>, cvt_<short, ushort>, cvt_<short, short>, cvt_<short, int>, cvt_<short, float>, cvt_<short, double>, 0}, {cvt_<int, uchar>, cvt_<int, schar>, cvt_<int, ushort>, cvt_<int, short>, cvt_<int, int>, cvt_<int, float>, cvt_<int, double>, 0}, {cvt_<float, uchar>, cvt_<float, schar>, cvt_<float, ushort>, cvt_<float, short>, cvt_<float, int>, cvt_<float, float>, cvt_<float, double>, 0}, {cvt_<double, uchar>, cvt_<double, schar>, cvt_<double, ushort>, cvt_<double, short>, cvt_<double, int>, cvt_<double, float>, cvt_<double, double>, 0}, {0,0,0,0,0,0,0,0} }; caller_t func = tab[sdepth][ddepth]; if (!func) cv::gpu::error("Unsupported convert operation", __FILE__, __LINE__, "convert_gpu"); func(src, dst, alpha, beta, stream); } #if defined __clang__ # pragma clang diagnostic pop #endif }}} // namespace cv { namespace gpu { namespace device
21b493d385ac57aa6778fb8ec072b7197865d622.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #include <iostream> #include <opencv2/core/core.hpp> #include <opencv2/highgui/highgui.hpp> #include "opencv2/imgproc/imgproc.hpp" using namespace std; using namespace cv; __global__ void rgba_to_greyscale(uchar3 *rgbaImage, unsigned char* d_grayscale, int numRows, int numCols) { //printf("%u",rgbaImage[threadIdx.x]); int absolute_position_x = (blockIdx.x * blockDim.x) + threadIdx.x; int absolute_position_y = (blockIdx.y * blockDim.y) + threadIdx.y; int Id = absolute_position_x + absolute_position_y*numCols; if ( absolute_position_x >= numCols || absolute_position_y >= numRows ) { return; } uchar3 rgba = rgbaImage[Id]; float channelSum = 0.299f * rgba.x + 0.587f * rgba.y + 0.114f * rgba.z; d_grayscale[Id] = channelSum; } int main() { Mat input,input_gray; input = imread("/home/jeetkanjani7/scene.jpg",-1); cout<<"total: "<<input.total(); uchar3 *d_inputImage = new uchar3[input.total()]; unsigned char *d_grayscale; unsigned char *h_grayscale = (unsigned char*)malloc(input.total()); hipMalloc((void**)&d_grayscale,input.total()); hipMalloc((void**)&d_inputImage,input.total()*sizeof(uchar3)); hipMemcpy(d_inputImage, input.data,input.total()*sizeof(uchar3), hipMemcpyHostToDevice); int numrows = input.rows; int numcols = input.cols; const dim3 blockSize(16, 16, 1); //TODO const dim3 gridSize((numcols/blockSize.x),(numrows/blockSize.y),1); //TODO cout<<"\ngridSize::"<<gridSize.x<<" "<<gridSize.y; cout<<"\nBlockSize::"<<blockSize.x<<" "<<blockSize.y<<endl; hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImage, d_grayscale, numrows, numcols); hipDeviceSynchronize(); hipMemcpy(h_grayscale, d_grayscale,input.total(), hipMemcpyDeviceToHost); cout<<static_cast<int>(h_grayscale[7])<<endl; Mat img(numrows,numcols,CV_8UC1,h_grayscale,cv::Mat::AUTO_STEP); imshow("rgb",img); cout<<"image dims: "<<img.size(); hipDeviceSynchronize(); 
printf("That's all!\n"); waitKey(0); return 0; }
21b493d385ac57aa6778fb8ec072b7197865d622.cu
#include <stdio.h> #include <cuda_runtime.h> #include <iostream> #include <opencv2/core/core.hpp> #include <opencv2/highgui/highgui.hpp> #include "opencv2/imgproc/imgproc.hpp" using namespace std; using namespace cv; __global__ void rgba_to_greyscale(uchar3 *rgbaImage, unsigned char* d_grayscale, int numRows, int numCols) { //printf("%u",rgbaImage[threadIdx.x]); int absolute_position_x = (blockIdx.x * blockDim.x) + threadIdx.x; int absolute_position_y = (blockIdx.y * blockDim.y) + threadIdx.y; int Id = absolute_position_x + absolute_position_y*numCols; if ( absolute_position_x >= numCols || absolute_position_y >= numRows ) { return; } uchar3 rgba = rgbaImage[Id]; float channelSum = 0.299f * rgba.x + 0.587f * rgba.y + 0.114f * rgba.z; d_grayscale[Id] = channelSum; } int main() { Mat input,input_gray; input = imread("/home/jeetkanjani7/scene.jpg",-1); cout<<"total: "<<input.total(); uchar3 *d_inputImage = new uchar3[input.total()]; unsigned char *d_grayscale; unsigned char *h_grayscale = (unsigned char*)malloc(input.total()); cudaMalloc((void**)&d_grayscale,input.total()); cudaMalloc((void**)&d_inputImage,input.total()*sizeof(uchar3)); cudaMemcpy(d_inputImage, input.data,input.total()*sizeof(uchar3), cudaMemcpyHostToDevice); int numrows = input.rows; int numcols = input.cols; const dim3 blockSize(16, 16, 1); //TODO const dim3 gridSize((numcols/blockSize.x),(numrows/blockSize.y),1); //TODO cout<<"\ngridSize::"<<gridSize.x<<" "<<gridSize.y; cout<<"\nBlockSize::"<<blockSize.x<<" "<<blockSize.y<<endl; rgba_to_greyscale<<<gridSize, blockSize>>>(d_inputImage, d_grayscale, numrows, numcols); cudaThreadSynchronize(); cudaMemcpy(h_grayscale, d_grayscale,input.total(), cudaMemcpyDeviceToHost); cout<<static_cast<int>(h_grayscale[7])<<endl; Mat img(numrows,numcols,CV_8UC1,h_grayscale,cv::Mat::AUTO_STEP); imshow("rgb",img); cout<<"image dims: "<<img.size(); cudaDeviceSynchronize(); printf("That's all!\n"); waitKey(0); return 0; }
078b6338406b2687af1b1e9127c384d23243628b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> __device__ static void TI(int *input,struct axon *neuro, unsigned char *spike,struct neuron_I *Ix, int number) { // float C=20; float k=0.5; float vr=-60; float vt=-50; float G_up=5; float G_down=5; float a=0.05; float b=7; float c=-65; float d=50; float v_peak=20; float I; float v=neuro[number].v; float u=neuro[number].u; I=Ix[number].I; //Izhikevich model v=v+tau*(k*(v-vr)*(v-vt)-u+I)/C; u=u+tau*a*(b*(v-vr)-u); spike[number]=0; if(v>v_peak) { v=c; u=u+d; spike[number]=1; } u=fmin(530,u); neuro[number].v=v; neuro[number].u=u; Ix[number].I=0; } __global__ static void TI_neuron(int *input,struct axon *neuro, unsigned char *spike,struct neuron_I *Ix, int *boxnum, int *THREAD_NUM, int *BLOCK_NUM) { const int tid = threadIdx.x; const int bid = blockIdx.x; int number=(THREAD_NUM[0]*BLOCK_NUM[0]+THREAD_NUM[1]*BLOCK_NUM[1]+THREAD_NUM[2]*BLOCK_NUM[2])*10+(bid * THREAD_NUM[3] + tid)*10; /*****************/ if((number+0)<=boxnum[2]) {TI(input,neuro,spike,Ix,number+0);} /****************/ if((number+1)<=boxnum[2]) {TI(input,neuro,spike,Ix,number+1);} /****************/ if((number+2)<=boxnum[2]) {TI(input,neuro,spike,Ix,number+2);} /*****************/ if((number+3)<=boxnum[2]) {TI(input,neuro,spike,Ix,number+3);} /*****************/ if((number+4)<=boxnum[2]) {TI(input,neuro,spike,Ix,number+4);} /*****************/ if((number+5)<=boxnum[2]) {TI(input,neuro,spike,Ix,number+5);} /****************/ if((number+6)<=boxnum[2]) {TI(input,neuro,spike,Ix,number+6);} /*****************/ if((number+7)<=boxnum[2]) {TI(input,neuro,spike,Ix,number+7);} /*****************/ if((number+8)<=boxnum[2]) {TI(input,neuro,spike,Ix,number+8);} /*****************/ if((number+9)<=boxnum[2]) {TI(input,neuro,spike,Ix,number+9);} }
078b6338406b2687af1b1e9127c384d23243628b.cu
#include "cuda_runtime.h" #include <stdio.h> __device__ static void TI(int *input,struct axon *neuro, unsigned char *spike,struct neuron_I *Ix, int number) { //设置神经元计算参数 float C=20; float k=0.5; float vr=-60; float vt=-50; float G_up=5; float G_down=5; float a=0.05; float b=7; float c=-65; float d=50; float v_peak=20; float I; float v=neuro[number].v; float u=neuro[number].u; I=Ix[number].I; //Izhikevich model v=v+tau*(k*(v-vr)*(v-vt)-u+I)/C; u=u+tau*a*(b*(v-vr)-u); spike[number]=0; if(v>v_peak) { v=c; u=u+d; spike[number]=1; } u=fmin(530,u); neuro[number].v=v; neuro[number].u=u; Ix[number].I=0; } __global__ static void TI_neuron(int *input,struct axon *neuro, unsigned char *spike,struct neuron_I *Ix, int *boxnum, int *THREAD_NUM, int *BLOCK_NUM) { const int tid = threadIdx.x; const int bid = blockIdx.x; int number=(THREAD_NUM[0]*BLOCK_NUM[0]+THREAD_NUM[1]*BLOCK_NUM[1]+THREAD_NUM[2]*BLOCK_NUM[2])*10+(bid * THREAD_NUM[3] + tid)*10; /********第一个神经元虚拟计算内核*********/ if((number+0)<=boxnum[2]) {TI(input,neuro,spike,Ix,number+0);} /********第二个神经元虚拟计算内核********/ if((number+1)<=boxnum[2]) {TI(input,neuro,spike,Ix,number+1);} /********第三个神经元虚拟计算内核********/ if((number+2)<=boxnum[2]) {TI(input,neuro,spike,Ix,number+2);} /********第四个神经元虚拟计算内核*********/ if((number+3)<=boxnum[2]) {TI(input,neuro,spike,Ix,number+3);} /********第五个神经元虚拟计算内核*********/ if((number+4)<=boxnum[2]) {TI(input,neuro,spike,Ix,number+4);} /********第六个神经元虚拟计算内核*********/ if((number+5)<=boxnum[2]) {TI(input,neuro,spike,Ix,number+5);} /********第七个神经元虚拟计算内核********/ if((number+6)<=boxnum[2]) {TI(input,neuro,spike,Ix,number+6);} /********第八个神经元虚拟计算内核*********/ if((number+7)<=boxnum[2]) {TI(input,neuro,spike,Ix,number+7);} /********第九个神经元虚拟计算内核*********/ if((number+8)<=boxnum[2]) {TI(input,neuro,spike,Ix,number+8);} /********第十个神经元虚拟计算内核*********/ if((number+9)<=boxnum[2]) {TI(input,neuro,spike,Ix,number+9);} }
CGPU_Decoder_2NMS.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * ldcp_decoder.h * ldpc3 * * Created by legal on 02/04/11. * Copyright 2011 ENSEIRB. All rights reserved. * */ /*----------------------------------------------------------------------------*/ #include "CGPU_Decoder_2NMS.h" #include "../transpose/GPU_Transpose.h" #include "./cuda/CUDA_2NMS.h" CGPU_Decoder_2NMS::CGPU_Decoder_2NMS(int _nb_frames, int block_size, unsigned int n, unsigned int k, unsigned int m): CGPUDecoder(_nb_frames, block_size, n, k, m) { BLOCK_SIZE = block_size; size_t nb_blocks = nb_frames / BLOCK_SIZE; printf("(II) Decoder configuration: BLOCK_SIZE = %ld, nb_frames = %ld, nb_blocks = %ld\n", BLOCK_SIZE, nb_frames, nb_blocks); struct hipDeviceProp_t devProp; hipGetDeviceProperties(&devProp, 0); struct hipFuncAttributes attr; hipFuncGetAttributes(&attr, Horiz_layered_LDPC_decoder_2NMS); int nMP = devProp.multiProcessorCount; // NOMBRE DE STREAM PROCESSOR int nWarp = attr.maxThreadsPerBlock/32; // PACKET DE THREADs EXECUTABLES EN PARALLELE int nThreads = nWarp * 32; // NOMBRE DE THREAD MAXI PAR SP int nDOF = nb_frames; int nBperMP = 65536 / (attr.numRegs); // Nr of blocks per MP int minB = min(nBperMP*nThreads,1024); int nBlocks = max(minB/nThreads * nMP, nDOF/nThreads); //Total number of blocks printf("(II) Nombre de Warp : %d\n", nWarp); printf("(II) Nombre de Threads : %d\n", nThreads); printf("(II) LDPC_Sched_Stage_1_MS (PTX version %d)\n", attr.ptxVersion); printf("(II) - Nombre de regist/thr : %d\n", attr.numRegs); printf("(II) - Nombre de local/thr : %ld\n", attr.localSizeBytes); printf("(II) - Nombre de shared/thr : %ld\n", attr.sharedSizeBytes); printf("(II) - Nombre de pBLOCKs : %f\n", (float)nb_frames / (float)BLOCK_SIZE); printf("(II) - Nombre de pBLOCKs/uP : %f\n", (float)nb_frames / (float)BLOCK_SIZE / (float)devProp.multiProcessorCount); } CGPU_Decoder_2NMS::~CGPU_Decoder_2NMS() { } void CGPU_Decoder_2NMS::initialize() { } void 
CGPU_Decoder_2NMS::decode(float Intrinsic_fix[_N], int Rprime_fix[_N], int nombre_iterations) { hipError_t Status; int nb_blocks = nb_frames / BLOCK_SIZE; Status = hipMemcpy/*Async*/(d_MSG_C_2_V, Intrinsic_fix, sz_nodes * sizeof(float), hipMemcpyHostToDevice); ERROR_CHECK(Status, __FILE__, __LINE__); { // ORDERING THE LDPC CODEWORDS FOR DECODING (INTERLEAVING DATA) unsigned int NB_TRAMES = _N; unsigned int FRAME_LENGTH = nb_frames; dim3 grid(NB_TRAMES/TILE_DIM, FRAME_LENGTH/TILE_DIM), threads(TILE_DIM,BLOCK_ROWS); hipLaunchKernelGGL(( transposeDiagonal), dim3(grid), dim3(threads), 0, 0, (float*)device_V, (float*)d_MSG_C_2_V, _N, nb_frames); } hipLaunchKernelGGL(( Horiz_layered_LDPC_decoder_2NMS), dim3(nb_blocks), dim3(BLOCK_SIZE), 0, 0, device_V, d_MSG_C_2_V, d_transpose, nombre_iterations); { // REORDERING THE LDPC CODEWORDS unsigned int NB_TRAMES = nb_frames; unsigned int FRAME_LENGTH = _N; dim3 grid(NB_TRAMES/TILE_DIM, FRAME_LENGTH/TILE_DIM), threads(TILE_DIM,BLOCK_ROWS); hipLaunchKernelGGL(( transposeDiagonal_and_hard_decision), dim3(grid), dim3(threads), 0, 0, (float*)d_MSG_C_2_V, (float*)device_V, NB_TRAMES, FRAME_LENGTH); } Status = hipMemcpy(Rprime_fix, d_MSG_C_2_V, sz_nodes * sizeof(float), hipMemcpyDeviceToHost); ERROR_CHECK(Status, __FILE__, __LINE__); }
CGPU_Decoder_2NMS.cu
/* * ldcp_decoder.h * ldpc3 * * Created by legal on 02/04/11. * Copyright 2011 ENSEIRB. All rights reserved. * */ /*----------------------------------------------------------------------------*/ #include "CGPU_Decoder_2NMS.h" #include "../transpose/GPU_Transpose.h" #include "./cuda/CUDA_2NMS.h" CGPU_Decoder_2NMS::CGPU_Decoder_2NMS(int _nb_frames, int block_size, unsigned int n, unsigned int k, unsigned int m): CGPUDecoder(_nb_frames, block_size, n, k, m) { BLOCK_SIZE = block_size; size_t nb_blocks = nb_frames / BLOCK_SIZE; printf("(II) Decoder configuration: BLOCK_SIZE = %ld, nb_frames = %ld, nb_blocks = %ld\n", BLOCK_SIZE, nb_frames, nb_blocks); struct cudaDeviceProp devProp; cudaGetDeviceProperties(&devProp, 0); struct cudaFuncAttributes attr; cudaFuncGetAttributes(&attr, Horiz_layered_LDPC_decoder_2NMS); int nMP = devProp.multiProcessorCount; // NOMBRE DE STREAM PROCESSOR int nWarp = attr.maxThreadsPerBlock/32; // PACKET DE THREADs EXECUTABLES EN PARALLELE int nThreads = nWarp * 32; // NOMBRE DE THREAD MAXI PAR SP int nDOF = nb_frames; int nBperMP = 65536 / (attr.numRegs); // Nr of blocks per MP int minB = min(nBperMP*nThreads,1024); int nBlocks = max(minB/nThreads * nMP, nDOF/nThreads); //Total number of blocks printf("(II) Nombre de Warp : %d\n", nWarp); printf("(II) Nombre de Threads : %d\n", nThreads); printf("(II) LDPC_Sched_Stage_1_MS (PTX version %d)\n", attr.ptxVersion); printf("(II) - Nombre de regist/thr : %d\n", attr.numRegs); printf("(II) - Nombre de local/thr : %ld\n", attr.localSizeBytes); printf("(II) - Nombre de shared/thr : %ld\n", attr.sharedSizeBytes); printf("(II) - Nombre de pBLOCKs : %f\n", (float)nb_frames / (float)BLOCK_SIZE); printf("(II) - Nombre de pBLOCKs/uP : %f\n", (float)nb_frames / (float)BLOCK_SIZE / (float)devProp.multiProcessorCount); } CGPU_Decoder_2NMS::~CGPU_Decoder_2NMS() { } void CGPU_Decoder_2NMS::initialize() { } void CGPU_Decoder_2NMS::decode(float Intrinsic_fix[_N], int Rprime_fix[_N], int nombre_iterations) { 
cudaError_t Status; int nb_blocks = nb_frames / BLOCK_SIZE; Status = cudaMemcpy/*Async*/(d_MSG_C_2_V, Intrinsic_fix, sz_nodes * sizeof(float), cudaMemcpyHostToDevice); ERROR_CHECK(Status, __FILE__, __LINE__); { // ORDERING THE LDPC CODEWORDS FOR DECODING (INTERLEAVING DATA) unsigned int NB_TRAMES = _N; unsigned int FRAME_LENGTH = nb_frames; dim3 grid(NB_TRAMES/TILE_DIM, FRAME_LENGTH/TILE_DIM), threads(TILE_DIM,BLOCK_ROWS); transposeDiagonal<<<grid, threads>>>((float*)device_V, (float*)d_MSG_C_2_V, _N, nb_frames); } Horiz_layered_LDPC_decoder_2NMS<<<nb_blocks, BLOCK_SIZE>>>(device_V, d_MSG_C_2_V, d_transpose, nombre_iterations); { // REORDERING THE LDPC CODEWORDS unsigned int NB_TRAMES = nb_frames; unsigned int FRAME_LENGTH = _N; dim3 grid(NB_TRAMES/TILE_DIM, FRAME_LENGTH/TILE_DIM), threads(TILE_DIM,BLOCK_ROWS); transposeDiagonal_and_hard_decision<<<grid, threads>>>((float*)d_MSG_C_2_V, (float*)device_V, NB_TRAMES, FRAME_LENGTH); } Status = cudaMemcpy(Rprime_fix, d_MSG_C_2_V, sz_nodes * sizeof(float), cudaMemcpyDeviceToHost); ERROR_CHECK(Status, __FILE__, __LINE__); }
de234928b87e7ce84a738a90943f12351748f483.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author sgazeos@gmail.com // #include <system/op_boilerplate.h> #include <array/NDArray.h> namespace sd { namespace ops { namespace helpers { typedef NDArray ColorTable_t; static NDArray DefaultColorTable(int depth, sd::LaunchContext* context) { //std::vector<std::vector<float>> colorTable; const Nd4jLong kDefaultTableLength = 10; const Nd4jLong kDefaultChannelLength = 4; NDArray colorTable('c', {kDefaultTableLength, kDefaultChannelLength}, { 1,1,0,1, // yellow 0, 0, 1, 1, // 1: blue 1, 0, 0, 1, // 2: red 0, 1, 0, 1, // 3: lime 0.5, 0, 0.5, 1, // 4: purple 0.5, 0.5, 0, 1, // 5: olive 0.5, 0, 0, 1, // 6: maroon 0, 0, 0.5, 1, // 7: navy blue 0, 1, 1, 1, // 8: aqua 1, 0, 1, 1 // 9: fuchsia }, DataType::FLOAT32, context); if (depth == 1) { colorTable.assign(1.f); // all to white when black and white colors } return colorTable; } template <typename T> static __global__ void drawBoundingBoxesKernel(T const* images, const Nd4jLong* imagesShape, float const* boxes, const Nd4jLong* boxesShape, float const* colorTable, const Nd4jLong* colorTableShape, T* output, const Nd4jLong* 
outputShape, Nd4jLong batchSize, Nd4jLong width, Nd4jLong height, Nd4jLong channels, Nd4jLong boxSize, Nd4jLong colorTableLen) { for (auto batch = blockIdx.x; batch < (int)batchSize; batch += gridDim.x) { // loop by batch for (auto boxIndex = 0; boxIndex < boxSize; ++boxIndex) { // box with shape //auto internalBox = &boxes[b * colorSetSize * 4 + c * 4];//(*boxes)(b, {0})(c, {0});//internalBoxes->at(c); auto colorIndex = boxIndex % colorTableLen;//colorSet->at(c); // auto rowStart = sd::math::nd4j_max(Nd4jLong (0), Nd4jLong ((height - 1) * internalBox[0])); // auto rowEnd = sd::math::nd4j_min(Nd4jLong (height - 1), Nd4jLong ((height - 1) * internalBox[2])); // auto colStart = sd::math::nd4j_max(Nd4jLong (0), Nd4jLong ((width - 1) * internalBox[1])); // auto colEnd = sd::math::nd4j_min(Nd4jLong(width - 1), Nd4jLong ((width - 1) * internalBox[3])); Nd4jLong indices0[] = {batch, boxIndex, 0}; Nd4jLong indices1[] = {batch, boxIndex, 1}; Nd4jLong indices2[] = {batch, boxIndex, 2}; Nd4jLong indices3[] = {batch, boxIndex, 3}; auto rowStart = Nd4jLong ((height - 1) * boxes[shape::getOffset(boxesShape, indices0, 0)]); auto rowStartBound = sd::math::nd4j_max(Nd4jLong (0), rowStart); auto rowEnd = Nd4jLong ((height - 1) * boxes[shape::getOffset(boxesShape, indices2, 0)]); auto rowEndBound = sd::math::nd4j_min(Nd4jLong (height - 1), rowEnd); auto colStart = Nd4jLong ((width - 1) * boxes[shape::getOffset(boxesShape, indices1, 0)]); auto colStartBound = sd::math::nd4j_max(Nd4jLong (0), colStart); auto colEnd = Nd4jLong ((width - 1) * boxes[shape::getOffset(boxesShape, indices3, 0)]); auto colEndBound = sd::math::nd4j_min(Nd4jLong(width - 1), colEnd); if (rowStart > rowEnd || colStart > colEnd) { // printf("helpers::drawBoundingBoxesFunctor: Bounding box (%lld, %lld, %lld, %lld) is inverted " // "and will not be drawn\n", rowStart, colStart, rowEnd, colEnd); continue; } if (rowStart >= height || rowEnd < 0 || colStart >= width || colEnd < 0) { // 
printf("helpers::drawBoundingBoxesFunctor: Bounding box (%lld, %lld, %lld, %lld) is completely " // "outside the image and not be drawn\n", rowStart, colStart, rowEnd, colEnd); continue; } // Draw upper line if (rowStart >= 0) { for (auto j = colStartBound + threadIdx.x; j <= colEndBound; j += blockDim.x) for (auto c = 0; c < channels; c++) { Nd4jLong zPos[] = {batch, rowStart, j, c}; Nd4jLong cPos[] = {colorIndex, c}; auto cIndex = shape::getOffset(colorTableShape, cPos, 0); auto zIndex = shape::getOffset(outputShape, zPos, 0); output[zIndex] = (T)colorTable[cIndex]; } } // Draw bottom line. if (rowEnd < height) { for (auto j = colStartBound + threadIdx.x; j <= colEndBound; j += blockDim.x) for (auto c = 0; c < channels; c++) { Nd4jLong zPos[] = {batch, rowEnd, j, c}; Nd4jLong cPos[] = {colorIndex, c}; auto cIndex = shape::getOffset(colorTableShape, cPos, 0); auto zIndex = shape::getOffset(outputShape, zPos, 0); output[zIndex] = (T)colorTable[cIndex]; } } // Draw left line. if (colStart >= 0) { for (auto i = rowStartBound + threadIdx.x; i <= rowEndBound; i += blockDim.x) for (auto c = 0; c < channels; c++) { Nd4jLong zPos[] = {batch, i, colStart, c}; Nd4jLong cPos[] = {colorIndex, c}; auto cIndex = shape::getOffset(colorTableShape, cPos, 0); auto zIndex = shape::getOffset(outputShape, zPos, 0); output[zIndex] = (T)colorTable[cIndex]; } } // Draw right line. 
if (colEnd < width) { for (auto i = rowStartBound + threadIdx.x; i <= rowEndBound; i += blockDim.x) for (auto c = 0; c < channels; c++) { Nd4jLong zPos[] = {batch, i, colEnd, c}; Nd4jLong cPos[] = {colorIndex, c}; auto cIndex = shape::getOffset(colorTableShape, cPos, 0); auto zIndex = shape::getOffset(outputShape, zPos, 0); output[zIndex] = (T)colorTable[cIndex]; } } } } } template <typename T> void drawBoundingBoxesH(sd::LaunchContext* context, NDArray const* images, NDArray const* boxes, NDArray const* colors, NDArray* output) { auto batchSize = images->sizeAt(0); auto height = images->sizeAt(1); auto width = images->sizeAt(2); auto channels = images->sizeAt(3); auto stream = context->getCudaStream(); auto boxSize = boxes->sizeAt(1); NDArray colorsTable = DefaultColorTable(channels, context); if ((colors != nullptr && colors->lengthOf() > 0)) { colorsTable = *colors; } auto imagesBuf = images->getDataBuffer()->specialAsT<T>(); auto boxesBuf = boxes->getDataBuffer()->specialAsT<float>(); // boxes should be float32 auto colorsTableBuf = colorsTable.getDataBuffer()->specialAsT<float>(); // color table is float32 auto outputBuf = output->dataBuffer()->specialAsT<T>(); hipLaunchKernelGGL(( drawBoundingBoxesKernel), dim3(128), dim3(128), 1024, *stream, imagesBuf, images->specialShapeInfo(), boxesBuf, boxes->specialShapeInfo(), colorsTableBuf, colorsTable.specialShapeInfo(), outputBuf, output->specialShapeInfo(), batchSize, width, height, channels, boxSize, colorsTable.lengthOf()); } void drawBoundingBoxesFunctor(sd::LaunchContext * context, NDArray* images, NDArray* boxes, NDArray* colors, NDArray* output) { // images - batch of 3D images with BW (last dim = 1), RGB (last dim = 3) or RGBA (last dim = 4) channel set // boxes - batch of 2D bounds with last dim (y_start, x_start, y_end, x_end) to compute i and j as // floor((height - 1 ) * y_start) => rowStart, floor((height - 1) * y_end) => rowEnd // floor((width - 1 ) * x_start) => colStart, floor((width - 1) * x_end) 
=> colEnd // height = images->sizeAt(1), width = images->sizeAt(2) // colors - colors for each box given // set up color for each box as frame NDArray::prepareSpecialUse({output}, {images, boxes, colors}); output->assign(images); BUILD_SINGLE_SELECTOR(output->dataType(), drawBoundingBoxesH, (context, images, boxes, colors, output), FLOAT_TYPES); NDArray::registerSpecialUse({output}, {images, boxes, colors}); } } } }
de234928b87e7ce84a738a90943f12351748f483.cu
/* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author sgazeos@gmail.com // #include <system/op_boilerplate.h> #include <array/NDArray.h> namespace sd { namespace ops { namespace helpers { typedef NDArray ColorTable_t; static NDArray DefaultColorTable(int depth, sd::LaunchContext* context) { //std::vector<std::vector<float>> colorTable; const Nd4jLong kDefaultTableLength = 10; const Nd4jLong kDefaultChannelLength = 4; NDArray colorTable('c', {kDefaultTableLength, kDefaultChannelLength}, { 1,1,0,1, // yellow 0, 0, 1, 1, // 1: blue 1, 0, 0, 1, // 2: red 0, 1, 0, 1, // 3: lime 0.5, 0, 0.5, 1, // 4: purple 0.5, 0.5, 0, 1, // 5: olive 0.5, 0, 0, 1, // 6: maroon 0, 0, 0.5, 1, // 7: navy blue 0, 1, 1, 1, // 8: aqua 1, 0, 1, 1 // 9: fuchsia }, DataType::FLOAT32, context); if (depth == 1) { colorTable.assign(1.f); // all to white when black and white colors } return colorTable; } template <typename T> static __global__ void drawBoundingBoxesKernel(T const* images, const Nd4jLong* imagesShape, float const* boxes, const Nd4jLong* boxesShape, float const* colorTable, const Nd4jLong* colorTableShape, T* output, const Nd4jLong* outputShape, Nd4jLong batchSize, Nd4jLong width, Nd4jLong height, Nd4jLong channels, 
Nd4jLong boxSize, Nd4jLong colorTableLen) { for (auto batch = blockIdx.x; batch < (int)batchSize; batch += gridDim.x) { // loop by batch for (auto boxIndex = 0; boxIndex < boxSize; ++boxIndex) { // box with shape //auto internalBox = &boxes[b * colorSetSize * 4 + c * 4];//(*boxes)(b, {0})(c, {0});//internalBoxes->at(c); auto colorIndex = boxIndex % colorTableLen;//colorSet->at(c); // auto rowStart = sd::math::nd4j_max(Nd4jLong (0), Nd4jLong ((height - 1) * internalBox[0])); // auto rowEnd = sd::math::nd4j_min(Nd4jLong (height - 1), Nd4jLong ((height - 1) * internalBox[2])); // auto colStart = sd::math::nd4j_max(Nd4jLong (0), Nd4jLong ((width - 1) * internalBox[1])); // auto colEnd = sd::math::nd4j_min(Nd4jLong(width - 1), Nd4jLong ((width - 1) * internalBox[3])); Nd4jLong indices0[] = {batch, boxIndex, 0}; Nd4jLong indices1[] = {batch, boxIndex, 1}; Nd4jLong indices2[] = {batch, boxIndex, 2}; Nd4jLong indices3[] = {batch, boxIndex, 3}; auto rowStart = Nd4jLong ((height - 1) * boxes[shape::getOffset(boxesShape, indices0, 0)]); auto rowStartBound = sd::math::nd4j_max(Nd4jLong (0), rowStart); auto rowEnd = Nd4jLong ((height - 1) * boxes[shape::getOffset(boxesShape, indices2, 0)]); auto rowEndBound = sd::math::nd4j_min(Nd4jLong (height - 1), rowEnd); auto colStart = Nd4jLong ((width - 1) * boxes[shape::getOffset(boxesShape, indices1, 0)]); auto colStartBound = sd::math::nd4j_max(Nd4jLong (0), colStart); auto colEnd = Nd4jLong ((width - 1) * boxes[shape::getOffset(boxesShape, indices3, 0)]); auto colEndBound = sd::math::nd4j_min(Nd4jLong(width - 1), colEnd); if (rowStart > rowEnd || colStart > colEnd) { // printf("helpers::drawBoundingBoxesFunctor: Bounding box (%lld, %lld, %lld, %lld) is inverted " // "and will not be drawn\n", rowStart, colStart, rowEnd, colEnd); continue; } if (rowStart >= height || rowEnd < 0 || colStart >= width || colEnd < 0) { // printf("helpers::drawBoundingBoxesFunctor: Bounding box (%lld, %lld, %lld, %lld) is completely " // "outside the image 
and not be drawn\n", rowStart, colStart, rowEnd, colEnd); continue; } // Draw upper line if (rowStart >= 0) { for (auto j = colStartBound + threadIdx.x; j <= colEndBound; j += blockDim.x) for (auto c = 0; c < channels; c++) { Nd4jLong zPos[] = {batch, rowStart, j, c}; Nd4jLong cPos[] = {colorIndex, c}; auto cIndex = shape::getOffset(colorTableShape, cPos, 0); auto zIndex = shape::getOffset(outputShape, zPos, 0); output[zIndex] = (T)colorTable[cIndex]; } } // Draw bottom line. if (rowEnd < height) { for (auto j = colStartBound + threadIdx.x; j <= colEndBound; j += blockDim.x) for (auto c = 0; c < channels; c++) { Nd4jLong zPos[] = {batch, rowEnd, j, c}; Nd4jLong cPos[] = {colorIndex, c}; auto cIndex = shape::getOffset(colorTableShape, cPos, 0); auto zIndex = shape::getOffset(outputShape, zPos, 0); output[zIndex] = (T)colorTable[cIndex]; } } // Draw left line. if (colStart >= 0) { for (auto i = rowStartBound + threadIdx.x; i <= rowEndBound; i += blockDim.x) for (auto c = 0; c < channels; c++) { Nd4jLong zPos[] = {batch, i, colStart, c}; Nd4jLong cPos[] = {colorIndex, c}; auto cIndex = shape::getOffset(colorTableShape, cPos, 0); auto zIndex = shape::getOffset(outputShape, zPos, 0); output[zIndex] = (T)colorTable[cIndex]; } } // Draw right line. 
if (colEnd < width) { for (auto i = rowStartBound + threadIdx.x; i <= rowEndBound; i += blockDim.x) for (auto c = 0; c < channels; c++) { Nd4jLong zPos[] = {batch, i, colEnd, c}; Nd4jLong cPos[] = {colorIndex, c}; auto cIndex = shape::getOffset(colorTableShape, cPos, 0); auto zIndex = shape::getOffset(outputShape, zPos, 0); output[zIndex] = (T)colorTable[cIndex]; } } } } } template <typename T> void drawBoundingBoxesH(sd::LaunchContext* context, NDArray const* images, NDArray const* boxes, NDArray const* colors, NDArray* output) { auto batchSize = images->sizeAt(0); auto height = images->sizeAt(1); auto width = images->sizeAt(2); auto channels = images->sizeAt(3); auto stream = context->getCudaStream(); auto boxSize = boxes->sizeAt(1); NDArray colorsTable = DefaultColorTable(channels, context); if ((colors != nullptr && colors->lengthOf() > 0)) { colorsTable = *colors; } auto imagesBuf = images->getDataBuffer()->specialAsT<T>(); auto boxesBuf = boxes->getDataBuffer()->specialAsT<float>(); // boxes should be float32 auto colorsTableBuf = colorsTable.getDataBuffer()->specialAsT<float>(); // color table is float32 auto outputBuf = output->dataBuffer()->specialAsT<T>(); drawBoundingBoxesKernel<<<128, 128, 1024, *stream>>>(imagesBuf, images->specialShapeInfo(), boxesBuf, boxes->specialShapeInfo(), colorsTableBuf, colorsTable.specialShapeInfo(), outputBuf, output->specialShapeInfo(), batchSize, width, height, channels, boxSize, colorsTable.lengthOf()); } void drawBoundingBoxesFunctor(sd::LaunchContext * context, NDArray* images, NDArray* boxes, NDArray* colors, NDArray* output) { // images - batch of 3D images with BW (last dim = 1), RGB (last dim = 3) or RGBA (last dim = 4) channel set // boxes - batch of 2D bounds with last dim (y_start, x_start, y_end, x_end) to compute i and j as // floor((height - 1 ) * y_start) => rowStart, floor((height - 1) * y_end) => rowEnd // floor((width - 1 ) * x_start) => colStart, floor((width - 1) * x_end) => colEnd // height = 
images->sizeAt(1), width = images->sizeAt(2) // colors - colors for each box given // set up color for each box as frame NDArray::prepareSpecialUse({output}, {images, boxes, colors}); output->assign(images); BUILD_SINGLE_SELECTOR(output->dataType(), drawBoundingBoxesH, (context, images, boxes, colors, output), FLOAT_TYPES); NDArray::registerSpecialUse({output}, {images, boxes, colors}); } } } }
817bdda3f88ca78c4c35af67144efaf0f0e69c0a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "nuconv.cuh" #include <float.h> extern hipStream_t streamRep; extern int Blocks; extern int Threads; __global__ void Normalize(volatile float *__restrict__ y, const uint32_t nPts, const uint32_t ng, const uint32_t d, const float maxy) { for (register int TID = threadIdx.x + blockIdx.x * blockDim.x; TID < nPts; TID += gridDim.x * blockDim.x) { for (register int dim = 0; dim < d; dim++) { y[TID + dim * nPts] /= maxy; if (y[TID + dim * nPts] >= 1) { y[TID + dim * nPts] =1 - FLT_EPSILON; } y[TID + dim * nPts] *= (ng - 3); } } } __global__ void Normalize(volatile double *__restrict__ y, const uint32_t nPts, const uint32_t ng, const uint32_t d, const double maxy) { for (register int TID = threadIdx.x + blockIdx.x * blockDim.x; TID < nPts; TID += gridDim.x * blockDim.x) { for (register int dim = 0; dim < d; dim++) { y[TID + dim * nPts] /= maxy; if (y[TID + dim * nPts] == 1) { y[TID + dim * nPts] = y[TID + dim * nPts] - DBL_EPSILON; } y[TID + dim * nPts] *= (ng - 3); } } } template <class dataPoint,class Complext> void nuconv(dataPoint *PhiScat, dataPoint *y, dataPoint *VScat, int n, int d, int m, int nGridDim, double *timeInfo, hipfftHandle &plan, hipfftHandle &plan_rhs, dataPoint *VGrid, dataPoint *PhiGrid, Complext *Kc, Complext *Xc) { struct GpuTimer timer; int szV = pow(nGridDim + 2, d) * m; timer.Start(streamRep); // ~~~~~~~~~~ Scale coordinates (inside bins) thrust::device_ptr<dataPoint> yVec_ptr(y); dataPoint maxy = thrust::reduce(thrust::hip::par.on(streamRep), yVec_ptr, yVec_ptr + n * d, 0.0, thrust::maximum<dataPoint>()); hipDeviceSynchronize(); timer.Stop(streamRep); timeInfo[5] += timer.Elapsed(); dataPoint h = maxy / (nGridDim - 1 - std::numeric_limits<dataPoint>::epsilon()); // ~~~~~~~~~~ scat2grid hipLaunchKernelGGL(( Normalize), dim3(Blocks), dim3(Threads), 0, streamRep, y, n, nGridDim + 2, d, maxy); hipDeviceSynchronize(); timer.Start(streamRep); s2g(VGrid, y, 
VScat, nGridDim, n, d, m); timer.Stop(streamRep); timeInfo[0] = timer.Elapsed(); hipDeviceSynchronize(); // ~~~~~~~~~~ grid2grid uint32_t *const nGridDims = new uint32_t[d](); for (int i = 0; i < d; i++) { nGridDims[i] = nGridDim + 2; } timer.Start(streamRep); switch (d) { case 1: conv1dnopadcuda(PhiGrid, VGrid, h, nGridDims, m, d, plan, plan_rhs,Kc,Xc); break; case 2: conv2dnopadcuda(PhiGrid, VGrid, h, nGridDims, m, d, plan, plan_rhs,Kc,Xc); break; case 3: conv3dnopadcuda(PhiGrid, VGrid, h, nGridDims, m, d, plan, plan_rhs,Kc,Xc); break; } hipDeviceSynchronize(); timer.Stop(streamRep); timeInfo[1] = timer.Elapsed(); // ~~~~~~~~~~ grid2scat timer.Start(streamRep); g2s(PhiScat, PhiGrid, y, nGridDim, n, d, m); timer.Stop(streamRep); timeInfo[2] = timer.Elapsed(); // ~~~~~~~~~~ deallocate memory hipDeviceSynchronize(); delete nGridDims; } template void nuconv(float *PhiScat, float *y, float *VScat, int n, int d, int m, int nGridDim, double *timeInfo, hipfftHandle &plan, hipfftHandle &plan_rhs, float *VGrid, float *PhiGrid, ComplexF *Kc, ComplexF *Xc); template void nuconv(double *PhiScat, double *y, double *VScat, int n, int d, int m, int nGridDim, double *timeInfo, hipfftHandle &plan, hipfftHandle &plan_rhs, double *VGrid, double *PhiGrid, ComplexD *Kc, ComplexD *Xc);
817bdda3f88ca78c4c35af67144efaf0f0e69c0a.cu
#include "nuconv.cuh" #include <float.h> extern cudaStream_t streamRep; extern int Blocks; extern int Threads; __global__ void Normalize(volatile float *__restrict__ y, const uint32_t nPts, const uint32_t ng, const uint32_t d, const float maxy) { for (register int TID = threadIdx.x + blockIdx.x * blockDim.x; TID < nPts; TID += gridDim.x * blockDim.x) { for (register int dim = 0; dim < d; dim++) { y[TID + dim * nPts] /= maxy; if (y[TID + dim * nPts] >= 1) { y[TID + dim * nPts] =1 - FLT_EPSILON; } y[TID + dim * nPts] *= (ng - 3); } } } __global__ void Normalize(volatile double *__restrict__ y, const uint32_t nPts, const uint32_t ng, const uint32_t d, const double maxy) { for (register int TID = threadIdx.x + blockIdx.x * blockDim.x; TID < nPts; TID += gridDim.x * blockDim.x) { for (register int dim = 0; dim < d; dim++) { y[TID + dim * nPts] /= maxy; if (y[TID + dim * nPts] == 1) { y[TID + dim * nPts] = y[TID + dim * nPts] - DBL_EPSILON; } y[TID + dim * nPts] *= (ng - 3); } } } template <class dataPoint,class Complext> void nuconv(dataPoint *PhiScat, dataPoint *y, dataPoint *VScat, int n, int d, int m, int nGridDim, double *timeInfo, cufftHandle &plan, cufftHandle &plan_rhs, dataPoint *VGrid, dataPoint *PhiGrid, Complext *Kc, Complext *Xc) { struct GpuTimer timer; int szV = pow(nGridDim + 2, d) * m; timer.Start(streamRep); // ~~~~~~~~~~ Scale coordinates (inside bins) thrust::device_ptr<dataPoint> yVec_ptr(y); dataPoint maxy = thrust::reduce(thrust::cuda::par.on(streamRep), yVec_ptr, yVec_ptr + n * d, 0.0, thrust::maximum<dataPoint>()); cudaDeviceSynchronize(); timer.Stop(streamRep); timeInfo[5] += timer.Elapsed(); dataPoint h = maxy / (nGridDim - 1 - std::numeric_limits<dataPoint>::epsilon()); // ~~~~~~~~~~ scat2grid Normalize<<<Blocks, Threads, 0, streamRep>>>(y, n, nGridDim + 2, d, maxy); cudaDeviceSynchronize(); timer.Start(streamRep); s2g(VGrid, y, VScat, nGridDim, n, d, m); timer.Stop(streamRep); timeInfo[0] = timer.Elapsed(); cudaDeviceSynchronize(); // 
~~~~~~~~~~ grid2grid uint32_t *const nGridDims = new uint32_t[d](); for (int i = 0; i < d; i++) { nGridDims[i] = nGridDim + 2; } timer.Start(streamRep); switch (d) { case 1: conv1dnopadcuda(PhiGrid, VGrid, h, nGridDims, m, d, plan, plan_rhs,Kc,Xc); break; case 2: conv2dnopadcuda(PhiGrid, VGrid, h, nGridDims, m, d, plan, plan_rhs,Kc,Xc); break; case 3: conv3dnopadcuda(PhiGrid, VGrid, h, nGridDims, m, d, plan, plan_rhs,Kc,Xc); break; } cudaDeviceSynchronize(); timer.Stop(streamRep); timeInfo[1] = timer.Elapsed(); // ~~~~~~~~~~ grid2scat timer.Start(streamRep); g2s(PhiScat, PhiGrid, y, nGridDim, n, d, m); timer.Stop(streamRep); timeInfo[2] = timer.Elapsed(); // ~~~~~~~~~~ deallocate memory cudaDeviceSynchronize(); delete nGridDims; } template void nuconv(float *PhiScat, float *y, float *VScat, int n, int d, int m, int nGridDim, double *timeInfo, cufftHandle &plan, cufftHandle &plan_rhs, float *VGrid, float *PhiGrid, ComplexF *Kc, ComplexF *Xc); template void nuconv(double *PhiScat, double *y, double *VScat, int n, int d, int m, int nGridDim, double *timeInfo, cufftHandle &plan, cufftHandle &plan_rhs, double *VGrid, double *PhiGrid, ComplexD *Kc, ComplexD *Xc);
8c4ee88c33fd3482f450712268ab0578a8c64da3.hip
// !!! This is a file automatically generated by hipify!!! /*! * Copyright 2020 by Contributors * \file rank_metric.cc * \brief prediction rank based metrics. * \author Kailong Chen, Tianqi Chen */ #include <rabit/rabit.h> #include <dmlc/registry.h> #include <xgboost/metric.h> #include <xgboost/host_device_vector.h> #include <thrust/iterator/discard_iterator.h> #include <cmath> #include <array> #include <vector> #include "metric_common.h" #include "../common/math.h" #include "../common/device_helpers.cuh" namespace xgboost { namespace metric { // tag the this file, used by force static link later. DMLC_REGISTRY_FILE_TAG(rank_metric_gpu); /*! \brief Evaluate rank list on GPU */ template <typename EvalMetricT> struct EvalRankGpu : public Metric, public EvalRankConfig { public: double Eval(const HostDeviceVector<bst_float> &preds, const MetaInfo &info, bool distributed) override { // Sanity check is done by the caller std::vector<unsigned> tgptr(2, 0); tgptr[1] = static_cast<unsigned>(preds.Size()); const std::vector<unsigned> &gptr = info.group_ptr_.size() == 0 ? 
tgptr : info.group_ptr_; const auto ngroups = static_cast<bst_omp_uint>(gptr.size() - 1); auto device = tparam_->gpu_id; dh::safe_cuda(hipSetDevice(device)); info.labels_.SetDevice(device); preds.SetDevice(device); auto dpreds = preds.ConstDevicePointer(); auto dlabels = info.labels_.ConstDevicePointer(); // Sort all the predictions dh::SegmentSorter<float> segment_pred_sorter; segment_pred_sorter.SortItems(dpreds, preds.Size(), gptr); // Compute individual group metric and sum them up return EvalMetricT::EvalMetric(segment_pred_sorter, dlabels, *this); } const char* Name() const override { return name.c_str(); } explicit EvalRankGpu(const char* name, const char* param) { using namespace std; // NOLINT(*) if (param != nullptr) { std::ostringstream os; if (sscanf(param, "%u[-]?", &this->topn) == 1) { os << name << '@' << param; this->name = os.str(); } else { os << name << param; this->name = os.str(); } if (param[strlen(param) - 1] == '-') { this->minus = true; } } else { this->name = name; } } }; /*! \brief Precision at N, for both classification and rank */ struct EvalPrecisionGpu { public: static double EvalMetric(const dh::SegmentSorter<float> &pred_sorter, const float *dlabels, const EvalRankConfig &ecfg) { // Group info on device const auto &dgroups = pred_sorter.GetGroupsSpan(); const auto ngroups = pred_sorter.GetNumGroups(); const auto &dgroup_idx = pred_sorter.GetGroupSegmentsSpan(); // Original positions of the predictions after they have been sorted const auto &dpreds_orig_pos = pred_sorter.GetOriginalPositionsSpan(); // First, determine non zero labels in the dataset individually auto DetermineNonTrivialLabelLambda = [=] __device__(uint32_t idx) { return (static_cast<unsigned>(dlabels[dpreds_orig_pos[idx]]) != 0) ? 
1 : 0; }; // NOLINT // Find each group's metric sum dh::caching_device_vector<uint32_t> hits(ngroups, 0); const auto nitems = pred_sorter.GetNumItems(); auto *dhits = hits.data().get(); int device_id = -1; dh::safe_cuda(hipGetDevice(&device_id)); // For each group item compute the aggregated precision dh::LaunchN(nitems, nullptr, [=] __device__(uint32_t idx) { const auto group_idx = dgroup_idx[idx]; const auto group_begin = dgroups[group_idx]; const auto ridx = idx - group_begin; if (ridx < ecfg.topn && DetermineNonTrivialLabelLambda(idx)) { atomicAdd(&dhits[group_idx], 1); } }); // Allocator to be used for managing space overhead while performing reductions dh::XGBCachingDeviceAllocator<char> alloc; return static_cast<double>(thrust::reduce(thrust::hip::par(alloc), hits.begin(), hits.end())) / ecfg.topn; } }; /*! \brief NDCG: Normalized Discounted Cumulative Gain at N */ struct EvalNDCGGpu { public: static void ComputeDCG(const dh::SegmentSorter<float> &pred_sorter, const float *dlabels, const EvalRankConfig &ecfg, // The order in which labels have to be accessed. 
The order is determined // by sorting the predictions or the labels for the entire dataset const xgboost::common::Span<const uint32_t> &dlabels_sort_order, dh::caching_device_vector<double> *dcgptr) { dh::caching_device_vector<double> &dcgs(*dcgptr); // Group info on device const auto &dgroups = pred_sorter.GetGroupsSpan(); const auto &dgroup_idx = pred_sorter.GetGroupSegmentsSpan(); // First, determine non zero labels in the dataset individually auto DetermineNonTrivialLabelLambda = [=] __device__(uint32_t idx) { return (static_cast<unsigned>(dlabels[dlabels_sort_order[idx]])); }; // NOLINT // Find each group's DCG value const auto nitems = pred_sorter.GetNumItems(); auto *ddcgs = dcgs.data().get(); int device_id = -1; dh::safe_cuda(hipGetDevice(&device_id)); // For each group item compute the aggregated precision dh::LaunchN(nitems, nullptr, [=] __device__(uint32_t idx) { const auto group_idx = dgroup_idx[idx]; const auto group_begin = dgroups[group_idx]; const auto ridx = idx - group_begin; auto label = DetermineNonTrivialLabelLambda(idx); if (ridx < ecfg.topn && label) { atomicAdd(&ddcgs[group_idx], ((1 << label) - 1) / std::log2(ridx + 2.0)); } }); } static double EvalMetric(const dh::SegmentSorter<float> &pred_sorter, const float *dlabels, const EvalRankConfig &ecfg) { // Sort the labels and compute IDCG dh::SegmentSorter<float> segment_label_sorter; segment_label_sorter.SortItems(dlabels, pred_sorter.GetNumItems(), pred_sorter.GetGroupSegmentsSpan()); uint32_t ngroups = pred_sorter.GetNumGroups(); dh::caching_device_vector<double> idcg(ngroups, 0); ComputeDCG(pred_sorter, dlabels, ecfg, segment_label_sorter.GetOriginalPositionsSpan(), &idcg); // Compute the DCG values next dh::caching_device_vector<double> dcg(ngroups, 0); ComputeDCG(pred_sorter, dlabels, ecfg, pred_sorter.GetOriginalPositionsSpan(), &dcg); double *ddcg = dcg.data().get(); double *didcg = idcg.data().get(); int device_id = -1; dh::safe_cuda(hipGetDevice(&device_id)); // Compute the group's 
DCG and reduce it across all groups dh::LaunchN(ngroups, nullptr, [=] __device__(uint32_t gidx) { if (didcg[gidx] == 0.0f) { ddcg[gidx] = (ecfg.minus) ? 0.0f : 1.0f; } else { ddcg[gidx] /= didcg[gidx]; } }); // Allocator to be used for managing space overhead while performing reductions dh::XGBCachingDeviceAllocator<char> alloc; return thrust::reduce(thrust::hip::par(alloc), dcg.begin(), dcg.end()); } }; /*! \brief Mean Average Precision at N, for both classification and rank */ struct EvalMAPGpu { public: static double EvalMetric(const dh::SegmentSorter<float> &pred_sorter, const float *dlabels, const EvalRankConfig &ecfg) { // Group info on device const auto &dgroups = pred_sorter.GetGroupsSpan(); const auto ngroups = pred_sorter.GetNumGroups(); const auto &dgroup_idx = pred_sorter.GetGroupSegmentsSpan(); // Original positions of the predictions after they have been sorted const auto &dpreds_orig_pos = pred_sorter.GetOriginalPositionsSpan(); // First, determine non zero labels in the dataset individually const auto nitems = pred_sorter.GetNumItems(); dh::caching_device_vector<uint32_t> hits(nitems, 0); auto DetermineNonTrivialLabelLambda = [=] __device__(uint32_t idx) { return (static_cast<unsigned>(dlabels[dpreds_orig_pos[idx]]) != 0) ? 1 : 0; }; // NOLINT thrust::transform(thrust::make_counting_iterator(static_cast<uint32_t>(0)), thrust::make_counting_iterator(nitems), hits.begin(), DetermineNonTrivialLabelLambda); // Allocator to be used by sort for managing space overhead while performing prefix scans dh::XGBCachingDeviceAllocator<char> alloc; // Next, prefix scan the nontrivial labels that are segmented to accumulate them. // This is required for computing the metric sum // Data segmented into different groups... 
thrust::inclusive_scan_by_key(thrust::hip::par(alloc), dh::tcbegin(dgroup_idx), dh::tcend(dgroup_idx), hits.begin(), // Input value hits.begin()); // In-place scan // Find each group's metric sum dh::caching_device_vector<double> sumap(ngroups, 0); auto *dsumap = sumap.data().get(); const auto *dhits = hits.data().get(); int device_id = -1; dh::safe_cuda(hipGetDevice(&device_id)); // For each group item compute the aggregated precision dh::LaunchN(nitems, nullptr, [=] __device__(uint32_t idx) { if (DetermineNonTrivialLabelLambda(idx)) { const auto group_idx = dgroup_idx[idx]; const auto group_begin = dgroups[group_idx]; const auto ridx = idx - group_begin; if (ridx < ecfg.topn) { atomicAdd(&dsumap[group_idx], static_cast<double>(dhits[idx]) / (ridx + 1)); } } }); // Aggregate the group's item precisions dh::LaunchN(ngroups, nullptr, [=] __device__(uint32_t gidx) { auto nhits = dgroups[gidx + 1] ? dhits[dgroups[gidx + 1] - 1] : 0; if (nhits != 0) { dsumap[gidx] /= nhits; } else { if (ecfg.minus) { dsumap[gidx] = 0; } else { dsumap[gidx] = 1; } } }); return thrust::reduce(thrust::hip::par(alloc), sumap.begin(), sumap.end()); } }; XGBOOST_REGISTER_GPU_METRIC(PrecisionGpu, "pre") .describe("precision@k for rank computed on GPU.") .set_body([](const char* param) { return new EvalRankGpu<EvalPrecisionGpu>("pre", param); }); XGBOOST_REGISTER_GPU_METRIC(NDCGGpu, "ndcg") .describe("ndcg@k for rank computed on GPU.") .set_body([](const char* param) { return new EvalRankGpu<EvalNDCGGpu>("ndcg", param); }); XGBOOST_REGISTER_GPU_METRIC(MAPGpu, "map") .describe("map@k for rank computed on GPU.") .set_body([](const char* param) { return new EvalRankGpu<EvalMAPGpu>("map", param); }); } // namespace metric } // namespace xgboost
8c4ee88c33fd3482f450712268ab0578a8c64da3.cu
/*! * Copyright 2020 by Contributors * \file rank_metric.cc * \brief prediction rank based metrics. * \author Kailong Chen, Tianqi Chen */ #include <rabit/rabit.h> #include <dmlc/registry.h> #include <xgboost/metric.h> #include <xgboost/host_device_vector.h> #include <thrust/iterator/discard_iterator.h> #include <cmath> #include <array> #include <vector> #include "metric_common.h" #include "../common/math.h" #include "../common/device_helpers.cuh" namespace xgboost { namespace metric { // tag the this file, used by force static link later. DMLC_REGISTRY_FILE_TAG(rank_metric_gpu); /*! \brief Evaluate rank list on GPU */ template <typename EvalMetricT> struct EvalRankGpu : public Metric, public EvalRankConfig { public: double Eval(const HostDeviceVector<bst_float> &preds, const MetaInfo &info, bool distributed) override { // Sanity check is done by the caller std::vector<unsigned> tgptr(2, 0); tgptr[1] = static_cast<unsigned>(preds.Size()); const std::vector<unsigned> &gptr = info.group_ptr_.size() == 0 ? 
tgptr : info.group_ptr_; const auto ngroups = static_cast<bst_omp_uint>(gptr.size() - 1); auto device = tparam_->gpu_id; dh::safe_cuda(cudaSetDevice(device)); info.labels_.SetDevice(device); preds.SetDevice(device); auto dpreds = preds.ConstDevicePointer(); auto dlabels = info.labels_.ConstDevicePointer(); // Sort all the predictions dh::SegmentSorter<float> segment_pred_sorter; segment_pred_sorter.SortItems(dpreds, preds.Size(), gptr); // Compute individual group metric and sum them up return EvalMetricT::EvalMetric(segment_pred_sorter, dlabels, *this); } const char* Name() const override { return name.c_str(); } explicit EvalRankGpu(const char* name, const char* param) { using namespace std; // NOLINT(*) if (param != nullptr) { std::ostringstream os; if (sscanf(param, "%u[-]?", &this->topn) == 1) { os << name << '@' << param; this->name = os.str(); } else { os << name << param; this->name = os.str(); } if (param[strlen(param) - 1] == '-') { this->minus = true; } } else { this->name = name; } } }; /*! \brief Precision at N, for both classification and rank */ struct EvalPrecisionGpu { public: static double EvalMetric(const dh::SegmentSorter<float> &pred_sorter, const float *dlabels, const EvalRankConfig &ecfg) { // Group info on device const auto &dgroups = pred_sorter.GetGroupsSpan(); const auto ngroups = pred_sorter.GetNumGroups(); const auto &dgroup_idx = pred_sorter.GetGroupSegmentsSpan(); // Original positions of the predictions after they have been sorted const auto &dpreds_orig_pos = pred_sorter.GetOriginalPositionsSpan(); // First, determine non zero labels in the dataset individually auto DetermineNonTrivialLabelLambda = [=] __device__(uint32_t idx) { return (static_cast<unsigned>(dlabels[dpreds_orig_pos[idx]]) != 0) ? 
1 : 0; }; // NOLINT // Find each group's metric sum dh::caching_device_vector<uint32_t> hits(ngroups, 0); const auto nitems = pred_sorter.GetNumItems(); auto *dhits = hits.data().get(); int device_id = -1; dh::safe_cuda(cudaGetDevice(&device_id)); // For each group item compute the aggregated precision dh::LaunchN(nitems, nullptr, [=] __device__(uint32_t idx) { const auto group_idx = dgroup_idx[idx]; const auto group_begin = dgroups[group_idx]; const auto ridx = idx - group_begin; if (ridx < ecfg.topn && DetermineNonTrivialLabelLambda(idx)) { atomicAdd(&dhits[group_idx], 1); } }); // Allocator to be used for managing space overhead while performing reductions dh::XGBCachingDeviceAllocator<char> alloc; return static_cast<double>(thrust::reduce(thrust::cuda::par(alloc), hits.begin(), hits.end())) / ecfg.topn; } }; /*! \brief NDCG: Normalized Discounted Cumulative Gain at N */ struct EvalNDCGGpu { public: static void ComputeDCG(const dh::SegmentSorter<float> &pred_sorter, const float *dlabels, const EvalRankConfig &ecfg, // The order in which labels have to be accessed. 
The order is determined // by sorting the predictions or the labels for the entire dataset const xgboost::common::Span<const uint32_t> &dlabels_sort_order, dh::caching_device_vector<double> *dcgptr) { dh::caching_device_vector<double> &dcgs(*dcgptr); // Group info on device const auto &dgroups = pred_sorter.GetGroupsSpan(); const auto &dgroup_idx = pred_sorter.GetGroupSegmentsSpan(); // First, determine non zero labels in the dataset individually auto DetermineNonTrivialLabelLambda = [=] __device__(uint32_t idx) { return (static_cast<unsigned>(dlabels[dlabels_sort_order[idx]])); }; // NOLINT // Find each group's DCG value const auto nitems = pred_sorter.GetNumItems(); auto *ddcgs = dcgs.data().get(); int device_id = -1; dh::safe_cuda(cudaGetDevice(&device_id)); // For each group item compute the aggregated precision dh::LaunchN(nitems, nullptr, [=] __device__(uint32_t idx) { const auto group_idx = dgroup_idx[idx]; const auto group_begin = dgroups[group_idx]; const auto ridx = idx - group_begin; auto label = DetermineNonTrivialLabelLambda(idx); if (ridx < ecfg.topn && label) { atomicAdd(&ddcgs[group_idx], ((1 << label) - 1) / std::log2(ridx + 2.0)); } }); } static double EvalMetric(const dh::SegmentSorter<float> &pred_sorter, const float *dlabels, const EvalRankConfig &ecfg) { // Sort the labels and compute IDCG dh::SegmentSorter<float> segment_label_sorter; segment_label_sorter.SortItems(dlabels, pred_sorter.GetNumItems(), pred_sorter.GetGroupSegmentsSpan()); uint32_t ngroups = pred_sorter.GetNumGroups(); dh::caching_device_vector<double> idcg(ngroups, 0); ComputeDCG(pred_sorter, dlabels, ecfg, segment_label_sorter.GetOriginalPositionsSpan(), &idcg); // Compute the DCG values next dh::caching_device_vector<double> dcg(ngroups, 0); ComputeDCG(pred_sorter, dlabels, ecfg, pred_sorter.GetOriginalPositionsSpan(), &dcg); double *ddcg = dcg.data().get(); double *didcg = idcg.data().get(); int device_id = -1; dh::safe_cuda(cudaGetDevice(&device_id)); // Compute the group's 
DCG and reduce it across all groups dh::LaunchN(ngroups, nullptr, [=] __device__(uint32_t gidx) { if (didcg[gidx] == 0.0f) { ddcg[gidx] = (ecfg.minus) ? 0.0f : 1.0f; } else { ddcg[gidx] /= didcg[gidx]; } }); // Allocator to be used for managing space overhead while performing reductions dh::XGBCachingDeviceAllocator<char> alloc; return thrust::reduce(thrust::cuda::par(alloc), dcg.begin(), dcg.end()); } }; /*! \brief Mean Average Precision at N, for both classification and rank */ struct EvalMAPGpu { public: static double EvalMetric(const dh::SegmentSorter<float> &pred_sorter, const float *dlabels, const EvalRankConfig &ecfg) { // Group info on device const auto &dgroups = pred_sorter.GetGroupsSpan(); const auto ngroups = pred_sorter.GetNumGroups(); const auto &dgroup_idx = pred_sorter.GetGroupSegmentsSpan(); // Original positions of the predictions after they have been sorted const auto &dpreds_orig_pos = pred_sorter.GetOriginalPositionsSpan(); // First, determine non zero labels in the dataset individually const auto nitems = pred_sorter.GetNumItems(); dh::caching_device_vector<uint32_t> hits(nitems, 0); auto DetermineNonTrivialLabelLambda = [=] __device__(uint32_t idx) { return (static_cast<unsigned>(dlabels[dpreds_orig_pos[idx]]) != 0) ? 1 : 0; }; // NOLINT thrust::transform(thrust::make_counting_iterator(static_cast<uint32_t>(0)), thrust::make_counting_iterator(nitems), hits.begin(), DetermineNonTrivialLabelLambda); // Allocator to be used by sort for managing space overhead while performing prefix scans dh::XGBCachingDeviceAllocator<char> alloc; // Next, prefix scan the nontrivial labels that are segmented to accumulate them. // This is required for computing the metric sum // Data segmented into different groups... 
thrust::inclusive_scan_by_key(thrust::cuda::par(alloc), dh::tcbegin(dgroup_idx), dh::tcend(dgroup_idx), hits.begin(), // Input value hits.begin()); // In-place scan // Find each group's metric sum dh::caching_device_vector<double> sumap(ngroups, 0); auto *dsumap = sumap.data().get(); const auto *dhits = hits.data().get(); int device_id = -1; dh::safe_cuda(cudaGetDevice(&device_id)); // For each group item compute the aggregated precision dh::LaunchN(nitems, nullptr, [=] __device__(uint32_t idx) { if (DetermineNonTrivialLabelLambda(idx)) { const auto group_idx = dgroup_idx[idx]; const auto group_begin = dgroups[group_idx]; const auto ridx = idx - group_begin; if (ridx < ecfg.topn) { atomicAdd(&dsumap[group_idx], static_cast<double>(dhits[idx]) / (ridx + 1)); } } }); // Aggregate the group's item precisions dh::LaunchN(ngroups, nullptr, [=] __device__(uint32_t gidx) { auto nhits = dgroups[gidx + 1] ? dhits[dgroups[gidx + 1] - 1] : 0; if (nhits != 0) { dsumap[gidx] /= nhits; } else { if (ecfg.minus) { dsumap[gidx] = 0; } else { dsumap[gidx] = 1; } } }); return thrust::reduce(thrust::cuda::par(alloc), sumap.begin(), sumap.end()); } }; XGBOOST_REGISTER_GPU_METRIC(PrecisionGpu, "pre") .describe("precision@k for rank computed on GPU.") .set_body([](const char* param) { return new EvalRankGpu<EvalPrecisionGpu>("pre", param); }); XGBOOST_REGISTER_GPU_METRIC(NDCGGpu, "ndcg") .describe("ndcg@k for rank computed on GPU.") .set_body([](const char* param) { return new EvalRankGpu<EvalNDCGGpu>("ndcg", param); }); XGBOOST_REGISTER_GPU_METRIC(MAPGpu, "map") .describe("map@k for rank computed on GPU.") .set_body([](const char* param) { return new EvalRankGpu<EvalMAPGpu>("map", param); }); } // namespace metric } // namespace xgboost
43b1c1e08a541a36c7fcbf497f96dcf106131f33.hip
// !!! This is a file automatically generated by hipify!!! // // Created by henri on 13/01/2020. // #include "matrix_add_scalar.hh" #include "kernels/kernel_mat_op.hh" double* mat_add_scalar(double* A, double scalar, int N, int M) { int SIZE = N*M; hipError_t rc = hipSuccess; // Allocate memory on the device double* d_A; double* d_B; auto* B = (double*)malloc(SIZE * sizeof(double)); hipMalloc(&d_A, SIZE * sizeof(double)); hipMalloc(&d_B, SIZE * sizeof(double)); // Copy to device rc = hipMemcpy(d_A, &A[0], SIZE * sizeof(double), hipMemcpyHostToDevice); if (rc) std::cout << "error memcpy\n"; hipMemset(d_B, 0, SIZE * sizeof(double)); // call the kernel matrixAddScalar(d_A, d_B, scalar, N, M); hipDeviceSynchronize(); // copy memory back to host hipMemcpy(&B[0], d_A, SIZE * sizeof(double), hipMemcpyDeviceToHost); hipFree(d_A); hipFree(d_B); return B; }
43b1c1e08a541a36c7fcbf497f96dcf106131f33.cu
// // Created by henri on 13/01/2020. // #include "matrix_add_scalar.hh" #include "kernels/kernel_mat_op.hh" double* mat_add_scalar(double* A, double scalar, int N, int M) { int SIZE = N*M; cudaError_t rc = cudaSuccess; // Allocate memory on the device double* d_A; double* d_B; auto* B = (double*)malloc(SIZE * sizeof(double)); cudaMalloc(&d_A, SIZE * sizeof(double)); cudaMalloc(&d_B, SIZE * sizeof(double)); // Copy to device rc = cudaMemcpy(d_A, &A[0], SIZE * sizeof(double), cudaMemcpyHostToDevice); if (rc) std::cout << "error memcpy\n"; cudaMemset(d_B, 0, SIZE * sizeof(double)); // call the kernel matrixAddScalar(d_A, d_B, scalar, N, M); cudaDeviceSynchronize(); // copy memory back to host cudaMemcpy(&B[0], d_A, SIZE * sizeof(double), cudaMemcpyDeviceToHost); cudaFree(d_A); cudaFree(d_B); return B; }
448b59d4d82dcdb597d18d92d3d31da50de151a1.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_factories.hpp> #include <cudf/detail/hashing.hpp> #include <cudf/detail/utilities/hash_functions.cuh> #include <cudf/detail/utilities/vector_factories.hpp> #include <cudf/table/experimental/row_operators.cuh> #include <cudf/table/table_device_view.cuh> #include <rmm/cuda_stream_view.hpp> #include <rmm/exec_policy.hpp> #include <thrust/iterator/counting_iterator.h> #include <thrust/tabulate.h> namespace cudf { namespace detail { namespace { using spark_hash_value_type = int32_t; template <typename Key, CUDF_ENABLE_IF(not cudf::is_nested<Key>())> struct SparkMurmurHash3_32 { using result_type = spark_hash_value_type; constexpr SparkMurmurHash3_32() = default; constexpr SparkMurmurHash3_32(uint32_t seed) : m_seed(seed) {} [[nodiscard]] __device__ inline uint32_t fmix32(uint32_t h) const { h ^= h >> 16; h *= 0x85ebca6b; h ^= h >> 13; h *= 0xc2b2ae35; h ^= h >> 16; return h; } [[nodiscard]] __device__ inline uint32_t getblock32(std::byte const* data, cudf::size_type offset) const { // Read a 4-byte value from the data pointer as individual bytes for safe // unaligned access (very likely for string types). 
auto block = reinterpret_cast<uint8_t const*>(data + offset); return block[0] | (block[1] << 8) | (block[2] << 16) | (block[3] << 24); } [[nodiscard]] result_type __device__ inline operator()(Key const& key) const { return compute(key); } template <typename T> result_type __device__ inline compute(T const& key) const { return compute_bytes(reinterpret_cast<std::byte const*>(&key), sizeof(T)); } result_type __device__ inline compute_remaining_bytes(std::byte const* data, cudf::size_type len, cudf::size_type tail_offset, result_type h) const { // Process remaining bytes that do not fill a four-byte chunk using Spark's approach // (does not conform to normal MurmurHash3). for (auto i = tail_offset; i < len; i++) { // We require a two-step cast to get the k1 value from the byte. First, // we must cast to a signed int8_t. Then, the sign bit is preserved when // casting to uint32_t under 2's complement. Java preserves the sign when // casting byte-to-int, but C++ does not. uint32_t k1 = static_cast<uint32_t>(std::to_integer<int8_t>(data[i])); k1 *= c1; k1 = cudf::detail::rotate_bits_left(k1, rot_c1); k1 *= c2; h ^= k1; h = cudf::detail::rotate_bits_left(h, rot_c2); h = h * 5 + c3; } return h; } result_type __device__ compute_bytes(std::byte const* data, cudf::size_type const len) const { constexpr cudf::size_type BLOCK_SIZE = 4; cudf::size_type const nblocks = len / BLOCK_SIZE; cudf::size_type const tail_offset = nblocks * BLOCK_SIZE; result_type h = m_seed; // Process all four-byte chunks. for (cudf::size_type i = 0; i < nblocks; i++) { uint32_t k1 = getblock32(data, i * BLOCK_SIZE); k1 *= c1; k1 = cudf::detail::rotate_bits_left(k1, rot_c1); k1 *= c2; h ^= k1; h = cudf::detail::rotate_bits_left(h, rot_c2); h = h * 5 + c3; } h = compute_remaining_bytes(data, len, tail_offset, h); // Finalize hash. 
h ^= len; h = fmix32(h); return h; } private: uint32_t m_seed{cudf::DEFAULT_HASH_SEED}; static constexpr uint32_t c1 = 0xcc9e2d51; static constexpr uint32_t c2 = 0x1b873593; static constexpr uint32_t c3 = 0xe6546b64; static constexpr uint32_t rot_c1 = 15; static constexpr uint32_t rot_c2 = 13; }; template <> spark_hash_value_type __device__ inline SparkMurmurHash3_32<bool>::operator()(bool const& key) const { return compute<uint32_t>(key); } template <> spark_hash_value_type __device__ inline SparkMurmurHash3_32<int8_t>::operator()( int8_t const& key) const { return compute<uint32_t>(key); } template <> spark_hash_value_type __device__ inline SparkMurmurHash3_32<uint8_t>::operator()( uint8_t const& key) const { return compute<uint32_t>(key); } template <> spark_hash_value_type __device__ inline SparkMurmurHash3_32<int16_t>::operator()( int16_t const& key) const { return compute<uint32_t>(key); } template <> spark_hash_value_type __device__ inline SparkMurmurHash3_32<uint16_t>::operator()( uint16_t const& key) const { return compute<uint32_t>(key); } template <> spark_hash_value_type __device__ inline SparkMurmurHash3_32<float>::operator()( float const& key) const { return compute<float>(detail::normalize_nans(key)); } template <> spark_hash_value_type __device__ inline SparkMurmurHash3_32<double>::operator()( double const& key) const { return compute<double>(detail::normalize_nans(key)); } template <> spark_hash_value_type __device__ inline SparkMurmurHash3_32<cudf::string_view>::operator()( cudf::string_view const& key) const { auto const data = reinterpret_cast<std::byte const*>(key.data()); auto const len = key.size_bytes(); return compute_bytes(data, len); } template <> spark_hash_value_type __device__ inline SparkMurmurHash3_32<numeric::decimal32>::operator()( numeric::decimal32 const& key) const { return compute<uint64_t>(key.value()); } template <> spark_hash_value_type __device__ inline SparkMurmurHash3_32<numeric::decimal64>::operator()( numeric::decimal64 
const& key) const { return compute<uint64_t>(key.value()); } template <> spark_hash_value_type __device__ inline SparkMurmurHash3_32<numeric::decimal128>::operator()( numeric::decimal128 const& key) const { // Generates the Spark MurmurHash3 hash value, mimicking the conversion: // java.math.BigDecimal.valueOf(unscaled_value, _scale).unscaledValue().toByteArray() // https://github.com/apache/spark/blob/master/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/hash.scala#L381 __int128_t const val = key.value(); constexpr cudf::size_type key_size = sizeof(__int128_t); std::byte const* data = reinterpret_cast<std::byte const*>(&val); // Small negative values start with 0xff..., small positive values start with 0x00... bool const is_negative = val < 0; std::byte const zero_value = is_negative ? std::byte{0xff} : std::byte{0x00}; // If the value can be represented with a shorter than 16-byte integer, the // leading bytes of the little-endian value are truncated and are not hashed. auto const reverse_begin = thrust::reverse_iterator(data + key_size); auto const reverse_end = thrust::reverse_iterator(data); auto const first_nonzero_byte = thrust::find_if_not(thrust::seq, reverse_begin, reverse_end, [zero_value](std::byte const& v) { return v == zero_value; }).base(); // Max handles special case of 0 and -1 which would shorten to 0 length otherwise cudf::size_type length = ::max(1, static_cast<cudf::size_type>(thrust::distance(data, first_nonzero_byte))); // Preserve the 2's complement sign bit by adding a byte back on if necessary. // e.g. 0x0000ff would shorten to 0x00ff. The 0x00 byte is retained to // preserve the sign bit, rather than leaving an "f" at the front which would // change the sign bit. However, 0x00007f would shorten to 0x7f. No extra byte // is needed because the leftmost bit matches the sign bit. Similarly for // negative values: 0xffff00 --> 0xff00 and 0xffff80 --> 0x80. 
if ((length < key_size) && (is_negative ^ bool(data[length - 1] & std::byte{0x80}))) { ++length; } // Convert to big endian by reversing the range of nonzero bytes. Only those bytes are hashed. __int128_t big_endian_value = 0; auto big_endian_data = reinterpret_cast<std::byte*>(&big_endian_value); thrust::reverse_copy(thrust::seq, data, data + length, big_endian_data); return compute_bytes(big_endian_data, length); } /** * @brief Computes the hash value of a row in the given table. * * This functor uses Spark conventions for Murmur hashing, which differs from * the Murmur implementation used in the rest of libcudf. These differences * include: * - Serially using the output hash as an input seed for the next item * - Ignorance of null values * * The serial use of hashes as seeds means that data of different nested types * can exhibit hash collisions. For example, a row of an integer column * containing a 1 will have the same hash as a lists column of integers * containing a list of [1] and a struct column of a single integer column * containing a struct of {1}. * * As a consequence of ignoring null values, inputs like [1], [1, null], and * [null, 1] have the same hash (an expected hash collision). This kind of * collision can also occur across a table of nullable columns and with nulls * in structs ({1, null} and {null, 1} have the same hash). The seed value (the * previous element's hash value) is returned as the hash if an element is * null. * * For additional differences such as special tail processing and decimal type * handling, refer to the SparkMurmurHash3_32 functor. * * @tparam hash_function Hash functor to use for hashing elements. Must be SparkMurmurHash3_32. * @tparam Nullate A cudf::nullate type describing whether to check for nulls. */ template <template <typename> class hash_function, typename Nullate> class spark_murmur_device_row_hasher { friend class cudf::experimental::row::hash::row_hasher; ///< Allow row_hasher to access private ///< members. 
public: /** * @brief Return the hash value of a row in the given table. * * @param row_index The row index to compute the hash value of * @return The hash value of the row */ __device__ auto operator()(size_type row_index) const noexcept { return detail::accumulate( _table.begin(), _table.end(), _seed, [row_index, nulls = this->_check_nulls] __device__(auto hash, auto column) { return cudf::type_dispatcher( column.type(), element_hasher_adapter<hash_function>{nulls, hash}, column, row_index); }); } private: /** * @brief Computes the hash value of an element in the given column. * * When the column is non-nested, this is a simple wrapper around the element_hasher. * When the column is nested, this uses a seed value to serially compute each * nested element, with the output hash becoming the seed for the next value. * This requires constructing a new hash functor for each nested element, * using the new seed from the previous element's hash. The hash of a null * element is the input seed (the previous element's hash). 
*/ template <template <typename> class hash_fn> class element_hasher_adapter { public: __device__ element_hasher_adapter(Nullate check_nulls, uint32_t seed) noexcept : _check_nulls(check_nulls), _seed(seed) { } using hash_functor = cudf::experimental::row::hash::element_hasher<hash_fn, Nullate>; template <typename T, CUDF_ENABLE_IF(not cudf::is_nested<T>())> __device__ spark_hash_value_type operator()(column_device_view const& col, size_type row_index) const noexcept { auto const hasher = hash_functor{_check_nulls, _seed, _seed}; return hasher.template operator()<T>(col, row_index); } template <typename T, CUDF_ENABLE_IF(cudf::is_nested<T>())> __device__ spark_hash_value_type operator()(column_device_view const& col, size_type row_index) const noexcept { column_device_view curr_col = col.slice(row_index, 1); while (curr_col.type().id() == type_id::STRUCT || curr_col.type().id() == type_id::LIST) { if (curr_col.type().id() == type_id::STRUCT) { if (curr_col.num_child_columns() == 0) { return _seed; } // Non-empty structs are assumed to be decomposed and contain only one child curr_col = detail::structs_column_device_view(curr_col).get_sliced_child(0); } else if (curr_col.type().id() == type_id::LIST) { curr_col = detail::lists_column_device_view(curr_col).get_sliced_child(); } } return detail::accumulate( thrust::counting_iterator(0), thrust::counting_iterator(curr_col.size()), _seed, [curr_col, nulls = this->_check_nulls] __device__(auto hash, auto element_index) { auto const hasher = hash_functor{nulls, hash, hash}; return cudf::type_dispatcher<cudf::experimental::dispatch_void_if_nested>( curr_col.type(), hasher, curr_col, element_index); }); } Nullate const _check_nulls; ///< Whether to check for nulls uint32_t const _seed; ///< The seed to use for hashing, also returned for null elements }; CUDF_HOST_DEVICE spark_murmur_device_row_hasher(Nullate check_nulls, table_device_view t, uint32_t seed = DEFAULT_HASH_SEED) noexcept : _check_nulls{check_nulls}, _table{t}, 
_seed(seed) { // Error out if passed an unsupported hash_function static_assert( std::is_base_of_v<SparkMurmurHash3_32<int>, hash_function<int>>, "spark_murmur_device_row_hasher only supports the SparkMurmurHash3_32 hash function"); } Nullate const _check_nulls; table_device_view const _table; uint32_t const _seed; }; void check_hash_compatibility(table_view const& input) { using column_checker_fn_t = std::function<void(column_view const&)>; column_checker_fn_t check_column = [&](column_view const& c) { if (c.type().id() == type_id::LIST) { auto const& list_col = lists_column_view(c); CUDF_EXPECTS(list_col.child().type().id() != type_id::STRUCT, "Cannot compute hash of a table with a LIST of STRUCT columns."); check_column(list_col.child()); } else if (c.type().id() == type_id::STRUCT) { for (auto child = c.child_begin(); child != c.child_end(); ++child) { check_column(*child); } } }; for (column_view const& c : input) { check_column(c); } } } // namespace std::unique_ptr<column> spark_murmur_hash3_32(table_view const& input, uint32_t seed, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { auto output = make_numeric_column(data_type(type_to_id<spark_hash_value_type>()), input.num_rows(), mask_state::UNALLOCATED, stream, mr); // Return early if there's nothing to hash if (input.num_columns() == 0 || input.num_rows() == 0) { return output; } // Lists of structs are not supported check_hash_compatibility(input); bool const nullable = has_nested_nulls(input); auto const row_hasher = cudf::experimental::row::hash::row_hasher(input, stream); auto output_view = output->mutable_view(); // Compute the hash value for each row thrust::tabulate( rmm::exec_policy(stream), output_view.begin<spark_hash_value_type>(), output_view.end<spark_hash_value_type>(), row_hasher.device_hasher<SparkMurmurHash3_32, spark_murmur_device_row_hasher>(nullable, seed)); return output; } } // namespace detail } // namespace cudf
448b59d4d82dcdb597d18d92d3d31da50de151a1.cu
/* * Copyright (c) 2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_factories.hpp> #include <cudf/detail/hashing.hpp> #include <cudf/detail/utilities/hash_functions.cuh> #include <cudf/detail/utilities/vector_factories.hpp> #include <cudf/table/experimental/row_operators.cuh> #include <cudf/table/table_device_view.cuh> #include <rmm/cuda_stream_view.hpp> #include <rmm/exec_policy.hpp> #include <thrust/iterator/counting_iterator.h> #include <thrust/tabulate.h> namespace cudf { namespace detail { namespace { using spark_hash_value_type = int32_t; template <typename Key, CUDF_ENABLE_IF(not cudf::is_nested<Key>())> struct SparkMurmurHash3_32 { using result_type = spark_hash_value_type; constexpr SparkMurmurHash3_32() = default; constexpr SparkMurmurHash3_32(uint32_t seed) : m_seed(seed) {} [[nodiscard]] __device__ inline uint32_t fmix32(uint32_t h) const { h ^= h >> 16; h *= 0x85ebca6b; h ^= h >> 13; h *= 0xc2b2ae35; h ^= h >> 16; return h; } [[nodiscard]] __device__ inline uint32_t getblock32(std::byte const* data, cudf::size_type offset) const { // Read a 4-byte value from the data pointer as individual bytes for safe // unaligned access (very likely for string types). 
auto block = reinterpret_cast<uint8_t const*>(data + offset); return block[0] | (block[1] << 8) | (block[2] << 16) | (block[3] << 24); } [[nodiscard]] result_type __device__ inline operator()(Key const& key) const { return compute(key); } template <typename T> result_type __device__ inline compute(T const& key) const { return compute_bytes(reinterpret_cast<std::byte const*>(&key), sizeof(T)); } result_type __device__ inline compute_remaining_bytes(std::byte const* data, cudf::size_type len, cudf::size_type tail_offset, result_type h) const { // Process remaining bytes that do not fill a four-byte chunk using Spark's approach // (does not conform to normal MurmurHash3). for (auto i = tail_offset; i < len; i++) { // We require a two-step cast to get the k1 value from the byte. First, // we must cast to a signed int8_t. Then, the sign bit is preserved when // casting to uint32_t under 2's complement. Java preserves the sign when // casting byte-to-int, but C++ does not. uint32_t k1 = static_cast<uint32_t>(std::to_integer<int8_t>(data[i])); k1 *= c1; k1 = cudf::detail::rotate_bits_left(k1, rot_c1); k1 *= c2; h ^= k1; h = cudf::detail::rotate_bits_left(h, rot_c2); h = h * 5 + c3; } return h; } result_type __device__ compute_bytes(std::byte const* data, cudf::size_type const len) const { constexpr cudf::size_type BLOCK_SIZE = 4; cudf::size_type const nblocks = len / BLOCK_SIZE; cudf::size_type const tail_offset = nblocks * BLOCK_SIZE; result_type h = m_seed; // Process all four-byte chunks. for (cudf::size_type i = 0; i < nblocks; i++) { uint32_t k1 = getblock32(data, i * BLOCK_SIZE); k1 *= c1; k1 = cudf::detail::rotate_bits_left(k1, rot_c1); k1 *= c2; h ^= k1; h = cudf::detail::rotate_bits_left(h, rot_c2); h = h * 5 + c3; } h = compute_remaining_bytes(data, len, tail_offset, h); // Finalize hash. 
h ^= len; h = fmix32(h); return h; } private: uint32_t m_seed{cudf::DEFAULT_HASH_SEED}; static constexpr uint32_t c1 = 0xcc9e2d51; static constexpr uint32_t c2 = 0x1b873593; static constexpr uint32_t c3 = 0xe6546b64; static constexpr uint32_t rot_c1 = 15; static constexpr uint32_t rot_c2 = 13; }; template <> spark_hash_value_type __device__ inline SparkMurmurHash3_32<bool>::operator()(bool const& key) const { return compute<uint32_t>(key); } template <> spark_hash_value_type __device__ inline SparkMurmurHash3_32<int8_t>::operator()( int8_t const& key) const { return compute<uint32_t>(key); } template <> spark_hash_value_type __device__ inline SparkMurmurHash3_32<uint8_t>::operator()( uint8_t const& key) const { return compute<uint32_t>(key); } template <> spark_hash_value_type __device__ inline SparkMurmurHash3_32<int16_t>::operator()( int16_t const& key) const { return compute<uint32_t>(key); } template <> spark_hash_value_type __device__ inline SparkMurmurHash3_32<uint16_t>::operator()( uint16_t const& key) const { return compute<uint32_t>(key); } template <> spark_hash_value_type __device__ inline SparkMurmurHash3_32<float>::operator()( float const& key) const { return compute<float>(detail::normalize_nans(key)); } template <> spark_hash_value_type __device__ inline SparkMurmurHash3_32<double>::operator()( double const& key) const { return compute<double>(detail::normalize_nans(key)); } template <> spark_hash_value_type __device__ inline SparkMurmurHash3_32<cudf::string_view>::operator()( cudf::string_view const& key) const { auto const data = reinterpret_cast<std::byte const*>(key.data()); auto const len = key.size_bytes(); return compute_bytes(data, len); } template <> spark_hash_value_type __device__ inline SparkMurmurHash3_32<numeric::decimal32>::operator()( numeric::decimal32 const& key) const { return compute<uint64_t>(key.value()); } template <> spark_hash_value_type __device__ inline SparkMurmurHash3_32<numeric::decimal64>::operator()( numeric::decimal64 
const& key) const { return compute<uint64_t>(key.value()); } template <> spark_hash_value_type __device__ inline SparkMurmurHash3_32<numeric::decimal128>::operator()( numeric::decimal128 const& key) const { // Generates the Spark MurmurHash3 hash value, mimicking the conversion: // java.math.BigDecimal.valueOf(unscaled_value, _scale).unscaledValue().toByteArray() // https://github.com/apache/spark/blob/master/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/hash.scala#L381 __int128_t const val = key.value(); constexpr cudf::size_type key_size = sizeof(__int128_t); std::byte const* data = reinterpret_cast<std::byte const*>(&val); // Small negative values start with 0xff..., small positive values start with 0x00... bool const is_negative = val < 0; std::byte const zero_value = is_negative ? std::byte{0xff} : std::byte{0x00}; // If the value can be represented with a shorter than 16-byte integer, the // leading bytes of the little-endian value are truncated and are not hashed. auto const reverse_begin = thrust::reverse_iterator(data + key_size); auto const reverse_end = thrust::reverse_iterator(data); auto const first_nonzero_byte = thrust::find_if_not(thrust::seq, reverse_begin, reverse_end, [zero_value](std::byte const& v) { return v == zero_value; }).base(); // Max handles special case of 0 and -1 which would shorten to 0 length otherwise cudf::size_type length = std::max(1, static_cast<cudf::size_type>(thrust::distance(data, first_nonzero_byte))); // Preserve the 2's complement sign bit by adding a byte back on if necessary. // e.g. 0x0000ff would shorten to 0x00ff. The 0x00 byte is retained to // preserve the sign bit, rather than leaving an "f" at the front which would // change the sign bit. However, 0x00007f would shorten to 0x7f. No extra byte // is needed because the leftmost bit matches the sign bit. Similarly for // negative values: 0xffff00 --> 0xff00 and 0xffff80 --> 0x80. 
if ((length < key_size) && (is_negative ^ bool(data[length - 1] & std::byte{0x80}))) { ++length; } // Convert to big endian by reversing the range of nonzero bytes. Only those bytes are hashed. __int128_t big_endian_value = 0; auto big_endian_data = reinterpret_cast<std::byte*>(&big_endian_value); thrust::reverse_copy(thrust::seq, data, data + length, big_endian_data); return compute_bytes(big_endian_data, length); } /** * @brief Computes the hash value of a row in the given table. * * This functor uses Spark conventions for Murmur hashing, which differs from * the Murmur implementation used in the rest of libcudf. These differences * include: * - Serially using the output hash as an input seed for the next item * - Ignorance of null values * * The serial use of hashes as seeds means that data of different nested types * can exhibit hash collisions. For example, a row of an integer column * containing a 1 will have the same hash as a lists column of integers * containing a list of [1] and a struct column of a single integer column * containing a struct of {1}. * * As a consequence of ignoring null values, inputs like [1], [1, null], and * [null, 1] have the same hash (an expected hash collision). This kind of * collision can also occur across a table of nullable columns and with nulls * in structs ({1, null} and {null, 1} have the same hash). The seed value (the * previous element's hash value) is returned as the hash if an element is * null. * * For additional differences such as special tail processing and decimal type * handling, refer to the SparkMurmurHash3_32 functor. * * @tparam hash_function Hash functor to use for hashing elements. Must be SparkMurmurHash3_32. * @tparam Nullate A cudf::nullate type describing whether to check for nulls. */ template <template <typename> class hash_function, typename Nullate> class spark_murmur_device_row_hasher { friend class cudf::experimental::row::hash::row_hasher; ///< Allow row_hasher to access private ///< members. 
public: /** * @brief Return the hash value of a row in the given table. * * @param row_index The row index to compute the hash value of * @return The hash value of the row */ __device__ auto operator()(size_type row_index) const noexcept { return detail::accumulate( _table.begin(), _table.end(), _seed, [row_index, nulls = this->_check_nulls] __device__(auto hash, auto column) { return cudf::type_dispatcher( column.type(), element_hasher_adapter<hash_function>{nulls, hash}, column, row_index); }); } private: /** * @brief Computes the hash value of an element in the given column. * * When the column is non-nested, this is a simple wrapper around the element_hasher. * When the column is nested, this uses a seed value to serially compute each * nested element, with the output hash becoming the seed for the next value. * This requires constructing a new hash functor for each nested element, * using the new seed from the previous element's hash. The hash of a null * element is the input seed (the previous element's hash). 
*/ template <template <typename> class hash_fn> class element_hasher_adapter { public: __device__ element_hasher_adapter(Nullate check_nulls, uint32_t seed) noexcept : _check_nulls(check_nulls), _seed(seed) { } using hash_functor = cudf::experimental::row::hash::element_hasher<hash_fn, Nullate>; template <typename T, CUDF_ENABLE_IF(not cudf::is_nested<T>())> __device__ spark_hash_value_type operator()(column_device_view const& col, size_type row_index) const noexcept { auto const hasher = hash_functor{_check_nulls, _seed, _seed}; return hasher.template operator()<T>(col, row_index); } template <typename T, CUDF_ENABLE_IF(cudf::is_nested<T>())> __device__ spark_hash_value_type operator()(column_device_view const& col, size_type row_index) const noexcept { column_device_view curr_col = col.slice(row_index, 1); while (curr_col.type().id() == type_id::STRUCT || curr_col.type().id() == type_id::LIST) { if (curr_col.type().id() == type_id::STRUCT) { if (curr_col.num_child_columns() == 0) { return _seed; } // Non-empty structs are assumed to be decomposed and contain only one child curr_col = detail::structs_column_device_view(curr_col).get_sliced_child(0); } else if (curr_col.type().id() == type_id::LIST) { curr_col = detail::lists_column_device_view(curr_col).get_sliced_child(); } } return detail::accumulate( thrust::counting_iterator(0), thrust::counting_iterator(curr_col.size()), _seed, [curr_col, nulls = this->_check_nulls] __device__(auto hash, auto element_index) { auto const hasher = hash_functor{nulls, hash, hash}; return cudf::type_dispatcher<cudf::experimental::dispatch_void_if_nested>( curr_col.type(), hasher, curr_col, element_index); }); } Nullate const _check_nulls; ///< Whether to check for nulls uint32_t const _seed; ///< The seed to use for hashing, also returned for null elements }; CUDF_HOST_DEVICE spark_murmur_device_row_hasher(Nullate check_nulls, table_device_view t, uint32_t seed = DEFAULT_HASH_SEED) noexcept : _check_nulls{check_nulls}, _table{t}, 
_seed(seed) { // Error out if passed an unsupported hash_function static_assert( std::is_base_of_v<SparkMurmurHash3_32<int>, hash_function<int>>, "spark_murmur_device_row_hasher only supports the SparkMurmurHash3_32 hash function"); } Nullate const _check_nulls; table_device_view const _table; uint32_t const _seed; }; void check_hash_compatibility(table_view const& input) { using column_checker_fn_t = std::function<void(column_view const&)>; column_checker_fn_t check_column = [&](column_view const& c) { if (c.type().id() == type_id::LIST) { auto const& list_col = lists_column_view(c); CUDF_EXPECTS(list_col.child().type().id() != type_id::STRUCT, "Cannot compute hash of a table with a LIST of STRUCT columns."); check_column(list_col.child()); } else if (c.type().id() == type_id::STRUCT) { for (auto child = c.child_begin(); child != c.child_end(); ++child) { check_column(*child); } } }; for (column_view const& c : input) { check_column(c); } } } // namespace std::unique_ptr<column> spark_murmur_hash3_32(table_view const& input, uint32_t seed, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { auto output = make_numeric_column(data_type(type_to_id<spark_hash_value_type>()), input.num_rows(), mask_state::UNALLOCATED, stream, mr); // Return early if there's nothing to hash if (input.num_columns() == 0 || input.num_rows() == 0) { return output; } // Lists of structs are not supported check_hash_compatibility(input); bool const nullable = has_nested_nulls(input); auto const row_hasher = cudf::experimental::row::hash::row_hasher(input, stream); auto output_view = output->mutable_view(); // Compute the hash value for each row thrust::tabulate( rmm::exec_policy(stream), output_view.begin<spark_hash_value_type>(), output_view.end<spark_hash_value_type>(), row_hasher.device_hasher<SparkMurmurHash3_32, spark_murmur_device_row_hasher>(nullable, seed)); return output; } } // namespace detail } // namespace cudf
459a168f79416c4b28dbde15a51fbcd6afb2600c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> /* Generic swap function in C, I got this off of stack overflow. */ #define swap(x,y) do { \ unsigned char swap_temp[sizeof(x) == sizeof(y) ? (signed)sizeof(x) : -1]; \ memcpy(swap_temp, &y, sizeof(x)); \ memcpy(&y, &x, sizeof(x)); \ memcpy(&x, swap_temp, sizeof(x)); \ } while(0) #define N 41 #define MAX 100 #define BLOCKS 4 // Number of blocks/subproblems that we're solving for the overall array. #define THREADS 10 // Number of subproblems inside a single block that we're solving. /* Creates and returns a pointer to an array sorted in decreasing order. If size = N, then the returned array is A[0..N-1] with A[0] = N, A[1] = N-1, ..., A[N-1] = 1 */ int* CreateUnsortedArray(int size); /* This creates and returns a pointer to a randomly generated array. */ int* CreateRandomArray(int size); /* Implementation of insertion sort. I copied and pasted this online. The idea is that each sub-array inside a block will be sorted by insertion sort. Sorts the array's elements in the range [first, last] */ __host__ __device__ void InsertionSort(int* array, int first, int last); /* Prints the elements of an array with a description of them attached in the range [first, last] */ __host__ __device__ void PrintArray(const char* descp, int* array, int first, int last); /* Merges the elements of the two sub-arrays corresponding to indices [leftFirst, leftLast] and [rightFirst, rightLast], respectively into a single, sorted array [leftFirst, rightLast]. Note that rightFirst = leftLast + 1 for this function to work. The merge code was also obtained online. */ __host__ __device__ void Merge(int *array, int *temp, int leftFirst, int leftLast, int rightFirst, int rightLast); /* This function does only the "merging" part of the "MergeSort" function below. 
It is intended to be called after each block in the array has been sorted, so that this would do a pair-wise merge of the blocks into a single array. */ __global__ void DoMergeOnly(int *array, int *temp, int size); /* Sorts the array using a method that behaves like merge sort. Note that temp is passed on to avoid dynamically allocating memory on the GPU. The idea here is that the array is paritioned into blocks, each block holds a single sub array. Each block is then partitioned into threads, where the threads themselves are sub-arrays of the block. The thread arrays are first sorted using insertion sort (since they're the smallest "unit" of the array). Then, each pair of adjacent threads are merged together until a single, merged array remains; this array is the current block sorted in ascending order. After the blocks each contain sorted subarrays. the function "DoMergeOnly" is called on a single block with the same number of threads as there were blocks in the preceding step. It merges the sub arrays in the blocks together and the final array is then sorted in ascending order. Note: If the number of blocks does not evenly divide the array size, then the remaining elements are added on to the last block. Same with the threads for the size of the array in each block. 
*/ __global__ void MergeSort(int *array, int *temp, int size); int main(int argc, char* argv[]) { // Create the test array int* a = CreateUnsortedArray(N); int *dev_a, *dev_temp; hipMalloc((void**)&dev_a, N*sizeof(int)); hipMemcpy(dev_a, a, N*sizeof(int), hipMemcpyHostToDevice); // Allocate the temporary array (to avoid doing it on the GPU) hipMalloc((void**)&dev_temp, N*sizeof(int)); // Print the array's contents PrintArray("Before MergeSort:", a, 0, N-1); printf("\n"); // Do the first step of merge sort, where the array contains BLOCKS // number of sorted subarrays after this statement hipLaunchKernelGGL(( MergeSort), dim3(BLOCKS), dim3(THREADS), 0, 0, dev_a, dev_temp, N); // Output the intermediate step (to verify that the sub arrays are sorted hipMemcpy(a, dev_a, N*sizeof(int), hipMemcpyDeviceToHost); PrintArray("Intermediate:", a, 0, N-1); printf("\n"); // Now do a pair-wise merge on the subarrays so that the final array is sorted. hipLaunchKernelGGL(( DoMergeOnly), dim3(1), dim3(BLOCKS), 0, 0, dev_a, dev_temp, N); // Output the final array hipMemcpy(a, dev_a, N*sizeof(int), hipMemcpyDeviceToHost); PrintArray("After MergeSort:", a, 0, N-1); printf("\n"); // Free the memory free(a); hipFree(dev_a); hipFree(dev_temp); return 0; } int* CreateUnsortedArray(int size) { int* array =(int*)malloc(size*sizeof(int)); for (int i = 0; i < size; ++i) { array[i] = size-i; } return array; } int* CreateRandomArray(int size) { int* array = (int*)malloc(size*sizeof(int)); for (int i = 0; i < size; ++i) { array[i] = rand() % MAX; } return array; } __host__ __device__ void InsertionSort(int* array, int first, int last) { for (int i = first; i <= last; ++i) { int j = i; while (j > first && array[j] < array[j-1]) { swap(array[j], array[j-1]); --j; } } } __host__ __device__ void PrintArray(const char* descp, int* array, int first, int last) { printf("%s\n", descp); for (int i = first; i <= last; ++i) { printf("%d ", array[i]); } printf("\n"); } __host__ __device__ void Merge(int 
*array, int *temp, int leftFirst, int leftLast, int rightFirst, int rightLast) { int i, j, k; i = leftFirst; k = leftFirst; j = rightFirst; while (i <= leftLast && j <= rightLast) { if (array[i] < array[j]) { temp[k++] = array[i++]; } else { temp[k++] = array[j++]; } } while (i <= leftLast) { temp[k++] = array[i++]; } while (j <= rightLast) { temp[k++] = array[j++]; } for (i = leftFirst; i <= rightLast; ++i) { array[i] = temp[i]; } } __global__ void MergeSort(int *array, int *temp, int size) { int elemPerBlock = size/gridDim.x; // Number of array elements per sub array of the block int blockFirst = blockIdx.x*elemPerBlock; // The starting point of the array for this block // Ending point of the sub array. Note if we're at the last block, we simply set this // To be N - 1, where N = size int blockLast = (blockIdx.x == (gridDim.x - 1) ? size - 1 : blockFirst + elemPerBlock - 1); // Number of array elements in the sub array for a single thread int elemPerThread = elemPerBlock/blockDim.x; int threadFirst = blockFirst + threadIdx.x*elemPerThread; // Same logic as blockFirst, save for threads // Same logic as blockLast, save now we're doing it for threads int threadLast = (threadIdx.x == (blockDim.x - 1) ? 
blockLast : threadFirst + elemPerThread - 1); InsertionSort(array, threadFirst, threadLast); // Sort the subarrays in each thread by insertion sort __syncthreads(); // Wait until all threads are finished //Now we merge pair by pair int numThreads = (blockDim.x+2-1)/2; // Initial pair is the number of threads per block over 2 while (numThreads > 1) { // The greater than 1 is because we may not have our threads as a power of 2, so we take the ceiling but ceiling never reaches 0 if (threadIdx.x < numThreads) { // Is a valid pair that we are considering int startId = threadIdx.x*2; threadFirst = blockFirst + startId*elemPerThread; // Start of the first pair int splitPoint = threadFirst+elemPerThread; // End location of the first pair if (threadIdx.x == (numThreads - 1)) { // If it's the last thread, we want the end to be the last threadLast = blockLast; } else { // Otherwise, we traverse 2*M elements, where M is the number of elements per pair to the end threadLast = threadFirst+2*elemPerThread-1; } Merge(array, temp, threadFirst, splitPoint-1, splitPoint, threadLast); } __syncthreads(); // Wait until all threads are done numThreads = (numThreads+2-1)/2; elemPerThread *= 2; } if (threadIdx.x == 0) { //Finish off the merge. We did not set test condition to 0 above because we always took the ceiling. So we address single thread case here. int splitPoint = blockFirst+elemPerThread; // Calculate the split point //Last merge Merge(array, temp, blockFirst, splitPoint-1, splitPoint, blockLast); } } // This code is the same as the latter steps in the above. I did not write this // in a separate function at the time because I wanted to test my code first // and avoid repeating the first 5-6 lines of computation in the threads. 
__global__ void DoMergeOnly(int *array, int *temp, int size) { int elemPerBlock = size; int blockFirst = 0; int blockLast = size-1; int elemPerThread = elemPerBlock/blockDim.x; //Now we merge pair by pair int numThreads = (blockDim.x+2-1)/2; while (numThreads > 1) { if (threadIdx.x < numThreads) { int startId = threadIdx.x*2; int threadFirst = blockFirst + startId*elemPerThread; int splitPoint = threadFirst+elemPerThread; int threadLast; if (threadIdx.x == (numThreads - 1)) { threadLast = blockLast; } else { threadLast = threadFirst+2*elemPerThread-1; } Merge(array, temp, threadFirst, splitPoint-1, splitPoint, threadLast); } __syncthreads(); numThreads = (numThreads+2-1)/2; elemPerThread *= 2; } if (threadIdx.x == 0) { //Finish off the merge int splitPoint = blockFirst+elemPerThread; Merge(array, temp, blockFirst, splitPoint-1, splitPoint, blockLast); } }
459a168f79416c4b28dbde15a51fbcd6afb2600c.cu
#include <stdio.h> /* Generic swap function in C, I got this off of stack overflow. */ #define swap(x,y) do { \ unsigned char swap_temp[sizeof(x) == sizeof(y) ? (signed)sizeof(x) : -1]; \ memcpy(swap_temp, &y, sizeof(x)); \ memcpy(&y, &x, sizeof(x)); \ memcpy(&x, swap_temp, sizeof(x)); \ } while(0) #define N 41 #define MAX 100 #define BLOCKS 4 // Number of blocks/subproblems that we're solving for the overall array. #define THREADS 10 // Number of subproblems inside a single block that we're solving. /* Creates and returns a pointer to an array sorted in decreasing order. If size = N, then the returned array is A[0..N-1] with A[0] = N, A[1] = N-1, ..., A[N-1] = 1 */ int* CreateUnsortedArray(int size); /* This creates and returns a pointer to a randomly generated array. */ int* CreateRandomArray(int size); /* Implementation of insertion sort. I copied and pasted this online. The idea is that each sub-array inside a block will be sorted by insertion sort. Sorts the array's elements in the range [first, last] */ __host__ __device__ void InsertionSort(int* array, int first, int last); /* Prints the elements of an array with a description of them attached in the range [first, last] */ __host__ __device__ void PrintArray(const char* descp, int* array, int first, int last); /* Merges the elements of the two sub-arrays corresponding to indices [leftFirst, leftLast] and [rightFirst, rightLast], respectively into a single, sorted array [leftFirst, rightLast]. Note that rightFirst = leftLast + 1 for this function to work. The merge code was also obtained online. */ __host__ __device__ void Merge(int *array, int *temp, int leftFirst, int leftLast, int rightFirst, int rightLast); /* This function does only the "merging" part of the "MergeSort" function below. It is intended to be called after each block in the array has been sorted, so that this would do a pair-wise merge of the blocks into a single array. 
*/ __global__ void DoMergeOnly(int *array, int *temp, int size); /* Sorts the array using a method that behaves like merge sort. Note that temp is passed on to avoid dynamically allocating memory on the GPU. The idea here is that the array is paritioned into blocks, each block holds a single sub array. Each block is then partitioned into threads, where the threads themselves are sub-arrays of the block. The thread arrays are first sorted using insertion sort (since they're the smallest "unit" of the array). Then, each pair of adjacent threads are merged together until a single, merged array remains; this array is the current block sorted in ascending order. After the blocks each contain sorted subarrays. the function "DoMergeOnly" is called on a single block with the same number of threads as there were blocks in the preceding step. It merges the sub arrays in the blocks together and the final array is then sorted in ascending order. Note: If the number of blocks does not evenly divide the array size, then the remaining elements are added on to the last block. Same with the threads for the size of the array in each block. 
*/ __global__ void MergeSort(int *array, int *temp, int size); int main(int argc, char* argv[]) { // Create the test array int* a = CreateUnsortedArray(N); int *dev_a, *dev_temp; cudaMalloc((void**)&dev_a, N*sizeof(int)); cudaMemcpy(dev_a, a, N*sizeof(int), cudaMemcpyHostToDevice); // Allocate the temporary array (to avoid doing it on the GPU) cudaMalloc((void**)&dev_temp, N*sizeof(int)); // Print the array's contents PrintArray("Before MergeSort:", a, 0, N-1); printf("\n"); // Do the first step of merge sort, where the array contains BLOCKS // number of sorted subarrays after this statement MergeSort<<<BLOCKS, THREADS>>>(dev_a, dev_temp, N); // Output the intermediate step (to verify that the sub arrays are sorted cudaMemcpy(a, dev_a, N*sizeof(int), cudaMemcpyDeviceToHost); PrintArray("Intermediate:", a, 0, N-1); printf("\n"); // Now do a pair-wise merge on the subarrays so that the final array is sorted. DoMergeOnly<<<1, BLOCKS>>>(dev_a, dev_temp, N); // Output the final array cudaMemcpy(a, dev_a, N*sizeof(int), cudaMemcpyDeviceToHost); PrintArray("After MergeSort:", a, 0, N-1); printf("\n"); // Free the memory free(a); cudaFree(dev_a); cudaFree(dev_temp); return 0; } int* CreateUnsortedArray(int size) { int* array =(int*)malloc(size*sizeof(int)); for (int i = 0; i < size; ++i) { array[i] = size-i; } return array; } int* CreateRandomArray(int size) { int* array = (int*)malloc(size*sizeof(int)); for (int i = 0; i < size; ++i) { array[i] = rand() % MAX; } return array; } __host__ __device__ void InsertionSort(int* array, int first, int last) { for (int i = first; i <= last; ++i) { int j = i; while (j > first && array[j] < array[j-1]) { swap(array[j], array[j-1]); --j; } } } __host__ __device__ void PrintArray(const char* descp, int* array, int first, int last) { printf("%s\n", descp); for (int i = first; i <= last; ++i) { printf("%d ", array[i]); } printf("\n"); } __host__ __device__ void Merge(int *array, int *temp, int leftFirst, int leftLast, int rightFirst, int 
rightLast) { int i, j, k; i = leftFirst; k = leftFirst; j = rightFirst; while (i <= leftLast && j <= rightLast) { if (array[i] < array[j]) { temp[k++] = array[i++]; } else { temp[k++] = array[j++]; } } while (i <= leftLast) { temp[k++] = array[i++]; } while (j <= rightLast) { temp[k++] = array[j++]; } for (i = leftFirst; i <= rightLast; ++i) { array[i] = temp[i]; } } __global__ void MergeSort(int *array, int *temp, int size) { int elemPerBlock = size/gridDim.x; // Number of array elements per sub array of the block int blockFirst = blockIdx.x*elemPerBlock; // The starting point of the array for this block // Ending point of the sub array. Note if we're at the last block, we simply set this // To be N - 1, where N = size int blockLast = (blockIdx.x == (gridDim.x - 1) ? size - 1 : blockFirst + elemPerBlock - 1); // Number of array elements in the sub array for a single thread int elemPerThread = elemPerBlock/blockDim.x; int threadFirst = blockFirst + threadIdx.x*elemPerThread; // Same logic as blockFirst, save for threads // Same logic as blockLast, save now we're doing it for threads int threadLast = (threadIdx.x == (blockDim.x - 1) ? 
blockLast : threadFirst + elemPerThread - 1); InsertionSort(array, threadFirst, threadLast); // Sort the subarrays in each thread by insertion sort __syncthreads(); // Wait until all threads are finished //Now we merge pair by pair int numThreads = (blockDim.x+2-1)/2; // Initial pair is the number of threads per block over 2 while (numThreads > 1) { // The greater than 1 is because we may not have our threads as a power of 2, so we take the ceiling but ceiling never reaches 0 if (threadIdx.x < numThreads) { // Is a valid pair that we are considering int startId = threadIdx.x*2; threadFirst = blockFirst + startId*elemPerThread; // Start of the first pair int splitPoint = threadFirst+elemPerThread; // End location of the first pair if (threadIdx.x == (numThreads - 1)) { // If it's the last thread, we want the end to be the last threadLast = blockLast; } else { // Otherwise, we traverse 2*M elements, where M is the number of elements per pair to the end threadLast = threadFirst+2*elemPerThread-1; } Merge(array, temp, threadFirst, splitPoint-1, splitPoint, threadLast); } __syncthreads(); // Wait until all threads are done numThreads = (numThreads+2-1)/2; elemPerThread *= 2; } if (threadIdx.x == 0) { //Finish off the merge. We did not set test condition to 0 above because we always took the ceiling. So we address single thread case here. int splitPoint = blockFirst+elemPerThread; // Calculate the split point //Last merge Merge(array, temp, blockFirst, splitPoint-1, splitPoint, blockLast); } } // This code is the same as the latter steps in the above. I did not write this // in a separate function at the time because I wanted to test my code first // and avoid repeating the first 5-6 lines of computation in the threads. 
__global__ void DoMergeOnly(int *array, int *temp, int size) { int elemPerBlock = size; int blockFirst = 0; int blockLast = size-1; int elemPerThread = elemPerBlock/blockDim.x; //Now we merge pair by pair int numThreads = (blockDim.x+2-1)/2; while (numThreads > 1) { if (threadIdx.x < numThreads) { int startId = threadIdx.x*2; int threadFirst = blockFirst + startId*elemPerThread; int splitPoint = threadFirst+elemPerThread; int threadLast; if (threadIdx.x == (numThreads - 1)) { threadLast = blockLast; } else { threadLast = threadFirst+2*elemPerThread-1; } Merge(array, temp, threadFirst, splitPoint-1, splitPoint, threadLast); } __syncthreads(); numThreads = (numThreads+2-1)/2; elemPerThread *= 2; } if (threadIdx.x == 0) { //Finish off the merge int splitPoint = blockFirst+elemPerThread; Merge(array, temp, blockFirst, splitPoint-1, splitPoint, blockLast); } }
5f0879b9887a4995f3d3841ce76c1402d3e1eb54.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @generated from sparse/blas/zmergecg.cu, normal z -> d, Mon Jun 25 18:24:26 2018 @author Hartwig Anzt */ #include "magmasparse_internal.h" #define BLOCK_SIZE 512 #define PRECISION_d // These routines merge multiple kernels from dmergecg into one // for a description see // "Reformulated Conjugate Gradient for the Energy-Aware // Solution of Linear Systems on GPUs (ICPP '13) // accelerated reduction for one vector __global__ void magma_dcgreduce_kernel_spmv1( int Gs, int n, double * vtmp, double * vtmp2 ) { extern __shared__ double temp[]; int Idx = threadIdx.x; int blockSize = 128; int gridSize = blockSize * 2 * gridDim.x; temp[Idx] = MAGMA_D_MAKE( 0.0, 0.0); int i = blockIdx.x * ( blockSize * 2 ) + Idx; while (i < Gs ) { temp[ Idx ] += vtmp[ i ]; temp[ Idx ] += ( i + blockSize < Gs ) ? 
vtmp[ i + blockSize ] : MAGMA_D_MAKE( 0.0, 0.0); i += gridSize; } __syncthreads(); if ( Idx < 64 ) { temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ) { temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ) { volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ) { volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ) { vtmp2[ blockIdx.x ] = temp[ 0 ]; } } // accelerated reduction for two vectors __global__ void magma_dcgreduce_kernel_spmv2( int Gs, int n, double * vtmp, double * vtmp2 ) { extern __shared__ double temp[]; int Idx = threadIdx.x; int blockSize = 128; int gridSize = blockSize * 2 * gridDim.x; int j; for( j=0; j<2; j++){ int i = blockIdx.x * ( blockSize * 2 ) + Idx; temp[Idx+j*(blockSize)] = MAGMA_D_ZERO; while (i < Gs ) { temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ]; temp[ Idx+j*(blockSize) ] += ( i + (blockSize) < Gs ) ? 
vtmp[ i+j*n + (blockSize) ] : MAGMA_D_ZERO; i += gridSize; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<2; j++){ temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ]; } } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; for( j=0; j<2; j++){ temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ]; } } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; for( j=0; j<2; j++){ temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<2; j++){ vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ]; } } } // computes the SpMV using CSR and the first step of the reduction __global__ void 
magma_dcgmerge_spmvcsr_kernel( int n, double * dval, magma_index_t * drowptr, magma_index_t * dcolind, double * d, double * z, double * vtmp ) { extern __shared__ double temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int j; temp[ Idx ] = MAGMA_D_MAKE( 0.0, 0.0); if( i<n ) { double dot = MAGMA_D_ZERO; int start = drowptr[ i ]; int end = drowptr[ i+1 ]; for( j=start; j<end; j++) dot += dval[ j ] * d[ dcolind[j] ]; z[ i ] = dot; temp[ Idx ] = d[ i ] * dot; } __syncthreads(); if ( Idx < 128 ) { temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ) { temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ) { temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ) { volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ) { volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ) { vtmp[ blockIdx.x ] = temp[ 0 ]; } } // computes the SpMV using ELL and the first step of the reduction __global__ void magma_dcgmerge_spmvell_kernel( int n, int num_cols_per_row, double * dval, magma_index_t * dcolind, double * d, double * z, double * vtmp ) { extern __shared__ double temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; temp[ Idx ] = MAGMA_D_MAKE( 0.0, 
0.0); if(i < n ) { double dot = MAGMA_D_MAKE(0.0, 0.0); for ( int k = 0; k < num_cols_per_row; k++ ) { int col = dcolind [ n * k + i ]; double val = dval [ n * k + i ]; if( val != 0) dot += val * d[ col ]; } z[ i ] = dot; temp[ Idx ] = d[ i ] * dot; } __syncthreads(); if ( Idx < 128 ) { temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ) { temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ) { temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ) { volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ) { volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ) { vtmp[ blockIdx.x ] = temp[ 0 ]; } } // computes the SpMV using ELLPACK and the first step of the reduction __global__ void magma_dcgmerge_spmvellpack_kernel( int n, int num_cols_per_row, double * dval, magma_index_t * dcolind, double * d, double * z, double * vtmp ) { extern __shared__ double temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; temp[ Idx ] = MAGMA_D_MAKE( 0.0, 0.0); if(i < n ) { double dot = MAGMA_D_MAKE(0.0, 0.0); for ( int k = 0; k < num_cols_per_row; k++ ) { int col = dcolind [ num_cols_per_row * i + k ]; double val = dval [ num_cols_per_row * i + k ]; if( val != 0) dot += val * 
d[ col ]; } z[ i ] = dot; temp[ Idx ] = d[ i ] * dot; } __syncthreads(); if ( Idx < 128 ) { temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ) { temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ) { temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ) { volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ) { volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ) { vtmp[ blockIdx.x ] = temp[ 0 ]; } } // computes the SpMV using SELL alignment 1 and the first step of the reduction __global__ void magma_dcgmerge_spmvell_kernelb1( int n, int blocksize, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, double * d, double * z, double * vtmp ) { extern __shared__ double temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; temp[ Idx ] = MAGMA_D_MAKE( 0.0, 0.0); int idx = threadIdx.x; // local row int bdx = blockIdx.x; // global block index int row = bdx * 256 + idx; // global row index // int lblocksize = ( row + blocksize < num_rows) ? 
blocksize : ( num_rows - blocksize * (row/blocksize) ); int lrow = threadIdx.x%blocksize; // local row; if( row < n ) { int offset = drowptr[ row/blocksize ]; int border = (drowptr[ row/blocksize+1 ]-offset)/blocksize; double dot = MAGMA_D_MAKE(0.0, 0.0); for ( int n = 0; n < border; n++) { int col = dcolind [ offset+ blocksize * n + lrow ]; double val = dval[ offset+ blocksize * n + lrow ]; dot = dot + val * d [ col ]; } z[ i ] = dot; temp[ Idx ] = d[ i ] * dot; } /* if(i < n ) { int offset = drowptr[ blockIdx.x ]; int border = (drowptr[ blockIdx.x+1 ]-offset)/blocksize; double dot = MAGMA_D_MAKE(0.0, 0.0); for ( int k = 0; k < border; k++){ int col = dcolind [ offset+ blocksize * k + threadIdx.x ]; double val = dval[offset+ blocksize * k + threadIdx.x]; if( val != 0){ dot += val*d[col]; } } //double dot = MAGMA_D_MAKE(0.0, 0.0); //for ( int k = 0; k < num_cols_per_row; k++ ) { // int col = dcolind [ n * k + i ]; // double val = dval [ n * k + i ]; // if( val != 0) // dot += val * d[ col ]; //} z[ i ] = dot; temp[ Idx ] = d[ i ] * dot; }*/ __syncthreads(); if ( Idx < 128 ) { temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ) { temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ) { temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ) { volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ) { volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += 
temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ) { vtmp[ blockIdx.x ] = temp[ 0 ]; } } // computes the SpMV using ELLRT 8 threads per row __global__ void magma_dcgmerge_spmvellpackrt_kernel_8( int n, double * dval, magma_index_t * dcolind, magma_index_t * drowlength, double * d, double * z, double * vtmp, magma_int_t T, magma_int_t alignment ) { int idx = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x; // global thread index int idb = threadIdx.x; // local thread index int idp = idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ double shared[]; if(i < n ) { double dot = MAGMA_D_MAKE(0.0, 0.0); int max_ = magma_ceildiv( drowlength[i], T ); // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { // original code in paper (not working for me) //double val = dval[ k*(T*alignment)+(i*T)+idp ]; //int col = dcolind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) double val = dval[ k*(T)+(i*alignment)+idp ]; int col = dcolind [ k*(T)+(i*alignment)+idp ]; dot += val * d[ col ]; } shared[idb] = dot; if( idp < 4 ) { shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { z[i] = (shared[idb]+shared[idb+1]); } } } } // computes the SpMV using ELLRT 8 threads per row __global__ void magma_dcgmerge_spmvellpackrt_kernel_16( int n, double * dval, magma_index_t * dcolind, magma_index_t * drowlength, double * d, double * z, double * vtmp, magma_int_t T, magma_int_t alignment ) { int idx = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x; // global thread index int idb = threadIdx.x; // local thread index int idp = idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ double shared[]; if(i < n ) { double dot = MAGMA_D_MAKE(0.0, 0.0); int max_ = 
magma_ceildiv( drowlength[i], T ); // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { // original code in paper (not working for me) //double val = dval[ k*(T*alignment)+(i*T)+idp ]; //int col = dcolind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) double val = dval[ k*(T)+(i*alignment)+idp ]; int col = dcolind [ k*(T)+(i*alignment)+idp ]; dot += val * d[ col ]; } shared[idb] = dot; if( idp < 8 ) { shared[idb]+=shared[idb+8]; if( idp < 4 ) shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { z[i] = (shared[idb]+shared[idb+1]); } } } } // computes the SpMV using ELLRT 8 threads per row __global__ void magma_dcgmerge_spmvellpackrt_kernel_32( int n, double * dval, magma_index_t * dcolind, magma_index_t * drowlength, double * d, double * z, double * vtmp, magma_int_t T, magma_int_t alignment ) { int idx = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x; // global thread index int idb = threadIdx.x; // local thread index int idp = idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ double shared[]; if(i < n ) { double dot = MAGMA_D_MAKE(0.0, 0.0); int max_ = magma_ceildiv( drowlength[i], T ); // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { // original code in paper (not working for me) //double val = dval[ k*(T*alignment)+(i*T)+idp ]; //int col = dcolind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) double val = dval[ k*(T)+(i*alignment)+idp ]; int col = dcolind [ k*(T)+(i*alignment)+idp ]; dot += val * d[ col ]; } shared[idb] = dot; if( idp < 16 ) { shared[idb]+=shared[idb+16]; if( idp < 8 ) shared[idb]+=shared[idb+8]; if( idp < 4 ) shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { z[i] = (shared[idb]+shared[idb+1]); } } } } // additional kernel necessary to compute first reduction step __global__ void magma_dcgmerge_spmvellpackrt_kernel2( int n, 
double * z, double * d, double * vtmp2 ) { extern __shared__ double temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; temp[ Idx ] = ( i < n ) ? z[i]*d[i] : MAGMA_D_MAKE(0.0, 0.0); __syncthreads(); if ( Idx < 128 ) { temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ) { temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ) { temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ) { volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ) { volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ) { vtmp2[ blockIdx.x ] = temp[ 0 ]; } } // computes the SpMV using SELLC __global__ void magma_dcgmerge_spmvsellc_kernel( int num_rows, int blocksize, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, double * d, double * z, double * vtmp) { extern __shared__ double temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int offset = drowptr[ blockIdx.x ]; int border = (drowptr[ blockIdx.x+1 ]-offset)/blocksize; temp[ Idx ] = MAGMA_D_MAKE( 0.0, 0.0); if(i < num_rows ) { double dot = MAGMA_D_MAKE(0.0, 0.0); for ( int n = 0; n < border; n ++) { int col = dcolind [offset+ blocksize * n + Idx ]; double val = dval[offset+ blocksize * n + Idx]; if( 
val != 0) { dot=dot+val*d[col]; } } z[ i ] = dot; temp[ Idx ] = d[ i ] * dot; } __syncthreads(); if ( Idx < 128 ) { temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ) { temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ) { temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ) { volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ) { volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ) { vtmp[ blockIdx.x ] = temp[ 0 ]; } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void magma_dcgmerge_spmvsellpt_kernel_8( int num_rows, int blocksize, int T, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, double * d, double * z) { // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ double shared[]; if(row < num_rows ) { double dot = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { double val = dval[ offset + ldx + block*k ]; int col = dcolind[ offset + ldx + block*k ]; dot += val * d[ col ]; } shared[ldx] = dot; __syncthreads(); if( idx < 4 ) { shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { z[row] = (shared[ldx]+shared[ldx+blocksize*1]); } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void magma_dcgmerge_spmvsellpt_kernel_16( int num_rows, int blocksize, int T, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, double * d, double * z) { // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ double shared[]; if(row < num_rows ) { double dot = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { double val = dval[ offset + ldx + block*k ]; int col = dcolind[ offset + ldx + block*k ]; dot += val * d[ col ]; } shared[ldx] = dot; __syncthreads(); if( idx < 8 ) { shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { z[row] = (shared[ldx]+shared[ldx+blocksize*1]); } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void magma_dcgmerge_spmvsellpt_kernel_32( int num_rows, int blocksize, int T, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, double * d, double * z) { // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ double shared[]; if(row < num_rows ) { double dot = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { double val = dval[ offset + ldx + block*k ]; int col = dcolind[ offset + ldx + block*k ]; dot += val * d[ col ]; } shared[ldx] = dot; __syncthreads(); if( idx < 16 ) { shared[ldx]+=shared[ldx+blocksize*16]; __syncthreads(); if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { z[row] = (shared[ldx]+shared[ldx+blocksize*1]); } } } } // kernel to handle scalars __global__ void // rho = beta/tmp; gamma = beta; magma_dcg_rhokernel( double * skp ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if( i==0 ) { double tmp = skp[1]; skp[3] = tmp/skp[4]; skp[2] = tmp; } } /** Purpose ------- Merges the first SpmV using different formats with the dot product and the computation of rho Arguments --------- @param[in] A magma_d_matrix input matrix @param[in] d1 magmaDouble_ptr temporary vector @param[in] d2 magmaDouble_ptr temporary vector @param[in] dd magmaDouble_ptr input vector d @param[out] dz magmaDouble_ptr 
input vector z @param[out] skp magmaDouble_ptr array for parameters ( skp[3]=rho ) @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" magma_int_t magma_dcgmerge_spmv1( magma_d_matrix A, magmaDouble_ptr d1, magmaDouble_ptr d2, magmaDouble_ptr dd, magmaDouble_ptr dz, magmaDouble_ptr skp, magma_queue_t queue ) { int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( magma_ceildiv( A.num_rows, local_block_size ) ); dim3 Gs_next; int Ms = local_block_size * sizeof( double ); magmaDouble_ptr aux1 = d1, aux2 = d2; int b = 1; if ( A.storage_type == Magma_CSR ) hipLaunchKernelGGL(( magma_dcgmerge_spmvcsr_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() , A.num_rows, A.dval, A.drow, A.dcol, dd, dz, d1 ); else if ( A.storage_type == Magma_ELLPACKT ) hipLaunchKernelGGL(( magma_dcgmerge_spmvellpack_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() , A.num_rows, A.max_nnz_row, A.dval, A.dcol, dd, dz, d1 ); else if ( A.storage_type == Magma_ELL ) hipLaunchKernelGGL(( magma_dcgmerge_spmvell_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() , A.num_rows, A.max_nnz_row, A.dval, A.dcol, dd, dz, d1 ); else if ( A.storage_type == Magma_CUCSR ) { hipsparseHandle_t cusparseHandle = 0; hipsparseMatDescr_t descr = 0; double c_one = MAGMA_D_ONE; double c_zero = MAGMA_D_ZERO; hipsparseCreate( &cusparseHandle ); hipsparseSetStream( cusparseHandle, queue->cuda_stream() ); hipsparseCreateMatDescr( &descr ); hipsparseSetMatType( descr, HIPSPARSE_MATRIX_TYPE_GENERAL ); hipsparseSetMatIndexBase( descr, HIPSPARSE_INDEX_BASE_ZERO ); hipsparseDcsrmv( cusparseHandle,HIPSPARSE_OPERATION_NON_TRANSPOSE, A.num_rows, A.num_cols, A.nnz, &c_one, descr, A.dval, A.drow, A.dcol, dd, &c_zero, dz ); hipsparseDestroyMatDescr( descr ); hipsparseDestroy( cusparseHandle ); cusparseHandle = 0; descr = 0; hipLaunchKernelGGL(( magma_dcgmerge_spmvellpackrt_kernel2), dim3(Gs), dim3(Bs), Ms, 
queue->cuda_stream() , A.num_rows, dz, dd, d1 ); } else if ( A.storage_type == Magma_SELLP && A.alignment == 1 ) { hipLaunchKernelGGL(( magma_dcgmerge_spmvell_kernelb1), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() , A.num_rows, A.blocksize, A.dval, A.dcol, A.drow, dd, dz, d1 ); } else if ( A.storage_type == Magma_SELLP && A.alignment > 1) { int num_threadssellp = A.blocksize*A.alignment; magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 && num_threadssellp > 256 ) printf("error: too much shared memory requested.\n"); dim3 block( A.blocksize, A.alignment, 1); int dimgrid1 = int( sqrt( double( A.numblocks ))); int dimgrid2 = magma_ceildiv( A.numblocks, dimgrid1 ); dim3 gridsellp( dimgrid1, dimgrid2, 1); int Mssellp = num_threadssellp * sizeof( double ); if ( A.alignment == 8) hipLaunchKernelGGL(( magma_dcgmerge_spmvsellpt_kernel_8) , dim3(gridsellp), dim3(block), Mssellp, queue->cuda_stream() , A.num_rows, A.blocksize, A.alignment, A.dval, A.dcol, A.drow, dd, dz); else if ( A.alignment == 16) hipLaunchKernelGGL(( magma_dcgmerge_spmvsellpt_kernel_16) , dim3(gridsellp), dim3(block), Mssellp, queue->cuda_stream() , A.num_rows, A.blocksize, A.alignment, A.dval, A.dcol, A.drow, dd, dz); else if ( A.alignment == 32) hipLaunchKernelGGL(( magma_dcgmerge_spmvsellpt_kernel_32) , dim3(gridsellp), dim3(block), Mssellp, queue->cuda_stream() , A.num_rows, A.blocksize, A.alignment, A.dval, A.dcol, A.drow, dd, dz); else printf("error: alignment not supported.\n"); // in case of using SELLP, we can't efficiently merge the // dot product and the first reduction loop into the SpMV kernel // as the SpMV grid would result in low occupancy. 
hipLaunchKernelGGL(( magma_dcgmerge_spmvellpackrt_kernel2), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() , A.num_rows, dz, dd, d1 ); } else if ( A.storage_type == Magma_ELLRT ) { // in case of using ELLRT, we need a different grid, assigning // threads_per_row processors to each row // the block size is num_threads // fixed values int num_blocks = magma_ceildiv( A.num_rows, A.blocksize ); int num_threads = A.alignment*A.blocksize; int real_row_length = magma_roundup( A.max_nnz_row, A.alignment ); magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 && num_threads > 256 ) printf("error: too much shared memory requested.\n"); int dimgrid1 = int( sqrt( double( num_blocks ))); int dimgrid2 = magma_ceildiv( num_blocks, dimgrid1 ); dim3 gridellrt( dimgrid1, dimgrid2, 1); int Mellrt = A.alignment * A.blocksize * sizeof( double ); // printf("launch kernel: %dx%d %d %d\n", grid.x, grid.y, num_threads , Ms); if ( A.alignment == 32 ) { hipLaunchKernelGGL(( magma_dcgmerge_spmvellpackrt_kernel_32) , dim3(gridellrt), dim3(num_threads) , Mellrt, queue->cuda_stream() , A.num_rows, A.dval, A.dcol, A.drow, dd, dz, d1, A.alignment, real_row_length ); } else if ( A.alignment == 16 ) { hipLaunchKernelGGL(( magma_dcgmerge_spmvellpackrt_kernel_16) , dim3(gridellrt), dim3(num_threads) , Mellrt, queue->cuda_stream() , A.num_rows, A.dval, A.dcol, A.drow, dd, dz, d1, A.alignment, real_row_length ); } else if ( A.alignment == 8 ) { hipLaunchKernelGGL(( magma_dcgmerge_spmvellpackrt_kernel_8) , dim3(gridellrt), dim3(num_threads) , Mellrt, queue->cuda_stream() , A.num_rows, A.dval, A.dcol, A.drow, dd, dz, d1, A.alignment, real_row_length ); } else { printf("error: alignment %d not supported.\n", int(A.alignment) ); return MAGMA_ERR_NOT_SUPPORTED; } // in case of using ELLRT, we can't efficiently merge the // dot product and the first reduction loop into the SpMV kernel // as the SpMV grid would result in low occupancy. 
// --- tail of the preceding host wrapper (its header lies before this chunk):
// the ellpackrt SpMV path needs an extra kernel to form the d^T z partials.
    hipLaunchKernelGGL(( magma_dcgmerge_spmvellpackrt_kernel2), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() , A.num_rows, dz, dd, d1 );
    }

    // second reduction phase: ping-pong the per-block partial sums between
    // d1/d2 until a single value remains
    while( Gs.x > 1 ) {
        Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
        if ( Gs_next.x == 1 ) Gs_next.x = 2;
        hipLaunchKernelGGL(( magma_dcgreduce_kernel_spmv1), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream(), Gs.x, A.num_rows, aux1, aux2 );
        Gs_next.x = Gs_next.x /2;
        Gs.x = Gs_next.x;
        b = 1 - b;
        if ( b ) { aux1 = d1; aux2 = d2; }
        else   { aux2 = d1; aux1 = d2; }
    }

    magma_dcopyvector( 1, aux1, 1, skp+4, 1, queue );   // skp[4] = d^T z
    dim3 Bs2( 2 );
    dim3 Gs2( 1 );
    // single-thread scalar kernel: rho = beta/tmp, gamma = beta
    hipLaunchKernelGGL(( magma_dcg_rhokernel), dim3(Gs2), dim3(Bs2), 0, queue->cuda_stream(), skp );

    return MAGMA_SUCCESS;
}

/* -------------------------------------------------------------------------- */

// updates x and r and computes the first part of the dot product r*r
// (one partial sum of r^T r per block is written to vtmp; a reduce kernel
// finishes the sum on a second pass)
__global__ void
magma_dcgmerge_xrbeta_kernel(
    int n,
    double * x,
    double * r,
    double * d,
    double * z,
    double * skp,
    double * vtmp )
{
    extern __shared__ double temp[];                    // one slot per thread (Ms bytes from launch)
    int Idx = threadIdx.x;
    int i = blockIdx.x * blockDim.x + Idx;

    double rho = skp[3];
    double mrho = MAGMA_D_MAKE( -1.0, 0.0)*rho;         // -rho

    temp[ Idx ] = MAGMA_D_MAKE( 0.0, 0.0);

    if( i<n ) {
        x[i] += rho * d[i];                             // x <- x + rho*d
        r[i] += mrho * z[i];                            // r <- r - rho*z
        temp[ Idx ] = r[i] * r[i];                      // local contribution to r.r
    }
    __syncthreads();
    // shared-memory tree reduction (offsets assume the 256-thread block
    // configured by the host wrappers below)
    if ( Idx < 128 ) {
        temp[ Idx ] += temp[ Idx + 128 ];
    }
    __syncthreads();
    if ( Idx < 64 ) {
        temp[ Idx ] += temp[ Idx + 64 ];
    }
    __syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
    if( Idx < 32 ) {
        temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
        temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
        temp[ Idx ] += temp[ Idx + 8 ];  __syncthreads();
        temp[ Idx ] += temp[ Idx + 4 ];  __syncthreads();
        temp[ Idx ] += temp[ Idx + 2 ];  __syncthreads();
        temp[ Idx ] += temp[ Idx + 1 ];  __syncthreads();
    }
#endif
#if defined(PRECISION_d)
    if( Idx < 32 ) {
        // warp-synchronous tail; volatile forces re-reads from shared memory
        volatile double *temp2 = temp;
        temp2[ Idx ] += temp2[ Idx + 32 ];
        temp2[ Idx ] += temp2[ Idx + 16 ];
        temp2[ Idx ] += temp2[ Idx + 8 ];
        temp2[ Idx ] += temp2[ Idx + 4 ];
        temp2[ Idx ] += temp2[ Idx + 2 ];
        temp2[ Idx ] += temp2[ Idx + 1 ];
    }
#endif
#if defined(PRECISION_s)
    if( Idx < 32 ) {
        volatile float *temp2 = temp;
        temp2[ Idx ] += temp2[ Idx + 32 ];
        temp2[ Idx ] += temp2[ Idx + 16 ];
        temp2[ Idx ] += temp2[ Idx + 8 ];
        temp2[ Idx ] += temp2[ Idx + 4 ];
        temp2[ Idx ] += temp2[ Idx + 2 ];
        temp2[ Idx ] += temp2[ Idx + 1 ];
    }
#endif
    if ( Idx == 0 ) {
        vtmp[ blockIdx.x ] = temp[ 0 ];                 // per-block partial of r.r
    }
}

// kernel to handle scalars
__global__ void //alpha = beta / gamma
magma_dcg_alphabetakernel(
    double * skp )
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;

    if( i==0 ) {
        double tmp1 = skp[1];
        skp[0] = tmp1/skp[2];                           // alpha = skp[1]/skp[2]
        //printf("beta=%e\n", MAGMA_D_REAL(tmp1));
    }
}

// update search Krylov vector d:  d <- r + alpha*d  with alpha read from skp[0]
__global__ void
magma_dcg_d_kernel(
    int n,
    double * skp,
    double * r,
    double * d )
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;

    double alpha = skp[0];

    if( i<n ) {
        d[i] = r[i] + alpha * d[i];
    }
}

/**
    Purpose
    -------

    Merges the update of r and x with the dot product and performs then
    the update for the Krylov vector d

    Arguments
    ---------

    @param[in]
    n           int
                dimension n

    @param[in]
    d1          magmaDouble_ptr
                temporary vector

    @param[in]
    d2          magmaDouble_ptr
                temporary vector

    @param[in,out]
    dx          magmaDouble_ptr
                input vector x

    @param[in,out]
    dr          magmaDouble_ptr
                input/output vector r

    @param[in]
    dd          magmaDouble_ptr
                input vector d

    @param[in]
    dz          magmaDouble_ptr
                input vector z

    @param[in]
    skp         magmaDouble_ptr
                array for parameters

    @param[in]
    queue       magma_queue_t
                Queue to execute in.

    @ingroup magmasparse_dsygpuk
    ********************************************************************/

extern "C" magma_int_t
magma_dcgmerge_xrbeta(
    magma_int_t n,
    magmaDouble_ptr d1,
    magmaDouble_ptr d2,
    magmaDouble_ptr dx,
    magmaDouble_ptr dr,
    magmaDouble_ptr dd,
    magmaDouble_ptr dz,
    magmaDouble_ptr skp,
    magma_queue_t queue )
{
    int local_block_size=256;
    dim3 Bs( local_block_size );
    dim3 Gs( magma_ceildiv( n, local_block_size ) );
    dim3 Gs_next;
    int Ms = 2*local_block_size * sizeof( double );     // dynamic shared memory per block
    magmaDouble_ptr aux1 = d1, aux2 = d2;
    int b = 1;
    // fused update of x and r plus first reduction pass of r.r into d1
    hipLaunchKernelGGL(( magma_dcgmerge_xrbeta_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream(), n, dx, dr, dd, dz, skp, d1);

    // collapse the per-block partials down to one value (ping-pong d1/d2)
    while( Gs.x > 1 ) {
        Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
        if ( Gs_next.x == 1 ) Gs_next.x = 2;
        hipLaunchKernelGGL(( magma_dcgreduce_kernel_spmv1), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream(), Gs.x, n, aux1, aux2 );
        Gs_next.x = Gs_next.x /2;
        Gs.x = Gs_next.x;
        b = 1 - b;
        if ( b ) { aux1 = d1; aux2 = d2; }
        else   { aux2 = d1; aux1 = d2; }
    }

    magma_dcopyvector( 1, aux1, 1, skp+1, 1, queue );   // skp[1] = r.r
    dim3 Bs2( 2 );
    dim3 Gs2( 1 );
    hipLaunchKernelGGL(( magma_dcg_alphabetakernel), dim3(Gs2), dim3(Bs2), 0, queue->cuda_stream(), skp );
    dim3 Bs3( local_block_size );
    dim3 Gs3( magma_ceildiv( n, local_block_size ) );
    hipLaunchKernelGGL(( magma_dcg_d_kernel), dim3(Gs3), dim3(Bs3), 0, queue->cuda_stream(), n, skp, dr, dd );

    return MAGMA_SUCCESS;
}

/* -------------------------------------------------------------------------- */

// updates x and r
__global__ void
magma_dpcgmerge_xrbeta_kernel(
    int n,
    double * x,
    double * r,
    double * d,
    double * z,
    double * skp )
{
    extern __shared__ double temp[];                    // NOTE(review): declared but never referenced here
    int Idx = threadIdx.x;
    int i = blockIdx.x * blockDim.x + Idx;

    double rho = skp[3];
    double mrho = MAGMA_D_MAKE( -1.0, 0.0)*rho;

    if( i<n ) {
        x[i] += rho * d[i];
        r[i] += mrho * z[i];
    }
}

// dot product for multiple vectors
__global__ void
magma_dmddot_one_kernel_1(
    int n,
    double * v0,
    double * w0,
    double * vtmp)
{
    extern
__shared__ double temp[];   // (continues the 'extern' on the previous line)
    int Idx = threadIdx.x;
    int i = blockIdx.x * blockDim.x + Idx;
    int j;

    // 1 vectors v(i)/w(i)
    // two interleaved partials per thread: slot Idx holds v.w, slot
    // Idx+blockDim.x holds v.v
    temp[ Idx ]              = ( i < n ) ? v0[ i ] * w0[ i ] : MAGMA_D_ZERO;
    temp[ Idx + blockDim.x ] = ( i < n ) ? v0[ i ] * v0[ i ] : MAGMA_D_ZERO;

    __syncthreads();
    // tree reduction of both partial sums at once (j selects the sum)
    if ( Idx < 128 ){
        for( j=0; j<2; j++){
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
        }
    }
    __syncthreads();
    if ( Idx < 64 ){
        for( j=0; j<2; j++){
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
        }
    }
    __syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
    if( Idx < 32 ){
        for( j=0; j<2; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
        __syncthreads();
        for( j=0; j<2; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
        __syncthreads();
        for( j=0; j<2; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
        __syncthreads();
        for( j=0; j<2; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
        __syncthreads();
        for( j=0; j<2; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
        __syncthreads();
        for( j=0; j<2; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
        __syncthreads();
    }
#endif
#if defined(PRECISION_d)
    if( Idx < 32 ){
        // warp-synchronous tail for both sums; volatile forces shared-mem re-reads
        volatile double *temp2 = temp;
        for( j=0; j<2; j++){
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
        }
    }
#endif
#if defined(PRECISION_s)
    if( Idx < 32 ){
        volatile float *temp2 = temp;
        for( j=0; j<2; j++){
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
        }
    }
#endif
    if ( Idx == 0 ){
        vtmp[ blockIdx.x ]   = temp[ 0 ];           // per-block partial of v.w
        vtmp[ blockIdx.x+n ] = temp[ blockDim.x ];  // per-block partial of v.v
    }
}

/**
    Purpose
    -------

    Merges the update of r and x with the dot product and performs then
    the update for the Krylov vector d

    Arguments
    ---------

    @param[in]
    n           int
                dimension n

    @param[in,out]
    dx          magmaDouble_ptr
                input vector x

    @param[in,out]
    dr          magmaDouble_ptr
                input/output vector r

    @param[in]
    dd          magmaDouble_ptr
                input vector d

    @param[in]
    dz          magmaDouble_ptr
                input vector z

    @param[in]
    skp         magmaDouble_ptr
                array for parameters

    @param[in]
    queue       magma_queue_t
                Queue to execute in.

    @ingroup magmasparse_dsygpuk
    ********************************************************************/

extern "C" magma_int_t
magma_dpcgmerge_xrbeta1(
    magma_int_t n,
    magmaDouble_ptr dx,
    magmaDouble_ptr dr,
    magmaDouble_ptr dd,
    magmaDouble_ptr dz,
    magmaDouble_ptr skp,
    magma_queue_t queue )
{
    int local_block_size=256;
    dim3 Bs( local_block_size );
    dim3 Gs( magma_ceildiv( n, local_block_size ) );
    // fused update of x and r only; the dot products are done separately
    // by magma_dpcgmerge_xrbeta2
    hipLaunchKernelGGL(( magma_dpcgmerge_xrbeta_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream(), n, dx, dr, dd, dz, skp );

    return MAGMA_SUCCESS;
}

/* -------------------------------------------------------------------------- */

/**
    Purpose
    -------

    Merges the update of r and x with the dot product and performs then
    the update for the Krylov vector d

    Arguments
    ---------

    @param[in]
    n           int
                dimension n

    @param[in]
    d1          magmaDouble_ptr
                temporary vector

    @param[in]
    d2          magmaDouble_ptr
                temporary vector

    @param[in]
    dh          magmaDouble_ptr
                input vector h

    @param[in]
    dr          magmaDouble_ptr
                input/output vector r

    @param[in,out]
    dd          magmaDouble_ptr
                input/output vector d

    @param[in]
    skp         magmaDouble_ptr
                array for parameters

    @param[in]
    queue       magma_queue_t
                Queue to execute in.

    @ingroup magmasparse_dsygpuk
    ********************************************************************/

extern "C" magma_int_t
magma_dpcgmerge_xrbeta2(
    magma_int_t n,
    magmaDouble_ptr d1,
    magmaDouble_ptr d2,
    magmaDouble_ptr dh,
    magmaDouble_ptr dr,
    magmaDouble_ptr dd,
    magmaDouble_ptr skp,
    magma_queue_t queue )
{
    int local_block_size=256;
    dim3 Bs( local_block_size );
    dim3 Gs( magma_ceildiv( n, local_block_size ) );
    dim3 Gs_next;
    int Ms = 4*local_block_size * sizeof( double );     // two interleaved partial sums per block
    magmaDouble_ptr aux1 = d1, aux2 = d2;
    int b = 1;
    // first pass: per-block partials of r.h and r.r into d1
    hipLaunchKernelGGL(( magma_dmddot_one_kernel_1), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream(), n, dr, dh, d1);

    // collapse both partial sums (ping-pong d1/d2)
    while( Gs.x > 1 ) {
        Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
        if ( Gs_next.x == 1 ) Gs_next.x = 2;
        hipLaunchKernelGGL(( magma_dcgreduce_kernel_spmv2), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream(), Gs.x, n, aux1, aux2 );
        Gs_next.x = Gs_next.x /2;
        Gs.x = Gs_next.x;
        b = 1 - b;
        if ( b ) { aux1 = d1; aux2 = d2; }
        else   { aux2 = d1; aux1 = d2; }
    }

    magma_dcopyvector( 1, aux1, 1, skp+1, 1, queue );   // skp[1] = first dot product
    magma_dcopyvector( 1, aux1+n, 1, skp+6, 1, queue ); // skp[6] = second dot product
    dim3 Bs2( 2 );
    dim3 Gs2( 1 );
    hipLaunchKernelGGL(( magma_dcg_alphabetakernel), dim3(Gs2), dim3(Bs2), 0, queue->cuda_stream(), skp );
    dim3 Bs3( local_block_size );
    dim3 Gs3( magma_ceildiv( n, local_block_size ) );
    hipLaunchKernelGGL(( magma_dcg_d_kernel), dim3(Gs3), dim3(Bs3), 0, queue->cuda_stream(), n, skp, dh, dd );

    return MAGMA_SUCCESS;
}

/* -------------------------------------------------------------------------- */

// updates x and r
// (Jacobi-preconditioned variant: also forms h = diag .* r and the partial
// dot products h.r and r.r)
__global__ void
magma_djcgmerge_xrbeta_kernel(
    int n,
    double * diag,
    double * x,
    double * r,
    double * d,
    double * z,
    double * h,
    double * vtmp,
    double * skp )
{
    extern __shared__ double temp[];
    int Idx = threadIdx.x;
    int i = blockIdx.x * blockDim.x + Idx;
    int j;

    double rho = skp[3];
    double mrho = MAGMA_D_MAKE( -1.0, 0.0)*rho;

    if( i<n ) {
        x[i] += rho * d[i];
        r[i] += mrho * z[i];
        h[i] = r[i] * diag[i];      // apply inverse-diagonal preconditioner
    }
    __syncthreads();
    temp[ Idx ] = ( i < n ) ?
h[ i ] * r[ i ] : MAGMA_D_ZERO;     // (second operand of the conditional started above)
    temp[ Idx + blockDim.x ] = ( i < n ) ? r[ i ] * r[ i ] : MAGMA_D_ZERO;

    __syncthreads();
    // reduce both interleaved partial sums (j=0: h.r, j=1: r.r)
    if ( Idx < 128 ){
        for( j=0; j<2; j++){
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
        }
    }
    __syncthreads();
    if ( Idx < 64 ){
        for( j=0; j<2; j++){
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
        }
    }
    __syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
    if( Idx < 32 ){
        for( j=0; j<2; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
        __syncthreads();
        for( j=0; j<2; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
        __syncthreads();
        for( j=0; j<2; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
        __syncthreads();
        for( j=0; j<2; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
        __syncthreads();
        for( j=0; j<2; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
        __syncthreads();
        for( j=0; j<2; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
        __syncthreads();
    }
#endif
#if defined(PRECISION_d)
    if( Idx < 32 ){
        // warp-synchronous tail; volatile forces shared-memory re-reads
        volatile double *temp2 = temp;
        for( j=0; j<2; j++){
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
        }
    }
#endif
#if defined(PRECISION_s)
    if( Idx < 32 ){
        volatile float *temp2 = temp;
        for( j=0; j<2; j++){
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
        }
    }
#endif
    if ( Idx == 0 ){
        vtmp[ blockIdx.x ] = temp[ 0 ];             // per-block partial of h.r
        vtmp[ blockIdx.x+n ] = temp[ blockDim.x ];  // per-block partial of r.r
    }
}

/**
    Purpose
    -------

    Merges the update of r and x with the dot product and performs then
    the update for the Krylov vector d

    Arguments
    ---------

    @param[in]
    n           int
                dimension n

    @param[in]
    d1          magmaDouble_ptr
                temporary vector

    @param[in]
    d2          magmaDouble_ptr
                temporary vector

    @param[in]
    diag        magmaDouble_ptr
                inverse diagonal (Jacobi preconditioner)

    @param[in]
    dx          magmaDouble_ptr
                iteration vector x

    @param[in]
    dr          magmaDouble_ptr
                input/output vector r

    @param[in]
    dd          magmaDouble_ptr
                input vector d

    @param[in]
    dz          magmaDouble_ptr
                input vector z

    @param[in]
    dh          magmaDouble_ptr
                input vector h

    @param[in]
    skp         magmaDouble_ptr
                array for parameters

    @param[in]
    queue       magma_queue_t
                Queue to execute in.

    @ingroup magmasparse_dsygpuk
    ********************************************************************/

extern "C" magma_int_t
magma_djcgmerge_xrbeta(
    magma_int_t n,
    magmaDouble_ptr d1,
    magmaDouble_ptr d2,
    magmaDouble_ptr diag,
    magmaDouble_ptr dx,
    magmaDouble_ptr dr,
    magmaDouble_ptr dd,
    magmaDouble_ptr dz,
    magmaDouble_ptr dh,
    magmaDouble_ptr skp,
    magma_queue_t queue )
{
    int local_block_size=256;
    dim3 Bs( local_block_size );
    dim3 Gs( magma_ceildiv( n, local_block_size ) );
    dim3 Gs_next;
    int Ms = 4*local_block_size * sizeof( double );     // two interleaved partial sums per block
    magmaDouble_ptr aux1 = d1, aux2 = d2;
    int b = 1;
    // fused x/r/h update plus first reduction pass of h.r and r.r into d1
    hipLaunchKernelGGL(( magma_djcgmerge_xrbeta_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() , n, diag, dx, dr, dd, dz, dh, d1, skp );

    // collapse both partial sums (ping-pong d1/d2)
    while( Gs.x > 1 ) {
        Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
        if ( Gs_next.x == 1 ) Gs_next.x = 2;
        hipLaunchKernelGGL(( magma_dcgreduce_kernel_spmv2), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream() , Gs.x, n, aux1, aux2 );
        Gs_next.x = Gs_next.x /2;
        Gs.x = Gs_next.x;
        b = 1 - b;
        if ( b ) { aux1 = d1; aux2 = d2; }
        else   { aux2 = d1; aux1 = d2; }
    }

    magma_dcopyvector( 1, aux1, 1, skp+1, 1, queue );   // skp[1] = h.r
    magma_dcopyvector( 1, aux1+n, 1, skp+6, 1, queue ); // skp[6] = r.r
    dim3 Bs2( 2 );
    dim3 Gs2( 1 );
    hipLaunchKernelGGL(( magma_dcg_alphabetakernel), dim3(Gs2), dim3(Bs2), 0, queue->cuda_stream(), skp );
    dim3 Bs3( local_block_size );
    dim3 Gs3( magma_ceildiv( n, local_block_size ) );
    hipLaunchKernelGGL(( magma_dcg_d_kernel), dim3(Gs3), dim3(Bs3), 0, queue->cuda_stream(), n, skp, dh, dd );

    return MAGMA_SUCCESS;
}

/* -------------------------------------------------------------------------- */
5f0879b9887a4995f3d3841ce76c1402d3e1eb54.cu
/*
    -- MAGMA (version 2.4.0) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver
       @date June 2018

       @generated from sparse/blas/zmergecg.cu, normal z -> d, Mon Jun 25 18:24:26 2018
       @author Hartwig Anzt

*/
#include "magmasparse_internal.h"

#define BLOCK_SIZE 512

#define PRECISION_d


// These routines merge multiple kernels from dmergecg into one
// for a description see
// "Reformulated Conjugate Gradient for the Energy-Aware
// Solution of Linear Systems on GPUs (ICPP '13)

// accelerated reduction for one vector
// (sums the Gs per-block partials in vtmp, writing one partial per block
//  to vtmp2; callers re-launch it until a single value remains)
__global__ void
magma_dcgreduce_kernel_spmv1(
    int Gs,
    int n,
    double * vtmp,
    double * vtmp2 )
{
    extern __shared__ double temp[];
    int Idx = threadIdx.x;
    int blockSize = 128;                            // half the launch block size
    int gridSize = blockSize * 2 * gridDim.x;
    temp[Idx] = MAGMA_D_MAKE( 0.0, 0.0);
    int i = blockIdx.x * ( blockSize * 2 ) + Idx;
    // strided accumulation: each thread folds two elements per trip
    while (i < Gs ) {
        temp[ Idx ] += vtmp[ i ];
        temp[ Idx ] += ( i + blockSize < Gs ) ? vtmp[ i + blockSize ]
                                              : MAGMA_D_MAKE( 0.0, 0.0);
        i += gridSize;
    }
    __syncthreads();
    if ( Idx < 64 ) {
        temp[ Idx ] += temp[ Idx + 64 ];
    }
    __syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
    if( Idx < 32 ) {
        temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
        temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
        temp[ Idx ] += temp[ Idx + 8 ];  __syncthreads();
        temp[ Idx ] += temp[ Idx + 4 ];  __syncthreads();
        temp[ Idx ] += temp[ Idx + 2 ];  __syncthreads();
        temp[ Idx ] += temp[ Idx + 1 ];  __syncthreads();
    }
#endif
#if defined(PRECISION_d)
    if( Idx < 32 ) {
        // warp-synchronous tail; volatile forces shared-memory re-reads
        volatile double *temp2 = temp;
        temp2[ Idx ] += temp2[ Idx + 32 ];
        temp2[ Idx ] += temp2[ Idx + 16 ];
        temp2[ Idx ] += temp2[ Idx + 8 ];
        temp2[ Idx ] += temp2[ Idx + 4 ];
        temp2[ Idx ] += temp2[ Idx + 2 ];
        temp2[ Idx ] += temp2[ Idx + 1 ];
    }
#endif
#if defined(PRECISION_s)
    if( Idx < 32 ) {
        volatile float *temp2 = temp;
        temp2[ Idx ] += temp2[ Idx + 32 ];
        temp2[ Idx ] += temp2[ Idx + 16 ];
        temp2[ Idx ] += temp2[ Idx + 8 ];
        temp2[ Idx ] += temp2[ Idx + 4 ];
        temp2[ Idx ] += temp2[ Idx + 2 ];
        temp2[ Idx ] += temp2[ Idx + 1 ];
    }
#endif
    if ( Idx == 0 ) {
        vtmp2[ blockIdx.x ] = temp[ 0 ];
    }
}

// accelerated reduction for two vectors
// (same scheme as spmv1, but two sums stored n apart in vtmp/vtmp2 are
//  reduced simultaneously; j selects the sum)
__global__ void
magma_dcgreduce_kernel_spmv2(
    int Gs,
    int n,
    double * vtmp,
    double * vtmp2 )
{
    extern __shared__ double temp[];
    int Idx = threadIdx.x;
    int blockSize = 128;
    int gridSize = blockSize * 2 * gridDim.x;
    int j;

    for( j=0; j<2; j++){
        int i = blockIdx.x * ( blockSize * 2 ) + Idx;
        temp[Idx+j*(blockSize)] = MAGMA_D_ZERO;
        while (i < Gs ) {
            temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ];
            temp[ Idx+j*(blockSize) ] +=
                ( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ]
                                         : MAGMA_D_ZERO;
            i += gridSize;
        }
    }
    __syncthreads();
    if ( Idx < 64 ){
        for( j=0; j<2; j++){
            temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ];
        }
    }
    __syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
    if( Idx < 32 ){
        for( j=0; j<2; j++)
            temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ];
        __syncthreads();
        for( j=0; j<2; j++)
            temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ];
        __syncthreads();
        for( j=0; j<2; j++)
            temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ];
        __syncthreads();
        for( j=0; j<2; j++)
            temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ];
        __syncthreads();
        for( j=0; j<2; j++)
            temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ];
        __syncthreads();
        for( j=0; j<2; j++)
            temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ];
        __syncthreads();
    }
#endif
#if defined(PRECISION_d)
    if( Idx < 32 ){
        volatile double *temp2 = temp;
        for( j=0; j<2; j++){
            temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
            temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
            temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
            temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
            temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
            temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
        }
    }
#endif
#if defined(PRECISION_s)
    if( Idx < 32 ){
        volatile float *temp2 = temp;
        for( j=0; j<2; j++){
            temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
            temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
            temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
            temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
            temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
            temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
        }
    }
#endif
    if ( Idx == 0 ){
        for( j=0; j<2; j++){
            vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ];
        }
    }
}

// computes the SpMV using CSR and the first step of the reduction
// (z = A*d in CSR format; per-block partials of d^T z go to vtmp)
__global__ void
magma_dcgmerge_spmvcsr_kernel(
    int n,
    double * dval,
    magma_index_t * drowptr,
    magma_index_t * dcolind,
    double * d,
    double * z,
    double * vtmp )
{
    extern __shared__ double temp[];
    int Idx = threadIdx.x;
    int i = blockIdx.x * blockDim.x + Idx;
    int j;

    temp[ Idx ] = MAGMA_D_MAKE( 0.0, 0.0);

    if( i<n ) {
        double dot = MAGMA_D_ZERO;
        int start = drowptr[ i ];
        int end = drowptr[ i+1 ];
        for( j=start; j<end; j++)
            dot += dval[ j ] * d[ dcolind[j] ];
        z[ i ] = dot;
        temp[ Idx ] = d[ i ] * dot;
    }
    __syncthreads();
    if ( Idx < 128 ) {
        temp[ Idx ] += temp[ Idx + 128 ];
    }
    __syncthreads();
    if ( Idx < 64 ) {
        temp[ Idx ] += temp[ Idx + 64 ];
    }
    __syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
    if( Idx < 32 ) {
        temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
        temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
        temp[ Idx ] += temp[ Idx + 8 ];  __syncthreads();
        temp[ Idx ] += temp[ Idx + 4 ];  __syncthreads();
        temp[ Idx ] += temp[ Idx + 2 ];  __syncthreads();
        temp[ Idx ] += temp[ Idx + 1 ];  __syncthreads();
    }
#endif
#if defined(PRECISION_d)
    if( Idx < 32 ) {
        volatile double *temp2 = temp;
        temp2[ Idx ] += temp2[ Idx + 32 ];
        temp2[ Idx ] += temp2[ Idx + 16 ];
        temp2[ Idx ] += temp2[ Idx + 8 ];
        temp2[ Idx ] += temp2[ Idx + 4 ];
        temp2[ Idx ] += temp2[ Idx + 2 ];
        temp2[ Idx ] += temp2[ Idx + 1 ];
    }
#endif
#if defined(PRECISION_s)
    if( Idx < 32 ) {
        volatile float *temp2 = temp;
        temp2[ Idx ] += temp2[ Idx + 32 ];
        temp2[ Idx ] += temp2[ Idx +
16 ];   // (continues the warp-synchronous tail started on the previous line)
        temp2[ Idx ] += temp2[ Idx + 8 ];
        temp2[ Idx ] += temp2[ Idx + 4 ];
        temp2[ Idx ] += temp2[ Idx + 2 ];
        temp2[ Idx ] += temp2[ Idx + 1 ];
    }
#endif
    if ( Idx == 0 ) {
        vtmp[ blockIdx.x ] = temp[ 0 ];     // per-block partial of d^T z
    }
}

// computes the SpMV using ELL and the first step of the reduction
// (column-major ELL layout: entry k of row i at position n*k + i)
__global__ void
magma_dcgmerge_spmvell_kernel(
    int n,
    int num_cols_per_row,
    double * dval,
    magma_index_t * dcolind,
    double * d,
    double * z,
    double * vtmp )
{
    extern __shared__ double temp[];
    int Idx = threadIdx.x;
    int i = blockIdx.x * blockDim.x + Idx;

    temp[ Idx ] = MAGMA_D_MAKE( 0.0, 0.0);

    if(i < n ) {
        double dot = MAGMA_D_MAKE(0.0, 0.0);
        for ( int k = 0; k < num_cols_per_row; k++ ) {
            int col = dcolind [ n * k + i ];
            double val = dval [ n * k + i ];
            if( val != 0)                   // zero padding entries are skipped
                dot += val * d[ col ];
        }
        z[ i ] = dot;
        temp[ Idx ] = d[ i ] * dot;
    }
    __syncthreads();
    if ( Idx < 128 ) {
        temp[ Idx ] += temp[ Idx + 128 ];
    }
    __syncthreads();
    if ( Idx < 64 ) {
        temp[ Idx ] += temp[ Idx + 64 ];
    }
    __syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
    if( Idx < 32 ) {
        temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
        temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
        temp[ Idx ] += temp[ Idx + 8 ];  __syncthreads();
        temp[ Idx ] += temp[ Idx + 4 ];  __syncthreads();
        temp[ Idx ] += temp[ Idx + 2 ];  __syncthreads();
        temp[ Idx ] += temp[ Idx + 1 ];  __syncthreads();
    }
#endif
#if defined(PRECISION_d)
    if( Idx < 32 ) {
        // warp-synchronous tail; volatile forces shared-memory re-reads
        volatile double *temp2 = temp;
        temp2[ Idx ] += temp2[ Idx + 32 ];
        temp2[ Idx ] += temp2[ Idx + 16 ];
        temp2[ Idx ] += temp2[ Idx + 8 ];
        temp2[ Idx ] += temp2[ Idx + 4 ];
        temp2[ Idx ] += temp2[ Idx + 2 ];
        temp2[ Idx ] += temp2[ Idx + 1 ];
    }
#endif
#if defined(PRECISION_s)
    if( Idx < 32 ) {
        volatile float *temp2 = temp;
        temp2[ Idx ] += temp2[ Idx + 32 ];
        temp2[ Idx ] += temp2[ Idx + 16 ];
        temp2[ Idx ] += temp2[ Idx + 8 ];
        temp2[ Idx ] += temp2[ Idx + 4 ];
        temp2[ Idx ] += temp2[ Idx + 2 ];
        temp2[ Idx ] += temp2[ Idx + 1 ];
    }
#endif
    if ( Idx == 0 ) {
        vtmp[ blockIdx.x ] = temp[ 0 ];
    }
}

// computes the SpMV using ELLPACK and the first step of the reduction
// (row-major ELLPACK layout: entry k of row i at position num_cols_per_row*i + k)
__global__ void
magma_dcgmerge_spmvellpack_kernel(
    int n,
    int num_cols_per_row,
    double * dval,
    magma_index_t * dcolind,
    double * d,
    double * z,
    double * vtmp )
{
    extern __shared__ double temp[];
    int Idx = threadIdx.x;
    int i = blockIdx.x * blockDim.x + Idx;

    temp[ Idx ] = MAGMA_D_MAKE( 0.0, 0.0);

    if(i < n ) {
        double dot = MAGMA_D_MAKE(0.0, 0.0);
        for ( int k = 0; k < num_cols_per_row; k++ ) {
            int col = dcolind [ num_cols_per_row * i + k ];
            double val = dval [ num_cols_per_row * i + k ];
            if( val != 0)
                dot += val * d[ col ];
        }
        z[ i ] = dot;
        temp[ Idx ] = d[ i ] * dot;
    }
    __syncthreads();
    if ( Idx < 128 ) {
        temp[ Idx ] += temp[ Idx + 128 ];
    }
    __syncthreads();
    if ( Idx < 64 ) {
        temp[ Idx ] += temp[ Idx + 64 ];
    }
    __syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
    if( Idx < 32 ) {
        temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
        temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
        temp[ Idx ] += temp[ Idx + 8 ];  __syncthreads();
        temp[ Idx ] += temp[ Idx + 4 ];  __syncthreads();
        temp[ Idx ] += temp[ Idx + 2 ];  __syncthreads();
        temp[ Idx ] += temp[ Idx + 1 ];  __syncthreads();
    }
#endif
#if defined(PRECISION_d)
    if( Idx < 32 ) {
        volatile double *temp2 = temp;
        temp2[ Idx ] += temp2[ Idx + 32 ];
        temp2[ Idx ] += temp2[ Idx + 16 ];
        temp2[ Idx ] += temp2[ Idx + 8 ];
        temp2[ Idx ] += temp2[ Idx + 4 ];
        temp2[ Idx ] += temp2[ Idx + 2 ];
        temp2[ Idx ] += temp2[ Idx + 1 ];
    }
#endif
#if defined(PRECISION_s)
    if( Idx < 32 ) {
        volatile float *temp2 = temp;
        temp2[ Idx ] += temp2[ Idx + 32 ];
        temp2[ Idx ] += temp2[ Idx + 16 ];
        temp2[ Idx ] += temp2[ Idx + 8 ];
        temp2[ Idx ] += temp2[ Idx + 4 ];
        temp2[ Idx ] += temp2[ Idx + 2 ];
        temp2[ Idx ] += temp2[ Idx + 1 ];
    }
#endif
    if ( Idx == 0 ) {
        vtmp[ blockIdx.x ] = temp[ 0 ];
    }
}

// computes the SpMV using SELL alignment 1 and the first step of the reduction
__global__ void
magma_dcgmerge_spmvell_kernelb1(
    int n,
    int blocksize,
    double * dval,
    magma_index_t * dcolind,
    magma_index_t * drowptr,
    double *
d, double * z, double * vtmp )
{
    extern __shared__ double temp[];
    int Idx = threadIdx.x;
    int i = blockIdx.x * blockDim.x + Idx;

    temp[ Idx ] = MAGMA_D_MAKE( 0.0, 0.0);

    int idx = threadIdx.x;      // local row
    int bdx = blockIdx.x;       // global block index
    int row = bdx * 256 + idx;  // global row index
                                // NOTE(review): assumes blockDim.x == 256, so
                                // row == i above — confirm against the launcher
    // int lblocksize = ( row + blocksize < num_rows) ? blocksize : ( num_rows - blocksize * (row/blocksize) );
    int lrow = threadIdx.x%blocksize; // local row;

    if( row < n ) {
        int offset = drowptr[ row/blocksize ];
        int border = (drowptr[ row/blocksize+1 ]-offset)/blocksize;

        double dot = MAGMA_D_MAKE(0.0, 0.0);
        // NOTE(review): loop variable shadows the parameter 'n'
        for ( int n = 0; n < border; n++) {
            int col = dcolind [ offset+ blocksize * n + lrow ];
            double val = dval[ offset+ blocksize * n + lrow ];
            dot = dot + val * d [ col ];
        }
        z[ i ] = dot;
        temp[ Idx ] = d[ i ] * dot;
    }

/*
    if(i < n ) {
        int offset = drowptr[ blockIdx.x ];
        int border = (drowptr[ blockIdx.x+1 ]-offset)/blocksize;
        double dot = MAGMA_D_MAKE(0.0, 0.0);
        for ( int k = 0; k < border; k++){
            int col = dcolind [ offset+ blocksize * k + threadIdx.x ];
            double val = dval[offset+ blocksize * k + threadIdx.x];
            if( val != 0){
                  dot += val*d[col];
            }
        }


        //double dot = MAGMA_D_MAKE(0.0, 0.0);
        //for ( int k = 0; k < num_cols_per_row; k++ ) {
        //    int col = dcolind [ n * k + i ];
        //    double val = dval [ n * k + i ];
        //    if( val != 0)
        //        dot += val * d[ col ];
        //}
        z[ i ] =  dot;
        temp[ Idx ] = d[ i ] * dot;
    }*/
    __syncthreads();
    if ( Idx < 128 ) {
        temp[ Idx ] += temp[ Idx + 128 ];
    }
    __syncthreads();
    if ( Idx < 64 ) {
        temp[ Idx ] += temp[ Idx + 64 ];
    }
    __syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
    if( Idx < 32 ) {
        temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
        temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
        temp[ Idx ] += temp[ Idx + 8 ];  __syncthreads();
        temp[ Idx ] += temp[ Idx + 4 ];  __syncthreads();
        temp[ Idx ] += temp[ Idx + 2 ];  __syncthreads();
        temp[ Idx ] += temp[ Idx + 1 ];  __syncthreads();
    }
#endif
#if defined(PRECISION_d)
    if( Idx < 32 ) {
        volatile double *temp2 =
temp;   // (continues the volatile alias declared on the previous line)
        temp2[ Idx ] += temp2[ Idx + 32 ];
        temp2[ Idx ] += temp2[ Idx + 16 ];
        temp2[ Idx ] += temp2[ Idx + 8 ];
        temp2[ Idx ] += temp2[ Idx + 4 ];
        temp2[ Idx ] += temp2[ Idx + 2 ];
        temp2[ Idx ] += temp2[ Idx + 1 ];
    }
#endif
#if defined(PRECISION_s)
    if( Idx < 32 ) {
        volatile float *temp2 = temp;
        temp2[ Idx ] += temp2[ Idx + 32 ];
        temp2[ Idx ] += temp2[ Idx + 16 ];
        temp2[ Idx ] += temp2[ Idx + 8 ];
        temp2[ Idx ] += temp2[ Idx + 4 ];
        temp2[ Idx ] += temp2[ Idx + 2 ];
        temp2[ Idx ] += temp2[ Idx + 1 ];
    }
#endif
    if ( Idx == 0 ) {
        vtmp[ blockIdx.x ] = temp[ 0 ];
    }
}

// computes the SpMV using ELLRT 8 threads per row
// (z = A*d only; the d^T z partials are formed afterwards by
//  magma_dcgmerge_spmvellpackrt_kernel2)
__global__ void
magma_dcgmerge_spmvellpackrt_kernel_8(
    int n,
    double * dval,
    magma_index_t * dcolind,
    magma_index_t * drowlength,
    double * d,
    double * z,
    double * vtmp,
    magma_int_t T,
    magma_int_t alignment )
{
    int idx = blockIdx.y * gridDim.x * blockDim.x +
              blockDim.x * blockIdx.x + threadIdx.x;  // global thread index
    int idb = threadIdx.x;   // local thread index
    int idp = idb%T;         // number of threads assigned to one row
    int i = idx/T;           // row index

    extern __shared__ double shared[];

    if(i < n ) {
        double dot = MAGMA_D_MAKE(0.0, 0.0);
        int max_ = magma_ceildiv( drowlength[i], T );
            // number of elements each thread handles

        for ( int k = 0; k < max_; k++ ) {
            // original code in paper (not working for me)
            //double val = dval[ k*(T*alignment)+(i*T)+idp ];
            //int col = dcolind [ k*(T*alignment)+(i*T)+idp ];

            // new code (working for me)
            double val = dval[ k*(T)+(i*alignment)+idp ];
            int col = dcolind [ k*(T)+(i*alignment)+idp ];

            dot += val * d[ col ];
        }
        shared[idb] = dot;
        // intra-row fold over the T=8 lanes (no barrier: same-warp lanes)
        if( idp < 4 ) {
            shared[idb]+=shared[idb+4];
            if( idp < 2 ) shared[idb]+=shared[idb+2];
            if( idp == 0 ) {
                z[i] = (shared[idb]+shared[idb+1]);
            }
        }
    }
}

// computes the SpMV using ELLRT 8 threads per row
// (same as above with T=16 lanes per row)
__global__ void
magma_dcgmerge_spmvellpackrt_kernel_16(
    int n,
    double * dval,
    magma_index_t * dcolind,
    magma_index_t * drowlength,
    double * d,
    double * z,
    double * vtmp,
    magma_int_t T,
    magma_int_t alignment )
{
    int
idx = blockIdx.y * gridDim.x * blockDim.x +
      blockDim.x * blockIdx.x + threadIdx.x;  // global thread index
    int idb = threadIdx.x;   // local thread index
    int idp = idb%T;         // number of threads assigned to one row
    int i = idx/T;           // row index

    extern __shared__ double shared[];

    if(i < n ) {
        double dot = MAGMA_D_MAKE(0.0, 0.0);
        int max_ = magma_ceildiv( drowlength[i], T );
            // number of elements each thread handles

        for ( int k = 0; k < max_; k++ ) {
            // original code in paper (not working for me)
            //double val = dval[ k*(T*alignment)+(i*T)+idp ];
            //int col = dcolind [ k*(T*alignment)+(i*T)+idp ];

            // new code (working for me)
            double val = dval[ k*(T)+(i*alignment)+idp ];
            int col = dcolind [ k*(T)+(i*alignment)+idp ];

            dot += val * d[ col ];
        }
        shared[idb] = dot;
        if( idp < 8 ) {
            shared[idb]+=shared[idb+8];
            if( idp < 4 ) shared[idb]+=shared[idb+4];
            if( idp < 2 ) shared[idb]+=shared[idb+2];
            if( idp == 0 ) {
                z[i] = (shared[idb]+shared[idb+1]);
            }
        }
    }
}

// computes the SpMV using ELLRT 8 threads per row
// (same as above with T=32 lanes per row)
__global__ void
magma_dcgmerge_spmvellpackrt_kernel_32(
    int n,
    double * dval,
    magma_index_t * dcolind,
    magma_index_t * drowlength,
    double * d,
    double * z,
    double * vtmp,
    magma_int_t T,
    magma_int_t alignment )
{
    int idx = blockIdx.y * gridDim.x * blockDim.x +
              blockDim.x * blockIdx.x + threadIdx.x;  // global thread index
    int idb = threadIdx.x;   // local thread index
    int idp = idb%T;         // number of threads assigned to one row
    int i = idx/T;           // row index

    extern __shared__ double shared[];

    if(i < n ) {
        double dot = MAGMA_D_MAKE(0.0, 0.0);
        int max_ = magma_ceildiv( drowlength[i], T );
            // number of elements each thread handles

        for ( int k = 0; k < max_; k++ ) {
            // original code in paper (not working for me)
            //double val = dval[ k*(T*alignment)+(i*T)+idp ];
            //int col = dcolind [ k*(T*alignment)+(i*T)+idp ];

            // new code (working for me)
            double val = dval[ k*(T)+(i*alignment)+idp ];
            int col = dcolind [ k*(T)+(i*alignment)+idp ];

            dot += val * d[ col ];
        }
        shared[idb] = dot;
        if( idp < 16 ) {
            shared[idb]+=shared[idb+16];
            if( idp < 8 ) shared[idb]+=shared[idb+8];
            if( idp < 4 ) shared[idb]+=shared[idb+4];
            if( idp < 2 ) shared[idb]+=shared[idb+2];
            if( idp == 0 ) {
                z[i] = (shared[idb]+shared[idb+1]);
            }
        }
    }
}

// additional kernel necessary to compute first reduction step
// (forms the per-block partials of d^T z after the ELLRT SpMV above)
__global__ void
magma_dcgmerge_spmvellpackrt_kernel2(
    int n,
    double * z,
    double * d,
    double * vtmp2 )
{
    extern __shared__ double temp[];
    int Idx = threadIdx.x;
    int i = blockIdx.x * blockDim.x + Idx;

    temp[ Idx ] = ( i < n ) ? z[i]*d[i] : MAGMA_D_MAKE(0.0, 0.0);
    __syncthreads();
    if ( Idx < 128 ) {
        temp[ Idx ] += temp[ Idx + 128 ];
    }
    __syncthreads();
    if ( Idx < 64 ) {
        temp[ Idx ] += temp[ Idx + 64 ];
    }
    __syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
    if( Idx < 32 ) {
        temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
        temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
        temp[ Idx ] += temp[ Idx + 8 ];  __syncthreads();
        temp[ Idx ] += temp[ Idx + 4 ];  __syncthreads();
        temp[ Idx ] += temp[ Idx + 2 ];  __syncthreads();
        temp[ Idx ] += temp[ Idx + 1 ];  __syncthreads();
    }
#endif
#if defined(PRECISION_d)
    if( Idx < 32 ) {
        // warp-synchronous tail; volatile forces shared-memory re-reads
        volatile double *temp2 = temp;
        temp2[ Idx ] += temp2[ Idx + 32 ];
        temp2[ Idx ] += temp2[ Idx + 16 ];
        temp2[ Idx ] += temp2[ Idx + 8 ];
        temp2[ Idx ] += temp2[ Idx + 4 ];
        temp2[ Idx ] += temp2[ Idx + 2 ];
        temp2[ Idx ] += temp2[ Idx + 1 ];
    }
#endif
#if defined(PRECISION_s)
    if( Idx < 32 ) {
        volatile float *temp2 = temp;
        temp2[ Idx ] += temp2[ Idx + 32 ];
        temp2[ Idx ] += temp2[ Idx + 16 ];
        temp2[ Idx ] += temp2[ Idx + 8 ];
        temp2[ Idx ] += temp2[ Idx + 4 ];
        temp2[ Idx ] += temp2[ Idx + 2 ];
        temp2[ Idx ] += temp2[ Idx + 1 ];
    }
#endif
    if ( Idx == 0 ) {
        vtmp2[ blockIdx.x ] = temp[ 0 ];
    }
}

// computes the SpMV using SELLC
__global__ void
magma_dcgmerge_spmvsellc_kernel(
    int num_rows,
    int blocksize,
    double * dval,
    magma_index_t * dcolind,
    magma_index_t * drowptr,
    double * d,
    double * z,
    double * vtmp)
{
    extern __shared__ double temp[];
    int Idx = threadIdx.x;
    int i = blockIdx.x *
blockDim.x + Idx; int offset = drowptr[ blockIdx.x ]; int border = (drowptr[ blockIdx.x+1 ]-offset)/blocksize; temp[ Idx ] = MAGMA_D_MAKE( 0.0, 0.0); if(i < num_rows ) { double dot = MAGMA_D_MAKE(0.0, 0.0); for ( int n = 0; n < border; n ++) { int col = dcolind [offset+ blocksize * n + Idx ]; double val = dval[offset+ blocksize * n + Idx]; if( val != 0) { dot=dot+val*d[col]; } } z[ i ] = dot; temp[ Idx ] = d[ i ] * dot; } __syncthreads(); if ( Idx < 128 ) { temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ) { temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ) { temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ) { volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ) { volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ) { vtmp[ blockIdx.x ] = temp[ 0 ]; } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void magma_dcgmerge_spmvsellpt_kernel_8( int num_rows, int blocksize, int T, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, double * d, double * z) { // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ double shared[]; if(row < num_rows ) { double dot = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { double val = dval[ offset + ldx + block*k ]; int col = dcolind[ offset + ldx + block*k ]; dot += val * d[ col ]; } shared[ldx] = dot; __syncthreads(); if( idx < 4 ) { shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { z[row] = (shared[ldx]+shared[ldx+blocksize*1]); } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
// BISHOP: A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel.
// Variant for T == 16 threads per row; otherwise identical to the T == 8
// kernel above. Requires blocksize * T * sizeof(double) dynamic shared memory.
__global__ void
magma_dcgmerge_spmvsellpt_kernel_16(
    int num_rows,
    int blocksize,
    int T,
    double * dval,
    magma_index_t * dcolind,
    magma_index_t * drowptr,
    double * d,
    double * z)
{
    // T threads assigned to each row
    int idx = threadIdx.y;      // thread in row
    int idy = threadIdx.x;      // local row
    int ldx = idx * blocksize + idy;
    int bdx = blockIdx.y * gridDim.x + blockIdx.x;  // global block index
    int row = bdx * blocksize + idy;                // global row index

    extern __shared__ double shared[];

    if(row < num_rows ) {
        double dot = MAGMA_D_MAKE(0.0, 0.0);
        int offset = drowptr[ bdx ];
        int block = blocksize * T;  // total number of threads
        int max_ = (drowptr[ bdx+1 ]-offset)/block;
            // number of elements each thread handles
        for ( int k = 0; k < max_; k++ ) {
            double val = dval[ offset + ldx + block*k ];
            int col = dcolind[ offset + ldx + block*k ];
            dot += val * d[ col ];
        }
        shared[ldx] = dot;
        __syncthreads();
        // Tree-reduce the T=16 partials of each row down to one value.
        // NOTE(review): __syncthreads() under divergent flow (idx < 8) --
        // upstream MAGMA relies on the reducing threads fitting in one warp.
        if( idx < 8 ) {
            shared[ldx]+=shared[ldx+blocksize*8];
            __syncthreads();
            if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
            __syncthreads();
            if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
            __syncthreads();
            if( idx == 0 ) {
                z[row] = (shared[ldx]+shared[ldx+blocksize*1]);
            }
        }
    }
}

// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A.
// BISHOP: A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel.
// Variant for T == 32 threads per row.
__global__ void
magma_dcgmerge_spmvsellpt_kernel_32(
    int num_rows,
    int blocksize,
    int T,
    double * dval,
    magma_index_t * dcolind,
    magma_index_t * drowptr,
    double * d,
    double * z)
{
    // T threads assigned to each row
    int idx = threadIdx.y;      // thread in row
    int idy = threadIdx.x;      // local row
    int ldx = idx * blocksize + idy;
    int bdx = blockIdx.y * gridDim.x + blockIdx.x;  // global block index
    int row = bdx * blocksize + idy;                // global row index

    extern __shared__ double shared[];

    if(row < num_rows ) {
        double dot = MAGMA_D_MAKE(0.0, 0.0);
        int offset = drowptr[ bdx ];
        int block = blocksize * T;  // total number of threads
        int max_ = (drowptr[ bdx+1 ]-offset)/block;
            // number of elements each thread handles
        for ( int k = 0; k < max_; k++ ) {
            double val = dval[ offset + ldx + block*k ];
            int col = dcolind[ offset + ldx + block*k ];
            dot += val * d[ col ];
        }
        shared[ldx] = dot;
        __syncthreads();
        // Tree-reduce the T=32 partials of each row down to one value.
        // NOTE(review): __syncthreads() under divergent flow (idx < 16).
        if( idx < 16 ) {
            shared[ldx]+=shared[ldx+blocksize*16];
            __syncthreads();
            if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8];
            __syncthreads();
            if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
            __syncthreads();
            if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
            __syncthreads();
            if( idx == 0 ) {
                z[row] = (shared[ldx]+shared[ldx+blocksize*1]);
            }
        }
    }
}

// kernel to handle scalars
// Computes rho = beta / tmp and gamma = beta from the reduced dot products
// stored in skp (skp[1] = beta, skp[4] = d^T z); single-thread kernel.
__global__ void
// rho = beta/tmp; gamma = beta;
magma_dcg_rhokernel(
    double * skp ) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;

    if( i==0 ) {
        double tmp = skp[1];
        skp[3] = tmp/skp[4];
        skp[2] = tmp;
    }
}

/**
    Purpose
    -------

    Merges the first SpmV using different formats with the dot product
    and the computation of rho

    Arguments
    ---------

    @param[in]
    A           magma_d_matrix
                input matrix

    @param[in]
    d1          magmaDouble_ptr
                temporary vector

    @param[in]
    d2          magmaDouble_ptr
                temporary vector

    @param[in]
    dd          magmaDouble_ptr
                input vector d

    @param[out]
    dz          magmaDouble_ptr
                input vector z

    @param[out]
    skp         magmaDouble_ptr
                array for parameters ( skp[3]=rho )

    @param[in]
    queue       magma_queue_t
                Queue to execute in.

    @ingroup magmasparse_dgegpuk
    ********************************************************************/

extern "C" magma_int_t
magma_dcgmerge_spmv1(
    magma_d_matrix A,
    magmaDouble_ptr d1,
    magmaDouble_ptr d2,
    magmaDouble_ptr dd,
    magmaDouble_ptr dz,
    magmaDouble_ptr skp,
    magma_queue_t queue )
{
    int local_block_size=256;
    dim3 Bs( local_block_size );
    dim3 Gs( magma_ceildiv( A.num_rows, local_block_size ) );
    dim3 Gs_next;
    int Ms =  local_block_size * sizeof( double );   // dynamic shared memory per block
    magmaDouble_ptr aux1 = d1, aux2 = d2;            // ping-pong buffers for the reduction
    int b = 1;

    // Dispatch the fused SpMV + per-block dot-product kernel by storage format.
    if ( A.storage_type == Magma_CSR )
        magma_dcgmerge_spmvcsr_kernel<<< Gs, Bs, Ms, queue->cuda_stream() >>>
            ( A.num_rows, A.dval, A.drow, A.dcol, dd, dz, d1 );
    else if ( A.storage_type == Magma_ELLPACKT )
        magma_dcgmerge_spmvellpack_kernel<<< Gs, Bs, Ms, queue->cuda_stream() >>>
            ( A.num_rows, A.max_nnz_row, A.dval, A.dcol, dd, dz, d1 );
    else if ( A.storage_type == Magma_ELL )
        magma_dcgmerge_spmvell_kernel<<< Gs, Bs, Ms, queue->cuda_stream() >>>
            ( A.num_rows, A.max_nnz_row, A.dval, A.dcol, dd, dz, d1 );
    else if ( A.storage_type == Magma_CUCSR ) {
        // Delegate the SpMV to cuSPARSE, then run a separate kernel for the
        // per-block partial dot products (they cannot be fused here).
        cusparseHandle_t cusparseHandle = 0;
        cusparseMatDescr_t descr = 0;
        double c_one = MAGMA_D_ONE;
        double c_zero = MAGMA_D_ZERO;
        // NOTE(review): return codes of the cusparse* calls are not checked.
        cusparseCreate( &cusparseHandle );
        cusparseSetStream( cusparseHandle, queue->cuda_stream() );
        cusparseCreateMatDescr( &descr );
        cusparseSetMatType( descr, CUSPARSE_MATRIX_TYPE_GENERAL );
        cusparseSetMatIndexBase( descr, CUSPARSE_INDEX_BASE_ZERO );
        cusparseDcsrmv( cusparseHandle,CUSPARSE_OPERATION_NON_TRANSPOSE,
                        A.num_rows, A.num_cols, A.nnz, &c_one, descr,
                        A.dval, A.drow, A.dcol, dd, &c_zero, dz );
        cusparseDestroyMatDescr( descr );
        cusparseDestroy( cusparseHandle );
        cusparseHandle = 0;
        descr = 0;
        magma_dcgmerge_spmvellpackrt_kernel2<<< Gs, Bs, Ms, queue->cuda_stream() >>>
            ( A.num_rows, dz, dd, d1 );
    }
    else if ( A.storage_type == Magma_SELLP && A.alignment == 1 ) {
        magma_dcgmerge_spmvell_kernelb1<<< Gs, Bs, Ms, queue->cuda_stream() >>>
            ( A.num_rows, A.blocksize, A.dval, A.dcol, A.drow, dd, dz, d1 );
    }
    else if ( A.storage_type == Magma_SELLP && A.alignment > 1) {
        int num_threadssellp = A.blocksize*A.alignment;
        magma_int_t arch = magma_getdevice_arch();
        if ( arch < 200 && num_threadssellp > 256 )
            printf("error: too much shared memory requested.\n");

        // 2D grid over the SELL-P blocks; block = (blocksize, alignment).
        dim3 block( A.blocksize, A.alignment, 1);
        int dimgrid1 = int( sqrt( double( A.numblocks )));
        int dimgrid2 = magma_ceildiv( A.numblocks, dimgrid1 );

        dim3 gridsellp( dimgrid1, dimgrid2, 1);
        int Mssellp = num_threadssellp * sizeof( double );

        if ( A.alignment == 8)
            magma_dcgmerge_spmvsellpt_kernel_8
                <<< gridsellp, block, Mssellp, queue->cuda_stream() >>>
                ( A.num_rows, A.blocksize, A.alignment,
                    A.dval, A.dcol, A.drow, dd, dz);
        else if ( A.alignment == 16)
            magma_dcgmerge_spmvsellpt_kernel_16
                <<< gridsellp, block, Mssellp, queue->cuda_stream() >>>
                ( A.num_rows, A.blocksize, A.alignment,
                    A.dval, A.dcol, A.drow, dd, dz);
        else if ( A.alignment == 32)
            magma_dcgmerge_spmvsellpt_kernel_32
                <<< gridsellp, block, Mssellp, queue->cuda_stream() >>>
                ( A.num_rows, A.blocksize, A.alignment,
                    A.dval, A.dcol, A.drow, dd, dz);
        else
            printf("error: alignment not supported.\n");

        // in case of using SELLP, we can't efficiently merge the
        // dot product and the first reduction loop into the SpMV kernel
        // as the SpMV grid would result in low occupancy.
        magma_dcgmerge_spmvellpackrt_kernel2<<< Gs, Bs, Ms, queue->cuda_stream() >>>
            ( A.num_rows, dz, dd, d1 );
    }
    else if ( A.storage_type == Magma_ELLRT ) {
        // in case of using ELLRT, we need a different grid, assigning
        // threads_per_row processors to each row
        // the block size is num_threads
        // fixed values
        int num_blocks = magma_ceildiv( A.num_rows, A.blocksize );
        int num_threads = A.alignment*A.blocksize;
        int real_row_length = magma_roundup( A.max_nnz_row, A.alignment );

        magma_int_t arch = magma_getdevice_arch();
        if ( arch < 200 && num_threads > 256 )
            printf("error: too much shared memory requested.\n");

        int dimgrid1 = int( sqrt( double( num_blocks )));
        int dimgrid2 = magma_ceildiv( num_blocks, dimgrid1 );
        dim3 gridellrt( dimgrid1, dimgrid2, 1);

        int Mellrt = A.alignment * A.blocksize * sizeof( double );
        // printf("launch kernel: %dx%d %d %d\n", grid.x, grid.y, num_threads , Ms);

        if ( A.alignment == 32 ) {
            magma_dcgmerge_spmvellpackrt_kernel_32
                <<< gridellrt, num_threads , Mellrt, queue->cuda_stream() >>>
                ( A.num_rows, A.dval, A.dcol, A.drow, dd, dz, d1,
                    A.alignment, real_row_length );
        }
        else if ( A.alignment == 16 ) {
            magma_dcgmerge_spmvellpackrt_kernel_16
                <<< gridellrt, num_threads , Mellrt, queue->cuda_stream() >>>
                ( A.num_rows, A.dval, A.dcol, A.drow, dd, dz, d1,
                    A.alignment, real_row_length );
        }
        else if ( A.alignment == 8 ) {
            magma_dcgmerge_spmvellpackrt_kernel_8
                <<< gridellrt, num_threads , Mellrt, queue->cuda_stream() >>>
                ( A.num_rows, A.dval, A.dcol, A.drow, dd, dz, d1,
                    A.alignment, real_row_length );
        }
        else {
            printf("error: alignment %d not supported.\n", int(A.alignment) );
            return MAGMA_ERR_NOT_SUPPORTED;
        }
        // in case of using ELLRT, we can't efficiently merge the
        // dot product and the first reduction loop into the SpMV kernel
        // as the SpMV grid would result in low occupancy.
        magma_dcgmerge_spmvellpackrt_kernel2<<< Gs, Bs, Ms, queue->cuda_stream() >>>
            ( A.num_rows, dz, dd, d1 );
    }

    // Reduce the per-block partial sums down to a single value, ping-ponging
    // between d1 and d2 until one block remains.
    while( Gs.x > 1 ) {
        Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
        if ( Gs_next.x == 1 ) Gs_next.x = 2;
        magma_dcgreduce_kernel_spmv1<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream()>>>
            ( Gs.x, A.num_rows, aux1, aux2 );
        Gs_next.x = Gs_next.x /2;
        Gs.x = Gs_next.x;
        b = 1 - b;
        if ( b ) { aux1 = d1; aux2 = d2; }
        else   { aux2 = d1; aux1 = d2; }
    }

    // Store the reduced dot product d^T z in skp[4], then compute rho.
    magma_dcopyvector( 1, aux1, 1, skp+4, 1, queue );
    dim3 Bs2( 2 );
    dim3 Gs2( 1 );
    magma_dcg_rhokernel<<< Gs2, Bs2, 0, queue->cuda_stream()>>>( skp );

    return MAGMA_SUCCESS;
}


/* -------------------------------------------------------------------------- */

// updates x and r and computes the first part of the dot product r*r
// Requires blockDim.x * sizeof(double) dynamic shared memory; writes one
// partial sum per block into vtmp.
__global__ void
magma_dcgmerge_xrbeta_kernel(
    int n,
    double * x,
    double * r,
    double * d,
    double * z,
    double * skp,
    double * vtmp )
{
    extern __shared__ double temp[];
    int Idx = threadIdx.x;
    int i   = blockIdx.x * blockDim.x + Idx;

    double rho = skp[3];
    double mrho = MAGMA_D_MAKE( -1.0, 0.0)*rho;

    temp[ Idx ] = MAGMA_D_MAKE( 0.0, 0.0);

    if( i<n ) {
        x[i] += rho * d[i];
        r[i] += mrho * z[i];
        temp[ Idx ] = r[i] * r[i];
    }
    __syncthreads();
    // Block-wide tree reduction of the r*r partials.
    if ( Idx < 128 ) {
        temp[ Idx ] += temp[ Idx + 128 ];
    }
    __syncthreads();
    if ( Idx < 64 ) {
        temp[ Idx ] += temp[ Idx + 64 ];
    }
    __syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
    if( Idx < 32 ) {
        temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
        temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
        temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
        temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
        temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
        temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
    }
#endif
#if defined(PRECISION_d)
    // Final warp: volatile accesses instead of barriers.
    if( Idx < 32 ) {
        volatile double *temp2 = temp;
        temp2[ Idx ] += temp2[ Idx + 32 ];
        temp2[ Idx ] += temp2[ Idx + 16 ];
        temp2[ Idx ] += temp2[ Idx + 8 ];
        temp2[ Idx ] += temp2[ Idx + 4 ];
        temp2[ Idx ] += temp2[ Idx + 2 ];
        temp2[ Idx ] += temp2[ Idx + 1 ];
    }
#endif
#if defined(PRECISION_s)
    if( Idx < 32 ) {
        volatile float *temp2 = temp;
        temp2[ Idx ] += temp2[ Idx + 32 ];
        temp2[ Idx ] += temp2[ Idx + 16 ];
        temp2[ Idx ] += temp2[ Idx + 8 ];
        temp2[ Idx ] += temp2[ Idx + 4 ];
        temp2[ Idx ] += temp2[ Idx + 2 ];
        temp2[ Idx ] += temp2[ Idx + 1 ];
    }
#endif
    if ( Idx == 0 ) {
        vtmp[ blockIdx.x ] = temp[ 0 ];
    }
}

// kernel to handle scalars
// Computes alpha = beta / gamma from skp[1] and skp[2]; single-thread kernel.
__global__ void
//alpha = beta / gamma
magma_dcg_alphabetakernel(
    double * skp )
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;

    if( i==0 ) {
        double tmp1 = skp[1];
        skp[0] = tmp1/skp[2];
        //printf("beta=%e\n", MAGMA_D_REAL(tmp1));
    }
}

// update search Krylov vector d
// d = r + alpha * d with alpha read from skp[0].
__global__ void
magma_dcg_d_kernel(
    int n,
    double * skp,
    double * r,
    double * d )
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;

    double alpha = skp[0];

    if( i<n ) {
        d[i] = r[i] + alpha * d[i];
    }
}



/**
    Purpose
    -------

    Merges the update of r and x with the dot product and performs then
    the update for the Krylov vector d

    Arguments
    ---------

    @param[in]
    n           int
                dimension n

    @param[in]
    d1          magmaDouble_ptr
                temporary vector

    @param[in]
    d2          magmaDouble_ptr
                temporary vector

    @param[in,out]
    dx          magmaDouble_ptr
                input vector x

    @param[in,out]
    dr          magmaDouble_ptr
                input/output vector r

    @param[in]
    dd          magmaDouble_ptr
                input vector d

    @param[in]
    dz          magmaDouble_ptr
                input vector z

    @param[in]
    skp         magmaDouble_ptr
                array for parameters

    @param[in]
    queue       magma_queue_t
                Queue to execute in.
    @ingroup magmasparse_dsygpuk
    ********************************************************************/

extern "C" magma_int_t
magma_dcgmerge_xrbeta(
    magma_int_t n,
    magmaDouble_ptr d1,
    magmaDouble_ptr d2,
    magmaDouble_ptr dx,
    magmaDouble_ptr dr,
    magmaDouble_ptr dd,
    magmaDouble_ptr dz,
    magmaDouble_ptr skp,
    magma_queue_t queue )
{
    int local_block_size=256;
    dim3 Bs( local_block_size );
    dim3 Gs( magma_ceildiv( n, local_block_size ) );
    dim3 Gs_next;
    int Ms = 2*local_block_size * sizeof( double );  // dynamic shared memory per block
    magmaDouble_ptr aux1 = d1, aux2 = d2;            // ping-pong reduction buffers
    int b = 1;

    // Fused update of x and r plus per-block partial dot products of r*r.
    magma_dcgmerge_xrbeta_kernel<<< Gs, Bs, Ms, queue->cuda_stream()>>>
        ( n, dx, dr, dd, dz, skp, d1);

    // Reduce the partial sums to a single value.
    while( Gs.x > 1 ) {
        Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
        if ( Gs_next.x == 1 ) Gs_next.x = 2;
        magma_dcgreduce_kernel_spmv1<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream()>>>
            ( Gs.x, n, aux1, aux2 );
        Gs_next.x = Gs_next.x /2;
        Gs.x = Gs_next.x;
        b = 1 - b;
        if ( b ) { aux1 = d1; aux2 = d2; }
        else   { aux2 = d1; aux1 = d2; }
    }

    // Store beta = r^T r in skp[1], compute alpha, then update d.
    magma_dcopyvector( 1, aux1, 1, skp+1, 1, queue );
    dim3 Bs2( 2 );
    dim3 Gs2( 1 );
    magma_dcg_alphabetakernel<<< Gs2, Bs2, 0, queue->cuda_stream()>>>( skp );

    dim3 Bs3( local_block_size );
    dim3 Gs3( magma_ceildiv( n, local_block_size ) );
    magma_dcg_d_kernel<<< Gs3, Bs3, 0, queue->cuda_stream()>>>( n, skp, dr, dd );

    return MAGMA_SUCCESS;
}

/* -------------------------------------------------------------------------- */

// updates x and r
// x += rho*d and r -= rho*z with rho read from skp[3]; no reduction here.
__global__ void
magma_dpcgmerge_xrbeta_kernel(
    int n,
    double * x,
    double * r,
    double * d,
    double * z,
    double * skp )
{
    // NOTE(review): this shared-memory declaration is never used in this
    // kernel (likely copied from the reducing variant above).
    extern __shared__ double temp[];
    int Idx = threadIdx.x;
    int i   = blockIdx.x * blockDim.x + Idx;

    double rho = skp[3];
    double mrho = MAGMA_D_MAKE( -1.0, 0.0)*rho;

    if( i<n ) {
        x[i] += rho * d[i];
        r[i] += mrho * z[i];
    }
}

// dot product for multiple vectors
// Computes per-block partials of v0.w0 and v0.v0 simultaneously; requires
// 2 * blockDim.x * sizeof(double) dynamic shared memory. Results go to
// vtmp[blockIdx.x] and vtmp[blockIdx.x + n].
__global__ void
magma_dmddot_one_kernel_1(
    int n,
    double * v0,
    double * w0,
    double * vtmp)
{
    extern __shared__ double temp[];
    int Idx = threadIdx.x;
    int i   = blockIdx.x * blockDim.x + Idx;
    int j;

    // 1 vectors v(i)/w(i)
    temp[ Idx ]              = ( i < n ) ? v0[ i ] * w0[ i ] : MAGMA_D_ZERO;
    temp[ Idx + blockDim.x ] = ( i < n ) ? v0[ i ] * v0[ i ] : MAGMA_D_ZERO;

    __syncthreads();
    // Tree reduction over both accumulators (j selects the accumulator).
    if ( Idx < 128 ){
        for( j=0; j<2; j++){
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
        }
    }
    __syncthreads();
    if ( Idx < 64 ){
        for( j=0; j<2; j++){
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
        }
    }
    __syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
    if( Idx < 32 ){
        for( j=0; j<2; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
        __syncthreads();
        for( j=0; j<2; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
        __syncthreads();
        for( j=0; j<2; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
        __syncthreads();
        for( j=0; j<2; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
        __syncthreads();
        for( j=0; j<2; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
        __syncthreads();
        for( j=0; j<2; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
        __syncthreads();
    }
#endif
#if defined(PRECISION_d)
    // Final warp via volatile accesses.
    if( Idx < 32 ){
        volatile double *temp2 = temp;
        for( j=0; j<2; j++){
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
        }
    }
#endif
#if defined(PRECISION_s)
    if( Idx < 32 ){
        volatile float *temp2 = temp;
        for( j=0; j<2; j++){
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
        }
    }
#endif
    if ( Idx == 0 ){
        vtmp[ blockIdx.x ] = temp[ 0 ];
        vtmp[ blockIdx.x+n ] = temp[ blockDim.x ];
    }
}

/**
    Purpose
    -------

    Merges the update of r and x with the dot product and performs then
    the update for the Krylov vector d

    Arguments
    ---------

    @param[in]
    n           int
                dimension n

    @param[in,out]
    dx          magmaDouble_ptr
                input vector x

    @param[in,out]
    dr          magmaDouble_ptr
                input/output vector r

    @param[in]
    dd          magmaDouble_ptr
                input vector d

    @param[in]
    dz          magmaDouble_ptr
                input vector z

    @param[in]
    skp         magmaDouble_ptr
                array for parameters

    @param[in]
    queue       magma_queue_t
                Queue to execute in.

    @ingroup magmasparse_dsygpuk
    ********************************************************************/

extern "C" magma_int_t
magma_dpcgmerge_xrbeta1(
    magma_int_t n,
    magmaDouble_ptr dx,
    magmaDouble_ptr dr,
    magmaDouble_ptr dd,
    magmaDouble_ptr dz,
    magmaDouble_ptr skp,
    magma_queue_t queue )
{
    int local_block_size=256;
    dim3 Bs( local_block_size );
    dim3 Gs( magma_ceildiv( n, local_block_size ) );

    // Update x and r only; the dot products are computed separately in
    // magma_dpcgmerge_xrbeta2.
    magma_dpcgmerge_xrbeta_kernel<<< Gs, Bs, 0, queue->cuda_stream()>>>
        ( n, dx, dr, dd, dz, skp );

    return MAGMA_SUCCESS;
}

/* -------------------------------------------------------------------------- */

/**
    Purpose
    -------

    Merges the update of r and x with the dot product and performs then
    the update for the Krylov vector d

    Arguments
    ---------

    @param[in]
    n           int
                dimension n

    @param[in]
    d1          magmaDouble_ptr
                temporary vector

    @param[in]
    d2          magmaDouble_ptr
                temporary vector

    @param[in]
    dh          magmaDouble_ptr
                input vector x

    @param[in]
    dr          magmaDouble_ptr
                input/output vector r

    @param[in]
    dd          magmaDouble_ptr
                input/output vector d

    @param[in]
    skp         magmaDouble_ptr
                array for parameters

    @param[in]
    queue       magma_queue_t
                Queue to execute in.
    @ingroup magmasparse_dsygpuk
    ********************************************************************/

extern "C" magma_int_t
magma_dpcgmerge_xrbeta2(
    magma_int_t n,
    magmaDouble_ptr d1,
    magmaDouble_ptr d2,
    magmaDouble_ptr dh,
    magmaDouble_ptr dr,
    magmaDouble_ptr dd,
    magmaDouble_ptr skp,
    magma_queue_t queue )
{
    int local_block_size=256;
    dim3 Bs( local_block_size );
    dim3 Gs( magma_ceildiv( n, local_block_size ) );
    dim3 Gs_next;
    int Ms = 4*local_block_size * sizeof( double );  // two accumulators per thread
    magmaDouble_ptr aux1 = d1, aux2 = d2;            // ping-pong reduction buffers
    int b = 1;

    // Per-block partials of r.h and r.r in one pass.
    magma_dmddot_one_kernel_1<<< Gs, Bs, Ms, queue->cuda_stream()>>>
        ( n, dr, dh, d1);

    // Reduce both partial-sum arrays to scalars.
    while( Gs.x > 1 ) {
        Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
        if ( Gs_next.x == 1 ) Gs_next.x = 2;
        magma_dcgreduce_kernel_spmv2<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream()>>>
            ( Gs.x, n, aux1, aux2 );
        Gs_next.x = Gs_next.x /2;
        Gs.x = Gs_next.x;
        b = 1 - b;
        if ( b ) { aux1 = d1; aux2 = d2; }
        else   { aux2 = d1; aux1 = d2; }
    }

    // skp[1] = r.h, skp[6] = r.r; then alpha and the d-update.
    magma_dcopyvector( 1, aux1, 1, skp+1, 1, queue );
    magma_dcopyvector( 1, aux1+n, 1, skp+6, 1, queue );
    dim3 Bs2( 2 );
    dim3 Gs2( 1 );
    magma_dcg_alphabetakernel<<< Gs2, Bs2, 0, queue->cuda_stream()>>>( skp );

    dim3 Bs3( local_block_size );
    dim3 Gs3( magma_ceildiv( n, local_block_size ) );
    magma_dcg_d_kernel<<< Gs3, Bs3, 0, queue->cuda_stream()>>>( n, skp, dh, dd );

    return MAGMA_SUCCESS;
}

/* -------------------------------------------------------------------------- */

// updates x and r
// Jacobi-preconditioned variant: also forms h = diag .* r and reduces the
// per-block partials of h.r and r.r into vtmp. Requires
// 2 * blockDim.x * sizeof(double) dynamic shared memory.
__global__ void
magma_djcgmerge_xrbeta_kernel(
    int n,
    double * diag,
    double * x,
    double * r,
    double * d,
    double * z,
    double * h,
    double * vtmp,
    double * skp )
{
    extern __shared__ double temp[];
    int Idx = threadIdx.x;
    int i   = blockIdx.x * blockDim.x + Idx;
    int j;

    double rho = skp[3];
    double mrho = MAGMA_D_MAKE( -1.0, 0.0)*rho;

    if( i<n ) {
        x[i] += rho * d[i];
        r[i] += mrho * z[i];
        h[i] = r[i] * diag[i];
    }
    __syncthreads();
    temp[ Idx ]              = ( i < n ) ? h[ i ] * r[ i ] : MAGMA_D_ZERO;
    temp[ Idx + blockDim.x ] = ( i < n ) ? r[ i ] * r[ i ] : MAGMA_D_ZERO;
    __syncthreads();
    // Tree reduction over both accumulators (j selects the accumulator).
    if ( Idx < 128 ){
        for( j=0; j<2; j++){
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
        }
    }
    __syncthreads();
    if ( Idx < 64 ){
        for( j=0; j<2; j++){
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
        }
    }
    __syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
    if( Idx < 32 ){
        for( j=0; j<2; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
        __syncthreads();
        for( j=0; j<2; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
        __syncthreads();
        for( j=0; j<2; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
        __syncthreads();
        for( j=0; j<2; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
        __syncthreads();
        for( j=0; j<2; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
        __syncthreads();
        for( j=0; j<2; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
        __syncthreads();
    }
#endif
#if defined(PRECISION_d)
    // Final warp via volatile accesses.
    if( Idx < 32 ){
        volatile double *temp2 = temp;
        for( j=0; j<2; j++){
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
        }
    }
#endif
#if defined(PRECISION_s)
    if( Idx < 32 ){
        volatile float *temp2 = temp;
        for( j=0; j<2; j++){
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
        }
    }
#endif
    if ( Idx == 0 ){
        vtmp[ blockIdx.x ] = temp[ 0 ];
        vtmp[ blockIdx.x+n ] = temp[ blockDim.x ];
    }
}

/**
    Purpose
    -------

    Merges the update
    of r and x with the dot product and performs then
    the update for the Krylov vector d

    Arguments
    ---------

    @param[in]
    n           int
                dimension n

    @param[in]
    d1          magmaDouble_ptr
                temporary vector

    @param[in]
    d2          magmaDouble_ptr
                temporary vector

    @param[in]
    diag        magmaDouble_ptr
                inverse diagonal (Jacobi preconditioner)

    @param[in]
    dx          magmaDouble_ptr
                iteration vector x

    @param[in]
    dr          magmaDouble_ptr
                input/output vector r

    @param[in]
    dd          magmaDouble_ptr
                input vector d

    @param[in]
    dz          magmaDouble_ptr
                input vector z

    @param[in]
    dh          magmaDouble_ptr
                input vector h

    @param[in]
    skp         magmaDouble_ptr
                array for parameters

    @param[in]
    queue       magma_queue_t
                Queue to execute in.

    @ingroup magmasparse_dsygpuk
    ********************************************************************/

extern "C" magma_int_t
magma_djcgmerge_xrbeta(
    magma_int_t n,
    magmaDouble_ptr d1,
    magmaDouble_ptr d2,
    magmaDouble_ptr diag,
    magmaDouble_ptr dx,
    magmaDouble_ptr dr,
    magmaDouble_ptr dd,
    magmaDouble_ptr dz,
    magmaDouble_ptr dh,
    magmaDouble_ptr skp,
    magma_queue_t queue )
{
    int local_block_size=256;
    dim3 Bs( local_block_size );
    dim3 Gs( magma_ceildiv( n, local_block_size ) );
    dim3 Gs_next;
    int Ms = 4*local_block_size * sizeof( double );  // two accumulators per thread
    magmaDouble_ptr aux1 = d1, aux2 = d2;            // ping-pong reduction buffers
    int b = 1;

    // Fused x/r update, Jacobi application h = diag .* r, and per-block
    // partials of h.r and r.r.
    magma_djcgmerge_xrbeta_kernel<<< Gs, Bs, Ms, queue->cuda_stream() >>>
        ( n, diag, dx, dr, dd, dz, dh, d1, skp );

    // Reduce both partial-sum arrays to scalars.
    while( Gs.x > 1 ) {
        Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
        if ( Gs_next.x == 1 ) Gs_next.x = 2;
        magma_dcgreduce_kernel_spmv2<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream() >>>
            ( Gs.x, n, aux1, aux2 );
        Gs_next.x = Gs_next.x /2;
        Gs.x = Gs_next.x;
        b = 1 - b;
        if ( b ) { aux1 = d1; aux2 = d2; }
        else   { aux2 = d1; aux1 = d2; }
    }

    // skp[1] = h.r, skp[6] = r.r; then alpha and the d-update.
    magma_dcopyvector( 1, aux1, 1, skp+1, 1, queue );
    magma_dcopyvector( 1, aux1+n, 1, skp+6, 1, queue );
    dim3 Bs2( 2 );
    dim3 Gs2( 1 );
    magma_dcg_alphabetakernel<<< Gs2, Bs2, 0, queue->cuda_stream()>>>( skp );

    dim3 Bs3( local_block_size );
    dim3 Gs3( magma_ceildiv( n, local_block_size ) );
    magma_dcg_d_kernel<<< Gs3, Bs3, 0, queue->cuda_stream()>>>( n, skp, dh, dd );

    return MAGMA_SUCCESS;
}

/* -------------------------------------------------------------------------- */
59042f151a5a0235cf869e5ce77a5bd9e7addfc7.hip
// !!! This is a file automatically generated by hipify!!!
// HIP port of a parallel simulated-annealing TSP solver: each GPU thread
// anneals its own copy of the tour at a fixed temperature; the host picks
// the best result and lowers the temperature.
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <iostream>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>

#define MAX_TRIES 100        // per-thread change attempts = MAX_TRIES * CITY_N
#define N_LIMIT 20           // early-exit success limit = N_LIMIT * CITY_N
#define MAX_TEMP_STEPS 500   // maximum number of cooling steps
#define TEMP_START 20        // initial annealing temperature
#define COOLING 0.95         // multiplicative cooling factor per step
#define THREADS 256          // number of independent annealing threads
#define MAX_CITY 512         // fixed capacity of a tour
#define BOLTZMANN_COEFF 0.1  // scales cost deltas in the Metropolis test

// Abort with a readable message when a HIP runtime call fails.
static void HandleError( hipError_t err, const char *file, int line ) {
    if (err != hipSuccess) {
        printf( "%s in %s at line %d\n", hipGetErrorString( err ),file, line );
        exit( EXIT_FAILURE );
    }
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))

using namespace std;

// 2D city coordinates.
struct city {
    double x;
    double y;
};

// One thread's tour: total length, visiting order, and number of accepted moves.
struct permutation {
    int cost;
    int order[MAX_CITY];
    int nSucc;
};

// Parameters shared by all kernels, stored in constant memory.
struct GlobalConstants {
    int CITY_N;
    city* cities;
    hiprandState_t* devStates;
};

//global variables
struct city *cities;
int CITY_N;

//global variables on GPU
__constant__ GlobalConstants cuTspParam;

/* rounding function, but at .5 rounds to the lower int. Due to the TSPLIB
 * standard library. */
__device__ __host__ __inline__ int nint(float x) {
    return (int) (x + 0.5);
}

/* Randomisation is done by a simple linear congruential generator.
 * We use A and C values as done by glibc.
 */
// NOTE(review): despite the comment above, these helpers draw from hipRAND,
// not a hand-rolled LCG -- the comment predates the hipRAND port.
__device__ unsigned __inline__ int randomInt(hiprandState_t *state, unsigned int max) {
    // Uniform integer in [0, max); modulo bias is accepted here.
    return hiprand(state) % max;
}

__device__ __inline__ double randomDouble(hiprandState_t *state) {
    // Uniform value in (0, 1].
    return (double) hiprand_uniform(state);
}

__device__ __inline__ bool randomBool(hiprandState_t *state) {
    // Uses one bit of a random byte as a fair coin flip.
    if ((randomInt(state, 256) >> 7) & 0x00000001)
        return true;
    else
        return false;
}

// One RNG state per thread, all seeded identically but with distinct sequences.
__global__ void initCurand(hiprandState_t *state, unsigned long seed) {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    hiprand_init(seed, idx, 0, &state[idx]);
}

// Rounded Euclidean distance between two cities (TSPLIB convention).
__device__ __host__ __inline__ int euclideanDistance(struct city *a, struct city *b) {
    float dx = b->x - a->x;
    float dy = b->y - a->y;
    return nint((sqrt(dx * dx + dy * dy)));
}

/* Calcuates the delta of the costs given by a new order using reverse */
__device__ int reverseCost(struct city *cities, int *order, int *n) {
    int cost;
    // Remove the two edges cut by the reversal, add the two new ones.
    cost = -euclideanDistance(&cities[order[n[0]]], &cities[order[n[2]]]);
    cost -= euclideanDistance(&cities[order[n[1]]], &cities[order[n[3]]]);
    cost += euclideanDistance(&cities[order[n[0]]], &cities[order[n[3]]]);
    cost += euclideanDistance(&cities[order[n[1]]], &cities[order[n[2]]]);
    return cost;
}

/* The order of the city is changed by swapping the
 * order between n[0] and n[1].
 * The swapping is done beginning from the outer end
 * going into the middle */
__device__ void reverse(int *order, int *n) {
    int CITY_N = cuTspParam.CITY_N;
    // this many elements have to be swapped to have a complete reversal
    int swaps = (1 + ((n[1] - n[0] + CITY_N) % CITY_N)) / 2;
    for (int j = 0; j < swaps; ++j) {
        // Indices wrap around the tour.
        int k = (n[0] + j) % CITY_N;
        int l = (n[1] - j + CITY_N) % CITY_N;
        int tmp = order[k];
        order[k] = order[l];
        order[l] = tmp;
    }
}

/* Calculates the delta of the costs of the city order if
 * the transportation of this segments (given by n) are actually
 * done.
*/ __device__ int transportCost(struct city *cities, int *order, int *n) { int cost; cost = -euclideanDistance(&cities[order[n[1]]], &cities[order[n[5]]]); cost -= euclideanDistance(&cities[order[n[0]]], &cities[order[n[4]]]); cost -= euclideanDistance(&cities[order[n[2]]], &cities[order[n[3]]]); cost += euclideanDistance(&cities[order[n[0]]], &cities[order[n[2]]]); cost += euclideanDistance(&cities[order[n[1]]], &cities[order[n[3]]]); cost += euclideanDistance(&cities[order[n[4]]], &cities[order[n[5]]]); return cost; } /* Transport the path segment (consisting of the start n[0] and end at n[1] * to the path given by n[2] and n[3], which are adjacent and the segment is * to be placed in between. n[4] is the city preceding n[0] and n[5] succeeds * n[1]. * Transportation should only be done if the metroplis algorithm agrees. * */ __device__ void transport(int *order, int *n) { int CITY_N = cuTspParam.CITY_N; int newOrder[MAX_CITY]; int m1 = (n[1] - n[0] + CITY_N) % CITY_N; int m2 = (n[4] - n[3] + CITY_N) % CITY_N; int m3 = (n[2] - n[5] + CITY_N) % CITY_N; int i = 0; for (int j = 0; j <= m1; ++j) { newOrder[i++] = order[(j + n[0]) % CITY_N]; } for (int j = 0; j <= m2; ++j) { newOrder[i++] = order[(j + n[3]) % CITY_N]; } for (int j = 0; j <= m3; ++j) { newOrder[i++] = order[(j + n[5]) % CITY_N]; } for (int j = 0; j < CITY_N; ++j) { order[j] = newOrder[j]; } } /* Metroplis algorithm: Always take the downhill path and * sometime take the uphill path to avoid local minima */ __device__ __inline__ bool metropolis(const int cost, const double t, hiprandState_t *state) { return cost < 0 || randomDouble(state) < exp((double) (BOLTZMANN_COEFF * -cost / t)); } /* Main kernel function */ __global__ void solve(struct permutation *permutations, const float t) { struct city* cities = cuTspParam.cities; int CITY_N = cuTspParam.CITY_N; int notSeg; // number of cities not on the segment int maxChangeTries = MAX_TRIES * CITY_N; int succLimit = N_LIMIT * CITY_N; int dCost; bool ans; int 
n[6]; int id = blockDim.x * blockIdx.x + threadIdx.x; struct permutation *perm = &(permutations[id]); hiprandState_t localState = cuTspParam.devStates[id]; perm->nSucc = 0; for (int j = 0; j < maxChangeTries; ++j) { do { n[0] = randomInt(&localState, CITY_N); n[1] = randomInt(&localState, CITY_N - 1); if (n[1] >= n[0]) ++n[1]; notSeg = (n[0] - n[1] + CITY_N - 1) % CITY_N; } while (notSeg < 2); /* It is randomly choosen whether a transportation or a reversion is done */ if (randomBool(&localState)) { n[2] = (n[1] + randomInt(&localState, abs(notSeg - 1)) + 1) % CITY_N; n[3] = (n[2] + 1) % CITY_N; n[4] = (n[0] + CITY_N- 1) % CITY_N; n[5] = (n[1] + 1) % CITY_N; dCost = transportCost(cities, perm->order, n); ans = metropolis(dCost, t, &localState); if (ans) { ++perm->nSucc; perm->cost += dCost; transport(perm->order, n); } } else { n[2] = (n[0] + CITY_N - 1) % CITY_N; n[3] = (n[1] + 1) % CITY_N; dCost = reverseCost(cities, perm->order, n); ans = metropolis(dCost, t, &localState); if (ans) { ++perm->nSucc; perm->cost += dCost; reverse(perm->order, n); } } /* Finish early if there are enough successful changes */ if (perm->nSucc > succLimit) break; } } class Anneal { private: /* Calculates the length of the initial path, which is already given. 
* This is in O(n) */ void initialPath(struct permutation *perm, struct city *cities) { int i, i1, i2; perm->cost= 0; for (i = 0; i < CITY_N - 1; i++) { i1 = perm->order[i]; i2 = perm->order[i+1]; perm->cost += euclideanDistance(&cities[i1], &cities[i2]); } i1 = perm->order[CITY_N - 1]; i2 = perm->order[0]; perm->cost += euclideanDistance(&cities[i1], &cities[i2]); cout << "Initial path length: " << perm->cost << endl; } void printInformation(struct permutation *currPerm, bool showOrder = true) { cout << "Path Length = " << currPerm->cost << endl; cout << "Successful Moves: " << currPerm->nSucc << endl; if (showOrder) { cout << "Order: "; for (int j = 0; j < CITY_N; j++) { cout << currPerm->order[j] << " "; } } cout << endl; } public: double runtime; int resultCost; Anneal() {} void order(struct city *cities, int *order) { double t = TEMP_START; struct permutation *dPermutation; struct permutation *hPermutation = (struct permutation *) malloc(THREADS * sizeof(struct permutation)); struct city *dCities; struct permutation *currPerm = (struct permutation *) malloc(sizeof(struct permutation)); struct permutation *allMinPerm= (struct permutation *) malloc(sizeof(struct permutation)); int oldCost = 2147483647; int repeatCost = 0; clock_t startAll, endAll; // timer to measure the overall run time double runtimeAll; clock_t startCuda, endCuda; //timer to measure the run time of cuda double cudaRuntime = 0.0f; hiprandState_t *devStates; startAll = clock(); // Kernel invocation int threadsPerBlock = 256; int blocksPerGrid = (THREADS + threadsPerBlock - 1) / threadsPerBlock; cout << "Threads: " << THREADS << ", Blocks: " << blocksPerGrid << endl; memcpy(currPerm->order, order, CITY_N * sizeof(int)); initialPath(currPerm, cities); memcpy(allMinPerm, currPerm, sizeof(struct permutation)); HANDLE_ERROR(hipMalloc(&dPermutation, THREADS * sizeof(struct permutation))); HANDLE_ERROR(hipMalloc(&dCities, CITY_N * sizeof(struct city))); HANDLE_ERROR(hipMemcpy(dCities, cities, CITY_N * 
sizeof(struct city), hipMemcpyHostToDevice)); // for generate random numbers directly on the device HANDLE_ERROR(hipMalloc((void **)&devStates, THREADS * sizeof(hiprandState_t))); hipLaunchKernelGGL(( initCurand), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, devStates, 1234); //put global constants to constant memory GlobalConstants params; params.cities = dCities; params.CITY_N = CITY_N; params.devStates = devStates; hipMemcpyToSymbol(cuTspParam, &params, sizeof(GlobalConstants)); /* Try up to MAX_TEMP_STEPS temperature steps. It could stop before if no kernel * showed any succesful change or if the solution did not change 5 times */ for (int i = 0; i < MAX_TEMP_STEPS; ++i) { hipDeviceSynchronize(); startCuda = clock(); //Copies the initial permutation to each result permutation for (int i = 0; i < THREADS; ++i) { memcpy(hPermutation[i].order, currPerm->order, CITY_N * sizeof(int)); hPermutation[i].cost = currPerm->cost; } HANDLE_ERROR(hipMemcpy(dPermutation, hPermutation, THREADS * sizeof(struct permutation), hipMemcpyHostToDevice)); //invoke cuda hipLaunchKernelGGL(( solve), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dPermutation, t); HANDLE_ERROR(hipDeviceSynchronize()); endCuda = clock(); cudaRuntime += (endCuda - startCuda) * 1000 / CLOCKS_PER_SEC; HANDLE_ERROR(hipMemcpy(hPermutation, dPermutation, THREADS * sizeof(struct permutation), hipMemcpyDeviceToHost)); /* Loops through all resulting permutations and store the one with minimal length but * at least one swap. * If all threads didn't swap, exit the program. * Takes O(n) time. */ int minCost = 2147483647; bool swap = false; for (int j = 0; j < THREADS; ++j) { if (minCost >= hPermutation[j].cost && hPermutation[j].nSucc != 0) { currPerm = &(hPermutation[j]); minCost = currPerm->cost; swap = true; if (minCost < allMinPerm->cost) memcpy(allMinPerm, currPerm, sizeof(struct permutation)); } } if (!swap) { cout << "No swaps occured. 
Exit" << endl; break; } if (oldCost == minCost) { if (++repeatCost == 5) { cout << "Cost did not change 5 times in a row. Exit" << endl; break; } } else repeatCost = 0; cout << endl << "T = " << t << endl; printInformation(currPerm, false); oldCost = minCost; t *= COOLING; } endAll = clock(); runtimeAll = (endAll - startAll) / (1.0f * CLOCKS_PER_SEC) * 1000; cout << endl << "Final Result:" << endl; cout << "=============" << endl; printInformation(allMinPerm); runtime = runtimeAll; resultCost = allMinPerm->cost; printf("\nThe program needed an overall time of %.2lf ms.\n", runtimeAll); printf("%.2lf ms were spent at the CUDA part.\n", cudaRuntime); printf("So %.2lf ms were spent at the host.", runtimeAll - cudaRuntime); hipFree(dPermutation); hipFree(dCities); free(allMinPerm); free(hPermutation); } };
59042f151a5a0235cf869e5ce77a5bd9e7addfc7.cu
#include <stdio.h> #include <math.h> #include <time.h> #include <iostream> #include <cuda.h> #include <curand.h> #include <curand_kernel.h> #define MAX_TRIES 100 #define N_LIMIT 20 #define MAX_TEMP_STEPS 500 #define TEMP_START 20 #define COOLING 0.95 #define THREADS 256 #define MAX_CITY 512 #define BOLTZMANN_COEFF 0.1 static void HandleError( cudaError_t err, const char *file, int line ) { if (err != cudaSuccess) { printf( "%s in %s at line %d\n", cudaGetErrorString( err ),file, line ); exit( EXIT_FAILURE ); } } #define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ )) using namespace std; struct city { double x; double y; }; struct permutation { int cost; int order[MAX_CITY]; int nSucc; }; struct GlobalConstants { int CITY_N; city* cities; curandState* devStates; }; //global variables struct city *cities; int CITY_N; //global variables on GPU __constant__ GlobalConstants cuTspParam; /* rounding function, but at .5 rounds to the lower int. Due to the TSPLIB * standard library. */ __device__ __host__ __inline__ int nint(float x) { return (int) (x + 0.5); } /* Randomisation is done by a simple linear congruential generator. * We use A and C values as done by glibc. 
*/ __device__ unsigned __inline__ int randomInt(curandState *state, unsigned int max) { return curand(state) % max; } __device__ __inline__ double randomDouble(curandState *state) { return (double) curand_uniform(state); } __device__ __inline__ bool randomBool(curandState *state) { if ((randomInt(state, 256) >> 7) & 0x00000001) return true; else return false; } __global__ void initCurand(curandState *state, unsigned long seed) { int idx = threadIdx.x + blockIdx.x * blockDim.x; curand_init(seed, idx, 0, &state[idx]); } __device__ __host__ __inline__ int euclideanDistance(struct city *a, struct city *b) { float dx = b->x - a->x; float dy = b->y - a->y; return nint((sqrt(dx * dx + dy * dy))); } /* Calcuates the delta of the costs given by a new order using reverse */ __device__ int reverseCost(struct city *cities, int *order, int *n) { int cost; cost = -euclideanDistance(&cities[order[n[0]]], &cities[order[n[2]]]); cost -= euclideanDistance(&cities[order[n[1]]], &cities[order[n[3]]]); cost += euclideanDistance(&cities[order[n[0]]], &cities[order[n[3]]]); cost += euclideanDistance(&cities[order[n[1]]], &cities[order[n[2]]]); return cost; } /* The order of the city is changed by swapping the * order between n[0] and n[1]. * The swapping is done beginning from the outer end * going into the middle */ __device__ void reverse(int *order, int *n) { int CITY_N = cuTspParam.CITY_N; int swaps = (1 + ((n[1] - n[0] + CITY_N) % CITY_N)) / 2; // this many elements have to be swapped to have a complete reversal for (int j = 0; j < swaps; ++j) { int k = (n[0] + j) % CITY_N; int l = (n[1] - j + CITY_N) % CITY_N; int tmp = order[k]; order[k] = order[l]; order[l] = tmp; } } /* Calculates the delta of the costs of the city order if * the transportation of this segments (given by n) are actually * done. 
*/ __device__ int transportCost(struct city *cities, int *order, int *n) { int cost; cost = -euclideanDistance(&cities[order[n[1]]], &cities[order[n[5]]]); cost -= euclideanDistance(&cities[order[n[0]]], &cities[order[n[4]]]); cost -= euclideanDistance(&cities[order[n[2]]], &cities[order[n[3]]]); cost += euclideanDistance(&cities[order[n[0]]], &cities[order[n[2]]]); cost += euclideanDistance(&cities[order[n[1]]], &cities[order[n[3]]]); cost += euclideanDistance(&cities[order[n[4]]], &cities[order[n[5]]]); return cost; } /* Transport the path segment (consisting of the start n[0] and end at n[1] * to the path given by n[2] and n[3], which are adjacent and the segment is * to be placed in between. n[4] is the city preceding n[0] and n[5] succeeds * n[1]. * Transportation should only be done if the metroplis algorithm agrees. * */ __device__ void transport(int *order, int *n) { int CITY_N = cuTspParam.CITY_N; int newOrder[MAX_CITY]; int m1 = (n[1] - n[0] + CITY_N) % CITY_N; int m2 = (n[4] - n[3] + CITY_N) % CITY_N; int m3 = (n[2] - n[5] + CITY_N) % CITY_N; int i = 0; for (int j = 0; j <= m1; ++j) { newOrder[i++] = order[(j + n[0]) % CITY_N]; } for (int j = 0; j <= m2; ++j) { newOrder[i++] = order[(j + n[3]) % CITY_N]; } for (int j = 0; j <= m3; ++j) { newOrder[i++] = order[(j + n[5]) % CITY_N]; } for (int j = 0; j < CITY_N; ++j) { order[j] = newOrder[j]; } } /* Metroplis algorithm: Always take the downhill path and * sometime take the uphill path to avoid local minima */ __device__ __inline__ bool metropolis(const int cost, const double t, curandState *state) { return cost < 0 || randomDouble(state) < exp((double) (BOLTZMANN_COEFF * -cost / t)); } /* Main kernel function */ __global__ void solve(struct permutation *permutations, const float t) { struct city* cities = cuTspParam.cities; int CITY_N = cuTspParam.CITY_N; int notSeg; // number of cities not on the segment int maxChangeTries = MAX_TRIES * CITY_N; int succLimit = N_LIMIT * CITY_N; int dCost; bool ans; int 
n[6]; int id = blockDim.x * blockIdx.x + threadIdx.x; struct permutation *perm = &(permutations[id]); curandState localState = cuTspParam.devStates[id]; perm->nSucc = 0; for (int j = 0; j < maxChangeTries; ++j) { do { n[0] = randomInt(&localState, CITY_N); n[1] = randomInt(&localState, CITY_N - 1); if (n[1] >= n[0]) ++n[1]; notSeg = (n[0] - n[1] + CITY_N - 1) % CITY_N; } while (notSeg < 2); /* It is randomly choosen whether a transportation or a reversion is done */ if (randomBool(&localState)) { n[2] = (n[1] + randomInt(&localState, abs(notSeg - 1)) + 1) % CITY_N; n[3] = (n[2] + 1) % CITY_N; n[4] = (n[0] + CITY_N- 1) % CITY_N; n[5] = (n[1] + 1) % CITY_N; dCost = transportCost(cities, perm->order, n); ans = metropolis(dCost, t, &localState); if (ans) { ++perm->nSucc; perm->cost += dCost; transport(perm->order, n); } } else { n[2] = (n[0] + CITY_N - 1) % CITY_N; n[3] = (n[1] + 1) % CITY_N; dCost = reverseCost(cities, perm->order, n); ans = metropolis(dCost, t, &localState); if (ans) { ++perm->nSucc; perm->cost += dCost; reverse(perm->order, n); } } /* Finish early if there are enough successful changes */ if (perm->nSucc > succLimit) break; } } class Anneal { private: /* Calculates the length of the initial path, which is already given. 
* This is in O(n) */ void initialPath(struct permutation *perm, struct city *cities) { int i, i1, i2; perm->cost= 0; for (i = 0; i < CITY_N - 1; i++) { i1 = perm->order[i]; i2 = perm->order[i+1]; perm->cost += euclideanDistance(&cities[i1], &cities[i2]); } i1 = perm->order[CITY_N - 1]; i2 = perm->order[0]; perm->cost += euclideanDistance(&cities[i1], &cities[i2]); cout << "Initial path length: " << perm->cost << endl; } void printInformation(struct permutation *currPerm, bool showOrder = true) { cout << "Path Length = " << currPerm->cost << endl; cout << "Successful Moves: " << currPerm->nSucc << endl; if (showOrder) { cout << "Order: "; for (int j = 0; j < CITY_N; j++) { cout << currPerm->order[j] << " "; } } cout << endl; } public: double runtime; int resultCost; Anneal() {} void order(struct city *cities, int *order) { double t = TEMP_START; struct permutation *dPermutation; struct permutation *hPermutation = (struct permutation *) malloc(THREADS * sizeof(struct permutation)); struct city *dCities; struct permutation *currPerm = (struct permutation *) malloc(sizeof(struct permutation)); struct permutation *allMinPerm= (struct permutation *) malloc(sizeof(struct permutation)); int oldCost = 2147483647; int repeatCost = 0; clock_t startAll, endAll; // timer to measure the overall run time double runtimeAll; clock_t startCuda, endCuda; //timer to measure the run time of cuda double cudaRuntime = 0.0f; curandState *devStates; startAll = clock(); // Kernel invocation int threadsPerBlock = 256; int blocksPerGrid = (THREADS + threadsPerBlock - 1) / threadsPerBlock; cout << "Threads: " << THREADS << ", Blocks: " << blocksPerGrid << endl; memcpy(currPerm->order, order, CITY_N * sizeof(int)); initialPath(currPerm, cities); memcpy(allMinPerm, currPerm, sizeof(struct permutation)); HANDLE_ERROR(cudaMalloc(&dPermutation, THREADS * sizeof(struct permutation))); HANDLE_ERROR(cudaMalloc(&dCities, CITY_N * sizeof(struct city))); HANDLE_ERROR(cudaMemcpy(dCities, cities, CITY_N * 
sizeof(struct city), cudaMemcpyHostToDevice)); // for generate random numbers directly on the device HANDLE_ERROR(cudaMalloc((void **)&devStates, THREADS * sizeof(curandState))); initCurand<<<blocksPerGrid, threadsPerBlock>>>(devStates, 1234); //put global constants to constant memory GlobalConstants params; params.cities = dCities; params.CITY_N = CITY_N; params.devStates = devStates; cudaMemcpyToSymbol(cuTspParam, &params, sizeof(GlobalConstants)); /* Try up to MAX_TEMP_STEPS temperature steps. It could stop before if no kernel * showed any succesful change or if the solution did not change 5 times */ for (int i = 0; i < MAX_TEMP_STEPS; ++i) { cudaThreadSynchronize(); startCuda = clock(); //Copies the initial permutation to each result permutation for (int i = 0; i < THREADS; ++i) { memcpy(hPermutation[i].order, currPerm->order, CITY_N * sizeof(int)); hPermutation[i].cost = currPerm->cost; } HANDLE_ERROR(cudaMemcpy(dPermutation, hPermutation, THREADS * sizeof(struct permutation), cudaMemcpyHostToDevice)); //invoke cuda solve<<<blocksPerGrid, threadsPerBlock>>>(dPermutation, t); HANDLE_ERROR(cudaThreadSynchronize()); endCuda = clock(); cudaRuntime += (endCuda - startCuda) * 1000 / CLOCKS_PER_SEC; HANDLE_ERROR(cudaMemcpy(hPermutation, dPermutation, THREADS * sizeof(struct permutation), cudaMemcpyDeviceToHost)); /* Loops through all resulting permutations and store the one with minimal length but * at least one swap. * If all threads didn't swap, exit the program. * Takes O(n) time. */ int minCost = 2147483647; bool swap = false; for (int j = 0; j < THREADS; ++j) { if (minCost >= hPermutation[j].cost && hPermutation[j].nSucc != 0) { currPerm = &(hPermutation[j]); minCost = currPerm->cost; swap = true; if (minCost < allMinPerm->cost) memcpy(allMinPerm, currPerm, sizeof(struct permutation)); } } if (!swap) { cout << "No swaps occured. Exit" << endl; break; } if (oldCost == minCost) { if (++repeatCost == 5) { cout << "Cost did not change 5 times in a row. 
Exit" << endl; break; } } else repeatCost = 0; cout << endl << "T = " << t << endl; printInformation(currPerm, false); oldCost = minCost; t *= COOLING; } endAll = clock(); runtimeAll = (endAll - startAll) / (1.0f * CLOCKS_PER_SEC) * 1000; cout << endl << "Final Result:" << endl; cout << "=============" << endl; printInformation(allMinPerm); runtime = runtimeAll; resultCost = allMinPerm->cost; printf("\nThe program needed an overall time of %.2lf ms.\n", runtimeAll); printf("%.2lf ms were spent at the CUDA part.\n", cudaRuntime); printf("So %.2lf ms were spent at the host.", runtimeAll - cudaRuntime); cudaFree(dPermutation); cudaFree(dCities); free(allMinPerm); free(hPermutation); } };
791edbc60f70768f026c480cb76290a07858c4ee.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "hello2D.hip" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( hello2D), dim3(gridBlock),dim3(threadBlock), 0, 0, ); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( hello2D), dim3(gridBlock),dim3(threadBlock), 0, 0, ); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( hello2D), dim3(gridBlock),dim3(threadBlock), 0, 0, ); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
791edbc60f70768f026c480cb76290a07858c4ee.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "hello2D.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); hello2D<<<gridBlock,threadBlock>>>(); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { hello2D<<<gridBlock,threadBlock>>>(); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { hello2D<<<gridBlock,threadBlock>>>(); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
345b376e222c7b3aa82c3653fa391d219c50d1cc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* * Paulius Micikevicius (pauliusm@nvidia.com) * Max Grossman (jmaxg3@gmail.com) */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <sys/time.h> #include <unistd.h> #include "common.h" #include "common2d.h" #define BDIMX 32 #define BDIMY 16 __global__ void fwd_kernel(TYPE *next, TYPE *curr, TYPE *vsq, TYPE *c_coeff, int nx, int ny, int dimx, int radius) { int y = blockIdx.y * blockDim.y + threadIdx.y; int x = blockIdx.x * blockDim.x + threadIdx.x; int this_offset = POINT_OFFSET(x, y, dimx, radius); TYPE temp = 2.0f * curr[this_offset] - next[this_offset]; TYPE div = c_coeff[0] * curr[this_offset]; for (int d = radius; d >= 1; d--) { int y_pos_offset = POINT_OFFSET(x, y + d, dimx, radius); int y_neg_offset = POINT_OFFSET(x, y - d, dimx, radius); int x_pos_offset = POINT_OFFSET(x + d, y, dimx, radius); int x_neg_offset = POINT_OFFSET(x - d, y, dimx, radius); div += c_coeff[d] * (curr[y_pos_offset] + curr[y_neg_offset] + curr[x_pos_offset] + curr[x_neg_offset]); } next[this_offset] = temp + div * vsq[this_offset]; } int main(int argc, char *argv[]) { config conf; setup_config(&conf, argc, argv); init_progress(conf.progress_width, conf.nsteps, conf.progress_disabled); #ifndef PADDING fprintf(stderr, "Must be compiled with -DPADDING\n"); return 1; #endif if (conf.nx % BDIMX != 0) { fprintf(stderr, "Invalid nx configuration, must be an even multiple of " "%d\n", BDIMX); return 1; } if (conf.ny % BDIMY != 0) { fprintf(stderr, "Invalid ny configuration, must be an even multiple of " "%d\n", BDIMY); return 1; } if (conf.radius > TRANSACTION_LEN) { fprintf(stderr, "Radius must be less than TRANSACTION_LEN to include " "it in dimx padding\n"); return 1; } TYPE dx = 20.f; TYPE dt = 0.002f; // compute the pitch for perfect coalescing size_t dimx = TRANSACTION_LEN + conf.nx + conf.radius; dimx += (TRANSACTION_LEN - (dimx % TRANSACTION_LEN)); size_t dimy = conf.ny + 2*conf.radius; size_t nbytes = dimx * dimy * sizeof(TYPE); if 
(conf.verbose) { printf("x = %zu, y = %zu\n", dimx, dimy); printf("nsteps = %d\n", conf.nsteps); printf("radius = %d\n", conf.radius); } TYPE c_coeff[NUM_COEFF]; TYPE *curr, *next, *vsq; CHECK(hipHostMalloc((void **)&curr, nbytes)); CHECK(hipHostMalloc((void **)&next, nbytes)); CHECK(hipHostMalloc((void **)&vsq, nbytes)); config_sources(&conf.srcs, &conf.nsrcs, conf.nx, conf.ny, conf.nsteps); TYPE **srcs = sample_sources(conf.srcs, conf.nsrcs, conf.nsteps, dt); init_data(curr, next, vsq, c_coeff, dimx, dimy, dx, dt); TYPE *d_curr, *d_next, *d_vsq, *d_c_coeff; CHECK(hipMalloc((void **)&d_curr, nbytes)); CHECK(hipMalloc((void **)&d_next, nbytes)); CHECK(hipMalloc((void **)&d_vsq, nbytes)); CHECK(hipMalloc((void **)&d_c_coeff, NUM_COEFF * sizeof(TYPE))); dim3 block(BDIMX, BDIMY); dim3 grid(conf.nx / block.x, conf.ny / block.y); double mem_start = seconds(); CHECK(hipMemcpy(d_curr, curr, nbytes, hipMemcpyHostToDevice)); CHECK(hipMemcpy(d_next, next, nbytes, hipMemcpyHostToDevice)); CHECK(hipMemcpy(d_vsq, vsq, nbytes, hipMemcpyHostToDevice)); CHECK(hipMemcpy(d_c_coeff, c_coeff, NUM_COEFF * sizeof(TYPE), hipMemcpyHostToDevice)); double start = seconds(); for (int step = 0; step < conf.nsteps; step++) { for (int src = 0; src < conf.nsrcs; src++) { if (conf.srcs[src].t > step) continue; int src_offset = POINT_OFFSET(conf.srcs[src].x, conf.srcs[src].y, dimx, conf.radius); CHECK(hipMemcpy(d_curr + src_offset, srcs[src] + step, sizeof(TYPE), hipMemcpyHostToDevice)); } hipLaunchKernelGGL(( fwd_kernel), dim3(grid), dim3(block), 0, 0, d_next, d_curr, d_vsq, d_c_coeff, conf.nx, conf.ny, dimx, conf.radius); TYPE *tmp = d_next; d_next = d_curr; d_curr = tmp; update_progress(step + 1); } CHECK(hipDeviceSynchronize()); double compute_s = seconds() - start; CHECK(hipMemcpy(curr, d_curr, nbytes, hipMemcpyDeviceToHost)); double total_s = seconds() - mem_start; finish_progress(); float point_rate = (float)conf.nx * conf.ny / (compute_s / conf.nsteps); fprintf(stderr, "iso_r4_2x: %8.10f s 
total, %8.10f s/step, %8.2f Mcells/s/step\n", total_s, compute_s / conf.nsteps, point_rate / 1000000.f); if (conf.save_text) { save_text(curr, dimx, dimy, conf.ny, conf.nx, "snap.text", conf.radius); } CHECK(hipHostFree(curr)); CHECK(hipHostFree(next)); CHECK(hipHostFree(vsq)); for (int i = 0; i < conf.nsrcs; i++) { free(srcs[i]); } free(srcs); CHECK(hipFree(d_curr)); CHECK(hipFree(d_next)); CHECK(hipFree(d_vsq)); CHECK(hipFree(d_c_coeff)); return 0; }
345b376e222c7b3aa82c3653fa391d219c50d1cc.cu
/* * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* * Paulius Micikevicius (pauliusm@nvidia.com) * Max Grossman (jmaxg3@gmail.com) */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <sys/time.h> #include <unistd.h> #include "common.h" #include "common2d.h" #define BDIMX 32 #define BDIMY 16 __global__ void fwd_kernel(TYPE *next, TYPE *curr, TYPE *vsq, TYPE *c_coeff, int nx, int ny, int dimx, int radius) { int y = blockIdx.y * blockDim.y + threadIdx.y; int x = blockIdx.x * blockDim.x + threadIdx.x; int this_offset = POINT_OFFSET(x, y, dimx, radius); TYPE temp = 2.0f * curr[this_offset] - next[this_offset]; TYPE div = c_coeff[0] * curr[this_offset]; for (int d = radius; d >= 1; d--) { int y_pos_offset = POINT_OFFSET(x, y + d, dimx, radius); int y_neg_offset = POINT_OFFSET(x, y - d, dimx, radius); int x_pos_offset = POINT_OFFSET(x + d, y, dimx, radius); int x_neg_offset = POINT_OFFSET(x - d, y, dimx, radius); div += c_coeff[d] * (curr[y_pos_offset] + curr[y_neg_offset] + curr[x_pos_offset] + curr[x_neg_offset]); } next[this_offset] = temp + div * vsq[this_offset]; } int main(int argc, char *argv[]) { config conf; setup_config(&conf, argc, argv); init_progress(conf.progress_width, conf.nsteps, conf.progress_disabled); #ifndef PADDING fprintf(stderr, "Must be compiled with -DPADDING\n"); return 1; #endif if (conf.nx % BDIMX != 0) { fprintf(stderr, "Invalid nx configuration, must be an even multiple of " "%d\n", BDIMX); return 1; } if (conf.ny % BDIMY != 0) { fprintf(stderr, "Invalid ny configuration, must be an even multiple of " "%d\n", BDIMY); return 1; } if (conf.radius > TRANSACTION_LEN) { fprintf(stderr, "Radius must be less than TRANSACTION_LEN to include " "it in dimx padding\n"); return 1; } TYPE dx = 20.f; TYPE dt = 0.002f; // compute the pitch for perfect coalescing size_t dimx = TRANSACTION_LEN + conf.nx + conf.radius; dimx += (TRANSACTION_LEN - (dimx % TRANSACTION_LEN)); size_t dimy = conf.ny + 2*conf.radius; size_t nbytes = dimx * dimy * sizeof(TYPE); if 
(conf.verbose) { printf("x = %zu, y = %zu\n", dimx, dimy); printf("nsteps = %d\n", conf.nsteps); printf("radius = %d\n", conf.radius); } TYPE c_coeff[NUM_COEFF]; TYPE *curr, *next, *vsq; CHECK(cudaMallocHost((void **)&curr, nbytes)); CHECK(cudaMallocHost((void **)&next, nbytes)); CHECK(cudaMallocHost((void **)&vsq, nbytes)); config_sources(&conf.srcs, &conf.nsrcs, conf.nx, conf.ny, conf.nsteps); TYPE **srcs = sample_sources(conf.srcs, conf.nsrcs, conf.nsteps, dt); init_data(curr, next, vsq, c_coeff, dimx, dimy, dx, dt); TYPE *d_curr, *d_next, *d_vsq, *d_c_coeff; CHECK(cudaMalloc((void **)&d_curr, nbytes)); CHECK(cudaMalloc((void **)&d_next, nbytes)); CHECK(cudaMalloc((void **)&d_vsq, nbytes)); CHECK(cudaMalloc((void **)&d_c_coeff, NUM_COEFF * sizeof(TYPE))); dim3 block(BDIMX, BDIMY); dim3 grid(conf.nx / block.x, conf.ny / block.y); double mem_start = seconds(); CHECK(cudaMemcpy(d_curr, curr, nbytes, cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(d_next, next, nbytes, cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(d_vsq, vsq, nbytes, cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(d_c_coeff, c_coeff, NUM_COEFF * sizeof(TYPE), cudaMemcpyHostToDevice)); double start = seconds(); for (int step = 0; step < conf.nsteps; step++) { for (int src = 0; src < conf.nsrcs; src++) { if (conf.srcs[src].t > step) continue; int src_offset = POINT_OFFSET(conf.srcs[src].x, conf.srcs[src].y, dimx, conf.radius); CHECK(cudaMemcpy(d_curr + src_offset, srcs[src] + step, sizeof(TYPE), cudaMemcpyHostToDevice)); } fwd_kernel<<<grid, block>>>(d_next, d_curr, d_vsq, d_c_coeff, conf.nx, conf.ny, dimx, conf.radius); TYPE *tmp = d_next; d_next = d_curr; d_curr = tmp; update_progress(step + 1); } CHECK(cudaDeviceSynchronize()); double compute_s = seconds() - start; CHECK(cudaMemcpy(curr, d_curr, nbytes, cudaMemcpyDeviceToHost)); double total_s = seconds() - mem_start; finish_progress(); float point_rate = (float)conf.nx * conf.ny / (compute_s / conf.nsteps); fprintf(stderr, "iso_r4_2x: %8.10f s total, %8.10f 
s/step, %8.2f Mcells/s/step\n", total_s, compute_s / conf.nsteps, point_rate / 1000000.f); if (conf.save_text) { save_text(curr, dimx, dimy, conf.ny, conf.nx, "snap.text", conf.radius); } CHECK(cudaFreeHost(curr)); CHECK(cudaFreeHost(next)); CHECK(cudaFreeHost(vsq)); for (int i = 0; i < conf.nsrcs; i++) { free(srcs[i]); } free(srcs); CHECK(cudaFree(d_curr)); CHECK(cudaFree(d_next)); CHECK(cudaFree(d_vsq)); CHECK(cudaFree(d_c_coeff)); return 0; }
e08af85af3addcaaf8aae8c52ec7f82e60dd39d3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "RTreeMatcher.cuh" #include <device_launch_parameters.h> #include <thrust/scan.h> #include <thrust/extrema.h> #include <thrust/execution_policy.h> __global__ void CopyMasks(int Count, int *R, int *rSums, int L, int** Masks, unsigned int *IPs) { int mask = blockIdx.x * blockDim.x + threadIdx.x; while (mask < Count) { for (int l = 0; l < L; ++l) Masks[l][mask] = (IPs[mask] >> (32 - rSums[l])) & ((1 << R[l]) - 1); mask += blockDim.x * gridDim.x; } } __global__ void MarkNodesBorders(int Count, int l, int **nodesBorders, int **Masks) { int i = blockIdx.x * blockDim.x + threadIdx.x + 1; while(i < Count) { if (Masks[l - 1][i - 1] != Masks[l - 1][i] || nodesBorders[l - 1][i] == 1) nodesBorders[l][i] = 1; i += blockDim.x * gridDim.x; } } __global__ void FillIndexes(int Count, int l, int **nodesIndexes, int **startIndexes, int **endIndexes) { int i = blockIdx.x * blockDim.x + threadIdx.x + 1; while (i < Count) { if (nodesIndexes[l][i] > 0) { startIndexes[l][nodesIndexes[l][i] - 1] = i; endIndexes[l][nodesIndexes[l][i] - 2] = i; } i += blockDim.x * gridDim.x; } } __global__ void FillChildren(int l, int *LevelsSizes, int **startIndexes, int **endIndexes, int **Children, int *ChildrenCount, int **Masks, int **nodesIndexes) { int node = blockIdx.x; while (node < LevelsSizes[l]) { int i = startIndexes[l][node] + threadIdx.x; while (i < endIndexes[l][node]) { if (nodesIndexes[l + 1][i] > 0) Children[l][node * ChildrenCount[l] + Masks[l][i]] = nodesIndexes[l + 1][i]; i += blockDim.x; } node += gridDim.x; } } __inline__ __device__ int warpReduceSum(int val) { for (int offset = warpSize / 2; offset > 0; offset /= 2) val += __shfl_down(val, offset); return val; } __inline__ __device__ int blockReduceSum(int val) { static __shared__ int shared[32]; int lane = threadIdx.x % warpSize; int wid = threadIdx.x / warpSize; val = warpReduceSum(val); if (lane == 0) shared[wid] = val; 
__syncthreads(); val = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : 0; if (wid == 0) val = warpReduceSum(val); return val; } __global__ void FillListsLenghts(int l, int *R, int *rSums, int *rPreSums, int *LevelsSizes, int **startIndexes, int **endIndexes, int *Lenghts, int **ListsLenghts) { int node = blockIdx.x; while(node < LevelsSizes[l]) { int counter = 0; int i = startIndexes[l][node] + threadIdx.x; while (i < endIndexes[l][node]) { if (Lenghts[i] > rPreSums[l] && Lenghts[i] <= rSums[l]) ++counter; i += blockDim.x; } int sum = blockReduceSum(counter); if (threadIdx.x == 0) ListsLenghts[l][node] = sum; node += gridDim.x; } } __global__ void FillListItems(int l, int *R, int *rSums, int *rPreSums, int Count, int **startIndexes, int ** endIndexes, int **ListsStarts, int *LevelsSizes, int *Lenghts, int * ListItems) { extern __shared__ int insertShift[]; int node = blockIdx.x; while(node < LevelsSizes[l]) { if(threadIdx.x == 0) *insertShift = 0; for (int maskLenght = rSums[l]; maskLenght > rPreSums[l]; --maskLenght) { int i = startIndexes[l][node] + threadIdx.x; while (i < endIndexes[l][node]) { //TODO: Atomic add dedykowany dla pamici dzielonej if (Lenghts[i] == maskLenght) ListItems[(ListsStarts[l][node]) + atomicAdd(insertShift, 1)] = i; i += blockDim.x; } } __syncthreads(); node += gridDim.x; } //TODO: Dedykowane strategie wypeniania zalene od poziomu (iloci wzw, dugoci list) } __global__ void FillToLeave(int l, int *LevelsSizes, int **startIndexes, int **endIndexes, int *Lenghts, int *rPreSums, int *toLeave) { extern __shared__ int currentToLeave[]; int node = blockIdx.x; while( node < LevelsSizes[l]) { int i = startIndexes[l][node] + threadIdx.x; if (threadIdx.x == 0) *currentToLeave = 0; while( i < endIndexes[l][node]) { //TODO: Czy to musi/powinno by atomic if (Lenghts[i] > rPreSums[l]) *currentToLeave = 1; __syncthreads(); if (*currentToLeave == 1) break; i += blockDim.x; } __syncthreads(); if (threadIdx.x == 0) toLeave[node] = *currentToLeave; 
node += gridDim.x; } } __global__ void FillNewIndexes(int l, int *LevelsSizes, int *newIndexes, int *newStartIndexes, int *newEndIndexes, int **startIndexes, int **endIndexes, int **nodesBorders) { int node = blockIdx.x * blockDim.x + threadIdx.x; while( node < LevelsSizes[l]) { if (newIndexes[node] != 0) { newStartIndexes[newIndexes[node] - 1] = startIndexes[l][node]; newEndIndexes[newIndexes[node] - 1] = endIndexes[l][node]; } else { nodesBorders[l][startIndexes[l][node]] = 0; } node += gridDim.x * blockDim.x; } } void RTreeModel::Build(IPSet &set, GpuSetup setup) { Count = set.Size; L = h_R.size(); //Allocating memory for Rs GpuAssert(hipMalloc((void**)&R, L * sizeof(int)), "Cannot allocate memory for R"); GpuAssert(hipMalloc((void**)&rSums, L * sizeof(int)), "Cannot allocate memory for R"); GpuAssert(hipMalloc((void**)&rPreSums, L * sizeof(int)), "Cannot allocate memory for R"); GpuAssert(hipMemcpy(R, h_R.data(), L * sizeof(int), hipMemcpyHostToDevice), "Cannot copy R memory"); thrust::inclusive_scan(thrust::device, R, R + L, rSums); thrust::exclusive_scan(thrust::device, R, R + L, rPreSums); //Allocationg memory for masks GpuAssert(hipMalloc(reinterpret_cast<void**>(&Masks), L * sizeof(int*)), "Cannot init ip masks device memory"); GpuAssert(hipMalloc(reinterpret_cast<void**>(&Lenghts), Count * sizeof(int)), "Cannot init Lenght memory"); GpuAssert(hipMemcpy(Lenghts, set.d_Lenghts, Count * sizeof(int), hipMemcpyDeviceToDevice), "Cannot copy Lenghts"); int** h_Masks = new int*[L]; for (int l = 0; l < L; ++l) GpuAssert(hipMalloc((void**)(&h_Masks[l]), Count * sizeof(int)), "Cannot init ip masks device memory"); GpuAssert(hipMemcpy(Masks, h_Masks, L * sizeof(int*), hipMemcpyHostToDevice), "Cannot copy MaxIP pointers to GPU"); delete[] h_Masks; //Copying masks from IPSet and partitioning them hipLaunchKernelGGL(( CopyMasks) , dim3(setup.Blocks), dim3(setup.Threads) , 0, 0, Count, R, rSums, L, Masks, set.d_IPs); GpuAssert(hipGetLastError(), "Error while launching 
CopyMasks kernel"); GpuAssert(hipDeviceSynchronize(), "Error while running CopyMasks kernel"); //Allocating memory for nodesBorders int ** nodesBorders; int ** nodesIndexes; GpuAssert(hipMalloc(reinterpret_cast<void**>(&nodesBorders), L * sizeof(int*)), "Cannot init nodes borders device memory"); GpuAssert(hipMalloc(reinterpret_cast<void**>(&nodesIndexes), L * sizeof(int*)), "Cannot init nodes indexes device memory"); int **h_nodesBorders = new int*[L]; int **h_nodesIndexes = new int*[L]; for(int l = 0; l < L; ++l) { GpuAssert(hipMalloc(reinterpret_cast<void**>(&h_nodesBorders[l]), Count * sizeof(int)), "Cannot init nodes borders device memory"); GpuAssert(hipMalloc(reinterpret_cast<void**>(&h_nodesIndexes[l]), Count * sizeof(int)), "Cannot init nodes indexes device memory"); } GpuAssert(hipMemcpy(nodesBorders, h_nodesBorders, L * sizeof(int*), hipMemcpyHostToDevice), "Cannot copy nodes borders device memory"); GpuAssert(hipMemcpy(nodesIndexes, h_nodesIndexes, L * sizeof(int*), hipMemcpyHostToDevice), "Cannot copy nodes indexes device memory"); //Marking first nodes on each level, setting rest of the nodesBorders memory to 0 int mark = 1; for (int l = 0; l < L; ++l) { GpuAssert(hipMemset(h_nodesBorders[l], 0, Count * sizeof(int)), "Cannot clear nodesBorders memory"); GpuAssert(hipMemcpy(h_nodesBorders[l], &mark, sizeof(int), hipMemcpyHostToDevice), "Cannot mark nodes start"); } //Marking nodes borders for(int l = 1; l < L; ++l) { hipLaunchKernelGGL(( MarkNodesBorders) , dim3(setup.Blocks), dim3(setup.Threads) , 0, 0, Count, l, nodesBorders, Masks); GpuAssert(hipGetLastError(), "Error while launching MarkNodesBorders kernel"); GpuAssert(hipDeviceSynchronize(), "Error while running MarkNodesBorders kernel"); } //Counting number of nodes and indexing them on each level. 
Indexing is done from 1 up, since 0 means empty value LevelsSizes = new int[L]; for(int l = 0; l < L; ++l) { thrust::inclusive_scan(thrust::device, h_nodesBorders[l], h_nodesBorders[l] + Count, h_nodesIndexes[l]); GpuAssert(hipMemcpy(LevelsSizes + l, h_nodesIndexes[l] + Count - 1, sizeof(int), hipMemcpyDeviceToHost), "Cannot copy level size"); thrust::transform(thrust::device, h_nodesBorders[l], h_nodesBorders[l] + Count, h_nodesIndexes[l], h_nodesIndexes[l], thrust::multiplies<int>()); } int *d_LevelSizes; GpuAssert(hipMalloc((void**)&d_LevelSizes, L * sizeof(int)), "Cannot init d_LevelSizes memory"); GpuAssert(hipMemcpy(d_LevelSizes, LevelsSizes, L * sizeof(int), hipMemcpyHostToDevice), "Cannot copy LevelSizes memory"); //Filling start and end indexes of tree nodes int ** startIndexes; int ** endIndexes; GpuAssert(hipMalloc(reinterpret_cast<void**>(&startIndexes), L * sizeof(int*)), "Cannot init startIndexes device memory"); GpuAssert(hipMalloc(reinterpret_cast<void**>(&endIndexes), L * sizeof(int*)), "Cannot init endIndexes device memory"); int **h_startIndexes = new int*[L]; int **h_endIndexes = new int*[L]; for (int l = 0; l < L; ++l) { GpuAssert(hipMalloc(reinterpret_cast<void**>(&h_startIndexes[l]), LevelsSizes[l] * sizeof(int)), "Cannot init startIndexes device memory"); GpuAssert(hipMalloc(reinterpret_cast<void**>(&h_endIndexes[l]), LevelsSizes[l] * sizeof(int)), "Cannot init endIndexes device memory"); GpuAssert(hipMemset(h_startIndexes[l], 0, sizeof(int)), "Cannot mark first startIndex"); GpuAssert(hipMemcpy(h_endIndexes[l] + (LevelsSizes[l] - 1), &Count, sizeof(int), hipMemcpyHostToDevice), "Cannot mark last endIndex"); } GpuAssert(hipMemcpy(startIndexes, h_startIndexes, L * sizeof(int*), hipMemcpyHostToDevice), "Cannot copy startIndexes device memory"); GpuAssert(hipMemcpy(endIndexes, h_endIndexes, L * sizeof(int*), hipMemcpyHostToDevice), "Cannot copy endIndexes device memory"); for (int l = 1; l < L; ++l) { FillIndexes << <setup.Blocks, setup.Threads 
>> > (Count, l, nodesIndexes, startIndexes, endIndexes); GpuAssert(hipGetLastError(), "Error while launching FillIndexes kernel"); GpuAssert(hipDeviceSynchronize(), "Error while running FillIndexes kernel"); } //Removing empty nodes int *d_toLeave; for(int l = 0; l < L; ++l) { GpuAssert(hipMalloc((void**)&d_toLeave, LevelsSizes[l] * sizeof(int)), "Cannot allocate toLeave memory"); //TODO: Ptla for mogaby by przeniesiona do kernela hipLaunchKernelGGL(( FillToLeave), dim3(setup.Blocks), dim3(setup.Threads), sizeof(int) , 0, l, d_LevelSizes, startIndexes, endIndexes, Lenghts, rPreSums, d_toLeave); GpuAssert(hipGetLastError(), "Error while launching FillToLeave kernel"); GpuAssert(hipDeviceSynchronize(), "Error while running FillToLeave kernel"); int *newIndexes; GpuAssert(hipMalloc((void**)&newIndexes, LevelsSizes[l] * sizeof(int)), "Cannot allocate newIndexes memory"); thrust::inclusive_scan(thrust::device, d_toLeave, d_toLeave + LevelsSizes[l], newIndexes); int newLevelSize; GpuAssert(hipMemcpy(&newLevelSize, newIndexes + LevelsSizes[l] - 1, sizeof(int), hipMemcpyDeviceToHost), "Cannot copy new level size"); thrust::transform(thrust::device, d_toLeave, d_toLeave + LevelsSizes[l], newIndexes, newIndexes, thrust::multiplies<int>()); int *newStartIndexes; int *newEndIndexes; GpuAssert(hipMalloc((void**)&newStartIndexes, newLevelSize * sizeof(int)), "Cannot allocate newStartIndexes memory"); GpuAssert(hipMalloc((void**)&newEndIndexes, newLevelSize * sizeof(int)), "Cannot allocate newEndIndexes memory"); FillNewIndexes << <setup.Blocks, setup.Threads >> > (l, d_LevelSizes, newIndexes, newStartIndexes, newEndIndexes, startIndexes, endIndexes, nodesBorders); GpuAssert(hipGetLastError(), "Error while launching FillNewIndexes kernel"); GpuAssert(hipDeviceSynchronize(), "Error while running FillNewIndexes kernel"); thrust::inclusive_scan(thrust::device, h_nodesBorders[l], h_nodesBorders[l] + Count, h_nodesIndexes[l]); thrust::transform(thrust::device, h_nodesBorders[l], 
h_nodesBorders[l] + Count, h_nodesIndexes[l], h_nodesIndexes[l], thrust::multiplies<int>()); GpuAssert(hipFree(h_startIndexes[l]), "Cannot free startIndexes memory"); h_startIndexes[l] = newStartIndexes; GpuAssert(hipFree(h_endIndexes[l]), "Cannot free endIndexes memory"); h_endIndexes[l] = newEndIndexes; LevelsSizes[l] = newLevelSize; GpuAssert(hipFree(d_toLeave), "Cannot free toLeave memory"); GpuAssert(hipFree(newIndexes), "Cannot free newIndexes memory"); } GpuAssert(hipMemcpy(d_LevelSizes, LevelsSizes, L * sizeof(int), hipMemcpyHostToDevice), "Cannot copy LevelSizes memory"); GpuAssert(hipMemcpy(startIndexes, h_startIndexes, L * sizeof(int*), hipMemcpyHostToDevice), "Cannot copy startIndexes device memory"); GpuAssert(hipMemcpy(endIndexes, h_endIndexes, L * sizeof(int*), hipMemcpyHostToDevice), "Cannot copy endIndexes device memory"); //Filling children of tree nodes int *h_ChildrenCount = new int[L-1]; for(int l = 0; l < L-1; ++l) h_ChildrenCount[l] = 2 << (h_R[l] - 1); GpuAssert(hipMalloc((void**)&ChildrenCount, (L-1) * sizeof(int)), "Cannot init Children memory"); GpuAssert(hipMalloc((void**)&Children, (L-1) * sizeof(int*)), "Cannot init Children memory"); GpuAssert(hipMemcpy(ChildrenCount, h_ChildrenCount, (L-1) * sizeof(int), hipMemcpyHostToDevice), "Cannot copy Children memory"); h_Children = new int*[L-1]; for(int l = 0; l < L-1; ++l) GpuAssert(hipMalloc((void**)&h_Children[l], LevelsSizes[l] * h_ChildrenCount[l] * sizeof(int)), "Cannot init children memory"); GpuAssert(hipMemcpy(Children, h_Children, (L - 1) * sizeof(int*), hipMemcpyHostToDevice), "Cannot copy Children memory"); for (int l = 0; l < L - 1; ++l) { thrust::fill_n(thrust::device, h_Children[l], LevelsSizes[l] * h_ChildrenCount[l], 0); FillChildren << <setup.Blocks, setup.Threads >> > (l, d_LevelSizes, startIndexes, endIndexes, Children, ChildrenCount, Masks, nodesIndexes); GpuAssert(hipGetLastError(), "Error while launching FillChildren kernel"); GpuAssert(hipDeviceSynchronize(), "Error 
while running FillChildren kernel"); } //Building lists of items for each node GpuAssert(hipMalloc((void**)&ListItems, Count * sizeof(int)), "Cannot init ListItems memory"); GpuAssert(hipMalloc((void**)&ListsStarts, L * sizeof(int*)), "Cannot init ListsStarts memory"); GpuAssert(hipMalloc((void**)&ListsLenghts, L * sizeof(int*)), "Cannot init ListsLenghts memory"); h_ListsStarts = new int*[L]; h_ListsLenghts = new int*[L]; for(int l = 0; l < L; ++l) { GpuAssert(hipMalloc((void**)&h_ListsStarts[l], LevelsSizes[l] * sizeof(int)), "Cannot init ListsStarts memory"); GpuAssert(hipMalloc((void**)&h_ListsLenghts[l], LevelsSizes[l] * sizeof(int)), "Cannot init ListsLenghts memory"); } GpuAssert(hipMemcpy(ListsStarts, h_ListsStarts, L * sizeof(int*), hipMemcpyHostToDevice), "Cannot copy ListsStarts memory"); GpuAssert(hipMemcpy(ListsLenghts, h_ListsLenghts, L * sizeof(int*), hipMemcpyHostToDevice), "Cannot copy ListsLenghts memory"); for(int l = 0; l < L; ++l) { thrust::fill_n(thrust::device, h_ListsLenghts[l], LevelsSizes[l], 0); FillListsLenghts << <setup.Blocks, setup.Threads >> > (l, R, rSums, rPreSums, d_LevelSizes, startIndexes, endIndexes, Lenghts, ListsLenghts); GpuAssert(hipGetLastError(), "Error while launching FillListsLenghts kernel"); GpuAssert(hipDeviceSynchronize(), "Error while running FillListsLenghts kernel"); } //Filling lists start indexes totalListItemsPerLevel = new int[L]; for(int l = 0; l < L; ++l) { thrust::exclusive_scan(thrust::device, h_ListsLenghts[l], h_ListsLenghts[l] + LevelsSizes[l], h_ListsStarts[l]); totalListItemsPerLevel[l] = thrust::reduce(thrust::device, h_ListsLenghts[l], h_ListsLenghts[l] + LevelsSizes[l]); } //Shifting lists int shift = 0; for (int l = 1; l < L; ++l) { shift += totalListItemsPerLevel[l - 1]; thrust::for_each_n(thrust::device, h_ListsStarts[l], LevelsSizes[l], thrust::placeholders::_1 += shift); } //Filling list items for(int l = 0; l < L; ++l) { FillListItems << <setup.Blocks, setup.Threads, setup.Blocks * 
sizeof(int) >> > (l, R, rSums, rPreSums, Count, startIndexes, endIndexes, ListsStarts, d_LevelSizes, Lenghts, ListItems); GpuAssert(hipGetLastError(), "Error while launching FillListItems kernel"); GpuAssert(hipDeviceSynchronize(), "Error while running FillListItems kernel"); } //int *c; //int *ni; //for (int i = 0; i < L; ++i) // printf("%d ", LevelsSizes[i]); //cout << endl; //for (int i = 0; i < L-1; ++i) // printf("%d ", h_ChildrenCount[i]); //cout << endl; //int *si; //int *ei; //for(int i = 0; i < L; ++i) //{ // si = new int[LevelsSizes[i]]; // ei = new int[LevelsSizes[i]]; // hipMemcpy(si, h_startIndexes[i], LevelsSizes[i] * sizeof(int), hipMemcpyDeviceToHost); // hipMemcpy(ei, h_endIndexes[i], LevelsSizes[i] * sizeof(int), hipMemcpyDeviceToHost); // for (int j = 0; j < LevelsSizes[i]; ++j) // printf("(%5d;%5d)", si[j], ei[j]); // cout << endl; // delete[] si; // delete[] ei; //} //for(int i = 0; i < L; ++i) //{ // ni = new int[Count]; // hipMemcpy(ni, h_nodesIndexes[i], Count * sizeof(int), hipMemcpyDeviceToHost); // for (int j = 0; j < Count; ++j) // printf("%5d", ni[j]); // cout << endl; // delete[]ni; //} //cout << "===========" << endl << "===========" << endl << "===========" << endl << endl << endl; //for(int i = 0; i < L-1; ++i) //{ // if (LevelsSizes[i] * h_ChildrenCount[i]> 0) // { // c = new int[LevelsSizes[i] * h_ChildrenCount[i]]; // hipMemcpy(c, h_Children[i], LevelsSizes[i] * h_ChildrenCount[i] * sizeof(int), hipMemcpyDeviceToHost); // for(int j = 0; j < LevelsSizes[i] * h_ChildrenCount[i]; ++j) // printf("%5d", c[j]); // cout << endl; // delete[]c; // } //} //cout << "===========" << endl << "===========" << endl << "===========" << endl << endl << endl; //for(int i = 0; i < L; ++i) //{ // if (LevelsSizes[i] > 0) // { // c = new int[LevelsSizes[i]]; // hipMemcpy(c, h_ListsStarts[i], LevelsSizes[i] * sizeof(int), hipMemcpyDeviceToHost); // for (int j = 0; j < LevelsSizes[i]; ++j) // printf("%5d", c[j]); // cout << endl; // delete[]c; // } //} 
//cout << "===========" << endl << "===========" << endl << "===========" << endl << endl << endl; //for (int i = 0; i < L; ++i) //{ // if (LevelsSizes[i] > 0) // { // c = new int[LevelsSizes[i]]; // hipMemcpy(c, h_ListsLenghts[i], LevelsSizes[i] * sizeof(int), hipMemcpyDeviceToHost); // for (int j = 0; j < LevelsSizes[i]; ++j) // printf("%5d", c[j]); // cout << endl; // delete[]c; // } //} //Cleanup for(int i = 0; i < L; ++i) { GpuAssert(hipFree(h_nodesBorders[i]), "Cannot free nodes borders device memory."); GpuAssert(hipFree(h_nodesIndexes[i]), "Cannot free nodes indexes device memory."); } GpuAssert(hipFree(nodesBorders), "Cannot free nodes borders device memory."); GpuAssert(hipFree(nodesIndexes), "Cannot free nodes indexes device memory."); delete[] h_nodesBorders; delete[] h_nodesIndexes; GpuAssert(hipFree(d_LevelSizes), "Cannot free d_LevelSizes memory"); for (int i = 0; i < L; ++i) { GpuAssert(hipFree(h_startIndexes[i]), "Cannot free startIndexes device memory."); GpuAssert(hipFree(h_endIndexes[i]), "Cannot free endIndexes device memory."); } GpuAssert(hipFree(startIndexes), "Cannot free startIndexes device memory."); GpuAssert(hipFree(endIndexes), "Cannot free endIndexes device memory."); delete[] h_startIndexes; delete[] h_endIndexes; delete[] h_ChildrenCount; } int RTreeModel::GetMinListLenght(int i) { if (LevelsSizes[i] == 0) return 0; int min; int* minP = thrust::min_element(thrust::device, h_ListsLenghts[i], h_ListsLenghts[i] + LevelsSizes[i]); GpuAssert(hipMemcpy(&min, minP, sizeof(int), hipMemcpyDeviceToHost), "Cannot copy min value"); return min; } int RTreeModel::GetMaxListLenght(int i) { if (LevelsSizes[i] == 0) return 0; int max; int* maxP = thrust::max_element(thrust::device, h_ListsLenghts[i], h_ListsLenghts[i] + LevelsSizes[i]); GpuAssert(hipMemcpy(&max, maxP, sizeof(int), hipMemcpyDeviceToHost), "Cannot copy max value"); return max; } void RTreeModel::Dispose() { if(Masks != NULL) { int** h_Masks = new int*[L]; GpuAssert(hipMemcpy(h_Masks, 
Masks, L * sizeof(int*), hipMemcpyDeviceToHost), "Cannot copy MaxIP pointers to CPU"); for (int i = 0; i < L; ++i) GpuAssert(hipFree(h_Masks[i]), "Cannot free MaxIP memory"); delete[] h_Masks; GpuAssert(hipFree(Masks), "Cannot free MaxIP memory"); Masks = NULL; } if(R != NULL) { GpuAssert(hipFree(R), "Cannot free R memory"); GpuAssert(hipFree(rSums), "Cannot free rSums memory"); GpuAssert(hipFree(rPreSums), "Cannot free rPreSums memory"); R = rSums = rPreSums = NULL; } if(Lenghts != NULL) { GpuAssert(hipFree(Lenghts), "Cannot free Lenghts memory."); Lenghts = NULL; } if(LevelsSizes != NULL) { delete[] LevelsSizes; LevelsSizes = NULL; } if(Children != NULL) { for (int l = 0; l < L - 1; ++l) GpuAssert(hipFree(h_Children[l]), "Cannot free Children memory"); GpuAssert(hipFree(Children), "Cannot free children memory"); GpuAssert(hipFree(ChildrenCount), "Cannot free Children memory"); delete[] h_Children; Children = h_Children = NULL; ChildrenCount = NULL; } if(ListItems != NULL) { GpuAssert(hipFree(ListItems), "Cannot free ListItems memory"); ListItems = NULL; } if(ListsStarts != NULL) { for (int l = 0; l < L; ++l) GpuAssert(hipFree(h_ListsStarts[l]), "Cannot free ListsStarts memory"); GpuAssert(hipFree(ListsStarts), "Cannot free ListsStarts memory"); delete[] h_ListsStarts; ListsStarts = h_ListsStarts = NULL; } if (ListsLenghts != NULL) { for (int l = 0; l < L; ++l) GpuAssert(hipFree(h_ListsLenghts[l]), "Cannot free ListsLenghts memory"); GpuAssert(hipFree(ListsLenghts), "Cannot free ListsLenghts memory"); delete[] h_ListsLenghts; ListsLenghts = h_ListsLenghts = NULL; } if(totalListItemsPerLevel != NULL) { delete[] totalListItemsPerLevel; totalListItemsPerLevel = NULL; } } void RTreeMatcher::BuildModel(IPSet &set) { Setup = set.Setup; Timer timer; timer.Start(); Model.Build(set, Setup); ModelBuildTime = timer.Stop(); } __global__ void MatchIPs(int ** Children, int *ChildrenCount, int **Masks, int *result, int **ListsStarts, int **ListsLenghts, int *Lenghts, int L, int 
*R, int *rPreSums, int *ListItems, int **ips, int Count) { //TODO: Wyrwnaie extern __shared__ int sharedMem[]; int *nodesToCheck = sharedMem + threadIdx.x * L; int i = blockDim.x * blockIdx.x + threadIdx.x; while( i < Count) { //Find nodes to be searched nodesToCheck[0] = 1; for (int l = 1; l < L; ++l) { nodesToCheck[l] = 0; if (nodesToCheck[l - 1] != 0) nodesToCheck[l] = Children[l - 1][(nodesToCheck[l - 1] - 1)*ChildrenCount[l - 1] + ips[l - 1][i]]; } //Search lists for (int l = L - 1; l >= 0 && result[i] == -1; --l) if (nodesToCheck[l] != 0) { for (int s = ListsStarts[l][nodesToCheck[l] - 1]; s < ListsStarts[l][nodesToCheck[l] - 1] + ListsLenghts[l][nodesToCheck[l] - 1] && result[i] == -1; ++s) { int shitf = R[l] - (Lenghts[ListItems[s]] - rPreSums[l]); if (Masks[l][ListItems[s]] >> shitf == ips[l][i] >> shitf) result[i] = ListItems[s]; } } i += gridDim.x * blockDim.x; } } Result RTreeMatcher::Match(IPSet &set) { Result result(set.Size); result.MatchedMaskIndex = new int[set.Size]; Timer timer; timer.Start(); int **d_IPs; int *d_IPsLenghts; GpuAssert(hipMalloc(reinterpret_cast<void**>(&d_IPs), Model.L * sizeof(int*)), "Cannot init ip masks device memory"); GpuAssert(hipMalloc(reinterpret_cast<void**>(&d_IPsLenghts), set.Size * sizeof(int)), "Cannot init Lenght mamory"); int** h_Masks = new int*[Model.L]; for (int l = 0; l < Model.L; ++l) GpuAssert(hipMalloc(reinterpret_cast<void**>(&h_Masks[l]), set.Size * sizeof(int)), "Cannot init ip masks device memory"); GpuAssert(hipMemcpy(d_IPs, h_Masks, Model.L * sizeof(int*), hipMemcpyHostToDevice), "Cannot copy MaxIP pointers to GPU"); //Copying ips from IPSet and partitioning them CopyMasks << < Setup.Blocks, Setup.Threads >> > (set.Size, Model.R, Model.rSums, Model.L, d_IPs, set.d_IPs); GpuAssert(hipGetLastError(), "Error while launching CopyMasks kernel"); GpuAssert(hipDeviceSynchronize(), "Error while running CopyMasks kernel"); int *d_Result; GpuAssert(hipMalloc((void**)&d_Result, result.IpsToMatchCount * 
sizeof(int)), "Cannot allocate memory for Result"); thrust::fill_n(thrust::device, d_Result, result.IpsToMatchCount, -1); //Matching MatchIPs << <Setup.Blocks, Setup.Threads, Setup.Threads * Model.L * sizeof(int)>> > (Model.Children, Model.ChildrenCount, Model.Masks, d_Result, Model.ListsStarts, Model.ListsLenghts, Model.Lenghts, Model.L, Model.R, Model.rPreSums, Model.ListItems, d_IPs, set.Size); GpuAssert(hipGetLastError(), "Error while launching MatchIPs kernel"); GpuAssert(hipDeviceSynchronize(), "Error while running MatchIPs kernel"); GpuAssert(hipMemcpy(result.MatchedMaskIndex, d_Result, result.IpsToMatchCount * sizeof(int), hipMemcpyDeviceToHost), "Cannot copy Result data"); for (int l = 0; l < Model.L; ++l) GpuAssert(hipFree(h_Masks[l]), "Cannot free ip masks device memory"); GpuAssert(hipFree(d_Result), "Cannot free Result memory"); GpuAssert(hipFree(d_IPs), "Cannot free d_IPs memory"); GpuAssert(hipFree(d_IPsLenghts), "Cannot free d_IPsLenghts memory"); delete[] h_Masks; result.MatchingTime = timer.Stop(); return result; }
e08af85af3addcaaf8aae8c52ec7f82e60dd39d3.cu
#include "RTreeMatcher.cuh" #include <device_launch_parameters.h> #include <thrust/scan.h> #include <thrust/extrema.h> #include <thrust/execution_policy.h> __global__ void CopyMasks(int Count, int *R, int *rSums, int L, int** Masks, unsigned int *IPs) { int mask = blockIdx.x * blockDim.x + threadIdx.x; while (mask < Count) { for (int l = 0; l < L; ++l) Masks[l][mask] = (IPs[mask] >> (32 - rSums[l])) & ((1 << R[l]) - 1); mask += blockDim.x * gridDim.x; } } __global__ void MarkNodesBorders(int Count, int l, int **nodesBorders, int **Masks) { int i = blockIdx.x * blockDim.x + threadIdx.x + 1; while(i < Count) { if (Masks[l - 1][i - 1] != Masks[l - 1][i] || nodesBorders[l - 1][i] == 1) nodesBorders[l][i] = 1; i += blockDim.x * gridDim.x; } } __global__ void FillIndexes(int Count, int l, int **nodesIndexes, int **startIndexes, int **endIndexes) { int i = blockIdx.x * blockDim.x + threadIdx.x + 1; while (i < Count) { if (nodesIndexes[l][i] > 0) { startIndexes[l][nodesIndexes[l][i] - 1] = i; endIndexes[l][nodesIndexes[l][i] - 2] = i; } i += blockDim.x * gridDim.x; } } __global__ void FillChildren(int l, int *LevelsSizes, int **startIndexes, int **endIndexes, int **Children, int *ChildrenCount, int **Masks, int **nodesIndexes) { int node = blockIdx.x; while (node < LevelsSizes[l]) { int i = startIndexes[l][node] + threadIdx.x; while (i < endIndexes[l][node]) { if (nodesIndexes[l + 1][i] > 0) Children[l][node * ChildrenCount[l] + Masks[l][i]] = nodesIndexes[l + 1][i]; i += blockDim.x; } node += gridDim.x; } } __inline__ __device__ int warpReduceSum(int val) { for (int offset = warpSize / 2; offset > 0; offset /= 2) val += __shfl_down(val, offset); return val; } __inline__ __device__ int blockReduceSum(int val) { static __shared__ int shared[32]; int lane = threadIdx.x % warpSize; int wid = threadIdx.x / warpSize; val = warpReduceSum(val); if (lane == 0) shared[wid] = val; __syncthreads(); val = (threadIdx.x < blockDim.x / warpSize) ? 
shared[lane] : 0; if (wid == 0) val = warpReduceSum(val); return val; } __global__ void FillListsLenghts(int l, int *R, int *rSums, int *rPreSums, int *LevelsSizes, int **startIndexes, int **endIndexes, int *Lenghts, int **ListsLenghts) { int node = blockIdx.x; while(node < LevelsSizes[l]) { int counter = 0; int i = startIndexes[l][node] + threadIdx.x; while (i < endIndexes[l][node]) { if (Lenghts[i] > rPreSums[l] && Lenghts[i] <= rSums[l]) ++counter; i += blockDim.x; } int sum = blockReduceSum(counter); if (threadIdx.x == 0) ListsLenghts[l][node] = sum; node += gridDim.x; } } __global__ void FillListItems(int l, int *R, int *rSums, int *rPreSums, int Count, int **startIndexes, int ** endIndexes, int **ListsStarts, int *LevelsSizes, int *Lenghts, int * ListItems) { extern __shared__ int insertShift[]; int node = blockIdx.x; while(node < LevelsSizes[l]) { if(threadIdx.x == 0) *insertShift = 0; for (int maskLenght = rSums[l]; maskLenght > rPreSums[l]; --maskLenght) { int i = startIndexes[l][node] + threadIdx.x; while (i < endIndexes[l][node]) { //TODO: Atomic add dedykowany dla pamięci dzielonej if (Lenghts[i] == maskLenght) ListItems[(ListsStarts[l][node]) + atomicAdd(insertShift, 1)] = i; i += blockDim.x; } } __syncthreads(); node += gridDim.x; } //TODO: Dedykowane strategie wypełniania zależne od poziomu (ilości węzłów, długości list) } __global__ void FillToLeave(int l, int *LevelsSizes, int **startIndexes, int **endIndexes, int *Lenghts, int *rPreSums, int *toLeave) { extern __shared__ int currentToLeave[]; int node = blockIdx.x; while( node < LevelsSizes[l]) { int i = startIndexes[l][node] + threadIdx.x; if (threadIdx.x == 0) *currentToLeave = 0; while( i < endIndexes[l][node]) { //TODO: Czy to musi/powinno być atomic if (Lenghts[i] > rPreSums[l]) *currentToLeave = 1; __syncthreads(); if (*currentToLeave == 1) break; i += blockDim.x; } __syncthreads(); if (threadIdx.x == 0) toLeave[node] = *currentToLeave; node += gridDim.x; } } __global__ void 
FillNewIndexes(int l, int *LevelsSizes, int *newIndexes, int *newStartIndexes, int *newEndIndexes, int **startIndexes, int **endIndexes, int **nodesBorders) { int node = blockIdx.x * blockDim.x + threadIdx.x; while( node < LevelsSizes[l]) { if (newIndexes[node] != 0) { newStartIndexes[newIndexes[node] - 1] = startIndexes[l][node]; newEndIndexes[newIndexes[node] - 1] = endIndexes[l][node]; } else { nodesBorders[l][startIndexes[l][node]] = 0; } node += gridDim.x * blockDim.x; } } void RTreeModel::Build(IPSet &set, GpuSetup setup) { Count = set.Size; L = h_R.size(); //Allocating memory for Rs GpuAssert(cudaMalloc((void**)&R, L * sizeof(int)), "Cannot allocate memory for R"); GpuAssert(cudaMalloc((void**)&rSums, L * sizeof(int)), "Cannot allocate memory for R"); GpuAssert(cudaMalloc((void**)&rPreSums, L * sizeof(int)), "Cannot allocate memory for R"); GpuAssert(cudaMemcpy(R, h_R.data(), L * sizeof(int), cudaMemcpyHostToDevice), "Cannot copy R memory"); thrust::inclusive_scan(thrust::device, R, R + L, rSums); thrust::exclusive_scan(thrust::device, R, R + L, rPreSums); //Allocationg memory for masks GpuAssert(cudaMalloc(reinterpret_cast<void**>(&Masks), L * sizeof(int*)), "Cannot init ip masks device memory"); GpuAssert(cudaMalloc(reinterpret_cast<void**>(&Lenghts), Count * sizeof(int)), "Cannot init Lenght memory"); GpuAssert(cudaMemcpy(Lenghts, set.d_Lenghts, Count * sizeof(int), cudaMemcpyDeviceToDevice), "Cannot copy Lenghts"); int** h_Masks = new int*[L]; for (int l = 0; l < L; ++l) GpuAssert(cudaMalloc((void**)(&h_Masks[l]), Count * sizeof(int)), "Cannot init ip masks device memory"); GpuAssert(cudaMemcpy(Masks, h_Masks, L * sizeof(int*), cudaMemcpyHostToDevice), "Cannot copy MaxIP pointers to GPU"); delete[] h_Masks; //Copying masks from IPSet and partitioning them CopyMasks <<< setup.Blocks, setup.Threads >>> (Count, R, rSums, L, Masks, set.d_IPs); GpuAssert(cudaGetLastError(), "Error while launching CopyMasks kernel"); GpuAssert(cudaDeviceSynchronize(), "Error 
while running CopyMasks kernel"); //Allocating memory for nodesBorders int ** nodesBorders; int ** nodesIndexes; GpuAssert(cudaMalloc(reinterpret_cast<void**>(&nodesBorders), L * sizeof(int*)), "Cannot init nodes borders device memory"); GpuAssert(cudaMalloc(reinterpret_cast<void**>(&nodesIndexes), L * sizeof(int*)), "Cannot init nodes indexes device memory"); int **h_nodesBorders = new int*[L]; int **h_nodesIndexes = new int*[L]; for(int l = 0; l < L; ++l) { GpuAssert(cudaMalloc(reinterpret_cast<void**>(&h_nodesBorders[l]), Count * sizeof(int)), "Cannot init nodes borders device memory"); GpuAssert(cudaMalloc(reinterpret_cast<void**>(&h_nodesIndexes[l]), Count * sizeof(int)), "Cannot init nodes indexes device memory"); } GpuAssert(cudaMemcpy(nodesBorders, h_nodesBorders, L * sizeof(int*), cudaMemcpyHostToDevice), "Cannot copy nodes borders device memory"); GpuAssert(cudaMemcpy(nodesIndexes, h_nodesIndexes, L * sizeof(int*), cudaMemcpyHostToDevice), "Cannot copy nodes indexes device memory"); //Marking first nodes on each level, setting rest of the nodesBorders memory to 0 int mark = 1; for (int l = 0; l < L; ++l) { GpuAssert(cudaMemset(h_nodesBorders[l], 0, Count * sizeof(int)), "Cannot clear nodesBorders memory"); GpuAssert(cudaMemcpy(h_nodesBorders[l], &mark, sizeof(int), cudaMemcpyHostToDevice), "Cannot mark nodes start"); } //Marking nodes borders for(int l = 1; l < L; ++l) { MarkNodesBorders <<<setup.Blocks, setup.Threads >>>(Count, l, nodesBorders, Masks); GpuAssert(cudaGetLastError(), "Error while launching MarkNodesBorders kernel"); GpuAssert(cudaDeviceSynchronize(), "Error while running MarkNodesBorders kernel"); } //Counting number of nodes and indexing them on each level. 
Indexing is done from 1 up, since 0 means empty value LevelsSizes = new int[L]; for(int l = 0; l < L; ++l) { thrust::inclusive_scan(thrust::device, h_nodesBorders[l], h_nodesBorders[l] + Count, h_nodesIndexes[l]); GpuAssert(cudaMemcpy(LevelsSizes + l, h_nodesIndexes[l] + Count - 1, sizeof(int), cudaMemcpyDeviceToHost), "Cannot copy level size"); thrust::transform(thrust::device, h_nodesBorders[l], h_nodesBorders[l] + Count, h_nodesIndexes[l], h_nodesIndexes[l], thrust::multiplies<int>()); } int *d_LevelSizes; GpuAssert(cudaMalloc((void**)&d_LevelSizes, L * sizeof(int)), "Cannot init d_LevelSizes memory"); GpuAssert(cudaMemcpy(d_LevelSizes, LevelsSizes, L * sizeof(int), cudaMemcpyHostToDevice), "Cannot copy LevelSizes memory"); //Filling start and end indexes of tree nodes int ** startIndexes; int ** endIndexes; GpuAssert(cudaMalloc(reinterpret_cast<void**>(&startIndexes), L * sizeof(int*)), "Cannot init startIndexes device memory"); GpuAssert(cudaMalloc(reinterpret_cast<void**>(&endIndexes), L * sizeof(int*)), "Cannot init endIndexes device memory"); int **h_startIndexes = new int*[L]; int **h_endIndexes = new int*[L]; for (int l = 0; l < L; ++l) { GpuAssert(cudaMalloc(reinterpret_cast<void**>(&h_startIndexes[l]), LevelsSizes[l] * sizeof(int)), "Cannot init startIndexes device memory"); GpuAssert(cudaMalloc(reinterpret_cast<void**>(&h_endIndexes[l]), LevelsSizes[l] * sizeof(int)), "Cannot init endIndexes device memory"); GpuAssert(cudaMemset(h_startIndexes[l], 0, sizeof(int)), "Cannot mark first startIndex"); GpuAssert(cudaMemcpy(h_endIndexes[l] + (LevelsSizes[l] - 1), &Count, sizeof(int), cudaMemcpyHostToDevice), "Cannot mark last endIndex"); } GpuAssert(cudaMemcpy(startIndexes, h_startIndexes, L * sizeof(int*), cudaMemcpyHostToDevice), "Cannot copy startIndexes device memory"); GpuAssert(cudaMemcpy(endIndexes, h_endIndexes, L * sizeof(int*), cudaMemcpyHostToDevice), "Cannot copy endIndexes device memory"); for (int l = 1; l < L; ++l) { FillIndexes << 
<setup.Blocks, setup.Threads >> > (Count, l, nodesIndexes, startIndexes, endIndexes); GpuAssert(cudaGetLastError(), "Error while launching FillIndexes kernel"); GpuAssert(cudaDeviceSynchronize(), "Error while running FillIndexes kernel"); } //Removing empty nodes int *d_toLeave; for(int l = 0; l < L; ++l) { GpuAssert(cudaMalloc((void**)&d_toLeave, LevelsSizes[l] * sizeof(int)), "Cannot allocate toLeave memory"); //TODO: Pętla for mogłaby być przeniesiona do kernela FillToLeave<<<setup.Blocks, setup.Threads, sizeof(int) >>> (l, d_LevelSizes, startIndexes, endIndexes, Lenghts, rPreSums, d_toLeave); GpuAssert(cudaGetLastError(), "Error while launching FillToLeave kernel"); GpuAssert(cudaDeviceSynchronize(), "Error while running FillToLeave kernel"); int *newIndexes; GpuAssert(cudaMalloc((void**)&newIndexes, LevelsSizes[l] * sizeof(int)), "Cannot allocate newIndexes memory"); thrust::inclusive_scan(thrust::device, d_toLeave, d_toLeave + LevelsSizes[l], newIndexes); int newLevelSize; GpuAssert(cudaMemcpy(&newLevelSize, newIndexes + LevelsSizes[l] - 1, sizeof(int), cudaMemcpyDeviceToHost), "Cannot copy new level size"); thrust::transform(thrust::device, d_toLeave, d_toLeave + LevelsSizes[l], newIndexes, newIndexes, thrust::multiplies<int>()); int *newStartIndexes; int *newEndIndexes; GpuAssert(cudaMalloc((void**)&newStartIndexes, newLevelSize * sizeof(int)), "Cannot allocate newStartIndexes memory"); GpuAssert(cudaMalloc((void**)&newEndIndexes, newLevelSize * sizeof(int)), "Cannot allocate newEndIndexes memory"); FillNewIndexes << <setup.Blocks, setup.Threads >> > (l, d_LevelSizes, newIndexes, newStartIndexes, newEndIndexes, startIndexes, endIndexes, nodesBorders); GpuAssert(cudaGetLastError(), "Error while launching FillNewIndexes kernel"); GpuAssert(cudaDeviceSynchronize(), "Error while running FillNewIndexes kernel"); thrust::inclusive_scan(thrust::device, h_nodesBorders[l], h_nodesBorders[l] + Count, h_nodesIndexes[l]); thrust::transform(thrust::device, 
h_nodesBorders[l], h_nodesBorders[l] + Count, h_nodesIndexes[l], h_nodesIndexes[l], thrust::multiplies<int>()); GpuAssert(cudaFree(h_startIndexes[l]), "Cannot free startIndexes memory"); h_startIndexes[l] = newStartIndexes; GpuAssert(cudaFree(h_endIndexes[l]), "Cannot free endIndexes memory"); h_endIndexes[l] = newEndIndexes; LevelsSizes[l] = newLevelSize; GpuAssert(cudaFree(d_toLeave), "Cannot free toLeave memory"); GpuAssert(cudaFree(newIndexes), "Cannot free newIndexes memory"); } GpuAssert(cudaMemcpy(d_LevelSizes, LevelsSizes, L * sizeof(int), cudaMemcpyHostToDevice), "Cannot copy LevelSizes memory"); GpuAssert(cudaMemcpy(startIndexes, h_startIndexes, L * sizeof(int*), cudaMemcpyHostToDevice), "Cannot copy startIndexes device memory"); GpuAssert(cudaMemcpy(endIndexes, h_endIndexes, L * sizeof(int*), cudaMemcpyHostToDevice), "Cannot copy endIndexes device memory"); //Filling children of tree nodes int *h_ChildrenCount = new int[L-1]; for(int l = 0; l < L-1; ++l) h_ChildrenCount[l] = 2 << (h_R[l] - 1); GpuAssert(cudaMalloc((void**)&ChildrenCount, (L-1) * sizeof(int)), "Cannot init Children memory"); GpuAssert(cudaMalloc((void**)&Children, (L-1) * sizeof(int*)), "Cannot init Children memory"); GpuAssert(cudaMemcpy(ChildrenCount, h_ChildrenCount, (L-1) * sizeof(int), cudaMemcpyHostToDevice), "Cannot copy Children memory"); h_Children = new int*[L-1]; for(int l = 0; l < L-1; ++l) GpuAssert(cudaMalloc((void**)&h_Children[l], LevelsSizes[l] * h_ChildrenCount[l] * sizeof(int)), "Cannot init children memory"); GpuAssert(cudaMemcpy(Children, h_Children, (L - 1) * sizeof(int*), cudaMemcpyHostToDevice), "Cannot copy Children memory"); for (int l = 0; l < L - 1; ++l) { thrust::fill_n(thrust::device, h_Children[l], LevelsSizes[l] * h_ChildrenCount[l], 0); FillChildren << <setup.Blocks, setup.Threads >> > (l, d_LevelSizes, startIndexes, endIndexes, Children, ChildrenCount, Masks, nodesIndexes); GpuAssert(cudaGetLastError(), "Error while launching FillChildren kernel"); 
GpuAssert(cudaDeviceSynchronize(), "Error while running FillChildren kernel"); } //Building lists of items for each node GpuAssert(cudaMalloc((void**)&ListItems, Count * sizeof(int)), "Cannot init ListItems memory"); GpuAssert(cudaMalloc((void**)&ListsStarts, L * sizeof(int*)), "Cannot init ListsStarts memory"); GpuAssert(cudaMalloc((void**)&ListsLenghts, L * sizeof(int*)), "Cannot init ListsLenghts memory"); h_ListsStarts = new int*[L]; h_ListsLenghts = new int*[L]; for(int l = 0; l < L; ++l) { GpuAssert(cudaMalloc((void**)&h_ListsStarts[l], LevelsSizes[l] * sizeof(int)), "Cannot init ListsStarts memory"); GpuAssert(cudaMalloc((void**)&h_ListsLenghts[l], LevelsSizes[l] * sizeof(int)), "Cannot init ListsLenghts memory"); } GpuAssert(cudaMemcpy(ListsStarts, h_ListsStarts, L * sizeof(int*), cudaMemcpyHostToDevice), "Cannot copy ListsStarts memory"); GpuAssert(cudaMemcpy(ListsLenghts, h_ListsLenghts, L * sizeof(int*), cudaMemcpyHostToDevice), "Cannot copy ListsLenghts memory"); for(int l = 0; l < L; ++l) { thrust::fill_n(thrust::device, h_ListsLenghts[l], LevelsSizes[l], 0); FillListsLenghts << <setup.Blocks, setup.Threads >> > (l, R, rSums, rPreSums, d_LevelSizes, startIndexes, endIndexes, Lenghts, ListsLenghts); GpuAssert(cudaGetLastError(), "Error while launching FillListsLenghts kernel"); GpuAssert(cudaDeviceSynchronize(), "Error while running FillListsLenghts kernel"); } //Filling lists start indexes totalListItemsPerLevel = new int[L]; for(int l = 0; l < L; ++l) { thrust::exclusive_scan(thrust::device, h_ListsLenghts[l], h_ListsLenghts[l] + LevelsSizes[l], h_ListsStarts[l]); totalListItemsPerLevel[l] = thrust::reduce(thrust::device, h_ListsLenghts[l], h_ListsLenghts[l] + LevelsSizes[l]); } //Shifting lists int shift = 0; for (int l = 1; l < L; ++l) { shift += totalListItemsPerLevel[l - 1]; thrust::for_each_n(thrust::device, h_ListsStarts[l], LevelsSizes[l], thrust::placeholders::_1 += shift); } //Filling list items for(int l = 0; l < L; ++l) { FillListItems << 
<setup.Blocks, setup.Threads, setup.Blocks * sizeof(int) >> > (l, R, rSums, rPreSums, Count, startIndexes, endIndexes, ListsStarts, d_LevelSizes, Lenghts, ListItems); GpuAssert(cudaGetLastError(), "Error while launching FillListItems kernel"); GpuAssert(cudaDeviceSynchronize(), "Error while running FillListItems kernel"); } //int *c; //int *ni; //for (int i = 0; i < L; ++i) // printf("%d ", LevelsSizes[i]); //cout << endl; //for (int i = 0; i < L-1; ++i) // printf("%d ", h_ChildrenCount[i]); //cout << endl; //int *si; //int *ei; //for(int i = 0; i < L; ++i) //{ // si = new int[LevelsSizes[i]]; // ei = new int[LevelsSizes[i]]; // cudaMemcpy(si, h_startIndexes[i], LevelsSizes[i] * sizeof(int), cudaMemcpyDeviceToHost); // cudaMemcpy(ei, h_endIndexes[i], LevelsSizes[i] * sizeof(int), cudaMemcpyDeviceToHost); // for (int j = 0; j < LevelsSizes[i]; ++j) // printf("(%5d;%5d)", si[j], ei[j]); // cout << endl; // delete[] si; // delete[] ei; //} //for(int i = 0; i < L; ++i) //{ // ni = new int[Count]; // cudaMemcpy(ni, h_nodesIndexes[i], Count * sizeof(int), cudaMemcpyDeviceToHost); // for (int j = 0; j < Count; ++j) // printf("%5d", ni[j]); // cout << endl; // delete[]ni; //} //cout << "===========" << endl << "===========" << endl << "===========" << endl << endl << endl; //for(int i = 0; i < L-1; ++i) //{ // if (LevelsSizes[i] * h_ChildrenCount[i]> 0) // { // c = new int[LevelsSizes[i] * h_ChildrenCount[i]]; // cudaMemcpy(c, h_Children[i], LevelsSizes[i] * h_ChildrenCount[i] * sizeof(int), cudaMemcpyDeviceToHost); // for(int j = 0; j < LevelsSizes[i] * h_ChildrenCount[i]; ++j) // printf("%5d", c[j]); // cout << endl; // delete[]c; // } //} //cout << "===========" << endl << "===========" << endl << "===========" << endl << endl << endl; //for(int i = 0; i < L; ++i) //{ // if (LevelsSizes[i] > 0) // { // c = new int[LevelsSizes[i]]; // cudaMemcpy(c, h_ListsStarts[i], LevelsSizes[i] * sizeof(int), cudaMemcpyDeviceToHost); // for (int j = 0; j < LevelsSizes[i]; ++j) // 
printf("%5d", c[j]); // cout << endl; // delete[]c; // } //} //cout << "===========" << endl << "===========" << endl << "===========" << endl << endl << endl; //for (int i = 0; i < L; ++i) //{ // if (LevelsSizes[i] > 0) // { // c = new int[LevelsSizes[i]]; // cudaMemcpy(c, h_ListsLenghts[i], LevelsSizes[i] * sizeof(int), cudaMemcpyDeviceToHost); // for (int j = 0; j < LevelsSizes[i]; ++j) // printf("%5d", c[j]); // cout << endl; // delete[]c; // } //} //Cleanup for(int i = 0; i < L; ++i) { GpuAssert(cudaFree(h_nodesBorders[i]), "Cannot free nodes borders device memory."); GpuAssert(cudaFree(h_nodesIndexes[i]), "Cannot free nodes indexes device memory."); } GpuAssert(cudaFree(nodesBorders), "Cannot free nodes borders device memory."); GpuAssert(cudaFree(nodesIndexes), "Cannot free nodes indexes device memory."); delete[] h_nodesBorders; delete[] h_nodesIndexes; GpuAssert(cudaFree(d_LevelSizes), "Cannot free d_LevelSizes memory"); for (int i = 0; i < L; ++i) { GpuAssert(cudaFree(h_startIndexes[i]), "Cannot free startIndexes device memory."); GpuAssert(cudaFree(h_endIndexes[i]), "Cannot free endIndexes device memory."); } GpuAssert(cudaFree(startIndexes), "Cannot free startIndexes device memory."); GpuAssert(cudaFree(endIndexes), "Cannot free endIndexes device memory."); delete[] h_startIndexes; delete[] h_endIndexes; delete[] h_ChildrenCount; } int RTreeModel::GetMinListLenght(int i) { if (LevelsSizes[i] == 0) return 0; int min; int* minP = thrust::min_element(thrust::device, h_ListsLenghts[i], h_ListsLenghts[i] + LevelsSizes[i]); GpuAssert(cudaMemcpy(&min, minP, sizeof(int), cudaMemcpyDeviceToHost), "Cannot copy min value"); return min; } int RTreeModel::GetMaxListLenght(int i) { if (LevelsSizes[i] == 0) return 0; int max; int* maxP = thrust::max_element(thrust::device, h_ListsLenghts[i], h_ListsLenghts[i] + LevelsSizes[i]); GpuAssert(cudaMemcpy(&max, maxP, sizeof(int), cudaMemcpyDeviceToHost), "Cannot copy max value"); return max; } void RTreeModel::Dispose() { 
if(Masks != NULL) { int** h_Masks = new int*[L]; GpuAssert(cudaMemcpy(h_Masks, Masks, L * sizeof(int*), cudaMemcpyDeviceToHost), "Cannot copy MaxIP pointers to CPU"); for (int i = 0; i < L; ++i) GpuAssert(cudaFree(h_Masks[i]), "Cannot free MaxIP memory"); delete[] h_Masks; GpuAssert(cudaFree(Masks), "Cannot free MaxIP memory"); Masks = NULL; } if(R != NULL) { GpuAssert(cudaFree(R), "Cannot free R memory"); GpuAssert(cudaFree(rSums), "Cannot free rSums memory"); GpuAssert(cudaFree(rPreSums), "Cannot free rPreSums memory"); R = rSums = rPreSums = NULL; } if(Lenghts != NULL) { GpuAssert(cudaFree(Lenghts), "Cannot free Lenghts memory."); Lenghts = NULL; } if(LevelsSizes != NULL) { delete[] LevelsSizes; LevelsSizes = NULL; } if(Children != NULL) { for (int l = 0; l < L - 1; ++l) GpuAssert(cudaFree(h_Children[l]), "Cannot free Children memory"); GpuAssert(cudaFree(Children), "Cannot free children memory"); GpuAssert(cudaFree(ChildrenCount), "Cannot free Children memory"); delete[] h_Children; Children = h_Children = NULL; ChildrenCount = NULL; } if(ListItems != NULL) { GpuAssert(cudaFree(ListItems), "Cannot free ListItems memory"); ListItems = NULL; } if(ListsStarts != NULL) { for (int l = 0; l < L; ++l) GpuAssert(cudaFree(h_ListsStarts[l]), "Cannot free ListsStarts memory"); GpuAssert(cudaFree(ListsStarts), "Cannot free ListsStarts memory"); delete[] h_ListsStarts; ListsStarts = h_ListsStarts = NULL; } if (ListsLenghts != NULL) { for (int l = 0; l < L; ++l) GpuAssert(cudaFree(h_ListsLenghts[l]), "Cannot free ListsLenghts memory"); GpuAssert(cudaFree(ListsLenghts), "Cannot free ListsLenghts memory"); delete[] h_ListsLenghts; ListsLenghts = h_ListsLenghts = NULL; } if(totalListItemsPerLevel != NULL) { delete[] totalListItemsPerLevel; totalListItemsPerLevel = NULL; } } void RTreeMatcher::BuildModel(IPSet &set) { Setup = set.Setup; Timer timer; timer.Start(); Model.Build(set, Setup); ModelBuildTime = timer.Stop(); } __global__ void MatchIPs(int ** Children, int 
*ChildrenCount, int **Masks, int *result, int **ListsStarts, int **ListsLenghts, int *Lenghts, int L, int *R, int *rPreSums, int *ListItems, int **ips, int Count) { //TODO: Wyrównaie extern __shared__ int sharedMem[]; int *nodesToCheck = sharedMem + threadIdx.x * L; int i = blockDim.x * blockIdx.x + threadIdx.x; while( i < Count) { //Find nodes to be searched nodesToCheck[0] = 1; for (int l = 1; l < L; ++l) { nodesToCheck[l] = 0; if (nodesToCheck[l - 1] != 0) nodesToCheck[l] = Children[l - 1][(nodesToCheck[l - 1] - 1)*ChildrenCount[l - 1] + ips[l - 1][i]]; } //Search lists for (int l = L - 1; l >= 0 && result[i] == -1; --l) if (nodesToCheck[l] != 0) { for (int s = ListsStarts[l][nodesToCheck[l] - 1]; s < ListsStarts[l][nodesToCheck[l] - 1] + ListsLenghts[l][nodesToCheck[l] - 1] && result[i] == -1; ++s) { int shitf = R[l] - (Lenghts[ListItems[s]] - rPreSums[l]); if (Masks[l][ListItems[s]] >> shitf == ips[l][i] >> shitf) result[i] = ListItems[s]; } } i += gridDim.x * blockDim.x; } } Result RTreeMatcher::Match(IPSet &set) { Result result(set.Size); result.MatchedMaskIndex = new int[set.Size]; Timer timer; timer.Start(); int **d_IPs; int *d_IPsLenghts; GpuAssert(cudaMalloc(reinterpret_cast<void**>(&d_IPs), Model.L * sizeof(int*)), "Cannot init ip masks device memory"); GpuAssert(cudaMalloc(reinterpret_cast<void**>(&d_IPsLenghts), set.Size * sizeof(int)), "Cannot init Lenght mamory"); int** h_Masks = new int*[Model.L]; for (int l = 0; l < Model.L; ++l) GpuAssert(cudaMalloc(reinterpret_cast<void**>(&h_Masks[l]), set.Size * sizeof(int)), "Cannot init ip masks device memory"); GpuAssert(cudaMemcpy(d_IPs, h_Masks, Model.L * sizeof(int*), cudaMemcpyHostToDevice), "Cannot copy MaxIP pointers to GPU"); //Copying ips from IPSet and partitioning them CopyMasks << < Setup.Blocks, Setup.Threads >> > (set.Size, Model.R, Model.rSums, Model.L, d_IPs, set.d_IPs); GpuAssert(cudaGetLastError(), "Error while launching CopyMasks kernel"); GpuAssert(cudaDeviceSynchronize(), "Error while 
running CopyMasks kernel"); int *d_Result; GpuAssert(cudaMalloc((void**)&d_Result, result.IpsToMatchCount * sizeof(int)), "Cannot allocate memory for Result"); thrust::fill_n(thrust::device, d_Result, result.IpsToMatchCount, -1); //Matching MatchIPs << <Setup.Blocks, Setup.Threads, Setup.Threads * Model.L * sizeof(int)>> > (Model.Children, Model.ChildrenCount, Model.Masks, d_Result, Model.ListsStarts, Model.ListsLenghts, Model.Lenghts, Model.L, Model.R, Model.rPreSums, Model.ListItems, d_IPs, set.Size); GpuAssert(cudaGetLastError(), "Error while launching MatchIPs kernel"); GpuAssert(cudaDeviceSynchronize(), "Error while running MatchIPs kernel"); GpuAssert(cudaMemcpy(result.MatchedMaskIndex, d_Result, result.IpsToMatchCount * sizeof(int), cudaMemcpyDeviceToHost), "Cannot copy Result data"); for (int l = 0; l < Model.L; ++l) GpuAssert(cudaFree(h_Masks[l]), "Cannot free ip masks device memory"); GpuAssert(cudaFree(d_Result), "Cannot free Result memory"); GpuAssert(cudaFree(d_IPs), "Cannot free d_IPs memory"); GpuAssert(cudaFree(d_IPsLenghts), "Cannot free d_IPsLenghts memory"); delete[] h_Masks; result.MatchingTime = timer.Stop(); return result; }
4caa1c46aa9caf2fea8c70b0f5b1ea0e5fb8683e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "image.h" #define PIXELS_PER_BLOCK 512 #define THREAD_PER_BLOCK 4 #define min(a, b) ((a) < (b) ? (a) : (b)) __global__ void histogram(BYTE* img, int height, int width, int channels, int * histogram) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; for (int i = x * (PIXELS_PER_BLOCK / THREAD_PER_BLOCK); i < min(height, x * (PIXELS_PER_BLOCK / THREAD_PER_BLOCK) + PIXELS_PER_BLOCK / THREAD_PER_BLOCK); i++) { for (int j = y * (PIXELS_PER_BLOCK / THREAD_PER_BLOCK); j < min(width, y * (PIXELS_PER_BLOCK / THREAD_PER_BLOCK) + PIXELS_PER_BLOCK / THREAD_PER_BLOCK); j++) { if (channels < 3) { atomicAdd(histogram + img[i * width * channels + j * channels + 0], 1); } else { atomicAdd(histogram + (int)(img[i * width * channels + j * channels + 0] * 0.299 + img[i * width * channels + j * channels + 1] * 0.587 + img[i * width * channels + j * channels + 2] * 0.114), 1); } } } } void save_histogram(const char* name, int* hist, int height) { BYTE* img = (BYTE*) malloc(sizeof(BYTE) * height * 256); int max = 0; for (int i = 0; i < 256; i++) { max = (max > hist[i]) ? 
max : hist[i]; } double coef = 1.0 * height / max; for (int i = 0; i < height; i++) { for (int j = 0; j < 256; j++) { if ((int)(hist[j] * coef) <= i) { img[(height - 1 - i) * 256 + j + 0] = 0; } else { img[(height - 1 - i) * 256 + j + 0] = 255; } } } stbi_write_jpg(name, 256, height, 1, img, 100); free(img); } int main(int argc, char** argv) { int width, height, channels; BYTE* h_image = stbi_load(argv[1], &width, &height, &channels, 0); printf("H = %d, W = %d, C = %d\n", height, width, channels); BYTE* d_image; int* d_out; int* h_hist =(int*) malloc(sizeof(int) * 256); hipMalloc(&d_image, height * width * height * sizeof(BYTE)); hipMalloc(&d_out, 256 * sizeof(int)); hipMemcpy(d_image, h_image, sizeof(BYTE) * channels * width * height, hipMemcpyHostToDevice); int blck_x = (height + PIXELS_PER_BLOCK - 1) / PIXELS_PER_BLOCK; int blck_y = (width + PIXELS_PER_BLOCK - 1) / PIXELS_PER_BLOCK; memset(h_hist, '\0', sizeof(int) * 256); hipMemcpy(d_out, h_hist, sizeof(int) * 256, hipMemcpyHostToDevice); hipLaunchKernelGGL(( histogram), dim3(dim3(blck_x, blck_y)), dim3(dim3(THREAD_PER_BLOCK, THREAD_PER_BLOCK)), 0, 0, d_image, height, width, channels, d_out); hipMemcpy(h_hist, d_out, sizeof(int) * 256 , hipMemcpyDeviceToHost); for (int i = 0; i < 256; ++i) { printf("%d ", h_hist[i]); } save_histogram("hist.jpg", h_hist, 100); free(h_image); free(h_hist); hipFree(d_out); hipFree(d_image); return 0; }
4caa1c46aa9caf2fea8c70b0f5b1ea0e5fb8683e.cu
#include "image.h" #define PIXELS_PER_BLOCK 512 #define THREAD_PER_BLOCK 4 #define min(a, b) ((a) < (b) ? (a) : (b)) __global__ void histogram(BYTE* img, int height, int width, int channels, int * histogram) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; for (int i = x * (PIXELS_PER_BLOCK / THREAD_PER_BLOCK); i < min(height, x * (PIXELS_PER_BLOCK / THREAD_PER_BLOCK) + PIXELS_PER_BLOCK / THREAD_PER_BLOCK); i++) { for (int j = y * (PIXELS_PER_BLOCK / THREAD_PER_BLOCK); j < min(width, y * (PIXELS_PER_BLOCK / THREAD_PER_BLOCK) + PIXELS_PER_BLOCK / THREAD_PER_BLOCK); j++) { if (channels < 3) { atomicAdd(histogram + img[i * width * channels + j * channels + 0], 1); } else { atomicAdd(histogram + (int)(img[i * width * channels + j * channels + 0] * 0.299 + img[i * width * channels + j * channels + 1] * 0.587 + img[i * width * channels + j * channels + 2] * 0.114), 1); } } } } void save_histogram(const char* name, int* hist, int height) { BYTE* img = (BYTE*) malloc(sizeof(BYTE) * height * 256); int max = 0; for (int i = 0; i < 256; i++) { max = (max > hist[i]) ? 
max : hist[i]; } double coef = 1.0 * height / max; for (int i = 0; i < height; i++) { for (int j = 0; j < 256; j++) { if ((int)(hist[j] * coef) <= i) { img[(height - 1 - i) * 256 + j + 0] = 0; } else { img[(height - 1 - i) * 256 + j + 0] = 255; } } } stbi_write_jpg(name, 256, height, 1, img, 100); free(img); } int main(int argc, char** argv) { int width, height, channels; BYTE* h_image = stbi_load(argv[1], &width, &height, &channels, 0); printf("H = %d, W = %d, C = %d\n", height, width, channels); BYTE* d_image; int* d_out; int* h_hist =(int*) malloc(sizeof(int) * 256); cudaMalloc(&d_image, height * width * height * sizeof(BYTE)); cudaMalloc(&d_out, 256 * sizeof(int)); cudaMemcpy(d_image, h_image, sizeof(BYTE) * channels * width * height, cudaMemcpyHostToDevice); int blck_x = (height + PIXELS_PER_BLOCK - 1) / PIXELS_PER_BLOCK; int blck_y = (width + PIXELS_PER_BLOCK - 1) / PIXELS_PER_BLOCK; memset(h_hist, '\0', sizeof(int) * 256); cudaMemcpy(d_out, h_hist, sizeof(int) * 256, cudaMemcpyHostToDevice); histogram<<<dim3(blck_x, blck_y), dim3(THREAD_PER_BLOCK, THREAD_PER_BLOCK)>>>(d_image, height, width, channels, d_out); cudaMemcpy(h_hist, d_out, sizeof(int) * 256 , cudaMemcpyDeviceToHost); for (int i = 0; i < 256; ++i) { printf("%d ", h_hist[i]); } save_histogram("hist.jpg", h_hist, 100); free(h_image); free(h_hist); cudaFree(d_out); cudaFree(d_image); return 0; }
63b093ce8a4f60609724fad6a7412ea2d8295a75.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/Dispatch.h> #include <ATen/native/BinaryOps.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/hip/Loops.cuh> // NOTE: CUDA on Windows requires that the enclosing function // of a __device__ lambda not have internal linkage. namespace at { namespace native { void le_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_ALL_TYPES_AND3(kHalf, kBFloat16, kBool, iter.common_dtype(), "le_cuda", [&]() { gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool { return a <= b; }); }); } REGISTER_DISPATCH(le_stub, &le_kernel_cuda); }} // namespace at::native
63b093ce8a4f60609724fad6a7412ea2d8295a75.cu
#include <ATen/Dispatch.h> #include <ATen/native/BinaryOps.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/cuda/Loops.cuh> // NOTE: CUDA on Windows requires that the enclosing function // of a __device__ lambda not have internal linkage. namespace at { namespace native { void le_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_ALL_TYPES_AND3(kHalf, kBFloat16, kBool, iter.common_dtype(), "le_cuda", [&]() { gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool { return a <= b; }); }); } REGISTER_DISPATCH(le_stub, &le_kernel_cuda); }} // namespace at::native
0e5cd745c9f56443ac1fa7fa76feebf524ba3290.hip
// !!! This is a file automatically generated by hipify!!! // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <sys/time.h> // includes CUDA #include <hip/hip_runtime.h> #define TPB 32 //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runFloyd(int *mat, const size_t N); void GenMatrix(int *mat, const size_t N); void ST_APSP(int *mat, const size_t N); void printMatrix(int *mat, const size_t N); bool CmpArray(const int *l, const int *r, const size_t eleNum); /* Generate Matrix */ void GenMatrix(int *mat, const size_t N) { for(int i = 0; i < N; i ++) for(int j = 0; j < N; j++) mat[i*N+j] = (i==j)?0:rand()%32 - 1; } /* Sequential (Single Thread) APSP on CPU. */ void ST_APSP(int *mat, const size_t N) { for(int k = 0; k < N; k ++) for(int i = 0; i < N; i ++) for(int j = 0; j < N; j ++) { int i0 = i*N + j; int i1 = i*N + k; int i2 = k*N + j; if(mat[i1] != -1 && mat[i2] != -1) mat[i0] = min(mat[i0], mat[i1] + mat[i2]); } } /* Compare two array */ bool CmpArray(const int *l, const int *r, const size_t eleNum) { for(int i = 0; i < eleNum; i ++) if(l[i] != r[i]) { printf("ERROR: l[%d] = %d, r[%d] = %d\n", i, l[i], i, r[i]); return false; } return true; } void printMatrix (int*mat, const size_t N) { for (int i = 0; i< N; i++) { for(int j= 0; j< N; j++) { int value = mat[i*N +j]; printf("%d, ",value); } printf("\n"); } } __global__ void transpose(int* mat, int* result, const size_t N) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i < N && j < N) result[j*N + i] = mat[i*N + j]; } /* GPU kernel function */ __global__ void floydKernel(int k, int *result_d, const size_t N) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if(x < N && y < N){ int xk = x*N+k; int ky = k*N+y; int xy = x*N+y; if((result_d[xk] != -1) && (result_d[ky]!=-1)) result_d[xy] = 
min(result_d[xy],result_d[xk] + result_d[ky]); } __syncthreads(); } /* Call kernel function from Host */ void runFloyd(int *result, const size_t N) { int size = N * N * sizeof(int); int *result_d; hipMalloc((int **) &result_d, size); hipMemcpy(result_d, result, size, hipMemcpyHostToDevice); dim3 Grid(N/TPB,N/TPB,1); dim3 Block(TPB,TPB,1); if (N%TPB!=0) { //ceiling function Grid.x++; Grid.y++; } for(int k = 0; k < N; k++){ hipLaunchKernelGGL(( floydKernel), dim3(Grid), dim3(Block), 0, 0, k, result_d,N); } hipMemcpy(result, result_d, size, hipMemcpyDeviceToHost); hipFree(result_d); } __global__ void coalesceKernel (int k, int *mat, int *transposed_mat, const size_t N) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < N && y < N) if (transposed_mat[k*N + x] != -1 && mat[k*N + y] != -1) { //transposed_mat[k,x] = mat[x,k] --> coalesced mat[x*N + y] = min(transposed_mat[k*N + x] + mat[k*N + y],mat[x*N+y]); transposed_mat[y*N + x] = mat[x*N + y]; //update both to avoid overhead } __syncthreads(); } void runFloyd_coalescing(int *result, const size_t N) { int size = N * N * sizeof(int); int *result_d; hipMalloc((int **) &result_d, size); hipMemcpy(result_d, result, size, hipMemcpyHostToDevice); int *transposed_mat_d; hipMalloc((int **) &transposed_mat_d, size); dim3 Grid(N/TPB,N/TPB,1); dim3 Block(TPB,TPB,1); if (N%TPB!=0) { //ceiling function Grid.x++; Grid.y++; } hipLaunchKernelGGL(( transpose), dim3(Grid), dim3(Block), 0, 0, result_d, transposed_mat_d, N); for(int k = 0; k < N; ++k) { hipLaunchKernelGGL(( coalesceKernel), dim3(Grid), dim3(Block), 0, 0, k, result_d, transposed_mat_d, N); } hipMemcpy(result,result_d,size,hipMemcpyDeviceToHost); hipFree(transposed_mat_d); hipFree(result_d); } __global__ void sharedKernel (int k, int *mat, const size_t N) { extern __shared__ int smem[]; int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < N && y < N) { if (threadIdx.x == 0) 
{ //to avoid reading from memory all the time, //the number of times it reads from memory is just 2*TPB times. smem[TPB+threadIdx.y] = mat[k*N + y]; //KJ } if (threadIdx.y == 0) { smem[threadIdx.x] = mat[x*N + k]; //IK } __syncthreads(); //make sure the arrays are filled if (smem[threadIdx.x] != -1 && smem[TPB+threadIdx.y] != -1) //number of times the shared memory is accessed = TPB*TPB mat[x*N+y] = min(smem[threadIdx.x] + smem[TPB+threadIdx.y], mat[x*N+y]); } __syncthreads(); } void runFloyd_shared(int *result, const size_t N) { int size = N * N * sizeof(int); int *result_d; hipMalloc((int **) &result_d, size); hipMemcpy(result_d, result, size, hipMemcpyHostToDevice); dim3 Grid(N/TPB,N/TPB,1); dim3 Block(TPB,TPB,1); if (N%TPB!=0) { //ceiling function Grid.x++; Grid.y++; } for(int k = 0; k < N; ++k) { hipLaunchKernelGGL(( sharedKernel), dim3(Grid), dim3(Block), (2*TPB)*sizeof(int), 0, k, result_d, N); } hipMemcpy(result,result_d,size,hipMemcpyDeviceToHost); hipFree(result_d); } __global__ void sharedCoalescedKernel (int k, int *mat, int *transposed_mat, const size_t N) { extern __shared__ int smem[]; int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < N && y < N) { if (threadIdx.x == 0) { //to avoid reading from memory all the time, //the number of times it reads from memory is just 2*TPB times. 
smem[TPB+threadIdx.y] = mat[k*N + y]; //KJ } if (threadIdx.y == 0) { smem[threadIdx.x] = transposed_mat[k*N + x]; //KI ==> equivalent to IK of mat } __syncthreads(); //make sure the arrays are filled if (smem[threadIdx.x] != -1 && smem[TPB+threadIdx.y] != -1) { mat[x*N+y] = min(smem[threadIdx.x] + smem[TPB+threadIdx.y], mat[x*N+y]); //number of times being accessed TPB*TPB transposed_mat[y*N+x] = mat[x*N+y]; } } __syncthreads(); } void runFloyd_sharedCoalesced(int *result, const size_t N) { int size = N * N * sizeof(int); int *result_d; hipMalloc((int **) &result_d, size); hipMemcpy(result_d, result, size, hipMemcpyHostToDevice); int *transposed_mat_d; hipMalloc((int **) &transposed_mat_d, size); dim3 Grid(N/TPB,N/TPB,1); dim3 Block(TPB,TPB,1); if (N%TPB!=0) { //ceiling function Grid.x++; Grid.y++; } hipLaunchKernelGGL(( transpose), dim3(Grid), dim3(Block), 0, 0, result_d, transposed_mat_d, N); for(int k = 0; k < N; ++k) { hipLaunchKernelGGL(( sharedCoalescedKernel), dim3(Grid), dim3(Block), (2*TPB)*sizeof(int), 0, k, result_d, transposed_mat_d, N); } hipMemcpy(result,result_d,size,hipMemcpyDeviceToHost); hipFree(transposed_mat_d); hipFree(result_d); } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { hipEvent_t begin, stop; hipEventCreate(&begin); hipEventCreate(&stop); float dt_ms; long int usec; struct timeval start, end; if (argc < 1) { printf("Usage: N [TPB]\n"); return 0; } // generate a random matrix. size_t N = atoi(argv[1]); int *mat = (int*)malloc(sizeof(int) * N * N); GenMatrix(mat, N); // compute the reference result. 
int *ref = (int*)malloc(sizeof(int) * N * N); memcpy(ref, mat, sizeof(int) * N * N); // PERFORM COMPUTATION ON HOST CPU gettimeofday(&start,0); ST_APSP(ref, N); gettimeofday(&end,0); printf("Sequential execution time = %ld usecs \n\n", (end.tv_sec - start.tv_sec) * 1000000 + (end.tv_usec - start.tv_usec)); // PERFORM COMPUTATION ON GPU int *result = (int*)malloc(sizeof(int) * N * N); memcpy(result, mat, sizeof(int)*N*N); hipEventRecord(begin,0); runFloyd(result, N); hipEventRecord(stop,0); hipEventSynchronize(begin); hipEventSynchronize(stop); hipEventElapsedTime(&dt_ms, begin, stop); usec = dt_ms *1000; printf("CUDA Normal execution time = % ld usecs \n",usec); // compare your result with reference result if(CmpArray(result, ref, N * N)) printf("Your result is correct.\n\n"); else printf("Your result is wrong.\n\n"); // PERFORM COMPUTATION ON GPU WITH MEMORY COALESCING METHOD int *coalesced_result = (int*)malloc(sizeof(int) * N * N); memcpy(coalesced_result, mat, sizeof(int)*N*N); hipEventRecord(begin,0); runFloyd_coalescing(coalesced_result, N); hipEventRecord(stop,0); hipEventSynchronize(begin); hipEventSynchronize(stop); hipEventElapsedTime(&dt_ms, begin, stop); usec = dt_ms *1000; printf("CUDA Coalescing execution time = % ld usecs \n",usec); if(CmpArray(coalesced_result, ref, N * N)) printf("Your result is correct.\n\n"); else printf("Your result is wrong.\n\n"); // PERFORM COMPUTATION ON GPU WITH MEMORY TILING SHARED MEMORY METHOD int *shared_result = (int*)malloc(sizeof(int) * N * N); memcpy(shared_result, mat, sizeof(int)*N*N); hipEventRecord(begin,0); runFloyd_shared(shared_result, N); hipEventRecord(stop,0); hipEventSynchronize(begin); hipEventSynchronize(stop); hipEventElapsedTime(&dt_ms, begin, stop); usec = dt_ms *1000; printf("CUDA SM execution time = % ld usecs \n",usec); if(CmpArray(shared_result, ref, N * N)) printf("Your result is correct.\n\n"); else printf("Your result is wrong.\n\n"); // PERFORM COMPUTATION ON GPU WITH MEMORY TILING SHARED 
MEMORY AND COALESCING METHOD METHOD int *shared_coalesced_result = (int*)malloc(sizeof(int) * N * N); memcpy(shared_coalesced_result, mat, sizeof(int)*N*N); hipEventRecord(begin,0); runFloyd_sharedCoalesced(shared_coalesced_result, N); hipEventRecord(stop,0); hipEventSynchronize(begin); hipEventSynchronize(stop); hipEventElapsedTime(&dt_ms, begin, stop); usec = dt_ms *1000; printf("CUDA SM+Coalesceing execution time = % ld usecs \n",usec); if(CmpArray(shared_coalesced_result, ref, N * N)) printf("Your result is correct.\n\n"); else printf("Your result is wrong.\n\n"); hipEventDestroy(begin); hipEventDestroy(stop); }
0e5cd745c9f56443ac1fa7fa76feebf524ba3290.cu
// includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <sys/time.h> // includes CUDA #include <cuda_runtime.h> #define TPB 32 //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runFloyd(int *mat, const size_t N); void GenMatrix(int *mat, const size_t N); void ST_APSP(int *mat, const size_t N); void printMatrix(int *mat, const size_t N); bool CmpArray(const int *l, const int *r, const size_t eleNum); /* Generate Matrix */ void GenMatrix(int *mat, const size_t N) { for(int i = 0; i < N; i ++) for(int j = 0; j < N; j++) mat[i*N+j] = (i==j)?0:rand()%32 - 1; } /* Sequential (Single Thread) APSP on CPU. */ void ST_APSP(int *mat, const size_t N) { for(int k = 0; k < N; k ++) for(int i = 0; i < N; i ++) for(int j = 0; j < N; j ++) { int i0 = i*N + j; int i1 = i*N + k; int i2 = k*N + j; if(mat[i1] != -1 && mat[i2] != -1) mat[i0] = min(mat[i0], mat[i1] + mat[i2]); } } /* Compare two array */ bool CmpArray(const int *l, const int *r, const size_t eleNum) { for(int i = 0; i < eleNum; i ++) if(l[i] != r[i]) { printf("ERROR: l[%d] = %d, r[%d] = %d\n", i, l[i], i, r[i]); return false; } return true; } void printMatrix (int*mat, const size_t N) { for (int i = 0; i< N; i++) { for(int j= 0; j< N; j++) { int value = mat[i*N +j]; printf("%d, ",value); } printf("\n"); } } __global__ void transpose(int* mat, int* result, const size_t N) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i < N && j < N) result[j*N + i] = mat[i*N + j]; } /* GPU kernel function */ __global__ void floydKernel(int k, int *result_d, const size_t N) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if(x < N && y < N){ int xk = x*N+k; int ky = k*N+y; int xy = x*N+y; if((result_d[xk] != -1) && (result_d[ky]!=-1)) result_d[xy] = min(result_d[xy],result_d[xk] + result_d[ky]); } __syncthreads(); } /* Call kernel 
function from Host */ void runFloyd(int *result, const size_t N) { int size = N * N * sizeof(int); int *result_d; cudaMalloc((int **) &result_d, size); cudaMemcpy(result_d, result, size, cudaMemcpyHostToDevice); dim3 Grid(N/TPB,N/TPB,1); dim3 Block(TPB,TPB,1); if (N%TPB!=0) { //ceiling function Grid.x++; Grid.y++; } for(int k = 0; k < N; k++){ floydKernel<<<Grid, Block>>>(k, result_d,N); } cudaMemcpy(result, result_d, size, cudaMemcpyDeviceToHost); cudaFree(result_d); } __global__ void coalesceKernel (int k, int *mat, int *transposed_mat, const size_t N) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < N && y < N) if (transposed_mat[k*N + x] != -1 && mat[k*N + y] != -1) { //transposed_mat[k,x] = mat[x,k] --> coalesced mat[x*N + y] = min(transposed_mat[k*N + x] + mat[k*N + y],mat[x*N+y]); transposed_mat[y*N + x] = mat[x*N + y]; //update both to avoid overhead } __syncthreads(); } void runFloyd_coalescing(int *result, const size_t N) { int size = N * N * sizeof(int); int *result_d; cudaMalloc((int **) &result_d, size); cudaMemcpy(result_d, result, size, cudaMemcpyHostToDevice); int *transposed_mat_d; cudaMalloc((int **) &transposed_mat_d, size); dim3 Grid(N/TPB,N/TPB,1); dim3 Block(TPB,TPB,1); if (N%TPB!=0) { //ceiling function Grid.x++; Grid.y++; } transpose<<<Grid, Block>>>(result_d, transposed_mat_d, N); for(int k = 0; k < N; ++k) { coalesceKernel<<<Grid, Block>>>(k, result_d, transposed_mat_d, N); } cudaMemcpy(result,result_d,size,cudaMemcpyDeviceToHost); cudaFree(transposed_mat_d); cudaFree(result_d); } __global__ void sharedKernel (int k, int *mat, const size_t N) { extern __shared__ int smem[]; int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < N && y < N) { if (threadIdx.x == 0) { //to avoid reading from memory all the time, //the number of times it reads from memory is just 2*TPB times. 
smem[TPB+threadIdx.y] = mat[k*N + y]; //KJ } if (threadIdx.y == 0) { smem[threadIdx.x] = mat[x*N + k]; //IK } __syncthreads(); //make sure the arrays are filled if (smem[threadIdx.x] != -1 && smem[TPB+threadIdx.y] != -1) //number of times the shared memory is accessed = TPB*TPB mat[x*N+y] = min(smem[threadIdx.x] + smem[TPB+threadIdx.y], mat[x*N+y]); } __syncthreads(); } void runFloyd_shared(int *result, const size_t N) { int size = N * N * sizeof(int); int *result_d; cudaMalloc((int **) &result_d, size); cudaMemcpy(result_d, result, size, cudaMemcpyHostToDevice); dim3 Grid(N/TPB,N/TPB,1); dim3 Block(TPB,TPB,1); if (N%TPB!=0) { //ceiling function Grid.x++; Grid.y++; } for(int k = 0; k < N; ++k) { sharedKernel<<<Grid, Block, (2*TPB)*sizeof(int)>>>(k, result_d, N); } cudaMemcpy(result,result_d,size,cudaMemcpyDeviceToHost); cudaFree(result_d); } __global__ void sharedCoalescedKernel (int k, int *mat, int *transposed_mat, const size_t N) { extern __shared__ int smem[]; int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < N && y < N) { if (threadIdx.x == 0) { //to avoid reading from memory all the time, //the number of times it reads from memory is just 2*TPB times. 
smem[TPB+threadIdx.y] = mat[k*N + y]; //KJ } if (threadIdx.y == 0) { smem[threadIdx.x] = transposed_mat[k*N + x]; //KI ==> equivalent to IK of mat } __syncthreads(); //make sure the arrays are filled if (smem[threadIdx.x] != -1 && smem[TPB+threadIdx.y] != -1) { mat[x*N+y] = min(smem[threadIdx.x] + smem[TPB+threadIdx.y], mat[x*N+y]); //number of times being accessed TPB*TPB transposed_mat[y*N+x] = mat[x*N+y]; } } __syncthreads(); } void runFloyd_sharedCoalesced(int *result, const size_t N) { int size = N * N * sizeof(int); int *result_d; cudaMalloc((int **) &result_d, size); cudaMemcpy(result_d, result, size, cudaMemcpyHostToDevice); int *transposed_mat_d; cudaMalloc((int **) &transposed_mat_d, size); dim3 Grid(N/TPB,N/TPB,1); dim3 Block(TPB,TPB,1); if (N%TPB!=0) { //ceiling function Grid.x++; Grid.y++; } transpose<<<Grid, Block>>>(result_d, transposed_mat_d, N); for(int k = 0; k < N; ++k) { sharedCoalescedKernel<<<Grid, Block, (2*TPB)*sizeof(int)>>>(k, result_d, transposed_mat_d, N); } cudaMemcpy(result,result_d,size,cudaMemcpyDeviceToHost); cudaFree(transposed_mat_d); cudaFree(result_d); } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { cudaEvent_t begin, stop; cudaEventCreate(&begin); cudaEventCreate(&stop); float dt_ms; long int usec; struct timeval start, end; if (argc < 1) { printf("Usage: N [TPB]\n"); return 0; } // generate a random matrix. size_t N = atoi(argv[1]); int *mat = (int*)malloc(sizeof(int) * N * N); GenMatrix(mat, N); // compute the reference result. 
int *ref = (int*)malloc(sizeof(int) * N * N); memcpy(ref, mat, sizeof(int) * N * N); // PERFORM COMPUTATION ON HOST CPU gettimeofday(&start,0); ST_APSP(ref, N); gettimeofday(&end,0); printf("Sequential execution time = %ld usecs \n\n", (end.tv_sec - start.tv_sec) * 1000000 + (end.tv_usec - start.tv_usec)); // PERFORM COMPUTATION ON GPU int *result = (int*)malloc(sizeof(int) * N * N); memcpy(result, mat, sizeof(int)*N*N); cudaEventRecord(begin,0); runFloyd(result, N); cudaEventRecord(stop,0); cudaEventSynchronize(begin); cudaEventSynchronize(stop); cudaEventElapsedTime(&dt_ms, begin, stop); usec = dt_ms *1000; printf("CUDA Normal execution time = % ld usecs \n",usec); // compare your result with reference result if(CmpArray(result, ref, N * N)) printf("Your result is correct.\n\n"); else printf("Your result is wrong.\n\n"); // PERFORM COMPUTATION ON GPU WITH MEMORY COALESCING METHOD int *coalesced_result = (int*)malloc(sizeof(int) * N * N); memcpy(coalesced_result, mat, sizeof(int)*N*N); cudaEventRecord(begin,0); runFloyd_coalescing(coalesced_result, N); cudaEventRecord(stop,0); cudaEventSynchronize(begin); cudaEventSynchronize(stop); cudaEventElapsedTime(&dt_ms, begin, stop); usec = dt_ms *1000; printf("CUDA Coalescing execution time = % ld usecs \n",usec); if(CmpArray(coalesced_result, ref, N * N)) printf("Your result is correct.\n\n"); else printf("Your result is wrong.\n\n"); // PERFORM COMPUTATION ON GPU WITH MEMORY TILING SHARED MEMORY METHOD int *shared_result = (int*)malloc(sizeof(int) * N * N); memcpy(shared_result, mat, sizeof(int)*N*N); cudaEventRecord(begin,0); runFloyd_shared(shared_result, N); cudaEventRecord(stop,0); cudaEventSynchronize(begin); cudaEventSynchronize(stop); cudaEventElapsedTime(&dt_ms, begin, stop); usec = dt_ms *1000; printf("CUDA SM execution time = % ld usecs \n",usec); if(CmpArray(shared_result, ref, N * N)) printf("Your result is correct.\n\n"); else printf("Your result is wrong.\n\n"); // PERFORM COMPUTATION ON GPU WITH MEMORY 
TILING SHARED MEMORY AND COALESCING METHOD METHOD int *shared_coalesced_result = (int*)malloc(sizeof(int) * N * N); memcpy(shared_coalesced_result, mat, sizeof(int)*N*N); cudaEventRecord(begin,0); runFloyd_sharedCoalesced(shared_coalesced_result, N); cudaEventRecord(stop,0); cudaEventSynchronize(begin); cudaEventSynchronize(stop); cudaEventElapsedTime(&dt_ms, begin, stop); usec = dt_ms *1000; printf("CUDA SM+Coalesceing execution time = % ld usecs \n",usec); if(CmpArray(shared_coalesced_result, ref, N * N)) printf("Your result is correct.\n\n"); else printf("Your result is wrong.\n\n"); cudaEventDestroy(begin); cudaEventDestroy(stop); }
6486493e8994ef3644b7f13a1266c178e1638e43.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" ////////////////////////////////////////ECE 406 Final Project/////////////////////////////////////////////////// // GPU Version by Renfei Wang and Shaowei Su // // This program will unscramble the image // // Please input three arguments // // <image filename> <csv filename> <Box size> // // the BoxSize is 2,4 or 8 according to the csv file // //////////////////////////////////////////////////////////////////////////////////////////////////////////////// #include <stdio.h> #include <opencv2/core/core.hpp> #include <opencv2/highgui/highgui.hpp> #include <string.h> #include <stdlib.h> #include <math.h> #include <stdint.h> #include <sys/time.h> using namespace cv; int M; // number of rows in image int N; // number of columns in image int numBox; int boxSize; int box_col; // equals to the box_row hipError_t launch_unscramble(uchar *p,uint64_t *csvMat,int boxSize,int box_col,int *result_matrix_row,int *result_matrix_col,int *result_xor_row,int *result_xor_col,int M,float* Runtimes,int *row_xor,int *col_xor); __global__ void unscramble_kernel(uchar *GPU_image,uint64_t *GPU_csvMat,int boxSize,int box_col,int M,int *GPU_result_matrix_row,int *GPU_result_matrix_col,int *GPU_result_xor_row,int *GPU_result_xor_col,int *GPU_row_xor,int *GPU_col_xor){ int x = blockIdx.x; int i= threadIdx.x; int k = 0; int tid = blockIdx.x * blockDim.x + threadIdx.x; uint64_t temp1,temp2,temp3; int result; extern __shared__ int sdata_row[]; extern __shared__ int sdata_col[]; ///////////This is calculate the xor of every row in the checkbox///////////////// if(i<box_col){ temp1 = GPU_csvMat[i*2+1+x*box_col*2]; for(k=0;k<boxSize;k++){ result=0; temp2 = temp1>>8; temp3 = temp2<<8; result = temp1 - temp3; temp1 = GPU_csvMat[i*2+1+x*box_col*2]>>8*(k+1); GPU_result_matrix_row[(boxSize-1-k)*box_col+i+boxSize*x*box_col]=result; } } ///////////This is calculate the xor of every column in the checkbox///////////////// 
if(i>=box_col&&i<2*box_col){ temp1 = GPU_csvMat[(i-box_col)*2+x*box_col*2]; for(k=0;k<boxSize;k++){ result=0; temp2 = temp1>>8; temp3 = temp2<<8; result = temp1 - temp3; temp1 = GPU_csvMat[(i-box_col)*2+x*box_col*2]>>8*(k+1); GPU_result_matrix_col[(i-box_col)*box_col*boxSize+x+(boxSize-1-k)*box_col]=result; } } __syncthreads(); ///////////////This is the xor of each row//////////////////////////////////////// if(tid<M){ GPU_result_xor_row[tid]=GPU_result_matrix_row[tid*box_col]; for(k=1;k<box_col;k++){ GPU_result_xor_row[tid]=GPU_result_xor_row[tid]^GPU_result_matrix_row[tid*box_col+k]; } } ///////////////This is the xor of each column//////////////////////////////////////// if(tid>=M&&tid<2*M){ for(k=0;k<box_col-1;k++){ GPU_result_matrix_col[k+1+(tid-M)*box_col]=GPU_result_matrix_col[k+(tid-M)*box_col]^GPU_result_matrix_col[k+1+(tid-M)*box_col]; } GPU_result_xor_col[tid-M]=GPU_result_matrix_col[box_col-1+(tid-M)*box_col]; } /////////////This is to calculate the xor of row in the scramble image/////////////// if(tid>=2*M&&tid<3*M){ sdata_row[tid-2*M] = GPU_image[(tid-2*M)*M]; for(k=0;k<M-1;k++){ sdata_row[tid-2*M]=sdata_row[tid-2*M]^GPU_image[(tid-2*M)*M+k+1]; } GPU_row_xor[tid-2*M]=sdata_row[tid-2*M]; } /////////////This is to calculate the xor of column in the scramble image/////////////// if(tid>=3*M&&tid<4*M){ sdata_col[tid-3*M] = GPU_image[tid-3*M]; for(k=1;k<M;k++){ sdata_col[tid-3*M]=sdata_col[tid-3*M]^GPU_image[k*M+tid-3*M]; } GPU_col_xor[tid-3*M]=sdata_col[tid-3*M]; } __syncthreads(); } int main(int argc, char *argv[]){ int i, j; int *row_xor, *col_xor; float GPURuntimes[4]; // run times of the GPU code hipError_t cudaStatus; if( argc != 4) { printf("Usage: input format: <image filename><csv filename><Box size>\n"); printf("box size should be 2, 4 or 8\n"); exit(EXIT_FAILURE); } /////////////////////image load///////////////////////////////////////// Mat image; image = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE);//read the image if(! 
image.data ) { fprintf(stderr, "Could not open the image.\n"); exit(EXIT_FAILURE); } printf("Loaded image '%s', size = %dx%d (dims = %d).\n", argv[1], image.rows, image.cols, image.dims); // Set up global variables based on image size: M = image.rows; N = image.cols; boxSize = atoi(argv[3]); numBox = pow(M / boxSize, 2); box_col= M/boxSize;// how many box in one col ///////////////////malloc memory for the xor//////////////////////////// row_xor = (int*) malloc(M*sizeof(int)); if(row_xor == NULL){ printf("Fail to melloc \n\n"); exit(EXIT_FAILURE); } col_xor = (int*) malloc(N*sizeof(int)); if(col_xor == NULL){ printf("Fail to melloc \n\n"); exit(EXIT_FAILURE); } uchar *p = image.data; char buffer[1024] ; char *record,*line; i = 0; j = 0; uint64_t csvmat_read[numBox][2]; uint64_t csvMat[numBox*2]; /////////////////csv file load///////////////////////////////////////// FILE *fstream = fopen(argv[2],"r"); if(fstream == NULL) { printf("\n file opening failed "); exit(EXIT_FAILURE); } while((line=fgets(buffer,sizeof(buffer),fstream))!=NULL) { j=0; record = strtok(line,","); while(record != NULL) { csvmat_read[i][j] = strtoull(record,0,0) ; //printf("record : %lld at %d, %d \n", csvmat_read[i][j], i, j) ; record = strtok(NULL,","); j++; } ++i ; } for(int i=0;i<numBox;i=i+1){ csvMat[2*i]=csvmat_read[i][0]; csvMat[2*i+1]=csvmat_read[i][1]; } ////////////some varibles and memories malloc/////////////////// int *result_matrix_row; int *result_matrix_col; int *result_xor_row; int *result_xor_col; result_matrix_row= (int*) malloc(M*box_col*sizeof(int));// this is to store the decimal which is transformed from the 8 digits if(result_matrix_row == NULL){ printf("Fail to melloc result_matrix_row\n\n"); exit(EXIT_FAILURE); } result_matrix_col= (int*) malloc(M*box_col*sizeof(int));// this is to store the decimal which is transformed from the 8 digits if(result_matrix_col == NULL){ printf("Fail to melloc result_matrix_col\n\n"); exit(EXIT_FAILURE); } result_xor_row= (int*) 
malloc(M*sizeof(int)); if(result_xor_row == NULL){ printf("Fail to melloc result_xor_row\n\n"); exit(EXIT_FAILURE); } result_xor_col= (int*) malloc(M*sizeof(int)); if(result_xor_col == NULL){ printf("Fail to melloc result_xor_col\n\n"); exit(EXIT_FAILURE); } uchar *temp_image; temp_image=(uchar*) malloc(M*N*sizeof(uchar)); if(temp_image == NULL){ printf("Fail to melloc p\n\n"); exit(EXIT_FAILURE); } Mat temp = Mat(M, N, CV_8UC1, temp_image); /////////////////////launch the GPU part//////////////////////// cudaStatus = launch_unscramble(p,csvMat,boxSize,box_col,result_matrix_row,result_matrix_col,result_xor_row,result_xor_col,M,GPURuntimes,row_xor,col_xor); if (cudaStatus != hipSuccess) { fprintf(stderr, "launch_unscramble failed!\n"); exit(EXIT_FAILURE); } printf("-----------------------------------------------------------------\n"); printf("Tfr CPU->GPU = %5.2f ms ... \nExecution = %5.2f ms ... \nTfr GPU->CPU = %5.2f ms \n Total=%5.2f ms\n", GPURuntimes[1], GPURuntimes[2], GPURuntimes[3], GPURuntimes[0]); printf("-----------------------------------------------------------------\n"); // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Parallel Nsight and Visual Profiler to show complete traces. 
cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!\n"); exit(EXIT_FAILURE); } ////////////////get the unscramble image, first by row////////////////////////////////// int flag1=0; int flag2=0; int swap[256]; for(int i=0;i<256;i++){ swap[i]=0; } for(int j=0;j<N;j++){ for(int i=0;i<M;i++){//swap from this line if(result_xor_row[j]==row_xor[i] && swap[i]==0){// if find the targets, then swap swap[i]=1; Mat M1 = temp.row(j); image.row(i).copyTo(M1); flag1++; //printf("has swaped column %d and %d and the result_xor is %d the row_xor is %d\n",j,i,result_xor[j],row_xor[i]); break; } } } ////////////////get the unscramble image, then by column////////////////////////////////// for(int i=0;i<256;i++){ swap[i]=0; } for(int j=0;j<N;j++){ for(int i=0;i<M;i++){//swap from this line if(result_xor_col[j]==col_xor[i] && swap[i]==0){// if find the targets, then swap swap[i]=1; flag2++; Mat M2 = image.col(j); temp.col(i).copyTo(M2); //printf("has swaped row %d and %dand the result_xor is %d the row_xor is %d\n",j,i,result_xor[j],col_xor[i]); break; } } } printf("%d, %d \n",flag1,flag2); // Display the output image: Mat result = Mat(M, N, CV_8UC1, image.data); // and save it to disk: string output_filename = "unscramble.png"; if (!imwrite(output_filename, result)) { fprintf(stderr, "couldn't write output to disk!\n"); exit(EXIT_FAILURE); } printf("Saved image '%s', size = %dx%d (dims = %d).\n", output_filename.c_str(), result.rows, result.cols, result.dims); free(row_xor); free(col_xor); free(result_matrix_row); free(result_matrix_col); free(result_xor_row); free(result_xor_col); free(temp_image); exit(EXIT_SUCCESS); } // Helper function for launching a CUDA kernel (including memcpy, timing, etc.): hipError_t launch_unscramble(uchar *p,uint64_t *csvMat,int boxSize,int box_col,int *result_matrix_row,int *result_matrix_col,int *result_xor_row,int *result_xor_col,int M,float* Runtimes,int *row_xor,int *col_xor) { hipEvent_t time1, 
time2, time3, time4; uint64_t *GPU_csvMat; int *GPU_result_matrix_row; int *GPU_result_matrix_col; int *GPU_result_xor_row; int *GPU_result_xor_col; uchar *GPU_image; int *GPU_row_xor; int *GPU_col_xor; // Choose which GPU to run on; change this on a multi-GPU system. hipError_t cudaStatus; cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?\n"); goto Error; } hipEventCreate(&time1); hipEventCreate(&time2); hipEventCreate(&time3); hipEventCreate(&time4); hipEventRecord(time1, 0); // Allocate GPU buffer for inputs and outputs: cudaStatus = hipMalloc((void**)&GPU_image, M*N*sizeof(uchar)); if (cudaStatus != hipSuccess) { fprintf(stderr, "GPU_image hipMalloc failed!\n"); goto Error; } cudaStatus = hipMalloc((void**)&GPU_csvMat, 2*box_col*box_col*sizeof(uint64_t)); if (cudaStatus != hipSuccess) { fprintf(stderr, "GPU_csvMat hipMalloc failed!\n"); goto Error; } cudaStatus = hipMalloc((void**)&GPU_result_matrix_row, M*box_col*sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "GPU_result_matrix_row hipMalloc failed!\n"); goto Error; } cudaStatus = hipMalloc((void**)&GPU_result_matrix_col, M*box_col*sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "GPU_result_matrix_col hipMalloc failed!\n"); goto Error; } cudaStatus = hipMalloc((void**)&GPU_result_xor_row, M*sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "GPU_result_xor_row hipMalloc failed!\n"); goto Error; } cudaStatus = hipMalloc((void**)&GPU_result_xor_col, M*sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "GPU_result_xor_col hipMalloc failed!\n"); goto Error; } cudaStatus = hipMalloc((void**)&GPU_row_xor, M*sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "GPU_row_xor hipMalloc failed!\n"); goto Error; } cudaStatus = hipMalloc((void**)&GPU_col_xor, M*sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "GPU_col_xor hipMalloc failed!\n"); goto Error; } // 
Copy input vectors from host memory to GPU buffers. cudaStatus = hipMemcpy(GPU_csvMat, csvMat, 2*box_col*box_col*sizeof(uint64_t), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "GPU_csvMat hipMemcpy failed!\n"); goto Error; } cudaStatus = hipMemcpy(GPU_image, p, M*N*sizeof(uchar), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "GPU_csvMat hipMemcpy failed!\n"); goto Error; } hipEventRecord(time2, 0); // Launch a kernel on the GPU hipLaunchKernelGGL(( unscramble_kernel), dim3(box_col),dim3(M),2*M*sizeof(int), 0, GPU_image,GPU_csvMat,boxSize,box_col,M,GPU_result_matrix_row,GPU_result_matrix_col,GPU_result_xor_row,GPU_result_xor_col,GPU_row_xor,GPU_col_xor); // Check for errors immediately after kernel launch. cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "error code %d (%s) launching kernel!\n", cudaStatus, hipGetErrorString(cudaStatus)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d (%s) after launching addKernel!\n", cudaStatus, hipGetErrorString(cudaStatus)); goto Error; } hipEventRecord(time3, 0); // Copy output (results) from GPU buffer to host (CPU) memory. 
cudaStatus = hipMemcpy(result_xor_row, GPU_result_xor_row, M*sizeof(int), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "result_xor_row hipMemcpy failed!\n"); goto Error; } cudaStatus = hipMemcpy(result_xor_col, GPU_result_xor_col, M*sizeof(int), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "result_xor_row hipMemcpy failed!\n"); goto Error; } cudaStatus = hipMemcpy(row_xor, GPU_row_xor, M*sizeof(int), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "row_xor hipMemcpy failed!\n"); goto Error; } cudaStatus = hipMemcpy(col_xor, GPU_col_xor, M*sizeof(int), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "row_xor hipMemcpy failed!\n"); goto Error; } cudaStatus = hipMemcpy(result_matrix_row, GPU_result_matrix_row, M*box_col*sizeof(int), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "result_xor_row hipMemcpy failed!\n"); goto Error; } cudaStatus = hipMemcpy(result_matrix_col, GPU_result_matrix_col, M*box_col*sizeof(int), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "result_xor_row hipMemcpy failed!\n"); goto Error; } hipEventRecord(time4, 0); hipEventSynchronize(time1); hipEventSynchronize(time2); hipEventSynchronize(time3); hipEventSynchronize(time4); float totalTime, tfrCPUtoGPU, tfrGPUtoCPU, kernelExecutionTime; hipEventElapsedTime(&totalTime, time1, time4); hipEventElapsedTime(&tfrCPUtoGPU, time1, time2); hipEventElapsedTime(&kernelExecutionTime, time2, time3); hipEventElapsedTime(&tfrGPUtoCPU, time3, time4); Runtimes[0] = totalTime; Runtimes[1] = tfrCPUtoGPU; Runtimes[2] = kernelExecutionTime; Runtimes[3] = tfrGPUtoCPU; Error: hipFree(GPU_csvMat); hipFree(GPU_result_matrix_row); hipFree(GPU_result_matrix_col); hipFree(GPU_result_xor_row); hipFree(GPU_result_xor_col); hipFree(GPU_image); hipFree(GPU_row_xor); hipFree(GPU_col_xor); hipEventDestroy(time1); hipEventDestroy(time2); hipEventDestroy(time3); 
hipEventDestroy(time4); return cudaStatus; }
6486493e8994ef3644b7f13a1266c178e1638e43.cu
////////////////////////////////////////ECE 406 Final Project///////////////////////////////////////////////////
// GPU Version by Renfei Wang and Shaowei Su                                                                  //
//                                                                                                            //
// Unscrambles an image whose rows and columns were permuted.  A CSV file supplies, for every                 //
// boxSize x boxSize box of the ORIGINAL image, two 64-bit words whose bytes are per-column and per-row       //
// XOR checksums.  The GPU computes (a) the reference row/column XORs from the CSV and (b) the row/column     //
// XORs of the scrambled image; the host then matches them to restore row order, then column order.           //
//                                                                                                            //
// Usage: <image filename> <csv filename> <Box size>      (box size must be 2, 4 or 8)                        //
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#include <stdio.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <string.h>
#include <stdlib.h>
#include <math.h>
#include <stdint.h>
#include <sys/time.h>

using namespace cv;

int M;        // number of rows in the image
int N;        // number of columns in the image (the algorithm assumes M == N; validated in main)
int numBox;   // total number of boxes described by the CSV file
int boxSize;  // box edge length in pixels: 2, 4 or 8
int box_col;  // boxes per row (and per column): M / boxSize

cudaError_t launch_unscramble(uchar *p, uint64_t *csvMat, int boxSize, int box_col,
                              int *result_matrix_row, int *result_matrix_col,
                              int *result_xor_row, int *result_xor_col,
                              int M, float *Runtimes, int *row_xor, int *col_xor);

// Phase 1.  Launch contract: gridDim.x == box_col, blockDim.x == M.
//   threads i <  box_col             : unpack this block's row-checksum words into bytes
//   threads i in [box_col, 2*box_col): unpack this block's column-checksum words
//   tid in [2M, 3M)                  : XOR-reduce one row of the scrambled image
//   tid in [3M, 4M)                  : XOR-reduce one column of the scrambled image
// NOTE(review): the image-XOR work only exists when box_col >= 4 (i.e. M >= 4*boxSize),
// which holds for the advertised box sizes on realistically sized images — confirm for tiny inputs.
__global__ void unscramble_kernel(uchar *GPU_image, uint64_t *GPU_csvMat,
                                  int boxSize, int box_col, int M,
                                  int *GPU_result_matrix_row, int *GPU_result_matrix_col,
                                  int *GPU_result_xor_row, int *GPU_result_xor_col,
                                  int *GPU_row_xor, int *GPU_col_xor) {
  int x = blockIdx.x;
  int i = threadIdx.x;
  int tid = blockIdx.x * blockDim.x + threadIdx.x;

  // Unpack the row-checksum word of box (x, i): byte k (little end) is the XOR of one
  // box row, stored most-significant row first in result_matrix_row.
  if (i < box_col) {
    uint64_t word = GPU_csvMat[i * 2 + 1 + x * box_col * 2];
    for (int k = 0; k < boxSize; k++) {
      int byteval = (int)((word >> (8 * k)) & 0xFF);
      GPU_result_matrix_row[(boxSize - 1 - k) * box_col + i + boxSize * x * box_col] = byteval;
    }
  }

  // Unpack the column-checksum word of box (x, i - box_col) the same way.
  if (i >= box_col && i < 2 * box_col) {
    uint64_t word = GPU_csvMat[(i - box_col) * 2 + x * box_col * 2];
    for (int k = 0; k < boxSize; k++) {
      int byteval = (int)((word >> (8 * k)) & 0xFF);
      GPU_result_matrix_col[(i - box_col) * box_col * boxSize + x + (boxSize - 1 - k) * box_col] = byteval;
    }
  }

  // XOR of one scrambled-image row.  BUGFIX: the original used two extern __shared__
  // arrays here; in CUDA every extern __shared__ declaration aliases the SAME base
  // address, so sdata_row and sdata_col overlapped.  The shared memory was never
  // actually shared between threads, so a plain register accumulator is both
  // correct and simpler (the dynamic shared-memory launch argument is gone too).
  if (tid >= 2 * M && tid < 3 * M) {
    int r = tid - 2 * M;
    int acc = GPU_image[r * M];
    for (int k = 1; k < M; k++)
      acc ^= GPU_image[r * M + k];
    GPU_row_xor[r] = acc;
  }

  // XOR of one scrambled-image column.
  if (tid >= 3 * M && tid < 4 * M) {
    int c = tid - 3 * M;
    int acc = GPU_image[c];
    for (int k = 1; k < M; k++)
      acc ^= GPU_image[k * M + c];
    GPU_col_xor[c] = acc;
  }
}

// Phase 2 (separate launch).  BUGFIX: the original reduced GPU_result_matrix_row/col
// in the same kernel that filled them, relying on __syncthreads() — but the fill and
// the reduction run in DIFFERENT blocks, and __syncthreads() only synchronizes within
// a block.  Running the reduction as a second kernel on the same stream gives the
// required grid-wide ordering.  Launch contract: gridDim.x == box_col, blockDim.x == M.
__global__ void unscramble_reduce_kernel(int box_col, int M,
                                         int *GPU_result_matrix_row,
                                         int *GPU_result_matrix_col,
                                         int *GPU_result_xor_row,
                                         int *GPU_result_xor_col) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;

  // Reference XOR of each row.
  if (tid < M) {
    int acc = GPU_result_matrix_row[tid * box_col];
    for (int k = 1; k < box_col; k++)
      acc ^= GPU_result_matrix_row[tid * box_col + k];
    GPU_result_xor_row[tid] = acc;
  }

  // Reference XOR of each column.  The in-place prefix-XOR form is kept so
  // GPU_result_matrix_col ends up in the same state the original produced
  // (it is copied back to the host, even though main never reads it).
  if (tid >= M && tid < 2 * M) {
    int c = tid - M;
    for (int k = 0; k < box_col - 1; k++)
      GPU_result_matrix_col[k + 1 + c * box_col] =
          GPU_result_matrix_col[k + c * box_col] ^ GPU_result_matrix_col[k + 1 + c * box_col];
    GPU_result_xor_col[c] = GPU_result_matrix_col[box_col - 1 + c * box_col];
  }
}

int main(int argc, char *argv[]) {
  float GPURuntimes[4];  // {total, H2D, kernel, D2H} in ms, filled by launch_unscramble
  cudaError_t cudaStatus;

  if (argc != 4) {
    printf("Usage: input format: <image filename><csv filename><Box size>\n");
    printf("box size should be 2, 4 or 8\n");
    exit(EXIT_FAILURE);
  }

  /////////////////////image load/////////////////////////////////////////
  Mat image = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE);
  if (!image.data) {
    fprintf(stderr, "Could not open the image.\n");
    exit(EXIT_FAILURE);
  }
  printf("Loaded image '%s', size = %dx%d (dims = %d).\n",
         argv[1], image.rows, image.cols, image.dims);

  // Set up global variables based on image size.
  M = image.rows;
  N = image.cols;
  boxSize = atoi(argv[3]);

  // Robustness: the kernels index the image as an M x M grid and the checksum
  // layout requires the image to divide evenly into boxes.
  if (boxSize != 2 && boxSize != 4 && boxSize != 8) {
    fprintf(stderr, "box size must be 2, 4 or 8\n");
    exit(EXIT_FAILURE);
  }
  if (M != N || M % boxSize != 0) {
    fprintf(stderr, "image must be square and its size divisible by the box size\n");
    exit(EXIT_FAILURE);
  }
  box_col = M / boxSize;          // boxes per row/column
  numBox = box_col * box_col;     // total boxes

  ///////////////////host buffers for the image XORs////////////////////////////
  int *row_xor = (int *)malloc(M * sizeof(int));
  int *col_xor = (int *)malloc(N * sizeof(int));
  if (row_xor == NULL || col_xor == NULL) {
    printf("Fail to malloc xor buffers\n\n");
    exit(EXIT_FAILURE);
  }

  /////////////////csv file load/////////////////////////////////////////
  // BUGFIX: the original parsed into a stack VLA uint64_t[numBox][2] (stack
  // overflow risk for large images) with no bounds check on the row/field
  // counters, and never closed the file.  Parse straight into the interleaved
  // heap layout the kernel expects.
  uint64_t *csvMat = (uint64_t *)malloc((size_t)numBox * 2 * sizeof(uint64_t));
  if (csvMat == NULL) {
    printf("Fail to malloc csvMat\n\n");
    exit(EXIT_FAILURE);
  }
  FILE *fstream = fopen(argv[2], "r");
  if (fstream == NULL) {
    printf("\n file opening failed ");
    exit(EXIT_FAILURE);
  }
  char buffer[1024];
  char *line, *record;
  int row = 0;
  while (row < numBox && (line = fgets(buffer, sizeof(buffer), fstream)) != NULL) {
    int field = 0;
    record = strtok(line, ",");
    while (record != NULL && field < 2) {
      csvMat[2 * row + field] = strtoull(record, 0, 0);
      record = strtok(NULL, ",");
      field++;
    }
    ++row;
  }
  fclose(fstream);

  ////////////result buffers//////////////////////////////////////////////
  int *result_matrix_row = (int *)malloc(M * box_col * sizeof(int));  // unpacked per-box row bytes
  int *result_matrix_col = (int *)malloc(M * box_col * sizeof(int));  // unpacked per-box column bytes
  int *result_xor_row = (int *)malloc(M * sizeof(int));               // reference XOR per row
  int *result_xor_col = (int *)malloc(M * sizeof(int));               // reference XOR per column
  uchar *temp_image = (uchar *)malloc(M * N * sizeof(uchar));         // row-unscrambled intermediate
  if (!result_matrix_row || !result_matrix_col || !result_xor_row ||
      !result_xor_col || !temp_image) {
    printf("Fail to malloc result buffers\n\n");
    exit(EXIT_FAILURE);
  }
  Mat temp = Mat(M, N, CV_8UC1, temp_image);

  /////////////////////launch the GPU part////////////////////////
  uchar *p = image.data;
  cudaStatus = launch_unscramble(p, csvMat, boxSize, box_col,
                                 result_matrix_row, result_matrix_col,
                                 result_xor_row, result_xor_col,
                                 M, GPURuntimes, row_xor, col_xor);
  if (cudaStatus != cudaSuccess) {
    fprintf(stderr, "launch_unscramble failed!\n");
    exit(EXIT_FAILURE);
  }
  printf("-----------------------------------------------------------------\n");
  printf("Tfr CPU->GPU = %5.2f ms ... \nExecution = %5.2f ms ... \nTfr GPU->CPU = %5.2f ms \n Total=%5.2f ms\n",
         GPURuntimes[1], GPURuntimes[2], GPURuntimes[3], GPURuntimes[0]);
  printf("-----------------------------------------------------------------\n");

  // cudaDeviceReset must be called before exiting in order for profiling and
  // tracing tools such as Parallel Nsight and Visual Profiler to show complete traces.
  cudaStatus = cudaDeviceReset();
  if (cudaStatus != cudaSuccess) {
    fprintf(stderr, "cudaDeviceReset failed!\n");
    exit(EXIT_FAILURE);
  }

  ////////////////get the unscrambled image, first by row//////////////////////////////////
  // BUGFIX: the original used a fixed int swap[256] indexed up to M-1,
  // overflowing the stack for images taller than 256 rows; allocate M entries.
  int *swap = (int *)calloc(M, sizeof(int));
  if (swap == NULL) {
    printf("Fail to malloc swap\n\n");
    exit(EXIT_FAILURE);
  }
  int flag1 = 0;
  int flag2 = 0;
  for (int j = 0; j < N; j++) {
    for (int i = 0; i < M; i++) {
      // Scrambled row i carries the checksum expected at row j; move it there once.
      if (result_xor_row[j] == row_xor[i] && swap[i] == 0) {
        swap[i] = 1;
        Mat M1 = temp.row(j);
        image.row(i).copyTo(M1);
        flag1++;
        break;
      }
    }
  }

  ////////////////then by column//////////////////////////////////
  memset(swap, 0, M * sizeof(int));
  for (int j = 0; j < N; j++) {
    for (int i = 0; i < M; i++) {
      if (result_xor_col[j] == col_xor[i] && swap[i] == 0) {
        swap[i] = 1;
        flag2++;
        Mat M2 = image.col(j);
        temp.col(i).copyTo(M2);
        break;
      }
    }
  }
  printf("%d, %d \n", flag1, flag2);

  // Save the output image to disk.
  Mat result = Mat(M, N, CV_8UC1, image.data);
  std::string output_filename = "unscramble.png";
  if (!imwrite(output_filename, result)) {
    fprintf(stderr, "couldn't write output to disk!\n");
    exit(EXIT_FAILURE);
  }
  printf("Saved image '%s', size = %dx%d (dims = %d).\n",
         output_filename.c_str(), result.rows, result.cols, result.dims);

  free(swap);
  free(csvMat);
  free(row_xor);
  free(col_xor);
  free(result_matrix_row);
  free(result_matrix_col);
  free(result_xor_row);
  free(result_xor_col);
  free(temp_image);
  exit(EXIT_SUCCESS);
}

// Helper that performs all CUDA work: allocates device buffers, copies inputs,
// launches the two kernels, copies results back, and fills Runtimes with
// {total, H2D, kernel, D2H} times in milliseconds.  Returns the first CUDA
// error encountered (cudaSuccess on success); device resources are always
// released through the Error path.
cudaError_t launch_unscramble(uchar *p, uint64_t *csvMat, int boxSize, int box_col,
                              int *result_matrix_row, int *result_matrix_col,
                              int *result_xor_row, int *result_xor_col,
                              int M, float *Runtimes, int *row_xor, int *col_xor) {
  cudaEvent_t time1, time2, time3, time4;
  // BUGFIX: initialize to NULL so the Error path may cudaFree() unconditionally
  // even when an early allocation fails (cudaFree(NULL) is a no-op).
  uint64_t *GPU_csvMat = NULL;
  int *GPU_result_matrix_row = NULL;
  int *GPU_result_matrix_col = NULL;
  int *GPU_result_xor_row = NULL;
  int *GPU_result_xor_col = NULL;
  uchar *GPU_image = NULL;
  int *GPU_row_xor = NULL;
  int *GPU_col_xor = NULL;
  float totalTime, tfrCPUtoGPU, tfrGPUtoCPU, kernelExecutionTime;

  // Choose which GPU to run on; change this on a multi-GPU system.
  cudaError_t cudaStatus = cudaSetDevice(0);
  if (cudaStatus != cudaSuccess) {
    fprintf(stderr, "cudaSetDevice failed!  Do you have a CUDA-capable GPU installed?\n");
    return cudaStatus;  // nothing allocated yet, so no cleanup needed
  }

  cudaEventCreate(&time1);
  cudaEventCreate(&time2);
  cudaEventCreate(&time3);
  cudaEventCreate(&time4);
  cudaEventRecord(time1, 0);

  // Allocate GPU buffers for inputs and outputs.
  cudaStatus = cudaMalloc((void **)&GPU_image, M * N * sizeof(uchar));
  if (cudaStatus != cudaSuccess) { fprintf(stderr, "GPU_image cudaMalloc failed!\n"); goto Error; }
  cudaStatus = cudaMalloc((void **)&GPU_csvMat, 2 * box_col * box_col * sizeof(uint64_t));
  if (cudaStatus != cudaSuccess) { fprintf(stderr, "GPU_csvMat cudaMalloc failed!\n"); goto Error; }
  cudaStatus = cudaMalloc((void **)&GPU_result_matrix_row, M * box_col * sizeof(int));
  if (cudaStatus != cudaSuccess) { fprintf(stderr, "GPU_result_matrix_row cudaMalloc failed!\n"); goto Error; }
  cudaStatus = cudaMalloc((void **)&GPU_result_matrix_col, M * box_col * sizeof(int));
  if (cudaStatus != cudaSuccess) { fprintf(stderr, "GPU_result_matrix_col cudaMalloc failed!\n"); goto Error; }
  cudaStatus = cudaMalloc((void **)&GPU_result_xor_row, M * sizeof(int));
  if (cudaStatus != cudaSuccess) { fprintf(stderr, "GPU_result_xor_row cudaMalloc failed!\n"); goto Error; }
  cudaStatus = cudaMalloc((void **)&GPU_result_xor_col, M * sizeof(int));
  if (cudaStatus != cudaSuccess) { fprintf(stderr, "GPU_result_xor_col cudaMalloc failed!\n"); goto Error; }
  cudaStatus = cudaMalloc((void **)&GPU_row_xor, M * sizeof(int));
  if (cudaStatus != cudaSuccess) { fprintf(stderr, "GPU_row_xor cudaMalloc failed!\n"); goto Error; }
  cudaStatus = cudaMalloc((void **)&GPU_col_xor, M * sizeof(int));
  if (cudaStatus != cudaSuccess) { fprintf(stderr, "GPU_col_xor cudaMalloc failed!\n"); goto Error; }

  // Copy inputs from host memory to GPU buffers.
  cudaStatus = cudaMemcpy(GPU_csvMat, csvMat, 2 * box_col * box_col * sizeof(uint64_t), cudaMemcpyHostToDevice);
  if (cudaStatus != cudaSuccess) { fprintf(stderr, "GPU_csvMat cudaMemcpy failed!\n"); goto Error; }
  cudaStatus = cudaMemcpy(GPU_image, p, M * N * sizeof(uchar), cudaMemcpyHostToDevice);
  if (cudaStatus != cudaSuccess) { fprintf(stderr, "GPU_image cudaMemcpy failed!\n"); goto Error; }
  cudaEventRecord(time2, 0);

  // Phase 1: unpack the CSV checksums and XOR the scrambled image.
  // No dynamic shared memory needed any more (see kernel comment).
  unscramble_kernel<<<box_col, M>>>(GPU_image, GPU_csvMat, boxSize, box_col, M,
                                    GPU_result_matrix_row, GPU_result_matrix_col,
                                    GPU_result_xor_row, GPU_result_xor_col,
                                    GPU_row_xor, GPU_col_xor);
  cudaStatus = cudaGetLastError();
  if (cudaStatus != cudaSuccess) {
    fprintf(stderr, "error code %d (%s) launching unscramble_kernel!\n",
            cudaStatus, cudaGetErrorString(cudaStatus));
    goto Error;
  }

  // Phase 2: reduce the unpacked checksums.  Same (default) stream, so it is
  // ordered after phase 1 — this is the grid-wide barrier the original's
  // in-kernel __syncthreads() could not provide.
  unscramble_reduce_kernel<<<box_col, M>>>(box_col, M,
                                           GPU_result_matrix_row, GPU_result_matrix_col,
                                           GPU_result_xor_row, GPU_result_xor_col);
  cudaStatus = cudaGetLastError();
  if (cudaStatus != cudaSuccess) {
    fprintf(stderr, "error code %d (%s) launching unscramble_reduce_kernel!\n",
            cudaStatus, cudaGetErrorString(cudaStatus));
    goto Error;
  }

  // cudaDeviceSynchronize waits for the kernels to finish, and returns
  // any errors encountered during execution.
  cudaStatus = cudaDeviceSynchronize();
  if (cudaStatus != cudaSuccess) {
    fprintf(stderr, "cudaDeviceSynchronize returned error code %d (%s) after launching kernels!\n",
            cudaStatus, cudaGetErrorString(cudaStatus));
    goto Error;
  }
  cudaEventRecord(time3, 0);

  // Copy results from GPU buffers to host memory (error messages now name the
  // buffer actually being copied; the originals were mislabeled copy-paste).
  cudaStatus = cudaMemcpy(result_xor_row, GPU_result_xor_row, M * sizeof(int), cudaMemcpyDeviceToHost);
  if (cudaStatus != cudaSuccess) { fprintf(stderr, "result_xor_row cudaMemcpy failed!\n"); goto Error; }
  cudaStatus = cudaMemcpy(result_xor_col, GPU_result_xor_col, M * sizeof(int), cudaMemcpyDeviceToHost);
  if (cudaStatus != cudaSuccess) { fprintf(stderr, "result_xor_col cudaMemcpy failed!\n"); goto Error; }
  cudaStatus = cudaMemcpy(row_xor, GPU_row_xor, M * sizeof(int), cudaMemcpyDeviceToHost);
  if (cudaStatus != cudaSuccess) { fprintf(stderr, "row_xor cudaMemcpy failed!\n"); goto Error; }
  cudaStatus = cudaMemcpy(col_xor, GPU_col_xor, M * sizeof(int), cudaMemcpyDeviceToHost);
  if (cudaStatus != cudaSuccess) { fprintf(stderr, "col_xor cudaMemcpy failed!\n"); goto Error; }
  cudaStatus = cudaMemcpy(result_matrix_row, GPU_result_matrix_row, M * box_col * sizeof(int), cudaMemcpyDeviceToHost);
  if (cudaStatus != cudaSuccess) { fprintf(stderr, "result_matrix_row cudaMemcpy failed!\n"); goto Error; }
  cudaStatus = cudaMemcpy(result_matrix_col, GPU_result_matrix_col, M * box_col * sizeof(int), cudaMemcpyDeviceToHost);
  if (cudaStatus != cudaSuccess) { fprintf(stderr, "result_matrix_col cudaMemcpy failed!\n"); goto Error; }
  cudaEventRecord(time4, 0);

  cudaEventSynchronize(time1);
  cudaEventSynchronize(time2);
  cudaEventSynchronize(time3);
  cudaEventSynchronize(time4);
  cudaEventElapsedTime(&totalTime, time1, time4);
  cudaEventElapsedTime(&tfrCPUtoGPU, time1, time2);
  cudaEventElapsedTime(&kernelExecutionTime, time2, time3);
  cudaEventElapsedTime(&tfrGPUtoCPU, time3, time4);
  Runtimes[0] = totalTime;
  Runtimes[1] = tfrCPUtoGPU;
  Runtimes[2] = kernelExecutionTime;
  Runtimes[3] = tfrGPUtoCPU;

Error:
  cudaFree(GPU_csvMat);
  cudaFree(GPU_result_matrix_row);
  cudaFree(GPU_result_matrix_col);
  cudaFree(GPU_result_xor_row);
  cudaFree(GPU_result_xor_col);
  cudaFree(GPU_image);
  cudaFree(GPU_row_xor);
  cudaFree(GPU_col_xor);
  cudaEventDestroy(time1);
  cudaEventDestroy(time2);
  cudaEventDestroy(time3);
  cudaEventDestroy(time4);
  return cudaStatus;
}
c9e3f4256460633ea49c619367ef6a635b84421b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_calc_dt_kernel; int xdim0_calc_dt_kernel_h = -1; __constant__ int ydim0_calc_dt_kernel; int ydim0_calc_dt_kernel_h = -1; __constant__ int xdim1_calc_dt_kernel; int xdim1_calc_dt_kernel_h = -1; __constant__ int ydim1_calc_dt_kernel; int ydim1_calc_dt_kernel_h = -1; __constant__ int xdim2_calc_dt_kernel; int xdim2_calc_dt_kernel_h = -1; __constant__ int ydim2_calc_dt_kernel; int ydim2_calc_dt_kernel_h = -1; __constant__ int xdim3_calc_dt_kernel; int xdim3_calc_dt_kernel_h = -1; __constant__ int ydim3_calc_dt_kernel; int ydim3_calc_dt_kernel_h = -1; __constant__ int xdim4_calc_dt_kernel; int xdim4_calc_dt_kernel_h = -1; __constant__ int ydim4_calc_dt_kernel; int ydim4_calc_dt_kernel_h = -1; __constant__ int xdim5_calc_dt_kernel; int xdim5_calc_dt_kernel_h = -1; __constant__ int ydim5_calc_dt_kernel; int ydim5_calc_dt_kernel_h = -1; __constant__ int xdim6_calc_dt_kernel; int xdim6_calc_dt_kernel_h = -1; __constant__ int ydim6_calc_dt_kernel; int ydim6_calc_dt_kernel_h = -1; __constant__ int xdim7_calc_dt_kernel; int xdim7_calc_dt_kernel_h = -1; __constant__ int ydim7_calc_dt_kernel; int ydim7_calc_dt_kernel_h = -1; __constant__ int xdim8_calc_dt_kernel; int xdim8_calc_dt_kernel_h = -1; __constant__ int ydim8_calc_dt_kernel; int ydim8_calc_dt_kernel_h = -1; __constant__ int xdim9_calc_dt_kernel; int xdim9_calc_dt_kernel_h = -1; __constant__ int ydim9_calc_dt_kernel; int ydim9_calc_dt_kernel_h = -1; __constant__ int xdim10_calc_dt_kernel; int xdim10_calc_dt_kernel_h = -1; __constant__ int ydim10_calc_dt_kernel; int ydim10_calc_dt_kernel_h = -1; __constant__ int xdim11_calc_dt_kernel; int xdim11_calc_dt_kernel_h = -1; __constant__ int ydim11_calc_dt_kernel; int ydim11_calc_dt_kernel_h = -1; __constant__ int xdim12_calc_dt_kernel; int xdim12_calc_dt_kernel_h = -1; __constant__ int ydim12_calc_dt_kernel; int ydim12_calc_dt_kernel_h 
= -1; __constant__ int xdim13_calc_dt_kernel; int xdim13_calc_dt_kernel_h = -1; __constant__ int ydim13_calc_dt_kernel; int ydim13_calc_dt_kernel_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #undef OPS_ACC6 #undef OPS_ACC7 #undef OPS_ACC8 #undef OPS_ACC9 #undef OPS_ACC10 #undef OPS_ACC11 #undef OPS_ACC12 #undef OPS_ACC13 #define OPS_ACC0(x, y, z) \ (x + xdim0_calc_dt_kernel * (y) + \ xdim0_calc_dt_kernel * ydim0_calc_dt_kernel * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_calc_dt_kernel * (y) + \ xdim1_calc_dt_kernel * ydim1_calc_dt_kernel * (z)) #define OPS_ACC2(x, y, z) \ (x + xdim2_calc_dt_kernel * (y) + \ xdim2_calc_dt_kernel * ydim2_calc_dt_kernel * (z)) #define OPS_ACC3(x, y, z) \ (x + xdim3_calc_dt_kernel * (y) + \ xdim3_calc_dt_kernel * ydim3_calc_dt_kernel * (z)) #define OPS_ACC4(x, y, z) \ (x + xdim4_calc_dt_kernel * (y) + \ xdim4_calc_dt_kernel * ydim4_calc_dt_kernel * (z)) #define OPS_ACC5(x, y, z) \ (x + xdim5_calc_dt_kernel * (y) + \ xdim5_calc_dt_kernel * ydim5_calc_dt_kernel * (z)) #define OPS_ACC6(x, y, z) \ (x + xdim6_calc_dt_kernel * (y) + \ xdim6_calc_dt_kernel * ydim6_calc_dt_kernel * (z)) #define OPS_ACC7(x, y, z) \ (x + xdim7_calc_dt_kernel * (y) + \ xdim7_calc_dt_kernel * ydim7_calc_dt_kernel * (z)) #define OPS_ACC8(x, y, z) \ (x + xdim8_calc_dt_kernel * (y) + \ xdim8_calc_dt_kernel * ydim8_calc_dt_kernel * (z)) #define OPS_ACC9(x, y, z) \ (x + xdim9_calc_dt_kernel * (y) + \ xdim9_calc_dt_kernel * ydim9_calc_dt_kernel * (z)) #define OPS_ACC10(x, y, z) \ (x + xdim10_calc_dt_kernel * (y) + \ xdim10_calc_dt_kernel * ydim10_calc_dt_kernel * (z)) #define OPS_ACC11(x, y, z) \ (x + xdim11_calc_dt_kernel * (y) + \ xdim11_calc_dt_kernel * ydim11_calc_dt_kernel * (z)) #define OPS_ACC12(x, y, z) \ (x + xdim12_calc_dt_kernel * (y) + \ xdim12_calc_dt_kernel * ydim12_calc_dt_kernel * (z)) #define OPS_ACC13(x, y, z) \ (x + xdim13_calc_dt_kernel * (y) + \ xdim13_calc_dt_kernel * 
ydim13_calc_dt_kernel * (z)) // user function __device__ void calc_dt_kernel_gpu(const double *celldx, const double *celldy, const double *soundspeed, const double *viscosity, const double *density0, const double *xvel0, const double *xarea, const double *volume, const double *yvel0, const double *yarea, double *dt_min, const double *celldz, const double *zvel0, const double *zarea) { double div, ds, dtut, dtvt, dtct, dtwt, dtdivt, cc, dv1, dv2, du1, du2, dw1, dw2; ds = MIN(MIN(celldx[OPS_ACC0(0, 0, 0)], celldy[OPS_ACC1(0, 0, 0)]), celldz[OPS_ACC11(0, 0, 0)]); ds = 1.0 / (ds * ds); cc = soundspeed[OPS_ACC2(0, 0, 0)] * soundspeed[OPS_ACC2(0, 0, 0)]; cc = cc + 2.0 * viscosity[OPS_ACC3(0, 0, 0)] / density0[OPS_ACC4(0, 0, 0)]; dtct = ds * cc; dtct = dtc_safe * 1.0 / MAX(sqrt(dtct), g_small); du1 = (xvel0[OPS_ACC5(0, 0, 0)] + xvel0[OPS_ACC5(0, 1, 0)] + xvel0[OPS_ACC5(0, 0, 1)] + xvel0[OPS_ACC5(0, 1, 1)]) * xarea[OPS_ACC6(0, 0, 0)]; du2 = (xvel0[OPS_ACC5(1, 0, 0)] + xvel0[OPS_ACC5(1, 1, 0)] + xvel0[OPS_ACC5(1, 0, 1)] + xvel0[OPS_ACC5(1, 1, 1)]) * xarea[OPS_ACC6(0, 0, 0)]; dtut = dtu_safe * 4.0 * volume[OPS_ACC7(0, 0, 0)] / MAX(MAX(fabs(du1), fabs(du2)), 1.0e-5 * volume[OPS_ACC7(0, 0, 0)]); dv1 = (yvel0[OPS_ACC8(0, 0, 0)] + yvel0[OPS_ACC8(1, 0, 0)] + yvel0[OPS_ACC8(0, 0, 1)] + yvel0[OPS_ACC8(1, 0, 1)]) * yarea[OPS_ACC9(0, 0, 0)]; dv2 = (yvel0[OPS_ACC8(0, 1, 0)] + yvel0[OPS_ACC8(1, 1, 0)] + yvel0[OPS_ACC8(0, 1, 1)] + yvel0[OPS_ACC8(1, 1, 1)]) * yarea[OPS_ACC9(0, 0, 0)]; dtvt = dtv_safe * 4.0 * volume[OPS_ACC7(0, 0, 0)] / MAX(MAX(fabs(dv1), fabs(dv2)), 1.0e-5 * volume[OPS_ACC7(0, 0, 0)]); dw1 = (zvel0[OPS_ACC12(0, 0, 0)] + zvel0[OPS_ACC12(0, 1, 0)] + zvel0[OPS_ACC12(1, 0, 0)] + zvel0[OPS_ACC12(1, 1, 0)]) * zarea[OPS_ACC13(0, 0, 0)]; dw2 = (zvel0[OPS_ACC12(0, 0, 1)] + zvel0[OPS_ACC12(0, 1, 1)] + zvel0[OPS_ACC12(1, 0, 1)] + zvel0[OPS_ACC12(1, 1, 1)]) * zarea[OPS_ACC13(0, 0, 0)]; dtwt = dtw_safe * 4.0 * volume[OPS_ACC7(0, 0, 0)] / MAX(MAX(fabs(dw1), fabs(dw2)), 1.0e-5 * 
volume[OPS_ACC7(0, 0, 0)]); div = du2 - du1 + dv2 - dv1 + dw2 - dw1; dtdivt = dtdiv_safe * 4.0 * (volume[OPS_ACC7(0, 0, 0)]) / MAX(volume[OPS_ACC7(0, 0, 0)] * 1.0e-05, fabs(div)); dt_min[OPS_ACC10(0, 0, 0)] = MIN(MIN(MIN(dtct, dtut), MIN(dtvt, dtdivt)), dtwt); } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #undef OPS_ACC6 #undef OPS_ACC7 #undef OPS_ACC8 #undef OPS_ACC9 #undef OPS_ACC10 #undef OPS_ACC11 #undef OPS_ACC12 #undef OPS_ACC13 __global__ void ops_calc_dt_kernel(const double *__restrict arg0, const double *__restrict arg1, const double *__restrict arg2, const double *__restrict arg3, const double *__restrict arg4, const double *__restrict arg5, const double *__restrict arg6, const double *__restrict arg7, const double *__restrict arg8, const double *__restrict arg9, double *__restrict arg10, const double *__restrict arg11, const double *__restrict arg12, const double *__restrict arg13, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 0 * 1 * xdim0_calc_dt_kernel + idx_z * 0 * 1 * xdim0_calc_dt_kernel * ydim0_calc_dt_kernel; arg1 += idx_x * 0 * 1 + idx_y * 1 * 1 * xdim1_calc_dt_kernel + idx_z * 0 * 1 * xdim1_calc_dt_kernel * ydim1_calc_dt_kernel; arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_calc_dt_kernel + idx_z * 1 * 1 * xdim2_calc_dt_kernel * ydim2_calc_dt_kernel; arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_calc_dt_kernel + idx_z * 1 * 1 * xdim3_calc_dt_kernel * ydim3_calc_dt_kernel; arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_calc_dt_kernel + idx_z * 1 * 1 * xdim4_calc_dt_kernel * ydim4_calc_dt_kernel; arg5 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim5_calc_dt_kernel + idx_z * 1 * 1 * xdim5_calc_dt_kernel * ydim5_calc_dt_kernel; arg6 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim6_calc_dt_kernel + idx_z * 1 * 1 * xdim6_calc_dt_kernel * 
ydim6_calc_dt_kernel; arg7 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim7_calc_dt_kernel + idx_z * 1 * 1 * xdim7_calc_dt_kernel * ydim7_calc_dt_kernel; arg8 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim8_calc_dt_kernel + idx_z * 1 * 1 * xdim8_calc_dt_kernel * ydim8_calc_dt_kernel; arg9 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim9_calc_dt_kernel + idx_z * 1 * 1 * xdim9_calc_dt_kernel * ydim9_calc_dt_kernel; arg10 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim10_calc_dt_kernel + idx_z * 1 * 1 * xdim10_calc_dt_kernel * ydim10_calc_dt_kernel; arg11 += idx_x * 0 * 1 + idx_y * 0 * 1 * xdim11_calc_dt_kernel + idx_z * 1 * 1 * xdim11_calc_dt_kernel * ydim11_calc_dt_kernel; arg12 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim12_calc_dt_kernel + idx_z * 1 * 1 * xdim12_calc_dt_kernel * ydim12_calc_dt_kernel; arg13 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim13_calc_dt_kernel + idx_z * 1 * 1 * xdim13_calc_dt_kernel * ydim13_calc_dt_kernel; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { calc_dt_kernel_gpu(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13); } } // host stub function void ops_par_loop_calc_dt_kernel(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7, ops_arg arg8, ops_arg arg9, ops_arg arg10, ops_arg arg11, ops_arg arg12, ops_arg arg13) { // Timing double t1, t2, c1, c2; ops_arg args[14] = {arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 14, range, 37)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(37, "calc_dt_kernel"); OPS_kernels[37].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + 
sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; int xdim4 = args[4].dat->size[0]; int ydim4 = args[4].dat->size[1]; int xdim5 = args[5].dat->size[0]; int ydim5 = args[5].dat->size[1]; int xdim6 = args[6].dat->size[0]; int ydim6 = args[6].dat->size[1]; int xdim7 = args[7].dat->size[0]; int ydim7 = args[7].dat->size[1]; int xdim8 = args[8].dat->size[0]; int ydim8 = args[8].dat->size[1]; int xdim9 = args[9].dat->size[0]; int ydim9 = args[9].dat->size[1]; int xdim10 = args[10].dat->size[0]; int ydim10 = args[10].dat->size[1]; int xdim11 = args[11].dat->size[0]; int ydim11 = args[11].dat->size[1]; int xdim12 = args[12].dat->size[0]; int ydim12 = args[12].dat->size[1]; int xdim13 = args[13].dat->size[0]; int ydim13 = args[13].dat->size[1]; if (xdim0 != xdim0_calc_dt_kernel_h || ydim0 != ydim0_calc_dt_kernel_h || xdim1 != xdim1_calc_dt_kernel_h || ydim1 != ydim1_calc_dt_kernel_h || xdim2 != xdim2_calc_dt_kernel_h || ydim2 != ydim2_calc_dt_kernel_h || xdim3 != xdim3_calc_dt_kernel_h || ydim3 != ydim3_calc_dt_kernel_h || xdim4 != xdim4_calc_dt_kernel_h || ydim4 != 
ydim4_calc_dt_kernel_h || xdim5 != xdim5_calc_dt_kernel_h || ydim5 != ydim5_calc_dt_kernel_h || xdim6 != xdim6_calc_dt_kernel_h || ydim6 != ydim6_calc_dt_kernel_h || xdim7 != xdim7_calc_dt_kernel_h || ydim7 != ydim7_calc_dt_kernel_h || xdim8 != xdim8_calc_dt_kernel_h || ydim8 != ydim8_calc_dt_kernel_h || xdim9 != xdim9_calc_dt_kernel_h || ydim9 != ydim9_calc_dt_kernel_h || xdim10 != xdim10_calc_dt_kernel_h || ydim10 != ydim10_calc_dt_kernel_h || xdim11 != xdim11_calc_dt_kernel_h || ydim11 != ydim11_calc_dt_kernel_h || xdim12 != xdim12_calc_dt_kernel_h || ydim12 != ydim12_calc_dt_kernel_h || xdim13 != xdim13_calc_dt_kernel_h || ydim13 != ydim13_calc_dt_kernel_h) { hipMemcpyToSymbol(xdim0_calc_dt_kernel, &xdim0, sizeof(int)); xdim0_calc_dt_kernel_h = xdim0; hipMemcpyToSymbol(ydim0_calc_dt_kernel, &ydim0, sizeof(int)); ydim0_calc_dt_kernel_h = ydim0; hipMemcpyToSymbol(xdim1_calc_dt_kernel, &xdim1, sizeof(int)); xdim1_calc_dt_kernel_h = xdim1; hipMemcpyToSymbol(ydim1_calc_dt_kernel, &ydim1, sizeof(int)); ydim1_calc_dt_kernel_h = ydim1; hipMemcpyToSymbol(xdim2_calc_dt_kernel, &xdim2, sizeof(int)); xdim2_calc_dt_kernel_h = xdim2; hipMemcpyToSymbol(ydim2_calc_dt_kernel, &ydim2, sizeof(int)); ydim2_calc_dt_kernel_h = ydim2; hipMemcpyToSymbol(xdim3_calc_dt_kernel, &xdim3, sizeof(int)); xdim3_calc_dt_kernel_h = xdim3; hipMemcpyToSymbol(ydim3_calc_dt_kernel, &ydim3, sizeof(int)); ydim3_calc_dt_kernel_h = ydim3; hipMemcpyToSymbol(xdim4_calc_dt_kernel, &xdim4, sizeof(int)); xdim4_calc_dt_kernel_h = xdim4; hipMemcpyToSymbol(ydim4_calc_dt_kernel, &ydim4, sizeof(int)); ydim4_calc_dt_kernel_h = ydim4; hipMemcpyToSymbol(xdim5_calc_dt_kernel, &xdim5, sizeof(int)); xdim5_calc_dt_kernel_h = xdim5; hipMemcpyToSymbol(ydim5_calc_dt_kernel, &ydim5, sizeof(int)); ydim5_calc_dt_kernel_h = ydim5; hipMemcpyToSymbol(xdim6_calc_dt_kernel, &xdim6, sizeof(int)); xdim6_calc_dt_kernel_h = xdim6; hipMemcpyToSymbol(ydim6_calc_dt_kernel, &ydim6, sizeof(int)); ydim6_calc_dt_kernel_h = ydim6; 
hipMemcpyToSymbol(xdim7_calc_dt_kernel, &xdim7, sizeof(int)); xdim7_calc_dt_kernel_h = xdim7; hipMemcpyToSymbol(ydim7_calc_dt_kernel, &ydim7, sizeof(int)); ydim7_calc_dt_kernel_h = ydim7; hipMemcpyToSymbol(xdim8_calc_dt_kernel, &xdim8, sizeof(int)); xdim8_calc_dt_kernel_h = xdim8; hipMemcpyToSymbol(ydim8_calc_dt_kernel, &ydim8, sizeof(int)); ydim8_calc_dt_kernel_h = ydim8; hipMemcpyToSymbol(xdim9_calc_dt_kernel, &xdim9, sizeof(int)); xdim9_calc_dt_kernel_h = xdim9; hipMemcpyToSymbol(ydim9_calc_dt_kernel, &ydim9, sizeof(int)); ydim9_calc_dt_kernel_h = ydim9; hipMemcpyToSymbol(xdim10_calc_dt_kernel, &xdim10, sizeof(int)); xdim10_calc_dt_kernel_h = xdim10; hipMemcpyToSymbol(ydim10_calc_dt_kernel, &ydim10, sizeof(int)); ydim10_calc_dt_kernel_h = ydim10; hipMemcpyToSymbol(xdim11_calc_dt_kernel, &xdim11, sizeof(int)); xdim11_calc_dt_kernel_h = xdim11; hipMemcpyToSymbol(ydim11_calc_dt_kernel, &ydim11, sizeof(int)); ydim11_calc_dt_kernel_h = ydim11; hipMemcpyToSymbol(xdim12_calc_dt_kernel, &xdim12, sizeof(int)); xdim12_calc_dt_kernel_h = xdim12; hipMemcpyToSymbol(ydim12_calc_dt_kernel, &ydim12, sizeof(int)); ydim12_calc_dt_kernel_h = ydim12; hipMemcpyToSymbol(xdim13_calc_dt_kernel, &xdim13, sizeof(int)); xdim13_calc_dt_kernel_h = xdim13; hipMemcpyToSymbol(ydim13_calc_dt_kernel, &ydim13, sizeof(int)); ydim13_calc_dt_kernel_h = ydim13; } dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; int dat2 = args[2].dat->elem_size; int dat3 = args[3].dat->elem_size; int dat4 = args[4].dat->elem_size; int dat5 = args[5].dat->elem_size; int dat6 = args[6].dat->elem_size; int dat7 = args[7].dat->elem_size; int dat8 = args[8].dat->elem_size; int dat9 = args[9].dat->elem_size; int dat10 = args[10].dat->elem_size; int dat11 = args[11].dat->elem_size; int dat12 = args[12].dat->elem_size; int dat13 = args[13].dat->elem_size; char 
*p_a[14]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d]; #endif int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] - args[2].dat->base[0] - d_m[0]); base2 = base2 + dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] - args[2].dat->base[1] - d_m[1]); base2 = base2 + dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] - d_m[2]); p_a[2] = (char *)args[2].data_d + base2; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) 
d_m[d] = args[3].dat->d_m[d]; #endif int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] - args[3].dat->base[0] - d_m[0]); base3 = base3 + dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] - args[3].dat->base[1] - d_m[1]); base3 = base3 + dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] - d_m[2]); p_a[3] = (char *)args[3].data_d + base3; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d]; #endif int base4 = dat4 * 1 * (start[0] * args[4].stencil->stride[0] - args[4].dat->base[0] - d_m[0]); base4 = base4 + dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1] - args[4].dat->base[1] - d_m[1]); base4 = base4 + dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2] - args[4].dat->base[2] - d_m[2]); p_a[4] = (char *)args[4].data_d + base4; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d] + OPS_sub_dat_list[args[5].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d]; #endif int base5 = dat5 * 1 * (start[0] * args[5].stencil->stride[0] - args[5].dat->base[0] - d_m[0]); base5 = base5 + dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1] - args[5].dat->base[1] - d_m[1]); base5 = base5 + dat5 * args[5].dat->size[0] * args[5].dat->size[1] * (start[2] * args[5].stencil->stride[2] - args[5].dat->base[2] - d_m[2]); p_a[5] = (char *)args[5].data_d + base5; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d] + OPS_sub_dat_list[args[6].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d]; #endif int base6 = dat6 * 1 * (start[0] * args[6].stencil->stride[0] - args[6].dat->base[0] - d_m[0]); base6 = base6 + dat6 * args[6].dat->size[0] * (start[1] * args[6].stencil->stride[1] 
- args[6].dat->base[1] - d_m[1]); base6 = base6 + dat6 * args[6].dat->size[0] * args[6].dat->size[1] * (start[2] * args[6].stencil->stride[2] - args[6].dat->base[2] - d_m[2]); p_a[6] = (char *)args[6].data_d + base6; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[7].dat->d_m[d] + OPS_sub_dat_list[args[7].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[7].dat->d_m[d]; #endif int base7 = dat7 * 1 * (start[0] * args[7].stencil->stride[0] - args[7].dat->base[0] - d_m[0]); base7 = base7 + dat7 * args[7].dat->size[0] * (start[1] * args[7].stencil->stride[1] - args[7].dat->base[1] - d_m[1]); base7 = base7 + dat7 * args[7].dat->size[0] * args[7].dat->size[1] * (start[2] * args[7].stencil->stride[2] - args[7].dat->base[2] - d_m[2]); p_a[7] = (char *)args[7].data_d + base7; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[8].dat->d_m[d] + OPS_sub_dat_list[args[8].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[8].dat->d_m[d]; #endif int base8 = dat8 * 1 * (start[0] * args[8].stencil->stride[0] - args[8].dat->base[0] - d_m[0]); base8 = base8 + dat8 * args[8].dat->size[0] * (start[1] * args[8].stencil->stride[1] - args[8].dat->base[1] - d_m[1]); base8 = base8 + dat8 * args[8].dat->size[0] * args[8].dat->size[1] * (start[2] * args[8].stencil->stride[2] - args[8].dat->base[2] - d_m[2]); p_a[8] = (char *)args[8].data_d + base8; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[9].dat->d_m[d] + OPS_sub_dat_list[args[9].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[9].dat->d_m[d]; #endif int base9 = dat9 * 1 * (start[0] * args[9].stencil->stride[0] - args[9].dat->base[0] - d_m[0]); base9 = base9 + dat9 * args[9].dat->size[0] * (start[1] * args[9].stencil->stride[1] - args[9].dat->base[1] - d_m[1]); base9 = base9 + dat9 * args[9].dat->size[0] * args[9].dat->size[1] * (start[2] * args[9].stencil->stride[2] - args[9].dat->base[2] - d_m[2]); p_a[9] = (char *)args[9].data_d + base9; 
#ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[10].dat->d_m[d] + OPS_sub_dat_list[args[10].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[10].dat->d_m[d]; #endif int base10 = dat10 * 1 * (start[0] * args[10].stencil->stride[0] - args[10].dat->base[0] - d_m[0]); base10 = base10 + dat10 * args[10].dat->size[0] * (start[1] * args[10].stencil->stride[1] - args[10].dat->base[1] - d_m[1]); base10 = base10 + dat10 * args[10].dat->size[0] * args[10].dat->size[1] * (start[2] * args[10].stencil->stride[2] - args[10].dat->base[2] - d_m[2]); p_a[10] = (char *)args[10].data_d + base10; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[11].dat->d_m[d] + OPS_sub_dat_list[args[11].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[11].dat->d_m[d]; #endif int base11 = dat11 * 1 * (start[0] * args[11].stencil->stride[0] - args[11].dat->base[0] - d_m[0]); base11 = base11 + dat11 * args[11].dat->size[0] * (start[1] * args[11].stencil->stride[1] - args[11].dat->base[1] - d_m[1]); base11 = base11 + dat11 * args[11].dat->size[0] * args[11].dat->size[1] * (start[2] * args[11].stencil->stride[2] - args[11].dat->base[2] - d_m[2]); p_a[11] = (char *)args[11].data_d + base11; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[12].dat->d_m[d] + OPS_sub_dat_list[args[12].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[12].dat->d_m[d]; #endif int base12 = dat12 * 1 * (start[0] * args[12].stencil->stride[0] - args[12].dat->base[0] - d_m[0]); base12 = base12 + dat12 * args[12].dat->size[0] * (start[1] * args[12].stencil->stride[1] - args[12].dat->base[1] - d_m[1]); base12 = base12 + dat12 * args[12].dat->size[0] * args[12].dat->size[1] * (start[2] * args[12].stencil->stride[2] - args[12].dat->base[2] - d_m[2]); p_a[12] = (char *)args[12].data_d + base12; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[13].dat->d_m[d] + OPS_sub_dat_list[args[13].dat->index]->d_im[d]; #else for (int d = 0; d 
< dim; d++) d_m[d] = args[13].dat->d_m[d]; #endif int base13 = dat13 * 1 * (start[0] * args[13].stencil->stride[0] - args[13].dat->base[0] - d_m[0]); base13 = base13 + dat13 * args[13].dat->size[0] * (start[1] * args[13].stencil->stride[1] - args[13].dat->base[1] - d_m[1]); base13 = base13 + dat13 * args[13].dat->size[0] * args[13].dat->size[1] * (start[2] * args[13].stencil->stride[2] - args[13].dat->base[2] - d_m[2]); p_a[13] = (char *)args[13].data_d + base13; ops_H_D_exchanges_device(args, 14); ops_halo_exchanges(args, 14, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[37].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_calc_dt_kernel), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4], (double *)p_a[5], (double *)p_a[6], (double *)p_a[7], (double *)p_a[8], (double *)p_a[9], (double *)p_a[10], (double *)p_a[11], (double *)p_a[12], (double *)p_a[13], x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[37].time += t1 - t2; } ops_set_dirtybit_device(args, 14); ops_set_halo_dirtybit3(&args[10], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[37].mpi_time += t2 - t1; OPS_kernels[37].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[37].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[37].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[37].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[37].transfer += ops_compute_transfer(dim, start, end, &arg4); OPS_kernels[37].transfer += ops_compute_transfer(dim, start, end, &arg5); OPS_kernels[37].transfer += ops_compute_transfer(dim, start, end, &arg6); OPS_kernels[37].transfer += ops_compute_transfer(dim, start, end, &arg7); OPS_kernels[37].transfer += ops_compute_transfer(dim, 
start, end, &arg8); OPS_kernels[37].transfer += ops_compute_transfer(dim, start, end, &arg9); OPS_kernels[37].transfer += ops_compute_transfer(dim, start, end, &arg10); OPS_kernels[37].transfer += ops_compute_transfer(dim, start, end, &arg11); OPS_kernels[37].transfer += ops_compute_transfer(dim, start, end, &arg12); OPS_kernels[37].transfer += ops_compute_transfer(dim, start, end, &arg13); } }
c9e3f4256460633ea49c619367ef6a635b84421b.cu
// // auto-generated by ops.py // __constant__ int xdim0_calc_dt_kernel; int xdim0_calc_dt_kernel_h = -1; __constant__ int ydim0_calc_dt_kernel; int ydim0_calc_dt_kernel_h = -1; __constant__ int xdim1_calc_dt_kernel; int xdim1_calc_dt_kernel_h = -1; __constant__ int ydim1_calc_dt_kernel; int ydim1_calc_dt_kernel_h = -1; __constant__ int xdim2_calc_dt_kernel; int xdim2_calc_dt_kernel_h = -1; __constant__ int ydim2_calc_dt_kernel; int ydim2_calc_dt_kernel_h = -1; __constant__ int xdim3_calc_dt_kernel; int xdim3_calc_dt_kernel_h = -1; __constant__ int ydim3_calc_dt_kernel; int ydim3_calc_dt_kernel_h = -1; __constant__ int xdim4_calc_dt_kernel; int xdim4_calc_dt_kernel_h = -1; __constant__ int ydim4_calc_dt_kernel; int ydim4_calc_dt_kernel_h = -1; __constant__ int xdim5_calc_dt_kernel; int xdim5_calc_dt_kernel_h = -1; __constant__ int ydim5_calc_dt_kernel; int ydim5_calc_dt_kernel_h = -1; __constant__ int xdim6_calc_dt_kernel; int xdim6_calc_dt_kernel_h = -1; __constant__ int ydim6_calc_dt_kernel; int ydim6_calc_dt_kernel_h = -1; __constant__ int xdim7_calc_dt_kernel; int xdim7_calc_dt_kernel_h = -1; __constant__ int ydim7_calc_dt_kernel; int ydim7_calc_dt_kernel_h = -1; __constant__ int xdim8_calc_dt_kernel; int xdim8_calc_dt_kernel_h = -1; __constant__ int ydim8_calc_dt_kernel; int ydim8_calc_dt_kernel_h = -1; __constant__ int xdim9_calc_dt_kernel; int xdim9_calc_dt_kernel_h = -1; __constant__ int ydim9_calc_dt_kernel; int ydim9_calc_dt_kernel_h = -1; __constant__ int xdim10_calc_dt_kernel; int xdim10_calc_dt_kernel_h = -1; __constant__ int ydim10_calc_dt_kernel; int ydim10_calc_dt_kernel_h = -1; __constant__ int xdim11_calc_dt_kernel; int xdim11_calc_dt_kernel_h = -1; __constant__ int ydim11_calc_dt_kernel; int ydim11_calc_dt_kernel_h = -1; __constant__ int xdim12_calc_dt_kernel; int xdim12_calc_dt_kernel_h = -1; __constant__ int ydim12_calc_dt_kernel; int ydim12_calc_dt_kernel_h = -1; __constant__ int xdim13_calc_dt_kernel; int xdim13_calc_dt_kernel_h = -1; 
__constant__ int ydim13_calc_dt_kernel; int ydim13_calc_dt_kernel_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #undef OPS_ACC6 #undef OPS_ACC7 #undef OPS_ACC8 #undef OPS_ACC9 #undef OPS_ACC10 #undef OPS_ACC11 #undef OPS_ACC12 #undef OPS_ACC13 #define OPS_ACC0(x, y, z) \ (x + xdim0_calc_dt_kernel * (y) + \ xdim0_calc_dt_kernel * ydim0_calc_dt_kernel * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_calc_dt_kernel * (y) + \ xdim1_calc_dt_kernel * ydim1_calc_dt_kernel * (z)) #define OPS_ACC2(x, y, z) \ (x + xdim2_calc_dt_kernel * (y) + \ xdim2_calc_dt_kernel * ydim2_calc_dt_kernel * (z)) #define OPS_ACC3(x, y, z) \ (x + xdim3_calc_dt_kernel * (y) + \ xdim3_calc_dt_kernel * ydim3_calc_dt_kernel * (z)) #define OPS_ACC4(x, y, z) \ (x + xdim4_calc_dt_kernel * (y) + \ xdim4_calc_dt_kernel * ydim4_calc_dt_kernel * (z)) #define OPS_ACC5(x, y, z) \ (x + xdim5_calc_dt_kernel * (y) + \ xdim5_calc_dt_kernel * ydim5_calc_dt_kernel * (z)) #define OPS_ACC6(x, y, z) \ (x + xdim6_calc_dt_kernel * (y) + \ xdim6_calc_dt_kernel * ydim6_calc_dt_kernel * (z)) #define OPS_ACC7(x, y, z) \ (x + xdim7_calc_dt_kernel * (y) + \ xdim7_calc_dt_kernel * ydim7_calc_dt_kernel * (z)) #define OPS_ACC8(x, y, z) \ (x + xdim8_calc_dt_kernel * (y) + \ xdim8_calc_dt_kernel * ydim8_calc_dt_kernel * (z)) #define OPS_ACC9(x, y, z) \ (x + xdim9_calc_dt_kernel * (y) + \ xdim9_calc_dt_kernel * ydim9_calc_dt_kernel * (z)) #define OPS_ACC10(x, y, z) \ (x + xdim10_calc_dt_kernel * (y) + \ xdim10_calc_dt_kernel * ydim10_calc_dt_kernel * (z)) #define OPS_ACC11(x, y, z) \ (x + xdim11_calc_dt_kernel * (y) + \ xdim11_calc_dt_kernel * ydim11_calc_dt_kernel * (z)) #define OPS_ACC12(x, y, z) \ (x + xdim12_calc_dt_kernel * (y) + \ xdim12_calc_dt_kernel * ydim12_calc_dt_kernel * (z)) #define OPS_ACC13(x, y, z) \ (x + xdim13_calc_dt_kernel * (y) + \ xdim13_calc_dt_kernel * ydim13_calc_dt_kernel * (z)) // user function __device__ void calc_dt_kernel_gpu(const double 
*celldx, const double *celldy, const double *soundspeed, const double *viscosity, const double *density0, const double *xvel0, const double *xarea, const double *volume, const double *yvel0, const double *yarea, double *dt_min, const double *celldz, const double *zvel0, const double *zarea) { double div, ds, dtut, dtvt, dtct, dtwt, dtdivt, cc, dv1, dv2, du1, du2, dw1, dw2; ds = MIN(MIN(celldx[OPS_ACC0(0, 0, 0)], celldy[OPS_ACC1(0, 0, 0)]), celldz[OPS_ACC11(0, 0, 0)]); ds = 1.0 / (ds * ds); cc = soundspeed[OPS_ACC2(0, 0, 0)] * soundspeed[OPS_ACC2(0, 0, 0)]; cc = cc + 2.0 * viscosity[OPS_ACC3(0, 0, 0)] / density0[OPS_ACC4(0, 0, 0)]; dtct = ds * cc; dtct = dtc_safe * 1.0 / MAX(sqrt(dtct), g_small); du1 = (xvel0[OPS_ACC5(0, 0, 0)] + xvel0[OPS_ACC5(0, 1, 0)] + xvel0[OPS_ACC5(0, 0, 1)] + xvel0[OPS_ACC5(0, 1, 1)]) * xarea[OPS_ACC6(0, 0, 0)]; du2 = (xvel0[OPS_ACC5(1, 0, 0)] + xvel0[OPS_ACC5(1, 1, 0)] + xvel0[OPS_ACC5(1, 0, 1)] + xvel0[OPS_ACC5(1, 1, 1)]) * xarea[OPS_ACC6(0, 0, 0)]; dtut = dtu_safe * 4.0 * volume[OPS_ACC7(0, 0, 0)] / MAX(MAX(fabs(du1), fabs(du2)), 1.0e-5 * volume[OPS_ACC7(0, 0, 0)]); dv1 = (yvel0[OPS_ACC8(0, 0, 0)] + yvel0[OPS_ACC8(1, 0, 0)] + yvel0[OPS_ACC8(0, 0, 1)] + yvel0[OPS_ACC8(1, 0, 1)]) * yarea[OPS_ACC9(0, 0, 0)]; dv2 = (yvel0[OPS_ACC8(0, 1, 0)] + yvel0[OPS_ACC8(1, 1, 0)] + yvel0[OPS_ACC8(0, 1, 1)] + yvel0[OPS_ACC8(1, 1, 1)]) * yarea[OPS_ACC9(0, 0, 0)]; dtvt = dtv_safe * 4.0 * volume[OPS_ACC7(0, 0, 0)] / MAX(MAX(fabs(dv1), fabs(dv2)), 1.0e-5 * volume[OPS_ACC7(0, 0, 0)]); dw1 = (zvel0[OPS_ACC12(0, 0, 0)] + zvel0[OPS_ACC12(0, 1, 0)] + zvel0[OPS_ACC12(1, 0, 0)] + zvel0[OPS_ACC12(1, 1, 0)]) * zarea[OPS_ACC13(0, 0, 0)]; dw2 = (zvel0[OPS_ACC12(0, 0, 1)] + zvel0[OPS_ACC12(0, 1, 1)] + zvel0[OPS_ACC12(1, 0, 1)] + zvel0[OPS_ACC12(1, 1, 1)]) * zarea[OPS_ACC13(0, 0, 0)]; dtwt = dtw_safe * 4.0 * volume[OPS_ACC7(0, 0, 0)] / MAX(MAX(fabs(dw1), fabs(dw2)), 1.0e-5 * volume[OPS_ACC7(0, 0, 0)]); div = du2 - du1 + dv2 - dv1 + dw2 - dw1; dtdivt = dtdiv_safe * 4.0 * 
(volume[OPS_ACC7(0, 0, 0)]) / MAX(volume[OPS_ACC7(0, 0, 0)] * 1.0e-05, fabs(div)); dt_min[OPS_ACC10(0, 0, 0)] = MIN(MIN(MIN(dtct, dtut), MIN(dtvt, dtdivt)), dtwt); } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #undef OPS_ACC6 #undef OPS_ACC7 #undef OPS_ACC8 #undef OPS_ACC9 #undef OPS_ACC10 #undef OPS_ACC11 #undef OPS_ACC12 #undef OPS_ACC13 __global__ void ops_calc_dt_kernel(const double *__restrict arg0, const double *__restrict arg1, const double *__restrict arg2, const double *__restrict arg3, const double *__restrict arg4, const double *__restrict arg5, const double *__restrict arg6, const double *__restrict arg7, const double *__restrict arg8, const double *__restrict arg9, double *__restrict arg10, const double *__restrict arg11, const double *__restrict arg12, const double *__restrict arg13, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 0 * 1 * xdim0_calc_dt_kernel + idx_z * 0 * 1 * xdim0_calc_dt_kernel * ydim0_calc_dt_kernel; arg1 += idx_x * 0 * 1 + idx_y * 1 * 1 * xdim1_calc_dt_kernel + idx_z * 0 * 1 * xdim1_calc_dt_kernel * ydim1_calc_dt_kernel; arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_calc_dt_kernel + idx_z * 1 * 1 * xdim2_calc_dt_kernel * ydim2_calc_dt_kernel; arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_calc_dt_kernel + idx_z * 1 * 1 * xdim3_calc_dt_kernel * ydim3_calc_dt_kernel; arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_calc_dt_kernel + idx_z * 1 * 1 * xdim4_calc_dt_kernel * ydim4_calc_dt_kernel; arg5 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim5_calc_dt_kernel + idx_z * 1 * 1 * xdim5_calc_dt_kernel * ydim5_calc_dt_kernel; arg6 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim6_calc_dt_kernel + idx_z * 1 * 1 * xdim6_calc_dt_kernel * ydim6_calc_dt_kernel; arg7 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim7_calc_dt_kernel + idx_z * 1 * 1 * 
xdim7_calc_dt_kernel * ydim7_calc_dt_kernel; arg8 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim8_calc_dt_kernel + idx_z * 1 * 1 * xdim8_calc_dt_kernel * ydim8_calc_dt_kernel; arg9 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim9_calc_dt_kernel + idx_z * 1 * 1 * xdim9_calc_dt_kernel * ydim9_calc_dt_kernel; arg10 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim10_calc_dt_kernel + idx_z * 1 * 1 * xdim10_calc_dt_kernel * ydim10_calc_dt_kernel; arg11 += idx_x * 0 * 1 + idx_y * 0 * 1 * xdim11_calc_dt_kernel + idx_z * 1 * 1 * xdim11_calc_dt_kernel * ydim11_calc_dt_kernel; arg12 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim12_calc_dt_kernel + idx_z * 1 * 1 * xdim12_calc_dt_kernel * ydim12_calc_dt_kernel; arg13 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim13_calc_dt_kernel + idx_z * 1 * 1 * xdim13_calc_dt_kernel * ydim13_calc_dt_kernel; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { calc_dt_kernel_gpu(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13); } } // host stub function void ops_par_loop_calc_dt_kernel(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7, ops_arg arg8, ops_arg arg9, ops_arg arg10, ops_arg arg11, ops_arg arg12, ops_arg arg13) { // Timing double t1, t2, c1, c2; ops_arg args[14] = {arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 14, range, 37)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(37, "calc_dt_kernel"); OPS_kernels[37].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - 
start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; int xdim4 = args[4].dat->size[0]; int ydim4 = args[4].dat->size[1]; int xdim5 = args[5].dat->size[0]; int ydim5 = args[5].dat->size[1]; int xdim6 = args[6].dat->size[0]; int ydim6 = args[6].dat->size[1]; int xdim7 = args[7].dat->size[0]; int ydim7 = args[7].dat->size[1]; int xdim8 = args[8].dat->size[0]; int ydim8 = args[8].dat->size[1]; int xdim9 = args[9].dat->size[0]; int ydim9 = args[9].dat->size[1]; int xdim10 = args[10].dat->size[0]; int ydim10 = args[10].dat->size[1]; int xdim11 = args[11].dat->size[0]; int ydim11 = args[11].dat->size[1]; int xdim12 = args[12].dat->size[0]; int ydim12 = args[12].dat->size[1]; int xdim13 = args[13].dat->size[0]; int ydim13 = args[13].dat->size[1]; if (xdim0 != xdim0_calc_dt_kernel_h || ydim0 != ydim0_calc_dt_kernel_h || xdim1 != xdim1_calc_dt_kernel_h || ydim1 != ydim1_calc_dt_kernel_h || xdim2 != xdim2_calc_dt_kernel_h || ydim2 != ydim2_calc_dt_kernel_h || xdim3 != xdim3_calc_dt_kernel_h || ydim3 != ydim3_calc_dt_kernel_h || xdim4 != xdim4_calc_dt_kernel_h || ydim4 != ydim4_calc_dt_kernel_h || xdim5 != xdim5_calc_dt_kernel_h || ydim5 != ydim5_calc_dt_kernel_h || xdim6 
!= xdim6_calc_dt_kernel_h || ydim6 != ydim6_calc_dt_kernel_h || xdim7 != xdim7_calc_dt_kernel_h || ydim7 != ydim7_calc_dt_kernel_h || xdim8 != xdim8_calc_dt_kernel_h || ydim8 != ydim8_calc_dt_kernel_h || xdim9 != xdim9_calc_dt_kernel_h || ydim9 != ydim9_calc_dt_kernel_h || xdim10 != xdim10_calc_dt_kernel_h || ydim10 != ydim10_calc_dt_kernel_h || xdim11 != xdim11_calc_dt_kernel_h || ydim11 != ydim11_calc_dt_kernel_h || xdim12 != xdim12_calc_dt_kernel_h || ydim12 != ydim12_calc_dt_kernel_h || xdim13 != xdim13_calc_dt_kernel_h || ydim13 != ydim13_calc_dt_kernel_h) { cudaMemcpyToSymbol(xdim0_calc_dt_kernel, &xdim0, sizeof(int)); xdim0_calc_dt_kernel_h = xdim0; cudaMemcpyToSymbol(ydim0_calc_dt_kernel, &ydim0, sizeof(int)); ydim0_calc_dt_kernel_h = ydim0; cudaMemcpyToSymbol(xdim1_calc_dt_kernel, &xdim1, sizeof(int)); xdim1_calc_dt_kernel_h = xdim1; cudaMemcpyToSymbol(ydim1_calc_dt_kernel, &ydim1, sizeof(int)); ydim1_calc_dt_kernel_h = ydim1; cudaMemcpyToSymbol(xdim2_calc_dt_kernel, &xdim2, sizeof(int)); xdim2_calc_dt_kernel_h = xdim2; cudaMemcpyToSymbol(ydim2_calc_dt_kernel, &ydim2, sizeof(int)); ydim2_calc_dt_kernel_h = ydim2; cudaMemcpyToSymbol(xdim3_calc_dt_kernel, &xdim3, sizeof(int)); xdim3_calc_dt_kernel_h = xdim3; cudaMemcpyToSymbol(ydim3_calc_dt_kernel, &ydim3, sizeof(int)); ydim3_calc_dt_kernel_h = ydim3; cudaMemcpyToSymbol(xdim4_calc_dt_kernel, &xdim4, sizeof(int)); xdim4_calc_dt_kernel_h = xdim4; cudaMemcpyToSymbol(ydim4_calc_dt_kernel, &ydim4, sizeof(int)); ydim4_calc_dt_kernel_h = ydim4; cudaMemcpyToSymbol(xdim5_calc_dt_kernel, &xdim5, sizeof(int)); xdim5_calc_dt_kernel_h = xdim5; cudaMemcpyToSymbol(ydim5_calc_dt_kernel, &ydim5, sizeof(int)); ydim5_calc_dt_kernel_h = ydim5; cudaMemcpyToSymbol(xdim6_calc_dt_kernel, &xdim6, sizeof(int)); xdim6_calc_dt_kernel_h = xdim6; cudaMemcpyToSymbol(ydim6_calc_dt_kernel, &ydim6, sizeof(int)); ydim6_calc_dt_kernel_h = ydim6; cudaMemcpyToSymbol(xdim7_calc_dt_kernel, &xdim7, sizeof(int)); xdim7_calc_dt_kernel_h = xdim7; 
cudaMemcpyToSymbol(ydim7_calc_dt_kernel, &ydim7, sizeof(int)); ydim7_calc_dt_kernel_h = ydim7; cudaMemcpyToSymbol(xdim8_calc_dt_kernel, &xdim8, sizeof(int)); xdim8_calc_dt_kernel_h = xdim8; cudaMemcpyToSymbol(ydim8_calc_dt_kernel, &ydim8, sizeof(int)); ydim8_calc_dt_kernel_h = ydim8; cudaMemcpyToSymbol(xdim9_calc_dt_kernel, &xdim9, sizeof(int)); xdim9_calc_dt_kernel_h = xdim9; cudaMemcpyToSymbol(ydim9_calc_dt_kernel, &ydim9, sizeof(int)); ydim9_calc_dt_kernel_h = ydim9; cudaMemcpyToSymbol(xdim10_calc_dt_kernel, &xdim10, sizeof(int)); xdim10_calc_dt_kernel_h = xdim10; cudaMemcpyToSymbol(ydim10_calc_dt_kernel, &ydim10, sizeof(int)); ydim10_calc_dt_kernel_h = ydim10; cudaMemcpyToSymbol(xdim11_calc_dt_kernel, &xdim11, sizeof(int)); xdim11_calc_dt_kernel_h = xdim11; cudaMemcpyToSymbol(ydim11_calc_dt_kernel, &ydim11, sizeof(int)); ydim11_calc_dt_kernel_h = ydim11; cudaMemcpyToSymbol(xdim12_calc_dt_kernel, &xdim12, sizeof(int)); xdim12_calc_dt_kernel_h = xdim12; cudaMemcpyToSymbol(ydim12_calc_dt_kernel, &ydim12, sizeof(int)); ydim12_calc_dt_kernel_h = ydim12; cudaMemcpyToSymbol(xdim13_calc_dt_kernel, &xdim13, sizeof(int)); xdim13_calc_dt_kernel_h = xdim13; cudaMemcpyToSymbol(ydim13_calc_dt_kernel, &ydim13, sizeof(int)); ydim13_calc_dt_kernel_h = ydim13; } dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; int dat2 = args[2].dat->elem_size; int dat3 = args[3].dat->elem_size; int dat4 = args[4].dat->elem_size; int dat5 = args[5].dat->elem_size; int dat6 = args[6].dat->elem_size; int dat7 = args[7].dat->elem_size; int dat8 = args[8].dat->elem_size; int dat9 = args[9].dat->elem_size; int dat10 = args[10].dat->elem_size; int dat11 = args[11].dat->elem_size; int dat12 = args[12].dat->elem_size; int dat13 = args[13].dat->elem_size; char *p_a[14]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int 
d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d]; #endif int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] - args[2].dat->base[0] - d_m[0]); base2 = base2 + dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] - args[2].dat->base[1] - d_m[1]); base2 = base2 + dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] - d_m[2]); p_a[2] = (char *)args[2].data_d + base2; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d]; #endif int base3 = dat3 * 1 * (start[0] * 
args[3].stencil->stride[0] - args[3].dat->base[0] - d_m[0]); base3 = base3 + dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] - args[3].dat->base[1] - d_m[1]); base3 = base3 + dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] - d_m[2]); p_a[3] = (char *)args[3].data_d + base3; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d]; #endif int base4 = dat4 * 1 * (start[0] * args[4].stencil->stride[0] - args[4].dat->base[0] - d_m[0]); base4 = base4 + dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1] - args[4].dat->base[1] - d_m[1]); base4 = base4 + dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2] - args[4].dat->base[2] - d_m[2]); p_a[4] = (char *)args[4].data_d + base4; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d] + OPS_sub_dat_list[args[5].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d]; #endif int base5 = dat5 * 1 * (start[0] * args[5].stencil->stride[0] - args[5].dat->base[0] - d_m[0]); base5 = base5 + dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1] - args[5].dat->base[1] - d_m[1]); base5 = base5 + dat5 * args[5].dat->size[0] * args[5].dat->size[1] * (start[2] * args[5].stencil->stride[2] - args[5].dat->base[2] - d_m[2]); p_a[5] = (char *)args[5].data_d + base5; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d] + OPS_sub_dat_list[args[6].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d]; #endif int base6 = dat6 * 1 * (start[0] * args[6].stencil->stride[0] - args[6].dat->base[0] - d_m[0]); base6 = base6 + dat6 * args[6].dat->size[0] * (start[1] * args[6].stencil->stride[1] - args[6].dat->base[1] - d_m[1]); base6 = base6 + dat6 * 
args[6].dat->size[0] * args[6].dat->size[1] * (start[2] * args[6].stencil->stride[2] - args[6].dat->base[2] - d_m[2]); p_a[6] = (char *)args[6].data_d + base6; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[7].dat->d_m[d] + OPS_sub_dat_list[args[7].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[7].dat->d_m[d]; #endif int base7 = dat7 * 1 * (start[0] * args[7].stencil->stride[0] - args[7].dat->base[0] - d_m[0]); base7 = base7 + dat7 * args[7].dat->size[0] * (start[1] * args[7].stencil->stride[1] - args[7].dat->base[1] - d_m[1]); base7 = base7 + dat7 * args[7].dat->size[0] * args[7].dat->size[1] * (start[2] * args[7].stencil->stride[2] - args[7].dat->base[2] - d_m[2]); p_a[7] = (char *)args[7].data_d + base7; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[8].dat->d_m[d] + OPS_sub_dat_list[args[8].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[8].dat->d_m[d]; #endif int base8 = dat8 * 1 * (start[0] * args[8].stencil->stride[0] - args[8].dat->base[0] - d_m[0]); base8 = base8 + dat8 * args[8].dat->size[0] * (start[1] * args[8].stencil->stride[1] - args[8].dat->base[1] - d_m[1]); base8 = base8 + dat8 * args[8].dat->size[0] * args[8].dat->size[1] * (start[2] * args[8].stencil->stride[2] - args[8].dat->base[2] - d_m[2]); p_a[8] = (char *)args[8].data_d + base8; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[9].dat->d_m[d] + OPS_sub_dat_list[args[9].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[9].dat->d_m[d]; #endif int base9 = dat9 * 1 * (start[0] * args[9].stencil->stride[0] - args[9].dat->base[0] - d_m[0]); base9 = base9 + dat9 * args[9].dat->size[0] * (start[1] * args[9].stencil->stride[1] - args[9].dat->base[1] - d_m[1]); base9 = base9 + dat9 * args[9].dat->size[0] * args[9].dat->size[1] * (start[2] * args[9].stencil->stride[2] - args[9].dat->base[2] - d_m[2]); p_a[9] = (char *)args[9].data_d + base9; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = 
args[10].dat->d_m[d] + OPS_sub_dat_list[args[10].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[10].dat->d_m[d]; #endif int base10 = dat10 * 1 * (start[0] * args[10].stencil->stride[0] - args[10].dat->base[0] - d_m[0]); base10 = base10 + dat10 * args[10].dat->size[0] * (start[1] * args[10].stencil->stride[1] - args[10].dat->base[1] - d_m[1]); base10 = base10 + dat10 * args[10].dat->size[0] * args[10].dat->size[1] * (start[2] * args[10].stencil->stride[2] - args[10].dat->base[2] - d_m[2]); p_a[10] = (char *)args[10].data_d + base10; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[11].dat->d_m[d] + OPS_sub_dat_list[args[11].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[11].dat->d_m[d]; #endif int base11 = dat11 * 1 * (start[0] * args[11].stencil->stride[0] - args[11].dat->base[0] - d_m[0]); base11 = base11 + dat11 * args[11].dat->size[0] * (start[1] * args[11].stencil->stride[1] - args[11].dat->base[1] - d_m[1]); base11 = base11 + dat11 * args[11].dat->size[0] * args[11].dat->size[1] * (start[2] * args[11].stencil->stride[2] - args[11].dat->base[2] - d_m[2]); p_a[11] = (char *)args[11].data_d + base11; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[12].dat->d_m[d] + OPS_sub_dat_list[args[12].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[12].dat->d_m[d]; #endif int base12 = dat12 * 1 * (start[0] * args[12].stencil->stride[0] - args[12].dat->base[0] - d_m[0]); base12 = base12 + dat12 * args[12].dat->size[0] * (start[1] * args[12].stencil->stride[1] - args[12].dat->base[1] - d_m[1]); base12 = base12 + dat12 * args[12].dat->size[0] * args[12].dat->size[1] * (start[2] * args[12].stencil->stride[2] - args[12].dat->base[2] - d_m[2]); p_a[12] = (char *)args[12].data_d + base12; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[13].dat->d_m[d] + OPS_sub_dat_list[args[13].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[13].dat->d_m[d]; #endif int 
base13 = dat13 * 1 * (start[0] * args[13].stencil->stride[0] - args[13].dat->base[0] - d_m[0]); base13 = base13 + dat13 * args[13].dat->size[0] * (start[1] * args[13].stencil->stride[1] - args[13].dat->base[1] - d_m[1]); base13 = base13 + dat13 * args[13].dat->size[0] * args[13].dat->size[1] * (start[2] * args[13].stencil->stride[2] - args[13].dat->base[2] - d_m[2]); p_a[13] = (char *)args[13].data_d + base13; ops_H_D_exchanges_device(args, 14); ops_halo_exchanges(args, 14, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[37].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data ops_calc_dt_kernel<<<grid, tblock>>>( (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4], (double *)p_a[5], (double *)p_a[6], (double *)p_a[7], (double *)p_a[8], (double *)p_a[9], (double *)p_a[10], (double *)p_a[11], (double *)p_a[12], (double *)p_a[13], x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[37].time += t1 - t2; } ops_set_dirtybit_device(args, 14); ops_set_halo_dirtybit3(&args[10], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[37].mpi_time += t2 - t1; OPS_kernels[37].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[37].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[37].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[37].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[37].transfer += ops_compute_transfer(dim, start, end, &arg4); OPS_kernels[37].transfer += ops_compute_transfer(dim, start, end, &arg5); OPS_kernels[37].transfer += ops_compute_transfer(dim, start, end, &arg6); OPS_kernels[37].transfer += ops_compute_transfer(dim, start, end, &arg7); OPS_kernels[37].transfer += ops_compute_transfer(dim, start, end, &arg8); OPS_kernels[37].transfer += ops_compute_transfer(dim, start, end, 
&arg9); OPS_kernels[37].transfer += ops_compute_transfer(dim, start, end, &arg10); OPS_kernels[37].transfer += ops_compute_transfer(dim, start, end, &arg11); OPS_kernels[37].transfer += ops_compute_transfer(dim, start, end, &arg12); OPS_kernels[37].transfer += ops_compute_transfer(dim, start, end, &arg13); } }
9eb5e7536770e8a1cbebc4acf9bba00e75531d47.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * Kernels.cu is the implementation of the kernels
 */
#include "kernels.h"

// Block-wise sum reduction over signal_d.
// Each block sums its blockDim.x-sized slice of signal_d (out-of-range slots
// padded with 0, the additive identity) and writes the partial sum back to
// signal_d[blockIdx.x]. Requires blockDim.x * sizeof(int) bytes of dynamic
// shared memory (obtained via the SharedMemory<int> helper from kernels.h).
//
// FIX: the original tree loop (s = blockDim.x/2; s > 0; s >>= 1) assumed
// blockDim.x is a power of two and silently dropped elements otherwise
// (e.g. blockDim.x == 7 lost smem[2] at the s == 3 step). The ceil-half
// fold below performs the identical sequence of adds for power-of-two
// blocks and is correct for any block size.
__global__ void reduction(int* signal_d, int n)
{
    int* smem = SharedMemory<int>();

    // load shared memory
    int tidx = threadIdx.x;
    int idx  = blockIdx.x * blockDim.x + tidx;
    smem[tidx] = (idx < n) ? signal_d[idx] : 0;
    __syncthreads();

    // perform reduction: fold the upper ceil(active/2) elements onto the
    // lower half until one element remains
    for (unsigned int active = blockDim.x; active > 1; ) {
        unsigned int half = (active + 1) >> 1;  // ceil(active / 2)
        if (tidx + half < active) {
            smem[tidx] += smem[tidx + half];
        }
        __syncthreads();
        active = half;
    }

    // assign the result
    if (tidx == 0) {
        signal_d[blockIdx.x] = smem[0];
    }
    return;
}
9eb5e7536770e8a1cbebc4acf9bba00e75531d47.cu
/*
 * Kernels.cu is the implementation of the kernels
 */
#include "kernels.h"

// Block-wise sum reduction over signal_d.
// Each block sums its blockDim.x-sized slice of signal_d (out-of-range slots
// padded with 0, the additive identity) and writes the partial sum back to
// signal_d[blockIdx.x]. Requires blockDim.x * sizeof(int) bytes of dynamic
// shared memory (obtained via the SharedMemory<int> helper from kernels.h).
//
// FIX: the original tree loop (s = blockDim.x/2; s > 0; s >>= 1) assumed
// blockDim.x is a power of two and silently dropped elements otherwise
// (e.g. blockDim.x == 7 lost smem[2] at the s == 3 step). The ceil-half
// fold below performs the identical sequence of adds for power-of-two
// blocks and is correct for any block size.
__global__ void reduction(int* signal_d, int n)
{
    int* smem = SharedMemory<int>();

    // load shared memory
    int tidx = threadIdx.x;
    int idx  = blockIdx.x * blockDim.x + tidx;
    smem[tidx] = (idx < n) ? signal_d[idx] : 0;
    __syncthreads();

    // perform reduction: fold the upper ceil(active/2) elements onto the
    // lower half until one element remains
    for (unsigned int active = blockDim.x; active > 1; ) {
        unsigned int half = (active + 1) >> 1;  // ceil(active / 2)
        if (tidx + half < active) {
            smem[tidx] += smem[tidx + half];
        }
        __syncthreads();
        active = half;
    }

    // assign the result
    if (tidx == 0) {
        signal_d[blockIdx.x] = smem[0];
    }
    return;
}
08bf2c1e7354803262282f8c8b716f8e7f765121.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "utils.h"

#ifndef assert
#define assert(e)  \
    if (!(e)) { \
        printf("failed assertion `%s'\n", #e); \
        THError("aborting..."); \
    };
#endif

// FIX: the original macros expanded to a bare ternary with no outer
// parentheses, which mis-parses when used inside a larger expression
// (e.g. MAX(a,b) * 2). All existing uses are plain assignments, so adding
// the outer parens is behavior-preserving.
#define MIN(a,b) ((a) < (b) ? (a) : (b))
#define MAX(a,b) ((a) > (b) ? (a) : (b))

// Forward max-pooling over data laid out as (batch, height, width, planes)
// — consistent with the BHWD in the module name and the indexing below.
// Launch shape (from updateOutput): grid (outsize1, outsize2, ceil(bs/4)),
// block (32, 1, 4). Each (blockIdx.x, blockIdx.y, bidx) handles one output
// pixel of one batch element; threadIdx.x strides over the feature planes.
__global__ void maxPool(float *ptrinput, float *ptroutput,
                        const int isize1, const int isize2,
                        const int outsize1, const int outsize2,
                        const int nOutputPlane,
                        const int poolH, const int poolW,
                        const int pooldH, const int pooldW,
                        const int batchsize)
{
  // each thread does a pixel of the output
  const int pixi = blockIdx.x;
  const int pixj = blockIdx.y;
  const int bidx = blockIdx.z*blockDim.z+threadIdx.z;
  if(bidx>=batchsize) return;
  int i,j,k;

  // move pointers to the top-left corner of this output pixel's pooling
  // window (input) and to the output pixel itself
  ptrinput  += (pixi * pooldH * isize2 + pixj * pooldW) * nOutputPlane + bidx*isize1*isize2*nOutputPlane;
  ptroutput += (pixi * outsize2 + pixj) * nOutputPlane + bidx*outsize1*outsize2*nOutputPlane;

  const int stridej = nOutputPlane;                  // step one column right
  const int stridei = (isize2 - poolW) * nOutputPlane; // wrap to next window row
//  const int stridek = (isize1 - poolH) * isize2 * nOutputPlane;

  float * ptrinputsave = ptrinput;

  for(k=threadIdx.x; k<nOutputPlane; k+=blockDim.x) {
    // -2e38 is a sentinel below any representable input value
    float out=-2e38;
    for(i=0; i<poolH; i++) {
      for(j=0; j<poolW; j++) {
        out=MAX(out, ptrinput[k]);
        ptrinput += stridej;
      }
      ptrinput += stridei;
    }
    ptroutput[k]=out;
    ptrinput =ptrinputsave;
  }
}

// Backward pass of the max-pooling above.
// This one is a bit tricky: when pooling windows overlap, the gradients of
// all windows containing an input pixel must be summed — so each block
// handles one *input* pixel: (1) find which outputs cover it, (2) for each
// covering window whose max equals this pixel, accumulate its gradient.
// Launch shape (from updateGradInput): grid (isize1, isize2, ceil(bs/8)),
// block (32, 1, 8); blockDim.x == 32 == warp size so all threads with the
// same threadIdx.z form exactly one warp — required by the __shfl broadcast
// below (lane 0 reads the shared window bounds, then broadcasts them).
__global__ void maxPoolBackward(float *ptrinput, float *ptroutput,
                                float *ptrgradinput, float *ptrgradoutput,
                                const int isize1, const int isize2,
                                const int outsize1, const int outsize2,
                                const int nOutputPlane,
                                const int poolH, const int poolW,
                                const int pooldH, const int pooldW,
                                const int batchsize)
{
  const int pixi = blockIdx.x;
  const int pixj = blockIdx.y;
  const int bidx = blockIdx.z*blockDim.z+threadIdx.z;

  __shared__ int _imin, _jmin, _imax, _jmax;
  int imin, jmin, imax, jmax;

  // z==0 computes the [imin..imax] x [jmin..jmax] range of output pixels
  // whose pooling windows contain input pixel (pixi, pixj); lane 0 stashes
  // the bounds in shared memory for the other z-slices
  if(threadIdx.z==0) {
    imin=(pixi - (poolH - 1) + (pooldH -1))/pooldH > 0 ? (pixi - (poolH - 1) + (pooldH -1))/pooldH : 0 ;
    jmin=(pixj - (poolW - 1) + (pooldW -1))/pooldW > 0 ? (pixj - (poolW - 1) + (pooldW -1))/pooldW : 0 ;
    imax= pixi / pooldH < outsize1 ? pixi / pooldH : outsize1 - 1 ;
    jmax= pixj / pooldW < outsize2 ? pixj / pooldW : outsize2 - 1 ;
    if(threadIdx.x==0) {
      _imin=imin;
      _jmin=jmin;
      _imax=imax;
      _jmax=jmax;
    }
  }
  __syncthreads();
  if(bidx>=batchsize) return;
  if(threadIdx.z>0) {
    // lane 0 loads from shared memory, then broadcasts to its warp
    if(threadIdx.x==0) {
      imin=_imin;
      jmin=_jmin;
      imax=_imax;
      jmax=_jmax;
    }
    imin=__shfl(imin,0);
    jmin=__shfl(jmin,0);
    imax=__shfl(imax,0);
    jmax=__shfl(jmax,0);
  }

  int i,j,k;

  // move pointers: input/gradInput to this pixel, output/gradOutput to the
  // first covering window (imin, jmin)
  ptrinput      += (pixi * isize2 + pixj) * nOutputPlane + bidx*isize1*isize2*nOutputPlane ;
  ptrgradinput  += (pixi * isize2 + pixj) * nOutputPlane + bidx*isize1*isize2*nOutputPlane ;
  ptroutput     += (imin * outsize2 + jmin) * nOutputPlane + bidx*outsize1*outsize2*nOutputPlane ;
  ptrgradoutput += (imin * outsize2 + jmin) * nOutputPlane + bidx*outsize1*outsize2*nOutputPlane ;

  float * ptroutputsave = ptroutput;
  float * ptrgradoutputsave = ptrgradoutput;

  const int stridej = nOutputPlane;
  const int stridei = (outsize2 -jmax+jmin-1) * nOutputPlane;
//  const int stridek = (imax+imin-1 ) * outsize2 * nOutputPlane; // this one just brings the pointer back to where it was...

  for(k=threadIdx.x; k<nOutputPlane; k+=blockDim.x) {
    float pixvalue=ptrinput[k];
    float gradinputvalue=0;
    for(i=imin; i<imax+1; i++) {
      for(j=jmin; j<jmax+1; j++) {
        float out=ptroutput[k];
        // this pixel was the max of that window: route its gradient here
        if(pixvalue==out) {
          gradinputvalue += ptrgradoutput[k];
        }
        ptroutput += stridej;
        ptrgradoutput += stridej;
      }
      ptroutput += stridei;
      ptrgradoutput += stridei;
    }
    ptrgradinput[k]=gradinputvalue;
    ptroutput = ptroutputsave;
    ptrgradoutput = ptrgradoutputsave;
  }
}

// Lua entry point: module:updateOutput(input).
// Reads poolW/poolH/dW/dH from the module table, resizes `output` to
// (bs, outsize1, outsize2, planes) and launches maxPool.
static int cunxn_SpatialMaxPooling_updateOutput(lua_State *L)
{
  THCState *state = getCutorchState(L);
  THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
  THCudaTensor *output = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor");
  long poolW = luaT_getfieldcheckint(L, 1, "poolW");
  long poolH = luaT_getfieldcheckint(L, 1, "poolH");
  long dW = luaT_getfieldcheckint(L, 1, "dW");
  long dH = luaT_getfieldcheckint(L, 1, "dH");

  // input should be contiguous already but... well.
  input = THCudaTensor_newContiguous(state, input);

  // find the size of kernelslices
  long bs     = input->size[0];
  long isize1 = input->size[1];
  long isize2 = input->size[2];
  long isize3 = input->size[3];
  //assert(isize3%32 == 0);
  long outsize1 = (isize1 - poolH) / dH + 1;
  long outsize2 = (isize2 - poolW) / dW + 1;

  THCudaTensor_resize4d(state, output, bs, outsize1, outsize2, isize3);
  float* ptroutput = THCudaTensor_data(state, output);
  float* ptrinput  = THCudaTensor_data(state, input);

  // cuda blocks & threads: one block per output pixel, 4 batch items per block
  dim3 blocks (outsize1, outsize2, (bs+3)/4);
  dim3 threads (32,1,4);

  hipLaunchKernelGGL(( maxPool), dim3(blocks),dim3(threads), 0, 0, ptrinput, ptroutput, isize1, isize2, outsize1, outsize2, isize3, poolH, poolW, dH, dW, bs);

  // check for errors (launch-time only; execution errors surface later)
  hipError_t err = hipGetLastError();
  if (err != hipSuccess) {
    printf("error in maxPool: %s\n", hipGetErrorString(err));
    THError("aborting");
  }

  // final cut: release the contiguous copy
  THCudaTensor_free(state, input);
  //THCudaTensor_select(output, NULL, dimension, 0);
  return 1;
}

// Lua entry point: module:updateGradInput(input, gradOutput).
// Resizes `gradInput` like `input` and launches maxPoolBackward.
static int cunxn_SpatialMaxPooling_updateGradInput(lua_State *L)
{
  THCState *state = getCutorchState(L);
  THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
  THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor");
  THCudaTensor *gradInput = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor");
  THCudaTensor *output = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor");
  int dW = luaT_getfieldcheckint(L, 1, "dW");
  int dH = luaT_getfieldcheckint(L, 1, "dH");
  long poolW = luaT_getfieldcheckint(L, 1, "poolW");
  long poolH = luaT_getfieldcheckint(L, 1, "poolH");

  long bs     = input->size[0];
  long isize1 = input->size[1];
  long isize2 = input->size[2];
  long isize3 = input->size[3];
  long outsize1 = output->size[1];
  long outsize2 = output->size[2];

  THCudaTensor_resizeAs(state, gradInput, input);

  // one block per *input* pixel, 8 batch items per block
  dim3 blocks (isize1, isize2, (bs+7)/8);
  dim3 threads (32,1,8);

  float* ptroutput     = THCudaTensor_data(state, output);
  float* ptrinput      = THCudaTensor_data(state, input);
  float* ptrgradoutput = THCudaTensor_data(state, gradOutput);
  float* ptrgradinput  = THCudaTensor_data(state, gradInput);

  hipLaunchKernelGGL(( maxPoolBackward) , dim3(blocks),dim3(threads), 0, 0, ptrinput, ptroutput, ptrgradinput, ptrgradoutput, isize1, isize2, outsize1, outsize2, isize3, poolH, poolW, dH, dW, bs);

  // check for errors
  hipError_t err = hipGetLastError();
  if (err != hipSuccess) {
    printf("error in maxPoolBackward: %s\n", hipGetErrorString(err));
    THError("aborting");
  }
  return 1;
}

static const struct luaL_Reg cunxn_SpatialMaxPoolingBHWD__ [] = {
  {"SpatialMaxPoolingBHWD_updateOutput", cunxn_SpatialMaxPooling_updateOutput},
  {"SpatialMaxPoolingBHWD_updateGradInput", cunxn_SpatialMaxPooling_updateGradInput},
  {NULL, NULL}
};

// Registers the two entry points on torch.CudaTensor under the "nn" name.
static void cunxn_SpatialMaxPoolingBHWD_init(lua_State *L)
{
  luaT_pushmetatable(L, "torch.CudaTensor");
  luaT_registeratname(L, cunxn_SpatialMaxPoolingBHWD__, "nn");
  lua_pop(L,1);
}
08bf2c1e7354803262282f8c8b716f8e7f765121.cu
#include "utils.h"

#ifndef assert
#define assert(e)  \
    if (!(e)) { \
        printf("failed assertion `%s'\n", #e); \
        THError("aborting..."); \
    };
#endif

// FIX: the original macros expanded to a bare ternary with no outer
// parentheses, which mis-parses when used inside a larger expression
// (e.g. MAX(a,b) * 2). All existing uses are plain assignments, so adding
// the outer parens is behavior-preserving.
#define MIN(a,b) ((a) < (b) ? (a) : (b))
#define MAX(a,b) ((a) > (b) ? (a) : (b))

// Forward max-pooling over data laid out as (batch, height, width, planes)
// — consistent with the BHWD in the module name and the indexing below.
// Launch shape (from updateOutput): grid (outsize1, outsize2, ceil(bs/4)),
// block (32, 1, 4). Each (blockIdx.x, blockIdx.y, bidx) handles one output
// pixel of one batch element; threadIdx.x strides over the feature planes.
__global__ void maxPool(float *ptrinput, float *ptroutput,
                        const int isize1, const int isize2,
                        const int outsize1, const int outsize2,
                        const int nOutputPlane,
                        const int poolH, const int poolW,
                        const int pooldH, const int pooldW,
                        const int batchsize)
{
  // each thread does a pixel of the output
  const int pixi = blockIdx.x;
  const int pixj = blockIdx.y;
  const int bidx = blockIdx.z*blockDim.z+threadIdx.z;
  if(bidx>=batchsize) return;
  int i,j,k;

  // move pointers to the top-left corner of this output pixel's pooling
  // window (input) and to the output pixel itself
  ptrinput  += (pixi * pooldH * isize2 + pixj * pooldW) * nOutputPlane + bidx*isize1*isize2*nOutputPlane;
  ptroutput += (pixi * outsize2 + pixj) * nOutputPlane + bidx*outsize1*outsize2*nOutputPlane;

  const int stridej = nOutputPlane;                  // step one column right
  const int stridei = (isize2 - poolW) * nOutputPlane; // wrap to next window row
//  const int stridek = (isize1 - poolH) * isize2 * nOutputPlane;

  float * ptrinputsave = ptrinput;

  for(k=threadIdx.x; k<nOutputPlane; k+=blockDim.x) {
    // -2e38 is a sentinel below any representable input value
    float out=-2e38;
    for(i=0; i<poolH; i++) {
      for(j=0; j<poolW; j++) {
        out=MAX(out, ptrinput[k]);
        ptrinput += stridej;
      }
      ptrinput += stridei;
    }
    ptroutput[k]=out;
    ptrinput =ptrinputsave;
  }
}

// Backward pass of the max-pooling above.
// This one is a bit tricky: when pooling windows overlap, the gradients of
// all windows containing an input pixel must be summed — so each block
// handles one *input* pixel: (1) find which outputs cover it, (2) for each
// covering window whose max equals this pixel, accumulate its gradient.
// Launch shape (from updateGradInput): grid (isize1, isize2, ceil(bs/8)),
// block (32, 1, 8); blockDim.x == 32 == warp size so all threads with the
// same threadIdx.z form exactly one warp — required by the __shfl broadcast
// below (lane 0 reads the shared window bounds, then broadcasts them).
// NOTE(review): the mask-less __shfl intrinsic was removed for sm_70+
// (Volta and later require __shfl_sync with an explicit lane mask) —
// confirm the target architectures before building with a newer toolkit.
__global__ void maxPoolBackward(float *ptrinput, float *ptroutput,
                                float *ptrgradinput, float *ptrgradoutput,
                                const int isize1, const int isize2,
                                const int outsize1, const int outsize2,
                                const int nOutputPlane,
                                const int poolH, const int poolW,
                                const int pooldH, const int pooldW,
                                const int batchsize)
{
  const int pixi = blockIdx.x;
  const int pixj = blockIdx.y;
  const int bidx = blockIdx.z*blockDim.z+threadIdx.z;

  __shared__ int _imin, _jmin, _imax, _jmax;
  int imin, jmin, imax, jmax;

  // z==0 computes the [imin..imax] x [jmin..jmax] range of output pixels
  // whose pooling windows contain input pixel (pixi, pixj); lane 0 stashes
  // the bounds in shared memory for the other z-slices
  if(threadIdx.z==0) {
    imin=(pixi - (poolH - 1) + (pooldH -1))/pooldH > 0 ? (pixi - (poolH - 1) + (pooldH -1))/pooldH : 0 ;
    jmin=(pixj - (poolW - 1) + (pooldW -1))/pooldW > 0 ? (pixj - (poolW - 1) + (pooldW -1))/pooldW : 0 ;
    imax= pixi / pooldH < outsize1 ? pixi / pooldH : outsize1 - 1 ;
    jmax= pixj / pooldW < outsize2 ? pixj / pooldW : outsize2 - 1 ;
    if(threadIdx.x==0) {
      _imin=imin;
      _jmin=jmin;
      _imax=imax;
      _jmax=jmax;
    }
  }
  __syncthreads();
  if(bidx>=batchsize) return;
  if(threadIdx.z>0) {
    // lane 0 loads from shared memory, then broadcasts to its warp
    if(threadIdx.x==0) {
      imin=_imin;
      jmin=_jmin;
      imax=_imax;
      jmax=_jmax;
    }
    imin=__shfl(imin,0);
    jmin=__shfl(jmin,0);
    imax=__shfl(imax,0);
    jmax=__shfl(jmax,0);
  }

  int i,j,k;

  // move pointers: input/gradInput to this pixel, output/gradOutput to the
  // first covering window (imin, jmin)
  ptrinput      += (pixi * isize2 + pixj) * nOutputPlane + bidx*isize1*isize2*nOutputPlane ;
  ptrgradinput  += (pixi * isize2 + pixj) * nOutputPlane + bidx*isize1*isize2*nOutputPlane ;
  ptroutput     += (imin * outsize2 + jmin) * nOutputPlane + bidx*outsize1*outsize2*nOutputPlane ;
  ptrgradoutput += (imin * outsize2 + jmin) * nOutputPlane + bidx*outsize1*outsize2*nOutputPlane ;

  float * ptroutputsave = ptroutput;
  float * ptrgradoutputsave = ptrgradoutput;

  const int stridej = nOutputPlane;
  const int stridei = (outsize2 -jmax+jmin-1) * nOutputPlane;
//  const int stridek = (imax+imin-1 ) * outsize2 * nOutputPlane; // this one just brings the pointer back to where it was...

  for(k=threadIdx.x; k<nOutputPlane; k+=blockDim.x) {
    float pixvalue=ptrinput[k];
    float gradinputvalue=0;
    for(i=imin; i<imax+1; i++) {
      for(j=jmin; j<jmax+1; j++) {
        float out=ptroutput[k];
        // this pixel was the max of that window: route its gradient here
        if(pixvalue==out) {
          gradinputvalue += ptrgradoutput[k];
        }
        ptroutput += stridej;
        ptrgradoutput += stridej;
      }
      ptroutput += stridei;
      ptrgradoutput += stridei;
    }
    ptrgradinput[k]=gradinputvalue;
    ptroutput = ptroutputsave;
    ptrgradoutput = ptrgradoutputsave;
  }
}

// Lua entry point: module:updateOutput(input).
// Reads poolW/poolH/dW/dH from the module table, resizes `output` to
// (bs, outsize1, outsize2, planes) and launches maxPool.
static int cunxn_SpatialMaxPooling_updateOutput(lua_State *L)
{
  THCState *state = getCutorchState(L);
  THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
  THCudaTensor *output = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor");
  long poolW = luaT_getfieldcheckint(L, 1, "poolW");
  long poolH = luaT_getfieldcheckint(L, 1, "poolH");
  long dW = luaT_getfieldcheckint(L, 1, "dW");
  long dH = luaT_getfieldcheckint(L, 1, "dH");

  // input should be contiguous already but... well.
  input = THCudaTensor_newContiguous(state, input);

  // find the size of kernelslices
  long bs     = input->size[0];
  long isize1 = input->size[1];
  long isize2 = input->size[2];
  long isize3 = input->size[3];
  //assert(isize3%32 == 0);
  long outsize1 = (isize1 - poolH) / dH + 1;
  long outsize2 = (isize2 - poolW) / dW + 1;

  THCudaTensor_resize4d(state, output, bs, outsize1, outsize2, isize3);
  float* ptroutput = THCudaTensor_data(state, output);
  float* ptrinput  = THCudaTensor_data(state, input);

  // cuda blocks & threads: one block per output pixel, 4 batch items per block
  dim3 blocks (outsize1, outsize2, (bs+3)/4);
  dim3 threads (32,1,4);

  maxPool<<<blocks,threads>>>(ptrinput, ptroutput, isize1, isize2, outsize1, outsize2, isize3, poolH, poolW, dH, dW, bs);

  // check for errors (launch-time only; execution errors surface later)
  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) {
    printf("error in maxPool: %s\n", cudaGetErrorString(err));
    THError("aborting");
  }

  // final cut: release the contiguous copy
  THCudaTensor_free(state, input);
  //THCudaTensor_select(output, NULL, dimension, 0);
  return 1;
}

// Lua entry point: module:updateGradInput(input, gradOutput).
// Resizes `gradInput` like `input` and launches maxPoolBackward.
static int cunxn_SpatialMaxPooling_updateGradInput(lua_State *L)
{
  THCState *state = getCutorchState(L);
  THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
  THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor");
  THCudaTensor *gradInput = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor");
  THCudaTensor *output = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor");
  int dW = luaT_getfieldcheckint(L, 1, "dW");
  int dH = luaT_getfieldcheckint(L, 1, "dH");
  long poolW = luaT_getfieldcheckint(L, 1, "poolW");
  long poolH = luaT_getfieldcheckint(L, 1, "poolH");

  long bs     = input->size[0];
  long isize1 = input->size[1];
  long isize2 = input->size[2];
  long isize3 = input->size[3];
  long outsize1 = output->size[1];
  long outsize2 = output->size[2];

  THCudaTensor_resizeAs(state, gradInput, input);

  // one block per *input* pixel, 8 batch items per block
  dim3 blocks (isize1, isize2, (bs+7)/8);
  dim3 threads (32,1,8);

  float* ptroutput     = THCudaTensor_data(state, output);
  float* ptrinput      = THCudaTensor_data(state, input);
  float* ptrgradoutput = THCudaTensor_data(state, gradOutput);
  float* ptrgradinput  = THCudaTensor_data(state, gradInput);

  maxPoolBackward <<<blocks,threads>>>(ptrinput, ptroutput, ptrgradinput, ptrgradoutput, isize1, isize2, outsize1, outsize2, isize3, poolH, poolW, dH, dW, bs);

  // check for errors
  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) {
    printf("error in maxPoolBackward: %s\n", cudaGetErrorString(err));
    THError("aborting");
  }
  return 1;
}

static const struct luaL_Reg cunxn_SpatialMaxPoolingBHWD__ [] = {
  {"SpatialMaxPoolingBHWD_updateOutput", cunxn_SpatialMaxPooling_updateOutput},
  {"SpatialMaxPoolingBHWD_updateGradInput", cunxn_SpatialMaxPooling_updateGradInput},
  {NULL, NULL}
};

// Registers the two entry points on torch.CudaTensor under the "nn" name.
static void cunxn_SpatialMaxPoolingBHWD_init(lua_State *L)
{
  luaT_pushmetatable(L, "torch.CudaTensor");
  luaT_registeratname(L, cunxn_SpatialMaxPoolingBHWD__, "nn");
  lua_pop(L,1);
}
28e29d4745ac5dc60b8c112f181fb66e7a0a9edf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* ******************************************************************************/ /****************************************************************************** * Test of DeviceSelect::If and DevicePartition::If utilities ******************************************************************************/ // Ensure printing of CUDA runtime errors to console #define CUB_STDERR #include <stdio.h> #include <typeinfo> #include <thrust/device_ptr.h> #include <thrust/copy.h> #include <thrust/partition.h> #include <thrust/iterator/reverse_iterator.h> #include <hipcub/hipcub.hpp> #include <hipcub/hipcub.hpp> #include <cub/device/device_partition.cuh> #include <cub/iterator/counting_input_iterator.cuh> #include "test_util.h" using namespace cub; //--------------------------------------------------------------------- // Globals, constants and typedefs //--------------------------------------------------------------------- bool g_verbose = false; int g_timing_iterations = 0; int g_repeat = 0; float g_device_giga_bandwidth; CachingDeviceAllocator g_allocator(true); // Dispatch types enum Backend { CUB, // CUB method THRUST, // Thrust method CDP, // GPU-based (dynamic parallelism) dispatch to CUB method }; // Selection functor type template <typename T> struct LessThan { T compare; __host__ __device__ __forceinline__ LessThan(T compare) : compare(compare) {} __host__ __device__ __forceinline__ bool operator()(const T &a) const { return (a < compare); } }; //--------------------------------------------------------------------- // Dispatch to different CUB DeviceSelect entrypoints //--------------------------------------------------------------------- /** * Dispatch to select if entrypoint */ template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT> CUB_RUNTIME_FUNCTION __forceinline__ hipError_t Dispatch( Int2Type<CUB> /*dispatch_to*/, Int2Type<false> /*is_flagged*/, Int2Type<false> 
/*is_partition*/, int timing_timing_iterations, size_t* /*d_temp_storage_bytes*/, hipError_t* /*d_cdp_error*/, void* d_temp_storage, size_t& temp_storage_bytes, InputIteratorT d_in, FlagIteratorT /*d_flags*/, OutputIteratorT d_out, NumSelectedIteratorT d_num_selected_out, OffsetT num_items, SelectOpT select_op, hipStream_t stream, bool debug_synchronous) { hipError_t error = hipSuccess; for (int i = 0; i < timing_timing_iterations; ++i) { error = DeviceSelect::If(d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, select_op, stream, debug_synchronous); } return error; } /** * Dispatch to partition if entrypoint */ template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT> CUB_RUNTIME_FUNCTION __forceinline__ hipError_t Dispatch( Int2Type<CUB> /*dispatch_to*/, Int2Type<false> /*is_flagged*/, Int2Type<true> /*is_partition*/, int timing_timing_iterations, size_t* /*d_temp_storage_bytes*/, hipError_t* /*d_cdp_error*/, void* d_temp_storage, size_t& temp_storage_bytes, InputIteratorT d_in, FlagIteratorT /*d_flags*/, OutputIteratorT d_out, NumSelectedIteratorT d_num_selected_out, OffsetT num_items, SelectOpT select_op, hipStream_t stream, bool debug_synchronous) { hipError_t error = hipSuccess; for (int i = 0; i < timing_timing_iterations; ++i) { error = DevicePartition::If(d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, select_op, stream, debug_synchronous); } return error; } /** * Dispatch to select flagged entrypoint */ template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT> CUB_RUNTIME_FUNCTION __forceinline__ hipError_t Dispatch( Int2Type<CUB> /*dispatch_to*/, Int2Type<true> /*is_flagged*/, Int2Type<false> /*partition*/, int timing_timing_iterations, size_t* /*d_temp_storage_bytes*/, hipError_t* /*d_cdp_error*/, void* 
d_temp_storage, size_t& temp_storage_bytes, InputIteratorT d_in, FlagIteratorT d_flags, OutputIteratorT d_out, NumSelectedIteratorT d_num_selected_out, OffsetT num_items, SelectOpT /*select_op*/, hipStream_t stream, bool debug_synchronous) { hipError_t error = hipSuccess; for (int i = 0; i < timing_timing_iterations; ++i) { error = DeviceSelect::Flagged(d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, stream, debug_synchronous); } return error; } /** * Dispatch to partition flagged entrypoint */ template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT> CUB_RUNTIME_FUNCTION __forceinline__ hipError_t Dispatch( Int2Type<CUB> /*dispatch_to*/, Int2Type<true> /*is_flagged*/, Int2Type<true> /*partition*/, int timing_timing_iterations, size_t* /*d_temp_storage_bytes*/, hipError_t* /*d_cdp_error*/, void* d_temp_storage, size_t& temp_storage_bytes, InputIteratorT d_in, FlagIteratorT d_flags, OutputIteratorT d_out, NumSelectedIteratorT d_num_selected_out, OffsetT num_items, SelectOpT /*select_op*/, hipStream_t stream, bool debug_synchronous) { hipError_t error = hipSuccess; for (int i = 0; i < timing_timing_iterations; ++i) { error = DevicePartition::Flagged(d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, stream, debug_synchronous); } return error; } //--------------------------------------------------------------------- // Dispatch to different Thrust entrypoints //--------------------------------------------------------------------- /** * Dispatch to select if entrypoint */ template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT> __host__ __forceinline__ hipError_t Dispatch( Int2Type<THRUST> /*dispatch_to*/, Int2Type<false> /*is_flagged*/, Int2Type<false> /*is_partition*/, int timing_timing_iterations, 
size_t* /*d_temp_storage_bytes*/, hipError_t* /*d_cdp_error*/, void* d_temp_storage, size_t& temp_storage_bytes, InputIteratorT d_in, FlagIteratorT /*d_flags*/, OutputIteratorT d_out, NumSelectedIteratorT d_num_selected_out, OffsetT num_items, SelectOpT select_op, hipStream_t /*stream*/, bool /*debug_synchronous*/) { // The input value type typedef typename std::iterator_traits<InputIteratorT>::value_type InputT; // The output value type typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ? typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type, typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type if (d_temp_storage == 0) { temp_storage_bytes = 1; } else { thrust::device_ptr<OutputT> d_out_wrapper_end; thrust::device_ptr<InputT> d_in_wrapper(d_in); thrust::device_ptr<OutputT> d_out_wrapper(d_out); for (int i = 0; i < timing_timing_iterations; ++i) { d_out_wrapper_end = thrust::copy_if(d_in_wrapper, d_in_wrapper + num_items, d_out_wrapper, select_op); } OffsetT num_selected = OffsetT(d_out_wrapper_end - d_out_wrapper); CubDebugExit(hipMemcpy(d_num_selected_out, &num_selected, sizeof(OffsetT), hipMemcpyHostToDevice)); } return hipSuccess; } /** * Dispatch to partition if entrypoint */ template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT> __host__ __forceinline__ hipError_t Dispatch( Int2Type<THRUST> /*dispatch_to*/, Int2Type<false> /*is_flagged*/, Int2Type<true> /*is_partition*/, int timing_timing_iterations, size_t* /*d_temp_storage_bytes*/, hipError_t* /*d_cdp_error*/, void* d_temp_storage, size_t& temp_storage_bytes, InputIteratorT d_in, FlagIteratorT /*d_flags*/, OutputIteratorT d_out, NumSelectedIteratorT d_num_selected_out, OffsetT num_items, 
SelectOpT select_op, hipStream_t /*stream*/, bool /*debug_synchronous*/) { // The input value type typedef typename std::iterator_traits<InputIteratorT>::value_type InputT; // The output value type typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ? typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type, typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type typedef thrust::reverse_iterator<thrust::device_ptr<OutputT> > ReverseOutputIteratorT; if (d_temp_storage == 0) { temp_storage_bytes = 1; } else { thrust::pair<thrust::device_ptr<OutputT>, ReverseOutputIteratorT> d_out_wrapper_end; thrust::device_ptr<InputT> d_in_wrapper(d_in); thrust::device_ptr<OutputT> d_out_wrapper(d_out); ReverseOutputIteratorT d_out_unselected(d_out_wrapper + num_items); for (int i = 0; i < timing_timing_iterations; ++i) { d_out_wrapper_end = thrust::partition_copy( d_in_wrapper, d_in_wrapper + num_items, d_out_wrapper, d_out_unselected, select_op); } OffsetT num_selected = OffsetT(d_out_wrapper_end.first - d_out_wrapper); CubDebugExit(hipMemcpy(d_num_selected_out, &num_selected, sizeof(OffsetT), hipMemcpyHostToDevice)); } return hipSuccess; } /** * Dispatch to select flagged entrypoint */ template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT> __host__ __forceinline__ hipError_t Dispatch( Int2Type<THRUST> /*dispatch_to*/, Int2Type<true> /*is_flagged*/, Int2Type<false> /*is_partition*/, int timing_timing_iterations, size_t* /*d_temp_storage_bytes*/, hipError_t* /*d_cdp_error*/, void* d_temp_storage, size_t& temp_storage_bytes, InputIteratorT d_in, FlagIteratorT d_flags, OutputIteratorT d_out, NumSelectedIteratorT d_num_selected_out, OffsetT num_items, SelectOpT /*select_op*/, 
hipStream_t /*stream*/, bool /*debug_synchronous*/) { // The flag type typedef typename std::iterator_traits<FlagIteratorT>::value_type FlagT; // The input value type typedef typename std::iterator_traits<InputIteratorT>::value_type InputT; // The output value type typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ? typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type, typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type if (d_temp_storage == 0) { temp_storage_bytes = 1; } else { thrust::device_ptr<OutputT> d_out_wrapper_end; thrust::device_ptr<InputT> d_in_wrapper(d_in); thrust::device_ptr<OutputT> d_out_wrapper(d_out); thrust::device_ptr<FlagT> d_flags_wrapper(d_flags); for (int i = 0; i < timing_timing_iterations; ++i) { d_out_wrapper_end = thrust::copy_if(d_in_wrapper, d_in_wrapper + num_items, d_flags_wrapper, d_out_wrapper, CastOp<bool>()); } OffsetT num_selected = OffsetT(d_out_wrapper_end - d_out_wrapper); CubDebugExit(hipMemcpy(d_num_selected_out, &num_selected, sizeof(OffsetT), hipMemcpyHostToDevice)); } return hipSuccess; } /** * Dispatch to partition flagged entrypoint */ template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT> __host__ __forceinline__ hipError_t Dispatch( Int2Type<THRUST> /*dispatch_to*/, Int2Type<true> /*is_flagged*/, Int2Type<true> /*is_partition*/, int timing_timing_iterations, size_t* /*d_temp_storage_bytes*/, hipError_t* /*d_cdp_error*/, void* d_temp_storage, size_t& temp_storage_bytes, InputIteratorT d_in, FlagIteratorT d_flags, OutputIteratorT d_out, NumSelectedIteratorT d_num_selected_out, OffsetT num_items, SelectOpT /*select_op*/, hipStream_t /*stream*/, bool /*debug_synchronous*/) { // The flag type typedef 
typename std::iterator_traits<FlagIteratorT>::value_type FlagT; // The input value type typedef typename std::iterator_traits<InputIteratorT>::value_type InputT; // The output value type typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ? typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type, typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type typedef thrust::reverse_iterator<thrust::device_ptr<OutputT> > ReverseOutputIteratorT; if (d_temp_storage == 0) { temp_storage_bytes = 1; } else { thrust::pair<thrust::device_ptr<OutputT>, ReverseOutputIteratorT> d_out_wrapper_end; thrust::device_ptr<InputT> d_in_wrapper(d_in); thrust::device_ptr<OutputT> d_out_wrapper(d_out); thrust::device_ptr<FlagT> d_flags_wrapper(d_flags); ReverseOutputIteratorT d_out_unselected(d_out_wrapper + num_items); for (int i = 0; i < timing_timing_iterations; ++i) { d_out_wrapper_end = thrust::partition_copy( d_in_wrapper, d_in_wrapper + num_items, d_flags_wrapper, d_out_wrapper, d_out_unselected, CastOp<bool>()); } OffsetT num_selected = OffsetT(d_out_wrapper_end.first - d_out_wrapper); CubDebugExit(hipMemcpy(d_num_selected_out, &num_selected, sizeof(OffsetT), hipMemcpyHostToDevice)); } return hipSuccess; } //--------------------------------------------------------------------- // CUDA Nested Parallelism Test Kernel //--------------------------------------------------------------------- /** * Simple wrapper kernel to invoke DeviceSelect */ template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT, typename IsFlaggedTag, typename IsPartitionTag> __global__ void CnpDispatchKernel( IsFlaggedTag is_flagged, IsPartitionTag is_partition, int timing_timing_iterations, size_t* 
d_temp_storage_bytes, hipError_t* d_cdp_error, void* d_temp_storage, size_t temp_storage_bytes, InputIteratorT d_in, FlagIteratorT d_flags, OutputIteratorT d_out, NumSelectedIteratorT d_num_selected_out, OffsetT num_items, SelectOpT select_op, bool debug_synchronous) { #ifndef CUB_CDP (void)is_flagged; (void)is_partition; (void)timing_timing_iterations; (void)d_temp_storage_bytes; (void)d_temp_storage; (void)temp_storage_bytes; (void)d_in; (void)d_flags; (void)d_out; (void)d_num_selected_out; (void)num_items; (void)select_op; (void)debug_synchronous; *d_cdp_error = hipErrorNotSupported; #else *d_cdp_error = Dispatch(Int2Type<CUB>(), is_flagged, is_partition, timing_timing_iterations, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, select_op, 0, debug_synchronous); *d_temp_storage_bytes = temp_storage_bytes; #endif } /** * Dispatch to CDP kernel */ template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT, typename IsFlaggedTag, typename IsPartitionTag> hipError_t Dispatch( Int2Type<CDP> dispatch_to, IsFlaggedTag is_flagged, IsPartitionTag is_partition, int timing_timing_iterations, size_t* d_temp_storage_bytes, hipError_t* d_cdp_error, void* d_temp_storage, size_t& temp_storage_bytes, InputIteratorT d_in, FlagIteratorT d_flags, OutputIteratorT d_out, NumSelectedIteratorT d_num_selected_out, OffsetT num_items, SelectOpT select_op, hipStream_t stream, bool debug_synchronous) { // Invoke kernel to invoke device-side dispatch hipLaunchKernelGGL(( CnpDispatchKernel), dim3(1),dim3(1), 0, 0, is_flagged, is_partition, timing_timing_iterations, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, select_op, debug_synchronous); // Copy out temp_storage_bytes CubDebugExit(hipMemcpy(&temp_storage_bytes, d_temp_storage_bytes, sizeof(size_t) 
* 1, hipMemcpyDeviceToHost)); // Copy out error hipError_t retval; CubDebugExit(hipMemcpy(&retval, d_cdp_error, sizeof(hipError_t) * 1, hipMemcpyDeviceToHost)); return retval; } //--------------------------------------------------------------------- // Test generation //--------------------------------------------------------------------- /** * Initialize problem */ template <typename T> void Initialize( T* h_in, int num_items) { for (int i = 0; i < num_items; ++i) { // Initialize each item to a randomly selected value from [0..126] unsigned int value; RandomBits(value, 0, 0, 7); if (value == 127) value = 126; InitValue(INTEGER_SEED, h_in[i], value); } if (g_verbose) { printf("Input:\n"); DisplayResults(h_in, num_items); printf("\n\n"); } } /** * Solve selection problem (and set corresponding flags) */ template < typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename T> int Solve( InputIteratorT h_in, SelectOpT select_op, T* h_reference, FlagIteratorT h_flags, int num_items) { int num_selected = 0; for (int i = 0; i < num_items; ++i) { if ((h_flags[i] = select_op(h_in[i]))) { h_reference[num_selected] = h_in[i]; num_selected++; } else { h_reference[num_items - (i - num_selected) - 1] = h_in[i]; } } return num_selected; } /** * Test DeviceSelect for a given problem input */ template < Backend BACKEND, bool IS_FLAGGED, bool IS_PARTITION, typename DeviceInputIteratorT, typename FlagT, typename SelectOpT, typename T> void Test( DeviceInputIteratorT d_in, FlagT* h_flags, SelectOpT select_op, T* h_reference, int num_selected, int num_items) { // Allocate device flags, output, and num-selected FlagT* d_flags = NULL; T* d_out = NULL; int* d_num_selected_out = NULL; CubDebugExit(g_allocator.DeviceAllocate((void**)&d_flags, sizeof(FlagT) * num_items)); CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(T) * num_items)); CubDebugExit(g_allocator.DeviceAllocate((void**)&d_num_selected_out, sizeof(int))); // Allocate CDP device arrays size_t* 
d_temp_storage_bytes = NULL; hipError_t* d_cdp_error = NULL; CubDebugExit(g_allocator.DeviceAllocate((void**)&d_temp_storage_bytes, sizeof(size_t) * 1)); CubDebugExit(g_allocator.DeviceAllocate((void**)&d_cdp_error, sizeof(hipError_t) * 1)); // Allocate temporary storage void *d_temp_storage = NULL; size_t temp_storage_bytes = 0; CubDebugExit(Dispatch(Int2Type<BACKEND>(), Int2Type<IS_FLAGGED>(), Int2Type<IS_PARTITION>(), 1, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, select_op, 0, true)); CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes)); // Copy flags and clear device output array CubDebugExit(hipMemcpy(d_flags, h_flags, sizeof(FlagT) * num_items, hipMemcpyHostToDevice)); CubDebugExit(hipMemset(d_out, 0, sizeof(T) * num_items)); CubDebugExit(hipMemset(d_num_selected_out, 0, sizeof(int))); // Run warmup/correctness iteration CubDebugExit(Dispatch(Int2Type<BACKEND>(), Int2Type<IS_FLAGGED>(), Int2Type<IS_PARTITION>(), 1, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, select_op, 0, true)); // Check for correctness (and display results, if specified) int compare1 = (IS_PARTITION) ? CompareDeviceResults(h_reference, d_out, num_items, true, g_verbose) : CompareDeviceResults(h_reference, d_out, num_selected, true, g_verbose); printf("\t Data %s\n", compare1 ? "FAIL" : "PASS"); int compare2 = CompareDeviceResults(&num_selected, d_num_selected_out, 1, true, g_verbose); printf("\t Count %s\n", compare2 ? 
"FAIL" : "PASS"); // Flush any stdout/stderr fflush(stdout); fflush(stderr); // Performance GpuTimer gpu_timer; gpu_timer.Start(); CubDebugExit(Dispatch(Int2Type<BACKEND>(), Int2Type<IS_FLAGGED>(), Int2Type<IS_PARTITION>(), g_timing_iterations, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, select_op, 0, false)); gpu_timer.Stop(); float elapsed_millis = gpu_timer.ElapsedMillis(); // Display performance if (g_timing_iterations > 0) { float avg_millis = elapsed_millis / g_timing_iterations; float giga_rate = float(num_items) / avg_millis / 1000.0f / 1000.0f; int num_output_items = (IS_PARTITION) ? num_items : num_selected; int num_flag_items = (IS_FLAGGED) ? num_items : 0; size_t num_bytes = sizeof(T) * (num_items + num_output_items) + sizeof(FlagT) * num_flag_items; float giga_bandwidth = float(num_bytes) / avg_millis / 1000.0f / 1000.0f; printf(", %.3f avg ms, %.3f billion items/s, %.3f logical GB/s, %.1f%% peak", avg_millis, giga_rate, giga_bandwidth, giga_bandwidth / g_device_giga_bandwidth * 100.0); } printf("\n\n"); // Flush any stdout/stderr fflush(stdout); fflush(stderr); // Cleanup if (d_flags) CubDebugExit(g_allocator.DeviceFree(d_flags)); if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out)); if (d_num_selected_out) CubDebugExit(g_allocator.DeviceFree(d_num_selected_out)); if (d_temp_storage_bytes) CubDebugExit(g_allocator.DeviceFree(d_temp_storage_bytes)); if (d_cdp_error) CubDebugExit(g_allocator.DeviceFree(d_cdp_error)); if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage)); // Correctness asserts AssertEquals(0, compare1 | compare2); } /** * Test on pointer type */ template < Backend BACKEND, bool IS_FLAGGED, bool IS_PARTITION, typename T> void TestPointer( int num_items, float select_ratio) { typedef char FlagT; // Allocate host arrays T* h_in = new T[num_items]; FlagT* h_flags = new FlagT[num_items]; T* h_reference = new T[num_items]; // Initialize input 
Initialize(h_in, num_items); // Select a comparison value that is select_ratio through the space of [0,127] T compare; if (select_ratio <= 0.0) InitValue(INTEGER_SEED, compare, 0); // select none else if (select_ratio >= 1.0) InitValue(INTEGER_SEED, compare, 127); // select all else InitValue(INTEGER_SEED, compare, int(double(double(127) * select_ratio))); LessThan<T> select_op(compare); int num_selected = Solve(h_in, select_op, h_reference, h_flags, num_items); if (g_verbose) std::cout << "\nComparison item: " << compare << "\n"; printf("\nPointer %s hipcub::%s::%s %d items, %d selected (select ratio %.3f), %s %d-byte elements\n", (IS_PARTITION) ? "DevicePartition" : "DeviceSelect", (IS_FLAGGED) ? "Flagged" : "If", (BACKEND == CDP) ? "CDP CUB" : (BACKEND == THRUST) ? "Thrust" : "CUB", num_items, num_selected, float(num_selected) / num_items, typeid(T).name(), (int) sizeof(T)); fflush(stdout); // Allocate problem device arrays T *d_in = NULL; CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(T) * num_items)); // Initialize device input CubDebugExit(hipMemcpy(d_in, h_in, sizeof(T) * num_items, hipMemcpyHostToDevice)); // Run Test Test<BACKEND, IS_FLAGGED, IS_PARTITION>(d_in, h_flags, select_op, h_reference, num_selected, num_items); // Cleanup if (h_in) delete[] h_in; if (h_reference) delete[] h_reference; if (h_flags) delete[] h_flags; if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in)); } /** * Test on iterator type */ template < Backend BACKEND, bool IS_FLAGGED, bool IS_PARTITION, typename T> void TestIterator( int num_items, float select_ratio) { typedef char FlagT; // Allocate host arrays T* h_reference = new T[num_items]; FlagT* h_flags = new FlagT[num_items]; // Use counting iterator as the input CountingInputIterator<T, int> h_in(0); // Select a comparison value that is select_ratio through the space of [0,127] T compare; if (select_ratio <= 0.0) InitValue(INTEGER_SEED, compare, 0); // select none else if (select_ratio >= 1.0) 
InitValue(INTEGER_SEED, compare, 127); // select all else InitValue(INTEGER_SEED, compare, int(double(double(127) * select_ratio))); LessThan<T> select_op(compare); int num_selected = Solve(h_in, select_op, h_reference, h_flags, num_items); if (g_verbose) std::cout << "\nComparison item: " << compare << "\n"; printf("\nIterator %s hipcub::%s::%s %d items, %d selected (select ratio %.3f), %s %d-byte elements\n", (IS_PARTITION) ? "DevicePartition" : "DeviceSelect", (IS_FLAGGED) ? "Flagged" : "If", (BACKEND == CDP) ? "CDP CUB" : (BACKEND == THRUST) ? "Thrust" : "CUB", num_items, num_selected, float(num_selected) / num_items, typeid(T).name(), (int) sizeof(T)); fflush(stdout); // Run Test Test<BACKEND, IS_FLAGGED, IS_PARTITION>(h_in, h_flags, select_op, h_reference, num_selected, num_items); // Cleanup if (h_reference) delete[] h_reference; if (h_flags) delete[] h_flags; } /** * Test different selection ratios */ template < Backend BACKEND, bool IS_FLAGGED, bool IS_PARTITION, typename T> void Test( int num_items) { for (float select_ratio = 0.0f; select_ratio <= 1.0f; select_ratio += 0.2f) { TestPointer<BACKEND, IS_FLAGGED, IS_PARTITION, T>(num_items, select_ratio); } } /** * Test (select vs. partition) and (flagged vs. 
functor) */ template < Backend BACKEND, typename T> void TestMethod( int num_items) { // Functor Test<BACKEND, false, false, T>(num_items); Test<BACKEND, false, true, T>(num_items); // Flagged Test<BACKEND, true, false, T>(num_items); Test<BACKEND, true, true, T>(num_items); } /** * Test different dispatch */ template < typename T> void TestOp( int num_items) { TestMethod<CUB, T>(num_items); #ifdef CUB_CDP TestMethod<CDP, T>(num_items); #endif } /** * Test different input sizes */ template <typename T> void Test( int num_items) { if (num_items < 0) { TestOp<T>(0); TestOp<T>(1); TestOp<T>(100); TestOp<T>(10000); TestOp<T>(1000000); } else { TestOp<T>(num_items); } } /** * Test select/partition on pointer types */ template <typename T> void ComparePointer( int num_items, float select_ratio) { printf("-- Select-if ----------------------------\n"); TestPointer<CUB, false, false, T>(num_items, select_ratio); TestPointer<THRUST, false, false, T>(num_items, select_ratio); printf("-- Partition-if ----------------------------\n"); TestPointer<CUB, false, true, T>(num_items, select_ratio); TestPointer<THRUST, false, true, T>(num_items, select_ratio); printf("-- Select-flagged ----------------------------\n"); TestPointer<CUB, true, false, T>(num_items, select_ratio); TestPointer<THRUST, true, false, T>(num_items, select_ratio); printf("-- Partition-flagged ----------------------------\n"); TestPointer<CUB, true, true, T>(num_items, select_ratio); TestPointer<THRUST, true, true, T>(num_items, select_ratio); } //--------------------------------------------------------------------- // Main //--------------------------------------------------------------------- /** * Main */ int main(int argc, char** argv) { int num_items = -1; float select_ratio = 0.5; // Initialize command line CommandLineArgs args(argc, argv); g_verbose = args.CheckCmdLineFlag("v"); args.GetCmdLineArgument("n", num_items); args.GetCmdLineArgument("i", g_timing_iterations); args.GetCmdLineArgument("repeat", 
g_repeat); args.GetCmdLineArgument("ratio", select_ratio); // Print usage if (args.CheckCmdLineFlag("help")) { printf("%s " "[--n=<input items> " "[--i=<timing iterations> " "[--device=<device-id>] " "[--ratio=<selection ratio, default 0.5>] " "[--repeat=<repetitions of entire test suite>] " "[--v] " "[--cdp] " "\n", argv[0]); exit(0); } // Initialize device CubDebugExit(args.DeviceInit()); g_device_giga_bandwidth = args.device_giga_bandwidth; printf("\n"); #ifdef QUICKER_TEST // Compile/run basic CUB test if (num_items < 0) num_items = 32000000; printf("-- Select-if ----------------------------\n"); TestPointer<CUB, false, false, int>(num_items, select_ratio); printf("-- Partition-if ----------------------------\n"); TestPointer<CUB, false, true, int>(num_items, select_ratio); printf("-- Select-flagged ----------------------------\n"); TestPointer<CUB, true, false, int>(num_items, select_ratio); printf("-- Partition-flagged ----------------------------\n"); TestPointer<CUB, true, true, int>(num_items, select_ratio); #elif defined(QUICK_TEST) // Get device ordinal int device_ordinal; CubDebugExit(hipGetDevice(&device_ordinal)); // Get device SM version int sm_version; CubDebugExit(SmVersion(sm_version, device_ordinal)); // Compile/run quick tests if (num_items < 0) num_items = 32000000; printf("-- Iterator ----------------------------\n"); TestIterator<CUB, false, false, int>(num_items, select_ratio); ComparePointer<char>( num_items * ((sm_version <= 130) ? 1 : 4), select_ratio); ComparePointer<short>( num_items * ((sm_version <= 130) ? 
1 : 2), select_ratio); ComparePointer<int>( num_items, select_ratio); ComparePointer<long long>( num_items / 2, select_ratio); ComparePointer<TestFoo>( num_items / 4, select_ratio); #else // Compile/run thorough tests for (int i = 0; i <= g_repeat; ++i) { // Test different input types Test<unsigned char>(num_items); Test<unsigned short>(num_items); Test<unsigned int>(num_items); Test<unsigned long long>(num_items); Test<uchar2>(num_items); Test<ushort2>(num_items); Test<uint2>(num_items); Test<ulonglong2>(num_items); Test<uchar4>(num_items); Test<ushort4>(num_items); Test<uint4>(num_items); Test<ulonglong4>(num_items); Test<TestFoo>(num_items); Test<TestBar>(num_items); } #endif return 0; }
28e29d4745ac5dc60b8c112f181fb66e7a0a9edf.cu
/****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* ******************************************************************************/ /****************************************************************************** * Test of DeviceSelect::If and DevicePartition::If utilities ******************************************************************************/ // Ensure printing of CUDA runtime errors to console #define CUB_STDERR #include <stdio.h> #include <typeinfo> #include <thrust/device_ptr.h> #include <thrust/copy.h> #include <thrust/partition.h> #include <thrust/iterator/reverse_iterator.h> #include <cub/util_allocator.cuh> #include <cub/device/device_select.cuh> #include <cub/device/device_partition.cuh> #include <cub/iterator/counting_input_iterator.cuh> #include "test_util.h" using namespace cub; //--------------------------------------------------------------------- // Globals, constants and typedefs //--------------------------------------------------------------------- bool g_verbose = false; int g_timing_iterations = 0; int g_repeat = 0; float g_device_giga_bandwidth; CachingDeviceAllocator g_allocator(true); // Dispatch types enum Backend { CUB, // CUB method THRUST, // Thrust method CDP, // GPU-based (dynamic parallelism) dispatch to CUB method }; // Selection functor type template <typename T> struct LessThan { T compare; __host__ __device__ __forceinline__ LessThan(T compare) : compare(compare) {} __host__ __device__ __forceinline__ bool operator()(const T &a) const { return (a < compare); } }; //--------------------------------------------------------------------- // Dispatch to different CUB DeviceSelect entrypoints //--------------------------------------------------------------------- /** * Dispatch to select if entrypoint */ template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT> CUB_RUNTIME_FUNCTION __forceinline__ cudaError_t Dispatch( Int2Type<CUB> /*dispatch_to*/, Int2Type<false> /*is_flagged*/, 
Int2Type<false> /*is_partition*/, int timing_timing_iterations, size_t* /*d_temp_storage_bytes*/, cudaError_t* /*d_cdp_error*/, void* d_temp_storage, size_t& temp_storage_bytes, InputIteratorT d_in, FlagIteratorT /*d_flags*/, OutputIteratorT d_out, NumSelectedIteratorT d_num_selected_out, OffsetT num_items, SelectOpT select_op, cudaStream_t stream, bool debug_synchronous) { cudaError_t error = cudaSuccess; for (int i = 0; i < timing_timing_iterations; ++i) { error = DeviceSelect::If(d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, select_op, stream, debug_synchronous); } return error; } /** * Dispatch to partition if entrypoint */ template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT> CUB_RUNTIME_FUNCTION __forceinline__ cudaError_t Dispatch( Int2Type<CUB> /*dispatch_to*/, Int2Type<false> /*is_flagged*/, Int2Type<true> /*is_partition*/, int timing_timing_iterations, size_t* /*d_temp_storage_bytes*/, cudaError_t* /*d_cdp_error*/, void* d_temp_storage, size_t& temp_storage_bytes, InputIteratorT d_in, FlagIteratorT /*d_flags*/, OutputIteratorT d_out, NumSelectedIteratorT d_num_selected_out, OffsetT num_items, SelectOpT select_op, cudaStream_t stream, bool debug_synchronous) { cudaError_t error = cudaSuccess; for (int i = 0; i < timing_timing_iterations; ++i) { error = DevicePartition::If(d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, select_op, stream, debug_synchronous); } return error; } /** * Dispatch to select flagged entrypoint */ template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT> CUB_RUNTIME_FUNCTION __forceinline__ cudaError_t Dispatch( Int2Type<CUB> /*dispatch_to*/, Int2Type<true> /*is_flagged*/, Int2Type<false> /*partition*/, int timing_timing_iterations, size_t* /*d_temp_storage_bytes*/, 
cudaError_t* /*d_cdp_error*/, void* d_temp_storage, size_t& temp_storage_bytes, InputIteratorT d_in, FlagIteratorT d_flags, OutputIteratorT d_out, NumSelectedIteratorT d_num_selected_out, OffsetT num_items, SelectOpT /*select_op*/, cudaStream_t stream, bool debug_synchronous) { cudaError_t error = cudaSuccess; for (int i = 0; i < timing_timing_iterations; ++i) { error = DeviceSelect::Flagged(d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, stream, debug_synchronous); } return error; } /** * Dispatch to partition flagged entrypoint */ template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT> CUB_RUNTIME_FUNCTION __forceinline__ cudaError_t Dispatch( Int2Type<CUB> /*dispatch_to*/, Int2Type<true> /*is_flagged*/, Int2Type<true> /*partition*/, int timing_timing_iterations, size_t* /*d_temp_storage_bytes*/, cudaError_t* /*d_cdp_error*/, void* d_temp_storage, size_t& temp_storage_bytes, InputIteratorT d_in, FlagIteratorT d_flags, OutputIteratorT d_out, NumSelectedIteratorT d_num_selected_out, OffsetT num_items, SelectOpT /*select_op*/, cudaStream_t stream, bool debug_synchronous) { cudaError_t error = cudaSuccess; for (int i = 0; i < timing_timing_iterations; ++i) { error = DevicePartition::Flagged(d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, stream, debug_synchronous); } return error; } //--------------------------------------------------------------------- // Dispatch to different Thrust entrypoints //--------------------------------------------------------------------- /** * Dispatch to select if entrypoint */ template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT> __host__ __forceinline__ cudaError_t Dispatch( Int2Type<THRUST> /*dispatch_to*/, Int2Type<false> /*is_flagged*/, Int2Type<false> 
/*is_partition*/, int timing_timing_iterations, size_t* /*d_temp_storage_bytes*/, cudaError_t* /*d_cdp_error*/, void* d_temp_storage, size_t& temp_storage_bytes, InputIteratorT d_in, FlagIteratorT /*d_flags*/, OutputIteratorT d_out, NumSelectedIteratorT d_num_selected_out, OffsetT num_items, SelectOpT select_op, cudaStream_t /*stream*/, bool /*debug_synchronous*/) { // The input value type typedef typename std::iterator_traits<InputIteratorT>::value_type InputT; // The output value type typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ? typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type, typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type if (d_temp_storage == 0) { temp_storage_bytes = 1; } else { thrust::device_ptr<OutputT> d_out_wrapper_end; thrust::device_ptr<InputT> d_in_wrapper(d_in); thrust::device_ptr<OutputT> d_out_wrapper(d_out); for (int i = 0; i < timing_timing_iterations; ++i) { d_out_wrapper_end = thrust::copy_if(d_in_wrapper, d_in_wrapper + num_items, d_out_wrapper, select_op); } OffsetT num_selected = OffsetT(d_out_wrapper_end - d_out_wrapper); CubDebugExit(cudaMemcpy(d_num_selected_out, &num_selected, sizeof(OffsetT), cudaMemcpyHostToDevice)); } return cudaSuccess; } /** * Dispatch to partition if entrypoint */ template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT> __host__ __forceinline__ cudaError_t Dispatch( Int2Type<THRUST> /*dispatch_to*/, Int2Type<false> /*is_flagged*/, Int2Type<true> /*is_partition*/, int timing_timing_iterations, size_t* /*d_temp_storage_bytes*/, cudaError_t* /*d_cdp_error*/, void* d_temp_storage, size_t& temp_storage_bytes, InputIteratorT d_in, FlagIteratorT /*d_flags*/, OutputIteratorT d_out, 
NumSelectedIteratorT d_num_selected_out, OffsetT num_items, SelectOpT select_op, cudaStream_t /*stream*/, bool /*debug_synchronous*/) { // The input value type typedef typename std::iterator_traits<InputIteratorT>::value_type InputT; // The output value type typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ? typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type, typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type typedef thrust::reverse_iterator<thrust::device_ptr<OutputT> > ReverseOutputIteratorT; if (d_temp_storage == 0) { temp_storage_bytes = 1; } else { thrust::pair<thrust::device_ptr<OutputT>, ReverseOutputIteratorT> d_out_wrapper_end; thrust::device_ptr<InputT> d_in_wrapper(d_in); thrust::device_ptr<OutputT> d_out_wrapper(d_out); ReverseOutputIteratorT d_out_unselected(d_out_wrapper + num_items); for (int i = 0; i < timing_timing_iterations; ++i) { d_out_wrapper_end = thrust::partition_copy( d_in_wrapper, d_in_wrapper + num_items, d_out_wrapper, d_out_unselected, select_op); } OffsetT num_selected = OffsetT(d_out_wrapper_end.first - d_out_wrapper); CubDebugExit(cudaMemcpy(d_num_selected_out, &num_selected, sizeof(OffsetT), cudaMemcpyHostToDevice)); } return cudaSuccess; } /** * Dispatch to select flagged entrypoint */ template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT> __host__ __forceinline__ cudaError_t Dispatch( Int2Type<THRUST> /*dispatch_to*/, Int2Type<true> /*is_flagged*/, Int2Type<false> /*is_partition*/, int timing_timing_iterations, size_t* /*d_temp_storage_bytes*/, cudaError_t* /*d_cdp_error*/, void* d_temp_storage, size_t& temp_storage_bytes, InputIteratorT d_in, FlagIteratorT d_flags, OutputIteratorT d_out, 
NumSelectedIteratorT d_num_selected_out, OffsetT num_items, SelectOpT /*select_op*/, cudaStream_t /*stream*/, bool /*debug_synchronous*/) { // The flag type typedef typename std::iterator_traits<FlagIteratorT>::value_type FlagT; // The input value type typedef typename std::iterator_traits<InputIteratorT>::value_type InputT; // The output value type typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ? typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type, typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type if (d_temp_storage == 0) { temp_storage_bytes = 1; } else { thrust::device_ptr<OutputT> d_out_wrapper_end; thrust::device_ptr<InputT> d_in_wrapper(d_in); thrust::device_ptr<OutputT> d_out_wrapper(d_out); thrust::device_ptr<FlagT> d_flags_wrapper(d_flags); for (int i = 0; i < timing_timing_iterations; ++i) { d_out_wrapper_end = thrust::copy_if(d_in_wrapper, d_in_wrapper + num_items, d_flags_wrapper, d_out_wrapper, CastOp<bool>()); } OffsetT num_selected = OffsetT(d_out_wrapper_end - d_out_wrapper); CubDebugExit(cudaMemcpy(d_num_selected_out, &num_selected, sizeof(OffsetT), cudaMemcpyHostToDevice)); } return cudaSuccess; } /** * Dispatch to partition flagged entrypoint */ template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT> __host__ __forceinline__ cudaError_t Dispatch( Int2Type<THRUST> /*dispatch_to*/, Int2Type<true> /*is_flagged*/, Int2Type<true> /*is_partition*/, int timing_timing_iterations, size_t* /*d_temp_storage_bytes*/, cudaError_t* /*d_cdp_error*/, void* d_temp_storage, size_t& temp_storage_bytes, InputIteratorT d_in, FlagIteratorT d_flags, OutputIteratorT d_out, NumSelectedIteratorT d_num_selected_out, OffsetT num_items, SelectOpT 
/*select_op*/, cudaStream_t /*stream*/, bool /*debug_synchronous*/) { // The flag type typedef typename std::iterator_traits<FlagIteratorT>::value_type FlagT; // The input value type typedef typename std::iterator_traits<InputIteratorT>::value_type InputT; // The output value type typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ? typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type, typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type typedef thrust::reverse_iterator<thrust::device_ptr<OutputT> > ReverseOutputIteratorT; if (d_temp_storage == 0) { temp_storage_bytes = 1; } else { thrust::pair<thrust::device_ptr<OutputT>, ReverseOutputIteratorT> d_out_wrapper_end; thrust::device_ptr<InputT> d_in_wrapper(d_in); thrust::device_ptr<OutputT> d_out_wrapper(d_out); thrust::device_ptr<FlagT> d_flags_wrapper(d_flags); ReverseOutputIteratorT d_out_unselected(d_out_wrapper + num_items); for (int i = 0; i < timing_timing_iterations; ++i) { d_out_wrapper_end = thrust::partition_copy( d_in_wrapper, d_in_wrapper + num_items, d_flags_wrapper, d_out_wrapper, d_out_unselected, CastOp<bool>()); } OffsetT num_selected = OffsetT(d_out_wrapper_end.first - d_out_wrapper); CubDebugExit(cudaMemcpy(d_num_selected_out, &num_selected, sizeof(OffsetT), cudaMemcpyHostToDevice)); } return cudaSuccess; } //--------------------------------------------------------------------- // CUDA Nested Parallelism Test Kernel //--------------------------------------------------------------------- /** * Simple wrapper kernel to invoke DeviceSelect */ template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT, typename IsFlaggedTag, typename IsPartitionTag> __global__ void CnpDispatchKernel( 
IsFlaggedTag is_flagged, IsPartitionTag is_partition, int timing_timing_iterations, size_t* d_temp_storage_bytes, cudaError_t* d_cdp_error, void* d_temp_storage, size_t temp_storage_bytes, InputIteratorT d_in, FlagIteratorT d_flags, OutputIteratorT d_out, NumSelectedIteratorT d_num_selected_out, OffsetT num_items, SelectOpT select_op, bool debug_synchronous) { #ifndef CUB_CDP (void)is_flagged; (void)is_partition; (void)timing_timing_iterations; (void)d_temp_storage_bytes; (void)d_temp_storage; (void)temp_storage_bytes; (void)d_in; (void)d_flags; (void)d_out; (void)d_num_selected_out; (void)num_items; (void)select_op; (void)debug_synchronous; *d_cdp_error = cudaErrorNotSupported; #else *d_cdp_error = Dispatch(Int2Type<CUB>(), is_flagged, is_partition, timing_timing_iterations, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, select_op, 0, debug_synchronous); *d_temp_storage_bytes = temp_storage_bytes; #endif } /** * Dispatch to CDP kernel */ template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT, typename IsFlaggedTag, typename IsPartitionTag> cudaError_t Dispatch( Int2Type<CDP> dispatch_to, IsFlaggedTag is_flagged, IsPartitionTag is_partition, int timing_timing_iterations, size_t* d_temp_storage_bytes, cudaError_t* d_cdp_error, void* d_temp_storage, size_t& temp_storage_bytes, InputIteratorT d_in, FlagIteratorT d_flags, OutputIteratorT d_out, NumSelectedIteratorT d_num_selected_out, OffsetT num_items, SelectOpT select_op, cudaStream_t stream, bool debug_synchronous) { // Invoke kernel to invoke device-side dispatch CnpDispatchKernel<<<1,1>>>(is_flagged, is_partition, timing_timing_iterations, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, select_op, debug_synchronous); // Copy out temp_storage_bytes 
CubDebugExit(cudaMemcpy(&temp_storage_bytes, d_temp_storage_bytes, sizeof(size_t) * 1, cudaMemcpyDeviceToHost)); // Copy out error cudaError_t retval; CubDebugExit(cudaMemcpy(&retval, d_cdp_error, sizeof(cudaError_t) * 1, cudaMemcpyDeviceToHost)); return retval; } //--------------------------------------------------------------------- // Test generation //--------------------------------------------------------------------- /** * Initialize problem */ template <typename T> void Initialize( T* h_in, int num_items) { for (int i = 0; i < num_items; ++i) { // Initialize each item to a randomly selected value from [0..126] unsigned int value; RandomBits(value, 0, 0, 7); if (value == 127) value = 126; InitValue(INTEGER_SEED, h_in[i], value); } if (g_verbose) { printf("Input:\n"); DisplayResults(h_in, num_items); printf("\n\n"); } } /** * Solve selection problem (and set corresponding flags) */ template < typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename T> int Solve( InputIteratorT h_in, SelectOpT select_op, T* h_reference, FlagIteratorT h_flags, int num_items) { int num_selected = 0; for (int i = 0; i < num_items; ++i) { if ((h_flags[i] = select_op(h_in[i]))) { h_reference[num_selected] = h_in[i]; num_selected++; } else { h_reference[num_items - (i - num_selected) - 1] = h_in[i]; } } return num_selected; } /** * Test DeviceSelect for a given problem input */ template < Backend BACKEND, bool IS_FLAGGED, bool IS_PARTITION, typename DeviceInputIteratorT, typename FlagT, typename SelectOpT, typename T> void Test( DeviceInputIteratorT d_in, FlagT* h_flags, SelectOpT select_op, T* h_reference, int num_selected, int num_items) { // Allocate device flags, output, and num-selected FlagT* d_flags = NULL; T* d_out = NULL; int* d_num_selected_out = NULL; CubDebugExit(g_allocator.DeviceAllocate((void**)&d_flags, sizeof(FlagT) * num_items)); CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(T) * num_items)); 
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_num_selected_out, sizeof(int))); // Allocate CDP device arrays size_t* d_temp_storage_bytes = NULL; cudaError_t* d_cdp_error = NULL; CubDebugExit(g_allocator.DeviceAllocate((void**)&d_temp_storage_bytes, sizeof(size_t) * 1)); CubDebugExit(g_allocator.DeviceAllocate((void**)&d_cdp_error, sizeof(cudaError_t) * 1)); // Allocate temporary storage void *d_temp_storage = NULL; size_t temp_storage_bytes = 0; CubDebugExit(Dispatch(Int2Type<BACKEND>(), Int2Type<IS_FLAGGED>(), Int2Type<IS_PARTITION>(), 1, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, select_op, 0, true)); CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes)); // Copy flags and clear device output array CubDebugExit(cudaMemcpy(d_flags, h_flags, sizeof(FlagT) * num_items, cudaMemcpyHostToDevice)); CubDebugExit(cudaMemset(d_out, 0, sizeof(T) * num_items)); CubDebugExit(cudaMemset(d_num_selected_out, 0, sizeof(int))); // Run warmup/correctness iteration CubDebugExit(Dispatch(Int2Type<BACKEND>(), Int2Type<IS_FLAGGED>(), Int2Type<IS_PARTITION>(), 1, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, select_op, 0, true)); // Check for correctness (and display results, if specified) int compare1 = (IS_PARTITION) ? CompareDeviceResults(h_reference, d_out, num_items, true, g_verbose) : CompareDeviceResults(h_reference, d_out, num_selected, true, g_verbose); printf("\t Data %s\n", compare1 ? "FAIL" : "PASS"); int compare2 = CompareDeviceResults(&num_selected, d_num_selected_out, 1, true, g_verbose); printf("\t Count %s\n", compare2 ? 
"FAIL" : "PASS"); // Flush any stdout/stderr fflush(stdout); fflush(stderr); // Performance GpuTimer gpu_timer; gpu_timer.Start(); CubDebugExit(Dispatch(Int2Type<BACKEND>(), Int2Type<IS_FLAGGED>(), Int2Type<IS_PARTITION>(), g_timing_iterations, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, select_op, 0, false)); gpu_timer.Stop(); float elapsed_millis = gpu_timer.ElapsedMillis(); // Display performance if (g_timing_iterations > 0) { float avg_millis = elapsed_millis / g_timing_iterations; float giga_rate = float(num_items) / avg_millis / 1000.0f / 1000.0f; int num_output_items = (IS_PARTITION) ? num_items : num_selected; int num_flag_items = (IS_FLAGGED) ? num_items : 0; size_t num_bytes = sizeof(T) * (num_items + num_output_items) + sizeof(FlagT) * num_flag_items; float giga_bandwidth = float(num_bytes) / avg_millis / 1000.0f / 1000.0f; printf(", %.3f avg ms, %.3f billion items/s, %.3f logical GB/s, %.1f%% peak", avg_millis, giga_rate, giga_bandwidth, giga_bandwidth / g_device_giga_bandwidth * 100.0); } printf("\n\n"); // Flush any stdout/stderr fflush(stdout); fflush(stderr); // Cleanup if (d_flags) CubDebugExit(g_allocator.DeviceFree(d_flags)); if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out)); if (d_num_selected_out) CubDebugExit(g_allocator.DeviceFree(d_num_selected_out)); if (d_temp_storage_bytes) CubDebugExit(g_allocator.DeviceFree(d_temp_storage_bytes)); if (d_cdp_error) CubDebugExit(g_allocator.DeviceFree(d_cdp_error)); if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage)); // Correctness asserts AssertEquals(0, compare1 | compare2); } /** * Test on pointer type */ template < Backend BACKEND, bool IS_FLAGGED, bool IS_PARTITION, typename T> void TestPointer( int num_items, float select_ratio) { typedef char FlagT; // Allocate host arrays T* h_in = new T[num_items]; FlagT* h_flags = new FlagT[num_items]; T* h_reference = new T[num_items]; // Initialize input 
Initialize(h_in, num_items); // Select a comparison value that is select_ratio through the space of [0,127] T compare; if (select_ratio <= 0.0) InitValue(INTEGER_SEED, compare, 0); // select none else if (select_ratio >= 1.0) InitValue(INTEGER_SEED, compare, 127); // select all else InitValue(INTEGER_SEED, compare, int(double(double(127) * select_ratio))); LessThan<T> select_op(compare); int num_selected = Solve(h_in, select_op, h_reference, h_flags, num_items); if (g_verbose) std::cout << "\nComparison item: " << compare << "\n"; printf("\nPointer %s cub::%s::%s %d items, %d selected (select ratio %.3f), %s %d-byte elements\n", (IS_PARTITION) ? "DevicePartition" : "DeviceSelect", (IS_FLAGGED) ? "Flagged" : "If", (BACKEND == CDP) ? "CDP CUB" : (BACKEND == THRUST) ? "Thrust" : "CUB", num_items, num_selected, float(num_selected) / num_items, typeid(T).name(), (int) sizeof(T)); fflush(stdout); // Allocate problem device arrays T *d_in = NULL; CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(T) * num_items)); // Initialize device input CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(T) * num_items, cudaMemcpyHostToDevice)); // Run Test Test<BACKEND, IS_FLAGGED, IS_PARTITION>(d_in, h_flags, select_op, h_reference, num_selected, num_items); // Cleanup if (h_in) delete[] h_in; if (h_reference) delete[] h_reference; if (h_flags) delete[] h_flags; if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in)); } /** * Test on iterator type */ template < Backend BACKEND, bool IS_FLAGGED, bool IS_PARTITION, typename T> void TestIterator( int num_items, float select_ratio) { typedef char FlagT; // Allocate host arrays T* h_reference = new T[num_items]; FlagT* h_flags = new FlagT[num_items]; // Use counting iterator as the input CountingInputIterator<T, int> h_in(0); // Select a comparison value that is select_ratio through the space of [0,127] T compare; if (select_ratio <= 0.0) InitValue(INTEGER_SEED, compare, 0); // select none else if (select_ratio >= 1.0) 
InitValue(INTEGER_SEED, compare, 127); // select all else InitValue(INTEGER_SEED, compare, int(double(double(127) * select_ratio))); LessThan<T> select_op(compare); int num_selected = Solve(h_in, select_op, h_reference, h_flags, num_items); if (g_verbose) std::cout << "\nComparison item: " << compare << "\n"; printf("\nIterator %s cub::%s::%s %d items, %d selected (select ratio %.3f), %s %d-byte elements\n", (IS_PARTITION) ? "DevicePartition" : "DeviceSelect", (IS_FLAGGED) ? "Flagged" : "If", (BACKEND == CDP) ? "CDP CUB" : (BACKEND == THRUST) ? "Thrust" : "CUB", num_items, num_selected, float(num_selected) / num_items, typeid(T).name(), (int) sizeof(T)); fflush(stdout); // Run Test Test<BACKEND, IS_FLAGGED, IS_PARTITION>(h_in, h_flags, select_op, h_reference, num_selected, num_items); // Cleanup if (h_reference) delete[] h_reference; if (h_flags) delete[] h_flags; } /** * Test different selection ratios */ template < Backend BACKEND, bool IS_FLAGGED, bool IS_PARTITION, typename T> void Test( int num_items) { for (float select_ratio = 0.0f; select_ratio <= 1.0f; select_ratio += 0.2f) { TestPointer<BACKEND, IS_FLAGGED, IS_PARTITION, T>(num_items, select_ratio); } } /** * Test (select vs. partition) and (flagged vs. 
functor) */ template < Backend BACKEND, typename T> void TestMethod( int num_items) { // Functor Test<BACKEND, false, false, T>(num_items); Test<BACKEND, false, true, T>(num_items); // Flagged Test<BACKEND, true, false, T>(num_items); Test<BACKEND, true, true, T>(num_items); } /** * Test different dispatch */ template < typename T> void TestOp( int num_items) { TestMethod<CUB, T>(num_items); #ifdef CUB_CDP TestMethod<CDP, T>(num_items); #endif } /** * Test different input sizes */ template <typename T> void Test( int num_items) { if (num_items < 0) { TestOp<T>(0); TestOp<T>(1); TestOp<T>(100); TestOp<T>(10000); TestOp<T>(1000000); } else { TestOp<T>(num_items); } } /** * Test select/partition on pointer types */ template <typename T> void ComparePointer( int num_items, float select_ratio) { printf("-- Select-if ----------------------------\n"); TestPointer<CUB, false, false, T>(num_items, select_ratio); TestPointer<THRUST, false, false, T>(num_items, select_ratio); printf("-- Partition-if ----------------------------\n"); TestPointer<CUB, false, true, T>(num_items, select_ratio); TestPointer<THRUST, false, true, T>(num_items, select_ratio); printf("-- Select-flagged ----------------------------\n"); TestPointer<CUB, true, false, T>(num_items, select_ratio); TestPointer<THRUST, true, false, T>(num_items, select_ratio); printf("-- Partition-flagged ----------------------------\n"); TestPointer<CUB, true, true, T>(num_items, select_ratio); TestPointer<THRUST, true, true, T>(num_items, select_ratio); } //--------------------------------------------------------------------- // Main //--------------------------------------------------------------------- /** * Main */ int main(int argc, char** argv) { int num_items = -1; float select_ratio = 0.5; // Initialize command line CommandLineArgs args(argc, argv); g_verbose = args.CheckCmdLineFlag("v"); args.GetCmdLineArgument("n", num_items); args.GetCmdLineArgument("i", g_timing_iterations); args.GetCmdLineArgument("repeat", 
g_repeat); args.GetCmdLineArgument("ratio", select_ratio); // Print usage if (args.CheckCmdLineFlag("help")) { printf("%s " "[--n=<input items> " "[--i=<timing iterations> " "[--device=<device-id>] " "[--ratio=<selection ratio, default 0.5>] " "[--repeat=<repetitions of entire test suite>] " "[--v] " "[--cdp] " "\n", argv[0]); exit(0); } // Initialize device CubDebugExit(args.DeviceInit()); g_device_giga_bandwidth = args.device_giga_bandwidth; printf("\n"); #ifdef QUICKER_TEST // Compile/run basic CUB test if (num_items < 0) num_items = 32000000; printf("-- Select-if ----------------------------\n"); TestPointer<CUB, false, false, int>(num_items, select_ratio); printf("-- Partition-if ----------------------------\n"); TestPointer<CUB, false, true, int>(num_items, select_ratio); printf("-- Select-flagged ----------------------------\n"); TestPointer<CUB, true, false, int>(num_items, select_ratio); printf("-- Partition-flagged ----------------------------\n"); TestPointer<CUB, true, true, int>(num_items, select_ratio); #elif defined(QUICK_TEST) // Get device ordinal int device_ordinal; CubDebugExit(cudaGetDevice(&device_ordinal)); // Get device SM version int sm_version; CubDebugExit(SmVersion(sm_version, device_ordinal)); // Compile/run quick tests if (num_items < 0) num_items = 32000000; printf("-- Iterator ----------------------------\n"); TestIterator<CUB, false, false, int>(num_items, select_ratio); ComparePointer<char>( num_items * ((sm_version <= 130) ? 1 : 4), select_ratio); ComparePointer<short>( num_items * ((sm_version <= 130) ? 
1 : 2), select_ratio); ComparePointer<int>( num_items, select_ratio); ComparePointer<long long>( num_items / 2, select_ratio); ComparePointer<TestFoo>( num_items / 4, select_ratio); #else // Compile/run thorough tests for (int i = 0; i <= g_repeat; ++i) { // Test different input types Test<unsigned char>(num_items); Test<unsigned short>(num_items); Test<unsigned int>(num_items); Test<unsigned long long>(num_items); Test<uchar2>(num_items); Test<ushort2>(num_items); Test<uint2>(num_items); Test<ulonglong2>(num_items); Test<uchar4>(num_items); Test<ushort4>(num_items); Test<uint4>(num_items); Test<ulonglong4>(num_items); Test<TestFoo>(num_items); Test<TestBar>(num_items); } #endif return 0; }
ee7c14e2c40c16713d690429953308165218f16d.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "gtest/gtest.h" #include "../../src/exact/fused_scan_reduce_by_key.cuh" #include "../../src/exact/node.cuh" #include "utils.cuh" namespace xgboost { namespace tree { namespace exact { template <typename node_id_t> class ReduceScanByKey: public Generator<node_id_t> { public: ReduceScanByKey(int nc, int nr, int nk, const std::string& tName): Generator<node_id_t>(nc, nr, nk, tName), hSums(nullptr), dSums(nullptr), hScans(nullptr), dScans(nullptr), outSize(this->size), nSegments(this->nKeys*this->nCols), hOffsets(nullptr), dOffsets(nullptr) { hSums = new gpu_gpair[nSegments]; allocateOnGpu<gpu_gpair>(dSums, nSegments); hScans = new gpu_gpair[outSize]; allocateOnGpu<gpu_gpair>(dScans, outSize); gpu_gpair* buckets = new gpu_gpair[nSegments]; for (int i = 0; i < nSegments; i++) { buckets[i] = gpu_gpair(); } for (int i = 0; i < nSegments; i++) { hSums[i] = gpu_gpair(); } for (size_t i = 0; i < this->size; i++) { if (this->hKeys[i] >= 0 && this->hKeys[i] < nSegments) { node_id_t key = abs2uniqKey<node_id_t>(i, this->hKeys, this->hColIds, 0, this->nKeys); hSums[key] += this->hVals[i]; } } for (int i = 0; i < this->size; ++i) { node_id_t key = abs2uniqKey<node_id_t>(i, this->hKeys, this->hColIds, 0, this->nKeys); hScans[i] = buckets[key]; buckets[key] += this->hVals[i]; } // it's a dense matrix that we are 
currently looking at, so offsets // are nicely aligned! (need not be the case in real datasets) hOffsets = new int[this->nCols]; size_t off = 0; for (int i = 0; i < this->nCols; ++i, off+=this->nRows) { hOffsets[i] = off; } allocateAndUpdateOnGpu<int>(dOffsets, hOffsets, this->nCols); } ~ReduceScanByKey() { delete [] hScans; delete [] hSums; delete [] hOffsets; dh::safe_cuda(hipFree(dScans)); dh::safe_cuda(hipFree(dSums)); dh::safe_cuda(hipFree(dOffsets)); } void run() { gpu_gpair* tmpScans; int* tmpKeys; int tmpSize = scanTempBufferSize(this->size); allocateOnGpu<gpu_gpair>(tmpScans, tmpSize); allocateOnGpu<int>(tmpKeys, tmpSize); TIMEIT(reduceScanByKey<node_id_t> (dSums, dScans, this->dVals, this->dInstIds, this->dKeys, this->size, this->nKeys, this->nCols, tmpScans, tmpKeys, this->dColIds, 0), this->testName); dh::safe_cuda(hipFree(tmpScans)); dh::safe_cuda(hipFree(tmpKeys)); this->compare(hSums, dSums, nSegments); this->compare(hScans, dScans, outSize); } private: gpu_gpair* hSums; gpu_gpair* dSums; gpu_gpair* hScans; gpu_gpair* dScans; int outSize; int nSegments; int* hOffsets; int* dOffsets; }; TEST(ReduceScanByKey, testInt16) { ReduceScanByKey<int16_t>(32, 512, 32, "ReduceScanByKey").run(); } TEST(ReduceScanByKey, testInt32) { ReduceScanByKey<int>(32, 512, 32, "ReduceScanByKey").run(); } } // namespace exact } // namespace tree } // namespace xgboost
ee7c14e2c40c16713d690429953308165218f16d.cu
/* * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "gtest/gtest.h" #include "../../src/exact/fused_scan_reduce_by_key.cuh" #include "../../src/exact/node.cuh" #include "utils.cuh" namespace xgboost { namespace tree { namespace exact { template <typename node_id_t> class ReduceScanByKey: public Generator<node_id_t> { public: ReduceScanByKey(int nc, int nr, int nk, const std::string& tName): Generator<node_id_t>(nc, nr, nk, tName), hSums(nullptr), dSums(nullptr), hScans(nullptr), dScans(nullptr), outSize(this->size), nSegments(this->nKeys*this->nCols), hOffsets(nullptr), dOffsets(nullptr) { hSums = new gpu_gpair[nSegments]; allocateOnGpu<gpu_gpair>(dSums, nSegments); hScans = new gpu_gpair[outSize]; allocateOnGpu<gpu_gpair>(dScans, outSize); gpu_gpair* buckets = new gpu_gpair[nSegments]; for (int i = 0; i < nSegments; i++) { buckets[i] = gpu_gpair(); } for (int i = 0; i < nSegments; i++) { hSums[i] = gpu_gpair(); } for (size_t i = 0; i < this->size; i++) { if (this->hKeys[i] >= 0 && this->hKeys[i] < nSegments) { node_id_t key = abs2uniqKey<node_id_t>(i, this->hKeys, this->hColIds, 0, this->nKeys); hSums[key] += this->hVals[i]; } } for (int i = 0; i < this->size; ++i) { node_id_t key = abs2uniqKey<node_id_t>(i, this->hKeys, this->hColIds, 0, this->nKeys); hScans[i] = buckets[key]; buckets[key] += this->hVals[i]; } // it's a dense matrix that we are currently looking at, so offsets // are nicely aligned! 
(need not be the case in real datasets) hOffsets = new int[this->nCols]; size_t off = 0; for (int i = 0; i < this->nCols; ++i, off+=this->nRows) { hOffsets[i] = off; } allocateAndUpdateOnGpu<int>(dOffsets, hOffsets, this->nCols); } ~ReduceScanByKey() { delete [] hScans; delete [] hSums; delete [] hOffsets; dh::safe_cuda(cudaFree(dScans)); dh::safe_cuda(cudaFree(dSums)); dh::safe_cuda(cudaFree(dOffsets)); } void run() { gpu_gpair* tmpScans; int* tmpKeys; int tmpSize = scanTempBufferSize(this->size); allocateOnGpu<gpu_gpair>(tmpScans, tmpSize); allocateOnGpu<int>(tmpKeys, tmpSize); TIMEIT(reduceScanByKey<node_id_t> (dSums, dScans, this->dVals, this->dInstIds, this->dKeys, this->size, this->nKeys, this->nCols, tmpScans, tmpKeys, this->dColIds, 0), this->testName); dh::safe_cuda(cudaFree(tmpScans)); dh::safe_cuda(cudaFree(tmpKeys)); this->compare(hSums, dSums, nSegments); this->compare(hScans, dScans, outSize); } private: gpu_gpair* hSums; gpu_gpair* dSums; gpu_gpair* hScans; gpu_gpair* dScans; int outSize; int nSegments; int* hOffsets; int* dOffsets; }; TEST(ReduceScanByKey, testInt16) { ReduceScanByKey<int16_t>(32, 512, 32, "ReduceScanByKey").run(); } TEST(ReduceScanByKey, testInt32) { ReduceScanByKey<int>(32, 512, 32, "ReduceScanByKey").run(); } } // namespace exact } // namespace tree } // namespace xgboost
f4418d13fb79dfdccbdf24f8defee9b21420acb3.hip
// !!! This is a file automatically generated by hipify!!! #define _USE_MATH_DEFINES #include <iostream> #include <iomanip> #include <fstream> #include <string> #include <cmath> #include <float.h> #include <hip/hip_runtime_api.h> using namespace std; #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true) { if (code != hipSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } __device__ void RGBtoHSV(double r, double g, double b, double &h, double &s, double &v) { r = max(0., min(255.0, r)); g = max(0., min(255.0, g)); b = max(0., min(255.0, b)); r = r / 255.; g = g / 255.; b = b / 255.; h = 0.0f; v = max(r, max(g, b)); const float delta = v - min(r, min(g, b)); if (delta < FLT_MIN) s = 0.0f; else { s = delta / v; if (r >= v) { h = (g - b) / delta; if (h < 0.0f) h += 6.0f; } else if (g >= v) h = 2.0f + (b - r) / delta; else h = 4.0f + (r - g) / delta; } h = max(0., min(6.0, h)); s = max(0., min(1.0, s)); v = max(0., min(1.0, v)); } __device__ void HSVtoRGB(double h, double s, double v, double &r, double &g, double &b) { h = max(0., min(6.0, h)); s = max(0., min(1.0, s)); v = max(0., min(1.0, v)); if (s < FLT_MIN) r = g = b = v; else { const int i = (int) h; const float f = h - i; const float p = v * (1.0f - s); if (i & 1) { const float q = v * (1.0f - (s * f)); switch (i) { case 1: r = q; g = v; b = p; break; case 3: r = p; g = q; b = v; break; default: r = v; g = p; b = q; break; } } else { const float t = v * (1.0f - (s * (1.0f - f))); switch (i) { case 0: r = v; g = t; b = p; break; case 2: r = p; g = v; b = t; break; default: r = t; g = p; b = v; break; } } } r *= 255.; g *= 255.; b *= 255.; r = max(0., min(255.0, r)); g = max(0., min(255.0, g)); b = max(0., min(255.0, b)); } __device__ void RGBtoHSL(double r, double g, double b, double &h, double &s, double &l) { r = max(0., min(255.0, r)); g = max(0., 
min(255.0, g)); b = max(0., min(255.0, b)); r = r / 255.; g = g / 255.; b = b / 255.; const double maxRGB = max(r, max(g, b)); const double minRGB = min(r, min(g, b)); const double delta2 = maxRGB + minRGB; l = delta2 * 0.5f; const double delta = maxRGB - minRGB; if (delta < DBL_MIN) h = s = 0.0f; else { s = delta / (l > 0.5f ? 2.0f - delta2 : delta2); if (r >= maxRGB) { h = (g - b) / delta; if (h < 0.0f) h += 6.0f; } else if (g >= maxRGB) h = 2.0f + (b - r) / delta; else h = 4.0f + (r - g) / delta; } h = max(0., min(6.0, h)); s = max(0., min(1.0, s)); l = max(0., min(1.0, l)); } __device__ void HSLtoRGB(double h, double s, double l, double &r, double &g, double &b) { h = max(0., min(6.0, h)); s = max(0., min(1.0, s)); l = max(0., min(1.0, l)); if (s < DBL_MIN) r = g = b = l; else if (l < DBL_MIN) r = g = b = 0.0f; else { const double q = l < 0.5f ? l * (1.0f + s) : l + s - l * s; const double p = 2.0f * l - q; double t[] = {h + 2.0f, h, h - 2.0f}; for (int i = 0; i < 3; ++i) { double *color; switch (i) { case 0: color = &r; break; case 1: color = &g; break; case 2: color = &b; break; } if (t[i] < 0.0f) t[i] += 6.0f; else if (t[i] > 6.0f) t[i] -= 6.0f; if (t[i] < 1.0f) *color = p + (q - p) * t[i]; else if (t[i] < 3.0f) *color = q; else if (t[i] < 4.0f) *color = p + (q - p) * (4.0f - t[i]); else *color = p; } } r *= 255.; g *= 255.; b *= 255.; r = max(0., min(255.0, r)); g = max(0., min(255.0, g)); b = max(0., min(255.0, b)); } __device__ void color_lighten(unsigned char &r, unsigned char &g, unsigned char &b, double quantity) { double rD, gD, bD, h, s, l, v; rD = r; gD = g; bD = b; if (quantity > 1) { RGBtoHSL(rD, gD, bD, h, s, l); l *= quantity; HSLtoRGB(h, s, l, rD, gD, bD); } else if (quantity < 1) { RGBtoHSV(rD, gD, bD, h, s, v); v *= quantity; HSVtoRGB(h, s, v, rD, gD, bD); } r = floor(rD); g = floor(gD); b = floor(bD); } __global__ void multibrot_kernel( unsigned int unroll, unsigned char *image, int width, int height, double ratio, int exponent, int 
iterations, double R, double eps, unsigned char borderR, unsigned char borderG, unsigned char borderB, double borderThickness, long normOrbitSkip, double normLightIntensity, double normLightAngle, double normLightHeight, unsigned char bgR, unsigned char bgG, unsigned char bgB, double kR, double kG, double kB, double kD, unsigned char internalBorderR, unsigned char internalBorderG, unsigned char internalBorderB, unsigned char internalCoreR, unsigned char internalCoreG, unsigned char internalCoreB, double internalK, double stripeDensity, double stripeLightIntensity, double zoom, double posX, double posY ) { unsigned int threadIndex = blockIdx.x * blockDim.x + threadIdx.x; for (unsigned int unrollIndex = 0; unrollIndex < unroll; unrollIndex++) { unsigned int currentIndex = threadIndex * unroll + unrollIndex; if (currentIndex >= width * height) { return; } //region Calculations double c_r = (((currentIndex % width - 1) - width / 2.) / (width * zoom)) * ratio + posX; double c_i = ((double) currentIndex / width - height / 2.) 
/ (height * zoom) + posY; double z_r = c_r; double z_i = c_i; double last_z_r = 0; double last_z_i = 0; double dz_r = 1.; double dz_i = 0.; double dc_r = 1.; double dc_i = 0.; double dzdz_r = 0.; double dzdz_i = 0.; double dcdc_r = 0.; double dcdc_i = 0.; double dcdz_r = 0.; double dcdz_i = 0.; double p = 1.; double orbitCount = 0; double V = 0; long i; for (i = 0; i < iterations; i++) { double z2 = z_r * z_r + z_i * z_i; if (z2 > R * R) { V = log(z2) / p; break; } if (eps > 0 && dz_r * dz_r + dz_i * dz_i < eps * eps) { V = 0; break; } double dzdz_r_temp = 2 * ((z_r * dzdz_r - z_i * dzdz_i) + (dz_r * dz_r - dz_i * dz_i)); dzdz_i = 2 * ((z_r * dzdz_i + z_i * dzdz_r) + (dz_r * dz_i + dz_i * dz_r)); dzdz_r = dzdz_r_temp; double dcdc_r_temp = 2 * ((z_r * dcdc_r - z_i * dcdc_i) + (dc_r * dc_r - dc_i * dc_i)); dcdc_i = 2 * ((z_r * dcdc_i + z_i * dcdc_r) + (dc_r * dc_i + dc_i * dc_r)); dcdc_r = dcdc_r_temp; double dcdz_r_temp = 2 * ((z_r * dcdz_r - z_i * dcdz_i) + (dz_r * dc_r - dz_i * dc_i)); dcdz_i = 2 * ((z_r * dcdz_i + z_i * dcdz_r) + (dc_r * dz_i + dc_i * dz_r)); dcdz_r = dcdz_r_temp; double dz_r_temp = 2 * (z_r * dz_r - z_i * dz_i); dz_i = 2 * (z_r * dz_i + z_i * dz_r); dz_r = dz_r_temp; double dc_r_temp = 2 * (z_r * dc_r - z_i * dc_i) + 1; dc_i = 2 * (z_r * dc_i + z_i * dc_r); dc_r = dc_r_temp; p *= 2.; if (i >= normOrbitSkip) { orbitCount += 0.5 + 0.5 * sin(stripeDensity * atan2(last_z_i, last_z_r)); } last_z_r = z_r; last_z_i = z_i; int esp = exponent; if (esp != 0) { if (esp < 0) { esp = -esp; double z_r_temp = z_r / (z_r * z_r + z_i * z_i); z_i = -z_i / (z_r * z_r + z_i * z_i); z_r = z_r_temp; } double z_esp_r = z_r; double z_esp_i = z_i; for (int e = 1; e < esp; e++) { double z_esp_r_temp = (z_r * z_esp_r - z_i * z_esp_i); z_esp_i = (z_esp_i * z_r + z_i * z_esp_r); z_esp_r = z_esp_r_temp; } z_r = z_esp_r + c_r; z_i = z_esp_i + c_i; } else { z_r = 1.0; z_i = 0.0; } } // endregion if (V == 0) { // Inside! 
//region Interior distance estimation double u_r = 1 - dz_r; double u_i = dz_i; double v_r = (dc_r * u_r + dc_i * u_i) / (u_r * u_r + u_i * u_i); double v_i = (dc_i * u_r - dc_r * u_i) / (u_r * u_r + u_i * u_i); double l_r = (dzdz_r * v_r - dzdz_i * v_i); double l_i = (dzdz_r * v_i + dzdz_i * v_r); l_r += dcdz_r; l_i += dcdz_i; double d = (1 - (dz_r * dz_r + dz_i * dz_i)) / sqrt(l_r * l_r + l_i * l_i); //endregion // if(d < 50000) { // image[currentIndex * 4] = internalCoreR; // image[currentIndex * 4 + 1] = internalCoreG; // image[currentIndex * 4 + 2] = internalCoreB; // } else { // image[currentIndex * 4] = internalBorderR; // image[currentIndex * 4 + 1] = internalBorderG; // image[currentIndex * 4 + 2] = internalBorderB; // } // if (d < 1) { // image[currentIndex * 4] = 0; // image[currentIndex * 4 + 1] = (int) max(0., min(255., (255. * tanh(d)))); // image[currentIndex * 4 + 1] = (unsigned char) (max(0., min(255., 0 + d * (255 - 0)))); // image[currentIndex * 4 + 2] = 0; // } else { // image[currentIndex * 4] = 0; // image[currentIndex * 4 + 1] = 255; // image[currentIndex * 4 + 2] = 0; // } double mix = internalK > 0 ? log(d) / internalK : 1; if(mix < 0) { mix = 0; } if (mix < 1) { image[currentIndex * 4] = max(0., min(255., internalBorderR + mix * (internalCoreR - internalBorderR))); image[currentIndex * 4 + 1] = max(0., min(255., internalBorderG + mix * (internalCoreG - internalBorderG))); image[currentIndex * 4 + 2] = max(0., min(255., internalBorderB + mix * (internalCoreB - internalBorderB))); } else { image[currentIndex * 4] = internalCoreR; image[currentIndex * 4 + 1] = internalCoreG; image[currentIndex * 4 + 2] = internalCoreB; } } else { // Outside! //region Exterior distance estimation double rad = sqrt(z_r * z_r + z_i * z_i); double d = rad * 2. 
* log(rad) / sqrt(dc_r * dc_r + dc_i * dc_i); //endregion unsigned char tempR = bgR; unsigned char tempG = bgG; unsigned char tempB = bgB; //region Gradient Background Setup if (kR > 0.01 || kR < -0.01) { tempR = (unsigned char) (max(0., min(255., tempR + (255. * (1 - cos(log(V) / (kR))) / 2. / kD)))); } if (kG > 0.01 || kG < -0.01) { tempG = (unsigned char) (max(0., min(255., tempG + (255. * (1 - cos(log(V) / (kG))) / 2. / kD)))); } if (kB > 0.01 || kB < -0.01) { tempB = (unsigned char) (max(0., min(255., tempB + (255. * (1 - cos(log(V) / (kB))) / 2. / kD)))); } //endregion //region 3D Normal if (normLightIntensity != 1) { double vR = cos(normLightAngle * 2. * M_PI / 360.); double vI = sin(normLightAngle * 2. * M_PI / 360.); double lo = 0.5 * log(z_r * z_r + z_i * z_i); double conjR = ((1. + lo) * (dc_r * dc_r - dc_i * dc_i) - (lo) * (z_r * dcdc_r - z_i * dcdc_i)); double conjI = ((1. + lo) * -(dc_r * dc_i + dc_i * dc_r) - (lo) * -(z_r * dcdc_i + z_i * dcdc_r)); double uR = (z_r * dc_r - z_i * dc_i); double uI = (z_r * dc_i + z_i * dc_r); double newUR = (uR * conjR - uI * conjI); uI = (uR * conjI + uI * conjR); uR = newUR; newUR = uR / sqrt(uR * uR + uI * uI); uI = uI / sqrt(uR * uR + uI * uI); uR = newUR; double t = uR * vR + uI * vI + normLightHeight; t = t / (1. 
+ normLightHeight); if (t < 0) { t = 0; } else if (t > 1) { t = 1; } unsigned char normLightR = tempR; unsigned char normLightG = tempG; unsigned char normLightB = tempB; color_lighten(normLightR, normLightG, normLightB, normLightIntensity); double normShadowIntensity = 1 + (1 - normLightIntensity); unsigned char normShadowR = tempR; unsigned char normShadowG = tempG; unsigned char normShadowB = tempB; color_lighten(normShadowR, normShadowG, normShadowB, normShadowIntensity); tempR = (unsigned char) (normShadowR + (t * (normLightR - normShadowR))); tempG = (unsigned char) (normShadowG + (t * (normLightG - normShadowG))); tempB = (unsigned char) (normShadowB + (t * (normLightB - normShadowB))); } //endregion //region Stripe Average Colouring if (stripeLightIntensity != 1) { double lastOrbit = 0.5 + 0.5 * sin(stripeDensity * atan2(last_z_i, last_z_r)); double smallCount = orbitCount - lastOrbit; orbitCount /= (double) i; smallCount /= (double) i - 1; double frac = -1. + log10(2.0 * log(R * R)) / log10(2.) 
- log10(0.5 * log(last_z_r * last_z_r + last_z_i * last_z_i)) / log10(2.); double mix = frac * orbitCount + (1 - frac) * smallCount; if (mix < 0) { mix = 0; } else if (mix > 1) { mix = 1; } unsigned char stripeLightR = tempR; unsigned char stripeLightG = tempG; unsigned char stripeLightB = tempB; color_lighten(stripeLightR, stripeLightG, stripeLightB, stripeLightIntensity); double stripeShadowIntensity = 1 + (1 - stripeLightIntensity); unsigned char stripeShadowR = tempR; unsigned char stripeShadowG = tempG; unsigned char stripeShadowB = tempB; color_lighten(stripeShadowR, stripeShadowG, stripeShadowB, stripeShadowIntensity); tempR = (unsigned char) (stripeShadowR + (mix * (stripeLightR - stripeShadowR))); tempG = (unsigned char) (stripeShadowG + (mix * (stripeLightG - stripeShadowG))); tempB = (unsigned char) (stripeShadowB + (mix * (stripeLightB - stripeShadowB))); } //endregion //region Border if (borderThickness > 0) { double tBorder = d / borderThickness; if (tBorder < 1) { // Border tempR = (unsigned char) (borderR + tBorder * (tempR - borderR)); tempG = (unsigned char) (borderG + tBorder * (tempG - borderG)); tempB = (unsigned char) (borderB + tBorder * (tempB - borderB)); } } //endregion image[currentIndex * 4] = tempR; image[currentIndex * 4 + 1] = tempG; image[currentIndex * 4 + 2] = tempB; } } } void multibrot( unsigned int unroll, unsigned int blockSize, unsigned char *rgb, int width, int height, int exponent, int iterations, double R, double eps, unsigned char borderR, unsigned char borderG, unsigned char borderB, double borderThickness, long normOrbitSkip, double normLightIntensity, double normLightAngle, double normLightHeight, unsigned char bgR, unsigned char bgG, unsigned char bgB, double kR, double kG, double kB, double kD, unsigned char internalBorderR, unsigned char internalBorderG, unsigned char internalBorderB, unsigned char internalCoreR, unsigned char internalCoreG, unsigned char internalCoreB, double internalK, double stripeDensity, double 
stripeLightIntensity, double zoom, double posX, double posY) { hipProfilerStart(); //region Setup cout << "Setting up..." << endl; double ratio = (double) width / height; unsigned int size = width * height; unsigned char *imageHost; imageHost = (unsigned char *) malloc(4 * size * sizeof(unsigned char)); unsigned char *imageDevice; gpuErrchk(hipMallocManaged(&imageDevice, 4 * size * sizeof(unsigned char))); int suggestedBlockSize; int minGridSize; hipOccupancyMaxPotentialBlockSize(&minGridSize, &suggestedBlockSize, multibrot_kernel, 0, 4 * size); cout << "Suggested BlockSize: " << suggestedBlockSize << endl << "Min GridSize: " << minGridSize << endl; int gridSize = (size + blockSize - 1) / blockSize / unroll; cout << "BlockSize: " << blockSize << endl << "GridSize: " << gridSize << endl << "Unroll: " << unroll << endl; cout << "Setup done!" << endl; //endregion //region Generation cout << "Fractal generation in process..." << endl; hipLaunchKernelGGL(( multibrot_kernel), dim3(gridSize), dim3(blockSize), 0, 0, unroll, imageDevice, width, height, ratio, exponent, iterations, R, eps, borderR, borderG, borderB, borderThickness, normOrbitSkip, normLightIntensity, normLightAngle, normLightHeight, bgR, bgG, bgB, kR, kG, kB, kD, internalBorderR, internalBorderG, internalBorderB, internalCoreR, internalCoreG, internalCoreB, internalK, stripeDensity, stripeLightIntensity, zoom, posX, posY); gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); gpuErrchk(hipMemcpy(imageHost, imageDevice, 4 * size * sizeof(unsigned char), hipMemcpyDeviceToHost)); cout << "Generation done!" 
<< endl; int maxActiveBlocks; hipOccupancyMaxActiveBlocksPerMultiprocessor(&maxActiveBlocks, multibrot_kernel, blockSize, 0); int device; hipDeviceProp_t props; hipGetDevice(&device); hipGetDeviceProperties(&props, device); double occupancy = (maxActiveBlocks * blockSize / props.warpSize) / (double) (props.maxThreadsPerMultiProcessor / props.warpSize); cout << std::setprecision(4) << "Theoretical occupancy: " << occupancy << "%" << endl; //endregion for (int i = 0; i < size; i++) { rgb[i * 3 + 2] = imageHost[i * 4]; rgb[i * 3 + 1] = imageHost[i * 4 + 1]; rgb[i * 3] = imageHost[i * 4 + 2]; } //region Cleanup free(imageHost); hipFree(imageDevice); hipDeviceReset(); //endregion hipProfilerStop(); }
f4418d13fb79dfdccbdf24f8defee9b21420acb3.cu
#define _USE_MATH_DEFINES #include <iostream> #include <iomanip> #include <fstream> #include <string> #include <cmath> #include <float.h> #include <cuda_profiler_api.h> using namespace std; #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true) { if (code != cudaSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } __device__ void RGBtoHSV(double r, double g, double b, double &h, double &s, double &v) { r = max(0., min(255.0, r)); g = max(0., min(255.0, g)); b = max(0., min(255.0, b)); r = r / 255.; g = g / 255.; b = b / 255.; h = 0.0f; v = max(r, max(g, b)); const float delta = v - min(r, min(g, b)); if (delta < FLT_MIN) s = 0.0f; else { s = delta / v; if (r >= v) { h = (g - b) / delta; if (h < 0.0f) h += 6.0f; } else if (g >= v) h = 2.0f + (b - r) / delta; else h = 4.0f + (r - g) / delta; } h = max(0., min(6.0, h)); s = max(0., min(1.0, s)); v = max(0., min(1.0, v)); } __device__ void HSVtoRGB(double h, double s, double v, double &r, double &g, double &b) { h = max(0., min(6.0, h)); s = max(0., min(1.0, s)); v = max(0., min(1.0, v)); if (s < FLT_MIN) r = g = b = v; else { const int i = (int) h; const float f = h - i; const float p = v * (1.0f - s); if (i & 1) { const float q = v * (1.0f - (s * f)); switch (i) { case 1: r = q; g = v; b = p; break; case 3: r = p; g = q; b = v; break; default: r = v; g = p; b = q; break; } } else { const float t = v * (1.0f - (s * (1.0f - f))); switch (i) { case 0: r = v; g = t; b = p; break; case 2: r = p; g = v; b = t; break; default: r = t; g = p; b = v; break; } } } r *= 255.; g *= 255.; b *= 255.; r = max(0., min(255.0, r)); g = max(0., min(255.0, g)); b = max(0., min(255.0, b)); } __device__ void RGBtoHSL(double r, double g, double b, double &h, double &s, double &l) { r = max(0., min(255.0, r)); g = max(0., min(255.0, g)); b = max(0., min(255.0, b)); r = r / 255.; g = 
g / 255.; b = b / 255.; const double maxRGB = max(r, max(g, b)); const double minRGB = min(r, min(g, b)); const double delta2 = maxRGB + minRGB; l = delta2 * 0.5f; const double delta = maxRGB - minRGB; if (delta < DBL_MIN) h = s = 0.0f; else { s = delta / (l > 0.5f ? 2.0f - delta2 : delta2); if (r >= maxRGB) { h = (g - b) / delta; if (h < 0.0f) h += 6.0f; } else if (g >= maxRGB) h = 2.0f + (b - r) / delta; else h = 4.0f + (r - g) / delta; } h = max(0., min(6.0, h)); s = max(0., min(1.0, s)); l = max(0., min(1.0, l)); } __device__ void HSLtoRGB(double h, double s, double l, double &r, double &g, double &b) { h = max(0., min(6.0, h)); s = max(0., min(1.0, s)); l = max(0., min(1.0, l)); if (s < DBL_MIN) r = g = b = l; else if (l < DBL_MIN) r = g = b = 0.0f; else { const double q = l < 0.5f ? l * (1.0f + s) : l + s - l * s; const double p = 2.0f * l - q; double t[] = {h + 2.0f, h, h - 2.0f}; for (int i = 0; i < 3; ++i) { double *color; switch (i) { case 0: color = &r; break; case 1: color = &g; break; case 2: color = &b; break; } if (t[i] < 0.0f) t[i] += 6.0f; else if (t[i] > 6.0f) t[i] -= 6.0f; if (t[i] < 1.0f) *color = p + (q - p) * t[i]; else if (t[i] < 3.0f) *color = q; else if (t[i] < 4.0f) *color = p + (q - p) * (4.0f - t[i]); else *color = p; } } r *= 255.; g *= 255.; b *= 255.; r = max(0., min(255.0, r)); g = max(0., min(255.0, g)); b = max(0., min(255.0, b)); } __device__ void color_lighten(unsigned char &r, unsigned char &g, unsigned char &b, double quantity) { double rD, gD, bD, h, s, l, v; rD = r; gD = g; bD = b; if (quantity > 1) { RGBtoHSL(rD, gD, bD, h, s, l); l *= quantity; HSLtoRGB(h, s, l, rD, gD, bD); } else if (quantity < 1) { RGBtoHSV(rD, gD, bD, h, s, v); v *= quantity; HSVtoRGB(h, s, v, rD, gD, bD); } r = floor(rD); g = floor(gD); b = floor(bD); } __global__ void multibrot_kernel( unsigned int unroll, unsigned char *image, int width, int height, double ratio, int exponent, int iterations, double R, double eps, unsigned char borderR, unsigned char 
borderG, unsigned char borderB, double borderThickness, long normOrbitSkip, double normLightIntensity, double normLightAngle, double normLightHeight, unsigned char bgR, unsigned char bgG, unsigned char bgB, double kR, double kG, double kB, double kD, unsigned char internalBorderR, unsigned char internalBorderG, unsigned char internalBorderB, unsigned char internalCoreR, unsigned char internalCoreG, unsigned char internalCoreB, double internalK, double stripeDensity, double stripeLightIntensity, double zoom, double posX, double posY ) { unsigned int threadIndex = blockIdx.x * blockDim.x + threadIdx.x; for (unsigned int unrollIndex = 0; unrollIndex < unroll; unrollIndex++) { unsigned int currentIndex = threadIndex * unroll + unrollIndex; if (currentIndex >= width * height) { return; } //region Calculations double c_r = (((currentIndex % width - 1) - width / 2.) / (width * zoom)) * ratio + posX; double c_i = ((double) currentIndex / width - height / 2.) / (height * zoom) + posY; double z_r = c_r; double z_i = c_i; double last_z_r = 0; double last_z_i = 0; double dz_r = 1.; double dz_i = 0.; double dc_r = 1.; double dc_i = 0.; double dzdz_r = 0.; double dzdz_i = 0.; double dcdc_r = 0.; double dcdc_i = 0.; double dcdz_r = 0.; double dcdz_i = 0.; double p = 1.; double orbitCount = 0; double V = 0; long i; for (i = 0; i < iterations; i++) { double z2 = z_r * z_r + z_i * z_i; if (z2 > R * R) { V = log(z2) / p; break; } if (eps > 0 && dz_r * dz_r + dz_i * dz_i < eps * eps) { V = 0; break; } double dzdz_r_temp = 2 * ((z_r * dzdz_r - z_i * dzdz_i) + (dz_r * dz_r - dz_i * dz_i)); dzdz_i = 2 * ((z_r * dzdz_i + z_i * dzdz_r) + (dz_r * dz_i + dz_i * dz_r)); dzdz_r = dzdz_r_temp; double dcdc_r_temp = 2 * ((z_r * dcdc_r - z_i * dcdc_i) + (dc_r * dc_r - dc_i * dc_i)); dcdc_i = 2 * ((z_r * dcdc_i + z_i * dcdc_r) + (dc_r * dc_i + dc_i * dc_r)); dcdc_r = dcdc_r_temp; double dcdz_r_temp = 2 * ((z_r * dcdz_r - z_i * dcdz_i) + (dz_r * dc_r - dz_i * dc_i)); dcdz_i = 2 * ((z_r * dcdz_i + 
z_i * dcdz_r) + (dc_r * dz_i + dc_i * dz_r)); dcdz_r = dcdz_r_temp; double dz_r_temp = 2 * (z_r * dz_r - z_i * dz_i); dz_i = 2 * (z_r * dz_i + z_i * dz_r); dz_r = dz_r_temp; double dc_r_temp = 2 * (z_r * dc_r - z_i * dc_i) + 1; dc_i = 2 * (z_r * dc_i + z_i * dc_r); dc_r = dc_r_temp; p *= 2.; if (i >= normOrbitSkip) { orbitCount += 0.5 + 0.5 * sin(stripeDensity * atan2(last_z_i, last_z_r)); } last_z_r = z_r; last_z_i = z_i; int esp = exponent; if (esp != 0) { if (esp < 0) { esp = -esp; double z_r_temp = z_r / (z_r * z_r + z_i * z_i); z_i = -z_i / (z_r * z_r + z_i * z_i); z_r = z_r_temp; } double z_esp_r = z_r; double z_esp_i = z_i; for (int e = 1; e < esp; e++) { double z_esp_r_temp = (z_r * z_esp_r - z_i * z_esp_i); z_esp_i = (z_esp_i * z_r + z_i * z_esp_r); z_esp_r = z_esp_r_temp; } z_r = z_esp_r + c_r; z_i = z_esp_i + c_i; } else { z_r = 1.0; z_i = 0.0; } } // endregion if (V == 0) { // Inside! //region Interior distance estimation double u_r = 1 - dz_r; double u_i = dz_i; double v_r = (dc_r * u_r + dc_i * u_i) / (u_r * u_r + u_i * u_i); double v_i = (dc_i * u_r - dc_r * u_i) / (u_r * u_r + u_i * u_i); double l_r = (dzdz_r * v_r - dzdz_i * v_i); double l_i = (dzdz_r * v_i + dzdz_i * v_r); l_r += dcdz_r; l_i += dcdz_i; double d = (1 - (dz_r * dz_r + dz_i * dz_i)) / sqrt(l_r * l_r + l_i * l_i); //endregion // if(d < 50000) { // image[currentIndex * 4] = internalCoreR; // image[currentIndex * 4 + 1] = internalCoreG; // image[currentIndex * 4 + 2] = internalCoreB; // } else { // image[currentIndex * 4] = internalBorderR; // image[currentIndex * 4 + 1] = internalBorderG; // image[currentIndex * 4 + 2] = internalBorderB; // } // if (d < 1) { // image[currentIndex * 4] = 0; // image[currentIndex * 4 + 1] = (int) max(0., min(255., (255. 
* tanh(d)))); // image[currentIndex * 4 + 1] = (unsigned char) (max(0., min(255., 0 + d * (255 - 0)))); // image[currentIndex * 4 + 2] = 0; // } else { // image[currentIndex * 4] = 0; // image[currentIndex * 4 + 1] = 255; // image[currentIndex * 4 + 2] = 0; // } double mix = internalK > 0 ? log(d) / internalK : 1; if(mix < 0) { mix = 0; } if (mix < 1) { image[currentIndex * 4] = max(0., min(255., internalBorderR + mix * (internalCoreR - internalBorderR))); image[currentIndex * 4 + 1] = max(0., min(255., internalBorderG + mix * (internalCoreG - internalBorderG))); image[currentIndex * 4 + 2] = max(0., min(255., internalBorderB + mix * (internalCoreB - internalBorderB))); } else { image[currentIndex * 4] = internalCoreR; image[currentIndex * 4 + 1] = internalCoreG; image[currentIndex * 4 + 2] = internalCoreB; } } else { // Outside! //region Exterior distance estimation double rad = sqrt(z_r * z_r + z_i * z_i); double d = rad * 2. * log(rad) / sqrt(dc_r * dc_r + dc_i * dc_i); //endregion unsigned char tempR = bgR; unsigned char tempG = bgG; unsigned char tempB = bgB; //region Gradient Background Setup if (kR > 0.01 || kR < -0.01) { tempR = (unsigned char) (max(0., min(255., tempR + (255. * (1 - cos(log(V) / (kR))) / 2. / kD)))); } if (kG > 0.01 || kG < -0.01) { tempG = (unsigned char) (max(0., min(255., tempG + (255. * (1 - cos(log(V) / (kG))) / 2. / kD)))); } if (kB > 0.01 || kB < -0.01) { tempB = (unsigned char) (max(0., min(255., tempB + (255. * (1 - cos(log(V) / (kB))) / 2. / kD)))); } //endregion //region 3D Normal if (normLightIntensity != 1) { double vR = cos(normLightAngle * 2. * M_PI / 360.); double vI = sin(normLightAngle * 2. * M_PI / 360.); double lo = 0.5 * log(z_r * z_r + z_i * z_i); double conjR = ((1. + lo) * (dc_r * dc_r - dc_i * dc_i) - (lo) * (z_r * dcdc_r - z_i * dcdc_i)); double conjI = ((1. 
+ lo) * -(dc_r * dc_i + dc_i * dc_r) - (lo) * -(z_r * dcdc_i + z_i * dcdc_r)); double uR = (z_r * dc_r - z_i * dc_i); double uI = (z_r * dc_i + z_i * dc_r); double newUR = (uR * conjR - uI * conjI); uI = (uR * conjI + uI * conjR); uR = newUR; newUR = uR / sqrt(uR * uR + uI * uI); uI = uI / sqrt(uR * uR + uI * uI); uR = newUR; double t = uR * vR + uI * vI + normLightHeight; t = t / (1. + normLightHeight); if (t < 0) { t = 0; } else if (t > 1) { t = 1; } unsigned char normLightR = tempR; unsigned char normLightG = tempG; unsigned char normLightB = tempB; color_lighten(normLightR, normLightG, normLightB, normLightIntensity); double normShadowIntensity = 1 + (1 - normLightIntensity); unsigned char normShadowR = tempR; unsigned char normShadowG = tempG; unsigned char normShadowB = tempB; color_lighten(normShadowR, normShadowG, normShadowB, normShadowIntensity); tempR = (unsigned char) (normShadowR + (t * (normLightR - normShadowR))); tempG = (unsigned char) (normShadowG + (t * (normLightG - normShadowG))); tempB = (unsigned char) (normShadowB + (t * (normLightB - normShadowB))); } //endregion //region Stripe Average Colouring if (stripeLightIntensity != 1) { double lastOrbit = 0.5 + 0.5 * sin(stripeDensity * atan2(last_z_i, last_z_r)); double smallCount = orbitCount - lastOrbit; orbitCount /= (double) i; smallCount /= (double) i - 1; double frac = -1. + log10(2.0 * log(R * R)) / log10(2.) 
- log10(0.5 * log(last_z_r * last_z_r + last_z_i * last_z_i)) / log10(2.); double mix = frac * orbitCount + (1 - frac) * smallCount; if (mix < 0) { mix = 0; } else if (mix > 1) { mix = 1; } unsigned char stripeLightR = tempR; unsigned char stripeLightG = tempG; unsigned char stripeLightB = tempB; color_lighten(stripeLightR, stripeLightG, stripeLightB, stripeLightIntensity); double stripeShadowIntensity = 1 + (1 - stripeLightIntensity); unsigned char stripeShadowR = tempR; unsigned char stripeShadowG = tempG; unsigned char stripeShadowB = tempB; color_lighten(stripeShadowR, stripeShadowG, stripeShadowB, stripeShadowIntensity); tempR = (unsigned char) (stripeShadowR + (mix * (stripeLightR - stripeShadowR))); tempG = (unsigned char) (stripeShadowG + (mix * (stripeLightG - stripeShadowG))); tempB = (unsigned char) (stripeShadowB + (mix * (stripeLightB - stripeShadowB))); } //endregion //region Border if (borderThickness > 0) { double tBorder = d / borderThickness; if (tBorder < 1) { // Border tempR = (unsigned char) (borderR + tBorder * (tempR - borderR)); tempG = (unsigned char) (borderG + tBorder * (tempG - borderG)); tempB = (unsigned char) (borderB + tBorder * (tempB - borderB)); } } //endregion image[currentIndex * 4] = tempR; image[currentIndex * 4 + 1] = tempG; image[currentIndex * 4 + 2] = tempB; } } } void multibrot( unsigned int unroll, unsigned int blockSize, unsigned char *rgb, int width, int height, int exponent, int iterations, double R, double eps, unsigned char borderR, unsigned char borderG, unsigned char borderB, double borderThickness, long normOrbitSkip, double normLightIntensity, double normLightAngle, double normLightHeight, unsigned char bgR, unsigned char bgG, unsigned char bgB, double kR, double kG, double kB, double kD, unsigned char internalBorderR, unsigned char internalBorderG, unsigned char internalBorderB, unsigned char internalCoreR, unsigned char internalCoreG, unsigned char internalCoreB, double internalK, double stripeDensity, double 
stripeLightIntensity, double zoom, double posX, double posY) { cudaProfilerStart(); //region Setup cout << "Setting up..." << endl; double ratio = (double) width / height; unsigned int size = width * height; unsigned char *imageHost; imageHost = (unsigned char *) malloc(4 * size * sizeof(unsigned char)); unsigned char *imageDevice; gpuErrchk(cudaMallocManaged(&imageDevice, 4 * size * sizeof(unsigned char))); int suggestedBlockSize; int minGridSize; cudaOccupancyMaxPotentialBlockSize(&minGridSize, &suggestedBlockSize, multibrot_kernel, 0, 4 * size); cout << "Suggested BlockSize: " << suggestedBlockSize << endl << "Min GridSize: " << minGridSize << endl; int gridSize = (size + blockSize - 1) / blockSize / unroll; cout << "BlockSize: " << blockSize << endl << "GridSize: " << gridSize << endl << "Unroll: " << unroll << endl; cout << "Setup done!" << endl; //endregion //region Generation cout << "Fractal generation in process..." << endl; multibrot_kernel<<<gridSize, blockSize>>>(unroll, imageDevice, width, height, ratio, exponent, iterations, R, eps, borderR, borderG, borderB, borderThickness, normOrbitSkip, normLightIntensity, normLightAngle, normLightHeight, bgR, bgG, bgB, kR, kG, kB, kD, internalBorderR, internalBorderG, internalBorderB, internalCoreR, internalCoreG, internalCoreB, internalK, stripeDensity, stripeLightIntensity, zoom, posX, posY); gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); gpuErrchk(cudaMemcpy(imageHost, imageDevice, 4 * size * sizeof(unsigned char), cudaMemcpyDeviceToHost)); cout << "Generation done!" 
<< endl; int maxActiveBlocks; cudaOccupancyMaxActiveBlocksPerMultiprocessor(&maxActiveBlocks, multibrot_kernel, blockSize, 0); int device; cudaDeviceProp props; cudaGetDevice(&device); cudaGetDeviceProperties(&props, device); double occupancy = (maxActiveBlocks * blockSize / props.warpSize) / (double) (props.maxThreadsPerMultiProcessor / props.warpSize); cout << std::setprecision(4) << "Theoretical occupancy: " << occupancy << "%" << endl; //endregion for (int i = 0; i < size; i++) { rgb[i * 3 + 2] = imageHost[i * 4]; rgb[i * 3 + 1] = imageHost[i * 4 + 1]; rgb[i * 3] = imageHost[i * 4 + 2]; } //region Cleanup free(imageHost); cudaFree(imageDevice); cudaDeviceReset(); //endregion cudaProfilerStop(); }
a645668682ac72be05b33640a9315ed48caf9de3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright (c) 2016-present, Facebook, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // // Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. // NVIDIA/apex is licensed under the // BSD 3 - Clause "New" or "Revised" License // /* Modifications Copyright (c) Microsoft. */ #include "core/providers/cuda/cu_inc/common.cuh" #include "layer_norm_impl.h" namespace onnxruntime { namespace contrib { namespace cuda { using namespace onnxruntime::cuda; template <typename U, bool simplified> __device__ void cuWelfordOnlineSum( const U curr, U& mu, U& sigma2, U& count) { count = count + U(1); U delta = curr - mu; U lmean = mu + delta / count; mu = lmean; if (simplified) { sigma2 = sigma2 + curr * curr; } else { U delta2 = curr - lmean; sigma2 = sigma2 + delta * delta2; } } template <typename U, bool simplified> __device__ void cuChanOnlineSum( const U muB, const U sigma2B, const U countB, U& mu, U& sigma2, U& count) { U delta = muB - mu; U nA = count; U nB = countB; count = count + countB; U nX = count; if (nX > U(0)) { nA = nA / nX; nB = nB / nX; mu = nA * mu + nB * muB; if (simplified) { sigma2 = sigma2 + sigma2B; } else { sigma2 = sigma2 + sigma2B + delta * delta * nA * nB * nX; } } else { mu = U(0); sigma2 = U(0); } } template <typename T, typename U, bool simplified> __device__ void cuWelfordMuSigma2( const T* __restrict__ vals, const int n1, const int n2, const int i1, U& mu, 
U& sigma2, U* buf) { // Assumptions: // 1) blockDim.x == GPU_WARP_SIZE // 2) Tensor is contiguous // 3) 2*blockDim.y*sizeof(U)+blockDim.y*sizeof(int) shared memory available. // // compute variance and mean over n2 U count = U(0); mu = U(0); sigma2 = U(0); if (i1 < n1) { // one warp normalizes one n1 index, // synchronization is implicit // initialize with standard Welford algorithm const int numx = blockDim.x * blockDim.y; const int thrx = threadIdx.x + threadIdx.y * blockDim.x; const T* lvals = vals + i1 * n2; int l = 4 * thrx; for (; l + 3 < n2; l += 4 * numx) { for (int k = 0; k < 4; ++k) { U curr = static_cast<U>(lvals[l + k]); cuWelfordOnlineSum<U, simplified>(curr, mu, sigma2, count); } } for (; l < n2; ++l) { U curr = static_cast<U>(lvals[l]); cuWelfordOnlineSum<U, simplified>(curr, mu, sigma2, count); } // intra-warp reductions #pragma unroll for (int stride = GPU_WARP_SIZE / 2; stride > 0; stride /= 2) { U muB = WARP_SHFL_DOWN(mu, stride); U countB = WARP_SHFL_DOWN(count, stride); U sigma2B = WARP_SHFL_DOWN(sigma2, stride); cuChanOnlineSum<U, simplified>(muB, sigma2B, countB, mu, sigma2, count); } // threadIdx.x == 0 has correct values for each warp // inter-warp reductions if (blockDim.y > 1) { U* ubuf = (U*)buf; U* ibuf = (U*)(ubuf + blockDim.y); for (int offset = blockDim.y / 2; offset > 0; offset /= 2) { // upper half of warps write to shared if (threadIdx.x == 0 && threadIdx.y >= offset && threadIdx.y < 2 * offset) { const int wrt_y = threadIdx.y - offset; ubuf[2 * wrt_y] = mu; ubuf[2 * wrt_y + 1] = sigma2; ibuf[wrt_y] = count; } __syncthreads(); // lower half merges if (threadIdx.x == 0 && threadIdx.y < offset) { U muB = ubuf[2 * threadIdx.y]; U sigma2B = ubuf[2 * threadIdx.y + 1]; U countB = ibuf[threadIdx.y]; cuChanOnlineSum<U, simplified>(muB, sigma2B, countB, mu, sigma2, count); } __syncthreads(); } // threadIdx.x = 0 && threadIdx.y == 0 only thread that has correct values if (threadIdx.x == 0 && threadIdx.y == 0) { ubuf[0] = mu; ubuf[1] = 
sigma2; } __syncthreads(); mu = ubuf[0]; sigma2 = ubuf[1] / U(n2); // don't care about final value of count, we know count == n2 } else { mu = WARP_SHFL(mu, 0); sigma2 = WARP_SHFL(sigma2 / U(n2), 0); } } } template <bool simplified> __device__ void cuWelfordMuSigma2( const half* __restrict__ vals, const int n1, const int n2, const int i1, float& mu, float& sigma2, float* buf) { // Assumptions: // 1) blockDim.x == GPU_WARP_SIZE // 2) Tensor is contiguous // 3) 2*blockDim.y*sizeof(U)+blockDim.y*sizeof(int) shared memory available. // // compute variance and mean over n2 float count = 0.0f; mu = float(0); sigma2 = float(0); if (i1 < n1) { // one warp normalizes one n1 index, // synchronization is implicit // initialize with standard Welford algorithm const int numx = blockDim.x * blockDim.y; const int thrx = threadIdx.x + threadIdx.y * blockDim.x; const half* lvals = vals + i1 * n2; int l = 8 * thrx; if ((((size_t)lvals) & 3) != 0) { // 16 bit alignment // first thread consumes first point if (thrx == 0) { float curr = static_cast<float>(lvals[0]); cuWelfordOnlineSum<float, simplified>(curr, mu, sigma2, count); } ++l; } // at this point, lvals[l] are 32 bit aligned for all threads. 
for (; l + 7 < n2; l += 8 * numx) { for (int k = 0; k < 8; k += 2) { float2 curr = __half22float2(*((__half2*)(lvals + l + k))); cuWelfordOnlineSum<float, simplified>(static_cast<float>(curr.x), mu, sigma2, count); cuWelfordOnlineSum<float, simplified>(static_cast<float>(curr.y), mu, sigma2, count); } } for (; l < n2; ++l) { float curr = static_cast<float>(lvals[l]); cuWelfordOnlineSum<float, simplified>(curr, mu, sigma2, count); } // intra-warp reductions #pragma unroll for (int stride = GPU_WARP_SIZE / 2; stride > 0; stride /= 2) { float muB = WARP_SHFL_DOWN(mu, stride); float countB = WARP_SHFL_DOWN(count, stride); float sigma2B = WARP_SHFL_DOWN(sigma2, stride); cuChanOnlineSum<float, simplified>(muB, sigma2B, countB, mu, sigma2, count); } // threadIdx.x == 0 has correct values for each warp // inter-warp reductions if (blockDim.y > 1) { float* ubuf = (float*)buf; float* ibuf = (float*)(ubuf + blockDim.y); for (int offset = blockDim.y / 2; offset > 0; offset /= 2) { // upper half of warps write to shared if (threadIdx.x == 0 && threadIdx.y >= offset && threadIdx.y < 2 * offset) { const int wrt_y = threadIdx.y - offset; ubuf[2 * wrt_y] = mu; ubuf[2 * wrt_y + 1] = sigma2; ibuf[wrt_y] = count; } __syncthreads(); // lower half merges if (threadIdx.x == 0 && threadIdx.y < offset) { float muB = ubuf[2 * threadIdx.y]; float sigma2B = ubuf[2 * threadIdx.y + 1]; float countB = ibuf[threadIdx.y]; cuChanOnlineSum<float, simplified>(muB, sigma2B, countB, mu, sigma2, count); } __syncthreads(); } // threadIdx.x = 0 && threadIdx.y == 0 only thread that has correct values if (threadIdx.x == 0 && threadIdx.y == 0) { ubuf[0] = mu; ubuf[1] = sigma2; } __syncthreads(); mu = ubuf[0]; sigma2 = ubuf[1] / float(n2); // don't care about final value of count, we know count == n2 } else { mu = WARP_SHFL(mu, 0); sigma2 = WARP_SHFL(sigma2 / float(n2), 0); } } } template <typename U> __device__ U rsqrt(U v) { return U(1) / sqrt(v); } template <> __device__ float rsqrt(float v) { return 
rsqrtf(v); } template <> __device__ double rsqrt(double v) { return rsqrt(v); } namespace { // This is the un-specialized struct. Note that we prevent instantiation of this // struct by putting an undefined symbol in the function body so it won't compile. // template <typename T> // struct SharedMemory // { // // Ensure that we won't compile any un-specialized types // __device__ T *getPointer() // { // extern __device__ void error(void); // error(); // return NULL; // } // }; // https://github.com/NVIDIA/apex/issues/246 template <typename T> struct SharedMemory; template <> struct SharedMemory<float> { __device__ float* getPointer() { extern __shared__ float s_float[]; return s_float; } }; template <> struct SharedMemory<double> { __device__ double* getPointer() { extern __shared__ double s_double[]; return s_double; } }; } // namespace template <typename T, typename U, bool simplified> __global__ void cuApplyLayerNorm( T* __restrict__ output_vals, U* __restrict__ mean, U* __restrict__ invvar, const T* __restrict__ vals, const int n1, const int n2, const U epsilon, const T* __restrict__ gamma, const T* __restrict__ beta) { // Assumptions: // 1) blockDim.x == GPU_WARP_SIZE // 2) Tensors are contiguous // for (int i1 = blockIdx.y; i1 < n1; i1 += gridDim.y) { SharedMemory<U> shared; U* buf = shared.getPointer(); U mu, sigma2; cuWelfordMuSigma2<T, U, simplified>(vals, n1, n2, i1, mu, sigma2, buf); const T* lvals = vals + i1 * n2; T* ovals = output_vals + i1 * n2; U c_invvar = rsqrt(sigma2 + epsilon); const int numx = blockDim.x * blockDim.y; const int thrx = threadIdx.x + threadIdx.y * blockDim.x; for (int i = thrx; i < n2; i += numx) { U curr = static_cast<U>(lvals[i]); T gamma_i = (gamma != NULL) ? gamma[i]: (T)1; T beta_i = (beta != NULL) ? 
beta[i] : (T) 0; if (simplified) { ovals[i] = gamma_i * static_cast<T>(c_invvar * curr); } else { ovals[i] = gamma_i * static_cast<T>(c_invvar * (curr - mu)) + beta_i; } } if (threadIdx.x == 0 && threadIdx.y == 0) { if (mean != nullptr) mean[i1] = mu; if (invvar != nullptr) invvar[i1] = c_invvar; } } } template <typename T, typename U, bool simplified> void HostApplyLayerNorm( const hipDeviceProp_t& prop, T* output, U* mean, U* invvar, const T* input, int n1, int n2, double epsilon, const T* gamma, const T* beta) { const int maxGridY = prop.maxGridSize[1]; const int warp_size = prop.warpSize; ORT_ENFORCE(warp_size == GPU_WARP_SIZE); const dim3 threads(warp_size, 4, 1); const dim3 blocks(1, std::min<unsigned int>(n1, maxGridY), 1); int nshared = threads.y > 1 ? threads.y * sizeof(U) + (threads.y / 2) * sizeof(U) : 0; hipLaunchKernelGGL(( cuApplyLayerNorm<T, U, simplified>), dim3(blocks), dim3(threads), nshared, 0, output, mean, invvar, input, n1, n2, U(epsilon), gamma, beta); } #define LAYERNORM_LINEAR_IMPL(T, U, simplified) \ template void HostApplyLayerNorm<T, U, simplified>(const hipDeviceProp_t& prop, T* output, U* mean, U* invvar, const T* input, int n1, int n2, \ double epsilon, const T* gamma, const T* beta); LAYERNORM_LINEAR_IMPL(float, float, true) LAYERNORM_LINEAR_IMPL(half, float, true) LAYERNORM_LINEAR_IMPL(double, double, true) LAYERNORM_LINEAR_IMPL(float, float, false) LAYERNORM_LINEAR_IMPL(half, float, false) LAYERNORM_LINEAR_IMPL(double, double, false) //LAYERNORM_LINEAR_IMPL(half, half) #if TORCH_HIP_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) LAYERNORM_LINEAR_IMPL(nv_bfloat16, float, true) LAYERNORM_LINEAR_IMPL(nv_bfloat16, float, false) #endif } // namespace cuda } // namespace contrib } // namespace onnxruntime
a645668682ac72be05b33640a9315ed48caf9de3.cu
/** * Copyright (c) 2016-present, Facebook, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // // Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. // NVIDIA/apex is licensed under the // BSD 3 - Clause "New" or "Revised" License // /* Modifications Copyright (c) Microsoft. */ #include "core/providers/cuda/cu_inc/common.cuh" #include "layer_norm_impl.h" namespace onnxruntime { namespace contrib { namespace cuda { using namespace onnxruntime::cuda; template <typename U, bool simplified> __device__ void cuWelfordOnlineSum( const U curr, U& mu, U& sigma2, U& count) { count = count + U(1); U delta = curr - mu; U lmean = mu + delta / count; mu = lmean; if (simplified) { sigma2 = sigma2 + curr * curr; } else { U delta2 = curr - lmean; sigma2 = sigma2 + delta * delta2; } } template <typename U, bool simplified> __device__ void cuChanOnlineSum( const U muB, const U sigma2B, const U countB, U& mu, U& sigma2, U& count) { U delta = muB - mu; U nA = count; U nB = countB; count = count + countB; U nX = count; if (nX > U(0)) { nA = nA / nX; nB = nB / nX; mu = nA * mu + nB * muB; if (simplified) { sigma2 = sigma2 + sigma2B; } else { sigma2 = sigma2 + sigma2B + delta * delta * nA * nB * nX; } } else { mu = U(0); sigma2 = U(0); } } template <typename T, typename U, bool simplified> __device__ void cuWelfordMuSigma2( const T* __restrict__ vals, const int n1, const int n2, const int i1, U& mu, U& sigma2, U* buf) { // Assumptions: // 1) blockDim.x == GPU_WARP_SIZE // 2) Tensor is 
contiguous // 3) 2*blockDim.y*sizeof(U)+blockDim.y*sizeof(int) shared memory available. // // compute variance and mean over n2 U count = U(0); mu = U(0); sigma2 = U(0); if (i1 < n1) { // one warp normalizes one n1 index, // synchronization is implicit // initialize with standard Welford algorithm const int numx = blockDim.x * blockDim.y; const int thrx = threadIdx.x + threadIdx.y * blockDim.x; const T* lvals = vals + i1 * n2; int l = 4 * thrx; for (; l + 3 < n2; l += 4 * numx) { for (int k = 0; k < 4; ++k) { U curr = static_cast<U>(lvals[l + k]); cuWelfordOnlineSum<U, simplified>(curr, mu, sigma2, count); } } for (; l < n2; ++l) { U curr = static_cast<U>(lvals[l]); cuWelfordOnlineSum<U, simplified>(curr, mu, sigma2, count); } // intra-warp reductions #pragma unroll for (int stride = GPU_WARP_SIZE / 2; stride > 0; stride /= 2) { U muB = WARP_SHFL_DOWN(mu, stride); U countB = WARP_SHFL_DOWN(count, stride); U sigma2B = WARP_SHFL_DOWN(sigma2, stride); cuChanOnlineSum<U, simplified>(muB, sigma2B, countB, mu, sigma2, count); } // threadIdx.x == 0 has correct values for each warp // inter-warp reductions if (blockDim.y > 1) { U* ubuf = (U*)buf; U* ibuf = (U*)(ubuf + blockDim.y); for (int offset = blockDim.y / 2; offset > 0; offset /= 2) { // upper half of warps write to shared if (threadIdx.x == 0 && threadIdx.y >= offset && threadIdx.y < 2 * offset) { const int wrt_y = threadIdx.y - offset; ubuf[2 * wrt_y] = mu; ubuf[2 * wrt_y + 1] = sigma2; ibuf[wrt_y] = count; } __syncthreads(); // lower half merges if (threadIdx.x == 0 && threadIdx.y < offset) { U muB = ubuf[2 * threadIdx.y]; U sigma2B = ubuf[2 * threadIdx.y + 1]; U countB = ibuf[threadIdx.y]; cuChanOnlineSum<U, simplified>(muB, sigma2B, countB, mu, sigma2, count); } __syncthreads(); } // threadIdx.x = 0 && threadIdx.y == 0 only thread that has correct values if (threadIdx.x == 0 && threadIdx.y == 0) { ubuf[0] = mu; ubuf[1] = sigma2; } __syncthreads(); mu = ubuf[0]; sigma2 = ubuf[1] / U(n2); // don't care about final 
value of count, we know count == n2 } else { mu = WARP_SHFL(mu, 0); sigma2 = WARP_SHFL(sigma2 / U(n2), 0); } } } template <bool simplified> __device__ void cuWelfordMuSigma2( const half* __restrict__ vals, const int n1, const int n2, const int i1, float& mu, float& sigma2, float* buf) { // Assumptions: // 1) blockDim.x == GPU_WARP_SIZE // 2) Tensor is contiguous // 3) 2*blockDim.y*sizeof(U)+blockDim.y*sizeof(int) shared memory available. // // compute variance and mean over n2 float count = 0.0f; mu = float(0); sigma2 = float(0); if (i1 < n1) { // one warp normalizes one n1 index, // synchronization is implicit // initialize with standard Welford algorithm const int numx = blockDim.x * blockDim.y; const int thrx = threadIdx.x + threadIdx.y * blockDim.x; const half* lvals = vals + i1 * n2; int l = 8 * thrx; if ((((size_t)lvals) & 3) != 0) { // 16 bit alignment // first thread consumes first point if (thrx == 0) { float curr = static_cast<float>(lvals[0]); cuWelfordOnlineSum<float, simplified>(curr, mu, sigma2, count); } ++l; } // at this point, lvals[l] are 32 bit aligned for all threads. 
for (; l + 7 < n2; l += 8 * numx) { for (int k = 0; k < 8; k += 2) { float2 curr = __half22float2(*((__half2*)(lvals + l + k))); cuWelfordOnlineSum<float, simplified>(static_cast<float>(curr.x), mu, sigma2, count); cuWelfordOnlineSum<float, simplified>(static_cast<float>(curr.y), mu, sigma2, count); } } for (; l < n2; ++l) { float curr = static_cast<float>(lvals[l]); cuWelfordOnlineSum<float, simplified>(curr, mu, sigma2, count); } // intra-warp reductions #pragma unroll for (int stride = GPU_WARP_SIZE / 2; stride > 0; stride /= 2) { float muB = WARP_SHFL_DOWN(mu, stride); float countB = WARP_SHFL_DOWN(count, stride); float sigma2B = WARP_SHFL_DOWN(sigma2, stride); cuChanOnlineSum<float, simplified>(muB, sigma2B, countB, mu, sigma2, count); } // threadIdx.x == 0 has correct values for each warp // inter-warp reductions if (blockDim.y > 1) { float* ubuf = (float*)buf; float* ibuf = (float*)(ubuf + blockDim.y); for (int offset = blockDim.y / 2; offset > 0; offset /= 2) { // upper half of warps write to shared if (threadIdx.x == 0 && threadIdx.y >= offset && threadIdx.y < 2 * offset) { const int wrt_y = threadIdx.y - offset; ubuf[2 * wrt_y] = mu; ubuf[2 * wrt_y + 1] = sigma2; ibuf[wrt_y] = count; } __syncthreads(); // lower half merges if (threadIdx.x == 0 && threadIdx.y < offset) { float muB = ubuf[2 * threadIdx.y]; float sigma2B = ubuf[2 * threadIdx.y + 1]; float countB = ibuf[threadIdx.y]; cuChanOnlineSum<float, simplified>(muB, sigma2B, countB, mu, sigma2, count); } __syncthreads(); } // threadIdx.x = 0 && threadIdx.y == 0 only thread that has correct values if (threadIdx.x == 0 && threadIdx.y == 0) { ubuf[0] = mu; ubuf[1] = sigma2; } __syncthreads(); mu = ubuf[0]; sigma2 = ubuf[1] / float(n2); // don't care about final value of count, we know count == n2 } else { mu = WARP_SHFL(mu, 0); sigma2 = WARP_SHFL(sigma2 / float(n2), 0); } } } template <typename U> __device__ U rsqrt(U v) { return U(1) / sqrt(v); } template <> __device__ float rsqrt(float v) { return 
rsqrtf(v); } template <> __device__ double rsqrt(double v) { return rsqrt(v); } namespace { // This is the un-specialized struct. Note that we prevent instantiation of this // struct by putting an undefined symbol in the function body so it won't compile. // template <typename T> // struct SharedMemory // { // // Ensure that we won't compile any un-specialized types // __device__ T *getPointer() // { // extern __device__ void error(void); // error(); // return NULL; // } // }; // https://github.com/NVIDIA/apex/issues/246 template <typename T> struct SharedMemory; template <> struct SharedMemory<float> { __device__ float* getPointer() { extern __shared__ float s_float[]; return s_float; } }; template <> struct SharedMemory<double> { __device__ double* getPointer() { extern __shared__ double s_double[]; return s_double; } }; } // namespace template <typename T, typename U, bool simplified> __global__ void cuApplyLayerNorm( T* __restrict__ output_vals, U* __restrict__ mean, U* __restrict__ invvar, const T* __restrict__ vals, const int n1, const int n2, const U epsilon, const T* __restrict__ gamma, const T* __restrict__ beta) { // Assumptions: // 1) blockDim.x == GPU_WARP_SIZE // 2) Tensors are contiguous // for (int i1 = blockIdx.y; i1 < n1; i1 += gridDim.y) { SharedMemory<U> shared; U* buf = shared.getPointer(); U mu, sigma2; cuWelfordMuSigma2<T, U, simplified>(vals, n1, n2, i1, mu, sigma2, buf); const T* lvals = vals + i1 * n2; T* ovals = output_vals + i1 * n2; U c_invvar = rsqrt(sigma2 + epsilon); const int numx = blockDim.x * blockDim.y; const int thrx = threadIdx.x + threadIdx.y * blockDim.x; for (int i = thrx; i < n2; i += numx) { U curr = static_cast<U>(lvals[i]); T gamma_i = (gamma != NULL) ? gamma[i]: (T)1; T beta_i = (beta != NULL) ? 
beta[i] : (T) 0; if (simplified) { ovals[i] = gamma_i * static_cast<T>(c_invvar * curr); } else { ovals[i] = gamma_i * static_cast<T>(c_invvar * (curr - mu)) + beta_i; } } if (threadIdx.x == 0 && threadIdx.y == 0) { if (mean != nullptr) mean[i1] = mu; if (invvar != nullptr) invvar[i1] = c_invvar; } } } template <typename T, typename U, bool simplified> void HostApplyLayerNorm( const cudaDeviceProp& prop, T* output, U* mean, U* invvar, const T* input, int n1, int n2, double epsilon, const T* gamma, const T* beta) { const int maxGridY = prop.maxGridSize[1]; const int warp_size = prop.warpSize; ORT_ENFORCE(warp_size == GPU_WARP_SIZE); const dim3 threads(warp_size, 4, 1); const dim3 blocks(1, std::min<unsigned int>(n1, maxGridY), 1); int nshared = threads.y > 1 ? threads.y * sizeof(U) + (threads.y / 2) * sizeof(U) : 0; cuApplyLayerNorm<T, U, simplified><<<blocks, threads, nshared, 0>>>( output, mean, invvar, input, n1, n2, U(epsilon), gamma, beta); } #define LAYERNORM_LINEAR_IMPL(T, U, simplified) \ template void HostApplyLayerNorm<T, U, simplified>(const cudaDeviceProp& prop, T* output, U* mean, U* invvar, const T* input, int n1, int n2, \ double epsilon, const T* gamma, const T* beta); LAYERNORM_LINEAR_IMPL(float, float, true) LAYERNORM_LINEAR_IMPL(half, float, true) LAYERNORM_LINEAR_IMPL(double, double, true) LAYERNORM_LINEAR_IMPL(float, float, false) LAYERNORM_LINEAR_IMPL(half, float, false) LAYERNORM_LINEAR_IMPL(double, double, false) //LAYERNORM_LINEAR_IMPL(half, half) #if CUDA_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) LAYERNORM_LINEAR_IMPL(nv_bfloat16, float, true) LAYERNORM_LINEAR_IMPL(nv_bfloat16, float, false) #endif } // namespace cuda } // namespace contrib } // namespace onnxruntime
b6a271f30e1bd6188ef5c29297fecd72afdf3334.hip
// !!! This is a file automatically generated by hipify!!! #include "CudaDBSCAN.h" #include "hip/hip_runtime.h" #include "hip/device_functions.h" #include "rocblas.h" #include "device_launch_parameters.h" #include <iostream> #include <fstream> #include <sstream> #include <cstdlib> #include <ctime> #include <math.h> #include <queue> #include <string.h> #include <stdlib.h> #include <vector> using namespace std; struct Point { float dimensions[128]; int cluster; int noise; //-1 noise; string img; }; float __device__ dev_euclidean_distance(const Point &src, const Point &dest) { float res = 0.0; for(int i=0; i<128; i++){ res += (src.dimensions[i] - dest.dimensions[i]) * (src.dimensions[i] - dest.dimensions[i]); } return sqrt(res); } /*to get the total list*/ void __global__ dev_region_query(Point* sample, int num, int* neighbors, float eps, int min_nb) { unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; unsigned int line,col,pointer = tid; unsigned int count; while (pointer < num * num) {//id line = pointer / num; col = pointer % num; float radius; if (line <= col) { radius = dev_euclidean_distance(sample[line], sample[col]); if (radius <= eps) { neighbors[pointer] = 1; } neighbors[col * num + line] = neighbors[pointer];// } pointer += blockDim.x * gridDim.x; } __syncthreads(); pointer = tid; while (pointer < num) { count = 1; line = pointer * num; for (int i = 0; i < num; i++) { if (pointer != i && neighbors[line+i]) {//p count++; } } if (count >= min_nb) { sample[pointer].noise++; } pointer += blockDim.x * gridDim.x; } } void host_algorithm_dbscan(Point* host_sample, int num, float eps, int min_nb, int block_num, int thread_num) { /*sample*/ Point* cuda_sample; hipMalloc((void**)&cuda_sample, num * sizeof(Point)); hipMemcpy(cuda_sample, host_sample, num * sizeof(Point), hipMemcpyHostToDevice); /*neighbor list*/ int *host_neighbor = new int[num*num](); int *dev_neighbor; hipMalloc((void**)&dev_neighbor, num * num * sizeof(int)); dev_region_query << <block_num, 
thread_num >> > (cuda_sample, num, dev_neighbor, eps, min_nb); hipMemcpy(host_sample, cuda_sample, num * sizeof(Point), hipMemcpyDeviceToHost); hipMemcpy(host_neighbor, dev_neighbor, num * num * sizeof(int), hipMemcpyDeviceToHost); hipFree(cuda_sample);hipFree(dev_neighbor); queue<int> expand; int cur_cluster = 0; for (int i = 0; i < num; i++) { if (host_sample[i].noise >= 0 && host_sample[i].cluster < 1) { host_sample[i].cluster = ++cur_cluster; int src = i * num; for (int j = 0; j < num; j++) { if (host_neighbor[src + j]) { host_sample[j].cluster = cur_cluster; expand.push(j); } } while (!expand.empty()) {/*expand the cluster*/ if (host_sample[expand.front()].noise >= 0) { src = expand.front() * num; for (int j = 0; j < num; j++) { if (host_neighbor[src + j] && host_sample[j].cluster < 1) { host_sample[j].cluster = cur_cluster; expand.push(j); } } } expand.pop(); } } } ofstream fout; fout.open("result.html"); for (int i = 0; i < num; i++) { fout <<"<img src='"<< host_sample[i].img << "'/>" <<host_sample[i].cluster<< endl; } fout.close(); } // int countLines(const char *filename){ ifstream fin(filename, ios::in); int n=0; string lineStr; while(getline(fin, lineStr)) n++; return n; } extern "C" JNIEXPORT void JNICALL Java_CudaDBSCAN_runDBSCAN__Ljava_lang_String_2IFIII (JNIEnv *env, jobject obj, jstring jfile_name, jint jsize, jfloat jeps, jint jmin_pts, jint jblock_num, jint jthread_num){ // step1: const char *file_name; file_name = env->GetStringUTFChars(jfile_name, NULL); /* C-String (char*) */ /* file_name == NULLJVMC-String (char*) */ if(file_name == NULL) { cout << "------>no file\n" << endl; } // step2: step1Point // //int point_count = countLines(file_name); int point_count = static_cast<int>(jsize); Point *host_sample = new Point[point_count]; // Point int sample_num = 0; string lineStr; ifstream fin(file_name, ios::in); while(getline(fin, lineStr)){ stringstream ss(lineStr); vector<string> lineArray; string str; // while (getline(ss, str, ',')) 
lineArray.push_back(str);/* vectorlineArray[0] */ // lineArray[0]_Pointdimensions char *datas; const int len = lineArray[0].length(); datas = new char[len + 1]; strcpy(datas, lineArray[0].c_str()); const char dims[2] = "_"; char *token; // token = strtok(datas, dims); // int i=0; while( token != NULL ) { host_sample[sample_num].dimensions[i++] = atof(token); token = strtok(NULL, dims); } host_sample[sample_num].noise = -1; host_sample[sample_num].cluster = -1; host_sample[sample_num].img = lineArray[1]; sample_num++; if(sample_num == point_count){ break; } } cout << "------>TOTAL SAMPLE NUMB0->" << sample_num << "<-----" << endl; clock_t start, finish; start = clock(); host_algorithm_dbscan(host_sample, sample_num, static_cast<float>(jeps), static_cast<int>(jmin_pts), static_cast<int>(jblock_num), static_cast<int>(jthread_num)); finish = clock(); delete []host_sample; cout<< file_name << " speed time: "<< (finish-start)*1.0/CLOCKS_PER_SEC <<"s\n"<<endl; env->ReleaseStringUTFChars(jfile_name, file_name); /* JVMString */ }
b6a271f30e1bd6188ef5c29297fecd72afdf3334.cu
#include "CudaDBSCAN.h" #include "cuda_runtime.h" #include "device_functions.h" #include "cublas_v2.h" #include "device_launch_parameters.h" #include <iostream> #include <fstream> #include <sstream> #include <cstdlib> #include <ctime> #include <math.h> #include <queue> #include <string.h> #include <stdlib.h> #include <vector> using namespace std; struct Point { float dimensions[128]; int cluster; int noise; //-1 noise; string img; }; float __device__ dev_euclidean_distance(const Point &src, const Point &dest) { float res = 0.0; for(int i=0; i<128; i++){ res += (src.dimensions[i] - dest.dimensions[i]) * (src.dimensions[i] - dest.dimensions[i]); } return sqrt(res); } /*to get the total list*/ void __global__ dev_region_query(Point* sample, int num, int* neighbors, float eps, int min_nb) { unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; unsigned int line,col,pointer = tid; unsigned int count; while (pointer < num * num) {//全场唯一id line = pointer / num; col = pointer % num; float radius; if (line <= col) { radius = dev_euclidean_distance(sample[line], sample[col]); if (radius <= eps) { neighbors[pointer] = 1; } neighbors[col * num + line] = neighbors[pointer];//对角线 } pointer += blockDim.x * gridDim.x; } __syncthreads(); pointer = tid; while (pointer < num) { count = 1; line = pointer * num; for (int i = 0; i < num; i++) { if (pointer != i && neighbors[line+i]) {//包含p点邻域元素个数 count++; } } if (count >= min_nb) { sample[pointer].noise++; } pointer += blockDim.x * gridDim.x; } } void host_algorithm_dbscan(Point* host_sample, int num, float eps, int min_nb, int block_num, int thread_num) { /*sample*/ Point* cuda_sample; cudaMalloc((void**)&cuda_sample, num * sizeof(Point)); cudaMemcpy(cuda_sample, host_sample, num * sizeof(Point), cudaMemcpyHostToDevice); /*neighbor list*/ int *host_neighbor = new int[num*num](); int *dev_neighbor; cudaMalloc((void**)&dev_neighbor, num * num * sizeof(int)); dev_region_query << <block_num, thread_num >> > (cuda_sample, num, 
dev_neighbor, eps, min_nb); cudaMemcpy(host_sample, cuda_sample, num * sizeof(Point), cudaMemcpyDeviceToHost); cudaMemcpy(host_neighbor, dev_neighbor, num * num * sizeof(int), cudaMemcpyDeviceToHost); cudaFree(cuda_sample);cudaFree(dev_neighbor); queue<int> expand; int cur_cluster = 0; for (int i = 0; i < num; i++) { if (host_sample[i].noise >= 0 && host_sample[i].cluster < 1) { host_sample[i].cluster = ++cur_cluster; int src = i * num; for (int j = 0; j < num; j++) { if (host_neighbor[src + j]) { host_sample[j].cluster = cur_cluster; expand.push(j); } } while (!expand.empty()) {/*expand the cluster*/ if (host_sample[expand.front()].noise >= 0) { src = expand.front() * num; for (int j = 0; j < num; j++) { if (host_neighbor[src + j] && host_sample[j].cluster < 1) { host_sample[j].cluster = cur_cluster; expand.push(j); } } } expand.pop(); } } } ofstream fout; fout.open("result.html"); for (int i = 0; i < num; i++) { fout <<"<img src='"<< host_sample[i].img << "'/>" <<host_sample[i].cluster<< endl; } fout.close(); } // 读取文件行数 int countLines(const char *filename){ ifstream fin(filename, ios::in); int n=0; string lineStr; while(getline(fin, lineStr)) n++; return n; } extern "C" JNIEXPORT void JNICALL Java_CudaDBSCAN_runDBSCAN__Ljava_lang_String_2IFIII (JNIEnv *env, jobject obj, jstring jfile_name, jint jsize, jfloat jeps, jint jmin_pts, jint jblock_num, jint jthread_num){ // step1: 读取文件 const char *file_name; file_name = env->GetStringUTFChars(jfile_name, NULL); /* 获得传入的文件名,将其转换为C-String (char*) */ /* file_name == NULL意味着JVM为C-String (char*)分配内存失败 */ if(file_name == NULL) { cout << "------>no file\n" << endl; } // step2: 从step1中获取的文件中解析出所有的特征点,初始化结构体Point数组 // 获取文件的行数 //int point_count = countLines(file_name); int point_count = static_cast<int>(jsize); Point *host_sample = new Point[point_count]; // 然后将每行的数据读到Point结构体中 int sample_num = 0; string lineStr; ifstream fin(file_name, ios::in); while(getline(fin, lineStr)){ stringstream ss(lineStr); vector<string> lineArray; 
string str; // 按照逗号分隔 while (getline(ss, str, ',')) lineArray.push_back(str);/* 将文件中每一行存入到vector中,其中lineArray[0]存放的是特征值 */ // 分离出特征值即lineArray[0]后,是一个以“_”分割的字符串,解析出来存到Point结构体的dimensions中 char *datas; const int len = lineArray[0].length(); datas = new char[len + 1]; strcpy(datas, lineArray[0].c_str()); const char dims[2] = "_"; char *token; // 获取第一个子字符串 token = strtok(datas, dims); // 继续获取其他的子字符串 int i=0; while( token != NULL ) { host_sample[sample_num].dimensions[i++] = atof(token); token = strtok(NULL, dims); } host_sample[sample_num].noise = -1; host_sample[sample_num].cluster = -1; host_sample[sample_num].img = lineArray[1]; sample_num++; if(sample_num == point_count){ break; } } cout << "------>TOTAL SAMPLE NUMB0->" << sample_num << "<-----" << endl; clock_t start, finish; start = clock(); host_algorithm_dbscan(host_sample, sample_num, static_cast<float>(jeps), static_cast<int>(jmin_pts), static_cast<int>(jblock_num), static_cast<int>(jthread_num)); finish = clock(); delete []host_sample; cout<< file_name << " speed time: "<< (finish-start)*1.0/CLOCKS_PER_SEC <<"s\n"<<endl; env->ReleaseStringUTFChars(jfile_name, file_name); /* 通知JVM释放String所占的内存 */ }
7a0b8c474249aa84ff2594ed01c2b20bf878d48f.hip
// !!! This is a file automatically generated by hipify!!! // $ nvcc --expt-extended-lambda -std=c++14 -Iagency chaining_executor_demo.cu #include <iostream> #include <typeinfo> #include <cassert> #include "chaining_executor.hpp" #include "submit.hpp" #include "cuda/single_executor.hpp" struct my_receiver { template<class T> __host__ __device__ void set_value(T arg) { #ifndef __CUDA_ARCH__ std::cout << "my_receiver::set_value: received " << typeid(arg).name() << std::endl; #else printf("Hello world from my_receiver\n"); #endif } }; int main() { chaining_executor<cuda::single_executor> ex = make_chaining_executor(cuda::single_executor()); just<chaining_executor<cuda::single_executor>> s1 = ex.schedule(); { // test submisison on a trivial sender op::submit(s1, my_receiver()); } { // test submission on a value task auto s2 = ex.make_value_task(std::move(s1), [] __host__ __device__ (chaining_executor<cuda::single_executor>) { printf("Hello world from value task\n"); return 0; }); op::submit(std::move(s2), my_receiver()); if(hipError_t error = hipDeviceSynchronize()) { throw std::runtime_error("CUDA error after hipDeviceSynchronize: " + std::string(hipGetErrorString(error))); } } //{ // // XXX this is currently not compiling // // // test share on a value task // auto s2 = ex.make_value_task(std::move(s1), [] __host__ __device__ (fusing_executor<cuda::single_executor> ex) // { // printf("Hello world from value task\n"); // return 13; // }); // auto fut = op::share(std::move(s2)); // assert(fut.get() == 13); //} return 0; }
7a0b8c474249aa84ff2594ed01c2b20bf878d48f.cu
// $ nvcc --expt-extended-lambda -std=c++14 -Iagency chaining_executor_demo.cu #include <iostream> #include <typeinfo> #include <cassert> #include "chaining_executor.hpp" #include "submit.hpp" #include "cuda/single_executor.hpp" struct my_receiver { template<class T> __host__ __device__ void set_value(T arg) { #ifndef __CUDA_ARCH__ std::cout << "my_receiver::set_value: received " << typeid(arg).name() << std::endl; #else printf("Hello world from my_receiver\n"); #endif } }; int main() { chaining_executor<cuda::single_executor> ex = make_chaining_executor(cuda::single_executor()); just<chaining_executor<cuda::single_executor>> s1 = ex.schedule(); { // test submisison on a trivial sender op::submit(s1, my_receiver()); } { // test submission on a value task auto s2 = ex.make_value_task(std::move(s1), [] __host__ __device__ (chaining_executor<cuda::single_executor>) { printf("Hello world from value task\n"); return 0; }); op::submit(std::move(s2), my_receiver()); if(cudaError_t error = cudaDeviceSynchronize()) { throw std::runtime_error("CUDA error after cudaDeviceSynchronize: " + std::string(cudaGetErrorString(error))); } } //{ // // XXX this is currently not compiling // // // test share on a value task // auto s2 = ex.make_value_task(std::move(s1), [] __host__ __device__ (fusing_executor<cuda::single_executor> ex) // { // printf("Hello world from value task\n"); // return 13; // }); // auto fut = op::share(std::move(s2)); // assert(fut.get() == 13); //} return 0; }