hip_filename
stringlengths
5
84
hip_content
stringlengths
79
9.69M
cuda_filename
stringlengths
4
83
cuda_content
stringlengths
19
9.69M
e48e6400a39494d406a58779458edcac2cd590b2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ #ifndef _SCAN_NAIVE_KERNEL_H_ #define _SCAN_NAIVE_KERNEL_H_ /////////////////////////////////////////////////////////////////////////////// //! Naive compute implementation of scan, one thread per element //! Not work efficient: log(n) steps, but n * (log(n) - 1) adds. //! Not shared storage efficient either -- this requires ping-ponging //! arrays in shared memory due to hazards so 2 * n storage space. //! //! Pro: Simple //! Con: Not work efficient //! //! 
@param g_odata output data in global memory //! @param g_idata input data in global memory //! @param n input number of elements to scan from input data /////////////////////////////////////////////////////////////////////////////// __global__ void scan_naive(float *g_odata, float *g_idata, int n) { // Dynamically allocated shared memory for scan kernels extern __shared__ float temp[]; int thid = threadIdx.x; int pout = 0; int pin = 1; // Cache the computational window in shared memory temp[pout*n + thid] = (thid > 0) ? g_idata[thid-1] : 0; for (int offset = 1; offset < n; offset *= 2) { pout = 1 - pout; pin = 1 - pout; __syncthreads(); temp[pout*n+thid] = temp[pin*n+thid]; if (thid >= offset) temp[pout*n+thid] += temp[pin*n+thid - offset]; } __syncthreads(); g_odata[thid] = temp[pout*n+thid]; } // Add additional kernels here __global__ void compact_stream_kernel() { } #endif // #ifndef _SCAN_NAIVE_KERNEL_H_
e48e6400a39494d406a58779458edcac2cd590b2.cu
/* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ #ifndef _SCAN_NAIVE_KERNEL_H_ #define _SCAN_NAIVE_KERNEL_H_ /////////////////////////////////////////////////////////////////////////////// //! Naive compute implementation of scan, one thread per element //! Not work efficient: log(n) steps, but n * (log(n) - 1) adds. //! Not shared storage efficient either -- this requires ping-ponging //! arrays in shared memory due to hazards so 2 * n storage space. //! //! Pro: Simple //! Con: Not work efficient //! //! @param g_odata output data in global memory //! @param g_idata input data in global memory //! 
@param n input number of elements to scan from input data /////////////////////////////////////////////////////////////////////////////// __global__ void scan_naive(float *g_odata, float *g_idata, int n) { // Dynamically allocated shared memory for scan kernels extern __shared__ float temp[]; int thid = threadIdx.x; int pout = 0; int pin = 1; // Cache the computational window in shared memory temp[pout*n + thid] = (thid > 0) ? g_idata[thid-1] : 0; for (int offset = 1; offset < n; offset *= 2) { pout = 1 - pout; pin = 1 - pout; __syncthreads(); temp[pout*n+thid] = temp[pin*n+thid]; if (thid >= offset) temp[pout*n+thid] += temp[pin*n+thid - offset]; } __syncthreads(); g_odata[thid] = temp[pout*n+thid]; } // Add additional kernels here __global__ void compact_stream_kernel() { } #endif // #ifndef _SCAN_NAIVE_KERNEL_H_
885090298166038a772feb928601679432a9414e.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright 2016 Fixstars Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http ://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "path_aggregation.hpp" #include "vertical_path_aggregation.hpp" #include "horizontal_path_aggregation.hpp" #include "oblique_path_aggregation.hpp" namespace sgm { template <size_t MAX_DISPARITY> PathAggregation<MAX_DISPARITY>::PathAggregation() : m_cost_buffer() { for(unsigned int i = 0; i < NUM_PATHS; ++i){ hipStreamCreate(&m_streams[i]); hipEventCreate(&m_events[i]); } } template <size_t MAX_DISPARITY> PathAggregation<MAX_DISPARITY>::~PathAggregation(){ for(unsigned int i = 0; i < NUM_PATHS; ++i){ hipStreamSynchronize(m_streams[i]); hipStreamDestroy(m_streams[i]); hipEventDestroy(m_events[i]); } } template <size_t MAX_DISPARITY> void PathAggregation<MAX_DISPARITY>::enqueue( const feature_type *left, const feature_type *right, size_t width, size_t height, unsigned int p1, unsigned int p2, hipStream_t stream) { const size_t buffer_size = width * height * MAX_DISPARITY * NUM_PATHS; if(m_cost_buffer.size() != buffer_size){ m_cost_buffer = DeviceBuffer<cost_type>(buffer_size); } const size_t buffer_step = width * height * MAX_DISPARITY; hipStreamSynchronize(stream); path_aggregation::enqueue_aggregate_up2down_path<MAX_DISPARITY>( m_cost_buffer.data() + 0 * buffer_step, left, right, width, height, p1, p2, m_streams[0]); path_aggregation::enqueue_aggregate_down2up_path<MAX_DISPARITY>( m_cost_buffer.data() + 1 * buffer_step, left, right, width, height, 
p1, p2, m_streams[1]); path_aggregation::enqueue_aggregate_left2right_path<MAX_DISPARITY>( m_cost_buffer.data() + 2 * buffer_step, left, right, width, height, p1, p2, m_streams[2]); path_aggregation::enqueue_aggregate_right2left_path<MAX_DISPARITY>( m_cost_buffer.data() + 3 * buffer_step, left, right, width, height, p1, p2, m_streams[3]); path_aggregation::enqueue_aggregate_upleft2downright_path<MAX_DISPARITY>( m_cost_buffer.data() + 4 * buffer_step, left, right, width, height, p1, p2, m_streams[4]); path_aggregation::enqueue_aggregate_upright2downleft_path<MAX_DISPARITY>( m_cost_buffer.data() + 5 * buffer_step, left, right, width, height, p1, p2, m_streams[5]); path_aggregation::enqueue_aggregate_downright2upleft_path<MAX_DISPARITY>( m_cost_buffer.data() + 6 * buffer_step, left, right, width, height, p1, p2, m_streams[6]); path_aggregation::enqueue_aggregate_downleft2upright_path<MAX_DISPARITY>( m_cost_buffer.data() + 7 * buffer_step, left, right, width, height, p1, p2, m_streams[7]); for(unsigned int i = 0; i < NUM_PATHS; ++i){ hipEventRecord(m_events[i], m_streams[i]); hipStreamWaitEvent(stream, m_events[i], 0); } } template class PathAggregation< 64>; template class PathAggregation<128>; }
885090298166038a772feb928601679432a9414e.cu
/* Copyright 2016 Fixstars Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http ://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "path_aggregation.hpp" #include "vertical_path_aggregation.hpp" #include "horizontal_path_aggregation.hpp" #include "oblique_path_aggregation.hpp" namespace sgm { template <size_t MAX_DISPARITY> PathAggregation<MAX_DISPARITY>::PathAggregation() : m_cost_buffer() { for(unsigned int i = 0; i < NUM_PATHS; ++i){ cudaStreamCreate(&m_streams[i]); cudaEventCreate(&m_events[i]); } } template <size_t MAX_DISPARITY> PathAggregation<MAX_DISPARITY>::~PathAggregation(){ for(unsigned int i = 0; i < NUM_PATHS; ++i){ cudaStreamSynchronize(m_streams[i]); cudaStreamDestroy(m_streams[i]); cudaEventDestroy(m_events[i]); } } template <size_t MAX_DISPARITY> void PathAggregation<MAX_DISPARITY>::enqueue( const feature_type *left, const feature_type *right, size_t width, size_t height, unsigned int p1, unsigned int p2, cudaStream_t stream) { const size_t buffer_size = width * height * MAX_DISPARITY * NUM_PATHS; if(m_cost_buffer.size() != buffer_size){ m_cost_buffer = DeviceBuffer<cost_type>(buffer_size); } const size_t buffer_step = width * height * MAX_DISPARITY; cudaStreamSynchronize(stream); path_aggregation::enqueue_aggregate_up2down_path<MAX_DISPARITY>( m_cost_buffer.data() + 0 * buffer_step, left, right, width, height, p1, p2, m_streams[0]); path_aggregation::enqueue_aggregate_down2up_path<MAX_DISPARITY>( m_cost_buffer.data() + 1 * buffer_step, left, right, width, height, p1, p2, m_streams[1]); 
path_aggregation::enqueue_aggregate_left2right_path<MAX_DISPARITY>( m_cost_buffer.data() + 2 * buffer_step, left, right, width, height, p1, p2, m_streams[2]); path_aggregation::enqueue_aggregate_right2left_path<MAX_DISPARITY>( m_cost_buffer.data() + 3 * buffer_step, left, right, width, height, p1, p2, m_streams[3]); path_aggregation::enqueue_aggregate_upleft2downright_path<MAX_DISPARITY>( m_cost_buffer.data() + 4 * buffer_step, left, right, width, height, p1, p2, m_streams[4]); path_aggregation::enqueue_aggregate_upright2downleft_path<MAX_DISPARITY>( m_cost_buffer.data() + 5 * buffer_step, left, right, width, height, p1, p2, m_streams[5]); path_aggregation::enqueue_aggregate_downright2upleft_path<MAX_DISPARITY>( m_cost_buffer.data() + 6 * buffer_step, left, right, width, height, p1, p2, m_streams[6]); path_aggregation::enqueue_aggregate_downleft2upright_path<MAX_DISPARITY>( m_cost_buffer.data() + 7 * buffer_step, left, right, width, height, p1, p2, m_streams[7]); for(unsigned int i = 0; i < NUM_PATHS; ++i){ cudaEventRecord(m_events[i], m_streams[i]); cudaStreamWaitEvent(stream, m_events[i], 0); } } template class PathAggregation< 64>; template class PathAggregation<128>; }
bca16800bea2866635bc29702d65e5d82261fc89.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved // modified from // https://github.com/facebookresearch/detectron2/blob/master/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cuda.cu #include "box_iou_rotated_cuda.cuh" #include "pytorch_cuda_helper.hpp" void box_iou_rotated_cuda(const Tensor boxes1, const Tensor boxes2, Tensor ious, const bool aligned) { using scalar_t = float; AT_ASSERTM(boxes1.type().is_cuda(), "boxes1 must be a CUDA tensor"); AT_ASSERTM(boxes2.type().is_cuda(), "boxes2 must be a CUDA tensor"); int output_size = ious.numel(); int num_boxes1 = boxes1.size(0); int num_boxes2 = boxes2.size(0); at::hip::HIPGuardMasqueradingAsCUDA device_guard(boxes1.device()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); hipLaunchKernelGGL(( box_iou_rotated_cuda_kernel<scalar_t>) , dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, stream, num_boxes1, num_boxes2, boxes1.data_ptr<scalar_t>(), boxes2.data_ptr<scalar_t>(), (scalar_t*)ious.data_ptr<scalar_t>(), aligned); AT_CUDA_CHECK(hipGetLastError()); }
bca16800bea2866635bc29702d65e5d82261fc89.cu
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved // modified from // https://github.com/facebookresearch/detectron2/blob/master/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cuda.cu #include "box_iou_rotated_cuda.cuh" #include "pytorch_cuda_helper.hpp" void box_iou_rotated_cuda(const Tensor boxes1, const Tensor boxes2, Tensor ious, const bool aligned) { using scalar_t = float; AT_ASSERTM(boxes1.type().is_cuda(), "boxes1 must be a CUDA tensor"); AT_ASSERTM(boxes2.type().is_cuda(), "boxes2 must be a CUDA tensor"); int output_size = ious.numel(); int num_boxes1 = boxes1.size(0); int num_boxes2 = boxes2.size(0); at::cuda::CUDAGuard device_guard(boxes1.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); box_iou_rotated_cuda_kernel<scalar_t> <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK, 0, stream>>>( num_boxes1, num_boxes2, boxes1.data_ptr<scalar_t>(), boxes2.data_ptr<scalar_t>(), (scalar_t*)ious.data_ptr<scalar_t>(), aligned); AT_CUDA_CHECK(cudaGetLastError()); }
f79faab67a5035b643c5588da0a87369f05ff2a1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdio> #include <math.h> #include "ext_cuda_chunk.hpp" #include "kernels/ppcg_solve.cuknl" /* * PPCG SOLVER KERNEL */ // Entry point for PPCG initialisation extern "C" void ext_ppcg_init_( const int* chunk, const int* preconditionerOn, const double* alphas, const double* betas, int* numSteps) { Chunks[*chunk-1]->PPCGInit( *preconditionerOn, alphas, betas, *numSteps); } // Entry point for initialising sd extern "C" void ext_ppcg_init_sd_( const int* chunk, const double* theta) { Chunks[*chunk-1]->PPCGInitSd(*theta); } // Entry point for PPCG inner step extern "C" void ext_ppcg_inner_( const int* chunk, const int* currentStep) { Chunks[*chunk-1]->PPCGInner(*currentStep); } // Initialises the PPCG solver void TeaLeafCudaChunk::PPCGInit( const bool preconditionerOn, const double* alphas, const double* betas, const int numSteps) { preconditioner = preconditionerOn; LoadAlphaBeta(alphas, betas, numSteps); } // Initialises sd void TeaLeafCudaChunk::PPCGInitSd( const double theta) { PRE_KERNEL(2*HALO_PAD); hipLaunchKernelGGL(( CuKnlPPCGInitSd), dim3(numBlocks), dim3(BLOCK_SIZE), 0, 0, innerX, innerY, xCells, theta, preconditioner, dR, dMi, dSd); POST_KERNEL("PPCG Init SD"); } // The PPCG inner step void TeaLeafCudaChunk::PPCGInner( const int currentStep) { PRE_KERNEL(2*HALO_PAD); hipLaunchKernelGGL(( CuKnlPPCGUpdateR), dim3(numBlocks), dim3(BLOCK_SIZE), 0, 0, innerX, innerY, xCells, dKx, dKy, dSd, dU, dR); POST_KERNEL("PPCG Calc U"); START_PROFILING(); hipLaunchKernelGGL(( CuKnlPPCGCalcSd), dim3(numBlocks), dim3(BLOCK_SIZE), 0, 0, innerX, innerY, xCells, currentStep-1, preconditioner, dR, dMi, dAlphas, dBetas, dSd); POST_KERNEL("PPCG Calc SD"); }
f79faab67a5035b643c5588da0a87369f05ff2a1.cu
#include <cstdio> #include <math.h> #include "ext_cuda_chunk.hpp" #include "kernels/ppcg_solve.cuknl" /* * PPCG SOLVER KERNEL */ // Entry point for PPCG initialisation extern "C" void ext_ppcg_init_( const int* chunk, const int* preconditionerOn, const double* alphas, const double* betas, int* numSteps) { Chunks[*chunk-1]->PPCGInit( *preconditionerOn, alphas, betas, *numSteps); } // Entry point for initialising sd extern "C" void ext_ppcg_init_sd_( const int* chunk, const double* theta) { Chunks[*chunk-1]->PPCGInitSd(*theta); } // Entry point for PPCG inner step extern "C" void ext_ppcg_inner_( const int* chunk, const int* currentStep) { Chunks[*chunk-1]->PPCGInner(*currentStep); } // Initialises the PPCG solver void TeaLeafCudaChunk::PPCGInit( const bool preconditionerOn, const double* alphas, const double* betas, const int numSteps) { preconditioner = preconditionerOn; LoadAlphaBeta(alphas, betas, numSteps); } // Initialises sd void TeaLeafCudaChunk::PPCGInitSd( const double theta) { PRE_KERNEL(2*HALO_PAD); CuKnlPPCGInitSd<<<numBlocks, BLOCK_SIZE>>>( innerX, innerY, xCells, theta, preconditioner, dR, dMi, dSd); POST_KERNEL("PPCG Init SD"); } // The PPCG inner step void TeaLeafCudaChunk::PPCGInner( const int currentStep) { PRE_KERNEL(2*HALO_PAD); CuKnlPPCGUpdateR<<<numBlocks, BLOCK_SIZE>>>( innerX, innerY, xCells, dKx, dKy, dSd, dU, dR); POST_KERNEL("PPCG Calc U"); START_PROFILING(); CuKnlPPCGCalcSd<<<numBlocks, BLOCK_SIZE>>>( innerX, innerY, xCells, currentStep-1, preconditioner, dR, dMi, dAlphas, dBetas, dSd); POST_KERNEL("PPCG Calc SD"); }
0d931b95bac93aa78ac0c34e41cb6ccd1aef713c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cmath> #include <stdio.h> #include <cassert> #include <iostream> #include "mish.h" namespace nvinfer1 { MishPlugin::MishPlugin() { } MishPlugin::~MishPlugin() { } // create the plugin at runtime from a byte stream MishPlugin::MishPlugin(const void* data, size_t length) { assert(length == sizeof(input_size_)); input_size_ = *reinterpret_cast<const int*>(data); } void MishPlugin::serialize(void* buffer) const noexcept { *reinterpret_cast<int*>(buffer) = input_size_; } size_t MishPlugin::getSerializationSize() const noexcept { return sizeof(input_size_); } int MishPlugin::initialize()noexcept { return 0; } bool MishPlugin::supportsFormat(DataType type, PluginFormat format) const noexcept { return (type == DataType::kFLOAT && format == PluginFormat::kLINEAR); } void MishPlugin::configureWithFormat(const Dims* inputDims, int nbInputs, const Dims* outputDims, int nbOutputs, DataType type, PluginFormat format, int maxBatchSize) noexcept { } Dims MishPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims)noexcept { assert(nbInputDims == 1); assert(index == 0); input_size_ = inputs[0].d[0] * inputs[0].d[1] * inputs[0].d[2]; // Output dimensions return Dims3(inputs[0].d[0], inputs[0].d[1], inputs[0].d[2]); } // Set plugin namespace void MishPlugin::setPluginNamespace(const char* pluginNamespace)noexcept { mPluginNamespace = pluginNamespace; } const char* MishPlugin::getPluginNamespace() const noexcept { return mPluginNamespace; } // Return the DataType of the plugin output at the requested index DataType MishPlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const noexcept { return DataType::kFLOAT; } // Return true if output tensor is broadcast across a batch. 
bool MishPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const noexcept { return false; } // Return true if plugin can use input that is broadcast across batch without replication. bool MishPlugin::canBroadcastInputAcrossBatch(int inputIndex) const noexcept { return false; } void MishPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput)noexcept { } // Attach the plugin object to an execution context and grant the plugin the access to some context resource. void MishPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator)noexcept { } // Detach the plugin object from its execution context. void MishPlugin::detachFromContext()noexcept {} const char* MishPlugin::getPluginType() const noexcept { return "Mish_TRT"; } const char* MishPlugin::getPluginVersion() const noexcept { return "1"; } void MishPlugin::destroy()noexcept { delete this; } // Clone the plugin IPluginV2* MishPlugin::clone() const noexcept { MishPlugin *p = new MishPlugin(); p->input_size_ = input_size_; p->setPluginNamespace(mPluginNamespace); return p; } __device__ float tanh_activate_kernel(float x){return (2/(1 + expf(-2*x)) - 1);} __device__ float softplus_kernel(float x, float threshold = 20) { if (x > threshold) return x; // too large else if (x < -threshold) return expf(x); // too small return logf(expf(x) + 1); } __global__ void mish_kernel(const float *input, float *output, int num_elem) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx >= num_elem) return; //float t = exp(input[idx]); //if (input[idx] > 20.0) { // t *= t; // output[idx] = (t - 1.0) / (t + 1.0); //} else { // float tt = t * t; // output[idx] = (tt + 2.0 * t) / (tt + 2.0 * t + 2.0); //} //output[idx] *= input[idx]; output[idx] = input[idx] * tanh_activate_kernel(softplus_kernel(input[idx])); } void MishPlugin::forwardGpu(const float *const * inputs, float* output, 
hipStream_t stream, int batchSize) { int block_size = thread_count_; int grid_size = (input_size_ * batchSize + block_size - 1) / block_size; hipLaunchKernelGGL(( mish_kernel), dim3(grid_size), dim3(block_size), 0, 0, inputs[0], output, input_size_ * batchSize); } int MishPlugin::enqueue(int batchSize, const void* const* inputs, void* const* outputs, void* workspace, hipStream_t stream) noexcept { //assert(batchSize == 1); //GPU //CUDA_CHECK(hipStreamSynchronize(stream)); forwardGpu((const float *const *)inputs, (float*)outputs[0], stream, batchSize); return 0; } int MishPlugin::enqueue(int batchSize, const void* const* inputs, void** outputs, void* workspace, hipStream_t stream) noexcept { return enqueue(batchSize, inputs, (void* const*)outputs, workspace, stream); } PluginFieldCollection MishPluginCreator::mFC{}; std::vector<PluginField> MishPluginCreator::mPluginAttributes; MishPluginCreator::MishPluginCreator() { mPluginAttributes.clear(); mFC.nbFields = mPluginAttributes.size(); mFC.fields = mPluginAttributes.data(); } const char* MishPluginCreator::getPluginName() const noexcept { return "Mish_TRT"; } const char* MishPluginCreator::getPluginVersion() const noexcept { return "1"; } const PluginFieldCollection* MishPluginCreator::getFieldNames()noexcept { return &mFC; } IPluginV2* MishPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc)noexcept { MishPlugin* obj = new MishPlugin(); obj->setPluginNamespace(mNamespace.c_str()); return obj; } IPluginV2* MishPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength)noexcept { // This object will be deleted when the network is destroyed, which will // call MishPlugin::destroy() MishPlugin* obj = new MishPlugin(serialData, serialLength); obj->setPluginNamespace(mNamespace.c_str()); return obj; } void MishPluginCreator::setPluginNamespace(const char* libNamespace)noexcept { mNamespace = libNamespace; } const char* MishPluginCreator::getPluginNamespace() 
const noexcept { return mNamespace.c_str(); } }
0d931b95bac93aa78ac0c34e41cb6ccd1aef713c.cu
#include <cmath> #include <stdio.h> #include <cassert> #include <iostream> #include "mish.h" namespace nvinfer1 { MishPlugin::MishPlugin() { } MishPlugin::~MishPlugin() { } // create the plugin at runtime from a byte stream MishPlugin::MishPlugin(const void* data, size_t length) { assert(length == sizeof(input_size_)); input_size_ = *reinterpret_cast<const int*>(data); } void MishPlugin::serialize(void* buffer) const noexcept { *reinterpret_cast<int*>(buffer) = input_size_; } size_t MishPlugin::getSerializationSize() const noexcept { return sizeof(input_size_); } int MishPlugin::initialize()noexcept { return 0; } bool MishPlugin::supportsFormat(DataType type, PluginFormat format) const noexcept { return (type == DataType::kFLOAT && format == PluginFormat::kLINEAR); } void MishPlugin::configureWithFormat(const Dims* inputDims, int nbInputs, const Dims* outputDims, int nbOutputs, DataType type, PluginFormat format, int maxBatchSize) noexcept { } Dims MishPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims)noexcept { assert(nbInputDims == 1); assert(index == 0); input_size_ = inputs[0].d[0] * inputs[0].d[1] * inputs[0].d[2]; // Output dimensions return Dims3(inputs[0].d[0], inputs[0].d[1], inputs[0].d[2]); } // Set plugin namespace void MishPlugin::setPluginNamespace(const char* pluginNamespace)noexcept { mPluginNamespace = pluginNamespace; } const char* MishPlugin::getPluginNamespace() const noexcept { return mPluginNamespace; } // Return the DataType of the plugin output at the requested index DataType MishPlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const noexcept { return DataType::kFLOAT; } // Return true if output tensor is broadcast across a batch. bool MishPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const noexcept { return false; } // Return true if plugin can use input that is broadcast across batch without replication. 
bool MishPlugin::canBroadcastInputAcrossBatch(int inputIndex) const noexcept { return false; } void MishPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput)noexcept { } // Attach the plugin object to an execution context and grant the plugin the access to some context resource. void MishPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator)noexcept { } // Detach the plugin object from its execution context. void MishPlugin::detachFromContext()noexcept {} const char* MishPlugin::getPluginType() const noexcept { return "Mish_TRT"; } const char* MishPlugin::getPluginVersion() const noexcept { return "1"; } void MishPlugin::destroy()noexcept { delete this; } // Clone the plugin IPluginV2* MishPlugin::clone() const noexcept { MishPlugin *p = new MishPlugin(); p->input_size_ = input_size_; p->setPluginNamespace(mPluginNamespace); return p; } __device__ float tanh_activate_kernel(float x){return (2/(1 + expf(-2*x)) - 1);} __device__ float softplus_kernel(float x, float threshold = 20) { if (x > threshold) return x; // too large else if (x < -threshold) return expf(x); // too small return logf(expf(x) + 1); } __global__ void mish_kernel(const float *input, float *output, int num_elem) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx >= num_elem) return; //float t = exp(input[idx]); //if (input[idx] > 20.0) { // t *= t; // output[idx] = (t - 1.0) / (t + 1.0); //} else { // float tt = t * t; // output[idx] = (tt + 2.0 * t) / (tt + 2.0 * t + 2.0); //} //output[idx] *= input[idx]; output[idx] = input[idx] * tanh_activate_kernel(softplus_kernel(input[idx])); } void MishPlugin::forwardGpu(const float *const * inputs, float* output, cudaStream_t stream, int batchSize) { int block_size = thread_count_; int grid_size = (input_size_ * batchSize + block_size - 1) / block_size; mish_kernel<<<grid_size, block_size>>>(inputs[0], output, input_size_ * batchSize); } int 
MishPlugin::enqueue(int batchSize, const void* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept { //assert(batchSize == 1); //GPU //CUDA_CHECK(cudaStreamSynchronize(stream)); forwardGpu((const float *const *)inputs, (float*)outputs[0], stream, batchSize); return 0; } int MishPlugin::enqueue(int batchSize, const void* const* inputs, void** outputs, void* workspace, cudaStream_t stream) noexcept { return enqueue(batchSize, inputs, (void* const*)outputs, workspace, stream); } PluginFieldCollection MishPluginCreator::mFC{}; std::vector<PluginField> MishPluginCreator::mPluginAttributes; MishPluginCreator::MishPluginCreator() { mPluginAttributes.clear(); mFC.nbFields = mPluginAttributes.size(); mFC.fields = mPluginAttributes.data(); } const char* MishPluginCreator::getPluginName() const noexcept { return "Mish_TRT"; } const char* MishPluginCreator::getPluginVersion() const noexcept { return "1"; } const PluginFieldCollection* MishPluginCreator::getFieldNames()noexcept { return &mFC; } IPluginV2* MishPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc)noexcept { MishPlugin* obj = new MishPlugin(); obj->setPluginNamespace(mNamespace.c_str()); return obj; } IPluginV2* MishPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength)noexcept { // This object will be deleted when the network is destroyed, which will // call MishPlugin::destroy() MishPlugin* obj = new MishPlugin(serialData, serialLength); obj->setPluginNamespace(mNamespace.c_str()); return obj; } void MishPluginCreator::setPluginNamespace(const char* libNamespace)noexcept { mNamespace = libNamespace; } const char* MishPluginCreator::getPluginNamespace() const noexcept { return mNamespace.c_str(); } }
781c2155e42dbfc345721cd3495531a5bb27f6af.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.> #include<math.h> #include<string.h> #include<iostream> #include<cuda.h> __global__ void sq(int *d,int n) { __shared__ s[n]; int t= threadIdx.x; if(t==0 || t==2) s[t]= d[2*t] * d[2*t+1]; if(t==1) s[t]= 2*(d[2*t]*d[2*t+1]); __syncthreads(); d[2*t]=s[t]; } int main() { int n=6; int ori[6],d[6],ans1; int no,x,y; cout<<"Enter the number"; cin>>no; x=(no/10)*10; y=no%10; ori[0]=ori[2]=ori[4]=x; ori[1]=ori[3]=ori[6]=y; int *d_d; hipMalloc(&d_d,n*sizeof(int)); hipMemcpy(d_d,ori,n*sizeof(int)),hipMemcpyHostToDevice); hipLaunchKernelGGL(( sq), dim3(1),dim3(n/2), 0, 0, d_d,n/2); hipMemcpy(d,d_d,n*sizeof(int)),hipMemcpyDeviceToHost); hipFree(d_d); ans1=d[0]+d[2]+d[4]; cout<<"Ans is"<<ans1; return 0; }
781c2155e42dbfc345721cd3495531a5bb27f6af.cu
#include<stdio.> #include<math.h> #include<string.h> #include<iostream> #include<cuda.h> __global__ void sq(int *d,int n) { __shared__ s[n]; int t= threadIdx.x; if(t==0 || t==2) s[t]= d[2*t] * d[2*t+1]; if(t==1) s[t]= 2*(d[2*t]*d[2*t+1]); __syncthreads(); d[2*t]=s[t]; } int main() { int n=6; int ori[6],d[6],ans1; int no,x,y; cout<<"Enter the number"; cin>>no; x=(no/10)*10; y=no%10; ori[0]=ori[2]=ori[4]=x; ori[1]=ori[3]=ori[6]=y; int *d_d; cudaMalloc(&d_d,n*sizeof(int)); cudaMemcpy(d_d,ori,n*sizeof(int)),cudaMemcpyHostToDevice); sq<<<1,n/2>>>(d_d,n/2); cudaMemcpy(d,d_d,n*sizeof(int)),cudaMemcpyDeviceToHost); cudaFree(d_d); ans1=d[0]+d[2]+d[4]; cout<<"Ans is"<<ans1; return 0; }
a409d8f8aa7c7b2daa2ae072141ee13bfa45d82e.hip
// !!! This is a file automatically generated by hipify!!! #if !MEGDNN_TEGRA_X1 // generated by gen_cuda_conv_bias_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" using LayoutSrc = cutlass::layout::TensorNCxHWx<4>; using LayoutFilter = cutlass::layout::TensorCxRSKx<4>; using LayoutDst = cutlass::layout::TensorNCHW; using ThreadBlockShape = cutlass::gemm::GemmShape<16, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>; using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwish< float, 1, int32_t, float, float>; using Convolution = cutlass::conv::device::Convolution< int8_t, LayoutSrc, int8_t, LayoutFilter, float, LayoutDst, float, LayoutDst, int32_t, cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle, 2, 4, 4, true, cutlass::arch::OpMultiplyAdd>; template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( const typename Convolution::ElementSrc* d_src, const typename Convolution::ElementFilter* d_filter, const typename Convolution::ElementBias* d_bias, const typename Convolution::ElementDst* d_z, typename Convolution::ElementDst* d_dst, int* workspace, typename Convolution::ConvolutionParameter const& conv_param, typename Convolution::EpilogueOutputOp::Params const& epilogue, hipStream_t stream); #pragma GCC diagnostic pop #endif
a409d8f8aa7c7b2daa2ae072141ee13bfa45d82e.cu
#if !MEGDNN_TEGRA_X1 // generated by gen_cuda_conv_bias_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" using LayoutSrc = cutlass::layout::TensorNCxHWx<4>; using LayoutFilter = cutlass::layout::TensorCxRSKx<4>; using LayoutDst = cutlass::layout::TensorNCHW; using ThreadBlockShape = cutlass::gemm::GemmShape<16, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>; using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwish< float, 1, int32_t, float, float>; using Convolution = cutlass::conv::device::Convolution< int8_t, LayoutSrc, int8_t, LayoutFilter, float, LayoutDst, float, LayoutDst, int32_t, cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle, 2, 4, 4, true, cutlass::arch::OpMultiplyAdd>; template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( const typename Convolution::ElementSrc* d_src, const typename Convolution::ElementFilter* d_filter, const typename Convolution::ElementBias* d_bias, const typename Convolution::ElementDst* d_z, typename Convolution::ElementDst* d_dst, int* workspace, typename Convolution::ConvolutionParameter const& conv_param, typename Convolution::EpilogueOutputOp::Params const& epilogue, cudaStream_t stream); #pragma GCC diagnostic pop #endif
16d225782fff50e9db7ea6cd343adf748140e021.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "src/cuda/conv_bias/ptx_helper.cuh" #include "src/cuda/integer_subbyte_utils.cuh" #include "src/cuda/query_blocksize.cuh" using namespace megdnn; using namespace cuda; using namespace ptx; namespace { template <uint32_t size_bits, uint32_t interleaved> __device__ __forceinline__ void reorder_imma_filter_func( int8_t* dst, const int8_t* src, uint32_t OC, uint32_t IC, uint32_t FH, uint32_t FW, uint32_t lane) { static constexpr uint32_t elements_per_lane = 128 / size_bits; uint32_t elements = lane * elements_per_lane; uint32_t row = elements / (IC * FH * FW); uint32_t col = elements - row * IC * FH * FW; uint32_t sec = row / 4; uint32_t res = col & (interleaved - 1); uint32_t sec_sec = row & 3; uint32_t sec_res = (row & 15) / 4; uint32_t crosswise_offset = ((sec_sec >> 1) * 2 * interleaved) + (((sec_sec & 1) ^ (sec_res >> 1)) * interleaved); uint32_t residue_offset = ((res / elements_per_lane) ^ (sec_res & 1)) * elements_per_lane; uint32_t dst_offset = (sec / 2) * 8 * FH * FW * IC + (col / interleaved) * (8 * interleaved) + (sec & 1) * (4 * interleaved) + crosswise_offset + residue_offset; static constexpr uint32_t instruction_shape_col = 8; // 4 threads per Quad static constexpr uint32_t elements_per_thread = instruction_shape_col / 4; // 4 threads per Quad static constexpr uint32_t reordered_elements_per_thread = interleaved / 4; uint32_t elem_in_interleaved = row % interleaved; uint32_t elem_in_interleaved_pack = elem_in_interleaved / elements_per_thread; int elem_new = (row / interleaved * interleaved + elem_in_interleaved_pack % 4 * reordered_elements_per_thread + elem_in_interleaved_pack / 4 * elements_per_thread + elem_in_interleaved % elements_per_thread) * (IC * FH * FW) + col; *(reinterpret_cast<int4*>(dst + (dst_offset * size_bits / 8))) = *(reinterpret_cast<const int4*>(src + (elem_new * size_bits / 8))); } template <uint32_t interleaved> __device__ 
__forceinline__ void reorder_imma_bias_func( float* __restrict__ dst, float src_value, uint32_t OC, uint32_t lane) { dst[lane] = src_value; } template <uint32_t size_bits, uint32_t interleaved> __global__ void reorder_imma_filter_bias_kernel( int8_t* __restrict__ dst_filter, float* __restrict__ dst_bias, const int8_t* __restrict__ src_filter, const int32_t* __restrict__ src_bias, float bias_scale, uint32_t OC, uint32_t IC, uint32_t FH, uint32_t FW) { static constexpr uint32_t elements_per_lane = 128 / size_bits; const uint32_t size1 = OC * IC * FH * FW / elements_per_lane; const uint32_t size2 = OC; uint32_t lane = threadIdx.x + blockIdx.x * blockDim.x; if (lane < size1) { reorder_imma_filter_func<size_bits, interleaved>( dst_filter, src_filter, OC, IC, FH, FW, lane); } else if (lane < size1 + size2) { lane = lane - size1; float src_bias_value = src_bias[lane] * bias_scale; reorder_imma_bias_func<interleaved>(dst_bias, src_bias_value, OC, lane); } } template <uint32_t size_bits, uint32_t interleaved> __global__ void reorder_imma_filter_bias_fusion_zero_point_kernel( int8_t* __restrict__ dst_filter, float* __restrict__ dst_bias, const int8_t* __restrict__ src_filter, const int32_t* __restrict__ src_bias, float bias_scale, const int32_t* reduce_filter, float zero_point, uint32_t OC, uint32_t IC, uint32_t FH, uint32_t FW) { static constexpr uint32_t elements_per_lane = 128 / size_bits; const uint32_t size1 = OC * IC * FH * FW / elements_per_lane; const uint32_t size2 = OC; uint32_t lane = threadIdx.x + blockIdx.x * blockDim.x; if (lane < size1) { reorder_imma_filter_func<size_bits, interleaved>( dst_filter, src_filter, OC, IC, FH, FW, lane); } else if (lane < size1 + size2) { lane = lane - size1; // fusion bias and zero_point // zero_point = zero_point * src_scale * filter_scale float src_bias_value = src_bias[lane] * bias_scale - reduce_filter[lane] * zero_point; reorder_imma_bias_func<interleaved>(dst_bias, src_bias_value, OC, lane); } } } // namespace template 
<uint32_t size_bits, uint32_t interleaved> void megdnn::cuda::ptx::reorder_imma_filter_bias( int8_t* dst_filter, float* dst_bias, const int8_t* src_filter, const int32_t* src_bias, float bias_scale, uint32_t OC, uint32_t IC, uint32_t FH, uint32_t FW, hipStream_t stream) { static constexpr uint32_t elements_per_lane = 128 / size_bits; uint32_t nr_threads = query_blocksize_for_kernel(reinterpret_cast<const void*>( reorder_imma_filter_bias_kernel<size_bits, interleaved>)); uint32_t vthreads = DIVUP(OC * IC * FH * FW, elements_per_lane) + OC; nr_threads = ::min(nr_threads, vthreads); uint32_t nr_blocks = DIVUP(vthreads, nr_threads); hipLaunchKernelGGL(( reorder_imma_filter_bias_kernel<size_bits, interleaved>) , dim3(nr_blocks), dim3(nr_threads), 0, stream, dst_filter, dst_bias, src_filter, src_bias, bias_scale, OC, IC, FH, FW); after_kernel_launch(); } template <uint32_t size_bits, uint32_t interleaved> void megdnn::cuda::ptx::reorder_imma_filter_bias_fusion_zero_point( int8_t* dst_filter, float* dst_bias, const int8_t* src_filter, const int32_t* src_bias, float bias_scale, const int32_t* reduce_filter, float zero_point, uint32_t OC, uint32_t IC, uint32_t FH, uint32_t FW, hipStream_t stream) { static constexpr uint32_t elements_per_lane = 128 / size_bits; uint32_t nr_threads = query_blocksize_for_kernel(reinterpret_cast<const void*>( reorder_imma_filter_bias_fusion_zero_point_kernel<size_bits, interleaved>)); uint32_t vthreads = DIVUP(OC * IC * FH * FW, elements_per_lane) + OC; nr_threads = ::min(nr_threads, vthreads); uint32_t nr_blocks = DIVUP(vthreads, nr_threads); hipLaunchKernelGGL(( reorder_imma_filter_bias_fusion_zero_point_kernel<size_bits, interleaved>) , dim3(nr_blocks), dim3(nr_threads), 0, stream, dst_filter, dst_bias, src_filter, src_bias, bias_scale, reduce_filter, zero_point, OC, IC, FH, FW); after_kernel_launch(); } #define INST(_size_bits, _interleaved) \ template void \ megdnn::cuda::ptx::reorder_imma_filter_bias<_size_bits, _interleaved>( \ int8_t * 
dst_filter, float* dst_bias, const int8_t* src_filter, \ const int32_t* src_bias, float bias_scale, uint32_t OC, uint32_t IC, \ uint32_t FH, uint32_t FW, hipStream_t stream); INST(8, 32) INST(4, 64) #undef INST #define INST(_size_bits, _interleaved) \ template void megdnn::cuda::ptx::reorder_imma_filter_bias_fusion_zero_point< \ _size_bits, _interleaved>( \ int8_t * dst_filter, float* dst_bias, const int8_t* src_filter, \ const int32_t* src_bias, float bias_scale, const int32_t* reduce_filter, \ float zero_point, uint32_t OC, uint32_t IC, uint32_t FH, uint32_t FW, \ hipStream_t stream); INST(4, 64) #undef INST // vim: syntax=cuda.doxygen
16d225782fff50e9db7ea6cd343adf748140e021.cu
#include "src/cuda/conv_bias/ptx_helper.cuh" #include "src/cuda/integer_subbyte_utils.cuh" #include "src/cuda/query_blocksize.cuh" using namespace megdnn; using namespace cuda; using namespace ptx; namespace { template <uint32_t size_bits, uint32_t interleaved> __device__ __forceinline__ void reorder_imma_filter_func( int8_t* dst, const int8_t* src, uint32_t OC, uint32_t IC, uint32_t FH, uint32_t FW, uint32_t lane) { static constexpr uint32_t elements_per_lane = 128 / size_bits; uint32_t elements = lane * elements_per_lane; uint32_t row = elements / (IC * FH * FW); uint32_t col = elements - row * IC * FH * FW; uint32_t sec = row / 4; uint32_t res = col & (interleaved - 1); uint32_t sec_sec = row & 3; uint32_t sec_res = (row & 15) / 4; uint32_t crosswise_offset = ((sec_sec >> 1) * 2 * interleaved) + (((sec_sec & 1) ^ (sec_res >> 1)) * interleaved); uint32_t residue_offset = ((res / elements_per_lane) ^ (sec_res & 1)) * elements_per_lane; uint32_t dst_offset = (sec / 2) * 8 * FH * FW * IC + (col / interleaved) * (8 * interleaved) + (sec & 1) * (4 * interleaved) + crosswise_offset + residue_offset; static constexpr uint32_t instruction_shape_col = 8; // 4 threads per Quad static constexpr uint32_t elements_per_thread = instruction_shape_col / 4; // 4 threads per Quad static constexpr uint32_t reordered_elements_per_thread = interleaved / 4; uint32_t elem_in_interleaved = row % interleaved; uint32_t elem_in_interleaved_pack = elem_in_interleaved / elements_per_thread; int elem_new = (row / interleaved * interleaved + elem_in_interleaved_pack % 4 * reordered_elements_per_thread + elem_in_interleaved_pack / 4 * elements_per_thread + elem_in_interleaved % elements_per_thread) * (IC * FH * FW) + col; *(reinterpret_cast<int4*>(dst + (dst_offset * size_bits / 8))) = *(reinterpret_cast<const int4*>(src + (elem_new * size_bits / 8))); } template <uint32_t interleaved> __device__ __forceinline__ void reorder_imma_bias_func( float* __restrict__ dst, float src_value, uint32_t OC, 
uint32_t lane) { dst[lane] = src_value; } template <uint32_t size_bits, uint32_t interleaved> __global__ void reorder_imma_filter_bias_kernel( int8_t* __restrict__ dst_filter, float* __restrict__ dst_bias, const int8_t* __restrict__ src_filter, const int32_t* __restrict__ src_bias, float bias_scale, uint32_t OC, uint32_t IC, uint32_t FH, uint32_t FW) { static constexpr uint32_t elements_per_lane = 128 / size_bits; const uint32_t size1 = OC * IC * FH * FW / elements_per_lane; const uint32_t size2 = OC; uint32_t lane = threadIdx.x + blockIdx.x * blockDim.x; if (lane < size1) { reorder_imma_filter_func<size_bits, interleaved>( dst_filter, src_filter, OC, IC, FH, FW, lane); } else if (lane < size1 + size2) { lane = lane - size1; float src_bias_value = src_bias[lane] * bias_scale; reorder_imma_bias_func<interleaved>(dst_bias, src_bias_value, OC, lane); } } template <uint32_t size_bits, uint32_t interleaved> __global__ void reorder_imma_filter_bias_fusion_zero_point_kernel( int8_t* __restrict__ dst_filter, float* __restrict__ dst_bias, const int8_t* __restrict__ src_filter, const int32_t* __restrict__ src_bias, float bias_scale, const int32_t* reduce_filter, float zero_point, uint32_t OC, uint32_t IC, uint32_t FH, uint32_t FW) { static constexpr uint32_t elements_per_lane = 128 / size_bits; const uint32_t size1 = OC * IC * FH * FW / elements_per_lane; const uint32_t size2 = OC; uint32_t lane = threadIdx.x + blockIdx.x * blockDim.x; if (lane < size1) { reorder_imma_filter_func<size_bits, interleaved>( dst_filter, src_filter, OC, IC, FH, FW, lane); } else if (lane < size1 + size2) { lane = lane - size1; // fusion bias and zero_point // zero_point = zero_point * src_scale * filter_scale float src_bias_value = src_bias[lane] * bias_scale - reduce_filter[lane] * zero_point; reorder_imma_bias_func<interleaved>(dst_bias, src_bias_value, OC, lane); } } } // namespace template <uint32_t size_bits, uint32_t interleaved> void megdnn::cuda::ptx::reorder_imma_filter_bias( int8_t* 
dst_filter, float* dst_bias, const int8_t* src_filter, const int32_t* src_bias, float bias_scale, uint32_t OC, uint32_t IC, uint32_t FH, uint32_t FW, cudaStream_t stream) { static constexpr uint32_t elements_per_lane = 128 / size_bits; uint32_t nr_threads = query_blocksize_for_kernel(reinterpret_cast<const void*>( reorder_imma_filter_bias_kernel<size_bits, interleaved>)); uint32_t vthreads = DIVUP(OC * IC * FH * FW, elements_per_lane) + OC; nr_threads = std::min(nr_threads, vthreads); uint32_t nr_blocks = DIVUP(vthreads, nr_threads); reorder_imma_filter_bias_kernel<size_bits, interleaved> <<<nr_blocks, nr_threads, 0, stream>>>( dst_filter, dst_bias, src_filter, src_bias, bias_scale, OC, IC, FH, FW); after_kernel_launch(); } template <uint32_t size_bits, uint32_t interleaved> void megdnn::cuda::ptx::reorder_imma_filter_bias_fusion_zero_point( int8_t* dst_filter, float* dst_bias, const int8_t* src_filter, const int32_t* src_bias, float bias_scale, const int32_t* reduce_filter, float zero_point, uint32_t OC, uint32_t IC, uint32_t FH, uint32_t FW, cudaStream_t stream) { static constexpr uint32_t elements_per_lane = 128 / size_bits; uint32_t nr_threads = query_blocksize_for_kernel(reinterpret_cast<const void*>( reorder_imma_filter_bias_fusion_zero_point_kernel<size_bits, interleaved>)); uint32_t vthreads = DIVUP(OC * IC * FH * FW, elements_per_lane) + OC; nr_threads = std::min(nr_threads, vthreads); uint32_t nr_blocks = DIVUP(vthreads, nr_threads); reorder_imma_filter_bias_fusion_zero_point_kernel<size_bits, interleaved> <<<nr_blocks, nr_threads, 0, stream>>>( dst_filter, dst_bias, src_filter, src_bias, bias_scale, reduce_filter, zero_point, OC, IC, FH, FW); after_kernel_launch(); } #define INST(_size_bits, _interleaved) \ template void \ megdnn::cuda::ptx::reorder_imma_filter_bias<_size_bits, _interleaved>( \ int8_t * dst_filter, float* dst_bias, const int8_t* src_filter, \ const int32_t* src_bias, float bias_scale, uint32_t OC, uint32_t IC, \ uint32_t FH, uint32_t FW, 
cudaStream_t stream); INST(8, 32) INST(4, 64) #undef INST #define INST(_size_bits, _interleaved) \ template void megdnn::cuda::ptx::reorder_imma_filter_bias_fusion_zero_point< \ _size_bits, _interleaved>( \ int8_t * dst_filter, float* dst_bias, const int8_t* src_filter, \ const int32_t* src_bias, float bias_scale, const int32_t* reduce_filter, \ float zero_point, uint32_t OC, uint32_t IC, uint32_t FH, uint32_t FW, \ cudaStream_t stream); INST(4, 64) #undef INST // vim: syntax=cuda.doxygen
ab0fa87be452d6bde25cf9f19bd7bbde348fb48f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <vector> #include "caffe/layers/relux_layer.hpp" namespace caffe { template <typename Dtype> __global__ void ReLUXForward(const int n, const Dtype* in, Dtype* out, Dtype negative_slope, Dtype maximal_value) { CUDA_KERNEL_LOOP(index, n) { out[index] = min(max(Dtype(0), in[index]), maximal_value); // if (in[index] > Dtype(0) && in[index] < maximal_value) { // out[index] = in[index]; // } else if (in[index] <= Dtype(0)) { // out[index] = in[index] * negative_slope; // } else { // out[index] = maximal_value; // } } } template <typename Dtype> void ReLUXLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); Dtype negative_slope = this->layer_param_.relux_param().negative_slope(); Dtype maximal_value = this->layer_param_.relux_param().maximal_value(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( ReLUXForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, top_data, negative_slope, maximal_value); CUDA_POST_KERNEL_CHECK; // << " count: " << count << " bottom_data: " // << (unsigned long)bottom_data // << " top_data: " << (unsigned long)top_data // << " blocks: " << CAFFE_GET_BLOCKS(count) // << " threads: " << CAFFE_CUDA_NUM_THREADS; } template <typename Dtype> __global__ void ReLUXBackward(const int n, const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff, Dtype negative_slope, Dtype maximal_value) { CUDA_KERNEL_LOOP(index, n) { out_diff[index] = in_diff[index] * ((in_data[index] > 0 && in_data[index] < maximal_value) + (in_data[index] <= 0) * negative_slope); } } template <typename Dtype> void ReLUXLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const 
vector<Blob<Dtype>*>& bottom) { if (propagate_down[0]) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); Dtype negative_slope = this->layer_param_.relux_param().negative_slope(); Dtype maximal_value = this->layer_param_.relux_param().maximal_value(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( ReLUXBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_diff, bottom_data, bottom_diff, negative_slope, maximal_value); CUDA_POST_KERNEL_CHECK; } } INSTANTIATE_LAYER_GPU_FUNCS(ReLUXLayer); } // namespace caffe
ab0fa87be452d6bde25cf9f19bd7bbde348fb48f.cu
#include <algorithm> #include <vector> #include "caffe/layers/relux_layer.hpp" namespace caffe { template <typename Dtype> __global__ void ReLUXForward(const int n, const Dtype* in, Dtype* out, Dtype negative_slope, Dtype maximal_value) { CUDA_KERNEL_LOOP(index, n) { out[index] = min(max(Dtype(0), in[index]), maximal_value); // if (in[index] > Dtype(0) && in[index] < maximal_value) { // out[index] = in[index]; // } else if (in[index] <= Dtype(0)) { // out[index] = in[index] * negative_slope; // } else { // out[index] = maximal_value; // } } } template <typename Dtype> void ReLUXLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); Dtype negative_slope = this->layer_param_.relux_param().negative_slope(); Dtype maximal_value = this->layer_param_.relux_param().maximal_value(); // NOLINT_NEXT_LINE(whitespace/operators) ReLUXForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, top_data, negative_slope, maximal_value); CUDA_POST_KERNEL_CHECK; // << " count: " << count << " bottom_data: " // << (unsigned long)bottom_data // << " top_data: " << (unsigned long)top_data // << " blocks: " << CAFFE_GET_BLOCKS(count) // << " threads: " << CAFFE_CUDA_NUM_THREADS; } template <typename Dtype> __global__ void ReLUXBackward(const int n, const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff, Dtype negative_slope, Dtype maximal_value) { CUDA_KERNEL_LOOP(index, n) { out_diff[index] = in_diff[index] * ((in_data[index] > 0 && in_data[index] < maximal_value) + (in_data[index] <= 0) * negative_slope); } } template <typename Dtype> void ReLUXLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[0]) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* top_diff = 
top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); Dtype negative_slope = this->layer_param_.relux_param().negative_slope(); Dtype maximal_value = this->layer_param_.relux_param().maximal_value(); // NOLINT_NEXT_LINE(whitespace/operators) ReLUXBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_diff, bottom_data, bottom_diff, negative_slope, maximal_value); CUDA_POST_KERNEL_CHECK; } } INSTANTIATE_LAYER_GPU_FUNCS(ReLUXLayer); } // namespace caffe
houghGPUv1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Imported from https://rosettacode.org/wiki/Example:Hough_transform/C // It will be used as a baseline to observe transformation // Modified and Parallelized with CUDA by Vipin Bakshi and Andre Lo. // DETAILS: time on Car.png is 9109598818 ns #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <string.h> #include <math.h> #include <pthread.h> #include "cairo.h" #include "apptime.h" #ifndef M_PI #define M_PI 3.1415927 #endif // These are macros to access the R, G and B values // of the input (d) data/ output data (ht) image buffers #define GR(X,Y) (d[(stride)*(Y)+bpp*(X)+((2)%bpp)]) #define GG(X,Y) (d[(stride)*(Y)+bpp*(X)+((1)%bpp)]) #define GB(X,Y) (d[(stride)*(Y)+bpp*(X)+((0)%bpp)]) #define SR(X,Y) (ht[4*tw*((Y)%th)+4*((X)%tw)+2]) #define SG(X,Y) (ht[4*tw*((Y)%th)+4*((X)%tw)+1]) #define SB(X,Y) (ht[4*tw*((Y)%th)+4*((X)%tw)+0]) #define RAD(A) (M_PI*((double)(A))/180.0) #define tw 360 // Kernel // todo: experiment with 3D instead of 1D grid? static int grid; __global__ void computationalkernel(uint8_t *d, uint8_t *ht, int W, int H, int stride, int bpp, int th) { int rho, y, x; int theta = threadIdx.x + blockIdx.x * blockDim.x; // theta is based on grid/ block id for(rho = 0; rho < th; rho++) { double C = cos(RAD(theta)); // todo: call sincos instead? 
double S = sin(RAD(theta)); uint32_t totalred = 0; uint32_t totalgreen = 0; uint32_t totalblue = 0; uint32_t totalpix = 0; if ( theta < 45 || (theta > 135 && theta < 225) || theta > 315) { for(y = 0; y < H; y++) { double dx = W/2.0 + (rho - (H/2.0-y)*S)/C; if ( dx < 0 || dx >= W ) continue; x = floor(dx+.5); if (x == W) continue; totalpix++; totalred += GR(x, y); totalgreen += GG(x, y); totalblue += GB(x, y); } } else { for(x = 0; x < W; x++) { double dy = H/2.0 - (rho - (x - W/2.0)*C)/S; if ( dy < 0 || dy >= H ) continue; y = floor(dy+.5); if (y == H) continue; totalpix++; totalred += GR(x, y); totalgreen += GG(x, y); totalblue += GB(x, y); } } if ( totalpix > 0 ) { double dp = totalpix; SR(theta, rho) = (int)(totalred/dp) &0xff; SG(theta, rho) = (int)(totalgreen/dp) &0xff; SB(theta, rho) = (int)(totalblue/dp) &0xff; } } } // d is pointer to input data // w, h, s is input data's width, height, and stridge // bpp is bits per pixel of input data uint8_t *houghtransform(uint8_t *h_in, int *w, int *h, int *s, int bpp) { // Error code to check return values for CUDA calls hipError_t err = hipSuccess; int W = *w, H = *h; int th = sqrt(W*W + H*H)/2.0; int outputBytes= th*tw*4; // alloc space for output buffer CPU side uint8_t *h_ht = (uint8_t *)malloc(outputBytes); // alloc space for output buffer device side uint8_t *d_out; err = hipMalloc((void **)&d_out, outputBytes); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate %d bytes for d_out (error code %s)!\n", outputBytes, hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipMemset((void *)d_out, 0, outputBytes); // black bg if (err != hipSuccess) { fprintf(stderr, "Failed to hipMemset d_out (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } printf("allocated output buffers\n"); // alloc space and init input buffer device side uint8_t *d_in; err = hipMalloc((void **)&d_in, (*s * *h)); // bytes = stride * height if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device d_in (error 
code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipMemcpy(d_in, h_in, (*s * *h), hipMemcpyHostToDevice); if (err != hipSuccess) { fprintf(stderr, "Failed to copy d_in from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } printf("allocated input buffers\n"); // todo: play with grid, block dimensions // right now this spawns 360 total kernels, for 360 values of theta hipLaunchKernelGGL(( computationalkernel) , dim3(grid), dim3((360/ grid)), 0, 0, d_in, d_out, W, H, *s, bpp, th); hipDeviceSynchronize(); // wait for all GPU threads to complete printf("hipDeviceSynchronize done\n"); // Copy resulting output from device hipMemcpy(h_ht, d_out, outputBytes, hipMemcpyDeviceToHost); if (err != hipSuccess) { fprintf(stderr, "Failed to copy d_out from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } printf("copy result back to host done\n"); // Clean up err = hipFree(d_in); if (err != hipSuccess) { fprintf(stderr, "Failed to free d_in (error code %s)!\n", hipGetErrorString(err)); } err = hipFree(d_out); if (err != hipSuccess) { fprintf(stderr, "Failed to free d_out (error code %s)!\n", hipGetErrorString(err)); } // h, w, and s are returned as the height, width, stride of the output image // ht is the buffer containing the transformed output image *h = th; // sqrt(W*W+H*H)/2 *w = tw; // 360 *s = 4*tw; // 4 because 4 bytes per pixel output format return h_ht; } int main(int argc, char **argv) { cairo_surface_t *inputimg = NULL; cairo_surface_t *houghimg = NULL; uint8_t *houghdata = NULL, *inputdata = NULL; int w, h, s, bpp, format; uint64_t measurement_time = 0; #if (CAIRO_HAS_PNG_FUNCTIONS==1) printf("cairo supports PNG\n"); #else printf("cairo does not support PNG\n"); #endif if ( argc < 3 ) return EXIT_FAILURE; printf("input file: %s\n", argv[1]); printf("output file: %s\n", argv[2]); //todo: take in argv[3] as grid size? 
grid = 12; // must be a factor of 360 (we calculate using theta for every degree of 360 degs) apptime_print_res(); // Lets measure initialization time. apptime_start_session(&measurement_time); printf("Initialization...\n"); inputimg = cairo_image_surface_create_from_png(argv[1]); printf("After create from png: %s\n", cairo_status_to_string(cairo_surface_status(inputimg))); w = cairo_image_surface_get_width(inputimg); h = cairo_image_surface_get_height(inputimg); s = cairo_image_surface_get_stride(inputimg); format = cairo_image_surface_get_format(inputimg); switch(format) { case CAIRO_FORMAT_ARGB32: bpp = 4; break; case CAIRO_FORMAT_RGB24: bpp = 3; break; case CAIRO_FORMAT_A8: bpp = 1; break; default: fprintf(stderr, "unsupported %i\n", format); goto destroy; } inputdata = cairo_image_surface_get_data(inputimg); measurement_time = apptime_stop_session(&measurement_time); printf("Initialization Completed. Time: %lld ns\n", measurement_time); printf("input buffer width %d, height %d, stride %d, bpp %d\n", w, h, s, bpp); // Now lets measure the Hough Time. printf("Hough Transform using CUDA started...\n"); apptime_start_session(&measurement_time); houghdata = houghtransform(inputdata, &w, &h, &s, bpp); measurement_time = apptime_stop_session(&measurement_time); printf("Hought transform completed. Time: %llu ns\n", measurement_time); printf("w=%d, h=%d\n", w, h); houghimg = cairo_image_surface_create_for_data(houghdata, CAIRO_FORMAT_RGB24, w, h, s); cairo_surface_write_to_png(houghimg, argv[2]); destroy: if (inputimg != NULL) cairo_surface_destroy(inputimg); if (houghimg != NULL) cairo_surface_destroy(houghimg); return EXIT_SUCCESS; }
houghGPUv1.cu
// Imported from https://rosettacode.org/wiki/Example:Hough_transform/C // It will be used as a baseline to observe transformation // Modified and Parallelized with CUDA by Vipin Bakshi and Andre Lo. // DETAILS: time on Car.png is 9109598818 ns #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <string.h> #include <math.h> #include <pthread.h> #include "cairo.h" #include "apptime.h" #ifndef M_PI #define M_PI 3.1415927 #endif // These are macros to access the R, G and B values // of the input (d) data/ output data (ht) image buffers #define GR(X,Y) (d[(stride)*(Y)+bpp*(X)+((2)%bpp)]) #define GG(X,Y) (d[(stride)*(Y)+bpp*(X)+((1)%bpp)]) #define GB(X,Y) (d[(stride)*(Y)+bpp*(X)+((0)%bpp)]) #define SR(X,Y) (ht[4*tw*((Y)%th)+4*((X)%tw)+2]) #define SG(X,Y) (ht[4*tw*((Y)%th)+4*((X)%tw)+1]) #define SB(X,Y) (ht[4*tw*((Y)%th)+4*((X)%tw)+0]) #define RAD(A) (M_PI*((double)(A))/180.0) #define tw 360 // Kernel // todo: experiment with 3D instead of 1D grid? static int grid; __global__ void computationalkernel(uint8_t *d, uint8_t *ht, int W, int H, int stride, int bpp, int th) { int rho, y, x; int theta = threadIdx.x + blockIdx.x * blockDim.x; // theta is based on grid/ block id for(rho = 0; rho < th; rho++) { double C = cos(RAD(theta)); // todo: call sincos instead? 
double S = sin(RAD(theta)); uint32_t totalred = 0; uint32_t totalgreen = 0; uint32_t totalblue = 0; uint32_t totalpix = 0; if ( theta < 45 || (theta > 135 && theta < 225) || theta > 315) { for(y = 0; y < H; y++) { double dx = W/2.0 + (rho - (H/2.0-y)*S)/C; if ( dx < 0 || dx >= W ) continue; x = floor(dx+.5); if (x == W) continue; totalpix++; totalred += GR(x, y); totalgreen += GG(x, y); totalblue += GB(x, y); } } else { for(x = 0; x < W; x++) { double dy = H/2.0 - (rho - (x - W/2.0)*C)/S; if ( dy < 0 || dy >= H ) continue; y = floor(dy+.5); if (y == H) continue; totalpix++; totalred += GR(x, y); totalgreen += GG(x, y); totalblue += GB(x, y); } } if ( totalpix > 0 ) { double dp = totalpix; SR(theta, rho) = (int)(totalred/dp) &0xff; SG(theta, rho) = (int)(totalgreen/dp) &0xff; SB(theta, rho) = (int)(totalblue/dp) &0xff; } } } // d is pointer to input data // w, h, s is input data's width, height, and stridge // bpp is bits per pixel of input data uint8_t *houghtransform(uint8_t *h_in, int *w, int *h, int *s, int bpp) { // Error code to check return values for CUDA calls cudaError_t err = cudaSuccess; int W = *w, H = *h; int th = sqrt(W*W + H*H)/2.0; int outputBytes= th*tw*4; // alloc space for output buffer CPU side uint8_t *h_ht = (uint8_t *)malloc(outputBytes); // alloc space for output buffer device side uint8_t *d_out; err = cudaMalloc((void **)&d_out, outputBytes); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate %d bytes for d_out (error code %s)!\n", outputBytes, cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaMemset((void *)d_out, 0, outputBytes); // black bg if (err != cudaSuccess) { fprintf(stderr, "Failed to cudaMemset d_out (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } printf("allocated output buffers\n"); // alloc space and init input buffer device side uint8_t *d_in; err = cudaMalloc((void **)&d_in, (*s * *h)); // bytes = stride * height if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device 
d_in (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaMemcpy(d_in, h_in, (*s * *h), cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy d_in from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } printf("allocated input buffers\n"); // todo: play with grid, block dimensions // right now this spawns 360 total kernels, for 360 values of theta computationalkernel <<<grid, (360/ grid)>>> (d_in, d_out, W, H, *s, bpp, th); cudaThreadSynchronize(); // wait for all GPU threads to complete printf("cudaThreadSynchronize done\n"); // Copy resulting output from device cudaMemcpy(h_ht, d_out, outputBytes, cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy d_out from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } printf("copy result back to host done\n"); // Clean up err = cudaFree(d_in); if (err != cudaSuccess) { fprintf(stderr, "Failed to free d_in (error code %s)!\n", cudaGetErrorString(err)); } err = cudaFree(d_out); if (err != cudaSuccess) { fprintf(stderr, "Failed to free d_out (error code %s)!\n", cudaGetErrorString(err)); } // h, w, and s are returned as the height, width, stride of the output image // ht is the buffer containing the transformed output image *h = th; // sqrt(W*W+H*H)/2 *w = tw; // 360 *s = 4*tw; // 4 because 4 bytes per pixel output format return h_ht; } int main(int argc, char **argv) { cairo_surface_t *inputimg = NULL; cairo_surface_t *houghimg = NULL; uint8_t *houghdata = NULL, *inputdata = NULL; int w, h, s, bpp, format; uint64_t measurement_time = 0; #if (CAIRO_HAS_PNG_FUNCTIONS==1) printf("cairo supports PNG\n"); #else printf("cairo does not support PNG\n"); #endif if ( argc < 3 ) return EXIT_FAILURE; printf("input file: %s\n", argv[1]); printf("output file: %s\n", argv[2]); //todo: take in argv[3] as grid size? 
grid = 12; // must be a factor of 360 (we calculate using theta for every degree of 360 degs) apptime_print_res(); // Lets measure initialization time. apptime_start_session(&measurement_time); printf("Initialization...\n"); inputimg = cairo_image_surface_create_from_png(argv[1]); printf("After create from png: %s\n", cairo_status_to_string(cairo_surface_status(inputimg))); w = cairo_image_surface_get_width(inputimg); h = cairo_image_surface_get_height(inputimg); s = cairo_image_surface_get_stride(inputimg); format = cairo_image_surface_get_format(inputimg); switch(format) { case CAIRO_FORMAT_ARGB32: bpp = 4; break; case CAIRO_FORMAT_RGB24: bpp = 3; break; case CAIRO_FORMAT_A8: bpp = 1; break; default: fprintf(stderr, "unsupported %i\n", format); goto destroy; } inputdata = cairo_image_surface_get_data(inputimg); measurement_time = apptime_stop_session(&measurement_time); printf("Initialization Completed. Time: %lld ns\n", measurement_time); printf("input buffer width %d, height %d, stride %d, bpp %d\n", w, h, s, bpp); // Now lets measure the Hough Time. printf("Hough Transform using CUDA started...\n"); apptime_start_session(&measurement_time); houghdata = houghtransform(inputdata, &w, &h, &s, bpp); measurement_time = apptime_stop_session(&measurement_time); printf("Hought transform completed. Time: %llu ns\n", measurement_time); printf("w=%d, h=%d\n", w, h); houghimg = cairo_image_surface_create_for_data(houghdata, CAIRO_FORMAT_RGB24, w, h, s); cairo_surface_write_to_png(houghimg, argv[2]); destroy: if (inputimg != NULL) cairo_surface_destroy(inputimg); if (houghimg != NULL) cairo_surface_destroy(houghimg); return EXIT_SUCCESS; }
47d30ca68ecc136e070dfc3d33b0897388592a76.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // ============================================================================= // PROJECT CHRONO - http://projectchrono.org // // Copyright (c) 2014 projectchrono.org // All rights reserved. // // Use of this source code is governed by a BSD-style license that can be found // in the LICENSE file at the top level of the distribution and at // http://projectchrono.org/license-chrono.txt. // // ============================================================================= // Author:Arman Pazouki, Milad Rakhsha // ============================================================================= // This file contains miscellaneous macros and utilities used in the SPH code. // **************************************************************************** #ifndef CH_SPH_GENERAL_CU #define CH_SPH_GENERAL_CU // ---------------------------------------------------------------------------- // CUDA headers // ---------------------------------------------------------------------------- #include "chrono_fsi/physics/ChSphGeneral.cuh" namespace chrono { namespace fsi { void CopyParams_NumberOfObjects(std::shared_ptr<SimParams> paramsH, std::shared_ptr<NumberOfObjects> numObjectsH) { hipMemcpyToSymbolAsync(paramsD, paramsH.get(), sizeof(SimParams)); hipMemcpyToSymbolAsync(numObjectsD, numObjectsH.get(), sizeof(NumberOfObjects)); hipDeviceSynchronize(); } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void calc_A_tensor(Real* A_tensor, Real* G_tensor, Real4* sortedPosRad, Real4* sortedRhoPreMu, Real* sumWij_inv, uint* csrColInd, uint* numContacts, const size_t numAllMarkers, volatile bool* isErrorD) { uint i_idx = blockIdx.x * blockDim.x + threadIdx.x; if (i_idx >= numAllMarkers) { return; } // Remember : we want to solve 6x6 system Bi*l=-[1 0 0 1 0 1]' // elements of matrix B depends on tensor A uint 
csrStartIdx = numContacts[i_idx]; uint csrEndIdx = numContacts[i_idx + 1]; Real3 posRadA = mR3(sortedPosRad[i_idx]); Real h_i = sortedPosRad[i_idx].w; Real m_i = cube(h_i * paramsD.MULT_INITSPACE) * paramsD.rho0; Real A_ijk[27] = {0.0}; Real Gi[9] = {0.0}; for (int i = 0; i < 9; i++) Gi[i] = G_tensor[i_idx * 9 + i]; // get address in grid int3 gridPos = calcGridPos(posRadA); for (int count = csrStartIdx; count < csrEndIdx; count++) { int j = csrColInd[count]; Real3 posRadB = mR3(sortedPosRad[j]); Real3 rij = Distance(posRadA, posRadB); Real d = length(rij); if (d > RESOLUTION_LENGTH_MULT * h_i || sortedRhoPreMu[j].w <= -2) continue; Real h_j = sortedPosRad[j].w; Real m_j = cube(h_j * paramsD.MULT_INITSPACE) * paramsD.rho0; Real h_ij = 0.5 * (h_j + h_i); Real3 grad_ij = GradWh(rij, h_ij); Real V_j = sumWij_inv[j]; Real com_part = 0; com_part = (Gi[0] * grad_ij.x + Gi[1] * grad_ij.y + Gi[2] * grad_ij.z) * V_j; A_ijk[0] += rij.x * rij.x * com_part; // 111 A_ijk[1] += rij.x * rij.y * com_part; // 112 A_ijk[2] += rij.x * rij.z * com_part; // 113 A_ijk[3] += rij.y * rij.x * com_part; // 121 A_ijk[4] += rij.y * rij.y * com_part; // 122 A_ijk[5] += rij.y * rij.z * com_part; // 123 A_ijk[6] += rij.z * rij.x * com_part; // 131 A_ijk[7] += rij.z * rij.y * com_part; // 132 A_ijk[8] += rij.z * rij.z * com_part; // 133 com_part = (Gi[3] * grad_ij.x + Gi[4] * grad_ij.y + Gi[5] * grad_ij.z) * V_j; A_ijk[9] += rij.x * rij.x * com_part; // 211 A_ijk[10] += rij.x * rij.y * com_part; // 212 A_ijk[11] += rij.x * rij.z * com_part; // 213 A_ijk[12] += rij.y * rij.x * com_part; // 221 A_ijk[13] += rij.y * rij.y * com_part; // 222 A_ijk[14] += rij.y * rij.z * com_part; // 223 A_ijk[15] += rij.z * rij.x * com_part; // 231 A_ijk[16] += rij.z * rij.y * com_part; // 232 A_ijk[17] += rij.z * rij.z * com_part; // 233 com_part = (Gi[6] * grad_ij.x + Gi[7] * grad_ij.y + Gi[8] * grad_ij.z) * V_j; A_ijk[18] += rij.x * rij.x * com_part; // 311 A_ijk[19] += rij.x * rij.y * com_part; // 312 A_ijk[20] 
+= rij.x * rij.z * com_part; // 313 A_ijk[21] += rij.y * rij.x * com_part; // 321 A_ijk[22] += rij.y * rij.y * com_part; // 322 A_ijk[23] += rij.y * rij.z * com_part; // 323 A_ijk[24] += rij.z * rij.x * com_part; // 331 A_ijk[25] += rij.z * rij.y * com_part; // 332 A_ijk[26] += rij.z * rij.z * com_part; // 333 } for (int i = 0; i < 27; i++) A_tensor[i_idx * 9 + i] = A_ijk[i]; } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void calc_L_tensor(Real* A_tensor, Real* L_tensor, Real* G_tensor, Real4* sortedPosRad, Real4* sortedRhoPreMu, Real* sumWij_inv, uint* csrColInd, uint* numContacts, const size_t numAllMarkers, volatile bool* isErrorD) { uint i_idx = blockIdx.x * blockDim.x + threadIdx.x; if (i_idx >= numAllMarkers) { return; } if (sortedRhoPreMu[i_idx].w != -1) { return; } // Remember : we want to solve 6x6 system Bi*l=-[1 0 0 1 0 1]' // elements of matrix B depends on tensor A uint csrStartIdx = numContacts[i_idx]; uint csrEndIdx = numContacts[i_idx + 1]; // - paramsD.Pressure_Constraint; Real3 posRadA = mR3(sortedPosRad[i_idx]); Real h_i = sortedPosRad[i_idx].w; Real m_i = cube(h_i * paramsD.MULT_INITSPACE) * paramsD.rho0; Real B[36] = {0.0}; // Real Gi[9] = {0.0}; // for (int i = 0; i < 9; i++) // Gi[i] = G_tensor[i_idx * 9 + i]; Real A_ijk[27] = {0.0}; for (int i = 0; i < 27; i++) A_ijk[i] = A_tensor[i_idx * 27 + i]; // get address in grid int3 gridPos = calcGridPos(posRadA); for (int count = csrStartIdx; count < csrEndIdx; count++) { int j = csrColInd[count]; Real3 posRadB = mR3(sortedPosRad[j]); Real3 rij = Distance(posRadA, posRadB); Real d = length(rij); if (d > RESOLUTION_LENGTH_MULT * h_i || sortedRhoPreMu[j].w <= -2) continue; Real3 eij = rij / d; Real h_j = sortedPosRad[j].w; Real m_j = cube(h_j * paramsD.MULT_INITSPACE) * paramsD.rho0; Real h_ij = 0.5 * (h_j + h_i); Real3 grad_ij = GradWh(rij, h_ij); Real V_j = sumWij_inv[j]; Real com_part = 0; // mn=11 
Real XX = (eij.x * grad_ij.x); Real XY = (eij.x * grad_ij.y + eij.y * grad_ij.x); Real XZ = (eij.x * grad_ij.z + eij.z * grad_ij.x); Real YY = (eij.y * grad_ij.y); Real YZ = (eij.y * grad_ij.z + eij.z * grad_ij.y); Real ZZ = (eij.z * grad_ij.z); com_part = (A_ijk[0] * eij.x + A_ijk[9] * eij.y + A_ijk[18] * eij.z + rij.x * eij.x) * V_j; B[6 * 0 + 0] += com_part * XX; // 11 B[6 * 0 + 1] += com_part * XY; // 12 B[6 * 0 + 2] += com_part * XZ; // 13 B[6 * 0 + 3] += com_part * YY; // 14 B[6 * 0 + 4] += com_part * YZ; // 15 B[6 * 0 + 5] += com_part * ZZ; // 15 // mn=12 com_part = (A_ijk[1] * eij.x + A_ijk[10] * eij.y + A_ijk[19] * eij.z + rij.x * eij.y) * V_j; B[6 * 1 + 0] += com_part * XX; // 21 B[6 * 1 + 1] += com_part * XY; // 22 B[6 * 1 + 2] += com_part * XZ; // 23 B[6 * 1 + 3] += com_part * YY; // 24 B[6 * 1 + 4] += com_part * YZ; // 25 B[6 * 1 + 5] += com_part * ZZ; // 25 // mn=13 com_part = (A_ijk[2] * eij.x + A_ijk[11] * eij.y + A_ijk[20] * eij.z + rij.x * eij.z) * V_j; B[6 * 2 + 0] += com_part * XX; // 31 B[6 * 2 + 1] += com_part * XY; // 32 B[6 * 2 + 2] += com_part * XZ; // 33 B[6 * 2 + 3] += com_part * YY; // 34 B[6 * 2 + 4] += com_part * YZ; // 35 B[6 * 2 + 5] += com_part * ZZ; // 36 // Note that we skip mn=21 since it is similar to mn=12 // mn=22 com_part = (A_ijk[4] * eij.x + A_ijk[13] * eij.y + A_ijk[22] * eij.z + rij.y * eij.y) * V_j; B[6 * 3 + 0] += com_part * XX; // 41 B[6 * 3 + 1] += com_part * XY; // 42 B[6 * 3 + 2] += com_part * XZ; // 43 B[6 * 3 + 3] += com_part * YY; // 44 B[6 * 3 + 4] += com_part * YZ; // 45 B[6 * 3 + 5] += com_part * ZZ; // 46 // mn=23 com_part = (A_ijk[5] * eij.x + A_ijk[14] * eij.y + A_ijk[23] * eij.z + rij.y * eij.z) * V_j; B[6 * 4 + 0] += com_part * XX; // 51 B[6 * 4 + 1] += com_part * XY; // 52 B[6 * 4 + 2] += com_part * XZ; // 53 B[6 * 4 + 3] += com_part * YY; // 54 B[6 * 4 + 4] += com_part * YZ; // 55 B[6 * 4 + 5] += com_part * ZZ; // 56 // mn=33 com_part = (A_ijk[8] * eij.x + A_ijk[17] * eij.y + A_ijk[26] * eij.z + rij.z * 
eij.z) * V_j; B[6 * 5 + 0] += com_part * XX; // 61 B[6 * 5 + 1] += com_part * XY; // 62 B[6 * 5 + 2] += com_part * XZ; // 63 B[6 * 5 + 3] += com_part * YY; // 64 B[6 * 5 + 4] += com_part * YZ; // 65 B[6 * 5 + 5] += com_part * ZZ; // 66 } inv6xdelta_mn(B, &L_tensor[6 * i_idx]); } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void calcRho_kernel(Real4* sortedPosRad, Real4* sortedRhoPreMu, Real* sumWij_inv, uint* cellStart, uint* cellEnd, uint* mynumContact, const size_t numAllMarkers, volatile bool* isErrorD) { uint i_idx = blockIdx.x * blockDim.x + threadIdx.x; if (i_idx >= numAllMarkers) { return; } if (sortedRhoPreMu[i_idx].w == -2) { mynumContact[i_idx] = 1; return; } Real3 posRadA = mR3(sortedPosRad[i_idx]); Real h_i = sortedPosRad[i_idx].w; Real m_i = cube((h_i * paramsD.MULT_INITSPACE)) * paramsD.rho0; Real sum_mW = 0; Real sum_W = 0.0; uint mcon = 1; // get address in grid int3 gridPos = calcGridPos(posRadA); for (int z = -1; z <= 1; z++) for (int y = -1; y <= 1; y++) for (int x = -1; x <= 1; x++) { int3 neighbourPos = gridPos + mI3(x, y, z); uint gridHash = calcGridHash(neighbourPos); uint startIndex = cellStart[gridHash]; if (startIndex != 0xffffffff) { uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { Real3 posRadB = mR3(sortedPosRad[j]); Real3 dist3 = Distance(posRadA, posRadB); Real d = length(dist3); if (d > RESOLUTION_LENGTH_MULT * h_i || sortedRhoPreMu[j].w <= -2) continue; if (i_idx != j) mcon++; Real h_j = sortedPosRad[j].w; Real m_j = cube(h_j * paramsD.MULT_INITSPACE) * paramsD.rho0; Real W3 = W3h(d, 0.5 * (h_j + h_i)); sum_mW += m_j * W3; sum_W += W3; } } } mynumContact[i_idx] = mcon; // Adding neighbor contribution is done! 
sumWij_inv[i_idx] = m_i / sum_mW; sortedRhoPreMu[i_idx].x = sum_mW; if ((sortedRhoPreMu[i_idx].x > 2 * paramsD.rho0 || sortedRhoPreMu[i_idx].x < 0) && sortedRhoPreMu[i_idx].w == -1) printf("(calcRho_kernel)too large/small density marker %d, rho=%f, sum_W=%f, m_i=%f\n", i_idx, sortedRhoPreMu[i_idx].x, sum_W, m_i); } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void calcNormalizedRho_kernel(Real4* sortedPosRad, // input: sorted positions Real3* sortedVelMas, Real4* sortedRhoPreMu, Real* sumWij_inv, Real* G_i, Real3* normals, Real* Color, uint* cellStart, uint* cellEnd, const size_t numAllMarkers, volatile bool* isErrorD) { uint i_idx = blockIdx.x * blockDim.x + threadIdx.x; if (i_idx >= numAllMarkers || sortedRhoPreMu[i_idx].w <= -2) { return; } // Real3 gravity = paramsD.gravity; Real RHO_0 = paramsD.rho0; // Real IncompressibilityFactor = paramsD.IncompressibilityFactor; // dxi_over_Vi[i_idx] = 1e10; if (sortedRhoPreMu[i_idx].w == -2) return; Real3 posRadA = mR3(sortedPosRad[i_idx]); Real h_i = sortedPosRad[i_idx].w; // Real m_i = cube(h_i * paramsD.MULT_INITSPACE) * paramsD.rho0; Real sum_mW = 0; Real sum_Wij_inv = 0; Real C = 0; // get address in grid int3 gridPos = calcGridPos(posRadA); // This is the elements of inverse of G Real mGi[9] = {0.0}; Real theta_i = sortedRhoPreMu[i_idx].w + 1; if (theta_i > 1) theta_i = 1; Real3 mynormals = mR3(0.0); // examine neighbouring cells for (int z = -1; z <= 1; z++) for (int y = -1; y <= 1; y++) for (int x = -1; x <= 1; x++) { int3 neighbourPos = gridPos + mI3(x, y, z); uint gridHash = calcGridHash(neighbourPos); // get start of bucket for this cell50 uint startIndex = cellStart[gridHash]; if (startIndex != 0xffffffff) { // cell is not empty // iterate over particles in this cell uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { Real3 posRadB = mR3(sortedPosRad[j]); Real3 dist3 = 
Distance(posRadA, posRadB); Real3 dv3 = Distance(sortedVelMas[i_idx], sortedVelMas[j]); Real d = length(dist3); Real h_j = sortedPosRad[j].w; Real m_j = cube(h_j * 1) * paramsD.rho0; C += m_j * Color[i_idx] / sortedRhoPreMu[i_idx].x * W3h(d, 0.5 * (h_j + h_i)); if (d > RESOLUTION_LENGTH_MULT * h_i || sortedRhoPreMu[j].w <= -2) continue; Real V_j = sumWij_inv[j]; Real h_ij = 0.5 * (h_j + h_i); Real W3 = W3h(d, h_ij); Real3 grad_i_wij = GradWh(dist3, h_ij); Real theta_j = sortedRhoPreMu[j].w + 1; if (theta_j > 1) theta_j = 1; if (sortedRhoPreMu[i_idx].w == -3 && sortedRhoPreMu[j].w == -3) mynormals += grad_i_wij * V_j; if (sortedRhoPreMu[i_idx].w != -3) mynormals += (theta_j - theta_i) * grad_i_wij * V_j; mGi[0] -= dist3.x * grad_i_wij.x * V_j; mGi[1] -= dist3.x * grad_i_wij.y * V_j; mGi[2] -= dist3.x * grad_i_wij.z * V_j; mGi[3] -= dist3.y * grad_i_wij.x * V_j; mGi[4] -= dist3.y * grad_i_wij.y * V_j; mGi[5] -= dist3.y * grad_i_wij.z * V_j; mGi[6] -= dist3.z * grad_i_wij.x * V_j; mGi[7] -= dist3.z * grad_i_wij.y * V_j; mGi[8] -= dist3.z * grad_i_wij.z * V_j; sum_mW += m_j * W3; sum_Wij_inv += sumWij_inv[j] * W3; } } } normals[i_idx] = mynormals; if (length(mynormals) > EPSILON) normals[i_idx] = mynormals / length(mynormals); Real Det = (mGi[0] * mGi[4] * mGi[8] - mGi[0] * mGi[5] * mGi[7] - mGi[1] * mGi[3] * mGi[8] + mGi[1] * mGi[5] * mGi[6] + mGi[2] * mGi[3] * mGi[7] - mGi[2] * mGi[4] * mGi[6]); G_i[i_idx * 9 + 0] = (mGi[4] * mGi[8] - mGi[5] * mGi[7]) / Det; G_i[i_idx * 9 + 1] = -(mGi[1] * mGi[8] - mGi[2] * mGi[7]) / Det; G_i[i_idx * 9 + 2] = (mGi[1] * mGi[5] - mGi[2] * mGi[4]) / Det; G_i[i_idx * 9 + 3] = -(mGi[3] * mGi[8] - mGi[5] * mGi[6]) / Det; G_i[i_idx * 9 + 4] = (mGi[0] * mGi[8] - mGi[2] * mGi[6]) / Det; G_i[i_idx * 9 + 5] = -(mGi[0] * mGi[5] - mGi[2] * mGi[3]) / Det; G_i[i_idx * 9 + 6] = (mGi[3] * mGi[7] - mGi[4] * mGi[6]) / Det; G_i[i_idx * 9 + 7] = -(mGi[0] * mGi[7] - mGi[1] * mGi[6]) / Det; G_i[i_idx * 9 + 8] = (mGi[0] * mGi[4] - mGi[1] * mGi[3]) / Det; // 
if (sortedRhoPreMu[i_idx].x > RHO_0) // IncompressibilityFactor = 1; // sortedRhoPreMu[i_idx].x = (sum_mW / sum_W_sumWij_inv - RHO_0) * IncompressibilityFactor + RHO_0; // sortedRhoPreMu[i_idx].x = (sum_mW - RHO_0) * IncompressibilityFactor + RHO_0; sortedRhoPreMu[i_idx].x = sum_mW / sum_Wij_inv; if ((sortedRhoPreMu[i_idx].x > 5 * RHO_0 || sortedRhoPreMu[i_idx].x < RHO_0 / 5) && sortedRhoPreMu[i_idx].w == -1) printf( "calcNormalizedRho_kernel-- sortedRhoPreMu[i_idx].w=%f, h=%f, sum_mW=%f, " "sum_W_sumWij_inv=%.4e, sortedRhoPreMu[i_idx].x=%.4e\n", sortedRhoPreMu[i_idx].w, sortedPosRad[i_idx].w, sum_mW, sum_Wij_inv, sortedRhoPreMu[i_idx].x); } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void calcNormalizedRho_Gi_fillInMatrixIndices(Real4* sortedPosRad, // input: sorted positions Real3* sortedVelMas, Real4* sortedRhoPreMu, Real* sumWij_inv, Real* G_i, Real3* normals, uint* csrColInd, uint* numContacts, uint* cellStart, uint* cellEnd, const size_t numAllMarkers, volatile bool* isErrorD) { uint i_idx = blockIdx.x * blockDim.x + threadIdx.x; if (i_idx >= numAllMarkers) { return; } Real RHO_0 = paramsD.rho0; uint csrStartIdx = numContacts[i_idx] + 1; // Reserve the starting index for the A_ii Real3 posRadA = mR3(sortedPosRad[i_idx]); Real h_i = sortedPosRad[i_idx].w; Real sum_mW = 0; Real sum_mW_rho = 0; Real sum_W_sumWij_inv = 0; // get address in grid int3 gridPos = calcGridPos(posRadA); csrColInd[csrStartIdx - 1] = i_idx; uint nextCol = csrStartIdx; if (sortedRhoPreMu[i_idx].w == -2) return; Real theta_i = sortedRhoPreMu[i_idx].w + 1; if (theta_i > 1) theta_i = 1; Real3 mynormals = mR3(0.0); // This is the elements of inverse of G Real mGi[9] = {0.0}; // examine neighbouring cells for (int z = -1; z <= 1; z++) for (int y = -1; y <= 1; y++) for (int x = -1; x <= 1; x++) { int3 neighbourPos = gridPos + mI3(x, y, z); uint gridHash = calcGridHash(neighbourPos); // get start of 
bucket for this cell50 uint startIndex = cellStart[gridHash]; if (startIndex != 0xffffffff) { // cell is not empty // iterate over particles in this cell uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { Real3 posRadB = mR3(sortedPosRad[j]); Real3 rij = Distance(posRadA, posRadB); Real3 dv3 = Distance(sortedVelMas[i_idx], sortedVelMas[j]); Real d = length(rij); Real h_j = sortedPosRad[j].w; Real m_j = cube(h_j * paramsD.MULT_INITSPACE) * paramsD.rho0; Real h_ij = 0.5 * (h_j + h_i); Real W3 = W3h(d, h_ij); Real3 grad_i_wij = GradWh(rij, h_ij); Real V_j = sumWij_inv[j]; if (d > RESOLUTION_LENGTH_MULT * h_i || sortedRhoPreMu[j].w <= -2) continue; if (i_idx != j) { csrColInd[nextCol] = j; nextCol++; } Real theta_j = sortedRhoPreMu[j].w + 1; if (theta_j > 1) theta_j = 1; if (sortedRhoPreMu[i_idx].w == -3 && sortedRhoPreMu[j].w == -3) mynormals += grad_i_wij * V_j; if (sortedRhoPreMu[i_idx].w != -3) mynormals += (theta_j - theta_i) * grad_i_wij * V_j; mGi[0] -= rij.x * grad_i_wij.x * V_j; mGi[1] -= rij.x * grad_i_wij.y * V_j; mGi[2] -= rij.x * grad_i_wij.z * V_j; mGi[3] -= rij.y * grad_i_wij.x * V_j; mGi[4] -= rij.y * grad_i_wij.y * V_j; mGi[5] -= rij.y * grad_i_wij.z * V_j; mGi[6] -= rij.z * grad_i_wij.x * V_j; mGi[7] -= rij.z * grad_i_wij.y * V_j; mGi[8] -= rij.z * grad_i_wij.z * V_j; sum_mW += m_j * W3; // sum_mW += sortedRhoPreMu[j].x * W3; // sum_mW += m_j * sumWij_inv[j]; // sum_mW += sortedRhoPreMu[j].x * W3 * V_j; sum_mW_rho += W3 * m_j / sortedRhoPreMu[j].x; // sum_W_sumWij_inv += W3 * sumWij_inv[j]; } } } normals[i_idx] = mynormals; if (length(mynormals) > EPSILON) normals[i_idx] = mynormals / length(mynormals); if (sortedRhoPreMu[i_idx].w == -3) normals[i_idx] *= -1; Real Det = (mGi[0] * mGi[4] * mGi[8] - mGi[0] * mGi[5] * mGi[7] - mGi[1] * mGi[3] * mGi[8] + mGi[1] * mGi[5] * mGi[6] + mGi[2] * mGi[3] * mGi[7] - mGi[2] * mGi[4] * mGi[6]); if (abs(Det) < EPSILON && sortedRhoPreMu[i_idx].w != -3) { for (int i = 0; i < 9; i++) { 
G_i[i_idx * 9 + i] = 0.0; G_i[i_idx * 9 + 0] = 1; G_i[i_idx * 9 + 4] = 1; G_i[i_idx * 9 + 8] = 1; } } else { G_i[i_idx * 9 + 0] = (mGi[4] * mGi[8] - mGi[5] * mGi[7]) / Det; G_i[i_idx * 9 + 1] = -(mGi[1] * mGi[8] - mGi[2] * mGi[7]) / Det; G_i[i_idx * 9 + 2] = (mGi[1] * mGi[5] - mGi[2] * mGi[4]) / Det; G_i[i_idx * 9 + 3] = -(mGi[3] * mGi[8] - mGi[5] * mGi[6]) / Det; G_i[i_idx * 9 + 4] = (mGi[0] * mGi[8] - mGi[2] * mGi[6]) / Det; G_i[i_idx * 9 + 5] = -(mGi[0] * mGi[5] - mGi[2] * mGi[3]) / Det; G_i[i_idx * 9 + 6] = (mGi[3] * mGi[7] - mGi[4] * mGi[6]) / Det; G_i[i_idx * 9 + 7] = -(mGi[0] * mGi[7] - mGi[1] * mGi[6]) / Det; G_i[i_idx * 9 + 8] = (mGi[0] * mGi[4] - mGi[1] * mGi[3]) / Det; } // sortedRhoPreMu[i_idx].x = sum_mW / sum_mW_rho; // sortedRhoPreMu[i_idx].x = sum_mW / sum_W; // sortedRhoPreMu[i_idx].x = sum_mW; if ((sortedRhoPreMu[i_idx].x > 5 * RHO_0 || sortedRhoPreMu[i_idx].x < RHO_0 / 5) && sortedRhoPreMu[i_idx].w > -2) printf( "calcNormalizedRho_kernel-- sortedRhoPreMu[i_idx].w=%f, h=%f, sum_mW=%f, " "sum_W_sumWij_inv=%.4e, sortedRhoPreMu[i_idx].x=%.4e\n", sortedRhoPreMu[i_idx].w, sortedPosRad[i_idx].w, sum_mW, sum_W_sumWij_inv, sortedRhoPreMu[i_idx].x); } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void Function_Gradient_Laplacian_Operator(Real4* sortedPosRad, // input: sorted positions Real3* sortedVelMas, Real4* sortedRhoPreMu, Real* sumWij_inv, Real* G_tensor, Real* L_tensor, Real* A_L, // Laplacian Operator matrix Real3* A_G, // Gradient Operator matrix Real* A_f, // Function Operator matrix // A_L, A_G are in system level; // A_G* p gives gradp, A_L*p gives Delta^2p uint* csrColInd, uint* numContacts, const size_t numAllMarkers, volatile bool* isErrorD) { uint i_idx = blockIdx.x * blockDim.x + threadIdx.x; if (i_idx >= numAllMarkers) return; if (sortedRhoPreMu[i_idx].w <= -2) return; // Real RHO_0 = paramsD.rho0; uint csrStartIdx = numContacts[i_idx]; uint 
csrEndIdx = numContacts[i_idx + 1]; //- paramsD.Pressure_Constraint; Real3 posRadA = mR3(sortedPosRad[i_idx]); Real h_i = sortedPosRad[i_idx].w; // get address in grid int3 gridPos = calcGridPos(posRadA); // This is the elements of inverse of G Real mGi[9] = {0.0}; Real Li[6] = {0.0}; Real3 LaplacainVi = mR3(0.0); Real NormGi = 0; Real NormLi = 0; for (int i = 0; i < 9; i++) { mGi[i] = G_tensor[i_idx * 9 + i]; NormGi += abs(mGi[i]); } for (int i = 0; i < 6; i++) { Li[i] = L_tensor[i_idx * 6 + i]; NormLi += abs(Li[i]); } Real V_i = sumWij_inv[i_idx]; Real m_i = cube(h_i * paramsD.MULT_INITSPACE) * paramsD.rho0; Real rhoi = sortedRhoPreMu[i_idx].x; for (int count = csrStartIdx; count < csrEndIdx; count++) { int j = csrColInd[count]; Real3 posRadB = mR3(sortedPosRad[j]); Real3 rij = Distance(posRadA, posRadB); Real d = length(rij); Real3 eij = rij / d; Real h_j = sortedPosRad[j].w; Real m_j = cube(h_j * paramsD.MULT_INITSPACE) * paramsD.rho0; Real W3 = 0.5 * (W3h(d, h_i) + W3h(d, h_j)); Real3 grad_i_wij = 0.5 * (GradWh(rij, h_i) + GradWh(rij, h_j)); Real V_j = sumWij_inv[j]; A_f[count] = V_j * W3; if (paramsD.Conservative_Form) { if (paramsD.gradient_type == 0) { Real Coeff = V_j; A_G[count] = Coeff * grad_i_wij; A_G[csrStartIdx] -= Coeff * grad_i_wij; } else if (paramsD.gradient_type == 1) { Real Coeff = V_j; A_G[count] = Coeff * grad_i_wij; A_G[csrStartIdx] += Coeff * grad_i_wij; } else if (paramsD.gradient_type == 2) { Real3 comm = m_j * rhoi * grad_i_wij; A_G[count] = 1.0 / (sortedRhoPreMu[j].x * sortedRhoPreMu[j].x) * comm; A_G[csrStartIdx] += 1.0 / (rhoi * rhoi) * comm; } else { Real3 comm = 1.0 / V_i * (V_j * V_j + V_i * V_i) / (rhoi + sortedRhoPreMu[j].x) * grad_i_wij; A_G[count] = rhoi * comm; A_G[csrStartIdx] += sortedRhoPreMu[j].x * comm; } } else { Real Coeff = V_j; A_G[count].x = Coeff * (grad_i_wij.x * mGi[0] + grad_i_wij.y * mGi[1] + grad_i_wij.z * mGi[2]); A_G[count].y = Coeff * (grad_i_wij.x * mGi[3] + grad_i_wij.y * mGi[4] + grad_i_wij.z * mGi[5]); 
A_G[count].z = Coeff * (grad_i_wij.x * mGi[6] + grad_i_wij.y * mGi[7] + grad_i_wij.z * mGi[8]); A_G[csrStartIdx].x -= Coeff * (grad_i_wij.x * mGi[0] + grad_i_wij.y * mGi[1] + grad_i_wij.z * mGi[2]); A_G[csrStartIdx].y -= Coeff * (grad_i_wij.x * mGi[3] + grad_i_wij.y * mGi[4] + grad_i_wij.z * mGi[5]); A_G[csrStartIdx].z -= Coeff * (grad_i_wij.x * mGi[6] + grad_i_wij.y * mGi[7] + grad_i_wij.z * mGi[8]); } } for (int count = csrStartIdx; count < csrEndIdx; count++) { int j = csrColInd[count]; Real3 posRadB = mR3(sortedPosRad[j]); Real3 rij = Distance(posRadA, posRadB); Real d = length(rij); Real3 eij = rij / d; Real h_j = sortedPosRad[j].w; Real m_j = cube(h_j * paramsD.MULT_INITSPACE) * paramsD.rho0; Real h_ij = 0.5 * (h_j + h_i); Real W3 = W3h(d, h_ij); Real3 grad_ij = GradWh(rij, h_ij); Real V_j = sumWij_inv[j]; if (d < EPSILON) continue; if (paramsD.Conservative_Form) { if (paramsD.laplacian_type == 0) { Real commonterm = 1.0 / V_j * (V_j * V_j + V_i * V_i) * dot(rij, grad_ij); A_L[count] -= commonterm / (d * d + h_ij * h_ij * paramsD.epsMinMarkersDis); // j A_L[csrStartIdx] += commonterm / (d * d + h_ij * h_ij * paramsD.epsMinMarkersDis); // i for (int count_in = csrStartIdx; count_in < csrEndIdx; count_in++) { A_L[count_in] -= commonterm * dot(A_G[count_in], eij); // k } } else if (paramsD.laplacian_type == 1) { Real comm = 2.0 / rhoi * m_j * dot(rij, grad_ij) / (d * d + h_ij * h_ij * paramsD.epsMinMarkersDis); A_L[count] = -comm; // j A_L[csrStartIdx] += comm; // i } else { Real comm = 2.0 / V_i * (V_j * V_j + V_i * V_i) * dot(rij, grad_ij) / (d * d + h_ij * h_ij * paramsD.epsMinMarkersDis); A_L[count] = -comm; // j A_L[csrStartIdx] += comm; // i } } else { Real commonterm = 1.0 / V_j * (V_j * V_j + V_i * V_i) * (Li[0] * eij.x * grad_ij.x + Li[1] * eij.x * grad_ij.y + Li[2] * eij.x * grad_ij.z + Li[1] * eij.y * grad_ij.x + Li[3] * eij.y * grad_ij.y + Li[4] * eij.y * grad_ij.z + Li[2] * eij.z * grad_ij.x + Li[4] * eij.z * grad_ij.y + Li[5] * eij.z * grad_ij.z); 
A_L[count] -= commonterm / (d + h_ij * paramsD.epsMinMarkersDis); // j A_L[csrStartIdx] += commonterm / (d + h_ij * paramsD.epsMinMarkersDis); // i for (int count_in = csrStartIdx; count_in < csrEndIdx; count_in++) { A_L[count_in] -= commonterm * dot(A_G[count_in], eij); // k } } if (!(isfinite(A_L[count]))) { printf("Error! A_L ChSPHGeneral.cu !\n"); } } } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void Jacobi_SOR_Iter(Real4* sortedRhoPreMu, Real* A_Matrix, Real3* V_old, Real3* V_new, Real3* b3vec, Real* q_old, // q=p^(n+1)-p^n Real* q_new, // q=p^(n+1)-p^n Real* b1vec, const uint* csrColInd, const uint* numContacts, size_t numAllMarkers, bool _3dvector, volatile bool* isErrorD) { uint i_idx = blockIdx.x * blockDim.x + threadIdx.x; if (i_idx >= numAllMarkers) { return; } uint startIdx = numContacts[i_idx] + 1; // Reserve the starting index for the A_ii uint endIdx = numContacts[i_idx + 1]; //- uint(_3dvector && paramsD.Pressure_Constraint); if (_3dvector) { Real3 aij_vj = mR3(0.0); for (int myIdx = startIdx; myIdx < endIdx; myIdx++) { int j = csrColInd[myIdx]; aij_vj += A_Matrix[myIdx] * V_old[j]; } V_new[i_idx] = (b3vec[i_idx] - aij_vj) / A_Matrix[startIdx - 1]; } else { Real aij_pj = 0.0; for (int myIdx = startIdx; myIdx < endIdx; myIdx++) { aij_pj += A_Matrix[myIdx] * q_old[csrColInd[myIdx]]; } q_new[i_idx] = (b1vec[i_idx] - aij_pj) / A_Matrix[startIdx - 1]; } } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void Update_AND_Calc_Res(Real4* sortedRhoPreMu, Real3* V_old, Real3* V_new, Real* q_old, Real* q_new, Real* Residuals, const size_t numAllMarkers, bool _3dvector, volatile bool* isErrorD) { uint i_idx = blockIdx.x * blockDim.x + threadIdx.x; if (i_idx >= numAllMarkers) { return; } // Real omega = _3dvector ? 
1.0 : paramsD.PPE_relaxation; Real omega = paramsD.PPE_relaxation; Real res = 0; if (_3dvector) { V_new[i_idx] = (1 - omega) * V_old[i_idx] + omega * V_new[i_idx]; res = length(V_old[i_idx] - V_new[i_idx]); V_old[i_idx] = V_new[i_idx]; } else { q_new[i_idx] = (1 - omega) * q_old[i_idx] + omega * q_new[i_idx]; res = abs(q_old[i_idx] - q_new[i_idx]); q_old[i_idx] = q_new[i_idx]; } Residuals[i_idx] = res; } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void Initialize_Variables(Real4* sortedRhoPreMu, Real* p_old, Real3* sortedVelMas, Real3* V_new, const size_t numAllMarkers, volatile bool* isErrorD) { const uint i_idx = blockIdx.x * blockDim.x + threadIdx.x; if (i_idx >= numAllMarkers) { return; } if (sortedRhoPreMu[i_idx].w <= -2) { return; } p_old[i_idx] = sortedRhoPreMu[i_idx].y; // This needs consistency p_old is old but v_new is new !! if (sortedRhoPreMu[i_idx].w > -1) { sortedVelMas[i_idx] = V_new[i_idx]; } } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void UpdateDensity(Real3* vis_vel, Real3* XSPH_Vel, Real3* sortedVelMas, // Write Real4* sortedPosRad, // Read Real4* sortedRhoPreMu, Real* sumWij_inv, uint* cellStart, uint* cellEnd, size_t numAllMarkers, volatile bool* isErrorD) { uint i_idx = blockIdx.x * blockDim.x + threadIdx.x; if (i_idx >= numAllMarkers) { return; } if (sortedRhoPreMu[i_idx].w <= -2) { sortedRhoPreMu[i_idx].x = 0; sortedRhoPreMu[i_idx].y = 0; sortedRhoPreMu[i_idx].z = 0; return; } Real dT = paramsD.dT; Real rho_plus = 0; Real3 Vel_i = sortedVelMas[i_idx]; Real3 posi = mR3(sortedPosRad[i_idx]); if ((sortedRhoPreMu[i_idx].x > 2 * paramsD.rho0 || sortedRhoPreMu[i_idx].x < 0) && sortedRhoPreMu[i_idx].w < 0) printf("(UpdateDensity-0)too large/small density marker %d, type=%f\n", i_idx, sortedRhoPreMu[i_idx].w); Real h_i = sortedPosRad[i_idx].w; int3 
gridPos = calcGridPos(posi); Real3 normalizedV_n = mR3(0); Real normalizedV_d = 0.0; Real sumW = 0.0; Real3 xSPH_Sum = mR3(0.); for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbourPos = gridPos + mI3(x, y, z); uint gridHash = calcGridHash(neighbourPos); // get start of bucket for this cell uint startIndex = cellStart[gridHash]; uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { Real3 posj = mR3(sortedPosRad[j]); Real3 dist3 = Distance(posi, posj); Real d = length(dist3); if (d > RESOLUTION_LENGTH_MULT * h_i || sortedRhoPreMu[j].w <= -2 || (sortedRhoPreMu[i_idx].w >= 0 && sortedRhoPreMu[j].w >= 0)) continue; Real3 Vel_j = sortedVelMas[j]; Real h_j = sortedPosRad[j].w; Real m_j = cube(h_j * paramsD.MULT_INITSPACE) * paramsD.rho0; Real h_ij = 0.5 * (h_j + h_i); Real3 grad_i_wij = GradWh(dist3, h_ij); rho_plus += m_j * dot((Vel_i - Vel_j), grad_i_wij) * sumWij_inv[j]; Real Wd = W3h(d, h_ij); sumW += Wd; normalizedV_n += Vel_j * Wd * m_j / sortedRhoPreMu[j].x; normalizedV_d += Wd * m_j / sortedRhoPreMu[j].x; if (sortedRhoPreMu[j].w != -1) continue; Real rho_bar = 0.5 * (sortedRhoPreMu[i_idx].x + sortedRhoPreMu[j].x); xSPH_Sum += (Vel_j - Vel_i) * Wd * m_j / rho_bar; } } } } if (abs(sumW) > EPSILON) { vis_vel[i_idx] = normalizedV_n / normalizedV_d; } XSPH_Vel[i_idx] = xSPH_Sum; // sortedRhoPreMu[i_idx].x += rho_plus * dT; if ((sortedRhoPreMu[i_idx].x > 2 * paramsD.rho0 || sortedRhoPreMu[i_idx].x < 0) && sortedRhoPreMu[i_idx].w < 0) printf("(UpdateDensity-1)too large/small density marker %d, type=%f\n", i_idx, sortedRhoPreMu[i_idx].w); } } // namespace fsi } // namespace chrono #endif
47d30ca68ecc136e070dfc3d33b0897388592a76.cu
// ============================================================================= // PROJECT CHRONO - http://projectchrono.org // // Copyright (c) 2014 projectchrono.org // All rights reserved. // // Use of this source code is governed by a BSD-style license that can be found // in the LICENSE file at the top level of the distribution and at // http://projectchrono.org/license-chrono.txt. // // ============================================================================= // Author:Arman Pazouki, Milad Rakhsha // ============================================================================= // This file contains miscellaneous macros and utilities used in the SPH code. // **************************************************************************** #ifndef CH_SPH_GENERAL_CU #define CH_SPH_GENERAL_CU // ---------------------------------------------------------------------------- // CUDA headers // ---------------------------------------------------------------------------- #include "chrono_fsi/physics/ChSphGeneral.cuh" namespace chrono { namespace fsi { void CopyParams_NumberOfObjects(std::shared_ptr<SimParams> paramsH, std::shared_ptr<NumberOfObjects> numObjectsH) { cudaMemcpyToSymbolAsync(paramsD, paramsH.get(), sizeof(SimParams)); cudaMemcpyToSymbolAsync(numObjectsD, numObjectsH.get(), sizeof(NumberOfObjects)); cudaDeviceSynchronize(); } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void calc_A_tensor(Real* A_tensor, Real* G_tensor, Real4* sortedPosRad, Real4* sortedRhoPreMu, Real* sumWij_inv, uint* csrColInd, uint* numContacts, const size_t numAllMarkers, volatile bool* isErrorD) { uint i_idx = blockIdx.x * blockDim.x + threadIdx.x; if (i_idx >= numAllMarkers) { return; } // Remember : we want to solve 6x6 system Bi*l=-[1 0 0 1 0 1]' // elements of matrix B depends on tensor A uint csrStartIdx = numContacts[i_idx]; uint csrEndIdx = numContacts[i_idx + 1]; Real3 posRadA = 
mR3(sortedPosRad[i_idx]); Real h_i = sortedPosRad[i_idx].w; Real m_i = cube(h_i * paramsD.MULT_INITSPACE) * paramsD.rho0; Real A_ijk[27] = {0.0}; Real Gi[9] = {0.0}; for (int i = 0; i < 9; i++) Gi[i] = G_tensor[i_idx * 9 + i]; // get address in grid int3 gridPos = calcGridPos(posRadA); for (int count = csrStartIdx; count < csrEndIdx; count++) { int j = csrColInd[count]; Real3 posRadB = mR3(sortedPosRad[j]); Real3 rij = Distance(posRadA, posRadB); Real d = length(rij); if (d > RESOLUTION_LENGTH_MULT * h_i || sortedRhoPreMu[j].w <= -2) continue; Real h_j = sortedPosRad[j].w; Real m_j = cube(h_j * paramsD.MULT_INITSPACE) * paramsD.rho0; Real h_ij = 0.5 * (h_j + h_i); Real3 grad_ij = GradWh(rij, h_ij); Real V_j = sumWij_inv[j]; Real com_part = 0; com_part = (Gi[0] * grad_ij.x + Gi[1] * grad_ij.y + Gi[2] * grad_ij.z) * V_j; A_ijk[0] += rij.x * rij.x * com_part; // 111 A_ijk[1] += rij.x * rij.y * com_part; // 112 A_ijk[2] += rij.x * rij.z * com_part; // 113 A_ijk[3] += rij.y * rij.x * com_part; // 121 A_ijk[4] += rij.y * rij.y * com_part; // 122 A_ijk[5] += rij.y * rij.z * com_part; // 123 A_ijk[6] += rij.z * rij.x * com_part; // 131 A_ijk[7] += rij.z * rij.y * com_part; // 132 A_ijk[8] += rij.z * rij.z * com_part; // 133 com_part = (Gi[3] * grad_ij.x + Gi[4] * grad_ij.y + Gi[5] * grad_ij.z) * V_j; A_ijk[9] += rij.x * rij.x * com_part; // 211 A_ijk[10] += rij.x * rij.y * com_part; // 212 A_ijk[11] += rij.x * rij.z * com_part; // 213 A_ijk[12] += rij.y * rij.x * com_part; // 221 A_ijk[13] += rij.y * rij.y * com_part; // 222 A_ijk[14] += rij.y * rij.z * com_part; // 223 A_ijk[15] += rij.z * rij.x * com_part; // 231 A_ijk[16] += rij.z * rij.y * com_part; // 232 A_ijk[17] += rij.z * rij.z * com_part; // 233 com_part = (Gi[6] * grad_ij.x + Gi[7] * grad_ij.y + Gi[8] * grad_ij.z) * V_j; A_ijk[18] += rij.x * rij.x * com_part; // 311 A_ijk[19] += rij.x * rij.y * com_part; // 312 A_ijk[20] += rij.x * rij.z * com_part; // 313 A_ijk[21] += rij.y * rij.x * com_part; // 321 A_ijk[22] 
+= rij.y * rij.y * com_part; // 322 A_ijk[23] += rij.y * rij.z * com_part; // 323 A_ijk[24] += rij.z * rij.x * com_part; // 331 A_ijk[25] += rij.z * rij.y * com_part; // 332 A_ijk[26] += rij.z * rij.z * com_part; // 333 } for (int i = 0; i < 27; i++) A_tensor[i_idx * 9 + i] = A_ijk[i]; } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void calc_L_tensor(Real* A_tensor, Real* L_tensor, Real* G_tensor, Real4* sortedPosRad, Real4* sortedRhoPreMu, Real* sumWij_inv, uint* csrColInd, uint* numContacts, const size_t numAllMarkers, volatile bool* isErrorD) { uint i_idx = blockIdx.x * blockDim.x + threadIdx.x; if (i_idx >= numAllMarkers) { return; } if (sortedRhoPreMu[i_idx].w != -1) { return; } // Remember : we want to solve 6x6 system Bi*l=-[1 0 0 1 0 1]' // elements of matrix B depends on tensor A uint csrStartIdx = numContacts[i_idx]; uint csrEndIdx = numContacts[i_idx + 1]; // - paramsD.Pressure_Constraint; Real3 posRadA = mR3(sortedPosRad[i_idx]); Real h_i = sortedPosRad[i_idx].w; Real m_i = cube(h_i * paramsD.MULT_INITSPACE) * paramsD.rho0; Real B[36] = {0.0}; // Real Gi[9] = {0.0}; // for (int i = 0; i < 9; i++) // Gi[i] = G_tensor[i_idx * 9 + i]; Real A_ijk[27] = {0.0}; for (int i = 0; i < 27; i++) A_ijk[i] = A_tensor[i_idx * 27 + i]; // get address in grid int3 gridPos = calcGridPos(posRadA); for (int count = csrStartIdx; count < csrEndIdx; count++) { int j = csrColInd[count]; Real3 posRadB = mR3(sortedPosRad[j]); Real3 rij = Distance(posRadA, posRadB); Real d = length(rij); if (d > RESOLUTION_LENGTH_MULT * h_i || sortedRhoPreMu[j].w <= -2) continue; Real3 eij = rij / d; Real h_j = sortedPosRad[j].w; Real m_j = cube(h_j * paramsD.MULT_INITSPACE) * paramsD.rho0; Real h_ij = 0.5 * (h_j + h_i); Real3 grad_ij = GradWh(rij, h_ij); Real V_j = sumWij_inv[j]; Real com_part = 0; // mn=11 Real XX = (eij.x * grad_ij.x); Real XY = (eij.x * grad_ij.y + eij.y * grad_ij.x); Real XZ = 
(eij.x * grad_ij.z + eij.z * grad_ij.x); Real YY = (eij.y * grad_ij.y); Real YZ = (eij.y * grad_ij.z + eij.z * grad_ij.y); Real ZZ = (eij.z * grad_ij.z); com_part = (A_ijk[0] * eij.x + A_ijk[9] * eij.y + A_ijk[18] * eij.z + rij.x * eij.x) * V_j; B[6 * 0 + 0] += com_part * XX; // 11 B[6 * 0 + 1] += com_part * XY; // 12 B[6 * 0 + 2] += com_part * XZ; // 13 B[6 * 0 + 3] += com_part * YY; // 14 B[6 * 0 + 4] += com_part * YZ; // 15 B[6 * 0 + 5] += com_part * ZZ; // 15 // mn=12 com_part = (A_ijk[1] * eij.x + A_ijk[10] * eij.y + A_ijk[19] * eij.z + rij.x * eij.y) * V_j; B[6 * 1 + 0] += com_part * XX; // 21 B[6 * 1 + 1] += com_part * XY; // 22 B[6 * 1 + 2] += com_part * XZ; // 23 B[6 * 1 + 3] += com_part * YY; // 24 B[6 * 1 + 4] += com_part * YZ; // 25 B[6 * 1 + 5] += com_part * ZZ; // 25 // mn=13 com_part = (A_ijk[2] * eij.x + A_ijk[11] * eij.y + A_ijk[20] * eij.z + rij.x * eij.z) * V_j; B[6 * 2 + 0] += com_part * XX; // 31 B[6 * 2 + 1] += com_part * XY; // 32 B[6 * 2 + 2] += com_part * XZ; // 33 B[6 * 2 + 3] += com_part * YY; // 34 B[6 * 2 + 4] += com_part * YZ; // 35 B[6 * 2 + 5] += com_part * ZZ; // 36 // Note that we skip mn=21 since it is similar to mn=12 // mn=22 com_part = (A_ijk[4] * eij.x + A_ijk[13] * eij.y + A_ijk[22] * eij.z + rij.y * eij.y) * V_j; B[6 * 3 + 0] += com_part * XX; // 41 B[6 * 3 + 1] += com_part * XY; // 42 B[6 * 3 + 2] += com_part * XZ; // 43 B[6 * 3 + 3] += com_part * YY; // 44 B[6 * 3 + 4] += com_part * YZ; // 45 B[6 * 3 + 5] += com_part * ZZ; // 46 // mn=23 com_part = (A_ijk[5] * eij.x + A_ijk[14] * eij.y + A_ijk[23] * eij.z + rij.y * eij.z) * V_j; B[6 * 4 + 0] += com_part * XX; // 51 B[6 * 4 + 1] += com_part * XY; // 52 B[6 * 4 + 2] += com_part * XZ; // 53 B[6 * 4 + 3] += com_part * YY; // 54 B[6 * 4 + 4] += com_part * YZ; // 55 B[6 * 4 + 5] += com_part * ZZ; // 56 // mn=33 com_part = (A_ijk[8] * eij.x + A_ijk[17] * eij.y + A_ijk[26] * eij.z + rij.z * eij.z) * V_j; B[6 * 5 + 0] += com_part * XX; // 61 B[6 * 5 + 1] += com_part * XY; // 62 B[6 
* 5 + 2] += com_part * XZ; // 63 B[6 * 5 + 3] += com_part * YY; // 64 B[6 * 5 + 4] += com_part * YZ; // 65 B[6 * 5 + 5] += com_part * ZZ; // 66 } inv6xdelta_mn(B, &L_tensor[6 * i_idx]); } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void calcRho_kernel(Real4* sortedPosRad, Real4* sortedRhoPreMu, Real* sumWij_inv, uint* cellStart, uint* cellEnd, uint* mynumContact, const size_t numAllMarkers, volatile bool* isErrorD) { uint i_idx = blockIdx.x * blockDim.x + threadIdx.x; if (i_idx >= numAllMarkers) { return; } if (sortedRhoPreMu[i_idx].w == -2) { mynumContact[i_idx] = 1; return; } Real3 posRadA = mR3(sortedPosRad[i_idx]); Real h_i = sortedPosRad[i_idx].w; Real m_i = cube((h_i * paramsD.MULT_INITSPACE)) * paramsD.rho0; Real sum_mW = 0; Real sum_W = 0.0; uint mcon = 1; // get address in grid int3 gridPos = calcGridPos(posRadA); for (int z = -1; z <= 1; z++) for (int y = -1; y <= 1; y++) for (int x = -1; x <= 1; x++) { int3 neighbourPos = gridPos + mI3(x, y, z); uint gridHash = calcGridHash(neighbourPos); uint startIndex = cellStart[gridHash]; if (startIndex != 0xffffffff) { uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { Real3 posRadB = mR3(sortedPosRad[j]); Real3 dist3 = Distance(posRadA, posRadB); Real d = length(dist3); if (d > RESOLUTION_LENGTH_MULT * h_i || sortedRhoPreMu[j].w <= -2) continue; if (i_idx != j) mcon++; Real h_j = sortedPosRad[j].w; Real m_j = cube(h_j * paramsD.MULT_INITSPACE) * paramsD.rho0; Real W3 = W3h(d, 0.5 * (h_j + h_i)); sum_mW += m_j * W3; sum_W += W3; } } } mynumContact[i_idx] = mcon; // Adding neighbor contribution is done! 
sumWij_inv[i_idx] = m_i / sum_mW; sortedRhoPreMu[i_idx].x = sum_mW; if ((sortedRhoPreMu[i_idx].x > 2 * paramsD.rho0 || sortedRhoPreMu[i_idx].x < 0) && sortedRhoPreMu[i_idx].w == -1) printf("(calcRho_kernel)too large/small density marker %d, rho=%f, sum_W=%f, m_i=%f\n", i_idx, sortedRhoPreMu[i_idx].x, sum_W, m_i); } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void calcNormalizedRho_kernel(Real4* sortedPosRad, // input: sorted positions Real3* sortedVelMas, Real4* sortedRhoPreMu, Real* sumWij_inv, Real* G_i, Real3* normals, Real* Color, uint* cellStart, uint* cellEnd, const size_t numAllMarkers, volatile bool* isErrorD) { uint i_idx = blockIdx.x * blockDim.x + threadIdx.x; if (i_idx >= numAllMarkers || sortedRhoPreMu[i_idx].w <= -2) { return; } // Real3 gravity = paramsD.gravity; Real RHO_0 = paramsD.rho0; // Real IncompressibilityFactor = paramsD.IncompressibilityFactor; // dxi_over_Vi[i_idx] = 1e10; if (sortedRhoPreMu[i_idx].w == -2) return; Real3 posRadA = mR3(sortedPosRad[i_idx]); Real h_i = sortedPosRad[i_idx].w; // Real m_i = cube(h_i * paramsD.MULT_INITSPACE) * paramsD.rho0; Real sum_mW = 0; Real sum_Wij_inv = 0; Real C = 0; // get address in grid int3 gridPos = calcGridPos(posRadA); // This is the elements of inverse of G Real mGi[9] = {0.0}; Real theta_i = sortedRhoPreMu[i_idx].w + 1; if (theta_i > 1) theta_i = 1; Real3 mynormals = mR3(0.0); // examine neighbouring cells for (int z = -1; z <= 1; z++) for (int y = -1; y <= 1; y++) for (int x = -1; x <= 1; x++) { int3 neighbourPos = gridPos + mI3(x, y, z); uint gridHash = calcGridHash(neighbourPos); // get start of bucket for this cell50 uint startIndex = cellStart[gridHash]; if (startIndex != 0xffffffff) { // cell is not empty // iterate over particles in this cell uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { Real3 posRadB = mR3(sortedPosRad[j]); Real3 dist3 = 
Distance(posRadA, posRadB); Real3 dv3 = Distance(sortedVelMas[i_idx], sortedVelMas[j]); Real d = length(dist3); Real h_j = sortedPosRad[j].w; Real m_j = cube(h_j * 1) * paramsD.rho0; C += m_j * Color[i_idx] / sortedRhoPreMu[i_idx].x * W3h(d, 0.5 * (h_j + h_i)); if (d > RESOLUTION_LENGTH_MULT * h_i || sortedRhoPreMu[j].w <= -2) continue; Real V_j = sumWij_inv[j]; Real h_ij = 0.5 * (h_j + h_i); Real W3 = W3h(d, h_ij); Real3 grad_i_wij = GradWh(dist3, h_ij); Real theta_j = sortedRhoPreMu[j].w + 1; if (theta_j > 1) theta_j = 1; if (sortedRhoPreMu[i_idx].w == -3 && sortedRhoPreMu[j].w == -3) mynormals += grad_i_wij * V_j; if (sortedRhoPreMu[i_idx].w != -3) mynormals += (theta_j - theta_i) * grad_i_wij * V_j; mGi[0] -= dist3.x * grad_i_wij.x * V_j; mGi[1] -= dist3.x * grad_i_wij.y * V_j; mGi[2] -= dist3.x * grad_i_wij.z * V_j; mGi[3] -= dist3.y * grad_i_wij.x * V_j; mGi[4] -= dist3.y * grad_i_wij.y * V_j; mGi[5] -= dist3.y * grad_i_wij.z * V_j; mGi[6] -= dist3.z * grad_i_wij.x * V_j; mGi[7] -= dist3.z * grad_i_wij.y * V_j; mGi[8] -= dist3.z * grad_i_wij.z * V_j; sum_mW += m_j * W3; sum_Wij_inv += sumWij_inv[j] * W3; } } } normals[i_idx] = mynormals; if (length(mynormals) > EPSILON) normals[i_idx] = mynormals / length(mynormals); Real Det = (mGi[0] * mGi[4] * mGi[8] - mGi[0] * mGi[5] * mGi[7] - mGi[1] * mGi[3] * mGi[8] + mGi[1] * mGi[5] * mGi[6] + mGi[2] * mGi[3] * mGi[7] - mGi[2] * mGi[4] * mGi[6]); G_i[i_idx * 9 + 0] = (mGi[4] * mGi[8] - mGi[5] * mGi[7]) / Det; G_i[i_idx * 9 + 1] = -(mGi[1] * mGi[8] - mGi[2] * mGi[7]) / Det; G_i[i_idx * 9 + 2] = (mGi[1] * mGi[5] - mGi[2] * mGi[4]) / Det; G_i[i_idx * 9 + 3] = -(mGi[3] * mGi[8] - mGi[5] * mGi[6]) / Det; G_i[i_idx * 9 + 4] = (mGi[0] * mGi[8] - mGi[2] * mGi[6]) / Det; G_i[i_idx * 9 + 5] = -(mGi[0] * mGi[5] - mGi[2] * mGi[3]) / Det; G_i[i_idx * 9 + 6] = (mGi[3] * mGi[7] - mGi[4] * mGi[6]) / Det; G_i[i_idx * 9 + 7] = -(mGi[0] * mGi[7] - mGi[1] * mGi[6]) / Det; G_i[i_idx * 9 + 8] = (mGi[0] * mGi[4] - mGi[1] * mGi[3]) / Det; // 
if (sortedRhoPreMu[i_idx].x > RHO_0) // IncompressibilityFactor = 1; // sortedRhoPreMu[i_idx].x = (sum_mW / sum_W_sumWij_inv - RHO_0) * IncompressibilityFactor + RHO_0; // sortedRhoPreMu[i_idx].x = (sum_mW - RHO_0) * IncompressibilityFactor + RHO_0; sortedRhoPreMu[i_idx].x = sum_mW / sum_Wij_inv; if ((sortedRhoPreMu[i_idx].x > 5 * RHO_0 || sortedRhoPreMu[i_idx].x < RHO_0 / 5) && sortedRhoPreMu[i_idx].w == -1) printf( "calcNormalizedRho_kernel-- sortedRhoPreMu[i_idx].w=%f, h=%f, sum_mW=%f, " "sum_W_sumWij_inv=%.4e, sortedRhoPreMu[i_idx].x=%.4e\n", sortedRhoPreMu[i_idx].w, sortedPosRad[i_idx].w, sum_mW, sum_Wij_inv, sortedRhoPreMu[i_idx].x); } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void calcNormalizedRho_Gi_fillInMatrixIndices(Real4* sortedPosRad, // input: sorted positions Real3* sortedVelMas, Real4* sortedRhoPreMu, Real* sumWij_inv, Real* G_i, Real3* normals, uint* csrColInd, uint* numContacts, uint* cellStart, uint* cellEnd, const size_t numAllMarkers, volatile bool* isErrorD) { uint i_idx = blockIdx.x * blockDim.x + threadIdx.x; if (i_idx >= numAllMarkers) { return; } Real RHO_0 = paramsD.rho0; uint csrStartIdx = numContacts[i_idx] + 1; // Reserve the starting index for the A_ii Real3 posRadA = mR3(sortedPosRad[i_idx]); Real h_i = sortedPosRad[i_idx].w; Real sum_mW = 0; Real sum_mW_rho = 0; Real sum_W_sumWij_inv = 0; // get address in grid int3 gridPos = calcGridPos(posRadA); csrColInd[csrStartIdx - 1] = i_idx; uint nextCol = csrStartIdx; if (sortedRhoPreMu[i_idx].w == -2) return; Real theta_i = sortedRhoPreMu[i_idx].w + 1; if (theta_i > 1) theta_i = 1; Real3 mynormals = mR3(0.0); // This is the elements of inverse of G Real mGi[9] = {0.0}; // examine neighbouring cells for (int z = -1; z <= 1; z++) for (int y = -1; y <= 1; y++) for (int x = -1; x <= 1; x++) { int3 neighbourPos = gridPos + mI3(x, y, z); uint gridHash = calcGridHash(neighbourPos); // get start of 
bucket for this cell50 uint startIndex = cellStart[gridHash]; if (startIndex != 0xffffffff) { // cell is not empty // iterate over particles in this cell uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { Real3 posRadB = mR3(sortedPosRad[j]); Real3 rij = Distance(posRadA, posRadB); Real3 dv3 = Distance(sortedVelMas[i_idx], sortedVelMas[j]); Real d = length(rij); Real h_j = sortedPosRad[j].w; Real m_j = cube(h_j * paramsD.MULT_INITSPACE) * paramsD.rho0; Real h_ij = 0.5 * (h_j + h_i); Real W3 = W3h(d, h_ij); Real3 grad_i_wij = GradWh(rij, h_ij); Real V_j = sumWij_inv[j]; if (d > RESOLUTION_LENGTH_MULT * h_i || sortedRhoPreMu[j].w <= -2) continue; if (i_idx != j) { csrColInd[nextCol] = j; nextCol++; } Real theta_j = sortedRhoPreMu[j].w + 1; if (theta_j > 1) theta_j = 1; if (sortedRhoPreMu[i_idx].w == -3 && sortedRhoPreMu[j].w == -3) mynormals += grad_i_wij * V_j; if (sortedRhoPreMu[i_idx].w != -3) mynormals += (theta_j - theta_i) * grad_i_wij * V_j; mGi[0] -= rij.x * grad_i_wij.x * V_j; mGi[1] -= rij.x * grad_i_wij.y * V_j; mGi[2] -= rij.x * grad_i_wij.z * V_j; mGi[3] -= rij.y * grad_i_wij.x * V_j; mGi[4] -= rij.y * grad_i_wij.y * V_j; mGi[5] -= rij.y * grad_i_wij.z * V_j; mGi[6] -= rij.z * grad_i_wij.x * V_j; mGi[7] -= rij.z * grad_i_wij.y * V_j; mGi[8] -= rij.z * grad_i_wij.z * V_j; sum_mW += m_j * W3; // sum_mW += sortedRhoPreMu[j].x * W3; // sum_mW += m_j * sumWij_inv[j]; // sum_mW += sortedRhoPreMu[j].x * W3 * V_j; sum_mW_rho += W3 * m_j / sortedRhoPreMu[j].x; // sum_W_sumWij_inv += W3 * sumWij_inv[j]; } } } normals[i_idx] = mynormals; if (length(mynormals) > EPSILON) normals[i_idx] = mynormals / length(mynormals); if (sortedRhoPreMu[i_idx].w == -3) normals[i_idx] *= -1; Real Det = (mGi[0] * mGi[4] * mGi[8] - mGi[0] * mGi[5] * mGi[7] - mGi[1] * mGi[3] * mGi[8] + mGi[1] * mGi[5] * mGi[6] + mGi[2] * mGi[3] * mGi[7] - mGi[2] * mGi[4] * mGi[6]); if (abs(Det) < EPSILON && sortedRhoPreMu[i_idx].w != -3) { for (int i = 0; i < 9; i++) { 
G_i[i_idx * 9 + i] = 0.0; G_i[i_idx * 9 + 0] = 1; G_i[i_idx * 9 + 4] = 1; G_i[i_idx * 9 + 8] = 1; } } else { G_i[i_idx * 9 + 0] = (mGi[4] * mGi[8] - mGi[5] * mGi[7]) / Det; G_i[i_idx * 9 + 1] = -(mGi[1] * mGi[8] - mGi[2] * mGi[7]) / Det; G_i[i_idx * 9 + 2] = (mGi[1] * mGi[5] - mGi[2] * mGi[4]) / Det; G_i[i_idx * 9 + 3] = -(mGi[3] * mGi[8] - mGi[5] * mGi[6]) / Det; G_i[i_idx * 9 + 4] = (mGi[0] * mGi[8] - mGi[2] * mGi[6]) / Det; G_i[i_idx * 9 + 5] = -(mGi[0] * mGi[5] - mGi[2] * mGi[3]) / Det; G_i[i_idx * 9 + 6] = (mGi[3] * mGi[7] - mGi[4] * mGi[6]) / Det; G_i[i_idx * 9 + 7] = -(mGi[0] * mGi[7] - mGi[1] * mGi[6]) / Det; G_i[i_idx * 9 + 8] = (mGi[0] * mGi[4] - mGi[1] * mGi[3]) / Det; } // sortedRhoPreMu[i_idx].x = sum_mW / sum_mW_rho; // sortedRhoPreMu[i_idx].x = sum_mW / sum_W; // sortedRhoPreMu[i_idx].x = sum_mW; if ((sortedRhoPreMu[i_idx].x > 5 * RHO_0 || sortedRhoPreMu[i_idx].x < RHO_0 / 5) && sortedRhoPreMu[i_idx].w > -2) printf( "calcNormalizedRho_kernel-- sortedRhoPreMu[i_idx].w=%f, h=%f, sum_mW=%f, " "sum_W_sumWij_inv=%.4e, sortedRhoPreMu[i_idx].x=%.4e\n", sortedRhoPreMu[i_idx].w, sortedPosRad[i_idx].w, sum_mW, sum_W_sumWij_inv, sortedRhoPreMu[i_idx].x); } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void Function_Gradient_Laplacian_Operator(Real4* sortedPosRad, // input: sorted positions Real3* sortedVelMas, Real4* sortedRhoPreMu, Real* sumWij_inv, Real* G_tensor, Real* L_tensor, Real* A_L, // Laplacian Operator matrix Real3* A_G, // Gradient Operator matrix Real* A_f, // Function Operator matrix // A_L, A_G are in system level; // A_G* p gives gradp, A_L*p gives Delta^2p uint* csrColInd, uint* numContacts, const size_t numAllMarkers, volatile bool* isErrorD) { uint i_idx = blockIdx.x * blockDim.x + threadIdx.x; if (i_idx >= numAllMarkers) return; if (sortedRhoPreMu[i_idx].w <= -2) return; // Real RHO_0 = paramsD.rho0; uint csrStartIdx = numContacts[i_idx]; uint 
csrEndIdx = numContacts[i_idx + 1]; //- paramsD.Pressure_Constraint; Real3 posRadA = mR3(sortedPosRad[i_idx]); Real h_i = sortedPosRad[i_idx].w; // get address in grid int3 gridPos = calcGridPos(posRadA); // This is the elements of inverse of G Real mGi[9] = {0.0}; Real Li[6] = {0.0}; Real3 LaplacainVi = mR3(0.0); Real NormGi = 0; Real NormLi = 0; for (int i = 0; i < 9; i++) { mGi[i] = G_tensor[i_idx * 9 + i]; NormGi += abs(mGi[i]); } for (int i = 0; i < 6; i++) { Li[i] = L_tensor[i_idx * 6 + i]; NormLi += abs(Li[i]); } Real V_i = sumWij_inv[i_idx]; Real m_i = cube(h_i * paramsD.MULT_INITSPACE) * paramsD.rho0; Real rhoi = sortedRhoPreMu[i_idx].x; for (int count = csrStartIdx; count < csrEndIdx; count++) { int j = csrColInd[count]; Real3 posRadB = mR3(sortedPosRad[j]); Real3 rij = Distance(posRadA, posRadB); Real d = length(rij); Real3 eij = rij / d; Real h_j = sortedPosRad[j].w; Real m_j = cube(h_j * paramsD.MULT_INITSPACE) * paramsD.rho0; Real W3 = 0.5 * (W3h(d, h_i) + W3h(d, h_j)); Real3 grad_i_wij = 0.5 * (GradWh(rij, h_i) + GradWh(rij, h_j)); Real V_j = sumWij_inv[j]; A_f[count] = V_j * W3; if (paramsD.Conservative_Form) { if (paramsD.gradient_type == 0) { Real Coeff = V_j; A_G[count] = Coeff * grad_i_wij; A_G[csrStartIdx] -= Coeff * grad_i_wij; } else if (paramsD.gradient_type == 1) { Real Coeff = V_j; A_G[count] = Coeff * grad_i_wij; A_G[csrStartIdx] += Coeff * grad_i_wij; } else if (paramsD.gradient_type == 2) { Real3 comm = m_j * rhoi * grad_i_wij; A_G[count] = 1.0 / (sortedRhoPreMu[j].x * sortedRhoPreMu[j].x) * comm; A_G[csrStartIdx] += 1.0 / (rhoi * rhoi) * comm; } else { Real3 comm = 1.0 / V_i * (V_j * V_j + V_i * V_i) / (rhoi + sortedRhoPreMu[j].x) * grad_i_wij; A_G[count] = rhoi * comm; A_G[csrStartIdx] += sortedRhoPreMu[j].x * comm; } } else { Real Coeff = V_j; A_G[count].x = Coeff * (grad_i_wij.x * mGi[0] + grad_i_wij.y * mGi[1] + grad_i_wij.z * mGi[2]); A_G[count].y = Coeff * (grad_i_wij.x * mGi[3] + grad_i_wij.y * mGi[4] + grad_i_wij.z * mGi[5]); 
A_G[count].z = Coeff * (grad_i_wij.x * mGi[6] + grad_i_wij.y * mGi[7] + grad_i_wij.z * mGi[8]); A_G[csrStartIdx].x -= Coeff * (grad_i_wij.x * mGi[0] + grad_i_wij.y * mGi[1] + grad_i_wij.z * mGi[2]); A_G[csrStartIdx].y -= Coeff * (grad_i_wij.x * mGi[3] + grad_i_wij.y * mGi[4] + grad_i_wij.z * mGi[5]); A_G[csrStartIdx].z -= Coeff * (grad_i_wij.x * mGi[6] + grad_i_wij.y * mGi[7] + grad_i_wij.z * mGi[8]); } } for (int count = csrStartIdx; count < csrEndIdx; count++) { int j = csrColInd[count]; Real3 posRadB = mR3(sortedPosRad[j]); Real3 rij = Distance(posRadA, posRadB); Real d = length(rij); Real3 eij = rij / d; Real h_j = sortedPosRad[j].w; Real m_j = cube(h_j * paramsD.MULT_INITSPACE) * paramsD.rho0; Real h_ij = 0.5 * (h_j + h_i); Real W3 = W3h(d, h_ij); Real3 grad_ij = GradWh(rij, h_ij); Real V_j = sumWij_inv[j]; if (d < EPSILON) continue; if (paramsD.Conservative_Form) { if (paramsD.laplacian_type == 0) { Real commonterm = 1.0 / V_j * (V_j * V_j + V_i * V_i) * dot(rij, grad_ij); A_L[count] -= commonterm / (d * d + h_ij * h_ij * paramsD.epsMinMarkersDis); // j A_L[csrStartIdx] += commonterm / (d * d + h_ij * h_ij * paramsD.epsMinMarkersDis); // i for (int count_in = csrStartIdx; count_in < csrEndIdx; count_in++) { A_L[count_in] -= commonterm * dot(A_G[count_in], eij); // k } } else if (paramsD.laplacian_type == 1) { Real comm = 2.0 / rhoi * m_j * dot(rij, grad_ij) / (d * d + h_ij * h_ij * paramsD.epsMinMarkersDis); A_L[count] = -comm; // j A_L[csrStartIdx] += comm; // i } else { Real comm = 2.0 / V_i * (V_j * V_j + V_i * V_i) * dot(rij, grad_ij) / (d * d + h_ij * h_ij * paramsD.epsMinMarkersDis); A_L[count] = -comm; // j A_L[csrStartIdx] += comm; // i } } else { Real commonterm = 1.0 / V_j * (V_j * V_j + V_i * V_i) * (Li[0] * eij.x * grad_ij.x + Li[1] * eij.x * grad_ij.y + Li[2] * eij.x * grad_ij.z + Li[1] * eij.y * grad_ij.x + Li[3] * eij.y * grad_ij.y + Li[4] * eij.y * grad_ij.z + Li[2] * eij.z * grad_ij.x + Li[4] * eij.z * grad_ij.y + Li[5] * eij.z * grad_ij.z); 
A_L[count] -= commonterm / (d + h_ij * paramsD.epsMinMarkersDis); // j A_L[csrStartIdx] += commonterm / (d + h_ij * paramsD.epsMinMarkersDis); // i for (int count_in = csrStartIdx; count_in < csrEndIdx; count_in++) { A_L[count_in] -= commonterm * dot(A_G[count_in], eij); // k } } if (!(isfinite(A_L[count]))) { printf("Error! A_L ChSPHGeneral.cu !\n"); } } } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void Jacobi_SOR_Iter(Real4* sortedRhoPreMu, Real* A_Matrix, Real3* V_old, Real3* V_new, Real3* b3vec, Real* q_old, // q=p^(n+1)-p^n Real* q_new, // q=p^(n+1)-p^n Real* b1vec, const uint* csrColInd, const uint* numContacts, size_t numAllMarkers, bool _3dvector, volatile bool* isErrorD) { uint i_idx = blockIdx.x * blockDim.x + threadIdx.x; if (i_idx >= numAllMarkers) { return; } uint startIdx = numContacts[i_idx] + 1; // Reserve the starting index for the A_ii uint endIdx = numContacts[i_idx + 1]; //- uint(_3dvector && paramsD.Pressure_Constraint); if (_3dvector) { Real3 aij_vj = mR3(0.0); for (int myIdx = startIdx; myIdx < endIdx; myIdx++) { int j = csrColInd[myIdx]; aij_vj += A_Matrix[myIdx] * V_old[j]; } V_new[i_idx] = (b3vec[i_idx] - aij_vj) / A_Matrix[startIdx - 1]; } else { Real aij_pj = 0.0; for (int myIdx = startIdx; myIdx < endIdx; myIdx++) { aij_pj += A_Matrix[myIdx] * q_old[csrColInd[myIdx]]; } q_new[i_idx] = (b1vec[i_idx] - aij_pj) / A_Matrix[startIdx - 1]; } } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void Update_AND_Calc_Res(Real4* sortedRhoPreMu, Real3* V_old, Real3* V_new, Real* q_old, Real* q_new, Real* Residuals, const size_t numAllMarkers, bool _3dvector, volatile bool* isErrorD) { uint i_idx = blockIdx.x * blockDim.x + threadIdx.x; if (i_idx >= numAllMarkers) { return; } // Real omega = _3dvector ? 
1.0 : paramsD.PPE_relaxation; Real omega = paramsD.PPE_relaxation; Real res = 0; if (_3dvector) { V_new[i_idx] = (1 - omega) * V_old[i_idx] + omega * V_new[i_idx]; res = length(V_old[i_idx] - V_new[i_idx]); V_old[i_idx] = V_new[i_idx]; } else { q_new[i_idx] = (1 - omega) * q_old[i_idx] + omega * q_new[i_idx]; res = abs(q_old[i_idx] - q_new[i_idx]); q_old[i_idx] = q_new[i_idx]; } Residuals[i_idx] = res; } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void Initialize_Variables(Real4* sortedRhoPreMu, Real* p_old, Real3* sortedVelMas, Real3* V_new, const size_t numAllMarkers, volatile bool* isErrorD) { const uint i_idx = blockIdx.x * blockDim.x + threadIdx.x; if (i_idx >= numAllMarkers) { return; } if (sortedRhoPreMu[i_idx].w <= -2) { return; } p_old[i_idx] = sortedRhoPreMu[i_idx].y; // This needs consistency p_old is old but v_new is new !! if (sortedRhoPreMu[i_idx].w > -1) { sortedVelMas[i_idx] = V_new[i_idx]; } } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void UpdateDensity(Real3* vis_vel, Real3* XSPH_Vel, Real3* sortedVelMas, // Write Real4* sortedPosRad, // Read Real4* sortedRhoPreMu, Real* sumWij_inv, uint* cellStart, uint* cellEnd, size_t numAllMarkers, volatile bool* isErrorD) { uint i_idx = blockIdx.x * blockDim.x + threadIdx.x; if (i_idx >= numAllMarkers) { return; } if (sortedRhoPreMu[i_idx].w <= -2) { sortedRhoPreMu[i_idx].x = 0; sortedRhoPreMu[i_idx].y = 0; sortedRhoPreMu[i_idx].z = 0; return; } Real dT = paramsD.dT; Real rho_plus = 0; Real3 Vel_i = sortedVelMas[i_idx]; Real3 posi = mR3(sortedPosRad[i_idx]); if ((sortedRhoPreMu[i_idx].x > 2 * paramsD.rho0 || sortedRhoPreMu[i_idx].x < 0) && sortedRhoPreMu[i_idx].w < 0) printf("(UpdateDensity-0)too large/small density marker %d, type=%f\n", i_idx, sortedRhoPreMu[i_idx].w); Real h_i = sortedPosRad[i_idx].w; int3 
gridPos = calcGridPos(posi); Real3 normalizedV_n = mR3(0); Real normalizedV_d = 0.0; Real sumW = 0.0; Real3 xSPH_Sum = mR3(0.); for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbourPos = gridPos + mI3(x, y, z); uint gridHash = calcGridHash(neighbourPos); // get start of bucket for this cell uint startIndex = cellStart[gridHash]; uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { Real3 posj = mR3(sortedPosRad[j]); Real3 dist3 = Distance(posi, posj); Real d = length(dist3); if (d > RESOLUTION_LENGTH_MULT * h_i || sortedRhoPreMu[j].w <= -2 || (sortedRhoPreMu[i_idx].w >= 0 && sortedRhoPreMu[j].w >= 0)) continue; Real3 Vel_j = sortedVelMas[j]; Real h_j = sortedPosRad[j].w; Real m_j = cube(h_j * paramsD.MULT_INITSPACE) * paramsD.rho0; Real h_ij = 0.5 * (h_j + h_i); Real3 grad_i_wij = GradWh(dist3, h_ij); rho_plus += m_j * dot((Vel_i - Vel_j), grad_i_wij) * sumWij_inv[j]; Real Wd = W3h(d, h_ij); sumW += Wd; normalizedV_n += Vel_j * Wd * m_j / sortedRhoPreMu[j].x; normalizedV_d += Wd * m_j / sortedRhoPreMu[j].x; if (sortedRhoPreMu[j].w != -1) continue; Real rho_bar = 0.5 * (sortedRhoPreMu[i_idx].x + sortedRhoPreMu[j].x); xSPH_Sum += (Vel_j - Vel_i) * Wd * m_j / rho_bar; } } } } if (abs(sumW) > EPSILON) { vis_vel[i_idx] = normalizedV_n / normalizedV_d; } XSPH_Vel[i_idx] = xSPH_Sum; // sortedRhoPreMu[i_idx].x += rho_plus * dT; if ((sortedRhoPreMu[i_idx].x > 2 * paramsD.rho0 || sortedRhoPreMu[i_idx].x < 0) && sortedRhoPreMu[i_idx].w < 0) printf("(UpdateDensity-1)too large/small density marker %d, type=%f\n", i_idx, sortedRhoPreMu[i_idx].w); } } // namespace fsi } // namespace chrono #endif
cbe61de07aa95d12496764d2a8ca67c6c62ac7ff.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * This is a program to implement Odd-Even Sort using CUDA * It uses N/2 threads */ #include<cuda.h> #include<stdio.h> #include<time.h> #include<stdlib.h> #define N 1024 __global__ void oddeven(int *a, int flag) { int index = blockIdx.x * blockDim.x + threadIdx.x; int temp; if((index >= N/2 - 1) && flag % 2 != 0) return; if(flag % 2 == 0) //if even phase { if(a[index *2 ] > a[index * 2 + 1]) { temp = a[index * 2]; a[index * 2] = a[index *2 +1]; a[index * 2 +1] = temp; } } else { //if odd phase if(a[index * 2 +1 ] > a[index *2 + 2]) { temp = a[index * 2 + 1]; a[index * 2 + 1] = a[index*2+2]; a[index*2+2] = temp; } } } int main() { int *a; int *deva; int i; int size = sizeof(int) * N; srand((unsigned)time(NULL)); //allocate memory in host a = (int *)malloc(size); //allocate memory in CUDA (device) memory hipMalloc((void **)&deva, size); //puting some random values in memory for generating data for sorting for(i=0;i<N;i++) { a[i] = rand()%N; } printf("\nNumbers before sorting: "); for(i=0;i<N;i++) { printf("%d ", a[i]); } //recording starting time double start_time = clock(); //copy host memory data in CUDA (device) memory hipMemcpy(deva, a, size, hipMemcpyHostToDevice); // launch a kernel N-1 times for Odd-even sort for(i=0;i<N;i++) { hipLaunchKernelGGL(( oddeven), dim3(N/1024), dim3(512), 0, 0, deva, i); //512 threads per block and total N/2/512 blocks } //copy the result back into host memory hipMemcpy(a, deva, size, hipMemcpyDeviceToHost); //Lets see the execution time printf("\nExecution time : %lf seconds", (clock()-start_time)/CLOCKS_PER_SEC); //print the result printf("\nOutput: "); for(i=0;i<N;i++) { printf("%d ", a[i]); } return 0; }
cbe61de07aa95d12496764d2a8ca67c6c62ac7ff.cu
/* * This is a program to implement Odd-Even Sort using CUDA * It uses N/2 threads */ #include<cuda.h> #include<stdio.h> #include<time.h> #include<stdlib.h> #define N 1024 __global__ void oddeven(int *a, int flag) { int index = blockIdx.x * blockDim.x + threadIdx.x; int temp; if((index >= N/2 - 1) && flag % 2 != 0) return; if(flag % 2 == 0) //if even phase { if(a[index *2 ] > a[index * 2 + 1]) { temp = a[index * 2]; a[index * 2] = a[index *2 +1]; a[index * 2 +1] = temp; } } else { //if odd phase if(a[index * 2 +1 ] > a[index *2 + 2]) { temp = a[index * 2 + 1]; a[index * 2 + 1] = a[index*2+2]; a[index*2+2] = temp; } } } int main() { int *a; int *deva; int i; int size = sizeof(int) * N; srand((unsigned)time(NULL)); //allocate memory in host a = (int *)malloc(size); //allocate memory in CUDA (device) memory cudaMalloc((void **)&deva, size); //puting some random values in memory for generating data for sorting for(i=0;i<N;i++) { a[i] = rand()%N; } printf("\nNumbers before sorting: "); for(i=0;i<N;i++) { printf("%d ", a[i]); } //recording starting time double start_time = clock(); //copy host memory data in CUDA (device) memory cudaMemcpy(deva, a, size, cudaMemcpyHostToDevice); // launch a kernel N-1 times for Odd-even sort for(i=0;i<N;i++) { oddeven<<<N/1024, 512>>>(deva, i); //512 threads per block and total N/2/512 blocks } //copy the result back into host memory cudaMemcpy(a, deva, size, cudaMemcpyDeviceToHost); //Lets see the execution time printf("\nExecution time : %lf seconds", (clock()-start_time)/CLOCKS_PER_SEC); //print the result printf("\nOutput: "); for(i=0;i<N;i++) { printf("%d ", a[i]); } return 0; }
d1407c272ae392acbd7e061d289679916fb32032.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright (c) 2020 Neka-Nat * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
**/ #include <thrust/gather.h> #include <thrust/iterator/discard_iterator.h> #include <thrust/set_operations.h> #include <thrust/sort.h> #include <thrust/async/copy.h> #include "cupoch/geometry/kdtree_flann.h" #include "cupoch/geometry/pointcloud.h" #include "cupoch/utility/console.h" #include "cupoch/utility/helper.h" #include "cupoch/utility/platform.h" #include "cupoch/utility/range.h" using namespace cupoch; using namespace cupoch::geometry; namespace { void SelectByIndexImpl(const geometry::PointCloud &src, geometry::PointCloud &dst, const utility::device_vector<size_t> &indices) { const bool has_normals = src.HasNormals(); const bool has_colors = src.HasColors(); if (has_normals) dst.normals_.resize(indices.size()); if (has_colors) dst.colors_.resize(indices.size()); dst.points_.resize(indices.size()); thrust::gather(utility::exec_policy(utility::GetStream(0)) ->on(utility::GetStream(0)), indices.begin(), indices.end(), src.points_.begin(), dst.points_.begin()); if (has_normals) { thrust::gather(utility::exec_policy(utility::GetStream(1)) ->on(utility::GetStream(1)), indices.begin(), indices.end(), src.normals_.begin(), dst.normals_.begin()); } if (has_colors) { thrust::gather(utility::exec_policy(utility::GetStream(2)) ->on(utility::GetStream(2)), indices.begin(), indices.end(), src.colors_.begin(), dst.colors_.begin()); } cudaSafeCall(hipDeviceSynchronize()); } struct compute_key_functor { compute_key_functor(const Eigen::Vector3f &voxel_min_bound, float voxel_size) : voxel_min_bound_(voxel_min_bound), voxel_size_(voxel_size){}; const Eigen::Vector3f voxel_min_bound_; const float voxel_size_; __device__ Eigen::Vector3i operator()(const Eigen::Vector3f &pt) { auto ref_coord = (pt - voxel_min_bound_) / voxel_size_; return Eigen::device_vectorize<float, 3, ::floor>(ref_coord) .cast<int>(); } }; template <int Index, class... 
Args> struct normalize_and_devide_tuple_functor : public thrust::binary_function<const thrust::tuple<Args...>, const int, thrust::tuple<Args...>> { __host__ __device__ thrust::tuple<Args...> operator()( const thrust::tuple<Args...> &x, const int &y) const { thrust::tuple<Args...> ans = x; devide_tuple_impl(ans, y, thrust::make_index_sequence<sizeof...(Args)>{}); thrust::get<Index>(ans).normalize(); return ans; } }; struct check_distance_threshold_functor { check_distance_threshold_functor(float distance_threshold) : distance_threshold_(distance_threshold){}; const float distance_threshold_; __device__ bool operator()(thrust::tuple<int, float> x) const { const float dist = thrust::get<1>(x); return (dist > 0 && dist < distance_threshold_); } }; } // namespace std::shared_ptr<PointCloud> PointCloud::SelectByIndex( const utility::device_vector<size_t> &indices, bool invert) const { auto output = std::make_shared<PointCloud>(); if (invert) { size_t n_out = points_.size() - indices.size(); utility::device_vector<size_t> sorted_indices = indices; thrust::sort(utility::exec_policy(0)->on(0), sorted_indices.begin(), sorted_indices.end()); utility::device_vector<size_t> inv_indices(n_out); thrust::set_difference(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(points_.size()), sorted_indices.begin(), sorted_indices.end(), inv_indices.begin()); SelectByIndexImpl(*this, *output, inv_indices); } else { SelectByIndexImpl(*this, *output, indices); } return output; } std::shared_ptr<PointCloud> PointCloud::VoxelDownSample( float voxel_size) const { auto output = std::make_shared<PointCloud>(); if (voxel_size <= 0.0) { utility::LogWarning("[VoxelDownSample] voxel_size <= 0.\n"); return output; } const Eigen::Vector3f voxel_size3 = Eigen::Vector3f(voxel_size, voxel_size, voxel_size); const Eigen::Vector3f voxel_min_bound = GetMinBound() - voxel_size3 * 0.5; const Eigen::Vector3f voxel_max_bound = GetMaxBound() + voxel_size3 * 0.5; if (voxel_size * 
std::numeric_limits<int>::max() < (voxel_max_bound - voxel_min_bound).maxCoeff()) { utility::LogWarning("[VoxelDownSample] voxel_size is too small.\n"); return output; } const int n = points_.size(); const bool has_normals = HasNormals(); const bool has_colors = HasColors(); compute_key_functor ck_func(voxel_min_bound, voxel_size); utility::device_vector<Eigen::Vector3i> keys(n); thrust::transform(points_.begin(), points_.end(), keys.begin(), ck_func); utility::device_vector<Eigen::Vector3f> sorted_points = points_; output->points_.resize(n); utility::device_vector<int> counts(n); thrust::equal_to<Eigen::Vector3i> binary_pred; if (!has_normals && !has_colors) { thrust::sort_by_key(utility::exec_policy(0)->on(0), keys.begin(), keys.end(), make_tuple_begin(sorted_points)); add_tuple_functor<Eigen::Vector3f, int> add_func; auto begin = make_tuple_begin(output->points_, counts); auto end = thrust::reduce_by_key( utility::exec_policy(0)->on(0), keys.begin(), keys.end(), make_tuple_iterator(sorted_points.begin(), thrust::make_constant_iterator(1)), thrust::make_discard_iterator(), begin, binary_pred, add_func); int n_out = thrust::distance(begin, end.second); devide_tuple_functor<Eigen::Vector3f> dv_func; auto output_begins = make_tuple_begin(output->points_); thrust::transform(output_begins, output_begins + n_out, counts.begin(), output_begins, dv_func); output->points_.resize(n_out); } else if (has_normals && !has_colors) { utility::device_vector<Eigen::Vector3f> sorted_normals = normals_; output->normals_.resize(n); thrust::sort_by_key(utility::exec_policy(0)->on(0), keys.begin(), keys.end(), make_tuple_begin(sorted_points, sorted_normals)); add_tuple_functor<Eigen::Vector3f, Eigen::Vector3f, int> add_func; auto begin = make_tuple_begin(output->points_, output->normals_, counts); auto end = thrust::reduce_by_key( utility::exec_policy(0)->on(0), keys.begin(), keys.end(), make_tuple_iterator(sorted_points.begin(), sorted_normals.begin(), 
thrust::make_constant_iterator(1)), thrust::make_discard_iterator(), begin, binary_pred, add_func); int n_out = thrust::distance(begin, end.second); normalize_and_devide_tuple_functor<1, Eigen::Vector3f, Eigen::Vector3f> dv_func; auto output_begins = make_tuple_begin(output->points_, output->normals_); thrust::transform(output_begins, output_begins + n_out, counts.begin(), output_begins, dv_func); resize_all(n_out, output->points_, output->normals_); } else if (!has_normals && has_colors) { utility::device_vector<Eigen::Vector3f> sorted_colors = colors_; resize_all(n, output->colors_); thrust::sort_by_key(utility::exec_policy(0)->on(0), keys.begin(), keys.end(), make_tuple_begin(sorted_points, sorted_colors)); add_tuple_functor<Eigen::Vector3f, Eigen::Vector3f, int> add_func; auto begin = make_tuple_begin(output->points_, output->colors_, counts); auto end = thrust::reduce_by_key( utility::exec_policy(0)->on(0), keys.begin(), keys.end(), make_tuple_iterator(sorted_points.begin(), sorted_colors.begin(), thrust::make_constant_iterator(1)), thrust::make_discard_iterator(), begin, binary_pred, add_func); int n_out = thrust::distance(begin, end.second); devide_tuple_functor<Eigen::Vector3f, Eigen::Vector3f> dv_func; auto output_begins = make_tuple_begin(output->points_, output->colors_); thrust::transform(output_begins, output_begins + n_out, counts.begin(), output_begins, dv_func); resize_all(n_out, output->points_, output->colors_); } else { utility::device_vector<Eigen::Vector3f> sorted_normals = normals_; utility::device_vector<Eigen::Vector3f> sorted_colors = colors_; resize_all(n, output->normals_, output->colors_); thrust::sort_by_key( utility::exec_policy(0)->on(0), keys.begin(), keys.end(), make_tuple_begin(sorted_points, sorted_normals, sorted_colors)); add_tuple_functor<Eigen::Vector3f, Eigen::Vector3f, Eigen::Vector3f, int> add_func; auto begin = make_tuple_begin(output->points_, output->normals_, output->colors_, counts); auto end = thrust::reduce_by_key( 
utility::exec_policy(0)->on(0), keys.begin(), keys.end(), make_tuple_iterator(sorted_points.begin(), sorted_normals.begin(), sorted_colors.begin(), thrust::make_constant_iterator(1)), thrust::make_discard_iterator(), begin, binary_pred, add_func); int n_out = thrust::distance(begin, end.second); normalize_and_devide_tuple_functor<1, Eigen::Vector3f, Eigen::Vector3f, Eigen::Vector3f> dv_func; auto output_begins = make_tuple_begin(output->points_, output->normals_, output->colors_); thrust::transform(output_begins, output_begins + n_out, counts.begin(), output_begins, dv_func); resize_all(n_out, output->points_, output->normals_, output->colors_); } utility::LogDebug( "Pointcloud down sampled from {:d} points to {:d} points.\n", (int)points_.size(), (int)output->points_.size()); return output; } std::shared_ptr<PointCloud> PointCloud::UniformDownSample( size_t every_k_points) const { const bool has_normals = HasNormals(); const bool has_colors = HasColors(); auto output = std::make_shared<PointCloud>(); if (every_k_points == 0) { utility::LogError("[UniformDownSample] Illegal sample rate."); return output; } const int n_out = points_.size() / every_k_points; output->points_.resize(n_out); if (has_normals) output->normals_.resize(n_out); if (has_colors) output->colors_.resize(n_out); thrust::system::cuda::unique_eager_event copy_e[3]; thrust::strided_range< utility::device_vector<Eigen::Vector3f>::const_iterator> range_points(points_.begin(), points_.end(), every_k_points); copy_e[0] = thrust::async::copy(utility::exec_policy(utility::GetStream(0)) ->on(utility::GetStream(0)), range_points.begin(), range_points.end(), output->points_.begin()); if (has_normals) { thrust::strided_range< utility::device_vector<Eigen::Vector3f>::const_iterator> range_normals(normals_.begin(), normals_.end(), every_k_points); copy_e[1] = thrust::async::copy(utility::exec_policy(utility::GetStream(1)) ->on(utility::GetStream(1)), range_normals.begin(), range_normals.end(), 
output->normals_.begin()); } if (has_colors) { thrust::strided_range< utility::device_vector<Eigen::Vector3f>::const_iterator> range_colors(colors_.begin(), colors_.end(), every_k_points); copy_e[2] = thrust::async::copy(utility::exec_policy(utility::GetStream(2)) ->on(utility::GetStream(2)), range_colors.begin(), range_colors.end(), output->colors_.begin()); } copy_e[0].wait(); if (has_normals) { copy_e[1].wait(); } if (has_colors) { copy_e[2].wait(); } return output; } std::tuple<std::shared_ptr<PointCloud>, utility::device_vector<size_t>> PointCloud::RemoveRadiusOutliers(size_t nb_points, float search_radius) const { if (nb_points < 1 || search_radius <= 0) { utility::LogError( "[RemoveRadiusOutliers] Illegal input parameters," "number of points and radius must be positive"); } KDTreeFlann kdtree; kdtree.SetGeometry(*this); utility::device_vector<int> tmp_indices; utility::device_vector<float> dist; kdtree.SearchRadius(points_, search_radius, nb_points + 1, tmp_indices, dist); const size_t n_pt = points_.size(); utility::device_vector<size_t> counts(n_pt); utility::device_vector<size_t> indices(n_pt); thrust::repeated_range<thrust::counting_iterator<size_t>> range( thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(n_pt), nb_points + 1); thrust::reduce_by_key( utility::exec_policy(0)->on(0), range.begin(), range.end(), thrust::make_transform_iterator( tmp_indices.begin(), [] __device__(int idx) { return (int)(idx >= 0); }), thrust::make_discard_iterator(), counts.begin(), thrust::equal_to<size_t>(), thrust::plus<size_t>()); auto begin = make_tuple_iterator(indices.begin(), thrust::make_discard_iterator()); auto end = thrust::copy_if( enumerate_begin(counts), enumerate_end(counts), begin, [nb_points] __device__(const thrust::tuple<size_t, size_t> &x) { return thrust::get<1>(x) > nb_points; }); indices.resize(thrust::distance(begin, end)); return std::make_tuple(SelectByIndex(indices), indices); } std::tuple<std::shared_ptr<PointCloud>, 
utility::device_vector<size_t>> PointCloud::RemoveStatisticalOutliers(size_t nb_neighbors, float std_ratio) const { if (nb_neighbors < 1 || std_ratio <= 0) { utility::LogError( "[RemoveStatisticalOutliers] Illegal input parameters, number " "of neighbors and standard deviation ratio must be positive"); } if (points_.empty()) { return std::make_tuple(std::make_shared<PointCloud>(), utility::device_vector<size_t>()); } KDTreeFlann kdtree; kdtree.SetGeometry(*this); const size_t n_pt = points_.size(); utility::device_vector<float> avg_distances(n_pt); utility::device_vector<size_t> indices(n_pt); utility::device_vector<size_t> counts(n_pt); utility::device_vector<int> tmp_indices; utility::device_vector<float> dist; kdtree.SearchKNN(points_, int(nb_neighbors), tmp_indices, dist); thrust::repeated_range<thrust::counting_iterator<size_t>> range( thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(n_pt), nb_neighbors); thrust::reduce_by_key( utility::exec_policy(0)->on(0), range.begin(), range.end(), make_tuple_iterator(thrust::make_constant_iterator<size_t>(1), dist.begin()), thrust::make_discard_iterator(), make_tuple_iterator(counts.begin(), avg_distances.begin()), thrust::equal_to<size_t>(), [] __device__(const thrust::tuple<size_t, float> &rhs, const thrust::tuple<size_t, float> &lhs) { float rd = thrust::get<1>(rhs); size_t rc = thrust::get<0>(rhs); if (isinf(rd) || rd < 0.0) { rd = 0.0; rc = 0; } float ld = thrust::get<1>(lhs); size_t lc = thrust::get<0>(lhs); if (isinf(ld) || ld < 0.0) { ld = 0.0; lc = 0; } return thrust::make_tuple(rc + lc, rd + ld); }); thrust::transform(avg_distances.begin(), avg_distances.end(), counts.begin(), avg_distances.begin(), [] __device__(float avg, size_t cnt) { return (cnt > 0) ? 
avg / (float)cnt : -1.0; }); auto mean_and_count = thrust::transform_reduce( utility::exec_policy(0)->on(0), avg_distances.begin(), avg_distances.end(), [] __device__(float const &x) { return thrust::make_tuple(max(x, 0.0f), (size_t)(x >= 0.0)); }, thrust::make_tuple(0.0f, size_t(0)), add_tuple_functor<float, size_t>()); const size_t valid_distances = thrust::get<1>(mean_and_count); if (valid_distances == 0) { return std::make_tuple(std::make_shared<PointCloud>(), utility::device_vector<size_t>()); } float cloud_mean = thrust::get<0>(mean_and_count); cloud_mean /= valid_distances; const float sq_sum = thrust::transform_reduce( utility::exec_policy(0)->on(0), avg_distances.begin(), avg_distances.end(), [cloud_mean] __device__(const float x) { return (x > 0) ? (x - cloud_mean) * (x - cloud_mean) : 0; }, 0.0, thrust::plus<float>()); // Bessel's correction const float std_dev = std::sqrt(sq_sum / (valid_distances - 1)); const float distance_threshold = cloud_mean + std_ratio * std_dev; check_distance_threshold_functor th_func(distance_threshold); auto begin = make_tuple_iterator(indices.begin(), thrust::make_discard_iterator()); auto end = thrust::copy_if(enumerate_begin(avg_distances), enumerate_end(avg_distances), begin, th_func); indices.resize(thrust::distance(begin, end)); return std::make_tuple(SelectByIndex(indices), indices); }
d1407c272ae392acbd7e061d289679916fb32032.cu
/** * Copyright (c) 2020 Neka-Nat * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
**/ #include <thrust/gather.h> #include <thrust/iterator/discard_iterator.h> #include <thrust/set_operations.h> #include <thrust/sort.h> #include <thrust/async/copy.h> #include "cupoch/geometry/kdtree_flann.h" #include "cupoch/geometry/pointcloud.h" #include "cupoch/utility/console.h" #include "cupoch/utility/helper.h" #include "cupoch/utility/platform.h" #include "cupoch/utility/range.h" using namespace cupoch; using namespace cupoch::geometry; namespace { void SelectByIndexImpl(const geometry::PointCloud &src, geometry::PointCloud &dst, const utility::device_vector<size_t> &indices) { const bool has_normals = src.HasNormals(); const bool has_colors = src.HasColors(); if (has_normals) dst.normals_.resize(indices.size()); if (has_colors) dst.colors_.resize(indices.size()); dst.points_.resize(indices.size()); thrust::gather(utility::exec_policy(utility::GetStream(0)) ->on(utility::GetStream(0)), indices.begin(), indices.end(), src.points_.begin(), dst.points_.begin()); if (has_normals) { thrust::gather(utility::exec_policy(utility::GetStream(1)) ->on(utility::GetStream(1)), indices.begin(), indices.end(), src.normals_.begin(), dst.normals_.begin()); } if (has_colors) { thrust::gather(utility::exec_policy(utility::GetStream(2)) ->on(utility::GetStream(2)), indices.begin(), indices.end(), src.colors_.begin(), dst.colors_.begin()); } cudaSafeCall(cudaDeviceSynchronize()); } struct compute_key_functor { compute_key_functor(const Eigen::Vector3f &voxel_min_bound, float voxel_size) : voxel_min_bound_(voxel_min_bound), voxel_size_(voxel_size){}; const Eigen::Vector3f voxel_min_bound_; const float voxel_size_; __device__ Eigen::Vector3i operator()(const Eigen::Vector3f &pt) { auto ref_coord = (pt - voxel_min_bound_) / voxel_size_; return Eigen::device_vectorize<float, 3, ::floor>(ref_coord) .cast<int>(); } }; template <int Index, class... 
Args> struct normalize_and_devide_tuple_functor : public thrust::binary_function<const thrust::tuple<Args...>, const int, thrust::tuple<Args...>> { __host__ __device__ thrust::tuple<Args...> operator()( const thrust::tuple<Args...> &x, const int &y) const { thrust::tuple<Args...> ans = x; devide_tuple_impl(ans, y, thrust::make_index_sequence<sizeof...(Args)>{}); thrust::get<Index>(ans).normalize(); return ans; } }; struct check_distance_threshold_functor { check_distance_threshold_functor(float distance_threshold) : distance_threshold_(distance_threshold){}; const float distance_threshold_; __device__ bool operator()(thrust::tuple<int, float> x) const { const float dist = thrust::get<1>(x); return (dist > 0 && dist < distance_threshold_); } }; } // namespace std::shared_ptr<PointCloud> PointCloud::SelectByIndex( const utility::device_vector<size_t> &indices, bool invert) const { auto output = std::make_shared<PointCloud>(); if (invert) { size_t n_out = points_.size() - indices.size(); utility::device_vector<size_t> sorted_indices = indices; thrust::sort(utility::exec_policy(0)->on(0), sorted_indices.begin(), sorted_indices.end()); utility::device_vector<size_t> inv_indices(n_out); thrust::set_difference(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(points_.size()), sorted_indices.begin(), sorted_indices.end(), inv_indices.begin()); SelectByIndexImpl(*this, *output, inv_indices); } else { SelectByIndexImpl(*this, *output, indices); } return output; } std::shared_ptr<PointCloud> PointCloud::VoxelDownSample( float voxel_size) const { auto output = std::make_shared<PointCloud>(); if (voxel_size <= 0.0) { utility::LogWarning("[VoxelDownSample] voxel_size <= 0.\n"); return output; } const Eigen::Vector3f voxel_size3 = Eigen::Vector3f(voxel_size, voxel_size, voxel_size); const Eigen::Vector3f voxel_min_bound = GetMinBound() - voxel_size3 * 0.5; const Eigen::Vector3f voxel_max_bound = GetMaxBound() + voxel_size3 * 0.5; if (voxel_size * 
std::numeric_limits<int>::max() < (voxel_max_bound - voxel_min_bound).maxCoeff()) { utility::LogWarning("[VoxelDownSample] voxel_size is too small.\n"); return output; } const int n = points_.size(); const bool has_normals = HasNormals(); const bool has_colors = HasColors(); compute_key_functor ck_func(voxel_min_bound, voxel_size); utility::device_vector<Eigen::Vector3i> keys(n); thrust::transform(points_.begin(), points_.end(), keys.begin(), ck_func); utility::device_vector<Eigen::Vector3f> sorted_points = points_; output->points_.resize(n); utility::device_vector<int> counts(n); thrust::equal_to<Eigen::Vector3i> binary_pred; if (!has_normals && !has_colors) { thrust::sort_by_key(utility::exec_policy(0)->on(0), keys.begin(), keys.end(), make_tuple_begin(sorted_points)); add_tuple_functor<Eigen::Vector3f, int> add_func; auto begin = make_tuple_begin(output->points_, counts); auto end = thrust::reduce_by_key( utility::exec_policy(0)->on(0), keys.begin(), keys.end(), make_tuple_iterator(sorted_points.begin(), thrust::make_constant_iterator(1)), thrust::make_discard_iterator(), begin, binary_pred, add_func); int n_out = thrust::distance(begin, end.second); devide_tuple_functor<Eigen::Vector3f> dv_func; auto output_begins = make_tuple_begin(output->points_); thrust::transform(output_begins, output_begins + n_out, counts.begin(), output_begins, dv_func); output->points_.resize(n_out); } else if (has_normals && !has_colors) { utility::device_vector<Eigen::Vector3f> sorted_normals = normals_; output->normals_.resize(n); thrust::sort_by_key(utility::exec_policy(0)->on(0), keys.begin(), keys.end(), make_tuple_begin(sorted_points, sorted_normals)); add_tuple_functor<Eigen::Vector3f, Eigen::Vector3f, int> add_func; auto begin = make_tuple_begin(output->points_, output->normals_, counts); auto end = thrust::reduce_by_key( utility::exec_policy(0)->on(0), keys.begin(), keys.end(), make_tuple_iterator(sorted_points.begin(), sorted_normals.begin(), 
thrust::make_constant_iterator(1)), thrust::make_discard_iterator(), begin, binary_pred, add_func); int n_out = thrust::distance(begin, end.second); normalize_and_devide_tuple_functor<1, Eigen::Vector3f, Eigen::Vector3f> dv_func; auto output_begins = make_tuple_begin(output->points_, output->normals_); thrust::transform(output_begins, output_begins + n_out, counts.begin(), output_begins, dv_func); resize_all(n_out, output->points_, output->normals_); } else if (!has_normals && has_colors) { utility::device_vector<Eigen::Vector3f> sorted_colors = colors_; resize_all(n, output->colors_); thrust::sort_by_key(utility::exec_policy(0)->on(0), keys.begin(), keys.end(), make_tuple_begin(sorted_points, sorted_colors)); add_tuple_functor<Eigen::Vector3f, Eigen::Vector3f, int> add_func; auto begin = make_tuple_begin(output->points_, output->colors_, counts); auto end = thrust::reduce_by_key( utility::exec_policy(0)->on(0), keys.begin(), keys.end(), make_tuple_iterator(sorted_points.begin(), sorted_colors.begin(), thrust::make_constant_iterator(1)), thrust::make_discard_iterator(), begin, binary_pred, add_func); int n_out = thrust::distance(begin, end.second); devide_tuple_functor<Eigen::Vector3f, Eigen::Vector3f> dv_func; auto output_begins = make_tuple_begin(output->points_, output->colors_); thrust::transform(output_begins, output_begins + n_out, counts.begin(), output_begins, dv_func); resize_all(n_out, output->points_, output->colors_); } else { utility::device_vector<Eigen::Vector3f> sorted_normals = normals_; utility::device_vector<Eigen::Vector3f> sorted_colors = colors_; resize_all(n, output->normals_, output->colors_); thrust::sort_by_key( utility::exec_policy(0)->on(0), keys.begin(), keys.end(), make_tuple_begin(sorted_points, sorted_normals, sorted_colors)); add_tuple_functor<Eigen::Vector3f, Eigen::Vector3f, Eigen::Vector3f, int> add_func; auto begin = make_tuple_begin(output->points_, output->normals_, output->colors_, counts); auto end = thrust::reduce_by_key( 
utility::exec_policy(0)->on(0), keys.begin(), keys.end(), make_tuple_iterator(sorted_points.begin(), sorted_normals.begin(), sorted_colors.begin(), thrust::make_constant_iterator(1)), thrust::make_discard_iterator(), begin, binary_pred, add_func); int n_out = thrust::distance(begin, end.second); normalize_and_devide_tuple_functor<1, Eigen::Vector3f, Eigen::Vector3f, Eigen::Vector3f> dv_func; auto output_begins = make_tuple_begin(output->points_, output->normals_, output->colors_); thrust::transform(output_begins, output_begins + n_out, counts.begin(), output_begins, dv_func); resize_all(n_out, output->points_, output->normals_, output->colors_); } utility::LogDebug( "Pointcloud down sampled from {:d} points to {:d} points.\n", (int)points_.size(), (int)output->points_.size()); return output; } std::shared_ptr<PointCloud> PointCloud::UniformDownSample( size_t every_k_points) const { const bool has_normals = HasNormals(); const bool has_colors = HasColors(); auto output = std::make_shared<PointCloud>(); if (every_k_points == 0) { utility::LogError("[UniformDownSample] Illegal sample rate."); return output; } const int n_out = points_.size() / every_k_points; output->points_.resize(n_out); if (has_normals) output->normals_.resize(n_out); if (has_colors) output->colors_.resize(n_out); thrust::system::cuda::unique_eager_event copy_e[3]; thrust::strided_range< utility::device_vector<Eigen::Vector3f>::const_iterator> range_points(points_.begin(), points_.end(), every_k_points); copy_e[0] = thrust::async::copy(utility::exec_policy(utility::GetStream(0)) ->on(utility::GetStream(0)), range_points.begin(), range_points.end(), output->points_.begin()); if (has_normals) { thrust::strided_range< utility::device_vector<Eigen::Vector3f>::const_iterator> range_normals(normals_.begin(), normals_.end(), every_k_points); copy_e[1] = thrust::async::copy(utility::exec_policy(utility::GetStream(1)) ->on(utility::GetStream(1)), range_normals.begin(), range_normals.end(), 
output->normals_.begin()); } if (has_colors) { thrust::strided_range< utility::device_vector<Eigen::Vector3f>::const_iterator> range_colors(colors_.begin(), colors_.end(), every_k_points); copy_e[2] = thrust::async::copy(utility::exec_policy(utility::GetStream(2)) ->on(utility::GetStream(2)), range_colors.begin(), range_colors.end(), output->colors_.begin()); } copy_e[0].wait(); if (has_normals) { copy_e[1].wait(); } if (has_colors) { copy_e[2].wait(); } return output; } std::tuple<std::shared_ptr<PointCloud>, utility::device_vector<size_t>> PointCloud::RemoveRadiusOutliers(size_t nb_points, float search_radius) const { if (nb_points < 1 || search_radius <= 0) { utility::LogError( "[RemoveRadiusOutliers] Illegal input parameters," "number of points and radius must be positive"); } KDTreeFlann kdtree; kdtree.SetGeometry(*this); utility::device_vector<int> tmp_indices; utility::device_vector<float> dist; kdtree.SearchRadius(points_, search_radius, nb_points + 1, tmp_indices, dist); const size_t n_pt = points_.size(); utility::device_vector<size_t> counts(n_pt); utility::device_vector<size_t> indices(n_pt); thrust::repeated_range<thrust::counting_iterator<size_t>> range( thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(n_pt), nb_points + 1); thrust::reduce_by_key( utility::exec_policy(0)->on(0), range.begin(), range.end(), thrust::make_transform_iterator( tmp_indices.begin(), [] __device__(int idx) { return (int)(idx >= 0); }), thrust::make_discard_iterator(), counts.begin(), thrust::equal_to<size_t>(), thrust::plus<size_t>()); auto begin = make_tuple_iterator(indices.begin(), thrust::make_discard_iterator()); auto end = thrust::copy_if( enumerate_begin(counts), enumerate_end(counts), begin, [nb_points] __device__(const thrust::tuple<size_t, size_t> &x) { return thrust::get<1>(x) > nb_points; }); indices.resize(thrust::distance(begin, end)); return std::make_tuple(SelectByIndex(indices), indices); } std::tuple<std::shared_ptr<PointCloud>, 
utility::device_vector<size_t>> PointCloud::RemoveStatisticalOutliers(size_t nb_neighbors, float std_ratio) const { if (nb_neighbors < 1 || std_ratio <= 0) { utility::LogError( "[RemoveStatisticalOutliers] Illegal input parameters, number " "of neighbors and standard deviation ratio must be positive"); } if (points_.empty()) { return std::make_tuple(std::make_shared<PointCloud>(), utility::device_vector<size_t>()); } KDTreeFlann kdtree; kdtree.SetGeometry(*this); const size_t n_pt = points_.size(); utility::device_vector<float> avg_distances(n_pt); utility::device_vector<size_t> indices(n_pt); utility::device_vector<size_t> counts(n_pt); utility::device_vector<int> tmp_indices; utility::device_vector<float> dist; kdtree.SearchKNN(points_, int(nb_neighbors), tmp_indices, dist); thrust::repeated_range<thrust::counting_iterator<size_t>> range( thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(n_pt), nb_neighbors); thrust::reduce_by_key( utility::exec_policy(0)->on(0), range.begin(), range.end(), make_tuple_iterator(thrust::make_constant_iterator<size_t>(1), dist.begin()), thrust::make_discard_iterator(), make_tuple_iterator(counts.begin(), avg_distances.begin()), thrust::equal_to<size_t>(), [] __device__(const thrust::tuple<size_t, float> &rhs, const thrust::tuple<size_t, float> &lhs) { float rd = thrust::get<1>(rhs); size_t rc = thrust::get<0>(rhs); if (isinf(rd) || rd < 0.0) { rd = 0.0; rc = 0; } float ld = thrust::get<1>(lhs); size_t lc = thrust::get<0>(lhs); if (isinf(ld) || ld < 0.0) { ld = 0.0; lc = 0; } return thrust::make_tuple(rc + lc, rd + ld); }); thrust::transform(avg_distances.begin(), avg_distances.end(), counts.begin(), avg_distances.begin(), [] __device__(float avg, size_t cnt) { return (cnt > 0) ? 
avg / (float)cnt : -1.0; }); auto mean_and_count = thrust::transform_reduce( utility::exec_policy(0)->on(0), avg_distances.begin(), avg_distances.end(), [] __device__(float const &x) { return thrust::make_tuple(max(x, 0.0f), (size_t)(x >= 0.0)); }, thrust::make_tuple(0.0f, size_t(0)), add_tuple_functor<float, size_t>()); const size_t valid_distances = thrust::get<1>(mean_and_count); if (valid_distances == 0) { return std::make_tuple(std::make_shared<PointCloud>(), utility::device_vector<size_t>()); } float cloud_mean = thrust::get<0>(mean_and_count); cloud_mean /= valid_distances; const float sq_sum = thrust::transform_reduce( utility::exec_policy(0)->on(0), avg_distances.begin(), avg_distances.end(), [cloud_mean] __device__(const float x) { return (x > 0) ? (x - cloud_mean) * (x - cloud_mean) : 0; }, 0.0, thrust::plus<float>()); // Bessel's correction const float std_dev = std::sqrt(sq_sum / (valid_distances - 1)); const float distance_threshold = cloud_mean + std_ratio * std_dev; check_distance_threshold_functor th_func(distance_threshold); auto begin = make_tuple_iterator(indices.begin(), thrust::make_discard_iterator()); auto end = thrust::copy_if(enumerate_begin(avg_distances), enumerate_end(avg_distances), begin, th_func); indices.resize(thrust::distance(begin, end)); return std::make_tuple(SelectByIndex(indices), indices); }
6b5a1efc37f83cc1daf13e0c51be63c74febb88a.hip
// !!! This is a file automatically generated by hipify!!! //advanced cuda system //sped up algorithms /* Code guide: first matrices are initialized. they are used to keep track of the particles, the probabilities to jump, the substrate, and the general electric potential. Input parameters are also taken in. paramLoad tells you which parameters can be changed in the input file. The general electric potential is calculated in cuda. This reduces a n^4 problem to a n^2 one. A site is picked at random at the CPU (part of the monte-carlo process) and the probabilities with the particles around it are calculated at the gpu. The probabilities are then returned to the CPU where the second part of the Monte-Carlo algorithm occurs. Here, the site which the subject particle will interact with is chosen randomly but with weights according to the probabilities. The jump is made, and the system starts over. The relaxation is based on a previous code. Basically, it consists of 2 steps. First, every site is tested against its 4 neighbors to see if any quick optimization can be done. Then the density of states is found. This gives a quick global view of which sites can be swapped to minimize system energy. 
*/ #include <stdio.h> #include <stdlib.h> /* for rand() */ #include <unistd.h> /* for getpid() */ #include <time.h> /* for time() */ #include <math.h> #include <assert.h> #include <iostream> #include <fstream> #include <ctime> #include <thrust/scan.h> #include <thrust/device_ptr.h> #include <thrust/reduce.h> #include <thrust/extrema.h> #include <hip/hip_runtime.h> #include <sstream> #include <string> #include "closedMott.h" #define PI 3.1415926535897932384626433832795 #define TWOPI 6.28318530717958647692528676655901 // construct REAL "type," depending on desired precision // set the maximum number of threads #ifdef DOUBLE #define REAL double #define MAXT 256 #else #define REAL double #define MAXT 256 // #define REAL float // #define MAXT 512 #endif using namespace std; int currentCount = 0; int countThese = 1; int tIndex = 0; typedef struct { REAL re; REAL im; } COMPLEX; //absolute value on the gpu __device__ int G_abs(int a) { if (a < 0) { a = -1*a; } return a; } //wrote own modulo algorithms since computer modulo (%) does negative modulo's incorrectly (-3%10 = -3 instead of 7) __device__ int G_mod(int a,int b) { while (a < 0) { a = a + b; } while (a >= b) { a = a - b; } return a; } // I need a version of my modulo for the gpu and for the CPU int C_mod(int a, int b) { while (a < 0) { a = a + b; } while (a >= b) { a = a - b; } return a; } //gpu matrix copying __global__ void matrixCopy(int intN, REAL * matrixIn,REAL *matrixOut){ int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if (idx < intN*intN) { matrixOut[idx] = matrixIn[idx]; } } //Here, the gpu's find the general electric potential at each lattice site. 
__global__ void findPotential(REAL *particles,REAL *potentials, REAL *boxR,parameters p) { int i,j,checkx,checky; int intN = (int) p.N; int halfRange = p.N/2;//gets forced to (N-1)/2 since odd int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; double sum,distanceTerm; int k,l; if(idx<intN*intN) { i = idx/intN; j = idx%intN; sum = 0; for(l = 0 ; l < p.N; l++) { for(k = 0; k < p.N; k++) { checkx = i - halfRange + k; checky = j - halfRange + l; if(g_checkIn(checkx,checky,p.N)) { if ((k != halfRange) || (l != halfRange)) { //dont do self-potential distanceTerm = boxR[i + intN*j + intN*intN*k + intN*intN*intN*l]; sum = sum + particles[(checkx) + intN*(checky)]/distanceTerm; } } } } potentials[i + intN*j] = sum*p.changeToV; } } //check for a CUDA error, use argument for identification bool errorAsk(const char *s="n/a") { hipError_t err=hipGetLastError(); if(err==hipSuccess) return false; printf("CUDA error [%s]: %s\n",s,hipGetErrorString(err)); return true; }; __device__ double findBlockade(int p,int thisp,double Ec) { int deltaP = G_abs(thisp - p); int rho; //for rho*Ec constant multiplier regarding how much stacking is happening if (deltaP==0) { //if they are the same, then something is trying to stack return Ec; } if (deltaP == 2) { //if they are off by "1" then one guy is moving freely return 0; } rho = deltaP/2; // if it's not the first two, then the system is relaxing return -rho*Ec; } //The first half of the heart of this program. Here the probabilities are calculated based on the energy change of the system and on the localization of the electron. 
__global__ void findProbabilities(REAL *KdArray,REAL *TField,REAL *probabilities,REAL *particles,REAL *potentials,REAL *substrate,REAL *boxR,REAL *watcher,int tStep,int x, int y, parameters p) { // REAL number = 11; int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; int i,j,thisi,thisj,thatp,thisp,hyperIndex,N; double potConstant,currentPart,distancePart,blockadePart,potentialPart,substratePart,energyPart,electronT; // double doublej, doublei,r; // potConstant = 1.17e-13; // potConstant = Ec; potConstant = -1; N = p.N; if(idx<N*N) { i = idx/N; j = idx%N; i = i-N/2; j = j-N/2; thisi = i + x; thisj = j + y; if (g_checkIn(thisi,thisj,N)) { hyperIndex = x + N*y + N*N*(idx/N) + N*N*N*(idx%N); // doublei = i; // doublej = j; // r = sqrt(doublei*doublei + doublej*doublej); // distancePart = -2.000*boxR[idx]; distancePart = -2*boxR[hyperIndex]/(p.xi); // distancePart = 0; thatp = particles[x + N*y]; thisp = particles[thisi + N*thisj]; if(particles[x + N*y] > particles[thisi + N*thisj]) { //situation 1 blockadePart = -1*findBlockade(thatp,thisp,p.Ec)/boxR[hyperIndex]; potentialPart = -sqrt(KdArray[thisi + N*thisj]*KdArray[x + N*y])*potConstant*(potentials[thisi + N*thisj] - potentials[x + N*y] - p.changeToV/boxR[hyperIndex]); substratePart = substrate[thisi+ N*thisj]; currentPart = p.eV*i; electronT = TField[x + N*y]; // currentPart = 0; // blockadePart = 0; // potentialPart= 0; // substratePart= 0; } if (particles[x + N*y] < particles[thisi + N*thisj]) { //situation 2 blockadePart = -1*findBlockade(thatp,thisp,p.Ec)/boxR[hyperIndex]; potentialPart = sqrt(KdArray[thisi + N*thisj]*KdArray[x + N*y])*potConstant*(potentials[thisi + N*thisj] - potentials[x + N*y] + p.changeToV/boxR[hyperIndex]); substratePart = -substrate[thisi + N*thisj]; currentPart = -p.eV*i; electronT = TField[thisi + N*thisj]; // currentPart = 0; // substratePart = 0; // potentialPart = 0; // blockadePart = 0; } if ( particles[x + N*y] == particles[thisi + N*thisj] ){// stacking electronT = 
(TField[x + N*y] + TField[thisi + N*thisj])/2; if (particles[x + N*y] < 0) { //then p1 is getting more negative and p2 is getting more positive (like in situation 1) blockadePart = -1*findBlockade(thatp,thisp,p.Ec)/boxR[hyperIndex]; potentialPart = sqrt(KdArray[thisi + N*thisj]*KdArray[x + N*y])*potConstant*(potentials[thisi + N*thisj] - potentials[x + N*y] + p.changeToV/boxR[hyperIndex]); substratePart = -substrate[thisi + N*thisj]; currentPart = -p.eV*i; } else { //then p1 is getting more positive and p2 is getting more negative (situation2-like transfer is happening) blockadePart = -1*findBlockade(thatp,thisp,p.Ec)/boxR[hyperIndex]; potentialPart = -sqrt(KdArray[thisi + N*thisj]*KdArray[x + N*y])*potConstant*(potentials[thisi + N*thisj] - potentials[x + N*y] - p.changeToV/boxR[hyperIndex]); substratePart = substrate[thisi+ N*thisj]; currentPart = p.eV*i; } // currentPart = 0; // substratePart = 0; // potentialPart = 0; // blockadePart = 0; } energyPart = p.alphaTwo*(blockadePart+potentialPart+substratePart+currentPart)/electronT; if (energyPart > 0) { energyPart = 0; } probabilities[idx] = exp(distancePart+energyPart); // watcher[idx] = distancePart+p.alphaTwo*(blockadePart+potentialPart+substratePart+currentPart)/electronT; watcher[idx] = KdArray[idx]; if ((thisi==x && thisj==y ) ){ // probabilities[idx] = 1; //force probability of jumping to self to 1 (avoids 0/0 problems) // probabilities[idx] = 0; //rejection free monte carlo algorithm probabilities[idx] = p.rejection; } } else { probabilities[idx] = 0; } } }; __device__ void simpleFill(REAL *jumpRecord, REAL fillVal, int t) { jumpRecord[t] = fillVal; } __device__ void fillRecord(REAL *jumpRecord,REAL fillVal,int N) { int found = 0; int n = 0; while ((found == 0) && (n < N)) { if(jumpRecord[n] == 999) { found = 1; jumpRecord[n] = fillVal; } n++; } } //calculates which direction the electron went and how far (not necessary if you are not measuring anything) __global__ void interaction(parameters p,int x,int 
y,int newx,int newy,REAL *particles,REAL *jumpRecord,REAL *boxR,int tStep) { int N = p.N,obsx,obsy; int whichWay = 0; REAL fillVal; int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx < 1) {//only needs to be done once if (particles[x + y*N] == particles[newx + newy*N]) { if (particles[x + y*N] > 0) { whichWay = 1; } else { whichWay = -1; } } else if (particles[x + y*N] > particles[newx + newy*N] ) { whichWay = 1; } else if (particles[x + y*N] < particles[newx + newy*N]) { whichWay = -1; } obsx = newx + ( p.N/2 - x); obsy = newy + ( p.N/2 - y); if (g_checkIn(obsx,obsy,p.N)) { if(p.grabJ == 1) { fillVal = -whichWay*(obsx-p.N/2); } if(p.grabJ == 0) { fillVal = boxR[x + N*y + N*N*obsx + N*N*N*obsy]/p.L; } } else { fillVal = 1e9; //ridiculous value will highlight errors } // fillRecord(jumpRecord,fillVal,p.recordLength); simpleFill(jumpRecord,fillVal,tStep); } } //this section does the various outputs such as particle positions or general electric potential //this one outputs how far electrons jumped void showJump(int N,int x,int y,int newx,int newy,REAL* hereP) { double r,deltax,deltay; deltax = (x-newx); deltay = (y-newy); r = sqrt(deltax*deltax + deltay*deltay); cout<<r<<endl; } //this is for showing the electron positions void showMove(REAL* hereP,int N) { int i,j; for ( j = 0; j < N;j++) { for( i = 0; i < N; i++) { cout<<hereP[i + N*j]<<" "; } cout<<endl; } } //sums the potentials (during relaxation this should generally decrease) double sumEnergy(REAL* hereField,int N) { int i,j; double sum; sum = 0; for ( j = 0; j < N;j++) { for( i = 0; i < N; i++) { sum = sum + hereField[i + N*j]; } } return sum; } //to double check i had no particles leaking void countParticles(REAL* hereP, int N) { int i,j; double sum; sum = 0; for ( j = 0; j < N;j++) { for( i = 0; i < N; i++) { sum = sum + hereP[i + N*j]; } } cout<<sum<<endl; } //finalizes the monte carlo algorithm by picking a site at random (weighted) __global__ void weightedWheel(parameters p, double randomNum,REAL 
*reducedProb, int *picked) { int N = p.N; int idx = blockIdx.x*blockDim.x + threadIdx.x; double pickedValue = randomNum*reducedProb[N*N -1]; if ((idx > 0) && (idx < N*N)) { if ((reducedProb[idx - 1] < pickedValue) && (reducedProb[idx] > pickedValue)) { picked[0] = idx; } } if (idx == 0) { if (pickedValue < reducedProb[0]) { picked[0] =idx; } } } void printLineCPU(REAL * c_line, char *fileName) { int k; FILE *fp1; fp1 = fopen(fileName, "w"); for (k = 0; k < tIndex; k++) { fprintf(fp1, "%lf ", c_line[k]); } fclose(fp1); } //print the CPU matrix to a file void printBoxCPU(REAL *c_array,int size, char * fileName) { int k,l; FILE *fp1; // char str1[256]; // sprintf(str1, "box.txt"); fp1 = fopen(fileName, "w"); for (k = 0; k < size ; k++){ for(l = 0; l < size; l++) { fprintf(fp1, "%lf ",1e9*c_array[k + l*size]); } fprintf(fp1,"\n"); } //cleanup fclose(fp1); } void printMagnifyGPU(REAL *g_array,int size,char * fileName) { REAL *c_array; c_array = new REAL[size*size]; int k,l; hipMemcpy(c_array,g_array,size*size*sizeof(REAL),hipMemcpyDeviceToHost); FILE *fp1; fp1 = fopen(fileName, "w"); for (k = 0; k < size ; k++){ for(l = 0; l < size; l++) { fprintf(fp1, "%lf ",c_array[k + l*size]*1e100); } fprintf(fp1,"\n"); } //cleanup fclose(fp1); delete[] c_array; } /* void printBoxGPU(REAL *g_array,int size,char * fileName) { REAL *c_array; c_array = new REAL[size*size]; int k,l; hipMemcpy(c_array,g_array,size*size*sizeof(REAL),hipMemcpyDeviceToHost); ofstream myfile; myfile.open (fileName); for (k = 0; k < size ; k++){ for(l = 0; l < size; l++) { myfile<<c_array[k + l*size]<<" "; } myfile<<endl; } myfile.close(); delete[] c_array; } */ //print the gpu matrix to a file void printBoxGPU(REAL *g_array,int size,char * fileName) { REAL *c_array; c_array = new REAL[size*size]; int k,l; hipMemcpy(c_array,g_array,size*size*sizeof(REAL),hipMemcpyDeviceToHost); FILE *fp1; fp1 = fopen(fileName, "w"); for (k = 0; k < size ; k++){ for(l = 0; l < size; l++) { /* if(c_array[k + l*size] == 0) { 
c_array[k + l*size] = 999; } */ fprintf(fp1, "%lf ",c_array[l + k*size]); //transposed l & k since thats how octave reads it } fprintf(fp1,"\n"); } //cleanup fclose(fp1); delete[] c_array; } void printIntGPU(int *g_array,int size,char * name) {//can probably overload using C++11 int *c_array; c_array = new int[size]; int k; hipMemcpy(c_array,g_array,size*sizeof(int),hipMemcpyDeviceToHost); FILE *fp1; fp1 = fopen(name, "w"); for (k = 0; k < size ; k++){ fprintf(fp1, "%i ",c_array[k]); fprintf(fp1,"\n"); } //cleanup fclose(fp1); delete[] c_array; } //print gpu array to a file void printLineGPU(REAL *g_array,int size,char * name) { REAL *c_array; c_array = new REAL[size]; int k; hipMemcpy(c_array,g_array,size*sizeof(REAL),hipMemcpyDeviceToHost); FILE *fp1; fp1 = fopen(name, "w"); for (k = 0; k < size ; k++){ fprintf(fp1, "%lf ",c_array[k]); fprintf(fp1,"\n"); } //cleanup fclose(fp1); delete[] c_array; } /* //print a single number to a file void printSingle(double nimeRun,char *fileName){ FILE *fp1; fp1 = fopen(fileName, "w"); fprintf(fp1, "%lf ",timeRun); fclose(fp1); } */ //loading previous results REAL *loadMatrix(REAL *hereMatrix,char* fileName) { // infile.open (fileName, ifstream::in); // REAL * buffer; // ifstream read(fileName); ifstream infile(fileName); string line; int counter = 0; double d; while (getline(infile, line)) { istringstream iss(line); if (iss >> d) { hereMatrix[counter] = d; counter++; } } return hereMatrix; } //tracking really the sum of the probabilities void trackTime(REAL *timeRun, REAL sum,int recordLength) { if (tIndex < recordLength) { //prevent bad bad memory writing timeRun[tIndex] = sum; tIndex++; } } //second part of the heart of this code. Here the probabilities are summed and a number is picked from 0 to that number. The code then sums through the probabilities untill it reaches that number. In this way, probabilities which are higher will have a larger chance of getting picked. 
void particleScout(vectors &v,int x,int y, double randomNum,int blocks, int threads,parameters p) { int lastx,lasty,newx,newy; thrust::device_ptr<REAL> g_go = thrust::device_pointer_cast(v.probabilities); thrust::device_ptr<REAL> g_return = thrust::device_pointer_cast(v.reducedProb); // double sum; thrust::inclusive_scan(g_go, g_go + p.N*p.N, g_return); // in-place scan // sum = thrust::reduce(g_go, g_go + p.N*p.N); // trackTime(v.timeRun, sum,p.recordLength); hipLaunchKernelGGL(( weightedWheel), dim3(blocks),dim3(threads), 0, 0, p, randomNum,v.reducedProb, v.picked); hipMemcpy(v.herePicked,v.picked,sizeof(int),hipMemcpyDeviceToHost); /* hipMemcpy(v.hereProb,v.probabilities,p.N*p.N*sizeof(REAL),hipMemcpyDeviceToHost); cout<<"cell "<<v.herePicked[0]<<" was picked with a weight "<<v.hereProb[v.herePicked[0]]<<" out of a total "<<sum<<endl; // printMagnifyGPU(v.reducedProb,p.N,"reduced.dat"); */ // printBoxGPU(v.reducedProb,p.N,"reduced.dat"); //printMagnifyGPU(v.probabilities,p.N,"magnified.dat"); lastx = v.herePicked[0]/p.N; lasty = v.herePicked[0]%p.N; newx = x - p.N/2 + lastx; newy = y - p.N/2 + lasty; if (c_checkIn(newx,newy,p.N)) { //cout<<x<<" "<<y<<" "<<newx<<" "<<newy<<endl; hipLaunchKernelGGL(( interaction), dim3(blocks),dim3(threads), 0, 0, p,x,y,newx,newy,v.particles,v.jumpRecord,v.boxR,v.tStep); hipLaunchKernelGGL(( potSwap), dim3(blocks),dim3(threads), 0, 0, p, x, y,newx,newy,p.N,v.particles,v.boxR,v.potentials); hipLaunchKernelGGL(( particleMove), dim3(blocks),dim3(threads), 0, 0, x, y, newx,newy,p.N,v.particles); hipLaunchKernelGGL(( findE), dim3(blocks),dim3(threads), 0, 0, p.N, v.Ematrix,v.particles,v.potentials,v.substrate); } errorAsk("particleJump"); } __global__ void Eflip(int intN, double boltzmann, REAL *tempDos,REAL *Ematrix,REAL *TField) { int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(idx<intN*intN) { tempDos[idx] = exp(-Ematrix[idx]/(boltzmann*TField[idx])); } } void findFirst(parameters p,int blocks,int 
threads,vectors &v) { double randomNum; hipLaunchKernelGGL(( Eflip), dim3(blocks),dim3(threads), 0, 0, p.N, p.boltzmann, v.tempDos,v.Ematrix,v.TField); errorAsk("Eflip"); //check for error thrust::device_ptr<REAL> g_go = thrust::device_pointer_cast(v.tempDos);//tempDos memory being recycled thrust::device_ptr<REAL> g_return = thrust::device_pointer_cast(v.reducedProb);// also reduced prob memory thrust::inclusive_scan(g_go, g_go + p.N*p.N, g_return); // in-place scan randomNum = drand48();//place where the wheel lands hipLaunchKernelGGL(( weightedWheel), dim3(blocks),dim3(threads), 0, 0, p, randomNum,v.reducedProb, v.picked); hipMemcpy(v.herePicked,v.picked,sizeof(int),hipMemcpyDeviceToHost); } void findTime(parameters p,int blocks,int threads,vectors &v) { int x,y; double totalSum,result; totalSum = 0; thrust::device_ptr<REAL> g_go = thrust::device_pointer_cast(v.probabilities); for (y = 0; y < p.N; y++) { for(x = 0;x < p.N; x++) { hipLaunchKernelGGL(( findProbabilities), dim3(blocks),dim3(threads), 0, 0, v.KdArray,v.TField,v.probabilities,v.particles,v.potentials,v.substrate,v.boxR,v.watcher,v.tStep,x,y,p); result = thrust::reduce(g_go, g_go + p.N*p.N); totalSum += result; /* cout<<result<<endl; if ((result < -0) && (result > -10000000)) { //if ((result ==0)) { printBoxGPU(v.probabilities,p.N,p.boxName); cout<<"gets to here"<<endl; } */ } } // if (result == 0) { // printBoxGPU(v.probabilities,p.N,p.boxName); // cout<<"gets to here"<<endl; // } // cout<<totalSum<<endl; trackTime(v.sumRun, totalSum,p.recordLength); } //the particles are picked here. This is also where the system is run from. 
(find potential, find probabilities, and move particle are done here) void findJump(vectors &v,int threads,int blocks,parameters p) { int x,y; double randomNum; // printBoxGPU(v.potentials,p.N,"pot0.dat"); findTime(p,blocks,threads,v); // printBoxGPU(v.potentials,p.N,"pot1.dat"); findFirst( p, blocks,threads,v);//find the first particle according to exp(-beta) // printBoxGPU(v.potentials,p.N,"pot2.dat"); x = v.herePicked[0]%p.N; y = v.herePicked[0]/p.N; hipLaunchKernelGGL(( findProbabilities), dim3(blocks),dim3(threads), 0, 0, v.KdArray,v.TField,v.probabilities,v.particles,v.potentials,v.substrate,v.boxR,v.watcher,v.tStep,x,y,p); errorAsk("find probabilities"); //check for error // printBoxGPU(v.potentials,p.N,"pot3.dat"); randomNum = drand48(); particleScout(v, x, y, randomNum, blocks, threads,p); // printBoxGPU(v.potentials,p.N,"pot4.dat"); } //calculate energy contribution from stacked particles __global__ void G_stackE(REAL *particles,REAL *stacked,int intN) { int i,j; double blockade = 1.97e-5; int idx = blockIdx.x*blockDim.x + threadIdx.x; i = idx/intN; j = idx%intN; if(idx < intN*intN) { if (particles[i + j*intN] > 1) { stacked[idx] = blockade; } } } //calculate energy contribution from the substrate __global__ void G_subE(REAL *substrate,REAL *particles,REAL *combined,int intN) { int idx = blockIdx.x*blockDim.x + threadIdx.x; if(idx < intN*intN) { combined[idx] = substrate[idx]*particles[idx]; } } //filling a gpu array using CPU numbers __global__ void fillSum(int index,int intN,int addSub,REAL *sumArray,REAL numToInsert) { // int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; // if(idx < 1) { REAL dSign = (REAL) addSub; sumArray[index] = dSign*numToInsert; // } } //change particle to hole (or back) __global__ void particleSwitch(int i,int j,int intN,REAL *particles) { if (particles[i + j*intN] == -1) { particles[i + j*intN]= 1; } else { particles[i + j*intN]= -1; } } //fill dos (gpu) matrix with sums (CPU) __global__ void dosPut(int i,int 
j,int intN,REAL *Ematrix,REAL sum) { Ematrix[i + j*intN] = sum; } //find the density of states at each site void G_dos(REAL * sumArray,REAL *extraArray,REAL *boxR,REAL *particles,REAL *substrate,REAL *Ematrix,REAL *potentials,int slices,int threads,int blocks,parameters ,parameters p) { int i,j,intN;//not sure about Sums intN = p.N; thrust::device_ptr<REAL> g_go = thrust::device_pointer_cast(potentials); thrust::device_ptr<REAL> sumArrayPtr = thrust::device_pointer_cast(sumArray); thrust::device_ptr<REAL> extraArrayPtr = thrust::device_pointer_cast(extraArray); REAL result; for (j = 0; j < intN; j++) { for (i = 0; i < intN; i++) { hipLaunchKernelGGL(( findPotential), dim3(blocks),dim3(threads), 0, 0, particles,potentials, boxR,p); result = thrust::reduce(g_go, g_go + intN*intN); hipLaunchKernelGGL(( fillSum), dim3(blocks),dim3(threads), 0, 0, 0,intN,-1,sumArray,result); // hipLaunchKernelGGL(( fillSum), dim3(blocks),dim3(threads), 0, 0, 0,intN,1,sumArray,result); hipLaunchKernelGGL(( G_subE), dim3(blocks),dim3(threads), 0, 0, substrate,particles,extraArray,intN); result = thrust::reduce(extraArrayPtr, extraArrayPtr + intN*intN); hipLaunchKernelGGL(( fillSum), dim3(blocks),dim3(threads), 0, 0, 1,intN,-1,sumArray,result); // hipLaunchKernelGGL(( fillSum), dim3(blocks),dim3(threads), 0, 0, 1,intN,1,sumArray,result); hipLaunchKernelGGL(( G_stackE), dim3(blocks),dim3(threads), 0, 0, particles,extraArray,intN); result = thrust::reduce(extraArrayPtr, extraArrayPtr + intN*intN); hipLaunchKernelGGL(( fillSum), dim3(blocks),dim3(threads), 0, 0, 2,intN,-1,sumArray,result); // hipLaunchKernelGGL(( fillSum), dim3(blocks),dim3(threads), 0, 0, 2,intN,1,sumArray,result); hipLaunchKernelGGL(( particleSwitch), dim3(blocks),dim3(threads), 0, 0, i,j,intN,particles); hipLaunchKernelGGL(( findPotential), dim3(blocks),dim3(threads), 0, 0, particles,potentials, boxR,p); result = thrust::reduce(g_go, g_go + intN*intN); hipLaunchKernelGGL(( fillSum), dim3(blocks),dim3(threads), 0, 0, 
3,intN,1,sumArray,result); // hipLaunchKernelGGL(( fillSum), dim3(blocks),dim3(threads), 0, 0, 3,intN,-1,sumArray,result); hipLaunchKernelGGL(( G_subE), dim3(blocks),dim3(threads), 0, 0, substrate,particles,extraArray,intN); result = thrust::reduce(extraArrayPtr, extraArrayPtr + intN*intN); hipLaunchKernelGGL(( fillSum), dim3(blocks),dim3(threads), 0, 0, 4,intN,1,sumArray,result); // hipLaunchKernelGGL(( fillSum), dim3(blocks),dim3(threads), 0, 0, 4,intN,-1,sumArray,result); hipLaunchKernelGGL(( G_stackE), dim3(blocks),dim3(threads), 0, 0, particles,extraArray,intN); result = thrust::reduce(extraArrayPtr, extraArrayPtr + intN*intN); hipLaunchKernelGGL(( fillSum), dim3(blocks),dim3(threads), 0, 0, 5,intN,1,sumArray,result); // hipLaunchKernelGGL(( fillSum), dim3(blocks),dim3(threads), 0, 0, 5,intN,-1,sumArray,result); hipLaunchKernelGGL(( particleSwitch), dim3(blocks),dim3(threads), 0, 0, i,j,intN,particles); result = thrust::reduce(sumArrayPtr, sumArrayPtr + 6); // result = 0; hipLaunchKernelGGL(( dosPut), dim3(blocks),dim3(threads), 0, 0, i,j,intN,Ematrix,result); } } } //random substrate is created here REAL *createSub(REAL *hereS,double muVar,int N) { int i,j; for(j = 0; j < N; j++ ) { for(i = 0; i < N; i++) { hereS[i + N*j] = drand48()*muVar*2 - muVar; // if(i > nx/2) hereS[i + ny*j] = 50000000; } } return hereS; } // creates the variation in x & y matrices REAL * createDiff(REAL * hereDiff, double var, int N) { int i,j; for(j = 0; j < N; j++) { for(i = 0; i < N; i++) { hereDiff[i + N*j] = drand48()* var*2 - var; } } return hereDiff; } //fill a matrix with 0s REAL *C_zeros(double N, REAL *A) { int idx; for (idx = 0; idx < N; idx++) { A[idx] = 0; } return A; } //creates and fills matrices REAL *C_random(double N,double nparticles,REAL *A) { int idx,idy,count,index; int randx,randy; count = 0; for (idx = 0; idx < N; idx++) { for( idy = 0; idy < N; idy++) { index = int(idy + idx*N); A[index] = -1; } } while(count < nparticles) { randx = drand48()*N; randy = 
drand48()*N; randx = floor(randx); randy = floor(randy); index = int(randx*N + randy); if (A[index] < 2) { A[index] = A[index] + 1; count++; } } return A; } //creates and fills matrices when filled percent > 100% REAL *C_more(double N,double nparticles,REAL *A) { int idx,idy,count,index; int randx,randy; count = 0; for (idx = 0; idx < N; idx++) { for( idy = 0; idy < N; idy++) { index = int(idy + idx*N); A[index] = 1; } } while(count < (nparticles-N*N)) { randx = drand48()*N; randy = drand48()*N; randx = floor(randx); randy = floor(randy); index = int(randx*N + randy); if (A[index] < 2) { A[index] = A[index] + 1; count++; } } return A; } //creates the "distance hyper-matrix" 1/r REAL *createR(REAL *A,REAL *diffX, REAL *diffY,double N,double L,double xi) { double r,doublel,doublek,deltaX,deltaY; double diffXThere,diffYThere,diffXHere,diffYHere; int i,j,k,l,intN,idx,kObs,lObs,kNew,lNew; intN = N; for (idx = 0; idx < N*N*N*N; idx++) { i = idx%(intN); j = (idx%(intN*intN) - idx%(intN))/intN; k = (idx%(intN*intN*intN) - idx%(intN*intN))/(intN*intN) ; l = (idx%(intN*intN*intN*intN) - idx%(intN*intN*intN))/(intN*intN*intN) ; /* k = idx%(intN); l = (idx%(intN*intN) - idx%(intN))/intN; i = (idx%(intN*intN*intN) - idx%(intN*intN))/(intN*intN) ; j = (idx%(intN*intN*intN*intN) - idx%(intN*intN*intN))/(intN*intN*intN) ; */ doublek = (double) k; doublel = (double) l; kNew = i + k - N/2; lNew = j + l - N/2; kObs = C_mod(kNew,N); lObs = C_mod(lNew,N); diffXHere = diffX[i + intN*j]; diffXThere = diffX[kObs + intN*lObs]; if((kNew < 0) || (kNew > N)) { // diffXHere = -diffX[i + intN*j]; diffXThere = -diffX[kObs + intN*lObs]; } diffYHere = diffY[i + intN*j]; diffYThere = diffY[kObs + intN*lObs]; if((lNew < 0) || (lNew > N)) { // diffYHere = -diffY[i + intN*j]; diffYThere = -diffY[kObs + intN*lObs]; } deltaX = diffXHere - (diffXThere + L*(doublek - N/2)); deltaY = diffYHere - (diffYThere + L*(doublel - N/2)); r = sqrt(deltaX*deltaX + deltaY*deltaY); A[idx] = r; } /* for (i = 0; i < N; 
i++) { for(j = 0; j < N ; j++) { cout<<A[1 + intN*1 + intN*intN*i + intN*intN*intN*j]/L<<" "; } cout<<endl; } */ //cout<<A[1 + intN*2 + intN*intN*1 + intN*intN*intN*1]/L<<endl; return A; } //create hexagonal lattice position tensor REAL *createHex(REAL *A,REAL *diffX, REAL *diffY,double N,double L,double xi) { double r,doublel,doublek,deltaX,deltaY; double diffXThere,diffYThere,diffXHere,diffYHere; int i,j,k,l,intN,idx,kObs,lObs,kNew,lNew; intN = N; for (idx = 0; idx < N*N*N*N; idx++) { i = idx%(intN); j = (idx%(intN*intN) - idx%(intN))/intN; k = (idx%(intN*intN*intN) - idx%(intN*intN))/(intN*intN) ; l = (idx%(intN*intN*intN*intN) - idx%(intN*intN*intN))/(intN*intN*intN) ; doublek = (double) k; doublel = (double) l; kNew = i + k - N/2; lNew = j + l - N/2; kObs = C_mod(kNew,N); lObs = C_mod(lNew,N); diffXHere = diffX[i + intN*j]; diffXThere = diffX[kObs + intN*lObs]; if((kNew < 0) || (kNew > N)) { diffXThere = -diffX[kObs + intN*lObs]; } diffYHere = diffY[i + intN*j]; diffYThere = diffY[kObs + intN*lObs]; if((lNew < 0) || (lNew > N)) { diffYThere = -diffY[kObs + intN*lObs]; } if ( (l%2)==1 ){ if (doublek < N/2) { deltaX = diffXHere - (diffXThere + L*(doublek - N/2) - L/2); } else { deltaX = diffXHere - (diffXThere + L*(doublek - N/2) + L/2); } } else { deltaX = diffXHere - (diffXThere + L*(doublek - N/2)); } deltaY = diffYHere - (diffYThere + .866*L*(doublel - N/2)); r = sqrt(deltaX*deltaX + deltaY*deltaY); A[idx] = r; } /* for (i = 0; i < N; i++) { for(j = 0; j < N ; j++) { cout<<A[1 + intN*1 + intN*intN*i + intN*intN*intN*j]<<" "; } cout<<endl; } */ return A; } //clumps all of the original electrons ( to show relaxation) REAL *C_clump(double N,double nparticles,REAL *A) { int idx; for (idx = 0;idx < N*N; idx++) { A[idx] = -1; } for (idx = 0; idx < nparticles; idx++) { A[idx] = 1; } return A; } //electrons evenly spaced out (to try to calculate average jump distances with a general electric potential) REAL *C_spread(double N,double nparticles,REAL *A) { int 
idx,i,j,intN; intN = (int) N; for (idx = 0;idx < N*N; idx++) { A[idx] = -1; } for (idx = 0; idx < N*N; idx++) { i = idx/intN; j = idx%intN; if((i + j)%2) { A[idx] = 1; } } return A; } //take whatever is at a and swap it with whatever is at b __global__ void particleSwap(int i,int j,int k,int l,int intN,REAL *particles) { int temp; // int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; // if (idx < 1) { temp = particles[i + j*intN]; particles[i + j*intN]= particles[k + l*intN]; particles[k + l*intN] = temp; // } } //take whatever is at a and swap it with whatever is at b __device__ void g_particleSwap(int i,int j,int k,int l,int intN,REAL *particles){ int temp; int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if (idx < 1) { temp = particles[i + j*intN]; particles[i + j*intN]= particles[k + l*intN]; particles[k + l*intN] = temp; } } __global__ void particleMove(int i,int j,int k,int l,int N,REAL *particles) { int deltaP = G_abs(particles[k + N*l] - particles[i + N*j]); int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if (idx < 1) { if (deltaP == 0) { if (particles[i + j*N] > 0) { //particle is moving from p1 to p2 particles[i + j*N] = particles[i + j*N] - 2; particles[k + l*N] = particles[k + l*N] + 2; } else { // hole is moving from p1 to p2 (particle is moving from p2 to p1) particles[i + j*N] = particles[i + j*N] + 2; particles[k + l*N] = particles[k + l*N] - 2; } } if (deltaP == 2) { if (particles[i + j*N] > particles[k + l*N]) { particles[i + j*N] = particles[i + j*N] - 2; particles[k + l*N] = particles[k + l*N] + 2; } else { particles[i + j*N] = particles[i + j*N] + 2; particles[k + l*N] = particles[k + l*N] - 2; } } if (deltaP > 2) { if (particles[i + j*N] > particles[k + l*N]) { particles[i + j*N] = particles[i + j*N] - 2; particles[k + l*N] = particles[k + l*N] + 2; } else { particles[i + j*N] = particles[i + j*N] + 2; particles[k + l*N] = particles[k + l*N] - 2; } } } } //change coordinates from observer to 
particle __device__ int changeCoordinates(int intN, int x1, int x2) { int modulox,newCoord; if (x2 < intN/2) { modulox = x2; } else { modulox = x2 - intN; } newCoord = intN/2 + modulox ; return newCoord; } //perform a swap of two particles and recalculate all of the values __global__ void slowSwap(parameters p,int i1,int j1,int i2, int j2,int intN, REAL* tempPar,REAL *tempPot, REAL* tempDos, REAL* particles,REAL *boxR,REAL* substrate, REAL *Ematrix, REAL *watcher,REAL *potentials) { double distance1, distance2; int xPre,yPre,x,y; int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(idx<intN*intN) { tempPar[idx] = particles[idx]; tempPot[idx] = potentials[idx]; tempDos[idx] = Ematrix[idx]; if (particles[i1 + intN*j1] != particles[i2 + intN*j2]) { if(particles[i1 + intN*j1] == 1) { tempPar[i1 + intN*j1] = -1; yPre = idx/intN; xPre = idx%intN; x = xPre + (intN/2 - i1);//for closed boundary dont do G_mod y = yPre + (intN/2 - j1); //instead only do if yPre + (intN/2 - j1) > 0 or < intN if(g_checkIn(x,y,intN)) { //only apply potential changes inside the system distance1 = boxR[i1 + intN*j1 + intN*intN*x + intN*intN*intN*y]; if (distance1 > 0) { tempPot[idx] = tempPot[idx] + p.changeToV/distance1; tempDos[idx] = tempPot[idx]*tempPar[idx] + substrate[idx]*tempPar[idx]; } else { // tempPot[idx] = tempPot[idx] - substrate[idx]*particles[idx]; tempDos[idx] = tempPot[idx]*tempPar[idx] + substrate[idx]*tempPar[idx]; } // probe = distance1; tempPar[i2 + intN*j2] = 1; distance2 = boxR[i2 + intN*j2 + intN*intN*x + intN*intN*intN*y]; if (distance2 > 0) { tempPot[idx] = tempPot[idx] - p.changeToV/distance2; tempDos[idx] = tempPot[idx]*tempPar[idx]+ substrate[idx]*tempPar[idx]; } else { // tempPot[idx] = tempPot[idx] + substrate[idx]*particles[idx]; tempDos[idx] = tempPot[idx]*tempPar[idx] - substrate[idx]*tempPar[idx]; } } } else { tempPar[i1 + intN*j1] = 1; // xPre = idx/intN; // yPre = idx%intN; yPre = idx/intN; xPre = idx%intN; x = xPre + (intN/2 - i1);//for 
closed boundary dont do G_mod y = yPre + (intN/2 - j1); //instead only do if yPre + (intN/2 - j1) > 0 or < intN if(g_checkIn(x,y,intN)) { //only apply potential changes inside the system distance1 = boxR[i1 + intN*j1 + intN*intN*x + intN*intN*intN*y]; // distance1 = boxR[x + intN*y + intN*intN*i1 + intN*intN*intN*j1]; // watcher[idx] = boxR[i1 + intN*j1 + intN*intN*x + intN*intN*intN*y]; if (distance1 > 0) { tempPot[idx] = tempPot[idx] - p.changeToV/distance1; tempDos[idx] = tempPot[idx]*tempPar[idx] + substrate[idx]*tempPar[idx]; } else { // tempPot[idx] = tempPot[idx] + substrate[idx]*particles[idx]; tempDos[idx] = tempPot[idx]*tempPar[idx] + substrate[idx]*tempPar[idx]; // watcher[idx] = tempPot[idx]*tempPar[idx] ; } tempPar[i2 + intN*j2] = -1; x = (int) G_mod(xPre + ( intN/2 - i2),intN); y = (int) G_mod(yPre + (intN/2 - j2),intN); distance2 = boxR[i2 + intN*j2 + intN*intN*x + intN*intN*intN*y]; if (distance2 > 0) { tempPot[idx] = tempPot[idx] + p.changeToV/distance2; tempDos[idx] = tempPot[idx]*tempPar[idx] + substrate[idx]*tempPar[idx]; } else { // tempPot[idx] = tempPot[idx] - substrate[idx]*particles[idx]; tempDos[idx] = tempPot[idx]*tempPar[idx] + substrate[idx]*tempPar[idx]; // watcher[idx] = tempPot[idx]*tempPar[idx] ; } } // watcher[idx] = substrate[idx]*tempPar[idx]; } // tempDos[idx] = probe; } else { tempDos[idx] = Ematrix[idx]; } } } //calculate substrate contribution to syatem energy __global__ void subAdd(int intN, REAL *particles,REAL *potentials,REAL *substrate){ int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(idx<intN*intN) { potentials[idx] = potentials[idx] + substrate[idx]*particles[idx]; } } //calculate electrical potential contribution to system energy __global__ void potAdd(parameters p,int i1, int j1, int intN,REAL *particles, REAL *potentials, REAL *boxR){ int x,y; int xPre,yPre; REAL distance1; int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(idx<intN*intN) { //for the -0.5,0.5 system if 
(particles[i1 + intN*j1] == 1) { yPre = idx/intN;//if it works xPre = idx%intN;//it works x = xPre + (intN/2 - i1); y = yPre + (intN/2 - j1); if(g_checkIn(x,y,intN) ) { distance1 = boxR[i1 + intN*j1 + intN*intN*x + intN*intN*intN*y]; if (distance1 > 0) { potentials[idx] = potentials[idx] -.5*p.changeToV/distance1; // .5 since Im coming from neutral } } } if (particles[i1 + intN*j1] == -1) { yPre = idx/intN;//if it works xPre = idx%intN;//it works x = xPre + (intN/2 - i1); y = yPre + (intN/2 - j1); if(g_checkIn(x,y,intN) ) { distance1 = boxR[i1 + intN*j1 + intN*intN*x + intN*intN*intN*y]; if (distance1 > 0) { potentials[idx] = potentials[idx] + .5*p.changeToV/distance1; } } } /* //for the -1,0,1 system , maybe it doesnt matter yPre = idx/intN;//if it works xPre = idx%intN;//it works x = xPre + (intN/2 - i1); y = yPre + (intN/2 - j1); if(g_checkIn(x,y,intN) ) { distance1 = boxR[i1 + intN*j1 + intN*intN*x + intN*intN*intN*y]; if (distance1 > 0) { potentials[idx] = potentials[idx] - particles[i1 + intN*j1]*.5*p.changeToV/distance1; } } */ } } //calculate change in electric potentials when a particle is removed __global__ void potSub(parameters p,int i1, int j1, int intN,REAL *particles,REAL *boxR,REAL *potentials){ int x,y; int xPre,yPre; REAL distance1; int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(idx<intN*intN) { if (particles[i1 + intN*j1] == -1) { yPre = idx/intN;//if it works xPre = idx%intN;//it works x = xPre + (intN/2 - i1); y = yPre + (intN/2 - j1); if(g_checkIn(x,y,intN) ) { distance1 = boxR[i1 + intN*j1 + intN*intN*x + intN*intN*intN*y]; if (distance1 > 0) { potentials[idx] = potentials[idx] + p.changeToV/distance1; } } } if (particles[i1 + intN*j1] == 1) { yPre = idx/intN;//if it works xPre = idx%intN;//it works x = xPre + (intN/2 - i1); y = yPre + (intN/2 - j1); if(g_checkIn(x,y,intN) ) { distance1 = boxR[i1 + intN*j1 + intN*intN*x + intN*intN*intN*y]; if (distance1 > 0) { potentials[idx] = potentials[idx] - p.changeToV/distance1; } 
} } } } //calculate change in potential energy __global__ void potChange(parameters p,int i1, int j1, int intN,REAL *particles,REAL *boxR,REAL *potentials,REAL* Ematrix){ int x,y; int xPre,yPre; REAL distance1; int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(idx<intN*intN) { if (particles[i1 + intN*j1] == 1) { particles[i1 + intN*j1] = 0; yPre = idx/intN;//if it works xPre = idx%intN;//it works x = xPre + (intN/2 - i1);//for closed boundary dont do G_mod y = yPre + (intN/2 - j1); //instead only do if yPre + (intN/2 - j1) > 0 or < intN if(g_checkIn(x,y,intN)) { //only apply potential changes inside the system distance1 = boxR[i1 + intN*j1 + intN*intN*x + intN*intN*intN*y]; if (distance1 > 0) { potentials[idx] = potentials[idx] + p.changeToV/distance1; // potentials[idx] = 999; } } if(particles[idx] == 0) {//empty cells count as positive potential Ematrix[idx] = potentials[idx]; // if (distance1 == 0) { // Ematrix[idx] = -potentials[idx]; // } } else { Ematrix[idx] = -potentials[idx]; // if (distance1 == 0) { // Ematrix[idx] = potentials[idx]; // } } } else { particles[i1 + intN*j1] = 1; yPre = idx/intN;//if it works xPre = idx%intN;//it works x = xPre + (intN/2 - i1);//for closed boundary dont do G_mod y = yPre + (intN/2 - j1); //instead only do if yPre + (intN/2 - j1) > 0 or < intN if(g_checkIn(x,y,intN)) { //only apply potential changes inside the system distance1 = boxR[i1 + intN*j1 + intN*intN*x + intN*intN*intN*y]; if (distance1 > 0) { potentials[idx] = potentials[idx] - p.changeToV/distance1; } } if(particles[idx] == 0) { Ematrix[idx] = potentials[idx]; // if (distance1 == 0) { // Ematrix[idx] = -potentials[idx]; // } } else { Ematrix[idx] = -potentials[idx]; // if (distance1 == 0) { // Ematrix[idx] = potentials[idx]; // } } } } } //combine potential energies and substrate energies to find total energies __global__ void findE(int intN, REAL *Ematrix, REAL *particles, REAL *potentials, REAL *substrate) { int 
idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(idx<intN*intN) { Ematrix[idx] = particles[idx]*potentials[idx] + particles[idx]*substrate[idx]; // Ematrix[idx] = particles[idx]*potentials[idx]; // Ematrix[idx] =particles[idx]*substrate[idx]; } } //change the density of states from absolute value contribution to reflect removing and adding of a particle __global__ void dosChange(int intN, REAL *particles,REAL *Ematrix,REAL *potentials) { int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(idx<intN*intN) { if(particles[idx] == -1) { Ematrix[idx] = potentials[idx]; } else { Ematrix[idx] = -potentials[idx]; } } } //place particles __global__ void particleDrop(int intN,int i ,int j,int newParticle,REAL *particles){ particles[i + intN*j] = newParticle; } //find the potentials after a swap of positions //generalized for deltaP>0 __global__ void potSwap(parameters p,int i1, int j1, int i2, int j2,int intN,REAL *particles,REAL *boxR,REAL *potentials){ int x,y; int xPre,yPre; REAL distance1,distance2; // REAL before,after; int deltaP = G_abs(particles[i2 + intN*j2] - particles[i1 + intN*j1]); int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(idx<intN*intN) { // before = Ematrix[idx]; if (deltaP == 0) { //if they are the same, then something has stacked p1 either started positive and became more positive or it started negative and became more negative yPre = idx/intN;//if it works xPre = idx%intN;//it works x = xPre + (intN/2 - i1);//for closed boundary dont do G_mod y = yPre + (intN/2 - j1); //instead only do if yPre + (intN/2 - j1) > 0 or < intN if(g_checkIn(x,y,intN)) { //only apply potential changes inside the system distance1 = boxR[i1 + intN*j1 + intN*intN*x + intN*intN*intN*y]; // first I do the change at the first position if (distance1 > 0) { // if (particles[i1 + intN*j1] < 0) { if (particles[i1 + intN*j1] > 0) { potentials[idx] = potentials[idx] + p.changeToV/distance1; } else { potentials[idx] = potentials[idx] 
- p.changeToV/distance1; } } } x = xPre + (intN/2 - i2);//for closed boundary dont do G_mod y = yPre + (intN/2 - j2); //instead only do if yPre + (intN/2 - j1) > 0 or < intN if(g_checkIn(x,y,intN)) { //only apply potential changes inside the system distance2 = boxR[i2 + intN*j2 + intN*intN*x + intN*intN*intN*y]; if (distance2 > 0) { // if (particles[i1 + intN*j1] > 0) { //might be the other way if (particles[i1 + intN*j1] <= 0) { //might be the other way potentials[idx] = potentials[idx] + p.changeToV/distance2; } else { potentials[idx] = potentials[idx] - p.changeToV/distance2; } } } } if ((deltaP == 2) || (deltaP > 2)) { //if they are 1 change away from each other (a particle is traveling without blockade) // the last line could be written more simply, but this way shows that both scenarios act the same way //system is relaxing, kind of like the opposite of stacking, except this time it can go either way (but I only have to calculate the one way that it is going) yPre = idx/intN;//if it works xPre = idx%intN;//it works x = xPre + (intN/2 - i1);//for closed boundary dont do G_mod y = yPre + (intN/2 - j1); //instead only do if yPre + (intN/2 - j1) > 0 or < intN if(g_checkIn(x,y,intN)) { //only apply potential changes inside the system distance1 = boxR[i1 + intN*j1 + intN*intN*x + intN*intN*intN*y]; if (distance1 > 0) { if (particles[i1 + intN*j1] > particles[i2 + intN*j2]) { potentials[idx] = potentials[idx] + p.changeToV/distance1; } else { potentials[idx] = potentials[idx] - p.changeToV/distance1; } } } x = xPre + (intN/2 - i2);//for closed boundary dont do G_mod y = yPre + (intN/2 - j2); //instead only do if yPre + (intN/2 - j1) > 0 or < intN if(g_checkIn(x,y,intN)) { //only apply potential changes inside the system distance2 = boxR[i2 + intN*j2 + intN*intN*x + intN*intN*intN*y];//might be the other way if (distance2 > 0) { if (particles[i2 + intN*j2] > particles[i1 + intN*j1]) { potentials[idx] = potentials[idx] + p.changeToV/distance2; } else { potentials[idx] 
= potentials[idx] - p.changeToV/distance2; } } } } } } //force a particle to a certain place void C_particleForce(vectors &v,int intN, int i1, int j1,int i2,int j2,int threads, int blocks,parameters p) { hipLaunchKernelGGL(( potSwap), dim3(blocks),dim3(threads), 0, 0, p,i1, j1, i2, j2,intN,v.particles,v.boxR,v.potentials); hipLaunchKernelGGL(( particleSwap), dim3(blocks),dim3(threads), 0, 0, i1, j1, i2,j2,intN,v.particles); hipLaunchKernelGGL(( findE), dim3(blocks),dim3(threads), 0, 0, intN, v.Ematrix,v.particles,v.potentials,v.substrate); } int c_checkIn(int i, int j, int N) { int in = 0; if (i > 0 && i < N && j > 0 && j < N) { in = 1; } return in; } __device__ int g_checkIn(int i, int j, int N) { int in = 0; if (i > 0 && i < N && j > 0 && j < N) { in = 1; } return in; } //pick which site would result in a decrease of system energy (energies are negative, so this looks for the highest (closest to zero) value) void C_particlePick(vectors &v,int intN, int i, int j,int threads, int blocks,parameters p) { if ((v.results[0] < v.results[1] ) ||(v.results[0] < v.results[2] ) ||(v.results[0] < v.results[3] ) ||(v.results[0] < v.results[4] ) ) { int iPrev,jPrev,iPost,jPost; iPrev = C_mod(i - 1,intN); jPrev = C_mod(j - 1,intN); iPost = C_mod(i + 1,intN); jPost = C_mod(j + 1,intN); if ((v.results[1] > v.results[2] ) &&(v.results[1] > v.results[3] ) &&(v.results[1] > v.results[4] ) ) { hipLaunchKernelGGL(( potSwap), dim3(blocks),dim3(threads), 0, 0, p, i, j, iPrev,j,intN,v.particles,v.boxR,v.potentials); hipLaunchKernelGGL(( particleSwap), dim3(blocks),dim3(threads), 0, 0, i, j, iPrev,j,intN,v.particles); hipLaunchKernelGGL(( findE), dim3(blocks),dim3(threads), 0, 0, intN, v.Ematrix,v.particles,v.potentials,v.substrate); } else if ((v.results[2] > v.results[3] ) &&(v.results[2] > v.results[4] )) { hipLaunchKernelGGL(( potSwap), dim3(blocks),dim3(threads), 0, 0, p, i, j, i,jPrev,intN,v.particles,v.boxR,v.potentials); hipLaunchKernelGGL(( particleSwap), 
dim3(blocks),dim3(threads), 0, 0, i, j, i,jPrev,intN,v.particles); hipLaunchKernelGGL(( findE), dim3(blocks),dim3(threads), 0, 0, intN, v.Ematrix,v.particles,v.potentials,v.substrate); } else if (v.results[3] > v.results[4]) { hipLaunchKernelGGL(( potSwap), dim3(blocks),dim3(threads), 0, 0, p, i, j, iPost,j,intN,v.particles,v.boxR,v.potentials); hipLaunchKernelGGL(( particleSwap), dim3(blocks),dim3(threads), 0, 0, i, j, iPost,j,intN,v.particles); hipLaunchKernelGGL(( findE), dim3(blocks),dim3(threads), 0, 0, intN, v.Ematrix,v.particles,v.potentials,v.substrate); } else { hipLaunchKernelGGL(( potSwap), dim3(blocks),dim3(threads), 0, 0, p, i, j, i,jPost,intN,v.particles,v.boxR,v.potentials); hipLaunchKernelGGL(( particleSwap), dim3(blocks),dim3(threads), 0, 0, i, j, i,jPost,intN,v.particles); hipLaunchKernelGGL(( findE), dim3(blocks),dim3(threads), 0, 0, intN, v.Ematrix,v.particles,v.potentials,v.substrate); } } } //compare a site with its neighbors to find minimal system energy void fastTest(vectors &v,int i, int j, int intN,int threads, int blocks,parameters p) { int iPrev,jPrev,iPost,jPost; iPrev = C_mod(i - 1,intN); jPrev = C_mod(j - 1,intN); iPost = C_mod(i + 1,intN); jPost = C_mod(j + 1,intN); REAL result; thrust::device_ptr<REAL> p_tempDos = thrust::device_pointer_cast(v.tempDos); thrust::device_ptr<REAL> p_Ematrix = thrust::device_pointer_cast(v.Ematrix); result = thrust::reduce(p_Ematrix, p_Ematrix + intN*intN); v.results[0] = result; if (c_checkIn(iPrev,j,intN)) { hipLaunchKernelGGL(( slowSwap), dim3(blocks),dim3(threads), 0, 0, p, i,j, iPrev, j, intN, v.tempPar,v.tempPot, v.tempDos, v.particles,v.boxR, v.substrate,v.Ematrix, v.watcher,v.potentials); result = thrust::reduce(p_tempDos, p_tempDos + intN*intN); v.results[1] = result; } else { v.results[1] = -1e50; //large negative number so that it's not picked } if (c_checkIn(i,jPrev,intN)) { hipLaunchKernelGGL(( slowSwap), dim3(blocks),dim3(threads), 0, 0, p, i,j, i, jPrev, intN, v.tempPar,v.tempPot, 
v.tempDos, v.particles,v.boxR, v.substrate,v.Ematrix, v.watcher,v.potentials); result = thrust::reduce(p_tempDos, p_tempDos + intN*intN); v.results[2] = result; } else { v.results[2] = -1e50; //large negative number so that it's not picked } if (c_checkIn(iPost,j,intN)) { hipLaunchKernelGGL(( slowSwap), dim3(blocks),dim3(threads), 0, 0, p, i,j, iPost, j, intN, v.tempPar,v.tempPot, v.tempDos, v.particles,v.boxR, v.substrate,v.Ematrix, v.watcher,v.potentials); result = thrust::reduce(p_tempDos, p_tempDos + intN*intN); v.results[3] = result; } else { v.results[3] = -1e50; //large negative number so that it's not picked } if (c_checkIn(i,jPost,intN)) { hipLaunchKernelGGL(( slowSwap), dim3(blocks),dim3(threads), 0, 0, p, i,j, i, jPost, intN, v.tempPar,v.tempPot, v.tempDos, v.particles,v.boxR, v.substrate,v.Ematrix, v.watcher,v.potentials); result = thrust::reduce(p_tempDos, p_tempDos + intN*intN); v.results[4] = result; } else { v.results[4] = -1e50; //large negative number so that it's not picked } C_particlePick(v, intN, i, j, threads, blocks,p); } //when a particle is moved far away, there may be a cascade of changes as the particles around it accomodate. 
This checks for that in a spiral manner void spiral(parameters p,int index,int blocks, int threads,vectors &v) { int nLevels,xStart,yStart,ringLevel,ringLength,xNow,yNow,xCount,yCount; nLevels = 5; int intN = p.N; xStart = index%intN; yStart = index/intN; for (ringLevel = 1; ringLevel < nLevels; ringLevel++) { ringLength = ringLevel * 2 +1; xNow = xStart + ringLevel; yNow = yStart + ringLevel; for (xCount = 1; xCount < ringLength; xCount++) { xNow = xNow - 1; yNow = yNow; if(c_checkIn(xNow,yNow,intN)) { fastTest(v, xNow, yNow, intN, threads, blocks,p); } } for (yCount = 1; yCount < ringLength; yCount++) { xNow = xNow; yNow = yNow - 1; if(c_checkIn(xNow,yNow,intN)) { fastTest(v, xNow, yNow, intN, threads, blocks,p); } } for (xCount = 1; xCount < ringLength; xCount++) { xNow = xNow + 1; yNow = yNow; if(c_checkIn(xNow,yNow,intN)) { fastTest(v, xNow, yNow, intN, threads, blocks,p); } } for (yCount = 1; yCount < ringLength; yCount++) { xNow = xNow; yNow = yNow + 1; if(c_checkIn(xNow,yNow,intN)) { fastTest(v, xNow, yNow, intN, threads, blocks,p); } } } } //see if youre close enough to a change in particles to be updated __global__ void checkRange(int index,REAL *rangeMatrix,int intN) { int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx < intN*intN) { int i,j,k,l; double di,dj,dk,dl,r,dx,dy; i = index/intN; j = index%intN; k = idx/intN; l = idx%intN; dk = (double) k; dl = (double) l; di = (double) i; dj = (double) j; dx = dk - di; dy = dl - dj; r = sqrt(dx*dx + dy*dy); rangeMatrix[idx] = 0; if (r < 10) { rangeMatrix[idx] = 1; } } } int updateMinMax(vectors &v, int c_stable, REAL min_value, REAL max_value) {//apparently the loop can be 4 switches long (hopefully it doesnt go further) if(v.min1 == min_value && v.max1 == max_value) { c_stable = 1; } if(v.min2 == min_value && v.max2 == max_value) { c_stable = 1; } if(v.min3 == min_value && v.max3 == max_value) { c_stable = 1; } if(v.min4 == min_value && v.max4 == max_value) { c_stable = 1; } v.min4 = v.min3; v.min3 = 
v.min2; v.min2 = v.min1; v.min1 = min_value; v.max4 = v.max3; v.max3 = v.max2; v.max2 = v.max1; v.max1 = max_value; return c_stable; } //see if the system has reached a local minimum int checkStable(vectors &v,int c_stable,REAL min_value,REAL max_value,int min_offset,int max_offset,int intN,int blocks,int threads,parameters p){ int i1,i2,j1,j2; c_stable = updateMinMax(v, c_stable, min_value,max_value); if (c_stable == 0) { i1 = min_offset%intN; j1 = min_offset/intN; i2 = max_offset%intN; j2 = max_offset/intN; hipLaunchKernelGGL(( potSwap), dim3(blocks),dim3(threads), 0, 0, p,i1, j1, i2, j2,intN,v.particles,v.boxR,v.potentials); hipLaunchKernelGGL(( particleSwap), dim3(blocks),dim3(threads), 0, 0, i1, j1, i2,j2,intN,v.particles); hipLaunchKernelGGL(( findE), dim3(blocks),dim3(threads), 0, 0, intN, v.Ematrix,v.particles,v.potentials,v.substrate); } return c_stable; } //move a particle from high system energy to low system energy int highsToLows(vectors &v,int max_offset,int min_offset,REAL max_value,REAL min_value,int c_stable, int blocks,int threads,parameters p) { c_stable = checkStable(v, c_stable, min_value, max_value, min_offset,max_offset,p.N, blocks,threads,p); if (c_stable == 0) { spiral(p,max_offset, blocks, threads,v); spiral(p,min_offset, blocks, threads,v); } return c_stable; } //grab only the positive values of a matrix ( for using the thrust function) __global__ void grabPositives(REAL* particles,REAL *extraArray,REAL* Ematrix,int N,int posNeg) { int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if (idx < N*N) { extraArray[idx] = 0; if(particles[idx] == posNeg) { extraArray[idx] = Ematrix[idx]; } } } //reflect the fact that holes are filled and full sites are emptied void __global__ lastFlip(int intN,REAL *invertedDos,REAL *particles) { int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx < intN*intN) { if (particles[idx] == -1) { invertedDos[idx] = -invertedDos[idx]; } } } //calculate dos by removing the particle and seeing what 
effect this has on the system void dosInvert (parameters p,int intN,int threads,int blocks,vectors &v) {//should work for nParticles > 1 int i,j; double result1,result2; thrust::device_ptr<REAL> g_go = thrust::device_pointer_cast(v.tempDos); for(j = 0; j < intN; j++) { for (i = 0; i < intN; i++) { //i = 20; //j = 20; hipLaunchKernelGGL(( matrixCopy), dim3(blocks),dim3(threads), 0, 0, intN, v.potentials ,v.tempPot); hipLaunchKernelGGL(( matrixCopy), dim3(blocks),dim3(threads), 0, 0, intN, v.particles , v.tempPar); hipLaunchKernelGGL(( matrixCopy), dim3(blocks),dim3(threads), 0, 0, intN, v.Ematrix ,v.tempDos); hipLaunchKernelGGL(( potChange), dim3(blocks),dim3(threads), 0, 0, p,i, j, intN,v.tempPar,v.boxR,v.tempPot,v.tempDos); // dosChange<<<blocks,threads>>>(intN, tempPar,tempDos,tempPot); result1 = thrust::reduce(g_go, g_go + intN*intN); hipLaunchKernelGGL(( potChange), dim3(blocks),dim3(threads), 0, 0, p,i, j, intN,v.tempPar,v.boxR,v.tempPot,v.tempDos); // dosChange<<<blocks,threads>>>(intN, tempPar,tempDos,tempPot); result2 = thrust::reduce(g_go, g_go + intN*intN); hipLaunchKernelGGL(( dosPut), dim3(blocks),dim3(threads), 0, 0, i, j,intN,v.invertedDos, result2 - result1); } } //lastFlip<<<blocks,threads>>>(intN,invertedDos,tempPar); } //do the half of the Glatz algorithm which uses a density of states map to find which particle switching is optimal void switcharoo(vectors &v,int c_stable,int threads, int blocks,parameters p) { int intN = p.N; int min_offset,max_offset; REAL min_value,max_value; thrust::device_ptr<REAL> g_ptr = thrust::device_pointer_cast(v.Ematrix); thrust::device_ptr<REAL> inverted_ptr = thrust::device_pointer_cast(v.extraArray); while (c_stable == 0) { hipLaunchKernelGGL(( grabPositives), dim3(blocks),dim3(threads), 0, 0, v.particles,v.extraArray,v.Ematrix,intN,1); min_offset = thrust::min_element(inverted_ptr, inverted_ptr + intN*intN) - inverted_ptr; min_value = *(inverted_ptr + min_offset); hipLaunchKernelGGL(( grabPositives), 
dim3(blocks),dim3(threads), 0, 0, v.particles,v.extraArray,v.Ematrix,intN,-1); max_offset = thrust::min_element(inverted_ptr, inverted_ptr + intN*intN) - inverted_ptr; //grabbing the smallest positive number max_value = *(inverted_ptr + max_offset); // max_offset = thrust::max_element(inverted_ptr, inverted_ptr + intN*intN) - inverted_ptr; // max_value = *(inverted_ptr + max_offset); // potentialse = *(g_ptr + max_offset); //cout<<min_value<<" "<<max_value<<endl; c_stable = highsToLows(v, max_offset,min_offset, max_value,min_value,c_stable, blocks,threads,p); } } //2 step relaxation algorithm developed by Glatz void glatzRelax(int threads,int blocks,parameters p, vectors v) { int i,j,intN; intN = p.N; int N = p.N; int c_stable = 0; /* for(j = 0; j < intN; j++) { for(i = 0; i < intN; i++) { potAdd<<<blocks,threads>>>( i, j, intN, v.particles, v.potentials, v.boxR); } } hipLaunchKernelGGL(( findE), dim3(blocks),dim3(threads), 0, 0, intN, v.Ematrix,v.particles,v.potentials,v.substrate); */ for (int t = 0; t < 1; t++) { //original pair exchange for(j = 0; j < N; j++) { for(i = 0; i < N; i++) { fastTest(v, i, j, intN, threads, blocks,p); } } } //highs to lows switcharoo(v,c_stable,threads, blocks,p); errorAsk("switching highs to lows"); dosInvert (p, intN,threads,blocks,v); hipFree(v.extraArray); hipFree(v.rangeMatrix); } //initialize jump matrix __global__ void jumpFill(REAL* jumpRecord,int N) { int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if (idx < N) { jumpRecord[idx] = 999; } } //find minimum of 4 values __device__ REAL findMin(REAL d1, REAL d2, REAL d3, REAL d4) { if (d1 < d2 && d1 < d3 && d1 < d4) { return d1; } if (d2 < d1 && d2 < d3 && d2 < d4) { return d2; } if (d3 < d1 && d3 < d2 && d3 < d4) { return d3; } if (d4 < d1 && d4 < d2 && d4 < d3) { return d4; } return d1; //default } //create matrix which finds individual granule radii(a) by linking it to distance between granules __global__ void aMaker(REAL *aMatrix,REAL *boxR,int N) { int 
i,j,iPrev,jPrev,iPost,jPost; REAL distanceUp,distanceDown,distanceLeft,distanceRight, minDistance; int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if (idx < N*N) { i = idx%(N); j = (idx%(N*N) - idx%(N))/N; iPrev = G_mod(i - 1,N); jPrev = G_mod(j - 1,N); iPost = G_mod(i + 1,N); jPost = G_mod(j + 1,N); distanceUp = boxR[i + N*j + N*N*i + N*N*N*jPost]; distanceDown = boxR[i + N*j + N*N*i + N*N*N*jPrev]; distanceLeft = boxR[i + N*j + N*N*iPrev + N*N*N*j]; distanceRight = boxR[i + N*j + N*N*iPost + N*N*N*j]; minDistance = findMin(distanceUp,distanceDown,distanceLeft,distanceRight); aMatrix[idx] = minDistance/2; } } //substrate contribution from the granule size __global__ void subCombine(REAL *aMatrix,REAL *substrate,REAL L, int N) { int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if (idx < N*N) { substrate[idx] = substrate[idx]*aMatrix[idx]/L; } } __global__ void tempGradient(REAL *TField,double lowTemp,double highTemp,int N) { int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; int i,j; double doubleI; if (idx < N*N) { i = idx/N; j = idx%N; doubleI = (double) i; TField[i + N*j] = lowTemp + doubleI*(highTemp-lowTemp)/N; } } __global__ void noGradient(REAL *TField,double temperature, int N) { int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if (idx < N*N) { TField[idx] = temperature; } } __global__ void createKd(REAL *KdArray,double highKd,int KdFrequency,int N){ int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if (idx < N*N) { if (idx%KdFrequency == 0){ KdArray[idx] = 4/highKd; //since kd is the denominator } else { KdArray[idx] = 1; } } } //load parameters void paramLoad(parameters &p, char *argv[]){ sprintf(p.lineName, "line.txt"); sprintf(p.boxName, "box.txt"); sprintf(p.timeName,"time.txt"); // N = 32; p.N = 30; //size of system (N x N) // N = 256; p.muVar = 0; // randomness of substrate (site energy?) 
-muvar to muvar // muVar = 1e-5; // p.boltzmann = 1.38e-23; p.boltzmann = .01;//test // p.changeToV = 1;//test p.changeToV = 3.6e-10; // Ke*Q/Kd // eV = .05; p.eV = 0; //voltage (arbitrary units for now) p.Ec = 1600; //penalty for double-stacking // Ec = 1.6e-5; // Ec = 1; // T = 1; p.alphaOne = 1; // technically combined with density of states (not used at the moment) // p.alphaTwo = 1; // test p.alphaTwo = 1.16e4; //C/Kb (for converting to unitless) p.T = 25; //temperature // nParticles = input; p.nParticles = .5*p.N*p.N; //number of particles // nParticles = 1; // L = 7e-6; p.L = 1e-8; //10 nm average inter-granule spacing // tSteps = 1000000; //for statistically accurate runs (timesteps) // tSteps = 10000; //for potential runs p.tSteps = 1; // for seeing the fields // tSteps = 0; // relax = 1; p.relax = 0; //wether or not to relax the system before running (should be 0 iff muVar & xyVar = 0) p.grabJ=0; //0 grabs average jumping distance , 1 grabs current p.whichBox=1; p.xi = p.L; //tunneling factor p.xVar = 0; //variance of lattice site in x direction p.yVar = 0; // typically = xVar p.rejection = 0; //default no rejection p.xMoment = 0; p.KdFrequency=2; p.highKd=4; int intVal; REAL realVal; ifstream is_file(argv[1]); string line; //this part loads some of the variables while( getline(is_file, line) ) { istringstream is_line(line); string key; if( getline(is_line, key, '=') ) { string value; if( getline(is_line, value) ) // store_line(key, value); if(key == "Temp") { realVal = atof(value.c_str()); p.T = realVal; } if(key == "muVar") { realVal = atof(value.c_str()); p.muVar = realVal; } if(key == "XYvar") { realVal = atof(value.c_str()); p.xVar = realVal*p.L; p.yVar = realVal*p.L; } if(key == "tSteps") { intVal = atoi(value.c_str()); p.tSteps = intVal; } if(key == "L") { realVal = atof(value.c_str()); p.L = realVal; } if(key == "eV") { realVal = atof(value.c_str()); p.eV = realVal; } if(key == "relax") { intVal = atoi(value.c_str()); p.relax = intVal; } if(key == 
"grabJ") { intVal = atoi(value.c_str()); p.grabJ = intVal; } if(key == "whichBox") { intVal = atoi(value.c_str()); p.whichBox = intVal; } if(key == "lineName") { sprintf(p.lineName, value.c_str()); // lineName = value.c_str(); } if(key == "boxName") { sprintf(p.boxName, value.c_str()); // boxName = value.c_str(); } if(key == "timeName") { sprintf(p.timeName, value.c_str()); } if(key == "rejection") { realVal = atof(value.c_str()); p.rejection = realVal; } if(key == "Ec") { realVal = atof(value.c_str()); p.Ec = realVal; } if(key == "highKd") { realVal = atof(value.c_str()); p.highKd = realVal; } if(key == "KdFrequency") { realVal = atof(value.c_str()); p.KdFrequency = realVal; } } } p.recordLength = p.tSteps; } //load arrays void vectorLoad(vectors &v,parameters p,int blocks, int threads){ int N = p.N; hipMalloc(&v.KdArray,N*N*sizeof(REAL)); hipMalloc(&v.watcher,N*N*sizeof(REAL)); hipMalloc(&v.reducedProb,N*N*sizeof(REAL)); hipMalloc(&v.particles,N*N*sizeof(REAL)); hipMalloc(&v.probabilities,N*N*sizeof(REAL)); hipMalloc(&v.potentials,N*N*sizeof(REAL)); hipMalloc(&v.substrate,N*N*sizeof(REAL)); hipMalloc(&v.Ematrix,N*N*sizeof(REAL)); hipMalloc(&v.tempDos,N*N*sizeof(REAL)); hipMalloc(&v.tempPar,N*N*sizeof(REAL)); hipMalloc(&v.tempPot,N*N*sizeof(REAL)); hipMalloc(&v.invertedDos,N*N*sizeof(REAL)); hipMalloc(&v.jumpRecord,p.recordLength*sizeof(REAL)); hipMalloc(&v.aMatrix,N*N*sizeof(REAL)); hipMalloc(&v.boxR,N*N*N*N*sizeof(REAL)); hipMalloc(&v.picked,sizeof(int)); v.herePicked = new int[1]; v.herePicked[0] = 0; v.timeRun = new REAL[p.recordLength]; v.sumRun = new REAL[p.recordLength]; v.herePot = new REAL[N*N]; v.herePot = C_zeros(N, v.herePot); v.hereProb = new REAL[N*N]; v.hereProb = C_random(N,0,v.hereProb); v.hereP = new REAL[N*N]; // v.hereP = C_clump(p.N,p.nParticles,v.hereP);//test relaxation v.hereP = C_spread(N,p.nParticles,v.hereP); //test general potential // v.hereP = C_zeros(p.N,v.hereP); //zeros for the true neutral map (-2,0,2 ...etc) // hereP = 
C_random(N,nParticles,hereP); // hereP = C_random(N,0,hereP); //empty system // hereP = C_more(N,nParticles,hereP); v.hereXDiff = new REAL[N*N]; v.hereYDiff = new REAL[N*N]; v.hereXDiff = createDiff(v.hereXDiff, p.xVar, N); v.hereYDiff = createDiff(v.hereYDiff, p.yVar, N); v.hereS = new REAL[N*N]; v.hereS = createSub(v.hereS,p.muVar,N); v.hereBoxR = new REAL[N*N*N*N]; v.hereBoxR = createR(v.hereBoxR,v.hereXDiff,v.hereYDiff,N,p.L,p.xi); // hereBoxR = createHex(hereBoxR,hereXDiff,hereYDiff,N,L,xi); hipMemcpy(v.watcher,v.herePot,N*N*sizeof(REAL),hipMemcpyHostToDevice); hipMemcpy(v.potentials,v.herePot,N*N*sizeof(REAL),hipMemcpyHostToDevice); hipMemcpy(v.Ematrix,v.herePot,N*N*sizeof(REAL),hipMemcpyHostToDevice);//just filling it with 0s hipMemcpy(v.substrate,v.hereS,N*N*sizeof(REAL),hipMemcpyHostToDevice); hipMemcpy(v.boxR,v.hereBoxR,N*N*N*N*sizeof(REAL),hipMemcpyHostToDevice); hipMemcpy(v.particles,v.hereP,N*N*sizeof(REAL),hipMemcpyHostToDevice); // aMaker<<<blocks,threads>>>(v.aMatrix,v.boxR,N); // subCombine<<<blocks,threads>>>(v.aMatrix,v.substrate, p.L, N); hipLaunchKernelGGL(( jumpFill), dim3(blocks),dim3(threads), 0, 0, v.jumpRecord,p.recordLength); hipLaunchKernelGGL(( createKd), dim3(blocks),dim3(threads), 0, 0, v.KdArray,p.highKd,p.KdFrequency,p.N); v.min1 = 999; v.min2 = 999; v.min3 = 999; v.min4 = 999; v.max1 = 999; v.max2 = 999; v.max3 = 999; v.max4 = 999; int sizeSum = 6; v.hereSum = new REAL[sizeSum]; v.hereSum = C_zeros(sizeSum,v.hereSum); hipMalloc(&v.TField,N*N*sizeof(REAL)); hipMalloc(&v.extraArray,N*N*sizeof(REAL)); hipMalloc(&v.rangeMatrix,N*N*sizeof(REAL)); hipMalloc(&v.sumArray,sizeSum*sizeof(REAL)); hipMemcpy(v.sumArray,v.hereSum,sizeSum*sizeof(REAL),hipMemcpyHostToDevice); int i,j; for(j = 0; j < p.N; j++) { for(i = 0; i < p.N; i++) { hipLaunchKernelGGL(( potAdd), dim3(blocks),dim3(threads), 0, 0, p, i, j, p.N, v.particles, v.potentials, v.boxR); } } hipLaunchKernelGGL(( findE), dim3(blocks),dim3(threads), 0, 0, p.N, 
v.Ematrix,v.particles,v.potentials,v.substrate); // noGradient<<<blocks,threads>>>(v.TField,15,p.N); hipLaunchKernelGGL(( tempGradient), dim3(blocks),dim3(threads), 0, 0, v.TField,15,500,p.N); //maximum gradient // tempGradient<<<blocks,threads>>>(v.TField,30,31,p.N); //almost no gradient } double findDipole(REAL *g_array,int size) { double moment; REAL *c_array; c_array = new REAL[size*size]; int k,l; hipMemcpy(c_array,g_array,size*size*sizeof(REAL),hipMemcpyDeviceToHost); for (k = 0; k < size; k++) { for (l = 0; l < size; l++) { if (c_array[k + size*l] == 1) { moment += k*c_array[k + size*l]; } } } moment = 2*moment/(size*size); //half filled cells return moment; } int main(int argc,char *argv[]) { hipDeviceReset(); hipSetDevice(0); hipDeviceSynchronize(); hipDeviceSynchronize(); parameters p; vectors v; // srand48(time(0)); clock_t begin = clock(); paramLoad(p,argv); int threads,blocks; int N = p.N; N = p.N; threads=MAXT; blocks=N*N/threads+(N*N%threads==0?0:1); vectorLoad(v,p,blocks, threads); /* char nameP[256]; sprintf(nameP, "line.txt"); hereP = loadMatrix(hereP,nameP); */ //system relax if (p.relax == 1) { glatzRelax(threads, blocks,p,v ); } //run simulation for(int t = 0; t < p.tSteps ; t++) { countThese = 1; v.tStep = t; findJump(v, threads, blocks,p); } //save data // sprintf(str1, "line.txt"); // printBoxCPU(hereXDiff,N,boxName); hipLaunchKernelGGL(( lastFlip), dim3(blocks),dim3(threads), 0, 0, p.N,v.Ematrix,v.particles); switch(p.whichBox) { case 1: printBoxGPU(v.particles,p.N,p.boxName); break; case 2: printBoxGPU(v.probabilities,p.N,p.boxName); break; case 3: printBoxGPU(v.potentials,p.N,p.boxName); break; case 4: printBoxGPU(v.Ematrix,p.N,p.boxName); break; case 5: //no output box break; } printLineCPU(v.sumRun, p.timeName); printLineGPU(v.jumpRecord,p.recordLength,p.lineName); printBoxGPU(v.watcher,p.N,"watcher.dat"); p.xMoment = findDipole(v.particles,p.N); cout<<p.xMoment<<endl; /* 
hipMemcpy(hereP,jumpRecord,N*N*sizeof(REAL),hipMemcpyDeviceToHost); FILE *fp1; // char str1[256]; // sprintf(str1, "particles.txt"); fp1 = fopen(fileName, "w"); for (int k = 0; k < N*N ; k++){ fprintf(fp1, "%lf ",hereP[k]); } //cleanup fclose(fp1); */ delete[] v.herePicked; delete[] v.herePot; delete[] v.hereProb; delete[] v.hereP; delete[] v.hereS; delete[] v.hereBoxR; hipFree(v.particles); hipFree(v.probabilities); hipFree(v.potentials); hipFree(v.substrate); hipFree(v.boxR); hipFree(v.Ematrix); hipFree(v.jumpRecord); hipFree(v.picked); clock_t end = clock(); // double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC; //cout<<"this took "<<elapsed_secs<<" seconds"<<endl; }
6b5a1efc37f83cc1daf13e0c51be63c74febb88a.cu
//advanced cuda system
//sped up algorithms

/*
 Code guide:

 First, matrices are initialized. They are used to keep track of the
 particles, the probabilities to jump, the substrate, and the general
 electric potential. Input parameters are also taken in; paramLoad tells
 you which parameters can be changed in the input file.

 The general electric potential is calculated in CUDA. This reduces an
 n^4 problem to an n^2 one. A site is picked at random on the CPU (part
 of the Monte-Carlo process) and the probabilities with the particles
 around it are calculated on the GPU. The probabilities are then
 returned to the CPU, where the second part of the Monte-Carlo algorithm
 occurs. Here, the site which the subject particle will interact with is
 chosen randomly, but with weights according to the probabilities. The
 jump is made, and the system starts over.

 The relaxation is based on a previous code. Basically, it consists of
 2 steps. First, every site is tested against its 4 neighbors to see if
 any quick optimization can be done. Then the density of states is
 found. This gives a quick global view of which sites can be swapped to
 minimize system energy.
*/
#include <stdio.h>
#include <stdlib.h> /* for rand() */
#include <unistd.h> /* for getpid() */
#include <time.h> /* for time() */
#include <math.h>
#include <assert.h>
#include <iostream>
#include <fstream>
#include <ctime>
#include <thrust/scan.h>
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include <thrust/extrema.h>
#include <cuda.h>
#include <sstream>
#include <string>
#include "closedMott.h"

#define PI 3.1415926535897932384626433832795
#define TWOPI 6.28318530717958647692528676655901

// construct REAL "type," depending on desired precision
// set the maximum number of threads
// NOTE(review): both branches currently define REAL as double / MAXT as 256,
// so the DOUBLE flag has no effect; the float variant is kept commented out.
#ifdef DOUBLE
#define REAL double
#define MAXT 256
#else
#define REAL double
#define MAXT 256
// #define REAL float
// #define MAXT 512
#endif

using namespace std;

// Host-side bookkeeping globals.
int currentCount = 0;  // NOTE(review): not referenced in this chunk -- confirm use elsewhere
int countThese = 1;    // reset each step of main's simulation loop
int tIndex = 0;        // next free slot in the time-record arrays (see trackTime)

// Simple complex-number pair (real, imaginary).
typedef struct {
    REAL re;
    REAL im;
} COMPLEX;

//absolute value on the gpu
__device__ int G_abs(int a) {
    if (a < 0) {
        a = -1*a;
    }
    return a;
}

//wrote own modulo algorithm since the C modulo (%) handles negative
//operands unhelpfully here (-3%10 = -3 instead of the desired 7);
//result is always in [0, b)
__device__ int G_mod(int a,int b) {
    while (a < 0) {
        a = a + b;
    }
    while (a >= b) {
        a = a - b;
    }
    return a;
}

// I need a version of my modulo for the gpu and for the CPU
int C_mod(int a, int b) {
    while (a < 0) {
        a = a + b;
    }
    while (a >= b) {
        a = a - b;
    }
    return a;
}

//gpu matrix copying: copies one intN x intN matrix into another,
//one element per thread (threads past the end do nothing)
__global__ void matrixCopy(int intN, REAL * matrixIn,REAL *matrixOut){
    int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
    if (idx < intN*intN) {
        matrixOut[idx] = matrixIn[idx];
    }
}

//Here, the gpu's find the general electric potential at each lattice site.
// Computes the long-range electric potential at every lattice site.
// One thread per site (i,j); each thread sums charge/distance over an
// N x N window centered on itself, using the precomputed pair-distance
// hyper-matrix boxR, then scales the sum by p.changeToV. O(N^2) work
// per thread -- this is the n^4 -> n^2 reduction mentioned in the file
// header.
__global__ void findPotential(REAL *particles,REAL *potentials, REAL *boxR,parameters p) {
    int i,j,checkx,checky;
    int intN = (int) p.N;
    int halfRange = p.N/2;//gets forced to (N-1)/2 since odd
    int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
    double sum,distanceTerm;
    int k,l;
    if(idx<intN*intN) {
        i = idx/intN;
        j = idx%intN;
        sum = 0;
        for(l = 0 ; l < p.N; l++) {
            for(k = 0; k < p.N; k++) {
                // (checkx,checky) is the neighbor site this window cell maps to
                checkx = i - halfRange + k;
                checky = j - halfRange + l;
                if(g_checkIn(checkx,checky,p.N)) {  // skip cells outside the closed boundary
                    if ((k != halfRange) || (l != halfRange)) { //dont do self-potential
                        distanceTerm = boxR[i + intN*j + intN*intN*k + intN*intN*intN*l];
                        sum = sum + particles[(checkx) + intN*(checky)]/distanceTerm;
                    }
                }
            }
        }
        potentials[i + intN*j] = sum*p.changeToV;  // convert charge/distance sum to potential units
    }
}

//check for a CUDA error, use argument for identification;
//returns true if an error was pending (and clears it via cudaGetLastError)
bool errorAsk(const char *s="n/a") {
    cudaError_t err=cudaGetLastError();
    if(err==cudaSuccess) return false;
    printf("CUDA error [%s]: %s\n",s,cudaGetErrorString(err));
    return true;
};

// Charging-energy ("Coulomb blockade") cost of a proposed transfer,
// based on the occupation difference between the two sites.
__device__ double findBlockade(int p,int thisp,double Ec) {
    int deltaP = G_abs(thisp - p);
    int rho; //for rho*Ec constant multiplier regarding how much stacking is happening
    if (deltaP==0) { //if they are the same, then something is trying to stack
        return Ec;
    }
    if (deltaP == 2) { //if they are off by "1" then one guy is moving freely
        return 0;
    }
    rho = deltaP/2; // if it's not the first two, then the system is relaxing
    return -rho*Ec;
}

//The first half of the heart of this program. Here the probabilities are calculated based on the energy change of the system and on the localization of the electron.
// Computes, for a fixed source site (x,y), the jump probability to every
// site in an N x N window around it (one thread per destination). The
// probability is exp(tunneling term + energy term), where the energy
// term combines blockade, potential, substrate, and bias-current
// contributions, Boltzmann-suppressed by the local electron temperature.
// Results land in probabilities[]; watcher[] is a debug tap.
__global__ void findProbabilities(REAL *KdArray,REAL *TField,REAL *probabilities,REAL *particles,REAL *potentials,REAL *substrate,REAL *boxR,REAL *watcher,int tStep,int x, int y, parameters p) {
// REAL number = 11;
    int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
    int i,j,thisi,thisj,thatp,thisp,hyperIndex,N;
    double potConstant,currentPart,distancePart,blockadePart,potentialPart,substratePart,energyPart,electronT;
// double doublej, doublei,r;
// potConstant = 1.17e-13;
// potConstant = Ec;
    potConstant = -1;
    N = p.N;
    if(idx<N*N) {
        // (i,j): offset of this thread's destination relative to the source
        i = idx/N;
        j = idx%N;
        i = i-N/2;
        j = j-N/2;
        thisi = i + x;
        thisj = j + y;
        if (g_checkIn(thisi,thisj,N)) {
            // index into the 4D pair-distance tensor boxR for (source, window cell)
            hyperIndex = x + N*y + N*N*(idx/N) + N*N*N*(idx%N);
// doublei = i;
// doublej = j;
// r = sqrt(doublei*doublei + doublej*doublej);
// distancePart = -2.000*boxR[idx];
            distancePart = -2*boxR[hyperIndex]/(p.xi);  // tunneling suppression, -2r/xi
// distancePart = 0;
            thatp = particles[x + N*y];
            thisp = particles[thisi + N*thisj];
            if(particles[x + N*y] > particles[thisi + N*thisj]) { //situation 1
                blockadePart = -1*findBlockade(thatp,thisp,p.Ec)/boxR[hyperIndex];
                potentialPart = -sqrt(KdArray[thisi + N*thisj]*KdArray[x + N*y])*potConstant*(potentials[thisi + N*thisj] - potentials[x + N*y] - p.changeToV/boxR[hyperIndex]);
                substratePart = substrate[thisi+ N*thisj];
                currentPart = p.eV*i;
                electronT = TField[x + N*y];
// currentPart = 0;
// blockadePart = 0;
// potentialPart= 0;
// substratePart= 0;
            }
            if (particles[x + N*y] < particles[thisi + N*thisj]) { //situation 2
                blockadePart = -1*findBlockade(thatp,thisp,p.Ec)/boxR[hyperIndex];
                potentialPart = sqrt(KdArray[thisi + N*thisj]*KdArray[x + N*y])*potConstant*(potentials[thisi + N*thisj] - potentials[x + N*y] + p.changeToV/boxR[hyperIndex]);
                substratePart = -substrate[thisi + N*thisj];
                currentPart = -p.eV*i;
                electronT = TField[thisi + N*thisj];
// currentPart = 0;
// substratePart = 0;
// potentialPart = 0;
// blockadePart = 0;
            }
            if ( particles[x + N*y] == particles[thisi + N*thisj] ){// stacking
                electronT = (TField[x + N*y] + TField[thisi + N*thisj])/2;
                if (particles[x + N*y] < 0) { //then p1 is getting more negative and p2 is getting more positive (like in situation 1)
                    blockadePart = -1*findBlockade(thatp,thisp,p.Ec)/boxR[hyperIndex];
                    potentialPart = sqrt(KdArray[thisi + N*thisj]*KdArray[x + N*y])*potConstant*(potentials[thisi + N*thisj] - potentials[x + N*y] + p.changeToV/boxR[hyperIndex]);
                    substratePart = -substrate[thisi + N*thisj];
                    currentPart = -p.eV*i;
                }
                else { //then p1 is getting more positive and p2 is getting more negative (situation2-like transfer is happening)
                    blockadePart = -1*findBlockade(thatp,thisp,p.Ec)/boxR[hyperIndex];
                    potentialPart = -sqrt(KdArray[thisi + N*thisj]*KdArray[x + N*y])*potConstant*(potentials[thisi + N*thisj] - potentials[x + N*y] - p.changeToV/boxR[hyperIndex]);
                    substratePart = substrate[thisi+ N*thisj];
                    currentPart = p.eV*i;
                }
// currentPart = 0;
// substratePart = 0;
// potentialPart = 0;
// blockadePart = 0;
            }
            energyPart = p.alphaTwo*(blockadePart+potentialPart+substratePart+currentPart)/electronT;
            if (energyPart > 0) {  // energetically downhill jumps are not boosted above 1
                energyPart = 0;
            }
            probabilities[idx] = exp(distancePart+energyPart);
// watcher[idx] = distancePart+p.alphaTwo*(blockadePart+potentialPart+substratePart+currentPart)/electronT;
            watcher[idx] = KdArray[idx];
            if ((thisi==x && thisj==y ) ){
// probabilities[idx] = 1; //force probability of jumping to self to 1 (avoids 0/0 problems)
// probabilities[idx] = 0; //rejection free monte carlo algorithm
                probabilities[idx] = p.rejection;  // self-jump weight is configurable
            }
        }
        else {
            probabilities[idx] = 0;  // destinations outside the sample get zero weight
        }
    }
};

// Records fillVal at slot t of the jump record (caller supplies the slot).
__device__ void simpleFill(REAL *jumpRecord, REAL fillVal, int t) {
    jumpRecord[t] = fillVal;
}

// Records fillVal in the first unused slot (sentinel value 999) of jumpRecord.
__device__ void fillRecord(REAL *jumpRecord,REAL fillVal,int N) {
    int found = 0;
    int n = 0;
    while ((found == 0) && (n < N)) {
        if(jumpRecord[n] == 999) {
            found = 1;
            jumpRecord[n] = fillVal;
        }
        n++;
    }
}

//calculates which direction the electron went and how far (not necessary if you are not measuring anything)
__global__ void interaction(parameters p,int x,int y,int newx,int newy,REAL *particles,REAL *jumpRecord,REAL *boxR,int tStep) {
    int N = p.N,obsx,obsy;
    int whichWay = 0;
    REAL fillVal;
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    if (idx < 1) {//only needs to be done once
        if (particles[x + y*N] == particles[newx + newy*N]) {
            if (particles[x + y*N] > 0) {
                whichWay = 1;
            }
            else {
                whichWay = -1;
            }
        }
        else if (particles[x + y*N] > particles[newx + newy*N] ) {
            whichWay = 1;
        }
        else if (particles[x + y*N] < particles[newx + newy*N]) {
            whichWay = -1;
        }
        // shift destination into the window frame centered on the source
        obsx = newx + ( p.N/2 - x);
        obsy = newy + ( p.N/2 - y);
        if (g_checkIn(obsx,obsy,p.N)) {
            // p.grabJ selects the observable: 1 = signed x-displacement
            // (current), 0 = jump distance in units of p.L
            if(p.grabJ == 1) {
                fillVal = -whichWay*(obsx-p.N/2);
            }
            if(p.grabJ == 0) {
                fillVal = boxR[x + N*y + N*N*obsx + N*N*N*obsy]/p.L;
            }
        }
        else {
            fillVal = 1e9; //ridiculous value will highlight errors
        }
// fillRecord(jumpRecord,fillVal,p.recordLength);
        simpleFill(jumpRecord,fillVal,tStep);
    }
}

//this section does the various outputs such as particle positions or general electric potential
//this one outputs how far electrons jumped
void showJump(int N,int x,int y,int newx,int newy,REAL* hereP) {
    double r,deltax,deltay;
    deltax = (x-newx);
    deltay = (y-newy);
    r = sqrt(deltax*deltax + deltay*deltay);
    cout<<r<<endl;
}

//this is for showing the electron positions
void showMove(REAL* hereP,int N) {
    int i,j;
    for ( j = 0; j < N;j++) {
        for( i = 0; i < N; i++) {
            cout<<hereP[i + N*j]<<" ";
        }
        cout<<endl;
    }
}

//sums the potentials (during relaxation this should generally decrease)
double sumEnergy(REAL* hereField,int N) {
    int i,j;
    double sum;
    sum = 0;
    for ( j = 0; j < N;j++) {
        for( i = 0; i < N; i++) {
            sum = sum + hereField[i + N*j];
        }
    }
    return sum;
}

//to double check i had no particles leaking
void countParticles(REAL* hereP, int N) {
    int i,j;
    double sum;
    sum = 0;
    for ( j = 0; j < N;j++) {
        for( i = 0; i < N; i++) {
            sum = sum + hereP[i + N*j];
        }
    }
    cout<<sum<<endl;
}

//finalizes the monte carlo algorithm by picking a site at random (weighted);
//reducedProb is the inclusive prefix-sum of the probabilities, so the thread
//whose interval brackets randomNum*total writes its index into picked[0]
__global__ void weightedWheel(parameters p, double randomNum,REAL *reducedProb, int *picked) {
    int N = p.N;
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    double pickedValue = randomNum*reducedProb[N*N -1];
    if ((idx > 0) && (idx < N*N)) {
        if ((reducedProb[idx - 1] < pickedValue) && (reducedProb[idx] > pickedValue)) {
            picked[0] = idx;
        }
    }
    if (idx == 0) {
        if (pickedValue < reducedProb[0]) {
            picked[0] =idx;
        }
    }
}

// Writes the first tIndex entries of a host array to fileName, space-separated.
// NOTE(review): fopen result is not checked; a bad path will crash on fprintf.
void printLineCPU(REAL * c_line, char *fileName) {
    int k;
    FILE *fp1;
    fp1 = fopen(fileName, "w");
    for (k = 0; k < tIndex; k++) {
        fprintf(fp1, "%lf ", c_line[k]);
    }
    fclose(fp1);
}

//print the CPU matrix to a file (values scaled by 1e9)
void printBoxCPU(REAL *c_array,int size, char * fileName) {
    int k,l;
    FILE *fp1;
// char str1[256];
// sprintf(str1, "box.txt");
    fp1 = fopen(fileName, "w");
    for (k = 0; k < size ; k++){
        for(l = 0; l < size; l++) {
            fprintf(fp1, "%lf ",1e9*c_array[k + l*size]);
        }
        fprintf(fp1,"\n");
    }
    //cleanup
    fclose(fp1);
}

// Copies a size x size GPU matrix to the host and writes it scaled by 1e100
// (for inspecting vanishingly small probabilities).
void printMagnifyGPU(REAL *g_array,int size,char * fileName) {
    REAL *c_array;
    c_array = new REAL[size*size];
    int k,l;
    cudaMemcpy(c_array,g_array,size*size*sizeof(REAL),cudaMemcpyDeviceToHost);
    FILE *fp1;
    fp1 = fopen(fileName, "w");
    for (k = 0; k < size ; k++){
        for(l = 0; l < size; l++) {
            fprintf(fp1, "%lf ",c_array[k + l*size]*1e100);
        }
        fprintf(fp1,"\n");
    }
    //cleanup
    fclose(fp1);
    delete[] c_array;
}

/*
void printBoxGPU(REAL *g_array,int size,char * fileName) {
    REAL *c_array;
    c_array = new REAL[size*size];
    int k,l;
    cudaMemcpy(c_array,g_array,size*size*sizeof(REAL),cudaMemcpyDeviceToHost);
    ofstream myfile;
    myfile.open (fileName);
    for (k = 0; k < size ; k++){
        for(l = 0; l < size; l++) {
            myfile<<c_array[k + l*size]<<" ";
        }
        myfile<<endl;
    }
    myfile.close();
    delete[] c_array;
}
*/
//print the gpu matrix to a file
void printBoxGPU(REAL *g_array,int size,char * fileName) {
    REAL *c_array;
    c_array = new REAL[size*size];
    int k,l;
    cudaMemcpy(c_array,g_array,size*size*sizeof(REAL),cudaMemcpyDeviceToHost);
    FILE *fp1;
    fp1 = fopen(fileName, "w");
    for (k = 0; k < size ; k++){
        for(l = 0; l < size; l++) {
/*
            if(c_array[k + l*size] == 0) {
                c_array[k + l*size] = 999;
            }
*/
            fprintf(fp1, "%lf ",c_array[l + k*size]); //transposed l & k since thats how octave reads it
        }
        fprintf(fp1,"\n");
    }
    //cleanup
    fclose(fp1);
    delete[] c_array;
}

// Copies an int GPU array to the host and writes one value per line.
void printIntGPU(int *g_array,int size,char * name) {//can probably overload using C++11
    int *c_array;
    c_array = new int[size];
    int k;
    cudaMemcpy(c_array,g_array,size*sizeof(int),cudaMemcpyDeviceToHost);
    FILE *fp1;
    fp1 = fopen(name, "w");
    for (k = 0; k < size ; k++){
        fprintf(fp1, "%i ",c_array[k]);
        fprintf(fp1,"\n");
    }
    //cleanup
    fclose(fp1);
    delete[] c_array;
}

//print gpu array to a file, one value per line
void printLineGPU(REAL *g_array,int size,char * name) {
    REAL *c_array;
    c_array = new REAL[size];
    int k;
    cudaMemcpy(c_array,g_array,size*sizeof(REAL),cudaMemcpyDeviceToHost);
    FILE *fp1;
    fp1 = fopen(name, "w");
    for (k = 0; k < size ; k++){
        fprintf(fp1, "%lf ",c_array[k]);
        fprintf(fp1,"\n");
    }
    //cleanup
    fclose(fp1);
    delete[] c_array;
}

/*
//print a single number to a file
void printSingle(double nimeRun,char *fileName){
    FILE *fp1;
    fp1 = fopen(fileName, "w");
    fprintf(fp1, "%lf ",timeRun);
    fclose(fp1);
}
*/

//loading previous results: reads one number per line of fileName into
//hereMatrix (matches the one-value-per-line format of printLineGPU);
//the caller must ensure hereMatrix is large enough
REAL *loadMatrix(REAL *hereMatrix,char* fileName) {
// infile.open (fileName, ifstream::in);
// REAL * buffer;
// ifstream read(fileName);
    ifstream infile(fileName);
    string line;
    int counter = 0;
    double d;
    while (getline(infile, line)) {
        istringstream iss(line);
        if (iss >> d) {
            hereMatrix[counter] = d;
            counter++;
        }
    }
    return hereMatrix;
}

//tracking really the sum of the probabilities; appends at the global
//cursor tIndex, capped at recordLength
void trackTime(REAL *timeRun, REAL sum,int recordLength) {
    if (tIndex < recordLength) { //prevent bad bad memory writing
        timeRun[tIndex] = sum;
        tIndex++;
    }
}

//second part of the heart of this code. Here the probabilities are summed and a number is picked from 0 to that number. The code then sums through the probabilities until it reaches that number. In this way, probabilities which are higher will have a larger chance of getting picked.
void particleScout(vectors &v,int x,int y, double randomNum,int blocks, int threads,parameters p) { int lastx,lasty,newx,newy; thrust::device_ptr<REAL> g_go = thrust::device_pointer_cast(v.probabilities); thrust::device_ptr<REAL> g_return = thrust::device_pointer_cast(v.reducedProb); // double sum; thrust::inclusive_scan(g_go, g_go + p.N*p.N, g_return); // in-place scan // sum = thrust::reduce(g_go, g_go + p.N*p.N); // trackTime(v.timeRun, sum,p.recordLength); weightedWheel<<<blocks,threads>>>(p, randomNum,v.reducedProb, v.picked); cudaMemcpy(v.herePicked,v.picked,sizeof(int),cudaMemcpyDeviceToHost); /* cudaMemcpy(v.hereProb,v.probabilities,p.N*p.N*sizeof(REAL),cudaMemcpyDeviceToHost); cout<<"cell "<<v.herePicked[0]<<" was picked with a weight "<<v.hereProb[v.herePicked[0]]<<" out of a total "<<sum<<endl; // printMagnifyGPU(v.reducedProb,p.N,"reduced.dat"); */ // printBoxGPU(v.reducedProb,p.N,"reduced.dat"); //printMagnifyGPU(v.probabilities,p.N,"magnified.dat"); lastx = v.herePicked[0]/p.N; lasty = v.herePicked[0]%p.N; newx = x - p.N/2 + lastx; newy = y - p.N/2 + lasty; if (c_checkIn(newx,newy,p.N)) { //cout<<x<<" "<<y<<" "<<newx<<" "<<newy<<endl; interaction<<<blocks,threads>>>(p,x,y,newx,newy,v.particles,v.jumpRecord,v.boxR,v.tStep); potSwap<<<blocks,threads>>>(p, x, y,newx,newy,p.N,v.particles,v.boxR,v.potentials); particleMove<<<blocks,threads>>>(x, y, newx,newy,p.N,v.particles); findE<<<blocks,threads>>>(p.N, v.Ematrix,v.particles,v.potentials,v.substrate); } errorAsk("particleJump"); } __global__ void Eflip(int intN, double boltzmann, REAL *tempDos,REAL *Ematrix,REAL *TField) { int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(idx<intN*intN) { tempDos[idx] = exp(-Ematrix[idx]/(boltzmann*TField[idx])); } } void findFirst(parameters p,int blocks,int threads,vectors &v) { double randomNum; Eflip<<<blocks,threads>>>(p.N, p.boltzmann, v.tempDos,v.Ematrix,v.TField); errorAsk("Eflip"); //check for error thrust::device_ptr<REAL> g_go = 
thrust::device_pointer_cast(v.tempDos);//tempDos memory being recycled thrust::device_ptr<REAL> g_return = thrust::device_pointer_cast(v.reducedProb);// also reduced prob memory thrust::inclusive_scan(g_go, g_go + p.N*p.N, g_return); // in-place scan randomNum = drand48();//place where the wheel lands weightedWheel<<<blocks,threads>>>(p, randomNum,v.reducedProb, v.picked); cudaMemcpy(v.herePicked,v.picked,sizeof(int),cudaMemcpyDeviceToHost); } void findTime(parameters p,int blocks,int threads,vectors &v) { int x,y; double totalSum,result; totalSum = 0; thrust::device_ptr<REAL> g_go = thrust::device_pointer_cast(v.probabilities); for (y = 0; y < p.N; y++) { for(x = 0;x < p.N; x++) { findProbabilities<<<blocks,threads>>>(v.KdArray,v.TField,v.probabilities,v.particles,v.potentials,v.substrate,v.boxR,v.watcher,v.tStep,x,y,p); result = thrust::reduce(g_go, g_go + p.N*p.N); totalSum += result; /* cout<<result<<endl; if ((result < -0) && (result > -10000000)) { //if ((result ==0)) { printBoxGPU(v.probabilities,p.N,p.boxName); cout<<"gets to here"<<endl; } */ } } // if (result == 0) { // printBoxGPU(v.probabilities,p.N,p.boxName); // cout<<"gets to here"<<endl; // } // cout<<totalSum<<endl; trackTime(v.sumRun, totalSum,p.recordLength); } //the particles are picked here. This is also where the system is run from. 
(find potential, find probabilities, and move particle are done here) void findJump(vectors &v,int threads,int blocks,parameters p) { int x,y; double randomNum; // printBoxGPU(v.potentials,p.N,"pot0.dat"); findTime(p,blocks,threads,v); // printBoxGPU(v.potentials,p.N,"pot1.dat"); findFirst( p, blocks,threads,v);//find the first particle according to exp(-beta) // printBoxGPU(v.potentials,p.N,"pot2.dat"); x = v.herePicked[0]%p.N; y = v.herePicked[0]/p.N; findProbabilities<<<blocks,threads>>>(v.KdArray,v.TField,v.probabilities,v.particles,v.potentials,v.substrate,v.boxR,v.watcher,v.tStep,x,y,p); errorAsk("find probabilities"); //check for error // printBoxGPU(v.potentials,p.N,"pot3.dat"); randomNum = drand48(); particleScout(v, x, y, randomNum, blocks, threads,p); // printBoxGPU(v.potentials,p.N,"pot4.dat"); } //calculate energy contribution from stacked particles __global__ void G_stackE(REAL *particles,REAL *stacked,int intN) { int i,j; double blockade = 1.97e-5; int idx = blockIdx.x*blockDim.x + threadIdx.x; i = idx/intN; j = idx%intN; if(idx < intN*intN) { if (particles[i + j*intN] > 1) { stacked[idx] = blockade; } } } //calculate energy contribution from the substrate __global__ void G_subE(REAL *substrate,REAL *particles,REAL *combined,int intN) { int idx = blockIdx.x*blockDim.x + threadIdx.x; if(idx < intN*intN) { combined[idx] = substrate[idx]*particles[idx]; } } //filling a gpu array using CPU numbers __global__ void fillSum(int index,int intN,int addSub,REAL *sumArray,REAL numToInsert) { // int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; // if(idx < 1) { REAL dSign = (REAL) addSub; sumArray[index] = dSign*numToInsert; // } } //change particle to hole (or back) __global__ void particleSwitch(int i,int j,int intN,REAL *particles) { if (particles[i + j*intN] == -1) { particles[i + j*intN]= 1; } else { particles[i + j*intN]= -1; } } //fill dos (gpu) matrix with sums (CPU) __global__ void dosPut(int i,int j,int intN,REAL *Ematrix,REAL sum) { 
Ematrix[i + j*intN] = sum; } //find the density of states at each site void G_dos(REAL * sumArray,REAL *extraArray,REAL *boxR,REAL *particles,REAL *substrate,REAL *Ematrix,REAL *potentials,int slices,int threads,int blocks,parameters ,parameters p) { int i,j,intN;//not sure about Sums intN = p.N; thrust::device_ptr<REAL> g_go = thrust::device_pointer_cast(potentials); thrust::device_ptr<REAL> sumArrayPtr = thrust::device_pointer_cast(sumArray); thrust::device_ptr<REAL> extraArrayPtr = thrust::device_pointer_cast(extraArray); REAL result; for (j = 0; j < intN; j++) { for (i = 0; i < intN; i++) { findPotential<<<blocks,threads>>>(particles,potentials, boxR,p); result = thrust::reduce(g_go, g_go + intN*intN); fillSum<<<blocks,threads>>>(0,intN,-1,sumArray,result); // fillSum<<<blocks,threads>>>(0,intN,1,sumArray,result); G_subE<<<blocks,threads>>>(substrate,particles,extraArray,intN); result = thrust::reduce(extraArrayPtr, extraArrayPtr + intN*intN); fillSum<<<blocks,threads>>>(1,intN,-1,sumArray,result); // fillSum<<<blocks,threads>>>(1,intN,1,sumArray,result); G_stackE<<<blocks,threads>>>(particles,extraArray,intN); result = thrust::reduce(extraArrayPtr, extraArrayPtr + intN*intN); fillSum<<<blocks,threads>>>(2,intN,-1,sumArray,result); // fillSum<<<blocks,threads>>>(2,intN,1,sumArray,result); particleSwitch<<<blocks,threads>>>(i,j,intN,particles); findPotential<<<blocks,threads>>>(particles,potentials, boxR,p); result = thrust::reduce(g_go, g_go + intN*intN); fillSum<<<blocks,threads>>>(3,intN,1,sumArray,result); // fillSum<<<blocks,threads>>>(3,intN,-1,sumArray,result); G_subE<<<blocks,threads>>>(substrate,particles,extraArray,intN); result = thrust::reduce(extraArrayPtr, extraArrayPtr + intN*intN); fillSum<<<blocks,threads>>>(4,intN,1,sumArray,result); // fillSum<<<blocks,threads>>>(4,intN,-1,sumArray,result); G_stackE<<<blocks,threads>>>(particles,extraArray,intN); result = thrust::reduce(extraArrayPtr, extraArrayPtr + intN*intN); 
fillSum<<<blocks,threads>>>(5,intN,1,sumArray,result); // fillSum<<<blocks,threads>>>(5,intN,-1,sumArray,result); particleSwitch<<<blocks,threads>>>(i,j,intN,particles); result = thrust::reduce(sumArrayPtr, sumArrayPtr + 6); // result = 0; dosPut<<<blocks,threads>>>(i,j,intN,Ematrix,result); } } } //random substrate is created here REAL *createSub(REAL *hereS,double muVar,int N) { int i,j; for(j = 0; j < N; j++ ) { for(i = 0; i < N; i++) { hereS[i + N*j] = drand48()*muVar*2 - muVar; // if(i > nx/2) hereS[i + ny*j] = 50000000; } } return hereS; } // creates the variation in x & y matrices REAL * createDiff(REAL * hereDiff, double var, int N) { int i,j; for(j = 0; j < N; j++) { for(i = 0; i < N; i++) { hereDiff[i + N*j] = drand48()* var*2 - var; } } return hereDiff; } //fill a matrix with 0s REAL *C_zeros(double N, REAL *A) { int idx; for (idx = 0; idx < N; idx++) { A[idx] = 0; } return A; } //creates and fills matrices REAL *C_random(double N,double nparticles,REAL *A) { int idx,idy,count,index; int randx,randy; count = 0; for (idx = 0; idx < N; idx++) { for( idy = 0; idy < N; idy++) { index = int(idy + idx*N); A[index] = -1; } } while(count < nparticles) { randx = drand48()*N; randy = drand48()*N; randx = floor(randx); randy = floor(randy); index = int(randx*N + randy); if (A[index] < 2) { A[index] = A[index] + 1; count++; } } return A; } //creates and fills matrices when filled percent > 100% REAL *C_more(double N,double nparticles,REAL *A) { int idx,idy,count,index; int randx,randy; count = 0; for (idx = 0; idx < N; idx++) { for( idy = 0; idy < N; idy++) { index = int(idy + idx*N); A[index] = 1; } } while(count < (nparticles-N*N)) { randx = drand48()*N; randy = drand48()*N; randx = floor(randx); randy = floor(randy); index = int(randx*N + randy); if (A[index] < 2) { A[index] = A[index] + 1; count++; } } return A; } //creates the "distance hyper-matrix" 1/r REAL *createR(REAL *A,REAL *diffX, REAL *diffY,double N,double L,double xi) { double 
r,doublel,doublek,deltaX,deltaY; double diffXThere,diffYThere,diffXHere,diffYHere; int i,j,k,l,intN,idx,kObs,lObs,kNew,lNew; intN = N; for (idx = 0; idx < N*N*N*N; idx++) { i = idx%(intN); j = (idx%(intN*intN) - idx%(intN))/intN; k = (idx%(intN*intN*intN) - idx%(intN*intN))/(intN*intN) ; l = (idx%(intN*intN*intN*intN) - idx%(intN*intN*intN))/(intN*intN*intN) ; /* k = idx%(intN); l = (idx%(intN*intN) - idx%(intN))/intN; i = (idx%(intN*intN*intN) - idx%(intN*intN))/(intN*intN) ; j = (idx%(intN*intN*intN*intN) - idx%(intN*intN*intN))/(intN*intN*intN) ; */ doublek = (double) k; doublel = (double) l; kNew = i + k - N/2; lNew = j + l - N/2; kObs = C_mod(kNew,N); lObs = C_mod(lNew,N); diffXHere = diffX[i + intN*j]; diffXThere = diffX[kObs + intN*lObs]; if((kNew < 0) || (kNew > N)) { // diffXHere = -diffX[i + intN*j]; diffXThere = -diffX[kObs + intN*lObs]; } diffYHere = diffY[i + intN*j]; diffYThere = diffY[kObs + intN*lObs]; if((lNew < 0) || (lNew > N)) { // diffYHere = -diffY[i + intN*j]; diffYThere = -diffY[kObs + intN*lObs]; } deltaX = diffXHere - (diffXThere + L*(doublek - N/2)); deltaY = diffYHere - (diffYThere + L*(doublel - N/2)); r = sqrt(deltaX*deltaX + deltaY*deltaY); A[idx] = r; } /* for (i = 0; i < N; i++) { for(j = 0; j < N ; j++) { cout<<A[1 + intN*1 + intN*intN*i + intN*intN*intN*j]/L<<" "; } cout<<endl; } */ //cout<<A[1 + intN*2 + intN*intN*1 + intN*intN*intN*1]/L<<endl; return A; } //create hexagonal lattice position tensor REAL *createHex(REAL *A,REAL *diffX, REAL *diffY,double N,double L,double xi) { double r,doublel,doublek,deltaX,deltaY; double diffXThere,diffYThere,diffXHere,diffYHere; int i,j,k,l,intN,idx,kObs,lObs,kNew,lNew; intN = N; for (idx = 0; idx < N*N*N*N; idx++) { i = idx%(intN); j = (idx%(intN*intN) - idx%(intN))/intN; k = (idx%(intN*intN*intN) - idx%(intN*intN))/(intN*intN) ; l = (idx%(intN*intN*intN*intN) - idx%(intN*intN*intN))/(intN*intN*intN) ; doublek = (double) k; doublel = (double) l; kNew = i + k - N/2; lNew = j + l - N/2; kObs = 
C_mod(kNew,N); lObs = C_mod(lNew,N); diffXHere = diffX[i + intN*j]; diffXThere = diffX[kObs + intN*lObs]; if((kNew < 0) || (kNew > N)) { diffXThere = -diffX[kObs + intN*lObs]; } diffYHere = diffY[i + intN*j]; diffYThere = diffY[kObs + intN*lObs]; if((lNew < 0) || (lNew > N)) { diffYThere = -diffY[kObs + intN*lObs]; } if ( (l%2)==1 ){ if (doublek < N/2) { deltaX = diffXHere - (diffXThere + L*(doublek - N/2) - L/2); } else { deltaX = diffXHere - (diffXThere + L*(doublek - N/2) + L/2); } } else { deltaX = diffXHere - (diffXThere + L*(doublek - N/2)); } deltaY = diffYHere - (diffYThere + .866*L*(doublel - N/2)); r = sqrt(deltaX*deltaX + deltaY*deltaY); A[idx] = r; } /* for (i = 0; i < N; i++) { for(j = 0; j < N ; j++) { cout<<A[1 + intN*1 + intN*intN*i + intN*intN*intN*j]<<" "; } cout<<endl; } */ return A; } //clumps all of the original electrons ( to show relaxation) REAL *C_clump(double N,double nparticles,REAL *A) { int idx; for (idx = 0;idx < N*N; idx++) { A[idx] = -1; } for (idx = 0; idx < nparticles; idx++) { A[idx] = 1; } return A; } //electrons evenly spaced out (to try to calculate average jump distances with a general electric potential) REAL *C_spread(double N,double nparticles,REAL *A) { int idx,i,j,intN; intN = (int) N; for (idx = 0;idx < N*N; idx++) { A[idx] = -1; } for (idx = 0; idx < N*N; idx++) { i = idx/intN; j = idx%intN; if((i + j)%2) { A[idx] = 1; } } return A; } //take whatever is at a and swap it with whatever is at b __global__ void particleSwap(int i,int j,int k,int l,int intN,REAL *particles) { int temp; // int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; // if (idx < 1) { temp = particles[i + j*intN]; particles[i + j*intN]= particles[k + l*intN]; particles[k + l*intN] = temp; // } } //take whatever is at a and swap it with whatever is at b __device__ void g_particleSwap(int i,int j,int k,int l,int intN,REAL *particles){ int temp; int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if (idx < 1) { temp = 
particles[i + j*intN]; particles[i + j*intN]= particles[k + l*intN]; particles[k + l*intN] = temp; } } __global__ void particleMove(int i,int j,int k,int l,int N,REAL *particles) { int deltaP = G_abs(particles[k + N*l] - particles[i + N*j]); int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if (idx < 1) { if (deltaP == 0) { if (particles[i + j*N] > 0) { //particle is moving from p1 to p2 particles[i + j*N] = particles[i + j*N] - 2; particles[k + l*N] = particles[k + l*N] + 2; } else { // hole is moving from p1 to p2 (particle is moving from p2 to p1) particles[i + j*N] = particles[i + j*N] + 2; particles[k + l*N] = particles[k + l*N] - 2; } } if (deltaP == 2) { if (particles[i + j*N] > particles[k + l*N]) { particles[i + j*N] = particles[i + j*N] - 2; particles[k + l*N] = particles[k + l*N] + 2; } else { particles[i + j*N] = particles[i + j*N] + 2; particles[k + l*N] = particles[k + l*N] - 2; } } if (deltaP > 2) { if (particles[i + j*N] > particles[k + l*N]) { particles[i + j*N] = particles[i + j*N] - 2; particles[k + l*N] = particles[k + l*N] + 2; } else { particles[i + j*N] = particles[i + j*N] + 2; particles[k + l*N] = particles[k + l*N] - 2; } } } } //change coordinates from observer to particle __device__ int changeCoordinates(int intN, int x1, int x2) { int modulox,newCoord; if (x2 < intN/2) { modulox = x2; } else { modulox = x2 - intN; } newCoord = intN/2 + modulox ; return newCoord; } //perform a swap of two particles and recalculate all of the values __global__ void slowSwap(parameters p,int i1,int j1,int i2, int j2,int intN, REAL* tempPar,REAL *tempPot, REAL* tempDos, REAL* particles,REAL *boxR,REAL* substrate, REAL *Ematrix, REAL *watcher,REAL *potentials) { double distance1, distance2; int xPre,yPre,x,y; int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(idx<intN*intN) { tempPar[idx] = particles[idx]; tempPot[idx] = potentials[idx]; tempDos[idx] = Ematrix[idx]; if (particles[i1 + intN*j1] != particles[i2 + intN*j2]) { 
if(particles[i1 + intN*j1] == 1) { tempPar[i1 + intN*j1] = -1; yPre = idx/intN; xPre = idx%intN; x = xPre + (intN/2 - i1);//for closed boundary dont do G_mod y = yPre + (intN/2 - j1); //instead only do if yPre + (intN/2 - j1) > 0 or < intN if(g_checkIn(x,y,intN)) { //only apply potential changes inside the system distance1 = boxR[i1 + intN*j1 + intN*intN*x + intN*intN*intN*y]; if (distance1 > 0) { tempPot[idx] = tempPot[idx] + p.changeToV/distance1; tempDos[idx] = tempPot[idx]*tempPar[idx] + substrate[idx]*tempPar[idx]; } else { // tempPot[idx] = tempPot[idx] - substrate[idx]*particles[idx]; tempDos[idx] = tempPot[idx]*tempPar[idx] + substrate[idx]*tempPar[idx]; } // probe = distance1; tempPar[i2 + intN*j2] = 1; distance2 = boxR[i2 + intN*j2 + intN*intN*x + intN*intN*intN*y]; if (distance2 > 0) { tempPot[idx] = tempPot[idx] - p.changeToV/distance2; tempDos[idx] = tempPot[idx]*tempPar[idx]+ substrate[idx]*tempPar[idx]; } else { // tempPot[idx] = tempPot[idx] + substrate[idx]*particles[idx]; tempDos[idx] = tempPot[idx]*tempPar[idx] - substrate[idx]*tempPar[idx]; } } } else { tempPar[i1 + intN*j1] = 1; // xPre = idx/intN; // yPre = idx%intN; yPre = idx/intN; xPre = idx%intN; x = xPre + (intN/2 - i1);//for closed boundary dont do G_mod y = yPre + (intN/2 - j1); //instead only do if yPre + (intN/2 - j1) > 0 or < intN if(g_checkIn(x,y,intN)) { //only apply potential changes inside the system distance1 = boxR[i1 + intN*j1 + intN*intN*x + intN*intN*intN*y]; // distance1 = boxR[x + intN*y + intN*intN*i1 + intN*intN*intN*j1]; // watcher[idx] = boxR[i1 + intN*j1 + intN*intN*x + intN*intN*intN*y]; if (distance1 > 0) { tempPot[idx] = tempPot[idx] - p.changeToV/distance1; tempDos[idx] = tempPot[idx]*tempPar[idx] + substrate[idx]*tempPar[idx]; } else { // tempPot[idx] = tempPot[idx] + substrate[idx]*particles[idx]; tempDos[idx] = tempPot[idx]*tempPar[idx] + substrate[idx]*tempPar[idx]; // watcher[idx] = tempPot[idx]*tempPar[idx] ; } tempPar[i2 + intN*j2] = -1; x = (int) 
G_mod(xPre + ( intN/2 - i2),intN); y = (int) G_mod(yPre + (intN/2 - j2),intN); distance2 = boxR[i2 + intN*j2 + intN*intN*x + intN*intN*intN*y]; if (distance2 > 0) { tempPot[idx] = tempPot[idx] + p.changeToV/distance2; tempDos[idx] = tempPot[idx]*tempPar[idx] + substrate[idx]*tempPar[idx]; } else { // tempPot[idx] = tempPot[idx] - substrate[idx]*particles[idx]; tempDos[idx] = tempPot[idx]*tempPar[idx] + substrate[idx]*tempPar[idx]; // watcher[idx] = tempPot[idx]*tempPar[idx] ; } } // watcher[idx] = substrate[idx]*tempPar[idx]; } // tempDos[idx] = probe; } else { tempDos[idx] = Ematrix[idx]; } } } //calculate substrate contribution to syatem energy __global__ void subAdd(int intN, REAL *particles,REAL *potentials,REAL *substrate){ int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(idx<intN*intN) { potentials[idx] = potentials[idx] + substrate[idx]*particles[idx]; } } //calculate electrical potential contribution to system energy __global__ void potAdd(parameters p,int i1, int j1, int intN,REAL *particles, REAL *potentials, REAL *boxR){ int x,y; int xPre,yPre; REAL distance1; int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(idx<intN*intN) { //for the -0.5,0.5 system if (particles[i1 + intN*j1] == 1) { yPre = idx/intN;//if it works xPre = idx%intN;//it works x = xPre + (intN/2 - i1); y = yPre + (intN/2 - j1); if(g_checkIn(x,y,intN) ) { distance1 = boxR[i1 + intN*j1 + intN*intN*x + intN*intN*intN*y]; if (distance1 > 0) { potentials[idx] = potentials[idx] -.5*p.changeToV/distance1; // .5 since Im coming from neutral } } } if (particles[i1 + intN*j1] == -1) { yPre = idx/intN;//if it works xPre = idx%intN;//it works x = xPre + (intN/2 - i1); y = yPre + (intN/2 - j1); if(g_checkIn(x,y,intN) ) { distance1 = boxR[i1 + intN*j1 + intN*intN*x + intN*intN*intN*y]; if (distance1 > 0) { potentials[idx] = potentials[idx] + .5*p.changeToV/distance1; } } } /* //for the -1,0,1 system , maybe it doesnt matter yPre = idx/intN;//if it works xPre = 
idx%intN;//it works x = xPre + (intN/2 - i1); y = yPre + (intN/2 - j1); if(g_checkIn(x,y,intN) ) { distance1 = boxR[i1 + intN*j1 + intN*intN*x + intN*intN*intN*y]; if (distance1 > 0) { potentials[idx] = potentials[idx] - particles[i1 + intN*j1]*.5*p.changeToV/distance1; } } */ } } //calculate change in electric potentials when a particle is removed __global__ void potSub(parameters p,int i1, int j1, int intN,REAL *particles,REAL *boxR,REAL *potentials){ int x,y; int xPre,yPre; REAL distance1; int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(idx<intN*intN) { if (particles[i1 + intN*j1] == -1) { yPre = idx/intN;//if it works xPre = idx%intN;//it works x = xPre + (intN/2 - i1); y = yPre + (intN/2 - j1); if(g_checkIn(x,y,intN) ) { distance1 = boxR[i1 + intN*j1 + intN*intN*x + intN*intN*intN*y]; if (distance1 > 0) { potentials[idx] = potentials[idx] + p.changeToV/distance1; } } } if (particles[i1 + intN*j1] == 1) { yPre = idx/intN;//if it works xPre = idx%intN;//it works x = xPre + (intN/2 - i1); y = yPre + (intN/2 - j1); if(g_checkIn(x,y,intN) ) { distance1 = boxR[i1 + intN*j1 + intN*intN*x + intN*intN*intN*y]; if (distance1 > 0) { potentials[idx] = potentials[idx] - p.changeToV/distance1; } } } } } //calculate change in potential energy __global__ void potChange(parameters p,int i1, int j1, int intN,REAL *particles,REAL *boxR,REAL *potentials,REAL* Ematrix){ int x,y; int xPre,yPre; REAL distance1; int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(idx<intN*intN) { if (particles[i1 + intN*j1] == 1) { particles[i1 + intN*j1] = 0; yPre = idx/intN;//if it works xPre = idx%intN;//it works x = xPre + (intN/2 - i1);//for closed boundary dont do G_mod y = yPre + (intN/2 - j1); //instead only do if yPre + (intN/2 - j1) > 0 or < intN if(g_checkIn(x,y,intN)) { //only apply potential changes inside the system distance1 = boxR[i1 + intN*j1 + intN*intN*x + intN*intN*intN*y]; if (distance1 > 0) { potentials[idx] = potentials[idx] + 
p.changeToV/distance1; // potentials[idx] = 999; } } if(particles[idx] == 0) {//empty cells count as positive potential Ematrix[idx] = potentials[idx]; // if (distance1 == 0) { // Ematrix[idx] = -potentials[idx]; // } } else { Ematrix[idx] = -potentials[idx]; // if (distance1 == 0) { // Ematrix[idx] = potentials[idx]; // } } } else { particles[i1 + intN*j1] = 1; yPre = idx/intN;//if it works xPre = idx%intN;//it works x = xPre + (intN/2 - i1);//for closed boundary dont do G_mod y = yPre + (intN/2 - j1); //instead only do if yPre + (intN/2 - j1) > 0 or < intN if(g_checkIn(x,y,intN)) { //only apply potential changes inside the system distance1 = boxR[i1 + intN*j1 + intN*intN*x + intN*intN*intN*y]; if (distance1 > 0) { potentials[idx] = potentials[idx] - p.changeToV/distance1; } } if(particles[idx] == 0) { Ematrix[idx] = potentials[idx]; // if (distance1 == 0) { // Ematrix[idx] = -potentials[idx]; // } } else { Ematrix[idx] = -potentials[idx]; // if (distance1 == 0) { // Ematrix[idx] = potentials[idx]; // } } } } } //combine potential energies and substrate energies to find total energies __global__ void findE(int intN, REAL *Ematrix, REAL *particles, REAL *potentials, REAL *substrate) { int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(idx<intN*intN) { Ematrix[idx] = particles[idx]*potentials[idx] + particles[idx]*substrate[idx]; // Ematrix[idx] = particles[idx]*potentials[idx]; // Ematrix[idx] =particles[idx]*substrate[idx]; } } //change the density of states from absolute value contribution to reflect removing and adding of a particle __global__ void dosChange(int intN, REAL *particles,REAL *Ematrix,REAL *potentials) { int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(idx<intN*intN) { if(particles[idx] == -1) { Ematrix[idx] = potentials[idx]; } else { Ematrix[idx] = -potentials[idx]; } } } //place particles __global__ void particleDrop(int intN,int i ,int j,int newParticle,REAL *particles){ particles[i + intN*j] = newParticle; 
} //find the potentials after a swap of positions //generalized for deltaP>0 __global__ void potSwap(parameters p,int i1, int j1, int i2, int j2,int intN,REAL *particles,REAL *boxR,REAL *potentials){ int x,y; int xPre,yPre; REAL distance1,distance2; // REAL before,after; int deltaP = G_abs(particles[i2 + intN*j2] - particles[i1 + intN*j1]); int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(idx<intN*intN) { // before = Ematrix[idx]; if (deltaP == 0) { //if they are the same, then something has stacked p1 either started positive and became more positive or it started negative and became more negative yPre = idx/intN;//if it works xPre = idx%intN;//it works x = xPre + (intN/2 - i1);//for closed boundary dont do G_mod y = yPre + (intN/2 - j1); //instead only do if yPre + (intN/2 - j1) > 0 or < intN if(g_checkIn(x,y,intN)) { //only apply potential changes inside the system distance1 = boxR[i1 + intN*j1 + intN*intN*x + intN*intN*intN*y]; // first I do the change at the first position if (distance1 > 0) { // if (particles[i1 + intN*j1] < 0) { if (particles[i1 + intN*j1] > 0) { potentials[idx] = potentials[idx] + p.changeToV/distance1; } else { potentials[idx] = potentials[idx] - p.changeToV/distance1; } } } x = xPre + (intN/2 - i2);//for closed boundary dont do G_mod y = yPre + (intN/2 - j2); //instead only do if yPre + (intN/2 - j1) > 0 or < intN if(g_checkIn(x,y,intN)) { //only apply potential changes inside the system distance2 = boxR[i2 + intN*j2 + intN*intN*x + intN*intN*intN*y]; if (distance2 > 0) { // if (particles[i1 + intN*j1] > 0) { //might be the other way if (particles[i1 + intN*j1] <= 0) { //might be the other way potentials[idx] = potentials[idx] + p.changeToV/distance2; } else { potentials[idx] = potentials[idx] - p.changeToV/distance2; } } } } if ((deltaP == 2) || (deltaP > 2)) { //if they are 1 change away from each other (a particle is traveling without blockade) // the last line could be written more simply, but this way shows that 
both scenarios act the same way //system is relaxing, kind of like the opposite of stacking, except this time it can go either way (but I only have to calculate the one way that it is going) yPre = idx/intN;//if it works xPre = idx%intN;//it works x = xPre + (intN/2 - i1);//for closed boundary dont do G_mod y = yPre + (intN/2 - j1); //instead only do if yPre + (intN/2 - j1) > 0 or < intN if(g_checkIn(x,y,intN)) { //only apply potential changes inside the system distance1 = boxR[i1 + intN*j1 + intN*intN*x + intN*intN*intN*y]; if (distance1 > 0) { if (particles[i1 + intN*j1] > particles[i2 + intN*j2]) { potentials[idx] = potentials[idx] + p.changeToV/distance1; } else { potentials[idx] = potentials[idx] - p.changeToV/distance1; } } } x = xPre + (intN/2 - i2);//for closed boundary dont do G_mod y = yPre + (intN/2 - j2); //instead only do if yPre + (intN/2 - j1) > 0 or < intN if(g_checkIn(x,y,intN)) { //only apply potential changes inside the system distance2 = boxR[i2 + intN*j2 + intN*intN*x + intN*intN*intN*y];//might be the other way if (distance2 > 0) { if (particles[i2 + intN*j2] > particles[i1 + intN*j1]) { potentials[idx] = potentials[idx] + p.changeToV/distance2; } else { potentials[idx] = potentials[idx] - p.changeToV/distance2; } } } } } } //force a particle to a certain place void C_particleForce(vectors &v,int intN, int i1, int j1,int i2,int j2,int threads, int blocks,parameters p) { potSwap<<<blocks,threads>>>(p,i1, j1, i2, j2,intN,v.particles,v.boxR,v.potentials); particleSwap<<<blocks,threads>>>(i1, j1, i2,j2,intN,v.particles); findE<<<blocks,threads>>>(intN, v.Ematrix,v.particles,v.potentials,v.substrate); } int c_checkIn(int i, int j, int N) { int in = 0; if (i > 0 && i < N && j > 0 && j < N) { in = 1; } return in; } __device__ int g_checkIn(int i, int j, int N) { int in = 0; if (i > 0 && i < N && j > 0 && j < N) { in = 1; } return in; } //pick which site would result in a decrease of system energy (energies are negative, so this looks for the highest 
(closest to zero) value) void C_particlePick(vectors &v,int intN, int i, int j,int threads, int blocks,parameters p) { if ((v.results[0] < v.results[1] ) ||(v.results[0] < v.results[2] ) ||(v.results[0] < v.results[3] ) ||(v.results[0] < v.results[4] ) ) { int iPrev,jPrev,iPost,jPost; iPrev = C_mod(i - 1,intN); jPrev = C_mod(j - 1,intN); iPost = C_mod(i + 1,intN); jPost = C_mod(j + 1,intN); if ((v.results[1] > v.results[2] ) &&(v.results[1] > v.results[3] ) &&(v.results[1] > v.results[4] ) ) { potSwap<<<blocks,threads>>>(p, i, j, iPrev,j,intN,v.particles,v.boxR,v.potentials); particleSwap<<<blocks,threads>>>(i, j, iPrev,j,intN,v.particles); findE<<<blocks,threads>>>(intN, v.Ematrix,v.particles,v.potentials,v.substrate); } else if ((v.results[2] > v.results[3] ) &&(v.results[2] > v.results[4] )) { potSwap<<<blocks,threads>>>(p, i, j, i,jPrev,intN,v.particles,v.boxR,v.potentials); particleSwap<<<blocks,threads>>>(i, j, i,jPrev,intN,v.particles); findE<<<blocks,threads>>>(intN, v.Ematrix,v.particles,v.potentials,v.substrate); } else if (v.results[3] > v.results[4]) { potSwap<<<blocks,threads>>>(p, i, j, iPost,j,intN,v.particles,v.boxR,v.potentials); particleSwap<<<blocks,threads>>>(i, j, iPost,j,intN,v.particles); findE<<<blocks,threads>>>(intN, v.Ematrix,v.particles,v.potentials,v.substrate); } else { potSwap<<<blocks,threads>>>(p, i, j, i,jPost,intN,v.particles,v.boxR,v.potentials); particleSwap<<<blocks,threads>>>(i, j, i,jPost,intN,v.particles); findE<<<blocks,threads>>>(intN, v.Ematrix,v.particles,v.potentials,v.substrate); } } } //compare a site with its neighbors to find minimal system energy void fastTest(vectors &v,int i, int j, int intN,int threads, int blocks,parameters p) { int iPrev,jPrev,iPost,jPost; iPrev = C_mod(i - 1,intN); jPrev = C_mod(j - 1,intN); iPost = C_mod(i + 1,intN); jPost = C_mod(j + 1,intN); REAL result; thrust::device_ptr<REAL> p_tempDos = thrust::device_pointer_cast(v.tempDos); thrust::device_ptr<REAL> p_Ematrix = 
thrust::device_pointer_cast(v.Ematrix); result = thrust::reduce(p_Ematrix, p_Ematrix + intN*intN); v.results[0] = result; if (c_checkIn(iPrev,j,intN)) { slowSwap<<<blocks,threads>>>(p, i,j, iPrev, j, intN, v.tempPar,v.tempPot, v.tempDos, v.particles,v.boxR, v.substrate,v.Ematrix, v.watcher,v.potentials); result = thrust::reduce(p_tempDos, p_tempDos + intN*intN); v.results[1] = result; } else { v.results[1] = -1e50; //large negative number so that it's not picked } if (c_checkIn(i,jPrev,intN)) { slowSwap<<<blocks,threads>>>(p, i,j, i, jPrev, intN, v.tempPar,v.tempPot, v.tempDos, v.particles,v.boxR, v.substrate,v.Ematrix, v.watcher,v.potentials); result = thrust::reduce(p_tempDos, p_tempDos + intN*intN); v.results[2] = result; } else { v.results[2] = -1e50; //large negative number so that it's not picked } if (c_checkIn(iPost,j,intN)) { slowSwap<<<blocks,threads>>>(p, i,j, iPost, j, intN, v.tempPar,v.tempPot, v.tempDos, v.particles,v.boxR, v.substrate,v.Ematrix, v.watcher,v.potentials); result = thrust::reduce(p_tempDos, p_tempDos + intN*intN); v.results[3] = result; } else { v.results[3] = -1e50; //large negative number so that it's not picked } if (c_checkIn(i,jPost,intN)) { slowSwap<<<blocks,threads>>>(p, i,j, i, jPost, intN, v.tempPar,v.tempPot, v.tempDos, v.particles,v.boxR, v.substrate,v.Ematrix, v.watcher,v.potentials); result = thrust::reduce(p_tempDos, p_tempDos + intN*intN); v.results[4] = result; } else { v.results[4] = -1e50; //large negative number so that it's not picked } C_particlePick(v, intN, i, j, threads, blocks,p); } //when a particle is moved far away, there may be a cascade of changes as the particles around it accomodate. 
This checks for that in a spiral manner void spiral(parameters p,int index,int blocks, int threads,vectors &v) { int nLevels,xStart,yStart,ringLevel,ringLength,xNow,yNow,xCount,yCount; nLevels = 5; int intN = p.N; xStart = index%intN; yStart = index/intN; for (ringLevel = 1; ringLevel < nLevels; ringLevel++) { ringLength = ringLevel * 2 +1; xNow = xStart + ringLevel; yNow = yStart + ringLevel; for (xCount = 1; xCount < ringLength; xCount++) { xNow = xNow - 1; yNow = yNow; if(c_checkIn(xNow,yNow,intN)) { fastTest(v, xNow, yNow, intN, threads, blocks,p); } } for (yCount = 1; yCount < ringLength; yCount++) { xNow = xNow; yNow = yNow - 1; if(c_checkIn(xNow,yNow,intN)) { fastTest(v, xNow, yNow, intN, threads, blocks,p); } } for (xCount = 1; xCount < ringLength; xCount++) { xNow = xNow + 1; yNow = yNow; if(c_checkIn(xNow,yNow,intN)) { fastTest(v, xNow, yNow, intN, threads, blocks,p); } } for (yCount = 1; yCount < ringLength; yCount++) { xNow = xNow; yNow = yNow + 1; if(c_checkIn(xNow,yNow,intN)) { fastTest(v, xNow, yNow, intN, threads, blocks,p); } } } } //see if youre close enough to a change in particles to be updated __global__ void checkRange(int index,REAL *rangeMatrix,int intN) { int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx < intN*intN) { int i,j,k,l; double di,dj,dk,dl,r,dx,dy; i = index/intN; j = index%intN; k = idx/intN; l = idx%intN; dk = (double) k; dl = (double) l; di = (double) i; dj = (double) j; dx = dk - di; dy = dl - dj; r = sqrt(dx*dx + dy*dy); rangeMatrix[idx] = 0; if (r < 10) { rangeMatrix[idx] = 1; } } } int updateMinMax(vectors &v, int c_stable, REAL min_value, REAL max_value) {//apparently the loop can be 4 switches long (hopefully it doesnt go further) if(v.min1 == min_value && v.max1 == max_value) { c_stable = 1; } if(v.min2 == min_value && v.max2 == max_value) { c_stable = 1; } if(v.min3 == min_value && v.max3 == max_value) { c_stable = 1; } if(v.min4 == min_value && v.max4 == max_value) { c_stable = 1; } v.min4 = v.min3; v.min3 = 
v.min2; v.min2 = v.min1; v.min1 = min_value; v.max4 = v.max3; v.max3 = v.max2; v.max2 = v.max1; v.max1 = max_value; return c_stable; } //see if the system has reached a local minimum int checkStable(vectors &v,int c_stable,REAL min_value,REAL max_value,int min_offset,int max_offset,int intN,int blocks,int threads,parameters p){ int i1,i2,j1,j2; c_stable = updateMinMax(v, c_stable, min_value,max_value); if (c_stable == 0) { i1 = min_offset%intN; j1 = min_offset/intN; i2 = max_offset%intN; j2 = max_offset/intN; potSwap<<<blocks,threads>>>(p,i1, j1, i2, j2,intN,v.particles,v.boxR,v.potentials); particleSwap<<<blocks,threads>>>(i1, j1, i2,j2,intN,v.particles); findE<<<blocks,threads>>>(intN, v.Ematrix,v.particles,v.potentials,v.substrate); } return c_stable; } //move a particle from high system energy to low system energy int highsToLows(vectors &v,int max_offset,int min_offset,REAL max_value,REAL min_value,int c_stable, int blocks,int threads,parameters p) { c_stable = checkStable(v, c_stable, min_value, max_value, min_offset,max_offset,p.N, blocks,threads,p); if (c_stable == 0) { spiral(p,max_offset, blocks, threads,v); spiral(p,min_offset, blocks, threads,v); } return c_stable; } //grab only the positive values of a matrix ( for using the thrust function) __global__ void grabPositives(REAL* particles,REAL *extraArray,REAL* Ematrix,int N,int posNeg) { int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if (idx < N*N) { extraArray[idx] = 0; if(particles[idx] == posNeg) { extraArray[idx] = Ematrix[idx]; } } } //reflect the fact that holes are filled and full sites are emptied void __global__ lastFlip(int intN,REAL *invertedDos,REAL *particles) { int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx < intN*intN) { if (particles[idx] == -1) { invertedDos[idx] = -invertedDos[idx]; } } } //calculate dos by removing the particle and seeing what effect this has on the system void dosInvert (parameters p,int intN,int threads,int blocks,vectors &v) {//should 
work for nParticles > 1 int i,j; double result1,result2; thrust::device_ptr<REAL> g_go = thrust::device_pointer_cast(v.tempDos); for(j = 0; j < intN; j++) { for (i = 0; i < intN; i++) { //i = 20; //j = 20; matrixCopy<<<blocks,threads>>>(intN, v.potentials ,v.tempPot); matrixCopy<<<blocks,threads>>>(intN, v.particles , v.tempPar); matrixCopy<<<blocks,threads>>>(intN, v.Ematrix ,v.tempDos); potChange<<<blocks,threads>>>(p,i, j, intN,v.tempPar,v.boxR,v.tempPot,v.tempDos); // dosChange<<<blocks,threads>>>(intN, tempPar,tempDos,tempPot); result1 = thrust::reduce(g_go, g_go + intN*intN); potChange<<<blocks,threads>>>(p,i, j, intN,v.tempPar,v.boxR,v.tempPot,v.tempDos); // dosChange<<<blocks,threads>>>(intN, tempPar,tempDos,tempPot); result2 = thrust::reduce(g_go, g_go + intN*intN); dosPut<<<blocks,threads>>>( i, j,intN,v.invertedDos, result2 - result1); } } //lastFlip<<<blocks,threads>>>(intN,invertedDos,tempPar); } //do the half of the Glatz algorithm which uses a density of states map to find which particle switching is optimal void switcharoo(vectors &v,int c_stable,int threads, int blocks,parameters p) { int intN = p.N; int min_offset,max_offset; REAL min_value,max_value; thrust::device_ptr<REAL> g_ptr = thrust::device_pointer_cast(v.Ematrix); thrust::device_ptr<REAL> inverted_ptr = thrust::device_pointer_cast(v.extraArray); while (c_stable == 0) { grabPositives<<<blocks,threads>>>(v.particles,v.extraArray,v.Ematrix,intN,1); min_offset = thrust::min_element(inverted_ptr, inverted_ptr + intN*intN) - inverted_ptr; min_value = *(inverted_ptr + min_offset); grabPositives<<<blocks,threads>>>(v.particles,v.extraArray,v.Ematrix,intN,-1); max_offset = thrust::min_element(inverted_ptr, inverted_ptr + intN*intN) - inverted_ptr; //grabbing the smallest positive number max_value = *(inverted_ptr + max_offset); // max_offset = thrust::max_element(inverted_ptr, inverted_ptr + intN*intN) - inverted_ptr; // max_value = *(inverted_ptr + max_offset); // potentialse = *(g_ptr + 
max_offset); //cout<<min_value<<" "<<max_value<<endl; c_stable = highsToLows(v, max_offset,min_offset, max_value,min_value,c_stable, blocks,threads,p); } } //2 step relaxation algorithm developed by Glatz void glatzRelax(int threads,int blocks,parameters p, vectors v) { int i,j,intN; intN = p.N; int N = p.N; int c_stable = 0; /* for(j = 0; j < intN; j++) { for(i = 0; i < intN; i++) { potAdd<<<blocks,threads>>>( i, j, intN, v.particles, v.potentials, v.boxR); } } findE<<<blocks,threads>>>(intN, v.Ematrix,v.particles,v.potentials,v.substrate); */ for (int t = 0; t < 1; t++) { //original pair exchange for(j = 0; j < N; j++) { for(i = 0; i < N; i++) { fastTest(v, i, j, intN, threads, blocks,p); } } } //highs to lows switcharoo(v,c_stable,threads, blocks,p); errorAsk("switching highs to lows"); dosInvert (p, intN,threads,blocks,v); cudaFree(v.extraArray); cudaFree(v.rangeMatrix); } //initialize jump matrix __global__ void jumpFill(REAL* jumpRecord,int N) { int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if (idx < N) { jumpRecord[idx] = 999; } } //find minimum of 4 values __device__ REAL findMin(REAL d1, REAL d2, REAL d3, REAL d4) { if (d1 < d2 && d1 < d3 && d1 < d4) { return d1; } if (d2 < d1 && d2 < d3 && d2 < d4) { return d2; } if (d3 < d1 && d3 < d2 && d3 < d4) { return d3; } if (d4 < d1 && d4 < d2 && d4 < d3) { return d4; } return d1; //default } //create matrix which finds individual granule radii(a) by linking it to distance between granules __global__ void aMaker(REAL *aMatrix,REAL *boxR,int N) { int i,j,iPrev,jPrev,iPost,jPost; REAL distanceUp,distanceDown,distanceLeft,distanceRight, minDistance; int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if (idx < N*N) { i = idx%(N); j = (idx%(N*N) - idx%(N))/N; iPrev = G_mod(i - 1,N); jPrev = G_mod(j - 1,N); iPost = G_mod(i + 1,N); jPost = G_mod(j + 1,N); distanceUp = boxR[i + N*j + N*N*i + N*N*N*jPost]; distanceDown = boxR[i + N*j + N*N*i + N*N*N*jPrev]; distanceLeft = boxR[i + N*j + 
N*N*iPrev + N*N*N*j]; distanceRight = boxR[i + N*j + N*N*iPost + N*N*N*j]; minDistance = findMin(distanceUp,distanceDown,distanceLeft,distanceRight); aMatrix[idx] = minDistance/2; } } //substrate contribution from the granule size __global__ void subCombine(REAL *aMatrix,REAL *substrate,REAL L, int N) { int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if (idx < N*N) { substrate[idx] = substrate[idx]*aMatrix[idx]/L; } } __global__ void tempGradient(REAL *TField,double lowTemp,double highTemp,int N) { int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; int i,j; double doubleI; if (idx < N*N) { i = idx/N; j = idx%N; doubleI = (double) i; TField[i + N*j] = lowTemp + doubleI*(highTemp-lowTemp)/N; } } __global__ void noGradient(REAL *TField,double temperature, int N) { int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if (idx < N*N) { TField[idx] = temperature; } } __global__ void createKd(REAL *KdArray,double highKd,int KdFrequency,int N){ int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if (idx < N*N) { if (idx%KdFrequency == 0){ KdArray[idx] = 4/highKd; //since kd is the denominator } else { KdArray[idx] = 1; } } } //load parameters void paramLoad(parameters &p, char *argv[]){ sprintf(p.lineName, "line.txt"); sprintf(p.boxName, "box.txt"); sprintf(p.timeName,"time.txt"); // N = 32; p.N = 30; //size of system (N x N) // N = 256; p.muVar = 0; // randomness of substrate (site energy?) 
-muvar to muvar // muVar = 1e-5; // p.boltzmann = 1.38e-23; p.boltzmann = .01;//test // p.changeToV = 1;//test p.changeToV = 3.6e-10; // Ke*Q/Kd // eV = .05; p.eV = 0; //voltage (arbitrary units for now) p.Ec = 1600; //penalty for double-stacking // Ec = 1.6e-5; // Ec = 1; // T = 1; p.alphaOne = 1; // technically combined with density of states (not used at the moment) // p.alphaTwo = 1; // test p.alphaTwo = 1.16e4; //C/Kb (for converting to unitless) p.T = 25; //temperature // nParticles = input; p.nParticles = .5*p.N*p.N; //number of particles // nParticles = 1; // L = 7e-6; p.L = 1e-8; //10 nm average inter-granule spacing // tSteps = 1000000; //for statistically accurate runs (timesteps) // tSteps = 10000; //for potential runs p.tSteps = 1; // for seeing the fields // tSteps = 0; // relax = 1; p.relax = 0; //wether or not to relax the system before running (should be 0 iff muVar & xyVar = 0) p.grabJ=0; //0 grabs average jumping distance , 1 grabs current p.whichBox=1; p.xi = p.L; //tunneling factor p.xVar = 0; //variance of lattice site in x direction p.yVar = 0; // typically = xVar p.rejection = 0; //default no rejection p.xMoment = 0; p.KdFrequency=2; p.highKd=4; int intVal; REAL realVal; ifstream is_file(argv[1]); string line; //this part loads some of the variables while( getline(is_file, line) ) { istringstream is_line(line); string key; if( getline(is_line, key, '=') ) { string value; if( getline(is_line, value) ) // store_line(key, value); if(key == "Temp") { realVal = atof(value.c_str()); p.T = realVal; } if(key == "muVar") { realVal = atof(value.c_str()); p.muVar = realVal; } if(key == "XYvar") { realVal = atof(value.c_str()); p.xVar = realVal*p.L; p.yVar = realVal*p.L; } if(key == "tSteps") { intVal = atoi(value.c_str()); p.tSteps = intVal; } if(key == "L") { realVal = atof(value.c_str()); p.L = realVal; } if(key == "eV") { realVal = atof(value.c_str()); p.eV = realVal; } if(key == "relax") { intVal = atoi(value.c_str()); p.relax = intVal; } if(key == 
"grabJ") { intVal = atoi(value.c_str()); p.grabJ = intVal; } if(key == "whichBox") { intVal = atoi(value.c_str()); p.whichBox = intVal; } if(key == "lineName") { sprintf(p.lineName, value.c_str()); // lineName = value.c_str(); } if(key == "boxName") { sprintf(p.boxName, value.c_str()); // boxName = value.c_str(); } if(key == "timeName") { sprintf(p.timeName, value.c_str()); } if(key == "rejection") { realVal = atof(value.c_str()); p.rejection = realVal; } if(key == "Ec") { realVal = atof(value.c_str()); p.Ec = realVal; } if(key == "highKd") { realVal = atof(value.c_str()); p.highKd = realVal; } if(key == "KdFrequency") { realVal = atof(value.c_str()); p.KdFrequency = realVal; } } } p.recordLength = p.tSteps; } //load arrays void vectorLoad(vectors &v,parameters p,int blocks, int threads){ int N = p.N; cudaMalloc(&v.KdArray,N*N*sizeof(REAL)); cudaMalloc(&v.watcher,N*N*sizeof(REAL)); cudaMalloc(&v.reducedProb,N*N*sizeof(REAL)); cudaMalloc(&v.particles,N*N*sizeof(REAL)); cudaMalloc(&v.probabilities,N*N*sizeof(REAL)); cudaMalloc(&v.potentials,N*N*sizeof(REAL)); cudaMalloc(&v.substrate,N*N*sizeof(REAL)); cudaMalloc(&v.Ematrix,N*N*sizeof(REAL)); cudaMalloc(&v.tempDos,N*N*sizeof(REAL)); cudaMalloc(&v.tempPar,N*N*sizeof(REAL)); cudaMalloc(&v.tempPot,N*N*sizeof(REAL)); cudaMalloc(&v.invertedDos,N*N*sizeof(REAL)); cudaMalloc(&v.jumpRecord,p.recordLength*sizeof(REAL)); cudaMalloc(&v.aMatrix,N*N*sizeof(REAL)); cudaMalloc(&v.boxR,N*N*N*N*sizeof(REAL)); cudaMalloc(&v.picked,sizeof(int)); v.herePicked = new int[1]; v.herePicked[0] = 0; v.timeRun = new REAL[p.recordLength]; v.sumRun = new REAL[p.recordLength]; v.herePot = new REAL[N*N]; v.herePot = C_zeros(N, v.herePot); v.hereProb = new REAL[N*N]; v.hereProb = C_random(N,0,v.hereProb); v.hereP = new REAL[N*N]; // v.hereP = C_clump(p.N,p.nParticles,v.hereP);//test relaxation v.hereP = C_spread(N,p.nParticles,v.hereP); //test general potential // v.hereP = C_zeros(p.N,v.hereP); //zeros for the true neutral map (-2,0,2 ...etc) // 
hereP = C_random(N,nParticles,hereP); // hereP = C_random(N,0,hereP); //empty system // hereP = C_more(N,nParticles,hereP); v.hereXDiff = new REAL[N*N]; v.hereYDiff = new REAL[N*N]; v.hereXDiff = createDiff(v.hereXDiff, p.xVar, N); v.hereYDiff = createDiff(v.hereYDiff, p.yVar, N); v.hereS = new REAL[N*N]; v.hereS = createSub(v.hereS,p.muVar,N); v.hereBoxR = new REAL[N*N*N*N]; v.hereBoxR = createR(v.hereBoxR,v.hereXDiff,v.hereYDiff,N,p.L,p.xi); // hereBoxR = createHex(hereBoxR,hereXDiff,hereYDiff,N,L,xi); cudaMemcpy(v.watcher,v.herePot,N*N*sizeof(REAL),cudaMemcpyHostToDevice); cudaMemcpy(v.potentials,v.herePot,N*N*sizeof(REAL),cudaMemcpyHostToDevice); cudaMemcpy(v.Ematrix,v.herePot,N*N*sizeof(REAL),cudaMemcpyHostToDevice);//just filling it with 0s cudaMemcpy(v.substrate,v.hereS,N*N*sizeof(REAL),cudaMemcpyHostToDevice); cudaMemcpy(v.boxR,v.hereBoxR,N*N*N*N*sizeof(REAL),cudaMemcpyHostToDevice); cudaMemcpy(v.particles,v.hereP,N*N*sizeof(REAL),cudaMemcpyHostToDevice); // aMaker<<<blocks,threads>>>(v.aMatrix,v.boxR,N); // subCombine<<<blocks,threads>>>(v.aMatrix,v.substrate, p.L, N); jumpFill<<<blocks,threads>>>(v.jumpRecord,p.recordLength); createKd<<<blocks,threads>>>(v.KdArray,p.highKd,p.KdFrequency,p.N); v.min1 = 999; v.min2 = 999; v.min3 = 999; v.min4 = 999; v.max1 = 999; v.max2 = 999; v.max3 = 999; v.max4 = 999; int sizeSum = 6; v.hereSum = new REAL[sizeSum]; v.hereSum = C_zeros(sizeSum,v.hereSum); cudaMalloc(&v.TField,N*N*sizeof(REAL)); cudaMalloc(&v.extraArray,N*N*sizeof(REAL)); cudaMalloc(&v.rangeMatrix,N*N*sizeof(REAL)); cudaMalloc(&v.sumArray,sizeSum*sizeof(REAL)); cudaMemcpy(v.sumArray,v.hereSum,sizeSum*sizeof(REAL),cudaMemcpyHostToDevice); int i,j; for(j = 0; j < p.N; j++) { for(i = 0; i < p.N; i++) { potAdd<<<blocks,threads>>>(p, i, j, p.N, v.particles, v.potentials, v.boxR); } } findE<<<blocks,threads>>>(p.N, v.Ematrix,v.particles,v.potentials,v.substrate); // noGradient<<<blocks,threads>>>(v.TField,15,p.N); 
tempGradient<<<blocks,threads>>>(v.TField,15,500,p.N); //maximum gradient // tempGradient<<<blocks,threads>>>(v.TField,30,31,p.N); //almost no gradient } double findDipole(REAL *g_array,int size) { double moment; REAL *c_array; c_array = new REAL[size*size]; int k,l; cudaMemcpy(c_array,g_array,size*size*sizeof(REAL),cudaMemcpyDeviceToHost); for (k = 0; k < size; k++) { for (l = 0; l < size; l++) { if (c_array[k + size*l] == 1) { moment += k*c_array[k + size*l]; } } } moment = 2*moment/(size*size); //half filled cells return moment; } int main(int argc,char *argv[]) { cudaDeviceReset(); cudaSetDevice(0); cudaDeviceSynchronize(); cudaThreadSynchronize(); parameters p; vectors v; // srand48(time(0)); clock_t begin = clock(); paramLoad(p,argv); int threads,blocks; int N = p.N; N = p.N; threads=MAXT; blocks=N*N/threads+(N*N%threads==0?0:1); vectorLoad(v,p,blocks, threads); /* char nameP[256]; sprintf(nameP, "line.txt"); hereP = loadMatrix(hereP,nameP); */ //system relax if (p.relax == 1) { glatzRelax(threads, blocks,p,v ); } //run simulation for(int t = 0; t < p.tSteps ; t++) { countThese = 1; v.tStep = t; findJump(v, threads, blocks,p); } //save data // sprintf(str1, "line.txt"); // printBoxCPU(hereXDiff,N,boxName); lastFlip<<<blocks,threads>>>(p.N,v.Ematrix,v.particles); switch(p.whichBox) { case 1: printBoxGPU(v.particles,p.N,p.boxName); break; case 2: printBoxGPU(v.probabilities,p.N,p.boxName); break; case 3: printBoxGPU(v.potentials,p.N,p.boxName); break; case 4: printBoxGPU(v.Ematrix,p.N,p.boxName); break; case 5: //no output box break; } printLineCPU(v.sumRun, p.timeName); printLineGPU(v.jumpRecord,p.recordLength,p.lineName); printBoxGPU(v.watcher,p.N,"watcher.dat"); p.xMoment = findDipole(v.particles,p.N); cout<<p.xMoment<<endl; /* cudaMemcpy(hereP,jumpRecord,N*N*sizeof(REAL),cudaMemcpyDeviceToHost); FILE *fp1; // char str1[256]; // sprintf(str1, "particles.txt"); fp1 = fopen(fileName, "w"); for (int k = 0; k < N*N ; k++){ fprintf(fp1, "%lf ",hereP[k]); } 
//cleanup fclose(fp1); */ delete[] v.herePicked; delete[] v.herePot; delete[] v.hereProb; delete[] v.hereP; delete[] v.hereS; delete[] v.hereBoxR; cudaFree(v.particles); cudaFree(v.probabilities); cudaFree(v.potentials); cudaFree(v.substrate); cudaFree(v.boxR); cudaFree(v.Ematrix); cudaFree(v.jumpRecord); cudaFree(v.picked); clock_t end = clock(); // double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC; //cout<<"this took "<<elapsed_secs<<" seconds"<<endl; }
4dc0bd814379c0102c5762d388b19c7351fd631d.hip
// !!! This is a file automatically generated by hipify!!! /* * ****************************************************************************** * * * * * * This program and the accompanying materials are made available under the * * terms of the Apache License, Version 2.0 which is available at * * https://www.apache.org/licenses/LICENSE-2.0. * * * * See the NOTICE file distributed with this work for additional * * information regarding copyright ownership. * * Unless required by applicable law or agreed to in writing, software * * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * * License for the specific language governing permissions and limitations * * under the License. * * * * SPDX-License-Identifier: Apache-2.0 * ***************************************************************************** */ // // @author raver119@gmail.com // #include <array/CudaPointerDeallocator.h> namespace sd { void CudaPointerDeallocator::release(void *ptr) { hipFree(ptr); } } // namespace sd
4dc0bd814379c0102c5762d388b19c7351fd631d.cu
/* * ****************************************************************************** * * * * * * This program and the accompanying materials are made available under the * * terms of the Apache License, Version 2.0 which is available at * * https://www.apache.org/licenses/LICENSE-2.0. * * * * See the NOTICE file distributed with this work for additional * * information regarding copyright ownership. * * Unless required by applicable law or agreed to in writing, software * * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * * License for the specific language governing permissions and limitations * * under the License. * * * * SPDX-License-Identifier: Apache-2.0 * ***************************************************************************** */ // // @author raver119@gmail.com // #include <array/CudaPointerDeallocator.h> namespace sd { void CudaPointerDeallocator::release(void *ptr) { cudaFree(ptr); } } // namespace sd
ab43de2657b446d0e7ebef50ab36e7090551c71b.hip
// !!! This is a file automatically generated by hipify!!! /* -- MAGMA (version 1.5.0-beta3) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date July 2014 @precisions normal z -> c d s */ #include "hip/hip_runtime.h" #include <stdio.h> #include "common_magma.h" #include "sm_32_intrinsics.h" #define PRECISION_z //#define TEXTURE /* // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zgesellptmv2d_kernel_4_ldg( int num_rows, int num_cols, int blocksize, int T, magmaDoubleComplex alpha, magmaDoubleComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, const magmaDoubleComplex* __restrict__ d_x, magmaDoubleComplex beta, magmaDoubleComplex *d_y) { #if defined(TEXTURE) && (__CUDA_ARCH__ >= 300) // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ magmaDoubleComplex shared[]; if(row < num_rows ){ magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles int kk, i1, i2; magmaDoubleComplex x1, x2, v1, v2; d_colind += offset + ldx ; d_val += offset + ldx; for ( kk = 0; kk < max_-1 ; kk+=2 ){ i1 = d_colind[ block*kk]; i2 = d_colind[ block*kk + block]; x1 = __ldg( d_x+ i1 ); x2 = __ldg( d_x+ i2 ); v1 = d_val[ block*kk ]; v2 = d_val[ block*kk + block]; dot += v1 * x1; dot += v2 * x2; } if (kk<max_){ x1 = __ldg( d_x + d_colind[ block*kk] ); v1 = d_val[ block*kk ]; dot += v1 * x1; } shared[ldx] = dot; __syncthreads(); if( 
idx < 2 ){ shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { d_y[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*d_y [row]; } } } #endif } */ // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning one thread to each row - 1D kernel __global__ void zgesellptmv2d_kernel_1( int num_rows, int num_cols, int blocksize, int T, magmaDoubleComplex alpha, magmaDoubleComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, magmaDoubleComplex* d_x, magmaDoubleComplex beta, magmaDoubleComplex *d_y) { // threads assigned to rows int Idx = blockDim.x * blockIdx.x + threadIdx.x ; int offset = d_rowptr[ blockIdx.x ]; int border = (d_rowptr[ blockIdx.x+1 ]-offset)/blocksize; if(Idx < num_rows ){ magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0); for ( int n = 0; n < border; n++){ int col = d_colind [offset+ blocksize * n + threadIdx.x ]; magmaDoubleComplex val = d_val[offset+ blocksize * n + threadIdx.x]; if( val != 0){ dot=dot+val*d_x[col]; } } d_y[ Idx ] = dot * alpha + beta * d_y [ Idx ]; } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zgesellptmv2d_kernel_4( int num_rows, int num_cols, int blocksize, int T, magmaDoubleComplex alpha, magmaDoubleComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, magmaDoubleComplex* d_x, magmaDoubleComplex beta, magmaDoubleComplex *d_y) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ magmaDoubleComplex shared[]; if(row < num_rows ){ magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles int kk, i1, i2; magmaDoubleComplex x1, x2, v1, v2; d_colind += offset + ldx ; d_val += offset + ldx; for ( kk = 0; kk < max_-1 ; kk+=2 ){ i1 = d_colind[ block*kk]; i2 = d_colind[ block*kk + block]; x1 = d_x[ i1 ]; x2 = d_x[ i2 ]; v1 = d_val[ block*kk ]; v2 = d_val[ block*kk + block]; dot += v1 * x1; dot += v2 * x2; } if (kk<max_){ x1 = d_x[ d_colind[ block*kk] ]; v1 = d_val[ block*kk ]; dot += v1 * x1; } shared[ldx] = dot; __syncthreads(); if( idx < 2 ){ shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { d_y[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*d_y [row]; } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zgesellptmv2d_kernel_8( int num_rows, int num_cols, int blocksize, int T, magmaDoubleComplex alpha, magmaDoubleComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, magmaDoubleComplex* d_x, magmaDoubleComplex beta, magmaDoubleComplex *d_y) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ magmaDoubleComplex shared[]; if(row < num_rows ){ magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles int kk, i1, i2; magmaDoubleComplex x1, x2, v1, v2; d_colind += offset + ldx ; d_val += offset + ldx; for ( kk = 0; kk < max_-1 ; kk+=2 ){ i1 = d_colind[ block*kk]; i2 = d_colind[ block*kk + block]; x1 = d_x[ i1 ]; x2 = d_x[ i2 ]; v1 = d_val[ block*kk ]; v2 = d_val[ block*kk + block]; dot += v1 * x1; dot += v2 * x2; } if (kk<max_){ x1 = d_x[ d_colind[ block*kk] ]; v1 = d_val[ block*kk ]; dot += v1 * x1; } shared[ldx] = dot; __syncthreads(); if( idx < 4 ){ shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { d_y[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*d_y [row]; } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zgesellptmv2d_kernel_16( int num_rows, int num_cols, int blocksize, int T, magmaDoubleComplex alpha, magmaDoubleComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, magmaDoubleComplex* d_x, magmaDoubleComplex beta, magmaDoubleComplex *d_y) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ magmaDoubleComplex shared[]; if(row < num_rows ){ magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ magmaDoubleComplex val = d_val[ offset + ldx + block*k ]; int col = d_colind[ offset + ldx + block*k ]; dot += val * d_x[ col ]; } shared[ldx] = dot; __syncthreads(); if( idx < 8 ){ shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { d_y[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*d_y [row]; } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zgesellptmv2d_kernel_32( int num_rows, int num_cols, int blocksize, int T, magmaDoubleComplex alpha, magmaDoubleComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, magmaDoubleComplex* d_x, magmaDoubleComplex beta, magmaDoubleComplex *d_y) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ magmaDoubleComplex shared[]; if(row < num_rows ){ magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ magmaDoubleComplex val = d_val[ offset + ldx + block*k ]; int col = d_colind[ offset + ldx + block*k ]; dot += val * d_x[ col ]; } shared[ldx] = dot; __syncthreads(); if( idx < 16 ){ shared[ldx]+=shared[ldx+blocksize*16]; __syncthreads(); if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { d_y[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*d_y [row]; } } } } /************************* same but using texture mem *************************/ #if defined(PRECISION_d) && defined(TEXTURE) __inline__ __device__ double read_from_tex( hipTextureObject_t texdx, const int& i){ int2 temp = tex1Dfetch<int2>( texdx, i ); return __hiloint2double(temp.y,temp.x); } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zgesellptmv2d_kernel_4_tex( int num_rows, int num_cols, int blocksize, int T, magmaDoubleComplex alpha, magmaDoubleComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, hipTextureObject_t texdx, magmaDoubleComplex beta, magmaDoubleComplex *d_y) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ magmaDoubleComplex shared[]; if(row < num_rows ){ magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles int kk, i1, i2; magmaDoubleComplex x1, x2, v1, v2; d_colind += offset + ldx ; d_val += offset + ldx; for ( kk = 0; kk < max_-1 ; kk+=2 ){ i1 = d_colind[ block*kk]; i2 = d_colind[ block*kk + block]; x1 = read_from_tex( texdx, i1 ); x2 = read_from_tex( texdx, i2 ); v1 = d_val[ block*kk ]; v2 = d_val[ block*kk + block]; dot += v1 * x1; dot += v2 * x2; } if (kk<max_){ x1 = read_from_tex( texdx, d_colind[ block*kk] ); v1 = d_val[ block*kk ]; dot += v1 * x1; } shared[ldx] = dot; __syncthreads(); if( idx < 2 ){ shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { d_y[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*d_y [row]; } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zgesellptmv2d_kernel_8_tex( int num_rows, int num_cols, int blocksize, int T, magmaDoubleComplex alpha, magmaDoubleComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, hipTextureObject_t texdx, magmaDoubleComplex beta, magmaDoubleComplex *d_y) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ magmaDoubleComplex shared[]; if(row < num_rows ){ magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles int kk, i1, i2; magmaDoubleComplex x1, x2, v1, v2; d_colind += offset + ldx ; d_val += offset + ldx; for ( kk = 0; kk < max_-1 ; kk+=2 ){ i1 = d_colind[ block*kk]; i2 = d_colind[ block*kk + block]; x1 = read_from_tex( texdx, i1 ); x2 = read_from_tex( texdx, i2 ); v1 = d_val[ block*kk ]; v2 = d_val[ block*kk + block]; dot += v1 * x1; dot += v2 * x2; } if (kk<max_){ x1 = read_from_tex( texdx, d_colind[ block*kk] ); v1 = d_val[ block*kk ]; dot += v1 * x1; } shared[ldx] = dot; __syncthreads(); if( idx < 4 ){ shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { d_y[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*d_y [row]; } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zgesellptmv2d_kernel_16_tex( int num_rows, int num_cols, int blocksize, int T, magmaDoubleComplex alpha, magmaDoubleComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, hipTextureObject_t texdx, magmaDoubleComplex beta, magmaDoubleComplex *d_y) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ magmaDoubleComplex shared[]; if(row < num_rows ){ magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ magmaDoubleComplex val = d_val[ offset + ldx + block*k ]; int col = d_colind[ offset + ldx + block*k ]; dot += val * read_from_tex( texdx, col ); } shared[ldx] = dot; __syncthreads(); if( idx < 8 ){ shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { d_y[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*d_y [row]; } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zgesellptmv2d_kernel_32_tex( int num_rows, int num_cols, int blocksize, int T, magmaDoubleComplex alpha, magmaDoubleComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, hipTextureObject_t texdx, magmaDoubleComplex beta, magmaDoubleComplex *d_y) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ magmaDoubleComplex shared[]; if(row < num_rows ){ magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ magmaDoubleComplex val = d_val[ offset + ldx + block*k ]; int col = d_colind[ offset + ldx + block*k ]; dot += val * read_from_tex( texdx, col ); } shared[ldx] = dot; __syncthreads(); if( idx < 16 ){ shared[ldx]+=shared[ldx+blocksize*16]; __syncthreads(); if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { d_y[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*d_y [row]; } } } } #endif /********************* end of texture versions **************************/ /** Purpose ------- This routine computes y = alpha * A^t * x + beta * y on the GPU. Input format is SELLP. 
Arguments --------- @param transA magma_trans_t transposition parameter for A @param m magma_int_t number of rows in A @param n magma_int_t number of columns in A @param blocksize magma_int_t number of rows in one ELL-slice @param slices magma_int_t number of slices in matrix @param alignment magma_int_t number of threads assigned to one row @param alpha magmaDoubleComplex scalar multiplier @param d_val magmaDoubleComplex* array containing values of A in SELLP @param d_colind magma_int_t* columnindices of A in SELLP @param d_rowptr magma_int_t* rowpointer of SELLP @param d_x magmaDoubleComplex* input vector x @param beta magmaDoubleComplex scalar multiplier @param d_y magmaDoubleComplex* input/output vector y @ingroup magmasparse_zblas ********************************************************************/ extern "C" magma_int_t magma_zgesellpmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t blocksize, magma_int_t slices, magma_int_t alignment, magmaDoubleComplex alpha, magmaDoubleComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, magmaDoubleComplex *d_x, magmaDoubleComplex beta, magmaDoubleComplex *d_y ){ // using a 2D thread grid int num_threads = blocksize*alignment; magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 && num_threads > 256 ) printf("error: too much shared memory requested.\n"); dim3 block( blocksize, alignment, 1); int dimgrid1 = sqrt(slices); int dimgrid2 = (slices + dimgrid1 -1 ) / dimgrid1; dim3 grid( dimgrid1, dimgrid2, 1); int Ms = num_threads * sizeof( magmaDoubleComplex ); #if defined(PRECISION_d) && defined(TEXTURE) // Create channel. hipChannelFormatDesc channel_desc; channel_desc = hipCreateChannelDesc(32, 32, 0, 0, hipChannelFormatKindSigned); // Create resource descriptor. 
struct hipResourceDesc resDescdx; memset(&resDescdx, 0, sizeof(resDescdx)); resDescdx.resType = hipResourceTypeLinear; resDescdx.res.linear.devPtr = (void*)d_x; resDescdx.res.linear.desc = channel_desc; resDescdx.res.linear.sizeInBytes = m*sizeof(double); // Specify texture object parameters. struct hipTextureDesc texDesc; memset(&texDesc, 0, sizeof(texDesc)); texDesc.addressMode[0] = hipAddressModeClamp; texDesc.filterMode = hipFilterModePoint; texDesc.readMode = hipReadModeElementType; // Create texture object. hipTextureObject_t texdx = 0; hipCreateTextureObject(&texdx, &resDescdx, &texDesc, NULL); hipDeviceSetSharedMemConfig(hipSharedMemBankSizeEightByte); if( alignment == 4) hipLaunchKernelGGL(( zgesellptmv2d_kernel_4_tex), dim3(grid), dim3(block), Ms, magma_stream , m, n, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, texdx, beta, d_y ); else if( alignment == 8) hipLaunchKernelGGL(( zgesellptmv2d_kernel_8_tex), dim3(grid), dim3(block), Ms, magma_stream , m, n, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, texdx, beta, d_y ); else if( alignment == 16) hipLaunchKernelGGL(( zgesellptmv2d_kernel_16_tex), dim3(grid), dim3(block), Ms, magma_stream , m, n, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, texdx, beta, d_y ); else if( alignment == 32) hipLaunchKernelGGL(( zgesellptmv2d_kernel_32_tex), dim3(grid), dim3(block), Ms, magma_stream , m, n, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, texdx, beta, d_y ); else{ printf("error: alignment %d not supported.\n", alignment); exit(-1); } hipDestroyTextureObject(texdx); #else if( alignment == 1) hipLaunchKernelGGL(( zgesellptmv2d_kernel_1), dim3(grid), dim3(block), Ms, magma_stream , m, n, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, d_x, beta, d_y ); else if( alignment == 4) hipLaunchKernelGGL(( zgesellptmv2d_kernel_4), dim3(grid), dim3(block), Ms, magma_stream , m, n, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, d_x, beta, d_y ); else if( alignment == 8) 
hipLaunchKernelGGL(( zgesellptmv2d_kernel_8), dim3(grid), dim3(block), Ms, magma_stream , m, n, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, d_x, beta, d_y ); else if( alignment == 16) hipLaunchKernelGGL(( zgesellptmv2d_kernel_16), dim3(grid), dim3(block), Ms, magma_stream , m, n, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, d_x, beta, d_y ); else if( alignment == 32) hipLaunchKernelGGL(( zgesellptmv2d_kernel_32), dim3(grid), dim3(block), Ms, magma_stream , m, n, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, d_x, beta, d_y ); else{ printf("error: alignment %d not supported.\n", alignment); exit(-1); } #endif return MAGMA_SUCCESS; }
ab43de2657b446d0e7ebef50ab36e7090551c71b.cu
/* -- MAGMA (version 1.5.0-beta3) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date July 2014 @precisions normal z -> c d s */ #include "cuda_runtime.h" #include <stdio.h> #include "common_magma.h" #include "sm_32_intrinsics.h" #define PRECISION_z //#define TEXTURE /* // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zgesellptmv2d_kernel_4_ldg( int num_rows, int num_cols, int blocksize, int T, magmaDoubleComplex alpha, magmaDoubleComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, const magmaDoubleComplex* __restrict__ d_x, magmaDoubleComplex beta, magmaDoubleComplex *d_y) { #if defined(TEXTURE) && (__CUDA_ARCH__ >= 300) // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ magmaDoubleComplex shared[]; if(row < num_rows ){ magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles int kk, i1, i2; magmaDoubleComplex x1, x2, v1, v2; d_colind += offset + ldx ; d_val += offset + ldx; for ( kk = 0; kk < max_-1 ; kk+=2 ){ i1 = d_colind[ block*kk]; i2 = d_colind[ block*kk + block]; x1 = __ldg( d_x+ i1 ); x2 = __ldg( d_x+ i2 ); v1 = d_val[ block*kk ]; v2 = d_val[ block*kk + block]; dot += v1 * x1; dot += v2 * x2; } if (kk<max_){ x1 = __ldg( d_x + d_colind[ block*kk] ); v1 = d_val[ block*kk ]; dot += v1 * x1; } shared[ldx] = dot; __syncthreads(); if( idx < 2 ){ shared[ldx]+=shared[ldx+blocksize*2]; 
__syncthreads(); if( idx == 0 ) { d_y[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*d_y [row]; } } } #endif } */ // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning one thread to each row - 1D kernel __global__ void zgesellptmv2d_kernel_1( int num_rows, int num_cols, int blocksize, int T, magmaDoubleComplex alpha, magmaDoubleComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, magmaDoubleComplex* d_x, magmaDoubleComplex beta, magmaDoubleComplex *d_y) { // threads assigned to rows int Idx = blockDim.x * blockIdx.x + threadIdx.x ; int offset = d_rowptr[ blockIdx.x ]; int border = (d_rowptr[ blockIdx.x+1 ]-offset)/blocksize; if(Idx < num_rows ){ magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0); for ( int n = 0; n < border; n++){ int col = d_colind [offset+ blocksize * n + threadIdx.x ]; magmaDoubleComplex val = d_val[offset+ blocksize * n + threadIdx.x]; if( val != 0){ dot=dot+val*d_x[col]; } } d_y[ Idx ] = dot * alpha + beta * d_y [ Idx ]; } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zgesellptmv2d_kernel_4( int num_rows, int num_cols, int blocksize, int T, magmaDoubleComplex alpha, magmaDoubleComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, magmaDoubleComplex* d_x, magmaDoubleComplex beta, magmaDoubleComplex *d_y) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ magmaDoubleComplex shared[]; if(row < num_rows ){ magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles int kk, i1, i2; magmaDoubleComplex x1, x2, v1, v2; d_colind += offset + ldx ; d_val += offset + ldx; for ( kk = 0; kk < max_-1 ; kk+=2 ){ i1 = d_colind[ block*kk]; i2 = d_colind[ block*kk + block]; x1 = d_x[ i1 ]; x2 = d_x[ i2 ]; v1 = d_val[ block*kk ]; v2 = d_val[ block*kk + block]; dot += v1 * x1; dot += v2 * x2; } if (kk<max_){ x1 = d_x[ d_colind[ block*kk] ]; v1 = d_val[ block*kk ]; dot += v1 * x1; } shared[ldx] = dot; __syncthreads(); if( idx < 2 ){ shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { d_y[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*d_y [row]; } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zgesellptmv2d_kernel_8( int num_rows, int num_cols, int blocksize, int T, magmaDoubleComplex alpha, magmaDoubleComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, magmaDoubleComplex* d_x, magmaDoubleComplex beta, magmaDoubleComplex *d_y) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ magmaDoubleComplex shared[]; if(row < num_rows ){ magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles int kk, i1, i2; magmaDoubleComplex x1, x2, v1, v2; d_colind += offset + ldx ; d_val += offset + ldx; for ( kk = 0; kk < max_-1 ; kk+=2 ){ i1 = d_colind[ block*kk]; i2 = d_colind[ block*kk + block]; x1 = d_x[ i1 ]; x2 = d_x[ i2 ]; v1 = d_val[ block*kk ]; v2 = d_val[ block*kk + block]; dot += v1 * x1; dot += v2 * x2; } if (kk<max_){ x1 = d_x[ d_colind[ block*kk] ]; v1 = d_val[ block*kk ]; dot += v1 * x1; } shared[ldx] = dot; __syncthreads(); if( idx < 4 ){ shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { d_y[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*d_y [row]; } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zgesellptmv2d_kernel_16( int num_rows, int num_cols, int blocksize, int T, magmaDoubleComplex alpha, magmaDoubleComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, magmaDoubleComplex* d_x, magmaDoubleComplex beta, magmaDoubleComplex *d_y) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ magmaDoubleComplex shared[]; if(row < num_rows ){ magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ magmaDoubleComplex val = d_val[ offset + ldx + block*k ]; int col = d_colind[ offset + ldx + block*k ]; dot += val * d_x[ col ]; } shared[ldx] = dot; __syncthreads(); if( idx < 8 ){ shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { d_y[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*d_y [row]; } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zgesellptmv2d_kernel_32( int num_rows, int num_cols, int blocksize, int T, magmaDoubleComplex alpha, magmaDoubleComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, magmaDoubleComplex* d_x, magmaDoubleComplex beta, magmaDoubleComplex *d_y) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ magmaDoubleComplex shared[]; if(row < num_rows ){ magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ magmaDoubleComplex val = d_val[ offset + ldx + block*k ]; int col = d_colind[ offset + ldx + block*k ]; dot += val * d_x[ col ]; } shared[ldx] = dot; __syncthreads(); if( idx < 16 ){ shared[ldx]+=shared[ldx+blocksize*16]; __syncthreads(); if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { d_y[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*d_y [row]; } } } } /************************* same but using texture mem *************************/ #if defined(PRECISION_d) && defined(TEXTURE) __inline__ __device__ double read_from_tex( cudaTextureObject_t texdx, const int& i){ int2 temp = tex1Dfetch<int2>( texdx, i ); return __hiloint2double(temp.y,temp.x); } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zgesellptmv2d_kernel_4_tex( int num_rows, int num_cols, int blocksize, int T, magmaDoubleComplex alpha, magmaDoubleComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, cudaTextureObject_t texdx, magmaDoubleComplex beta, magmaDoubleComplex *d_y) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ magmaDoubleComplex shared[]; if(row < num_rows ){ magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles int kk, i1, i2; magmaDoubleComplex x1, x2, v1, v2; d_colind += offset + ldx ; d_val += offset + ldx; for ( kk = 0; kk < max_-1 ; kk+=2 ){ i1 = d_colind[ block*kk]; i2 = d_colind[ block*kk + block]; x1 = read_from_tex( texdx, i1 ); x2 = read_from_tex( texdx, i2 ); v1 = d_val[ block*kk ]; v2 = d_val[ block*kk + block]; dot += v1 * x1; dot += v2 * x2; } if (kk<max_){ x1 = read_from_tex( texdx, d_colind[ block*kk] ); v1 = d_val[ block*kk ]; dot += v1 * x1; } shared[ldx] = dot; __syncthreads(); if( idx < 2 ){ shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { d_y[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*d_y [row]; } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zgesellptmv2d_kernel_8_tex( int num_rows, int num_cols, int blocksize, int T, magmaDoubleComplex alpha, magmaDoubleComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, cudaTextureObject_t texdx, magmaDoubleComplex beta, magmaDoubleComplex *d_y) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ magmaDoubleComplex shared[]; if(row < num_rows ){ magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles int kk, i1, i2; magmaDoubleComplex x1, x2, v1, v2; d_colind += offset + ldx ; d_val += offset + ldx; for ( kk = 0; kk < max_-1 ; kk+=2 ){ i1 = d_colind[ block*kk]; i2 = d_colind[ block*kk + block]; x1 = read_from_tex( texdx, i1 ); x2 = read_from_tex( texdx, i2 ); v1 = d_val[ block*kk ]; v2 = d_val[ block*kk + block]; dot += v1 * x1; dot += v2 * x2; } if (kk<max_){ x1 = read_from_tex( texdx, d_colind[ block*kk] ); v1 = d_val[ block*kk ]; dot += v1 * x1; } shared[ldx] = dot; __syncthreads(); if( idx < 4 ){ shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { d_y[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*d_y [row]; } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zgesellptmv2d_kernel_16_tex( int num_rows, int num_cols, int blocksize, int T, magmaDoubleComplex alpha, magmaDoubleComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, cudaTextureObject_t texdx, magmaDoubleComplex beta, magmaDoubleComplex *d_y) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ magmaDoubleComplex shared[]; if(row < num_rows ){ magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ magmaDoubleComplex val = d_val[ offset + ldx + block*k ]; int col = d_colind[ offset + ldx + block*k ]; dot += val * read_from_tex( texdx, col ); } shared[ldx] = dot; __syncthreads(); if( idx < 8 ){ shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { d_y[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*d_y [row]; } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zgesellptmv2d_kernel_32_tex( int num_rows, int num_cols, int blocksize, int T, magmaDoubleComplex alpha, magmaDoubleComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, cudaTextureObject_t texdx, magmaDoubleComplex beta, magmaDoubleComplex *d_y) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ magmaDoubleComplex shared[]; if(row < num_rows ){ magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ magmaDoubleComplex val = d_val[ offset + ldx + block*k ]; int col = d_colind[ offset + ldx + block*k ]; dot += val * read_from_tex( texdx, col ); } shared[ldx] = dot; __syncthreads(); if( idx < 16 ){ shared[ldx]+=shared[ldx+blocksize*16]; __syncthreads(); if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { d_y[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*d_y [row]; } } } } #endif /********************* end of texture versions **************************/ /** Purpose ------- This routine computes y = alpha * A^t * x + beta * y on the GPU. Input format is SELLP. 
Arguments --------- @param transA magma_trans_t transposition parameter for A @param m magma_int_t number of rows in A @param n magma_int_t number of columns in A @param blocksize magma_int_t number of rows in one ELL-slice @param slices magma_int_t number of slices in matrix @param alignment magma_int_t number of threads assigned to one row @param alpha magmaDoubleComplex scalar multiplier @param d_val magmaDoubleComplex* array containing values of A in SELLP @param d_colind magma_int_t* columnindices of A in SELLP @param d_rowptr magma_int_t* rowpointer of SELLP @param d_x magmaDoubleComplex* input vector x @param beta magmaDoubleComplex scalar multiplier @param d_y magmaDoubleComplex* input/output vector y @ingroup magmasparse_zblas ********************************************************************/ extern "C" magma_int_t magma_zgesellpmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t blocksize, magma_int_t slices, magma_int_t alignment, magmaDoubleComplex alpha, magmaDoubleComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, magmaDoubleComplex *d_x, magmaDoubleComplex beta, magmaDoubleComplex *d_y ){ // using a 2D thread grid int num_threads = blocksize*alignment; magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 && num_threads > 256 ) printf("error: too much shared memory requested.\n"); dim3 block( blocksize, alignment, 1); int dimgrid1 = sqrt(slices); int dimgrid2 = (slices + dimgrid1 -1 ) / dimgrid1; dim3 grid( dimgrid1, dimgrid2, 1); int Ms = num_threads * sizeof( magmaDoubleComplex ); #if defined(PRECISION_d) && defined(TEXTURE) // Create channel. cudaChannelFormatDesc channel_desc; channel_desc = cudaCreateChannelDesc(32, 32, 0, 0, cudaChannelFormatKindSigned); // Create resource descriptor. 
struct cudaResourceDesc resDescdx; memset(&resDescdx, 0, sizeof(resDescdx)); resDescdx.resType = cudaResourceTypeLinear; resDescdx.res.linear.devPtr = (void*)d_x; resDescdx.res.linear.desc = channel_desc; resDescdx.res.linear.sizeInBytes = m*sizeof(double); // Specify texture object parameters. struct cudaTextureDesc texDesc; memset(&texDesc, 0, sizeof(texDesc)); texDesc.addressMode[0] = cudaAddressModeClamp; texDesc.filterMode = cudaFilterModePoint; texDesc.readMode = cudaReadModeElementType; // Create texture object. cudaTextureObject_t texdx = 0; cudaCreateTextureObject(&texdx, &resDescdx, &texDesc, NULL); cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte); if( alignment == 4) zgesellptmv2d_kernel_4_tex<<< grid, block, Ms, magma_stream >>> ( m, n, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, texdx, beta, d_y ); else if( alignment == 8) zgesellptmv2d_kernel_8_tex<<< grid, block, Ms, magma_stream >>> ( m, n, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, texdx, beta, d_y ); else if( alignment == 16) zgesellptmv2d_kernel_16_tex<<< grid, block, Ms, magma_stream >>> ( m, n, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, texdx, beta, d_y ); else if( alignment == 32) zgesellptmv2d_kernel_32_tex<<< grid, block, Ms, magma_stream >>> ( m, n, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, texdx, beta, d_y ); else{ printf("error: alignment %d not supported.\n", alignment); exit(-1); } cudaDestroyTextureObject(texdx); #else if( alignment == 1) zgesellptmv2d_kernel_1<<< grid, block, Ms, magma_stream >>> ( m, n, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, d_x, beta, d_y ); else if( alignment == 4) zgesellptmv2d_kernel_4<<< grid, block, Ms, magma_stream >>> ( m, n, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, d_x, beta, d_y ); else if( alignment == 8) zgesellptmv2d_kernel_8<<< grid, block, Ms, magma_stream >>> ( m, n, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, d_x, beta, d_y ); else if( 
alignment == 16) zgesellptmv2d_kernel_16<<< grid, block, Ms, magma_stream >>> ( m, n, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, d_x, beta, d_y ); else if( alignment == 32) zgesellptmv2d_kernel_32<<< grid, block, Ms, magma_stream >>> ( m, n, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, d_x, beta, d_y ); else{ printf("error: alignment %d not supported.\n", alignment); exit(-1); } #endif return MAGMA_SUCCESS; }
70b7a5573175fdc32f1830a5a35d3c2ef5cf7fd4.hip
// !!! This is a file automatically generated by hipify!!! /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file ndarray_function.cu * \brief GPU Implementation of ndarray function. */ // this will be invoked by nvcc and compile GPU version #include <hipcub/hipcub.hpp> #include <dmlc/logging.h> #include "../operator/mxnet_op.h" #include "../operator/tensor/init_op.h" #include "../operator/tensor/util/tensor_util-inl.h" #include "../operator/tensor/util/tensor_util-inl.cuh" #include "../common/cuda_utils.h" #include "./ndarray_function.h" #include "./ndarray_function-inl.h" #include "./ndarray_function-inl.cuh" namespace mxnet { namespace ndarray { template<> void Copy<cpu, gpu>(const TBlob &from, TBlob *to, Context from_ctx, Context to_ctx, RunContext ctx) { CHECK_EQ(to->type_flag_, from.type_flag_) << "Source and target must have the same data type when copying across devices."; MSHADOW_TYPE_SWITCH(to->type_flag_, DType, { mshadow::Copy(to->FlatTo1D<gpu, DType>(), from.FlatTo1D<cpu, DType>(), ctx.get_stream<gpu>()); }); } template<> void Copy<gpu, cpu>(const TBlob &from, TBlob *to, Context from_ctx, Context to_ctx, RunContext ctx) { CHECK_EQ(to->type_flag_, from.type_flag_) << "Source and target must have the same data type when 
copying across devices."; MSHADOW_TYPE_SWITCH(to->type_flag_, DType, { mshadow::Copy(to->FlatTo1D<cpu, DType>(), from.FlatTo1D<gpu, DType>(), ctx.get_stream<gpu>()); }); } template<> void Copy<gpu, gpu>(const TBlob &from, TBlob *to, Context from_ctx, Context to_ctx, RunContext ctx) { if (from_ctx.dev_id == to_ctx.dev_id) { mshadow::Stream<gpu>* s = ctx.get_stream<gpu>(); MSHADOW_TYPE_SWITCH(to->type_flag_, DType, { if (to->type_flag_ == from.type_flag_) { mshadow::Copy(to->FlatTo1D<gpu, DType>(s), from.FlatTo1D<gpu, DType>(s), s); } else { MSHADOW_TYPE_SWITCH(from.type_flag_, SrcDType, { to->FlatTo1D<gpu, DType>(s) = mshadow::expr::tcast<DType>(from.FlatTo1D<gpu, SrcDType>(s)); }) } }) } else { CHECK(from.CheckContiguous() && to->CheckContiguous()) << "copy across only support continugous memory"; CHECK_EQ(to->type_flag_, from.type_flag_) << "Source and target must have the same data type when copying across devices."; mshadow::Stream<gpu> *s = ctx.get_stream<gpu>(); CHECK(s != NULL) << "need stream in GPU context"; hipMemcpyPeerAsync(to->dptr_, to_ctx.dev_id, from.dptr_, from_ctx.dev_id, from.shape_.Size() * mshadow::mshadow_sizeof(to->type_flag_), s->stream_); } } /*! * \brief GPU impl of elemwise sum for rowsparse tensors. 
*/ void ElementwiseSumRspImpl(mshadow::Stream<gpu>* s, const Resource& rsc, const std::vector<NDArray>& nds, NDArray* out) { using namespace mxnet::op; using namespace rowsparse; using nnvm::dim_t; CHECK_EQ(out->storage_type(), kRowSparseStorage) << "Expected rowsparse storage_type (" << out->storage_type() << " given)"; int init = 0; for (const auto& nd : nds) { if (nd.storage_initialized()) { init++; break; } } if (init == 0) { FillZerosRspImpl(s, *out); return; } const dim_t num_rows = out->shape()[0]; const dim_t row_length = out->shape().ProdShape(1, out->shape().ndim()); MSHADOW_TYPE_SWITCH(out->dtype(), DType, { // data type MSHADOW_IDX_TYPE_SWITCH(out->aux_type(kIdx), IType, { // row_idx type // Allocate temporary storage for row_flg array and cub's prefix sum operation IType* row_flg = NULL; void* d_temp_storage = NULL; size_t temp_storage_bytes = 0; hipcub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, row_flg, row_flg, num_rows, mshadow::Stream<gpu>::GetStream(s)); mshadow::Tensor<gpu, 1, char> workspace = rsc .get_space_typed<gpu, 1, char>(mshadow::Shape1(num_rows * sizeof(IType) + temp_storage_bytes), s); row_flg = reinterpret_cast<IType*>(workspace.dptr_); d_temp_storage = workspace.dptr_ + num_rows*sizeof(IType); // Mark row_flg array with 0 for zero rows and 1 for non-zero rows dim_t num_threads = num_rows; mxnet_op::Kernel<mxnet_op::set_zero, gpu>::Launch(s, num_threads, row_flg); for (const auto& nd : nds) { if (nd.storage_initialized()) { const IType* nd_row_idx = nd.aux_data(kIdx).dptr<IType>(); const dim_t nd_nnr = nd.storage_shape()[0]; num_threads = nd_nnr; mxnet_op::Kernel<MarkRspRowFlgKernel, gpu>::Launch(s, num_threads, row_flg, nd_row_idx, nd_nnr); } } // Compute inclusive prefix sum over row_flg hipcub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, row_flg, row_flg, num_rows, mshadow::Stream<gpu>::GetStream(s)); // Get total number of output non-zero rows from GPU and allocate out data and row_idx dim_t 
nnr_out = 0; CUDA_CALL(hipMemcpy(&nnr_out, &row_flg[num_rows-1], sizeof(dim_t), hipMemcpyDeviceToHost)); out->CheckAndAlloc({mshadow::Shape1(nnr_out)}); IType* out_row_idx = out->aux_data(kIdx).dptr<IType>(); DType* out_data = out->data().dptr<DType>(); // Fill row_idx array of output using row_flg num_threads = num_rows; mxnet_op::Kernel<FillRspRowIdxKernel, gpu>::Launch(s, num_threads, out_row_idx, row_flg, num_rows); // Perform elementwise addition, writing to output data num_threads = nnr_out * row_length; mxnet_op::Kernel<mxnet_op::set_zero, gpu>::Launch(s, num_threads, out_data); for (const auto& nd : nds) { if (nd.storage_initialized()) { const IType* nd_row_idx = nd.aux_data(kIdx).dptr<IType>(); const DType* nd_data = nd.data().dptr<DType>(); const dim_t nd_nnr = nd.storage_shape()[0]; num_threads = nd_nnr * row_length; mxnet_op::Kernel<ElementWiseRspAdditionKernel, gpu>::Launch(s, num_threads, out_data, row_flg, nd_row_idx, nd_data, nd_nnr, row_length); } } }); }); } /*! * \brief Parallel gpu impl of elemwise sum for sparse tensors. * Currently only support row sparse sum. */ template<> void ElementwiseSum<gpu>(mshadow::Stream<gpu>* s, const Resource& rsc, const std::vector<NDArray>& nds, NDArray* out) { if (nds.empty()) return; if (nds[0].storage_type() == kRowSparseStorage) { ElementwiseSumRspImpl(s, rsc, nds, out); } else { LOG(FATAL) << "ElementwiseSum<gpu> has not been implemented for storage_type = << " << nds[0].storage_type(); } } } // namespace ndarray } // namespace mxnet
70b7a5573175fdc32f1830a5a35d3c2ef5cf7fd4.cu
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file ndarray_function.cu * \brief GPU Implementation of ndarray function. */ // this will be invoked by nvcc and compile GPU version #include <cub/cub.cuh> #include <dmlc/logging.h> #include "../operator/mxnet_op.h" #include "../operator/tensor/init_op.h" #include "../operator/tensor/util/tensor_util-inl.h" #include "../operator/tensor/util/tensor_util-inl.cuh" #include "../common/cuda_utils.h" #include "./ndarray_function.h" #include "./ndarray_function-inl.h" #include "./ndarray_function-inl.cuh" namespace mxnet { namespace ndarray { template<> void Copy<cpu, gpu>(const TBlob &from, TBlob *to, Context from_ctx, Context to_ctx, RunContext ctx) { CHECK_EQ(to->type_flag_, from.type_flag_) << "Source and target must have the same data type when copying across devices."; MSHADOW_TYPE_SWITCH(to->type_flag_, DType, { mshadow::Copy(to->FlatTo1D<gpu, DType>(), from.FlatTo1D<cpu, DType>(), ctx.get_stream<gpu>()); }); } template<> void Copy<gpu, cpu>(const TBlob &from, TBlob *to, Context from_ctx, Context to_ctx, RunContext ctx) { CHECK_EQ(to->type_flag_, from.type_flag_) << "Source and target must have the same data type when copying across devices."; MSHADOW_TYPE_SWITCH(to->type_flag_, DType, { 
mshadow::Copy(to->FlatTo1D<cpu, DType>(), from.FlatTo1D<gpu, DType>(), ctx.get_stream<gpu>()); }); } template<> void Copy<gpu, gpu>(const TBlob &from, TBlob *to, Context from_ctx, Context to_ctx, RunContext ctx) { if (from_ctx.dev_id == to_ctx.dev_id) { mshadow::Stream<gpu>* s = ctx.get_stream<gpu>(); MSHADOW_TYPE_SWITCH(to->type_flag_, DType, { if (to->type_flag_ == from.type_flag_) { mshadow::Copy(to->FlatTo1D<gpu, DType>(s), from.FlatTo1D<gpu, DType>(s), s); } else { MSHADOW_TYPE_SWITCH(from.type_flag_, SrcDType, { to->FlatTo1D<gpu, DType>(s) = mshadow::expr::tcast<DType>(from.FlatTo1D<gpu, SrcDType>(s)); }) } }) } else { CHECK(from.CheckContiguous() && to->CheckContiguous()) << "copy across only support continugous memory"; CHECK_EQ(to->type_flag_, from.type_flag_) << "Source and target must have the same data type when copying across devices."; mshadow::Stream<gpu> *s = ctx.get_stream<gpu>(); CHECK(s != NULL) << "need stream in GPU context"; cudaMemcpyPeerAsync(to->dptr_, to_ctx.dev_id, from.dptr_, from_ctx.dev_id, from.shape_.Size() * mshadow::mshadow_sizeof(to->type_flag_), s->stream_); } } /*! * \brief GPU impl of elemwise sum for rowsparse tensors. 
*/ void ElementwiseSumRspImpl(mshadow::Stream<gpu>* s, const Resource& rsc, const std::vector<NDArray>& nds, NDArray* out) { using namespace mxnet::op; using namespace rowsparse; using nnvm::dim_t; CHECK_EQ(out->storage_type(), kRowSparseStorage) << "Expected rowsparse storage_type (" << out->storage_type() << " given)"; int init = 0; for (const auto& nd : nds) { if (nd.storage_initialized()) { init++; break; } } if (init == 0) { FillZerosRspImpl(s, *out); return; } const dim_t num_rows = out->shape()[0]; const dim_t row_length = out->shape().ProdShape(1, out->shape().ndim()); MSHADOW_TYPE_SWITCH(out->dtype(), DType, { // data type MSHADOW_IDX_TYPE_SWITCH(out->aux_type(kIdx), IType, { // row_idx type // Allocate temporary storage for row_flg array and cub's prefix sum operation IType* row_flg = NULL; void* d_temp_storage = NULL; size_t temp_storage_bytes = 0; cub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, row_flg, row_flg, num_rows, mshadow::Stream<gpu>::GetStream(s)); mshadow::Tensor<gpu, 1, char> workspace = rsc .get_space_typed<gpu, 1, char>(mshadow::Shape1(num_rows * sizeof(IType) + temp_storage_bytes), s); row_flg = reinterpret_cast<IType*>(workspace.dptr_); d_temp_storage = workspace.dptr_ + num_rows*sizeof(IType); // Mark row_flg array with 0 for zero rows and 1 for non-zero rows dim_t num_threads = num_rows; mxnet_op::Kernel<mxnet_op::set_zero, gpu>::Launch(s, num_threads, row_flg); for (const auto& nd : nds) { if (nd.storage_initialized()) { const IType* nd_row_idx = nd.aux_data(kIdx).dptr<IType>(); const dim_t nd_nnr = nd.storage_shape()[0]; num_threads = nd_nnr; mxnet_op::Kernel<MarkRspRowFlgKernel, gpu>::Launch(s, num_threads, row_flg, nd_row_idx, nd_nnr); } } // Compute inclusive prefix sum over row_flg cub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, row_flg, row_flg, num_rows, mshadow::Stream<gpu>::GetStream(s)); // Get total number of output non-zero rows from GPU and allocate out data and row_idx dim_t nnr_out = 
0; CUDA_CALL(cudaMemcpy(&nnr_out, &row_flg[num_rows-1], sizeof(dim_t), cudaMemcpyDeviceToHost)); out->CheckAndAlloc({mshadow::Shape1(nnr_out)}); IType* out_row_idx = out->aux_data(kIdx).dptr<IType>(); DType* out_data = out->data().dptr<DType>(); // Fill row_idx array of output using row_flg num_threads = num_rows; mxnet_op::Kernel<FillRspRowIdxKernel, gpu>::Launch(s, num_threads, out_row_idx, row_flg, num_rows); // Perform elementwise addition, writing to output data num_threads = nnr_out * row_length; mxnet_op::Kernel<mxnet_op::set_zero, gpu>::Launch(s, num_threads, out_data); for (const auto& nd : nds) { if (nd.storage_initialized()) { const IType* nd_row_idx = nd.aux_data(kIdx).dptr<IType>(); const DType* nd_data = nd.data().dptr<DType>(); const dim_t nd_nnr = nd.storage_shape()[0]; num_threads = nd_nnr * row_length; mxnet_op::Kernel<ElementWiseRspAdditionKernel, gpu>::Launch(s, num_threads, out_data, row_flg, nd_row_idx, nd_data, nd_nnr, row_length); } } }); }); } /*! * \brief Parallel gpu impl of elemwise sum for sparse tensors. * Currently only support row sparse sum. */ template<> void ElementwiseSum<gpu>(mshadow::Stream<gpu>* s, const Resource& rsc, const std::vector<NDArray>& nds, NDArray* out) { if (nds.empty()) return; if (nds[0].storage_type() == kRowSparseStorage) { ElementwiseSumRspImpl(s, rsc, nds, out); } else { LOG(FATAL) << "ElementwiseSum<gpu> has not been implemented for storage_type = << " << nds[0].storage_type(); } } } // namespace ndarray } // namespace mxnet
8d1d97459e87d2e2893a7beb3cc9003b52d6dc72.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <vector> #include <algorithm> #include <string> #include <iomanip> #include <cstdlib> #include <ctime> #include <Timer.hpp> using std::cout; using std::cerr; using std::endl; using std::fixed; using std::setprecision; using LOFAR::NSTimer; // const unsigned int CHANNELS = 256; // const unsigned int SAMPLES = 1024; // const unsigned int STATIONS = 128; // const unsigned int BEAMS = 512; const unsigned int DIV = 1; const unsigned int CHANNELS = 256/DIV; const unsigned int SAMPLES = 1024/DIV; const unsigned int STATIONS = 128/DIV; const unsigned int BEAMS = 512/DIV; // Sequential kernel void kernel_h(const float * weights, const float * input, float * output); // CUDA kernel void kernel_d(const float * weights, const float * input, float * output); // Floating point comparison inline bool same(const float a, const float b); int main(int argc, char * argv[]) { // Generate input std::vector< float > weights(CHANNELS * STATIONS * BEAMS); std::vector< float > input(STATIONS * CHANNELS * SAMPLES); std::vector< float > output_h(BEAMS * CHANNELS * SAMPLES); std::vector< float > output_d(BEAMS * CHANNELS * SAMPLES); std::srand(std::time(NULL)); for ( unsigned int channel = 0; channel < CHANNELS; channel++ ) { for ( unsigned int station = 0; station < STATIONS; station++ ) { for ( unsigned int beam = 0; beam < BEAMS; beam++ ) { weights[(channel * STATIONS * BEAMS) + (station * BEAMS) + beam] = static_cast< float >(std::rand() % 10); } for ( unsigned int sample = 0; sample < SAMPLES; sample++ ) { input[(station * CHANNELS * SAMPLES) + (channel * SAMPLES) + sample] = static_cast< float >(std::rand() % 100); } } } std::fill(output_h.begin(), output_h.end(), 0.0f); std::fill(output_d.begin(), output_d.end(), 0.0f); // Lauch kernels kernel_h(weights.data(), input.data(), output_h.data()); kernel_d(weights.data(), input.data(), output_d.data()); // Correctness check 
long long unsigned int wrongItems = 0; for ( unsigned int beam = 0; beam < BEAMS; beam++ ) { for ( unsigned int channel = 0; channel < CHANNELS; channel++ ) { for ( unsigned int sample = 0; sample < SAMPLES; sample++ ) { if ( !same(output_h[(beam * CHANNELS * SAMPLES) + (channel * SAMPLES) + sample], output_d[(beam * CHANNELS * SAMPLES) + (channel * SAMPLES) + sample]) ) { wrongItems++; } } } } if ( wrongItems > 0 ) { std::cout << "Wrong: \t\t" << wrongItems << std::fixed << std::setprecision(2) << " (" << (wrongItems * 100.0) / (static_cast< long long unsigned int >(BEAMS) * CHANNELS * SAMPLES) << "%)" << std::endl; } else{ std::cout << "Nothing Wrong." << std::endl; } return 0; } void kernel_h(const float * weights, const float * input, float * output) { LOFAR::NSTimer kernelTimer("Kernel", false, false); kernelTimer.start(); // Kernel for ( unsigned int beam = 0; beam < BEAMS; beam++ ) { for ( unsigned int channel = 0; channel < CHANNELS; channel++ ) { for ( unsigned int sample = 0; sample < SAMPLES; sample++ ) { for ( unsigned int station = 0; station < STATIONS; station++ ) { output[(beam * CHANNELS * SAMPLES) + (channel * SAMPLES) + sample] += input[(station * CHANNELS * SAMPLES) + (channel * SAMPLES) + sample] * weights[(channel * STATIONS * BEAMS) + (station * BEAMS) + beam]; } output[(beam * CHANNELS * SAMPLES) + (channel * SAMPLES) + sample] /= STATIONS; } } } // /Kernel kernelTimer.stop(); // Print performance metrics std::cout << "Sequential" << std::endl; std::cout << "Time: \t\t" << std::fixed << std::setprecision(6) << kernelTimer.getElapsed() << std::endl; std::cout << "GFLOP/s: \t" << std::fixed << std::setprecision(3) << ((static_cast< long long unsigned int >(BEAMS) * CHANNELS * SAMPLES * STATIONS * 2) + (static_cast< long long unsigned int >(BEAMS) * CHANNELS * SAMPLES)) / 1000000000.0 / kernelTimer.getElapsed() << std::endl; std::cout << "GB/s: \t\t" << std::fixed << std::setprecision(3) << ((static_cast< long long unsigned int >(BEAMS) * 
CHANNELS * SAMPLES * STATIONS * 3 * sizeof(float)) + (static_cast< long long unsigned int >(BEAMS) * CHANNELS * SAMPLES * 2 * sizeof(float))) / 1000000000.0 / kernelTimer.getElapsed() << std::endl; } __global__ void radio(float *input, float *weights, float *output, const unsigned int BEAMS, const unsigned int CHANNELS, const unsigned int SAMPLES, const unsigned int STATIONS) { unsigned long long int blockId = blockIdx.y * gridDim.x + blockIdx.x; unsigned long long int index = blockId * blockDim.x + threadIdx.x; unsigned int N1 = STATIONS; unsigned int N12 = SAMPLES * N1; unsigned long int N123 = CHANNELS * N12; unsigned long long int N1234 = BEAMS * N123; if(index >= N1234) return; unsigned int beamIndex = (index - (index % (N123)))/(N123); index = (index - (beamIndex * N123)); unsigned int channelIndex = (index - (index % (N12))) / (N12); index = (index - (channelIndex * N12)); unsigned int sampleIndex = (index - (index % (N1))) / (N1); unsigned int stationIndex = (index - (sampleIndex * N1)); atomicAdd(&output[(beamIndex * CHANNELS * SAMPLES) + (channelIndex * SAMPLES) + sampleIndex], input[(stationIndex * CHANNELS * SAMPLES) + (channelIndex * SAMPLES) + sampleIndex] * weights[(channelIndex * STATIONS * BEAMS) + (stationIndex * BEAMS) + beamIndex]); __syncthreads(); if(stationIndex == 0){ //Whole block writes to one output channel - to prevent multiple divisions, only the first of the block may divide. 
output[(beamIndex * CHANNELS * SAMPLES) + (channelIndex * SAMPLES) + sampleIndex] /= STATIONS; } } void kernel_d(const float * weights, const float * input, float * output) { LOFAR::NSTimer kernelTimer("Kernel", false, false); LOFAR::NSTimer memoryTimer("Memory", false, false); LOFAR::NSTimer globalTimer("Global", false, false); hipError_t devRetVal = hipSuccess; float * devA = 0; float * devB = 0; float * devC = 0; // Start of the computation globalTimer.start(); // Allocate CUDA memory if ((devRetVal = hipMalloc(reinterpret_cast < void ** > ( & devA), (STATIONS * CHANNELS * SAMPLES) * sizeof(float))) != hipSuccess) { cerr << "Impossible to allocate device memory for inputImage." << endl; return; } if ((devRetVal = hipMalloc(reinterpret_cast < void ** > ( & devB), (CHANNELS * STATIONS * BEAMS) * sizeof(float))) != hipSuccess) { cerr << "Impossible to allocate device memory for inputImage." << endl; return; } if ((devRetVal = hipMalloc(reinterpret_cast < void ** > ( & devC), (BEAMS * CHANNELS * SAMPLES) * sizeof(float))) != hipSuccess) { cerr << "Impossible to allocate device memory for outputImage." << endl; return; } // Copy input to device memoryTimer.start(); if ((devRetVal = hipMemcpy(devA, (input), (STATIONS * CHANNELS * SAMPLES) * sizeof(float), hipMemcpyHostToDevice)) != hipSuccess) { cerr << "Impossible to copy devA to device." << endl; return; } if ((devRetVal = hipMemcpy(devB, (weights), (CHANNELS * STATIONS * BEAMS) * sizeof(float), hipMemcpyHostToDevice)) != hipSuccess) { cerr << "Impossible to copy devA to device." << endl; return; } if ((devRetVal = hipMemset(devC, 0, (BEAMS * CHANNELS * SAMPLES) * sizeof(float))) != hipSuccess) { cerr << "Impossible to zero devC." 
<< endl; return; } memoryTimer.stop(); // Execute the kernel dim3 blockSize(STATIONS); dim3 gridSize(BEAMS*CHANNELS,SAMPLES);//num of loops kernelTimer.start(); hipLaunchKernelGGL(( radio) , dim3(gridSize), dim3(blockSize), 0, 0, devA, devB, devC, BEAMS, CHANNELS, SAMPLES, STATIONS); hipDeviceSynchronize(); kernelTimer.stop(); // Check if the kernel returned an error if ((devRetVal = hipGetLastError()) != hipSuccess) { cerr << "Uh, the kernel had some kind of issue: " << hipGetErrorString(devRetVal) << endl; return; } // Copy the output back to host memoryTimer.start(); if ((devRetVal = hipMemcpy(reinterpret_cast < void * > (output), devC, (BEAMS * CHANNELS * SAMPLES) * sizeof(float), hipMemcpyDeviceToHost)) != hipSuccess) { cerr << "Impossible to copy devC to host." << hipGetErrorString(devRetVal) << endl; return; } memoryTimer.stop(); // End of the computation globalTimer.stop(); // Print performance metrics std::cout << "CUDA" << std::endl; std::cout << "Time (g): \t" << std::fixed << std::setprecision(6) << globalTimer.getElapsed() << std::endl; std::cout << "Time (m): \t" << std::fixed << std::setprecision(6) << memoryTimer.getElapsed() << std::endl; std::cout << "Time (k): \t" << std::fixed << std::setprecision(6) << kernelTimer.getElapsed() << std::endl; std::cout << "GFLOP/s: \t" << std::fixed << std::setprecision(3) << ((static_cast< long long unsigned int >(BEAMS) * CHANNELS * SAMPLES * STATIONS * 2) + (static_cast< long long unsigned int >(BEAMS) * CHANNELS * SAMPLES)) / 1000000000.0 / kernelTimer.getElapsed() << std::endl; std::cout << "GB/s: \t\t" << std::fixed << std::setprecision(3) << ((static_cast< long long unsigned int >(BEAMS) * CHANNELS * SAMPLES * STATIONS * 3 * sizeof(float)) + (static_cast< long long unsigned int >(BEAMS) * CHANNELS * SAMPLES * 2 * sizeof(float))) / 1000000000.0 / memoryTimer.getElapsed() << std::endl; hipFree(devA); hipFree(devC); } inline bool same(const float a, const float b) { return abs(a - b) < 1e-6; }
8d1d97459e87d2e2893a7beb3cc9003b52d6dc72.cu
#include <iostream>
#include <vector>
#include <algorithm>
#include <string>
#include <iomanip>
#include <cmath>
#include <cstdlib>
#include <ctime>

#include <Timer.hpp>

using std::cout;
using std::cerr;
using std::endl;
using std::fixed;
using std::setprecision;
using LOFAR::NSTimer;

// Problem dimensions. DIV uniformly scales the problem down for quick runs
// (DIV = 1 is the full-size problem).
const unsigned int DIV = 1;
const unsigned int CHANNELS = 256 / DIV;
const unsigned int SAMPLES = 1024 / DIV;
const unsigned int STATIONS = 128 / DIV;
const unsigned int BEAMS = 512 / DIV;

// Sequential reference beamformer (CPU).
void kernel_h(const float * weights, const float * input, float * output);
// CUDA beamformer: host wrapper that allocates, launches `radio`, copies back.
void kernel_d(const float * weights, const float * input, float * output);
// Tolerant floating point comparison.
inline bool same(const float a, const float b);

int main(int argc, char * argv[]) {
  // Generate random input: weights in [0,10), samples in [0,100).
  std::vector< float > weights(CHANNELS * STATIONS * BEAMS);
  std::vector< float > input(STATIONS * CHANNELS * SAMPLES);
  std::vector< float > output_h(BEAMS * CHANNELS * SAMPLES);
  std::vector< float > output_d(BEAMS * CHANNELS * SAMPLES);

  std::srand(std::time(NULL));
  for ( unsigned int channel = 0; channel < CHANNELS; channel++ ) {
    for ( unsigned int station = 0; station < STATIONS; station++ ) {
      for ( unsigned int beam = 0; beam < BEAMS; beam++ ) {
        weights[(channel * STATIONS * BEAMS) + (station * BEAMS) + beam] = static_cast< float >(std::rand() % 10);
      }
      for ( unsigned int sample = 0; sample < SAMPLES; sample++ ) {
        input[(station * CHANNELS * SAMPLES) + (channel * SAMPLES) + sample] = static_cast< float >(std::rand() % 100);
      }
    }
  }
  std::fill(output_h.begin(), output_h.end(), 0.0f);
  std::fill(output_d.begin(), output_d.end(), 0.0f);

  // Run both implementations on the same data.
  kernel_h(weights.data(), input.data(), output_h.data());
  kernel_d(weights.data(), input.data(), output_d.data());

  // Correctness check: count elements where CPU and GPU disagree.
  long long unsigned int wrongItems = 0;
  for ( unsigned int beam = 0; beam < BEAMS; beam++ ) {
    for ( unsigned int channel = 0; channel < CHANNELS; channel++ ) {
      for ( unsigned int sample = 0; sample < SAMPLES; sample++ ) {
        if ( !same(output_h[(beam * CHANNELS * SAMPLES) + (channel * SAMPLES) + sample], output_d[(beam * CHANNELS * SAMPLES) + (channel * SAMPLES) + sample]) ) {
          wrongItems++;
        }
      }
    }
  }
  if ( wrongItems > 0 ) {
    std::cout << "Wrong: \t\t" << wrongItems << std::fixed << std::setprecision(2) << " (" << (wrongItems * 100.0) / (static_cast< long long unsigned int >(BEAMS) * CHANNELS * SAMPLES) << "%)" << std::endl;
  } else {
    std::cout << "Nothing Wrong." << std::endl;
  }

  return 0;
}

// CPU reference: for each (beam, channel, sample), accumulate input weighted
// per station, then divide by STATIONS.
void kernel_h(const float * weights, const float * input, float * output) {
  LOFAR::NSTimer kernelTimer("Kernel", false, false);

  kernelTimer.start();
  // Kernel
  for ( unsigned int beam = 0; beam < BEAMS; beam++ ) {
    for ( unsigned int channel = 0; channel < CHANNELS; channel++ ) {
      for ( unsigned int sample = 0; sample < SAMPLES; sample++ ) {
        for ( unsigned int station = 0; station < STATIONS; station++ ) {
          output[(beam * CHANNELS * SAMPLES) + (channel * SAMPLES) + sample] += input[(station * CHANNELS * SAMPLES) + (channel * SAMPLES) + sample] * weights[(channel * STATIONS * BEAMS) + (station * BEAMS) + beam];
        }
        output[(beam * CHANNELS * SAMPLES) + (channel * SAMPLES) + sample] /= STATIONS;
      }
    }
  }
  // /Kernel
  kernelTimer.stop();

  // Print performance metrics
  std::cout << "Sequential" << std::endl;
  std::cout << "Time: \t\t" << std::fixed << std::setprecision(6) << kernelTimer.getElapsed() << std::endl;
  std::cout << "GFLOP/s: \t" << std::fixed << std::setprecision(3) << ((static_cast< long long unsigned int >(BEAMS) * CHANNELS * SAMPLES * STATIONS * 2) + (static_cast< long long unsigned int >(BEAMS) * CHANNELS * SAMPLES)) / 1000000000.0 / kernelTimer.getElapsed() << std::endl;
  std::cout << "GB/s: \t\t" << std::fixed << std::setprecision(3) << ((static_cast< long long unsigned int >(BEAMS) * CHANNELS * SAMPLES * STATIONS * 3 * sizeof(float)) + (static_cast< long long unsigned int >(BEAMS) * CHANNELS * SAMPLES * 2 * sizeof(float))) / 1000000000.0 / kernelTimer.getElapsed() << std::endl;
}

// GPU kernel. Launch layout (set by kernel_d): blockDim.x == STATIONS,
// gridDim == (BEAMS*CHANNELS, SAMPLES), so each block owns exactly one
// (beam, channel, sample) output element and each thread one station.
// Each thread accumulates its station's contribution via atomicAdd; after the
// block barrier the station-0 thread performs the single division.
__global__ void radio(float *input, float *weights, float *output, const unsigned int BEAMS, const unsigned int CHANNELS, const unsigned int SAMPLES, const unsigned int STATIONS) {
  unsigned long long int blockId = blockIdx.y * gridDim.x + blockIdx.x;
  unsigned long long int index = blockId * blockDim.x + threadIdx.x;

  // Decompose the flat index into (beam, channel, sample, station).
  unsigned int N1 = STATIONS;
  unsigned int N12 = SAMPLES * N1;
  unsigned long int N123 = CHANNELS * N12;
  unsigned long long int N1234 = BEAMS * N123;
  if (index >= N1234) return;

  unsigned int beamIndex = (index - (index % (N123))) / (N123);
  index = (index - (beamIndex * N123));
  unsigned int channelIndex = (index - (index % (N12))) / (N12);
  index = (index - (channelIndex * N12));
  unsigned int sampleIndex = (index - (index % (N1))) / (N1);
  unsigned int stationIndex = (index - (sampleIndex * N1));

  atomicAdd(&output[(beamIndex * CHANNELS * SAMPLES) + (channelIndex * SAMPLES) + sampleIndex], input[(stationIndex * CHANNELS * SAMPLES) + (channelIndex * SAMPLES) + sampleIndex] * weights[(channelIndex * STATIONS * BEAMS) + (stationIndex * BEAMS) + beamIndex]);
  __syncthreads();
  if (stationIndex == 0) {
    // Whole block writes to one output element - to prevent multiple
    // divisions, only the first thread of the block may divide.
    output[(beamIndex * CHANNELS * SAMPLES) + (channelIndex * SAMPLES) + sampleIndex] /= STATIONS;
  }
}

// Host wrapper: allocates device buffers, copies inputs, launches `radio`,
// copies the result back, and prints timing/throughput metrics.
void kernel_d(const float * weights, const float * input, float * output) {
  LOFAR::NSTimer kernelTimer("Kernel", false, false);
  LOFAR::NSTimer memoryTimer("Memory", false, false);
  LOFAR::NSTimer globalTimer("Global", false, false);
  cudaError_t devRetVal = cudaSuccess;
  float * devA = 0;  // device copy of input
  float * devB = 0;  // device copy of weights
  float * devC = 0;  // device output

  // Start of the computation
  globalTimer.start();

  // Allocate CUDA memory. On any failure, release whatever was already
  // allocated before bailing out (the original leaked on every error path,
  // and never freed devB at all).
  if ((devRetVal = cudaMalloc(reinterpret_cast < void ** > ( & devA), (STATIONS * CHANNELS * SAMPLES) * sizeof(float))) != cudaSuccess) {
    cerr << "Impossible to allocate device memory for devA (input)." << endl;
    return;
  }
  if ((devRetVal = cudaMalloc(reinterpret_cast < void ** > ( & devB), (CHANNELS * STATIONS * BEAMS) * sizeof(float))) != cudaSuccess) {
    cerr << "Impossible to allocate device memory for devB (weights)." << endl;
    cudaFree(devA);
    return;
  }
  if ((devRetVal = cudaMalloc(reinterpret_cast < void ** > ( & devC), (BEAMS * CHANNELS * SAMPLES) * sizeof(float))) != cudaSuccess) {
    cerr << "Impossible to allocate device memory for devC (output)." << endl;
    cudaFree(devA);
    cudaFree(devB);
    return;
  }

  // Copy input to device
  memoryTimer.start();
  if ((devRetVal = cudaMemcpy(devA, (input), (STATIONS * CHANNELS * SAMPLES) * sizeof(float), cudaMemcpyHostToDevice)) != cudaSuccess) {
    cerr << "Impossible to copy devA to device." << endl;
    cudaFree(devA); cudaFree(devB); cudaFree(devC);
    return;
  }
  if ((devRetVal = cudaMemcpy(devB, (weights), (CHANNELS * STATIONS * BEAMS) * sizeof(float), cudaMemcpyHostToDevice)) != cudaSuccess) {
    // BUG FIX: this message previously said "devA".
    cerr << "Impossible to copy devB to device." << endl;
    cudaFree(devA); cudaFree(devB); cudaFree(devC);
    return;
  }
  // The kernel accumulates into devC with atomicAdd, so it must start zeroed.
  if ((devRetVal = cudaMemset(devC, 0, (BEAMS * CHANNELS * SAMPLES) * sizeof(float))) != cudaSuccess) {
    cerr << "Impossible to zero devC." << endl;
    cudaFree(devA); cudaFree(devB); cudaFree(devC);
    return;
  }
  memoryTimer.stop();

  // Execute the kernel: one block per output element, one thread per station.
  dim3 blockSize(STATIONS);
  dim3 gridSize(BEAMS * CHANNELS, SAMPLES);
  kernelTimer.start();
  radio <<< gridSize, blockSize >>> (devA, devB, devC, BEAMS, CHANNELS, SAMPLES, STATIONS);
  cudaDeviceSynchronize();
  kernelTimer.stop();
  // Check if the kernel returned an error
  if ((devRetVal = cudaGetLastError()) != cudaSuccess) {
    cerr << "Uh, the kernel had some kind of issue: " << cudaGetErrorString(devRetVal) << endl;
    cudaFree(devA); cudaFree(devB); cudaFree(devC);
    return;
  }

  // Copy the output back to host
  memoryTimer.start();
  if ((devRetVal = cudaMemcpy(reinterpret_cast < void * > (output), devC, (BEAMS * CHANNELS * SAMPLES) * sizeof(float), cudaMemcpyDeviceToHost)) != cudaSuccess) {
    cerr << "Impossible to copy devC to host." << cudaGetErrorString(devRetVal) << endl;
    cudaFree(devA); cudaFree(devB); cudaFree(devC);
    return;
  }
  memoryTimer.stop();

  // End of the computation
  globalTimer.stop();

  // Print performance metrics
  std::cout << "CUDA" << std::endl;
  std::cout << "Time (g): \t" << std::fixed << std::setprecision(6) << globalTimer.getElapsed() << std::endl;
  std::cout << "Time (m): \t" << std::fixed << std::setprecision(6) << memoryTimer.getElapsed() << std::endl;
  std::cout << "Time (k): \t" << std::fixed << std::setprecision(6) << kernelTimer.getElapsed() << std::endl;
  std::cout << "GFLOP/s: \t" << std::fixed << std::setprecision(3) << ((static_cast< long long unsigned int >(BEAMS) * CHANNELS * SAMPLES * STATIONS * 2) + (static_cast< long long unsigned int >(BEAMS) * CHANNELS * SAMPLES)) / 1000000000.0 / kernelTimer.getElapsed() << std::endl;
  std::cout << "GB/s: \t\t" << std::fixed << std::setprecision(3) << ((static_cast< long long unsigned int >(BEAMS) * CHANNELS * SAMPLES * STATIONS * 3 * sizeof(float)) + (static_cast< long long unsigned int >(BEAMS) * CHANNELS * SAMPLES * 2 * sizeof(float))) / 1000000000.0 / memoryTimer.getElapsed() << std::endl;

  cudaFree(devA);
  cudaFree(devB);  // BUG FIX: devB was never released, leaking the weights buffer.
  cudaFree(devC);
}

// Tolerant float comparison.
// BUG FIX: the original `abs(a - b) < 1e-6` called the integer overload
// `int abs(int)` from <cstdlib> (the arguments are float), truncating the
// difference to an int, so ANY difference smaller than 1.0 passed as "same".
// Use std::fabs, and a relative component because the summed values here
// reach magnitudes where a 1e-6 absolute tolerance is below float precision.
inline bool same(const float a, const float b) {
  const float scale = std::max(1.0f, std::max(std::fabs(a), std::fabs(b)));
  return std::fabs(a - b) < 1e-6f * scale;
}
9c39dfb5f9e6737c581fe2ba5918eca72518367f.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * Copyright (c) 2019, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Unit tests for cudf::type_dispatcher / cudf::double_type_dispatcher:
// verifies that dispatching a cudf::data_type invokes the functor with the
// matching C++ type, on both host and device (HIP-ported variant).

#include <cudf/types.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <tests/utilities/base_fixture.hpp>
#include <tests/utilities/cudf_gtest.hpp>
#include <tests/utilities/type_list_utilities.hpp>
#include <tests/utilities/type_lists.hpp>
#include <thrust/device_vector.h>

// Base fixture shared by all dispatcher tests.
struct DispatcherTest : public cudf::test::BaseFixture {
};

// Typed fixture: instantiated once per element of cudf::test::AllTypes.
template <typename T>
struct TypedDispatcherTest : public DispatcherTest {
};

TYPED_TEST_CASE(TypedDispatcherTest, cudf::test::AllTypes);

namespace {
// Functor returning whether the dispatched type equals the expected type.
template <typename Expected>
struct type_tester {
  template <typename Dispatched>
  bool operator()()
  {
    return std::is_same<Expected, Dispatched>::value;
  }
};
}  // namespace

// type_to_id followed by type_dispatcher must round-trip back to TypeParam.
TYPED_TEST(TypedDispatcherTest, TypeToId)
{
  EXPECT_TRUE(cudf::type_dispatcher(cudf::data_type{cudf::type_to_id<TypeParam>()}, type_tester<TypeParam>{}));
}

namespace {
// Functor verifying the dispatched type's id matches the dispatched id.
struct verify_dispatched_type {
  template <typename T>
  __host__ __device__ bool operator()(cudf::type_id id)
  {
    return id == cudf::type_to_id<T>();
  }
};

// Device-side dispatch check; only global thread 0 writes the result.
__global__ void dispatch_test_kernel(cudf::type_id id, bool* d_result)
{
  if (0 == threadIdx.x + blockIdx.x * blockDim.x)
    *d_result = cudf::type_dispatcher(cudf::data_type{id}, verify_dispatched_type{}, id);
}
}  // namespace

// Dispatch must also work when invoked from device code.
TYPED_TEST(TypedDispatcherTest, DeviceDispatch)
{
  thrust::device_vector<bool> result(1, false);
  hipLaunchKernelGGL(( dispatch_test_kernel), dim3(1), dim3(1), 0, 0, cudf::type_to_id<TypeParam>(), result.data().get());
  CUDA_TRY(hipDeviceSynchronize());
  EXPECT_EQ(true, result[0]);
}

// Value-parameterized fixture over every runtime type_id.
struct IdDispatcherTest : public DispatcherTest, public testing::WithParamInterface<cudf::type_id> {
};

INSTANTIATE_TEST_CASE_P(TestAllIds, IdDispatcherTest, testing::ValuesIn(cudf::test::all_type_ids));

TEST_P(IdDispatcherTest, IdToType)
{
  auto t = GetParam();
  EXPECT_TRUE(cudf::type_dispatcher(cudf::data_type{t}, verify_dispatched_type{}, t));
}

// ---- double-dispatch variants below mirror the single-dispatch tests ----

template <typename T>
struct TypedDoubleDispatcherTest : public DispatcherTest {
};

TYPED_TEST_CASE(TypedDoubleDispatcherTest, cudf::test::AllTypes);

namespace {
// Functor checking both dispatched types against the two expected types.
template <typename Expected1, typename Expected2>
struct two_type_tester {
  template <typename Dispatched1, typename Dispatched2>
  bool operator()()
  {
    return std::is_same<Expected1, Dispatched1>::value && std::is_same<Expected2, Dispatched2>::value;
  }
};
}  // namespace

TYPED_TEST(TypedDoubleDispatcherTest, TypeToId)
{
  EXPECT_TRUE(cudf::double_type_dispatcher(cudf::data_type{cudf::type_to_id<TypeParam>()}, cudf::data_type{cudf::type_to_id<TypeParam>()}, two_type_tester<TypeParam, TypeParam>{}));
}

namespace {
// Verifies both dispatched types' ids match the dispatched ids.
struct verify_double_dispatched_type {
  template <typename T1, typename T2>
  __host__ __device__ bool operator()(cudf::type_id id1, cudf::type_id id2)
  {
    return id1 == cudf::type_to_id<T1>() && id2 == cudf::type_to_id<T2>();
  }
};

// Device-side double-dispatch check; only global thread 0 writes the result.
__global__ void double_dispatch_test_kernel(cudf::type_id id1, cudf::type_id id2, bool* d_result)
{
  if (0 == threadIdx.x + blockIdx.x * blockDim.x)
    *d_result = cudf::double_type_dispatcher(
      cudf::data_type{id1}, cudf::data_type{id2}, verify_double_dispatched_type{}, id1, id2);
}
}  // namespace

TYPED_TEST(TypedDoubleDispatcherTest, DeviceDoubleDispatch)
{
  thrust::device_vector<bool> result(1, false);
  hipLaunchKernelGGL(( double_dispatch_test_kernel), dim3(1), dim3(1), 0, 0, cudf::type_to_id<TypeParam>(), cudf::type_to_id<TypeParam>(), result.data().get());
  CUDA_TRY(hipDeviceSynchronize());
  EXPECT_EQ(true, result[0]);
}

struct IdDoubleDispatcherTest : public DispatcherTest, public testing::WithParamInterface<cudf::type_id> {
};

INSTANTIATE_TEST_CASE_P(TestAllIds, IdDoubleDispatcherTest, testing::ValuesIn(cudf::test::all_type_ids));

TEST_P(IdDoubleDispatcherTest, IdToType)
{
  // Test double-dispatch of all types using the same type for both dispatches
  auto t = GetParam();
  EXPECT_TRUE(cudf::double_type_dispatcher(
    cudf::data_type{t}, cudf::data_type{t}, verify_double_dispatched_type{}, t, t));
}

struct IdFixedDoubleDispatcherTest : public DispatcherTest, public testing::WithParamInterface<cudf::type_id> {
};

INSTANTIATE_TEST_CASE_P(TestAllIds, IdFixedDoubleDispatcherTest, testing::ValuesIn(cudf::test::all_type_ids));

TEST_P(IdFixedDoubleDispatcherTest, IdToType)
{
  // Test double-dispatch of all types against one fixed type, in each direction
  auto t = GetParam();
  EXPECT_TRUE(cudf::double_type_dispatcher(cudf::data_type{t}, cudf::data_type{cudf::type_to_id<float>()}, verify_double_dispatched_type{}, t, cudf::type_to_id<float>()));
  EXPECT_TRUE(cudf::double_type_dispatcher(cudf::data_type{cudf::type_to_id<float>()}, cudf::data_type{t}, verify_double_dispatched_type{}, cudf::type_to_id<float>(), t));
}

CUDF_TEST_PROGRAM_MAIN()
9c39dfb5f9e6737c581fe2ba5918eca72518367f.cu
/*
 * Copyright (c) 2019, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Unit tests for cudf::type_dispatcher / cudf::double_type_dispatcher:
// verifies that dispatching a cudf::data_type invokes the functor with the
// matching C++ type, on both host and device.

#include <cudf/types.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <tests/utilities/base_fixture.hpp>
#include <tests/utilities/cudf_gtest.hpp>
#include <tests/utilities/type_list_utilities.hpp>
#include <tests/utilities/type_lists.hpp>
#include <thrust/device_vector.h>

// Base fixture shared by all dispatcher tests.
struct DispatcherTest : public cudf::test::BaseFixture {
};

// Typed fixture: instantiated once per element of cudf::test::AllTypes.
template <typename T>
struct TypedDispatcherTest : public DispatcherTest {
};

TYPED_TEST_CASE(TypedDispatcherTest, cudf::test::AllTypes);

namespace {
// Functor returning whether the dispatched type equals the expected type.
template <typename Expected>
struct type_tester {
  template <typename Dispatched>
  bool operator()()
  {
    return std::is_same<Expected, Dispatched>::value;
  }
};
}  // namespace

// type_to_id followed by type_dispatcher must round-trip back to TypeParam.
TYPED_TEST(TypedDispatcherTest, TypeToId)
{
  EXPECT_TRUE(cudf::type_dispatcher(cudf::data_type{cudf::type_to_id<TypeParam>()}, type_tester<TypeParam>{}));
}

namespace {
// Functor verifying the dispatched type's id matches the dispatched id.
struct verify_dispatched_type {
  template <typename T>
  __host__ __device__ bool operator()(cudf::type_id id)
  {
    return id == cudf::type_to_id<T>();
  }
};

// Device-side dispatch check; only global thread 0 writes the result.
__global__ void dispatch_test_kernel(cudf::type_id id, bool* d_result)
{
  if (0 == threadIdx.x + blockIdx.x * blockDim.x)
    *d_result = cudf::type_dispatcher(cudf::data_type{id}, verify_dispatched_type{}, id);
}
}  // namespace

// Dispatch must also work when invoked from device code.
TYPED_TEST(TypedDispatcherTest, DeviceDispatch)
{
  thrust::device_vector<bool> result(1, false);
  dispatch_test_kernel<<<1, 1>>>(cudf::type_to_id<TypeParam>(), result.data().get());
  CUDA_TRY(cudaDeviceSynchronize());
  EXPECT_EQ(true, result[0]);
}

// Value-parameterized fixture over every runtime type_id.
struct IdDispatcherTest : public DispatcherTest, public testing::WithParamInterface<cudf::type_id> {
};

INSTANTIATE_TEST_CASE_P(TestAllIds, IdDispatcherTest, testing::ValuesIn(cudf::test::all_type_ids));

TEST_P(IdDispatcherTest, IdToType)
{
  auto t = GetParam();
  EXPECT_TRUE(cudf::type_dispatcher(cudf::data_type{t}, verify_dispatched_type{}, t));
}

// ---- double-dispatch variants below mirror the single-dispatch tests ----

template <typename T>
struct TypedDoubleDispatcherTest : public DispatcherTest {
};

TYPED_TEST_CASE(TypedDoubleDispatcherTest, cudf::test::AllTypes);

namespace {
// Functor checking both dispatched types against the two expected types.
template <typename Expected1, typename Expected2>
struct two_type_tester {
  template <typename Dispatched1, typename Dispatched2>
  bool operator()()
  {
    return std::is_same<Expected1, Dispatched1>::value && std::is_same<Expected2, Dispatched2>::value;
  }
};
}  // namespace

TYPED_TEST(TypedDoubleDispatcherTest, TypeToId)
{
  EXPECT_TRUE(cudf::double_type_dispatcher(cudf::data_type{cudf::type_to_id<TypeParam>()}, cudf::data_type{cudf::type_to_id<TypeParam>()}, two_type_tester<TypeParam, TypeParam>{}));
}

namespace {
// Verifies both dispatched types' ids match the dispatched ids.
struct verify_double_dispatched_type {
  template <typename T1, typename T2>
  __host__ __device__ bool operator()(cudf::type_id id1, cudf::type_id id2)
  {
    return id1 == cudf::type_to_id<T1>() && id2 == cudf::type_to_id<T2>();
  }
};

// Device-side double-dispatch check; only global thread 0 writes the result.
__global__ void double_dispatch_test_kernel(cudf::type_id id1, cudf::type_id id2, bool* d_result)
{
  if (0 == threadIdx.x + blockIdx.x * blockDim.x)
    *d_result = cudf::double_type_dispatcher(
      cudf::data_type{id1}, cudf::data_type{id2}, verify_double_dispatched_type{}, id1, id2);
}
}  // namespace

TYPED_TEST(TypedDoubleDispatcherTest, DeviceDoubleDispatch)
{
  thrust::device_vector<bool> result(1, false);
  double_dispatch_test_kernel<<<1, 1>>>(
    cudf::type_to_id<TypeParam>(), cudf::type_to_id<TypeParam>(), result.data().get());
  CUDA_TRY(cudaDeviceSynchronize());
  EXPECT_EQ(true, result[0]);
}

struct IdDoubleDispatcherTest : public DispatcherTest, public testing::WithParamInterface<cudf::type_id> {
};

INSTANTIATE_TEST_CASE_P(TestAllIds, IdDoubleDispatcherTest, testing::ValuesIn(cudf::test::all_type_ids));

TEST_P(IdDoubleDispatcherTest, IdToType)
{
  // Test double-dispatch of all types using the same type for both dispatches
  auto t = GetParam();
  EXPECT_TRUE(cudf::double_type_dispatcher(
    cudf::data_type{t}, cudf::data_type{t}, verify_double_dispatched_type{}, t, t));
}

struct IdFixedDoubleDispatcherTest : public DispatcherTest, public testing::WithParamInterface<cudf::type_id> {
};

INSTANTIATE_TEST_CASE_P(TestAllIds, IdFixedDoubleDispatcherTest, testing::ValuesIn(cudf::test::all_type_ids));

TEST_P(IdFixedDoubleDispatcherTest, IdToType)
{
  // Test double-dispatch of all types against one fixed type, in each direction
  auto t = GetParam();
  EXPECT_TRUE(cudf::double_type_dispatcher(cudf::data_type{t}, cudf::data_type{cudf::type_to_id<float>()}, verify_double_dispatched_type{}, t, cudf::type_to_id<float>()));
  EXPECT_TRUE(cudf::double_type_dispatcher(cudf::data_type{cudf::type_to_id<float>()}, cudf::data_type{t}, verify_double_dispatched_type{}, cudf::type_to_id<float>(), t));
}

CUDF_TEST_PROGRAM_MAIN()
65eeb1db92e8df4e049fd6f0c279e1171af58909.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE

// BEGIN PYTHON
// def f(grid, block, args):
//     (toptr, fromptr, length, stride, invocation_index, err_code) = args
//     scan_in_array = cupy.empty(length, dtype=cupy.int64)
//     cuda_kernel_templates.get_function(fetch_specialization(["awkward_NumpyArray_getitem_boolean_nonzero_a", toptr.dtype, fromptr.dtype]))(grid, block, (toptr, fromptr, length, stride, scan_in_array, invocation_index, err_code))
//     scan_in_array = inclusive_scan(grid, block, (scan_in_array, invocation_index, err_code))
//     cuda_kernel_templates.get_function(fetch_specialization(["awkward_NumpyArray_getitem_boolean_nonzero_b", toptr.dtype, fromptr.dtype]))(grid, block, (toptr, fromptr, length, stride, scan_in_array, invocation_index, err_code))
// out["awkward_NumpyArray_getitem_boolean_nonzero_a", {dtype_specializations}] = None
// out["awkward_NumpyArray_getitem_boolean_nonzero_b", {dtype_specializations}] = None
// END PYTHON

// Phase a: build the scan input. Marks each stride-aligned position whose
// boolean is non-zero with 1, everything else with 0.
template <typename T, typename C>
__global__ void
awkward_NumpyArray_getitem_boolean_nonzero_a(T* toptr,
                                             const C* fromptr,
                                             int64_t length,
                                             int64_t stride,
                                             int64_t* scan_in_array,
                                             uint64_t invocation_index,
                                             uint64_t* err_code) {
  if (err_code[0] == NO_ERROR) {
    int64_t thread_id = blockIdx.x * blockDim.x + threadIdx.x;
    if (thread_id < length) {
      // BUG FIX: scan_in_array is allocated with cupy.empty (uninitialized,
      // see the PYTHON block above). The original code did not write anything
      // for stride-aligned positions whose fromptr value is zero, feeding
      // garbage into the subsequent inclusive scan. Every covered slot must
      // be written exactly once.
      if (thread_id % stride == 0 && fromptr[thread_id] != 0) {
        scan_in_array[thread_id] = 1;
      }
      else {
        scan_in_array[thread_id] = 0;
      }
    }
  }
}

// Phase b: after the inclusive scan, scan_in_array[i] holds the 1-based rank
// of position i among the selected positions; use it to scatter the strided
// index into toptr.
template <typename T, typename C>
__global__ void
awkward_NumpyArray_getitem_boolean_nonzero_b(T* toptr,
                                             const C* fromptr,
                                             int64_t length,
                                             int64_t stride,
                                             int64_t* scan_in_array,
                                             uint64_t invocation_index,
                                             uint64_t* err_code) {
  if (err_code[0] == NO_ERROR) {
    int64_t thread_id = blockIdx.x * blockDim.x + threadIdx.x;
    if (thread_id < length && thread_id % stride == 0) {
      if (fromptr[thread_id] != 0) {
        toptr[scan_in_array[thread_id] - 1] = thread_id;
      }
    }
  }
}
65eeb1db92e8df4e049fd6f0c279e1171af58909.cu
// BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE

// BEGIN PYTHON
// def f(grid, block, args):
//     (toptr, fromptr, length, stride, invocation_index, err_code) = args
//     scan_in_array = cupy.empty(length, dtype=cupy.int64)
//     cuda_kernel_templates.get_function(fetch_specialization(["awkward_NumpyArray_getitem_boolean_nonzero_a", toptr.dtype, fromptr.dtype]))(grid, block, (toptr, fromptr, length, stride, scan_in_array, invocation_index, err_code))
//     scan_in_array = inclusive_scan(grid, block, (scan_in_array, invocation_index, err_code))
//     cuda_kernel_templates.get_function(fetch_specialization(["awkward_NumpyArray_getitem_boolean_nonzero_b", toptr.dtype, fromptr.dtype]))(grid, block, (toptr, fromptr, length, stride, scan_in_array, invocation_index, err_code))
// out["awkward_NumpyArray_getitem_boolean_nonzero_a", {dtype_specializations}] = None
// out["awkward_NumpyArray_getitem_boolean_nonzero_b", {dtype_specializations}] = None
// END PYTHON

// Phase a: build the scan input. Marks each stride-aligned position whose
// boolean is non-zero with 1, everything else with 0.
template <typename T, typename C>
__global__ void
awkward_NumpyArray_getitem_boolean_nonzero_a(T* toptr,
                                             const C* fromptr,
                                             int64_t length,
                                             int64_t stride,
                                             int64_t* scan_in_array,
                                             uint64_t invocation_index,
                                             uint64_t* err_code) {
  if (err_code[0] == NO_ERROR) {
    int64_t thread_id = blockIdx.x * blockDim.x + threadIdx.x;
    if (thread_id < length) {
      // BUG FIX: scan_in_array is allocated with cupy.empty (uninitialized,
      // see the PYTHON block above). The original code did not write anything
      // for stride-aligned positions whose fromptr value is zero, feeding
      // garbage into the subsequent inclusive scan. Every covered slot must
      // be written exactly once.
      if (thread_id % stride == 0 && fromptr[thread_id] != 0) {
        scan_in_array[thread_id] = 1;
      }
      else {
        scan_in_array[thread_id] = 0;
      }
    }
  }
}

// Phase b: after the inclusive scan, scan_in_array[i] holds the 1-based rank
// of position i among the selected positions; use it to scatter the strided
// index into toptr.
template <typename T, typename C>
__global__ void
awkward_NumpyArray_getitem_boolean_nonzero_b(T* toptr,
                                             const C* fromptr,
                                             int64_t length,
                                             int64_t stride,
                                             int64_t* scan_in_array,
                                             uint64_t invocation_index,
                                             uint64_t* err_code) {
  if (err_code[0] == NO_ERROR) {
    int64_t thread_id = blockIdx.x * blockDim.x + threadIdx.x;
    if (thread_id < length && thread_id % stride == 0) {
      if (fromptr[thread_id] != 0) {
        toptr[scan_in_array[thread_id] - 1] = thread_id;
      }
    }
  }
}
97b90bd1d6c046981ddaf98bea257fadb8b796d7.hip
// !!! This is a file automatically generated by hipify!!!
/**
 * Copyright 2020-2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <hip/hip_runtime.h>
#include "oneslike_impl.cuh"
#include "runtime/device/gpu/cuda_common.h"

// Writes the value 1 (cast to T) into every element of `output`, iterating
// with a grid-stride pattern so any launch configuration covers all `size`
// elements. `input` is part of the OnesLike signature but its values are not
// read - only the element count matters.
template <typename T>
__global__ void OnesLike(const size_t size, const T* input, T* output) {
  const T one_value = static_cast<T>(1);
  const size_t step = blockDim.x * gridDim.x;
  size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
  while (idx < size) {
    output[idx] = one_value;
    idx += step;
  }
}

// Host-side launcher: dispatches OnesLike on `cuda_stream` with the block/grid
// sizing provided by the project's GET_BLOCKS/GET_THREADS helpers.
template <typename T>
void CalOnesLike(const size_t size, const T* input, T* output, hipStream_t cuda_stream) {
  hipLaunchKernelGGL(( OnesLike), dim3(GET_BLOCKS(size)), dim3(GET_THREADS), 0, cuda_stream, size, input, output);
}

// Explicit instantiations for every element type the GPU backend supports.
template void CalOnesLike<double>(const size_t size, const double* input, double* output, hipStream_t cuda_stream);
template void CalOnesLike<float>(const size_t size, const float* input, float* output, hipStream_t cuda_stream);
template void CalOnesLike<half>(const size_t size, const half* input, half* output, hipStream_t cuda_stream);
template void CalOnesLike<int8_t>(const size_t size, const int8_t* input, int8_t* output, hipStream_t cuda_stream);
template void CalOnesLike<int16_t>(const size_t size, const int16_t* input, int16_t* output, hipStream_t cuda_stream);
template void CalOnesLike<int32_t>(const size_t size, const int32_t* input, int32_t* output, hipStream_t cuda_stream);
template void CalOnesLike<int64_t>(const size_t size, const int64_t* input, int64_t* output, hipStream_t cuda_stream);
template void CalOnesLike<uint8_t>(const size_t size, const uint8_t* input, uint8_t* output, hipStream_t cuda_stream);
template void CalOnesLike<uint16_t>(const size_t size, const uint16_t* input, uint16_t* output, hipStream_t cuda_stream);
template void CalOnesLike<uint32_t>(const size_t size, const uint32_t* input, uint32_t* output, hipStream_t cuda_stream);
template void CalOnesLike<uint64_t>(const size_t size, const uint64_t* input, uint64_t* output, hipStream_t cuda_stream);
97b90bd1d6c046981ddaf98bea257fadb8b796d7.cu
/**
 * Copyright 2020-2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <cuda_runtime.h>
#include "oneslike_impl.cuh"
#include "runtime/device/gpu/cuda_common.h"

// Writes the value 1 (cast to T) into every element of `output`, iterating
// with a grid-stride pattern so any launch configuration covers all `size`
// elements. `input` is part of the OnesLike signature but its values are not
// read - only the element count matters.
template <typename T>
__global__ void OnesLike(const size_t size, const T* input, T* output) {
  const T one_value = static_cast<T>(1);
  const size_t step = blockDim.x * gridDim.x;
  size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
  while (idx < size) {
    output[idx] = one_value;
    idx += step;
  }
}

// Host-side launcher: dispatches OnesLike on `cuda_stream` with the block/grid
// sizing provided by the project's GET_BLOCKS/GET_THREADS helpers.
template <typename T>
void CalOnesLike(const size_t size, const T* input, T* output, cudaStream_t cuda_stream) {
  OnesLike<<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>(size, input, output);
}

// Explicit instantiations for every element type the GPU backend supports.
template void CalOnesLike<double>(const size_t size, const double* input, double* output, cudaStream_t cuda_stream);
template void CalOnesLike<float>(const size_t size, const float* input, float* output, cudaStream_t cuda_stream);
template void CalOnesLike<half>(const size_t size, const half* input, half* output, cudaStream_t cuda_stream);
template void CalOnesLike<int8_t>(const size_t size, const int8_t* input, int8_t* output, cudaStream_t cuda_stream);
template void CalOnesLike<int16_t>(const size_t size, const int16_t* input, int16_t* output, cudaStream_t cuda_stream);
template void CalOnesLike<int32_t>(const size_t size, const int32_t* input, int32_t* output, cudaStream_t cuda_stream);
template void CalOnesLike<int64_t>(const size_t size, const int64_t* input, int64_t* output, cudaStream_t cuda_stream);
template void CalOnesLike<uint8_t>(const size_t size, const uint8_t* input, uint8_t* output, cudaStream_t cuda_stream);
template void CalOnesLike<uint16_t>(const size_t size, const uint16_t* input, uint16_t* output, cudaStream_t cuda_stream);
template void CalOnesLike<uint32_t>(const size_t size, const uint32_t* input, uint32_t* output, cudaStream_t cuda_stream);
template void CalOnesLike<uint64_t>(const size_t size, const uint64_t* input, uint64_t* output, cudaStream_t cuda_stream);
230a703efc958542615d55c14b25feaae415f483.hip
// !!! This is a file automatically generated by hipify!!! //#include "CudaSplitEncseg.h" //#include "CudaSplitEncsubface.h" //#include "CudaSplitBadtet.h" //#include "CudaInsertPoint.h" //#include "CudaMesh.h" //#include <time.h> // ///* Host */ //// This function assumes the input status has be set correctly //// in the initialization //void initTetBadstatus( // RealD& t_pointlist, // IntD& t_tetlist, // TetStatusD& t_tetstatus, // REAL minratio, // int& numofbadtet //) //{ // int numberofblocks = (ceil)((float)numofbadtet / BLOCK_SIZE); // kernelMarkAllBadtets << <numberofblocks, BLOCK_SIZE >> >( // thrust::raw_pointer_cast(&t_pointlist[0]), // thrust::raw_pointer_cast(&t_tetlist[0]), // thrust::raw_pointer_cast(&t_tetstatus[0]), // minratio, // numofbadtet // ); //} // //// This function splits the bad tets iteratively //void splitBadTets( // RealD& t_pointlist, // TriHandleD& t_point2trilist, // TetHandleD& t_point2tetlist, // PointTypeD& t_pointtypelist, // RealD& t_pointradius, // IntD& t_seglist, // TriHandleD& t_seg2trilist, // TetHandleD& t_seg2tetlist, // IntD& t_seg2parentidxlist, // IntD& t_segparentendpointidxlist, // TriStatusD& t_segstatus, // IntD& t_trifacelist, // TetHandleD& t_tri2tetlist, // TriHandleD& t_tri2trilist, // TriHandleD& t_tri2seglist, // IntD& t_tri2parentidxlist, // IntD& t_triid2parentoffsetlist, // IntD& t_triparentendpointidxlist, // TriStatusD& t_tristatus, // IntD& t_tetlist, // TetHandleD& t_neighborlist, // TriHandleD& t_tet2trilist, // TriHandleD& t_tet2seglist, // TetStatusD& t_tetstatus, // IntD& t_segencmarker, // IntD& t_subfaceencmarker, // int& numofpoints, // int& numofsubseg, // int& numofsubface, // int& numoftet, // MESHBH* behavior, // int debug_msg, // bool debug_error, // bool debug_timing //) //{ // int numberofbadtets; // number of bad tets // IntD t_badtetlist; // IntD t_threadmarker; // // clock_t tv[2]; // int npt[2]; // int code = 1; // int iteration = 0; // int counter = 0; // while (true) // { // // 
Update the active bad tet list. // // Exclude the empty ones (their status have already been set to empty). // numberofbadtets = updateActiveListByStatus_Slot(t_tetstatus, t_badtetlist, numoftet); // if (debug_msg) printf(" Iteration #%d: number of bad tets = %d\n", iteration, numberofbadtets); // if (numberofbadtets == 0) // break; // // if (numberofbadtets <= behavior->minbadtets && iteration >= behavior->miniter) // { // code = 0; // break; // } // // // t_threadmarker.resize(numberofbadtets); // thrust::fill(t_threadmarker.begin(), t_threadmarker.end(), 2); // // //hipDeviceSynchronize(); // //tv[0] = clock(); // //npt[0] = numofpoints; // // code = // insertPoint( // t_pointlist, // t_point2trilist, // t_point2tetlist, // t_pointtypelist, // t_pointradius, // t_seglist, // t_seg2trilist, // t_seg2tetlist, // t_seg2parentidxlist, // t_segparentendpointidxlist, // t_segstatus, // t_trifacelist, // t_tri2tetlist, // t_tri2trilist, // t_tri2seglist, // t_tri2parentidxlist, // t_triid2parentoffsetlist, // t_triparentendpointidxlist, // t_tristatus, // t_tetlist, // t_neighborlist, // t_tet2trilist, // t_tet2seglist, // t_tetstatus, // t_segencmarker, // t_subfaceencmarker, // t_badtetlist, // t_threadmarker, // numberofbadtets, // 0, // 0, // split tets // numberofbadtets, // numofpoints, // numofsubseg, // numofsubface, // numoftet, // behavior, // -1, // -1, // iteration, // debug_msg, // debug_error, // debug_timing // ); // // //hipDeviceSynchronize(); // //tv[1] = clock(); // //npt[1] = numofpoints; // //printf("%f, %d\n", (REAL)(tv[1] - tv[0]), npt[1] - npt[0]); // // if (!code) // break; // // splitEncsegs( // t_pointlist, // t_point2trilist, // t_point2tetlist, // t_pointtypelist, // t_pointradius, // t_seglist, // t_seg2trilist, // t_seg2tetlist, // t_seg2parentidxlist, // t_segparentendpointidxlist, // t_segstatus, // t_trifacelist, // t_tri2tetlist, // t_tri2trilist, // t_tri2seglist, // t_tri2parentidxlist, // t_triid2parentoffsetlist, // 
t_triparentendpointidxlist, // t_tristatus, // t_tetlist, // t_neighborlist, // t_tet2trilist, // t_tet2seglist, // t_tetstatus, // t_segencmarker, // t_subfaceencmarker, // numofpoints, // numofsubseg, // numofsubface, // numoftet, // behavior, // -1, // iteration, // 0, // debug_error, // false // ); // // splitEncsubfaces( // t_pointlist, // t_point2trilist, // t_point2tetlist, // t_pointtypelist, // t_pointradius, // t_seglist, // t_seg2trilist, // t_seg2tetlist, // t_seg2parentidxlist, // t_segparentendpointidxlist, // t_segstatus, // t_trifacelist, // t_tri2tetlist, // t_tri2trilist, // t_tri2seglist, // t_tri2parentidxlist, // t_triid2parentoffsetlist, // t_triparentendpointidxlist, // t_tristatus, // t_tetlist, // t_neighborlist, // t_tet2trilist, // t_tet2seglist, // t_tetstatus, // t_segencmarker, // t_subfaceencmarker, // numofpoints, // numofsubseg, // numofsubface, // numoftet, // behavior, // iteration, // 0, // debug_error, // false // ); // // //hipDeviceSynchronize(); // //tv[0] = clock(); // //npt[0] = numofpoints; // //printf("%f, %d\n", (REAL)(tv[0] - tv[1]), npt[0] - npt[1]); // // iteration++; // } // // if (!code) // printf(" Ended with %d bad tets\n", numberofbadtets); //}
230a703efc958542615d55c14b25feaae415f483.cu
//#include "CudaSplitEncseg.h" //#include "CudaSplitEncsubface.h" //#include "CudaSplitBadtet.h" //#include "CudaInsertPoint.h" //#include "CudaMesh.h" //#include <time.h> // ///* Host */ //// This function assumes the input status has be set correctly //// in the initialization //void initTetBadstatus( // RealD& t_pointlist, // IntD& t_tetlist, // TetStatusD& t_tetstatus, // REAL minratio, // int& numofbadtet //) //{ // int numberofblocks = (ceil)((float)numofbadtet / BLOCK_SIZE); // kernelMarkAllBadtets << <numberofblocks, BLOCK_SIZE >> >( // thrust::raw_pointer_cast(&t_pointlist[0]), // thrust::raw_pointer_cast(&t_tetlist[0]), // thrust::raw_pointer_cast(&t_tetstatus[0]), // minratio, // numofbadtet // ); //} // //// This function splits the bad tets iteratively //void splitBadTets( // RealD& t_pointlist, // TriHandleD& t_point2trilist, // TetHandleD& t_point2tetlist, // PointTypeD& t_pointtypelist, // RealD& t_pointradius, // IntD& t_seglist, // TriHandleD& t_seg2trilist, // TetHandleD& t_seg2tetlist, // IntD& t_seg2parentidxlist, // IntD& t_segparentendpointidxlist, // TriStatusD& t_segstatus, // IntD& t_trifacelist, // TetHandleD& t_tri2tetlist, // TriHandleD& t_tri2trilist, // TriHandleD& t_tri2seglist, // IntD& t_tri2parentidxlist, // IntD& t_triid2parentoffsetlist, // IntD& t_triparentendpointidxlist, // TriStatusD& t_tristatus, // IntD& t_tetlist, // TetHandleD& t_neighborlist, // TriHandleD& t_tet2trilist, // TriHandleD& t_tet2seglist, // TetStatusD& t_tetstatus, // IntD& t_segencmarker, // IntD& t_subfaceencmarker, // int& numofpoints, // int& numofsubseg, // int& numofsubface, // int& numoftet, // MESHBH* behavior, // int debug_msg, // bool debug_error, // bool debug_timing //) //{ // int numberofbadtets; // number of bad tets // IntD t_badtetlist; // IntD t_threadmarker; // // clock_t tv[2]; // int npt[2]; // int code = 1; // int iteration = 0; // int counter = 0; // while (true) // { // // Update the active bad tet list. 
// // Exclude the empty ones (their status have already been set to empty). // numberofbadtets = updateActiveListByStatus_Slot(t_tetstatus, t_badtetlist, numoftet); // if (debug_msg) printf(" Iteration #%d: number of bad tets = %d\n", iteration, numberofbadtets); // if (numberofbadtets == 0) // break; // // if (numberofbadtets <= behavior->minbadtets && iteration >= behavior->miniter) // { // code = 0; // break; // } // // // t_threadmarker.resize(numberofbadtets); // thrust::fill(t_threadmarker.begin(), t_threadmarker.end(), 2); // // //cudaDeviceSynchronize(); // //tv[0] = clock(); // //npt[0] = numofpoints; // // code = // insertPoint( // t_pointlist, // t_point2trilist, // t_point2tetlist, // t_pointtypelist, // t_pointradius, // t_seglist, // t_seg2trilist, // t_seg2tetlist, // t_seg2parentidxlist, // t_segparentendpointidxlist, // t_segstatus, // t_trifacelist, // t_tri2tetlist, // t_tri2trilist, // t_tri2seglist, // t_tri2parentidxlist, // t_triid2parentoffsetlist, // t_triparentendpointidxlist, // t_tristatus, // t_tetlist, // t_neighborlist, // t_tet2trilist, // t_tet2seglist, // t_tetstatus, // t_segencmarker, // t_subfaceencmarker, // t_badtetlist, // t_threadmarker, // numberofbadtets, // 0, // 0, // split tets // numberofbadtets, // numofpoints, // numofsubseg, // numofsubface, // numoftet, // behavior, // -1, // -1, // iteration, // debug_msg, // debug_error, // debug_timing // ); // // //cudaDeviceSynchronize(); // //tv[1] = clock(); // //npt[1] = numofpoints; // //printf("%f, %d\n", (REAL)(tv[1] - tv[0]), npt[1] - npt[0]); // // if (!code) // break; // // splitEncsegs( // t_pointlist, // t_point2trilist, // t_point2tetlist, // t_pointtypelist, // t_pointradius, // t_seglist, // t_seg2trilist, // t_seg2tetlist, // t_seg2parentidxlist, // t_segparentendpointidxlist, // t_segstatus, // t_trifacelist, // t_tri2tetlist, // t_tri2trilist, // t_tri2seglist, // t_tri2parentidxlist, // t_triid2parentoffsetlist, // t_triparentendpointidxlist, // t_tristatus, 
// t_tetlist, // t_neighborlist, // t_tet2trilist, // t_tet2seglist, // t_tetstatus, // t_segencmarker, // t_subfaceencmarker, // numofpoints, // numofsubseg, // numofsubface, // numoftet, // behavior, // -1, // iteration, // 0, // debug_error, // false // ); // // splitEncsubfaces( // t_pointlist, // t_point2trilist, // t_point2tetlist, // t_pointtypelist, // t_pointradius, // t_seglist, // t_seg2trilist, // t_seg2tetlist, // t_seg2parentidxlist, // t_segparentendpointidxlist, // t_segstatus, // t_trifacelist, // t_tri2tetlist, // t_tri2trilist, // t_tri2seglist, // t_tri2parentidxlist, // t_triid2parentoffsetlist, // t_triparentendpointidxlist, // t_tristatus, // t_tetlist, // t_neighborlist, // t_tet2trilist, // t_tet2seglist, // t_tetstatus, // t_segencmarker, // t_subfaceencmarker, // numofpoints, // numofsubseg, // numofsubface, // numoftet, // behavior, // iteration, // 0, // debug_error, // false // ); // // //cudaDeviceSynchronize(); // //tv[0] = clock(); // //npt[0] = numofpoints; // //printf("%f, %d\n", (REAL)(tv[0] - tv[1]), npt[0] - npt[1]); // // iteration++; // } // // if (!code) // printf(" Ended with %d bad tets\n", numberofbadtets); //}
ffcb13ccf9728ede847730d8440310ca11cb0ce3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef USE_ROCM #include "dragon/core/context_cuda.h" #include "dragon/utils/op_kernels.h" namespace dragon { namespace kernel { namespace { template <typename T> __global__ void _ChannelAffine( const int nthreads, const int inner_dim, const int axis_dim, const T* x, const T* w, T* y) { CUDA_1D_KERNEL_LOOP(i, nthreads) { #if __CUDA_ARCH__ >= 350 y[i] = x[i] * __ldg(w + (i / inner_dim) % axis_dim); #else y[i] = x[i] * w[(i / inner_dim) % axis_dim]; #endif } } template <> __global__ void _ChannelAffine<half>( const int nthreads, const int inner_dim, const int axis_dim, const half* x, const half* w, half* y) { CUDA_1D_KERNEL_LOOP(i, nthreads) { #if __CUDA_ARCH__ >= 530 y[i] = __hmul(x[i], __ldg(w + (i / inner_dim) % axis_dim)); #elif __CUDA_ARCH__ >= 350 y[i] = __float2half( __half2float(x[i]) * __half2float(__ldg(w + (i / inner_dim) % axis_dim))); #else y[i] = __float2half( __half2float(x[i]) * __half2float(w[(i / inner_dim) % axis_dim])); #endif } } template <typename T> __global__ void _ChannelAffine( const int nthreads, const int inner_dim, const int axis_dim, const T* x, const T* w, const T* b, T* y) { CUDA_1D_KERNEL_LOOP(i, nthreads) { const int wi = (i / inner_dim) % axis_dim; #if __CUDA_ARCH__ >= 350 y[i] = x[i] * __ldg(w + wi) + __ldg(b + wi); #else y[i] = x[i] * w[wi] + b[wi]; #endif } } template <> __global__ void _ChannelAffine<half>( const int nthreads, const int inner_dim, const int axis_dim, const half* x, const half* w, const half* b, half* y) { CUDA_1D_KERNEL_LOOP(i, nthreads) { const int wi = (i / inner_dim) % axis_dim; #if __CUDA_ARCH__ >= 530 y[i] = __hfma(x[i], __ldg(w + wi), __ldg(b + wi)); #elif __CUDA_ARCH__ >= 350 y[i] = __float2half(fmaf( __half2float(x[i]), __half2float(__ldg(w + wi)), __half2float(__ldg(b + wi)))); #else y[i] = __float2half( fmaf(__half2float(x[i]), __half2float(w[wi]), __half2float(b[wi]))); #endif } } template <> __global__ void 
_ChannelAffine<float>( const int nthreads, const int inner_dim, const int axis_dim, const float* x, const float* w, const float* b, float* y) { CUDA_1D_KERNEL_LOOP(i, nthreads) { const int wi = (i / inner_dim) % axis_dim; #if __CUDA_ARCH__ >= 350 y[i] = fmaf(x[i], __ldg(w + wi), __ldg(b + wi)); #else y[i] = fmaf(x[i], w[wi], b[wi]); #endif } } template <> __global__ void _ChannelAffine<double>( const int nthreads, const int inner_dim, const int axis_dim, const double* x, const double* w, const double* b, double* y) { CUDA_1D_KERNEL_LOOP(i, nthreads) { const int wi = (i / inner_dim) % axis_dim; #if __CUDA_ARCH__ >= 350 y[i] = fma(x[i], __ldg(w + wi), __ldg(b + wi)); #else y[i] = fma(x[i], w[wi], b[wi]); #endif } } } // namespace /* ------------------- Launcher Separator ------------------- */ template <> void ChannelAffine<float16, CUDAContext>( const int outer_dim, const int inner_dim, const int axis_dim, const float16* x, const float16* w, const float16* b, float16* y, CUDAContext* ctx) { const auto nthreads = outer_dim * axis_dim * inner_dim; if (b != nullptr) { hipLaunchKernelGGL(( _ChannelAffine), dim3(CUDA_BLOCKS(nthreads)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(), nthreads, inner_dim, axis_dim, reinterpret_cast<const half*>(x), reinterpret_cast<const half*>(w), reinterpret_cast<const half*>(b), reinterpret_cast<half*>(y)); } else { hipLaunchKernelGGL(( _ChannelAffine), dim3(CUDA_BLOCKS(nthreads)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(), nthreads, inner_dim, axis_dim, reinterpret_cast<const half*>(x), reinterpret_cast<const half*>(w), reinterpret_cast<half*>(y)); } } #define DEFINE_KERNEL_LAUNCHER(T) \ template <> \ void ChannelAffine<T, CUDAContext>( \ const int outer_dim, \ const int inner_dim, \ const int axis_dim, \ const T* x, \ const T* w, \ const T* b, \ T* y, \ CUDAContext* ctx) { \ const auto nthreads = outer_dim * axis_dim * inner_dim; \ if (b != nullptr) { \ hipLaunchKernelGGL(( _ChannelAffine), \ CUDA_BLOCKS(nthreads), \ CUDA_THREADS, \ 0, \ 
ctx->cuda_stream(), nthreads, inner_dim, axis_dim, x, w, b, y); \ } else { \ hipLaunchKernelGGL(( _ChannelAffine), \ CUDA_BLOCKS(nthreads), \ CUDA_THREADS, \ 0, \ ctx->cuda_stream(), nthreads, inner_dim, axis_dim, x, w, y); \ } \ } DEFINE_KERNEL_LAUNCHER(int8_t); DEFINE_KERNEL_LAUNCHER(uint8_t); DEFINE_KERNEL_LAUNCHER(int); DEFINE_KERNEL_LAUNCHER(int64_t); DEFINE_KERNEL_LAUNCHER(float); DEFINE_KERNEL_LAUNCHER(double); #undef DEFINE_KERNEL_LAUNCHER } // namespace kernel } // namespace dragon #endif // USE_ROCM
ffcb13ccf9728ede847730d8440310ca11cb0ce3.cu
#ifdef USE_CUDA #include "dragon/core/context_cuda.h" #include "dragon/utils/op_kernels.h" namespace dragon { namespace kernel { namespace { template <typename T> __global__ void _ChannelAffine( const int nthreads, const int inner_dim, const int axis_dim, const T* x, const T* w, T* y) { CUDA_1D_KERNEL_LOOP(i, nthreads) { #if __CUDA_ARCH__ >= 350 y[i] = x[i] * __ldg(w + (i / inner_dim) % axis_dim); #else y[i] = x[i] * w[(i / inner_dim) % axis_dim]; #endif } } template <> __global__ void _ChannelAffine<half>( const int nthreads, const int inner_dim, const int axis_dim, const half* x, const half* w, half* y) { CUDA_1D_KERNEL_LOOP(i, nthreads) { #if __CUDA_ARCH__ >= 530 y[i] = __hmul(x[i], __ldg(w + (i / inner_dim) % axis_dim)); #elif __CUDA_ARCH__ >= 350 y[i] = __float2half( __half2float(x[i]) * __half2float(__ldg(w + (i / inner_dim) % axis_dim))); #else y[i] = __float2half( __half2float(x[i]) * __half2float(w[(i / inner_dim) % axis_dim])); #endif } } template <typename T> __global__ void _ChannelAffine( const int nthreads, const int inner_dim, const int axis_dim, const T* x, const T* w, const T* b, T* y) { CUDA_1D_KERNEL_LOOP(i, nthreads) { const int wi = (i / inner_dim) % axis_dim; #if __CUDA_ARCH__ >= 350 y[i] = x[i] * __ldg(w + wi) + __ldg(b + wi); #else y[i] = x[i] * w[wi] + b[wi]; #endif } } template <> __global__ void _ChannelAffine<half>( const int nthreads, const int inner_dim, const int axis_dim, const half* x, const half* w, const half* b, half* y) { CUDA_1D_KERNEL_LOOP(i, nthreads) { const int wi = (i / inner_dim) % axis_dim; #if __CUDA_ARCH__ >= 530 y[i] = __hfma(x[i], __ldg(w + wi), __ldg(b + wi)); #elif __CUDA_ARCH__ >= 350 y[i] = __float2half(fmaf( __half2float(x[i]), __half2float(__ldg(w + wi)), __half2float(__ldg(b + wi)))); #else y[i] = __float2half( fmaf(__half2float(x[i]), __half2float(w[wi]), __half2float(b[wi]))); #endif } } template <> __global__ void _ChannelAffine<float>( const int nthreads, const int inner_dim, const int axis_dim, const 
float* x, const float* w, const float* b, float* y) { CUDA_1D_KERNEL_LOOP(i, nthreads) { const int wi = (i / inner_dim) % axis_dim; #if __CUDA_ARCH__ >= 350 y[i] = fmaf(x[i], __ldg(w + wi), __ldg(b + wi)); #else y[i] = fmaf(x[i], w[wi], b[wi]); #endif } } template <> __global__ void _ChannelAffine<double>( const int nthreads, const int inner_dim, const int axis_dim, const double* x, const double* w, const double* b, double* y) { CUDA_1D_KERNEL_LOOP(i, nthreads) { const int wi = (i / inner_dim) % axis_dim; #if __CUDA_ARCH__ >= 350 y[i] = fma(x[i], __ldg(w + wi), __ldg(b + wi)); #else y[i] = fma(x[i], w[wi], b[wi]); #endif } } } // namespace /* ------------------- Launcher Separator ------------------- */ template <> void ChannelAffine<float16, CUDAContext>( const int outer_dim, const int inner_dim, const int axis_dim, const float16* x, const float16* w, const float16* b, float16* y, CUDAContext* ctx) { const auto nthreads = outer_dim * axis_dim * inner_dim; if (b != nullptr) { _ChannelAffine<<< CUDA_BLOCKS(nthreads), CUDA_THREADS, 0, ctx->cuda_stream()>>>( nthreads, inner_dim, axis_dim, reinterpret_cast<const half*>(x), reinterpret_cast<const half*>(w), reinterpret_cast<const half*>(b), reinterpret_cast<half*>(y)); } else { _ChannelAffine<<< CUDA_BLOCKS(nthreads), CUDA_THREADS, 0, ctx->cuda_stream()>>>( nthreads, inner_dim, axis_dim, reinterpret_cast<const half*>(x), reinterpret_cast<const half*>(w), reinterpret_cast<half*>(y)); } } #define DEFINE_KERNEL_LAUNCHER(T) \ template <> \ void ChannelAffine<T, CUDAContext>( \ const int outer_dim, \ const int inner_dim, \ const int axis_dim, \ const T* x, \ const T* w, \ const T* b, \ T* y, \ CUDAContext* ctx) { \ const auto nthreads = outer_dim * axis_dim * inner_dim; \ if (b != nullptr) { \ _ChannelAffine<<< \ CUDA_BLOCKS(nthreads), \ CUDA_THREADS, \ 0, \ ctx->cuda_stream()>>>(nthreads, inner_dim, axis_dim, x, w, b, y); \ } else { \ _ChannelAffine<<< \ CUDA_BLOCKS(nthreads), \ CUDA_THREADS, \ 0, \ 
ctx->cuda_stream()>>>(nthreads, inner_dim, axis_dim, x, w, y); \ } \ } DEFINE_KERNEL_LAUNCHER(int8_t); DEFINE_KERNEL_LAUNCHER(uint8_t); DEFINE_KERNEL_LAUNCHER(int); DEFINE_KERNEL_LAUNCHER(int64_t); DEFINE_KERNEL_LAUNCHER(float); DEFINE_KERNEL_LAUNCHER(double); #undef DEFINE_KERNEL_LAUNCHER } // namespace kernel } // namespace dragon #endif // USE_CUDA
ed6ccef1801ac8b577e357e18a234ac61ce611e8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cfloat> #include <vector> #ifdef USE_ROCM #include "thrust/device_vector.h" #endif #include "caffe/filler.hpp" #include "caffe/layers/normalize_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { #ifdef USE_ROCM // divid a matrix with vector template <typename Dtype> __global__ void DivBsx(const int nthreads, const Dtype* A, const Dtype* v, const int rows, const int cols, const CBLAS_TRANSPOSE trans, Dtype* B) { CUDA_KERNEL_LOOP(index, nthreads) { int c = index % cols; int r = (index / cols) % rows; if (trans == CblasNoTrans) { B[index] = A[index] / v[c]; } else { B[index] = A[index] / v[r]; } } } template <typename Dtype> __global__ void MulBsx(const int nthreads, const Dtype* A, const Dtype* v, const int rows, const int cols, const CBLAS_TRANSPOSE trans, Dtype* B) { CUDA_KERNEL_LOOP(index, nthreads) { int c = index % cols; int r = (index / cols) % rows; if (trans == CblasNoTrans) { B[index] = A[index] * v[c]; } else { B[index] = A[index] * v[r]; } } } #endif template <typename Dtype> void NormalizeLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); Dtype* buffer_data = buffer_.mutable_gpu_data(); Dtype* norm_data; if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_ROCM if (across_spatial_) { // need to index it norm_data = norm_.mutable_cpu_data(); } else { norm_data = norm_.mutable_gpu_data(); // add eps to avoid overflow caffe_gpu_set<Dtype>(norm_.count(), Dtype(eps_), norm_data); } const Dtype* scale; if (channel_shared_) { scale = this->blobs_[0]->cpu_data(); } else { scale = this->blobs_[0]->gpu_data(); } const Dtype* sum_channel_multiplier = sum_channel_multiplier_.gpu_data(); int num = bottom[0]->num(); int dim = bottom[0]->count() / num; int spatial_dim = bottom[0]->height() 
* bottom[0]->width(); int channels = bottom[0]->channels(); for (int n = 0; n < num; ++n) { caffe_gpu_powx<Dtype>(dim, bottom_data, Dtype(2), buffer_data); if (across_spatial_) { Dtype normsqr; caffe_gpu_asum<Dtype>(dim, buffer_data, &normsqr); // add eps to avoid overflow norm_data[n] = pow(normsqr+eps_, Dtype(0.5)); caffe_gpu_scale<Dtype>(dim, Dtype(1.0 / norm_data[n]), bottom_data, top_data); } else { // compute norm caffe_gpu_gemv<Dtype>(CblasTrans, channels, spatial_dim, Dtype(1), buffer_data, sum_channel_multiplier, Dtype(1), norm_data); caffe_gpu_powx<Dtype>(spatial_dim, norm_data, Dtype(0.5), norm_data); // scale the layer // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( DivBsx<Dtype>) , dim3(CAFFE_GET_BLOCKS(dim)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, dim, bottom_data, norm_data, channels, spatial_dim, CblasNoTrans, top_data); CUDA_POST_KERNEL_CHECK; norm_data += spatial_dim; } // scale the output if (channel_shared_) { caffe_gpu_scal<Dtype>(dim, scale[0], top_data); } else { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( MulBsx<Dtype>) , dim3(CAFFE_GET_BLOCKS(dim)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, dim, top_data, scale, channels, spatial_dim, CblasTrans, top_data); CUDA_POST_KERNEL_CHECK; } bottom_data += dim; top_data += dim; } #endif //USE_ROCM } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); viennacl::ocl::program &program = this->device_->template program<Dtype>(); if (across_spatial_) { // need to index it norm_data = norm_.mutable_cpu_data(); } else { norm_data = norm_.mutable_gpu_data(); // add eps to avoid overflow greentea_gpu_set<Dtype>(this->device_->id(), norm_.count(), static_cast<Dtype>(eps_), (cl_mem)norm_data, 0); } const Dtype* scale; if (channel_shared_) { scale = this->blobs_[0]->cpu_data(); } else { scale = this->blobs_[0]->gpu_data(); } const Dtype* sum_channel_multiplier = sum_channel_multiplier_.gpu_data(); int num = bottom[0]->num(); int dim 
= bottom[0]->count() / num; int spatial_dim = bottom[0]->height() * bottom[0]->width(); int channels = bottom[0]->channels(); for (int n = 0; n < num; ++n) { greentea_gpu_powx<Dtype>(this->device_->id(), dim, (cl_mem)bottom_data, n*dim, Dtype(2), (cl_mem)buffer_data, 0); if (across_spatial_) { Dtype normsqr; greentea_gpu_asum<Dtype>(this->device_->id(), dim, (cl_mem)buffer_data, 0, &normsqr); // add eps to avoid overflow norm_data[n] = pow(normsqr+eps_, Dtype(0.5)); greentea_gpu_scale<Dtype>(this->device_->id(), dim, Dtype(1.0 / norm_data[n]), (cl_mem)bottom_data, n*dim, (cl_mem)top_data, n*dim); } else { // compute norm greentea_gpu_gemv<Dtype>(this->device_->id(), CblasTrans, channels, spatial_dim, 1., (cl_mem)buffer_data, 0, (cl_mem)sum_channel_multiplier, 0, 1., (cl_mem)norm_data, n*spatial_dim); greentea_gpu_powx<Dtype>(this->device_->id(), spatial_dim, (cl_mem)norm_data, n*spatial_dim, 0.5, (cl_mem)norm_data, n*spatial_dim); // scale the layer //TODO // NOLINT_NEXT_LINE(whitespace/operators) //hipLaunchKernelGGL(( DivBsx<Dtype>) , dim3(CAFFE_GET_BLOCKS(dim)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, // dim, bottom_data, norm_data, channels, spatial_dim, CblasNoTrans, // top_data); viennacl::ocl::kernel &oclk_divbsx = program.get_kernel( CL_KERNEL_SELECT("DivBsx")); viennacl::ocl::enqueue( oclk_divbsx(dim, WrapHandle((cl_mem) bottom_data, &ctx), n*dim, WrapHandle((cl_mem)norm_data, &ctx), n*spatial_dim, channels, spatial_dim, WrapHandle((cl_mem) top_data, &ctx), n*dim), ctx.get_queue()); } // scale the output if (channel_shared_) { greentea_gpu_scal<Dtype>(this->device_->id(), dim, scale[0], (cl_mem)top_data, n*dim); } else { // NOLINT_NEXT_LINE(whitespace/operators) //hipLaunchKernelGGL(( MulBsx<Dtype>) , dim3(CAFFE_GET_BLOCKS(dim)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, // dim, top_data, scale, channels, spatial_dim, CblasTrans, // top_data); viennacl::ocl::kernel &oclk_mulbsx = program.get_kernel( CL_KERNEL_SELECT("MulBsx")); viennacl::ocl::enqueue( 
oclk_mulbsx(dim, WrapHandle((cl_mem) top_data, &ctx), n*dim, WrapHandle((cl_mem) scale, &ctx), channels, spatial_dim, 1, WrapHandle((cl_mem) top_data, &ctx), n*dim), ctx.get_queue()); } } #endif //USE_GREENTEA } } template <typename Dtype> void NormalizeLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { #ifdef USE_ROCM const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* top_data = top[0]->gpu_data(); const Dtype* bottom_data = bottom[0]->mutable_gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype* norm_data; if (across_spatial_) { // need to index it norm_data = norm_.cpu_data(); } else { norm_data = norm_.gpu_data(); } const Dtype* scale; if (channel_shared_) { scale = this->blobs_[0]->cpu_data(); } else { scale = this->blobs_[0]->gpu_data(); } Dtype* buffer_data = buffer_.mutable_gpu_data(); Dtype* buffer_channel = buffer_channel_.mutable_gpu_data(); Dtype* buffer_spatial = buffer_spatial_.mutable_gpu_data(); const Dtype* sum_channel_multiplier = sum_channel_multiplier_.gpu_data(); const Dtype* sum_spatial_multiplier = sum_spatial_multiplier_.gpu_data(); int count = top[0]->count(); int num = top[0]->num(); int dim = count / num; int spatial_dim = top[0]->height() * top[0]->width(); int channels = top[0]->channels(); // Propagate to param if (this->param_propagate_down_[0]) { if (channel_shared_) { Dtype* scale_diff = this->blobs_[0]->mutable_cpu_diff(); Dtype a; caffe_gpu_dot<Dtype>(count, top_data, top_diff, &a); scale_diff[0] += a / scale[0]; } else { Dtype* scale_diff = this->blobs_[0]->mutable_gpu_diff(); for (int n = 0; n < num; ++n) { // compute a caffe_gpu_mul<Dtype>(dim, top_data+n*dim, top_diff+n*dim, buffer_data); caffe_gpu_gemv<Dtype>(CblasNoTrans, channels, spatial_dim, Dtype(1), buffer_data, sum_spatial_multiplier, Dtype(0), buffer_channel); // store a / scale[i] in buffer_data temporary caffe_gpu_div<Dtype>(channels, buffer_channel, 
scale, buffer_channel); caffe_gpu_add<Dtype>(channels, buffer_channel, scale_diff, scale_diff); } } } // Propagate to bottom if (propagate_down[0]) { for (int n = 0; n < num; ++n) { if (across_spatial_) { Dtype a; caffe_gpu_dot<Dtype>(dim, bottom_data, top_diff, &a); caffe_gpu_scale<Dtype>(dim, a / norm_data[n] / norm_data[n], bottom_data, bottom_diff); caffe_gpu_sub<Dtype>(dim, top_diff, bottom_diff, bottom_diff); caffe_gpu_scale<Dtype>(dim, Dtype(1.0 / norm_data[n]), bottom_diff, bottom_diff); } else { // dot product between bottom_data and top_diff caffe_gpu_mul<Dtype>(dim, bottom_data, top_diff, buffer_data); caffe_gpu_gemv<Dtype>(CblasTrans, channels, spatial_dim, Dtype(1), buffer_data, sum_channel_multiplier, Dtype(0), buffer_spatial); // scale botom_diff // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( MulBsx<Dtype>) , dim3(CAFFE_GET_BLOCKS(dim)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, dim, bottom_data, buffer_spatial, channels, spatial_dim, CblasNoTrans, bottom_diff); CUDA_POST_KERNEL_CHECK; // divide by square of norm caffe_gpu_powx<Dtype>(spatial_dim, norm_data, Dtype(2), buffer_spatial); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( DivBsx<Dtype>) , dim3(CAFFE_GET_BLOCKS(dim)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, dim, bottom_diff, buffer_spatial, channels, spatial_dim, CblasNoTrans, bottom_diff); CUDA_POST_KERNEL_CHECK; caffe_gpu_sub<Dtype>(dim, top_diff, bottom_diff, bottom_diff); // divide by norm // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( DivBsx<Dtype>) , dim3(CAFFE_GET_BLOCKS(dim)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, dim, bottom_diff, norm_data, channels, spatial_dim, CblasNoTrans, bottom_diff); CUDA_POST_KERNEL_CHECK; norm_data += spatial_dim; } // scale the diff if (channel_shared_) { caffe_gpu_scal<Dtype>(dim, scale[0], bottom_diff); } else { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( MulBsx<Dtype>) , dim3(CAFFE_GET_BLOCKS(dim)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, dim, 
bottom_diff, scale, channels, spatial_dim, CblasTrans, bottom_diff); CUDA_POST_KERNEL_CHECK; } bottom_data += dim; top_diff += dim; bottom_diff += dim; } } #else this->Backward_cpu(top, propagate_down, bottom); #endif //USE_ROCM } INSTANTIATE_LAYER_GPU_FUNCS(NormalizeLayer); } // namespace caffe
ed6ccef1801ac8b577e357e18a234ac61ce611e8.cu
#include <algorithm> #include <cfloat> #include <vector> #ifdef USE_CUDA #include "thrust/device_vector.h" #endif #include "caffe/filler.hpp" #include "caffe/layers/normalize_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { #ifdef USE_CUDA // divid a matrix with vector template <typename Dtype> __global__ void DivBsx(const int nthreads, const Dtype* A, const Dtype* v, const int rows, const int cols, const CBLAS_TRANSPOSE trans, Dtype* B) { CUDA_KERNEL_LOOP(index, nthreads) { int c = index % cols; int r = (index / cols) % rows; if (trans == CblasNoTrans) { B[index] = A[index] / v[c]; } else { B[index] = A[index] / v[r]; } } } template <typename Dtype> __global__ void MulBsx(const int nthreads, const Dtype* A, const Dtype* v, const int rows, const int cols, const CBLAS_TRANSPOSE trans, Dtype* B) { CUDA_KERNEL_LOOP(index, nthreads) { int c = index % cols; int r = (index / cols) % rows; if (trans == CblasNoTrans) { B[index] = A[index] * v[c]; } else { B[index] = A[index] * v[r]; } } } #endif template <typename Dtype> void NormalizeLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); Dtype* buffer_data = buffer_.mutable_gpu_data(); Dtype* norm_data; if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA if (across_spatial_) { // need to index it norm_data = norm_.mutable_cpu_data(); } else { norm_data = norm_.mutable_gpu_data(); // add eps to avoid overflow caffe_gpu_set<Dtype>(norm_.count(), Dtype(eps_), norm_data); } const Dtype* scale; if (channel_shared_) { scale = this->blobs_[0]->cpu_data(); } else { scale = this->blobs_[0]->gpu_data(); } const Dtype* sum_channel_multiplier = sum_channel_multiplier_.gpu_data(); int num = bottom[0]->num(); int dim = bottom[0]->count() / num; int spatial_dim = bottom[0]->height() * bottom[0]->width(); int channels = bottom[0]->channels(); for (int n = 0; n < num; 
++n) { caffe_gpu_powx<Dtype>(dim, bottom_data, Dtype(2), buffer_data); if (across_spatial_) { Dtype normsqr; caffe_gpu_asum<Dtype>(dim, buffer_data, &normsqr); // add eps to avoid overflow norm_data[n] = pow(normsqr+eps_, Dtype(0.5)); caffe_gpu_scale<Dtype>(dim, Dtype(1.0 / norm_data[n]), bottom_data, top_data); } else { // compute norm caffe_gpu_gemv<Dtype>(CblasTrans, channels, spatial_dim, Dtype(1), buffer_data, sum_channel_multiplier, Dtype(1), norm_data); caffe_gpu_powx<Dtype>(spatial_dim, norm_data, Dtype(0.5), norm_data); // scale the layer // NOLINT_NEXT_LINE(whitespace/operators) DivBsx<Dtype> <<<CAFFE_GET_BLOCKS(dim), CAFFE_CUDA_NUM_THREADS>>>( dim, bottom_data, norm_data, channels, spatial_dim, CblasNoTrans, top_data); CUDA_POST_KERNEL_CHECK; norm_data += spatial_dim; } // scale the output if (channel_shared_) { caffe_gpu_scal<Dtype>(dim, scale[0], top_data); } else { // NOLINT_NEXT_LINE(whitespace/operators) MulBsx<Dtype> <<<CAFFE_GET_BLOCKS(dim), CAFFE_CUDA_NUM_THREADS>>>( dim, top_data, scale, channels, spatial_dim, CblasTrans, top_data); CUDA_POST_KERNEL_CHECK; } bottom_data += dim; top_data += dim; } #endif //USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); viennacl::ocl::program &program = this->device_->template program<Dtype>(); if (across_spatial_) { // need to index it norm_data = norm_.mutable_cpu_data(); } else { norm_data = norm_.mutable_gpu_data(); // add eps to avoid overflow greentea_gpu_set<Dtype>(this->device_->id(), norm_.count(), static_cast<Dtype>(eps_), (cl_mem)norm_data, 0); } const Dtype* scale; if (channel_shared_) { scale = this->blobs_[0]->cpu_data(); } else { scale = this->blobs_[0]->gpu_data(); } const Dtype* sum_channel_multiplier = sum_channel_multiplier_.gpu_data(); int num = bottom[0]->num(); int dim = bottom[0]->count() / num; int spatial_dim = bottom[0]->height() * bottom[0]->width(); int channels = bottom[0]->channels(); for (int n = 0; n < num; ++n) 
{ greentea_gpu_powx<Dtype>(this->device_->id(), dim, (cl_mem)bottom_data, n*dim, Dtype(2), (cl_mem)buffer_data, 0); if (across_spatial_) { Dtype normsqr; greentea_gpu_asum<Dtype>(this->device_->id(), dim, (cl_mem)buffer_data, 0, &normsqr); // add eps to avoid overflow norm_data[n] = pow(normsqr+eps_, Dtype(0.5)); greentea_gpu_scale<Dtype>(this->device_->id(), dim, Dtype(1.0 / norm_data[n]), (cl_mem)bottom_data, n*dim, (cl_mem)top_data, n*dim); } else { // compute norm greentea_gpu_gemv<Dtype>(this->device_->id(), CblasTrans, channels, spatial_dim, 1., (cl_mem)buffer_data, 0, (cl_mem)sum_channel_multiplier, 0, 1., (cl_mem)norm_data, n*spatial_dim); greentea_gpu_powx<Dtype>(this->device_->id(), spatial_dim, (cl_mem)norm_data, n*spatial_dim, 0.5, (cl_mem)norm_data, n*spatial_dim); // scale the layer //TODO // NOLINT_NEXT_LINE(whitespace/operators) // DivBsx<Dtype> <<<CAFFE_GET_BLOCKS(dim), CAFFE_CUDA_NUM_THREADS>>>( // dim, bottom_data, norm_data, channels, spatial_dim, CblasNoTrans, // top_data); viennacl::ocl::kernel &oclk_divbsx = program.get_kernel( CL_KERNEL_SELECT("DivBsx")); viennacl::ocl::enqueue( oclk_divbsx(dim, WrapHandle((cl_mem) bottom_data, &ctx), n*dim, WrapHandle((cl_mem)norm_data, &ctx), n*spatial_dim, channels, spatial_dim, WrapHandle((cl_mem) top_data, &ctx), n*dim), ctx.get_queue()); } // scale the output if (channel_shared_) { greentea_gpu_scal<Dtype>(this->device_->id(), dim, scale[0], (cl_mem)top_data, n*dim); } else { // NOLINT_NEXT_LINE(whitespace/operators) // MulBsx<Dtype> <<<CAFFE_GET_BLOCKS(dim), CAFFE_CUDA_NUM_THREADS>>>( // dim, top_data, scale, channels, spatial_dim, CblasTrans, // top_data); viennacl::ocl::kernel &oclk_mulbsx = program.get_kernel( CL_KERNEL_SELECT("MulBsx")); viennacl::ocl::enqueue( oclk_mulbsx(dim, WrapHandle((cl_mem) top_data, &ctx), n*dim, WrapHandle((cl_mem) scale, &ctx), channels, spatial_dim, 1, WrapHandle((cl_mem) top_data, &ctx), n*dim), ctx.get_queue()); } } #endif //USE_GREENTEA } } template <typename Dtype> 
void NormalizeLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { #ifdef USE_CUDA const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* top_data = top[0]->gpu_data(); const Dtype* bottom_data = bottom[0]->mutable_gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype* norm_data; if (across_spatial_) { // need to index it norm_data = norm_.cpu_data(); } else { norm_data = norm_.gpu_data(); } const Dtype* scale; if (channel_shared_) { scale = this->blobs_[0]->cpu_data(); } else { scale = this->blobs_[0]->gpu_data(); } Dtype* buffer_data = buffer_.mutable_gpu_data(); Dtype* buffer_channel = buffer_channel_.mutable_gpu_data(); Dtype* buffer_spatial = buffer_spatial_.mutable_gpu_data(); const Dtype* sum_channel_multiplier = sum_channel_multiplier_.gpu_data(); const Dtype* sum_spatial_multiplier = sum_spatial_multiplier_.gpu_data(); int count = top[0]->count(); int num = top[0]->num(); int dim = count / num; int spatial_dim = top[0]->height() * top[0]->width(); int channels = top[0]->channels(); // Propagate to param if (this->param_propagate_down_[0]) { if (channel_shared_) { Dtype* scale_diff = this->blobs_[0]->mutable_cpu_diff(); Dtype a; caffe_gpu_dot<Dtype>(count, top_data, top_diff, &a); scale_diff[0] += a / scale[0]; } else { Dtype* scale_diff = this->blobs_[0]->mutable_gpu_diff(); for (int n = 0; n < num; ++n) { // compute a caffe_gpu_mul<Dtype>(dim, top_data+n*dim, top_diff+n*dim, buffer_data); caffe_gpu_gemv<Dtype>(CblasNoTrans, channels, spatial_dim, Dtype(1), buffer_data, sum_spatial_multiplier, Dtype(0), buffer_channel); // store a / scale[i] in buffer_data temporary caffe_gpu_div<Dtype>(channels, buffer_channel, scale, buffer_channel); caffe_gpu_add<Dtype>(channels, buffer_channel, scale_diff, scale_diff); } } } // Propagate to bottom if (propagate_down[0]) { for (int n = 0; n < num; ++n) { if (across_spatial_) { Dtype a; caffe_gpu_dot<Dtype>(dim, 
bottom_data, top_diff, &a); caffe_gpu_scale<Dtype>(dim, a / norm_data[n] / norm_data[n], bottom_data, bottom_diff); caffe_gpu_sub<Dtype>(dim, top_diff, bottom_diff, bottom_diff); caffe_gpu_scale<Dtype>(dim, Dtype(1.0 / norm_data[n]), bottom_diff, bottom_diff); } else { // dot product between bottom_data and top_diff caffe_gpu_mul<Dtype>(dim, bottom_data, top_diff, buffer_data); caffe_gpu_gemv<Dtype>(CblasTrans, channels, spatial_dim, Dtype(1), buffer_data, sum_channel_multiplier, Dtype(0), buffer_spatial); // scale botom_diff // NOLINT_NEXT_LINE(whitespace/operators) MulBsx<Dtype> <<<CAFFE_GET_BLOCKS(dim), CAFFE_CUDA_NUM_THREADS>>>( dim, bottom_data, buffer_spatial, channels, spatial_dim, CblasNoTrans, bottom_diff); CUDA_POST_KERNEL_CHECK; // divide by square of norm caffe_gpu_powx<Dtype>(spatial_dim, norm_data, Dtype(2), buffer_spatial); // NOLINT_NEXT_LINE(whitespace/operators) DivBsx<Dtype> <<<CAFFE_GET_BLOCKS(dim), CAFFE_CUDA_NUM_THREADS>>>( dim, bottom_diff, buffer_spatial, channels, spatial_dim, CblasNoTrans, bottom_diff); CUDA_POST_KERNEL_CHECK; caffe_gpu_sub<Dtype>(dim, top_diff, bottom_diff, bottom_diff); // divide by norm // NOLINT_NEXT_LINE(whitespace/operators) DivBsx<Dtype> <<<CAFFE_GET_BLOCKS(dim), CAFFE_CUDA_NUM_THREADS>>>( dim, bottom_diff, norm_data, channels, spatial_dim, CblasNoTrans, bottom_diff); CUDA_POST_KERNEL_CHECK; norm_data += spatial_dim; } // scale the diff if (channel_shared_) { caffe_gpu_scal<Dtype>(dim, scale[0], bottom_diff); } else { // NOLINT_NEXT_LINE(whitespace/operators) MulBsx<Dtype> <<<CAFFE_GET_BLOCKS(dim), CAFFE_CUDA_NUM_THREADS>>>( dim, bottom_diff, scale, channels, spatial_dim, CblasTrans, bottom_diff); CUDA_POST_KERNEL_CHECK; } bottom_data += dim; top_diff += dim; bottom_diff += dim; } } #else this->Backward_cpu(top, propagate_down, bottom); #endif //USE_CUDA } INSTANTIATE_LAYER_GPU_FUNCS(NormalizeLayer); } // namespace caffe
c1b297d4c30a2fc87159e697a9418a7667367754.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef G_KERNEL #define G_KERNEL #ifndef GPU_KERNEL #define GPU_KERNEL #endif #include "ros.cu" #include "rog.cu" #include <stdio.h> #include "parameters.h" #include "../regular.h" #include "data_type.h" #include "../lib/common.h" //#define SHARED_SIZE 32768 __global__ void merged(GO *object1, GO *object2, int reduce_idx) { object1->mergeg(object2, reduce_function_table[reduce_idx]); } template <class T1, class T2> __device__ void merge(T1 *dstobject, T2 *srcobject, reduce_fp reduce_ptr) { for(int index = 0; index<srcobject->num_buckets; index++) { if((srcobject->buckets)[index]!=0) { int key_size = srcobject->get_key_size(index); int value_size = srcobject->get_value_size(index); void *key = srcobject->get_key_address(index); void *value = srcobject->get_value_address(index); dstobject->insert(key, key_size, value, value_size, reduce_ptr); } } } __global__ void compute_gpu( void *input, Offset *data_offset, //used to split the input data int *device_offset, //offset within each device int offset_number, unsigned long long start, GO *object_g, void *parameter, int map_num, int reduce_num ) { __shared__ unsigned int num_groupss; const unsigned int tid = threadIdx.x; //const unsigned int global_id = blockDim.x * blockIdx.x + tid; __shared__ map_fp map_ptrs; __shared__ reduce_fp reduce_ptrs; if(tid == 0) { num_groupss = get_num_groups(); map_ptrs = map_function_table[map_num]; reduce_ptrs = reduce_function_table[reduce_num]; } __syncthreads(); const int num_groups = num_groupss;//get_num_groups();//floor((double)SHARED_SIZE/sizeof(SO)); const int group_id = get_group_id(num_groups, GPU_THREADS, tid); const int gid = get_gid(num_groups, GPU_THREADS, tid); const int group_size = get_group_size(num_groups, GPU_THREADS, group_id); map_fp map_ptr = map_ptrs; reduce_fp reduce_ptr = reduce_ptrs; //if(global_id == 255) //{ // printf("num of groups: %d, group_size: %d, group_id: %d, gid: 
%d\n", num_groups, group_size, group_id, gid); // printf("size of double: %d\n", sizeof(double)); //} __shared__ int task_index; //task index within each SM __shared__ int has_taskl; #ifdef USE_SHARED __shared__ char object_s[SHARED_SIZE]; ((SO *)object_s + group_id)->oma_init(gid, group_size); //((SO *)object_s)->oma_init(); //object_s.oma_init(); __shared__ int do_merge; __shared__ int finished; #endif object_g->oma_init(); __syncthreads(); while(1) { __syncthreads(); if(tid == 0) { task_index = atomicAdd(device_offset, R_GPU_BLOCK_SIZE); if(task_index >= offset_number) { has_taskl = 0; } else has_taskl = 1; } __syncthreads(); if(has_taskl == 0) { break; } #ifdef USE_SHARED if(tid == 0) { finished = 0; do_merge = 0; } __syncthreads(); bool flag = true; int i = tid; while(finished != GPU_THREADS) { __syncthreads(); for(; i < R_GPU_BLOCK_SIZE && (i + task_index)<offset_number; i += GPU_THREADS) { if(do_merge) break; //bool success = FFGPU::map(((SO *)object_s + group_id), input, data_offset[i].offset, parameter, 1); bool success = map_ptr(((SO *)object_s + group_id), input, data_offset[i].offset, parameter, 1, reduce_ptr); //bool success = FFGPU::map(((SO *)object_s), input, data_offset[i].offset, parameter, 1); if(!success) { do_merge = 1; break; } } if(flag && ((i + task_index) >= offset_number || i >= R_GPU_BLOCK_SIZE)) { flag = false; atomicAdd(&finished, 1); } __syncthreads(); object_g->merge((SO *)object_s, reduce_ptr); //if(gid == 0) //{ // merge(object_g, (SO *)object_s + group_id); //} __syncthreads(); //((SO *)object_s + group_id)->oma_init(gid, group_size); //((SO *)object_s)->oma_init(); if(tid == 0) do_merge = 0; } #else for(int i = tid; (i + task_index) < offset_number; i += GPU_THREADS) { FFGPU::map(object_g, input, (char *)data_offset + unit_size * (start + i + task_index), parameter, 1); } #endif } } #endif
c1b297d4c30a2fc87159e697a9418a7667367754.cu
#ifndef G_KERNEL #define G_KERNEL #ifndef GPU_KERNEL #define GPU_KERNEL #endif #include "ros.cu" #include "rog.cu" #include <stdio.h> #include "parameters.h" #include "../regular.h" #include "data_type.h" #include "../lib/common.h" //#define SHARED_SIZE 32768 __global__ void merged(GO *object1, GO *object2, int reduce_idx) { object1->mergeg(object2, reduce_function_table[reduce_idx]); } template <class T1, class T2> __device__ void merge(T1 *dstobject, T2 *srcobject, reduce_fp reduce_ptr) { for(int index = 0; index<srcobject->num_buckets; index++) { if((srcobject->buckets)[index]!=0) { int key_size = srcobject->get_key_size(index); int value_size = srcobject->get_value_size(index); void *key = srcobject->get_key_address(index); void *value = srcobject->get_value_address(index); dstobject->insert(key, key_size, value, value_size, reduce_ptr); } } } __global__ void compute_gpu( void *input, Offset *data_offset, //used to split the input data int *device_offset, //offset within each device int offset_number, unsigned long long start, GO *object_g, void *parameter, int map_num, int reduce_num ) { __shared__ unsigned int num_groupss; const unsigned int tid = threadIdx.x; //const unsigned int global_id = blockDim.x * blockIdx.x + tid; __shared__ map_fp map_ptrs; __shared__ reduce_fp reduce_ptrs; if(tid == 0) { num_groupss = get_num_groups(); map_ptrs = map_function_table[map_num]; reduce_ptrs = reduce_function_table[reduce_num]; } __syncthreads(); const int num_groups = num_groupss;//get_num_groups();//floor((double)SHARED_SIZE/sizeof(SO)); const int group_id = get_group_id(num_groups, GPU_THREADS, tid); const int gid = get_gid(num_groups, GPU_THREADS, tid); const int group_size = get_group_size(num_groups, GPU_THREADS, group_id); map_fp map_ptr = map_ptrs; reduce_fp reduce_ptr = reduce_ptrs; //if(global_id == 255) //{ // printf("num of groups: %d, group_size: %d, group_id: %d, gid: %d\n", num_groups, group_size, group_id, gid); // printf("size of double: %d\n", 
sizeof(double)); //} __shared__ int task_index; //task index within each SM __shared__ int has_taskl; #ifdef USE_SHARED __shared__ char object_s[SHARED_SIZE]; ((SO *)object_s + group_id)->oma_init(gid, group_size); //((SO *)object_s)->oma_init(); //object_s.oma_init(); __shared__ int do_merge; __shared__ int finished; #endif object_g->oma_init(); __syncthreads(); while(1) { __syncthreads(); if(tid == 0) { task_index = atomicAdd(device_offset, R_GPU_BLOCK_SIZE); if(task_index >= offset_number) { has_taskl = 0; } else has_taskl = 1; } __syncthreads(); if(has_taskl == 0) { break; } #ifdef USE_SHARED if(tid == 0) { finished = 0; do_merge = 0; } __syncthreads(); bool flag = true; int i = tid; while(finished != GPU_THREADS) { __syncthreads(); for(; i < R_GPU_BLOCK_SIZE && (i + task_index)<offset_number; i += GPU_THREADS) { if(do_merge) break; //bool success = FFGPU::map(((SO *)object_s + group_id), input, data_offset[i].offset, parameter, 1); bool success = map_ptr(((SO *)object_s + group_id), input, data_offset[i].offset, parameter, 1, reduce_ptr); //bool success = FFGPU::map(((SO *)object_s), input, data_offset[i].offset, parameter, 1); if(!success) { do_merge = 1; break; } } if(flag && ((i + task_index) >= offset_number || i >= R_GPU_BLOCK_SIZE)) { flag = false; atomicAdd(&finished, 1); } __syncthreads(); object_g->merge((SO *)object_s, reduce_ptr); //if(gid == 0) //{ // merge(object_g, (SO *)object_s + group_id); //} __syncthreads(); //((SO *)object_s + group_id)->oma_init(gid, group_size); //((SO *)object_s)->oma_init(); if(tid == 0) do_merge = 0; } #else for(int i = tid; (i + task_index) < offset_number; i += GPU_THREADS) { FFGPU::map(object_g, input, (char *)data_offset + unit_size * (start + i + task_index), parameter, 1); } #endif } } #endif
f3d81f451335fb875aa483325c7e602998514176.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // @Author Song Liang, Northeastern University ,CN // // The code and binary file are attributed MIT license. See the detail and latest update in // https://github.com/SongLiang0710/RetinaCheck // // This copy of program is developed in Windows 7, Mathematica 11.0, CUDA Toolkit 7.5 // // Hessian features including curvature, confidence and deviation from horizontality. #include <math.h> #include "RcMathVisionParallel.h" #include "matrix3by3.cuh" // Jacobian iterative method --obtaining eigenvectors of symmetric 3 by 3 matrix //#define EPSILON 0.000000000001 (the early-stopping strategy is prone to make errors and provide little improvement in speed) __device__ void jacobianEigenvector (double3 *eVector, matrix3by3 sHessian) { matrix3by3 mEigen, mInner; double tmp, phi, c, s; int i, j, row = 0, col = 0, iter = 0; mEigen = make_identity3by3 (); while (iter++ < 100) { tmp = 0; for (i = 0; i <= 1; i++) { for (j = i + 1; j <= 2; j++) { if (fabs(getEntry_matrix3by3Index(sHessian, 3 * i + j)) > tmp) { row = i; col = j; tmp = fabs(getEntry_matrix3by3Index(sHessian, 3 * i + j)); } } } if (tmp == 0) break; phi = -atan2(2 * getEntry_matrix3by3Index(sHessian, 3 * row + col), getEntry_matrix3by3Index(sHessian, 3 * col + col) - getEntry_matrix3by3Index(sHessian, 3 * row + row)) / 2; c = cos(phi); s = sin(phi); mInner = make_identity3by3 (); setEntry_matrix3by3Index(mInner, 3 * row + row, c); setEntry_matrix3by3Index(mInner, 3 * row + col, s); setEntry_matrix3by3Index(mInner, 3 * col + row, -s); setEntry_matrix3by3Index(mInner, 3 * col + col, c); sHessian = matrixMultiply(mInner, sHessian); setEntry_matrix3by3Index(mInner, 3 * row + col, -s); setEntry_matrix3by3Index(mInner, 3 * col + row, s); sHessian = matrixMultiply(sHessian, mInner); mEigen = matrixMultiply(mEigen, mInner); } // Arrange the eigenvector corresponding to the lowest eigenvalue at first tmp = 9999; for (i = 0; i < 3; i++) { 
if (fabs(getEntry_matrix3by3Index(sHessian, 3 * i + i)) < tmp) { col = i; tmp = fabs(getEntry_matrix3by3Index(sHessian, 3 * i + i)); } } for (i = 0; i < 3; i++) { // tmp = rsqrt(pow(getEntry_matrix3by3Index(mEigen, col), 2) + pow(getEntry_matrix3by3Index(mEigen, col + 3), 2) + pow(getEntry_matrix3by3Index(mEigen, col + 6), 2)); eVector[i] = make_double3(getEntry_matrix3by3Index(mEigen, col), getEntry_matrix3by3Index(mEigen, col + 3), getEntry_matrix3by3Index(mEigen, col + 6)); col = (col + 1) % 3; } } __global__ void cuHessianFeatures( double3 *d_Dst, matrix3by3 *d_Src, int width, int height, int stack, double mu ) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; const int idy = blockIdx.y * blockDim.y + threadIdx.y; const int idz = blockIdx.z * blockDim.z + threadIdx.z; const int id = idx + idy * width + idz * width * height; if (idx < width && idy < height) { double3 eigenVectors[3]; matrix3by3 muMatrix = make_diagonal3by3(1, mu, mu); matrix3by3 Hessian = matrixMultiply(matrixMultiply(muMatrix, d_Src[id]), muMatrix); jacobianEigenvector ( eigenVectors, matrixMultiply(transpose_matrix3by3(Hessian), Hessian)); // Curvature d_Dst[id].x = eigenVectors[0].x * copysign(1., eigenVectors[0].y) * rhypot(eigenVectors[0].y,eigenVectors[0].z); // Confidence d_Dst[id].y = -vectorMultiply(vectorMatrixMultiply(eigenVectors[1], Hessian), eigenVectors[1])-vectorMultiply(vectorMatrixMultiply(eigenVectors[2], Hessian), eigenVectors[2]); // Deviation from horizontality d_Dst[id].z = atan(eigenVectors[0].z / eigenVectors[0].y); } } extern "C" void OS2DHessianFeatures( double3 *d_Dst, matrix3by3 *d_Src, int width, int height, int stack, double mu ) { dim3 blocks( iDivUp(width, DEFAULT_BLOCKDIM_X), iDivUp(height, DEFAULT_BLOCKDIM_Y), iDivUp(stack, DEFAULT_BLOCKDIM_Z)); dim3 threads( DEFAULT_BLOCKDIM_X, DEFAULT_BLOCKDIM_Y, DEFAULT_BLOCKDIM_Z); hipLaunchKernelGGL(( cuHessianFeatures), dim3(blocks), dim3(threads), 0, 0, d_Dst, d_Src, width, height, stack, mu); } // A deprecated 
test function __global__ void cuJacobianEigenvector( double3 *d_Dst, matrix3by3 *d_Src ) { jacobianEigenvector (d_Dst, *d_Src); } extern "C" void jEigenvector( matrix3by3 *d_Dst, matrix3by3 *d_Src ) { dim3 blocks(1, 1); dim3 threads(1, 1); hipLaunchKernelGGL(( cuJacobianEigenvector), dim3(blocks), dim3(threads), 0, 0, (double3 *)d_Dst, d_Src); }
f3d81f451335fb875aa483325c7e602998514176.cu
// // @Author Song Liang, Northeastern University ,CN // // The code and binary file are attributed MIT license. See the detail and latest update in // https://github.com/SongLiang0710/RetinaCheck // // This copy of program is developed in Windows 7, Mathematica 11.0, CUDA Toolkit 7.5 // // Hessian features including curvature, confidence and deviation from horizontality. #include <math.h> #include "RcMathVisionParallel.h" #include "matrix3by3.cuh" // Jacobian iterative method --obtaining eigenvectors of symmetric 3 by 3 matrix //#define EPSILON 0.000000000001 (the early-stopping strategy is prone to make errors and provide little improvement in speed) __device__ void jacobianEigenvector (double3 *eVector, matrix3by3 sHessian) { matrix3by3 mEigen, mInner; double tmp, phi, c, s; int i, j, row = 0, col = 0, iter = 0; mEigen = make_identity3by3 (); while (iter++ < 100) { tmp = 0; for (i = 0; i <= 1; i++) { for (j = i + 1; j <= 2; j++) { if (fabs(getEntry_matrix3by3Index(sHessian, 3 * i + j)) > tmp) { row = i; col = j; tmp = fabs(getEntry_matrix3by3Index(sHessian, 3 * i + j)); } } } if (tmp == 0) break; phi = -atan2(2 * getEntry_matrix3by3Index(sHessian, 3 * row + col), getEntry_matrix3by3Index(sHessian, 3 * col + col) - getEntry_matrix3by3Index(sHessian, 3 * row + row)) / 2; c = cos(phi); s = sin(phi); mInner = make_identity3by3 (); setEntry_matrix3by3Index(mInner, 3 * row + row, c); setEntry_matrix3by3Index(mInner, 3 * row + col, s); setEntry_matrix3by3Index(mInner, 3 * col + row, -s); setEntry_matrix3by3Index(mInner, 3 * col + col, c); sHessian = matrixMultiply(mInner, sHessian); setEntry_matrix3by3Index(mInner, 3 * row + col, -s); setEntry_matrix3by3Index(mInner, 3 * col + row, s); sHessian = matrixMultiply(sHessian, mInner); mEigen = matrixMultiply(mEigen, mInner); } // Arrange the eigenvector corresponding to the lowest eigenvalue at first tmp = 9999; for (i = 0; i < 3; i++) { if (fabs(getEntry_matrix3by3Index(sHessian, 3 * i + i)) < tmp) { col = i; tmp = 
fabs(getEntry_matrix3by3Index(sHessian, 3 * i + i)); } } for (i = 0; i < 3; i++) { // tmp = rsqrt(pow(getEntry_matrix3by3Index(mEigen, col), 2) + pow(getEntry_matrix3by3Index(mEigen, col + 3), 2) + pow(getEntry_matrix3by3Index(mEigen, col + 6), 2)); eVector[i] = make_double3(getEntry_matrix3by3Index(mEigen, col), getEntry_matrix3by3Index(mEigen, col + 3), getEntry_matrix3by3Index(mEigen, col + 6)); col = (col + 1) % 3; } } __global__ void cuHessianFeatures( double3 *d_Dst, matrix3by3 *d_Src, int width, int height, int stack, double mu ) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; const int idy = blockIdx.y * blockDim.y + threadIdx.y; const int idz = blockIdx.z * blockDim.z + threadIdx.z; const int id = idx + idy * width + idz * width * height; if (idx < width && idy < height) { double3 eigenVectors[3]; matrix3by3 muMatrix = make_diagonal3by3(1, mu, mu); matrix3by3 Hessian = matrixMultiply(matrixMultiply(muMatrix, d_Src[id]), muMatrix); jacobianEigenvector ( eigenVectors, matrixMultiply(transpose_matrix3by3(Hessian), Hessian)); // Curvature d_Dst[id].x = eigenVectors[0].x * copysign(1., eigenVectors[0].y) * rhypot(eigenVectors[0].y,eigenVectors[0].z); // Confidence d_Dst[id].y = -vectorMultiply(vectorMatrixMultiply(eigenVectors[1], Hessian), eigenVectors[1])-vectorMultiply(vectorMatrixMultiply(eigenVectors[2], Hessian), eigenVectors[2]); // Deviation from horizontality d_Dst[id].z = atan(eigenVectors[0].z / eigenVectors[0].y); } } extern "C" void OS2DHessianFeatures( double3 *d_Dst, matrix3by3 *d_Src, int width, int height, int stack, double mu ) { dim3 blocks( iDivUp(width, DEFAULT_BLOCKDIM_X), iDivUp(height, DEFAULT_BLOCKDIM_Y), iDivUp(stack, DEFAULT_BLOCKDIM_Z)); dim3 threads( DEFAULT_BLOCKDIM_X, DEFAULT_BLOCKDIM_Y, DEFAULT_BLOCKDIM_Z); cuHessianFeatures<<<blocks, threads>>>( d_Dst, d_Src, width, height, stack, mu); } // A deprecated test function __global__ void cuJacobianEigenvector( double3 *d_Dst, matrix3by3 *d_Src ) { jacobianEigenvector (d_Dst, 
*d_Src); } extern "C" void jEigenvector( matrix3by3 *d_Dst, matrix3by3 *d_Src ) { dim3 blocks(1, 1); dim3 threads(1, 1); cuJacobianEigenvector<<<blocks, threads>>>( (double3 *)d_Dst, d_Src); }
a5bf05cb4c430e86727116050223d9dc7564a932.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "_norm_forward_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *x = NULL; hipMalloc(&x, XSIZE*YSIZE); float *mean = NULL; hipMalloc(&mean, XSIZE*YSIZE); float *variance = NULL; hipMalloc(&variance, XSIZE*YSIZE); int b = 2; int c = 2; int wxh = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( _norm_forward_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, x,mean,variance,b,c,wxh); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( _norm_forward_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, x,mean,variance,b,c,wxh); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( _norm_forward_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, x,mean,variance,b,c,wxh); } auto end = steady_clock::now(); auto usecs = 
duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
a5bf05cb4c430e86727116050223d9dc7564a932.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "_norm_forward_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *x = NULL; cudaMalloc(&x, XSIZE*YSIZE); float *mean = NULL; cudaMalloc(&mean, XSIZE*YSIZE); float *variance = NULL; cudaMalloc(&variance, XSIZE*YSIZE); int b = 2; int c = 2; int wxh = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); _norm_forward_kernel<<<gridBlock,threadBlock>>>(x,mean,variance,b,c,wxh); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { _norm_forward_kernel<<<gridBlock,threadBlock>>>(x,mean,variance,b,c,wxh); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { _norm_forward_kernel<<<gridBlock,threadBlock>>>(x,mean,variance,b,c,wxh); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
4ddad40e08fea47d670f74e58c565b995943b428.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // ------------------------------------------------------------------ // Fast R-CNN // copyright (c) 2015 Microsoft // Licensed under The MIT License [see fast-rcnn/LICENSE for details] // Written by Ross Girshick // Modified by Wei Liu // ------------------------------------------------------------------ #include <vector> #include "caffe/layers/smooth_L1_loss_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Ftype, typename Btype> __global__ void SmoothL1Forward(const int n, const Ftype* in, Ftype* out) { // f(x) = 0.5 * x^2 if |x| < 1 // |x| - 0.5 otherwise CUDA_KERNEL_LOOP(index, n) { Ftype val = in[index]; Ftype abs_val = abs(val); if (abs_val < 1) { out[index] = 0.5 * val * val; } else { out[index] = abs_val - 0.5; } } } template <typename Ftype, typename Btype> void SmoothL1LossLayer<Ftype, Btype>::Forward_gpu(const vector<Blob*>& bottom, const vector<Blob*>& top) { int count = bottom[0]->count(); caffe_gpu_sub<Ftype>( count, bottom[0]->gpu_data<Ftype>(), bottom[1]->gpu_data<Ftype>(), diff_.template mutable_gpu_data<Ftype>()); // d := b0 - b1 if (has_weights_) { caffe_gpu_mul( count, bottom[2]->gpu_data<Ftype>(), diff_.template gpu_data<Ftype>(), diff_.template mutable_gpu_data<Ftype>()); // d := w * (b0 - b1) } // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( SmoothL1Forward<Ftype, Btype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, diff_.template gpu_data<Ftype>(), errors_.template mutable_gpu_data<Ftype>()); CUDA_POST_KERNEL_CHECK; Ftype loss; caffe_gpu_asum(count, errors_.template gpu_data<Ftype>(), &loss); top[0]->mutable_cpu_data<Ftype>()[0] = loss / bottom[0]->num(); } template <typename Ftype, typename Btype> __global__ void SmoothL1Backward(const int n, const Btype* in, Btype* out) { // f'(x) = x if |x| < 1 // = sign(x) otherwise CUDA_KERNEL_LOOP(index, n) { Btype val = in[index]; 
Btype abs_val = abs(val); if (abs_val < 1) { out[index] = val; } else { out[index] = (Btype(0) < val) - (val < Btype(0)); } } } template <typename Ftype, typename Btype> void SmoothL1LossLayer<Ftype, Btype>::Backward_gpu(const vector<Blob*>& top, const vector<bool>& propagate_down, const vector<Blob*>& bottom) { int count = diff_.count(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( SmoothL1Backward<Ftype, Btype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, diff_.template gpu_data<Btype>(), diff_.template mutable_gpu_data<Btype>()); CUDA_POST_KERNEL_CHECK; for (int i = 0; i < 2; ++i) { if (propagate_down[i]) { const Btype sign = (i == 0) ? 1 : -1; const Btype alpha = sign * top[0]->cpu_diff<Btype>()[0] / bottom[i]->num(); caffe_gpu_axpby( bottom[i]->count(), // count alpha, // alpha diff_.template gpu_data<Btype>(), // x Btype(0), // beta bottom[i]->mutable_gpu_diff<Btype>()); // y } } } INSTANTIATE_LAYER_GPU_FUNCS_FB(SmoothL1LossLayer); } // namespace caffe
4ddad40e08fea47d670f74e58c565b995943b428.cu
// ------------------------------------------------------------------ // Fast R-CNN // copyright (c) 2015 Microsoft // Licensed under The MIT License [see fast-rcnn/LICENSE for details] // Written by Ross Girshick // Modified by Wei Liu // ------------------------------------------------------------------ #include <vector> #include "caffe/layers/smooth_L1_loss_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Ftype, typename Btype> __global__ void SmoothL1Forward(const int n, const Ftype* in, Ftype* out) { // f(x) = 0.5 * x^2 if |x| < 1 // |x| - 0.5 otherwise CUDA_KERNEL_LOOP(index, n) { Ftype val = in[index]; Ftype abs_val = abs(val); if (abs_val < 1) { out[index] = 0.5 * val * val; } else { out[index] = abs_val - 0.5; } } } template <typename Ftype, typename Btype> void SmoothL1LossLayer<Ftype, Btype>::Forward_gpu(const vector<Blob*>& bottom, const vector<Blob*>& top) { int count = bottom[0]->count(); caffe_gpu_sub<Ftype>( count, bottom[0]->gpu_data<Ftype>(), bottom[1]->gpu_data<Ftype>(), diff_.template mutable_gpu_data<Ftype>()); // d := b0 - b1 if (has_weights_) { caffe_gpu_mul( count, bottom[2]->gpu_data<Ftype>(), diff_.template gpu_data<Ftype>(), diff_.template mutable_gpu_data<Ftype>()); // d := w * (b0 - b1) } // NOLINT_NEXT_LINE(whitespace/operators) SmoothL1Forward<Ftype, Btype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, diff_.template gpu_data<Ftype>(), errors_.template mutable_gpu_data<Ftype>()); CUDA_POST_KERNEL_CHECK; Ftype loss; caffe_gpu_asum(count, errors_.template gpu_data<Ftype>(), &loss); top[0]->mutable_cpu_data<Ftype>()[0] = loss / bottom[0]->num(); } template <typename Ftype, typename Btype> __global__ void SmoothL1Backward(const int n, const Btype* in, Btype* out) { // f'(x) = x if |x| < 1 // = sign(x) otherwise CUDA_KERNEL_LOOP(index, n) { Btype val = in[index]; Btype abs_val = abs(val); if (abs_val < 1) { out[index] = val; } else { out[index] = (Btype(0) < val) - (val < Btype(0)); } 
} } template <typename Ftype, typename Btype> void SmoothL1LossLayer<Ftype, Btype>::Backward_gpu(const vector<Blob*>& top, const vector<bool>& propagate_down, const vector<Blob*>& bottom) { int count = diff_.count(); // NOLINT_NEXT_LINE(whitespace/operators) SmoothL1Backward<Ftype, Btype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, diff_.template gpu_data<Btype>(), diff_.template mutable_gpu_data<Btype>()); CUDA_POST_KERNEL_CHECK; for (int i = 0; i < 2; ++i) { if (propagate_down[i]) { const Btype sign = (i == 0) ? 1 : -1; const Btype alpha = sign * top[0]->cpu_diff<Btype>()[0] / bottom[i]->num(); caffe_gpu_axpby( bottom[i]->count(), // count alpha, // alpha diff_.template gpu_data<Btype>(), // x Btype(0), // beta bottom[i]->mutable_gpu_diff<Btype>()); // y } } } INSTANTIATE_LAYER_GPU_FUNCS_FB(SmoothL1LossLayer); } // namespace caffe
22f65ecdd9ee9c1ae48608298b49cd26ca5469fc.hip
// !!! This is a file automatically generated by hipify!!! #include <config.h> #include <CUDA_interface.h> #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include <float.h> #include <iostream> #include <fstream> #include <sstream> #include <cmath> #include <boost/format.hpp> #include <device/utils.cuh> #include <device/base_functions.cuh> #include <device/matrix_utils.cuh> #include "quadratures.h" #if __CUDA_ARCH__ >= 200 #define THREADS_PER_BLOCK 16 #else #define THREADS_PER_BLOCK 16 #endif __constant__ int QPC; __constant__ FEM_PRECISION Qp[8]; __constant__ FEM_PRECISION Qw[8]; __constant__ int QPC_e; __constant__ FEM_PRECISION Qp_e[8]; __constant__ FEM_PRECISION Qw_e[8]; __device__ FEM_PRECISION *d_B[2]; __device__ FEM_PRECISION *d_RHS; __device__ FEM_PRECISION *d_assembled; __device__ FEM_PRECISION *d_not_assembled; __device__ FEM_PRECISION *d_Nvals[2]; __device__ FEM_PRECISION *d_dNvals[2]; __device__ FEM_PRECISION *d_Nvals_err[2]; __device__ FEM_PRECISION *d_dNvals_err[2]; __device__ FEM_PRECISION *d_abssicas; __device__ FEM_PRECISION *d_fun_vals; __device__ FEM_PRECISION *d_der_vals; #define gpuAssert(ans) { gpuAssertCheck((ans), __FILE__, __LINE__); } inline void gpuAssertCheck(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { const char* msg = hipGetErrorString(code); fprintf(stderr,"GPUassert: %s %s %d\n", msg, file, line); if (abort) exit(code); } } void check_error(const char* str, hipError_t err_code) { if (err_code != ::hipSuccess) std::cerr << str << " -- " << hipGetErrorString(err_code) << "\n"; } /** * Exact solution * @param x * @return */ template<class T> inline T __device__ __host__ _u(T x) { return sin(15 * x) * cos(24 * x) * x; // return x * x; } // <editor-fold defaultstate="collapsed" desc="equation parameters"> template<class T> __device__ __host__ T _a(T x) { return sin(x); // return 1; } template<class T> __device__ __host__ T _b(T x) { return x; // return 0; } template<class T> 
__device__ __host__ T _c(T x) { return -x; // return 0; } template <class T> inline T __device__ __host__ _du(T x) { return (-sin(9 * x) + sin(39 * x) - 9 * x * cos(9 * x) + 39 * x * cos(39 * x)) / 2.0; // return 2 * x; } template <class T> __device__ __host__ inline T _adu(T x) { return _a(x) * _du(x); } template <class T> __device__ __host__ inline T _dadu(T x) { return (cos(x)*(-sin(9 * x) + sin(39 * x) - 9 * x * cos(9 * x) + 39 * x * cos(39 * x)) + 3 * sin(x)*(27 * x * sin(9 * x) - 507 * x * sin(39 * x) - 6 * cos(9 * x) + 26 * cos(39 * x))) / 2.0; // return 2; } template<class T> __device__ __host__ T _beta() { return 0; } template<class T> __device__ __host__ T _gamma() { return _adu<T > (1) + _beta<T > () * _u<T > (1); } template<class T> __device__ __host__ inline T _f(T x) { // return cos(M_PI * x) * cos(M_PI * y); return -(_dadu(x)) + _b(x) * _du(x) + _c(x) * _u(x); // return -2; } // </editor-fold> /** * Returns index that can be used for retrieving value of previously calculated * values of base functions (d_Nval) and their derivatives (d_dNval). 
* @param point_i - index of quadrature point [0, QUADRATURE_POINTS_CNT-1] * @param fun_i - index of a function [0, fun_cnt-1] * @param interval_i - number of interval * @param fun_cnt - total number of functions * @return index that can be used for d_Nvals and d_dNvals */ template<int qpc> __device__ inline int point_mem_idx(int point_i, int fun_i, int interval_i, int fun_cnt) { return (interval_i * qpc + point_i) * fun_cnt + fun_i; } template<int degree, class T> __device__ inline T get_N(int point_i, int fun_i, int fun_part, int _n) { return d_Nvals[degree & 1][point_mem_idx<degree + 1>(point_i, fun_i, fun_part, _n)]; } template<int degree, class T> __device__ inline T get_dN(int point_i, int fun_i, int interval_i, int _n) { return d_dNvals[degree & 1][point_mem_idx<degree + 1>(point_i, fun_i, interval_i, _n)]; } template<int degree, class T> __device__ inline T get_N_err(int point_i, int fun_i, int fun_part, int _n) { return d_Nvals_err[degree & 1][point_mem_idx<degree + 2>(point_i, fun_i, fun_part, _n)]; } template<int degree, class T> __device__ inline T get_dN_err(int point_i, int fun_i, int interval_i, int _n) { return d_dNvals_err[degree & 1][point_mem_idx<degree + 2>(point_i, fun_i, interval_i, _n)]; } template<int degree, class T> __device__ inline T fun_L(T x, int point_idx, int fun, int interval, int _n) { // return get_N<degree, T>(point_idx, fun, interval - fun, _n); return _f<T>(x) * get_N<degree, T>(point_idx, fun, interval - fun, _n); } template<int degree, class T> __device__ inline T fun_B(T x, int point_idx, int i, int j, int element_id, int _n) { return _a<T > (x) * get_dN<degree, T > (point_idx, i, element_id - i + degree, _n) * get_dN<degree, T > (point_idx, j, element_id - j + degree, _n) + _b<T > (x) * get_dN<degree, T > (point_idx, i, element_id - i + degree, _n) * get_N<degree, T > (point_idx, j, element_id - j + degree, _n) + _c<T > (x) * get_N<degree, T > (point_idx, i, element_id - i + degree, _n) * get_N<degree, T > (point_idx, j, 
element_id - j + degree, _n); } template<int degree, class T> __device__ T eval_L(int fun, int interval, int _n) { T sum(0); T aa, bb; T a = d_knot_vector[interval], b = d_knot_vector[interval + 1]; aa = (b - a) / 2.0; bb = (b + a) / 2.0; for (int i = 0; i < QPC; ++i) { sum += Qw[i] * fun_L<degree>(aa * Qp[i] + bb, i, fun, interval, _n); } return aa*sum; } /** * * @param i * @param j * @param element_idx * @param _n - number of functions * @return */ template<int degree, class T> __device__ inline T eval_B(int i, int j, int knot_i, int _n) { T sum(0); T aa, bb; T a = d_knot_vector[knot_i]; T b = d_knot_vector[knot_i + 1]; aa = (b - a) / 2.0; bb = (b + a) / 2.0; for (int idx = 0; idx < QPC; ++idx) { T x = aa * Qp[idx] + bb; sum += Qw[idx] * fun_B<degree > (x, idx, i, j, knot_i - degree, _n); } return aa*sum; } /** * Temporarily implemented only for degree 1, and _n being power of 2. * @param _c - front cell number (0,1,...) * @param _n - total number of fronts in one part * @param _p - number of part being evaluated (0,1,...) */ template<int degree, class T> __global__ void init_B(int _c, int _n, int _p) { // find number of front being initiated int n = blockDim.x * blockIdx.x + threadIdx.x; // is n greater than total number of fronts in one part? 
if (n >= _n) return; int element_id = n * (degree + 1) + _p; int y = _c / (degree + 1) + element_id; int x = _c % (degree + 1) + element_id; int idx = _n * (_p * (degree + 1)*(degree + 1) + _c) + n; if (y == 0) { if (x == 0) { d_B[0][0] = 1; } else { d_B[0][idx] = 0; } } else { d_B[0][idx] = eval_B<degree, T> (x, y, element_id + degree, _n * (degree + 1) + degree); // TODO uuu this is ugly } if (idx == _n * (degree + 1)*(degree + 1)*(degree + 1) - 1) d_B[0][idx] += _beta<T> (); } /** * Initializes RHS vector * @param Ndof number of basis functions */ template<int degree, class T> __global__ void init_RHS(int Ndof, int Nrhs) { int rhs_num = blockDim.x * blockIdx.x + threadIdx.x; int n = blockDim.y * blockIdx.y + threadIdx.y; if (n >= Ndof || rhs_num >= Nrhs) return; T x = 0; for (int part = 0; part <= degree; ++part) x += eval_L<degree, T>(n, n + part, Ndof); if (n==0) x = 0; if (n == Ndof - 1) x += _gamma<T > (); d_RHS[(n*Nrhs) + rhs_num] = x; } /** * * @param _n - number of functions */ template<int degree, int qpc, class T> __global__ void init_basis_functions_and_derivatives(int _n, T **N, T **dN) { int n = blockDim.x * blockIdx.x + threadIdx.x; if (n >= _n) return; #pragma unroll for (int i = 0; i < (degree + 1) * qpc; ++i) { N[0][i * _n + n] = 0; N[1][i * _n + n] = 0; dN[0][i * _n + n] = 0; dN[1][i * _n + n] = 0; } #pragma unroll for (int i = 0; i < qpc; ++i) { N[0][i * _n + n] = 1; } } template<class T> __device__ inline T interval(int a, int b) { return d_knot_vector[b] - d_knot_vector[a]; } template<class T> __device__ inline T interval(int a) { return interval<T > (a, a + 1); } template<int qpc, class T> __global__ void update_base(int _n, int idx, T **N, T **dN, T *Q_points) { int n = blockDim.x * blockIdx.x + threadIdx.x; // function number if (n >= _n) return; T h1 = interval<T > (n, n + idx); T h2 = interval<T > (n + 1, n + idx + 1); if (is_zero(h1)) { for (int i = 0; i < qpc; ++i) { // function N[idx & 1][point_mem_idx<qpc>(i, n, 0, _n)] = 0; 
//derivative dN[idx & 1][point_mem_idx<qpc>(i, n, 0, _n)] = 0; } } else { for (int i = 0; i < qpc; ++i) { T x = (Q_points[i] / 2.0 + 0.5) * interval<T > (n); // function N[idx & 1][point_mem_idx<qpc>(i, n, 0, _n)] = x * N[(~idx)&1][point_mem_idx<qpc>(i, n, 0, _n)] / h1; //derivative dN[idx & 1][point_mem_idx<qpc>(i, n, 0, _n)] = (x * dN[(~idx)&1][point_mem_idx<qpc>(i, n, 0, _n)] + N[(~idx)&1][point_mem_idx<qpc>(i, n, 0, _n)]) / h1; } } for (int j = 1; j < idx; ++j) { for (int i = 0; i < qpc; ++i) { T sum_fun, sum_der; T x = (Q_points[i] / 2.0 + 0.5) * interval<T > (n + j) + d_knot_vector[n + j]; if (is_zero(h1)) { sum_fun = 0; sum_der = 0; } else { sum_fun = (x - d_knot_vector[n]) * N[(~idx)&1][point_mem_idx<qpc>(i, n, j, _n)] / h1; sum_der = ((x - d_knot_vector[n]) * dN[(~idx)&1][point_mem_idx<qpc>(i, n, j, _n)] + N[(~idx)&1][point_mem_idx<qpc>(i, n, j, _n)]) / h1; } if (is_zero(h2)) { sum_fun += 0; sum_der += 0; } else { sum_fun += (d_knot_vector[n + idx + 1] - x) * N[(~idx)&1][point_mem_idx<qpc>(i, n + 1, j - 1, _n)] / h2; sum_der += ((d_knot_vector[n + idx + 1] - x) * dN[(~idx)&1][point_mem_idx<qpc>(i, n + 1, j - 1, _n)] - N[(~idx)&1][point_mem_idx<qpc>(i, n + 1, j - 1, _n)]) / h2; } N[idx & 1][point_mem_idx<qpc>(i, n, j, _n)] = sum_fun; dN[idx & 1][point_mem_idx<qpc>(i, n, j, _n)] = sum_der; } } if (is_zero(h2)) { for (int i = 0; i < qpc; ++i) { N[idx & 1][point_mem_idx<qpc>(i, n, idx, _n)] = 0; dN[idx & 1][point_mem_idx<qpc>(i, n, idx, _n)] = 0; } } else { for (int i = 0; i < qpc; ++i) { T x = (Q_points[i] / 2.0 + 0.5) * interval<T > (n + idx) + d_knot_vector[n + idx]; N[idx & 1][point_mem_idx<qpc>(i, n, idx, _n)] = (d_knot_vector[n + idx + 1] - x) * N[(~idx)&1][point_mem_idx<qpc>(i, n + 1, idx - 1, _n)] / h2; dN[idx & 1][point_mem_idx<qpc>(i, n, idx, _n)] = ((d_knot_vector[n + idx + 1] - x) * dN[(~idx)&1][point_mem_idx<qpc>(i, n + 1, idx - 1, _n)] - N[(~idx)&1][point_mem_idx<qpc>(i, n + 1, idx - 1, _n)]) / h2; } } } template<int degree, class T> __device__ 
inline void store_new_matrix(int next_merges_cnt, T BB[][2 * degree + 1][2 * degree + 1]) { int divisor = min(blockDim.x / 2, next_merges_cnt); int gm_idx = blockIdx.x * blockDim.x / 2; if (threadIdx.x < divisor) { for (int x = 0; x < 2 * degree; ++x) for (int y = 0; y < 2 * degree; ++y) d_B[1][(x * (2 * degree) + y) * next_merges_cnt + gm_idx + threadIdx.x] = BB[2 * threadIdx.x][x + 1][y + 1]; } else { int tid = threadIdx.x - divisor; int group2_offset = next_merges_cnt * (2 * degree) * (2 * degree); for (int x = 0; x < 2 * degree; ++x) for (int y = 0; y < 2 * degree; ++y) d_B[1][group2_offset + (x * (2 * degree) + y) * next_merges_cnt + gm_idx + tid] = BB[2 * tid + 1][x + 1][y + 1]; } } template<int degree, class T> __device__ inline void store_new_matrix2(int _n, T BB[][3 * degree][3 * degree], int step) { int divisor = min(blockDim.x / 2, _n); int gm_idx = blockIdx.x * blockDim.x / 2; if (threadIdx.x < divisor) { for (int x = 0; x < 2 * degree; ++x) { for (int y = 0; y < 2 * degree; ++y) { d_B[(~step)&1][(x * (2 * degree) + y) * _n + gm_idx + threadIdx.x] = BB[2 * threadIdx.x][x + degree][y + degree]; } } } else { int tid = threadIdx.x - divisor; int group2_offset = _n * (2 * degree) * (2 * degree); for (int x = 0; x < 2 * degree; ++x) { for (int y = 0; y < 2 * degree; ++y) { d_B[(~step)&1][group2_offset + (x * (2 * degree) + y) * _n + gm_idx + tid] = BB[2 * tid + 1][x + degree][y + degree]; } } } } /** * @param merges_cnt - number of merges i.e. 
total number of elements / (degree + 1) */ template<int degree, class T, int TPB> __global__ void first_merge(int merges_cnt) { __shared__ T BB[TPB][2 * degree + 1][2 * degree + 1]; T assembled[2 * degree + 1]; T X; const int n = blockDim.x * blockIdx.x + threadIdx.x; if (n >= merges_cnt) return; // initializing shared memory for (int x = 0; x < 2 * degree + 1; ++x) for (int y = 0; y < 2 * degree + 1; ++y) BB[threadIdx.x][x][y] = T(0); // load data to shared memory for (int i = 0; i < degree + 1; ++i) for (int x = 0; x < degree + 1; ++x) for (int y = 0; y < degree + 1; ++y) BB[threadIdx.x][x + i][y + i] += d_B[0][((i * (degree + 1) + x) * (degree + 1) + y) * merges_cnt + n]; // pivoting pivot_rows_cyclic<degree>(BB); pivot_columns_cyclic<degree>(BB); // calculate first row X = BB[threadIdx.x][0][0]; for (int i = 1; i < 2 * degree + 1; ++i) assembled[i] = BB[threadIdx.x][0][i] / X; assembled[0] = X; // store first row which is already factorized for (int i = 0; i < 2 * degree + 1; ++i) d_assembled[i * merges_cnt + n] = assembled[i]; // store first column needed for forward substitution for (int i = 1; i <= 2 * degree; ++i) d_not_assembled[(i-1) * merges_cnt + n] = BB[threadIdx.x][i][0]; // elimination for (int i = 1; i < 2 * degree + 1; ++i) { T lead = BB[threadIdx.x][i][0]; for (int j = 1; j <= 2 * degree; ++j) BB[threadIdx.x][i][j] -= assembled[j] * lead; } __syncthreads(); store_new_matrix<degree, T > (merges_cnt / 2, BB); } /** * Merges fronts of size (2*degree)x(2*degree) * @param merges_cnt number of merging processes in this step * @param offset offset for factorized rows */ template<int degree, class T, int TPB> __global__ void merge(int merges_cnt, int assembled_offset, int not_assembled_offset, int step) { __shared__ T BB[TPB][3 * degree][3 * degree]; T assembled[3 * degree]; const int n = blockDim.x * blockIdx.x + threadIdx.x; const int row_len = 3 * degree; if (n >= merges_cnt) return; // initializing shared memory for (int x = 0; x < 3 * degree; ++x) 
for (int y = 0; y < 3 * degree; ++y) BB[threadIdx.x][x][y] = T(0); // load data to shared memory for (int x = 0; x < 2 * degree; ++x) for (int y = 0; y < 2 * degree; ++y) { BB[threadIdx.x][x][y] += d_B[step&1][(x * 2 * degree + y) * merges_cnt + n]; BB[threadIdx.x][x + degree][y + degree] += d_B[step&1][(2 * degree)* (2 * degree) * merges_cnt + (x * 2 * degree + y) * merges_cnt + n]; } // pivoting for (int i = 0; i < degree; ++i) for (int x = 0; x < row_len; ++x) { T tmp = BB[threadIdx.x][i][x]; BB[threadIdx.x][i][x] = BB[threadIdx.x][i + degree][x]; BB[threadIdx.x][i + degree][x] = tmp; } for (int i = 0; i < degree; ++i) for (int x = 0; x < row_len; ++x) { T tmp = BB[threadIdx.x][x][i]; BB[threadIdx.x][x][i] = BB[threadIdx.x][x][i + degree]; BB[threadIdx.x][x][i + degree] = tmp; } // elimination (we eliminate |degree| rows) for (int i = 0; i < degree; ++i) { T X = BB[threadIdx.x][i][i]; for (int j = i + 1; j < row_len; ++j) assembled[j] = BB[threadIdx.x][i][j] / X; for (int j = 0; j <= i; ++j) assembled[j] = BB[threadIdx.x][i][j]; // store i-th row in global memory for (int j = 0; j < row_len; ++j) d_assembled[assembled_offset + (i * row_len + j) * merges_cnt + n] = assembled[j]; // store i-th column in global memory for (int j = 0; j < 2 * degree; ++j) d_not_assembled[not_assembled_offset + (i * (2 * degree) + j) * merges_cnt + n] = BB[threadIdx.x][j + degree][i]; // eliminate i-th row for (int x = i + 1; x < 3 * degree; ++x) { T lead = BB[threadIdx.x][x][i]; for (int y = i + 1; y < 3 * degree; ++y) { BB[threadIdx.x][x][y] -= assembled[y] * lead; } } } // store new matrices in global memory __syncthreads(); store_new_matrix2<degree, T > (merges_cnt / 2, BB, step); } template<int degree, class T> __global__ __launch_bounds__(1) void last_merge(int offset, int step) { __shared__ T BB[3 * degree][3 * degree]; for (int x = 0; x < 3 * degree; ++x) for (int y = 0; y < 3 * degree; ++y) BB[x][y] = 0; // load data from global memory for (int x = 0; x < 2 * degree; ++x) 
for (int y = 0; y < 2 * degree; ++y) BB[x][y] = d_B[step&1][x * 2 * degree + y]; for (int x = 0; x < 2 * degree; ++x) for (int y = 0; y < 2 * degree; ++y) BB[x + degree][y + degree] += d_B[step&1][(2 * degree) * (2 * degree) + x * (2 * degree) + y]; for (int i = 0; i < 3 * degree; ++i) { T lead = BB[i][i]; // BB[i][i] = 1; it is implicitly considered to be 1 later for (int x = i + 1; x < 3 * degree; ++x) BB[i][x] /= lead; for (int y = i + 1; y < 3 * degree; ++y) { lead = BB[y][i]; // BB[y][i] = 0; it is implicitly considered to be 0 later for (int x = i + 1; x < 3 * degree; ++x) BB[y][x] -= BB[i][x] * lead; } } // row first order for (int y=0 ; y<3*degree ; ++y) for (int x=0 ; x<3*degree ; ++x) { d_assembled[offset + (y * (3 * degree) + x)] = BB[y][x]; } } template<int degree, class T, int tpb> __global__ void first_forward_substitution(int merges_cnt, int rhs_cnt) { int merge_num = blockDim.y * blockIdx.y + threadIdx.y; int rhs_num = blockDim.x * blockIdx.x + threadIdx.x; if (merge_num >= merges_cnt || rhs_num >= rhs_cnt) return; vertical_vec<T> RHS(d_RHS + rhs_num, rhs_cnt); __shared__ T s_div[tpb]; T &div = s_div[threadIdx.y]; if (threadIdx.x == 0) div = d_assembled[merge_num]; __syncthreads(); int row_num = (degree + 1) * (merge_num + 1) - 1; RHS[row_num] /= div; } template<int degree, class T, int tpb> __global__ void first_forward_substitution_update_left(int merges_cnt, int rhs_cnt) { int merge_num = blockDim.y * blockIdx.y + threadIdx.y; int rhs_num = blockDim.x * blockIdx.x + threadIdx.x; if (merge_num >= merges_cnt || rhs_num >= rhs_cnt) return; __shared__ T s_B[tpb][degree]; T *B = s_B[threadIdx.y]; if (threadIdx.x == 0) { #pragma unroll for (int i=0 ; i<degree ; ++i) B[i] = d_not_assembled[(i * merges_cnt) + merge_num]; } __syncthreads(); vertical_vec<T> RHS(d_RHS + rhs_num, rhs_cnt); int row_num = (degree + 1) * (merge_num + 1) - 1; T x = RHS[row_num]; #pragma unroll for (int i=0 ; i<degree ; ++i) RHS[row_num - degree + i] -= B[i] * x; } template<int 
degree, class T, int tpb> __global__ void first_forward_substitution_update_right(int merges_cnt, int rhs_cnt) { int merge_num = blockDim.y * blockIdx.y + threadIdx.y; int rhs_num = blockDim.x * blockIdx.x + threadIdx.x; if (merge_num >= merges_cnt || rhs_num >= rhs_cnt) return; __shared__ T s_B[tpb][degree]; T *B = s_B[threadIdx.y]; if (threadIdx.x == 0) { #pragma unroll for (int i=degree ; i< 2 * degree ; ++i) B[i-degree] = d_not_assembled[(i * merges_cnt) + merge_num]; } __syncthreads(); vertical_vec<T> RHS(d_RHS + rhs_num, rhs_cnt); int row_num = (degree + 1) * (merge_num + 1) - 1; T x = RHS[row_num]; #pragma unroll for (int i=0 ; i<degree ; ++i) RHS[row_num + i + 1] -= B[i] * x; } template<int degree, class T, int tpb> __global__ void forward_substitution(int merges_cnt, int rhs_cnt, int offset, int step) { int merge_num = blockDim.y * blockIdx.y + threadIdx.y; int rhs_num = blockDim.x * blockIdx.x + threadIdx.x; if (merge_num >= merges_cnt || rhs_num >= rhs_cnt) return; vertical_vec<T> global_RHS(d_RHS + rhs_num, rhs_cnt); T RHS[degree]; int stride = (1 << step) * (degree + 1); int row_len = 3 * degree; int g_idx = (stride >> 1) + merge_num * stride; __shared__ T s_B[tpb][degree][degree]; T (*B)[degree] = s_B[threadIdx.y]; if (threadIdx.x == 0) for (int i = 0; i < degree; ++i) for (int j = 0; j <= i; ++j) B[i][j] = d_assembled[offset + (i * row_len + j) * merges_cnt + merge_num]; __syncthreads(); for (int i = 0; i < degree; ++i) RHS[i] = global_RHS[g_idx + i]; for (int i = 0; i < degree; ++i) { RHS[i] /= B[i][i]; for (int j = i + 1; j < degree; ++j) RHS[j] -= RHS[i] * B[j][i]; } for (int i = 0; i < degree; ++i) global_RHS[g_idx + i] = RHS[i]; } template<int degree, class T, int tpb> __global__ void forward_substitution_update_left(int merges_cnt, int rhs_cnt, int not_assembled_offset, int step) { int merge_num = blockDim.y * blockIdx.y + threadIdx.y; int rhs_num = blockDim.x * blockIdx.x + threadIdx.x; if (merge_num >= merges_cnt || rhs_num >= rhs_cnt) 
return; vertical_vec<T> global_RHS(d_RHS + rhs_num, rhs_cnt); T RHS[2 * degree]; int stride = (1 << step) * (degree + 1); int middle_idx = (stride >> 1) + merge_num * stride; __shared__ T s_B[tpb][degree][degree]; T (*B)[degree] = s_B[threadIdx.y]; if (threadIdx.x == 0) for (int col = 0; col < degree; ++col) for (int j = 0; j < degree; ++j) B[j][col] = d_not_assembled[not_assembled_offset + (col * (2 * degree) + j) * merges_cnt + merge_num]; __syncthreads(); int small_stride = stride >> 1; for (int i = 0; i < degree; ++i) RHS[i] = global_RHS[middle_idx + i]; for (int i = 0; i < degree; ++i) RHS[i + degree] = global_RHS[middle_idx - small_stride + i]; for (int i = 0; i < degree; ++i) for (int j = 0; j < degree; ++j) RHS[j + degree] -= RHS[i] * B[j][i]; for (int i = 0; i < degree; ++i) global_RHS[middle_idx - small_stride + i] = RHS[i + degree]; } template<int degree, class T, int tpb> __global__ void forward_substitution_update_right(int merges_cnt, int rhs_cnt, int not_assembled_offset, int step) { int merge_num = blockDim.y * blockIdx.y + threadIdx.y; int rhs_num = blockDim.x * blockIdx.x + threadIdx.x; if (merge_num >= merges_cnt || rhs_num >= rhs_cnt) return; vertical_vec<T> global_RHS(d_RHS + rhs_num, rhs_cnt); T RHS[2 * degree]; int stride = (1 << step) * (degree + 1); int middle_idx = (stride >> 1) + merge_num * stride; __shared__ T s_B[tpb][degree][degree]; T (*B)[degree] = s_B[threadIdx.y]; if (threadIdx.x == 0) for (int col = 0; col < degree; ++col) for (int j = degree; j < 2 * degree; ++j) B[j - degree][col] = d_not_assembled[not_assembled_offset + (col * (2 * degree) + j) * merges_cnt + merge_num]; __syncthreads(); int small_stride = stride >> 1; for (int i = 0; i < degree; ++i) RHS[i] = global_RHS[middle_idx + i]; for (int i = 0; i < degree; ++i) RHS[i + degree] = global_RHS[middle_idx + small_stride + i]; for (int i = 0; i < degree; ++i) for (int j = 0; j < degree; ++j) RHS[j + degree] -= RHS[i] * B[j][i]; for (int i = 0; i < degree; ++i) 
global_RHS[middle_idx + small_stride + i] = RHS[i + degree]; } template<int degree, class T> __global__ void last_forward_first_backward_substitution(int ne, int rhs_cnt, int offset) { int rhs_num = blockDim.x * blockIdx.x + threadIdx.x; if (rhs_num >= rhs_cnt) return; vertical_vec<T> global_RHS(d_RHS + rhs_num, rhs_cnt); T RHS[3 * degree]; __shared__ T BB[3 * degree][3 * degree]; if (threadIdx.x == 0) // row first order for (int y = 0; y < 3 * degree; ++y) for (int x = 0; x < 3 * degree; ++x) BB[y][x] = d_assembled[offset + (y * (3 * degree) + x)]; __syncthreads(); for (int i = 0; i < degree; ++i) RHS[i] = global_RHS[i]; for (int i = degree; i < 2 * degree; ++i) RHS[i] = global_RHS[ne / 2 - degree + i]; for (int i = 2 * degree; i < 3 * degree; ++i) RHS[i] = global_RHS[ne - 2 * degree + i]; for (int i = 0; i < 3 * degree; ++i) { RHS[i] /= BB[i][i]; for (int j = i + 1; j < 3 * degree; ++j) RHS[j] -= RHS[i] * BB[j][i]; } // we skip last row as it is already solved for (int i = 3 * degree - 2; i >= 0 ; --i) for (int j = i + 1 ; j < 3 * degree ; ++j) RHS[i] -= BB[i][j] * RHS[j]; for (int i = 0; i < degree; ++i) global_RHS[i] = RHS[i]; for (int i = degree; i < 2 * degree; ++i) global_RHS[ne / 2 - degree + i] = RHS[i]; for (int i = 2 * degree; i < 3 * degree; ++i) global_RHS[ne - 2 * degree + i] = RHS[i]; } template<int degree, class T, int tpb> __global__ void backward_substitution(int merges_cnt, int rhs_cnt, int offset, int step) { int merge_num = blockDim.y * blockIdx.y + threadIdx.y; int rhs_num = blockDim.x * blockIdx.x + threadIdx.x; if (merge_num >= merges_cnt || rhs_num >= rhs_cnt) return; vertical_vec<T> global_RHS(d_RHS + rhs_num, rhs_cnt); T RHS[3 * degree]; int stride = (1 << step) * (degree + 1); int row_len = 3 * degree; int g_idx = (stride >> 1) + merge_num * stride; __shared__ T s_B[tpb][degree][3 * degree]; T (*B)[3 * degree] = s_B[threadIdx.y]; if (threadIdx.x == 0) for (int i = 0; i < degree; ++i) for (int j = i+1; j < 3 * degree; ++j) B[i][j] = 
d_assembled[offset + (i * row_len + j) * merges_cnt + merge_num]; __syncthreads(); stride >>= 1; for (int i = 0; i < degree; ++i) RHS[i] = global_RHS[g_idx + i]; for (int i = 0; i < degree; ++i) RHS[i + degree] = global_RHS[g_idx - stride + i]; for (int i = 0; i < degree; ++i) RHS[i + 2 * degree] = global_RHS[g_idx + stride + i]; for (int i = degree - 1; i >= 0; --i) for (int j = i + 1; j < 3 * degree; ++j) RHS[i] -= RHS[j] * B[i][j]; for (int i = 0; i < degree; ++i) global_RHS[g_idx + i] = RHS[i]; } /** * @param _n */ template<int degree, class T, int tpb> __global__ void last_backward_substitution(int merges_cnt, int rhs_cnt) { int merge_num = blockDim.y * blockIdx.y + threadIdx.y; int rhs_num = blockDim.x * blockIdx.x + threadIdx.x; if (merge_num >= merges_cnt || rhs_num >= rhs_cnt) return; __shared__ T s_B[tpb][2 * degree]; T *B = s_B[threadIdx.y]; if (threadIdx.x == 0) for (int i = 1; i <= 2 * degree; ++i) B[i - 1] = d_assembled[i * merges_cnt + merge_num]; __syncthreads(); vertical_vec<T> global_RHS(d_RHS + rhs_num, rhs_cnt); unsigned row_num = (degree + 1) * (merge_num + 1) - 1; T RHS = global_RHS[row_num]; for (int i = -degree; i < 0; ++i) RHS -= B[i + degree] * global_RHS[row_num + i]; for (int i = 1; i <= degree; ++i) RHS -= B[i + degree - 1] * global_RHS[row_num + i]; global_RHS[row_num] = RHS; } template<int degree, class T> __global__ void evaluate(int _n, T* x, int length) { int n = blockDim.x * blockIdx.x + threadIdx.x; if (n >= length) return; T sum = 0; T XX = x[n]; for (int i = 0; i < _n + degree; ++i) { sum += d_RHS[i] * N<degree, T > (XX, i); } x[n] = sum; } template<int degree, class T> __device__ T evaluate(T x, int elements_cnt) { T sum = 0; for (int i = 0; i < elements_cnt + degree; ++i) { sum += d_RHS[i] * N<degree > (x, i); } return sum; } template<int degree, class T, int qpc> __global__ void calculate_norm(int elm_cnt, T *sum) { int n = blockDim.x * blockIdx.x + threadIdx.x; if (n >= elm_cnt) return; T tmp = 0; for (int i = 0; i < qpc; 
++i) { tmp += Qw_e[i] * pow(_u(d_abssicas[i * elm_cnt + n]) - d_fun_vals[i * elm_cnt + n], 2); } for (int i = 0; i < qpc; ++i) { tmp += Qw_e[i] * pow((_du(d_abssicas[i * elm_cnt + n]) - d_der_vals[i * elm_cnt + n]), 2); } sum[n] = interval<T>(n + degree) * tmp / 2.0; } template<int degree, class T> __global__ void calculate_function_in_quadrature_points(int elements_cnt, int Nrhs) { int n = blockDim.x * blockIdx.x + threadIdx.x; const int rhs_num = 0; if (n >= elements_cnt) return; for (int i = 0; i < QPC_e; ++i) for (int j = 0; j <= degree; ++j) d_fun_vals[i * elements_cnt + n] += d_RHS[(n + j)*Nrhs + rhs_num] * get_N_err<degree, T > (i, n + j, degree - j, basis_funs_cnt<degree > (elements_cnt)); } template<int degree, class T> __global__ void calculate_derivative_in_quadrature_points(int elements_cnt, int Nrhs) { const int rhs_num = 0; int n = blockDim.x * blockIdx.x + threadIdx.x; if (n >= elements_cnt) return; for (int i = 0; i < QPC_e; ++i) for (int j = 0; j <= degree; ++j) d_der_vals[i * elements_cnt + n] += d_RHS[(n + j)*Nrhs + rhs_num] * get_dN_err<degree, T > (i, n + j, degree - j, basis_funs_cnt<degree > (elements_cnt)); } template<int degree, class T> __global__ void calculate_abscissas(int elements_cnt) { int n = blockDim.x * blockIdx.x + threadIdx.x; if (n >= elements_cnt) return; T a = d_knot_vector[n + degree], b = d_knot_vector[n + degree + 1]; T aa = (b - a) / 2.0; T bb = (b + a) / 2.0; for (int i = 0; i < QPC_e; ++i) { d_abssicas[i * elements_cnt + n] = aa * Qp_e[i] + bb; } } template<class T> T t_gen(int i, int N, int degree) { if (i <= degree) return T(0); if (i < N + degree) return T(i - degree) / N; return T(1); } template<class T> struct vector_scattered { T *p; int skip; vector_scattered (T *p, int skip) : p(p), skip(skip) { } T & operator[](int idx) { return *(p + idx * skip); } }; template<class T, int degree> void debug_device(int n); template<class T, int degree> void print_local(int n); template<int degree, int qpc, class T> void 
calculate_basis(int n, T **N, T **dN, T *Q) { int tpb = 256; int block_count = ((n + degree) / tpb) + ((n + degree) % tpb ? 1 : 0); hipLaunchKernelGGL(( init_basis_functions_and_derivatives<degree, qpc>), dim3(block_count), dim3(tpb) , 0, 0, basis_funs_cnt<degree>(n), N, dN); check_error("init_base_functions_and_derivatives", hipGetLastError()); for (int i = 1; i <= degree; ++i) { hipLaunchKernelGGL(( update_base<qpc>), dim3(block_count), dim3(tpb), 0, 0, n + degree, i, N, dN, Q); check_error("update_base", hipGetLastError()); } } template<class T, int degree> void calculate_basis_solver(int n) { void *tmp; T **N, **dN, *Q; gpuAssert(hipGetSymbolAddress(&tmp, d_Nvals)); N = reinterpret_cast<T**>(tmp); gpuAssert(hipGetSymbolAddress(&tmp, d_dNvals)); dN = reinterpret_cast<T**>(tmp); gpuAssert(hipGetSymbolAddress(&tmp, Qp)); Q = reinterpret_cast<T*>(tmp); calculate_basis<degree, degree + 1>(n, N, dN, Q); } template<class T, int degree> void calculate_basis_error(int n) { void *tmp; T **N, **dN, *Q; gpuAssert(hipGetSymbolAddress(&tmp, d_Nvals_err)); N = reinterpret_cast<T**>(tmp); gpuAssert(hipGetSymbolAddress(&tmp, d_dNvals_err)); dN = reinterpret_cast<T**>(tmp); gpuAssert(hipGetSymbolAddress(&tmp, Qp_e)); Q = reinterpret_cast<T*>(tmp); // error_QPC = degree + 2 calculate_basis<degree, degree + 2>(n, N, dN, Q); } /** * Allocates necessary memory on device * @param Ne - number of elements in [0, 1] interval * @param Nrhs - number of right hand sides */ template<class T, int degree> void prepare_device(int Ne, int Nrhs) { T *tmp, *t; T * dev_ptrs[2]; const int solver_QPC = degree + 1; const int error_QPC = degree + 2; // initialize quadratures for solver { T *p = get_gauss_legendre_points<T, solver_QPC>(); T *w = get_gauss_legendre_weights<T, solver_QPC>(); gpuAssert(hipMemcpyToSymbol(QPC, &solver_QPC, sizeof (solver_QPC))); gpuAssert(hipMemcpyToSymbol(Qp, p, sizeof (T) * solver_QPC)); gpuAssert(hipMemcpyToSymbol(Qw, w, sizeof (T) * solver_QPC)); } // initialize 
quadratures for error calculation { T *p = get_gauss_legendre_points<T, error_QPC>(); T *w = get_gauss_legendre_weights<T, error_QPC>(); gpuAssert(hipMemcpyToSymbol(QPC_e, &error_QPC, sizeof (error_QPC))); gpuAssert(hipMemcpyToSymbol(Qp_e, p, sizeof (T) * error_QPC)); gpuAssert(hipMemcpyToSymbol(Qw_e, w, sizeof (T) * error_QPC)); } t = new T[Ne + 2 * degree + 1]; for (int i = 0; i <= Ne + 2 * degree; ++i) t[i] = t_gen<T > (i, Ne, degree); int size; int mem_size=0, total_mem_size=0; // Allocate knot vector gpuAssert(hipMalloc(&tmp, sizeof (T) * knots_cnt<degree > (Ne))); gpuAssert(hipMemcpy(tmp, t, sizeof (T) * knots_cnt<degree > (Ne), hipMemcpyHostToDevice)); gpuAssert(hipMemcpyToSymbol(d_knot_vector, &tmp, sizeof (tmp))); delete[] t; // Allocate fronts (B part) size = ::max(Ne * (degree + 1) * (degree + 1), (Ne / (degree + 1)) * (2 * degree) * (2 * degree)); gpuAssert(hipMalloc(&dev_ptrs[0], sizeof (T) * size)); gpuAssert(hipMalloc(&dev_ptrs[1], sizeof (T) * size)); gpuAssert(hipMemcpyToSymbol(d_B, &dev_ptrs, sizeof(dev_ptrs))); // allocate RHS gpuAssert(hipMalloc(&tmp, Nrhs * sizeof (T) * basis_funs_cnt<degree>(Ne))); gpuAssert(hipMemcpyToSymbol(d_RHS, &tmp, sizeof (tmp))); // allocate d_assembled gpuAssert(hipMalloc(&tmp, sizeof (T) * basis_funs_cnt<degree>(Ne) * 3 * degree)); gpuAssert(hipMemcpyToSymbol(d_assembled, &tmp, sizeof (tmp))); // allocate d_not_assembled { int M = Ne / (degree + 1); gpuAssert(hipMalloc(&tmp, sizeof (T) * 2 * M * (degree * degree + degree))); gpuAssert(hipMemcpyToSymbol(d_not_assembled, &tmp, sizeof (tmp))); } // Allocate matrices for accumulative base function evaluation // functions gpuAssert(hipMalloc(&dev_ptrs[0], sizeof (T) * basis_funs_cnt<degree > (Ne) * (degree + 1) * solver_QPC)); gpuAssert(hipMalloc(&dev_ptrs[1], sizeof (T) * basis_funs_cnt<degree > (Ne) * (degree + 1) * solver_QPC)); gpuAssert(hipMemcpyToSymbol(d_Nvals, dev_ptrs, sizeof (dev_ptrs))); // derivatives gpuAssert(hipMalloc(&dev_ptrs[0], sizeof (T) * 
basis_funs_cnt<degree > (Ne) * (degree + 1) * solver_QPC)); gpuAssert(hipMalloc(&dev_ptrs[1], sizeof (T) * basis_funs_cnt<degree > (Ne) * (degree + 1) * solver_QPC)); gpuAssert(hipMemcpyToSymbol(d_dNvals, dev_ptrs, sizeof (dev_ptrs))); // FOR ERROR CALCULATION // functions mem_size = sizeof (T) * basis_funs_cnt<degree > (Ne) * (degree + 1) * error_QPC; total_mem_size += mem_size; gpuAssert(hipMalloc(&dev_ptrs[0], mem_size)); total_mem_size += mem_size; gpuAssert(hipMalloc(&dev_ptrs[1], mem_size)); gpuAssert(hipMemcpyToSymbol(d_Nvals_err, dev_ptrs, sizeof (dev_ptrs))); // derivatives mem_size = sizeof (T) * basis_funs_cnt<degree > (Ne) * (degree + 1) * error_QPC; total_mem_size += mem_size; gpuAssert(hipMalloc(&dev_ptrs[0], mem_size)); total_mem_size += mem_size; gpuAssert(hipMalloc(&dev_ptrs[1], mem_size)); gpuAssert(hipMemcpyToSymbol(d_dNvals_err, dev_ptrs, sizeof (dev_ptrs))); // Allocate space for result gpuAssert(hipMalloc(&tmp, sizeof (T) * error_QPC * Ne)); gpuAssert(hipMemset(tmp, 0, sizeof (T) * error_QPC * Ne)); gpuAssert(hipMemcpyToSymbol(d_fun_vals, &tmp, sizeof (tmp))); gpuAssert(hipMalloc(&tmp, sizeof (T) * error_QPC * Ne)); gpuAssert(hipMemset(tmp, 0, sizeof (T) * error_QPC * Ne)); gpuAssert(hipMemcpyToSymbol(d_der_vals, &tmp, sizeof (tmp))); gpuAssert(hipMalloc(&tmp, sizeof (T) * error_QPC * Ne)); gpuAssert(hipMemcpyToSymbol(d_abssicas, &tmp, sizeof (tmp))); hipDeviceSynchronize(); } /** * * @param n number of elements */ template<int degree, class T> void prepare_result(int n, int Nrhs) { int tpb = 256; int block_cnt = n / tpb + (n % tpb ? 
1 : 0); calculate_abscissas<degree, T> << <block_cnt, tpb >> >(n); check_error("calculate_abscissas", hipGetLastError()); calculate_function_in_quadrature_points<degree, T> << <block_cnt, tpb >> >(n, Nrhs); check_error("calculate_function_in_quadrature_points", hipGetLastError()); calculate_derivative_in_quadrature_points<degree, T> << <block_cnt, tpb >> >(n, Nrhs); check_error("calculate_derivative_in_quadrature_points", hipGetLastError()); } void cleanup_device() { void *tmp; void *dev_ptrs[2]; gpuAssert(hipMemcpyFromSymbol(&dev_ptrs, d_B, sizeof (dev_ptrs))); gpuAssert(hipFree(dev_ptrs[0])); gpuAssert(hipFree(dev_ptrs[1])); gpuAssert(hipMemcpyFromSymbol(&tmp, d_knot_vector, sizeof (tmp))); gpuAssert(hipFree(tmp)); gpuAssert(hipMemcpyFromSymbol(&tmp, d_RHS, sizeof (tmp))); gpuAssert(hipFree(tmp)); gpuAssert(hipMemcpyFromSymbol(&tmp, d_assembled, sizeof (tmp))); gpuAssert(hipFree(tmp)); gpuAssert(hipMemcpyFromSymbol(&tmp, d_not_assembled, sizeof (tmp))); gpuAssert(hipFree(tmp)); // Free matrices for accumulative base function evaluation // functions gpuAssert(hipMemcpyFromSymbol(dev_ptrs, d_Nvals, sizeof (dev_ptrs))); gpuAssert(hipFree(dev_ptrs[0])); gpuAssert(hipFree(dev_ptrs[1])); // derivatives gpuAssert(hipMemcpyFromSymbol(dev_ptrs, d_dNvals, sizeof (dev_ptrs))); gpuAssert(hipFree(dev_ptrs[0])); gpuAssert(hipFree(dev_ptrs[1])); // Free result memory gpuAssert(hipMemcpyFromSymbol(&tmp, d_fun_vals, sizeof (tmp))); gpuAssert(hipFree(tmp)); gpuAssert(hipMemcpyFromSymbol(&tmp, d_der_vals, sizeof (tmp))); gpuAssert(hipFree(tmp)); gpuAssert(hipMemcpyFromSymbol(&tmp, d_abssicas, sizeof (tmp))); gpuAssert(hipFree(tmp)); // FOR ERROR CALCULATION // functions gpuAssert(hipMemcpyFromSymbol(dev_ptrs, d_Nvals_err, sizeof (dev_ptrs))); gpuAssert(hipFree(dev_ptrs[0])); gpuAssert(hipFree(dev_ptrs[1])); // derivatives gpuAssert(hipMemcpyFromSymbol(dev_ptrs, d_dNvals_err, sizeof (dev_ptrs))); gpuAssert(hipFree(dev_ptrs[0])); gpuAssert(hipFree(dev_ptrs[1])); } template<class T, 
int degree> void init_fronts(int Ne, int Nrhs) { calculate_basis_solver<T, degree>(Ne); check_error("calculate_basis_solver", hipGetLastError()); calculate_basis_error<T, degree>(Ne); check_error("calculate_basis_error", hipGetLastError()); int N = Ne / (degree + 1); int threads_per_block = 32; int block_count = div_ceil(N, threads_per_block); for (int part = 0; part <= degree; ++part) { for (int i = 0; i < (degree + 1)*(degree + 1); ++i) { hipLaunchKernelGGL(( init_B<degree, T>), dim3(block_count), dim3(threads_per_block), 0, 0, i, N, part); check_error("B", hipGetLastError()); } } const int Ndof = basis_funs_cnt<degree>(Ne); int threads_per_block_per_rhs = 8; int rhs_per_block = 16; int blocks_per_rhs = div_ceil(Ndof, threads_per_block_per_rhs); int rhs_blocks = div_ceil(Nrhs, rhs_per_block); dim3 b_grid(rhs_blocks, blocks_per_rhs); dim3 t_grid(rhs_per_block, threads_per_block_per_rhs); hipLaunchKernelGGL(( init_RHS<degree, T>), dim3(b_grid), dim3(t_grid), 0, 0, Ndof, Nrhs); check_error("init_RHS", hipGetLastError()); hipDeviceSynchronize(); } /** * Calculates offset in global d_assembled for rows being factorized in * provided step * @param n - number of elements * @param step - current step * @return offset */ template<int degree> int assembled_offset_for_step(int Ne, int step) { if (step <= 0) return 0; const int rows_per_merge = degree; int merges_cnt = Ne / (degree + 1); int offset = 0; offset += merges_cnt * (2 * degree + 1); while (--step) { merges_cnt >>= 1; // div 2 offset += (3 * degree) * merges_cnt * rows_per_merge; } return offset; } /** * Calculates offset in global d_not_assembled for columns for not assembled * rows * @param n - number of elements * @param step - current step * @return offset */ template<int degree> int not_assembled_offset_for_step(int Ne, int step) { if (step <= 0) return 0; int merges_cnt = Ne / (degree + 1); int offset = 0; offset += merges_cnt * (2 * degree); while (--step) { merges_cnt >>= 1; // div 2 offset += (2 * degree * 
degree) * merges_cnt; } return offset; } template<class T, int degree> void launch_matrix_factorization(int ne) { const int tpb = degree >= 4 ? THREADS_PER_BLOCK / 2 : THREADS_PER_BLOCK; int block_grid; int merges_cnt; merges_cnt = merges_cnt_for_step<degree>(ne, 0); block_grid = prepare_block_grid(merges_cnt, tpb); hipLaunchKernelGGL(( first_merge<degree, T, tpb>) , dim3(block_grid), dim3(tpb), 0, 0, merges_cnt); check_error("first merge", hipGetLastError()); // print_local<T, degree>(ne); int max_step = steps_cnt<degree>(ne); for (int step = 1; step < max_step ; ++step) { merges_cnt = merges_cnt_for_step<degree>(ne, step); block_grid = prepare_block_grid(merges_cnt, tpb); hipLaunchKernelGGL(( merge<degree, T, tpb>), dim3(block_grid), dim3(tpb), 0, 0, merges_cnt, assembled_offset_for_step<degree>(ne, step), not_assembled_offset_for_step<degree>(ne, step), step); check_error("merge", hipGetLastError()); } hipLaunchKernelGGL(( last_merge<degree, T>), dim3(1), dim3(1), 0, 0, assembled_offset_for_step<degree>(ne, max_step), max_step); check_error("last merge", hipGetLastError()); } template<class T, int degree> void launch_forward_substitution(int ne, int Nrhs) { const int tpb = degree >= 4 ? 
8 : 16; const int RHSes_per_block = 16; int merges_cnt; merges_cnt = merges_cnt_for_step<degree>(ne, 0); dim3 t_grid(RHSes_per_block, tpb); dim3 b_grid = calculate_blocks(dim3(Nrhs, merges_cnt), t_grid); // debug_device<T, degree>(ne); hipLaunchKernelGGL(( first_forward_substitution<degree, T, RHSes_per_block>), dim3(b_grid), dim3(t_grid), 0, 0, merges_cnt, Nrhs); check_error("first_forward_substitution", hipGetLastError()); // debug_device<T, degree>(ne); hipLaunchKernelGGL(( first_forward_substitution_update_left<degree, T, RHSes_per_block>), dim3(b_grid), dim3(t_grid), 0, 0, merges_cnt, Nrhs); check_error("first_forward_substitution_update_left", hipGetLastError()); // debug_device<T, degree>(ne); hipLaunchKernelGGL(( first_forward_substitution_update_right<degree, T, RHSes_per_block>), dim3(b_grid), dim3(t_grid), 0, 0, merges_cnt, Nrhs); check_error("first_forward_substitution_update_right", hipGetLastError()); // debug_device<T, degree>(ne); int max_step = steps_cnt<degree>(ne); for (int step = 1; step < max_step ; ++step) { merges_cnt = merges_cnt_for_step<degree>(ne, step); b_grid = calculate_blocks(dim3(Nrhs, merges_cnt), t_grid); hipLaunchKernelGGL(( forward_substitution<degree, T, RHSes_per_block>), dim3(b_grid), dim3(t_grid), 0, 0, merges_cnt, Nrhs, assembled_offset_for_step<degree>(ne, step), step); check_error("forward_substitution", hipGetLastError()); // debug_device<T, degree>(ne); hipLaunchKernelGGL(( forward_substitution_update_left<degree, T, RHSes_per_block>), dim3(b_grid), dim3(t_grid), 0, 0, merges_cnt, Nrhs, not_assembled_offset_for_step<degree>(ne, step), step); check_error("forward_substitution_update_left", hipGetLastError()); // debug_device<T, degree>(ne); hipLaunchKernelGGL(( forward_substitution_update_right<degree, T, RHSes_per_block>), dim3(b_grid), dim3(t_grid), 0, 0, merges_cnt, Nrhs, not_assembled_offset_for_step<degree>(ne, step), step); check_error("forward_substitution_update_right", hipGetLastError()); // debug_device<T, 
degree>(ne); } t_grid = dim3(RHSes_per_block, 1); b_grid = calculate_blocks(dim3(Nrhs, 1), t_grid); hipLaunchKernelGGL(( last_forward_first_backward_substitution<degree, T>), dim3(b_grid), dim3(t_grid), 0, 0, ne, Nrhs, assembled_offset_for_step<degree>(ne, max_step)); check_error("last_forward_first_backward_substitution", hipGetLastError()); } template<class T, int degree> void launch_backward_substitution(int ne, int Nrhs) { const int tpb = degree >= 4 ? 8 : 16; const int RHSes_per_block = 16; int merges_cnt; dim3 t_grid(RHSes_per_block, tpb); dim3 b_grid; int max_step = steps_cnt<degree>(ne); for (int step = max_step - 1; step > 0 ; --step) { merges_cnt = merges_cnt_for_step<degree>(ne, step); b_grid = calculate_blocks(dim3(Nrhs, merges_cnt), t_grid); hipLaunchKernelGGL(( backward_substitution<degree, T, RHSes_per_block>), dim3(b_grid), dim3(t_grid), 0, 0, merges_cnt, Nrhs, assembled_offset_for_step<degree>(ne, step), step); check_error("backward_substitution", hipGetLastError()); } merges_cnt = merges_cnt_for_step<degree>(ne, 0); b_grid = calculate_blocks(dim3(Nrhs, merges_cnt), t_grid); hipLaunchKernelGGL(( last_backward_substitution<degree, T, RHSes_per_block>), dim3(b_grid), dim3(t_grid), 0, 0, merges_cnt, Nrhs); check_error("last_backward_substitution", hipGetLastError()); } template<class T, int degree> void factorize_matrix(int Ne) { // debug_device<T, degree>(ne); launch_matrix_factorization<T, degree>(Ne); hipDeviceSynchronize(); } template<class T, int degree> void solve_equation(int Ne, int Nrhs) { launch_forward_substitution<T, degree>(Ne, Nrhs); // debug_device<T, degree>(ne); launch_backward_substitution<T, degree>(Ne, Nrhs); // debug_device<T, degree>(ne); prepare_result<degree, T>(Ne, Nrhs); hipDeviceSynchronize(); } template<class T, int degree> void print_result(int n, std::ostream &ostr) { T *x, *y, *tmp; const int QPC = degree + 2; x = new T[n * QPC]; y = new T[n * QPC]; gpuAssert(hipMemcpyFromSymbol(&tmp, d_abssicas, sizeof (tmp))); 
gpuAssert(hipMemcpy(x, tmp, sizeof (T) * QPC * n, hipMemcpyDeviceToHost)); gpuAssert(hipMemcpyFromSymbol(&tmp, d_fun_vals, sizeof (tmp))); gpuAssert(hipMemcpy(y, tmp, sizeof (T) * QPC * n, hipMemcpyDeviceToHost)); for (int i = 0; i < n * QPC; ++i) ostr << x[i] << ' ' << y[i] << '\n'; delete []x; delete []y; // debug_device<T, degree>(n); } template<class T, int degree> T calculate_error(int N) { T *tmp; T *result = new T[N]; gpuAssert(hipMalloc(&tmp, sizeof (T) * N)); int tpb = 256; int block_cnt = N / tpb + (N % tpb ? 1 : 0); hipLaunchKernelGGL(( calculate_norm<degree, T, degree + 2>), dim3(block_cnt), dim3(tpb), 0, 0, N, tmp); check_error("calculate_norm", hipGetLastError()); gpuAssert(hipMemcpy(result, tmp, sizeof (T) * N, hipMemcpyDeviceToHost)); T sum = 0; for (int i = 0; i < N; ++i) { sum += result[i]; } delete []result; gpuAssert(hipFree(tmp)); return sqrt(sum); } template<class T, int degree> void print_local(int n) { T *bb; void *tmp[2]; int merges_cnt = merges_cnt_for_step<degree > (n, 1); int size = merges_cnt * (2 * degree + 1) * (2 * degree + 1); bb = new T[size]; gpuAssert(hipMemcpyFromSymbol(tmp, d_B, sizeof (tmp))); gpuAssert(hipMemcpy(bb, tmp[0], sizeof (T) * size, hipMemcpyDeviceToHost)); for (int i = 0; i < merges_cnt; ++i) { for (int x = 0; x < 2 * degree + 1; ++x) { for (int y = 0; y < 2 * degree + 1; ++y) { std::cout << bb[(x * 2 * degree + y) * merges_cnt + i] << " "; } std::cout << "\n"; } std::cout << "\n"; } delete[] bb; // bb = new T[basis_funs_cnt<degree>(n)]; // gpuAssert(hipMemcpyFromSymbol(&tmp, d_RHS, sizeof (tmp))); // gpuAssert(hipMemcpy(bb, tmp, sizeof (T) * basis_funs_cnt<degree>(n), hipMemcpyDeviceToHost)); // // // for (int i = 0 ; i < basis_funs_cnt<degree>(n) ; ++i) // std::cout << bb[i] << "\n"; // // std::cout << "------------------------\n------------------------\n"; // // // delete[] bb; } template<class T, int degree> void debug_device(int n) { // T *bb, *tmp; // int QPC = degree + 2; // bb = new T[(n + degree) * (degree 
+ 1) * QPC]; // T * dev_ptrs[2]; // gpuAssert(hipMemcpyFromSymbol(dev_ptrs, d_Nvals_err, sizeof (dev_ptrs))); // gpuAssert(hipMemcpy(bb, dev_ptrs[degree & 1], // sizeof (T) * basis_funs_cnt<degree > (n) * (degree + 1) * QPC, // hipMemcpyDeviceToHost)); // // std::cout << "------------------------\n------------------------\n"; // for (int i = 0; i < basis_funs_cnt<degree > (n) * (degree + 1) * QPC; ++i) // { // std::cout << bb[i] << '\n'; // } // delete [] bb; // // gpuAssert(hipMemcpy(bb, dev_ptrs[1], sizeof (T) * (n + degree) * (degree + 1) * QPC, hipMemcpyDeviceToHost)); // // std::cout << "------------------------\n------------------------\n"; // for (int i = 0; i < (n + degree) * (degree + 1) * QPC; ++i) // { // std::cout << bb[i] << '\n'; // } // // delete [] bb; // int Ndof = basis_funs_cnt<degree>(n); // bb = new T[RHSC * Ndof]; // gpuAssert(hipMemcpyFromSymbol(&tmp, d_RHS, sizeof (tmp))); // gpuAssert(hipMemcpy(bb, tmp, sizeof (T) * Ndof * RHSC, hipMemcpyDeviceToHost)); // // for (int i=0 ; i<Ndof ; ++i) // { // for (int r = 0 ; r < RHSC ; ++r) // { // std::cerr << bb[i*RHSC + r] << "\t\t"; // } // std::cerr << "\n"; // } // // delete[] bb; } typedef void (*device_fun_i)(int); typedef void (*device_fun_ii)(int, int); // <editor-fold defaultstate="collapsed" desc="interface functions (float)"> // //template<> //void //CUDA_prepare_device<float>(int degree, int n) //{ // static device_fun prepares[] = { // prepare_device<float, 1 >, // prepare_device<float, 2 >, // prepare_device<float, 3 >, // prepare_device<float, 4 >, // prepare_device<float, 5 > // }; // // prepares[degree - 1](n); //} // //template<> //void //CUDA_init_fronts<float>(int degree, int n) //{ // static device_fun initializers[] = { // init_fronts<float, 1 >, // init_fronts<float, 2 >, // init_fronts<float, 3 >, // init_fronts<float, 4 >, // init_fronts<float, 5 > // }; // // initializers[degree - 1](n); //} // //template<> //void //CUDA_solve<float>(int degree, int n) //{ // static 
device_fun solvers[] = { // solve_equation<float, 1 >, // solve_equation<float, 2 >, // solve_equation<float, 3 >, // solve_equation<float, 4 >, // solve_equation<float, 5 > // }; // // solvers[degree - 1](n); //} // //template<> //float //CUDA_error<float>(int degree, int n) //{ // typedef float (*error_fun)(int); // static error_fun calculators[] = { // calculate_error<float, 1 >, // calculate_error<float, 2 >, // calculate_error<float, 3 >, // calculate_error<float, 4 >, // calculate_error<float, 5 > // }; // // return calculators[degree - 1](n); //} // //template<> //void //CUDA_print_result<float>(int degree, int n, std::ostream &ostr) //{ // typedef void (*print_fun)(int, std::ostream &); // static print_fun printers[] = { // print_result<float, 1 >, // print_result<float, 2 >, // print_result<float, 3 >, // print_result<float, 4 >, // print_result<float, 5 > // }; // // printers[degree - 1](n, ostr); //} // //template<> //void //CUDA_debug<float>(int degree, int n) //{ // typedef void (*print_fun)(int); // static print_fun debuggers[] = { // debug_device<float, 1 >, // debug_device<float, 2 >, // debug_device<float, 3 >, // debug_device<float, 4 >, // debug_device<float, 5 > // }; // // debuggers[degree - 1](n); //} // </editor-fold> // <editor-fold defaultstate="collapsed" desc="interface functions (double)"> template<> void CUDA_prepare_device<double>(int degree, int n, int rhs_cnt) { static device_fun_ii prepares[] = { prepare_device<double, 1 >, prepare_device<double, 2 >, prepare_device<double, 3 >, prepare_device<double, 4 >, prepare_device<double, 5 > }; prepares[degree - 1](n, rhs_cnt); } template<> void CUDA_init_fronts<double>(int degree, int n, int rhs_cnt) { static device_fun_ii initializers[] = { init_fronts<double, 1 >, init_fronts<double, 2 >, init_fronts<double, 3 >, init_fronts<double, 4 >, init_fronts<double, 5 > }; initializers[degree - 1](n, rhs_cnt); } template<> void CUDA_factorize_matrix<double>(int degree, int n) { static device_fun_i 
factorize[] = { factorize_matrix<double, 1 >, factorize_matrix<double, 2 >, factorize_matrix<double, 3 >, factorize_matrix<double, 4 >, factorize_matrix<double, 5 > }; factorize[degree - 1](n); } template<> void CUDA_solve<double>(int degree, int n, int rhs_cnt) { static device_fun_ii solvers[] = { solve_equation<double, 1 >, solve_equation<double, 2 >, solve_equation<double, 3 >, solve_equation<double, 4 >, solve_equation<double, 5 > }; solvers[degree - 1](n, rhs_cnt); } template<> double CUDA_error<double>(int degree, int n) { typedef double (*error_fun)(int); static error_fun calculators[] = { calculate_error<double, 1 >, calculate_error<double, 2 >, calculate_error<double, 3 >, calculate_error<double, 4 >, calculate_error<double, 5 > }; return calculators[degree - 1](n); } template<> void CUDA_print_result<double>(int degree, int n, std::ostream &ostr) { typedef void (*print_fun)(int, std::ostream &); static print_fun printers[] = { print_result<double, 1 >, print_result<double, 2 >, print_result<double, 3 >, print_result<double, 4 >, print_result<double, 5 > }; printers[degree - 1](n, ostr); } template<> void CUDA_debug<double>(int degree, int n) { typedef void (*print_fun)(int); static print_fun debuggers[] = { debug_device<double, 1 >, debug_device<double, 2 >, debug_device<double, 3 >, debug_device<double, 4 >, debug_device<double, 5 > }; debuggers[degree - 1](n); } // </editor-fold>
22f65ecdd9ee9c1ae48608298b49cd26ca5469fc.cu
#include <config.h> #include <CUDA_interface.h> #include <cuda_runtime.h> #include <device_launch_parameters.h> #include <float.h> #include <iostream> #include <fstream> #include <sstream> #include <cmath> #include <boost/format.hpp> #include <device/utils.cuh> #include <device/base_functions.cuh> #include <device/matrix_utils.cuh> #include "quadratures.h" #if __CUDA_ARCH__ >= 200 #define THREADS_PER_BLOCK 16 #else #define THREADS_PER_BLOCK 16 #endif __constant__ int QPC; __constant__ FEM_PRECISION Qp[8]; __constant__ FEM_PRECISION Qw[8]; __constant__ int QPC_e; __constant__ FEM_PRECISION Qp_e[8]; __constant__ FEM_PRECISION Qw_e[8]; __device__ FEM_PRECISION *d_B[2]; __device__ FEM_PRECISION *d_RHS; __device__ FEM_PRECISION *d_assembled; __device__ FEM_PRECISION *d_not_assembled; __device__ FEM_PRECISION *d_Nvals[2]; __device__ FEM_PRECISION *d_dNvals[2]; __device__ FEM_PRECISION *d_Nvals_err[2]; __device__ FEM_PRECISION *d_dNvals_err[2]; __device__ FEM_PRECISION *d_abssicas; __device__ FEM_PRECISION *d_fun_vals; __device__ FEM_PRECISION *d_der_vals; #define gpuAssert(ans) { gpuAssertCheck((ans), __FILE__, __LINE__); } inline void gpuAssertCheck(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { const char* msg = cudaGetErrorString(code); fprintf(stderr,"GPUassert: %s %s %d\n", msg, file, line); if (abort) exit(code); } } void check_error(const char* str, cudaError_t err_code) { if (err_code != ::cudaSuccess) std::cerr << str << " -- " << cudaGetErrorString(err_code) << "\n"; } /** * Exact solution * @param x * @return */ template<class T> inline T __device__ __host__ _u(T x) { return sin(15 * x) * cos(24 * x) * x; // return x * x; } // <editor-fold defaultstate="collapsed" desc="equation parameters"> template<class T> __device__ __host__ T _a(T x) { return sin(x); // return 1; } template<class T> __device__ __host__ T _b(T x) { return x; // return 0; } template<class T> __device__ __host__ T _c(T x) { return -x; // return 0; 
} template <class T> inline T __device__ __host__ _du(T x) { return (-sin(9 * x) + sin(39 * x) - 9 * x * cos(9 * x) + 39 * x * cos(39 * x)) / 2.0; // return 2 * x; } template <class T> __device__ __host__ inline T _adu(T x) { return _a(x) * _du(x); } template <class T> __device__ __host__ inline T _dadu(T x) { return (cos(x)*(-sin(9 * x) + sin(39 * x) - 9 * x * cos(9 * x) + 39 * x * cos(39 * x)) + 3 * sin(x)*(27 * x * sin(9 * x) - 507 * x * sin(39 * x) - 6 * cos(9 * x) + 26 * cos(39 * x))) / 2.0; // return 2; } template<class T> __device__ __host__ T _beta() { return 0; } template<class T> __device__ __host__ T _gamma() { return _adu<T > (1) + _beta<T > () * _u<T > (1); } template<class T> __device__ __host__ inline T _f(T x) { // return cos(M_PI * x) * cos(M_PI * y); return -(_dadu(x)) + _b(x) * _du(x) + _c(x) * _u(x); // return -2; } // </editor-fold> /** * Returns index that can be used for retrieving value of previously calculated * values of base functions (d_Nval) and their derivatives (d_dNval). 
* @param point_i - index of quadrature point [0, QUADRATURE_POINTS_CNT-1] * @param fun_i - index of a function [0, fun_cnt-1] * @param interval_i - number of interval * @param fun_cnt - total number of functions * @return index that can be used for d_Nvals and d_dNvals */ template<int qpc> __device__ inline int point_mem_idx(int point_i, int fun_i, int interval_i, int fun_cnt) { return (interval_i * qpc + point_i) * fun_cnt + fun_i; } template<int degree, class T> __device__ inline T get_N(int point_i, int fun_i, int fun_part, int _n) { return d_Nvals[degree & 1][point_mem_idx<degree + 1>(point_i, fun_i, fun_part, _n)]; } template<int degree, class T> __device__ inline T get_dN(int point_i, int fun_i, int interval_i, int _n) { return d_dNvals[degree & 1][point_mem_idx<degree + 1>(point_i, fun_i, interval_i, _n)]; } template<int degree, class T> __device__ inline T get_N_err(int point_i, int fun_i, int fun_part, int _n) { return d_Nvals_err[degree & 1][point_mem_idx<degree + 2>(point_i, fun_i, fun_part, _n)]; } template<int degree, class T> __device__ inline T get_dN_err(int point_i, int fun_i, int interval_i, int _n) { return d_dNvals_err[degree & 1][point_mem_idx<degree + 2>(point_i, fun_i, interval_i, _n)]; } template<int degree, class T> __device__ inline T fun_L(T x, int point_idx, int fun, int interval, int _n) { // return get_N<degree, T>(point_idx, fun, interval - fun, _n); return _f<T>(x) * get_N<degree, T>(point_idx, fun, interval - fun, _n); } template<int degree, class T> __device__ inline T fun_B(T x, int point_idx, int i, int j, int element_id, int _n) { return _a<T > (x) * get_dN<degree, T > (point_idx, i, element_id - i + degree, _n) * get_dN<degree, T > (point_idx, j, element_id - j + degree, _n) + _b<T > (x) * get_dN<degree, T > (point_idx, i, element_id - i + degree, _n) * get_N<degree, T > (point_idx, j, element_id - j + degree, _n) + _c<T > (x) * get_N<degree, T > (point_idx, i, element_id - i + degree, _n) * get_N<degree, T > (point_idx, j, 
element_id - j + degree, _n); } template<int degree, class T> __device__ T eval_L(int fun, int interval, int _n) { T sum(0); T aa, bb; T a = d_knot_vector[interval], b = d_knot_vector[interval + 1]; aa = (b - a) / 2.0; bb = (b + a) / 2.0; for (int i = 0; i < QPC; ++i) { sum += Qw[i] * fun_L<degree>(aa * Qp[i] + bb, i, fun, interval, _n); } return aa*sum; } /** * * @param i * @param j * @param element_idx * @param _n - number of functions * @return */ template<int degree, class T> __device__ inline T eval_B(int i, int j, int knot_i, int _n) { T sum(0); T aa, bb; T a = d_knot_vector[knot_i]; T b = d_knot_vector[knot_i + 1]; aa = (b - a) / 2.0; bb = (b + a) / 2.0; for (int idx = 0; idx < QPC; ++idx) { T x = aa * Qp[idx] + bb; sum += Qw[idx] * fun_B<degree > (x, idx, i, j, knot_i - degree, _n); } return aa*sum; } /** * Temporarily implemented only for degree 1, and _n being power of 2. * @param _c - front cell number (0,1,...) * @param _n - total number of fronts in one part * @param _p - number of part being evaluated (0,1,...) */ template<int degree, class T> __global__ void init_B(int _c, int _n, int _p) { // find number of front being initiated int n = blockDim.x * blockIdx.x + threadIdx.x; // is n greater than total number of fronts in one part? 
if (n >= _n) return; int element_id = n * (degree + 1) + _p; int y = _c / (degree + 1) + element_id; int x = _c % (degree + 1) + element_id; int idx = _n * (_p * (degree + 1)*(degree + 1) + _c) + n; if (y == 0) { if (x == 0) { d_B[0][0] = 1; } else { d_B[0][idx] = 0; } } else { d_B[0][idx] = eval_B<degree, T> (x, y, element_id + degree, _n * (degree + 1) + degree); // TODO uuu this is ugly } if (idx == _n * (degree + 1)*(degree + 1)*(degree + 1) - 1) d_B[0][idx] += _beta<T> (); } /** * Initializes RHS vector * @param Ndof number of basis functions */ template<int degree, class T> __global__ void init_RHS(int Ndof, int Nrhs) { int rhs_num = blockDim.x * blockIdx.x + threadIdx.x; int n = blockDim.y * blockIdx.y + threadIdx.y; if (n >= Ndof || rhs_num >= Nrhs) return; T x = 0; for (int part = 0; part <= degree; ++part) x += eval_L<degree, T>(n, n + part, Ndof); if (n==0) x = 0; if (n == Ndof - 1) x += _gamma<T > (); d_RHS[(n*Nrhs) + rhs_num] = x; } /** * * @param _n - number of functions */ template<int degree, int qpc, class T> __global__ void init_basis_functions_and_derivatives(int _n, T **N, T **dN) { int n = blockDim.x * blockIdx.x + threadIdx.x; if (n >= _n) return; #pragma unroll for (int i = 0; i < (degree + 1) * qpc; ++i) { N[0][i * _n + n] = 0; N[1][i * _n + n] = 0; dN[0][i * _n + n] = 0; dN[1][i * _n + n] = 0; } #pragma unroll for (int i = 0; i < qpc; ++i) { N[0][i * _n + n] = 1; } } template<class T> __device__ inline T interval(int a, int b) { return d_knot_vector[b] - d_knot_vector[a]; } template<class T> __device__ inline T interval(int a) { return interval<T > (a, a + 1); } template<int qpc, class T> __global__ void update_base(int _n, int idx, T **N, T **dN, T *Q_points) { int n = blockDim.x * blockIdx.x + threadIdx.x; // function number if (n >= _n) return; T h1 = interval<T > (n, n + idx); T h2 = interval<T > (n + 1, n + idx + 1); if (is_zero(h1)) { for (int i = 0; i < qpc; ++i) { // function N[idx & 1][point_mem_idx<qpc>(i, n, 0, _n)] = 0; 
//derivative dN[idx & 1][point_mem_idx<qpc>(i, n, 0, _n)] = 0; } } else { for (int i = 0; i < qpc; ++i) { T x = (Q_points[i] / 2.0 + 0.5) * interval<T > (n); // function N[idx & 1][point_mem_idx<qpc>(i, n, 0, _n)] = x * N[(~idx)&1][point_mem_idx<qpc>(i, n, 0, _n)] / h1; //derivative dN[idx & 1][point_mem_idx<qpc>(i, n, 0, _n)] = (x * dN[(~idx)&1][point_mem_idx<qpc>(i, n, 0, _n)] + N[(~idx)&1][point_mem_idx<qpc>(i, n, 0, _n)]) / h1; } } for (int j = 1; j < idx; ++j) { for (int i = 0; i < qpc; ++i) { T sum_fun, sum_der; T x = (Q_points[i] / 2.0 + 0.5) * interval<T > (n + j) + d_knot_vector[n + j]; if (is_zero(h1)) { sum_fun = 0; sum_der = 0; } else { sum_fun = (x - d_knot_vector[n]) * N[(~idx)&1][point_mem_idx<qpc>(i, n, j, _n)] / h1; sum_der = ((x - d_knot_vector[n]) * dN[(~idx)&1][point_mem_idx<qpc>(i, n, j, _n)] + N[(~idx)&1][point_mem_idx<qpc>(i, n, j, _n)]) / h1; } if (is_zero(h2)) { sum_fun += 0; sum_der += 0; } else { sum_fun += (d_knot_vector[n + idx + 1] - x) * N[(~idx)&1][point_mem_idx<qpc>(i, n + 1, j - 1, _n)] / h2; sum_der += ((d_knot_vector[n + idx + 1] - x) * dN[(~idx)&1][point_mem_idx<qpc>(i, n + 1, j - 1, _n)] - N[(~idx)&1][point_mem_idx<qpc>(i, n + 1, j - 1, _n)]) / h2; } N[idx & 1][point_mem_idx<qpc>(i, n, j, _n)] = sum_fun; dN[idx & 1][point_mem_idx<qpc>(i, n, j, _n)] = sum_der; } } if (is_zero(h2)) { for (int i = 0; i < qpc; ++i) { N[idx & 1][point_mem_idx<qpc>(i, n, idx, _n)] = 0; dN[idx & 1][point_mem_idx<qpc>(i, n, idx, _n)] = 0; } } else { for (int i = 0; i < qpc; ++i) { T x = (Q_points[i] / 2.0 + 0.5) * interval<T > (n + idx) + d_knot_vector[n + idx]; N[idx & 1][point_mem_idx<qpc>(i, n, idx, _n)] = (d_knot_vector[n + idx + 1] - x) * N[(~idx)&1][point_mem_idx<qpc>(i, n + 1, idx - 1, _n)] / h2; dN[idx & 1][point_mem_idx<qpc>(i, n, idx, _n)] = ((d_knot_vector[n + idx + 1] - x) * dN[(~idx)&1][point_mem_idx<qpc>(i, n + 1, idx - 1, _n)] - N[(~idx)&1][point_mem_idx<qpc>(i, n + 1, idx - 1, _n)]) / h2; } } } template<int degree, class T> __device__ 
inline void store_new_matrix(int next_merges_cnt, T BB[][2 * degree + 1][2 * degree + 1]) { int divisor = min(blockDim.x / 2, next_merges_cnt); int gm_idx = blockIdx.x * blockDim.x / 2; if (threadIdx.x < divisor) { for (int x = 0; x < 2 * degree; ++x) for (int y = 0; y < 2 * degree; ++y) d_B[1][(x * (2 * degree) + y) * next_merges_cnt + gm_idx + threadIdx.x] = BB[2 * threadIdx.x][x + 1][y + 1]; } else { int tid = threadIdx.x - divisor; int group2_offset = next_merges_cnt * (2 * degree) * (2 * degree); for (int x = 0; x < 2 * degree; ++x) for (int y = 0; y < 2 * degree; ++y) d_B[1][group2_offset + (x * (2 * degree) + y) * next_merges_cnt + gm_idx + tid] = BB[2 * tid + 1][x + 1][y + 1]; } } template<int degree, class T> __device__ inline void store_new_matrix2(int _n, T BB[][3 * degree][3 * degree], int step) { int divisor = min(blockDim.x / 2, _n); int gm_idx = blockIdx.x * blockDim.x / 2; if (threadIdx.x < divisor) { for (int x = 0; x < 2 * degree; ++x) { for (int y = 0; y < 2 * degree; ++y) { d_B[(~step)&1][(x * (2 * degree) + y) * _n + gm_idx + threadIdx.x] = BB[2 * threadIdx.x][x + degree][y + degree]; } } } else { int tid = threadIdx.x - divisor; int group2_offset = _n * (2 * degree) * (2 * degree); for (int x = 0; x < 2 * degree; ++x) { for (int y = 0; y < 2 * degree; ++y) { d_B[(~step)&1][group2_offset + (x * (2 * degree) + y) * _n + gm_idx + tid] = BB[2 * tid + 1][x + degree][y + degree]; } } } } /** * @param merges_cnt - number of merges i.e. 
total number of elements / (degree + 1) */ template<int degree, class T, int TPB> __global__ void first_merge(int merges_cnt) { __shared__ T BB[TPB][2 * degree + 1][2 * degree + 1]; T assembled[2 * degree + 1]; T X; const int n = blockDim.x * blockIdx.x + threadIdx.x; if (n >= merges_cnt) return; // initializing shared memory for (int x = 0; x < 2 * degree + 1; ++x) for (int y = 0; y < 2 * degree + 1; ++y) BB[threadIdx.x][x][y] = T(0); // load data to shared memory for (int i = 0; i < degree + 1; ++i) for (int x = 0; x < degree + 1; ++x) for (int y = 0; y < degree + 1; ++y) BB[threadIdx.x][x + i][y + i] += d_B[0][((i * (degree + 1) + x) * (degree + 1) + y) * merges_cnt + n]; // pivoting pivot_rows_cyclic<degree>(BB); pivot_columns_cyclic<degree>(BB); // calculate first row X = BB[threadIdx.x][0][0]; for (int i = 1; i < 2 * degree + 1; ++i) assembled[i] = BB[threadIdx.x][0][i] / X; assembled[0] = X; // store first row which is already factorized for (int i = 0; i < 2 * degree + 1; ++i) d_assembled[i * merges_cnt + n] = assembled[i]; // store first column needed for forward substitution for (int i = 1; i <= 2 * degree; ++i) d_not_assembled[(i-1) * merges_cnt + n] = BB[threadIdx.x][i][0]; // elimination for (int i = 1; i < 2 * degree + 1; ++i) { T lead = BB[threadIdx.x][i][0]; for (int j = 1; j <= 2 * degree; ++j) BB[threadIdx.x][i][j] -= assembled[j] * lead; } __syncthreads(); store_new_matrix<degree, T > (merges_cnt / 2, BB); } /** * Merges fronts of size (2*degree)x(2*degree) * @param merges_cnt number of merging processes in this step * @param offset offset for factorized rows */ template<int degree, class T, int TPB> __global__ void merge(int merges_cnt, int assembled_offset, int not_assembled_offset, int step) { __shared__ T BB[TPB][3 * degree][3 * degree]; T assembled[3 * degree]; const int n = blockDim.x * blockIdx.x + threadIdx.x; const int row_len = 3 * degree; if (n >= merges_cnt) return; // initializing shared memory for (int x = 0; x < 3 * degree; ++x) 
for (int y = 0; y < 3 * degree; ++y) BB[threadIdx.x][x][y] = T(0); // load data to shared memory for (int x = 0; x < 2 * degree; ++x) for (int y = 0; y < 2 * degree; ++y) { BB[threadIdx.x][x][y] += d_B[step&1][(x * 2 * degree + y) * merges_cnt + n]; BB[threadIdx.x][x + degree][y + degree] += d_B[step&1][(2 * degree)* (2 * degree) * merges_cnt + (x * 2 * degree + y) * merges_cnt + n]; } // pivoting for (int i = 0; i < degree; ++i) for (int x = 0; x < row_len; ++x) { T tmp = BB[threadIdx.x][i][x]; BB[threadIdx.x][i][x] = BB[threadIdx.x][i + degree][x]; BB[threadIdx.x][i + degree][x] = tmp; } for (int i = 0; i < degree; ++i) for (int x = 0; x < row_len; ++x) { T tmp = BB[threadIdx.x][x][i]; BB[threadIdx.x][x][i] = BB[threadIdx.x][x][i + degree]; BB[threadIdx.x][x][i + degree] = tmp; } // elimination (we eliminate |degree| rows) for (int i = 0; i < degree; ++i) { T X = BB[threadIdx.x][i][i]; for (int j = i + 1; j < row_len; ++j) assembled[j] = BB[threadIdx.x][i][j] / X; for (int j = 0; j <= i; ++j) assembled[j] = BB[threadIdx.x][i][j]; // store i-th row in global memory for (int j = 0; j < row_len; ++j) d_assembled[assembled_offset + (i * row_len + j) * merges_cnt + n] = assembled[j]; // store i-th column in global memory for (int j = 0; j < 2 * degree; ++j) d_not_assembled[not_assembled_offset + (i * (2 * degree) + j) * merges_cnt + n] = BB[threadIdx.x][j + degree][i]; // eliminate i-th row for (int x = i + 1; x < 3 * degree; ++x) { T lead = BB[threadIdx.x][x][i]; for (int y = i + 1; y < 3 * degree; ++y) { BB[threadIdx.x][x][y] -= assembled[y] * lead; } } } // store new matrices in global memory __syncthreads(); store_new_matrix2<degree, T > (merges_cnt / 2, BB, step); } template<int degree, class T> __global__ __launch_bounds__(1) void last_merge(int offset, int step) { __shared__ T BB[3 * degree][3 * degree]; for (int x = 0; x < 3 * degree; ++x) for (int y = 0; y < 3 * degree; ++y) BB[x][y] = 0; // load data from global memory for (int x = 0; x < 2 * degree; ++x) 
for (int y = 0; y < 2 * degree; ++y) BB[x][y] = d_B[step&1][x * 2 * degree + y]; for (int x = 0; x < 2 * degree; ++x) for (int y = 0; y < 2 * degree; ++y) BB[x + degree][y + degree] += d_B[step&1][(2 * degree) * (2 * degree) + x * (2 * degree) + y]; for (int i = 0; i < 3 * degree; ++i) { T lead = BB[i][i]; // BB[i][i] = 1; it is implicitly considered to be 1 later for (int x = i + 1; x < 3 * degree; ++x) BB[i][x] /= lead; for (int y = i + 1; y < 3 * degree; ++y) { lead = BB[y][i]; // BB[y][i] = 0; it is implicitly considered to be 0 later for (int x = i + 1; x < 3 * degree; ++x) BB[y][x] -= BB[i][x] * lead; } } // row first order for (int y=0 ; y<3*degree ; ++y) for (int x=0 ; x<3*degree ; ++x) { d_assembled[offset + (y * (3 * degree) + x)] = BB[y][x]; } } template<int degree, class T, int tpb> __global__ void first_forward_substitution(int merges_cnt, int rhs_cnt) { int merge_num = blockDim.y * blockIdx.y + threadIdx.y; int rhs_num = blockDim.x * blockIdx.x + threadIdx.x; if (merge_num >= merges_cnt || rhs_num >= rhs_cnt) return; vertical_vec<T> RHS(d_RHS + rhs_num, rhs_cnt); __shared__ T s_div[tpb]; T &div = s_div[threadIdx.y]; if (threadIdx.x == 0) div = d_assembled[merge_num]; __syncthreads(); int row_num = (degree + 1) * (merge_num + 1) - 1; RHS[row_num] /= div; } template<int degree, class T, int tpb> __global__ void first_forward_substitution_update_left(int merges_cnt, int rhs_cnt) { int merge_num = blockDim.y * blockIdx.y + threadIdx.y; int rhs_num = blockDim.x * blockIdx.x + threadIdx.x; if (merge_num >= merges_cnt || rhs_num >= rhs_cnt) return; __shared__ T s_B[tpb][degree]; T *B = s_B[threadIdx.y]; if (threadIdx.x == 0) { #pragma unroll for (int i=0 ; i<degree ; ++i) B[i] = d_not_assembled[(i * merges_cnt) + merge_num]; } __syncthreads(); vertical_vec<T> RHS(d_RHS + rhs_num, rhs_cnt); int row_num = (degree + 1) * (merge_num + 1) - 1; T x = RHS[row_num]; #pragma unroll for (int i=0 ; i<degree ; ++i) RHS[row_num - degree + i] -= B[i] * x; } template<int 
degree, class T, int tpb> __global__ void first_forward_substitution_update_right(int merges_cnt, int rhs_cnt) { int merge_num = blockDim.y * blockIdx.y + threadIdx.y; int rhs_num = blockDim.x * blockIdx.x + threadIdx.x; if (merge_num >= merges_cnt || rhs_num >= rhs_cnt) return; __shared__ T s_B[tpb][degree]; T *B = s_B[threadIdx.y]; if (threadIdx.x == 0) { #pragma unroll for (int i=degree ; i< 2 * degree ; ++i) B[i-degree] = d_not_assembled[(i * merges_cnt) + merge_num]; } __syncthreads(); vertical_vec<T> RHS(d_RHS + rhs_num, rhs_cnt); int row_num = (degree + 1) * (merge_num + 1) - 1; T x = RHS[row_num]; #pragma unroll for (int i=0 ; i<degree ; ++i) RHS[row_num + i + 1] -= B[i] * x; } template<int degree, class T, int tpb> __global__ void forward_substitution(int merges_cnt, int rhs_cnt, int offset, int step) { int merge_num = blockDim.y * blockIdx.y + threadIdx.y; int rhs_num = blockDim.x * blockIdx.x + threadIdx.x; if (merge_num >= merges_cnt || rhs_num >= rhs_cnt) return; vertical_vec<T> global_RHS(d_RHS + rhs_num, rhs_cnt); T RHS[degree]; int stride = (1 << step) * (degree + 1); int row_len = 3 * degree; int g_idx = (stride >> 1) + merge_num * stride; __shared__ T s_B[tpb][degree][degree]; T (*B)[degree] = s_B[threadIdx.y]; if (threadIdx.x == 0) for (int i = 0; i < degree; ++i) for (int j = 0; j <= i; ++j) B[i][j] = d_assembled[offset + (i * row_len + j) * merges_cnt + merge_num]; __syncthreads(); for (int i = 0; i < degree; ++i) RHS[i] = global_RHS[g_idx + i]; for (int i = 0; i < degree; ++i) { RHS[i] /= B[i][i]; for (int j = i + 1; j < degree; ++j) RHS[j] -= RHS[i] * B[j][i]; } for (int i = 0; i < degree; ++i) global_RHS[g_idx + i] = RHS[i]; } template<int degree, class T, int tpb> __global__ void forward_substitution_update_left(int merges_cnt, int rhs_cnt, int not_assembled_offset, int step) { int merge_num = blockDim.y * blockIdx.y + threadIdx.y; int rhs_num = blockDim.x * blockIdx.x + threadIdx.x; if (merge_num >= merges_cnt || rhs_num >= rhs_cnt) 
return; vertical_vec<T> global_RHS(d_RHS + rhs_num, rhs_cnt); T RHS[2 * degree]; int stride = (1 << step) * (degree + 1); int middle_idx = (stride >> 1) + merge_num * stride; __shared__ T s_B[tpb][degree][degree]; T (*B)[degree] = s_B[threadIdx.y]; if (threadIdx.x == 0) for (int col = 0; col < degree; ++col) for (int j = 0; j < degree; ++j) B[j][col] = d_not_assembled[not_assembled_offset + (col * (2 * degree) + j) * merges_cnt + merge_num]; __syncthreads(); int small_stride = stride >> 1; for (int i = 0; i < degree; ++i) RHS[i] = global_RHS[middle_idx + i]; for (int i = 0; i < degree; ++i) RHS[i + degree] = global_RHS[middle_idx - small_stride + i]; for (int i = 0; i < degree; ++i) for (int j = 0; j < degree; ++j) RHS[j + degree] -= RHS[i] * B[j][i]; for (int i = 0; i < degree; ++i) global_RHS[middle_idx - small_stride + i] = RHS[i + degree]; } template<int degree, class T, int tpb> __global__ void forward_substitution_update_right(int merges_cnt, int rhs_cnt, int not_assembled_offset, int step) { int merge_num = blockDim.y * blockIdx.y + threadIdx.y; int rhs_num = blockDim.x * blockIdx.x + threadIdx.x; if (merge_num >= merges_cnt || rhs_num >= rhs_cnt) return; vertical_vec<T> global_RHS(d_RHS + rhs_num, rhs_cnt); T RHS[2 * degree]; int stride = (1 << step) * (degree + 1); int middle_idx = (stride >> 1) + merge_num * stride; __shared__ T s_B[tpb][degree][degree]; T (*B)[degree] = s_B[threadIdx.y]; if (threadIdx.x == 0) for (int col = 0; col < degree; ++col) for (int j = degree; j < 2 * degree; ++j) B[j - degree][col] = d_not_assembled[not_assembled_offset + (col * (2 * degree) + j) * merges_cnt + merge_num]; __syncthreads(); int small_stride = stride >> 1; for (int i = 0; i < degree; ++i) RHS[i] = global_RHS[middle_idx + i]; for (int i = 0; i < degree; ++i) RHS[i + degree] = global_RHS[middle_idx + small_stride + i]; for (int i = 0; i < degree; ++i) for (int j = 0; j < degree; ++j) RHS[j + degree] -= RHS[i] * B[j][i]; for (int i = 0; i < degree; ++i) 
global_RHS[middle_idx + small_stride + i] = RHS[i + degree]; } template<int degree, class T> __global__ void last_forward_first_backward_substitution(int ne, int rhs_cnt, int offset) { int rhs_num = blockDim.x * blockIdx.x + threadIdx.x; if (rhs_num >= rhs_cnt) return; vertical_vec<T> global_RHS(d_RHS + rhs_num, rhs_cnt); T RHS[3 * degree]; __shared__ T BB[3 * degree][3 * degree]; if (threadIdx.x == 0) // row first order for (int y = 0; y < 3 * degree; ++y) for (int x = 0; x < 3 * degree; ++x) BB[y][x] = d_assembled[offset + (y * (3 * degree) + x)]; __syncthreads(); for (int i = 0; i < degree; ++i) RHS[i] = global_RHS[i]; for (int i = degree; i < 2 * degree; ++i) RHS[i] = global_RHS[ne / 2 - degree + i]; for (int i = 2 * degree; i < 3 * degree; ++i) RHS[i] = global_RHS[ne - 2 * degree + i]; for (int i = 0; i < 3 * degree; ++i) { RHS[i] /= BB[i][i]; for (int j = i + 1; j < 3 * degree; ++j) RHS[j] -= RHS[i] * BB[j][i]; } // we skip last row as it is already solved for (int i = 3 * degree - 2; i >= 0 ; --i) for (int j = i + 1 ; j < 3 * degree ; ++j) RHS[i] -= BB[i][j] * RHS[j]; for (int i = 0; i < degree; ++i) global_RHS[i] = RHS[i]; for (int i = degree; i < 2 * degree; ++i) global_RHS[ne / 2 - degree + i] = RHS[i]; for (int i = 2 * degree; i < 3 * degree; ++i) global_RHS[ne - 2 * degree + i] = RHS[i]; } template<int degree, class T, int tpb> __global__ void backward_substitution(int merges_cnt, int rhs_cnt, int offset, int step) { int merge_num = blockDim.y * blockIdx.y + threadIdx.y; int rhs_num = blockDim.x * blockIdx.x + threadIdx.x; if (merge_num >= merges_cnt || rhs_num >= rhs_cnt) return; vertical_vec<T> global_RHS(d_RHS + rhs_num, rhs_cnt); T RHS[3 * degree]; int stride = (1 << step) * (degree + 1); int row_len = 3 * degree; int g_idx = (stride >> 1) + merge_num * stride; __shared__ T s_B[tpb][degree][3 * degree]; T (*B)[3 * degree] = s_B[threadIdx.y]; if (threadIdx.x == 0) for (int i = 0; i < degree; ++i) for (int j = i+1; j < 3 * degree; ++j) B[i][j] = 
d_assembled[offset + (i * row_len + j) * merges_cnt + merge_num]; __syncthreads(); stride >>= 1; for (int i = 0; i < degree; ++i) RHS[i] = global_RHS[g_idx + i]; for (int i = 0; i < degree; ++i) RHS[i + degree] = global_RHS[g_idx - stride + i]; for (int i = 0; i < degree; ++i) RHS[i + 2 * degree] = global_RHS[g_idx + stride + i]; for (int i = degree - 1; i >= 0; --i) for (int j = i + 1; j < 3 * degree; ++j) RHS[i] -= RHS[j] * B[i][j]; for (int i = 0; i < degree; ++i) global_RHS[g_idx + i] = RHS[i]; } /** * @param _n */ template<int degree, class T, int tpb> __global__ void last_backward_substitution(int merges_cnt, int rhs_cnt) { int merge_num = blockDim.y * blockIdx.y + threadIdx.y; int rhs_num = blockDim.x * blockIdx.x + threadIdx.x; if (merge_num >= merges_cnt || rhs_num >= rhs_cnt) return; __shared__ T s_B[tpb][2 * degree]; T *B = s_B[threadIdx.y]; if (threadIdx.x == 0) for (int i = 1; i <= 2 * degree; ++i) B[i - 1] = d_assembled[i * merges_cnt + merge_num]; __syncthreads(); vertical_vec<T> global_RHS(d_RHS + rhs_num, rhs_cnt); unsigned row_num = (degree + 1) * (merge_num + 1) - 1; T RHS = global_RHS[row_num]; for (int i = -degree; i < 0; ++i) RHS -= B[i + degree] * global_RHS[row_num + i]; for (int i = 1; i <= degree; ++i) RHS -= B[i + degree - 1] * global_RHS[row_num + i]; global_RHS[row_num] = RHS; } template<int degree, class T> __global__ void evaluate(int _n, T* x, int length) { int n = blockDim.x * blockIdx.x + threadIdx.x; if (n >= length) return; T sum = 0; T XX = x[n]; for (int i = 0; i < _n + degree; ++i) { sum += d_RHS[i] * N<degree, T > (XX, i); } x[n] = sum; } template<int degree, class T> __device__ T evaluate(T x, int elements_cnt) { T sum = 0; for (int i = 0; i < elements_cnt + degree; ++i) { sum += d_RHS[i] * N<degree > (x, i); } return sum; } template<int degree, class T, int qpc> __global__ void calculate_norm(int elm_cnt, T *sum) { int n = blockDim.x * blockIdx.x + threadIdx.x; if (n >= elm_cnt) return; T tmp = 0; for (int i = 0; i < qpc; 
++i) { tmp += Qw_e[i] * pow(_u(d_abssicas[i * elm_cnt + n]) - d_fun_vals[i * elm_cnt + n], 2); } for (int i = 0; i < qpc; ++i) { tmp += Qw_e[i] * pow((_du(d_abssicas[i * elm_cnt + n]) - d_der_vals[i * elm_cnt + n]), 2); } sum[n] = interval<T>(n + degree) * tmp / 2.0; } template<int degree, class T> __global__ void calculate_function_in_quadrature_points(int elements_cnt, int Nrhs) { int n = blockDim.x * blockIdx.x + threadIdx.x; const int rhs_num = 0; if (n >= elements_cnt) return; for (int i = 0; i < QPC_e; ++i) for (int j = 0; j <= degree; ++j) d_fun_vals[i * elements_cnt + n] += d_RHS[(n + j)*Nrhs + rhs_num] * get_N_err<degree, T > (i, n + j, degree - j, basis_funs_cnt<degree > (elements_cnt)); } template<int degree, class T> __global__ void calculate_derivative_in_quadrature_points(int elements_cnt, int Nrhs) { const int rhs_num = 0; int n = blockDim.x * blockIdx.x + threadIdx.x; if (n >= elements_cnt) return; for (int i = 0; i < QPC_e; ++i) for (int j = 0; j <= degree; ++j) d_der_vals[i * elements_cnt + n] += d_RHS[(n + j)*Nrhs + rhs_num] * get_dN_err<degree, T > (i, n + j, degree - j, basis_funs_cnt<degree > (elements_cnt)); } template<int degree, class T> __global__ void calculate_abscissas(int elements_cnt) { int n = blockDim.x * blockIdx.x + threadIdx.x; if (n >= elements_cnt) return; T a = d_knot_vector[n + degree], b = d_knot_vector[n + degree + 1]; T aa = (b - a) / 2.0; T bb = (b + a) / 2.0; for (int i = 0; i < QPC_e; ++i) { d_abssicas[i * elements_cnt + n] = aa * Qp_e[i] + bb; } } template<class T> T t_gen(int i, int N, int degree) { if (i <= degree) return T(0); if (i < N + degree) return T(i - degree) / N; return T(1); } template<class T> struct vector_scattered { T *p; int skip; vector_scattered (T *p, int skip) : p(p), skip(skip) { } T & operator[](int idx) { return *(p + idx * skip); } }; template<class T, int degree> void debug_device(int n); template<class T, int degree> void print_local(int n); template<int degree, int qpc, class T> void 
calculate_basis(int n, T **N, T **dN, T *Q) { int tpb = 256; int block_count = ((n + degree) / tpb) + ((n + degree) % tpb ? 1 : 0); init_basis_functions_and_derivatives<degree, qpc><<<block_count, tpb >>>(basis_funs_cnt<degree>(n), N, dN); check_error("init_base_functions_and_derivatives", cudaGetLastError()); for (int i = 1; i <= degree; ++i) { update_base<qpc><<<block_count, tpb>>>(n + degree, i, N, dN, Q); check_error("update_base", cudaGetLastError()); } } template<class T, int degree> void calculate_basis_solver(int n) { void *tmp; T **N, **dN, *Q; gpuAssert(cudaGetSymbolAddress(&tmp, d_Nvals)); N = reinterpret_cast<T**>(tmp); gpuAssert(cudaGetSymbolAddress(&tmp, d_dNvals)); dN = reinterpret_cast<T**>(tmp); gpuAssert(cudaGetSymbolAddress(&tmp, Qp)); Q = reinterpret_cast<T*>(tmp); calculate_basis<degree, degree + 1>(n, N, dN, Q); } template<class T, int degree> void calculate_basis_error(int n) { void *tmp; T **N, **dN, *Q; gpuAssert(cudaGetSymbolAddress(&tmp, d_Nvals_err)); N = reinterpret_cast<T**>(tmp); gpuAssert(cudaGetSymbolAddress(&tmp, d_dNvals_err)); dN = reinterpret_cast<T**>(tmp); gpuAssert(cudaGetSymbolAddress(&tmp, Qp_e)); Q = reinterpret_cast<T*>(tmp); // error_QPC = degree + 2 calculate_basis<degree, degree + 2>(n, N, dN, Q); } /** * Allocates necessary memory on device * @param Ne - number of elements in [0, 1] interval * @param Nrhs - number of right hand sides */ template<class T, int degree> void prepare_device(int Ne, int Nrhs) { T *tmp, *t; T * dev_ptrs[2]; const int solver_QPC = degree + 1; const int error_QPC = degree + 2; // initialize quadratures for solver { T *p = get_gauss_legendre_points<T, solver_QPC>(); T *w = get_gauss_legendre_weights<T, solver_QPC>(); gpuAssert(cudaMemcpyToSymbol(QPC, &solver_QPC, sizeof (solver_QPC))); gpuAssert(cudaMemcpyToSymbol(Qp, p, sizeof (T) * solver_QPC)); gpuAssert(cudaMemcpyToSymbol(Qw, w, sizeof (T) * solver_QPC)); } // initialize quadratures for error calculation { T *p = 
get_gauss_legendre_points<T, error_QPC>(); T *w = get_gauss_legendre_weights<T, error_QPC>(); gpuAssert(cudaMemcpyToSymbol(QPC_e, &error_QPC, sizeof (error_QPC))); gpuAssert(cudaMemcpyToSymbol(Qp_e, p, sizeof (T) * error_QPC)); gpuAssert(cudaMemcpyToSymbol(Qw_e, w, sizeof (T) * error_QPC)); } t = new T[Ne + 2 * degree + 1]; for (int i = 0; i <= Ne + 2 * degree; ++i) t[i] = t_gen<T > (i, Ne, degree); int size; int mem_size=0, total_mem_size=0; // Allocate knot vector gpuAssert(cudaMalloc(&tmp, sizeof (T) * knots_cnt<degree > (Ne))); gpuAssert(cudaMemcpy(tmp, t, sizeof (T) * knots_cnt<degree > (Ne), cudaMemcpyHostToDevice)); gpuAssert(cudaMemcpyToSymbol(d_knot_vector, &tmp, sizeof (tmp))); delete[] t; // Allocate fronts (B part) size = std::max(Ne * (degree + 1) * (degree + 1), (Ne / (degree + 1)) * (2 * degree) * (2 * degree)); gpuAssert(cudaMalloc(&dev_ptrs[0], sizeof (T) * size)); gpuAssert(cudaMalloc(&dev_ptrs[1], sizeof (T) * size)); gpuAssert(cudaMemcpyToSymbol(d_B, &dev_ptrs, sizeof(dev_ptrs))); // allocate RHS gpuAssert(cudaMalloc(&tmp, Nrhs * sizeof (T) * basis_funs_cnt<degree>(Ne))); gpuAssert(cudaMemcpyToSymbol(d_RHS, &tmp, sizeof (tmp))); // allocate d_assembled gpuAssert(cudaMalloc(&tmp, sizeof (T) * basis_funs_cnt<degree>(Ne) * 3 * degree)); gpuAssert(cudaMemcpyToSymbol(d_assembled, &tmp, sizeof (tmp))); // allocate d_not_assembled { int M = Ne / (degree + 1); gpuAssert(cudaMalloc(&tmp, sizeof (T) * 2 * M * (degree * degree + degree))); gpuAssert(cudaMemcpyToSymbol(d_not_assembled, &tmp, sizeof (tmp))); } // Allocate matrices for accumulative base function evaluation // functions gpuAssert(cudaMalloc(&dev_ptrs[0], sizeof (T) * basis_funs_cnt<degree > (Ne) * (degree + 1) * solver_QPC)); gpuAssert(cudaMalloc(&dev_ptrs[1], sizeof (T) * basis_funs_cnt<degree > (Ne) * (degree + 1) * solver_QPC)); gpuAssert(cudaMemcpyToSymbol(d_Nvals, dev_ptrs, sizeof (dev_ptrs))); // derivatives gpuAssert(cudaMalloc(&dev_ptrs[0], sizeof (T) * basis_funs_cnt<degree > (Ne) * 
(degree + 1) * solver_QPC)); gpuAssert(cudaMalloc(&dev_ptrs[1], sizeof (T) * basis_funs_cnt<degree > (Ne) * (degree + 1) * solver_QPC)); gpuAssert(cudaMemcpyToSymbol(d_dNvals, dev_ptrs, sizeof (dev_ptrs))); // FOR ERROR CALCULATION // functions mem_size = sizeof (T) * basis_funs_cnt<degree > (Ne) * (degree + 1) * error_QPC; total_mem_size += mem_size; gpuAssert(cudaMalloc(&dev_ptrs[0], mem_size)); total_mem_size += mem_size; gpuAssert(cudaMalloc(&dev_ptrs[1], mem_size)); gpuAssert(cudaMemcpyToSymbol(d_Nvals_err, dev_ptrs, sizeof (dev_ptrs))); // derivatives mem_size = sizeof (T) * basis_funs_cnt<degree > (Ne) * (degree + 1) * error_QPC; total_mem_size += mem_size; gpuAssert(cudaMalloc(&dev_ptrs[0], mem_size)); total_mem_size += mem_size; gpuAssert(cudaMalloc(&dev_ptrs[1], mem_size)); gpuAssert(cudaMemcpyToSymbol(d_dNvals_err, dev_ptrs, sizeof (dev_ptrs))); // Allocate space for result gpuAssert(cudaMalloc(&tmp, sizeof (T) * error_QPC * Ne)); gpuAssert(cudaMemset(tmp, 0, sizeof (T) * error_QPC * Ne)); gpuAssert(cudaMemcpyToSymbol(d_fun_vals, &tmp, sizeof (tmp))); gpuAssert(cudaMalloc(&tmp, sizeof (T) * error_QPC * Ne)); gpuAssert(cudaMemset(tmp, 0, sizeof (T) * error_QPC * Ne)); gpuAssert(cudaMemcpyToSymbol(d_der_vals, &tmp, sizeof (tmp))); gpuAssert(cudaMalloc(&tmp, sizeof (T) * error_QPC * Ne)); gpuAssert(cudaMemcpyToSymbol(d_abssicas, &tmp, sizeof (tmp))); cudaThreadSynchronize(); } /** * * @param n number of elements */ template<int degree, class T> void prepare_result(int n, int Nrhs) { int tpb = 256; int block_cnt = n / tpb + (n % tpb ? 
1 : 0); calculate_abscissas<degree, T> << <block_cnt, tpb >> >(n); check_error("calculate_abscissas", cudaGetLastError()); calculate_function_in_quadrature_points<degree, T> << <block_cnt, tpb >> >(n, Nrhs); check_error("calculate_function_in_quadrature_points", cudaGetLastError()); calculate_derivative_in_quadrature_points<degree, T> << <block_cnt, tpb >> >(n, Nrhs); check_error("calculate_derivative_in_quadrature_points", cudaGetLastError()); } void cleanup_device() { void *tmp; void *dev_ptrs[2]; gpuAssert(cudaMemcpyFromSymbol(&dev_ptrs, d_B, sizeof (dev_ptrs))); gpuAssert(cudaFree(dev_ptrs[0])); gpuAssert(cudaFree(dev_ptrs[1])); gpuAssert(cudaMemcpyFromSymbol(&tmp, d_knot_vector, sizeof (tmp))); gpuAssert(cudaFree(tmp)); gpuAssert(cudaMemcpyFromSymbol(&tmp, d_RHS, sizeof (tmp))); gpuAssert(cudaFree(tmp)); gpuAssert(cudaMemcpyFromSymbol(&tmp, d_assembled, sizeof (tmp))); gpuAssert(cudaFree(tmp)); gpuAssert(cudaMemcpyFromSymbol(&tmp, d_not_assembled, sizeof (tmp))); gpuAssert(cudaFree(tmp)); // Free matrices for accumulative base function evaluation // functions gpuAssert(cudaMemcpyFromSymbol(dev_ptrs, d_Nvals, sizeof (dev_ptrs))); gpuAssert(cudaFree(dev_ptrs[0])); gpuAssert(cudaFree(dev_ptrs[1])); // derivatives gpuAssert(cudaMemcpyFromSymbol(dev_ptrs, d_dNvals, sizeof (dev_ptrs))); gpuAssert(cudaFree(dev_ptrs[0])); gpuAssert(cudaFree(dev_ptrs[1])); // Free result memory gpuAssert(cudaMemcpyFromSymbol(&tmp, d_fun_vals, sizeof (tmp))); gpuAssert(cudaFree(tmp)); gpuAssert(cudaMemcpyFromSymbol(&tmp, d_der_vals, sizeof (tmp))); gpuAssert(cudaFree(tmp)); gpuAssert(cudaMemcpyFromSymbol(&tmp, d_abssicas, sizeof (tmp))); gpuAssert(cudaFree(tmp)); // FOR ERROR CALCULATION // functions gpuAssert(cudaMemcpyFromSymbol(dev_ptrs, d_Nvals_err, sizeof (dev_ptrs))); gpuAssert(cudaFree(dev_ptrs[0])); gpuAssert(cudaFree(dev_ptrs[1])); // derivatives gpuAssert(cudaMemcpyFromSymbol(dev_ptrs, d_dNvals_err, sizeof (dev_ptrs))); gpuAssert(cudaFree(dev_ptrs[0])); 
gpuAssert(cudaFree(dev_ptrs[1])); } template<class T, int degree> void init_fronts(int Ne, int Nrhs) { calculate_basis_solver<T, degree>(Ne); check_error("calculate_basis_solver", cudaGetLastError()); calculate_basis_error<T, degree>(Ne); check_error("calculate_basis_error", cudaGetLastError()); int N = Ne / (degree + 1); int threads_per_block = 32; int block_count = div_ceil(N, threads_per_block); for (int part = 0; part <= degree; ++part) { for (int i = 0; i < (degree + 1)*(degree + 1); ++i) { init_B<degree, T><<<block_count, threads_per_block>>>(i, N, part); check_error("B", cudaGetLastError()); } } const int Ndof = basis_funs_cnt<degree>(Ne); int threads_per_block_per_rhs = 8; int rhs_per_block = 16; int blocks_per_rhs = div_ceil(Ndof, threads_per_block_per_rhs); int rhs_blocks = div_ceil(Nrhs, rhs_per_block); dim3 b_grid(rhs_blocks, blocks_per_rhs); dim3 t_grid(rhs_per_block, threads_per_block_per_rhs); init_RHS<degree, T><<<b_grid, t_grid>>>(Ndof, Nrhs); check_error("init_RHS", cudaGetLastError()); cudaThreadSynchronize(); } /** * Calculates offset in global d_assembled for rows being factorized in * provided step * @param n - number of elements * @param step - current step * @return offset */ template<int degree> int assembled_offset_for_step(int Ne, int step) { if (step <= 0) return 0; const int rows_per_merge = degree; int merges_cnt = Ne / (degree + 1); int offset = 0; offset += merges_cnt * (2 * degree + 1); while (--step) { merges_cnt >>= 1; // div 2 offset += (3 * degree) * merges_cnt * rows_per_merge; } return offset; } /** * Calculates offset in global d_not_assembled for columns for not assembled * rows * @param n - number of elements * @param step - current step * @return offset */ template<int degree> int not_assembled_offset_for_step(int Ne, int step) { if (step <= 0) return 0; int merges_cnt = Ne / (degree + 1); int offset = 0; offset += merges_cnt * (2 * degree); while (--step) { merges_cnt >>= 1; // div 2 offset += (2 * degree * degree) * 
merges_cnt; } return offset; } template<class T, int degree> void launch_matrix_factorization(int ne) { const int tpb = degree >= 4 ? THREADS_PER_BLOCK / 2 : THREADS_PER_BLOCK; int block_grid; int merges_cnt; merges_cnt = merges_cnt_for_step<degree>(ne, 0); block_grid = prepare_block_grid(merges_cnt, tpb); first_merge<degree, T, tpb> <<<block_grid, tpb>>>(merges_cnt); check_error("first merge", cudaGetLastError()); // print_local<T, degree>(ne); int max_step = steps_cnt<degree>(ne); for (int step = 1; step < max_step ; ++step) { merges_cnt = merges_cnt_for_step<degree>(ne, step); block_grid = prepare_block_grid(merges_cnt, tpb); merge<degree, T, tpb><<<block_grid, tpb>>>(merges_cnt, assembled_offset_for_step<degree>(ne, step), not_assembled_offset_for_step<degree>(ne, step), step); check_error("merge", cudaGetLastError()); } last_merge<degree, T><<<1, 1>>>(assembled_offset_for_step<degree>(ne, max_step), max_step); check_error("last merge", cudaGetLastError()); } template<class T, int degree> void launch_forward_substitution(int ne, int Nrhs) { const int tpb = degree >= 4 ? 
8 : 16; const int RHSes_per_block = 16; int merges_cnt; merges_cnt = merges_cnt_for_step<degree>(ne, 0); dim3 t_grid(RHSes_per_block, tpb); dim3 b_grid = calculate_blocks(dim3(Nrhs, merges_cnt), t_grid); // debug_device<T, degree>(ne); first_forward_substitution<degree, T, RHSes_per_block><<<b_grid, t_grid>>>(merges_cnt, Nrhs); check_error("first_forward_substitution", cudaGetLastError()); // debug_device<T, degree>(ne); first_forward_substitution_update_left<degree, T, RHSes_per_block><<<b_grid, t_grid>>>(merges_cnt, Nrhs); check_error("first_forward_substitution_update_left", cudaGetLastError()); // debug_device<T, degree>(ne); first_forward_substitution_update_right<degree, T, RHSes_per_block><<<b_grid, t_grid>>>(merges_cnt, Nrhs); check_error("first_forward_substitution_update_right", cudaGetLastError()); // debug_device<T, degree>(ne); int max_step = steps_cnt<degree>(ne); for (int step = 1; step < max_step ; ++step) { merges_cnt = merges_cnt_for_step<degree>(ne, step); b_grid = calculate_blocks(dim3(Nrhs, merges_cnt), t_grid); forward_substitution<degree, T, RHSes_per_block><<<b_grid, t_grid>>>(merges_cnt, Nrhs, assembled_offset_for_step<degree>(ne, step), step); check_error("forward_substitution", cudaGetLastError()); // debug_device<T, degree>(ne); forward_substitution_update_left<degree, T, RHSes_per_block><<<b_grid, t_grid>>>(merges_cnt, Nrhs, not_assembled_offset_for_step<degree>(ne, step), step); check_error("forward_substitution_update_left", cudaGetLastError()); // debug_device<T, degree>(ne); forward_substitution_update_right<degree, T, RHSes_per_block><<<b_grid, t_grid>>>(merges_cnt, Nrhs, not_assembled_offset_for_step<degree>(ne, step), step); check_error("forward_substitution_update_right", cudaGetLastError()); // debug_device<T, degree>(ne); } t_grid = dim3(RHSes_per_block, 1); b_grid = calculate_blocks(dim3(Nrhs, 1), t_grid); last_forward_first_backward_substitution<degree, T><<<b_grid, t_grid>>>(ne, Nrhs, assembled_offset_for_step<degree>(ne, 
max_step)); check_error("last_forward_first_backward_substitution", cudaGetLastError()); } template<class T, int degree> void launch_backward_substitution(int ne, int Nrhs) { const int tpb = degree >= 4 ? 8 : 16; const int RHSes_per_block = 16; int merges_cnt; dim3 t_grid(RHSes_per_block, tpb); dim3 b_grid; int max_step = steps_cnt<degree>(ne); for (int step = max_step - 1; step > 0 ; --step) { merges_cnt = merges_cnt_for_step<degree>(ne, step); b_grid = calculate_blocks(dim3(Nrhs, merges_cnt), t_grid); backward_substitution<degree, T, RHSes_per_block><<<b_grid, t_grid>>>(merges_cnt, Nrhs, assembled_offset_for_step<degree>(ne, step), step); check_error("backward_substitution", cudaGetLastError()); } merges_cnt = merges_cnt_for_step<degree>(ne, 0); b_grid = calculate_blocks(dim3(Nrhs, merges_cnt), t_grid); last_backward_substitution<degree, T, RHSes_per_block><<<b_grid, t_grid>>>(merges_cnt, Nrhs); check_error("last_backward_substitution", cudaGetLastError()); } template<class T, int degree> void factorize_matrix(int Ne) { // debug_device<T, degree>(ne); launch_matrix_factorization<T, degree>(Ne); cudaThreadSynchronize(); } template<class T, int degree> void solve_equation(int Ne, int Nrhs) { launch_forward_substitution<T, degree>(Ne, Nrhs); // debug_device<T, degree>(ne); launch_backward_substitution<T, degree>(Ne, Nrhs); // debug_device<T, degree>(ne); prepare_result<degree, T>(Ne, Nrhs); cudaThreadSynchronize(); } template<class T, int degree> void print_result(int n, std::ostream &ostr) { T *x, *y, *tmp; const int QPC = degree + 2; x = new T[n * QPC]; y = new T[n * QPC]; gpuAssert(cudaMemcpyFromSymbol(&tmp, d_abssicas, sizeof (tmp))); gpuAssert(cudaMemcpy(x, tmp, sizeof (T) * QPC * n, cudaMemcpyDeviceToHost)); gpuAssert(cudaMemcpyFromSymbol(&tmp, d_fun_vals, sizeof (tmp))); gpuAssert(cudaMemcpy(y, tmp, sizeof (T) * QPC * n, cudaMemcpyDeviceToHost)); for (int i = 0; i < n * QPC; ++i) ostr << x[i] << ' ' << y[i] << '\n'; delete []x; delete []y; // debug_device<T, 
degree>(n); } template<class T, int degree> T calculate_error(int N) { T *tmp; T *result = new T[N]; gpuAssert(cudaMalloc(&tmp, sizeof (T) * N)); int tpb = 256; int block_cnt = N / tpb + (N % tpb ? 1 : 0); calculate_norm<degree, T, degree + 2><<<block_cnt, tpb>>>(N, tmp); check_error("calculate_norm", cudaGetLastError()); gpuAssert(cudaMemcpy(result, tmp, sizeof (T) * N, cudaMemcpyDeviceToHost)); T sum = 0; for (int i = 0; i < N; ++i) { sum += result[i]; } delete []result; gpuAssert(cudaFree(tmp)); return sqrt(sum); } template<class T, int degree> void print_local(int n) { T *bb; void *tmp[2]; int merges_cnt = merges_cnt_for_step<degree > (n, 1); int size = merges_cnt * (2 * degree + 1) * (2 * degree + 1); bb = new T[size]; gpuAssert(cudaMemcpyFromSymbol(tmp, d_B, sizeof (tmp))); gpuAssert(cudaMemcpy(bb, tmp[0], sizeof (T) * size, cudaMemcpyDeviceToHost)); for (int i = 0; i < merges_cnt; ++i) { for (int x = 0; x < 2 * degree + 1; ++x) { for (int y = 0; y < 2 * degree + 1; ++y) { std::cout << bb[(x * 2 * degree + y) * merges_cnt + i] << " "; } std::cout << "\n"; } std::cout << "\n"; } delete[] bb; // bb = new T[basis_funs_cnt<degree>(n)]; // gpuAssert(cudaMemcpyFromSymbol(&tmp, d_RHS, sizeof (tmp))); // gpuAssert(cudaMemcpy(bb, tmp, sizeof (T) * basis_funs_cnt<degree>(n), cudaMemcpyDeviceToHost)); // // // for (int i = 0 ; i < basis_funs_cnt<degree>(n) ; ++i) // std::cout << bb[i] << "\n"; // // std::cout << "------------------------\n------------------------\n"; // // // delete[] bb; } template<class T, int degree> void debug_device(int n) { // T *bb, *tmp; // int QPC = degree + 2; // bb = new T[(n + degree) * (degree + 1) * QPC]; // T * dev_ptrs[2]; // gpuAssert(cudaMemcpyFromSymbol(dev_ptrs, d_Nvals_err, sizeof (dev_ptrs))); // gpuAssert(cudaMemcpy(bb, dev_ptrs[degree & 1], // sizeof (T) * basis_funs_cnt<degree > (n) * (degree + 1) * QPC, // cudaMemcpyDeviceToHost)); // // std::cout << "------------------------\n------------------------\n"; // for (int i = 0; i < 
basis_funs_cnt<degree > (n) * (degree + 1) * QPC; ++i) // { // std::cout << bb[i] << '\n'; // } // delete [] bb; // // gpuAssert(cudaMemcpy(bb, dev_ptrs[1], sizeof (T) * (n + degree) * (degree + 1) * QPC, cudaMemcpyDeviceToHost)); // // std::cout << "------------------------\n------------------------\n"; // for (int i = 0; i < (n + degree) * (degree + 1) * QPC; ++i) // { // std::cout << bb[i] << '\n'; // } // // delete [] bb; // int Ndof = basis_funs_cnt<degree>(n); // bb = new T[RHSC * Ndof]; // gpuAssert(cudaMemcpyFromSymbol(&tmp, d_RHS, sizeof (tmp))); // gpuAssert(cudaMemcpy(bb, tmp, sizeof (T) * Ndof * RHSC, cudaMemcpyDeviceToHost)); // // for (int i=0 ; i<Ndof ; ++i) // { // for (int r = 0 ; r < RHSC ; ++r) // { // std::cerr << bb[i*RHSC + r] << "\t\t"; // } // std::cerr << "\n"; // } // // delete[] bb; } typedef void (*device_fun_i)(int); typedef void (*device_fun_ii)(int, int); // <editor-fold defaultstate="collapsed" desc="interface functions (float)"> // //template<> //void //CUDA_prepare_device<float>(int degree, int n) //{ // static device_fun prepares[] = { // prepare_device<float, 1 >, // prepare_device<float, 2 >, // prepare_device<float, 3 >, // prepare_device<float, 4 >, // prepare_device<float, 5 > // }; // // prepares[degree - 1](n); //} // //template<> //void //CUDA_init_fronts<float>(int degree, int n) //{ // static device_fun initializers[] = { // init_fronts<float, 1 >, // init_fronts<float, 2 >, // init_fronts<float, 3 >, // init_fronts<float, 4 >, // init_fronts<float, 5 > // }; // // initializers[degree - 1](n); //} // //template<> //void //CUDA_solve<float>(int degree, int n) //{ // static device_fun solvers[] = { // solve_equation<float, 1 >, // solve_equation<float, 2 >, // solve_equation<float, 3 >, // solve_equation<float, 4 >, // solve_equation<float, 5 > // }; // // solvers[degree - 1](n); //} // //template<> //float //CUDA_error<float>(int degree, int n) //{ // typedef float (*error_fun)(int); // static error_fun calculators[] = { 
// calculate_error<float, 1 >, // calculate_error<float, 2 >, // calculate_error<float, 3 >, // calculate_error<float, 4 >, // calculate_error<float, 5 > // }; // // return calculators[degree - 1](n); //} // //template<> //void //CUDA_print_result<float>(int degree, int n, std::ostream &ostr) //{ // typedef void (*print_fun)(int, std::ostream &); // static print_fun printers[] = { // print_result<float, 1 >, // print_result<float, 2 >, // print_result<float, 3 >, // print_result<float, 4 >, // print_result<float, 5 > // }; // // printers[degree - 1](n, ostr); //} // //template<> //void //CUDA_debug<float>(int degree, int n) //{ // typedef void (*print_fun)(int); // static print_fun debuggers[] = { // debug_device<float, 1 >, // debug_device<float, 2 >, // debug_device<float, 3 >, // debug_device<float, 4 >, // debug_device<float, 5 > // }; // // debuggers[degree - 1](n); //} // </editor-fold> // <editor-fold defaultstate="collapsed" desc="interface functions (double)"> template<> void CUDA_prepare_device<double>(int degree, int n, int rhs_cnt) { static device_fun_ii prepares[] = { prepare_device<double, 1 >, prepare_device<double, 2 >, prepare_device<double, 3 >, prepare_device<double, 4 >, prepare_device<double, 5 > }; prepares[degree - 1](n, rhs_cnt); } template<> void CUDA_init_fronts<double>(int degree, int n, int rhs_cnt) { static device_fun_ii initializers[] = { init_fronts<double, 1 >, init_fronts<double, 2 >, init_fronts<double, 3 >, init_fronts<double, 4 >, init_fronts<double, 5 > }; initializers[degree - 1](n, rhs_cnt); } template<> void CUDA_factorize_matrix<double>(int degree, int n) { static device_fun_i factorize[] = { factorize_matrix<double, 1 >, factorize_matrix<double, 2 >, factorize_matrix<double, 3 >, factorize_matrix<double, 4 >, factorize_matrix<double, 5 > }; factorize[degree - 1](n); } template<> void CUDA_solve<double>(int degree, int n, int rhs_cnt) { static device_fun_ii solvers[] = { solve_equation<double, 1 >, solve_equation<double, 2 
>, solve_equation<double, 3 >, solve_equation<double, 4 >, solve_equation<double, 5 > }; solvers[degree - 1](n, rhs_cnt); } template<> double CUDA_error<double>(int degree, int n) { typedef double (*error_fun)(int); static error_fun calculators[] = { calculate_error<double, 1 >, calculate_error<double, 2 >, calculate_error<double, 3 >, calculate_error<double, 4 >, calculate_error<double, 5 > }; return calculators[degree - 1](n); } template<> void CUDA_print_result<double>(int degree, int n, std::ostream &ostr) { typedef void (*print_fun)(int, std::ostream &); static print_fun printers[] = { print_result<double, 1 >, print_result<double, 2 >, print_result<double, 3 >, print_result<double, 4 >, print_result<double, 5 > }; printers[degree - 1](n, ostr); } template<> void CUDA_debug<double>(int degree, int n) { typedef void (*print_fun)(int); static print_fun debuggers[] = { debug_device<double, 1 >, debug_device<double, 2 >, debug_device<double, 3 >, debug_device<double, 4 >, debug_device<double, 5 > }; debuggers[degree - 1](n); } // </editor-fold>
8ae3d68c951ebf12916a89a51f3a74657666ba65.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2018 BlazingDB, Inc. * Copyright 2018 Felipe Aramburu <felipe@blazingdb.com> * Copyright 2018 Alexander Ocsa <alexander@blazingdb.com> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gdf/gdf.h> #include <gdf/utils.h> #include <gdf/errorutils.h> #include <hip/hip_runtime.h> #include <vector> #include <thrust/functional.h> #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/copy.h> #include <thrust/remove.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/execution_policy.h> #include <thrust/iterator/iterator_adaptor.h> #include <thrust/iterator/transform_iterator.h> //std lib #include <map> //wow the freaking example from iterator_adaptpr, what a break right! 
template<typename Iterator> class repeat_iterator : public thrust::iterator_adaptor< repeat_iterator<Iterator>, // the first template parameter is the name of the iterator we're creating Iterator // the second template parameter is the name of the iterator we're adapting // we can use the default for the additional template parameters > { public: // shorthand for the name of the iterator_adaptor we're deriving from typedef thrust::iterator_adaptor< repeat_iterator<Iterator>, Iterator > super_t; __host__ __device__ repeat_iterator(const Iterator &x, int n) : super_t(x), begin(x), n(n) {} // befriend thrust::iterator_core_access to allow it access to the private interface below friend class thrust::iterator_core_access; private: // repeat each element of the adapted range n times unsigned int n; // used to keep track of where we began const Iterator begin; // it is private because only thrust::iterator_core_access needs access to it __host__ __device__ typename super_t::reference dereference() const { return *(begin + (this->base() - begin) / n); } }; typedef repeat_iterator<thrust::detail::normal_iterator<thrust::device_ptr<gdf_valid_type> > > gdf_valid_iterator; size_t get_number_of_bytes_for_valid (size_t column_size) { return sizeof(gdf_valid_type) * (column_size + GDF_VALID_BITSIZE - 1) / GDF_VALID_BITSIZE; } // note: functor inherits from unary_function struct modulus_bit_width : public thrust::unary_function<gdf_size_type,gdf_size_type> { size_t n_bytes; size_t column_size; modulus_bit_width (size_t b_nytes, size_t column_size) { this->n_bytes = n_bytes; this->column_size = column_size; } __host__ __device__ gdf_size_type operator()(gdf_size_type x) const { int col_position = x / 8; int length_col = n_bytes != col_position+1 ? 
GDF_VALID_BITSIZE : column_size - GDF_VALID_BITSIZE * (n_bytes - 1); //return x % GDF_VALID_BITSIZE; return (length_col - 1) - (x % 8); // x << } }; struct shift_left: public thrust::unary_function<gdf_valid_type,gdf_valid_type> { gdf_valid_type num_bits; shift_left(gdf_valid_type num_bits): num_bits(num_bits){ } __host__ __device__ gdf_valid_type operator()(gdf_valid_type x) const { return x << num_bits; } }; struct shift_right: public thrust::unary_function<gdf_valid_type,gdf_valid_type> { gdf_valid_type num_bits; bool not_too_many; shift_right(gdf_valid_type num_bits, bool not_too_many) : num_bits(num_bits), not_too_many(not_too_many){ } __host__ __device__ gdf_valid_type operator()(gdf_valid_type x) const { //if you want to force the shift to be fill bits with 0 you need to use an unsigned type /*if (not_too_many) { // is the last return x; }*/ return *((unsigned char *) &x) >> num_bits; } }; struct bit_or: public thrust::unary_function<thrust::tuple<gdf_valid_type,gdf_valid_type>,gdf_valid_type> { __host__ __device__ gdf_valid_type operator()(thrust::tuple<gdf_valid_type,gdf_valid_type> x) const { return thrust::get<0>(x) | thrust::get<1>(x); } }; typedef thrust::transform_iterator<modulus_bit_width, thrust::counting_iterator<gdf_size_type> > bit_position_iterator; template<typename stencil_type> struct is_stencil_true { __host__ __device__ bool operator()(const thrust::tuple<stencil_type, gdf_valid_iterator::value_type, bit_position_iterator::value_type> value) { gdf_size_type position = thrust::get<2>(value); return ((thrust::get<1>(value) >> position) & 1) && (thrust::get<0>(value) != 0); } }; struct is_bit_set { __host__ __device__ bool operator()(const thrust::tuple< gdf_valid_iterator::value_type, bit_position_iterator::value_type> value) { gdf_size_type position = thrust::get<1>(value); return ((thrust::get<0>(value) >> position) & 1); } }; struct bit_mask_pack_op : public thrust::unary_function<int64_t,gdf_valid_type> { __host__ __device__ 
gdf_valid_type operator()(const int64_t expanded) { gdf_valid_type result = 0; for(int i = 0; i < GDF_VALID_BITSIZE; i++){ // 0, 8, 16, ....,48, 56 unsigned char byte = (expanded >> ( (GDF_VALID_BITSIZE - 1 - i ) * 8)); result |= (byte & 1) << i; } return (result); } }; std::map<gdf_dtype, int16_t> column_type_width = {{GDF_INT8, sizeof(int8_t)}, {GDF_INT16, sizeof(int16_t)},{GDF_INT32, sizeof(int32_t)}, {GDF_INT64, sizeof(int64_t)}, {GDF_FLOAT32, sizeof(float)}, {GDF_FLOAT64, sizeof(double)} }; //because applying a stencil only needs to know the WIDTH of a type for copying to output, we won't be making a bunch of templated version to store this but rather //storing a map from gdf_type to width //TODO: add a way for the space where we store temp bitmaps for compaction be allocated //on the outside gdf_error gpu_apply_stencil(gdf_column *lhs, gdf_column * stencil, gdf_column * output){ //OK: add a rquire here that output and lhs are the same size GDF_REQUIRE(output->size == lhs->size, GDF_COLUMN_SIZE_MISMATCH); GDF_REQUIRE(lhs->dtype == output->dtype, GDF_VALIDITY_MISSING); //find the width in bytes of this data type auto searched_item = column_type_width.find(lhs->dtype); int16_t width = searched_item->second; //width in bytes searched_item = column_type_width.find(stencil->dtype); int16_t stencil_width= searched_item->second; //width in bytes hipStream_t stream; hipStreamCreate(&stream); size_t n_bytes = get_number_of_bytes_for_valid(stencil->size); bit_position_iterator bit_position_iter(thrust::make_counting_iterator<gdf_size_type>(0), modulus_bit_width(n_bytes, stencil->size)); gdf_valid_iterator valid_iterator(thrust::detail::make_normal_iterator(thrust::device_pointer_cast(stencil->valid)),GDF_VALID_BITSIZE); //TODO: can probably make this happen with some kind of iterator so it can work on any width size //zip the stencil and the valid iterator together typedef thrust::tuple<thrust::detail::normal_iterator<thrust::device_ptr<int8_t> >,gdf_valid_iterator, 
bit_position_iterator > zipped_stencil_tuple; typedef thrust::zip_iterator<zipped_stencil_tuple> zipped_stencil_iterator; //what kind of shit is that you might wonder? //well basically we are zipping up an iterator to the stencil, one to the bit masks, and one which lets us get the bit position based on our index zipped_stencil_iterator zipped_stencil_iter( thrust::make_tuple( thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int8_t * )stencil->data)), valid_iterator, thrust::make_transform_iterator<modulus_bit_width, thrust::counting_iterator<gdf_size_type> >( thrust::make_counting_iterator<gdf_size_type>(0), modulus_bit_width(n_bytes, stencil->size)) )); //NOTE!!!! the output column is getting set to a specific size but we are NOT compacting the allocation, //whoever calls that should handle that if(width == 1){ thrust::detail::normal_iterator<thrust::device_ptr<int8_t> > input_start = thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int8_t *) lhs->data)); thrust::detail::normal_iterator<thrust::device_ptr<int8_t> > output_start = thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int8_t *) output->data)); thrust::detail::normal_iterator<thrust::device_ptr<int8_t> > output_end = thrust::copy_if(thrust::hip::par.on(stream),input_start,input_start + lhs->size,zipped_stencil_iter,output_start,is_stencil_true<thrust::detail::normal_iterator<thrust::device_ptr<int8_t> >::value_type >()); output->size = output_end - output_start; }else if(width == 2){ thrust::detail::normal_iterator<thrust::device_ptr<int16_t> > input_start = thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int16_t *) lhs->data)); thrust::detail::normal_iterator<thrust::device_ptr<int16_t> > output_start = thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int16_t *) output->data)); thrust::detail::normal_iterator<thrust::device_ptr<int16_t> > output_end = thrust::copy_if(thrust::hip::par.on(stream),input_start,input_start 
+ lhs->size,zipped_stencil_iter,output_start,is_stencil_true<thrust::detail::normal_iterator<thrust::device_ptr<int8_t> >::value_type >()); output->size = output_end - output_start; }else if(width == 4){ thrust::detail::normal_iterator<thrust::device_ptr<int32_t> > input_start = thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int32_t *) lhs->data)); thrust::detail::normal_iterator<thrust::device_ptr<int32_t> > output_start = thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int32_t *) output->data)); thrust::detail::normal_iterator<thrust::device_ptr<int32_t> > output_end = thrust::copy_if(thrust::hip::par.on(stream),input_start,input_start + lhs->size,zipped_stencil_iter,output_start,is_stencil_true<thrust::detail::normal_iterator<thrust::device_ptr<int8_t> >::value_type >()); output->size = output_end - output_start; }else if(width == 8){ thrust::detail::normal_iterator<thrust::device_ptr<int64_t> > input_start = thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int64_t *) lhs->data)); thrust::detail::normal_iterator<thrust::device_ptr<int64_t> > output_start = thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int64_t *) output->data)); thrust::detail::normal_iterator<thrust::device_ptr<int64_t> > output_end = thrust::copy_if(thrust::hip::par.on(stream),input_start,input_start + lhs->size,zipped_stencil_iter,output_start,is_stencil_true<thrust::detail::normal_iterator<thrust::device_ptr<int8_t> >::value_type >()); output->size = output_end - output_start; } gdf_size_type num_values = lhs->size; //TODO:BRING OVER THE BITMASK!!! 
//need to store a prefix sum //align to size 8 thrust::device_vector<gdf_valid_type> valid_bit_mask; //we are expanding the bit mask to an int8 because I can't envision an algorithm that operates on the bitmask that if(num_values % GDF_VALID_BITSIZE != 0){ valid_bit_mask.resize(num_values + (GDF_VALID_BITSIZE - (num_values % GDF_VALID_BITSIZE))); //align this allocation on GDF_VALID_BITSIZE so we don't have to bounds check }else{ valid_bit_mask.resize(num_values); } // doesn't require the use for a prefix sum which will have size 8 * num rows which is much larger than this typedef thrust::tuple<gdf_valid_iterator, bit_position_iterator > mask_tuple; typedef thrust::zip_iterator<mask_tuple> zipped_mask; zipped_mask zipped_mask_iter( thrust::make_tuple( valid_iterator, thrust::make_transform_iterator<modulus_bit_width, thrust::counting_iterator<gdf_size_type> >( thrust::make_counting_iterator<gdf_size_type>(0), modulus_bit_width(n_bytes, stencil->size)) ) ); typedef thrust::transform_iterator<is_bit_set, zipped_mask > bit_set_iterator; bit_set_iterator bit_set_iter = thrust::make_transform_iterator<is_bit_set,zipped_mask>( zipped_mask_iter, is_bit_set() ); //copy the bitmask to device_vector of int8 thrust::copy(thrust::hip::par.on(stream), bit_set_iter, bit_set_iter + num_values, valid_bit_mask.begin()); //remove the values that don't pass the stencil thrust::remove_if(thrust::hip::par.on(stream),valid_bit_mask.begin(), valid_bit_mask.begin() + num_values,zipped_stencil_iter, is_stencil_true<thrust::detail::normal_iterator<thrust::device_ptr<int8_t> >::value_type >()); //recompact the values and store them in the output bitmask //we can group them into pieces of 8 because we aligned this earlier on when we made the device_vector thrust::detail::normal_iterator<thrust::device_ptr<int64_t> > valid_bit_mask_group_8_iter = thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int64_t *) valid_bit_mask.data().get())); //you may notice that we can write out 
more bytes than our valid_num_bytes, this only happens when we are not aligned to GDF_VALID_BITSIZE bytes, becasue the //arrow standard requires 64 byte alignment, this is a safe assumption to make thrust::transform(thrust::hip::par.on(stream), valid_bit_mask_group_8_iter, valid_bit_mask_group_8_iter + ((num_values + GDF_VALID_BITSIZE - 1) / GDF_VALID_BITSIZE), thrust::detail::make_normal_iterator(thrust::device_pointer_cast(output->valid)),bit_mask_pack_op()); hipStreamSynchronize(stream); hipStreamDestroy(stream); return GDF_SUCCESS; } size_t get_last_byte_length(size_t column_size) { size_t n_bytes = get_number_of_bytes_for_valid(column_size); size_t length = column_size - GDF_VALID_BITSIZE * (n_bytes - 1); if (n_bytes == 1 ) { length = column_size; } return length; } size_t get_right_byte_length(size_t column_size, size_t iter, size_t left_length) { size_t n_bytes = get_number_of_bytes_for_valid(column_size); size_t length = column_size - GDF_VALID_BITSIZE * (n_bytes - 1); if (iter == n_bytes - 1) { // the last one if (left_length + length > GDF_VALID_BITSIZE) { length = GDF_VALID_BITSIZE - left_length; } } else { length = GDF_VALID_BITSIZE - left_length; } return length; } bool last_with_too_many_bits(size_t column_size, size_t iter, size_t left_length) { size_t n_bytes = get_number_of_bytes_for_valid(column_size); size_t length = column_size - GDF_VALID_BITSIZE * (n_bytes - 1); if (iter == n_bytes) { // the last one // the last one has to many bits if (left_length + length > GDF_VALID_BITSIZE) { return true; } } return false; } gdf_valid_type concat_bins (gdf_valid_type A, gdf_valid_type B, int len_a, int len_b, bool has_next, size_t right_length){ A = A << len_b; if (!has_next) { B = B << len_a; B = B >> len_a; } else { B = B >> right_length - len_b; } return (A | B); } gdf_error gpu_concat(gdf_column *lhs, gdf_column *rhs, gdf_column *output) { GDF_REQUIRE( (lhs->dtype == output->dtype ) && ( rhs->dtype == output->dtype), GDF_VALIDITY_MISSING); 
GDF_REQUIRE(output->size == lhs->size + rhs->size, GDF_COLUMN_SIZE_MISMATCH); hipStream_t stream; hipStreamCreate(&stream); int type_width = column_type_width[ lhs->dtype ]; hipMemcpyAsync(output->data, lhs->data, type_width * lhs->size, hipMemcpyDeviceToDevice, stream); hipMemcpyAsync( (void *)( (int8_t*) (output->data) + type_width * lhs->size), rhs->data, type_width * rhs->size, hipMemcpyDeviceToDevice, stream); int left_num_chars = get_number_of_bytes_for_valid(lhs->size); int right_num_chars = get_number_of_bytes_for_valid(rhs->size); int output_num_chars = get_number_of_bytes_for_valid(output->size); thrust::device_ptr<gdf_valid_type> left_device_bits = thrust::device_pointer_cast((gdf_valid_type *)lhs->valid); thrust::device_ptr<gdf_valid_type> right_device_bits = thrust::device_pointer_cast((gdf_valid_type *)rhs->valid); thrust::device_ptr<gdf_valid_type> output_device_bits = thrust::device_pointer_cast((gdf_valid_type *)output->valid); thrust::copy(left_device_bits, left_device_bits + left_num_chars, output_device_bits); gdf_valid_type shift_bits = (GDF_VALID_BITSIZE - (lhs->size % GDF_VALID_BITSIZE)); if(shift_bits == 8){ shift_bits = 0; } if (right_num_chars > 0) { size_t prev_len = get_last_byte_length(lhs->size); // copy all the rnbytes bytes from right column if (shift_bits == 0) { thrust::copy(right_device_bits, right_device_bits + right_num_chars, output_device_bits + left_num_chars); } else { thrust::host_vector<gdf_valid_type> last_byte (2); thrust::copy (left_device_bits + left_num_chars - 1, left_device_bits + left_num_chars, last_byte.begin()); thrust::copy (right_device_bits, right_device_bits + 1, last_byte.begin() + 1); size_t curr_len = get_right_byte_length(rhs->size, 0, prev_len); if (1 != right_num_chars) { last_byte[1] = last_byte[1] >> prev_len; } auto flag = last_with_too_many_bits(rhs->size, 0 + 1, prev_len); size_t last_right_byte_length = rhs->size - GDF_VALID_BITSIZE * (right_num_chars - 1); last_byte[0] = 
concat_bins(last_byte[0], last_byte[1], prev_len, curr_len, flag, last_right_byte_length); thrust::copy( last_byte.begin(), last_byte.begin() + 1, output_device_bits + left_num_chars - 1); if(right_num_chars > 1) { using first_iterator_type = thrust::transform_iterator<shift_left,thrust::device_vector<gdf_valid_type>::iterator>; using second_iterator_type = thrust::transform_iterator<shift_right,thrust::device_vector<gdf_valid_type>::iterator>; using offset_tuple = thrust::tuple<first_iterator_type, second_iterator_type>; using zipped_offset = thrust::zip_iterator<offset_tuple>; auto too_many_bits = last_with_too_many_bits(rhs->size, right_num_chars, prev_len); size_t last_byte_length = get_last_byte_length(rhs->size); if (last_byte_length >= (GDF_VALID_BITSIZE - shift_bits)) { // thrust::host_vector<gdf_valid_type> last_byte (right_device_bits + right_num_chars - 1, right_device_bits + right_num_chars); last_byte[0] = last_byte[0] << GDF_VALID_BITSIZE - last_byte_length; thrust::copy( last_byte.begin(), last_byte.begin() + 1, right_device_bits + right_num_chars - 1); } zipped_offset zipped_offset_iter( thrust::make_tuple( thrust::make_transform_iterator<shift_left, thrust::device_vector<gdf_valid_type>::iterator >( right_device_bits, shift_left(shift_bits)), thrust::make_transform_iterator<shift_right, thrust::device_vector<gdf_valid_type>::iterator >( right_device_bits + 1, shift_right(GDF_VALID_BITSIZE - shift_bits, !too_many_bits)) ) ); //so what this does is give you an iterator which gives you a tuple where you have your char, and the char after you, so you can get the last bits! 
using transformed_or = thrust::transform_iterator<bit_or, zipped_offset>; //now we want to make a transform iterator that ands these values together transformed_or ored_offset_iter = thrust::make_transform_iterator<bit_or,zipped_offset> ( zipped_offset_iter, bit_or() ); //because one of the iterators is + 1 we dont want to read the last char here since it could be past the end of our allocation thrust::copy( ored_offset_iter, ored_offset_iter + right_num_chars - 1, output_device_bits + left_num_chars); thrust::host_vector<gdf_valid_type> last_byte (right_device_bits + right_num_chars - 1, right_device_bits + right_num_chars); last_byte[0] = last_byte[0] >> GDF_VALID_BITSIZE - last_byte_length; thrust::copy( last_byte.begin(), last_byte.begin() + 1, right_device_bits + right_num_chars - 1); if ( !too_many_bits ) { thrust::host_vector<gdf_valid_type> last_byte (2); thrust::copy (right_device_bits + right_num_chars - 2, right_device_bits + right_num_chars - 1, last_byte.begin()); thrust::copy (right_device_bits + right_num_chars - 1, right_device_bits + right_num_chars, last_byte.begin() + 1); last_byte[0] = last_byte[0] << last_byte_length | last_byte[1]; thrust::copy( last_byte.begin(), last_byte.begin() + 1, output_device_bits + output_num_chars - 1); } } } if( last_with_too_many_bits(rhs->size, right_num_chars, prev_len)){ thrust::host_vector<gdf_valid_type> last_byte (right_device_bits + right_num_chars - 1, right_device_bits + right_num_chars); size_t prev_len = get_last_byte_length(lhs->size); size_t curr_len = get_right_byte_length(rhs->size, right_num_chars - 1, prev_len); last_byte[0] = last_byte[0] << curr_len; last_byte[0] = last_byte[0] >> curr_len; thrust::copy( last_byte.begin(), last_byte.begin() + 1, output_device_bits + output_num_chars - 1); } } hipStreamSynchronize(stream); hipStreamDestroy(stream); return GDF_SUCCESS; }
8ae3d68c951ebf12916a89a51f3a74657666ba65.cu
/* * Copyright 2018 BlazingDB, Inc. * Copyright 2018 Felipe Aramburu <felipe@blazingdb.com> * Copyright 2018 Alexander Ocsa <alexander@blazingdb.com> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gdf/gdf.h> #include <gdf/utils.h> #include <gdf/errorutils.h> #include <cuda_runtime.h> #include <vector> #include <thrust/functional.h> #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/copy.h> #include <thrust/remove.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/execution_policy.h> #include <thrust/iterator/iterator_adaptor.h> #include <thrust/iterator/transform_iterator.h> //std lib #include <map> //wow the freaking example from iterator_adaptpr, what a break right! 
template<typename Iterator> class repeat_iterator : public thrust::iterator_adaptor< repeat_iterator<Iterator>, // the first template parameter is the name of the iterator we're creating Iterator // the second template parameter is the name of the iterator we're adapting // we can use the default for the additional template parameters > { public: // shorthand for the name of the iterator_adaptor we're deriving from typedef thrust::iterator_adaptor< repeat_iterator<Iterator>, Iterator > super_t; __host__ __device__ repeat_iterator(const Iterator &x, int n) : super_t(x), begin(x), n(n) {} // befriend thrust::iterator_core_access to allow it access to the private interface below friend class thrust::iterator_core_access; private: // repeat each element of the adapted range n times unsigned int n; // used to keep track of where we began const Iterator begin; // it is private because only thrust::iterator_core_access needs access to it __host__ __device__ typename super_t::reference dereference() const { return *(begin + (this->base() - begin) / n); } }; typedef repeat_iterator<thrust::detail::normal_iterator<thrust::device_ptr<gdf_valid_type> > > gdf_valid_iterator; size_t get_number_of_bytes_for_valid (size_t column_size) { return sizeof(gdf_valid_type) * (column_size + GDF_VALID_BITSIZE - 1) / GDF_VALID_BITSIZE; } // note: functor inherits from unary_function struct modulus_bit_width : public thrust::unary_function<gdf_size_type,gdf_size_type> { size_t n_bytes; size_t column_size; modulus_bit_width (size_t b_nytes, size_t column_size) { this->n_bytes = n_bytes; this->column_size = column_size; } __host__ __device__ gdf_size_type operator()(gdf_size_type x) const { int col_position = x / 8; int length_col = n_bytes != col_position+1 ? 
GDF_VALID_BITSIZE : column_size - GDF_VALID_BITSIZE * (n_bytes - 1); //return x % GDF_VALID_BITSIZE; return (length_col - 1) - (x % 8); // x << } }; struct shift_left: public thrust::unary_function<gdf_valid_type,gdf_valid_type> { gdf_valid_type num_bits; shift_left(gdf_valid_type num_bits): num_bits(num_bits){ } __host__ __device__ gdf_valid_type operator()(gdf_valid_type x) const { return x << num_bits; } }; struct shift_right: public thrust::unary_function<gdf_valid_type,gdf_valid_type> { gdf_valid_type num_bits; bool not_too_many; shift_right(gdf_valid_type num_bits, bool not_too_many) : num_bits(num_bits), not_too_many(not_too_many){ } __host__ __device__ gdf_valid_type operator()(gdf_valid_type x) const { //if you want to force the shift to be fill bits with 0 you need to use an unsigned type /*if (not_too_many) { // is the last return x; }*/ return *((unsigned char *) &x) >> num_bits; } }; struct bit_or: public thrust::unary_function<thrust::tuple<gdf_valid_type,gdf_valid_type>,gdf_valid_type> { __host__ __device__ gdf_valid_type operator()(thrust::tuple<gdf_valid_type,gdf_valid_type> x) const { return thrust::get<0>(x) | thrust::get<1>(x); } }; typedef thrust::transform_iterator<modulus_bit_width, thrust::counting_iterator<gdf_size_type> > bit_position_iterator; template<typename stencil_type> struct is_stencil_true { __host__ __device__ bool operator()(const thrust::tuple<stencil_type, gdf_valid_iterator::value_type, bit_position_iterator::value_type> value) { gdf_size_type position = thrust::get<2>(value); return ((thrust::get<1>(value) >> position) & 1) && (thrust::get<0>(value) != 0); } }; struct is_bit_set { __host__ __device__ bool operator()(const thrust::tuple< gdf_valid_iterator::value_type, bit_position_iterator::value_type> value) { gdf_size_type position = thrust::get<1>(value); return ((thrust::get<0>(value) >> position) & 1); } }; struct bit_mask_pack_op : public thrust::unary_function<int64_t,gdf_valid_type> { __host__ __device__ 
gdf_valid_type operator()(const int64_t expanded) { gdf_valid_type result = 0; for(int i = 0; i < GDF_VALID_BITSIZE; i++){ // 0, 8, 16, ....,48, 56 unsigned char byte = (expanded >> ( (GDF_VALID_BITSIZE - 1 - i ) * 8)); result |= (byte & 1) << i; } return (result); } }; std::map<gdf_dtype, int16_t> column_type_width = {{GDF_INT8, sizeof(int8_t)}, {GDF_INT16, sizeof(int16_t)},{GDF_INT32, sizeof(int32_t)}, {GDF_INT64, sizeof(int64_t)}, {GDF_FLOAT32, sizeof(float)}, {GDF_FLOAT64, sizeof(double)} }; //because applying a stencil only needs to know the WIDTH of a type for copying to output, we won't be making a bunch of templated version to store this but rather //storing a map from gdf_type to width //TODO: add a way for the space where we store temp bitmaps for compaction be allocated //on the outside gdf_error gpu_apply_stencil(gdf_column *lhs, gdf_column * stencil, gdf_column * output){ //OK: add a rquire here that output and lhs are the same size GDF_REQUIRE(output->size == lhs->size, GDF_COLUMN_SIZE_MISMATCH); GDF_REQUIRE(lhs->dtype == output->dtype, GDF_VALIDITY_MISSING); //find the width in bytes of this data type auto searched_item = column_type_width.find(lhs->dtype); int16_t width = searched_item->second; //width in bytes searched_item = column_type_width.find(stencil->dtype); int16_t stencil_width= searched_item->second; //width in bytes cudaStream_t stream; cudaStreamCreate(&stream); size_t n_bytes = get_number_of_bytes_for_valid(stencil->size); bit_position_iterator bit_position_iter(thrust::make_counting_iterator<gdf_size_type>(0), modulus_bit_width(n_bytes, stencil->size)); gdf_valid_iterator valid_iterator(thrust::detail::make_normal_iterator(thrust::device_pointer_cast(stencil->valid)),GDF_VALID_BITSIZE); //TODO: can probably make this happen with some kind of iterator so it can work on any width size //zip the stencil and the valid iterator together typedef thrust::tuple<thrust::detail::normal_iterator<thrust::device_ptr<int8_t> >,gdf_valid_iterator, 
bit_position_iterator > zipped_stencil_tuple; typedef thrust::zip_iterator<zipped_stencil_tuple> zipped_stencil_iterator; //what kind of shit is that you might wonder? //well basically we are zipping up an iterator to the stencil, one to the bit masks, and one which lets us get the bit position based on our index zipped_stencil_iterator zipped_stencil_iter( thrust::make_tuple( thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int8_t * )stencil->data)), valid_iterator, thrust::make_transform_iterator<modulus_bit_width, thrust::counting_iterator<gdf_size_type> >( thrust::make_counting_iterator<gdf_size_type>(0), modulus_bit_width(n_bytes, stencil->size)) )); //NOTE!!!! the output column is getting set to a specific size but we are NOT compacting the allocation, //whoever calls that should handle that if(width == 1){ thrust::detail::normal_iterator<thrust::device_ptr<int8_t> > input_start = thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int8_t *) lhs->data)); thrust::detail::normal_iterator<thrust::device_ptr<int8_t> > output_start = thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int8_t *) output->data)); thrust::detail::normal_iterator<thrust::device_ptr<int8_t> > output_end = thrust::copy_if(thrust::cuda::par.on(stream),input_start,input_start + lhs->size,zipped_stencil_iter,output_start,is_stencil_true<thrust::detail::normal_iterator<thrust::device_ptr<int8_t> >::value_type >()); output->size = output_end - output_start; }else if(width == 2){ thrust::detail::normal_iterator<thrust::device_ptr<int16_t> > input_start = thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int16_t *) lhs->data)); thrust::detail::normal_iterator<thrust::device_ptr<int16_t> > output_start = thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int16_t *) output->data)); thrust::detail::normal_iterator<thrust::device_ptr<int16_t> > output_end = 
thrust::copy_if(thrust::cuda::par.on(stream),input_start,input_start + lhs->size,zipped_stencil_iter,output_start,is_stencil_true<thrust::detail::normal_iterator<thrust::device_ptr<int8_t> >::value_type >()); output->size = output_end - output_start; }else if(width == 4){ thrust::detail::normal_iterator<thrust::device_ptr<int32_t> > input_start = thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int32_t *) lhs->data)); thrust::detail::normal_iterator<thrust::device_ptr<int32_t> > output_start = thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int32_t *) output->data)); thrust::detail::normal_iterator<thrust::device_ptr<int32_t> > output_end = thrust::copy_if(thrust::cuda::par.on(stream),input_start,input_start + lhs->size,zipped_stencil_iter,output_start,is_stencil_true<thrust::detail::normal_iterator<thrust::device_ptr<int8_t> >::value_type >()); output->size = output_end - output_start; }else if(width == 8){ thrust::detail::normal_iterator<thrust::device_ptr<int64_t> > input_start = thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int64_t *) lhs->data)); thrust::detail::normal_iterator<thrust::device_ptr<int64_t> > output_start = thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int64_t *) output->data)); thrust::detail::normal_iterator<thrust::device_ptr<int64_t> > output_end = thrust::copy_if(thrust::cuda::par.on(stream),input_start,input_start + lhs->size,zipped_stencil_iter,output_start,is_stencil_true<thrust::detail::normal_iterator<thrust::device_ptr<int8_t> >::value_type >()); output->size = output_end - output_start; } gdf_size_type num_values = lhs->size; //TODO:BRING OVER THE BITMASK!!! 
//need to store a prefix sum //align to size 8 thrust::device_vector<gdf_valid_type> valid_bit_mask; //we are expanding the bit mask to an int8 because I can't envision an algorithm that operates on the bitmask that if(num_values % GDF_VALID_BITSIZE != 0){ valid_bit_mask.resize(num_values + (GDF_VALID_BITSIZE - (num_values % GDF_VALID_BITSIZE))); //align this allocation on GDF_VALID_BITSIZE so we don't have to bounds check }else{ valid_bit_mask.resize(num_values); } // doesn't require the use for a prefix sum which will have size 8 * num rows which is much larger than this typedef thrust::tuple<gdf_valid_iterator, bit_position_iterator > mask_tuple; typedef thrust::zip_iterator<mask_tuple> zipped_mask; zipped_mask zipped_mask_iter( thrust::make_tuple( valid_iterator, thrust::make_transform_iterator<modulus_bit_width, thrust::counting_iterator<gdf_size_type> >( thrust::make_counting_iterator<gdf_size_type>(0), modulus_bit_width(n_bytes, stencil->size)) ) ); typedef thrust::transform_iterator<is_bit_set, zipped_mask > bit_set_iterator; bit_set_iterator bit_set_iter = thrust::make_transform_iterator<is_bit_set,zipped_mask>( zipped_mask_iter, is_bit_set() ); //copy the bitmask to device_vector of int8 thrust::copy(thrust::cuda::par.on(stream), bit_set_iter, bit_set_iter + num_values, valid_bit_mask.begin()); //remove the values that don't pass the stencil thrust::remove_if(thrust::cuda::par.on(stream),valid_bit_mask.begin(), valid_bit_mask.begin() + num_values,zipped_stencil_iter, is_stencil_true<thrust::detail::normal_iterator<thrust::device_ptr<int8_t> >::value_type >()); //recompact the values and store them in the output bitmask //we can group them into pieces of 8 because we aligned this earlier on when we made the device_vector thrust::detail::normal_iterator<thrust::device_ptr<int64_t> > valid_bit_mask_group_8_iter = thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int64_t *) valid_bit_mask.data().get())); //you may notice that we can write out 
more bytes than our valid_num_bytes, this only happens when we are not aligned to GDF_VALID_BITSIZE bytes, becasue the //arrow standard requires 64 byte alignment, this is a safe assumption to make thrust::transform(thrust::cuda::par.on(stream), valid_bit_mask_group_8_iter, valid_bit_mask_group_8_iter + ((num_values + GDF_VALID_BITSIZE - 1) / GDF_VALID_BITSIZE), thrust::detail::make_normal_iterator(thrust::device_pointer_cast(output->valid)),bit_mask_pack_op()); cudaStreamSynchronize(stream); cudaStreamDestroy(stream); return GDF_SUCCESS; } size_t get_last_byte_length(size_t column_size) { size_t n_bytes = get_number_of_bytes_for_valid(column_size); size_t length = column_size - GDF_VALID_BITSIZE * (n_bytes - 1); if (n_bytes == 1 ) { length = column_size; } return length; } size_t get_right_byte_length(size_t column_size, size_t iter, size_t left_length) { size_t n_bytes = get_number_of_bytes_for_valid(column_size); size_t length = column_size - GDF_VALID_BITSIZE * (n_bytes - 1); if (iter == n_bytes - 1) { // the last one if (left_length + length > GDF_VALID_BITSIZE) { length = GDF_VALID_BITSIZE - left_length; } } else { length = GDF_VALID_BITSIZE - left_length; } return length; } bool last_with_too_many_bits(size_t column_size, size_t iter, size_t left_length) { size_t n_bytes = get_number_of_bytes_for_valid(column_size); size_t length = column_size - GDF_VALID_BITSIZE * (n_bytes - 1); if (iter == n_bytes) { // the last one // the last one has to many bits if (left_length + length > GDF_VALID_BITSIZE) { return true; } } return false; } gdf_valid_type concat_bins (gdf_valid_type A, gdf_valid_type B, int len_a, int len_b, bool has_next, size_t right_length){ A = A << len_b; if (!has_next) { B = B << len_a; B = B >> len_a; } else { B = B >> right_length - len_b; } return (A | B); } gdf_error gpu_concat(gdf_column *lhs, gdf_column *rhs, gdf_column *output) { GDF_REQUIRE( (lhs->dtype == output->dtype ) && ( rhs->dtype == output->dtype), GDF_VALIDITY_MISSING); 
GDF_REQUIRE(output->size == lhs->size + rhs->size, GDF_COLUMN_SIZE_MISMATCH); cudaStream_t stream; cudaStreamCreate(&stream); int type_width = column_type_width[ lhs->dtype ]; cudaMemcpyAsync(output->data, lhs->data, type_width * lhs->size, cudaMemcpyDeviceToDevice, stream); cudaMemcpyAsync( (void *)( (int8_t*) (output->data) + type_width * lhs->size), rhs->data, type_width * rhs->size, cudaMemcpyDeviceToDevice, stream); int left_num_chars = get_number_of_bytes_for_valid(lhs->size); int right_num_chars = get_number_of_bytes_for_valid(rhs->size); int output_num_chars = get_number_of_bytes_for_valid(output->size); thrust::device_ptr<gdf_valid_type> left_device_bits = thrust::device_pointer_cast((gdf_valid_type *)lhs->valid); thrust::device_ptr<gdf_valid_type> right_device_bits = thrust::device_pointer_cast((gdf_valid_type *)rhs->valid); thrust::device_ptr<gdf_valid_type> output_device_bits = thrust::device_pointer_cast((gdf_valid_type *)output->valid); thrust::copy(left_device_bits, left_device_bits + left_num_chars, output_device_bits); gdf_valid_type shift_bits = (GDF_VALID_BITSIZE - (lhs->size % GDF_VALID_BITSIZE)); if(shift_bits == 8){ shift_bits = 0; } if (right_num_chars > 0) { size_t prev_len = get_last_byte_length(lhs->size); // copy all the rnbytes bytes from right column if (shift_bits == 0) { thrust::copy(right_device_bits, right_device_bits + right_num_chars, output_device_bits + left_num_chars); } else { thrust::host_vector<gdf_valid_type> last_byte (2); thrust::copy (left_device_bits + left_num_chars - 1, left_device_bits + left_num_chars, last_byte.begin()); thrust::copy (right_device_bits, right_device_bits + 1, last_byte.begin() + 1); size_t curr_len = get_right_byte_length(rhs->size, 0, prev_len); if (1 != right_num_chars) { last_byte[1] = last_byte[1] >> prev_len; } auto flag = last_with_too_many_bits(rhs->size, 0 + 1, prev_len); size_t last_right_byte_length = rhs->size - GDF_VALID_BITSIZE * (right_num_chars - 1); last_byte[0] = 
concat_bins(last_byte[0], last_byte[1], prev_len, curr_len, flag, last_right_byte_length); thrust::copy( last_byte.begin(), last_byte.begin() + 1, output_device_bits + left_num_chars - 1); if(right_num_chars > 1) { using first_iterator_type = thrust::transform_iterator<shift_left,thrust::device_vector<gdf_valid_type>::iterator>; using second_iterator_type = thrust::transform_iterator<shift_right,thrust::device_vector<gdf_valid_type>::iterator>; using offset_tuple = thrust::tuple<first_iterator_type, second_iterator_type>; using zipped_offset = thrust::zip_iterator<offset_tuple>; auto too_many_bits = last_with_too_many_bits(rhs->size, right_num_chars, prev_len); size_t last_byte_length = get_last_byte_length(rhs->size); if (last_byte_length >= (GDF_VALID_BITSIZE - shift_bits)) { // thrust::host_vector<gdf_valid_type> last_byte (right_device_bits + right_num_chars - 1, right_device_bits + right_num_chars); last_byte[0] = last_byte[0] << GDF_VALID_BITSIZE - last_byte_length; thrust::copy( last_byte.begin(), last_byte.begin() + 1, right_device_bits + right_num_chars - 1); } zipped_offset zipped_offset_iter( thrust::make_tuple( thrust::make_transform_iterator<shift_left, thrust::device_vector<gdf_valid_type>::iterator >( right_device_bits, shift_left(shift_bits)), thrust::make_transform_iterator<shift_right, thrust::device_vector<gdf_valid_type>::iterator >( right_device_bits + 1, shift_right(GDF_VALID_BITSIZE - shift_bits, !too_many_bits)) ) ); //so what this does is give you an iterator which gives you a tuple where you have your char, and the char after you, so you can get the last bits! 
using transformed_or = thrust::transform_iterator<bit_or, zipped_offset>; //now we want to make a transform iterator that ands these values together transformed_or ored_offset_iter = thrust::make_transform_iterator<bit_or,zipped_offset> ( zipped_offset_iter, bit_or() ); //because one of the iterators is + 1 we dont want to read the last char here since it could be past the end of our allocation thrust::copy( ored_offset_iter, ored_offset_iter + right_num_chars - 1, output_device_bits + left_num_chars); thrust::host_vector<gdf_valid_type> last_byte (right_device_bits + right_num_chars - 1, right_device_bits + right_num_chars); last_byte[0] = last_byte[0] >> GDF_VALID_BITSIZE - last_byte_length; thrust::copy( last_byte.begin(), last_byte.begin() + 1, right_device_bits + right_num_chars - 1); if ( !too_many_bits ) { thrust::host_vector<gdf_valid_type> last_byte (2); thrust::copy (right_device_bits + right_num_chars - 2, right_device_bits + right_num_chars - 1, last_byte.begin()); thrust::copy (right_device_bits + right_num_chars - 1, right_device_bits + right_num_chars, last_byte.begin() + 1); last_byte[0] = last_byte[0] << last_byte_length | last_byte[1]; thrust::copy( last_byte.begin(), last_byte.begin() + 1, output_device_bits + output_num_chars - 1); } } } if( last_with_too_many_bits(rhs->size, right_num_chars, prev_len)){ thrust::host_vector<gdf_valid_type> last_byte (right_device_bits + right_num_chars - 1, right_device_bits + right_num_chars); size_t prev_len = get_last_byte_length(lhs->size); size_t curr_len = get_right_byte_length(rhs->size, right_num_chars - 1, prev_len); last_byte[0] = last_byte[0] << curr_len; last_byte[0] = last_byte[0] >> curr_len; thrust::copy( last_byte.begin(), last_byte.begin() + 1, output_device_bits + output_num_chars - 1); } } cudaStreamSynchronize(stream); cudaStreamDestroy(stream); return GDF_SUCCESS; }
81e2ded91c5c5f9c92b94cb5483ce1ab028d6289.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void warmup(float *out, float *in, const int nx, const int ny) { unsigned int ix = blockDim.x * blockIdx.x + threadIdx.x; unsigned int iy = blockDim.y * blockIdx.y + threadIdx.y; if (ix < nx && iy < ny) { out[iy * nx + ix] = in[iy * nx + ix]; } }
81e2ded91c5c5f9c92b94cb5483ce1ab028d6289.cu
#include "includes.h" __global__ void warmup(float *out, float *in, const int nx, const int ny) { unsigned int ix = blockDim.x * blockIdx.x + threadIdx.x; unsigned int iy = blockDim.y * blockIdx.y + threadIdx.y; if (ix < nx && iy < ny) { out[iy * nx + ix] = in[iy * nx + ix]; } }
43c998651144de17b7bb2177fc700b1206283d14.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* function for projecting lidar points * */ #include "../common.h" __global__ void CameraTransformKernel(const float* const tform, const float* const cam, const size_t imWidth, const size_t imHeight, const float* const xIn, const float* const yIn, const float* const zIn, const size_t numPoints, float* const xOut, float* const yOut, bool* const valid){ unsigned int i = blockDim.x * blockIdx.x + threadIdx.x; if(i >= numPoints){ return; } //transform points float x = xIn[i]*tform[0] + yIn[i]*tform[4] + zIn[i]*tform[8] + tform[12]; float y = xIn[i]*tform[1] + yIn[i]*tform[5] + zIn[i]*tform[9] + tform[13]; float z = xIn[i]*tform[2] + yIn[i]*tform[6] + zIn[i]*tform[10] + tform[14]; bool v = true; //panoramic camera model y = (y/sqrt(z*z + x*x)); x = atan2(x,z); //apply projective camera matrix x = cam[0]*x + cam[3]*y + cam[6]*z + cam[9]; y = cam[1]*x + cam[4]*y + cam[7]*z + cam[10]; z = cam[2]*x + cam[5]*y + cam[8]*z + cam[11]; if((x < 0) || (y < 0) || (x >= imWidth) || (y >= imHeight)){ v = false; } //output points xOut[i] = x; yOut[i] = y; valid[i] = v; } void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]) { //initialize the MathWorks GPU API. 
mxInitGPU(); //read data mxGPUArray const * tformMat = mxGPUCreateFromMxArray(prhs[0]); mxGPUArray const * camMat = mxGPUCreateFromMxArray(prhs[1]); mxGPUArray const * pointsMat = mxGPUCreateFromMxArray(prhs[2]); size_t imWidth = ((uint32_T *) mxGetData(prhs[3]))[1]; size_t imHeight = ((uint32_T *) mxGetData(prhs[3]))[0]; size_t numPoints = mxGPUGetDimensions(pointsMat)[0]; //get input pointers float* tformPtr = (float*)(mxGPUGetDataReadOnly(tformMat)); float* camPtr = (float*)(mxGPUGetDataReadOnly(camMat)); float* xInPtr = (float*)(mxGPUGetDataReadOnly(pointsMat)); float* yInPtr = &(xInPtr[numPoints]); float* zInPtr = &(yInPtr[numPoints]); //create output mwSize outSize[] = {numPoints,2}; mxGPUArray* outMat = mxGPUCreateGPUArray(2, outSize, mxSINGLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); plhs[1] = mxGPUCreateMxArrayOnGPU(outMat); outSize[1] = 1; mxGPUArray* validMat = mxGPUCreateGPUArray(2, outSize, mxLOGICAL_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); plhs[0] = mxGPUCreateMxArrayOnGPU(validMat); float* xOutPtr = (float*)(mxGPUGetData(outMat)); float* yOutPtr = &(xOutPtr[numPoints]); bool* validPtr = (bool*)(mxGPUGetData(validMat)); //run and get ouputs hipLaunchKernelGGL(( CameraTransformKernel), dim3(gridSize(numPoints)), dim3(BLOCK_SIZE), 0, 0, tformPtr, camPtr, imWidth, imHeight, xInPtr, yInPtr, zInPtr, numPoints, xOutPtr, yOutPtr,validPtr); CudaCheckError(); //destroy reference structures mxGPUDestroyGPUArray(tformMat); mxGPUDestroyGPUArray(camMat); mxGPUDestroyGPUArray(pointsMat); mxGPUDestroyGPUArray(outMat); mxGPUDestroyGPUArray(validMat); }
43c998651144de17b7bb2177fc700b1206283d14.cu
/* function for projecting lidar points * */ #include "../common.h" __global__ void CameraTransformKernel(const float* const tform, const float* const cam, const size_t imWidth, const size_t imHeight, const float* const xIn, const float* const yIn, const float* const zIn, const size_t numPoints, float* const xOut, float* const yOut, bool* const valid){ unsigned int i = blockDim.x * blockIdx.x + threadIdx.x; if(i >= numPoints){ return; } //transform points float x = xIn[i]*tform[0] + yIn[i]*tform[4] + zIn[i]*tform[8] + tform[12]; float y = xIn[i]*tform[1] + yIn[i]*tform[5] + zIn[i]*tform[9] + tform[13]; float z = xIn[i]*tform[2] + yIn[i]*tform[6] + zIn[i]*tform[10] + tform[14]; bool v = true; //panoramic camera model y = (y/sqrt(z*z + x*x)); x = atan2(x,z); //apply projective camera matrix x = cam[0]*x + cam[3]*y + cam[6]*z + cam[9]; y = cam[1]*x + cam[4]*y + cam[7]*z + cam[10]; z = cam[2]*x + cam[5]*y + cam[8]*z + cam[11]; if((x < 0) || (y < 0) || (x >= imWidth) || (y >= imHeight)){ v = false; } //output points xOut[i] = x; yOut[i] = y; valid[i] = v; } void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]) { //initialize the MathWorks GPU API. 
mxInitGPU(); //read data mxGPUArray const * tformMat = mxGPUCreateFromMxArray(prhs[0]); mxGPUArray const * camMat = mxGPUCreateFromMxArray(prhs[1]); mxGPUArray const * pointsMat = mxGPUCreateFromMxArray(prhs[2]); size_t imWidth = ((uint32_T *) mxGetData(prhs[3]))[1]; size_t imHeight = ((uint32_T *) mxGetData(prhs[3]))[0]; size_t numPoints = mxGPUGetDimensions(pointsMat)[0]; //get input pointers float* tformPtr = (float*)(mxGPUGetDataReadOnly(tformMat)); float* camPtr = (float*)(mxGPUGetDataReadOnly(camMat)); float* xInPtr = (float*)(mxGPUGetDataReadOnly(pointsMat)); float* yInPtr = &(xInPtr[numPoints]); float* zInPtr = &(yInPtr[numPoints]); //create output mwSize outSize[] = {numPoints,2}; mxGPUArray* outMat = mxGPUCreateGPUArray(2, outSize, mxSINGLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); plhs[1] = mxGPUCreateMxArrayOnGPU(outMat); outSize[1] = 1; mxGPUArray* validMat = mxGPUCreateGPUArray(2, outSize, mxLOGICAL_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); plhs[0] = mxGPUCreateMxArrayOnGPU(validMat); float* xOutPtr = (float*)(mxGPUGetData(outMat)); float* yOutPtr = &(xOutPtr[numPoints]); bool* validPtr = (bool*)(mxGPUGetData(validMat)); //run and get ouputs CameraTransformKernel<<<gridSize(numPoints), BLOCK_SIZE>>>(tformPtr, camPtr, imWidth, imHeight, xInPtr, yInPtr, zInPtr, numPoints, xOutPtr, yOutPtr,validPtr); CudaCheckError(); //destroy reference structures mxGPUDestroyGPUArray(tformMat); mxGPUDestroyGPUArray(camMat); mxGPUDestroyGPUArray(pointsMat); mxGPUDestroyGPUArray(outMat); mxGPUDestroyGPUArray(validMat); }
188341391145ed04ec8db0fa23e13bada4bcaee0.hip
// !!! This is a file automatically generated by hipify!!! //#include "SSS_cuda.h" //#include <hip/hip_runtime.h> #include "SSS_cuda.h" const int threadsPerBlock = 64; double dot_host(double *x,double *y, int n) { int i; double t=0; for (i=0;i<n;i++) { t +=x[i] * y[i]; } return t; } __global__ void dot_kernel(int N,double *a,double *b,double *c) { __shared__ double cache[threadsPerBlock]; int tid=threadIdx.x+blockIdx.x*blockDim.x; int cacheIndex=threadIdx.x; double temp=0; while(tid<N) { temp += a[tid]*b[tid]; tid += blockDim.x*gridDim.x; } cache[cacheIndex]=temp; __syncthreads(); //threadPerBLock`2 int i=blockDim.x/2; while(i != 0) { if(cacheIndex<i) cache[cacheIndex] += cache[cacheIndex+i]; __syncthreads(); i /=2; } if(cacheIndex==0) { c[blockIdx.x]=cache[0]; } // printf("c[%d] = %lf\n",blockIdx.x,c[blockIdx.x]); } double dot_cuda(int N, double *hx,double *hy, double *dx, double *dy ,double *dz, double *recive) { double result = 0; hipMemcpy(dx, hx, N * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(dy, hy, N * sizeof(double), hipMemcpyHostToDevice); hipLaunchKernelGGL(( dot_kernel), dim3(64),dim3(64), 0, 0, N,dx,dy,dz); hipMemcpy(recive,dz,N*sizeof(double),hipMemcpyDeviceToHost); for(int i=0;i<N;i++) { result +=recive[i]; } return result; } __global__ void spmv_kernel(const int m,int *row_ptr,int *col_idx,double *A_val,double *x_val,double *y_val) { int tid = blockDim.x * blockIdx.x +threadIdx.x; if (tid < m) { double temp = 0; int begin_row = row_ptr[tid]; int end_row = row_ptr[tid+1]; for(int k = begin_row; k < end_row; k++) { temp+= A_val[k] *x_val[col_idx[k]]; } y_val[tid]+=temp; } } __global__ void alpha_spmv_kernel(const int alpha ,const int m,int *row_ptr,int *col_idx,double *A_val,double *x_val,double *y_val) { int tid = blockDim.x * blockIdx.x +threadIdx.x; if (tid < m) { double temp = 0; int begin_row = row_ptr[tid]; int end_row = row_ptr[tid+1]; for(int k = begin_row; k < end_row; k++) { temp+= A_val[k] *x_val[col_idx[k]]; } y_val[tid]+=temp * 
alpha; } } void spmv_cuda(SSS_MAT *A, SSS_VEC *x, SSS_VEC *y, int *d_row_ptr,int *d_col_idx,double *d_A_val,double *d_x_val,double *d_y_val) { //cuda spmv hipMemcpy(d_row_ptr, A->row_ptr, (A->num_rows+1) * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_col_idx, A->col_idx, A->num_nnzs * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_A_val, A->val, A->num_nnzs * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_x_val, x->d, x->n * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_y_val, y->d, y->n * sizeof(double), hipMemcpyHostToDevice); double time1=get_time(); hipLaunchKernelGGL(( spmv_kernel), dim3(64),dim3(64), 0, 0, y->n,d_row_ptr,d_col_idx,d_A_val,d_x_val,d_y_val); hipDeviceSynchronize(); double time2=get_time(); //printf("cuda_time = %lf\n",time3); hipMemcpy(y->d,d_y_val, y->n * sizeof(double), hipMemcpyDeviceToHost); } void alpha_spmv_cuda(const int alpha,SSS_MAT *A, SSS_VEC *x, SSS_VEC *y, int *d_row_ptr,int *d_col_idx,double *d_A_val,double *d_x_val,double *d_y_val) { //cuda spmv //int num = 0 ; hipMemcpy(d_row_ptr, A->row_ptr, (A->num_rows+1) * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_col_idx, A->col_idx, A->num_nnzs * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_A_val, A->val, A->num_nnzs * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_x_val, x->d, x->n * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_y_val, y->d, y->n * sizeof(double), hipMemcpyHostToDevice); hipLaunchKernelGGL(( alpha_spmv_kernel), dim3(64),dim3(64), 0, 0, alpha,y->n,d_row_ptr,d_col_idx,d_A_val,d_x_val,d_y_val); //num+=1; hipDeviceSynchronize(); hipMemcpy(y->d,d_y_val, y->n * sizeof(double), hipMemcpyDeviceToHost); //hipDeviceSynchronize(); //printf("alpha_spmv\n"); // hipFree(d_row_ptr); //hipFree(d_col_idx); //hipFree(d_A_val); //hipFree(d_x_val); //hipFree(d_y_val); }
188341391145ed04ec8db0fa23e13bada4bcaee0.cu
//#include "SSS_cuda.h" //#include <cuda.h> #include "SSS_cuda.h" const int threadsPerBlock = 64; double dot_host(double *x,double *y, int n) { int i; double t=0; for (i=0;i<n;i++) { t +=x[i] * y[i]; } return t; } __global__ void dot_kernel(int N,double *a,double *b,double *c) { __shared__ double cache[threadsPerBlock]; int tid=threadIdx.x+blockIdx.x*blockDim.x; int cacheIndex=threadIdx.x; double temp=0; while(tid<N) { temp += a[tid]*b[tid]; tid += blockDim.x*gridDim.x; } cache[cacheIndex]=temp; __syncthreads(); //对于归约运算来说,以下代码要求threadPerBLock必须`为2的指数 int i=blockDim.x/2; while(i != 0) { if(cacheIndex<i) cache[cacheIndex] += cache[cacheIndex+i]; __syncthreads(); i /=2; } if(cacheIndex==0) { c[blockIdx.x]=cache[0]; } // printf("c[%d] = %lf\n",blockIdx.x,c[blockIdx.x]); } double dot_cuda(int N, double *hx,double *hy, double *dx, double *dy ,double *dz, double *recive) { double result = 0; cudaMemcpy(dx, hx, N * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(dy, hy, N * sizeof(double), cudaMemcpyHostToDevice); dot_kernel<<<64,64>>>(N,dx,dy,dz); cudaMemcpy(recive,dz,N*sizeof(double),cudaMemcpyDeviceToHost); for(int i=0;i<N;i++) { result +=recive[i]; } return result; } __global__ void spmv_kernel(const int m,int *row_ptr,int *col_idx,double *A_val,double *x_val,double *y_val) { int tid = blockDim.x * blockIdx.x +threadIdx.x; if (tid < m) { double temp = 0; int begin_row = row_ptr[tid]; int end_row = row_ptr[tid+1]; for(int k = begin_row; k < end_row; k++) { temp+= A_val[k] *x_val[col_idx[k]]; } y_val[tid]+=temp; } } __global__ void alpha_spmv_kernel(const int alpha ,const int m,int *row_ptr,int *col_idx,double *A_val,double *x_val,double *y_val) { int tid = blockDim.x * blockIdx.x +threadIdx.x; if (tid < m) { double temp = 0; int begin_row = row_ptr[tid]; int end_row = row_ptr[tid+1]; for(int k = begin_row; k < end_row; k++) { temp+= A_val[k] *x_val[col_idx[k]]; } y_val[tid]+=temp * alpha; } } void spmv_cuda(SSS_MAT *A, SSS_VEC *x, SSS_VEC *y, int *d_row_ptr,int 
*d_col_idx,double *d_A_val,double *d_x_val,double *d_y_val) { //cuda spmv cudaMemcpy(d_row_ptr, A->row_ptr, (A->num_rows+1) * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_col_idx, A->col_idx, A->num_nnzs * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_A_val, A->val, A->num_nnzs * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_x_val, x->d, x->n * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_y_val, y->d, y->n * sizeof(double), cudaMemcpyHostToDevice); double time1=get_time(); spmv_kernel<<<64,64>>>(y->n,d_row_ptr,d_col_idx,d_A_val,d_x_val,d_y_val); cudaDeviceSynchronize(); double time2=get_time(); //printf("cuda_time = %lf\n",time3); cudaMemcpy(y->d,d_y_val, y->n * sizeof(double), cudaMemcpyDeviceToHost); } void alpha_spmv_cuda(const int alpha,SSS_MAT *A, SSS_VEC *x, SSS_VEC *y, int *d_row_ptr,int *d_col_idx,double *d_A_val,double *d_x_val,double *d_y_val) { //cuda spmv //int num = 0 ; cudaMemcpy(d_row_ptr, A->row_ptr, (A->num_rows+1) * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_col_idx, A->col_idx, A->num_nnzs * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_A_val, A->val, A->num_nnzs * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_x_val, x->d, x->n * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_y_val, y->d, y->n * sizeof(double), cudaMemcpyHostToDevice); alpha_spmv_kernel<<<64,64>>>(alpha,y->n,d_row_ptr,d_col_idx,d_A_val,d_x_val,d_y_val); //num+=1; cudaDeviceSynchronize(); cudaMemcpy(y->d,d_y_val, y->n * sizeof(double), cudaMemcpyDeviceToHost); //cudaDeviceSynchronize(); //printf("alpha_spmv\n"); // cudaFree(d_row_ptr); //cudaFree(d_col_idx); //cudaFree(d_A_val); //cudaFree(d_x_val); //cudaFree(d_y_val); }
901b6bcdc55e91d82327b8ba143d613853233ea7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <contract.h> #include <constants.h> #include <device_opts_inline.h> #include <utils.h> #include <stdio.h> using namespace contract; // ==================================================== // // !!!!!!! for now the code will work only with 100 eigenVectors // !!!!!!! for now the code will work only with submatrix side 25 ==> 25x25=625 threads #define BLOCK_SIZE 25 #define NSIZE 100 //====================================================// __global__ void calculate_loop_unity_kernel_float(float2* out, hipTextureObject_t texPropDiag, hipTextureObject_t texMom, float2* tmp){ #define FLOAT2 float2 #define FLOAT float #define FETCH_FLOAT2 fetch_float2 #include <calculate_loop_unity_core.h> #undef FLOAT2 #undef FLOAT #undef FETCH_FLOAT2 } //================================================// __global__ void calculate_loop_unity_kernel_double(double2* out, hipTextureObject_t texPropDiag, hipTextureObject_t texMom, double2* tmp){ #define FLOAT2 double2 #define FLOAT double #define FETCH_FLOAT2 fetch_double2 #include <calculate_loop_unity_core.h> #undef FLOAT2 #undef FLOAT #undef FETCH_FLOAT2 } //===================================================// template<typename Float2, typename Float> static void calculate_loop_unity_kernel(hipTextureObject_t texPropDiag, hipTextureObject_t texMom, int Nt, Float* loop){ int numBlocks = Nt * 4; // for spin combinations on the diagonal dim3 blockDim(BLOCK_SIZE,BLOCK_SIZE,1); dim3 gridDim(numBlocks,1,1); Float *h_loop = NULL; h_loop = (Float*) malloc(numBlocks*2*sizeof(Float)); if(h_loop == NULL) ABORT("Error allocating memory\n"); Float *d_loop = NULL; hipMalloc((void**)&d_loop, numBlocks*2*sizeof(Float)); CHECK_CUDA_ERROR(); Float *tmp = NULL; hipMalloc((void**)&tmp, numBlocks*NSIZE*NSIZE*2*sizeof(Float)); CHECK_CUDA_ERROR(); //+++++++++++++ if( typeid(Float2) == typeid(float2) ){ hipLaunchKernelGGL(( calculate_loop_unity_kernel_float), 
dim3(gridDim),dim3(blockDim), 0, 0, (float2*) d_loop, texPropDiag, texMom, (float2*) tmp); } else if ( typeid(Float2) == typeid(double2) ){ hipLaunchKernelGGL(( calculate_loop_unity_kernel_double), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) d_loop, texPropDiag, texMom, (double2*) tmp); } else ABORT("Something fishy is happening\n"); //+++++++++++++ hipMemcpy(h_loop, d_loop, numBlocks*2*sizeof(Float), hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(); // Float *h_loop_reduced = (Float*) calloc(Nt*2,sizeof(Float)); // if(h_loop_reduced == NULL) ABORT("Error allocating memory for reduction\n"); memset(loop,0,Nt*2*sizeof(Float)); for(int it = 0 ; it < Nt ; it++) for(int is = 0 ; is < 4 ; is++){ loop[it*2+0] += h_loop[it*4*2+is*2+0]; loop[it*2+1] += h_loop[it*4*2+is*2+1]; } free(h_loop); hipFree(d_loop); hipFree(tmp); CHECK_CUDA_ERROR(); } //=================================================// void contract::run_ContractLoopUnity(hipTextureObject_t texPropDiag, hipTextureObject_t texMom, int Nt, void* loop, PRECISION prec){ if(prec == SINGLE){ calculate_loop_unity_kernel<float2,float>(texPropDiag, texMom, Nt, (float*) loop); } else if (prec == DOUBLE){ calculate_loop_unity_kernel<double2,double>(texPropDiag, texMom, Nt, (double*) loop); } else{ ABORT("Error: this precision in not implemented"); } }
901b6bcdc55e91d82327b8ba143d613853233ea7.cu
#include <contract.h> #include <constants.h> #include <device_opts_inline.h> #include <utils.h> #include <stdio.h> using namespace contract; // ==================================================== // // !!!!!!! for now the code will work only with 100 eigenVectors // !!!!!!! for now the code will work only with submatrix side 25 ==> 25x25=625 threads #define BLOCK_SIZE 25 #define NSIZE 100 //====================================================// __global__ void calculate_loop_unity_kernel_float(float2* out, cudaTextureObject_t texPropDiag, cudaTextureObject_t texMom, float2* tmp){ #define FLOAT2 float2 #define FLOAT float #define FETCH_FLOAT2 fetch_float2 #include <calculate_loop_unity_core.h> #undef FLOAT2 #undef FLOAT #undef FETCH_FLOAT2 } //================================================// __global__ void calculate_loop_unity_kernel_double(double2* out, cudaTextureObject_t texPropDiag, cudaTextureObject_t texMom, double2* tmp){ #define FLOAT2 double2 #define FLOAT double #define FETCH_FLOAT2 fetch_double2 #include <calculate_loop_unity_core.h> #undef FLOAT2 #undef FLOAT #undef FETCH_FLOAT2 } //===================================================// template<typename Float2, typename Float> static void calculate_loop_unity_kernel(cudaTextureObject_t texPropDiag, cudaTextureObject_t texMom, int Nt, Float* loop){ int numBlocks = Nt * 4; // for spin combinations on the diagonal dim3 blockDim(BLOCK_SIZE,BLOCK_SIZE,1); dim3 gridDim(numBlocks,1,1); Float *h_loop = NULL; h_loop = (Float*) malloc(numBlocks*2*sizeof(Float)); if(h_loop == NULL) ABORT("Error allocating memory\n"); Float *d_loop = NULL; cudaMalloc((void**)&d_loop, numBlocks*2*sizeof(Float)); CHECK_CUDA_ERROR(); Float *tmp = NULL; cudaMalloc((void**)&tmp, numBlocks*NSIZE*NSIZE*2*sizeof(Float)); CHECK_CUDA_ERROR(); //+++++++++++++ if( typeid(Float2) == typeid(float2) ){ calculate_loop_unity_kernel_float<<<gridDim,blockDim>>>((float2*) d_loop, texPropDiag, texMom, (float2*) tmp); } else if ( typeid(Float2) == 
typeid(double2) ){ calculate_loop_unity_kernel_double<<<gridDim,blockDim>>>((double2*) d_loop, texPropDiag, texMom, (double2*) tmp); } else ABORT("Something fishy is happening\n"); //+++++++++++++ cudaMemcpy(h_loop, d_loop, numBlocks*2*sizeof(Float), cudaMemcpyDeviceToHost); CHECK_CUDA_ERROR(); // Float *h_loop_reduced = (Float*) calloc(Nt*2,sizeof(Float)); // if(h_loop_reduced == NULL) ABORT("Error allocating memory for reduction\n"); memset(loop,0,Nt*2*sizeof(Float)); for(int it = 0 ; it < Nt ; it++) for(int is = 0 ; is < 4 ; is++){ loop[it*2+0] += h_loop[it*4*2+is*2+0]; loop[it*2+1] += h_loop[it*4*2+is*2+1]; } free(h_loop); cudaFree(d_loop); cudaFree(tmp); CHECK_CUDA_ERROR(); } //=================================================// void contract::run_ContractLoopUnity(cudaTextureObject_t texPropDiag, cudaTextureObject_t texMom, int Nt, void* loop, PRECISION prec){ if(prec == SINGLE){ calculate_loop_unity_kernel<float2,float>(texPropDiag, texMom, Nt, (float*) loop); } else if (prec == DOUBLE){ calculate_loop_unity_kernel<double2,double>(texPropDiag, texMom, Nt, (double*) loop); } else{ ABORT("Error: this precision in not implemented"); } }
5a5c97c92562b49bbedfbbcc518540cca45c0832.hip
// !!! This is a file automatically generated by hipify!!! // Added by Karel Adamek #ifndef MSD_PLANE_KERNEL_H_ #define MSD_PLANE_KERNEL_H_ #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "headers/params.h" __global__ void MSD_GPU(float2 const* __restrict__ d_input, float *d_output) { __shared__ float Ms[WARP * MSD_WARPS_PER_BLOCK]; __shared__ float Ss[WARP * MSD_WARPS_PER_BLOCK]; int warp_id, local_id, pos; float2 x; float M; float S; float j; float ftemp; local_id = threadIdx.x & ( WARP - 1 ); warp_id = threadIdx.x >> 5; //---------------------------------------------- //---- Calculating of streaming mean and sum of squares pos = blockIdx.x * MSD_WARPS_PER_BLOCK * WARP * MSD_ELEM_PER_THREAD + warp_id * WARP * MSD_ELEM_PER_THREAD + local_id; x = __ldg(&d_input[pos]); M = x.x; S = 0; j = 1.0f; j = j + 1.0f; M = M + x.y; ftemp = ( j * x.y - M ); S = S + 1.0f / ( j * ( j - 1.0f ) ) * ftemp * ftemp; for (int i = 1; i < MSD_ELEM_PER_THREAD; i++) { pos = pos + WARP; x = __ldg(&d_input[pos]); j = j + 1.0f; M = M + x.x; ftemp = ( j * x.x - M ); S = S + 1.0f / ( j * ( j - 1.0f ) ) * ftemp * ftemp; j = j + 1.0f; M = M + x.y; ftemp = ( j * x.y - M ); S = S + 1.0f / ( j * ( j - 1.0f ) ) * ftemp * ftemp; } Ms[threadIdx.x] = M; Ss[threadIdx.x] = S; __syncthreads(); // now all threads had saved their work, reduction follows // first we must load initial values //j=2*MSD_ELEM_PER_THREAD; // value of j is preserved during kernel's execution for (int i = ( blockDim.x >> 1 ); i > HALF_WARP; i = i >> 1) { if (threadIdx.x < i) { j = j * 2; ftemp = ( M - Ms[i + threadIdx.x] ); S = S + Ss[i + threadIdx.x] + ( 1.0f / j ) * ftemp * ftemp; M = M + Ms[i + threadIdx.x]; Ms[threadIdx.x] = M; Ss[threadIdx.x] = S; } __syncthreads(); } // by now we should have only 32 partial results. 
shuffle reduction follows for (int q = HALF_WARP; q > 0; q = q >> 1) { j = j * 2; ftemp = ( M - __shfl_down(M, q) ); S = S + __shfl_down(S, q) + ( 1.0f / j ) * ftemp * ftemp; M = M + __shfl_down(M, q); } //---------------------------------------------- //---- Writing data if (warp_id == 0 && threadIdx.x == 0) { d_output[2 * blockIdx.x] = M; d_output[2 * blockIdx.x + 1] = S; } } __global__ void MSD_GPU_remainder(float const* __restrict__ d_input, float *d_output, int remainder) { __shared__ float Ms[WARP * MSD_WARPS_PER_BLOCK]; __shared__ float Ss[WARP * MSD_WARPS_PER_BLOCK]; __shared__ float js[WARP * MSD_WARPS_PER_BLOCK]; int warp_id, pos; float x; float M; float S; float j, jv; float ftemp; warp_id = threadIdx.x >> 5; //---------------------------------------------- //---- Calculating of streaming mean and sum of squares pos = threadIdx.x; if (remainder > blockDim.x) { M = __ldg(&d_input[pos]); S = 0; j = 1.0f; pos = pos + blockDim.x; while (pos < remainder) { x = __ldg(&d_input[pos]); j = j + 1.0f; M = M + x; ftemp = ( j * x - M ); S = S + 1.0f / ( j * ( j - 1.0f ) ) * ftemp * ftemp; pos = pos + blockDim.x; } Ms[threadIdx.x] = M; Ss[threadIdx.x] = S; js[threadIdx.x] = j; __syncthreads(); // first we must load initial values for (int i = ( blockDim.x >> 1 ); i > HALF_WARP; i = i >> 1) { if (threadIdx.x < i) { jv = js[i + threadIdx.x]; ftemp = ( jv / j * M - Ms[i + threadIdx.x] ); S = S + Ss[i + threadIdx.x] + ( j / ( jv * ( j + jv ) ) ) * ftemp * ftemp; M = M + Ms[i + threadIdx.x]; j = j + jv; Ms[threadIdx.x] = M; Ss[threadIdx.x] = S; js[threadIdx.x] = j; } __syncthreads(); } // by now we should have only 32 partial results. 
shuffle reduction follows for (int q = HALF_WARP; q > 0; q = q >> 1) { jv = __shfl_down(j, q); ftemp = ( jv / j * M - __shfl_down(M, q) ); S = S + __shfl_down(S, q) + ( j / ( jv * ( j + jv ) ) ) * ftemp * ftemp; M = M + __shfl_down(M, q); j = j + jv; } } else { if (threadIdx.x == 0) { // This assumes remainder to be small < 32 pos = 0; M = __ldg(&d_input[pos]); S = 0; j = 1.0f; for (pos = 1; pos < remainder; pos++) { x = __ldg(&d_input[pos]); j = j + 1.0f; M = M + x; ftemp = ( j * x - M ); S = S + 1.0f / ( j * ( j - 1.0f ) ) * ftemp * ftemp; } } } //---------------------------------------------- //---- Writing data if (warp_id == 0 && threadIdx.x == 0) { d_output[0] = M; d_output[1] = S; d_output[2] = j; } } __global__ void MSD_GPU_final(float *d_input, float *d_output, int size, int tail, float nElements) { __shared__ float Ms[WARP * MSD_WARPS_PER_BLOCK]; __shared__ float Ss[WARP * MSD_WARPS_PER_BLOCK]; __shared__ float js[WARP * MSD_WARPS_PER_BLOCK]; int warp_id, pos; float M; float S; float j, jv; float ftemp; warp_id = threadIdx.x >> 5; //---------------------------------------------- //---- Calculating of streaming mean and sum of squares pos = threadIdx.x; if (size > blockDim.x) { M = d_input[2 * pos]; S = d_input[2 * pos + 1]; j = 2 * WARP * MSD_ELEM_PER_THREAD * MSD_WARPS_PER_BLOCK; jv = 2 * WARP * MSD_ELEM_PER_THREAD * MSD_WARPS_PER_BLOCK; pos = pos + blockDim.x; while (pos < size) { ftemp = ( jv / j * M - d_input[2 * pos] ); S = S + d_input[2 * pos + 1] + ( j / ( jv * ( j + jv ) ) ) * ftemp * ftemp; M = M + d_input[2 * pos]; j = j + jv; pos = pos + blockDim.x; } __syncthreads(); Ms[threadIdx.x] = M; Ss[threadIdx.x] = S; js[threadIdx.x] = j; // now all threads had saved their work, reduction follows // first we must load initial values for (int i = ( blockDim.x >> 1 ); i > HALF_WARP; i = i >> 1) { if (threadIdx.x < i) { jv = js[i + threadIdx.x]; ftemp = ( jv / j * M - Ms[i + threadIdx.x] ); S = S + Ss[i + threadIdx.x] + ( j / ( jv * ( j + jv ) ) ) * ftemp 
* ftemp; M = M + Ms[i + threadIdx.x]; j = j + jv; Ms[threadIdx.x] = M; Ss[threadIdx.x] = S; js[threadIdx.x] = j; } __syncthreads(); } // by now we should have only 32 partial results. shuffle reduction follows for (int q = HALF_WARP; q > 0; q = q >> 1) { jv = __shfl_down(j, q); ftemp = ( jv / j * M - __shfl_down(M, q) ); S = S + __shfl_down(S, q) + ( j / ( jv * ( j + jv ) ) ) * ftemp * ftemp; M = M + __shfl_down(M, q); j = j + jv; } if (tail > 0 && threadIdx.x == 0) { jv = d_input[2 * size + 2]; ftemp = ( jv / j * M - d_input[2 * size] ); S = S + d_input[2 * size + 1] + ( j / ( jv * ( j + jv ) ) ) * ftemp * ftemp; M = M + d_input[2 * size]; j = j + jv; } } else { if (threadIdx.x == 0) { pos = 0; M = d_input[2 * pos]; S = d_input[2 * pos + 1]; j = 2 * WARP * MSD_ELEM_PER_THREAD * MSD_WARPS_PER_BLOCK; for (pos = 1; pos < size; pos++) { j = j * 2; ftemp = ( M - d_input[2 * pos] ); S = S + d_input[2 * pos + 1] + ( 1.0f / j ) * ftemp * ftemp; M = M + d_input[2 * pos]; } if (tail > 0) { jv = d_input[2 * size + 2]; ftemp = ( jv / j * M - d_input[2 * size] ); S = S + d_input[2 * size + 1] + ( j / ( jv * ( j + jv ) ) ) * ftemp * ftemp; M = M + d_input[2 * size]; j = j + jv; } } } //---------------------------------------------- //---- Writing data if (warp_id == 0 && threadIdx.x == 0) { d_output[0] = M / nElements; d_output[1] = sqrt(S / nElements); d_output[2] = nElements; } } #endif
5a5c97c92562b49bbedfbbcc518540cca45c0832.cu
// Added by Karel Adamek #ifndef MSD_PLANE_KERNEL_H_ #define MSD_PLANE_KERNEL_H_ #include <cuda.h> #include <cuda_runtime.h> #include "headers/params.h" __global__ void MSD_GPU(float2 const* __restrict__ d_input, float *d_output) { __shared__ float Ms[WARP * MSD_WARPS_PER_BLOCK]; __shared__ float Ss[WARP * MSD_WARPS_PER_BLOCK]; int warp_id, local_id, pos; float2 x; float M; float S; float j; float ftemp; local_id = threadIdx.x & ( WARP - 1 ); warp_id = threadIdx.x >> 5; //---------------------------------------------- //---- Calculating of streaming mean and sum of squares pos = blockIdx.x * MSD_WARPS_PER_BLOCK * WARP * MSD_ELEM_PER_THREAD + warp_id * WARP * MSD_ELEM_PER_THREAD + local_id; x = __ldg(&d_input[pos]); M = x.x; S = 0; j = 1.0f; j = j + 1.0f; M = M + x.y; ftemp = ( j * x.y - M ); S = S + 1.0f / ( j * ( j - 1.0f ) ) * ftemp * ftemp; for (int i = 1; i < MSD_ELEM_PER_THREAD; i++) { pos = pos + WARP; x = __ldg(&d_input[pos]); j = j + 1.0f; M = M + x.x; ftemp = ( j * x.x - M ); S = S + 1.0f / ( j * ( j - 1.0f ) ) * ftemp * ftemp; j = j + 1.0f; M = M + x.y; ftemp = ( j * x.y - M ); S = S + 1.0f / ( j * ( j - 1.0f ) ) * ftemp * ftemp; } Ms[threadIdx.x] = M; Ss[threadIdx.x] = S; __syncthreads(); // now all threads had saved their work, reduction follows // first we must load initial values //j=2*MSD_ELEM_PER_THREAD; // value of j is preserved during kernel's execution for (int i = ( blockDim.x >> 1 ); i > HALF_WARP; i = i >> 1) { if (threadIdx.x < i) { j = j * 2; ftemp = ( M - Ms[i + threadIdx.x] ); S = S + Ss[i + threadIdx.x] + ( 1.0f / j ) * ftemp * ftemp; M = M + Ms[i + threadIdx.x]; Ms[threadIdx.x] = M; Ss[threadIdx.x] = S; } __syncthreads(); } // by now we should have only 32 partial results. 
shuffle reduction follows for (int q = HALF_WARP; q > 0; q = q >> 1) { j = j * 2; ftemp = ( M - __shfl_down(M, q) ); S = S + __shfl_down(S, q) + ( 1.0f / j ) * ftemp * ftemp; M = M + __shfl_down(M, q); } //---------------------------------------------- //---- Writing data if (warp_id == 0 && threadIdx.x == 0) { d_output[2 * blockIdx.x] = M; d_output[2 * blockIdx.x + 1] = S; } } __global__ void MSD_GPU_remainder(float const* __restrict__ d_input, float *d_output, int remainder) { __shared__ float Ms[WARP * MSD_WARPS_PER_BLOCK]; __shared__ float Ss[WARP * MSD_WARPS_PER_BLOCK]; __shared__ float js[WARP * MSD_WARPS_PER_BLOCK]; int warp_id, pos; float x; float M; float S; float j, jv; float ftemp; warp_id = threadIdx.x >> 5; //---------------------------------------------- //---- Calculating of streaming mean and sum of squares pos = threadIdx.x; if (remainder > blockDim.x) { M = __ldg(&d_input[pos]); S = 0; j = 1.0f; pos = pos + blockDim.x; while (pos < remainder) { x = __ldg(&d_input[pos]); j = j + 1.0f; M = M + x; ftemp = ( j * x - M ); S = S + 1.0f / ( j * ( j - 1.0f ) ) * ftemp * ftemp; pos = pos + blockDim.x; } Ms[threadIdx.x] = M; Ss[threadIdx.x] = S; js[threadIdx.x] = j; __syncthreads(); // first we must load initial values for (int i = ( blockDim.x >> 1 ); i > HALF_WARP; i = i >> 1) { if (threadIdx.x < i) { jv = js[i + threadIdx.x]; ftemp = ( jv / j * M - Ms[i + threadIdx.x] ); S = S + Ss[i + threadIdx.x] + ( j / ( jv * ( j + jv ) ) ) * ftemp * ftemp; M = M + Ms[i + threadIdx.x]; j = j + jv; Ms[threadIdx.x] = M; Ss[threadIdx.x] = S; js[threadIdx.x] = j; } __syncthreads(); } // by now we should have only 32 partial results. 
shuffle reduction follows for (int q = HALF_WARP; q > 0; q = q >> 1) { jv = __shfl_down(j, q); ftemp = ( jv / j * M - __shfl_down(M, q) ); S = S + __shfl_down(S, q) + ( j / ( jv * ( j + jv ) ) ) * ftemp * ftemp; M = M + __shfl_down(M, q); j = j + jv; } } else { if (threadIdx.x == 0) { // This assumes remainder to be small < 32 pos = 0; M = __ldg(&d_input[pos]); S = 0; j = 1.0f; for (pos = 1; pos < remainder; pos++) { x = __ldg(&d_input[pos]); j = j + 1.0f; M = M + x; ftemp = ( j * x - M ); S = S + 1.0f / ( j * ( j - 1.0f ) ) * ftemp * ftemp; } } } //---------------------------------------------- //---- Writing data if (warp_id == 0 && threadIdx.x == 0) { d_output[0] = M; d_output[1] = S; d_output[2] = j; } } __global__ void MSD_GPU_final(float *d_input, float *d_output, int size, int tail, float nElements) { __shared__ float Ms[WARP * MSD_WARPS_PER_BLOCK]; __shared__ float Ss[WARP * MSD_WARPS_PER_BLOCK]; __shared__ float js[WARP * MSD_WARPS_PER_BLOCK]; int warp_id, pos; float M; float S; float j, jv; float ftemp; warp_id = threadIdx.x >> 5; //---------------------------------------------- //---- Calculating of streaming mean and sum of squares pos = threadIdx.x; if (size > blockDim.x) { M = d_input[2 * pos]; S = d_input[2 * pos + 1]; j = 2 * WARP * MSD_ELEM_PER_THREAD * MSD_WARPS_PER_BLOCK; jv = 2 * WARP * MSD_ELEM_PER_THREAD * MSD_WARPS_PER_BLOCK; pos = pos + blockDim.x; while (pos < size) { ftemp = ( jv / j * M - d_input[2 * pos] ); S = S + d_input[2 * pos + 1] + ( j / ( jv * ( j + jv ) ) ) * ftemp * ftemp; M = M + d_input[2 * pos]; j = j + jv; pos = pos + blockDim.x; } __syncthreads(); Ms[threadIdx.x] = M; Ss[threadIdx.x] = S; js[threadIdx.x] = j; // now all threads had saved their work, reduction follows // first we must load initial values for (int i = ( blockDim.x >> 1 ); i > HALF_WARP; i = i >> 1) { if (threadIdx.x < i) { jv = js[i + threadIdx.x]; ftemp = ( jv / j * M - Ms[i + threadIdx.x] ); S = S + Ss[i + threadIdx.x] + ( j / ( jv * ( j + jv ) ) ) * ftemp 
* ftemp; M = M + Ms[i + threadIdx.x]; j = j + jv; Ms[threadIdx.x] = M; Ss[threadIdx.x] = S; js[threadIdx.x] = j; } __syncthreads(); } // by now we should have only 32 partial results. shuffle reduction follows for (int q = HALF_WARP; q > 0; q = q >> 1) { jv = __shfl_down(j, q); ftemp = ( jv / j * M - __shfl_down(M, q) ); S = S + __shfl_down(S, q) + ( j / ( jv * ( j + jv ) ) ) * ftemp * ftemp; M = M + __shfl_down(M, q); j = j + jv; } if (tail > 0 && threadIdx.x == 0) { jv = d_input[2 * size + 2]; ftemp = ( jv / j * M - d_input[2 * size] ); S = S + d_input[2 * size + 1] + ( j / ( jv * ( j + jv ) ) ) * ftemp * ftemp; M = M + d_input[2 * size]; j = j + jv; } } else { if (threadIdx.x == 0) { pos = 0; M = d_input[2 * pos]; S = d_input[2 * pos + 1]; j = 2 * WARP * MSD_ELEM_PER_THREAD * MSD_WARPS_PER_BLOCK; for (pos = 1; pos < size; pos++) { j = j * 2; ftemp = ( M - d_input[2 * pos] ); S = S + d_input[2 * pos + 1] + ( 1.0f / j ) * ftemp * ftemp; M = M + d_input[2 * pos]; } if (tail > 0) { jv = d_input[2 * size + 2]; ftemp = ( jv / j * M - d_input[2 * size] ); S = S + d_input[2 * size + 1] + ( j / ( jv * ( j + jv ) ) ) * ftemp * ftemp; M = M + d_input[2 * size]; j = j + jv; } } } //---------------------------------------------- //---- Writing data if (warp_id == 0 && threadIdx.x == 0) { d_output[0] = M / nElements; d_output[1] = sqrt(S / nElements); d_output[2] = nElements; } } #endif
3170661b4c180d42efd580af86e52439fb879073.hip
// !!! This is a file automatically generated by hipify!!! /*! * Copyright 2021 by XGBoost Contributors */ #include <thrust/scan.h> #include <hipcub/hipcub.hpp> #include <cassert> #include <limits> #include <memory> #include <utility> #include <tuple> #include "rabit/rabit.h" #include "xgboost/span.h" #include "xgboost/data.h" #include "auc.h" #include "../common/device_helpers.cuh" #include "../common/ranking_utils.cuh" namespace xgboost { namespace metric { namespace { template <typename T> class Discard : public thrust::discard_iterator<T> { public: using value_type = T; // NOLINT }; struct GetWeightOp { common::Span<float const> weights; common::Span<size_t const> sorted_idx; __device__ float operator()(size_t i) const { return weights.empty() ? 1.0f : weights[sorted_idx[i]]; } }; } // namespace /** * A cache to GPU data to avoid reallocating memory. */ struct DeviceAUCCache { // Pair of FP/TP using Pair = thrust::pair<float, float>; // index sorted by prediction value dh::device_vector<size_t> sorted_idx; // track FP/TP for computation on trapesoid area dh::device_vector<Pair> fptp; // track FP_PREV/TP_PREV for computation on trapesoid area dh::device_vector<Pair> neg_pos; // index of unique prediction values. dh::device_vector<size_t> unique_idx; // p^T: transposed prediction matrix, used by MultiClassAUC dh::device_vector<float> predts_t; std::unique_ptr<dh::AllReducer> reducer; void Init(common::Span<float const> predts, bool is_multi, int32_t device) { if (sorted_idx.size() != predts.size()) { sorted_idx.resize(predts.size()); fptp.resize(sorted_idx.size()); unique_idx.resize(sorted_idx.size()); neg_pos.resize(sorted_idx.size()); if (is_multi) { predts_t.resize(sorted_idx.size()); reducer.reset(new dh::AllReducer); reducer->Init(rabit::GetRank()); } } } }; /** * The GPU implementation uses same calculation as CPU with a few more steps to distribute * work across threads: * * - Run scan to obtain TP/FP values, which are right coordinates of trapesoid. 
* - Find distinct prediction values and get the corresponding FP_PREV/TP_PREV value, * which are left coordinates of trapesoids. * - Reduce the scan array into 1 AUC value. */ std::tuple<float, float, float> GPUBinaryAUC(common::Span<float const> predts, MetaInfo const &info, int32_t device, std::shared_ptr<DeviceAUCCache> *p_cache) { auto& cache = *p_cache; if (!cache) { cache.reset(new DeviceAUCCache); } cache->Init(predts, false, device); auto labels = info.labels_.ConstDeviceSpan(); auto weights = info.weights_.ConstDeviceSpan(); dh::safe_cuda(hipSetDevice(device)); CHECK(!labels.empty()); CHECK_EQ(labels.size(), predts.size()); /** * Create sorted index for each class */ auto d_sorted_idx = dh::ToSpan(cache->sorted_idx); dh::ArgSort<false>(predts, d_sorted_idx); /** * Linear scan */ auto get_weight = GetWeightOp{weights, d_sorted_idx}; using Pair = thrust::pair<float, float>; auto get_fp_tp = [=]__device__(size_t i) { size_t idx = d_sorted_idx[i]; float label = labels[idx]; float w = get_weight(i); float fp = (1.0 - label) * w; float tp = label * w; return thrust::make_pair(fp, tp); }; // NOLINT auto d_fptp = dh::ToSpan(cache->fptp); dh::LaunchN(device, d_sorted_idx.size(), [=] __device__(size_t i) { d_fptp[i] = get_fp_tp(i); }); dh::XGBDeviceAllocator<char> alloc; auto d_unique_idx = dh::ToSpan(cache->unique_idx); dh::Iota(d_unique_idx, device); auto uni_key = dh::MakeTransformIterator<float>( thrust::make_counting_iterator(0), [=] __device__(size_t i) { return predts[d_sorted_idx[i]]; }); auto end_unique = thrust::unique_by_key_copy( thrust::hip::par(alloc), uni_key, uni_key + d_sorted_idx.size(), dh::tbegin(d_unique_idx), thrust::make_discard_iterator(), dh::tbegin(d_unique_idx)); d_unique_idx = d_unique_idx.subspan(0, end_unique.second - dh::tbegin(d_unique_idx)); dh::InclusiveScan( dh::tbegin(d_fptp), dh::tbegin(d_fptp), [=] __device__(Pair const &l, Pair const &r) { return thrust::make_pair(l.first + r.first, l.second + r.second); }, d_fptp.size()); auto 
d_neg_pos = dh::ToSpan(cache->neg_pos); // scatter unique negaive/positive values // shift to right by 1 with initial value being 0 dh::LaunchN(device, d_unique_idx.size(), [=] __device__(size_t i) { if (d_unique_idx[i] == 0) { // first unique index is 0 assert(i == 0); d_neg_pos[0] = {0, 0}; return; } d_neg_pos[d_unique_idx[i]] = d_fptp[d_unique_idx[i] - 1]; if (i == d_unique_idx.size() - 1) { // last one needs to be included, may override above assignment if the last // prediction value is distinct from previous one. d_neg_pos.back() = d_fptp[d_unique_idx[i] - 1]; return; } }); auto in = dh::MakeTransformIterator<float>( thrust::make_counting_iterator(0), [=] __device__(size_t i) { float fp, tp; float fp_prev, tp_prev; if (i == 0) { // handle the last element thrust::tie(fp, tp) = d_fptp.back(); thrust::tie(fp_prev, tp_prev) = d_neg_pos[d_unique_idx.back()]; } else { thrust::tie(fp, tp) = d_fptp[d_unique_idx[i] - 1]; thrust::tie(fp_prev, tp_prev) = d_neg_pos[d_unique_idx[i - 1]]; } return TrapesoidArea(fp_prev, fp, tp_prev, tp); }); Pair last = cache->fptp.back(); float auc = thrust::reduce(thrust::hip::par(alloc), in, in + d_unique_idx.size()); return std::make_tuple(last.first, last.second, auc); } void Transpose(common::Span<float const> in, common::Span<float> out, size_t m, size_t n, int32_t device) { CHECK_EQ(in.size(), out.size()); CHECK_EQ(in.size(), m * n); dh::LaunchN(device, in.size(), [=] __device__(size_t i) { size_t col = i / m; size_t row = i % m; size_t idx = row * n + col; out[i] = in[idx]; }); } /** * Last index of a group in a CSR style of index pointer. */ template <typename Idx> XGBOOST_DEVICE size_t LastOf(size_t group, common::Span<Idx> indptr) { return indptr[group + 1] - 1; } /** * MultiClass implementation is similar to binary classification, except we need to split * up each class in all kernels. 
*/ float GPUMultiClassAUCOVR(common::Span<float const> predts, MetaInfo const &info, int32_t device, std::shared_ptr<DeviceAUCCache>* p_cache) { auto& cache = *p_cache; if (!cache) { cache.reset(new DeviceAUCCache); } cache->Init(predts, true, device); auto labels = info.labels_.ConstDeviceSpan(); auto weights = info.weights_.ConstDeviceSpan(); size_t n_samples = labels.size(); size_t n_classes = predts.size() / labels.size(); CHECK_NE(n_classes, 0); /** * Create sorted index for each class */ auto d_predts_t = dh::ToSpan(cache->predts_t); Transpose(predts, d_predts_t, n_samples, n_classes, device); dh::TemporaryArray<uint32_t> class_ptr(n_classes + 1, 0); auto d_class_ptr = dh::ToSpan(class_ptr); dh::LaunchN(device, n_classes + 1, [=]__device__(size_t i) { d_class_ptr[i] = i * n_samples; }); // no out-of-place sort for thrust, cub sort doesn't accept general iterator. So can't // use transform iterator in sorting. auto d_sorted_idx = dh::ToSpan(cache->sorted_idx); dh::SegmentedArgSort<false>(d_predts_t, d_class_ptr, d_sorted_idx); /** * Linear scan */ dh::caching_device_vector<float> d_auc(n_classes, 0); auto s_d_auc = dh::ToSpan(d_auc); auto get_weight = GetWeightOp{weights, d_sorted_idx}; using Pair = thrust::pair<float, float>; auto d_fptp = dh::ToSpan(cache->fptp); auto get_fp_tp = [=]__device__(size_t i) { size_t idx = d_sorted_idx[i]; size_t class_id = i / n_samples; // labels is a vector of size n_samples. 
float label = labels[idx % n_samples] == class_id; float w = get_weight(i % n_samples); float fp = (1.0 - label) * w; float tp = label * w; return thrust::make_pair(fp, tp); }; // NOLINT dh::LaunchN(device, d_sorted_idx.size(), [=] __device__(size_t i) { d_fptp[i] = get_fp_tp(i); }); /** * Handle duplicated predictions */ dh::XGBDeviceAllocator<char> alloc; auto d_unique_idx = dh::ToSpan(cache->unique_idx); dh::Iota(d_unique_idx, device); auto uni_key = dh::MakeTransformIterator<thrust::pair<uint32_t, float>>( thrust::make_counting_iterator(0), [=] __device__(size_t i) { uint32_t class_id = i / n_samples; float predt = d_predts_t[d_sorted_idx[i]]; return thrust::make_pair(class_id, predt); }); // unique values are sparse, so we need a CSR style indptr dh::TemporaryArray<uint32_t> unique_class_ptr(class_ptr.size() + 1); auto d_unique_class_ptr = dh::ToSpan(unique_class_ptr); auto n_uniques = dh::SegmentedUniqueByKey( thrust::hip::par(alloc), dh::tbegin(d_class_ptr), dh::tend(d_class_ptr), uni_key, uni_key + d_sorted_idx.size(), dh::tbegin(d_unique_idx), d_unique_class_ptr.data(), dh::tbegin(d_unique_idx), thrust::equal_to<thrust::pair<uint32_t, float>>{}); d_unique_idx = d_unique_idx.subspan(0, n_uniques); using Triple = thrust::tuple<uint32_t, float, float>; // expand to tuple to include class id auto fptp_it_in = dh::MakeTransformIterator<Triple>( thrust::make_counting_iterator(0), [=] __device__(size_t i) { uint32_t class_id = i / n_samples; return thrust::make_tuple(class_id, d_fptp[i].first, d_fptp[i].second); }); // shrink down to pair auto fptp_it_out = thrust::make_transform_output_iterator( dh::tbegin(d_fptp), [=] __device__(Triple const &t) { return thrust::make_pair(thrust::get<1>(t), thrust::get<2>(t)); }); dh::InclusiveScan( fptp_it_in, fptp_it_out, [=] __device__(Triple const &l, Triple const &r) { uint32_t l_cid = thrust::get<0>(l); uint32_t r_cid = thrust::get<0>(r); if (l_cid != r_cid) { return r; } return Triple(r_cid, // class_id thrust::get<1>(l) 
+ thrust::get<1>(r), // fp thrust::get<2>(l) + thrust::get<2>(r)); // tp }, d_fptp.size()); // scatter unique FP_PREV/TP_PREV values auto d_neg_pos = dh::ToSpan(cache->neg_pos); // When dataset is not empty, each class must have at least 1 (unique) sample // prediction, so no need to handle special case. dh::LaunchN(device, d_unique_idx.size(), [=]__device__(size_t i) { if (d_unique_idx[i] % n_samples == 0) { // first unique index is 0 assert(d_unique_idx[i] % n_samples == 0); d_neg_pos[d_unique_idx[i]] = {0, 0}; // class_id * n_samples = i return; } uint32_t class_id = d_unique_idx[i] / n_samples; d_neg_pos[d_unique_idx[i]] = d_fptp[d_unique_idx[i] - 1]; if (i == LastOf(class_id, d_unique_class_ptr)) { // last one needs to be included. size_t last = d_unique_idx[LastOf(class_id, d_unique_class_ptr)]; d_neg_pos[LastOf(class_id, d_class_ptr)] = d_fptp[last - 1]; return; } }); /** * Reduce the result for each class */ auto key_in = dh::MakeTransformIterator<uint32_t>( thrust::make_counting_iterator(0), [=] __device__(size_t i) { size_t class_id = d_unique_idx[i] / n_samples; return class_id; }); auto val_in = dh::MakeTransformIterator<float>( thrust::make_counting_iterator(0), [=] __device__(size_t i) { size_t class_id = d_unique_idx[i] / n_samples; float fp, tp; float fp_prev, tp_prev; if (i == d_unique_class_ptr[class_id]) { // first item is ignored, we use this thread to calculate the last item thrust::tie(fp, tp) = d_fptp[class_id * n_samples + (n_samples - 1)]; thrust::tie(fp_prev, tp_prev) = d_neg_pos[d_unique_idx[LastOf(class_id, d_unique_class_ptr)]]; } else { thrust::tie(fp, tp) = d_fptp[d_unique_idx[i] - 1]; thrust::tie(fp_prev, tp_prev) = d_neg_pos[d_unique_idx[i - 1]]; } float auc = TrapesoidArea(fp_prev, fp, tp_prev, tp); return auc; }); thrust::reduce_by_key(thrust::hip::par(alloc), key_in, key_in + d_unique_idx.size(), val_in, thrust::make_discard_iterator(), d_auc.begin()); /** * Scale the classes with number of samples for each class. 
*/ dh::TemporaryArray<float> resutls(n_classes * 4); auto d_results = dh::ToSpan(resutls); auto local_area = d_results.subspan(0, n_classes); auto fp = d_results.subspan(n_classes, n_classes); auto tp = d_results.subspan(2 * n_classes, n_classes); auto auc = d_results.subspan(3 * n_classes, n_classes); dh::LaunchN(device, n_classes, [=] __device__(size_t c) { auc[c] = s_d_auc[c]; auto last = d_fptp[n_samples * c + (n_samples - 1)]; fp[c] = last.first; tp[c] = last.second; local_area[c] = last.first * last.second; }); if (rabit::IsDistributed()) { cache->reducer->AllReduceSum(resutls.data().get(), resutls.data().get(), resutls.size()); } auto reduce_in = dh::MakeTransformIterator<thrust::pair<float, float>>( thrust::make_counting_iterator(0), [=] __device__(size_t i) { if (local_area[i] > 0) { return thrust::make_pair(auc[i] / local_area[i] * tp[i], tp[i]); } return thrust::make_pair(std::numeric_limits<float>::quiet_NaN(), 0.0f); }); float tp_sum; float auc_sum; thrust::tie(auc_sum, tp_sum) = thrust::reduce( thrust::hip::par(alloc), reduce_in, reduce_in + n_classes, thrust::make_pair(0.0f, 0.0f), [=] __device__(auto const &l, auto const &r) { return thrust::make_pair(l.first + r.first, l.second + r.second); }); if (tp_sum != 0 && !std::isnan(auc_sum)) { auc_sum /= tp_sum; } else { return std::numeric_limits<float>::quiet_NaN(); } return auc_sum; } namespace { struct RankScanItem { size_t idx; float predt; float w; bst_group_t group_id; }; } // anonymous namespace std::pair<float, uint32_t> GPURankingAUC(common::Span<float const> predts, MetaInfo const &info, int32_t device, std::shared_ptr<DeviceAUCCache> *p_cache) { auto& cache = *p_cache; if (!cache) { cache.reset(new DeviceAUCCache); } cache->Init(predts, false, device); dh::caching_device_vector<bst_group_t> group_ptr(info.group_ptr_); dh::XGBCachingDeviceAllocator<char> alloc; auto d_group_ptr = dh::ToSpan(group_ptr); /** * Validate the dataset */ auto check_it = dh::MakeTransformIterator<size_t>( 
thrust::make_counting_iterator(0), [=] __device__(size_t i) { return d_group_ptr[i + 1] - d_group_ptr[i]; }); size_t n_valid = thrust::count_if( thrust::hip::par(alloc), check_it, check_it + group_ptr.size() - 1, [=] __device__(size_t len) { return len >= 3; }); if (n_valid < info.group_ptr_.size() - 1) { InvalidGroupAUC(); } if (n_valid == 0) { return std::make_pair(0.0f, 0); } /** * Sort the labels */ auto d_labels = info.labels_.ConstDeviceSpan(); auto d_sorted_idx = dh::ToSpan(cache->sorted_idx); dh::SegmentedArgSort<false>(d_labels, d_group_ptr, d_sorted_idx); auto d_weights = info.weights_.ConstDeviceSpan(); dh::caching_device_vector<size_t> threads_group_ptr(group_ptr.size(), 0); auto d_threads_group_ptr = dh::ToSpan(threads_group_ptr); // Use max to represent triangle auto n_threads = common::SegmentedTrapezoidThreads( d_group_ptr, d_threads_group_ptr, std::numeric_limits<size_t>::max()); // get the coordinate in nested summation auto get_i_j = [=]__device__(size_t idx, size_t query_group_idx) { auto data_group_begin = d_group_ptr[query_group_idx]; size_t n_samples = d_group_ptr[query_group_idx + 1] - data_group_begin; auto thread_group_begin = d_threads_group_ptr[query_group_idx]; auto idx_in_thread_group = idx - thread_group_begin; size_t i, j; common::UnravelTrapeziodIdx(idx_in_thread_group, n_samples, &i, &j); // we use global index among all groups for sorted idx, so i, j should also be global // index. i += data_group_begin; j += data_group_begin; return thrust::make_pair(i, j); }; // NOLINT auto in = dh::MakeTransformIterator<RankScanItem>( thrust::make_counting_iterator(0), [=] __device__(size_t idx) { bst_group_t query_group_idx = dh::SegmentId(d_threads_group_ptr, idx); auto data_group_begin = d_group_ptr[query_group_idx]; size_t n_samples = d_group_ptr[query_group_idx + 1] - data_group_begin; if (n_samples < 3) { // at least 3 documents are required. 
return RankScanItem{idx, 0, 0, query_group_idx}; } size_t i, j; thrust::tie(i, j) = get_i_j(idx, query_group_idx); float predt = predts[d_sorted_idx[i]] - predts[d_sorted_idx[j]]; float w = common::Sqr(d_weights.empty() ? 1.0f : d_weights[query_group_idx]); if (predt > 0) { predt = 1.0; } else if (predt == 0) { predt = 0.5; } else { predt = 0; } predt *= w; return RankScanItem{idx, predt, w, query_group_idx}; }); dh::TemporaryArray<float> d_auc(group_ptr.size() - 1); auto s_d_auc = dh::ToSpan(d_auc); auto out = thrust::make_transform_output_iterator( Discard<RankScanItem>(), [=] __device__(RankScanItem const &item) -> RankScanItem { auto group_id = item.group_id; assert(group_id < d_group_ptr.size()); auto data_group_begin = d_group_ptr[group_id]; size_t n_samples = d_group_ptr[group_id + 1] - data_group_begin; // last item of current group if (item.idx == LastOf(group_id, d_threads_group_ptr)) { if (item.w > 0) { s_d_auc[group_id] = item.predt / item.w; } else { s_d_auc[group_id] = 0; } } return {}; // discard }); dh::InclusiveScan( in, out, [] __device__(RankScanItem const &l, RankScanItem const &r) { if (l.group_id != r.group_id) { return r; } return RankScanItem{r.idx, l.predt + r.predt, l.w + r.w, l.group_id}; }, n_threads); /** * Scale the AUC with number of items in each group. */ float auc = thrust::reduce(thrust::hip::par(alloc), dh::tbegin(s_d_auc), dh::tend(s_d_auc), 0.0f); return std::make_pair(auc, n_valid); } } // namespace metric } // namespace xgboost
3170661b4c180d42efd580af86e52439fb879073.cu
/*! * Copyright 2021 by XGBoost Contributors */ #include <thrust/scan.h> #include <cub/cub.cuh> #include <cassert> #include <limits> #include <memory> #include <utility> #include <tuple> #include "rabit/rabit.h" #include "xgboost/span.h" #include "xgboost/data.h" #include "auc.h" #include "../common/device_helpers.cuh" #include "../common/ranking_utils.cuh" namespace xgboost { namespace metric { namespace { template <typename T> class Discard : public thrust::discard_iterator<T> { public: using value_type = T; // NOLINT }; struct GetWeightOp { common::Span<float const> weights; common::Span<size_t const> sorted_idx; __device__ float operator()(size_t i) const { return weights.empty() ? 1.0f : weights[sorted_idx[i]]; } }; } // namespace /** * A cache to GPU data to avoid reallocating memory. */ struct DeviceAUCCache { // Pair of FP/TP using Pair = thrust::pair<float, float>; // index sorted by prediction value dh::device_vector<size_t> sorted_idx; // track FP/TP for computation on trapesoid area dh::device_vector<Pair> fptp; // track FP_PREV/TP_PREV for computation on trapesoid area dh::device_vector<Pair> neg_pos; // index of unique prediction values. dh::device_vector<size_t> unique_idx; // p^T: transposed prediction matrix, used by MultiClassAUC dh::device_vector<float> predts_t; std::unique_ptr<dh::AllReducer> reducer; void Init(common::Span<float const> predts, bool is_multi, int32_t device) { if (sorted_idx.size() != predts.size()) { sorted_idx.resize(predts.size()); fptp.resize(sorted_idx.size()); unique_idx.resize(sorted_idx.size()); neg_pos.resize(sorted_idx.size()); if (is_multi) { predts_t.resize(sorted_idx.size()); reducer.reset(new dh::AllReducer); reducer->Init(rabit::GetRank()); } } } }; /** * The GPU implementation uses same calculation as CPU with a few more steps to distribute * work across threads: * * - Run scan to obtain TP/FP values, which are right coordinates of trapesoid. 
* - Find distinct prediction values and get the corresponding FP_PREV/TP_PREV value, * which are left coordinates of trapesoids. * - Reduce the scan array into 1 AUC value. */ std::tuple<float, float, float> GPUBinaryAUC(common::Span<float const> predts, MetaInfo const &info, int32_t device, std::shared_ptr<DeviceAUCCache> *p_cache) { auto& cache = *p_cache; if (!cache) { cache.reset(new DeviceAUCCache); } cache->Init(predts, false, device); auto labels = info.labels_.ConstDeviceSpan(); auto weights = info.weights_.ConstDeviceSpan(); dh::safe_cuda(cudaSetDevice(device)); CHECK(!labels.empty()); CHECK_EQ(labels.size(), predts.size()); /** * Create sorted index for each class */ auto d_sorted_idx = dh::ToSpan(cache->sorted_idx); dh::ArgSort<false>(predts, d_sorted_idx); /** * Linear scan */ auto get_weight = GetWeightOp{weights, d_sorted_idx}; using Pair = thrust::pair<float, float>; auto get_fp_tp = [=]__device__(size_t i) { size_t idx = d_sorted_idx[i]; float label = labels[idx]; float w = get_weight(i); float fp = (1.0 - label) * w; float tp = label * w; return thrust::make_pair(fp, tp); }; // NOLINT auto d_fptp = dh::ToSpan(cache->fptp); dh::LaunchN(device, d_sorted_idx.size(), [=] __device__(size_t i) { d_fptp[i] = get_fp_tp(i); }); dh::XGBDeviceAllocator<char> alloc; auto d_unique_idx = dh::ToSpan(cache->unique_idx); dh::Iota(d_unique_idx, device); auto uni_key = dh::MakeTransformIterator<float>( thrust::make_counting_iterator(0), [=] __device__(size_t i) { return predts[d_sorted_idx[i]]; }); auto end_unique = thrust::unique_by_key_copy( thrust::cuda::par(alloc), uni_key, uni_key + d_sorted_idx.size(), dh::tbegin(d_unique_idx), thrust::make_discard_iterator(), dh::tbegin(d_unique_idx)); d_unique_idx = d_unique_idx.subspan(0, end_unique.second - dh::tbegin(d_unique_idx)); dh::InclusiveScan( dh::tbegin(d_fptp), dh::tbegin(d_fptp), [=] __device__(Pair const &l, Pair const &r) { return thrust::make_pair(l.first + r.first, l.second + r.second); }, d_fptp.size()); 
auto d_neg_pos = dh::ToSpan(cache->neg_pos); // scatter unique negaive/positive values // shift to right by 1 with initial value being 0 dh::LaunchN(device, d_unique_idx.size(), [=] __device__(size_t i) { if (d_unique_idx[i] == 0) { // first unique index is 0 assert(i == 0); d_neg_pos[0] = {0, 0}; return; } d_neg_pos[d_unique_idx[i]] = d_fptp[d_unique_idx[i] - 1]; if (i == d_unique_idx.size() - 1) { // last one needs to be included, may override above assignment if the last // prediction value is distinct from previous one. d_neg_pos.back() = d_fptp[d_unique_idx[i] - 1]; return; } }); auto in = dh::MakeTransformIterator<float>( thrust::make_counting_iterator(0), [=] __device__(size_t i) { float fp, tp; float fp_prev, tp_prev; if (i == 0) { // handle the last element thrust::tie(fp, tp) = d_fptp.back(); thrust::tie(fp_prev, tp_prev) = d_neg_pos[d_unique_idx.back()]; } else { thrust::tie(fp, tp) = d_fptp[d_unique_idx[i] - 1]; thrust::tie(fp_prev, tp_prev) = d_neg_pos[d_unique_idx[i - 1]]; } return TrapesoidArea(fp_prev, fp, tp_prev, tp); }); Pair last = cache->fptp.back(); float auc = thrust::reduce(thrust::cuda::par(alloc), in, in + d_unique_idx.size()); return std::make_tuple(last.first, last.second, auc); } void Transpose(common::Span<float const> in, common::Span<float> out, size_t m, size_t n, int32_t device) { CHECK_EQ(in.size(), out.size()); CHECK_EQ(in.size(), m * n); dh::LaunchN(device, in.size(), [=] __device__(size_t i) { size_t col = i / m; size_t row = i % m; size_t idx = row * n + col; out[i] = in[idx]; }); } /** * Last index of a group in a CSR style of index pointer. */ template <typename Idx> XGBOOST_DEVICE size_t LastOf(size_t group, common::Span<Idx> indptr) { return indptr[group + 1] - 1; } /** * MultiClass implementation is similar to binary classification, except we need to split * up each class in all kernels. 
*/ float GPUMultiClassAUCOVR(common::Span<float const> predts, MetaInfo const &info, int32_t device, std::shared_ptr<DeviceAUCCache>* p_cache) { auto& cache = *p_cache; if (!cache) { cache.reset(new DeviceAUCCache); } cache->Init(predts, true, device); auto labels = info.labels_.ConstDeviceSpan(); auto weights = info.weights_.ConstDeviceSpan(); size_t n_samples = labels.size(); size_t n_classes = predts.size() / labels.size(); CHECK_NE(n_classes, 0); /** * Create sorted index for each class */ auto d_predts_t = dh::ToSpan(cache->predts_t); Transpose(predts, d_predts_t, n_samples, n_classes, device); dh::TemporaryArray<uint32_t> class_ptr(n_classes + 1, 0); auto d_class_ptr = dh::ToSpan(class_ptr); dh::LaunchN(device, n_classes + 1, [=]__device__(size_t i) { d_class_ptr[i] = i * n_samples; }); // no out-of-place sort for thrust, cub sort doesn't accept general iterator. So can't // use transform iterator in sorting. auto d_sorted_idx = dh::ToSpan(cache->sorted_idx); dh::SegmentedArgSort<false>(d_predts_t, d_class_ptr, d_sorted_idx); /** * Linear scan */ dh::caching_device_vector<float> d_auc(n_classes, 0); auto s_d_auc = dh::ToSpan(d_auc); auto get_weight = GetWeightOp{weights, d_sorted_idx}; using Pair = thrust::pair<float, float>; auto d_fptp = dh::ToSpan(cache->fptp); auto get_fp_tp = [=]__device__(size_t i) { size_t idx = d_sorted_idx[i]; size_t class_id = i / n_samples; // labels is a vector of size n_samples. 
float label = labels[idx % n_samples] == class_id; float w = get_weight(i % n_samples); float fp = (1.0 - label) * w; float tp = label * w; return thrust::make_pair(fp, tp); }; // NOLINT dh::LaunchN(device, d_sorted_idx.size(), [=] __device__(size_t i) { d_fptp[i] = get_fp_tp(i); }); /** * Handle duplicated predictions */ dh::XGBDeviceAllocator<char> alloc; auto d_unique_idx = dh::ToSpan(cache->unique_idx); dh::Iota(d_unique_idx, device); auto uni_key = dh::MakeTransformIterator<thrust::pair<uint32_t, float>>( thrust::make_counting_iterator(0), [=] __device__(size_t i) { uint32_t class_id = i / n_samples; float predt = d_predts_t[d_sorted_idx[i]]; return thrust::make_pair(class_id, predt); }); // unique values are sparse, so we need a CSR style indptr dh::TemporaryArray<uint32_t> unique_class_ptr(class_ptr.size() + 1); auto d_unique_class_ptr = dh::ToSpan(unique_class_ptr); auto n_uniques = dh::SegmentedUniqueByKey( thrust::cuda::par(alloc), dh::tbegin(d_class_ptr), dh::tend(d_class_ptr), uni_key, uni_key + d_sorted_idx.size(), dh::tbegin(d_unique_idx), d_unique_class_ptr.data(), dh::tbegin(d_unique_idx), thrust::equal_to<thrust::pair<uint32_t, float>>{}); d_unique_idx = d_unique_idx.subspan(0, n_uniques); using Triple = thrust::tuple<uint32_t, float, float>; // expand to tuple to include class id auto fptp_it_in = dh::MakeTransformIterator<Triple>( thrust::make_counting_iterator(0), [=] __device__(size_t i) { uint32_t class_id = i / n_samples; return thrust::make_tuple(class_id, d_fptp[i].first, d_fptp[i].second); }); // shrink down to pair auto fptp_it_out = thrust::make_transform_output_iterator( dh::tbegin(d_fptp), [=] __device__(Triple const &t) { return thrust::make_pair(thrust::get<1>(t), thrust::get<2>(t)); }); dh::InclusiveScan( fptp_it_in, fptp_it_out, [=] __device__(Triple const &l, Triple const &r) { uint32_t l_cid = thrust::get<0>(l); uint32_t r_cid = thrust::get<0>(r); if (l_cid != r_cid) { return r; } return Triple(r_cid, // class_id 
thrust::get<1>(l) + thrust::get<1>(r), // fp thrust::get<2>(l) + thrust::get<2>(r)); // tp }, d_fptp.size()); // scatter unique FP_PREV/TP_PREV values auto d_neg_pos = dh::ToSpan(cache->neg_pos); // When dataset is not empty, each class must have at least 1 (unique) sample // prediction, so no need to handle special case. dh::LaunchN(device, d_unique_idx.size(), [=]__device__(size_t i) { if (d_unique_idx[i] % n_samples == 0) { // first unique index is 0 assert(d_unique_idx[i] % n_samples == 0); d_neg_pos[d_unique_idx[i]] = {0, 0}; // class_id * n_samples = i return; } uint32_t class_id = d_unique_idx[i] / n_samples; d_neg_pos[d_unique_idx[i]] = d_fptp[d_unique_idx[i] - 1]; if (i == LastOf(class_id, d_unique_class_ptr)) { // last one needs to be included. size_t last = d_unique_idx[LastOf(class_id, d_unique_class_ptr)]; d_neg_pos[LastOf(class_id, d_class_ptr)] = d_fptp[last - 1]; return; } }); /** * Reduce the result for each class */ auto key_in = dh::MakeTransformIterator<uint32_t>( thrust::make_counting_iterator(0), [=] __device__(size_t i) { size_t class_id = d_unique_idx[i] / n_samples; return class_id; }); auto val_in = dh::MakeTransformIterator<float>( thrust::make_counting_iterator(0), [=] __device__(size_t i) { size_t class_id = d_unique_idx[i] / n_samples; float fp, tp; float fp_prev, tp_prev; if (i == d_unique_class_ptr[class_id]) { // first item is ignored, we use this thread to calculate the last item thrust::tie(fp, tp) = d_fptp[class_id * n_samples + (n_samples - 1)]; thrust::tie(fp_prev, tp_prev) = d_neg_pos[d_unique_idx[LastOf(class_id, d_unique_class_ptr)]]; } else { thrust::tie(fp, tp) = d_fptp[d_unique_idx[i] - 1]; thrust::tie(fp_prev, tp_prev) = d_neg_pos[d_unique_idx[i - 1]]; } float auc = TrapesoidArea(fp_prev, fp, tp_prev, tp); return auc; }); thrust::reduce_by_key(thrust::cuda::par(alloc), key_in, key_in + d_unique_idx.size(), val_in, thrust::make_discard_iterator(), d_auc.begin()); /** * Scale the classes with number of samples for each 
class. */ dh::TemporaryArray<float> resutls(n_classes * 4); auto d_results = dh::ToSpan(resutls); auto local_area = d_results.subspan(0, n_classes); auto fp = d_results.subspan(n_classes, n_classes); auto tp = d_results.subspan(2 * n_classes, n_classes); auto auc = d_results.subspan(3 * n_classes, n_classes); dh::LaunchN(device, n_classes, [=] __device__(size_t c) { auc[c] = s_d_auc[c]; auto last = d_fptp[n_samples * c + (n_samples - 1)]; fp[c] = last.first; tp[c] = last.second; local_area[c] = last.first * last.second; }); if (rabit::IsDistributed()) { cache->reducer->AllReduceSum(resutls.data().get(), resutls.data().get(), resutls.size()); } auto reduce_in = dh::MakeTransformIterator<thrust::pair<float, float>>( thrust::make_counting_iterator(0), [=] __device__(size_t i) { if (local_area[i] > 0) { return thrust::make_pair(auc[i] / local_area[i] * tp[i], tp[i]); } return thrust::make_pair(std::numeric_limits<float>::quiet_NaN(), 0.0f); }); float tp_sum; float auc_sum; thrust::tie(auc_sum, tp_sum) = thrust::reduce( thrust::cuda::par(alloc), reduce_in, reduce_in + n_classes, thrust::make_pair(0.0f, 0.0f), [=] __device__(auto const &l, auto const &r) { return thrust::make_pair(l.first + r.first, l.second + r.second); }); if (tp_sum != 0 && !std::isnan(auc_sum)) { auc_sum /= tp_sum; } else { return std::numeric_limits<float>::quiet_NaN(); } return auc_sum; } namespace { struct RankScanItem { size_t idx; float predt; float w; bst_group_t group_id; }; } // anonymous namespace std::pair<float, uint32_t> GPURankingAUC(common::Span<float const> predts, MetaInfo const &info, int32_t device, std::shared_ptr<DeviceAUCCache> *p_cache) { auto& cache = *p_cache; if (!cache) { cache.reset(new DeviceAUCCache); } cache->Init(predts, false, device); dh::caching_device_vector<bst_group_t> group_ptr(info.group_ptr_); dh::XGBCachingDeviceAllocator<char> alloc; auto d_group_ptr = dh::ToSpan(group_ptr); /** * Validate the dataset */ auto check_it = dh::MakeTransformIterator<size_t>( 
thrust::make_counting_iterator(0), [=] __device__(size_t i) { return d_group_ptr[i + 1] - d_group_ptr[i]; }); size_t n_valid = thrust::count_if( thrust::cuda::par(alloc), check_it, check_it + group_ptr.size() - 1, [=] __device__(size_t len) { return len >= 3; }); if (n_valid < info.group_ptr_.size() - 1) { InvalidGroupAUC(); } if (n_valid == 0) { return std::make_pair(0.0f, 0); } /** * Sort the labels */ auto d_labels = info.labels_.ConstDeviceSpan(); auto d_sorted_idx = dh::ToSpan(cache->sorted_idx); dh::SegmentedArgSort<false>(d_labels, d_group_ptr, d_sorted_idx); auto d_weights = info.weights_.ConstDeviceSpan(); dh::caching_device_vector<size_t> threads_group_ptr(group_ptr.size(), 0); auto d_threads_group_ptr = dh::ToSpan(threads_group_ptr); // Use max to represent triangle auto n_threads = common::SegmentedTrapezoidThreads( d_group_ptr, d_threads_group_ptr, std::numeric_limits<size_t>::max()); // get the coordinate in nested summation auto get_i_j = [=]__device__(size_t idx, size_t query_group_idx) { auto data_group_begin = d_group_ptr[query_group_idx]; size_t n_samples = d_group_ptr[query_group_idx + 1] - data_group_begin; auto thread_group_begin = d_threads_group_ptr[query_group_idx]; auto idx_in_thread_group = idx - thread_group_begin; size_t i, j; common::UnravelTrapeziodIdx(idx_in_thread_group, n_samples, &i, &j); // we use global index among all groups for sorted idx, so i, j should also be global // index. i += data_group_begin; j += data_group_begin; return thrust::make_pair(i, j); }; // NOLINT auto in = dh::MakeTransformIterator<RankScanItem>( thrust::make_counting_iterator(0), [=] __device__(size_t idx) { bst_group_t query_group_idx = dh::SegmentId(d_threads_group_ptr, idx); auto data_group_begin = d_group_ptr[query_group_idx]; size_t n_samples = d_group_ptr[query_group_idx + 1] - data_group_begin; if (n_samples < 3) { // at least 3 documents are required. 
return RankScanItem{idx, 0, 0, query_group_idx}; } size_t i, j; thrust::tie(i, j) = get_i_j(idx, query_group_idx); float predt = predts[d_sorted_idx[i]] - predts[d_sorted_idx[j]]; float w = common::Sqr(d_weights.empty() ? 1.0f : d_weights[query_group_idx]); if (predt > 0) { predt = 1.0; } else if (predt == 0) { predt = 0.5; } else { predt = 0; } predt *= w; return RankScanItem{idx, predt, w, query_group_idx}; }); dh::TemporaryArray<float> d_auc(group_ptr.size() - 1); auto s_d_auc = dh::ToSpan(d_auc); auto out = thrust::make_transform_output_iterator( Discard<RankScanItem>(), [=] __device__(RankScanItem const &item) -> RankScanItem { auto group_id = item.group_id; assert(group_id < d_group_ptr.size()); auto data_group_begin = d_group_ptr[group_id]; size_t n_samples = d_group_ptr[group_id + 1] - data_group_begin; // last item of current group if (item.idx == LastOf(group_id, d_threads_group_ptr)) { if (item.w > 0) { s_d_auc[group_id] = item.predt / item.w; } else { s_d_auc[group_id] = 0; } } return {}; // discard }); dh::InclusiveScan( in, out, [] __device__(RankScanItem const &l, RankScanItem const &r) { if (l.group_id != r.group_id) { return r; } return RankScanItem{r.idx, l.predt + r.predt, l.w + r.w, l.group_id}; }, n_threads); /** * Scale the AUC with number of items in each group. */ float auc = thrust::reduce(thrust::cuda::par(alloc), dh::tbegin(s_d_auc), dh::tend(s_d_auc), 0.0f); return std::make_pair(auc, n_valid); } } // namespace metric } // namespace xgboost
24928f5068cfee20c93e9362c1e77ca3fd87f565.hip
// !!! This is a file automatically generated by hipify!!! /* -- MAGMA (version 1.5.0-beta3) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date July 2014 @generated from zgesellcmv.cu normal z -> c, Fri Jul 18 17:34:27 2014 */ #include "hip/hip_runtime.h" #include <stdio.h> #include "common_magma.h" #if (GPUSHMEM < 200) #define BLOCK_SIZE 128 #else #define BLOCK_SIZE 512 #endif #define PRECISION_c // SELLC SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS __global__ void cgesellcmv_kernel( int num_rows, int num_cols, int blocksize, magmaFloatComplex alpha, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, magmaFloatComplex *d_x, magmaFloatComplex beta, magmaFloatComplex *d_y) { // threads assigned to rows int Idx = blockDim.x * blockIdx.x + threadIdx.x ; int offset = d_rowptr[ blockIdx.x ]; int border = (d_rowptr[ blockIdx.x+1 ]-offset)/blocksize; if(Idx < num_rows ){ magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); for ( int n = 0; n < border; n++){ int col = d_colind [offset+ blocksize * n + threadIdx.x ]; magmaFloatComplex val = d_val[offset+ blocksize * n + threadIdx.x]; if( val != 0){ dot=dot+val*d_x[col]; } } d_y[ Idx ] = dot * alpha + beta * d_y [ Idx ]; } } /** Purpose ------- This routine computes y = alpha * A^t * x + beta * y on the GPU. Input format is SELLC/SELLP. 
Arguments --------- @param transA magma_trans_t transposition parameter for A @param m magma_int_t number of rows in A @param n magma_int_t number of columns in A @param blocksize magma_int_t number of rows in one ELL-slice @param slices magma_int_t number of slices in matrix @param alignment magma_int_t number of threads assigned to one row (=1) @param alpha magmaFloatComplex scalar multiplier @param d_val magmaFloatComplex* array containing values of A in SELLC/P @param d_colind magma_int_t* columnindices of A in SELLC/P @param d_rowptr magma_int_t* rowpointer of SELLP @param d_x magmaFloatComplex* input vector x @param beta magmaFloatComplex scalar multiplier @param d_y magmaFloatComplex* input/output vector y @ingroup magmasparse_cblas ********************************************************************/ extern "C" magma_int_t magma_cgesellcmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t blocksize, magma_int_t slices, magma_int_t alignment, magmaFloatComplex alpha, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, magmaFloatComplex *d_x, magmaFloatComplex beta, magmaFloatComplex *d_y ){ // the kernel can only handle up to 65535 slices // (~2M rows for blocksize 32) dim3 grid( slices, 1, 1); hipLaunchKernelGGL(( cgesellcmv_kernel), dim3(grid), dim3(blocksize), 0, magma_stream , m, n, blocksize, alpha, d_val, d_colind, d_rowptr, d_x, beta, d_y ); return MAGMA_SUCCESS; }
24928f5068cfee20c93e9362c1e77ca3fd87f565.cu
/* -- MAGMA (version 1.5.0-beta3) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date July 2014 @generated from zgesellcmv.cu normal z -> c, Fri Jul 18 17:34:27 2014 */ #include "cuda_runtime.h" #include <stdio.h> #include "common_magma.h" #if (GPUSHMEM < 200) #define BLOCK_SIZE 128 #else #define BLOCK_SIZE 512 #endif #define PRECISION_c // SELLC SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS __global__ void cgesellcmv_kernel( int num_rows, int num_cols, int blocksize, magmaFloatComplex alpha, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, magmaFloatComplex *d_x, magmaFloatComplex beta, magmaFloatComplex *d_y) { // threads assigned to rows int Idx = blockDim.x * blockIdx.x + threadIdx.x ; int offset = d_rowptr[ blockIdx.x ]; int border = (d_rowptr[ blockIdx.x+1 ]-offset)/blocksize; if(Idx < num_rows ){ magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); for ( int n = 0; n < border; n++){ int col = d_colind [offset+ blocksize * n + threadIdx.x ]; magmaFloatComplex val = d_val[offset+ blocksize * n + threadIdx.x]; if( val != 0){ dot=dot+val*d_x[col]; } } d_y[ Idx ] = dot * alpha + beta * d_y [ Idx ]; } } /** Purpose ------- This routine computes y = alpha * A^t * x + beta * y on the GPU. Input format is SELLC/SELLP. 
Arguments --------- @param transA magma_trans_t transposition parameter for A @param m magma_int_t number of rows in A @param n magma_int_t number of columns in A @param blocksize magma_int_t number of rows in one ELL-slice @param slices magma_int_t number of slices in matrix @param alignment magma_int_t number of threads assigned to one row (=1) @param alpha magmaFloatComplex scalar multiplier @param d_val magmaFloatComplex* array containing values of A in SELLC/P @param d_colind magma_int_t* columnindices of A in SELLC/P @param d_rowptr magma_int_t* rowpointer of SELLP @param d_x magmaFloatComplex* input vector x @param beta magmaFloatComplex scalar multiplier @param d_y magmaFloatComplex* input/output vector y @ingroup magmasparse_cblas ********************************************************************/ extern "C" magma_int_t magma_cgesellcmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t blocksize, magma_int_t slices, magma_int_t alignment, magmaFloatComplex alpha, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, magmaFloatComplex *d_x, magmaFloatComplex beta, magmaFloatComplex *d_y ){ // the kernel can only handle up to 65535 slices // (~2M rows for blocksize 32) dim3 grid( slices, 1, 1); cgesellcmv_kernel<<< grid, blocksize, 0, magma_stream >>> ( m, n, blocksize, alpha, d_val, d_colind, d_rowptr, d_x, beta, d_y ); return MAGMA_SUCCESS; }
f1013136176fdd79b507e1245cfa7b575ccc72dc.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #include <assert.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> // forward propogation /* def forwardBackward(X, y, W, v): Z_trans = relu(W@X.T) # mat-mat Z = Z_trans.T # trans yhat = Z@v # mat-vec error = y - yhat grad_v = Z.T @ error # mat-vector grad_Z = np.outer(error, v) # outer product grad_p = dRelu(dZ, Z) grad_W = dp.T @ X # mat-mat */ /* Parameter Setup */ #define N 32 // # of input samples #define D 32 // # of input neurons #define K 32// # of hidden neurons #define STEP 0.001 // learning rate or step size // X: input matrix (n * d) #define X_HEIGHT N #define X_WIDTH D #define X_N X_HEIGHT * X_WIDTH // Z: ifmap matrix (n * k) #define Z_HEIGHT N #define Z_WIDTH K #define Z_N Z_HEIGHT * Z_WIDTH // W: layer 1 weights (k * d) #define W_HEIGHT K #define W_WIDTH D #define W_N W_HEIGHT * W_WIDTH // v: layer 2 weights #define V_HEIGHT K #define V_WIDTH 1 #define V_N V_HEIGHT * V_WIDTH #define BLOCK_SIZE 32 #define LINEAR_BLOCK_SIZE BLOCK_SIZE * BLOCK_SIZE #define MAX_ERR 1e-6 __global__ void matrix_mul_shared(double *d_C, double *d_A, double *d_B, int d_a_height, int d_a_width, int d_b_width) { // global position in the C (output) matrix int cid = blockIdx.y * blockDim.y + threadIdx.y; int rid = blockIdx.x * blockDim.x + threadIdx.x; // thread position in the block int c_thread = threadIdx.y; int r_thread = threadIdx.x; double sum = 0.0; __shared__ double d_A_sub[BLOCK_SIZE][BLOCK_SIZE]; __shared__ double d_B_sub[BLOCK_SIZE][BLOCK_SIZE]; // iterate over tiles across horizontal direction of A for(int k = 0; k<(d_a_width-1)/BLOCK_SIZE + 1; k++){ // load d_A[rid, k*BLOCK_SIZE+c_thread] into d_A_sub[r_thread][c_thread] if((rid < d_a_height) && (k*BLOCK_SIZE+c_thread < d_a_width)){ d_A_sub[r_thread][c_thread] = d_A[rid*d_a_width + k*BLOCK_SIZE+c_thread]; } else { d_A_sub[r_thread][c_thread] = 0.0; } // load 
d_B[k*BLOCK_SIZE + r_thread, cid] into d_B_sub[r_thread][c_thread] if((k*BLOCK_SIZE+r_thread < d_a_width) && (cid < d_b_width)){ d_B_sub[r_thread][c_thread] = d_B[(k*BLOCK_SIZE+r_thread)*d_b_width + cid]; } else { d_B_sub[r_thread][c_thread] = 0.0; } __syncthreads(); // dot product within a tile for(int i = 0; i<d_a_width; i++){ sum += d_A_sub[r_thread][i] * d_B_sub[i][c_thread]; } __syncthreads(); } if(rid < d_a_height && cid < d_b_width) d_C[rid * d_b_width + cid] = sum; } __global__ void relu_matrix_mul_shared(double *d_C, double *d_A, double *d_B, int d_a_height, int d_a_width, int d_b_width) { // global position in the C (output) matrix int cid = blockIdx.y * blockDim.y + threadIdx.y; int rid = blockIdx.x * blockDim.x + threadIdx.x; // thread position in the block int c_thread = threadIdx.y; int r_thread = threadIdx.x; double sum = 0.0; __shared__ double d_A_sub[BLOCK_SIZE][BLOCK_SIZE]; __shared__ double d_B_sub[BLOCK_SIZE][BLOCK_SIZE]; // iterate over tiles across horizontal direction of A for(int k = 0; k<(d_a_width-1)/BLOCK_SIZE + 1; k++){ // load d_A[rid, k*BLOCK_SIZE+c_thread] into d_A_sub[r_thread][c_thread] if((rid < d_a_height) && (k*BLOCK_SIZE+c_thread < d_a_width)){ d_A_sub[r_thread][c_thread] = d_A[rid*d_a_width + k*BLOCK_SIZE+c_thread]; } else { d_A_sub[r_thread][c_thread] = 0.0; } // load d_B[k*BLOCK_SIZE + r_thread, cid] into d_B_sub[r_thread][c_thread] if((k*BLOCK_SIZE+r_thread < d_a_width) && (cid < d_b_width)){ d_B_sub[r_thread][c_thread] = d_B[(k*BLOCK_SIZE+r_thread)*d_b_width + cid]; } else { d_B_sub[r_thread][c_thread] = 0.0; } __syncthreads(); // dot product within a tile for(int i = 0; i<d_a_width; i++){ sum += d_A_sub[r_thread][i] * d_B_sub[i][c_thread]; } __syncthreads(); } if(rid < d_a_height && cid < d_b_width) d_C[rid * d_b_width + cid] = (sum>0)?sum:0; } __global__ void d_relu_matrix_mul_shared(double *d_C, double *d_A, double *d_B, double *d_act, int d_a_height, int d_a_width, int d_b_width) { // global position in the C (output) 
matrix int cid = blockIdx.y * blockDim.y + threadIdx.y; int rid = blockIdx.x * blockDim.x + threadIdx.x; // thread position in the block int c_thread = threadIdx.y; int r_thread = threadIdx.x; double sum = 0.0; __shared__ double d_A_sub[BLOCK_SIZE][BLOCK_SIZE]; __shared__ double d_B_sub[BLOCK_SIZE][BLOCK_SIZE]; // iterate over tiles across horizontal direction of A for(int k = 0; k<(d_a_width-1)/BLOCK_SIZE + 1; k++){ // load d_A[rid, k*BLOCK_SIZE+c_thread] into d_A_sub[r_thread][c_thread] if((rid < d_a_height) && (k*BLOCK_SIZE+c_thread < d_a_width)){ d_A_sub[r_thread][c_thread] = d_A[rid*d_a_width + k*BLOCK_SIZE+c_thread]; } else { d_A_sub[r_thread][c_thread] = 0.0; } // load d_B[k*BLOCK_SIZE + r_thread, cid] into d_B_sub[r_thread][c_thread] if((k*BLOCK_SIZE+r_thread < d_a_width) && (cid < d_b_width)){ d_B_sub[r_thread][c_thread] = d_B[(k*BLOCK_SIZE+r_thread)*d_b_width + cid]; } else { d_B_sub[r_thread][c_thread] = 0.0; } __syncthreads(); // dot product within a tile for(int i = 0; i<d_a_width; i++){ sum += d_A_sub[r_thread][i] * d_B_sub[i][c_thread]; } __syncthreads(); } if(rid < d_a_height && cid < d_b_width) d_C[rid * d_b_width + cid] = (d_act[rid * d_b_width + cid]>0)?sum:0; } __global__ void matrix_transpose(double *d_out, double *d_in, int d_in_width, int d_out_width) { int cid = blockIdx.y * blockDim.y + threadIdx.y; int rid = blockIdx.x * blockDim.x + threadIdx.x; if(cid < d_in_width && rid < d_out_width){ d_out[cid * d_out_width + rid] = d_in[rid * d_in_width + cid]; } } __global__ void vector_sub(double *out, double *a, double *b, int n) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < n){ out[tid] = a[tid] - b[tid]; } } __global__ void update(double *d_weights, double *d_grads, double step, int n) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < n){ d_weights[tid] -= step * d_grads[tid]; } } __global__ void square(double *out, double *in, int n) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < n){ out[tid] = in[tid] * 
in[tid]; } } int main(){ // forward variables double *h_X, *h_W, *h_Z, *h_Z_T, *h_v, *h_yhat, *h_y; double *d_X, *d_X_T, *d_W, *d_Z, *d_Z_T, *d_v, *d_yhat, *d_y; // backward variables double *h_error, *h_grad_v, *h_grad_Z, *h_grad_p_T, *h_grad_W, *h_err_sq; double *d_error, *d_grad_v, *d_grad_Z, *d_grad_p_T, *d_grad_W, *d_err_sq; // double *h_ref; // compute verified results // Allocate host memory h_X = (double*)malloc(sizeof(double) * X_N); h_W = (double*)malloc(sizeof(double) * W_N); h_v = (double*)malloc(sizeof(double) * V_N); h_Z_T = (double*)malloc(sizeof(double) * Z_N); h_Z = (double*)malloc(sizeof(double) * Z_N); h_yhat = (double*)malloc(sizeof(double) * N); h_y = (double*)malloc(sizeof(double) * N); h_error = (double*)malloc(sizeof(double) * N); h_grad_v = (double*)malloc(sizeof(double) * V_N); h_grad_Z = (double*)malloc(sizeof(double) * Z_N); h_grad_p_T = (double*)malloc(sizeof(double) * Z_N); h_grad_W = (double*)malloc(sizeof(double) * W_N); h_err_sq = (double*)malloc(sizeof(double) * N); // h_ref = (double*)malloc(sizeof(double) * N); // Initialize host arrays /*** TEST 1 ***/ /* for(int i = 0; i < X_N; i++){ if(i == 1 || i == 3){ h_X[i] = (double)(-i-1); } else{ h_X[i] = (double)(i+1); } } for(int i = 0; i < W_N; i++){ h_W[i] = double(i+1); } for(int i = 0; i < V_HEIGHT; i++){ h_v[i] = (double)(i+1); } for(int i = 0; i < N; i++){ h_y[i] = (double)(i+1); } */ /*** TEST 2 ***/ srand((unsigned int)time(NULL)); // random uniform from [-a, a] double a = 1.0; for (int i = 0; i< X_N; i++){ h_X[i] = -a + (double)rand()/(double)(RAND_MAX)*a; } for (int i = 0; i< W_N; i++){ h_W[i] = -a + (double)rand()/(double)(RAND_MAX)*a; } for (int i = 0; i< V_N; i++){ h_v[i] = -a + (double)rand()/(double)(RAND_MAX)*a; } for (int i = 0; i< N; i++){ h_y[i] = -a + (double)rand()/(double)(RAND_MAX)*a; } // Allocate device memory hipMalloc((void**)&d_X, sizeof(double) * X_N); hipMalloc((void**)&d_X_T, sizeof(double) * X_N); hipMalloc((void**)&d_Z, sizeof(double) * Z_N); 
hipMalloc((void**)&d_Z_T, sizeof(double) * Z_N); hipMalloc((void**)&d_W, sizeof(double) * W_N); hipMalloc((void**)&d_v, sizeof(double) * V_N); hipMalloc((void**)&d_yhat, sizeof(double) * N); hipMalloc((void**)&d_y, sizeof(double) * N); hipMalloc((void**)&d_error, sizeof(double) * N); hipMalloc((void**)&d_grad_v, sizeof(double) * V_N); hipMalloc((void**)&d_grad_Z, sizeof(double) * Z_N); hipMalloc((void**)&d_grad_p_T, sizeof(double) * Z_N); hipMalloc((void**)&d_grad_W, sizeof(double) * W_N); hipMalloc((void**)&d_err_sq, sizeof(double) * N); // Transfer data from host to device memory hipMemcpy(d_X, h_X, sizeof(double) * X_N, hipMemcpyHostToDevice); hipMemcpy(d_W, h_W, sizeof(double) * W_N, hipMemcpyHostToDevice); hipMemcpy(d_v, h_v, sizeof(double) * V_N, hipMemcpyHostToDevice); hipMemcpy(d_y, h_y, sizeof(double) * N, hipMemcpyHostToDevice); int iters = 20; for (int i = 0; i < iters; i++){ // Executing kernel dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); // X_HEIGHT (N) corresponding to OUT_WIDTH, X_WIDTH (D) corresponding to IN_WIDTH dim3 dimGrid1(N / BLOCK_SIZE + 1,D / BLOCK_SIZE + 1); hipLaunchKernelGGL(( matrix_transpose), dim3(dimGrid1),dim3(dimBlock), 0, 0, d_X_T, d_X, D, N); dim3 dimGrid2(K / BLOCK_SIZE + 1, N / BLOCK_SIZE + 1); hipLaunchKernelGGL(( relu_matrix_mul_shared), dim3(dimGrid2),dim3(dimBlock), 0, 0, d_Z_T, d_W, d_X_T, K, D, N); dim3 dimGrid3(K / BLOCK_SIZE + 1, N / BLOCK_SIZE + 1); hipLaunchKernelGGL(( matrix_transpose), dim3(dimGrid3),dim3(dimBlock), 0, 0, d_Z, d_Z_T, N, K); dim3 dimGrid4(N / BLOCK_SIZE + 1, 1 / BLOCK_SIZE + 1); hipLaunchKernelGGL(( matrix_mul_shared), dim3(dimGrid4),dim3(dimBlock), 0, 0, d_yhat, d_Z, d_v, N, K, 1); // backwards: hipLaunchKernelGGL(( vector_sub), dim3(N / LINEAR_BLOCK_SIZE + 1), dim3(LINEAR_BLOCK_SIZE), 0, 0, d_error, d_yhat, d_y, N); dim3 dimGrid5(K / BLOCK_SIZE + 1, 1 / BLOCK_SIZE + 1); hipLaunchKernelGGL(( matrix_mul_shared), dim3(dimGrid5),dim3(dimBlock), 0, 0, d_grad_v, d_Z_T, d_error, K, N, 1); dim3 dimGrid6(N / 
BLOCK_SIZE + 1, K / BLOCK_SIZE + 1); hipLaunchKernelGGL(( d_relu_matrix_mul_shared), dim3(dimGrid6),dim3(dimBlock), 0, 0, d_grad_Z, d_error, d_v, d_Z, N, 1, K); dim3 dimGrid7(N / BLOCK_SIZE + 1, K / BLOCK_SIZE + 1); hipLaunchKernelGGL(( matrix_transpose), dim3(dimGrid7),dim3(dimBlock), 0, 0, d_grad_p_T, d_grad_Z, K, N); dim3 dimGrid8(K / BLOCK_SIZE + 1, D / BLOCK_SIZE + 1); hipLaunchKernelGGL(( matrix_mul_shared), dim3(dimGrid8),dim3(dimBlock), 0, 0, d_grad_W, d_grad_p_T, d_X, K, N, D); // update hipLaunchKernelGGL(( update), dim3(N / LINEAR_BLOCK_SIZE + 1), dim3(LINEAR_BLOCK_SIZE), 0, 0, d_W, d_grad_W, (STEP/N), W_N); hipLaunchKernelGGL(( update), dim3(N / LINEAR_BLOCK_SIZE + 1), dim3(LINEAR_BLOCK_SIZE), 0, 0, d_v, d_grad_v, (STEP/N), V_N); // hipMemcpy(h_W, d_W, sizeof(double) * W_N, hipMemcpyDeviceToHost); // hipMemcpy(h_v, d_v, sizeof(double) * V_N, hipMemcpyDeviceToHost); // get MSE back hipLaunchKernelGGL(( square), dim3(N / LINEAR_BLOCK_SIZE + 1), dim3(LINEAR_BLOCK_SIZE), 0, 0, d_err_sq, d_error, N); hipMemcpy(h_err_sq, d_err_sq, sizeof(double) * N, hipMemcpyDeviceToHost); double sum = 0.0; for(int i = 0; i < N; i++){ sum += h_err_sq[i]; } printf("MSE is %f\n", sum / N); } // Verification /* for(int i = 0; i < K; i++){ for(int j = 0; j < D; j++){ // double sum = 0.0; // for(int k = 0; k < A_WIDTH; k++){ // sum += h_A[i*A_WIDTH+k] * h_B[k*B_WIDTH + j]; // } // h_ref[i * C_WIDTH + j] = sum; // assert(fabs(h_ref[i*C_WIDTH + j] - h_C[i * C_WIDTH + j]) < MAX_ERR); printf("h_W[%d][%d] = %f\n", i, j, h_W[i * D + j]); // printf("h_Z[%d][%d] = %f\n", i, j, h_Z[i * K + j]); // printf("h_ref[%d][%d] = %f\n", i, j, h_ref[i * C_WIDTH + j]); } } for(int i = 0; i < K; i++){ printf("h_v[%d] = %f\n", i, h_v[i]); } */ printf("PASSED\n"); // Deallocate device memory hipFree(d_X); hipFree(d_X_T); hipFree(d_W); hipFree(d_v); hipFree(d_Z); hipFree(d_Z_T); hipFree(d_yhat); hipFree(d_y); hipFree(d_error); hipFree(d_grad_v); hipFree(d_grad_Z); hipFree(d_grad_p_T); hipFree(d_grad_W); 
hipFree(d_err_sq); // Deallocate host memory free(h_X); free(h_W); free(h_v); free(h_Z); free(h_Z_T); free(h_yhat); free(h_y); free(h_error); free(h_grad_v); free(h_grad_Z); free(h_grad_p_T); free(h_grad_W); free(h_err_sq); }
f1013136176fdd79b507e1245cfa7b575ccc72dc.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #include <assert.h> #include <cuda.h> #include <cuda_runtime.h> // forward propogation /* def forwardBackward(X, y, W, v): Z_trans = relu(W@X.T) # mat-mat Z = Z_trans.T # trans yhat = Z@v # mat-vec error = y - yhat grad_v = Z.T @ error # mat-vector grad_Z = np.outer(error, v) # outer product grad_p = dRelu(dZ, Z) grad_W = dp.T @ X # mat-mat */ /* Parameter Setup */ #define N 32 // # of input samples #define D 32 // # of input neurons #define K 32// # of hidden neurons #define STEP 0.001 // learning rate or step size // X: input matrix (n * d) #define X_HEIGHT N #define X_WIDTH D #define X_N X_HEIGHT * X_WIDTH // Z: ifmap matrix (n * k) #define Z_HEIGHT N #define Z_WIDTH K #define Z_N Z_HEIGHT * Z_WIDTH // W: layer 1 weights (k * d) #define W_HEIGHT K #define W_WIDTH D #define W_N W_HEIGHT * W_WIDTH // v: layer 2 weights #define V_HEIGHT K #define V_WIDTH 1 #define V_N V_HEIGHT * V_WIDTH #define BLOCK_SIZE 32 #define LINEAR_BLOCK_SIZE BLOCK_SIZE * BLOCK_SIZE #define MAX_ERR 1e-6 __global__ void matrix_mul_shared(double *d_C, double *d_A, double *d_B, int d_a_height, int d_a_width, int d_b_width) { // global position in the C (output) matrix int cid = blockIdx.y * blockDim.y + threadIdx.y; int rid = blockIdx.x * blockDim.x + threadIdx.x; // thread position in the block int c_thread = threadIdx.y; int r_thread = threadIdx.x; double sum = 0.0; __shared__ double d_A_sub[BLOCK_SIZE][BLOCK_SIZE]; __shared__ double d_B_sub[BLOCK_SIZE][BLOCK_SIZE]; // iterate over tiles across horizontal direction of A for(int k = 0; k<(d_a_width-1)/BLOCK_SIZE + 1; k++){ // load d_A[rid, k*BLOCK_SIZE+c_thread] into d_A_sub[r_thread][c_thread] if((rid < d_a_height) && (k*BLOCK_SIZE+c_thread < d_a_width)){ d_A_sub[r_thread][c_thread] = d_A[rid*d_a_width + k*BLOCK_SIZE+c_thread]; } else { d_A_sub[r_thread][c_thread] = 0.0; } // load d_B[k*BLOCK_SIZE + r_thread, cid] into d_B_sub[r_thread][c_thread] 
if((k*BLOCK_SIZE+r_thread < d_a_width) && (cid < d_b_width)){ d_B_sub[r_thread][c_thread] = d_B[(k*BLOCK_SIZE+r_thread)*d_b_width + cid]; } else { d_B_sub[r_thread][c_thread] = 0.0; } __syncthreads(); // dot product within a tile for(int i = 0; i<d_a_width; i++){ sum += d_A_sub[r_thread][i] * d_B_sub[i][c_thread]; } __syncthreads(); } if(rid < d_a_height && cid < d_b_width) d_C[rid * d_b_width + cid] = sum; } __global__ void relu_matrix_mul_shared(double *d_C, double *d_A, double *d_B, int d_a_height, int d_a_width, int d_b_width) { // global position in the C (output) matrix int cid = blockIdx.y * blockDim.y + threadIdx.y; int rid = blockIdx.x * blockDim.x + threadIdx.x; // thread position in the block int c_thread = threadIdx.y; int r_thread = threadIdx.x; double sum = 0.0; __shared__ double d_A_sub[BLOCK_SIZE][BLOCK_SIZE]; __shared__ double d_B_sub[BLOCK_SIZE][BLOCK_SIZE]; // iterate over tiles across horizontal direction of A for(int k = 0; k<(d_a_width-1)/BLOCK_SIZE + 1; k++){ // load d_A[rid, k*BLOCK_SIZE+c_thread] into d_A_sub[r_thread][c_thread] if((rid < d_a_height) && (k*BLOCK_SIZE+c_thread < d_a_width)){ d_A_sub[r_thread][c_thread] = d_A[rid*d_a_width + k*BLOCK_SIZE+c_thread]; } else { d_A_sub[r_thread][c_thread] = 0.0; } // load d_B[k*BLOCK_SIZE + r_thread, cid] into d_B_sub[r_thread][c_thread] if((k*BLOCK_SIZE+r_thread < d_a_width) && (cid < d_b_width)){ d_B_sub[r_thread][c_thread] = d_B[(k*BLOCK_SIZE+r_thread)*d_b_width + cid]; } else { d_B_sub[r_thread][c_thread] = 0.0; } __syncthreads(); // dot product within a tile for(int i = 0; i<d_a_width; i++){ sum += d_A_sub[r_thread][i] * d_B_sub[i][c_thread]; } __syncthreads(); } if(rid < d_a_height && cid < d_b_width) d_C[rid * d_b_width + cid] = (sum>0)?sum:0; } __global__ void d_relu_matrix_mul_shared(double *d_C, double *d_A, double *d_B, double *d_act, int d_a_height, int d_a_width, int d_b_width) { // global position in the C (output) matrix int cid = blockIdx.y * blockDim.y + threadIdx.y; int rid = 
blockIdx.x * blockDim.x + threadIdx.x; // thread position in the block int c_thread = threadIdx.y; int r_thread = threadIdx.x; double sum = 0.0; __shared__ double d_A_sub[BLOCK_SIZE][BLOCK_SIZE]; __shared__ double d_B_sub[BLOCK_SIZE][BLOCK_SIZE]; // iterate over tiles across horizontal direction of A for(int k = 0; k<(d_a_width-1)/BLOCK_SIZE + 1; k++){ // load d_A[rid, k*BLOCK_SIZE+c_thread] into d_A_sub[r_thread][c_thread] if((rid < d_a_height) && (k*BLOCK_SIZE+c_thread < d_a_width)){ d_A_sub[r_thread][c_thread] = d_A[rid*d_a_width + k*BLOCK_SIZE+c_thread]; } else { d_A_sub[r_thread][c_thread] = 0.0; } // load d_B[k*BLOCK_SIZE + r_thread, cid] into d_B_sub[r_thread][c_thread] if((k*BLOCK_SIZE+r_thread < d_a_width) && (cid < d_b_width)){ d_B_sub[r_thread][c_thread] = d_B[(k*BLOCK_SIZE+r_thread)*d_b_width + cid]; } else { d_B_sub[r_thread][c_thread] = 0.0; } __syncthreads(); // dot product within a tile for(int i = 0; i<d_a_width; i++){ sum += d_A_sub[r_thread][i] * d_B_sub[i][c_thread]; } __syncthreads(); } if(rid < d_a_height && cid < d_b_width) d_C[rid * d_b_width + cid] = (d_act[rid * d_b_width + cid]>0)?sum:0; } __global__ void matrix_transpose(double *d_out, double *d_in, int d_in_width, int d_out_width) { int cid = blockIdx.y * blockDim.y + threadIdx.y; int rid = blockIdx.x * blockDim.x + threadIdx.x; if(cid < d_in_width && rid < d_out_width){ d_out[cid * d_out_width + rid] = d_in[rid * d_in_width + cid]; } } __global__ void vector_sub(double *out, double *a, double *b, int n) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < n){ out[tid] = a[tid] - b[tid]; } } __global__ void update(double *d_weights, double *d_grads, double step, int n) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < n){ d_weights[tid] -= step * d_grads[tid]; } } __global__ void square(double *out, double *in, int n) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < n){ out[tid] = in[tid] * in[tid]; } } int main(){ // forward variables double *h_X, *h_W, 
*h_Z, *h_Z_T, *h_v, *h_yhat, *h_y; double *d_X, *d_X_T, *d_W, *d_Z, *d_Z_T, *d_v, *d_yhat, *d_y; // backward variables double *h_error, *h_grad_v, *h_grad_Z, *h_grad_p_T, *h_grad_W, *h_err_sq; double *d_error, *d_grad_v, *d_grad_Z, *d_grad_p_T, *d_grad_W, *d_err_sq; // double *h_ref; // compute verified results // Allocate host memory h_X = (double*)malloc(sizeof(double) * X_N); h_W = (double*)malloc(sizeof(double) * W_N); h_v = (double*)malloc(sizeof(double) * V_N); h_Z_T = (double*)malloc(sizeof(double) * Z_N); h_Z = (double*)malloc(sizeof(double) * Z_N); h_yhat = (double*)malloc(sizeof(double) * N); h_y = (double*)malloc(sizeof(double) * N); h_error = (double*)malloc(sizeof(double) * N); h_grad_v = (double*)malloc(sizeof(double) * V_N); h_grad_Z = (double*)malloc(sizeof(double) * Z_N); h_grad_p_T = (double*)malloc(sizeof(double) * Z_N); h_grad_W = (double*)malloc(sizeof(double) * W_N); h_err_sq = (double*)malloc(sizeof(double) * N); // h_ref = (double*)malloc(sizeof(double) * N); // Initialize host arrays /*** TEST 1 ***/ /* for(int i = 0; i < X_N; i++){ if(i == 1 || i == 3){ h_X[i] = (double)(-i-1); } else{ h_X[i] = (double)(i+1); } } for(int i = 0; i < W_N; i++){ h_W[i] = double(i+1); } for(int i = 0; i < V_HEIGHT; i++){ h_v[i] = (double)(i+1); } for(int i = 0; i < N; i++){ h_y[i] = (double)(i+1); } */ /*** TEST 2 ***/ srand((unsigned int)time(NULL)); // random uniform from [-a, a] double a = 1.0; for (int i = 0; i< X_N; i++){ h_X[i] = -a + (double)rand()/(double)(RAND_MAX)*a; } for (int i = 0; i< W_N; i++){ h_W[i] = -a + (double)rand()/(double)(RAND_MAX)*a; } for (int i = 0; i< V_N; i++){ h_v[i] = -a + (double)rand()/(double)(RAND_MAX)*a; } for (int i = 0; i< N; i++){ h_y[i] = -a + (double)rand()/(double)(RAND_MAX)*a; } // Allocate device memory cudaMalloc((void**)&d_X, sizeof(double) * X_N); cudaMalloc((void**)&d_X_T, sizeof(double) * X_N); cudaMalloc((void**)&d_Z, sizeof(double) * Z_N); cudaMalloc((void**)&d_Z_T, sizeof(double) * Z_N); 
cudaMalloc((void**)&d_W, sizeof(double) * W_N); cudaMalloc((void**)&d_v, sizeof(double) * V_N); cudaMalloc((void**)&d_yhat, sizeof(double) * N); cudaMalloc((void**)&d_y, sizeof(double) * N); cudaMalloc((void**)&d_error, sizeof(double) * N); cudaMalloc((void**)&d_grad_v, sizeof(double) * V_N); cudaMalloc((void**)&d_grad_Z, sizeof(double) * Z_N); cudaMalloc((void**)&d_grad_p_T, sizeof(double) * Z_N); cudaMalloc((void**)&d_grad_W, sizeof(double) * W_N); cudaMalloc((void**)&d_err_sq, sizeof(double) * N); // Transfer data from host to device memory cudaMemcpy(d_X, h_X, sizeof(double) * X_N, cudaMemcpyHostToDevice); cudaMemcpy(d_W, h_W, sizeof(double) * W_N, cudaMemcpyHostToDevice); cudaMemcpy(d_v, h_v, sizeof(double) * V_N, cudaMemcpyHostToDevice); cudaMemcpy(d_y, h_y, sizeof(double) * N, cudaMemcpyHostToDevice); int iters = 20; for (int i = 0; i < iters; i++){ // Executing kernel dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); // X_HEIGHT (N) corresponding to OUT_WIDTH, X_WIDTH (D) corresponding to IN_WIDTH dim3 dimGrid1(N / BLOCK_SIZE + 1,D / BLOCK_SIZE + 1); matrix_transpose<<<dimGrid1,dimBlock>>>(d_X_T, d_X, D, N); dim3 dimGrid2(K / BLOCK_SIZE + 1, N / BLOCK_SIZE + 1); relu_matrix_mul_shared<<<dimGrid2,dimBlock>>>(d_Z_T, d_W, d_X_T, K, D, N); dim3 dimGrid3(K / BLOCK_SIZE + 1, N / BLOCK_SIZE + 1); matrix_transpose<<<dimGrid3,dimBlock>>>(d_Z, d_Z_T, N, K); dim3 dimGrid4(N / BLOCK_SIZE + 1, 1 / BLOCK_SIZE + 1); matrix_mul_shared<<<dimGrid4,dimBlock>>>(d_yhat, d_Z, d_v, N, K, 1); // backwards: vector_sub<<<N / LINEAR_BLOCK_SIZE + 1, LINEAR_BLOCK_SIZE>>>(d_error, d_yhat, d_y, N); dim3 dimGrid5(K / BLOCK_SIZE + 1, 1 / BLOCK_SIZE + 1); matrix_mul_shared<<<dimGrid5,dimBlock>>>(d_grad_v, d_Z_T, d_error, K, N, 1); dim3 dimGrid6(N / BLOCK_SIZE + 1, K / BLOCK_SIZE + 1); d_relu_matrix_mul_shared<<<dimGrid6,dimBlock>>>(d_grad_Z, d_error, d_v, d_Z, N, 1, K); dim3 dimGrid7(N / BLOCK_SIZE + 1, K / BLOCK_SIZE + 1); matrix_transpose<<<dimGrid7,dimBlock>>>(d_grad_p_T, d_grad_Z, K, N); dim3 
dimGrid8(K / BLOCK_SIZE + 1, D / BLOCK_SIZE + 1); matrix_mul_shared<<<dimGrid8,dimBlock>>>(d_grad_W, d_grad_p_T, d_X, K, N, D); // update update<<<N / LINEAR_BLOCK_SIZE + 1, LINEAR_BLOCK_SIZE>>>(d_W, d_grad_W, (STEP/N), W_N); update<<<N / LINEAR_BLOCK_SIZE + 1, LINEAR_BLOCK_SIZE>>>(d_v, d_grad_v, (STEP/N), V_N); // cudaMemcpy(h_W, d_W, sizeof(double) * W_N, cudaMemcpyDeviceToHost); // cudaMemcpy(h_v, d_v, sizeof(double) * V_N, cudaMemcpyDeviceToHost); // get MSE back square<<<N / LINEAR_BLOCK_SIZE + 1, LINEAR_BLOCK_SIZE>>>(d_err_sq, d_error, N); cudaMemcpy(h_err_sq, d_err_sq, sizeof(double) * N, cudaMemcpyDeviceToHost); double sum = 0.0; for(int i = 0; i < N; i++){ sum += h_err_sq[i]; } printf("MSE is %f\n", sum / N); } // Verification /* for(int i = 0; i < K; i++){ for(int j = 0; j < D; j++){ // double sum = 0.0; // for(int k = 0; k < A_WIDTH; k++){ // sum += h_A[i*A_WIDTH+k] * h_B[k*B_WIDTH + j]; // } // h_ref[i * C_WIDTH + j] = sum; // assert(fabs(h_ref[i*C_WIDTH + j] - h_C[i * C_WIDTH + j]) < MAX_ERR); printf("h_W[%d][%d] = %f\n", i, j, h_W[i * D + j]); // printf("h_Z[%d][%d] = %f\n", i, j, h_Z[i * K + j]); // printf("h_ref[%d][%d] = %f\n", i, j, h_ref[i * C_WIDTH + j]); } } for(int i = 0; i < K; i++){ printf("h_v[%d] = %f\n", i, h_v[i]); } */ printf("PASSED\n"); // Deallocate device memory cudaFree(d_X); cudaFree(d_X_T); cudaFree(d_W); cudaFree(d_v); cudaFree(d_Z); cudaFree(d_Z_T); cudaFree(d_yhat); cudaFree(d_y); cudaFree(d_error); cudaFree(d_grad_v); cudaFree(d_grad_Z); cudaFree(d_grad_p_T); cudaFree(d_grad_W); cudaFree(d_err_sq); // Deallocate host memory free(h_X); free(h_W); free(h_v); free(h_Z); free(h_Z_T); free(h_yhat); free(h_y); free(h_error); free(h_grad_v); free(h_grad_Z); free(h_grad_p_T); free(h_grad_W); free(h_err_sq); }
bb171580a88e1221e1fe8556295d1107c0d30949.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #define NUM_BLOCKS 1 #define BLOCK_WIDTH 512 __global__ void hello() { printf("Hello world! I'm thread %d\n", threadIdx.x); } int main(int argc,char **argv) { // launch the kernel hipLaunchKernelGGL(( hello), dim3(NUM_BLOCKS), dim3(BLOCK_WIDTH), 0, 0, ); // force the printf()s to flush hipDeviceSynchronize(); printf("That's all!\n"); return 0; }
bb171580a88e1221e1fe8556295d1107c0d30949.cu
#include <stdio.h> #define NUM_BLOCKS 1 #define BLOCK_WIDTH 512 __global__ void hello() { printf("Hello world! I'm thread %d\n", threadIdx.x); } int main(int argc,char **argv) { // launch the kernel hello<<<NUM_BLOCKS, BLOCK_WIDTH>>>(); // force the printf()s to flush cudaDeviceSynchronize(); printf("That's all!\n"); return 0; }
cacf2e74c64d0044f5462992b8b5d5d3df348f21.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <sys/timeb.h> #include <math.h> #include <hip/hip_runtime.h> // nbre de threads dans une dimension (on travaille en 1D) #define NBTHREADS 1024 // Pour la generation aleatoire des valeurs #define MAX_VAL 10 #define MIN_VAL 0 void vecAleatoire(int *v, int n); void vecAff(int *v, int n); // voisin dans la dimension d du processeur p unsigned int voisin(unsigned int p, unsigned int d); // calcul de la somme d'un vecteur sur CPU // pour les tests int somCPU(int *t, int n); // calcul de la somme sur l'hypercube (ne fonctionne que pour un vecteur comportant moins de NBTHREADS elements !!!) __global__ void somHypercubeKernel(int* d_t, int d, int total); // calcul de la somme sur l'hypercube dans la dimension dc (suppose que toutes les dimensions inferieures ont ete calculees) __global__ void somHypercubeUneDimensionKernel(int* d_t, int d, int dc, int total); // fonction qui appel les noyaux float somHypercube(int* h_t, int d); int GPUInfo(); int main(int argc, char* argv[]){ int dim=3, n; int *tab; int somme; float ms; // pour mesurer le temps sur CPU struct timeb tav, tap ; double te; // recuperation des parametres en ligne de commandes if (argc==2) dim= strtol(argv[1], NULL, 10); // allocation et initialisation du tableau // pour calculer le max de n=2^dim valeurs, nous avons besoin d'un vecteur de taille 2*n n=(int)pow(2,dim); // l'occupation memoire du vecteur en Mo float tailleMo=sizeof(int)*n/float(512*1024); tab=(int*)malloc(sizeof(int)*2*n); vecAleatoire(tab,2*n); // la partie droite du tableau est egale a 0 for(int i=0;i<n;i++) tab[n+i]=0; // quel GPU ? 
GPUInfo(); // calcul de la somme sur CPU ftime(&tav); somme=somCPU(tab,n); ftime(&tap); te = (double)((tap.time*1000+tap.millitm)-(tav.time*1000+tav.millitm))/1000 ; // affichage du tableau /* vecAff(tab,2*n); printf("\n"); */ // calcul de la somme sur GPU printf("----\nHypercube de dimension %d, soit %d valeurs dans le vecteur (%f Mo).\n", dim, n, tailleMo); printf("Temps d'execution sur CPU : %f ms.\n",te); ms=somHypercube(tab,dim); printf("Temps d'execution sur GPU : %f ms.\n",ms); // le resultat peut etre a gauche ou a droite, en fonction de la parite de dim printf("SommeCPU : %d, sommeGPU : %d (ecart GPU : %d)\n",somme, tab[n*(dim%2)], tab[n*(dim%2)+n-1]-tab[n*(dim%2)]); // affichage du tableau resultat // vecAff(tab,2*n); } // calcul la somme de tous les elements contenus dans le vecteur h_t float somHypercube(int* h_t, int d){ int n = (int)pow(2,d); // la taille du vecteur est de 2*n elements long size = 2*n*sizeof(int); int nbBlocs; int *d_t; // pour mesurer le temps en cuda hipEvent_t start, stop; float milliseconds = 0; hipEventCreate(&start); hipEventCreate(&stop); // allocations du vecteur printf("Allocation de %ld octets (%f Mo) sur le GPU.\n",size,(float)size/1024/1024); if (hipMalloc((void **) &d_t, size)!=hipSuccess) { printf ("Pb allocation !!!\n"); exit(1); } // copies hote vers device hipMemcpy(d_t, h_t, size, hipMemcpyHostToDevice); // le calcul sur GPU nbBlocs=(n-1)/NBTHREADS+1; printf("Appel du noyau <<<%d blocs, %d>>>.\n", nbBlocs, NBTHREADS); // 2 cas de figures : (1) un seul bloc ou (2) plus d'un blocs hipEventRecord(start); if(nbBlocs==1)hipLaunchKernelGGL(( somHypercubeKernel), dim3(nbBlocs),dim3(NBTHREADS), 0, 0, d_t, d, n); else { // on appelle le noyau pour chaque dimension // afin de s'assurer que tous les blocs soient resolus avant de passer a la dimension suivante for(int i=0;i<d;i++) { // printf("somHypercubeUneDimensionKernel<<<%d,%d>>>(d_t,%d,%d,%d)\n",nbBlocs,NBTHREADS,d,i,n); hipLaunchKernelGGL(( 
somHypercubeUneDimensionKernel), dim3(nbBlocs),dim3(NBTHREADS), 0, 0, d_t, d, i, n); // attente de la fin du noyau dans la dimension courante hipDeviceSynchronize(); } } hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds, start, stop); // copie device vers hote hipMemcpy(h_t, d_t, size, hipMemcpyDeviceToHost); // liberation de la memoire hipFree(d_t); return milliseconds; } // calcul de la somme sur l'hypercube (ne fonctionne que pour un vecteur comportant moins de NBTHREADS elements !!!) __global__ void somHypercubeKernel(int* d_t, int d, int total){ int p,voisin; int val; p=threadIdx.x+blockDim.x*blockIdx.x; // attention pour eviter le conflit d'ecriture, la longueur de d_t est egale a 2*total // suivant la parite de i, on utilise la partie gauche ou droite du tableau pour la lecture et inversement pour l'ecriture if (p<total) { for(int i=0;i<d;i++){ voisin=p^(((unsigned int)1)<<i); val=d_t[total*(i%2)+voisin]; d_t[total*((i+1)%2)+p]=d_t[total*(i%2)+p]+val; __syncthreads(); } } } // calcul de la somme sur l'hypercube dans la dimension dc (suppose que toutes les dimensions inferieures ont ete calculees) // ce noyau devrait etre appele d fois depuis l'hote !!! 
__global__ void somHypercubeUneDimensionKernel(int* d_t, int d, int i, int total) { int p,voisin; int val; p=threadIdx.x+blockDim.x*blockIdx.x; // attention pour eviter le conflit d'ecriture, la longueur de d_t est egale a 2*total // suivant la parite de i, on utilise la partie gauche ou droite du tableau pour la lecture et inversement pour l'ecriture if (p<total) { // voisin=p^(((unsigned int)1)<<i); voisin=p^(1<<i); val=d_t[total*(i%2)+voisin]; d_t[total*((i+1)%2)+p]=d_t[total*(i%2)+p]+val; __syncthreads(); } } // rappel sur les fonctions C pour manipuler les bits directement // https://zestedesavoir.com/tutoriels/755/le-langage-c-1/notions-avancees/manipulation-des-bits/ unsigned int voisin(unsigned int p, unsigned int d) { // ou exclusif entre le numero du processeur // et le bit a 1 en position d return (p^(((unsigned int)1)<<d)); } // calcul de la somme d'un vecteur sur CPU int somCPU(int *t, int n) { int somme=0; for(int i=0;i<n;i++) somme+=t[i]; return somme; } // Initialisation aleatoire d'un vecteur void vecAleatoire(int *v, int n) { int i; for(i=0;i<n;i++){ v[i]= (int)((double)rand()/RAND_MAX*MAX_VAL) + MIN_VAL; } } // Affiche un vecteur void vecAff(int *v, int n){ int i; printf("["); for(i=0;i<n-1;i++) printf("%d ",v[i]); printf("%f]",v[n-1]); } int GPUInfo(){ int deviceCount; hipGetDeviceCount(&deviceCount); for (int dev = 0; dev < deviceCount; dev++) { hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, dev); if (dev == 0) { if (deviceProp.major == 9999 && deviceProp.minor == 9999) { printf("No CUDA GPU has been detected"); return -1; } else if (deviceCount == 1) { printf("There is 1 device supporting CUDA\n"); printf("Device %d, name: %s\n", dev, deviceProp.name); printf("Computational Capabilities: %d.%d\n", deviceProp.major, deviceProp.minor); printf("Maximum global memory size: %ld bytes\n", deviceProp.totalGlobalMem); printf("Maximum shared memory size per block: %ld bytes\n", deviceProp.sharedMemPerBlock); printf("Warp size: %d\n", 
deviceProp.warpSize); printf("Maximum number of blocks per multiProcessor: %d\n",deviceProp.maxBlocksPerMultiProcessor); printf("Maximum number of threads per multiProcessor: %d\n",deviceProp.maxThreadsPerMultiProcessor); printf("Maximum grid size : %d x %d x %d blocks.\n",deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]); } else { printf("There are %d devices supporting CUDA\n", deviceCount); } } } return deviceCount; }
cacf2e74c64d0044f5462992b8b5d5d3df348f21.cu
#include <stdio.h> #include <sys/timeb.h> #include <math.h> #include <cuda.h> // nbre de threads dans une dimension (on travaille en 1D) #define NBTHREADS 1024 // Pour la generation aleatoire des valeurs #define MAX_VAL 10 #define MIN_VAL 0 void vecAleatoire(int *v, int n); void vecAff(int *v, int n); // voisin dans la dimension d du processeur p unsigned int voisin(unsigned int p, unsigned int d); // calcul de la somme d'un vecteur sur CPU // pour les tests int somCPU(int *t, int n); // calcul de la somme sur l'hypercube (ne fonctionne que pour un vecteur comportant moins de NBTHREADS elements !!!) __global__ void somHypercubeKernel(int* d_t, int d, int total); // calcul de la somme sur l'hypercube dans la dimension dc (suppose que toutes les dimensions inferieures ont ete calculees) __global__ void somHypercubeUneDimensionKernel(int* d_t, int d, int dc, int total); // fonction qui appel les noyaux float somHypercube(int* h_t, int d); int GPUInfo(); int main(int argc, char* argv[]){ int dim=3, n; int *tab; int somme; float ms; // pour mesurer le temps sur CPU struct timeb tav, tap ; double te; // recuperation des parametres en ligne de commandes if (argc==2) dim= strtol(argv[1], NULL, 10); // allocation et initialisation du tableau // pour calculer le max de n=2^dim valeurs, nous avons besoin d'un vecteur de taille 2*n n=(int)pow(2,dim); // l'occupation memoire du vecteur en Mo float tailleMo=sizeof(int)*n/float(512*1024); tab=(int*)malloc(sizeof(int)*2*n); vecAleatoire(tab,2*n); // la partie droite du tableau est egale a 0 for(int i=0;i<n;i++) tab[n+i]=0; // quel GPU ? 
GPUInfo(); // calcul de la somme sur CPU ftime(&tav); somme=somCPU(tab,n); ftime(&tap); te = (double)((tap.time*1000+tap.millitm)-(tav.time*1000+tav.millitm))/1000 ; // affichage du tableau /* vecAff(tab,2*n); printf("\n"); */ // calcul de la somme sur GPU printf("----\nHypercube de dimension %d, soit %d valeurs dans le vecteur (%f Mo).\n", dim, n, tailleMo); printf("Temps d'execution sur CPU : %f ms.\n",te); ms=somHypercube(tab,dim); printf("Temps d'execution sur GPU : %f ms.\n",ms); // le resultat peut etre a gauche ou a droite, en fonction de la parite de dim printf("SommeCPU : %d, sommeGPU : %d (ecart GPU : %d)\n",somme, tab[n*(dim%2)], tab[n*(dim%2)+n-1]-tab[n*(dim%2)]); // affichage du tableau resultat // vecAff(tab,2*n); } // calcul la somme de tous les elements contenus dans le vecteur h_t float somHypercube(int* h_t, int d){ int n = (int)pow(2,d); // la taille du vecteur est de 2*n elements long size = 2*n*sizeof(int); int nbBlocs; int *d_t; // pour mesurer le temps en cuda cudaEvent_t start, stop; float milliseconds = 0; cudaEventCreate(&start); cudaEventCreate(&stop); // allocations du vecteur printf("Allocation de %ld octets (%f Mo) sur le GPU.\n",size,(float)size/1024/1024); if (cudaMalloc((void **) &d_t, size)!=cudaSuccess) { printf ("Pb allocation !!!\n"); exit(1); } // copies hote vers device cudaMemcpy(d_t, h_t, size, cudaMemcpyHostToDevice); // le calcul sur GPU nbBlocs=(n-1)/NBTHREADS+1; printf("Appel du noyau <<<%d blocs, %d>>>.\n", nbBlocs, NBTHREADS); // 2 cas de figures : (1) un seul bloc ou (2) plus d'un blocs cudaEventRecord(start); if(nbBlocs==1) somHypercubeKernel<<<nbBlocs,NBTHREADS>>>(d_t, d, n); else { // on appelle le noyau pour chaque dimension // afin de s'assurer que tous les blocs soient resolus avant de passer a la dimension suivante for(int i=0;i<d;i++) { // printf("somHypercubeUneDimensionKernel<<<%d,%d>>>(d_t,%d,%d,%d)\n",nbBlocs,NBTHREADS,d,i,n); somHypercubeUneDimensionKernel<<<nbBlocs,NBTHREADS>>>(d_t, d, i, n); // attente 
de la fin du noyau dans la dimension courante cudaDeviceSynchronize(); } } cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); // copie device vers hote cudaMemcpy(h_t, d_t, size, cudaMemcpyDeviceToHost); // liberation de la memoire cudaFree(d_t); return milliseconds; } // calcul de la somme sur l'hypercube (ne fonctionne que pour un vecteur comportant moins de NBTHREADS elements !!!) __global__ void somHypercubeKernel(int* d_t, int d, int total){ int p,voisin; int val; p=threadIdx.x+blockDim.x*blockIdx.x; // attention pour eviter le conflit d'ecriture, la longueur de d_t est egale a 2*total // suivant la parite de i, on utilise la partie gauche ou droite du tableau pour la lecture et inversement pour l'ecriture if (p<total) { for(int i=0;i<d;i++){ voisin=p^(((unsigned int)1)<<i); val=d_t[total*(i%2)+voisin]; d_t[total*((i+1)%2)+p]=d_t[total*(i%2)+p]+val; __syncthreads(); } } } // calcul de la somme sur l'hypercube dans la dimension dc (suppose que toutes les dimensions inferieures ont ete calculees) // ce noyau devrait etre appele d fois depuis l'hote !!! 
__global__ void somHypercubeUneDimensionKernel(int* d_t, int d, int i, int total) { int p,voisin; int val; p=threadIdx.x+blockDim.x*blockIdx.x; // attention pour eviter le conflit d'ecriture, la longueur de d_t est egale a 2*total // suivant la parite de i, on utilise la partie gauche ou droite du tableau pour la lecture et inversement pour l'ecriture if (p<total) { // voisin=p^(((unsigned int)1)<<i); voisin=p^(1<<i); val=d_t[total*(i%2)+voisin]; d_t[total*((i+1)%2)+p]=d_t[total*(i%2)+p]+val; __syncthreads(); } } // rappel sur les fonctions C pour manipuler les bits directement // https://zestedesavoir.com/tutoriels/755/le-langage-c-1/notions-avancees/manipulation-des-bits/ unsigned int voisin(unsigned int p, unsigned int d) { // ou exclusif entre le numero du processeur // et le bit a 1 en position d return (p^(((unsigned int)1)<<d)); } // calcul de la somme d'un vecteur sur CPU int somCPU(int *t, int n) { int somme=0; for(int i=0;i<n;i++) somme+=t[i]; return somme; } // Initialisation aleatoire d'un vecteur void vecAleatoire(int *v, int n) { int i; for(i=0;i<n;i++){ v[i]= (int)((double)rand()/RAND_MAX*MAX_VAL) + MIN_VAL; } } // Affiche un vecteur void vecAff(int *v, int n){ int i; printf("["); for(i=0;i<n-1;i++) printf("%d ",v[i]); printf("%f]",v[n-1]); } int GPUInfo(){ int deviceCount; cudaGetDeviceCount(&deviceCount); for (int dev = 0; dev < deviceCount; dev++) { cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, dev); if (dev == 0) { if (deviceProp.major == 9999 && deviceProp.minor == 9999) { printf("No CUDA GPU has been detected"); return -1; } else if (deviceCount == 1) { printf("There is 1 device supporting CUDA\n"); printf("Device %d, name: %s\n", dev, deviceProp.name); printf("Computational Capabilities: %d.%d\n", deviceProp.major, deviceProp.minor); printf("Maximum global memory size: %ld bytes\n", deviceProp.totalGlobalMem); printf("Maximum shared memory size per block: %ld bytes\n", deviceProp.sharedMemPerBlock); printf("Warp size: %d\n", 
deviceProp.warpSize); printf("Maximum number of blocks per multiProcessor: %d\n",deviceProp.maxBlocksPerMultiProcessor); printf("Maximum number of threads per multiProcessor: %d\n",deviceProp.maxThreadsPerMultiProcessor); printf("Maximum grid size : %d x %d x %d blocks.\n",deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]); } else { printf("There are %d devices supporting CUDA\n", deviceCount); } } } return deviceCount; }
6fc500fcdf6f5fb77085eefc73e3497c20cdfe63.hip
// !!! This is a file automatically generated by hipify!!! // // Created by Fabio Lipreri on 2019-11-20. // #include "SigmoidLayer.h" #include "../utils/cudamath.h" using namespace std; SigmoidLayer::SigmoidLayer(std::string name) { this->name = name; } SigmoidLayer::~SigmoidLayer() {} Matrix &SigmoidLayer::forward(hipblasHandle_t handle, Matrix &A) { this->Z = A; Res.allocate_size(A.getX(), A.getY()); gpu_sigmoid_forward(this->Z.getDevData().get(), this->Res.getDevData().get(), this->Res.getX(), this->Res.getY()); // TODO: togliere stampa /*Res.cpyDevToHost(); cout << "Y signmoid"<< endl; cout << Res << endl;*/ return Res; } Matrix &SigmoidLayer::backward(hipblasHandle_t handle, Matrix &top_diff, float learning_rate) { Matrix sigmoid_res; sigmoid_res.allocate_size(this->Res.getX(), this->Res.getY()); /*top_diff.cpyDevToHost(); cout << "top_diff" << endl; cout << top_diff << endl;*/ gpu_sigmoid_backward(this->Res.getDevData().get(), sigmoid_res.getDevData().get(), sigmoid_res.getX(), sigmoid_res.getY()); this->dZ.allocate_size(top_diff.getX(), this->Res.getY()); gpu_blas_mmul(handle, top_diff.getDevData().get(), HIPBLAS_OP_N, sigmoid_res.getDevData().get(), HIPBLAS_OP_N, this->dZ.getDevData().get(), top_diff.getX(), sigmoid_res.getY(), top_diff.getY(), learning_rate); return dZ; } std::string SigmoidLayer::getName() { return Layer::getName(); }
6fc500fcdf6f5fb77085eefc73e3497c20cdfe63.cu
// // Created by Fabio Lipreri on 2019-11-20. // #include "SigmoidLayer.h" #include "../utils/cudamath.h" using namespace std; SigmoidLayer::SigmoidLayer(std::string name) { this->name = name; } SigmoidLayer::~SigmoidLayer() {} Matrix &SigmoidLayer::forward(cublasHandle_t handle, Matrix &A) { this->Z = A; Res.allocate_size(A.getX(), A.getY()); gpu_sigmoid_forward(this->Z.getDevData().get(), this->Res.getDevData().get(), this->Res.getX(), this->Res.getY()); // TODO: togliere stampa /*Res.cpyDevToHost(); cout << "Y signmoid"<< endl; cout << Res << endl;*/ return Res; } Matrix &SigmoidLayer::backward(cublasHandle_t handle, Matrix &top_diff, float learning_rate) { Matrix sigmoid_res; sigmoid_res.allocate_size(this->Res.getX(), this->Res.getY()); /*top_diff.cpyDevToHost(); cout << "top_diff" << endl; cout << top_diff << endl;*/ gpu_sigmoid_backward(this->Res.getDevData().get(), sigmoid_res.getDevData().get(), sigmoid_res.getX(), sigmoid_res.getY()); this->dZ.allocate_size(top_diff.getX(), this->Res.getY()); gpu_blas_mmul(handle, top_diff.getDevData().get(), CUBLAS_OP_N, sigmoid_res.getDevData().get(), CUBLAS_OP_N, this->dZ.getDevData().get(), top_diff.getX(), sigmoid_res.getY(), top_diff.getY(), learning_rate); return dZ; } std::string SigmoidLayer::getName() { return Layer::getName(); }
151631e5d2e188b28c3e0133974afaac1130b0fa.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <vector> #include <stdlib.h> #include <time.h> #include <hip/hip_runtime.h> #include "dev_array.h" #include <math.h> #include <random> #include <algorithm> #include<string> #include <fstream> #include <sstream> using namespace std; vector<string> split(const string &s, char delim) { stringstream ss(s); string item; vector<string> tokens; while (getline(ss, item, delim)) { tokens.push_back(item); } return tokens; } vector <float> operator/(const vector <float>& m2, const float m1) { /* Returns the product of a float and a vectors (elementwise multiplication). Inputs: m1: float m2: vector Output: vector, m1 * m2, product of two vectors m1 and m2 */ const unsigned long VECTOR_SIZE = m2.size(); vector <float> product(VECTOR_SIZE); for (unsigned i = 0; i != VECTOR_SIZE; ++i) { product[i] = m2[i] / m1; }; return product; } __global__ void matrixMultiplicationKernel(float* A, float* B, float* C, int K_Width, int Col_Size) { int Row = blockIdx.y * blockDim.y + threadIdx.y; int Col = blockIdx.x * blockDim.x + threadIdx.x; float Pvalue = 0; for (int k = 0; k < K_Width; k++) { Pvalue += A[Row*K_Width + k] * B[k*Col_Size + Col]; } C[Row*Col_Size + Col] = Pvalue; } __global__ void reluKernel(float *Input, float *Output) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (Input[index] < 0) { Output[index] = 0.0; } else { Output[index] = Input[index]; } } __global__ void reluPrimeKernel(float *Input, float *Output) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (Input[index] <= 0) { Output[index] = 0.0; } else { Output[index] = 1.0; } } __global__ void sigmoidKernel(float *Input, float *Output) { int index = blockIdx.x * blockDim.x + threadIdx.x; Output[index] = 1 / (1 + expf(-Input[index])); } __global__ void sigmoid_dKernel(float *Input, float *Output) { int index = blockIdx.x * blockDim.x + threadIdx.x; Output[index] = Input[index] * (1 - Input[index]); } __global__ void 
matrixAddKernel(float *A, float *B, float *C, int Col_Size) { int Row = blockIdx.y * blockDim.y + threadIdx.y; int Col = blockIdx.x * blockDim.x + threadIdx.x; C[Row*Col_Size + Col] = A[Row*Col_Size + Col] + B[Row*Col_Size + Col]; } __global__ void matrixMinusKernel(float *A, float *B, float *C, int Col_Size) { int Row = blockIdx.y * blockDim.y + threadIdx.y; int Col = blockIdx.x * blockDim.x + threadIdx.x; C[Row*Col_Size + Col] = A[Row*Col_Size + Col] - B[Row*Col_Size + Col]; } __global__ void matrixProductKernel(float *A, float *B, float *C, int Col_Size) { int Row = blockIdx.y * blockDim.y + threadIdx.y; int Col = blockIdx.x * blockDim.x + threadIdx.x; C[Row*Col_Size + Col] = A[Row*Col_Size + Col] * B[Row*Col_Size + Col]; } __global__ void matrixValueProductKernel(float *Input, float *Output, float value) { int index = blockIdx.x * blockDim.x + threadIdx.x; Output[index] = Input[index] * value; } __global__ void matrixValueDivideKernel(float *Input, float *Output, float value) { int index = blockIdx.x * blockDim.x + threadIdx.x; Output[index] = Input[index] / value; } __global__ void matrixTransposeKernel(float *Input, float *Output, int Row_Size, int Col_Size) { int Row = blockIdx.y * blockDim.y + threadIdx.y; int Col = blockIdx.x * blockDim.x + threadIdx.x; Output[Col*Row_Size + Row] = Input[Row*Col_Size + Col]; } __global__ void sofmaxKernel(float *Input, float *Output) { int index = threadIdx.x; float foo[10]; float max = Input[index * 10]; for (int i = 0; i < 10; i++) { foo[i] = Input[index * 10 + i]; if (foo[i] > max) { max = foo[i]; } } for (int i = 0;i<10; i++) { foo[i] = expf(foo[i] - max); } float sum = 0.0; for (int j = 0; j < 10;j++) { sum = sum + foo[j]; } for (int j = 0;j < 10; j++) { Output[index * 10 + j] = foo[j] / sum; } } ///////////////////////////////////////////////////////////////////////////////// void matrixMultiplication(float *A, float *B, float *C, int Row_Size, int Col_Size, int K_Width) { if (Col_Size == 10) { dim3 
threadsPerBlock(10, 16); dim3 blocksPerGrid(Col_Size / 10, Row_Size / 16); matrixMultiplicationKernel << <blocksPerGrid, threadsPerBlock >> > (A, B, C, K_Width, Col_Size); } else { dim3 threadsPerBlock(16, 16); dim3 blocksPerGrid(Col_Size / 16, Row_Size / 16); matrixMultiplicationKernel << <blocksPerGrid, threadsPerBlock >> > (A, B, C, K_Width, Col_Size); } } void relu(float *Input, float *Output, int size) { hipLaunchKernelGGL(( reluKernel) , size/16, 16>> > (Input,Output); } float* reluPrime(float *Input, int size) { float *Output = NULL; hipMalloc(&Output, size * sizeof(float)); reluPrimeKernel, size/16, 16, 0, 0, 0, Input,Output); return Output; } void sigmoid(float *Input, float *Output, int size) { sigmoidKernel << < size / 16, 16 >> > (Input,Output); } void sigmoid_d(float *Input, float *Output, int size) { sigmoid_dKernel<<< size / 16, 16 >> > (Input, Output); } void matrixAdd(float *A, float *B, float *C, int Row_Size, int Col_Size) { dim3 threadsPerBlock(16,16); dim3 blocksPerGrid(Col_Size / 16,Row_Size / 16); matrixAddKernel << < blocksPerGrid, threadsPerBlock >> > (A, B, C, Col_Size); } void matrixMinus(float *A, float *B, float *C, int Row_Size, int Col_Size) { if (Col_Size == 10) { dim3 threadsPerBlock(10, 16); dim3 blocksPerGrid(Col_Size / 10, Row_Size / 16); matrixMinusKernel << < blocksPerGrid, threadsPerBlock >> > (A, B, C, Col_Size); } else { dim3 threadsPerBlock(16, 16); dim3 blocksPerGrid(Col_Size / 16, Row_Size / 16); matrixMinusKernel << < blocksPerGrid, threadsPerBlock >> > (A, B, C, Col_Size); } } void matrixProduct(float *A, float *B, float *C, int Row_Size, int Col_Size) { dim3 threadsPerBlock(16,16); dim3 blocksPerGrid(Col_Size / 16, Row_Size / 16); matrixProductKernel << < blocksPerGrid, threadsPerBlock>> > (A,B,C,Col_Size); } float* matrixValueProduct(float *Input, int size, float value) { float *Output = NULL; hipMalloc(&Output, size*sizeof(float)); matrixValueProductKernel<< < size / 16, 16 >> > (Input, Output, value); return Output; 
} void matrixValueDivide(float *Input, float *Output, int size, float value) { matrixValueDivideKernel << < size / 16, 16 >> > (Input, Output, value); } void matrixTranspose(float *Input, float *Output, int Row_Size, int Col_Size) { dim3 threadsPerBlock(16, 16); dim3 blocksPerGrid(Col_Size / 16, Row_Size / 16); matrixTransposeKernel << < blocksPerGrid, threadsPerBlock >> > (Input,Output,Row_Size,Col_Size); } float* matrixTranspose_secondv(float *Input, int Row_Size, int Col_Size) { if (Col_Size == 10) { dim3 threadsPerBlock(10, 16); dim3 blocksPerGrid(Col_Size / 10, Row_Size / 16); float *Output = NULL; hipMalloc(&Output, Row_Size * Col_Size * sizeof(float)); matrixTransposeKernel << < blocksPerGrid, threadsPerBlock >> > (Input, Output, Row_Size, Col_Size); return Output; } else { dim3 threadsPerBlock(16, 16); dim3 blocksPerGrid(Col_Size / 16, Row_Size / 16); float *Output = NULL; hipMalloc(&Output, Row_Size * Col_Size * sizeof(float)); matrixTransposeKernel << < blocksPerGrid, threadsPerBlock >> > (Input, Output, Row_Size, Col_Size); return Output; } } void softmax(float *Input, float *Output,int size) { sofmaxKernel << <1,size / 10 >> > (Input, Output); } ///////////////////////////////////////////////////////////////////// float* test_generate(float *M, size_t size, float num) { M = (float *)malloc(size * sizeof(float)); for (int i = 0; i < size; i++) { M[i] = num; } return M; } float* random_generate(float *M, size_t size) { M = (float *)malloc(size * sizeof(float)); random_device rd; mt19937 gen(rd()); uniform_real_distribution<> distribution(0.0, 0.05); static default_random_engine generator; generate(M, M + size, [&]() { return distribution(generator); }); return M; } void print_value(float *M, int Row_Size, int Col_Size) { for (int i = 0; i < Row_Size; i++) { for (int j = 0; j < Col_Size; j++) { printf("%f ",M[i*Col_Size + j]); } printf("\n\n"); } } float compute_accuracy(float *prediction,float *ground_truth, int Row_Size, int Col_Size) { float correct = 
0; for (int i = 0;i < Row_Size;i++) { int index1 = distance(&prediction[i*Col_Size], max_element(&prediction[i*Col_Size], &prediction[i*Col_Size] + Col_Size)); int index2 = distance(&ground_truth[i*Col_Size], max_element(&ground_truth[i*Col_Size], &ground_truth[i*Col_Size] + Col_Size)); if (index1 == index2) correct += 1; } return correct / Row_Size; } int main(int argc, char *argv[]) { // generate W1,W2,W3 //matrixMultiplication string line; vector<string> line_v; cout << "Loading data ...\n"; vector<float> X_train; vector<float> y_train; ifstream myfile("./train.txt"); if (myfile.is_open()) { while (getline(myfile, line)) { line_v = split(line, '\t'); int digit = strtof((line_v[0]).c_str(), 0); for (unsigned i = 0; i < 10; ++i) { if (i == digit) { y_train.push_back(1.); } else y_train.push_back(0.); } int size = static_cast<int>(line_v.size()); for (unsigned i = 1; i < size; ++i) { X_train.push_back(strtof((line_v[i]).c_str(), 0)); } } X_train = X_train / 255.0; } else cout << "Unable to open file" << '\n'; cout << X_train.size(); myfile.close(); int BATCH_SIZE = 256; float lr = .01 / BATCH_SIZE; // Random initialization of the weights float *W1 = NULL, *W2 = NULL, *W3 = NULL, *b_x = NULL, *b_y = NULL; float *a1 = NULL, *a2 = NULL, *yhat = NULL, *dyhat = NULL; float *dw3 = NULL, *dz2 = NULL , *dw2 = NULL, *dz1 = NULL, *dw1=NULL; float *temp; // forward variable W1 = random_generate(W1, 784*128); W2 = random_generate(W2, 128*64); W3 = random_generate(W3, 64*10); b_x = (float *)malloc(BATCH_SIZE * 784 * sizeof(float)); b_y = (float *)malloc(BATCH_SIZE * 10 * sizeof(float)); a1 = (float *)malloc(BATCH_SIZE * 128 * sizeof(float)); a2 = (float *)malloc(BATCH_SIZE * 64 * sizeof(float)); yhat = (float *)malloc(BATCH_SIZE * 10 * sizeof(float)); // dyhat = (float *)malloc(BATCH_SIZE * 10 * sizeof(float)); dw3 = (float *)malloc(64 * 10 * sizeof(float)); dz2 = (float *)malloc(256 * 64 * sizeof(float)); dw2 = (float *)malloc(128 * 64 * sizeof(float)); dz1 = (float 
*)malloc(256 * 128 * sizeof(float)); dw1 = (float *)malloc(784 * 128 * sizeof(float)); dev_array W1_d(784, 128); dev_array W2_d(128, 64); dev_array W3_d(64, 10); dev_array b_x_d(BATCH_SIZE , 784); dev_array b_y_d(BATCH_SIZE , 10); dev_array a1_d(BATCH_SIZE, 128); dev_array a2_d(BATCH_SIZE, 64); dev_array yhat_d(BATCH_SIZE, 10); dev_array dyhat_d(BATCH_SIZE, 10); dev_array dw3_d(64, 10); dev_array dz2_d(256, 64); dev_array dw2_d(128 ,64); dev_array dz1_d(256, 128); dev_array dw1_d(784, 128); W1_d.set(W1); W2_d.set(W2); W3_d.set(W3); b_x_d.set(b_x); b_y_d.set(b_y); a1_d.set(a1); a2_d.set(a2); yhat_d.set(yhat); dyhat_d.set(dyhat); dw3_d.set(dw3); dz2_d.set(dz2); dw2_d.set(dw2); dz1_d.set(dz1); dw1_d.set(dw1); cout << "Training the model ...\n"; for (unsigned i = 0 ; i < 32000; i++) { int randindx = rand() % (37904 - BATCH_SIZE); copy(X_train.begin()+ randindx * 784, X_train.begin() + (randindx+ BATCH_SIZE)*784, b_x); hipMemcpy(b_x_d.getData(), b_x, BATCH_SIZE * 784 * sizeof(float) ,hipMemcpyHostToDevice); copy(y_train.begin() + randindx * 10, y_train.begin() + (randindx + BATCH_SIZE) * 10, b_y); hipMemcpy(b_y_d.getData(), b_y, BATCH_SIZE * 10 * sizeof(float), hipMemcpyHostToDevice); matrixMultiplication(b_x_d.getData(), W1_d.getData(), a1_d.getData(), b_x_d.getRowSize(), W1_d.getColSize(), b_x_d.getColSize()); relu(a1_d.getData(), a1_d.getData(), 256 * 128); matrixMultiplication(a1_d.getData(), W2_d.getData(), a2_d.getData(), 256, 64, 128); relu(a2_d.getData(), a2_d.getData(), 256 * 64); matrixMultiplication(a2_d.getData(), W3_d.getData(), yhat_d.getData(), 256, 10, 64); softmax(yhat_d.getData(), yhat_d.getData(), 256*10); //yhat_d.get(yhat); // Back propagation matrixMinus(yhat_d.getData(), b_y_d.getData(), dyhat_d.getData(), 256, 10); temp = matrixTranspose_secondv(a2_d.getData(), BATCH_SIZE, 64); matrixMultiplication(temp, dyhat_d.getData(), dw3_d.getData(), 64, 10, 256); hipFree(temp); temp = matrixTranspose_secondv(W3_d.getData(), 64, 10); 
matrixMultiplication(dyhat_d.getData(), temp, dz2_d.getData(),256,64,10); hipFree(temp); temp = reluPrime(a2_d.getData(), a2_d.getSize()); matrixProduct(dz2_d.getData(), reluPrime(a2_d.getData(),a2_d.getSize()) , dz2_d.getData(), 256, 64); hipFree(temp); temp = matrixTranspose_secondv(W2_d.getData(), 128, 64); matrixMultiplication(dz2_d.getData(), temp, dz1_d.getData(), 256,128,64); hipFree(temp); temp = reluPrime(a1_d.getData(), a1_d.getSize()); matrixProduct(dz1_d.getData(), reluPrime(a1_d.getData(), a1_d.getSize()), dz1_d.getData(), 256,128); hipFree(temp); temp = matrixTranspose_secondv(b_x_d.getData(), 256, 784); matrixMultiplication(temp,dz1_d.getData(), dw1_d.getData(),784,128,256); hipFree(temp); temp = matrixValueProduct(dw3_d.getData(), dw3_d.getSize(), lr); matrixMinus(W3_d.getData(), temp, W3_d.getData(), 64, 10); hipFree(temp); temp = matrixValueProduct(dw2_d.getData(), dw2_d.getSize(), lr); matrixMinus(W2_d.getData(), temp, W2_d.getData(), 128, 64); hipFree(temp); temp = matrixValueProduct(dw1_d.getData(), dw1_d.getSize(), lr); matrixMinus(W1_d.getData(), temp, W1_d.getData(), 784, 128); hipFree(temp); if ((i + 1) % 100 == 0) { cout << "-----------------------------------------------Epoch " << i + 1 << "--------------------------------------------------" << "\n"; //cout << "Predictions:" << "\n"; yhat_d.get(yhat); //print_value(yhat, 10, 10); //cout << "Ground truth:" << "\n"; b_y_d.get(b_y); //print_value(b_y, 10, 10); cout << compute_accuracy(yhat, b_y, 256, 10) << endl;; } } cout << endl; cout << "Testing the model ...\n"; // testing for (int i = 0; i < 16; i++) { int randindx = 37096 + i * 256; copy(X_train.begin() + randindx * 784, X_train.begin() + (randindx + BATCH_SIZE) * 784, b_x); hipMemcpy(b_x_d.getData(), b_x, BATCH_SIZE * 784 * sizeof(float), hipMemcpyHostToDevice); copy(y_train.begin() + randindx * 10, y_train.begin() + (randindx + BATCH_SIZE) * 10, b_y); hipMemcpy(b_y_d.getData(), b_y, BATCH_SIZE * 10 * sizeof(float), 
hipMemcpyHostToDevice); matrixMultiplication(b_x_d.getData(), W1_d.getData(), a1_d.getData(), b_x_d.getRowSize(), W1_d.getColSize(), b_x_d.getColSize()); relu(a1_d.getData(), a1_d.getData(), 256 * 128); matrixMultiplication(a1_d.getData(), W2_d.getData(), a2_d.getData(), 256, 64, 128); relu(a2_d.getData(), a2_d.getData(), 256 * 64); matrixMultiplication(a2_d.getData(), W3_d.getData(), yhat_d.getData(), 256, 10, 64); softmax(yhat_d.getData(), yhat_d.getData(), 256 * 10); //yhat_d.get(yhat); // Back propagation matrixMinus(yhat_d.getData(), b_y_d.getData(), dyhat_d.getData(), 256, 10); temp = matrixTranspose_secondv(a2_d.getData(), BATCH_SIZE, 64); matrixMultiplication(temp, dyhat_d.getData(), dw3_d.getData(), 64, 10, 256); hipFree(temp); temp = matrixTranspose_secondv(W3_d.getData(), 64, 10); matrixMultiplication(dyhat_d.getData(), temp, dz2_d.getData(), 256, 64, 10); hipFree(temp); temp = reluPrime(a2_d.getData(), a2_d.getSize()); matrixProduct(dz2_d.getData(), reluPrime(a2_d.getData(), a2_d.getSize()), dz2_d.getData(), 256, 64); hipFree(temp); temp = matrixTranspose_secondv(W2_d.getData(), 128, 64); matrixMultiplication(dz2_d.getData(), temp, dz1_d.getData(), 256, 128, 64); hipFree(temp); temp = reluPrime(a1_d.getData(), a1_d.getSize()); matrixProduct(dz1_d.getData(), reluPrime(a1_d.getData(), a1_d.getSize()), dz1_d.getData(), 256, 128); hipFree(temp); temp = matrixTranspose_secondv(b_x_d.getData(), 256, 784); matrixMultiplication(temp, dz1_d.getData(), dw1_d.getData(), 784, 128, 256); hipFree(temp); temp = matrixValueProduct(dw3_d.getData(), dw3_d.getSize(), lr); matrixMinus(W3_d.getData(), temp, W3_d.getData(), 64, 10); hipFree(temp); temp = matrixValueProduct(dw2_d.getData(), dw2_d.getSize(), lr); matrixMinus(W2_d.getData(), temp, W2_d.getData(), 128, 64); hipFree(temp); temp = matrixValueProduct(dw1_d.getData(), dw1_d.getSize(), lr); matrixMinus(W1_d.getData(), temp, W1_d.getData(), 784, 128); hipFree(temp); cout << 
"--------------------------------------------testing batch " << i + 1 << "--------------------------------------------------" << "\n"; yhat_d.get(yhat); b_y_d.get(b_y); cout << compute_accuracy(yhat, b_y, 256, 10) << endl;; } }
151631e5d2e188b28c3e0133974afaac1130b0fa.cu
#include <iostream> #include <vector> #include <stdlib.h> #include <time.h> #include <cuda_runtime.h> #include "dev_array.h" #include <math.h> #include <random> #include <algorithm> #include<string> #include <fstream> #include <sstream> using namespace std; vector<string> split(const string &s, char delim) { stringstream ss(s); string item; vector<string> tokens; while (getline(ss, item, delim)) { tokens.push_back(item); } return tokens; } vector <float> operator/(const vector <float>& m2, const float m1) { /* Returns the product of a float and a vectors (elementwise multiplication). Inputs: m1: float m2: vector Output: vector, m1 * m2, product of two vectors m1 and m2 */ const unsigned long VECTOR_SIZE = m2.size(); vector <float> product(VECTOR_SIZE); for (unsigned i = 0; i != VECTOR_SIZE; ++i) { product[i] = m2[i] / m1; }; return product; } __global__ void matrixMultiplicationKernel(float* A, float* B, float* C, int K_Width, int Col_Size) { int Row = blockIdx.y * blockDim.y + threadIdx.y; int Col = blockIdx.x * blockDim.x + threadIdx.x; float Pvalue = 0; for (int k = 0; k < K_Width; k++) { Pvalue += A[Row*K_Width + k] * B[k*Col_Size + Col]; } C[Row*Col_Size + Col] = Pvalue; } __global__ void reluKernel(float *Input, float *Output) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (Input[index] < 0) { Output[index] = 0.0; } else { Output[index] = Input[index]; } } __global__ void reluPrimeKernel(float *Input, float *Output) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (Input[index] <= 0) { Output[index] = 0.0; } else { Output[index] = 1.0; } } __global__ void sigmoidKernel(float *Input, float *Output) { int index = blockIdx.x * blockDim.x + threadIdx.x; Output[index] = 1 / (1 + expf(-Input[index])); } __global__ void sigmoid_dKernel(float *Input, float *Output) { int index = blockIdx.x * blockDim.x + threadIdx.x; Output[index] = Input[index] * (1 - Input[index]); } __global__ void matrixAddKernel(float *A, float *B, float *C, int Col_Size) { int 
Row = blockIdx.y * blockDim.y + threadIdx.y; int Col = blockIdx.x * blockDim.x + threadIdx.x; C[Row*Col_Size + Col] = A[Row*Col_Size + Col] + B[Row*Col_Size + Col]; } __global__ void matrixMinusKernel(float *A, float *B, float *C, int Col_Size) { int Row = blockIdx.y * blockDim.y + threadIdx.y; int Col = blockIdx.x * blockDim.x + threadIdx.x; C[Row*Col_Size + Col] = A[Row*Col_Size + Col] - B[Row*Col_Size + Col]; } __global__ void matrixProductKernel(float *A, float *B, float *C, int Col_Size) { int Row = blockIdx.y * blockDim.y + threadIdx.y; int Col = blockIdx.x * blockDim.x + threadIdx.x; C[Row*Col_Size + Col] = A[Row*Col_Size + Col] * B[Row*Col_Size + Col]; } __global__ void matrixValueProductKernel(float *Input, float *Output, float value) { int index = blockIdx.x * blockDim.x + threadIdx.x; Output[index] = Input[index] * value; } __global__ void matrixValueDivideKernel(float *Input, float *Output, float value) { int index = blockIdx.x * blockDim.x + threadIdx.x; Output[index] = Input[index] / value; } __global__ void matrixTransposeKernel(float *Input, float *Output, int Row_Size, int Col_Size) { int Row = blockIdx.y * blockDim.y + threadIdx.y; int Col = blockIdx.x * blockDim.x + threadIdx.x; Output[Col*Row_Size + Row] = Input[Row*Col_Size + Col]; } __global__ void sofmaxKernel(float *Input, float *Output) { int index = threadIdx.x; float foo[10]; float max = Input[index * 10]; for (int i = 0; i < 10; i++) { foo[i] = Input[index * 10 + i]; if (foo[i] > max) { max = foo[i]; } } for (int i = 0;i<10; i++) { foo[i] = expf(foo[i] - max); } float sum = 0.0; for (int j = 0; j < 10;j++) { sum = sum + foo[j]; } for (int j = 0;j < 10; j++) { Output[index * 10 + j] = foo[j] / sum; } } ///////////////////////////////////////////////////////////////////////////////// void matrixMultiplication(float *A, float *B, float *C, int Row_Size, int Col_Size, int K_Width) { if (Col_Size == 10) { dim3 threadsPerBlock(10, 16); dim3 blocksPerGrid(Col_Size / 10, Row_Size / 16); 
matrixMultiplicationKernel << <blocksPerGrid, threadsPerBlock >> > (A, B, C, K_Width, Col_Size); } else { dim3 threadsPerBlock(16, 16); dim3 blocksPerGrid(Col_Size / 16, Row_Size / 16); matrixMultiplicationKernel << <blocksPerGrid, threadsPerBlock >> > (A, B, C, K_Width, Col_Size); } } void relu(float *Input, float *Output, int size) { reluKernel <<< size/16, 16>> > (Input,Output); } float* reluPrime(float *Input, int size) { float *Output = NULL; cudaMalloc(&Output, size * sizeof(float)); reluPrimeKernel<<< size/16, 16>>>(Input,Output); return Output; } void sigmoid(float *Input, float *Output, int size) { sigmoidKernel << < size / 16, 16 >> > (Input,Output); } void sigmoid_d(float *Input, float *Output, int size) { sigmoid_dKernel<<< size / 16, 16 >> > (Input, Output); } void matrixAdd(float *A, float *B, float *C, int Row_Size, int Col_Size) { dim3 threadsPerBlock(16,16); dim3 blocksPerGrid(Col_Size / 16,Row_Size / 16); matrixAddKernel << < blocksPerGrid, threadsPerBlock >> > (A, B, C, Col_Size); } void matrixMinus(float *A, float *B, float *C, int Row_Size, int Col_Size) { if (Col_Size == 10) { dim3 threadsPerBlock(10, 16); dim3 blocksPerGrid(Col_Size / 10, Row_Size / 16); matrixMinusKernel << < blocksPerGrid, threadsPerBlock >> > (A, B, C, Col_Size); } else { dim3 threadsPerBlock(16, 16); dim3 blocksPerGrid(Col_Size / 16, Row_Size / 16); matrixMinusKernel << < blocksPerGrid, threadsPerBlock >> > (A, B, C, Col_Size); } } void matrixProduct(float *A, float *B, float *C, int Row_Size, int Col_Size) { dim3 threadsPerBlock(16,16); dim3 blocksPerGrid(Col_Size / 16, Row_Size / 16); matrixProductKernel << < blocksPerGrid, threadsPerBlock>> > (A,B,C,Col_Size); } float* matrixValueProduct(float *Input, int size, float value) { float *Output = NULL; cudaMalloc(&Output, size*sizeof(float)); matrixValueProductKernel<< < size / 16, 16 >> > (Input, Output, value); return Output; } void matrixValueDivide(float *Input, float *Output, int size, float value) { 
matrixValueDivideKernel << < size / 16, 16 >> > (Input, Output, value); } void matrixTranspose(float *Input, float *Output, int Row_Size, int Col_Size) { dim3 threadsPerBlock(16, 16); dim3 blocksPerGrid(Col_Size / 16, Row_Size / 16); matrixTransposeKernel << < blocksPerGrid, threadsPerBlock >> > (Input,Output,Row_Size,Col_Size); } float* matrixTranspose_secondv(float *Input, int Row_Size, int Col_Size) { if (Col_Size == 10) { dim3 threadsPerBlock(10, 16); dim3 blocksPerGrid(Col_Size / 10, Row_Size / 16); float *Output = NULL; cudaMalloc(&Output, Row_Size * Col_Size * sizeof(float)); matrixTransposeKernel << < blocksPerGrid, threadsPerBlock >> > (Input, Output, Row_Size, Col_Size); return Output; } else { dim3 threadsPerBlock(16, 16); dim3 blocksPerGrid(Col_Size / 16, Row_Size / 16); float *Output = NULL; cudaMalloc(&Output, Row_Size * Col_Size * sizeof(float)); matrixTransposeKernel << < blocksPerGrid, threadsPerBlock >> > (Input, Output, Row_Size, Col_Size); return Output; } } void softmax(float *Input, float *Output,int size) { sofmaxKernel << <1,size / 10 >> > (Input, Output); } ///////////////////////////////////////////////////////////////////// float* test_generate(float *M, size_t size, float num) { M = (float *)malloc(size * sizeof(float)); for (int i = 0; i < size; i++) { M[i] = num; } return M; } float* random_generate(float *M, size_t size) { M = (float *)malloc(size * sizeof(float)); random_device rd; mt19937 gen(rd()); uniform_real_distribution<> distribution(0.0, 0.05); static default_random_engine generator; generate(M, M + size, [&]() { return distribution(generator); }); return M; } void print_value(float *M, int Row_Size, int Col_Size) { for (int i = 0; i < Row_Size; i++) { for (int j = 0; j < Col_Size; j++) { printf("%f ",M[i*Col_Size + j]); } printf("\n\n"); } } float compute_accuracy(float *prediction,float *ground_truth, int Row_Size, int Col_Size) { float correct = 0; for (int i = 0;i < Row_Size;i++) { int index1 = 
distance(&prediction[i*Col_Size], max_element(&prediction[i*Col_Size], &prediction[i*Col_Size] + Col_Size)); int index2 = distance(&ground_truth[i*Col_Size], max_element(&ground_truth[i*Col_Size], &ground_truth[i*Col_Size] + Col_Size)); if (index1 == index2) correct += 1; } return correct / Row_Size; } int main(int argc, char *argv[]) { // generate W1,W2,W3 //matrixMultiplication string line; vector<string> line_v; cout << "Loading data ...\n"; vector<float> X_train; vector<float> y_train; ifstream myfile("./train.txt"); if (myfile.is_open()) { while (getline(myfile, line)) { line_v = split(line, '\t'); int digit = strtof((line_v[0]).c_str(), 0); for (unsigned i = 0; i < 10; ++i) { if (i == digit) { y_train.push_back(1.); } else y_train.push_back(0.); } int size = static_cast<int>(line_v.size()); for (unsigned i = 1; i < size; ++i) { X_train.push_back(strtof((line_v[i]).c_str(), 0)); } } X_train = X_train / 255.0; } else cout << "Unable to open file" << '\n'; cout << X_train.size(); myfile.close(); int BATCH_SIZE = 256; float lr = .01 / BATCH_SIZE; // Random initialization of the weights float *W1 = NULL, *W2 = NULL, *W3 = NULL, *b_x = NULL, *b_y = NULL; float *a1 = NULL, *a2 = NULL, *yhat = NULL, *dyhat = NULL; float *dw3 = NULL, *dz2 = NULL , *dw2 = NULL, *dz1 = NULL, *dw1=NULL; float *temp; // forward variable W1 = random_generate(W1, 784*128); W2 = random_generate(W2, 128*64); W3 = random_generate(W3, 64*10); b_x = (float *)malloc(BATCH_SIZE * 784 * sizeof(float)); b_y = (float *)malloc(BATCH_SIZE * 10 * sizeof(float)); a1 = (float *)malloc(BATCH_SIZE * 128 * sizeof(float)); a2 = (float *)malloc(BATCH_SIZE * 64 * sizeof(float)); yhat = (float *)malloc(BATCH_SIZE * 10 * sizeof(float)); // dyhat = (float *)malloc(BATCH_SIZE * 10 * sizeof(float)); dw3 = (float *)malloc(64 * 10 * sizeof(float)); dz2 = (float *)malloc(256 * 64 * sizeof(float)); dw2 = (float *)malloc(128 * 64 * sizeof(float)); dz1 = (float *)malloc(256 * 128 * sizeof(float)); dw1 = (float 
*)malloc(784 * 128 * sizeof(float)); dev_array W1_d(784, 128); dev_array W2_d(128, 64); dev_array W3_d(64, 10); dev_array b_x_d(BATCH_SIZE , 784); dev_array b_y_d(BATCH_SIZE , 10); dev_array a1_d(BATCH_SIZE, 128); dev_array a2_d(BATCH_SIZE, 64); dev_array yhat_d(BATCH_SIZE, 10); dev_array dyhat_d(BATCH_SIZE, 10); dev_array dw3_d(64, 10); dev_array dz2_d(256, 64); dev_array dw2_d(128 ,64); dev_array dz1_d(256, 128); dev_array dw1_d(784, 128); W1_d.set(W1); W2_d.set(W2); W3_d.set(W3); b_x_d.set(b_x); b_y_d.set(b_y); a1_d.set(a1); a2_d.set(a2); yhat_d.set(yhat); dyhat_d.set(dyhat); dw3_d.set(dw3); dz2_d.set(dz2); dw2_d.set(dw2); dz1_d.set(dz1); dw1_d.set(dw1); cout << "Training the model ...\n"; for (unsigned i = 0 ; i < 32000; i++) { int randindx = rand() % (37904 - BATCH_SIZE); copy(X_train.begin()+ randindx * 784, X_train.begin() + (randindx+ BATCH_SIZE)*784, b_x); cudaMemcpy(b_x_d.getData(), b_x, BATCH_SIZE * 784 * sizeof(float) ,cudaMemcpyHostToDevice); copy(y_train.begin() + randindx * 10, y_train.begin() + (randindx + BATCH_SIZE) * 10, b_y); cudaMemcpy(b_y_d.getData(), b_y, BATCH_SIZE * 10 * sizeof(float), cudaMemcpyHostToDevice); matrixMultiplication(b_x_d.getData(), W1_d.getData(), a1_d.getData(), b_x_d.getRowSize(), W1_d.getColSize(), b_x_d.getColSize()); relu(a1_d.getData(), a1_d.getData(), 256 * 128); matrixMultiplication(a1_d.getData(), W2_d.getData(), a2_d.getData(), 256, 64, 128); relu(a2_d.getData(), a2_d.getData(), 256 * 64); matrixMultiplication(a2_d.getData(), W3_d.getData(), yhat_d.getData(), 256, 10, 64); softmax(yhat_d.getData(), yhat_d.getData(), 256*10); //yhat_d.get(yhat); // Back propagation matrixMinus(yhat_d.getData(), b_y_d.getData(), dyhat_d.getData(), 256, 10); temp = matrixTranspose_secondv(a2_d.getData(), BATCH_SIZE, 64); matrixMultiplication(temp, dyhat_d.getData(), dw3_d.getData(), 64, 10, 256); cudaFree(temp); temp = matrixTranspose_secondv(W3_d.getData(), 64, 10); matrixMultiplication(dyhat_d.getData(), temp, 
dz2_d.getData(),256,64,10); cudaFree(temp); temp = reluPrime(a2_d.getData(), a2_d.getSize()); matrixProduct(dz2_d.getData(), reluPrime(a2_d.getData(),a2_d.getSize()) , dz2_d.getData(), 256, 64); cudaFree(temp); temp = matrixTranspose_secondv(W2_d.getData(), 128, 64); matrixMultiplication(dz2_d.getData(), temp, dz1_d.getData(), 256,128,64); cudaFree(temp); temp = reluPrime(a1_d.getData(), a1_d.getSize()); matrixProduct(dz1_d.getData(), reluPrime(a1_d.getData(), a1_d.getSize()), dz1_d.getData(), 256,128); cudaFree(temp); temp = matrixTranspose_secondv(b_x_d.getData(), 256, 784); matrixMultiplication(temp,dz1_d.getData(), dw1_d.getData(),784,128,256); cudaFree(temp); temp = matrixValueProduct(dw3_d.getData(), dw3_d.getSize(), lr); matrixMinus(W3_d.getData(), temp, W3_d.getData(), 64, 10); cudaFree(temp); temp = matrixValueProduct(dw2_d.getData(), dw2_d.getSize(), lr); matrixMinus(W2_d.getData(), temp, W2_d.getData(), 128, 64); cudaFree(temp); temp = matrixValueProduct(dw1_d.getData(), dw1_d.getSize(), lr); matrixMinus(W1_d.getData(), temp, W1_d.getData(), 784, 128); cudaFree(temp); if ((i + 1) % 100 == 0) { cout << "-----------------------------------------------Epoch " << i + 1 << "--------------------------------------------------" << "\n"; //cout << "Predictions:" << "\n"; yhat_d.get(yhat); //print_value(yhat, 10, 10); //cout << "Ground truth:" << "\n"; b_y_d.get(b_y); //print_value(b_y, 10, 10); cout << compute_accuracy(yhat, b_y, 256, 10) << endl;; } } cout << endl; cout << "Testing the model ...\n"; // testing for (int i = 0; i < 16; i++) { int randindx = 37096 + i * 256; copy(X_train.begin() + randindx * 784, X_train.begin() + (randindx + BATCH_SIZE) * 784, b_x); cudaMemcpy(b_x_d.getData(), b_x, BATCH_SIZE * 784 * sizeof(float), cudaMemcpyHostToDevice); copy(y_train.begin() + randindx * 10, y_train.begin() + (randindx + BATCH_SIZE) * 10, b_y); cudaMemcpy(b_y_d.getData(), b_y, BATCH_SIZE * 10 * sizeof(float), cudaMemcpyHostToDevice); 
matrixMultiplication(b_x_d.getData(), W1_d.getData(), a1_d.getData(), b_x_d.getRowSize(), W1_d.getColSize(), b_x_d.getColSize()); relu(a1_d.getData(), a1_d.getData(), 256 * 128); matrixMultiplication(a1_d.getData(), W2_d.getData(), a2_d.getData(), 256, 64, 128); relu(a2_d.getData(), a2_d.getData(), 256 * 64); matrixMultiplication(a2_d.getData(), W3_d.getData(), yhat_d.getData(), 256, 10, 64); softmax(yhat_d.getData(), yhat_d.getData(), 256 * 10); //yhat_d.get(yhat); // Back propagation matrixMinus(yhat_d.getData(), b_y_d.getData(), dyhat_d.getData(), 256, 10); temp = matrixTranspose_secondv(a2_d.getData(), BATCH_SIZE, 64); matrixMultiplication(temp, dyhat_d.getData(), dw3_d.getData(), 64, 10, 256); cudaFree(temp); temp = matrixTranspose_secondv(W3_d.getData(), 64, 10); matrixMultiplication(dyhat_d.getData(), temp, dz2_d.getData(), 256, 64, 10); cudaFree(temp); temp = reluPrime(a2_d.getData(), a2_d.getSize()); matrixProduct(dz2_d.getData(), reluPrime(a2_d.getData(), a2_d.getSize()), dz2_d.getData(), 256, 64); cudaFree(temp); temp = matrixTranspose_secondv(W2_d.getData(), 128, 64); matrixMultiplication(dz2_d.getData(), temp, dz1_d.getData(), 256, 128, 64); cudaFree(temp); temp = reluPrime(a1_d.getData(), a1_d.getSize()); matrixProduct(dz1_d.getData(), reluPrime(a1_d.getData(), a1_d.getSize()), dz1_d.getData(), 256, 128); cudaFree(temp); temp = matrixTranspose_secondv(b_x_d.getData(), 256, 784); matrixMultiplication(temp, dz1_d.getData(), dw1_d.getData(), 784, 128, 256); cudaFree(temp); temp = matrixValueProduct(dw3_d.getData(), dw3_d.getSize(), lr); matrixMinus(W3_d.getData(), temp, W3_d.getData(), 64, 10); cudaFree(temp); temp = matrixValueProduct(dw2_d.getData(), dw2_d.getSize(), lr); matrixMinus(W2_d.getData(), temp, W2_d.getData(), 128, 64); cudaFree(temp); temp = matrixValueProduct(dw1_d.getData(), dw1_d.getSize(), lr); matrixMinus(W1_d.getData(), temp, W1_d.getData(), 784, 128); cudaFree(temp); cout << "--------------------------------------------testing batch 
" << i + 1 << "--------------------------------------------------" << "\n"; yhat_d.get(yhat); b_y_d.get(b_y); cout << compute_accuracy(yhat, b_y, 256, 10) << endl;; } }
9f921258c7ed46fbeacc6bc199c3a1ac07171505.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

// Minimal HIP error check: print the failing call's location and abort.
#define HIP_CHECK(call)                                                      \
    do {                                                                     \
        hipError_t err_ = (call);                                            \
        if (err_ != hipSuccess) {                                            \
            fprintf(stderr, "HIP error %s:%d: %s\n", __FILE__, __LINE__,     \
                    hipGetErrorString(err_));                                \
            exit(1);                                                         \
        }                                                                    \
    } while (0)

// Generated stress kernel: exercises float arithmetic and device intrinsics.
// var_8 / var_9 must be DEVICE buffers with at least var_1 elements; the
// kernel writes one element per loop iteration and prints the accumulator.
__global__ void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float* var_8,float* var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22) {
   comp = var_2 - sinhf((-1.0704E-35f / (var_3 - var_4)));
   float tmp_1 = (var_5 - var_6);
   comp = tmp_1 - -1.0138E-43f / (var_7 - -1.4411E36f * -1.5918E36f * -1.1523E-18f);
   for (int i=0; i < var_1; ++i) {
     var_8[i] = -1.2572E12f;
     float tmp_2 = var_10 - ceilf(-1.1408E15f);
     var_9[i] = var_11 + (+1.2625E-42f - -1.5289E35f);
     comp += var_9[i] / tmp_2 - var_8[i] / (var_12 - var_13 * (var_14 + +1.7477E-41f / atan2f((var_15 + atanf((+1.7147E0f + +1.7054E27f - (+0.0f - (var_16 + var_17 - var_18))))), -1.6021E35f * (var_19 - -1.3869E-25f - expf((var_20 - (+0.0f - var_21 - var_22)))))));
   }
   printf("%.17g\n", comp);
}

// Allocate and fill a 10-element HOST buffer with value v (caller frees).
float* initPointer(float v) {
  float *ret = (float*) malloc(sizeof(float)*10);
  for(int i=0; i < 10; ++i)
    ret[i] = v;
  return ret;
}

int main(int argc, char** argv) {
/* Program variables */
  // FIX(review): the original indexed argv[1..23] unconditionally; a short
  // command line was undefined behavior instead of a usage error.
  if (argc < 24) {
    fprintf(stderr, "usage: %s v1 ... v23\n", argv[0]);
    return 1;
  }
  float tmp_1 = atof(argv[1]);
  int tmp_2 = atoi(argv[2]);
  float tmp_3 = atof(argv[3]);
  float tmp_4 = atof(argv[4]);
  float tmp_5 = atof(argv[5]);
  float tmp_6 = atof(argv[6]);
  float tmp_7 = atof(argv[7]);
  float tmp_8 = atof(argv[8]);
  float* tmp_9 = initPointer( atof(argv[9]) );
  float* tmp_10 = initPointer( atof(argv[10]) );
  float tmp_11 = atof(argv[11]);
  float tmp_12 = atof(argv[12]);
  float tmp_13 = atof(argv[13]);
  float tmp_14 = atof(argv[14]);
  float tmp_15 = atof(argv[15]);
  float tmp_16 = atof(argv[16]);
  float tmp_17 = atof(argv[17]);
  float tmp_18 = atof(argv[18]);
  float tmp_19 = atof(argv[19]);
  float tmp_20 = atof(argv[20]);
  float tmp_21 = atof(argv[21]);
  float tmp_22 = atof(argv[22]);
  float tmp_23 = atof(argv[23]);

  // FIX(review): the kernel loop writes var_8[i]/var_9[i] for i < var_1, but
  // the buffers hold exactly 10 floats; clamp to avoid device OOB writes.
  if (tmp_2 > 10) tmp_2 = 10;

  // FIX(review): tmp_9/tmp_10 were host malloc() pointers passed straight to
  // the kernel, which dereferences them on the device — an illegal address
  // access. Stage the data in device memory instead.
  float *dev_9 = NULL, *dev_10 = NULL;
  HIP_CHECK(hipMalloc((void**)&dev_9,  sizeof(float)*10));
  HIP_CHECK(hipMalloc((void**)&dev_10, sizeof(float)*10));
  HIP_CHECK(hipMemcpy(dev_9,  tmp_9,  sizeof(float)*10, hipMemcpyHostToDevice));
  HIP_CHECK(hipMemcpy(dev_10, tmp_10, sizeof(float)*10, hipMemcpyHostToDevice));

  hipLaunchKernelGGL(compute, dim3(1), dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,dev_9,dev_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23);
  HIP_CHECK(hipGetLastError());        // catch launch-configuration errors
  HIP_CHECK(hipDeviceSynchronize());   // surface asynchronous kernel faults

  // FIX(review): release device and host buffers (original leaked all four).
  HIP_CHECK(hipFree(dev_9));
  HIP_CHECK(hipFree(dev_10));
  free(tmp_9);
  free(tmp_10);
  return 0;
}
9f921258c7ed46fbeacc6bc199c3a1ac07171505.cu
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

// Minimal CUDA error check: print the failing call's location and abort.
#define CUDA_CHECK(call)                                                     \
    do {                                                                     \
        cudaError_t err_ = (call);                                           \
        if (err_ != cudaSuccess) {                                           \
            fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__,    \
                    cudaGetErrorString(err_));                               \
            exit(1);                                                         \
        }                                                                    \
    } while (0)

// Generated stress kernel: exercises float arithmetic and device intrinsics.
// var_8 / var_9 must be DEVICE buffers with at least var_1 elements; the
// kernel writes one element per loop iteration and prints the accumulator.
__global__ void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float* var_8,float* var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22) {
   comp = var_2 - sinhf((-1.0704E-35f / (var_3 - var_4)));
   float tmp_1 = (var_5 - var_6);
   comp = tmp_1 - -1.0138E-43f / (var_7 - -1.4411E36f * -1.5918E36f * -1.1523E-18f);
   for (int i=0; i < var_1; ++i) {
     var_8[i] = -1.2572E12f;
     float tmp_2 = var_10 - ceilf(-1.1408E15f);
     var_9[i] = var_11 + (+1.2625E-42f - -1.5289E35f);
     comp += var_9[i] / tmp_2 - var_8[i] / (var_12 - var_13 * (var_14 + +1.7477E-41f / atan2f((var_15 + atanf((+1.7147E0f + +1.7054E27f - (+0.0f - (var_16 + var_17 - var_18))))), -1.6021E35f * (var_19 - -1.3869E-25f - expf((var_20 - (+0.0f - var_21 - var_22)))))));
   }
   printf("%.17g\n", comp);
}

// Allocate and fill a 10-element HOST buffer with value v (caller frees).
float* initPointer(float v) {
  float *ret = (float*) malloc(sizeof(float)*10);
  for(int i=0; i < 10; ++i)
    ret[i] = v;
  return ret;
}

int main(int argc, char** argv) {
/* Program variables */
  // FIX(review): the original indexed argv[1..23] unconditionally; a short
  // command line was undefined behavior instead of a usage error.
  if (argc < 24) {
    fprintf(stderr, "usage: %s v1 ... v23\n", argv[0]);
    return 1;
  }
  float tmp_1 = atof(argv[1]);
  int tmp_2 = atoi(argv[2]);
  float tmp_3 = atof(argv[3]);
  float tmp_4 = atof(argv[4]);
  float tmp_5 = atof(argv[5]);
  float tmp_6 = atof(argv[6]);
  float tmp_7 = atof(argv[7]);
  float tmp_8 = atof(argv[8]);
  float* tmp_9 = initPointer( atof(argv[9]) );
  float* tmp_10 = initPointer( atof(argv[10]) );
  float tmp_11 = atof(argv[11]);
  float tmp_12 = atof(argv[12]);
  float tmp_13 = atof(argv[13]);
  float tmp_14 = atof(argv[14]);
  float tmp_15 = atof(argv[15]);
  float tmp_16 = atof(argv[16]);
  float tmp_17 = atof(argv[17]);
  float tmp_18 = atof(argv[18]);
  float tmp_19 = atof(argv[19]);
  float tmp_20 = atof(argv[20]);
  float tmp_21 = atof(argv[21]);
  float tmp_22 = atof(argv[22]);
  float tmp_23 = atof(argv[23]);

  // FIX(review): the kernel loop writes var_8[i]/var_9[i] for i < var_1, but
  // the buffers hold exactly 10 floats; clamp to avoid device OOB writes.
  if (tmp_2 > 10) tmp_2 = 10;

  // FIX(review): tmp_9/tmp_10 were host malloc() pointers passed straight to
  // the kernel, which dereferences them on the device — an illegal address
  // access. Stage the data in device memory instead.
  float *dev_9 = NULL, *dev_10 = NULL;
  CUDA_CHECK(cudaMalloc((void**)&dev_9,  sizeof(float)*10));
  CUDA_CHECK(cudaMalloc((void**)&dev_10, sizeof(float)*10));
  CUDA_CHECK(cudaMemcpy(dev_9,  tmp_9,  sizeof(float)*10, cudaMemcpyHostToDevice));
  CUDA_CHECK(cudaMemcpy(dev_10, tmp_10, sizeof(float)*10, cudaMemcpyHostToDevice));

  compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,dev_9,dev_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23);
  CUDA_CHECK(cudaGetLastError());        // catch launch-configuration errors
  CUDA_CHECK(cudaDeviceSynchronize());   // surface asynchronous kernel faults

  // FIX(review): release device and host buffers (original leaked all four).
  CUDA_CHECK(cudaFree(dev_9));
  CUDA_CHECK(cudaFree(dev_10));
  free(tmp_9);
  free(tmp_10);
  return 0;
}
42977dff8a0d8ae25b286885ad4ec444565508fe.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream>
#include<fstream>
#include<time.h>
#include<cstdio>
#include<cstdlib>
using namespace std;

// A polygon: vertex arrays, vertex count, and its bounding box.
// mbr layout (inferred from usage below): {min_x, max_x, min_y, max_y}.
typedef struct{
  int *x;
  int *y;
  int nr_vertice;
  int mbr[4];
  int *boxes;
}polygon;

// Crossing-number point-in-polygon test: casts a ray in +x and counts edge
// crossings; odd count => inside. Assumes the vertex list is closed
// (poly[nCount-1] repeats poly[0]) since edges stop at nCount-1.
__device__ int PtInPolygon(int x,int y, int *poly_x, int *poly_y, int nCount){
  int nCross=0,i;
  int x1,x2,y1,y2;
  double ix;
  for(i=0;i<nCount-1;i++){
    x1=poly_x[i];
    y1=poly_y[i];
    x2=poly_x[i+1];
    y2=poly_y[i+1];
    if(y1==y2)continue;            // horizontal edge never crossed by the ray
    if(y<min(y1,y2))continue;
    if(y>=max(y1,y2))continue;
    ix=(double)(y-y1)*(double)(x2-x1)/(double)(y2-y1)+x1;
    if(ix>x)nCross++;
  }
  return(nCross%2==1);
}

// One block per lattice point (blockIdx.x+left, blockIdx.y+top), two threads
// per block: even tid tests the point against poly1, odd tid against poly2.
// result must hold 2 * gridDim.x * gridDim.y ints.
__global__ void kernel(int nr_v1, int *poly1_x, int *poly1_y, int nr_v2, int *poly2_x, int *poly2_y, int left, int top, int *result){
  int tid = threadIdx.x+blockIdx.x*blockDim.x+blockIdx.y*gridDim.x*blockDim.x;
  int x = blockIdx.x + left,y = blockIdx.y + top;
  int *poly_x, *poly_y, nr_v;
  poly_x = (tid%2 == 0) ? poly1_x:poly2_x;
  poly_y = (tid%2 == 0) ? poly1_y:poly2_y;
  nr_v = (tid%2 == 0) ? nr_v1:nr_v2;
  if(PtInPolygon(x, y, poly_x, poly_y, nr_v) == 1)
    result[tid] = 1;
  else
    result[tid] = 0;
}

// Parse one input line: "<n>, <minx> <maxx> <miny> <maxy>, x0 y0, x1 y1, ...".
// Allocates poly->x / poly->y (caller frees).
// NOTE(review): the comma scans assume well-formed input; a malformed line
// would run past the buffer — confirm the input format is trusted.
void parsePoly(char *line,polygon *poly){
  int i, offset = 0;
  sscanf(line, "%d, %d %d %d %d", &poly->nr_vertice, &poly->mbr[0], &poly->mbr[1], &poly->mbr[2], &poly->mbr[3]);
  //printf("%d, %d %d %d %d\n", poly->nr_vertice, poly->mbr[0], poly->mbr[1], poly->mbr[2], poly->mbr[3]);
  while(line[offset++] != ',');
  while(line[offset++] != ',');
  poly->x = (int *)malloc(poly->nr_vertice*sizeof(int));
  poly->y = (int *)malloc(poly->nr_vertice*sizeof(int));
  for(i=0;i<poly->nr_vertice;i++){
    sscanf(line+offset, "%d %d", &poly->x[i], &poly->y[i]);
    while(line[offset++] != ',');
  }
}

// Quick reject: returns 0 iff poly2's bounding box contains poly1's.
int filter(polygon *poly1, polygon *poly2){
  /* Check whether the mbr of poly1 contains in poly2 */
  if(poly2->mbr[0]<=poly1->mbr[0] && poly2->mbr[1]>=poly1->mbr[1] &&
     poly2->mbr[2]<=poly1->mbr[2] && poly2->mbr[3]>=poly1->mbr[3])
    return 0;
  else
    return 1;
}

int main()
{
  static const int read_bufsize=65536;
  char polygon1[read_bufsize], polygon2[read_bufsize];
  const char *filename = "polygon";
  fstream polyfile;
  polyfile.open(filename,fstream::in | fstream::binary);
  polyfile.getline(polygon1,read_bufsize);
  polyfile.getline(polygon2,read_bufsize);
  polygon *poly1,*poly2;
  poly1 = (polygon *)malloc(sizeof(polygon));
  poly2 = (polygon *)malloc(sizeof(polygon));
  parsePoly(polygon1, poly1);
  parsePoly(polygon2, poly2);
  if(filter(poly1,poly2)){
    cout<<"NO!"<<endl;
    // FIX(review): release parse-time allocations on the early-exit path.
    free(poly1->x); free(poly1->y); free(poly2->x); free(poly2->y);
    free(poly1); free(poly2);
    return 1;
  }
  int *dev_poly1_x, *dev_poly1_y, *dev_poly2_x, *dev_poly2_y;
  int *host_result, *dev_result;
  // Number of lattice points in poly1's bounding box, INCLUSIVE of both edges.
  int boxsize = (poly1->mbr[1]-poly1->mbr[0]+1)*(poly1->mbr[3]-poly1->mbr[2]+1);
  hipMalloc((void **)&dev_poly1_x, poly1->nr_vertice*sizeof(int));
  hipMalloc((void **)&dev_poly1_y, poly1->nr_vertice*sizeof(int));
  hipMalloc((void **)&dev_poly2_x, poly2->nr_vertice*sizeof(int));
  hipMalloc((void **)&dev_poly2_y, poly2->nr_vertice*sizeof(int));
  hipMalloc((void **)&dev_result, 2*boxsize*sizeof(int));
  hipMemcpy(dev_poly1_x, poly1->x, poly1->nr_vertice*sizeof(int), hipMemcpyHostToDevice);
  hipMemcpy(dev_poly1_y, poly1->y, poly1->nr_vertice*sizeof(int), hipMemcpyHostToDevice);
  hipMemcpy(dev_poly2_x, poly2->x, poly2->nr_vertice*sizeof(int), hipMemcpyHostToDevice);
  hipMemcpy(dev_poly2_y, poly2->y, poly2->nr_vertice*sizeof(int), hipMemcpyHostToDevice);
  clock_t start, end;
  start = clock();
  // FIX(review): boxsize counts (w+1)*(h+1) points but the original grid was
  // (w, h) — one block short in each dimension — so the tail of dev_result
  // was never written and the host loop below read uninitialized memory.
  // Launch exactly one block per lattice point.
  dim3 grids(poly1->mbr[1]-poly1->mbr[0]+1, poly1->mbr[3]-poly1->mbr[2]+1);
  hipLaunchKernelGGL(kernel, grids, dim3(2), 0, 0, poly1->nr_vertice, dev_poly1_x, dev_poly1_y, poly2->nr_vertice, dev_poly2_x, dev_poly2_y, poly1->mbr[0], poly1->mbr[2], dev_result);
  host_result = (int *)malloc(2*boxsize*sizeof(int));
  hipMemcpy(host_result, dev_result, 2*boxsize*sizeof(int), hipMemcpyDeviceToHost);
  // FIX(review): free everything on all exit paths (original leaked it all).
  hipFree(dev_poly1_x); hipFree(dev_poly1_y);
  hipFree(dev_poly2_x); hipFree(dev_poly2_y);
  hipFree(dev_result);
  int contained = 1;
  for(int i=0;i<boxsize;i++){
    // cout<<i % (poly1->mbr[1]-poly1->mbr[0])+poly1->mbr[0]<<" "<<i/(poly1->mbr[1]-poly1->mbr[0])+poly1->mbr[2]<<endl;
    // A point inside poly1 but outside poly2 disproves containment.
    if(host_result[2*i] == 1 && host_result[2*i+1] == 0){
      contained = 0;
      break;
    }
  }
  end = clock();
  free(host_result);
  free(poly1->x); free(poly1->y); free(poly2->x); free(poly2->y);
  free(poly1); free(poly2);
  if(!contained){
    cout<<"NO! Time used: "<<end-start<<endl;
    return 1;
  }
  cout<<"YES! Time used: "<<end-start<<endl;
  return 0;
}
42977dff8a0d8ae25b286885ad4ec444565508fe.cu
#include<iostream>
#include<fstream>
#include<time.h>
#include<cstdio>
#include<cstdlib>
using namespace std;

// A polygon: vertex arrays, vertex count, and its bounding box.
// mbr layout (inferred from usage below): {min_x, max_x, min_y, max_y}.
typedef struct{
  int *x;
  int *y;
  int nr_vertice;
  int mbr[4];
  int *boxes;
}polygon;

// Crossing-number point-in-polygon test: casts a ray in +x and counts edge
// crossings; odd count => inside. Assumes the vertex list is closed
// (poly[nCount-1] repeats poly[0]) since edges stop at nCount-1.
__device__ int PtInPolygon(int x,int y, int *poly_x, int *poly_y, int nCount){
  int nCross=0,i;
  int x1,x2,y1,y2;
  double ix;
  for(i=0;i<nCount-1;i++){
    x1=poly_x[i];
    y1=poly_y[i];
    x2=poly_x[i+1];
    y2=poly_y[i+1];
    if(y1==y2)continue;            // horizontal edge never crossed by the ray
    if(y<min(y1,y2))continue;
    if(y>=max(y1,y2))continue;
    ix=(double)(y-y1)*(double)(x2-x1)/(double)(y2-y1)+x1;
    if(ix>x)nCross++;
  }
  return(nCross%2==1);
}

// One block per lattice point (blockIdx.x+left, blockIdx.y+top), two threads
// per block: even tid tests the point against poly1, odd tid against poly2.
// result must hold 2 * gridDim.x * gridDim.y ints.
__global__ void kernel(int nr_v1, int *poly1_x, int *poly1_y, int nr_v2, int *poly2_x, int *poly2_y, int left, int top, int *result){
  int tid = threadIdx.x+blockIdx.x*blockDim.x+blockIdx.y*gridDim.x*blockDim.x;
  int x = blockIdx.x + left,y = blockIdx.y + top;
  int *poly_x, *poly_y, nr_v;
  poly_x = (tid%2 == 0) ? poly1_x:poly2_x;
  poly_y = (tid%2 == 0) ? poly1_y:poly2_y;
  nr_v = (tid%2 == 0) ? nr_v1:nr_v2;
  if(PtInPolygon(x, y, poly_x, poly_y, nr_v) == 1)
    result[tid] = 1;
  else
    result[tid] = 0;
}

// Parse one input line: "<n>, <minx> <maxx> <miny> <maxy>, x0 y0, x1 y1, ...".
// Allocates poly->x / poly->y (caller frees).
// NOTE(review): the comma scans assume well-formed input; a malformed line
// would run past the buffer — confirm the input format is trusted.
void parsePoly(char *line,polygon *poly){
  int i, offset = 0;
  sscanf(line, "%d, %d %d %d %d", &poly->nr_vertice, &poly->mbr[0], &poly->mbr[1], &poly->mbr[2], &poly->mbr[3]);
  //printf("%d, %d %d %d %d\n", poly->nr_vertice, poly->mbr[0], poly->mbr[1], poly->mbr[2], poly->mbr[3]);
  while(line[offset++] != ',');
  while(line[offset++] != ',');
  poly->x = (int *)malloc(poly->nr_vertice*sizeof(int));
  poly->y = (int *)malloc(poly->nr_vertice*sizeof(int));
  for(i=0;i<poly->nr_vertice;i++){
    sscanf(line+offset, "%d %d", &poly->x[i], &poly->y[i]);
    while(line[offset++] != ',');
  }
}

// Quick reject: returns 0 iff poly2's bounding box contains poly1's.
int filter(polygon *poly1, polygon *poly2){
  /* Check whether the mbr of poly1 contains in poly2 */
  if(poly2->mbr[0]<=poly1->mbr[0] && poly2->mbr[1]>=poly1->mbr[1] &&
     poly2->mbr[2]<=poly1->mbr[2] && poly2->mbr[3]>=poly1->mbr[3])
    return 0;
  else
    return 1;
}

int main()
{
  static const int read_bufsize=65536;
  char polygon1[read_bufsize], polygon2[read_bufsize];
  const char *filename = "polygon";
  fstream polyfile;
  polyfile.open(filename,fstream::in | fstream::binary);
  polyfile.getline(polygon1,read_bufsize);
  polyfile.getline(polygon2,read_bufsize);
  polygon *poly1,*poly2;
  poly1 = (polygon *)malloc(sizeof(polygon));
  poly2 = (polygon *)malloc(sizeof(polygon));
  parsePoly(polygon1, poly1);
  parsePoly(polygon2, poly2);
  if(filter(poly1,poly2)){
    cout<<"NO!"<<endl;
    // FIX(review): release parse-time allocations on the early-exit path.
    free(poly1->x); free(poly1->y); free(poly2->x); free(poly2->y);
    free(poly1); free(poly2);
    return 1;
  }
  int *dev_poly1_x, *dev_poly1_y, *dev_poly2_x, *dev_poly2_y;
  int *host_result, *dev_result;
  // Number of lattice points in poly1's bounding box, INCLUSIVE of both edges.
  int boxsize = (poly1->mbr[1]-poly1->mbr[0]+1)*(poly1->mbr[3]-poly1->mbr[2]+1);
  cudaMalloc((void **)&dev_poly1_x, poly1->nr_vertice*sizeof(int));
  cudaMalloc((void **)&dev_poly1_y, poly1->nr_vertice*sizeof(int));
  cudaMalloc((void **)&dev_poly2_x, poly2->nr_vertice*sizeof(int));
  cudaMalloc((void **)&dev_poly2_y, poly2->nr_vertice*sizeof(int));
  cudaMalloc((void **)&dev_result, 2*boxsize*sizeof(int));
  cudaMemcpy(dev_poly1_x, poly1->x, poly1->nr_vertice*sizeof(int), cudaMemcpyHostToDevice);
  cudaMemcpy(dev_poly1_y, poly1->y, poly1->nr_vertice*sizeof(int), cudaMemcpyHostToDevice);
  cudaMemcpy(dev_poly2_x, poly2->x, poly2->nr_vertice*sizeof(int), cudaMemcpyHostToDevice);
  cudaMemcpy(dev_poly2_y, poly2->y, poly2->nr_vertice*sizeof(int), cudaMemcpyHostToDevice);
  clock_t start, end;
  start = clock();
  // FIX(review): boxsize counts (w+1)*(h+1) points but the original grid was
  // (w, h) — one block short in each dimension — so the tail of dev_result
  // was never written and the host loop below read uninitialized memory.
  // Launch exactly one block per lattice point.
  dim3 grids(poly1->mbr[1]-poly1->mbr[0]+1, poly1->mbr[3]-poly1->mbr[2]+1);
  kernel<<<grids, 2>>>(poly1->nr_vertice, dev_poly1_x, dev_poly1_y, poly2->nr_vertice, dev_poly2_x, dev_poly2_y, poly1->mbr[0], poly1->mbr[2], dev_result);
  host_result = (int *)malloc(2*boxsize*sizeof(int));
  cudaMemcpy(host_result, dev_result, 2*boxsize*sizeof(int), cudaMemcpyDeviceToHost);
  // FIX(review): free everything on all exit paths (original leaked it all).
  cudaFree(dev_poly1_x); cudaFree(dev_poly1_y);
  cudaFree(dev_poly2_x); cudaFree(dev_poly2_y);
  cudaFree(dev_result);
  int contained = 1;
  for(int i=0;i<boxsize;i++){
    // cout<<i % (poly1->mbr[1]-poly1->mbr[0])+poly1->mbr[0]<<" "<<i/(poly1->mbr[1]-poly1->mbr[0])+poly1->mbr[2]<<endl;
    // A point inside poly1 but outside poly2 disproves containment.
    if(host_result[2*i] == 1 && host_result[2*i+1] == 0){
      contained = 0;
      break;
    }
  }
  end = clock();
  free(host_result);
  free(poly1->x); free(poly1->y); free(poly2->x); free(poly2->y);
  free(poly1); free(poly2);
  if(!contained){
    cout<<"NO! Time used: "<<end-start<<endl;
    return 1;
  }
  cout<<"YES! Time used: "<<end-start<<endl;
  return 0;
}
5421f895dbc2f438d25d33b5c4fe1ec4c8e12d8b.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/detail/iterator.cuh> // include iterator header #include <cudf/detail/utilities/transform_unary_functions.cuh> //for meanvar #include <bitset> #include <cstdint> #include <iostream> #include <numeric> #include <random> #include <tests/utilities/base_fixture.hpp> #include <tests/utilities/cudf_gmock.hpp> #include <gmock/gmock.h> #include <tests/utilities/type_lists.hpp> #include <tests/utilities/column_wrapper.hpp> #include <thrust/equal.h> #include <thrust/transform.h> #include <thrust/functional.h> // for reduction tests #include <hipcub/hipcub.hpp> #include <thrust/device_vector.h> // --------------------------------------------------------------------------- template <typename T> T random_int(T min, T max) { static unsigned seed = 13377331; static std::mt19937 engine{seed}; static std::uniform_int_distribution<T> uniform{min, max}; return uniform(engine); } bool random_bool() { static unsigned seed = 13377331; static std::mt19937 engine{seed}; static std::uniform_int_distribution<int> uniform{0, 1}; return static_cast<bool>(uniform(engine)); } template <typename T> std::ostream& operator<<(std::ostream& os, cudf::meanvar<T> const& rhs) { return os << "[" << rhs.value << ", " << rhs.value_squared << ", " << rhs.count << "] "; }; auto strings_to_string_views(std::vector<std::string>& input_strings) { auto 
all_valid = cudf::test::make_counting_transform_iterator(0, [](auto i) { return true; }); std::vector<char> chars; std::vector<int32_t> offsets; std::tie(chars, offsets) = cudf::test::detail::make_chars_and_offsets( input_strings.begin(), input_strings.end(), all_valid); thrust::device_vector<char> dev_chars(chars); char* c_start = thrust::raw_pointer_cast(dev_chars.data()); // calculate the expected value by CPU. (but contains device pointers) std::vector<cudf::string_view> replaced_array(input_strings.size()); std::transform(thrust::counting_iterator<size_t>(0), thrust::counting_iterator<size_t>(replaced_array.size()), replaced_array.begin(), [c_start, offsets](auto i) { return cudf::string_view(c_start + offsets[i], offsets[i + 1] - offsets[i]); }); return std::make_tuple(std::move(dev_chars), replaced_array); } // --------------------------------------------------------------------------- template <typename T> struct IteratorTest : public cudf::test::BaseFixture { // iterator test case which uses cub template <typename InputIterator, typename T_output> void iterator_test_cub(T_output expected, InputIterator d_in, int num_items) { T_output init{0}; thrust::device_vector<T_output> dev_result(1, init); // Get temporary storage size size_t temp_storage_bytes = 0; hipcub::DeviceReduce::Reduce(nullptr, temp_storage_bytes, d_in, dev_result.begin(), num_items, thrust::minimum<T_output>{}, init); // Allocate temporary storage rmm::device_buffer d_temp_storage(temp_storage_bytes); // Run reduction hipcub::DeviceReduce::Reduce(d_temp_storage.data(), temp_storage_bytes, d_in, dev_result.begin(), num_items, thrust::minimum<T_output>{}, init); evaluate(expected, dev_result, "cub test"); } // iterator test case which uses thrust template <typename InputIterator, typename T_output> void iterator_test_thrust(thrust::host_vector<T_output>& expected, InputIterator d_in, int num_items) { InputIterator d_in_last = d_in + num_items; EXPECT_EQ(thrust::distance(d_in, d_in_last), 
num_items); thrust::device_vector<T_output> dev_expected(expected); // Can't use this because time_point make_pair bug in libcudacxx // bool result = thrust::equal(thrust::device, d_in, d_in_last, dev_expected.begin()); bool result = thrust::transform_reduce(thrust::device, thrust::make_zip_iterator(thrust::make_tuple(d_in, dev_expected.begin())), thrust::make_zip_iterator(thrust::make_tuple(d_in_last, dev_expected.end())), [] __device__(auto it) { return (thrust::get<0>(it)) == T_output(thrust::get<1>(it)); }, true, thrust::logical_and<bool>()); #ifndef NDEBUG thrust::device_vector<bool> vec(expected.size(), false); thrust::transform(thrust::device, thrust::make_zip_iterator(thrust::make_tuple(d_in, dev_expected.begin())), thrust::make_zip_iterator(thrust::make_tuple(d_in_last, dev_expected.end())), vec.begin(), [] __device__(auto it) { return (thrust::get<0>(it)) == T_output(thrust::get<1>(it)); } ); thrust::copy(vec.begin(), vec.end(), std::ostream_iterator<bool>(std::cout, " ")); std::cout<<std::endl; #endif EXPECT_TRUE(result) << "thrust test"; } template <typename T_output> void evaluate(T_output expected, thrust::device_vector<T_output>& dev_result, const char* msg = nullptr) { thrust::host_vector<T_output> hos_result(dev_result); EXPECT_EQ(expected, hos_result[0]) << msg; std::cout << "Done: expected <" << msg << "> = " //<< hos_result[0] //TODO uncomment after time_point ostream operator<< << std::endl; } template <typename T_output> void values_equal_test(thrust::host_vector<T_output>& expected, const cudf::column_device_view& col) { if (col.nullable()) { auto it_dev = cudf::experimental::detail::make_null_replacement_iterator(col, T_output{0}); iterator_test_thrust(expected, it_dev, col.size()); } else { auto it_dev = col.begin<T_output>(); iterator_test_thrust(expected, it_dev, col.size()); } } }; using TestingTypes = cudf::test::AllTypes; TYPED_TEST_CASE(IteratorTest, TestingTypes); // tests for non-null iterator (pointer of device array) 
TYPED_TEST(IteratorTest, non_null_iterator) { using T = TypeParam; auto host_array = cudf::test::make_type_param_vector<T>({0, 6, 0, -14, 13, 64, -13, -20, 45}); thrust::device_vector<T> dev_array(host_array); // calculate the expected value by CPU. thrust::host_vector<T> replaced_array(host_array); // driven by iterator as a pointer of device array. // FIXME: compilation error for cudf::experimental::bool8 // auto it_dev = dev_array.begin(); // this->iterator_test_thrust(replaced_array, it_dev, dev_array.size()); // this->iterator_test_cub(expected_value, it_dev, dev_array.size()); // test column input cudf::test::fixed_width_column_wrapper<T> w_col(host_array.begin(), host_array.end()); this->values_equal_test(replaced_array, *cudf::column_device_view::create(w_col)); } // Tests for null input iterator (column with null bitmap) // Actually, we can use cub for reduction with nulls without creating custom // kernel or multiple steps. We may accelarate the reduction for a column using // cub TYPED_TEST(IteratorTest, null_iterator) { using T = TypeParam; T init = T{0}; // data and valid arrays auto host_values = cudf::test::make_type_param_vector<T>({0, 6, 0, -14, 13, 64, -13, -20, 45}); std::vector<bool> host_bools({1, 1, 0, 1, 1, 1, 0, 1, 1}); // create a column with bool vector cudf::test::fixed_width_column_wrapper<T> w_col(host_values.begin(), host_values.end(), host_bools.begin()); auto d_col = cudf::column_device_view::create(w_col); // calculate the expected value by CPU. thrust::host_vector<T> replaced_array(host_values.size()); std::transform(host_values.begin(), host_values.end(), host_bools.begin(), replaced_array.begin(), [&](T x, bool b) { return (b) ? 
x : init; }); T expected_value = *std::min_element(replaced_array.begin(), replaced_array.end()); // TODO uncomment after time_point ostream operator<< // std::cout << "expected <null_iterator> = " << expected_value << std::endl; // GPU test auto it_dev = cudf::experimental::detail::make_null_replacement_iterator(*d_col, T{0}); this->iterator_test_cub(expected_value, it_dev, d_col->size()); this->values_equal_test(replaced_array, *d_col); } // Tests up cast reduction with null iterator. // The up cast iterator will be created by transform_iterator and // cudf::experimental::detail::make_null_replacement_iterator(col, T{0}) TYPED_TEST(IteratorTest, null_iterator_upcast) { const int column_size{1000}; using T = int8_t; using T_upcast = int64_t; T init{0}; // data and valid arrays std::vector<T> host_values(column_size); std::generate(host_values.begin(), host_values.end(), []() { return static_cast<T>(random_int<T>(-128, 127)); }); std::vector<bool> host_bools(column_size); std::generate(host_bools.begin(), host_bools.end(), []() { return static_cast<bool>(random_bool()); }); cudf::test::fixed_width_column_wrapper<T> w_col(host_values.begin(), host_values.end(), host_bools.begin()); auto d_col = cudf::column_device_view::create(w_col); // calculate the expected value by CPU. thrust::host_vector<T> replaced_array(d_col->size()); std::transform(host_values.begin(), host_values.end(), host_bools.begin(), replaced_array.begin(), [&](T x, bool b) { return (b) ? 
x : init; }); T_upcast expected_value = *std::min_element(replaced_array.begin(), replaced_array.end()); // std::cout << "expected <null_iterator> = " << expected_value << std::endl; // GPU test auto it_dev = cudf::experimental::detail::make_null_replacement_iterator(*d_col, T{0}); auto it_dev_upcast = thrust::make_transform_iterator(it_dev, thrust::identity<T_upcast>()); this->iterator_test_thrust(replaced_array, it_dev_upcast, d_col->size()); this->iterator_test_cub(expected_value, it_dev, d_col->size()); } // Tests for square input iterator using helper strcut // `cudf::transformer_squared<T, T_upcast>` The up cast iterator will be created // by make_transform_iterator( // cudf::experimental::detail::make_null_replacement_iterator(col, T{0}), // cudf::detail::transformer_squared<T_upcast>) TYPED_TEST(IteratorTest, null_iterator_square) { const int column_size{1000}; using T = int8_t; using T_upcast = int64_t; T init{0}; cudf::transformer_squared<T_upcast> transformer{}; // data and valid arrays std::vector<T> host_values(column_size); std::generate(host_values.begin(), host_values.end(), []() { return static_cast<T>(random_int(-128, 128)); }); std::vector<bool> host_bools(column_size); std::generate(host_bools.begin(), host_bools.end(), []() { return static_cast<bool>(random_bool()); }); cudf::test::fixed_width_column_wrapper<T> w_col(host_values.begin(), host_values.end(), host_bools.begin()); auto d_col = cudf::column_device_view::create(w_col); // calculate the expected value by CPU. thrust::host_vector<T_upcast> replaced_array(d_col->size()); std::transform(host_values.begin(), host_values.end(), host_bools.begin(), replaced_array.begin(), [&](T x, bool b) { return (b) ? 
x * x : init; }); T_upcast expected_value = *std::min_element(replaced_array.begin(), replaced_array.end()); // std::cout << "expected <null_iterator> = " << expected_value << std::endl; // GPU test auto it_dev = cudf::experimental::detail::make_null_replacement_iterator(*d_col, T{0}); auto it_dev_upcast = thrust::make_transform_iterator(it_dev, thrust::identity<T_upcast>()); auto it_dev_squared = thrust::make_transform_iterator(it_dev_upcast, transformer); this->iterator_test_thrust(replaced_array, it_dev_squared, d_col->size()); this->iterator_test_cub(expected_value, it_dev_squared, d_col->size()); } TYPED_TEST(IteratorTest, large_size_reduction) { using T = TypeParam; const int column_size{1000000}; const T init{0}; // data and valid arrays std::vector<T> host_values(column_size); std::generate(host_values.begin(), host_values.end(), []() { return static_cast<T>(random_int(-128, 128)); }); std::vector<bool> host_bools(column_size); std::generate(host_bools.begin(), host_bools.end(), []() { return static_cast<bool>(random_bool()); }); cudf::test::fixed_width_column_wrapper<TypeParam> w_col( host_values.begin(), host_values.end(), host_bools.begin()); auto d_col = cudf::column_device_view::create(w_col); // calculate by cudf::reduce thrust::host_vector<T> replaced_array(d_col->size()); std::transform(host_values.begin(), host_values.end(), host_bools.begin(), replaced_array.begin(), [&](T x, bool b) { return (b) ? 
x : init; }); T expected_value = *std::min_element(replaced_array.begin(), replaced_array.end()); // std::cout << "expected <null_iterator> = " << expected_value << std::endl; // GPU test auto it_dev = cudf::experimental::detail::make_null_replacement_iterator(*d_col, init); this->iterator_test_thrust(replaced_array, it_dev, d_col->size()); this->iterator_test_cub(expected_value, it_dev, d_col->size()); } // Transformers and Operators for pair_iterator test template<typename ElementType> struct transformer_pair_meanvar { using ResultType = thrust::pair<cudf::meanvar<ElementType>, bool>; CUDA_HOST_DEVICE_CALLABLE ResultType operator()(thrust::pair<ElementType, bool> const& pair) { ElementType v = pair.first; return {{v, static_cast<ElementType>(v*v), (pair.second)? 1 : 0 }, pair.second}; }; }; struct sum_if_not_null { template <typename T> CUDA_HOST_DEVICE_CALLABLE thrust::pair<T, bool> operator()( const thrust::pair<T, bool>& lhs, const thrust::pair<T, bool>& rhs) { if (lhs.second & rhs.second) return {lhs.first+rhs.first, true}; else if (lhs.second) return {lhs}; else return {rhs}; } }; template <typename T> struct PairIteratorTest : public cudf::test::BaseFixture {}; TYPED_TEST_CASE(PairIteratorTest, cudf::test::NumericTypes); // TODO: enable this test also at __CUDACC_DEBUG__ // This test causes fatal compilation error only at device debug mode. // Workaround: exclude this test only at device debug mode. #if !defined(__CUDACC_DEBUG__) // This test computes `count`, `sum`, `sum_of_squares` at a single reduction call. 
// It would be useful for `var`, `std` operation TYPED_TEST(PairIteratorTest, mean_var_output) { using T = TypeParam; using T_output = cudf::meanvar<T>; transformer_pair_meanvar<T> transformer{}; const int column_size{5000}; const T init{0}; // data and valid arrays std::vector<T> host_values(column_size); std::generate(host_values.begin(), host_values.end(), []() { return static_cast<T>(random_int(-128, 128)); }); std::vector<bool> host_bools(column_size); std::generate(host_bools.begin(), host_bools.end(), []() { return static_cast<bool>(random_bool()); }); cudf::test::fixed_width_column_wrapper<TypeParam> w_col( host_values.begin(), host_values.end(), host_bools.begin()); auto d_col = cudf::column_device_view::create(w_col); // calculate expected values by CPU T_output expected_value; expected_value.count = d_col->size() - d_col->null_count(); std::vector<T> replaced_array(d_col->size()); std::transform(host_values.begin(), host_values.end(), host_bools.begin(), replaced_array.begin(), [&](T x, bool b) { return (b) ? 
static_cast<T>(x) : init; }); expected_value.count = d_col->size() - d_col->null_count(); expected_value.value = std::accumulate(replaced_array.begin(), replaced_array.end(), T{0}); expected_value.value_squared = std::accumulate(replaced_array.begin(), replaced_array.end(), T{0}, [](T acc, T i) { return acc + i * i; }); std::cout << "expected <mixed_output> = " << expected_value << std::endl; // GPU test auto it_dev = d_col->pair_begin<T, true>(); auto it_dev_squared = thrust::make_transform_iterator(it_dev, transformer); auto result = thrust::reduce( it_dev_squared, it_dev_squared+ d_col->size(), thrust::make_pair(T_output{}, true), sum_if_not_null{} ); EXPECT_EQ(expected_value, result.first) << "pair iterator reduction sum"; } #endif TYPED_TEST(IteratorTest, error_handling) { using T = TypeParam; auto host_array = cudf::test::make_type_param_vector<T>({0, 6, 0, -14, 13, 64, -13, -20, 45}); std::vector<bool> host_bools({1, 1, 0, 1, 1, 1, 0, 1, 1}); cudf::test::fixed_width_column_wrapper<T> w_col_no_null(host_array.begin(), host_array.end()); cudf::test::fixed_width_column_wrapper<T> w_col_null(host_array.begin(), host_array.end(), host_bools.begin()); auto d_col_no_null = cudf::column_device_view::create(w_col_no_null); auto d_col_null = cudf::column_device_view::create(w_col_null); // expects error: data type mismatch if (!(std::is_same<T, double>::value)) { CUDF_EXPECT_THROW_MESSAGE((d_col_null->begin<double>()), "the data type mismatch"); } // expects error: data type mismatch if (!(std::is_same<T, float>::value)) { CUDF_EXPECT_THROW_MESSAGE((cudf::experimental::detail::make_null_replacement_iterator(*d_col_null, float{0})), "the data type mismatch"); } CUDF_EXPECT_THROW_MESSAGE((cudf::experimental::detail::make_null_replacement_iterator(*d_col_no_null, T{0})), "Unexpected non-nullable column."); CUDF_EXPECT_THROW_MESSAGE((d_col_null->begin<T>()), "Unexpected column with nulls."); CUDF_EXPECT_THROW_MESSAGE((d_col_no_null->pair_begin<T, true>()), "Unexpected 
non-nullable column."); CUDF_EXPECT_NO_THROW((d_col_null->pair_begin<T, false>())); CUDF_EXPECT_NO_THROW((d_col_null->pair_begin<T, true>())); //scalar iterator using ScalarType = cudf::experimental::scalar_type_t<T>; std::unique_ptr<cudf::scalar> s(new ScalarType{T{1}, false}); CUDF_EXPECT_THROW_MESSAGE((cudf::experimental::detail::make_scalar_iterator<T>(*s)), "the scalar value must be valid"); CUDF_EXPECT_NO_THROW((cudf::experimental::detail::make_pair_iterator<T>(*s))); // expects error: data type mismatch if (!(std::is_same<T, double>::value)) { CUDF_EXPECT_THROW_MESSAGE((cudf::experimental::detail::make_scalar_iterator<double>(*s)), "the data type mismatch"); CUDF_EXPECT_THROW_MESSAGE((cudf::experimental::detail::make_pair_iterator<double>(*s)), "the data type mismatch"); } } struct StringIteratorTest : public IteratorTest<cudf::string_view> { }; TEST_F(StringIteratorTest, string_view_null_iterator ) { using T = cudf::string_view; // T init = T{"", 0}; std::string zero("zero"); // the char data has to be in GPU thrust::device_vector<char> initmsg(zero.begin(), zero.end()); T init = T{initmsg.data().get(), int(initmsg.size())}; // data and valid arrays std::vector<std::string> host_values({"one", "two", "three", "four", "five", "six", "eight", "nine"}); std::vector<bool> host_bools({1, 1, 0, 1, 1, 1, 0, 1, 1}); // replace nulls in CPU std::vector<std::string> replaced_strings(host_values.size()); std::transform(host_values.begin(), host_values.end(), host_bools.begin(), replaced_strings.begin(), [zero](auto s, auto b) { return b ? 
s : zero; }); thrust::device_vector<char> dev_chars; thrust::host_vector<T> replaced_array(host_values.size()); std::tie(dev_chars, replaced_array) = strings_to_string_views(replaced_strings); // create a column with bool vector cudf::test::strings_column_wrapper w_col(host_values.begin(), host_values.end(), host_bools.begin()); auto d_col = cudf::column_device_view::create(w_col); // GPU test auto it_dev = cudf::experimental::detail::make_null_replacement_iterator(*d_col, init); this->iterator_test_thrust(replaced_array, it_dev, host_values.size()); // this->values_equal_test(replaced_array, *d_col); //string_view{0} is invalid } TEST_F(StringIteratorTest, string_view_no_null_iterator ) { using T = cudf::string_view; // T init = T{"", 0}; std::string zero("zero"); // the char data has to be in GPU thrust::device_vector<char> initmsg(zero.begin(), zero.end()); T init = T{initmsg.data().get(), int(initmsg.size())}; // data array std::vector<std::string> host_values({"one", "two", "three", "four", "five", "six", "eight", "nine"}); thrust::device_vector<char> dev_chars; thrust::host_vector<T> all_array(host_values.size()); std::tie(dev_chars, all_array) = strings_to_string_views(host_values); // create a column with bool vector cudf::test::strings_column_wrapper w_col(host_values.begin(), host_values.end()); auto d_col = cudf::column_device_view::create(w_col); // GPU test auto it_dev = d_col->begin<T>(); this->iterator_test_thrust(all_array, it_dev, host_values.size()); } TYPED_TEST(IteratorTest, nonull_pair_iterator) { using T = TypeParam; // data and valid arrays auto host_values_std = cudf::test::make_type_param_vector<T>({0, 6, 0, -14, 13, 64, -13, -20, 45}); thrust::host_vector<T> host_values(host_values_std); // create a column cudf::test::fixed_width_column_wrapper<T> w_col(host_values.begin(), host_values.end()); auto d_col = cudf::column_device_view::create(w_col); // calculate the expected value by CPU. 
thrust::host_vector<thrust::pair<T,bool> > replaced_array(host_values.size()); std::transform(host_values.begin(), host_values.end(), replaced_array.begin(), [](auto s) { return thrust::make_pair(s, true); }); // GPU test auto it_dev = d_col->pair_begin<T, false>(); this->iterator_test_thrust(replaced_array, it_dev, host_values.size()); } TYPED_TEST(IteratorTest, null_pair_iterator) { using T = TypeParam; // data and valid arrays auto host_values = cudf::test::make_type_param_vector<T>({0, 6, 0, -14, 13, 64, -13, -20, 45}); thrust::host_vector<bool> host_bools(std::vector<bool>({1, 1, 0, 1, 1, 1, 0, 1, 1})); // create a column with bool vector cudf::test::fixed_width_column_wrapper<T> w_col(host_values.begin(), host_values.end(), host_bools.begin()); auto d_col = cudf::column_device_view::create(w_col); // calculate the expected value by CPU. thrust::host_vector<thrust::pair<T,bool> > value_and_validity(host_values.size()); std::transform(host_values.begin(), host_values.end(), host_bools.begin(), value_and_validity.begin(), [](auto s, auto b) { return thrust::pair<T, bool>{s, b}; }); thrust::host_vector<thrust::pair<T,bool> > value_all_valid(host_values.size()); std::transform(host_values.begin(), host_values.end(), host_bools.begin(), value_all_valid.begin(), [](auto s, auto b) { return thrust::pair<T, bool>{s, true}; }); // GPU test auto it_dev = d_col->pair_begin<T, true>(); this->iterator_test_thrust(value_and_validity, it_dev, host_values.size()); auto it_hasnonull_dev = d_col->pair_begin<T, false>(); this->iterator_test_thrust(value_all_valid, it_hasnonull_dev, host_values.size()); auto itb_dev = cudf::experimental::detail::make_validity_iterator(*d_col); this->iterator_test_thrust(host_bools, itb_dev, host_values.size()); } TYPED_TEST(IteratorTest, scalar_iterator) { using T = TypeParam; T init = static_cast<T>(random_int(-128, 128)); // data and valid arrays thrust::host_vector<T> host_values(100, init); std::vector<bool> host_bools(100, true); // create a 
scalar using ScalarType = cudf::experimental::scalar_type_t<T>; std::unique_ptr<cudf::scalar> s(new ScalarType{init, true}); // calculate the expected value by CPU. thrust::host_vector<thrust::pair<T,bool> > value_and_validity(host_values.size()); std::transform(host_values.begin(), host_values.end(), host_bools.begin(), value_and_validity.begin(), [](auto v, auto b) { return thrust::pair<T, bool>{v, b}; }); // GPU test auto it_dev = cudf::experimental::detail::make_scalar_iterator<T>(*s); this->iterator_test_thrust(host_values, it_dev, host_values.size()); auto it_pair_dev = cudf::experimental::detail::make_pair_iterator<T>(*s); this->iterator_test_thrust(value_and_validity, it_pair_dev, host_values.size()); } TYPED_TEST(IteratorTest, null_scalar_iterator) { using T = TypeParam; T init = static_cast<T>(random_int(-128, 128)); // data and valid arrays std::vector<T> host_values(100, init); std::vector<bool> host_bools(100, true); // create a scalar using ScalarType = cudf::experimental::scalar_type_t<T>; std::unique_ptr<cudf::scalar> s(new ScalarType{init, true}); // calculate the expected value by CPU. thrust::host_vector<thrust::pair<T,bool> > value_and_validity(host_values.size()); std::transform(host_values.begin(), host_values.end(), host_bools.begin(), value_and_validity.begin(), [](auto v, auto b) { return thrust::pair<T, bool>{v, b}; }); // GPU test auto it_pair_dev = cudf::experimental::detail::make_pair_iterator<T>(*s); this->iterator_test_thrust(value_and_validity, it_pair_dev, host_values.size()); } CUDF_TEST_PROGRAM_MAIN()
5421f895dbc2f438d25d33b5c4fe1ec4c8e12d8b.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/detail/iterator.cuh> // include iterator header #include <cudf/detail/utilities/transform_unary_functions.cuh> //for meanvar #include <bitset> #include <cstdint> #include <iostream> #include <numeric> #include <random> #include <tests/utilities/base_fixture.hpp> #include <tests/utilities/cudf_gmock.hpp> #include <gmock/gmock.h> #include <tests/utilities/type_lists.hpp> #include <tests/utilities/column_wrapper.hpp> #include <thrust/equal.h> #include <thrust/transform.h> #include <thrust/functional.h> // for reduction tests #include <cub/device/device_reduce.cuh> #include <thrust/device_vector.h> // --------------------------------------------------------------------------- template <typename T> T random_int(T min, T max) { static unsigned seed = 13377331; static std::mt19937 engine{seed}; static std::uniform_int_distribution<T> uniform{min, max}; return uniform(engine); } bool random_bool() { static unsigned seed = 13377331; static std::mt19937 engine{seed}; static std::uniform_int_distribution<int> uniform{0, 1}; return static_cast<bool>(uniform(engine)); } template <typename T> std::ostream& operator<<(std::ostream& os, cudf::meanvar<T> const& rhs) { return os << "[" << rhs.value << ", " << rhs.value_squared << ", " << rhs.count << "] "; }; auto strings_to_string_views(std::vector<std::string>& input_strings) { auto all_valid = 
cudf::test::make_counting_transform_iterator(0, [](auto i) { return true; }); std::vector<char> chars; std::vector<int32_t> offsets; std::tie(chars, offsets) = cudf::test::detail::make_chars_and_offsets( input_strings.begin(), input_strings.end(), all_valid); thrust::device_vector<char> dev_chars(chars); char* c_start = thrust::raw_pointer_cast(dev_chars.data()); // calculate the expected value by CPU. (but contains device pointers) std::vector<cudf::string_view> replaced_array(input_strings.size()); std::transform(thrust::counting_iterator<size_t>(0), thrust::counting_iterator<size_t>(replaced_array.size()), replaced_array.begin(), [c_start, offsets](auto i) { return cudf::string_view(c_start + offsets[i], offsets[i + 1] - offsets[i]); }); return std::make_tuple(std::move(dev_chars), replaced_array); } // --------------------------------------------------------------------------- template <typename T> struct IteratorTest : public cudf::test::BaseFixture { // iterator test case which uses cub template <typename InputIterator, typename T_output> void iterator_test_cub(T_output expected, InputIterator d_in, int num_items) { T_output init{0}; thrust::device_vector<T_output> dev_result(1, init); // Get temporary storage size size_t temp_storage_bytes = 0; cub::DeviceReduce::Reduce(nullptr, temp_storage_bytes, d_in, dev_result.begin(), num_items, thrust::minimum<T_output>{}, init); // Allocate temporary storage rmm::device_buffer d_temp_storage(temp_storage_bytes); // Run reduction cub::DeviceReduce::Reduce(d_temp_storage.data(), temp_storage_bytes, d_in, dev_result.begin(), num_items, thrust::minimum<T_output>{}, init); evaluate(expected, dev_result, "cub test"); } // iterator test case which uses thrust template <typename InputIterator, typename T_output> void iterator_test_thrust(thrust::host_vector<T_output>& expected, InputIterator d_in, int num_items) { InputIterator d_in_last = d_in + num_items; EXPECT_EQ(thrust::distance(d_in, d_in_last), num_items); 
thrust::device_vector<T_output> dev_expected(expected); // Can't use this because time_point make_pair bug in libcudacxx // bool result = thrust::equal(thrust::device, d_in, d_in_last, dev_expected.begin()); bool result = thrust::transform_reduce(thrust::device, thrust::make_zip_iterator(thrust::make_tuple(d_in, dev_expected.begin())), thrust::make_zip_iterator(thrust::make_tuple(d_in_last, dev_expected.end())), [] __device__(auto it) { return (thrust::get<0>(it)) == T_output(thrust::get<1>(it)); }, true, thrust::logical_and<bool>()); #ifndef NDEBUG thrust::device_vector<bool> vec(expected.size(), false); thrust::transform(thrust::device, thrust::make_zip_iterator(thrust::make_tuple(d_in, dev_expected.begin())), thrust::make_zip_iterator(thrust::make_tuple(d_in_last, dev_expected.end())), vec.begin(), [] __device__(auto it) { return (thrust::get<0>(it)) == T_output(thrust::get<1>(it)); } ); thrust::copy(vec.begin(), vec.end(), std::ostream_iterator<bool>(std::cout, " ")); std::cout<<std::endl; #endif EXPECT_TRUE(result) << "thrust test"; } template <typename T_output> void evaluate(T_output expected, thrust::device_vector<T_output>& dev_result, const char* msg = nullptr) { thrust::host_vector<T_output> hos_result(dev_result); EXPECT_EQ(expected, hos_result[0]) << msg; std::cout << "Done: expected <" << msg << "> = " //<< hos_result[0] //TODO uncomment after time_point ostream operator<< << std::endl; } template <typename T_output> void values_equal_test(thrust::host_vector<T_output>& expected, const cudf::column_device_view& col) { if (col.nullable()) { auto it_dev = cudf::experimental::detail::make_null_replacement_iterator(col, T_output{0}); iterator_test_thrust(expected, it_dev, col.size()); } else { auto it_dev = col.begin<T_output>(); iterator_test_thrust(expected, it_dev, col.size()); } } }; using TestingTypes = cudf::test::AllTypes; TYPED_TEST_CASE(IteratorTest, TestingTypes); // tests for non-null iterator (pointer of device array) TYPED_TEST(IteratorTest, 
non_null_iterator) { using T = TypeParam; auto host_array = cudf::test::make_type_param_vector<T>({0, 6, 0, -14, 13, 64, -13, -20, 45}); thrust::device_vector<T> dev_array(host_array); // calculate the expected value by CPU. thrust::host_vector<T> replaced_array(host_array); // driven by iterator as a pointer of device array. // FIXME: compilation error for cudf::experimental::bool8 // auto it_dev = dev_array.begin(); // this->iterator_test_thrust(replaced_array, it_dev, dev_array.size()); // this->iterator_test_cub(expected_value, it_dev, dev_array.size()); // test column input cudf::test::fixed_width_column_wrapper<T> w_col(host_array.begin(), host_array.end()); this->values_equal_test(replaced_array, *cudf::column_device_view::create(w_col)); } // Tests for null input iterator (column with null bitmap) // Actually, we can use cub for reduction with nulls without creating custom // kernel or multiple steps. We may accelarate the reduction for a column using // cub TYPED_TEST(IteratorTest, null_iterator) { using T = TypeParam; T init = T{0}; // data and valid arrays auto host_values = cudf::test::make_type_param_vector<T>({0, 6, 0, -14, 13, 64, -13, -20, 45}); std::vector<bool> host_bools({1, 1, 0, 1, 1, 1, 0, 1, 1}); // create a column with bool vector cudf::test::fixed_width_column_wrapper<T> w_col(host_values.begin(), host_values.end(), host_bools.begin()); auto d_col = cudf::column_device_view::create(w_col); // calculate the expected value by CPU. thrust::host_vector<T> replaced_array(host_values.size()); std::transform(host_values.begin(), host_values.end(), host_bools.begin(), replaced_array.begin(), [&](T x, bool b) { return (b) ? 
x : init; }); T expected_value = *std::min_element(replaced_array.begin(), replaced_array.end()); // TODO uncomment after time_point ostream operator<< // std::cout << "expected <null_iterator> = " << expected_value << std::endl; // GPU test auto it_dev = cudf::experimental::detail::make_null_replacement_iterator(*d_col, T{0}); this->iterator_test_cub(expected_value, it_dev, d_col->size()); this->values_equal_test(replaced_array, *d_col); } // Tests up cast reduction with null iterator. // The up cast iterator will be created by transform_iterator and // cudf::experimental::detail::make_null_replacement_iterator(col, T{0}) TYPED_TEST(IteratorTest, null_iterator_upcast) { const int column_size{1000}; using T = int8_t; using T_upcast = int64_t; T init{0}; // data and valid arrays std::vector<T> host_values(column_size); std::generate(host_values.begin(), host_values.end(), []() { return static_cast<T>(random_int<T>(-128, 127)); }); std::vector<bool> host_bools(column_size); std::generate(host_bools.begin(), host_bools.end(), []() { return static_cast<bool>(random_bool()); }); cudf::test::fixed_width_column_wrapper<T> w_col(host_values.begin(), host_values.end(), host_bools.begin()); auto d_col = cudf::column_device_view::create(w_col); // calculate the expected value by CPU. thrust::host_vector<T> replaced_array(d_col->size()); std::transform(host_values.begin(), host_values.end(), host_bools.begin(), replaced_array.begin(), [&](T x, bool b) { return (b) ? 
x : init; }); T_upcast expected_value = *std::min_element(replaced_array.begin(), replaced_array.end()); // std::cout << "expected <null_iterator> = " << expected_value << std::endl; // GPU test auto it_dev = cudf::experimental::detail::make_null_replacement_iterator(*d_col, T{0}); auto it_dev_upcast = thrust::make_transform_iterator(it_dev, thrust::identity<T_upcast>()); this->iterator_test_thrust(replaced_array, it_dev_upcast, d_col->size()); this->iterator_test_cub(expected_value, it_dev, d_col->size()); } // Tests for square input iterator using helper strcut // `cudf::transformer_squared<T, T_upcast>` The up cast iterator will be created // by make_transform_iterator( // cudf::experimental::detail::make_null_replacement_iterator(col, T{0}), // cudf::detail::transformer_squared<T_upcast>) TYPED_TEST(IteratorTest, null_iterator_square) { const int column_size{1000}; using T = int8_t; using T_upcast = int64_t; T init{0}; cudf::transformer_squared<T_upcast> transformer{}; // data and valid arrays std::vector<T> host_values(column_size); std::generate(host_values.begin(), host_values.end(), []() { return static_cast<T>(random_int(-128, 128)); }); std::vector<bool> host_bools(column_size); std::generate(host_bools.begin(), host_bools.end(), []() { return static_cast<bool>(random_bool()); }); cudf::test::fixed_width_column_wrapper<T> w_col(host_values.begin(), host_values.end(), host_bools.begin()); auto d_col = cudf::column_device_view::create(w_col); // calculate the expected value by CPU. thrust::host_vector<T_upcast> replaced_array(d_col->size()); std::transform(host_values.begin(), host_values.end(), host_bools.begin(), replaced_array.begin(), [&](T x, bool b) { return (b) ? 
x * x : init; }); T_upcast expected_value = *std::min_element(replaced_array.begin(), replaced_array.end()); // std::cout << "expected <null_iterator> = " << expected_value << std::endl; // GPU test auto it_dev = cudf::experimental::detail::make_null_replacement_iterator(*d_col, T{0}); auto it_dev_upcast = thrust::make_transform_iterator(it_dev, thrust::identity<T_upcast>()); auto it_dev_squared = thrust::make_transform_iterator(it_dev_upcast, transformer); this->iterator_test_thrust(replaced_array, it_dev_squared, d_col->size()); this->iterator_test_cub(expected_value, it_dev_squared, d_col->size()); } TYPED_TEST(IteratorTest, large_size_reduction) { using T = TypeParam; const int column_size{1000000}; const T init{0}; // data and valid arrays std::vector<T> host_values(column_size); std::generate(host_values.begin(), host_values.end(), []() { return static_cast<T>(random_int(-128, 128)); }); std::vector<bool> host_bools(column_size); std::generate(host_bools.begin(), host_bools.end(), []() { return static_cast<bool>(random_bool()); }); cudf::test::fixed_width_column_wrapper<TypeParam> w_col( host_values.begin(), host_values.end(), host_bools.begin()); auto d_col = cudf::column_device_view::create(w_col); // calculate by cudf::reduce thrust::host_vector<T> replaced_array(d_col->size()); std::transform(host_values.begin(), host_values.end(), host_bools.begin(), replaced_array.begin(), [&](T x, bool b) { return (b) ? 
x : init; }); T expected_value = *std::min_element(replaced_array.begin(), replaced_array.end()); // std::cout << "expected <null_iterator> = " << expected_value << std::endl; // GPU test auto it_dev = cudf::experimental::detail::make_null_replacement_iterator(*d_col, init); this->iterator_test_thrust(replaced_array, it_dev, d_col->size()); this->iterator_test_cub(expected_value, it_dev, d_col->size()); } // Transformers and Operators for pair_iterator test template<typename ElementType> struct transformer_pair_meanvar { using ResultType = thrust::pair<cudf::meanvar<ElementType>, bool>; CUDA_HOST_DEVICE_CALLABLE ResultType operator()(thrust::pair<ElementType, bool> const& pair) { ElementType v = pair.first; return {{v, static_cast<ElementType>(v*v), (pair.second)? 1 : 0 }, pair.second}; }; }; struct sum_if_not_null { template <typename T> CUDA_HOST_DEVICE_CALLABLE thrust::pair<T, bool> operator()( const thrust::pair<T, bool>& lhs, const thrust::pair<T, bool>& rhs) { if (lhs.second & rhs.second) return {lhs.first+rhs.first, true}; else if (lhs.second) return {lhs}; else return {rhs}; } }; template <typename T> struct PairIteratorTest : public cudf::test::BaseFixture {}; TYPED_TEST_CASE(PairIteratorTest, cudf::test::NumericTypes); // TODO: enable this test also at __CUDACC_DEBUG__ // This test causes fatal compilation error only at device debug mode. // Workaround: exclude this test only at device debug mode. #if !defined(__CUDACC_DEBUG__) // This test computes `count`, `sum`, `sum_of_squares` at a single reduction call. 
// It would be useful for `var`, `std` operation TYPED_TEST(PairIteratorTest, mean_var_output) { using T = TypeParam; using T_output = cudf::meanvar<T>; transformer_pair_meanvar<T> transformer{}; const int column_size{5000}; const T init{0}; // data and valid arrays std::vector<T> host_values(column_size); std::generate(host_values.begin(), host_values.end(), []() { return static_cast<T>(random_int(-128, 128)); }); std::vector<bool> host_bools(column_size); std::generate(host_bools.begin(), host_bools.end(), []() { return static_cast<bool>(random_bool()); }); cudf::test::fixed_width_column_wrapper<TypeParam> w_col( host_values.begin(), host_values.end(), host_bools.begin()); auto d_col = cudf::column_device_view::create(w_col); // calculate expected values by CPU T_output expected_value; expected_value.count = d_col->size() - d_col->null_count(); std::vector<T> replaced_array(d_col->size()); std::transform(host_values.begin(), host_values.end(), host_bools.begin(), replaced_array.begin(), [&](T x, bool b) { return (b) ? 
static_cast<T>(x) : init; }); expected_value.count = d_col->size() - d_col->null_count(); expected_value.value = std::accumulate(replaced_array.begin(), replaced_array.end(), T{0}); expected_value.value_squared = std::accumulate(replaced_array.begin(), replaced_array.end(), T{0}, [](T acc, T i) { return acc + i * i; }); std::cout << "expected <mixed_output> = " << expected_value << std::endl; // GPU test auto it_dev = d_col->pair_begin<T, true>(); auto it_dev_squared = thrust::make_transform_iterator(it_dev, transformer); auto result = thrust::reduce( it_dev_squared, it_dev_squared+ d_col->size(), thrust::make_pair(T_output{}, true), sum_if_not_null{} ); EXPECT_EQ(expected_value, result.first) << "pair iterator reduction sum"; } #endif TYPED_TEST(IteratorTest, error_handling) { using T = TypeParam; auto host_array = cudf::test::make_type_param_vector<T>({0, 6, 0, -14, 13, 64, -13, -20, 45}); std::vector<bool> host_bools({1, 1, 0, 1, 1, 1, 0, 1, 1}); cudf::test::fixed_width_column_wrapper<T> w_col_no_null(host_array.begin(), host_array.end()); cudf::test::fixed_width_column_wrapper<T> w_col_null(host_array.begin(), host_array.end(), host_bools.begin()); auto d_col_no_null = cudf::column_device_view::create(w_col_no_null); auto d_col_null = cudf::column_device_view::create(w_col_null); // expects error: data type mismatch if (!(std::is_same<T, double>::value)) { CUDF_EXPECT_THROW_MESSAGE((d_col_null->begin<double>()), "the data type mismatch"); } // expects error: data type mismatch if (!(std::is_same<T, float>::value)) { CUDF_EXPECT_THROW_MESSAGE((cudf::experimental::detail::make_null_replacement_iterator(*d_col_null, float{0})), "the data type mismatch"); } CUDF_EXPECT_THROW_MESSAGE((cudf::experimental::detail::make_null_replacement_iterator(*d_col_no_null, T{0})), "Unexpected non-nullable column."); CUDF_EXPECT_THROW_MESSAGE((d_col_null->begin<T>()), "Unexpected column with nulls."); CUDF_EXPECT_THROW_MESSAGE((d_col_no_null->pair_begin<T, true>()), "Unexpected 
non-nullable column."); CUDF_EXPECT_NO_THROW((d_col_null->pair_begin<T, false>())); CUDF_EXPECT_NO_THROW((d_col_null->pair_begin<T, true>())); //scalar iterator using ScalarType = cudf::experimental::scalar_type_t<T>; std::unique_ptr<cudf::scalar> s(new ScalarType{T{1}, false}); CUDF_EXPECT_THROW_MESSAGE((cudf::experimental::detail::make_scalar_iterator<T>(*s)), "the scalar value must be valid"); CUDF_EXPECT_NO_THROW((cudf::experimental::detail::make_pair_iterator<T>(*s))); // expects error: data type mismatch if (!(std::is_same<T, double>::value)) { CUDF_EXPECT_THROW_MESSAGE((cudf::experimental::detail::make_scalar_iterator<double>(*s)), "the data type mismatch"); CUDF_EXPECT_THROW_MESSAGE((cudf::experimental::detail::make_pair_iterator<double>(*s)), "the data type mismatch"); } } struct StringIteratorTest : public IteratorTest<cudf::string_view> { }; TEST_F(StringIteratorTest, string_view_null_iterator ) { using T = cudf::string_view; // T init = T{"", 0}; std::string zero("zero"); // the char data has to be in GPU thrust::device_vector<char> initmsg(zero.begin(), zero.end()); T init = T{initmsg.data().get(), int(initmsg.size())}; // data and valid arrays std::vector<std::string> host_values({"one", "two", "three", "four", "five", "six", "eight", "nine"}); std::vector<bool> host_bools({1, 1, 0, 1, 1, 1, 0, 1, 1}); // replace nulls in CPU std::vector<std::string> replaced_strings(host_values.size()); std::transform(host_values.begin(), host_values.end(), host_bools.begin(), replaced_strings.begin(), [zero](auto s, auto b) { return b ? 
s : zero; }); thrust::device_vector<char> dev_chars; thrust::host_vector<T> replaced_array(host_values.size()); std::tie(dev_chars, replaced_array) = strings_to_string_views(replaced_strings); // create a column with bool vector cudf::test::strings_column_wrapper w_col(host_values.begin(), host_values.end(), host_bools.begin()); auto d_col = cudf::column_device_view::create(w_col); // GPU test auto it_dev = cudf::experimental::detail::make_null_replacement_iterator(*d_col, init); this->iterator_test_thrust(replaced_array, it_dev, host_values.size()); // this->values_equal_test(replaced_array, *d_col); //string_view{0} is invalid } TEST_F(StringIteratorTest, string_view_no_null_iterator ) { using T = cudf::string_view; // T init = T{"", 0}; std::string zero("zero"); // the char data has to be in GPU thrust::device_vector<char> initmsg(zero.begin(), zero.end()); T init = T{initmsg.data().get(), int(initmsg.size())}; // data array std::vector<std::string> host_values({"one", "two", "three", "four", "five", "six", "eight", "nine"}); thrust::device_vector<char> dev_chars; thrust::host_vector<T> all_array(host_values.size()); std::tie(dev_chars, all_array) = strings_to_string_views(host_values); // create a column with bool vector cudf::test::strings_column_wrapper w_col(host_values.begin(), host_values.end()); auto d_col = cudf::column_device_view::create(w_col); // GPU test auto it_dev = d_col->begin<T>(); this->iterator_test_thrust(all_array, it_dev, host_values.size()); } TYPED_TEST(IteratorTest, nonull_pair_iterator) { using T = TypeParam; // data and valid arrays auto host_values_std = cudf::test::make_type_param_vector<T>({0, 6, 0, -14, 13, 64, -13, -20, 45}); thrust::host_vector<T> host_values(host_values_std); // create a column cudf::test::fixed_width_column_wrapper<T> w_col(host_values.begin(), host_values.end()); auto d_col = cudf::column_device_view::create(w_col); // calculate the expected value by CPU. 
thrust::host_vector<thrust::pair<T,bool> > replaced_array(host_values.size()); std::transform(host_values.begin(), host_values.end(), replaced_array.begin(), [](auto s) { return thrust::make_pair(s, true); }); // GPU test auto it_dev = d_col->pair_begin<T, false>(); this->iterator_test_thrust(replaced_array, it_dev, host_values.size()); } TYPED_TEST(IteratorTest, null_pair_iterator) { using T = TypeParam; // data and valid arrays auto host_values = cudf::test::make_type_param_vector<T>({0, 6, 0, -14, 13, 64, -13, -20, 45}); thrust::host_vector<bool> host_bools(std::vector<bool>({1, 1, 0, 1, 1, 1, 0, 1, 1})); // create a column with bool vector cudf::test::fixed_width_column_wrapper<T> w_col(host_values.begin(), host_values.end(), host_bools.begin()); auto d_col = cudf::column_device_view::create(w_col); // calculate the expected value by CPU. thrust::host_vector<thrust::pair<T,bool> > value_and_validity(host_values.size()); std::transform(host_values.begin(), host_values.end(), host_bools.begin(), value_and_validity.begin(), [](auto s, auto b) { return thrust::pair<T, bool>{s, b}; }); thrust::host_vector<thrust::pair<T,bool> > value_all_valid(host_values.size()); std::transform(host_values.begin(), host_values.end(), host_bools.begin(), value_all_valid.begin(), [](auto s, auto b) { return thrust::pair<T, bool>{s, true}; }); // GPU test auto it_dev = d_col->pair_begin<T, true>(); this->iterator_test_thrust(value_and_validity, it_dev, host_values.size()); auto it_hasnonull_dev = d_col->pair_begin<T, false>(); this->iterator_test_thrust(value_all_valid, it_hasnonull_dev, host_values.size()); auto itb_dev = cudf::experimental::detail::make_validity_iterator(*d_col); this->iterator_test_thrust(host_bools, itb_dev, host_values.size()); } TYPED_TEST(IteratorTest, scalar_iterator) { using T = TypeParam; T init = static_cast<T>(random_int(-128, 128)); // data and valid arrays thrust::host_vector<T> host_values(100, init); std::vector<bool> host_bools(100, true); // create a 
scalar using ScalarType = cudf::experimental::scalar_type_t<T>; std::unique_ptr<cudf::scalar> s(new ScalarType{init, true}); // calculate the expected value by CPU. thrust::host_vector<thrust::pair<T,bool> > value_and_validity(host_values.size()); std::transform(host_values.begin(), host_values.end(), host_bools.begin(), value_and_validity.begin(), [](auto v, auto b) { return thrust::pair<T, bool>{v, b}; }); // GPU test auto it_dev = cudf::experimental::detail::make_scalar_iterator<T>(*s); this->iterator_test_thrust(host_values, it_dev, host_values.size()); auto it_pair_dev = cudf::experimental::detail::make_pair_iterator<T>(*s); this->iterator_test_thrust(value_and_validity, it_pair_dev, host_values.size()); } TYPED_TEST(IteratorTest, null_scalar_iterator) { using T = TypeParam; T init = static_cast<T>(random_int(-128, 128)); // data and valid arrays std::vector<T> host_values(100, init); std::vector<bool> host_bools(100, true); // create a scalar using ScalarType = cudf::experimental::scalar_type_t<T>; std::unique_ptr<cudf::scalar> s(new ScalarType{init, true}); // calculate the expected value by CPU. thrust::host_vector<thrust::pair<T,bool> > value_and_validity(host_values.size()); std::transform(host_values.begin(), host_values.end(), host_bools.begin(), value_and_validity.begin(), [](auto v, auto b) { return thrust::pair<T, bool>{v, b}; }); // GPU test auto it_pair_dev = cudf::experimental::detail::make_pair_iterator<T>(*s); this->iterator_test_thrust(value_and_validity, it_pair_dev, host_values.size()); } CUDF_TEST_PROGRAM_MAIN()
66257b8cdd5bdb046641b6f3aae490ecc7d25799.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "simple_hip.cuh" #include <cudf/dictionary/dictionary_column_view.hpp> #include <cudf/reduction/detail/reduction_functions.hpp> #include <thrust/for_each.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/transform_iterator.h> #include <thrust/reduce.h> #include <cuda/atomic> namespace cudf { namespace reduction { namespace detail { namespace { /** * @brief Compute reduction any() for dictionary columns. * * This compiles 10x faster than using thrust::reduce or the * cudf::simple::reduction::detail::reduce utility. * Both of these use the CUB DeviceReduce which aggressively inlines * the input iterator logic. 
*/ struct any_fn { template <typename Iterator> struct any_true_fn { __device__ void operator()(size_type idx) { if (!*d_result && (iter[idx] != *d_result)) { cuda::atomic_ref<int32_t, cuda::thread_scope_device> ref{*d_result}; ref.fetch_or(1, cuda::std::memory_order_relaxed); } } Iterator iter; int32_t* d_result; }; template <typename T, std::enable_if_t<std::is_arithmetic_v<T>>* = nullptr> std::unique_ptr<scalar> operator()(column_view const& input, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { auto const d_dict = cudf::column_device_view::create(input, stream); auto const iter = [&] { auto null_iter = op::max{}.template get_null_replacing_element_transformer<bool>(); auto pair_iter = cudf::dictionary::detail::make_dictionary_pair_iterator<T>(*d_dict, input.has_nulls()); return thrust::make_transform_iterator(pair_iter, null_iter); }(); auto d_result = rmm::device_scalar<int32_t>(0, stream, rmm::mr::get_current_device_resource()); thrust::for_each_n(rmm::exec_policy(stream), thrust::make_counting_iterator<size_type>(0), input.size(), any_true_fn<decltype(iter)>{iter, d_result.data()}); return std::make_unique<numeric_scalar<bool>>(d_result.value(stream), true, stream, mr); } template <typename T, std::enable_if_t<!std::is_arithmetic_v<T>>* = nullptr> std::unique_ptr<scalar> operator()(column_view const&, rmm::cuda_stream_view, rmm::mr::device_memory_resource*) { CUDF_FAIL("Unexpected key type for dictionary in reduction any()"); } }; } // namespace std::unique_ptr<cudf::scalar> any(column_view const& col, cudf::data_type const output_dtype, std::optional<std::reference_wrapper<scalar const>> init, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_EXPECTS(output_dtype == cudf::data_type(cudf::type_id::BOOL8), "any() operation can be applied with output type `bool8` only"); if (cudf::is_dictionary(col.type())) { return cudf::type_dispatcher( dictionary_column_view(col).keys().type(), detail::any_fn{}, col, stream, mr); 
} using reducer = simple::detail::bool_result_element_dispatcher<op::max>; // dispatch for non-dictionary types return cudf::type_dispatcher(col.type(), reducer{}, col, init, stream, mr); } } // namespace detail } // namespace reduction } // namespace cudf
66257b8cdd5bdb046641b6f3aae490ecc7d25799.cu
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "simple.cuh" #include <cudf/dictionary/dictionary_column_view.hpp> #include <cudf/reduction/detail/reduction_functions.hpp> #include <thrust/for_each.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/transform_iterator.h> #include <thrust/reduce.h> #include <cuda/atomic> namespace cudf { namespace reduction { namespace detail { namespace { /** * @brief Compute reduction any() for dictionary columns. * * This compiles 10x faster than using thrust::reduce or the * cudf::simple::reduction::detail::reduce utility. * Both of these use the CUB DeviceReduce which aggressively inlines * the input iterator logic. 
*/ struct any_fn { template <typename Iterator> struct any_true_fn { __device__ void operator()(size_type idx) { if (!*d_result && (iter[idx] != *d_result)) { cuda::atomic_ref<int32_t, cuda::thread_scope_device> ref{*d_result}; ref.fetch_or(1, cuda::std::memory_order_relaxed); } } Iterator iter; int32_t* d_result; }; template <typename T, std::enable_if_t<std::is_arithmetic_v<T>>* = nullptr> std::unique_ptr<scalar> operator()(column_view const& input, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { auto const d_dict = cudf::column_device_view::create(input, stream); auto const iter = [&] { auto null_iter = op::max{}.template get_null_replacing_element_transformer<bool>(); auto pair_iter = cudf::dictionary::detail::make_dictionary_pair_iterator<T>(*d_dict, input.has_nulls()); return thrust::make_transform_iterator(pair_iter, null_iter); }(); auto d_result = rmm::device_scalar<int32_t>(0, stream, rmm::mr::get_current_device_resource()); thrust::for_each_n(rmm::exec_policy(stream), thrust::make_counting_iterator<size_type>(0), input.size(), any_true_fn<decltype(iter)>{iter, d_result.data()}); return std::make_unique<numeric_scalar<bool>>(d_result.value(stream), true, stream, mr); } template <typename T, std::enable_if_t<!std::is_arithmetic_v<T>>* = nullptr> std::unique_ptr<scalar> operator()(column_view const&, rmm::cuda_stream_view, rmm::mr::device_memory_resource*) { CUDF_FAIL("Unexpected key type for dictionary in reduction any()"); } }; } // namespace std::unique_ptr<cudf::scalar> any(column_view const& col, cudf::data_type const output_dtype, std::optional<std::reference_wrapper<scalar const>> init, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_EXPECTS(output_dtype == cudf::data_type(cudf::type_id::BOOL8), "any() operation can be applied with output type `bool8` only"); if (cudf::is_dictionary(col.type())) { return cudf::type_dispatcher( dictionary_column_view(col).keys().type(), detail::any_fn{}, col, stream, mr); 
} using reducer = simple::detail::bool_result_element_dispatcher<op::max>; // dispatch for non-dictionary types return cudf::type_dispatcher(col.type(), reducer{}, col, init, stream, mr); } } // namespace detail } // namespace reduction } // namespace cudf
6c23b011efa110ad15e100d392b2158464cb0b2b.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "hello.hip" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; char *a = NULL; hipMalloc(&a, XSIZE*YSIZE); int *b = NULL; hipMalloc(&b, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( hello), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( hello), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( hello), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
6c23b011efa110ad15e100d392b2158464cb0b2b.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "hello.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; char *a = NULL; cudaMalloc(&a, XSIZE*YSIZE); int *b = NULL; cudaMalloc(&b, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); hello<<<gridBlock,threadBlock>>>(a,b); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { hello<<<gridBlock,threadBlock>>>(a,b); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { hello<<<gridBlock,threadBlock>>>(a,b); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
04bae9a655284352112bd8ecbe85e94f5087c413.hip
// !!! This is a file automatically generated by hipify!!! /* LATTICEEASY consists of the C++ files ``latticeeasy.cpp,'' ``initialize.cpp,'' ``evolution.cpp,'' ``output.cpp,'' ``latticeeasy.h,'' ``parameters.h,''. (The distribution also includes the file ffteasy.cpp but this file is distributed separately and therefore not considered part of the LATTICEEASY distribution in what follows.) LATTICEEASY is free. We are not in any way, shape, or form expecting to make money off of these routines. We wrote them for the sake of doing good science and we're putting them out on the Internet in case other people might find them useful. Feel free to download them, incorporate them into your code, modify them, translate the comment lines into Swahili, or whatever else you want. What we do want is the following: 1) Leave this notice (i.e. this entire paragraph beginning with ``LATTICEEASY consists of...'' and ending with our email addresses) in with the code wherever you put it. Even if you're just using it in-house in your department, business, or wherever else we would like these credits to remain with it. This is partly so that people can... 2) Give us feedback. Did LATTICEEASY work great for you and help your work? Did you hate it? Did you find a way to improve it, or translate it into another programming language? Whatever the case might be, we would love to hear about it. Please let us know at the email address below. 3) Finally, insofar as we have the legal right to do so we forbid you to make money off of this code without our consent. In other words if you want to publish these functions in a book or bundle them into commercial software or anything like that contact us about it first. We'll probably say yes, but we would like to reserve that right. For any comments or questions you can reach us at gfelder@email.smith.edu Igor.Tkachev@cern.ch Enjoy LATTICEEASY! 
Gary Felder and Igor Tkachev */ #include <chrono> // for benchmarking #include <string> #include <queue> #include "latticeeasy.cuh" letype* f[nflds]; letype* fd[nflds]; letype* h[nflds][6]; letype* hd[nflds][6]; cufft_type*** hdk = nullptr; fTT_type*** hdkTT = nullptr; cufft_type*** EMTk = nullptr; fTT_type*** EMTkTT = nullptr; letype* EMT[nflds][6]; letype* curr_gradientEnergy;//[nflds]; letype* curr_potEnergy;//[num_potential_terms]; letype dvdf_params[num_dvdf_params]; letype pot_params[num_pot_params]; letype* hmean[6]; double* hmean_acc[6]; letype* hdmean[6]; double* hdmean_acc[6]; // energy momentum tensor in momentum space symmTensor<field<cufft_type, true>>* EMT_mom = new symmTensor<field<cufft_type, true>>[gwnflds](); symmTensor<field<fTT_type, true>>* EMT_mom_TT = new symmTensor<field<fTT_type, true>>[gwnflds](); // tensor perturbation in momentum space symmTensor<field<cufft_type, true>>* dh_mom = new symmTensor<field<cufft_type, true>>[gwnflds](); symmTensor<field<fTT_type, true>>* dh_mom_TT = new symmTensor<field<fTT_type, true>>[gwnflds](); // binned things symmTensor<std::vector<letype>> bins = symmTensor<std::vector<letype>>(); double t, t0; // Current time and initial time (t0=0 unless the run is a continuation of a previous one) double a = 1.0, ad = 0.0, ad2 = 0.0, aterm = 0.0; // Scale factor and its derivatives (aterm is a combination of the others used in the equations of motion). Values are initialized to their defaults for the case of no expansion. double hubble_init= 0.0; // Initial value of the Hubble constant int run_number; // 0 for a first run, 1 for a continuation of a "0" run, etc.. Stored in the grid image (see checkpoint() function). int no_initialization = 0; // If this variable is set to 1 by the model file then the fields will not be initialized in the normal way. char mode_[10] = "w"; // Mode in which to open files, i.e. write ("w") or append ("a+"). 
Depends on the variable continue_run and on whether a previous grid image was found. letype rescaling = 1.0; // Rescaling for output. This is left as 1 unless the model file modifies it. char ext_[500] = "_0.dat"; // Extension for filenames - set once and used by all output functions int nfldsout; // Number of fields to output letype model_vars[num_model_vars]; // Model-specific variables int main() { printf("Precision is: %s.\n", typeid(letype) == typeid(float) ? "float" : "double"); cudaMemGC<letype> res(6); // allocate memory for all fields gpuErrchk(hipMallocManaged(&curr_gradientEnergy, nflds * sizeof(letype))); gpuErrchk(hipMallocManaged(&curr_potEnergy, num_potential_terms * sizeof(letype))); for(int fld = 0; fld < nflds; fld++) { gpuErrchk(hipMallocManaged(&f[fld], gridsize * sizeof(letype))); gpuErrchk(hipMallocManaged(&fd[fld], gridsize * sizeof(letype))); } if constexpr(sgw) { for(int gwfld = 0; gwfld < nflds; gwfld++) { gpuErrchk(hipMallocManaged(&hmean[gwfld], 6 * sizeof(letype))); gpuErrchk(hipMallocManaged(&hmean_acc[gwfld], 6 * sizeof(double))); gpuErrchk(hipMallocManaged(&hdmean[gwfld], 6 * sizeof(letype))); gpuErrchk(hipMallocManaged(&hdmean_acc[gwfld], 6 * sizeof(double))); for(int fld = 0; fld < 6; fld++) { gpuErrchk(hipMallocManaged(&h[gwfld][fld], gridsize * sizeof(letype))); gpuErrchk(hipMallocManaged(&hd[gwfld][fld], gridsize * sizeof(letype))); gpuErrchk(hipMallocManaged(&EMT[gwfld][fld], gridsize * sizeof(letype))); gpuErrchk(hipMemset(h[gwfld][fld], 0, gridsize*sizeof(letype))); gpuErrchk(hipMemset(hd[gwfld][fld], 0, gridsize*sizeof(letype))); gpuErrchk(hipMemset(EMT[gwfld][fld], 0, gridsize*sizeof(letype))); } } gpuErrchk(hipMallocManaged(&hdk, gwnflds * sizeof(cufft_type**))); gpuErrchk(hipMallocManaged(&hdkTT, gwnflds * sizeof(fTT_type**))); gpuErrchk(hipMallocManaged(&EMTk, gwnflds * sizeof(cufft_type**))); gpuErrchk(hipMallocManaged(&EMTkTT, gwnflds * sizeof(fTT_type**))); hipMemPrefetchAsync(hdk, gwnflds * sizeof(letype**), 
hipCpuDeviceId, NULL); hipMemPrefetchAsync(hdkTT, gwnflds * sizeof(fTT_type**), hipCpuDeviceId, NULL); for(int gwfld = 0; gwfld < gwnflds; gwfld++) { gpuErrchk(hipMallocManaged(&hdk[gwfld], 6 * sizeof(cufft_type*))); gpuErrchk(hipMallocManaged(&hdkTT[gwfld], 6 * sizeof(fTT_type*))); gpuErrchk(hipMallocManaged(&EMTk[gwfld], 6 * sizeof(cufft_type*))); gpuErrchk(hipMallocManaged(&EMTkTT[gwfld], 6 * sizeof(fTT_type*))); } for(int i = 0; i < nflds; i++) { for(int j = 0; j < 6; j++) { hmean_acc[i][j] = 0.0; hdmean_acc[i][j] = 0.0; } } } prefetchAsyncFields(f, fd, hipCpuDeviceId); // prefetchAsyncGWFields(h, hd, EMT, hipCpuDeviceId); gpuErrchk(hipDeviceSynchronize()); // copy pointer to field to constant memory for easier access in kernels copyFieldsToConstantMemory(); gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); // copy the values of the scale factor at which to calculate an output to a queue for easier handling std::queue<double> a_queue; for (const auto& v : a_saves) a_queue.push(v); int numsteps = 0, output_interval = 0; // Quantities for counting how often to calculate and output derived quantities FILE *output_= fopen("output.txt","w"); // Outputs time. Used to remotely monitor progress int update_time; // Controls when to output time to output file and screen omp_set_num_threads(2); if(seed<1) // The use of seed<1 turns off certain functions (random numbers, fourier transforms, gradients, and potential energy) and should only be used for debugging printf("Warning: The parameter seed has been set to %d, which will result in incorrect output. 
For correct output set seed to a positive integer.",seed); initialize(); // Set parameter values and initial conditions gpuErrchk(hipPeekAtLastError()); t = t0; output_interval = int(checkpoint_interval / dt); //prefetch fields before starting any kernels prefetchAsyncFields(f, fd, 0); //prefetchAsyncGWFields(h, hd, EMT, 0); // Take Initial Half Time Step if this is a new run if(run_number == 0) evolve_fields(0.5*dt); // precalculate the paramter that are necessary to evolve the fields prepareParamsForCalcLapl(dvdf_params); prepareParamsForPotEnergy(pot_params); update_time=time(NULL)+print_interval; // Set initial time for update while((t <= tf || tf == -1) && a <= af) // Main time evolution loop { gpuErrchk(hipPeekAtLastError()); evolve_derivs(dt); // evolve derivatives gpuErrchk(hipPeekAtLastError()); evolve_fields(dt); // evolve fields gpuErrchk(hipPeekAtLastError()); numsteps++; // check if it is time for output if(noutput_times != 0 && numsteps % output_interval == 0 && t < tf && a < af) { save(0); // Calculate and output grid-averaged quantities (means, variances, etc.) } // check if scale factor advance far enough for output if(a_queue.size() != 0) { if(a > a_queue.front()) // Save data at the specified values of the scale factor { save(1); a_queue.pop(); } } if(time(NULL) >= update_time) // Print an update whenever elapsed time exceeds print_interval { if(screen_updates){ // This option determines whether or not to update progress on the screen printf("t = %f\ta = %f\n", t, a); fflush(stdout); } fprintf(output_, "%f\n", t); // Output progress to a file for monitoring progress fflush(output_); // Make sure output file is always up to date update_time += print_interval; // Set time for next update } } // End of main loop gpuErrchk(hipDeviceSynchronize()); printf("Saving final data\n"); save(1); // Calculate and save quantities. Force infrequently calculated quantities to be calculated. 
output_parameters(); // Save run parameters and elapsed time fprintf(output_,"LATTICEEASY program finished\n"); printf("LATTICEEASY program finished\n"); // free all the memory that was allocated for cuda for(int fld = 0; fld < nflds; fld++) { gpuErrchk(hipFree(f[fld])); gpuErrchk(hipFree(fd[fld])); } if constexpr(sgw) { for(int gwfld = 0; gwfld < nflds; gwfld++) { for(int fld = 0; fld < 6; fld++) { gpuErrchk(hipFree(h[gwfld][fld])); gpuErrchk(hipFree(hd[gwfld][fld])); gpuErrchk(hipFree(EMT[gwfld][fld])); } } for(int gwfld = 0; gwfld < nflds; gwfld++) { gpuErrchk(hipFree(hdk[gwfld])); gpuErrchk(hipFree(hdkTT[gwfld])); } if constexpr(gwnflds > 1) gpuErrchk(hipFree(hdkTT[gwnflds - 1])); } gpuErrchk(hipFree(hdk)); gpuErrchk(hipFree(hdkTT)); //fclose(benchmarkFile); return(0); }
04bae9a655284352112bd8ecbe85e94f5087c413.cu
/* LATTICEEASY consists of the C++ files ``latticeeasy.cpp,'' ``initialize.cpp,'' ``evolution.cpp,'' ``output.cpp,'' ``latticeeasy.h,'' ``parameters.h,''. (The distribution also includes the file ffteasy.cpp but this file is distributed separately and therefore not considered part of the LATTICEEASY distribution in what follows.) LATTICEEASY is free. We are not in any way, shape, or form expecting to make money off of these routines. We wrote them for the sake of doing good science and we're putting them out on the Internet in case other people might find them useful. Feel free to download them, incorporate them into your code, modify them, translate the comment lines into Swahili, or whatever else you want. What we do want is the following: 1) Leave this notice (i.e. this entire paragraph beginning with ``LATTICEEASY consists of...'' and ending with our email addresses) in with the code wherever you put it. Even if you're just using it in-house in your department, business, or wherever else we would like these credits to remain with it. This is partly so that people can... 2) Give us feedback. Did LATTICEEASY work great for you and help your work? Did you hate it? Did you find a way to improve it, or translate it into another programming language? Whatever the case might be, we would love to hear about it. Please let us know at the email address below. 3) Finally, insofar as we have the legal right to do so we forbid you to make money off of this code without our consent. In other words if you want to publish these functions in a book or bundle them into commercial software or anything like that contact us about it first. We'll probably say yes, but we would like to reserve that right. For any comments or questions you can reach us at gfelder@email.smith.edu Igor.Tkachev@cern.ch Enjoy LATTICEEASY! 
Gary Felder and Igor Tkachev */ #include <chrono> // for benchmarking #include <string> #include <queue> #include "latticeeasy.cuh" letype* f[nflds]; letype* fd[nflds]; letype* h[nflds][6]; letype* hd[nflds][6]; cufft_type*** hdk = nullptr; fTT_type*** hdkTT = nullptr; cufft_type*** EMTk = nullptr; fTT_type*** EMTkTT = nullptr; letype* EMT[nflds][6]; letype* curr_gradientEnergy;//[nflds]; letype* curr_potEnergy;//[num_potential_terms]; letype dvdf_params[num_dvdf_params]; letype pot_params[num_pot_params]; letype* hmean[6]; double* hmean_acc[6]; letype* hdmean[6]; double* hdmean_acc[6]; // energy momentum tensor in momentum space symmTensor<field<cufft_type, true>>* EMT_mom = new symmTensor<field<cufft_type, true>>[gwnflds](); symmTensor<field<fTT_type, true>>* EMT_mom_TT = new symmTensor<field<fTT_type, true>>[gwnflds](); // tensor perturbation in momentum space symmTensor<field<cufft_type, true>>* dh_mom = new symmTensor<field<cufft_type, true>>[gwnflds](); symmTensor<field<fTT_type, true>>* dh_mom_TT = new symmTensor<field<fTT_type, true>>[gwnflds](); // binned things symmTensor<std::vector<letype>> bins = symmTensor<std::vector<letype>>(); double t, t0; // Current time and initial time (t0=0 unless the run is a continuation of a previous one) double a = 1.0, ad = 0.0, ad2 = 0.0, aterm = 0.0; // Scale factor and its derivatives (aterm is a combination of the others used in the equations of motion). Values are initialized to their defaults for the case of no expansion. double hubble_init= 0.0; // Initial value of the Hubble constant int run_number; // 0 for a first run, 1 for a continuation of a "0" run, etc.. Stored in the grid image (see checkpoint() function). int no_initialization = 0; // If this variable is set to 1 by the model file then the fields will not be initialized in the normal way. char mode_[10] = "w"; // Mode in which to open files, i.e. write ("w") or append ("a+"). 
Depends on the variable continue_run and on whether a previous grid image was found. letype rescaling = 1.0; // Rescaling for output. This is left as 1 unless the model file modifies it. char ext_[500] = "_0.dat"; // Extension for filenames - set once and used by all output functions int nfldsout; // Number of fields to output letype model_vars[num_model_vars]; // Model-specific variables int main() { printf("Precision is: %s.\n", typeid(letype) == typeid(float) ? "float" : "double"); cudaMemGC<letype> res(6); // allocate memory for all fields gpuErrchk(cudaMallocManaged(&curr_gradientEnergy, nflds * sizeof(letype))); gpuErrchk(cudaMallocManaged(&curr_potEnergy, num_potential_terms * sizeof(letype))); for(int fld = 0; fld < nflds; fld++) { gpuErrchk(cudaMallocManaged(&f[fld], gridsize * sizeof(letype))); gpuErrchk(cudaMallocManaged(&fd[fld], gridsize * sizeof(letype))); } if constexpr(sgw) { for(int gwfld = 0; gwfld < nflds; gwfld++) { gpuErrchk(cudaMallocManaged(&hmean[gwfld], 6 * sizeof(letype))); gpuErrchk(cudaMallocManaged(&hmean_acc[gwfld], 6 * sizeof(double))); gpuErrchk(cudaMallocManaged(&hdmean[gwfld], 6 * sizeof(letype))); gpuErrchk(cudaMallocManaged(&hdmean_acc[gwfld], 6 * sizeof(double))); for(int fld = 0; fld < 6; fld++) { gpuErrchk(cudaMallocManaged(&h[gwfld][fld], gridsize * sizeof(letype))); gpuErrchk(cudaMallocManaged(&hd[gwfld][fld], gridsize * sizeof(letype))); gpuErrchk(cudaMallocManaged(&EMT[gwfld][fld], gridsize * sizeof(letype))); gpuErrchk(cudaMemset(h[gwfld][fld], 0, gridsize*sizeof(letype))); gpuErrchk(cudaMemset(hd[gwfld][fld], 0, gridsize*sizeof(letype))); gpuErrchk(cudaMemset(EMT[gwfld][fld], 0, gridsize*sizeof(letype))); } } gpuErrchk(cudaMallocManaged(&hdk, gwnflds * sizeof(cufft_type**))); gpuErrchk(cudaMallocManaged(&hdkTT, gwnflds * sizeof(fTT_type**))); gpuErrchk(cudaMallocManaged(&EMTk, gwnflds * sizeof(cufft_type**))); gpuErrchk(cudaMallocManaged(&EMTkTT, gwnflds * sizeof(fTT_type**))); cudaMemPrefetchAsync(hdk, gwnflds * 
sizeof(letype**), cudaCpuDeviceId, NULL); cudaMemPrefetchAsync(hdkTT, gwnflds * sizeof(fTT_type**), cudaCpuDeviceId, NULL); for(int gwfld = 0; gwfld < gwnflds; gwfld++) { gpuErrchk(cudaMallocManaged(&hdk[gwfld], 6 * sizeof(cufft_type*))); gpuErrchk(cudaMallocManaged(&hdkTT[gwfld], 6 * sizeof(fTT_type*))); gpuErrchk(cudaMallocManaged(&EMTk[gwfld], 6 * sizeof(cufft_type*))); gpuErrchk(cudaMallocManaged(&EMTkTT[gwfld], 6 * sizeof(fTT_type*))); } for(int i = 0; i < nflds; i++) { for(int j = 0; j < 6; j++) { hmean_acc[i][j] = 0.0; hdmean_acc[i][j] = 0.0; } } } prefetchAsyncFields(f, fd, cudaCpuDeviceId); // prefetchAsyncGWFields(h, hd, EMT, cudaCpuDeviceId); gpuErrchk(cudaDeviceSynchronize()); // copy pointer to field to constant memory for easier access in kernels copyFieldsToConstantMemory(); gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); // copy the values of the scale factor at which to calculate an output to a queue for easier handling std::queue<double> a_queue; for (const auto& v : a_saves) a_queue.push(v); int numsteps = 0, output_interval = 0; // Quantities for counting how often to calculate and output derived quantities FILE *output_= fopen("output.txt","w"); // Outputs time. Used to remotely monitor progress int update_time; // Controls when to output time to output file and screen omp_set_num_threads(2); if(seed<1) // The use of seed<1 turns off certain functions (random numbers, fourier transforms, gradients, and potential energy) and should only be used for debugging printf("Warning: The parameter seed has been set to %d, which will result in incorrect output. 
For correct output set seed to a positive integer.",seed); initialize(); // Set parameter values and initial conditions gpuErrchk(cudaPeekAtLastError()); t = t0; output_interval = int(checkpoint_interval / dt); //prefetch fields before starting any kernels prefetchAsyncFields(f, fd, 0); //prefetchAsyncGWFields(h, hd, EMT, 0); // Take Initial Half Time Step if this is a new run if(run_number == 0) evolve_fields(0.5*dt); // precalculate the paramter that are necessary to evolve the fields prepareParamsForCalcLapl(dvdf_params); prepareParamsForPotEnergy(pot_params); update_time=time(NULL)+print_interval; // Set initial time for update while((t <= tf || tf == -1) && a <= af) // Main time evolution loop { gpuErrchk(cudaPeekAtLastError()); evolve_derivs(dt); // evolve derivatives gpuErrchk(cudaPeekAtLastError()); evolve_fields(dt); // evolve fields gpuErrchk(cudaPeekAtLastError()); numsteps++; // check if it is time for output if(noutput_times != 0 && numsteps % output_interval == 0 && t < tf && a < af) { save(0); // Calculate and output grid-averaged quantities (means, variances, etc.) } // check if scale factor advance far enough for output if(a_queue.size() != 0) { if(a > a_queue.front()) // Save data at the specified values of the scale factor { save(1); a_queue.pop(); } } if(time(NULL) >= update_time) // Print an update whenever elapsed time exceeds print_interval { if(screen_updates){ // This option determines whether or not to update progress on the screen printf("t = %f\ta = %f\n", t, a); fflush(stdout); } fprintf(output_, "%f\n", t); // Output progress to a file for monitoring progress fflush(output_); // Make sure output file is always up to date update_time += print_interval; // Set time for next update } } // End of main loop gpuErrchk(cudaDeviceSynchronize()); printf("Saving final data\n"); save(1); // Calculate and save quantities. Force infrequently calculated quantities to be calculated. 
output_parameters(); // Save run parameters and elapsed time fprintf(output_,"LATTICEEASY program finished\n"); printf("LATTICEEASY program finished\n"); // free all the memory that was allocated for cuda for(int fld = 0; fld < nflds; fld++) { gpuErrchk(cudaFree(f[fld])); gpuErrchk(cudaFree(fd[fld])); } if constexpr(sgw) { for(int gwfld = 0; gwfld < nflds; gwfld++) { for(int fld = 0; fld < 6; fld++) { gpuErrchk(cudaFree(h[gwfld][fld])); gpuErrchk(cudaFree(hd[gwfld][fld])); gpuErrchk(cudaFree(EMT[gwfld][fld])); } } for(int gwfld = 0; gwfld < nflds; gwfld++) { gpuErrchk(cudaFree(hdk[gwfld])); gpuErrchk(cudaFree(hdkTT[gwfld])); } if constexpr(gwnflds > 1) gpuErrchk(cudaFree(hdkTT[gwnflds - 1])); } gpuErrchk(cudaFree(hdk)); gpuErrchk(cudaFree(hdkTT)); //fclose(benchmarkFile); return(0); }
edb4c9ad3a6ebdad54704d92f9b85c56c982914c.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "analyze_height.hip" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; Person *people = NULL; hipMalloc(&people, XSIZE*YSIZE); int *statResults = NULL; hipMalloc(&statResults, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( analyze_height), dim3(gridBlock),dim3(threadBlock), 0, 0, people,statResults); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( analyze_height), dim3(gridBlock),dim3(threadBlock), 0, 0, people,statResults); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( analyze_height), dim3(gridBlock),dim3(threadBlock), 0, 0, people,statResults); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << 
','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
edb4c9ad3a6ebdad54704d92f9b85c56c982914c.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "analyze_height.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; Person *people = NULL; cudaMalloc(&people, XSIZE*YSIZE); int *statResults = NULL; cudaMalloc(&statResults, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); analyze_height<<<gridBlock,threadBlock>>>(people,statResults); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { analyze_height<<<gridBlock,threadBlock>>>(people,statResults); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { analyze_height<<<gridBlock,threadBlock>>>(people,statResults); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
c116a9695e226a9793f243a18c834474d5c5f214.hip
// !!! This is a file automatically generated by hipify!!! #include "lock_hip.cuh" Lock::Lock() { int state = 0; HANDLE_ERROR(hipMalloc((void**)&mutex, sizeof(int))); HANDLE_ERROR(hipMemcpy(mutex, &state, sizeof(int), hipMemcpyHostToDevice)); } Lock::~Lock() { hipFree(mutex); } __device__ void Lock::lock() { while (atomicCAS(mutex, 0, 1) != 0); } __device__ void Lock::unlock() { atomicExch(mutex, 0); }
c116a9695e226a9793f243a18c834474d5c5f214.cu
#include "lock.cuh" Lock::Lock() { int state = 0; HANDLE_ERROR(cudaMalloc((void**)&mutex, sizeof(int))); HANDLE_ERROR(cudaMemcpy(mutex, &state, sizeof(int), cudaMemcpyHostToDevice)); } Lock::~Lock() { cudaFree(mutex); } __device__ void Lock::lock() { while (atomicCAS(mutex, 0, 1) != 0); } __device__ void Lock::unlock() { atomicExch(mutex, 0); }
444334974d2e17b12118d8b9e081a2ec56147a7b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef _SCAN_BEST_KERNEL_CU_ #define _SCAN_BEST_KERNEL_CU_ // Define this to more rigorously avoid bank conflicts, // even at the lower (root) levels of the tree // Note that due to the higher addressing overhead, performance // is lower with ZERO_BANK_CONFLICTS enabled. It is provided // as an example. //#define ZERO_BANK_CONFLICTS // 32 banks on M2070 #define NUM_BANKS 32 #define LOG_NUM_BANKS 5 #ifdef ZERO_BANK_CONFLICTS #define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS + (index) >> (2*LOG_NUM_BANKS)) #else #define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS) #endif /////////////////////////////////////////////////////////////////////////////// // Work-efficient compute implementation of scan, one thread per 2 elements // Work-efficient: O(log(n)) steps, and O(n) adds. // Also shared storage efficient: Uses n + n/NUM_BANKS shared memory -- no ping-ponging // Also avoids most bank conflicts using single-element offsets every NUM_BANKS elements. // // In addition, If ZERO_BANK_CONFLICTS is defined, uses // n + n/NUM_BANKS + n/(NUM_BANKS*NUM_BANKS) // shared memory. If ZERO_BANK_CONFLICTS is defined, avoids ALL bank conflicts using // single-element offsets every NUM_BANKS elements, plus additional single-element offsets // after every NUM_BANKS^2 elements. // // Uses a balanced tree type algorithm. See Blelloch, 1990 "Prefix Sums // and Their Applications", or Prins and Chatterjee PRAM course notes: // http://www.cs.unc.edu/~prins/Classes/203/Handouts/pram.pdf // // This work-efficient version is based on the algorithm presented in Guy Blelloch's // excellent paper "Prefix sums and their applications". 
// http://www-2.cs.cmu.edu/afs/cs.cmu.edu/project/scandal/public/papers/CMU-CS-90-190.html // // Pro: Work Efficient, very few bank conflicts (or zero if ZERO_BANK_CONFLICTS is defined) // Con: More instructions to compute bank-conflict-free shared memory addressing, // and slightly more shared memory storage used. // template <bool isNP2> __device__ void loadSharedChunkFromMem(int *s_data, const int *g_idata, int n, int baseIndex, int& ai, int& bi, int& mem_ai, int& mem_bi, int& bankOffsetA, int& bankOffsetB) { int thid = threadIdx.x; mem_ai = baseIndex + threadIdx.x; mem_bi = mem_ai + blockDim.x; ai = thid; bi = thid + blockDim.x; // compute spacing to avoid bank conflicts bankOffsetA = CONFLICT_FREE_OFFSET(ai); bankOffsetB = CONFLICT_FREE_OFFSET(bi); // Cache the computational window in shared memory // pad values beyond n with zeros s_data[ai + bankOffsetA] = g_idata[mem_ai]; if (isNP2) // compile-time decision { s_data[bi + bankOffsetB] = (bi < n) ? g_idata[mem_bi] : 0; } else { s_data[bi + bankOffsetB] = g_idata[mem_bi]; } } template <bool isNP2> __device__ void storeSharedChunkToMem(int* g_odata, const int* s_data, int n, int ai, int bi, int mem_ai, int mem_bi, int bankOffsetA, int bankOffsetB) { __syncthreads(); // write results to global memory g_odata[mem_ai] = s_data[ai + bankOffsetA]; if (isNP2) // compile-time decision { if (bi < n) g_odata[mem_bi] = s_data[bi + bankOffsetB]; } else { g_odata[mem_bi] = s_data[bi + bankOffsetB]; } } template <bool storeSum> __device__ void clearLastElement(int* s_data, int *g_blockSums, int* g_blockStride, int blockIndex, unsigned int stride) { if (threadIdx.x == 0) { int index = (blockDim.x << 1) - 1; index += CONFLICT_FREE_OFFSET(index); if (storeSum) // compile-time decision { // write this block's total sum to the corresponding index in the blockSums array g_blockSums[blockIndex] = s_data[index]; g_blockStride[blockIndex]=stride; } // zero the last element in the scan so it will propagate back to the front 
//------------------------------- Ali ---------------------------------- if (blockIndex==0) { s_data[index] = 0; } else { // while (g_blockSums[blockIndex-1]==0 && g_blockStride[blockIndex-1]==0) // s_data[index]=0; s_data[index] = g_blockSums[blockIndex-1]-g_blockStride[blockIndex-1]; } //------------------------------- Ali ---------------------------------- // s_data[index] = 0; } } __device__ unsigned int buildSum(int *s_data) { unsigned int thid = threadIdx.x; unsigned int stride = 1; // build the sum in place up the tree for (int d = blockDim.x; d > 0; d >>= 1) { __syncthreads(); if (thid < d) { int i = __mul24(__mul24(2, stride), thid); int ai = i + stride - 1; int bi = ai + stride; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); ///------------------------- Ali ------------------------ s_data[bi] = max(s_data[ai],s_data[bi]+stride); // s_data[bi] += s_data[ai]; ///------------------------- Ali ------------------------ } stride *= 2; } return stride; } __device__ void scanRootToLeaves(int *s_data, unsigned int stride) { unsigned int thid = threadIdx.x; // traverse down the tree building the scan in place for (int d = 1; d <= blockDim.x; d *= 2) { stride >>= 1; __syncthreads(); if (thid < d) { int i = __mul24(__mul24(2, stride), thid); int ai = i + stride - 1; int bi = ai + stride; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); int t = s_data[ai]; s_data[ai] = s_data[bi]; //---------------------- Ali -------------------------- s_data[bi] = max(t,s_data[bi])-stride; //---------------------- Ali -------------------------- // s_data[bi] += t; } } } template <bool storeSum> __device__ void prescanBlock(int *data, int blockIndex, int *blockSums, int* blockStride) { int stride = buildSum(data); // build the sum in place up the tree clearLastElement<storeSum>(data, blockSums, blockStride, (blockIndex == 0) ? 
blockIdx.x : blockIndex, stride); scanRootToLeaves(data, stride); // traverse down tree to build the scan } template <bool storeSum, bool isNP2> __global__ void prescan(int *g_odata, const int *g_idata, int *g_blockSums, int* g_blockStride, int n, int blockIndex, int baseIndex) { int ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB; extern __shared__ int s_data[]; // load data into shared memory loadSharedChunkFromMem<isNP2>(s_data, g_idata, n, (baseIndex == 0) ? __mul24(blockIdx.x, (blockDim.x << 1)):baseIndex, ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB); // scan the data in each block prescanBlock<storeSum>(s_data, blockIndex, g_blockSums, g_blockStride); // write results to device memory storeSharedChunkToMem<isNP2>(g_odata, s_data, n, ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB); } #endif // #ifndef _SCAN_BEST_KERNEL_CU_
444334974d2e17b12118d8b9e081a2ec56147a7b.cu
#ifndef _SCAN_BEST_KERNEL_CU_ #define _SCAN_BEST_KERNEL_CU_ // Define this to more rigorously avoid bank conflicts, // even at the lower (root) levels of the tree // Note that due to the higher addressing overhead, performance // is lower with ZERO_BANK_CONFLICTS enabled. It is provided // as an example. //#define ZERO_BANK_CONFLICTS // 32 banks on M2070 #define NUM_BANKS 32 #define LOG_NUM_BANKS 5 #ifdef ZERO_BANK_CONFLICTS #define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS + (index) >> (2*LOG_NUM_BANKS)) #else #define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS) #endif /////////////////////////////////////////////////////////////////////////////// // Work-efficient compute implementation of scan, one thread per 2 elements // Work-efficient: O(log(n)) steps, and O(n) adds. // Also shared storage efficient: Uses n + n/NUM_BANKS shared memory -- no ping-ponging // Also avoids most bank conflicts using single-element offsets every NUM_BANKS elements. // // In addition, If ZERO_BANK_CONFLICTS is defined, uses // n + n/NUM_BANKS + n/(NUM_BANKS*NUM_BANKS) // shared memory. If ZERO_BANK_CONFLICTS is defined, avoids ALL bank conflicts using // single-element offsets every NUM_BANKS elements, plus additional single-element offsets // after every NUM_BANKS^2 elements. // // Uses a balanced tree type algorithm. See Blelloch, 1990 "Prefix Sums // and Their Applications", or Prins and Chatterjee PRAM course notes: // http://www.cs.unc.edu/~prins/Classes/203/Handouts/pram.pdf // // This work-efficient version is based on the algorithm presented in Guy Blelloch's // excellent paper "Prefix sums and their applications". // http://www-2.cs.cmu.edu/afs/cs.cmu.edu/project/scandal/public/papers/CMU-CS-90-190.html // // Pro: Work Efficient, very few bank conflicts (or zero if ZERO_BANK_CONFLICTS is defined) // Con: More instructions to compute bank-conflict-free shared memory addressing, // and slightly more shared memory storage used. 
// template <bool isNP2> __device__ void loadSharedChunkFromMem(int *s_data, const int *g_idata, int n, int baseIndex, int& ai, int& bi, int& mem_ai, int& mem_bi, int& bankOffsetA, int& bankOffsetB) { int thid = threadIdx.x; mem_ai = baseIndex + threadIdx.x; mem_bi = mem_ai + blockDim.x; ai = thid; bi = thid + blockDim.x; // compute spacing to avoid bank conflicts bankOffsetA = CONFLICT_FREE_OFFSET(ai); bankOffsetB = CONFLICT_FREE_OFFSET(bi); // Cache the computational window in shared memory // pad values beyond n with zeros s_data[ai + bankOffsetA] = g_idata[mem_ai]; if (isNP2) // compile-time decision { s_data[bi + bankOffsetB] = (bi < n) ? g_idata[mem_bi] : 0; } else { s_data[bi + bankOffsetB] = g_idata[mem_bi]; } } template <bool isNP2> __device__ void storeSharedChunkToMem(int* g_odata, const int* s_data, int n, int ai, int bi, int mem_ai, int mem_bi, int bankOffsetA, int bankOffsetB) { __syncthreads(); // write results to global memory g_odata[mem_ai] = s_data[ai + bankOffsetA]; if (isNP2) // compile-time decision { if (bi < n) g_odata[mem_bi] = s_data[bi + bankOffsetB]; } else { g_odata[mem_bi] = s_data[bi + bankOffsetB]; } } template <bool storeSum> __device__ void clearLastElement(int* s_data, int *g_blockSums, int* g_blockStride, int blockIndex, unsigned int stride) { if (threadIdx.x == 0) { int index = (blockDim.x << 1) - 1; index += CONFLICT_FREE_OFFSET(index); if (storeSum) // compile-time decision { // write this block's total sum to the corresponding index in the blockSums array g_blockSums[blockIndex] = s_data[index]; g_blockStride[blockIndex]=stride; } // zero the last element in the scan so it will propagate back to the front //------------------------------- Ali ---------------------------------- if (blockIndex==0) { s_data[index] = 0; } else { // while (g_blockSums[blockIndex-1]==0 && g_blockStride[blockIndex-1]==0) // s_data[index]=0; s_data[index] = g_blockSums[blockIndex-1]-g_blockStride[blockIndex-1]; } //------------------------------- Ali 
---------------------------------- // s_data[index] = 0; } } __device__ unsigned int buildSum(int *s_data) { unsigned int thid = threadIdx.x; unsigned int stride = 1; // build the sum in place up the tree for (int d = blockDim.x; d > 0; d >>= 1) { __syncthreads(); if (thid < d) { int i = __mul24(__mul24(2, stride), thid); int ai = i + stride - 1; int bi = ai + stride; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); ///------------------------- Ali ------------------------ s_data[bi] = max(s_data[ai],s_data[bi]+stride); // s_data[bi] += s_data[ai]; ///------------------------- Ali ------------------------ } stride *= 2; } return stride; } __device__ void scanRootToLeaves(int *s_data, unsigned int stride) { unsigned int thid = threadIdx.x; // traverse down the tree building the scan in place for (int d = 1; d <= blockDim.x; d *= 2) { stride >>= 1; __syncthreads(); if (thid < d) { int i = __mul24(__mul24(2, stride), thid); int ai = i + stride - 1; int bi = ai + stride; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); int t = s_data[ai]; s_data[ai] = s_data[bi]; //---------------------- Ali -------------------------- s_data[bi] = max(t,s_data[bi])-stride; //---------------------- Ali -------------------------- // s_data[bi] += t; } } } template <bool storeSum> __device__ void prescanBlock(int *data, int blockIndex, int *blockSums, int* blockStride) { int stride = buildSum(data); // build the sum in place up the tree clearLastElement<storeSum>(data, blockSums, blockStride, (blockIndex == 0) ? 
blockIdx.x : blockIndex, stride); scanRootToLeaves(data, stride); // traverse down tree to build the scan } template <bool storeSum, bool isNP2> __global__ void prescan(int *g_odata, const int *g_idata, int *g_blockSums, int* g_blockStride, int n, int blockIndex, int baseIndex) { int ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB; extern __shared__ int s_data[]; // load data into shared memory loadSharedChunkFromMem<isNP2>(s_data, g_idata, n, (baseIndex == 0) ? __mul24(blockIdx.x, (blockDim.x << 1)):baseIndex, ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB); // scan the data in each block prescanBlock<storeSum>(s_data, blockIndex, g_blockSums, g_blockStride); // write results to device memory storeSharedChunkToMem<isNP2>(g_odata, s_data, n, ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB); } #endif // #ifndef _SCAN_BEST_KERNEL_CU_
87b6d9652808d03829373fd9ce14c452f39463c2.hip
// !!! This is a file automatically generated by hipify!!! /* Matrix Inversion * Group F: M. Lechner, P. Knbel, J. Lvhall * * All Test suites */ #include "includes.h" static void do_complete_check(float *d_mat,float* d_mat2, float *d_inv, float* h_inv, int n) { printf("Doing complete check for Identity:\n"); hipEvent_t start, stop; float milliseconds; float *d_identity; cudaCheck(hipMalloc((void **)&d_identity, n*n* sizeof(float))); /* Multiply matrix with inverse */ hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); printf("Matrix multipying..."); matrix_multiplication(d_identity,d_mat2,d_inv,n); printf("[OK]"); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); printf(" (in %1.3f ms)\n",milliseconds); hipEventDestroy(start); hipEventDestroy(stop); // printf("Identity matrix:\n"); // print_matrix_on_device_kernel<<<1,1>>>(d_identity,n); hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); printf("Check for Identity matrix..."); int ur = identity_matrix(d_identity,n); printf("[OK]"); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); printf(" (in %1.3f ms)\n",milliseconds); hipEventDestroy(start); hipEventDestroy(stop); if(ur) printf("SUCCESS! Matix inversion was successfull!!!!!\n"); else printf("FAILED! 
Matrix inversion not successfull\n"); cudaCheck(hipMemcpy(d_inv, h_inv, n*n * sizeof(float), hipMemcpyHostToDevice)); printf("Matrix from host multipying..."); matrix_multiplication(d_identity,d_mat2,d_inv,n); printf("[OK]\n"); // printf("Identity matrix:\n"); // print_matrix_on_device_kernel<<<1,1>>>(d_identity,n); printf("Check host matrix for Identity..."); ur = identity_matrix(d_identity,n); printf("[OK]\n"); if(!ur) printf("Host matrix inversion failed!\n"); } static int check_first_elements_for_identity(float *A, float *Ainv, int n) { int sub=5; for(int x = 0; x < sub; x++) { for(int y = 0; y < sub; y++) { float sum=0; for(int k=0;k<n;k++) { sum+= A[x*n+k]*Ainv[k*n+y]; } if(x==y) { if( sum<0.99||sum>1.01 ) { return 0; } } else{ if(sum<-0.01 || sum >0.01) { return 0; } } } } return 1; } static void do_partial_check(float *d_inv, float* h_inv, float *h_mat,float *d_mat2, int n) { printf("Doing partial check for identity!\n"); cudaCheck(hipMemcpy(h_mat,d_mat2,sizeof(float)*n*n,hipMemcpyDeviceToHost)); printf("Checking CPU matrix for identity ... "); if(check_first_elements_for_identity(h_mat,h_inv,n)) { printf("[SUCCESS]\n"); } else printf("[FAIL]\n"); cudaCheck(hipMemcpy(h_inv,d_inv,sizeof(float)*n*n,hipMemcpyDeviceToHost)); printf("Checking GPU matrix for identity ... "); if(check_first_elements_for_identity(h_mat,h_inv,n)) { printf("[SUCCESS]\n"); } else printf("[FAIL]\n"); } void test_gauss(int n){ printf("running Jakobs tests.\n"); float time = 0; hipEvent_t start, stop; float *matrix; float * matrix_org; printf("\nDoing matrix inversion test with n=%d\n",n); if(n == 3){ matrix = tools_create_identity_matrix(n);//(float *)malloc(n*n* sizeof(float)); matrix[1] = 1; matrix[6] = 1; matrix_org = tools_create_identity_matrix(n); //used instead of malloc because lazy and easy.. 
} else { matrix = (float *)malloc(sizeof(float)*n*n); matrix_org = (float *)malloc(sizeof(float)*n*n); float * d_mat; d_mat = random_matrix_generate(n,100,1); gpuErrchk(hipMemcpy(matrix, d_mat, n*n * sizeof(float), hipMemcpyDeviceToHost)) cudaCheck(hipFree(d_mat)); } int i; for(i = 0;i <n*n; i++){ matrix_org[i] = matrix[i]; } float* inverse = tools_create_identity_matrix(n); float* inverse_matrix_cpu = tools_create_identity_matrix(n); /* Print out test matrix */ if(n == 3){ printf("test Matrix org:\n"); tools_print_matrix(matrix,n); printf("test Matrix:\n"); tools_print_matrix(matrix,n); tools_WAprint(n,matrix); } hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); hipEventSynchronize(start); gauss_inverse_gpu(matrix, n, inverse); hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); printf("CUDA inverse took ms: %f\n", time); hipEventDestroy(start); hipEventDestroy(stop); hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); hipEventSynchronize(start); //running cpu test first because it has singularity check. //inversion destroys the matrix int succ= gauss_inverse_cpu(matrix, n, inverse_matrix_cpu); hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); printf("CPU inverse took ms: %f\n", time); hipEventDestroy(start); hipEventDestroy(stop); if(!succ) { printf("Matrix singular!"); exit(EXIT_SUCCESS); } //restore the matrix for(i = 0;i <n*n; i++){ matrix[i] = matrix_org[i]; } if(!tools_is_equal(inverse,inverse_matrix_cpu,n*n)){ printf("matrixes not equal. printing.\n\n"); printf("gpu matrix \n"); tools_print_matrix(inverse, n); printf("\n\ncpu matrix \n"); tools_print_matrix(inverse_matrix_cpu, n); printf("start matrix\n"); tools_WAprint(n,matrix_org); } else { printf("matrixes equal. all is good. 
\n"); } free(matrix); free(matrix_org); free(inverse); free(inverse_matrix_cpu); } void test_cofactors(int n){ /* float *d_mat, h_mat; float *d_inv, h_inv; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); d_mat = random_matrix_generate(n,100,1); cudaCheck(hipMalloc((void**)&d_inv, sizeof(float)*n*n)); h_mat=(float*)malloc(sizeof(float)*n*n); h_inv=(float*)malloc(sizeof(float)*n*n); cudaCheck(hipMemcpy(h_mat, d_mat, n*n * sizeof(float), hipMemcpyDeviceToHost)); if(n==5){ } */ printf("Not implemented!"); } void test_lu_decomposition(int n) { /* 100MB Matrix (5000x5000) take 4min to invert on GPU (GT 525m, with 96 threads) vs 28 min on a Intel i5 2430m core (= 7 x speedup) */ printf("Mathias test utility!\n"); //test_gpu_pivoting(); float *d_mat, *d_inv, *d_mat2; float *h_mat, *h_inv; printf("\nDoing LU matrix inversion test with n=%d\n",n); d_mat = random_matrix_generate(n,100,1); cudaCheck(hipMalloc((void**)&d_inv, sizeof(float)*n*n)); cudaCheck(hipMalloc((void**)&d_mat2, sizeof(float)*n*n)); h_mat=(float*)malloc(sizeof(float)*n*n); h_inv=(float*)malloc(sizeof(float)*n*n); cudaCheck(hipMemcpy(h_mat, d_mat, n*n * sizeof(float), hipMemcpyDeviceToHost)); /* Copy random matrix on device */ cudaCheck(hipMemcpy(d_mat2, d_mat, n*n * sizeof(float), hipMemcpyDeviceToDevice)); if(lu_dec_matrix_inverse_gpu(d_mat, d_inv, n)==0) { printf("Matrix singular!"); exit(EXIT_SUCCESS); } lu_dec_matrix_inverse_cpu(h_mat,h_inv,n); // Do check if inversion was successfull if(n<=900) { do_complete_check(d_mat,d_mat2, d_inv, h_inv, n); } else{ do_partial_check(d_inv, h_inv, h_mat, d_mat2,n); } cudaCheck(hipFree(d_mat)); cudaCheck(hipFree(d_mat2)); cudaCheck(hipFree(d_inv)); free(h_inv); free(h_mat); hipProfilerStop(); }
87b6d9652808d03829373fd9ce14c452f39463c2.cu
/* Matrix Inversion * Group F: M. Lechner, P. Knöbel, J. Lövhall * * All Test suites */ #include "includes.h" static void do_complete_check(float *d_mat,float* d_mat2, float *d_inv, float* h_inv, int n) { printf("Doing complete check for Identity:\n"); cudaEvent_t start, stop; float milliseconds; float *d_identity; cudaCheck(cudaMalloc((void **)&d_identity, n*n* sizeof(float))); /* Multiply matrix with inverse */ cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); printf("Matrix multipying..."); matrix_multiplication(d_identity,d_mat2,d_inv,n); printf("[OK]"); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); printf(" (in %1.3f ms)\n",milliseconds); cudaEventDestroy(start); cudaEventDestroy(stop); // printf("Identity matrix:\n"); // print_matrix_on_device_kernel<<<1,1>>>(d_identity,n); cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); printf("Check for Identity matrix..."); int ur = identity_matrix(d_identity,n); printf("[OK]"); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); printf(" (in %1.3f ms)\n",milliseconds); cudaEventDestroy(start); cudaEventDestroy(stop); if(ur) printf("SUCCESS! Matix inversion was successfull!!!!!\n"); else printf("FAILED! 
Matrix inversion not successfull\n"); cudaCheck(cudaMemcpy(d_inv, h_inv, n*n * sizeof(float), cudaMemcpyHostToDevice)); printf("Matrix from host multipying..."); matrix_multiplication(d_identity,d_mat2,d_inv,n); printf("[OK]\n"); // printf("Identity matrix:\n"); // print_matrix_on_device_kernel<<<1,1>>>(d_identity,n); printf("Check host matrix for Identity..."); ur = identity_matrix(d_identity,n); printf("[OK]\n"); if(!ur) printf("Host matrix inversion failed!\n"); } static int check_first_elements_for_identity(float *A, float *Ainv, int n) { int sub=5; for(int x = 0; x < sub; x++) { for(int y = 0; y < sub; y++) { float sum=0; for(int k=0;k<n;k++) { sum+= A[x*n+k]*Ainv[k*n+y]; } if(x==y) { if( sum<0.99||sum>1.01 ) { return 0; } } else{ if(sum<-0.01 || sum >0.01) { return 0; } } } } return 1; } static void do_partial_check(float *d_inv, float* h_inv, float *h_mat,float *d_mat2, int n) { printf("Doing partial check for identity!\n"); cudaCheck(cudaMemcpy(h_mat,d_mat2,sizeof(float)*n*n,cudaMemcpyDeviceToHost)); printf("Checking CPU matrix for identity ... "); if(check_first_elements_for_identity(h_mat,h_inv,n)) { printf("[SUCCESS]\n"); } else printf("[FAIL]\n"); cudaCheck(cudaMemcpy(h_inv,d_inv,sizeof(float)*n*n,cudaMemcpyDeviceToHost)); printf("Checking GPU matrix for identity ... "); if(check_first_elements_for_identity(h_mat,h_inv,n)) { printf("[SUCCESS]\n"); } else printf("[FAIL]\n"); } void test_gauss(int n){ printf("running Jakobs tests.\n"); float time = 0; cudaEvent_t start, stop; float *matrix; float * matrix_org; printf("\nDoing matrix inversion test with n=%d\n",n); if(n == 3){ matrix = tools_create_identity_matrix(n);//(float *)malloc(n*n* sizeof(float)); matrix[1] = 1; matrix[6] = 1; matrix_org = tools_create_identity_matrix(n); //used instead of malloc because lazy and easy.. 
} else { matrix = (float *)malloc(sizeof(float)*n*n); matrix_org = (float *)malloc(sizeof(float)*n*n); float * d_mat; d_mat = random_matrix_generate(n,100,1); gpuErrchk(cudaMemcpy(matrix, d_mat, n*n * sizeof(float), cudaMemcpyDeviceToHost)) cudaCheck(cudaFree(d_mat)); } int i; for(i = 0;i <n*n; i++){ matrix_org[i] = matrix[i]; } float* inverse = tools_create_identity_matrix(n); float* inverse_matrix_cpu = tools_create_identity_matrix(n); /* Print out test matrix */ if(n == 3){ printf("test Matrix org:\n"); tools_print_matrix(matrix,n); printf("test Matrix:\n"); tools_print_matrix(matrix,n); tools_WAprint(n,matrix); } cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); cudaEventSynchronize(start); gauss_inverse_gpu(matrix, n, inverse); cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); printf("CUDA inverse took ms: %f\n", time); cudaEventDestroy(start); cudaEventDestroy(stop); cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); cudaEventSynchronize(start); //running cpu test first because it has singularity check. //inversion destroys the matrix int succ= gauss_inverse_cpu(matrix, n, inverse_matrix_cpu); cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); printf("CPU inverse took ms: %f\n", time); cudaEventDestroy(start); cudaEventDestroy(stop); if(!succ) { printf("Matrix singular!"); exit(EXIT_SUCCESS); } //restore the matrix for(i = 0;i <n*n; i++){ matrix[i] = matrix_org[i]; } if(!tools_is_equal(inverse,inverse_matrix_cpu,n*n)){ printf("matrixes not equal. printing.\n\n"); printf("gpu matrix \n"); tools_print_matrix(inverse, n); printf("\n\ncpu matrix \n"); tools_print_matrix(inverse_matrix_cpu, n); printf("start matrix\n"); tools_WAprint(n,matrix_org); } else { printf("matrixes equal. all is good. 
\n"); } free(matrix); free(matrix_org); free(inverse); free(inverse_matrix_cpu); } void test_cofactors(int n){ /* float *d_mat, h_mat; float *d_inv, h_inv; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); d_mat = random_matrix_generate(n,100,1); cudaCheck(cudaMalloc((void**)&d_inv, sizeof(float)*n*n)); h_mat=(float*)malloc(sizeof(float)*n*n); h_inv=(float*)malloc(sizeof(float)*n*n); cudaCheck(cudaMemcpy(h_mat, d_mat, n*n * sizeof(float), cudaMemcpyDeviceToHost)); if(n==5){ } */ printf("Not implemented!"); } void test_lu_decomposition(int n) { /* 100MB Matrix (5000x5000) take 4min to invert on GPU (GT 525m, with 96 threads) vs 28 min on a Intel i5 2430m core (= 7 x speedup) */ printf("Mathias test utility!\n"); //test_gpu_pivoting(); float *d_mat, *d_inv, *d_mat2; float *h_mat, *h_inv; printf("\nDoing LU matrix inversion test with n=%d\n",n); d_mat = random_matrix_generate(n,100,1); cudaCheck(cudaMalloc((void**)&d_inv, sizeof(float)*n*n)); cudaCheck(cudaMalloc((void**)&d_mat2, sizeof(float)*n*n)); h_mat=(float*)malloc(sizeof(float)*n*n); h_inv=(float*)malloc(sizeof(float)*n*n); cudaCheck(cudaMemcpy(h_mat, d_mat, n*n * sizeof(float), cudaMemcpyDeviceToHost)); /* Copy random matrix on device */ cudaCheck(cudaMemcpy(d_mat2, d_mat, n*n * sizeof(float), cudaMemcpyDeviceToDevice)); if(lu_dec_matrix_inverse_gpu(d_mat, d_inv, n)==0) { printf("Matrix singular!"); exit(EXIT_SUCCESS); } lu_dec_matrix_inverse_cpu(h_mat,h_inv,n); // Do check if inversion was successfull if(n<=900) { do_complete_check(d_mat,d_mat2, d_inv, h_inv, n); } else{ do_partial_check(d_inv, h_inv, h_mat, d_mat2,n); } cudaCheck(cudaFree(d_mat)); cudaCheck(cudaFree(d_mat2)); cudaCheck(cudaFree(d_inv)); free(h_inv); free(h_mat); cudaProfilerStop(); }
9f0ba42a33e83a5c5ce0919e6c91917461f29057.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 paddlepaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/math/pooling.h" #include "paddle/fluid/platform/cuda_helper.h" namespace paddle { namespace operators { namespace math { template <typename PoolProcess, typename T> __global__ void KernelPool2D(const int nthreads, const T* input_data, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, PoolProcess pool_process, T* output_data) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw = index % output_width; int ph = (index / output_width) % output_height; int c = (index / output_width / output_height) % channels; int batch_idx = index / output_width / output_height / channels; int hstart = ph * stride_height - padding_height; int hend = min(hstart + ksize_height, input_height); hstart = max(hstart, 0); int wstart = pw * stride_width - padding_width; int wend = min(wstart + ksize_width, input_width); wstart = max(wstart, 0); input_data += (batch_idx * channels + c) * input_height * input_width; T ele = pool_process.initial(); for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { 
pool_process.compute(ele, input_data[h * input_width + w]); } } int pool_size = (hend - hstart) * (wend - wstart); pool_process.finalize(ele, (static_cast<T>(pool_size))); output_data[index] = ele; } } template <typename PoolProcess, typename T> __global__ void KernelPool2DGrad( const int nthreads, const T* input_data, const T* output_data, const T* output_grad, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, PoolProcess pool_process, T* input_grad) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int offsetW = index % input_width + padding_width; int offsetH = (index / input_width) % input_height + padding_height; int offsetC = (index / input_width / input_height) % channels; int batch_idx = index / input_width / input_height / channels; int phstart = (offsetH < ksize_height) ? 0 : (offsetH - ksize_height) / stride_height + 1; int pwstart = (offsetW < ksize_width) ? 
0 : (offsetW - ksize_width) / stride_width + 1; int phend = min(offsetH / stride_height + 1, output_height); int pwend = min(offsetW / stride_width + 1, output_width); T gradient = 0; T input = input_data[index]; int output_idx = (batch_idx * channels + offsetC) * output_height * output_width; output_data += output_idx; output_grad += output_idx; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { int hstart = ph * stride_height - padding_height; int wstart = pw * stride_width - padding_width; int hend = min(hstart + ksize_height, input_height); int wend = min(wstart + ksize_width, input_width); hstart = max(hstart, 0); wstart = max(wstart, 0); int pool_size = (hend - hstart) * (wend - wstart); int output_sub_idx = ph * output_width + pw; pool_process.compute(input, output_data[output_sub_idx], output_grad[output_sub_idx], gradient, static_cast<T>(1.0 / pool_size)); } } input_grad[index] = gradient; } } template <typename T> __global__ void KernelMaxPool2DGrad( const int nthreads, const T* input_data, const T* output_data, const T* output_grad, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, T* input_grad) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw = index % output_width; int ph = (index / output_width) % output_height; int c = (index / output_width / output_height) % channels; int batch_idx = index / output_width / output_height / channels; int hstart = ph * stride_height - padding_height; int hend = min(hstart + ksize_height, input_height); hstart = max(hstart, 0); int wstart = pw * stride_width - padding_width; int wend = min(wstart + ksize_width, input_width); wstart = max(wstart, 0); input_data += (batch_idx * channels + c) * input_height * 
input_width; input_grad += (batch_idx * channels + c) * input_height * input_width; T ele = output_data[index]; int maxIndex = -1; bool stop = false; for (int h = hstart; h < hend && !stop; ++h) { for (int w = wstart; w < wend && !stop; ++w) { if (ele == input_data[h * input_width + w]) { maxIndex = h * input_width + w; stop = true; } } } if (maxIndex != -1) { // atomic add platform::CudaAtomicAdd(input_grad + maxIndex, output_grad[index]); } } } /* * All tensors are in NCHW format. * Ksize, strides, paddings are two elements. These two elements represent * height and width, respectively. */ template <typename PoolProcess, typename T> class Pool2dFunctor<platform::CUDADeviceContext, PoolProcess, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, std::vector<int>& ksize, std::vector<int>& strides, std::vector<int>& paddings, PoolProcess pool_process, framework::Tensor* output) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_channels = output->dims()[1]; const int output_height = output->dims()[2]; const int output_width = output->dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* input_data = input.data<T>(); T* output_data = output->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelPool2D<PoolProcess, T>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, input_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, 
padding_height, padding_width, pool_process, output_data); } }; /* * All tensors are in NCHW format. * Ksize, strides, paddings are two elements. These two elements represent * height and width, respectively. */ template <typename PoolProcess, typename T> class Pool2dGradFunctor<platform::CUDADeviceContext, PoolProcess, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, std::vector<int>& ksize, std::vector<int>& strides, std::vector<int>& paddings, PoolProcess pool_process, framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_height = output.dims()[2]; const int output_width = output.dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * input_channels * input_height * input_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelPool2DGrad<PoolProcess, T>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, input_data, output_data, output_grad_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, pool_process, input_grad_data); } }; /* * All tensors are in NCHW format. * Ksize, strides, paddings are two elements. These two elements represent * height and width, respectively. 
*/ template <typename T> class MaxPool2dGradFunctor<platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, std::vector<int>& ksize, std::vector<int>& strides, std::vector<int>& paddings, framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_channels = output.dims()[1]; const int output_height = output.dims()[2]; const int output_width = output.dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelMaxPool2DGrad<T>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, input_data, output_data, output_grad_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, input_grad_data); } }; template class MaxPool2dGradFunctor<platform::CUDADeviceContext, float>; template class MaxPool2dGradFunctor<platform::CUDADeviceContext, double>; template class Pool2dFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPool<float>, float>; template class Pool2dFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPool<float>, float>; template class 
Pool2dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPoolGrad<float>, float>; template class Pool2dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPoolGrad<float>, float>; template class Pool2dFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPool<double>, double>; template class Pool2dFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPool<double>, double>; template class Pool2dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPoolGrad<double>, double>; template class Pool2dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPoolGrad<double>, double>; template <typename PoolProcess, typename T> __global__ void KernelPool3D(const int nthreads, const T* input_data, const int channels, const int input_depth, const int input_height, const int input_width, const int output_depth, const int output_height, const int output_width, const int ksize_depth, const int ksize_height, const int ksize_width, const int stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width, PoolProcess pool_process, T* output_data) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw = index % output_width; int ph = (index / output_width) % output_height; int pd = (index / output_width / output_height) % output_depth; int c = (index / output_width / output_height / output_depth) % channels; int batch_idx = index / output_width / output_height / output_depth / channels; int dstart = pd * stride_depth - padding_depth; int hstart = ph * stride_height - padding_height; int wstart = pw * stride_width - padding_width; int dend = min(dstart + ksize_depth, input_depth); int hend = min(hstart + ksize_height, input_height); int wend = min(wstart + ksize_width, input_width); dstart = max(dstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); T ele = 
pool_process.initial(); input_data += (batch_idx * channels + c) * input_depth * input_height * input_width; for (int d = dstart; d < dend; ++d) { for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { pool_process.compute( ele, input_data[(d * input_height + h) * input_width + w]); } } } int pool_size = (dend - dstart) * (hend - hstart) * (wend - wstart); pool_process.finalize(ele, static_cast<T>(pool_size)); output_data[index] = ele; } } template <typename PoolProcess, typename T> __global__ void KernelPool3DGrad( const int nthreads, const T* input_data, const T* output_data, const T* output_grad, const int channels, const int input_depth, const int input_height, const int input_width, const int output_depth, const int output_height, const int output_width, const int ksize_depth, const int ksize_height, const int ksize_width, const int stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width, PoolProcess pool_process, T* input_grad) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int offsetW = index % input_width + padding_width; int offsetH = (index / input_width) % input_height + padding_height; int offsetD = (index / input_width / input_height) % input_depth + padding_depth; int offsetC = (index / input_width / input_height / input_depth) % channels; int batch_idx = index / input_width / input_height / input_depth / channels; int pdstart = (offsetD < ksize_depth) ? 0 : (offsetD - ksize_depth) / stride_depth + 1; int phstart = (offsetH < ksize_height) ? 0 : (offsetH - ksize_height) / stride_height + 1; int pwstart = (offsetW < ksize_width) ? 
0 : (offsetW - ksize_width) / stride_width + 1; int pdend = min((offsetD) / stride_depth + 1, output_depth); int phend = min((offsetH) / stride_height + 1, output_height); int pwend = min((offsetW) / stride_width + 1, output_width); T gradient = 0; T input = input_data[index]; int output_idx = (batch_idx * channels + offsetC) * output_depth * output_height * output_width; output_data += output_idx; output_grad += output_idx; for (int pd = pdstart; pd < pdend; ++pd) { for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int dstart = pd * stride_depth - padding_depth; int hstart = ph * stride_height - padding_height; int wstart = pw * stride_width - padding_width; int dend = min(dstart + ksize_depth, input_depth); int hend = min(hstart + ksize_height, input_height); int wend = min(wstart + ksize_width, input_width); dstart = max(dstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); int pool_size = (dend - dstart) * (hend - hstart) * (wend - wstart); int output_sub_idx = (pd * output_height + ph) * output_width + pw; pool_process.compute(input, output_data[output_sub_idx], output_grad[output_sub_idx], gradient, static_cast<T>(1.0 / pool_size)); } } } input_grad[index] = gradient; } } template <typename T> __global__ void KernelMaxPool3DGrad( const int nthreads, const T* input_data, const T* output_data, const T* output_grad, const int channels, const int input_depth, const int input_height, const int input_width, const int output_depth, const int output_height, const int output_width, const int ksize_depth, const int ksize_height, const int ksize_width, const int stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width, T* input_grad) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw = index % output_width; int ph = (index / output_width) % 
output_height; int pd = (index / output_width / output_height) % output_depth; int c = (index / output_width / output_height / output_depth) % channels; int batch_idx = index / output_width / output_height / output_depth / channels; int dstart = pd * stride_depth - padding_depth; int hstart = ph * stride_height - padding_height; int wstart = pw * stride_width - padding_width; int dend = min(dstart + ksize_depth, input_depth); int hend = min(hstart + ksize_height, input_height); int wend = min(wstart + ksize_width, input_width); dstart = max(dstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); T ele = output_data[index]; bool stop = false; int maxIdx = -1; input_data += (batch_idx * channels + c) * input_depth * input_height * input_width; input_grad += (batch_idx * channels + c) * input_depth * input_height * input_width; for (int d = dstart; d < dend && !stop; ++d) { for (int h = hstart; h < hend && !stop; ++h) { for (int w = wstart; w < wend && !stop; ++w) { if (ele == input_data[(d * input_height + h) * input_width + w]) { stop = true; maxIdx = (d * input_height + h) * input_width + w; } } } } if (maxIdx != -1) { // atomic add platform::CudaAtomicAdd(input_grad + maxIdx, output_grad[index]); } } } /* * All tensors are in NCDHW format. * Ksize, strides, paddings are three elements. These three elements represent * depth, height and width, respectively. 
*/ template <typename PoolProcess, class T> class Pool3dFunctor<platform::CUDADeviceContext, PoolProcess, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, std::vector<int>& ksize, std::vector<int>& strides, std::vector<int>& paddings, PoolProcess pool_process, framework::Tensor* output) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; const int input_width = input.dims()[4]; const int output_channels = output->dims()[1]; const int output_depth = output->dims()[2]; const int output_height = output->dims()[3]; const int output_width = output->dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T* input_data = input.data<T>(); T* output_data = output->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_depth * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelPool3D<PoolProcess, T>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, input_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, pool_process, output_data); } }; /* * All tensors are in NCDHW format. * Ksize, strides, paddings are three elements. These three elements represent * depth, height and width, respectively. 
*/ template <typename PoolProcess, class T> class Pool3dGradFunctor<platform::CUDADeviceContext, PoolProcess, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, std::vector<int>& ksize, std::vector<int>& strides, std::vector<int>& paddings, PoolProcess pool_process, framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; const int input_width = input.dims()[4]; const int output_channels = output.dims()[1]; const int output_depth = output.dims()[2]; const int output_height = output.dims()[3]; const int output_width = output.dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * input_channels * input_depth * input_height * input_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelPool3DGrad<PoolProcess, T>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, input_data, output_data, output_grad_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, pool_process, input_grad_data); } }; /* * All tensors are in NCDHW format. 
* Ksize, strides, paddings are three elements. These three elements represent * depth, height and width, respectively. */ template <class T> class MaxPool3dGradFunctor<platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, std::vector<int>& ksize, std::vector<int>& strides, std::vector<int>& paddings, framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; const int input_width = input.dims()[4]; const int output_channels = output.dims()[1]; const int output_depth = output.dims()[2]; const int output_height = output.dims()[3]; const int output_width = output.dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_depth * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelMaxPool3DGrad<T>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, input_data, output_data, output_grad_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, input_grad_data); } }; template class 
MaxPool3dGradFunctor<platform::CUDADeviceContext, float>; template class MaxPool3dGradFunctor<platform::CUDADeviceContext, double>; template class Pool3dFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPool<float>, float>; template class Pool3dFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPool<float>, float>; template class Pool3dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPoolGrad<float>, float>; template class Pool3dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPoolGrad<float>, float>; template class Pool3dFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPool<double>, double>; template class Pool3dFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPool<double>, double>; template class Pool3dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPoolGrad<double>, double>; template class Pool3dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPoolGrad<double>, double>; template <typename T1, typename T2> __global__ void KernelMaxPool2dWithIdx( const int nthreads, const T1* input_data, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, T1* output_data, T2* mask_data) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw = index % output_width; int ph = (index / output_width) % output_height; int c = (index / output_width / output_height) % channels; int batch_idx = index / output_width / output_height / channels; int hstart = ph * stride_height - padding_height; int hend = min(hstart + ksize_height, input_height); hstart = max(hstart, 0); int wstart = pw * stride_width - padding_width; int wend = min(wstart + ksize_width, input_width); wstart = 
max(wstart, 0); input_data += (batch_idx * channels + c) * input_height * input_width; T1 ele = -FLT_MAX; int max_index = -1; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int input_index = h * input_width + w; if (ele < input_data[input_index]) { max_index = input_index; ele = input_data[input_index]; } } } output_data[index] = ele; mask_data[index] = max_index; } } template <typename T1, typename T2> __global__ void KernelMaxPool2DWithIdxGrad( const int nthreads, const T1* output_grad, const T2* mask_data, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, T1* input_grad) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int w_offset = index % input_width; int h_offset = (index / input_width) % input_height; int c_offset = (index / input_width / input_height) % channels; int batch_idx = index / input_width / input_height / channels; int ph_start = (h_offset + padding_height < ksize_height) ? 0 : (h_offset + padding_height - ksize_height) / stride_height + 1; int pw_start = (w_offset + padding_width < ksize_width) ? 
0 : (w_offset + padding_width - ksize_width) / stride_width + 1; int ph_end = min((h_offset + padding_height) / stride_height + 1, output_height); int pw_end = min((w_offset + padding_width) / stride_width + 1, output_width); T1 gradient = 0; int input_current_featuremap_idx = h_offset * input_width + w_offset; int output_idx = (batch_idx * channels + c_offset) * output_height * output_width; mask_data += output_idx; output_grad += output_idx; for (int ph = ph_start; ph < ph_end; ++ph) { for (int pw = pw_start; pw < pw_end; ++pw) { if (mask_data[ph * output_width + pw] == input_current_featuremap_idx) gradient += output_grad[ph * output_width + pw]; } } input_grad[index] = gradient; } } /* * All tensors are in NCHW format. * Ksize, strides, paddings are two elements. These two elements represent * height and width, respectively. */ template <typename T1, typename T2> class MaxPool2dWithIndexFunctor<platform::CUDADeviceContext, T1, T2> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, std::vector<int>& ksize, std::vector<int>& strides, std::vector<int>& paddings, framework::Tensor* output, framework::Tensor* mask) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_channels = output->dims()[1]; const int output_height = output->dims()[2]; const int output_width = output->dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T1* input_data = input.data<T1>(); T1* output_data = output->mutable_data<T1>(context.GetPlace()); T2* mask_data = mask->mutable_data<T2>(context.GetPlace()); int nthreads = batch_size * output_channels * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 
threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelMaxPool2dWithIdx<T1, T2>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, input_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, output_data, mask_data); } }; /* * All tensors are in NCHW format. * Ksize, strides, paddings are two elements. These two elements represent * height and width, respectively. */ template <typename T1, typename T2> class MaxPool2dWithIndexGradFunctor<platform::CUDADeviceContext, T1, T2> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& output_grad, const framework::Tensor& mask, std::vector<int>& ksize, std::vector<int>& strides, std::vector<int>& paddings, framework::Tensor* input_grad) { const int batch_size = input_grad->dims()[0]; const int input_channels = input_grad->dims()[1]; const int input_height = input_grad->dims()[2]; const int input_width = input_grad->dims()[3]; const int output_height = output_grad.dims()[2]; const int output_width = output_grad.dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T2* mask_data = mask.data<T2>(); const T1* output_grad_data = output_grad.data<T1>(); T1* input_grad_data = input_grad->mutable_data<T1>(context.GetPlace()); int nthreads = batch_size * input_channels * input_height * input_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelMaxPool2DWithIdxGrad<T1, T2>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, output_grad_data, mask_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, 
padding_width, input_grad_data); } };

template class MaxPool2dWithIndexFunctor<platform::CUDADeviceContext, float, int>;
template class MaxPool2dWithIndexGradFunctor<platform::CUDADeviceContext, float, int>;
template class MaxPool2dWithIndexFunctor<platform::CUDADeviceContext, double, int>;
template class MaxPool2dWithIndexGradFunctor<platform::CUDADeviceContext, double, int>;

// 3D max-pooling forward that also records the argmax position of every
// output element. One thread per output element, grid-stride loop. T1 is the
// element type, T2 the integer type written to mask_data. The recorded index
// is relative to the start of this (batch, channel) feature map, matching
// what KernelMaxPool3DWithIdxGrad compares against.
// NOTE(review): `ele` is seeded with -FLT_MAX even when T1 is double, so the
// search only covers the float range — confirm this matches upstream intent.
template <typename T1, typename T2>
__global__ void KernelMaxPool3DWithIdx(
    const int nthreads, const T1* input_data, const int channels,
    const int input_depth, const int input_height, const int input_width,
    const int output_depth, const int output_height, const int output_width,
    const int ksize_depth, const int ksize_height, const int ksize_width,
    const int stride_depth, const int stride_height, const int stride_width,
    const int padding_depth, const int padding_height,
    const int padding_width, T1* output_data, T2* mask_data) {
  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
       index += blockDim.x * gridDim.x) {
    // Decompose the flat index into (batch, channel, pd, ph, pw).
    int pw = index % output_width;
    int ph = (index / output_width) % output_height;
    int pd = (index / output_width / output_height) % output_depth;
    int c = (index / output_width / output_height / output_depth) % channels;
    int batch_idx =
        index / output_width / output_height / output_depth / channels;

    // Pooling window in input coordinates, clipped to the valid region
    // (padding area is excluded from the scan).
    int dstart = pd * stride_depth - padding_depth;
    int hstart = ph * stride_height - padding_height;
    int wstart = pw * stride_width - padding_width;
    int dend = min(dstart + ksize_depth, input_depth);
    int hend = min(hstart + ksize_height, input_height);
    int wend = min(wstart + ksize_width, input_width);
    dstart = max(dstart, 0);
    hstart = max(hstart, 0);
    wstart = max(wstart, 0);

    T1 ele = -FLT_MAX;
    int max_index = -1;
    // Advance to the start of this (batch, channel) feature map; all indices
    // below are relative to it.
    input_data +=
        (batch_idx * channels + c) * input_depth * input_height * input_width;
    for (int d = dstart; d < dend; ++d) {
      for (int h = hstart; h < hend; ++h) {
        for (int w = wstart; w < wend; ++w) {
          if (ele < input_data[(d * input_height + h) * input_width + w]) {
            max_index = (d * input_height + h) * input_width + w;
            ele = input_data[max_index];
          }
        }
      }
    }
    output_data[index] = ele;
    mask_data[index] = max_index;
  }
}

// Backward of KernelMaxPool3DWithIdx. One thread per *input* element: each
// thread enumerates the output positions whose pooling window could contain
// this input element and sums the gradients of those outputs whose recorded
// argmax equals this element's feature-map offset. No atomics are needed
// because each input element is written by exactly one thread.
template <typename T1, typename T2>
__global__ void KernelMaxPool3DWithIdxGrad(
    const int nthreads, const T1* output_grad, const T2* mask,
    const int channels, const int input_depth, const int input_height,
    const int input_width, const int output_depth, const int output_height,
    const int output_width, const int ksize_depth, const int ksize_height,
    const int ksize_width, const int stride_depth, const int stride_height,
    const int stride_width, const int padding_depth, const int padding_height,
    const int padding_width, T1* input_grad) {
  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
       index += blockDim.x * gridDim.x) {
    // Decompose the flat index into (batch, channel, d, h, w) of the input.
    int w_offset = index % input_width;
    int h_offset = (index / input_width) % input_height;
    int d_offset = (index / input_width / input_height) % input_depth;
    int c_offset =
        (index / input_width / input_height / input_depth) % channels;
    int batch_idx =
        index / input_width / input_height / input_depth / channels;

    // First/last output position along each axis whose window covers this
    // input element (end bounds are exclusive).
    int pd_start =
        (d_offset + padding_depth < ksize_depth)
            ? 0
            : (d_offset + padding_depth - ksize_depth) / stride_depth + 1;
    int ph_start =
        (h_offset + padding_height < ksize_height)
            ? 0
            : (h_offset + padding_height - ksize_height) / stride_height + 1;
    int pw_start =
        (w_offset + padding_width < ksize_width)
            ? 0
            : (w_offset + padding_width - ksize_width) / stride_width + 1;
    int pd_end =
        min((d_offset + padding_depth) / stride_depth + 1, output_depth);
    int ph_end =
        min((h_offset + padding_height) / stride_height + 1, output_height);
    int pw_end =
        min((w_offset + padding_width) / stride_width + 1, output_width);

    T1 gradient = 0;
    // Feature-map-relative offset of this input element, the value stored by
    // the forward kernel in `mask`.
    int input_current_feature_map_idx =
        (d_offset * input_height + h_offset) * input_width + w_offset;
    int output_idx = (batch_idx * channels + c_offset) * output_depth *
                     output_height * output_width;
    // Rebase both pointers onto this (batch, channel) output feature map.
    mask += output_idx;
    output_grad += output_idx;
    for (int pd = pd_start; pd < pd_end; ++pd) {
      for (int ph = ph_start; ph < ph_end; ++ph) {
        for (int pw = pw_start; pw < pw_end; ++pw) {
          if (mask[(pd * output_height + ph) * output_width + pw] ==
              input_current_feature_map_idx)
            gradient +=
                output_grad[(pd * output_height + ph) * output_width + pw];
        }
      }
    }
    input_grad[index] = gradient;
  }
}

/*
 * All tensors are in NCDHW format.
 * Ksize, strides, paddings are three elements. These three elements represent
 * depth, height and width, respectively.
*/ template <typename T1, typename T2> class MaxPool3dWithIndexFunctor<platform::CUDADeviceContext, T1, T2> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, std::vector<int>& ksize, std::vector<int>& strides, std::vector<int>& paddings, framework::Tensor* output, framework::Tensor* mask) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; const int input_width = input.dims()[4]; const int output_channels = output->dims()[1]; const int output_depth = output->dims()[2]; const int output_height = output->dims()[3]; const int output_width = output->dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T1* input_data = input.data<T1>(); T1* output_data = output->mutable_data<T1>(context.GetPlace()); T2* mask_data = mask->mutable_data<T2>(context.GetPlace()); int nthreads = batch_size * output_channels * output_depth * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelMaxPool3DWithIdx<T1, T2>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, input_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, output_data, mask_data); } }; /* * All tensors are in NCDHW format. * Ksize, strides, paddings are three elements. These three elements represent * depth, height and width, respectively. 
*/ template <typename T1, typename T2> class MaxPool3dWithIndexGradFunctor<platform::CUDADeviceContext, T1, T2> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& output_grad, const framework::Tensor& mask, std::vector<int>& ksize, std::vector<int>& strides, std::vector<int>& paddings, framework::Tensor* input_grad) { const int batch_size = input_grad->dims()[0]; const int input_channels = input_grad->dims()[1]; const int input_depth = input_grad->dims()[2]; const int input_height = input_grad->dims()[3]; const int input_width = input_grad->dims()[4]; const int output_depth = output_grad.dims()[2]; const int output_height = output_grad.dims()[3]; const int output_width = output_grad.dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T1* output_grad_data = output_grad.data<T1>(); const T2* mask_data = mask.data<T2>(); T1* input_grad_data = input_grad->mutable_data<T1>(context.GetPlace()); int nthreads = batch_size * input_channels * input_depth * input_height * input_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelMaxPool3DWithIdxGrad<T1, T2>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, output_grad_data, mask_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, input_grad_data); } }; template class MaxPool3dWithIndexFunctor<platform::CUDADeviceContext, float, int>; template class MaxPool3dWithIndexGradFunctor<platform::CUDADeviceContext, float, int>; template class 
MaxPool3dWithIndexFunctor<platform::CUDADeviceContext, double, int>; template class MaxPool3dWithIndexGradFunctor<platform::CUDADeviceContext, double, int>; } // namespace math } // namespace operators } // namespace paddle
9f0ba42a33e83a5c5ce0919e6c91917461f29057.cu
/* Copyright (c) 2016 paddlepaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */

#include "paddle/fluid/operators/math/pooling.h"
#include "paddle/fluid/platform/cuda_helper.h"

namespace paddle {
namespace operators {
namespace math {

// Generic 2D pooling forward. PoolProcess supplies the reduction: initial()
// seeds the accumulator, compute() folds in one input value, finalize()
// post-processes with the window size (e.g. divides for average pooling).
// One thread per output element, grid-stride loop over NCHW output.
template <typename PoolProcess, typename T>
__global__ void KernelPool2D(const int nthreads, const T* input_data,
                             const int channels, const int input_height,
                             const int input_width, const int output_height,
                             const int output_width, const int ksize_height,
                             const int ksize_width, const int stride_height,
                             const int stride_width, const int padding_height,
                             const int padding_width, PoolProcess pool_process,
                             T* output_data) {
  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
       index += blockDim.x * gridDim.x) {
    // Decompose the flat index into (batch, channel, ph, pw).
    int pw = index % output_width;
    int ph = (index / output_width) % output_height;
    int c = (index / output_width / output_height) % channels;
    int batch_idx = index / output_width / output_height / channels;

    // Pooling window clipped to the valid input region (padding excluded).
    int hstart = ph * stride_height - padding_height;
    int hend = min(hstart + ksize_height, input_height);
    hstart = max(hstart, 0);
    int wstart = pw * stride_width - padding_width;
    int wend = min(wstart + ksize_width, input_width);
    wstart = max(wstart, 0);

    // Rebase onto this (batch, channel) feature map.
    input_data += (batch_idx * channels + c) * input_height * input_width;
    T ele = pool_process.initial();
    for (int h = hstart; h < hend; ++h) {
      for (int w = wstart; w < wend; ++w) {
        pool_process.compute(ele, input_data[h * input_width + w]);
      }
    }
    // pool_size counts only the in-bounds part of the window.
    int pool_size = (hend - hstart) * (wend - wstart);
    pool_process.finalize(ele, (static_cast<T>(pool_size)));
    output_data[index] = ele;
  }
}

// Generic 2D pooling backward. One thread per *input* element: it visits all
// output windows covering this element and lets PoolProcess::compute
// accumulate that output's gradient contribution (scaled by 1/pool_size).
// Offsets include padding so the start/end window math stays non-negative.
template <typename PoolProcess, typename T>
__global__ void KernelPool2DGrad(
    const int nthreads, const T* input_data, const T* output_data,
    const T* output_grad, const int channels, const int input_height,
    const int input_width, const int output_height, const int output_width,
    const int ksize_height, const int ksize_width, const int stride_height,
    const int stride_width, const int padding_height, const int padding_width,
    PoolProcess pool_process, T* input_grad) {
  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
       index += blockDim.x * gridDim.x) {
    // Input coordinates shifted into padded space.
    int offsetW = index % input_width + padding_width;
    int offsetH = (index / input_width) % input_height + padding_height;
    int offsetC = (index / input_width / input_height) % channels;
    int batch_idx = index / input_width / input_height / channels;

    // Range of output positions whose window covers this input element
    // (end bounds exclusive).
    int phstart = (offsetH < ksize_height)
                      ? 0
                      : (offsetH - ksize_height) / stride_height + 1;
    int pwstart = (offsetW < ksize_width)
                      ? 0
                      : (offsetW - ksize_width) / stride_width + 1;
    int phend = min(offsetH / stride_height + 1, output_height);
    int pwend = min(offsetW / stride_width + 1, output_width);

    T gradient = 0;
    T input = input_data[index];
    int output_idx =
        (batch_idx * channels + offsetC) * output_height * output_width;
    // Rebase both pointers onto this (batch, channel) output feature map.
    output_data += output_idx;
    output_grad += output_idx;
    for (int ph = phstart; ph < phend; ++ph) {
      for (int pw = pwstart; pw < pwend; ++pw) {
        // Recompute this window's clipped size for the 1/pool_size scale.
        int hstart = ph * stride_height - padding_height;
        int wstart = pw * stride_width - padding_width;
        int hend = min(hstart + ksize_height, input_height);
        int wend = min(wstart + ksize_width, input_width);
        hstart = max(hstart, 0);
        wstart = max(wstart, 0);
        int pool_size = (hend - hstart) * (wend - wstart);
        int output_sub_idx = ph * output_width + pw;
        pool_process.compute(input, output_data[output_sub_idx],
                             output_grad[output_sub_idx], gradient,
                             static_cast<T>(1.0 / pool_size));
      }
    }
    input_grad[index] = gradient;
  }
}

// Max-pooling 2D backward without a stored argmax mask. One thread per
// *output* element re-scans its window for the first input equal to the
// output value, then routes the gradient there. Multiple outputs may select
// the same input element, hence the atomic accumulation.
template <typename T>
__global__ void KernelMaxPool2DGrad(
    const int nthreads, const T* input_data, const T* output_data,
    const T* output_grad, const int channels, const int input_height,
    const int input_width, const int output_height, const int output_width,
    const int ksize_height, const int ksize_width, const int stride_height,
    const int stride_width, const int padding_height, const int padding_width,
    T* input_grad) {
  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
       index += blockDim.x * gridDim.x) {
    int pw = index % output_width;
    int ph = (index / output_width) % output_height;
    int c = (index / output_width / output_height) % channels;
    int batch_idx = index / output_width / output_height / channels;

    int hstart = ph * stride_height - padding_height;
    int hend = min(hstart + ksize_height, input_height);
    hstart = max(hstart, 0);
    int wstart = pw * stride_width - padding_width;
    int wend = min(wstart + ksize_width, input_width);
    wstart = max(wstart, 0);

    input_data += (batch_idx * channels + c) * input_height * input_width;
    input_grad += (batch_idx * channels + c) * input_height * input_width;

    T ele = output_data[index];
    int maxIndex = -1;
    bool stop = false;
    // Find the first window element that equals the pooled output value.
    for (int h = hstart; h < hend && !stop; ++h) {
      for (int w = wstart; w < wend && !stop; ++w) {
        if (ele == input_data[h * input_width + w]) {
          maxIndex = h * input_width + w;
          stop = true;
        }
      }
    }

    if (maxIndex != -1) {
      // atomic add: overlapping windows can pick the same input element.
      platform::CudaAtomicAdd(input_grad + maxIndex, output_grad[index]);
    }
  }
}

/*
 * All tensors are in NCHW format.
 * Ksize, strides, paddings are two elements. These two elements represent
 * height and width, respectively.
 */
// Host-side launcher for KernelPool2D: one thread per output element,
// 1024 threads per block, on the context's stream.
template <typename PoolProcess, typename T>
class Pool2dFunctor<platform::CUDADeviceContext, PoolProcess, T> {
 public:
  void operator()(const platform::CUDADeviceContext& context,
                  const framework::Tensor& input, std::vector<int>& ksize,
                  std::vector<int>& strides, std::vector<int>& paddings,
                  PoolProcess pool_process, framework::Tensor* output) {
    const int batch_size = input.dims()[0];
    const int input_channels = input.dims()[1];
    const int input_height = input.dims()[2];
    const int input_width = input.dims()[3];
    const int output_channels = output->dims()[1];
    const int output_height = output->dims()[2];
    const int output_width = output->dims()[3];
    const int ksize_height = ksize[0];
    const int ksize_width = ksize[1];
    const int stride_height = strides[0];
    const int stride_width = strides[1];
    const int padding_height = paddings[0];
    const int padding_width = paddings[1];

    const T* input_data = input.data<T>();
    T* output_data = output->mutable_data<T>(context.GetPlace());

    int nthreads = batch_size * output_channels * output_height * output_width;
    int blocks = (nthreads + 1024 - 1) / 1024;
    dim3 threads(1024, 1);
    dim3 grid(blocks, 1);

    KernelPool2D<PoolProcess, T><<<grid, threads, 0, context.stream()>>>(
        nthreads, input_data, input_channels, input_height, input_width,
        output_height, output_width, ksize_height, ksize_width, stride_height,
        stride_width, padding_height, padding_width, pool_process,
        output_data);
  }
};

/*
 * All tensors are in NCHW format.
 * Ksize, strides, paddings are two elements. These two elements represent
 * height and width, respectively.
 */
// Host-side launcher for KernelPool2DGrad: one thread per *input* element,
// 1024 threads per block, on the context's stream.
template <typename PoolProcess, typename T>
class Pool2dGradFunctor<platform::CUDADeviceContext, PoolProcess, T> {
 public:
  void operator()(const platform::CUDADeviceContext& context,
                  const framework::Tensor& input,
                  const framework::Tensor& output,
                  const framework::Tensor& output_grad,
                  std::vector<int>& ksize, std::vector<int>& strides,
                  std::vector<int>& paddings, PoolProcess pool_process,
                  framework::Tensor* input_grad) {
    const int batch_size = input.dims()[0];
    const int input_channels = input.dims()[1];
    const int input_height = input.dims()[2];
    const int input_width = input.dims()[3];
    const int output_height = output.dims()[2];
    const int output_width = output.dims()[3];
    const int ksize_height = ksize[0];
    const int ksize_width = ksize[1];
    const int stride_height = strides[0];
    const int stride_width = strides[1];
    const int padding_height = paddings[0];
    const int padding_width = paddings[1];

    const T* input_data = input.data<T>();
    const T* output_data = output.data<T>();
    const T* output_grad_data = output_grad.data<T>();
    T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace());

    int nthreads = batch_size * input_channels * input_height * input_width;
    int blocks = (nthreads + 1024 - 1) / 1024;
    dim3 threads(1024, 1);
    dim3 grid(blocks, 1);

    KernelPool2DGrad<PoolProcess, T><<<grid, threads, 0, context.stream()>>>(
        nthreads, input_data, output_data, output_grad_data, input_channels,
        input_height, input_width, output_height, output_width, ksize_height,
        ksize_width, stride_height, stride_width, padding_height,
        padding_width, pool_process, input_grad_data);
  }
};

/*
 * All tensors are in NCHW format.
 * Ksize, strides, paddings are two elements. These two elements represent
 * height and width, respectively.
 */
// Host-side launcher for KernelMaxPool2DGrad: one thread per *output*
// element (the kernel scatters into input_grad with atomics), 1024 threads
// per block, on the context's stream.
template <typename T>
class MaxPool2dGradFunctor<platform::CUDADeviceContext, T> {
 public:
  void operator()(const platform::CUDADeviceContext& context,
                  const framework::Tensor& input,
                  const framework::Tensor& output,
                  const framework::Tensor& output_grad,
                  std::vector<int>& ksize, std::vector<int>& strides,
                  std::vector<int>& paddings, framework::Tensor* input_grad) {
    const int batch_size = input.dims()[0];
    const int input_channels = input.dims()[1];
    const int input_height = input.dims()[2];
    const int input_width = input.dims()[3];
    const int output_channels = output.dims()[1];
    const int output_height = output.dims()[2];
    const int output_width = output.dims()[3];
    const int ksize_height = ksize[0];
    const int ksize_width = ksize[1];
    const int stride_height = strides[0];
    const int stride_width = strides[1];
    const int padding_height = paddings[0];
    const int padding_width = paddings[1];

    const T* input_data = input.data<T>();
    const T* output_data = output.data<T>();
    const T* output_grad_data = output_grad.data<T>();
    T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace());

    // NOTE: one thread per output element here (unlike Pool2dGradFunctor,
    // which launches per input element).
    int nthreads = batch_size * output_channels * output_height * output_width;
    int blocks = (nthreads + 1024 - 1) / 1024;
    dim3 threads(1024, 1);
    dim3 grid(blocks, 1);

    KernelMaxPool2DGrad<T><<<grid, threads, 0, context.stream()>>>(
        nthreads, input_data, output_data, output_grad_data, input_channels,
        input_height, input_width, output_height, output_width, ksize_height,
        ksize_width, stride_height, stride_width, padding_height,
        padding_width, input_grad_data);
  }
};

template class MaxPool2dGradFunctor<platform::CUDADeviceContext, float>;
template class MaxPool2dGradFunctor<platform::CUDADeviceContext, double>;

template class Pool2dFunctor<platform::CUDADeviceContext,
                             paddle::operators::math::MaxPool<float>, float>;
template class Pool2dFunctor<platform::CUDADeviceContext,
                             paddle::operators::math::AvgPool<float>, float>;
template class Pool2dGradFunctor<platform::CUDADeviceContext,
                                 paddle::operators::math::MaxPoolGrad<float>,
                                 float>;
template class Pool2dGradFunctor<platform::CUDADeviceContext,
                                 paddle::operators::math::AvgPoolGrad<float>,
                                 float>;
template class Pool2dFunctor<platform::CUDADeviceContext,
                             paddle::operators::math::MaxPool<double>, double>;
template class Pool2dFunctor<platform::CUDADeviceContext,
                             paddle::operators::math::AvgPool<double>, double>;
template class Pool2dGradFunctor<platform::CUDADeviceContext,
                                 paddle::operators::math::MaxPoolGrad<double>,
                                 double>;
template class Pool2dGradFunctor<platform::CUDADeviceContext,
                                 paddle::operators::math::AvgPoolGrad<double>,
                                 double>;

// Generic 3D pooling forward; 3D analogue of KernelPool2D. One thread per
// output element of the NCDHW output, grid-stride loop. PoolProcess supplies
// initial() / compute() / finalize() as in the 2D kernel.
template <typename PoolProcess, typename T>
__global__ void KernelPool3D(const int nthreads, const T* input_data,
                             const int channels, const int input_depth,
                             const int input_height, const int input_width,
                             const int output_depth, const int output_height,
                             const int output_width, const int ksize_depth,
                             const int ksize_height, const int ksize_width,
                             const int stride_depth, const int stride_height,
                             const int stride_width, const int padding_depth,
                             const int padding_height, const int padding_width,
                             PoolProcess pool_process, T* output_data) {
  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
       index += blockDim.x * gridDim.x) {
    // Decompose the flat index into (batch, channel, pd, ph, pw).
    int pw = index % output_width;
    int ph = (index / output_width) % output_height;
    int pd = (index / output_width / output_height) % output_depth;
    int c = (index / output_width / output_height / output_depth) % channels;
    int batch_idx =
        index / output_width / output_height / output_depth / channels;

    // Pooling window clipped to the valid input region.
    int dstart = pd * stride_depth - padding_depth;
    int hstart = ph * stride_height - padding_height;
    int wstart = pw * stride_width - padding_width;
    int dend = min(dstart + ksize_depth, input_depth);
    int hend = min(hstart + ksize_height, input_height);
    int wend = min(wstart + ksize_width, input_width);
    dstart = max(dstart, 0);
    hstart = max(hstart, 0);
    wstart = max(wstart, 0);

    T ele = pool_process.initial();
    input_data +=
        (batch_idx * channels + c) * input_depth * input_height * input_width;
    for (int d = dstart; d < dend; ++d) {
      for (int h = hstart; h < hend; ++h) {
        for (int w = wstart; w < wend; ++w) {
          pool_process.compute(
              ele, input_data[(d * input_height + h) * input_width + w]);
        }
      }
    }
    int pool_size = (dend - dstart) * (hend - hstart) * (wend - wstart);
    pool_process.finalize(ele, static_cast<T>(pool_size));
    output_data[index] = ele;
  }
}

// Generic 3D pooling backward; 3D analogue of KernelPool2DGrad. One thread
// per *input* element, which visits every covering output window and folds
// that output's gradient in via PoolProcess::compute (scaled by 1/pool_size).
template <typename PoolProcess, typename T>
__global__ void KernelPool3DGrad(
    const int nthreads, const T* input_data, const T* output_data,
    const T* output_grad, const int channels, const int input_depth,
    const int input_height, const int input_width, const int output_depth,
    const int output_height, const int output_width, const int ksize_depth,
    const int ksize_height, const int ksize_width, const int stride_depth,
    const int stride_height, const int stride_width, const int padding_depth,
    const int padding_height, const int padding_width,
    PoolProcess pool_process, T* input_grad) {
  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
       index += blockDim.x * gridDim.x) {
    // Input coordinates shifted into padded space.
    int offsetW = index % input_width + padding_width;
    int offsetH = (index / input_width) % input_height + padding_height;
    int offsetD =
        (index / input_width / input_height) % input_depth + padding_depth;
    int offsetC = (index / input_width / input_height / input_depth) % channels;
    int batch_idx = index / input_width / input_height / input_depth / channels;

    // Range of output positions whose window covers this input element.
    int pdstart = (offsetD < ksize_depth)
                      ? 0
                      : (offsetD - ksize_depth) / stride_depth + 1;
    int phstart = (offsetH < ksize_height)
                      ? 0
                      : (offsetH - ksize_height) / stride_height + 1;
    int pwstart = (offsetW < ksize_width)
                      ? 0
                      : (offsetW - ksize_width) / stride_width + 1;
    int pdend = min((offsetD) / stride_depth + 1, output_depth);
    int phend = min((offsetH) / stride_height + 1, output_height);
    int pwend = min((offsetW) / stride_width + 1, output_width);

    T gradient = 0;
    T input = input_data[index];
    int output_idx = (batch_idx * channels + offsetC) * output_depth *
                     output_height * output_width;
    output_data += output_idx;
    output_grad += output_idx;
    for (int pd = pdstart; pd < pdend; ++pd) {
      for (int ph = phstart; ph < phend; ++ph) {
        for (int pw = pwstart; pw < pwend; ++pw) {
          // figure out the pooling size
          int dstart = pd * stride_depth - padding_depth;
          int hstart = ph * stride_height - padding_height;
          int wstart = pw * stride_width - padding_width;
          int dend = min(dstart + ksize_depth, input_depth);
          int hend = min(hstart + ksize_height, input_height);
          int wend = min(wstart + ksize_width, input_width);
          dstart = max(dstart, 0);
          hstart = max(hstart, 0);
          wstart = max(wstart, 0);
          int pool_size = (dend - dstart) * (hend - hstart) * (wend - wstart);
          int output_sub_idx = (pd * output_height + ph) * output_width + pw;
          pool_process.compute(input, output_data[output_sub_idx],
                               output_grad[output_sub_idx], gradient,
                               static_cast<T>(1.0 / pool_size));
        }
      }
    }
    input_grad[index] = gradient;
  }
}

// Max-pooling 3D backward without a stored argmax mask; 3D analogue of
// KernelMaxPool2DGrad. One thread per *output* element re-scans its window
// for the first input equal to the output value and atomically adds the
// gradient there (overlapping windows can select the same input element).
template <typename T>
__global__ void KernelMaxPool3DGrad(
    const int nthreads, const T* input_data, const T* output_data,
    const T* output_grad, const int channels, const int input_depth,
    const int input_height, const int input_width, const int output_depth,
    const int output_height, const int output_width, const int ksize_depth,
    const int ksize_height, const int ksize_width, const int stride_depth,
    const int stride_height, const int stride_width, const int padding_depth,
    const int padding_height, const int padding_width, T* input_grad) {
  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
       index += blockDim.x * gridDim.x) {
    int pw = index % output_width;
    int ph = (index / output_width) % output_height;
    int pd = (index / output_width / output_height) % output_depth;
    int c = (index / output_width / output_height / output_depth) % channels;
    int batch_idx =
        index / output_width / output_height / output_depth / channels;

    int dstart = pd * stride_depth - padding_depth;
    int hstart = ph * stride_height - padding_height;
    int wstart = pw * stride_width - padding_width;
    int dend = min(dstart + ksize_depth, input_depth);
    int hend = min(hstart + ksize_height, input_height);
    int wend = min(wstart + ksize_width, input_width);
    dstart = max(dstart, 0);
    hstart = max(hstart, 0);
    wstart = max(wstart, 0);

    T ele = output_data[index];
    bool stop = false;
    int maxIdx = -1;
    input_data +=
        (batch_idx * channels + c) * input_depth * input_height * input_width;
    input_grad +=
        (batch_idx * channels + c) * input_depth * input_height * input_width;

    // Find the first window element that equals the pooled output value.
    for (int d = dstart; d < dend && !stop; ++d) {
      for (int h = hstart; h < hend && !stop; ++h) {
        for (int w = wstart; w < wend && !stop; ++w) {
          if (ele == input_data[(d * input_height + h) * input_width + w]) {
            stop = true;
            maxIdx = (d * input_height + h) * input_width + w;
          }
        }
      }
    }
    if (maxIdx != -1) {
      // atomic add
      platform::CudaAtomicAdd(input_grad + maxIdx, output_grad[index]);
    }
  }
}

/*
 * All tensors are in NCDHW format.
 * Ksize, strides, paddings are three elements. These three elements represent
 * depth, height and width, respectively.
*/ template <typename PoolProcess, class T> class Pool3dFunctor<platform::CUDADeviceContext, PoolProcess, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, std::vector<int>& ksize, std::vector<int>& strides, std::vector<int>& paddings, PoolProcess pool_process, framework::Tensor* output) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; const int input_width = input.dims()[4]; const int output_channels = output->dims()[1]; const int output_depth = output->dims()[2]; const int output_height = output->dims()[3]; const int output_width = output->dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T* input_data = input.data<T>(); T* output_data = output->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_depth * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelPool3D<PoolProcess, T><<<grid, threads, 0, context.stream()>>>( nthreads, input_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, pool_process, output_data); } }; /* * All tensors are in NCDHW format. * Ksize, strides, paddings are three elements. These three elements represent * depth, height and width, respectively. 
*/ template <typename PoolProcess, class T> class Pool3dGradFunctor<platform::CUDADeviceContext, PoolProcess, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, std::vector<int>& ksize, std::vector<int>& strides, std::vector<int>& paddings, PoolProcess pool_process, framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; const int input_width = input.dims()[4]; const int output_channels = output.dims()[1]; const int output_depth = output.dims()[2]; const int output_height = output.dims()[3]; const int output_width = output.dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * input_channels * input_depth * input_height * input_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelPool3DGrad<PoolProcess, T><<<grid, threads, 0, context.stream()>>>( nthreads, input_data, output_data, output_grad_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, pool_process, input_grad_data); } }; /* * All tensors are in NCDHW format. * Ksize, strides, paddings are three elements. 
 * These three elements represent depth, height and width, respectively.
 */
// Host-side launcher for KernelMaxPool3DGrad: one thread per *output*
// element (the kernel scatters into input_grad with atomics), 1024 threads
// per block, on the context's stream.
template <class T>
class MaxPool3dGradFunctor<platform::CUDADeviceContext, T> {
 public:
  void operator()(const platform::CUDADeviceContext& context,
                  const framework::Tensor& input,
                  const framework::Tensor& output,
                  const framework::Tensor& output_grad,
                  std::vector<int>& ksize, std::vector<int>& strides,
                  std::vector<int>& paddings, framework::Tensor* input_grad) {
    const int batch_size = input.dims()[0];
    const int input_channels = input.dims()[1];
    const int input_depth = input.dims()[2];
    const int input_height = input.dims()[3];
    const int input_width = input.dims()[4];
    const int output_channels = output.dims()[1];
    const int output_depth = output.dims()[2];
    const int output_height = output.dims()[3];
    const int output_width = output.dims()[4];
    const int ksize_depth = ksize[0];
    const int ksize_height = ksize[1];
    const int ksize_width = ksize[2];
    const int stride_depth = strides[0];
    const int stride_height = strides[1];
    const int stride_width = strides[2];
    const int padding_depth = paddings[0];
    const int padding_height = paddings[1];
    const int padding_width = paddings[2];

    const T* input_data = input.data<T>();
    const T* output_data = output.data<T>();
    const T* output_grad_data = output_grad.data<T>();
    T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace());

    // NOTE: one thread per output element here (unlike Pool3dGradFunctor,
    // which launches per input element).
    int nthreads = batch_size * output_channels * output_depth *
                   output_height * output_width;
    int blocks = (nthreads + 1024 - 1) / 1024;
    dim3 threads(1024, 1);
    dim3 grid(blocks, 1);

    KernelMaxPool3DGrad<T><<<grid, threads, 0, context.stream()>>>(
        nthreads, input_data, output_data, output_grad_data, input_channels,
        input_depth, input_height, input_width, output_depth, output_height,
        output_width, ksize_depth, ksize_height, ksize_width, stride_depth,
        stride_height, stride_width, padding_depth, padding_height,
        padding_width, input_grad_data);
  }
};

template class MaxPool3dGradFunctor<platform::CUDADeviceContext, float>;
template class MaxPool3dGradFunctor<platform::CUDADeviceContext, double>;

template class Pool3dFunctor<platform::CUDADeviceContext,
                             paddle::operators::math::MaxPool<float>, float>;
template class Pool3dFunctor<platform::CUDADeviceContext,
                             paddle::operators::math::AvgPool<float>, float>;
template class Pool3dGradFunctor<platform::CUDADeviceContext,
                                 paddle::operators::math::MaxPoolGrad<float>,
                                 float>;
template class Pool3dGradFunctor<platform::CUDADeviceContext,
                                 paddle::operators::math::AvgPoolGrad<float>,
                                 float>;
template class Pool3dFunctor<platform::CUDADeviceContext,
                             paddle::operators::math::MaxPool<double>, double>;
template class Pool3dFunctor<platform::CUDADeviceContext,
                             paddle::operators::math::AvgPool<double>, double>;
template class Pool3dGradFunctor<platform::CUDADeviceContext,
                                 paddle::operators::math::MaxPoolGrad<double>,
                                 double>;
template class Pool3dGradFunctor<platform::CUDADeviceContext,
                                 paddle::operators::math::AvgPoolGrad<double>,
                                 double>;

// 2D max-pooling forward that also records the argmax position of every
// output element. One thread per output element, grid-stride loop. The
// recorded index is relative to the start of this (batch, channel) feature
// map, matching what KernelMaxPool2DWithIdxGrad compares against.
// NOTE(review): `ele` is seeded with -FLT_MAX even when T1 is double, so the
// search only covers the float range — confirm this matches upstream intent.
template <typename T1, typename T2>
__global__ void KernelMaxPool2dWithIdx(
    const int nthreads, const T1* input_data, const int channels,
    const int input_height, const int input_width, const int output_height,
    const int output_width, const int ksize_height, const int ksize_width,
    const int stride_height, const int stride_width, const int padding_height,
    const int padding_width, T1* output_data, T2* mask_data) {
  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
       index += blockDim.x * gridDim.x) {
    // Decompose the flat index into (batch, channel, ph, pw).
    int pw = index % output_width;
    int ph = (index / output_width) % output_height;
    int c = (index / output_width / output_height) % channels;
    int batch_idx = index / output_width / output_height / channels;

    // Pooling window clipped to the valid input region.
    int hstart = ph * stride_height - padding_height;
    int hend = min(hstart + ksize_height, input_height);
    hstart = max(hstart, 0);
    int wstart = pw * stride_width - padding_width;
    int wend = min(wstart + ksize_width, input_width);
    wstart = max(wstart, 0);

    // Rebase onto this (batch, channel) feature map.
    input_data += (batch_idx * channels + c) * input_height * input_width;
    T1 ele = -FLT_MAX;
    int max_index = -1;
    for (int h = hstart; h < hend; ++h) {
      for (int w = wstart; w < wend; ++w) {
        int input_index = h * input_width + w;
        if (ele < input_data[input_index]) {
          max_index = input_index;
          ele = input_data[input_index];
        }
      }
    }
    output_data[index] = ele;
    mask_data[index] = max_index;
  }
}

// Backward of KernelMaxPool2dWithIdx. One thread per *input* element: it
// enumerates the output positions whose window could cover this element and
// sums the gradients of those outputs whose recorded argmax equals this
// element's feature-map offset. No atomics needed — one writer per element.
template <typename T1, typename T2>
__global__ void KernelMaxPool2DWithIdxGrad(
    const int nthreads, const T1* output_grad, const T2* mask_data,
    const int channels, const int input_height, const int input_width,
    const int output_height, const int output_width, const int ksize_height,
    const int ksize_width, const int stride_height, const int stride_width,
    const int padding_height, const int padding_width, T1* input_grad) {
  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
       index += blockDim.x * gridDim.x) {
    // Decompose the flat index into (batch, channel, h, w) of the input.
    int w_offset = index % input_width;
    int h_offset = (index / input_width) % input_height;
    int c_offset = (index / input_width / input_height) % channels;
    int batch_idx = index / input_width / input_height / channels;

    // Range of output positions whose window covers this input element.
    int ph_start =
        (h_offset + padding_height < ksize_height)
            ? 0
            : (h_offset + padding_height - ksize_height) / stride_height + 1;
    int pw_start =
        (w_offset + padding_width < ksize_width)
            ? 0
            : (w_offset + padding_width - ksize_width) / stride_width + 1;
    int ph_end =
        min((h_offset + padding_height) / stride_height + 1, output_height);
    int pw_end =
        min((w_offset + padding_width) / stride_width + 1, output_width);

    T1 gradient = 0;
    // Feature-map-relative offset of this input element, as stored in mask.
    int input_current_featuremap_idx = h_offset * input_width + w_offset;
    int output_idx =
        (batch_idx * channels + c_offset) * output_height * output_width;
    // Rebase both pointers onto this (batch, channel) output feature map.
    mask_data += output_idx;
    output_grad += output_idx;
    for (int ph = ph_start; ph < ph_end; ++ph) {
      for (int pw = pw_start; pw < pw_end; ++pw) {
        if (mask_data[ph * output_width + pw] == input_current_featuremap_idx)
          gradient += output_grad[ph * output_width + pw];
      }
    }
    input_grad[index] = gradient;
  }
}

/*
 * All tensors are in NCHW format.
* Ksize, strides, paddings are two elements. These two elements represent * height and width, respectively. */ template <typename T1, typename T2> class MaxPool2dWithIndexFunctor<platform::CUDADeviceContext, T1, T2> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, std::vector<int>& ksize, std::vector<int>& strides, std::vector<int>& paddings, framework::Tensor* output, framework::Tensor* mask) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_channels = output->dims()[1]; const int output_height = output->dims()[2]; const int output_width = output->dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T1* input_data = input.data<T1>(); T1* output_data = output->mutable_data<T1>(context.GetPlace()); T2* mask_data = mask->mutable_data<T2>(context.GetPlace()); int nthreads = batch_size * output_channels * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelMaxPool2dWithIdx<T1, T2><<<grid, threads, 0, context.stream()>>>( nthreads, input_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, output_data, mask_data); } }; /* * All tensors are in NCHW format. * Ksize, strides, paddings are two elements. These two elements represent * height and width, respectively. 
*/ template <typename T1, typename T2> class MaxPool2dWithIndexGradFunctor<platform::CUDADeviceContext, T1, T2> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& output_grad, const framework::Tensor& mask, std::vector<int>& ksize, std::vector<int>& strides, std::vector<int>& paddings, framework::Tensor* input_grad) { const int batch_size = input_grad->dims()[0]; const int input_channels = input_grad->dims()[1]; const int input_height = input_grad->dims()[2]; const int input_width = input_grad->dims()[3]; const int output_height = output_grad.dims()[2]; const int output_width = output_grad.dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T2* mask_data = mask.data<T2>(); const T1* output_grad_data = output_grad.data<T1>(); T1* input_grad_data = input_grad->mutable_data<T1>(context.GetPlace()); int nthreads = batch_size * input_channels * input_height * input_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelMaxPool2DWithIdxGrad<T1, T2><<<grid, threads, 0, context.stream()>>>( nthreads, output_grad_data, mask_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, input_grad_data); } }; template class MaxPool2dWithIndexFunctor<platform::CUDADeviceContext, float, int>; template class MaxPool2dWithIndexGradFunctor<platform::CUDADeviceContext, float, int>; template class MaxPool2dWithIndexFunctor<platform::CUDADeviceContext, double, int>; template class MaxPool2dWithIndexGradFunctor<platform::CUDADeviceContext, double, int>; template <typename T1, typename T2> __global__ void KernelMaxPool3DWithIdx( const int nthreads, const T1* input_data, const int channels, const int input_depth, 
const int input_height, const int input_width, const int output_depth, const int output_height, const int output_width, const int ksize_depth, const int ksize_height, const int ksize_width, const int stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width, T1* output_data, T2* mask_data) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw = index % output_width; int ph = (index / output_width) % output_height; int pd = (index / output_width / output_height) % output_depth; int c = (index / output_width / output_height / output_depth) % channels; int batch_idx = index / output_width / output_height / output_depth / channels; int dstart = pd * stride_depth - padding_depth; int hstart = ph * stride_height - padding_height; int wstart = pw * stride_width - padding_width; int dend = min(dstart + ksize_depth, input_depth); int hend = min(hstart + ksize_height, input_height); int wend = min(wstart + ksize_width, input_width); dstart = max(dstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); T1 ele = -FLT_MAX; int max_index = -1; input_data += (batch_idx * channels + c) * input_depth * input_height * input_width; for (int d = dstart; d < dend; ++d) { for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { if (ele < input_data[(d * input_height + h) * input_width + w]) { max_index = (d * input_height + h) * input_width + w; ele = input_data[max_index]; } } } } output_data[index] = ele; mask_data[index] = max_index; } } template <typename T1, typename T2> __global__ void KernelMaxPool3DWithIdxGrad( const int nthreads, const T1* output_grad, const T2* mask, const int channels, const int input_depth, const int input_height, const int input_width, const int output_depth, const int output_height, const int output_width, const int ksize_depth, const int ksize_height, const int ksize_width, const int 
stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width, T1* input_grad) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int w_offset = index % input_width; int h_offset = (index / input_width) % input_height; int d_offset = (index / input_width / input_height) % input_depth; int c_offset = (index / input_width / input_height / input_depth) % channels; int batch_idx = index / input_width / input_height / input_depth / channels; int pd_start = (d_offset + padding_depth < ksize_depth) ? 0 : (d_offset + padding_depth - ksize_depth) / stride_depth + 1; int ph_start = (h_offset + padding_height < ksize_height) ? 0 : (h_offset + padding_height - ksize_height) / stride_height + 1; int pw_start = (w_offset + padding_width < ksize_width) ? 0 : (w_offset + padding_width - ksize_width) / stride_width + 1; int pd_end = min((d_offset + padding_depth) / stride_depth + 1, output_depth); int ph_end = min((h_offset + padding_height) / stride_height + 1, output_height); int pw_end = min((w_offset + padding_width) / stride_width + 1, output_width); T1 gradient = 0; int input_current_feature_map_idx = (d_offset * input_height + h_offset) * input_width + w_offset; int output_idx = (batch_idx * channels + c_offset) * output_depth * output_height * output_width; mask += output_idx; output_grad += output_idx; for (int pd = pd_start; pd < pd_end; ++pd) { for (int ph = ph_start; ph < ph_end; ++ph) { for (int pw = pw_start; pw < pw_end; ++pw) { if (mask[(pd * output_height + ph) * output_width + pw] == input_current_feature_map_idx) gradient += output_grad[(pd * output_height + ph) * output_width + pw]; } } } input_grad[index] = gradient; } } /* * All tensors are in NCDHW format. * Ksize, strides, paddings are three elements. These three elements represent * depth, height and width, respectively. 
*/ template <typename T1, typename T2> class MaxPool3dWithIndexFunctor<platform::CUDADeviceContext, T1, T2> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, std::vector<int>& ksize, std::vector<int>& strides, std::vector<int>& paddings, framework::Tensor* output, framework::Tensor* mask) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; const int input_width = input.dims()[4]; const int output_channels = output->dims()[1]; const int output_depth = output->dims()[2]; const int output_height = output->dims()[3]; const int output_width = output->dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T1* input_data = input.data<T1>(); T1* output_data = output->mutable_data<T1>(context.GetPlace()); T2* mask_data = mask->mutable_data<T2>(context.GetPlace()); int nthreads = batch_size * output_channels * output_depth * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelMaxPool3DWithIdx<T1, T2><<<grid, threads, 0, context.stream()>>>( nthreads, input_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, output_data, mask_data); } }; /* * All tensors are in NCDHW format. * Ksize, strides, paddings are three elements. These three elements represent * depth, height and width, respectively. 
*/ template <typename T1, typename T2> class MaxPool3dWithIndexGradFunctor<platform::CUDADeviceContext, T1, T2> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& output_grad, const framework::Tensor& mask, std::vector<int>& ksize, std::vector<int>& strides, std::vector<int>& paddings, framework::Tensor* input_grad) { const int batch_size = input_grad->dims()[0]; const int input_channels = input_grad->dims()[1]; const int input_depth = input_grad->dims()[2]; const int input_height = input_grad->dims()[3]; const int input_width = input_grad->dims()[4]; const int output_depth = output_grad.dims()[2]; const int output_height = output_grad.dims()[3]; const int output_width = output_grad.dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T1* output_grad_data = output_grad.data<T1>(); const T2* mask_data = mask.data<T2>(); T1* input_grad_data = input_grad->mutable_data<T1>(context.GetPlace()); int nthreads = batch_size * input_channels * input_depth * input_height * input_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelMaxPool3DWithIdxGrad<T1, T2><<<grid, threads, 0, context.stream()>>>( nthreads, output_grad_data, mask_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, input_grad_data); } }; template class MaxPool3dWithIndexFunctor<platform::CUDADeviceContext, float, int>; template class MaxPool3dWithIndexGradFunctor<platform::CUDADeviceContext, float, int>; template class 
MaxPool3dWithIndexFunctor<platform::CUDADeviceContext, double, int>; template class MaxPool3dWithIndexGradFunctor<platform::CUDADeviceContext, double, int>; } // namespace math } // namespace operators } // namespace paddle
9e42bac045b0f7c314b2b9531ba99312555a9c85.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "stencil.hpp" #include "core/cuda_launcher.cuh" #include "core/cuda_macros.cuh" #include "core/macros.h" #include "stencil_operators.hpp" namespace ddj { Stencil::Stencil(SharedCudaPtr<char> data, int shift) { this->_data = unpack(data, shift); } __global__ void packKernel(int* data, int dataSize, char* output, int outputSize) { unsigned int output_idx = threadIdx.x + blockIdx.x * blockDim.x; // char array index unsigned int input_idx_start = output_idx * 8; unsigned int input_idx; if(output_idx >= outputSize) return; char part = 0; int number = 0; #pragma unroll for(int i = 0; i < 8; i++) { input_idx = input_idx_start + i; input_idx = input_idx < dataSize ? input_idx : dataSize - 1; number = data[input_idx]; part = SetNthBit(i, number, part); } output[output_idx] = part; } __global__ void unpackKernel(char* data, int dataSize, int* output, int outputSize) { unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x; // char array index unsigned int output_idx_start = idx * 8; if(idx >= dataSize) return; char number = data[idx]; // from now idx is the output index #pragma unroll for(int i = 0; i < 8; i++) { idx = output_idx_start + i; idx = idx < outputSize ? idx : outputSize - 1; output[idx] = GetNthBit(i, number) ? 
1 : 0; } } SharedCudaPtr<char> Stencil::pack() { int dataSize = this->_data->size(); int charsNeeded = (dataSize + 7) / 8; auto result = CudaPtr<char>::make_shared(charsNeeded + 1); this->_policy.setSize(charsNeeded); hipLaunch(this->_policy, packKernel, this->_data->get(), dataSize, result->get()+1, result->size()-1); char rest = dataSize % 8; CUDA_CALL( hipMemcpy(result->get(), &rest, 1, CPY_HTD) ); hipDeviceSynchronize(); return result; } SharedCudaPtr<int> Stencil::unpack(SharedCudaPtr<char> data, int shift) { char* dataPtr = data->get() + shift; size_t size = data->size() - shift; // GET NUMBER OF ELEMENTS char rest; CUDA_CALL( hipMemcpy(&rest, dataPtr, 1, CPY_DTH) ); int numElements = (size-1)*8; if(rest) numElements -= 8 - rest; // PREAPARE MEMORY FOR RESULT auto result = CudaPtr<int>::make_shared(numElements); // UNPACK STENCIL this->_policy.setSize(size); hipLaunch(this->_policy, unpackKernel, dataPtr+1, size-1, result->get(), result->size() ); hipDeviceSynchronize(); return result; } template<typename T, typename Predicate> __global__ void _createStencilKernel(T* data, size_t size, int* output, Predicate pred) { unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x; if(idx >= size) return; output[idx] = pred(data[idx]); } template<typename T, typename Predicate> Stencil Stencil::Create(SharedCudaPtr<T> data, Predicate pred) { Stencil stencil(CudaPtr<int>::make_shared(data->size())); ExecutionPolicy policy; policy.setSize(data->size()); hipLaunch(policy, _createStencilKernel<T, Predicate>, data->get(), data->size(), stencil->get(), pred); hipDeviceSynchronize(); return stencil; } #define STENCIL_SPEC(X) \ template Stencil Stencil::Create<X, EqualOperator<X>>(SharedCudaPtr<X> data, EqualOperator<X> pred); \ template Stencil Stencil::Create<X, NotEqualOperator<X>>(SharedCudaPtr<X> data, NotEqualOperator<X> pred); \ template Stencil Stencil::Create<X, InsideOperator<X>>(SharedCudaPtr<X> data, InsideOperator<X> pred); \ \ template Stencil Stencil::Create<X, 
OutsideOperator<char>>(SharedCudaPtr<X> data, OutsideOperator<char> pred); \ template Stencil Stencil::Create<X, OutsideOperator<short>>(SharedCudaPtr<X> data, OutsideOperator<short> pred); \ template Stencil Stencil::Create<X, OutsideOperator<double>>(SharedCudaPtr<X> data, OutsideOperator<double> pred); \ template Stencil Stencil::Create<X, OutsideOperator<float>>(SharedCudaPtr<X> data, OutsideOperator<float> pred); \ template Stencil Stencil::Create<X, OutsideOperator<int>>(SharedCudaPtr<X> data, OutsideOperator<int> pred); \ template Stencil Stencil::Create<X, OutsideOperator<long>>(SharedCudaPtr<X> data, OutsideOperator<long> pred); \ template Stencil Stencil::Create<X, OutsideOperator<long long>>(SharedCudaPtr<X> data, OutsideOperator<long long> pred); \ template Stencil Stencil::Create<X, OutsideOperator<unsigned int>>(SharedCudaPtr<X> data, OutsideOperator<unsigned int> pred); \ \ template Stencil Stencil::Create<X, LowerOperator<char>>(SharedCudaPtr<X> data, LowerOperator<char> pred); \ template Stencil Stencil::Create<X, LowerOperator<short>>(SharedCudaPtr<X> data, LowerOperator<short> pred); \ template Stencil Stencil::Create<X, LowerOperator<double>>(SharedCudaPtr<X> data, LowerOperator<double> pred); \ template Stencil Stencil::Create<X, LowerOperator<float>>(SharedCudaPtr<X> data, LowerOperator<float> pred); \ template Stencil Stencil::Create<X, LowerOperator<int>>(SharedCudaPtr<X> data, LowerOperator<int> pred); \ template Stencil Stencil::Create<X, LowerOperator<long>>(SharedCudaPtr<X> data, LowerOperator<long> pred); \ template Stencil Stencil::Create<X, LowerOperator<long long>>(SharedCudaPtr<X> data, LowerOperator<long long> pred); \ template Stencil Stencil::Create<X, LowerOperator<unsigned int>>(SharedCudaPtr<X> data, LowerOperator<unsigned int> pred); FOR_EACH(STENCIL_SPEC, char, short, double, float, int, long, long long, unsigned int) }/* namespace ddj */
9e42bac045b0f7c314b2b9531ba99312555a9c85.cu
#include "stencil.hpp" #include "core/cuda_launcher.cuh" #include "core/cuda_macros.cuh" #include "core/macros.h" #include "stencil_operators.hpp" namespace ddj { Stencil::Stencil(SharedCudaPtr<char> data, int shift) { this->_data = unpack(data, shift); } __global__ void packKernel(int* data, int dataSize, char* output, int outputSize) { unsigned int output_idx = threadIdx.x + blockIdx.x * blockDim.x; // char array index unsigned int input_idx_start = output_idx * 8; unsigned int input_idx; if(output_idx >= outputSize) return; char part = 0; int number = 0; #pragma unroll for(int i = 0; i < 8; i++) { input_idx = input_idx_start + i; input_idx = input_idx < dataSize ? input_idx : dataSize - 1; number = data[input_idx]; part = SetNthBit(i, number, part); } output[output_idx] = part; } __global__ void unpackKernel(char* data, int dataSize, int* output, int outputSize) { unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x; // char array index unsigned int output_idx_start = idx * 8; if(idx >= dataSize) return; char number = data[idx]; // from now idx is the output index #pragma unroll for(int i = 0; i < 8; i++) { idx = output_idx_start + i; idx = idx < outputSize ? idx : outputSize - 1; output[idx] = GetNthBit(i, number) ? 
1 : 0; } } SharedCudaPtr<char> Stencil::pack() { int dataSize = this->_data->size(); int charsNeeded = (dataSize + 7) / 8; auto result = CudaPtr<char>::make_shared(charsNeeded + 1); this->_policy.setSize(charsNeeded); cudaLaunch(this->_policy, packKernel, this->_data->get(), dataSize, result->get()+1, result->size()-1); char rest = dataSize % 8; CUDA_CALL( cudaMemcpy(result->get(), &rest, 1, CPY_HTD) ); cudaDeviceSynchronize(); return result; } SharedCudaPtr<int> Stencil::unpack(SharedCudaPtr<char> data, int shift) { char* dataPtr = data->get() + shift; size_t size = data->size() - shift; // GET NUMBER OF ELEMENTS char rest; CUDA_CALL( cudaMemcpy(&rest, dataPtr, 1, CPY_DTH) ); int numElements = (size-1)*8; if(rest) numElements -= 8 - rest; // PREAPARE MEMORY FOR RESULT auto result = CudaPtr<int>::make_shared(numElements); // UNPACK STENCIL this->_policy.setSize(size); cudaLaunch(this->_policy, unpackKernel, dataPtr+1, size-1, result->get(), result->size() ); cudaDeviceSynchronize(); return result; } template<typename T, typename Predicate> __global__ void _createStencilKernel(T* data, size_t size, int* output, Predicate pred) { unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x; if(idx >= size) return; output[idx] = pred(data[idx]); } template<typename T, typename Predicate> Stencil Stencil::Create(SharedCudaPtr<T> data, Predicate pred) { Stencil stencil(CudaPtr<int>::make_shared(data->size())); ExecutionPolicy policy; policy.setSize(data->size()); cudaLaunch(policy, _createStencilKernel<T, Predicate>, data->get(), data->size(), stencil->get(), pred); cudaDeviceSynchronize(); return stencil; } #define STENCIL_SPEC(X) \ template Stencil Stencil::Create<X, EqualOperator<X>>(SharedCudaPtr<X> data, EqualOperator<X> pred); \ template Stencil Stencil::Create<X, NotEqualOperator<X>>(SharedCudaPtr<X> data, NotEqualOperator<X> pred); \ template Stencil Stencil::Create<X, InsideOperator<X>>(SharedCudaPtr<X> data, InsideOperator<X> pred); \ \ template Stencil 
Stencil::Create<X, OutsideOperator<char>>(SharedCudaPtr<X> data, OutsideOperator<char> pred); \ template Stencil Stencil::Create<X, OutsideOperator<short>>(SharedCudaPtr<X> data, OutsideOperator<short> pred); \ template Stencil Stencil::Create<X, OutsideOperator<double>>(SharedCudaPtr<X> data, OutsideOperator<double> pred); \ template Stencil Stencil::Create<X, OutsideOperator<float>>(SharedCudaPtr<X> data, OutsideOperator<float> pred); \ template Stencil Stencil::Create<X, OutsideOperator<int>>(SharedCudaPtr<X> data, OutsideOperator<int> pred); \ template Stencil Stencil::Create<X, OutsideOperator<long>>(SharedCudaPtr<X> data, OutsideOperator<long> pred); \ template Stencil Stencil::Create<X, OutsideOperator<long long>>(SharedCudaPtr<X> data, OutsideOperator<long long> pred); \ template Stencil Stencil::Create<X, OutsideOperator<unsigned int>>(SharedCudaPtr<X> data, OutsideOperator<unsigned int> pred); \ \ template Stencil Stencil::Create<X, LowerOperator<char>>(SharedCudaPtr<X> data, LowerOperator<char> pred); \ template Stencil Stencil::Create<X, LowerOperator<short>>(SharedCudaPtr<X> data, LowerOperator<short> pred); \ template Stencil Stencil::Create<X, LowerOperator<double>>(SharedCudaPtr<X> data, LowerOperator<double> pred); \ template Stencil Stencil::Create<X, LowerOperator<float>>(SharedCudaPtr<X> data, LowerOperator<float> pred); \ template Stencil Stencil::Create<X, LowerOperator<int>>(SharedCudaPtr<X> data, LowerOperator<int> pred); \ template Stencil Stencil::Create<X, LowerOperator<long>>(SharedCudaPtr<X> data, LowerOperator<long> pred); \ template Stencil Stencil::Create<X, LowerOperator<long long>>(SharedCudaPtr<X> data, LowerOperator<long long> pred); \ template Stencil Stencil::Create<X, LowerOperator<unsigned int>>(SharedCudaPtr<X> data, LowerOperator<unsigned int> pred); FOR_EACH(STENCIL_SPEC, char, short, double, float, int, long, long long, unsigned int) }/* namespace ddj */
e61ef5a0015f586e543940ce9961344e6b90b50c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "CUDA.hpp" /* change spherical vector into normalized xyz vector*/ __forceinline__ __host__ __device__ vec3f sphere_to_normal(const vec3f &sphere_direction) { const float &theta = sphere_direction.y; const float &phi = sphere_direction.z; return vec3f(cos(theta) * sin(phi), sin(theta) * sin(phi), cos(phi)); } /* change xyz vector into spherical vector with 0 length */ // __forceinline__ __host__ __device__ vec3f normal_to_sphere(const vec3f &xyz_direction) // { // float theta = atan(xyz_direction.y / xyz_direction.x); // float phi = acos(xyz_direction.z / length(xyz_direction)); // if (xyz_direction.x < 0) // { // theta = (xyz_direction.y > 0) ? theta + M_PI : theta - M_PI; // } // return vec3f(0.f, theta, phi); // } //------------------------------------------------------------------------------ // ray gen program - the actual rendering happens in here //------------------------------------------------------------------------------ extern "C" __global__ void __raygen__renderFrame() { // compute a test pattern based on pixel ID const int ix = optixGetLaunchIndex().x; const int iy = optixGetLaunchIndex().y; const int accumID = optixLaunchParams.frame.accumID; const auto &camera = optixLaunchParams.camera; PRD prd; prd.random.init(ix + accumID * optixLaunchParams.frame.size.x, iy + accumID * optixLaunchParams.frame.size.y); prd.pixelColor = vec3f(0.f); // the values we store the PRD pointer in: uint32_t u0, u1; packPointer(&prd, u0, u1); int numPixelSamples = NUM_PIXEL_SAMPLES; vec3f pixelColor = 0.f; for (int sampleID = 0; sampleID < numPixelSamples; sampleID++) { vec3f rayDir; if (camera.camera_type == PINHOLE) { // normalized screen plane position, in [0,1]^2 #if NUM_PIXEL_SAMPLES > 1 const vec2f screen(vec2f(ix + prd.random() - 0.5f, iy + prd.random() - 0.5f) / vec2f(optixLaunchParams.frame.size)); #else const vec2f screen(vec2f(ix, iy) / 
vec2f(optixLaunchParams.frame.size)); #endif // generate ray direction rayDir = normalize(camera.direction + (screen.x - 0.5f) * camera.horizontal + (screen.y - 0.5f) * camera.vertical); } else if (camera.camera_type == ENV) { // sperical coordinate position #if NUM_PIXEL_SAMPLES > 1 vec3f spherical_position((ix + prd.random() - 0.5f) * camera.horizontal + (iy + prd.random() - 0.5f) * camera.vertical); #else vec3f spherical_position((float)ix * camera.horizontal + (float)iy * camera.vertical); #endif spherical_position -= vec3f(0.f, M_PI, 0.f); // change into xyz coordinate position const vec3f xyz_position(sphere_to_normal(spherical_position)); // view port transform rayDir = {dot(camera.matrix.vx, xyz_position), dot(camera.matrix.vy, xyz_position), dot(camera.matrix.vz, xyz_position)}; } const int &ray_type = optixLaunchParams.launch_ray_type; optixTrace(optixLaunchParams.traversable, camera.position, rayDir, 0.f, // tmin 1e20f, // tmax 0.0f, // rayTime OptixVisibilityMask(255), OPTIX_RAY_FLAG_DISABLE_ANYHIT, //OPTIX_RAY_FLAG_NONE, ray_type, // SBT offset RAY_TYPE_COUNT, // SBT stride ray_type, // missSBTIndex u0, u1); pixelColor += prd.pixelColor; } const int r = int(255.99f * min(pixelColor.x / numPixelSamples, 1.f)); const int g = int(255.99f * min(pixelColor.y / numPixelSamples, 1.f)); const int b = int(255.99f * min(pixelColor.z / numPixelSamples, 1.f)); // convert to 32-bit rgba value (we explicitly set alpha to 0xff // to make stb_image_write happy ... const uint32_t rgba = 0xff000000 | (r << 0) | (g << 8) | (b << 16); // and write to frame buffer ... const uint32_t fbIndex = ix + iy * optixLaunchParams.frame.size.x; optixLaunchParams.frame.colorBuffer[fbIndex] = rgba; }
e61ef5a0015f586e543940ce9961344e6b90b50c.cu
#include "CUDA.hpp" /* change spherical vector into normalized xyz vector*/ __forceinline__ __host__ __device__ vec3f sphere_to_normal(const vec3f &sphere_direction) { const float &theta = sphere_direction.y; const float &phi = sphere_direction.z; return vec3f(cos(theta) * sin(phi), sin(theta) * sin(phi), cos(phi)); } /* change xyz vector into spherical vector with 0 length */ // __forceinline__ __host__ __device__ vec3f normal_to_sphere(const vec3f &xyz_direction) // { // float theta = atan(xyz_direction.y / xyz_direction.x); // float phi = acos(xyz_direction.z / length(xyz_direction)); // if (xyz_direction.x < 0) // { // theta = (xyz_direction.y > 0) ? theta + M_PI : theta - M_PI; // } // return vec3f(0.f, theta, phi); // } //------------------------------------------------------------------------------ // ray gen program - the actual rendering happens in here //------------------------------------------------------------------------------ extern "C" __global__ void __raygen__renderFrame() { // compute a test pattern based on pixel ID const int ix = optixGetLaunchIndex().x; const int iy = optixGetLaunchIndex().y; const int accumID = optixLaunchParams.frame.accumID; const auto &camera = optixLaunchParams.camera; PRD prd; prd.random.init(ix + accumID * optixLaunchParams.frame.size.x, iy + accumID * optixLaunchParams.frame.size.y); prd.pixelColor = vec3f(0.f); // the values we store the PRD pointer in: uint32_t u0, u1; packPointer(&prd, u0, u1); int numPixelSamples = NUM_PIXEL_SAMPLES; vec3f pixelColor = 0.f; for (int sampleID = 0; sampleID < numPixelSamples; sampleID++) { vec3f rayDir; if (camera.camera_type == PINHOLE) { // normalized screen plane position, in [0,1]^2 #if NUM_PIXEL_SAMPLES > 1 const vec2f screen(vec2f(ix + prd.random() - 0.5f, iy + prd.random() - 0.5f) / vec2f(optixLaunchParams.frame.size)); #else const vec2f screen(vec2f(ix, iy) / vec2f(optixLaunchParams.frame.size)); #endif // generate ray direction rayDir = normalize(camera.direction + 
(screen.x - 0.5f) * camera.horizontal + (screen.y - 0.5f) * camera.vertical); } else if (camera.camera_type == ENV) { // sperical coordinate position #if NUM_PIXEL_SAMPLES > 1 vec3f spherical_position((ix + prd.random() - 0.5f) * camera.horizontal + (iy + prd.random() - 0.5f) * camera.vertical); #else vec3f spherical_position((float)ix * camera.horizontal + (float)iy * camera.vertical); #endif spherical_position -= vec3f(0.f, M_PI, 0.f); // change into xyz coordinate position const vec3f xyz_position(sphere_to_normal(spherical_position)); // view port transform rayDir = {dot(camera.matrix.vx, xyz_position), dot(camera.matrix.vy, xyz_position), dot(camera.matrix.vz, xyz_position)}; } const int &ray_type = optixLaunchParams.launch_ray_type; optixTrace(optixLaunchParams.traversable, camera.position, rayDir, 0.f, // tmin 1e20f, // tmax 0.0f, // rayTime OptixVisibilityMask(255), OPTIX_RAY_FLAG_DISABLE_ANYHIT, //OPTIX_RAY_FLAG_NONE, ray_type, // SBT offset RAY_TYPE_COUNT, // SBT stride ray_type, // missSBTIndex u0, u1); pixelColor += prd.pixelColor; } const int r = int(255.99f * min(pixelColor.x / numPixelSamples, 1.f)); const int g = int(255.99f * min(pixelColor.y / numPixelSamples, 1.f)); const int b = int(255.99f * min(pixelColor.z / numPixelSamples, 1.f)); // convert to 32-bit rgba value (we explicitly set alpha to 0xff // to make stb_image_write happy ... const uint32_t rgba = 0xff000000 | (r << 0) | (g << 8) | (b << 16); // and write to frame buffer ... const uint32_t fbIndex = ix + iy * optixLaunchParams.frame.size.x; optixLaunchParams.frame.colorBuffer[fbIndex] = rgba; }
a331fde21c8232edf2a4239c51169233d7eeb60a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Maintainer: adulaney #include "DynamicStructureFactorGPU.cuh" #include <assert.h> /*! \file DynamicStructureFactorGPU.cu \brief Declares GPU kernel code for computing dynamic structure factor on the GPU. */ //! Kernel for computing dynamic structure factor on the GPU /*! \param */ extern "C" __global__ void gpu_dynamic_structure_factor_compute_kernel(const Scalar3 *d_init_pos, const Scalar3 *d_pos, const Scalar3 *d_k, Scalar *d_partial_sum_cos, Scalar *d_partial_sum_sin, Scalar *d_partial_sum_init_cos, Scalar *d_partial_sum_init_sin, int N, int N_k) { __shared__ Scalar cos_sdata[32][32]; __shared__ Scalar sin_sdata[32][32]; __shared__ Scalar init_cos_sdata[32][32]; __shared__ Scalar init_sin_sdata[32][32]; int N_id = threadIdx.y + blockIdx.y * blockDim.y; int k_id = threadIdx.x + blockIdx.x * blockDim.x; if ((N_id < N) && (k_id < N_k)){ Scalar3 pos = d_pos[N_id]; Scalar3 init_pos = d_init_pos[N_id]; Scalar3 kvec = d_k[k_id]; Scalar phase = kvec.x * pos.x + kvec.y * pos.y + kvec.z * pos.z; Scalar init_phase = kvec.x * init_pos.x + kvec.y * init_pos.y + kvec.z * init_pos.z; __syncthreads(); cos_sdata[threadIdx.x][threadIdx.y] = slow::cos(phase); sin_sdata[threadIdx.x][threadIdx.y] = slow::sin(phase); init_cos_sdata[threadIdx.x][threadIdx.y] = slow::cos(init_phase); init_sin_sdata[threadIdx.x][threadIdx.y] = slow::sin(init_phase); __syncthreads(); // Reduce the sum in parallel int offs = blockDim.y >> 1; while (offs > 0){ if (threadIdx.y < offs){ cos_sdata[threadIdx.x][threadIdx.y] += cos_sdata[threadIdx.x][threadIdx.y + offs]; sin_sdata[threadIdx.x][threadIdx.y] += sin_sdata[threadIdx.x][threadIdx.y + offs]; init_cos_sdata[threadIdx.x][threadIdx.y] += init_cos_sdata[threadIdx.x][threadIdx.y + offs]; init_sin_sdata[threadIdx.x][threadIdx.y] += init_sin_sdata[threadIdx.x][threadIdx.y + offs]; } offs >>= 1; __syncthreads(); } if (threadIdx.y == 0){ d_partial_sum_cos[blockIdx.y * 
N_k + k_id] = cos_sdata[threadIdx.x][0]; d_partial_sum_sin[blockIdx.y * N_k + k_id] = sin_sdata[threadIdx.x][0]; d_partial_sum_init_cos[blockIdx.y * N_k + k_id] = init_cos_sdata[threadIdx.x][0]; d_partial_sum_init_sin[blockIdx.y * N_k + k_id] = init_sin_sdata[threadIdx.x][0]; } } } extern "C" __global__ void gpu_reduce_dynamic_partial_sum_kernel(Scalar *d_partial_sum_cos, Scalar *d_partial_sum_sin, Scalar *d_partial_sum_init_cos, Scalar *d_partial_sum_init_sin, Scalar *d_sum_cos, Scalar *d_sum_sin, Scalar *d_sum_init_cos, Scalar *d_sum_init_sin, unsigned int num_blocks_n, int N_k) { __shared__ Scalar cos_sdata[32][32]; __shared__ Scalar sin_sdata[32][32]; __shared__ Scalar init_cos_sdata[32][32]; __shared__ Scalar init_sin_sdata[32][32]; int k_id = threadIdx.x + blockIdx.x * blockDim.x; if (k_id < N_k){ Scalar sum_cos = Scalar(0.0); Scalar sum_sin = Scalar(0.0); Scalar sum_init_cos = Scalar(0.0); Scalar sum_init_sin = Scalar(0.0); for (int start = 0; start < num_blocks_n; start += blockDim.y){ __syncthreads(); if (start + threadIdx.y < num_blocks_n){ cos_sdata[threadIdx.x][threadIdx.y] = d_partial_sum_cos[k_id + N_k*(start + threadIdx.y)]; sin_sdata[threadIdx.x][threadIdx.y] = d_partial_sum_sin[k_id + N_k*(start + threadIdx.y)]; init_cos_sdata[threadIdx.x][threadIdx.y] = d_partial_sum_init_cos[k_id + N_k*(start + threadIdx.y)]; init_sin_sdata[threadIdx.x][threadIdx.y] = d_partial_sum_init_sin[k_id + N_k*(start + threadIdx.y)]; } else{ cos_sdata[threadIdx.x][threadIdx.y] = Scalar(0.0); sin_sdata[threadIdx.x][threadIdx.y] = Scalar(0.0); init_cos_sdata[threadIdx.x][threadIdx.y] = Scalar(0.0); init_sin_sdata[threadIdx.x][threadIdx.y] = Scalar(0.0); } __syncthreads(); // reduce sum in parallel int offs = blockDim.y >> 1; while (offs > 0){ if (threadIdx.y < offs){ cos_sdata[threadIdx.x][threadIdx.y] += cos_sdata[threadIdx.x][threadIdx.y + offs]; sin_sdata[threadIdx.x][threadIdx.y] += sin_sdata[threadIdx.x][threadIdx.y + offs]; init_cos_sdata[threadIdx.x][threadIdx.y] += 
init_cos_sdata[threadIdx.x][threadIdx.y + offs]; init_sin_sdata[threadIdx.x][threadIdx.y] += init_sin_sdata[threadIdx.x][threadIdx.y + offs]; } offs >>= 1; __syncthreads(); } sum_cos += cos_sdata[threadIdx.x][0]; sum_sin += sin_sdata[threadIdx.x][0]; sum_init_cos += init_cos_sdata[threadIdx.x][0]; sum_init_sin += init_sin_sdata[threadIdx.x][0]; } if (threadIdx.y == 0){ d_sum_cos[k_id] = sum_cos; d_sum_sin[k_id] = sum_sin; d_sum_init_cos[k_id] = sum_init_cos; d_sum_init_sin[k_id] = sum_init_sin; } } } hipError_t gpu_dynamic_structure_factor_compute(const Scalar3 *d_init_pos, const Scalar3 *d_pos, const Scalar3 *d_k, Scalar *d_partial_sum_cos, Scalar *d_partial_sum_sin, Scalar *d_partial_sum_init_cos, Scalar *d_partial_sum_init_sin, Scalar *d_sum_cos, Scalar *d_sum_sin, Scalar *d_sum_init_cos, Scalar *d_sum_init_sin, unsigned int num_blocks_n, unsigned int num_blocks_k, int N, int N_k) { // define grid to run kernel int block_size = 32; dim3 blocks( num_blocks_k, num_blocks_n); dim3 threads(block_size, block_size); dim3 blocks1(num_blocks_k, 1); dim3 threads1(block_size, block_size); // run the kernel hipLaunchKernelGGL(( gpu_dynamic_structure_factor_compute_kernel), dim3(blocks), dim3(threads), 0, 0, d_init_pos, d_pos, d_k, d_partial_sum_cos, d_partial_sum_sin, d_partial_sum_init_cos, d_partial_sum_init_sin, N, N_k); // run the summation kernel hipLaunchKernelGGL(( gpu_reduce_dynamic_partial_sum_kernel), dim3(blocks1), dim3(threads1), 0, 0, d_partial_sum_cos, d_partial_sum_sin, d_partial_sum_init_cos, d_partial_sum_init_sin, d_sum_cos, d_sum_sin, d_sum_init_cos, d_sum_init_sin, num_blocks_n, N_k); return hipSuccess; }
a331fde21c8232edf2a4239c51169233d7eeb60a.cu
// Maintainer: adulaney #include "DynamicStructureFactorGPU.cuh" #include <assert.h> /*! \file DynamicStructureFactorGPU.cu \brief Declares GPU kernel code for computing dynamic structure factor on the GPU. */ //! Kernel for computing dynamic structure factor on the GPU /*! \param */ extern "C" __global__ void gpu_dynamic_structure_factor_compute_kernel(const Scalar3 *d_init_pos, const Scalar3 *d_pos, const Scalar3 *d_k, Scalar *d_partial_sum_cos, Scalar *d_partial_sum_sin, Scalar *d_partial_sum_init_cos, Scalar *d_partial_sum_init_sin, int N, int N_k) { __shared__ Scalar cos_sdata[32][32]; __shared__ Scalar sin_sdata[32][32]; __shared__ Scalar init_cos_sdata[32][32]; __shared__ Scalar init_sin_sdata[32][32]; int N_id = threadIdx.y + blockIdx.y * blockDim.y; int k_id = threadIdx.x + blockIdx.x * blockDim.x; if ((N_id < N) && (k_id < N_k)){ Scalar3 pos = d_pos[N_id]; Scalar3 init_pos = d_init_pos[N_id]; Scalar3 kvec = d_k[k_id]; Scalar phase = kvec.x * pos.x + kvec.y * pos.y + kvec.z * pos.z; Scalar init_phase = kvec.x * init_pos.x + kvec.y * init_pos.y + kvec.z * init_pos.z; __syncthreads(); cos_sdata[threadIdx.x][threadIdx.y] = slow::cos(phase); sin_sdata[threadIdx.x][threadIdx.y] = slow::sin(phase); init_cos_sdata[threadIdx.x][threadIdx.y] = slow::cos(init_phase); init_sin_sdata[threadIdx.x][threadIdx.y] = slow::sin(init_phase); __syncthreads(); // Reduce the sum in parallel int offs = blockDim.y >> 1; while (offs > 0){ if (threadIdx.y < offs){ cos_sdata[threadIdx.x][threadIdx.y] += cos_sdata[threadIdx.x][threadIdx.y + offs]; sin_sdata[threadIdx.x][threadIdx.y] += sin_sdata[threadIdx.x][threadIdx.y + offs]; init_cos_sdata[threadIdx.x][threadIdx.y] += init_cos_sdata[threadIdx.x][threadIdx.y + offs]; init_sin_sdata[threadIdx.x][threadIdx.y] += init_sin_sdata[threadIdx.x][threadIdx.y + offs]; } offs >>= 1; __syncthreads(); } if (threadIdx.y == 0){ d_partial_sum_cos[blockIdx.y * N_k + k_id] = cos_sdata[threadIdx.x][0]; d_partial_sum_sin[blockIdx.y * N_k + k_id] = 
sin_sdata[threadIdx.x][0]; d_partial_sum_init_cos[blockIdx.y * N_k + k_id] = init_cos_sdata[threadIdx.x][0]; d_partial_sum_init_sin[blockIdx.y * N_k + k_id] = init_sin_sdata[threadIdx.x][0]; } } } extern "C" __global__ void gpu_reduce_dynamic_partial_sum_kernel(Scalar *d_partial_sum_cos, Scalar *d_partial_sum_sin, Scalar *d_partial_sum_init_cos, Scalar *d_partial_sum_init_sin, Scalar *d_sum_cos, Scalar *d_sum_sin, Scalar *d_sum_init_cos, Scalar *d_sum_init_sin, unsigned int num_blocks_n, int N_k) { __shared__ Scalar cos_sdata[32][32]; __shared__ Scalar sin_sdata[32][32]; __shared__ Scalar init_cos_sdata[32][32]; __shared__ Scalar init_sin_sdata[32][32]; int k_id = threadIdx.x + blockIdx.x * blockDim.x; if (k_id < N_k){ Scalar sum_cos = Scalar(0.0); Scalar sum_sin = Scalar(0.0); Scalar sum_init_cos = Scalar(0.0); Scalar sum_init_sin = Scalar(0.0); for (int start = 0; start < num_blocks_n; start += blockDim.y){ __syncthreads(); if (start + threadIdx.y < num_blocks_n){ cos_sdata[threadIdx.x][threadIdx.y] = d_partial_sum_cos[k_id + N_k*(start + threadIdx.y)]; sin_sdata[threadIdx.x][threadIdx.y] = d_partial_sum_sin[k_id + N_k*(start + threadIdx.y)]; init_cos_sdata[threadIdx.x][threadIdx.y] = d_partial_sum_init_cos[k_id + N_k*(start + threadIdx.y)]; init_sin_sdata[threadIdx.x][threadIdx.y] = d_partial_sum_init_sin[k_id + N_k*(start + threadIdx.y)]; } else{ cos_sdata[threadIdx.x][threadIdx.y] = Scalar(0.0); sin_sdata[threadIdx.x][threadIdx.y] = Scalar(0.0); init_cos_sdata[threadIdx.x][threadIdx.y] = Scalar(0.0); init_sin_sdata[threadIdx.x][threadIdx.y] = Scalar(0.0); } __syncthreads(); // reduce sum in parallel int offs = blockDim.y >> 1; while (offs > 0){ if (threadIdx.y < offs){ cos_sdata[threadIdx.x][threadIdx.y] += cos_sdata[threadIdx.x][threadIdx.y + offs]; sin_sdata[threadIdx.x][threadIdx.y] += sin_sdata[threadIdx.x][threadIdx.y + offs]; init_cos_sdata[threadIdx.x][threadIdx.y] += init_cos_sdata[threadIdx.x][threadIdx.y + offs]; 
init_sin_sdata[threadIdx.x][threadIdx.y] += init_sin_sdata[threadIdx.x][threadIdx.y + offs]; } offs >>= 1; __syncthreads(); } sum_cos += cos_sdata[threadIdx.x][0]; sum_sin += sin_sdata[threadIdx.x][0]; sum_init_cos += init_cos_sdata[threadIdx.x][0]; sum_init_sin += init_sin_sdata[threadIdx.x][0]; } if (threadIdx.y == 0){ d_sum_cos[k_id] = sum_cos; d_sum_sin[k_id] = sum_sin; d_sum_init_cos[k_id] = sum_init_cos; d_sum_init_sin[k_id] = sum_init_sin; } } } cudaError_t gpu_dynamic_structure_factor_compute(const Scalar3 *d_init_pos, const Scalar3 *d_pos, const Scalar3 *d_k, Scalar *d_partial_sum_cos, Scalar *d_partial_sum_sin, Scalar *d_partial_sum_init_cos, Scalar *d_partial_sum_init_sin, Scalar *d_sum_cos, Scalar *d_sum_sin, Scalar *d_sum_init_cos, Scalar *d_sum_init_sin, unsigned int num_blocks_n, unsigned int num_blocks_k, int N, int N_k) { // define grid to run kernel int block_size = 32; dim3 blocks( num_blocks_k, num_blocks_n); dim3 threads(block_size, block_size); dim3 blocks1(num_blocks_k, 1); dim3 threads1(block_size, block_size); // run the kernel gpu_dynamic_structure_factor_compute_kernel<<< blocks, threads>>>(d_init_pos, d_pos, d_k, d_partial_sum_cos, d_partial_sum_sin, d_partial_sum_init_cos, d_partial_sum_init_sin, N, N_k); // run the summation kernel gpu_reduce_dynamic_partial_sum_kernel<<< blocks1, threads1>>>(d_partial_sum_cos, d_partial_sum_sin, d_partial_sum_init_cos, d_partial_sum_init_sin, d_sum_cos, d_sum_sin, d_sum_init_cos, d_sum_init_sin, num_blocks_n, N_k); return cudaSuccess; }
fbf1b662ec0909a0dc6ed0ffd3b8bc217cf3ffa2.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hiprand/hiprand_kernel.h> #include <hip/hip_runtime.h> #include <time.h> #define DATA_SIZE 250000 #define KSELECT 50 #define BLOCK_NUM 1 #define THREAD_NUM 50 // Check CUDA bool InitCUDA() { int count; hipGetDeviceCount(&count); if(count == 0) { fprintf(stderr, "There is no device.\n"); return false; } int i; for(i = 0; i < count; i++) { hipDeviceProp_t prop; if(hipGetDeviceProperties(&prop, i) == hipSuccess) { if(prop.major >= 1) { break; } } } if(i == count) { fprintf(stderr, "There is no device supporting CUDA 1.x.\n"); return false; } hipSetDevice(i); return true; } __global__ static void randomSelect(int* result){ const int tid = threadIdx.x; const int bid = blockIdx.x; int size = (int)(DATA_SIZE/KSELECT); // generate number int start = bid*THREAD_NUM+tid*size+1; // random select hiprandState_t state; hiprand_init(tid,tid,0,&state); result[tid] = hiprand(&state)%(size)+start; } int main() { if(!InitCUDA()) { return 0; } printf("CUDA initialized.\n"); int*result; clock_t begin = clock(); hipMalloc((void**) &result, sizeof(int) * KSELECT); hipLaunchKernelGGL(( randomSelect), dim3(BLOCK_NUM), dim3(THREAD_NUM),0, 0, result); int result_host[KSELECT]; hipMemcpy(&result_host, result, sizeof(int) * KSELECT,hipMemcpyDeviceToHost); hipFree(result); clock_t end = clock(); for(int i = 0; i < KSELECT; i++) { printf("%d ",result_host[i]); } // count time double time_spent = (double)(end - begin) / CLOCKS_PER_SEC; printf("\n Spend: %f s",time_spent); return 0; }
fbf1b662ec0909a0dc6ed0ffd3b8bc217cf3ffa2.cu
#include <stdio.h> #include <curand_kernel.h> #include <cuda_runtime.h> #include <time.h> #define DATA_SIZE 250000 #define KSELECT 50 #define BLOCK_NUM 1 #define THREAD_NUM 50 // Check CUDA bool InitCUDA() { int count; cudaGetDeviceCount(&count); if(count == 0) { fprintf(stderr, "There is no device.\n"); return false; } int i; for(i = 0; i < count; i++) { cudaDeviceProp prop; if(cudaGetDeviceProperties(&prop, i) == cudaSuccess) { if(prop.major >= 1) { break; } } } if(i == count) { fprintf(stderr, "There is no device supporting CUDA 1.x.\n"); return false; } cudaSetDevice(i); return true; } __global__ static void randomSelect(int* result){ const int tid = threadIdx.x; const int bid = blockIdx.x; int size = (int)(DATA_SIZE/KSELECT); // generate number int start = bid*THREAD_NUM+tid*size+1; // random select curandState state; curand_init(tid,tid,0,&state); result[tid] = curand(&state)%(size)+start; } int main() { if(!InitCUDA()) { return 0; } printf("CUDA initialized.\n"); int*result; clock_t begin = clock(); cudaMalloc((void**) &result, sizeof(int) * KSELECT); randomSelect<<<BLOCK_NUM, THREAD_NUM,0>>>(result); int result_host[KSELECT]; cudaMemcpy(&result_host, result, sizeof(int) * KSELECT,cudaMemcpyDeviceToHost); cudaFree(result); clock_t end = clock(); for(int i = 0; i < KSELECT; i++) { printf("%d ",result_host[i]); } // count time double time_spent = (double)(end - begin) / CLOCKS_PER_SEC; printf("\n Spend: %f s",time_spent); return 0; }
b925a0c0db8cf43539819bb39767f7d4f7ea5851.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <arbor/gpu/gpu_common.hpp> #include <arbor/gpu/math_cu.hpp> #include <arbor/gpu/reduce_by_key.hpp> #include <arbor/mechanism_abi.h> namespace arb { namespace bbp_catalogue { #define PPACK_IFACE_BLOCK \ auto _pp_var_width __attribute__((unused)) = params_.width;\ auto _pp_var_n_detectors __attribute__((unused)) = params_.n_detectors;\ auto* _pp_var_vec_ci __attribute__((unused)) = params_.vec_ci;\ auto* _pp_var_vec_di __attribute__((unused)) = params_.vec_di;\ auto* _pp_var_vec_t __attribute__((unused)) = params_.vec_t;\ auto* _pp_var_vec_dt __attribute__((unused)) = params_.vec_dt;\ auto* _pp_var_vec_v __attribute__((unused)) = params_.vec_v;\ auto* _pp_var_vec_i __attribute__((unused)) = params_.vec_i;\ auto* _pp_var_vec_g __attribute__((unused)) = params_.vec_g;\ auto* _pp_var_temperature_degC __attribute__((unused)) = params_.temperature_degC;\ auto* _pp_var_diam_um __attribute__((unused)) = params_.diam_um;\ auto* _pp_var_time_since_spike __attribute__((unused)) = params_.time_since_spike;\ auto* _pp_var_node_index __attribute__((unused)) = params_.node_index;\ auto* _pp_var_peer_index __attribute__((unused)) = params_.peer_index;\ auto* _pp_var_multiplicity __attribute__((unused)) = params_.multiplicity;\ auto* _pp_var_state_vars __attribute__((unused)) = params_.state_vars;\ auto* _pp_var_weight __attribute__((unused)) = params_.weight;\ auto& _pp_var_events __attribute__((unused)) = params_.events;\ auto& _pp_var_mechanism_id __attribute__((unused)) = params_.mechanism_id;\ auto& _pp_var_index_constraints __attribute__((unused)) = params_.index_constraints;\ auto _pp_var_F __attribute__((unused)) = params_.globals[0];\ auto* _pp_var_cai __attribute__((unused)) = params_.state_vars[0];\ auto* _pp_var_gamma __attribute__((unused)) = params_.parameters[0];\ auto* _pp_var_decay __attribute__((unused)) = params_.parameters[1];\ auto* _pp_var_depth 
__attribute__((unused)) = params_.parameters[2];\ auto* _pp_var_minCai __attribute__((unused)) = params_.parameters[3];\ auto* _pp_var_initCai __attribute__((unused)) = params_.parameters[4];\ auto& _pp_var_ion_ca __attribute__((unused)) = params_.ion_states[0];\ auto* _pp_var_ion_ca_index __attribute__((unused)) = params_.ion_states[0].index;\ //End of IFACEBLOCK namespace { using ::arb::gpu::exprelr; using ::arb::gpu::safeinv; using ::arb::gpu::min; using ::arb::gpu::max; __global__ void init(arb_mechanism_ppack params_) { int n_ = params_.width; int tid_ = threadIdx.x + blockDim.x*blockIdx.x; PPACK_IFACE_BLOCK; if (tid_<n_) { _pp_var_cai[tid_] = _pp_var_initCai[tid_]; } } __global__ void multiply(arb_mechanism_ppack params_) { PPACK_IFACE_BLOCK; auto tid_ = threadIdx.x + blockDim.x*blockIdx.x; auto idx_ = blockIdx.y; if(tid_<_pp_var_width) { _pp_var_state_vars[idx_][tid_] *= _pp_var_multiplicity[tid_]; } } __global__ void advance_state(arb_mechanism_ppack params_) { int n_ = params_.width; int tid_ = threadIdx.x + blockDim.x*blockIdx.x; PPACK_IFACE_BLOCK; if (tid_<n_) { auto ion_ca_indexi_ = _pp_var_ion_ca_index[tid_]; auto node_indexi_ = _pp_var_node_index[tid_]; arb_value_type dt = _pp_var_vec_dt[node_indexi_]; arb_value_type ica = 0.10000000000000001*_pp_var_ion_ca.current_density[ion_ca_indexi_]; arb_value_type ll0_, ba_0_, a_0_, ll1_; ll1_ = 0.; ll0_ = 0.; a_0_ = -( 1.0/_pp_var_decay[tid_]); ba_0_ = ( -5000.0*ica*_pp_var_gamma[tid_]/(_pp_var_F*_pp_var_depth[tid_])- -_pp_var_minCai[tid_]/_pp_var_decay[tid_])/a_0_; ll0_ = a_0_*dt; ll1_ = ( 1.0+ 0.5*ll0_)/( 1.0- 0.5*ll0_); _pp_var_cai[tid_] = -ba_0_+(_pp_var_cai[tid_]+ba_0_)*ll1_; } } __global__ void write_ions(arb_mechanism_ppack params_) { int n_ = params_.width; int tid_ = threadIdx.x + blockDim.x*blockIdx.x; PPACK_IFACE_BLOCK; if (tid_<n_) { auto ion_ca_indexi_ = _pp_var_ion_ca_index[tid_]; arb_value_type cai_shadowed_ = 0; cai_shadowed_ = _pp_var_cai[tid_]; 
_pp_var_ion_ca.internal_concentration[ion_ca_indexi_] = fma(_pp_var_weight[tid_], cai_shadowed_, _pp_var_ion_ca.internal_concentration[ion_ca_indexi_]); } } } // namespace void mechanism_CaDynamics_E2_gpu_init_(arb_mechanism_ppack* p) { auto n = p->width; unsigned block_dim = 128; unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim); hipLaunchKernelGGL(( init), dim3(grid_dim), dim3(block_dim), 0, 0, *p); if (!p->multiplicity) return; hipLaunchKernelGGL(( multiply), dim3(dim3{grid_dim), dim3(1}), block_dim, 0, *p); } void mechanism_CaDynamics_E2_gpu_compute_currents_(arb_mechanism_ppack* p) {} void mechanism_CaDynamics_E2_gpu_advance_state_(arb_mechanism_ppack* p) { auto n = p->width; unsigned block_dim = 128; unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim); hipLaunchKernelGGL(( advance_state), dim3(grid_dim), dim3(block_dim), 0, 0, *p); } void mechanism_CaDynamics_E2_gpu_write_ions_(arb_mechanism_ppack* p) { auto n = p->width; unsigned block_dim = 128; unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim); hipLaunchKernelGGL(( write_ions), dim3(grid_dim), dim3(block_dim), 0, 0, *p); } void mechanism_CaDynamics_E2_gpu_post_event_(arb_mechanism_ppack* p) {} void mechanism_CaDynamics_E2_gpu_apply_events_(arb_mechanism_ppack* p, arb_deliverable_event_stream* events) {} } // namespace bbp_catalogue } // namespace arb
b925a0c0db8cf43539819bb39767f7d4f7ea5851.cu
#include <arbor/gpu/gpu_common.hpp> #include <arbor/gpu/math_cu.hpp> #include <arbor/gpu/reduce_by_key.hpp> #include <arbor/mechanism_abi.h> namespace arb { namespace bbp_catalogue { #define PPACK_IFACE_BLOCK \ auto _pp_var_width __attribute__((unused)) = params_.width;\ auto _pp_var_n_detectors __attribute__((unused)) = params_.n_detectors;\ auto* _pp_var_vec_ci __attribute__((unused)) = params_.vec_ci;\ auto* _pp_var_vec_di __attribute__((unused)) = params_.vec_di;\ auto* _pp_var_vec_t __attribute__((unused)) = params_.vec_t;\ auto* _pp_var_vec_dt __attribute__((unused)) = params_.vec_dt;\ auto* _pp_var_vec_v __attribute__((unused)) = params_.vec_v;\ auto* _pp_var_vec_i __attribute__((unused)) = params_.vec_i;\ auto* _pp_var_vec_g __attribute__((unused)) = params_.vec_g;\ auto* _pp_var_temperature_degC __attribute__((unused)) = params_.temperature_degC;\ auto* _pp_var_diam_um __attribute__((unused)) = params_.diam_um;\ auto* _pp_var_time_since_spike __attribute__((unused)) = params_.time_since_spike;\ auto* _pp_var_node_index __attribute__((unused)) = params_.node_index;\ auto* _pp_var_peer_index __attribute__((unused)) = params_.peer_index;\ auto* _pp_var_multiplicity __attribute__((unused)) = params_.multiplicity;\ auto* _pp_var_state_vars __attribute__((unused)) = params_.state_vars;\ auto* _pp_var_weight __attribute__((unused)) = params_.weight;\ auto& _pp_var_events __attribute__((unused)) = params_.events;\ auto& _pp_var_mechanism_id __attribute__((unused)) = params_.mechanism_id;\ auto& _pp_var_index_constraints __attribute__((unused)) = params_.index_constraints;\ auto _pp_var_F __attribute__((unused)) = params_.globals[0];\ auto* _pp_var_cai __attribute__((unused)) = params_.state_vars[0];\ auto* _pp_var_gamma __attribute__((unused)) = params_.parameters[0];\ auto* _pp_var_decay __attribute__((unused)) = params_.parameters[1];\ auto* _pp_var_depth __attribute__((unused)) = params_.parameters[2];\ auto* _pp_var_minCai __attribute__((unused)) = 
params_.parameters[3];\ auto* _pp_var_initCai __attribute__((unused)) = params_.parameters[4];\ auto& _pp_var_ion_ca __attribute__((unused)) = params_.ion_states[0];\ auto* _pp_var_ion_ca_index __attribute__((unused)) = params_.ion_states[0].index;\ //End of IFACEBLOCK namespace { using ::arb::gpu::exprelr; using ::arb::gpu::safeinv; using ::arb::gpu::min; using ::arb::gpu::max; __global__ void init(arb_mechanism_ppack params_) { int n_ = params_.width; int tid_ = threadIdx.x + blockDim.x*blockIdx.x; PPACK_IFACE_BLOCK; if (tid_<n_) { _pp_var_cai[tid_] = _pp_var_initCai[tid_]; } } __global__ void multiply(arb_mechanism_ppack params_) { PPACK_IFACE_BLOCK; auto tid_ = threadIdx.x + blockDim.x*blockIdx.x; auto idx_ = blockIdx.y; if(tid_<_pp_var_width) { _pp_var_state_vars[idx_][tid_] *= _pp_var_multiplicity[tid_]; } } __global__ void advance_state(arb_mechanism_ppack params_) { int n_ = params_.width; int tid_ = threadIdx.x + blockDim.x*blockIdx.x; PPACK_IFACE_BLOCK; if (tid_<n_) { auto ion_ca_indexi_ = _pp_var_ion_ca_index[tid_]; auto node_indexi_ = _pp_var_node_index[tid_]; arb_value_type dt = _pp_var_vec_dt[node_indexi_]; arb_value_type ica = 0.10000000000000001*_pp_var_ion_ca.current_density[ion_ca_indexi_]; arb_value_type ll0_, ba_0_, a_0_, ll1_; ll1_ = 0.; ll0_ = 0.; a_0_ = -( 1.0/_pp_var_decay[tid_]); ba_0_ = ( -5000.0*ica*_pp_var_gamma[tid_]/(_pp_var_F*_pp_var_depth[tid_])- -_pp_var_minCai[tid_]/_pp_var_decay[tid_])/a_0_; ll0_ = a_0_*dt; ll1_ = ( 1.0+ 0.5*ll0_)/( 1.0- 0.5*ll0_); _pp_var_cai[tid_] = -ba_0_+(_pp_var_cai[tid_]+ba_0_)*ll1_; } } __global__ void write_ions(arb_mechanism_ppack params_) { int n_ = params_.width; int tid_ = threadIdx.x + blockDim.x*blockIdx.x; PPACK_IFACE_BLOCK; if (tid_<n_) { auto ion_ca_indexi_ = _pp_var_ion_ca_index[tid_]; arb_value_type cai_shadowed_ = 0; cai_shadowed_ = _pp_var_cai[tid_]; _pp_var_ion_ca.internal_concentration[ion_ca_indexi_] = fma(_pp_var_weight[tid_], cai_shadowed_, 
_pp_var_ion_ca.internal_concentration[ion_ca_indexi_]); } } } // namespace void mechanism_CaDynamics_E2_gpu_init_(arb_mechanism_ppack* p) { auto n = p->width; unsigned block_dim = 128; unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim); init<<<grid_dim, block_dim>>>(*p); if (!p->multiplicity) return; multiply<<<dim3{grid_dim, 1}, block_dim>>>(*p); } void mechanism_CaDynamics_E2_gpu_compute_currents_(arb_mechanism_ppack* p) {} void mechanism_CaDynamics_E2_gpu_advance_state_(arb_mechanism_ppack* p) { auto n = p->width; unsigned block_dim = 128; unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim); advance_state<<<grid_dim, block_dim>>>(*p); } void mechanism_CaDynamics_E2_gpu_write_ions_(arb_mechanism_ppack* p) { auto n = p->width; unsigned block_dim = 128; unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim); write_ions<<<grid_dim, block_dim>>>(*p); } void mechanism_CaDynamics_E2_gpu_post_event_(arb_mechanism_ppack* p) {} void mechanism_CaDynamics_E2_gpu_apply_events_(arb_mechanism_ppack* p, arb_deliverable_event_stream* events) {} } // namespace bbp_catalogue } // namespace arb
513bc71be23128af468cc0c63d724789f41f5c8b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cudapars.h" #include "paramssteeringtest1.h" ///////////////////////////////////// // standard imports ///////////////////////////////////// #include <stdio.h> #include <math.h> #include "step.h" ///////////////////////////////////// // kernel function (CUDA device) ///////////////////////////////////// #include "gradops_hdr2.cuh" __global__ void hyperdifrhosource2_parallel(struct params *p, real *w, real *wnew, real *wmod, real *dwn1, real *wd, int order, int ordero, real *wtemp, int field, int dim, real dt) { // compute the global index in the vector from // the number of the current block, blockIdx, // the number of threads per block, blockDim, // and the number of the current thread within the block, threadIdx //int i = blockIdx.x * blockDim.x + threadIdx.x; //int j = blockIdx.y * blockDim.y + threadIdx.y; int iindex = blockIdx.x * blockDim.x + threadIdx.x; int i,j; int ii,ii1,ii0; real fip,fim1,tmpc; int index,k; int ni=p->n[0]; int nj=p->n[1]; //real dt=p->dt; real dy=p->dx[1]; real dx=p->dx[0]; //real g=p->g; // dt=1.0; //dt=0.05; //enum vars rho, mom1, mom2, mom3, energy, b1, b2, b3; real rdx; int ip,jp,ipg,jpg; jp=iindex/(ni/(p->npgp[0])); ip=iindex-(jp*(ni/(p->npgp[0]))); rdx=(((p->dx[0])*(dim==0))+(p->dx[1])*(dim==1)); for(ipg=0;ipg<(p->npgp[0]);ipg++) for(jpg=0;jpg<(p->npgp[1]);jpg++) { i=ip*(p->npgp[0])+ipg; j=jp*(p->npgp[1])+jpg; //if(i>1 && j >1 && i<((p->n[0])-2) && j<((p->n[1])-2)) //if(i>1 && j >1 && i<((p->n[0])-1) && j<((p->n[1])-1)) //if(i>32 && j >32 && i<((p->n[0])-32) && j<((p->n[1])-32)) if(i<((p->n[0])) && j<((p->n[1]))) { //dwn1[fencode_hdr2(p,i,j,field)]=( wd[fencode_hdr2(p,i,j,hdnur)] * grad1r_hdr2(wmod+order*NVAR*(p->n[0])*(p->n[1]),p,i,j,rho,dim) - wd[fencode_hdr2(p,i,j,hdnul)] *grad1l_hdr2(wmod+order*NVAR*(p->n[0])*(p->n[1]),p,i,j,rho,dim) )/rdx; //dwn1[fencode_hdr2(p,i,j,field)]=( wtemp[fencode_hdr2(p,i,j,hdnur)] * 
grad1r_hdr2(wmod+order*NVAR*(p->n[0])*(p->n[1]),p,i,j,rho,dim) - wtemp[fencode_hdr2(p,i,j,hdnul)] *grad1l_hdr2(wmod+order*NVAR*(p->n[0])*(p->n[1]),p,i,j,rho,dim) ); dwn1[fencode_hdr2(p,i,j,field)]=( wd[fencode_hdr2(p,i,j,hdnur)] * wtemp[fencode_hdr2(p,i,j,tmp1)] - wd[fencode_hdr2(p,i,j,hdnul)] *wtemp[fencode_hdr2(p,i,j,tmp2)] )/rdx; wmod[fencode_hdr2(p,i,j,field)+(ordero*NVAR*(p->n[0])*(p->n[1]))]=wmod[fencode_hdr2(p,i,j,field)+(ordero*NVAR*(p->n[0])*(p->n[1]))]+dt*dwn1[fencode_hdr2(p,i,j,field)]; } } __syncthreads(); } ///////////////////////////////////// // error checking routine ///////////////////////////////////// void checkErrors_hdr2(char *label) { // we need to synchronise first to catch errors due to // asynchroneous operations that would otherwise // potentially go unnoticed hipError_t err; err = hipDeviceSynchronize(); if (err != hipSuccess) { char *e = (char*) hipGetErrorString(err); fprintf(stderr, "CUDA Error: %s (at %s)", e, label); } err = hipGetLastError(); if (err != hipSuccess) { char *e = (char*) hipGetErrorString(err); fprintf(stderr, "CUDA Error: %s (at %s)", e, label); } } int cuhyperdifrhosource2(struct params **p, real **w, real **wnew, struct params **d_p, real **d_w, real **d_wnew, real **d_wmod, real **d_dwn1, real **d_wd, int order, int ordero,real **d_wtemp, int field, int dim, real dt) { //printf("calling propagate solution\n"); //dim3 dimBlock(blocksize, blocksize); //dim3 dimGrid(((*p)->n[0])/dimBlock.x,((*p)->n[1])/dimBlock.y); dim3 dimBlock(dimblock, 1); //dim3 dimGrid(((*p)->n[0])/dimBlock.x,((*p)->n[1])/dimBlock.y); dim3 dimGrid(((*p)->n[0])/dimBlock.x,((*p)->n[1])/dimBlock.y); int numBlocks = (((*p)->n[0])*((*p)->n[1])+numThreadsPerBlock-1) / numThreadsPerBlock; //__global__ void prop_parallel(struct params *p, real *b, real *w, real *wnew, real *wmod, // real *dwn1, real *dwn2, real *dwn3, real *dwn4, real *wd) //init_parallel(struct params *p, real *b, real *u, real *v, real *h) hipLaunchKernelGGL(( 
hyperdifrhosource2_parallel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, *d_p,*d_w,*d_wnew, *d_wmod, *d_dwn1, *d_wd, order,ordero,*d_wtemp, field, dim,dt); //prop_parallel<<<dimGrid,dimBlock>>>(*d_p,*d_b,*d_u,*d_v,*d_h); //printf("called prop\n"); hipDeviceSynchronize(); //boundary_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_b,*d_w,*d_wnew); //printf("called boundary\n"); //hipDeviceSynchronize(); //update_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_b,*d_w,*d_wnew); //printf("called update\n"); // hipDeviceSynchronize(); // hipMemcpy(*w, *d_w, NVAR*((*p)->n[0])* ((*p)->n[1])*sizeof(real), hipMemcpyDeviceToHost); //hipMemcpy(*wnew, *d_wnew, NVAR*((*p)->n[0])* ((*p)->n[1])*sizeof(real), hipMemcpyDeviceToHost); //hipMemcpy(*b, *d_b, (((*p)->n[0])* ((*p)->n[1]))*sizeof(real), hipMemcpyDeviceToHost); //checkErrors("copy data from device"); }
513bc71be23128af468cc0c63d724789f41f5c8b.cu
#include "cudapars.h" #include "paramssteeringtest1.h" ///////////////////////////////////// // standard imports ///////////////////////////////////// #include <stdio.h> #include <math.h> #include "step.h" ///////////////////////////////////// // kernel function (CUDA device) ///////////////////////////////////// #include "gradops_hdr2.cuh" __global__ void hyperdifrhosource2_parallel(struct params *p, real *w, real *wnew, real *wmod, real *dwn1, real *wd, int order, int ordero, real *wtemp, int field, int dim, real dt) { // compute the global index in the vector from // the number of the current block, blockIdx, // the number of threads per block, blockDim, // and the number of the current thread within the block, threadIdx //int i = blockIdx.x * blockDim.x + threadIdx.x; //int j = blockIdx.y * blockDim.y + threadIdx.y; int iindex = blockIdx.x * blockDim.x + threadIdx.x; int i,j; int ii,ii1,ii0; real fip,fim1,tmpc; int index,k; int ni=p->n[0]; int nj=p->n[1]; //real dt=p->dt; real dy=p->dx[1]; real dx=p->dx[0]; //real g=p->g; // dt=1.0; //dt=0.05; //enum vars rho, mom1, mom2, mom3, energy, b1, b2, b3; real rdx; int ip,jp,ipg,jpg; jp=iindex/(ni/(p->npgp[0])); ip=iindex-(jp*(ni/(p->npgp[0]))); rdx=(((p->dx[0])*(dim==0))+(p->dx[1])*(dim==1)); for(ipg=0;ipg<(p->npgp[0]);ipg++) for(jpg=0;jpg<(p->npgp[1]);jpg++) { i=ip*(p->npgp[0])+ipg; j=jp*(p->npgp[1])+jpg; //if(i>1 && j >1 && i<((p->n[0])-2) && j<((p->n[1])-2)) //if(i>1 && j >1 && i<((p->n[0])-1) && j<((p->n[1])-1)) //if(i>32 && j >32 && i<((p->n[0])-32) && j<((p->n[1])-32)) if(i<((p->n[0])) && j<((p->n[1]))) { //dwn1[fencode_hdr2(p,i,j,field)]=( wd[fencode_hdr2(p,i,j,hdnur)] * grad1r_hdr2(wmod+order*NVAR*(p->n[0])*(p->n[1]),p,i,j,rho,dim) - wd[fencode_hdr2(p,i,j,hdnul)] *grad1l_hdr2(wmod+order*NVAR*(p->n[0])*(p->n[1]),p,i,j,rho,dim) )/rdx; //dwn1[fencode_hdr2(p,i,j,field)]=( wtemp[fencode_hdr2(p,i,j,hdnur)] * grad1r_hdr2(wmod+order*NVAR*(p->n[0])*(p->n[1]),p,i,j,rho,dim) - wtemp[fencode_hdr2(p,i,j,hdnul)] 
*grad1l_hdr2(wmod+order*NVAR*(p->n[0])*(p->n[1]),p,i,j,rho,dim) ); dwn1[fencode_hdr2(p,i,j,field)]=( wd[fencode_hdr2(p,i,j,hdnur)] * wtemp[fencode_hdr2(p,i,j,tmp1)] - wd[fencode_hdr2(p,i,j,hdnul)] *wtemp[fencode_hdr2(p,i,j,tmp2)] )/rdx; wmod[fencode_hdr2(p,i,j,field)+(ordero*NVAR*(p->n[0])*(p->n[1]))]=wmod[fencode_hdr2(p,i,j,field)+(ordero*NVAR*(p->n[0])*(p->n[1]))]+dt*dwn1[fencode_hdr2(p,i,j,field)]; } } __syncthreads(); } ///////////////////////////////////// // error checking routine ///////////////////////////////////// void checkErrors_hdr2(char *label) { // we need to synchronise first to catch errors due to // asynchroneous operations that would otherwise // potentially go unnoticed cudaError_t err; err = cudaThreadSynchronize(); if (err != cudaSuccess) { char *e = (char*) cudaGetErrorString(err); fprintf(stderr, "CUDA Error: %s (at %s)", e, label); } err = cudaGetLastError(); if (err != cudaSuccess) { char *e = (char*) cudaGetErrorString(err); fprintf(stderr, "CUDA Error: %s (at %s)", e, label); } } int cuhyperdifrhosource2(struct params **p, real **w, real **wnew, struct params **d_p, real **d_w, real **d_wnew, real **d_wmod, real **d_dwn1, real **d_wd, int order, int ordero,real **d_wtemp, int field, int dim, real dt) { //printf("calling propagate solution\n"); //dim3 dimBlock(blocksize, blocksize); //dim3 dimGrid(((*p)->n[0])/dimBlock.x,((*p)->n[1])/dimBlock.y); dim3 dimBlock(dimblock, 1); //dim3 dimGrid(((*p)->n[0])/dimBlock.x,((*p)->n[1])/dimBlock.y); dim3 dimGrid(((*p)->n[0])/dimBlock.x,((*p)->n[1])/dimBlock.y); int numBlocks = (((*p)->n[0])*((*p)->n[1])+numThreadsPerBlock-1) / numThreadsPerBlock; //__global__ void prop_parallel(struct params *p, real *b, real *w, real *wnew, real *wmod, // real *dwn1, real *dwn2, real *dwn3, real *dwn4, real *wd) //init_parallel(struct params *p, real *b, real *u, real *v, real *h) hyperdifrhosource2_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_w,*d_wnew, *d_wmod, *d_dwn1, *d_wd, order,ordero,*d_wtemp, field, 
dim,dt); //prop_parallel<<<dimGrid,dimBlock>>>(*d_p,*d_b,*d_u,*d_v,*d_h); //printf("called prop\n"); cudaThreadSynchronize(); //boundary_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_b,*d_w,*d_wnew); //printf("called boundary\n"); //cudaThreadSynchronize(); //update_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_b,*d_w,*d_wnew); //printf("called update\n"); // cudaThreadSynchronize(); // cudaMemcpy(*w, *d_w, NVAR*((*p)->n[0])* ((*p)->n[1])*sizeof(real), cudaMemcpyDeviceToHost); //cudaMemcpy(*wnew, *d_wnew, NVAR*((*p)->n[0])* ((*p)->n[1])*sizeof(real), cudaMemcpyDeviceToHost); //cudaMemcpy(*b, *d_b, (((*p)->n[0])* ((*p)->n[1]))*sizeof(real), cudaMemcpyDeviceToHost); //checkErrors("copy data from device"); }
c70e52e3717b845adc27f0391ab1e83f837dfa6f.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include "hip/hip_runtime.h" #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) #define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1) void check_error (const char* message) { hipError_t error = hipGetLastError (); if (error != hipSuccess) { printf ("CUDA error : %s, %s\n", message, hipGetErrorString (error)); exit(-1); } } __global__ void j2d64pt (double * __restrict__ l_in, double * __restrict__ l_out, int N) { //Determing the block's indices int i0 = (int)(blockIdx.x)*(int)(blockDim.x); int i = max(i0,4) + (int)(threadIdx.x); int j0 = (int)(blockIdx.y)*(int)(blockDim.y); int j = max(j0,4) + (int)(threadIdx.y); double (*in)[8200] = (double (*)[8200]) l_in; double (*out)[8200] = (double (*)[8200]) l_out; if (i>=4 & j>=4 & i<=N-5 & j<=N-5) { out[j][i] = (in[j-4][i-4] - in[j-4][i+4] - in[j+4][i-4] + in[j+4][i+4]) * 1.274495 + (-in[j-4][i-3] + in[j-4][i+3] + in[j-3][i+4] - in[j-3][i-4] + in[j+3][i-4] - in[j+3][i+4] + in[j+4][i-3] - in[j+4][i+3]) * 0.000136017 + (in[j-4][i-2] - in[j-4][i+2] + in[j-2][i-4] - in[j-2][i+4] - in[j+2][i-4] + in[j+2][i+4] - in[j+4][i-2] + in[j+4][i+2]) * 0.000714000 + (-in[j-4][i-1] + in[j-4][i+1] - in[j-1][i-4] + in[j-1][i+4] + in[j+1][i-4] - in[j+1][i+4] + in[j+4][i-1] - in[j+4][i+1]) * 0.00285600 + (in[j-3][i-3] - in[j-3][i+3] - in[j+3][i-3] + in[j+3][i+3]) * 0.00145161 + (-in[j-3][i-2] + in[j-3][i+2] - in[j-2][i-3] + in[j-2][i+3] + in[j+2][i-3] - in[j+2][i+3] + in[j+3][i-2] - in[j+3][i+2]) * 0.00762000 + (in[j-3][i-1] - in[j-3][i+1] + in[j-1][i-3] - in[j-1][i+3] - in[j+1][i-3] + in[j+1][i+3] - in[j+3][i-1] + in[j+3][i+1]) * 0.0304800 + (in[j-2][i-2] - in[j-2][i+2] - in[j+2][i-2] + in[j+2][i+2]) * 0.0400000 + (-in[j-2][i-1] + in[j-2][i+1] - in[j-1][i-2] + in[j-1][i+2] + in[j+1][i-2] - in[j+1][i+2] + in[j+2][i-1] - in[j+2][i+1]) * 0.160000 + (in[j-1][i-1] - in[j-1][i+1] - in[j+1][i-1] + in[j+1][i+1]) * 0.640000; } } extern 
"C" void host_code (double *h_in, double *h_out, int N) { double *in; hipMalloc (&in, sizeof(double)*N*N); check_error ("Failed to allocate device memory for in\n"); hipMemcpy (in, h_in, sizeof(double)*N*N, hipMemcpyHostToDevice); double *out; hipMalloc (&out, sizeof(double)*N*N); check_error ("Failed to allocate device memory for out\n"); dim3 blockconfig (16, 8); dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, blockconfig.y)); hipLaunchKernelGGL(( j2d64pt), dim3(gridconfig), dim3(blockconfig), 0, 0, in, out, N); hipMemcpy (h_out, out, sizeof(double)*N*N, hipMemcpyDeviceToHost); hipFree (in); hipFree (out); }
c70e52e3717b845adc27f0391ab1e83f837dfa6f.cu
#include <stdio.h> #include "cuda.h" #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) #define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1) void check_error (const char* message) { cudaError_t error = cudaGetLastError (); if (error != cudaSuccess) { printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (error)); exit(-1); } } __global__ void j2d64pt (double * __restrict__ l_in, double * __restrict__ l_out, int N) { //Determing the block's indices int i0 = (int)(blockIdx.x)*(int)(blockDim.x); int i = max(i0,4) + (int)(threadIdx.x); int j0 = (int)(blockIdx.y)*(int)(blockDim.y); int j = max(j0,4) + (int)(threadIdx.y); double (*in)[8200] = (double (*)[8200]) l_in; double (*out)[8200] = (double (*)[8200]) l_out; if (i>=4 & j>=4 & i<=N-5 & j<=N-5) { out[j][i] = (in[j-4][i-4] - in[j-4][i+4] - in[j+4][i-4] + in[j+4][i+4]) * 1.274495 + (-in[j-4][i-3] + in[j-4][i+3] + in[j-3][i+4] - in[j-3][i-4] + in[j+3][i-4] - in[j+3][i+4] + in[j+4][i-3] - in[j+4][i+3]) * 0.000136017 + (in[j-4][i-2] - in[j-4][i+2] + in[j-2][i-4] - in[j-2][i+4] - in[j+2][i-4] + in[j+2][i+4] - in[j+4][i-2] + in[j+4][i+2]) * 0.000714000 + (-in[j-4][i-1] + in[j-4][i+1] - in[j-1][i-4] + in[j-1][i+4] + in[j+1][i-4] - in[j+1][i+4] + in[j+4][i-1] - in[j+4][i+1]) * 0.00285600 + (in[j-3][i-3] - in[j-3][i+3] - in[j+3][i-3] + in[j+3][i+3]) * 0.00145161 + (-in[j-3][i-2] + in[j-3][i+2] - in[j-2][i-3] + in[j-2][i+3] + in[j+2][i-3] - in[j+2][i+3] + in[j+3][i-2] - in[j+3][i+2]) * 0.00762000 + (in[j-3][i-1] - in[j-3][i+1] + in[j-1][i-3] - in[j-1][i+3] - in[j+1][i-3] + in[j+1][i+3] - in[j+3][i-1] + in[j+3][i+1]) * 0.0304800 + (in[j-2][i-2] - in[j-2][i+2] - in[j+2][i-2] + in[j+2][i+2]) * 0.0400000 + (-in[j-2][i-1] + in[j-2][i+1] - in[j-1][i-2] + in[j-1][i+2] + in[j+1][i-2] - in[j+1][i+2] + in[j+2][i-1] - in[j+2][i+1]) * 0.160000 + (in[j-1][i-1] - in[j-1][i+1] - in[j+1][i-1] + in[j+1][i+1]) * 0.640000; } } extern "C" void host_code (double *h_in, double *h_out, int N) { double 
*in; cudaMalloc (&in, sizeof(double)*N*N); check_error ("Failed to allocate device memory for in\n"); cudaMemcpy (in, h_in, sizeof(double)*N*N, cudaMemcpyHostToDevice); double *out; cudaMalloc (&out, sizeof(double)*N*N); check_error ("Failed to allocate device memory for out\n"); dim3 blockconfig (16, 8); dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, blockconfig.y)); j2d64pt<<<gridconfig, blockconfig>>> (in, out, N); cudaMemcpy (h_out, out, sizeof(double)*N*N, cudaMemcpyDeviceToHost); cudaFree (in); cudaFree (out); }
5a73e3e486739d7e3cfa71b6b7b24b40e80b3835.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.6.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2015 @precisions normal z -> s d c @author Adrien REMY */ #include "common_magma.h" #include "zgerbt.h" #define block_height 32 #define block_width 4 #define block_length 256 #define NB 64 ///////////////////////////////////////////////////////////////////////////////////////////////////////////// /** Purpose ------- ZPRBT_MVT compute B = UTB to randomize B Arguments --------- @param[in] n INTEGER The number of values of db. n >= 0. @param[in] du COMPLEX_16 array, dimension (n,2) The 2*n vector representing the random butterfly matrix V @param[in,out] db COMPLEX_16 array, dimension (n) The n vector db computed by ZGESV_NOPIV_GPU On exit db = du*db @param[in] queue magma_queue_t Queue to execute in. ********************************************************************/ extern "C" void magmablas_zprbt_mtv_q( magma_int_t n, magmaDoubleComplex *du, magmaDoubleComplex *db, magma_queue_t queue) { /* */ magma_int_t threads = block_length; magma_int_t grid = n/(4*block_length) + ((n%(4*block_length))!=0); hipLaunchKernelGGL(( magmablas_zapply_transpose_vector_kernel), dim3(grid), dim3(threads), 0, queue , n/2, du, n, db, 0); hipLaunchKernelGGL(( magmablas_zapply_transpose_vector_kernel), dim3(grid), dim3(threads), 0, queue , n/2, du, n+n/2, db, n/2); threads = block_length; grid = n/(2*block_length) + ((n%(2*block_length))!=0); hipLaunchKernelGGL(( magmablas_zapply_transpose_vector_kernel), dim3(grid), dim3(threads), 0, queue , n, du, 0, db, 0); } /** @see magmablas_zprbt_mtv_q ********************************************************************/ extern "C" void magmablas_zprbt_mtv( magma_int_t n, magmaDoubleComplex *du, magmaDoubleComplex *db) { magmablas_zprbt_mtv_q(n, du, db, magma_stream); } 
///////////////////////////////////////////////////////////////////////////////////////////////////////////// /** Purpose ------- ZPRBT_MV compute B = VB to obtain the non randomized solution Arguments --------- @param[in] n INTEGER The number of values of db. n >= 0. @param[in,out] db COMPLEX_16 array, dimension (n) The n vector db computed by ZGESV_NOPIV_GPU On exit db = dv*db @param[in] dv COMPLEX_16 array, dimension (n,2) The 2*n vector representing the random butterfly matrix V @param[in] queue magma_queue_t Queue to execute in. ********************************************************************/ extern "C" void magmablas_zprbt_mv_q( magma_int_t n, magmaDoubleComplex *dv, magmaDoubleComplex *db, magma_queue_t queue) { magma_int_t threads = block_length; magma_int_t grid = n/(2*block_length) + ((n%(2*block_length))!=0); hipLaunchKernelGGL(( magmablas_zapply_vector_kernel), dim3(grid), dim3(threads), 0, queue , n, dv, 0, db, 0); threads = block_length; grid = n/(4*block_length) + ((n%(4*block_length))!=0); hipLaunchKernelGGL(( magmablas_zapply_vector_kernel), dim3(grid), dim3(threads), 0, queue , n/2, dv, n, db, 0); hipLaunchKernelGGL(( magmablas_zapply_vector_kernel), dim3(grid), dim3(threads), 0, queue , n/2, dv, n+n/2, db, n/2); } /** @see magmablas_zprbt_mtv_q ********************************************************************/ extern "C" void magmablas_zprbt_mv( magma_int_t n, magmaDoubleComplex *dv, magmaDoubleComplex *db) { magmablas_zprbt_mv_q(n, dv, db, magma_stream); } ///////////////////////////////////////////////////////////////////////////////////////////////////////////// /** Purpose ------- ZPRBT randomize a square general matrix using partial randomized transformation Arguments --------- @param[in] n INTEGER The number of columns and rows of the matrix dA. n >= 0. @param[in,out] dA COMPLEX_16 array, dimension (n,ldda) The n-by-n matrix dA On exit dA = duT*dA*d_V @param[in] ldda INTEGER The leading dimension of the array dA. LDA >= max(1,n). 
@param[in] du COMPLEX_16 array, dimension (n,2) The 2*n vector representing the random butterfly matrix U @param[in] dv COMPLEX_16 array, dimension (n,2) The 2*n vector representing the random butterfly matrix V @param[in] queue magma_queue_t Queue to execute in. ********************************************************************/ extern "C" void magmablas_zprbt_q( magma_int_t n, magmaDoubleComplex *dA, magma_int_t ldda, magmaDoubleComplex *du, magmaDoubleComplex *dv, magma_queue_t queue) { du += ldda; dv += ldda; dim3 threads(block_height, block_width); dim3 grid(n/(4*block_height) + ((n%(4*block_height))!=0), n/(4*block_width) + ((n%(4*block_width))!=0)); hipLaunchKernelGGL(( magmablas_zelementary_multiplication_kernel), dim3(grid), dim3(threads), 0, queue , n/2, dA, 0, ldda, du, 0, dv, 0); hipLaunchKernelGGL(( magmablas_zelementary_multiplication_kernel), dim3(grid), dim3(threads), 0, queue , n/2, dA, ldda*n/2, ldda, du, 0, dv, n/2); hipLaunchKernelGGL(( magmablas_zelementary_multiplication_kernel), dim3(grid), dim3(threads), 0, queue , n/2, dA, n/2, ldda, du, n/2, dv, 0); hipLaunchKernelGGL(( magmablas_zelementary_multiplication_kernel), dim3(grid), dim3(threads), 0, queue , n/2, dA, ldda*n/2+n/2, ldda, du, n/2, dv, n/2); dim3 threads2(block_height, block_width); dim3 grid2(n/(2*block_height) + ((n%(2*block_height))!=0), n/(2*block_width) + ((n%(2*block_width))!=0)); hipLaunchKernelGGL(( magmablas_zelementary_multiplication_kernel), dim3(grid2), dim3(threads2), 0, queue , n, dA, 0, ldda, du, -ldda, dv, -ldda); } /** @see magmablas_zprbt_q ********************************************************************/ extern "C" void magmablas_zprbt( magma_int_t n, magmaDoubleComplex *dA, magma_int_t ldda, magmaDoubleComplex *du, magmaDoubleComplex *dv) { magmablas_zprbt_q(n, dA, ldda, du, dv, magma_stream); } ///////////////////////////////////////////////////////////////////////////////////////////////////////////// 
///////////////////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////////////////// // adds x += r --and-- // copies r = b // each thread does one index, x[i] and r[i] __global__ void zaxpycp2_kernel( int m, magmaDoubleComplex *r, magmaDoubleComplex *x, const magmaDoubleComplex *b) { const int i = threadIdx.x + blockIdx.x*NB; if ( i < m ) { x[i] = MAGMA_Z_ADD( x[i], r[i] ); r[i] = b[i]; } } // ---------------------------------------------------------------------- // adds x += r --and-- // copies r = b extern "C" void magmablas_zaxpycp2_q( magma_int_t m, magmaDoubleComplex *r, magmaDoubleComplex *x, const magmaDoubleComplex *b, magma_queue_t queue ) { dim3 threads( NB ); dim3 grid( (m + NB - 1)/NB ); hipLaunchKernelGGL(( zaxpycp2_kernel) , dim3(grid), dim3(threads), 0, queue , m, r, x, b ); } extern "C" void magmablas_zaxpycp2( magma_int_t m, magmaDoubleComplex *r, magmaDoubleComplex *x, const magmaDoubleComplex *b) { magmablas_zaxpycp2_q( m, r, x, b, magma_stream ); }
5a73e3e486739d7e3cfa71b6b7b24b40e80b3835.cu
/* -- MAGMA (version 1.6.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2015 @precisions normal z -> s d c @author Adrien REMY */ #include "common_magma.h" #include "zgerbt.h" #define block_height 32 #define block_width 4 #define block_length 256 #define NB 64 ///////////////////////////////////////////////////////////////////////////////////////////////////////////// /** Purpose ------- ZPRBT_MVT compute B = UTB to randomize B Arguments --------- @param[in] n INTEGER The number of values of db. n >= 0. @param[in] du COMPLEX_16 array, dimension (n,2) The 2*n vector representing the random butterfly matrix V @param[in,out] db COMPLEX_16 array, dimension (n) The n vector db computed by ZGESV_NOPIV_GPU On exit db = du*db @param[in] queue magma_queue_t Queue to execute in. ********************************************************************/ extern "C" void magmablas_zprbt_mtv_q( magma_int_t n, magmaDoubleComplex *du, magmaDoubleComplex *db, magma_queue_t queue) { /* */ magma_int_t threads = block_length; magma_int_t grid = n/(4*block_length) + ((n%(4*block_length))!=0); magmablas_zapply_transpose_vector_kernel<<< grid, threads, 0, queue >>>(n/2, du, n, db, 0); magmablas_zapply_transpose_vector_kernel<<< grid, threads, 0, queue >>>(n/2, du, n+n/2, db, n/2); threads = block_length; grid = n/(2*block_length) + ((n%(2*block_length))!=0); magmablas_zapply_transpose_vector_kernel<<< grid, threads, 0, queue >>>(n, du, 0, db, 0); } /** @see magmablas_zprbt_mtv_q ********************************************************************/ extern "C" void magmablas_zprbt_mtv( magma_int_t n, magmaDoubleComplex *du, magmaDoubleComplex *db) { magmablas_zprbt_mtv_q(n, du, db, magma_stream); } ///////////////////////////////////////////////////////////////////////////////////////////////////////////// /** Purpose ------- ZPRBT_MV compute B = VB to obtain the non randomized solution Arguments --------- @param[in] n INTEGER The 
number of values of db. n >= 0. @param[in,out] db COMPLEX_16 array, dimension (n) The n vector db computed by ZGESV_NOPIV_GPU On exit db = dv*db @param[in] dv COMPLEX_16 array, dimension (n,2) The 2*n vector representing the random butterfly matrix V @param[in] queue magma_queue_t Queue to execute in. ********************************************************************/ extern "C" void magmablas_zprbt_mv_q( magma_int_t n, magmaDoubleComplex *dv, magmaDoubleComplex *db, magma_queue_t queue) { magma_int_t threads = block_length; magma_int_t grid = n/(2*block_length) + ((n%(2*block_length))!=0); magmablas_zapply_vector_kernel<<< grid, threads, 0, queue >>>(n, dv, 0, db, 0); threads = block_length; grid = n/(4*block_length) + ((n%(4*block_length))!=0); magmablas_zapply_vector_kernel<<< grid, threads, 0, queue >>>(n/2, dv, n, db, 0); magmablas_zapply_vector_kernel<<< grid, threads, 0, queue >>>(n/2, dv, n+n/2, db, n/2); } /** @see magmablas_zprbt_mtv_q ********************************************************************/ extern "C" void magmablas_zprbt_mv( magma_int_t n, magmaDoubleComplex *dv, magmaDoubleComplex *db) { magmablas_zprbt_mv_q(n, dv, db, magma_stream); } ///////////////////////////////////////////////////////////////////////////////////////////////////////////// /** Purpose ------- ZPRBT randomize a square general matrix using partial randomized transformation Arguments --------- @param[in] n INTEGER The number of columns and rows of the matrix dA. n >= 0. @param[in,out] dA COMPLEX_16 array, dimension (n,ldda) The n-by-n matrix dA On exit dA = duT*dA*d_V @param[in] ldda INTEGER The leading dimension of the array dA. LDA >= max(1,n). @param[in] du COMPLEX_16 array, dimension (n,2) The 2*n vector representing the random butterfly matrix U @param[in] dv COMPLEX_16 array, dimension (n,2) The 2*n vector representing the random butterfly matrix V @param[in] queue magma_queue_t Queue to execute in. 
********************************************************************/ extern "C" void magmablas_zprbt_q( magma_int_t n, magmaDoubleComplex *dA, magma_int_t ldda, magmaDoubleComplex *du, magmaDoubleComplex *dv, magma_queue_t queue) { du += ldda; dv += ldda; dim3 threads(block_height, block_width); dim3 grid(n/(4*block_height) + ((n%(4*block_height))!=0), n/(4*block_width) + ((n%(4*block_width))!=0)); magmablas_zelementary_multiplication_kernel<<< grid, threads, 0, queue >>>(n/2, dA, 0, ldda, du, 0, dv, 0); magmablas_zelementary_multiplication_kernel<<< grid, threads, 0, queue >>>(n/2, dA, ldda*n/2, ldda, du, 0, dv, n/2); magmablas_zelementary_multiplication_kernel<<< grid, threads, 0, queue >>>(n/2, dA, n/2, ldda, du, n/2, dv, 0); magmablas_zelementary_multiplication_kernel<<< grid, threads, 0, queue >>>(n/2, dA, ldda*n/2+n/2, ldda, du, n/2, dv, n/2); dim3 threads2(block_height, block_width); dim3 grid2(n/(2*block_height) + ((n%(2*block_height))!=0), n/(2*block_width) + ((n%(2*block_width))!=0)); magmablas_zelementary_multiplication_kernel<<< grid2, threads2, 0, queue >>>(n, dA, 0, ldda, du, -ldda, dv, -ldda); } /** @see magmablas_zprbt_q ********************************************************************/ extern "C" void magmablas_zprbt( magma_int_t n, magmaDoubleComplex *dA, magma_int_t ldda, magmaDoubleComplex *du, magmaDoubleComplex *dv) { magmablas_zprbt_q(n, dA, ldda, du, dv, magma_stream); } ///////////////////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////////////////// // adds x += r --and-- // copies r = b // each thread does one index, x[i] and r[i] __global__ void zaxpycp2_kernel( int m, magmaDoubleComplex *r, magmaDoubleComplex *x, const magmaDoubleComplex *b) { const int i = threadIdx.x + blockIdx.x*NB; if 
( i < m ) { x[i] = MAGMA_Z_ADD( x[i], r[i] ); r[i] = b[i]; } } // ---------------------------------------------------------------------- // adds x += r --and-- // copies r = b extern "C" void magmablas_zaxpycp2_q( magma_int_t m, magmaDoubleComplex *r, magmaDoubleComplex *x, const magmaDoubleComplex *b, magma_queue_t queue ) { dim3 threads( NB ); dim3 grid( (m + NB - 1)/NB ); zaxpycp2_kernel <<< grid, threads, 0, queue >>> ( m, r, x, b ); } extern "C" void magmablas_zaxpycp2( magma_int_t m, magmaDoubleComplex *r, magmaDoubleComplex *x, const magmaDoubleComplex *b) { magmablas_zaxpycp2_q( m, r, x, b, magma_stream ); }
ca7d057f8dacc5ed7a782f562ae08cbf9af736fe.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "anyMethod.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; unsigned char *buff = NULL; hipMalloc(&buff, XSIZE*YSIZE); unsigned char *buffer_out = NULL; hipMalloc(&buffer_out, XSIZE*YSIZE); int w = XSIZE; int h = YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( anyMethod), dim3(gridBlock),dim3(threadBlock), 0, 0, buff,buffer_out,w,h); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( anyMethod), dim3(gridBlock),dim3(threadBlock), 0, 0, buff,buffer_out,w,h); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( anyMethod), dim3(gridBlock),dim3(threadBlock), 0, 0, buff,buffer_out,w,h); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout 
<<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
ca7d057f8dacc5ed7a782f562ae08cbf9af736fe.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "anyMethod.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; unsigned char *buff = NULL; cudaMalloc(&buff, XSIZE*YSIZE); unsigned char *buffer_out = NULL; cudaMalloc(&buffer_out, XSIZE*YSIZE); int w = XSIZE; int h = YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); anyMethod<<<gridBlock,threadBlock>>>(buff,buffer_out,w,h); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { anyMethod<<<gridBlock,threadBlock>>>(buff,buffer_out,w,h); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { anyMethod<<<gridBlock,threadBlock>>>(buff,buffer_out,w,h); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
bc1b3672abb48544bfa1714821da08639cdcb20c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <optix.h> #include "random.h" #include "LaunchParams7.h" // our launch params #include <vec_math.h> // NVIDIAs math utils extern "C" { __constant__ LaunchParams optixLaunchParams; } // a single ray type enum { PHONG=0, SHADOW, RAY_TYPE_COUNT }; struct colorPRD{ float3 color; unsigned int seed; } ; struct shadowPRD{ float shadowAtt; unsigned int seed; } ; // ------------------------------------------------------- // closest hit computes color based lolely on the triangle normal extern "C" __global__ void __closesthit__radiance(){ shadowPRD shadowAttPRD; shadowAttPRD.shadowAtt = 1.0f; uint32_t u0, u1; packPointer( &shadowAttPRD, u0, u1 ); // get the payload variable float3 &prd = *(float3*)getPRD<float3>(); const int ix = optixGetLaunchIndex().x; const int iy = optixGetLaunchIndex().y; // get mesh data const TriangleMeshSBTData &sbtData = *(const TriangleMeshSBTData*)optixGetSbtDataPointer(); // retrieve primitive id and indexes const int primID = optixGetPrimitiveIndex(); const uint3 index = sbtData.index[primID]; // get barycentric coordinates const float u = optixGetTriangleBarycentrics().x; const float v = optixGetTriangleBarycentrics().y; if (sbtData.hasTexture && sbtData.vertexD.texCoord0) { // compute pixel texture coordinate const float4 tc = (1.f-u-v) * sbtData.vertexD.texCoord0[index.x] + u * sbtData.vertexD.texCoord0[index.y] + v * sbtData.vertexD.texCoord0[index.z]; // fetch texture value float4 fromTexture = tex2D<float4>(sbtData.texture,tc.x,tc.y); prd= make_float3(fromTexture); } else prd = sbtData.color; // Normal do pixel float4 normal = (1.f-u-v) * sbtData.vertexD.normal[index.x] + u * sbtData.vertexD.normal[index.y] + v * sbtData.vertexD.normal[index.z]; float4 pos = (1.f-u-v) * sbtData.vertexD.position[index.x] + u * sbtData.vertexD.position[index.y] + v * sbtData.vertexD.position[index.z]; // direction towards light float3 lPos = 
make_float3(optixLaunchParams.global->lightPos); float3 lDir = normalize(lPos - make_float3(pos)); float3 nn = normalize(make_float3(normal)); float intensity = max(dot(lDir, nn),0.0f); float tmax = length(lPos - make_float3(pos)); optixTrace(optixLaunchParams.traversable, make_float3(pos), lDir, 0.001f, // tmins tmax, // tmax 0.0f, // rayTime OptixVisibilityMask( 255 ), OPTIX_RAY_FLAG_DISABLE_ANYHIT, SHADOW, // SBT offset RAY_TYPE_COUNT, // SBT stride SHADOW, // missSBTIndex u0, u1 ); prd = prd * min(intensity * shadowAttPRD.shadowAtt + 0.2, 1.0); } // nothing to do in here extern "C" __global__ void __anyhit__radiance() { } // miss sets the background color extern "C" __global__ void __miss__radiance() { float3 &prd = *(float3*)getPRD<float3>(); // set blue as background color prd = make_float3(0.0f, 0.7f, 1.0f); } // ----------------------------------------------- // Shadow rays // nothing to do in here extern "C" __global__ void __anyhit__shadow() { } // nothing to do in here extern "C" __global__ void __closesthit__shadow() { shadowPRD &prd = *(shadowPRD*)getPRD<shadowPRD>(); prd.shadowAtt = 0; } // miss sets the background color extern "C" __global__ void __miss__shadow() { } // ----------------------------------------------- // Primary Rays extern "C" __global__ void __raygen__renderFrame() { // compute a test pattern based on pixel ID const int ix = optixGetLaunchIndex().x; const int iy = optixGetLaunchIndex().y; const auto &camera = optixLaunchParams.camera; if (optixLaunchParams.frame.frame == 0 && ix == 0 && iy == 0) { // print info to console printf("===========================================\n"); printf("Nau Ray-Tracing Debug\n"); const float4 &ld = optixLaunchParams.global->lightPos; printf("LightPos: %f, %f %f %f\n", ld.x,ld.y,ld.z,ld.w); printf("Launch dim: %u %u\n", optixGetLaunchDimensions().x, optixGetLaunchDimensions().y); printf("Rays per pixel squared: %d \n", optixLaunchParams.frame.raysPerPixel); 
printf("===========================================\n"); } float lensDistance = optixLaunchParams.global->lensDistance; float focalDistance = optixLaunchParams.global->focalDistance * 100; float aperture = optixLaunchParams.global->aperture * 10; float3 frente = normalize(cross(camera.vertical,camera.horizontal)); float3 lensCentre = camera.position + frente*lensDistance; // ray payload colorPRD pixelColorPRD; pixelColorPRD.color = make_float3(1.f); float raysPerPixel = float(optixLaunchParams.frame.raysPerPixel); // half pixel float2 delta = make_float2(1.0f/raysPerPixel, 1.0f/raysPerPixel); // compute ray direction // normalized screen plane position, in [-1, 1]^2 float red = 0.0f, blue = 0.0f, green = 0.0f; for (int i = 0; i < raysPerPixel; ++i) { for (int j = 0; j < raysPerPixel; ++j) { uint32_t seed = tea<4>( ix * optixGetLaunchDimensions().x + iy, i*raysPerPixel + j ); pixelColorPRD.seed = seed; uint32_t u0, u1; packPointer( &pixelColorPRD, u0, u1 ); const float2 subpixel_jitter = make_float2(i * delta.x, j * delta.y); const float2 screen(make_float2(ix + subpixel_jitter.x, iy + subpixel_jitter.y) / make_float2(optixGetLaunchDimensions().x, optixGetLaunchDimensions().y) * 2.0 - 1.0); // note: nau already takes into account the field of view and ratio when computing // camera horizontal and vertival float3 cPos = camera.position+(-screen.x)*camera.horizontal + (-screen.y ) * camera.vertical; float3 rayDir = normalize(lensCentre - cPos); float3 proj_frente_rayDir = dot(rayDir,frente)*frente; // Vetor que vai do centro da lente para o ponto de foco no plano de foco float3 ray = rayDir * focalDistance / length(proj_frente_rayDir); float3 pFocal = lensCentre + ray; float randR = aperture * sqrt(rnd(seed)); float randA = rnd(seed) * 2 * M_PIf; float x = randR * cos(randA); float y = randR * sin(randA); float3 randAperture = lensCentre + camera.horizontal * x + camera.vertical * y; float3 rayDirection = pFocal - randAperture; // trace primary ray 
optixTrace(optixLaunchParams.traversable, randAperture, rayDirection, 0.f, // tmin 1e20f, // tmax 0.0f, // rayTime OptixVisibilityMask( 255 ), OPTIX_RAY_FLAG_NONE,//,OPTIX_RAY_FLAG_DISABLE_ANYHIT PHONG, // SBT offset RAY_TYPE_COUNT, // SBT stride PHONG, // missSBTIndex u0, u1 ); red += pixelColorPRD.color.x / (raysPerPixel*raysPerPixel); green += pixelColorPRD.color.y / (raysPerPixel*raysPerPixel); blue += pixelColorPRD.color.z / (raysPerPixel*raysPerPixel); } } //convert float (0-1) to int (0-255) const int r = int(255.0f*red); const int g = int(255.0f*green); const int b = int(255.0f*blue); // convert to 32-bit rgba value const uint32_t rgba = 0xff000000 | (r<<0) | (g<<8) | (b<<16); // compute index const uint32_t fbIndex = ix + iy*optixGetLaunchDimensions().x; // write to output buffer optixLaunchParams.frame.colorBuffer[fbIndex] = rgba; }
bc1b3672abb48544bfa1714821da08639cdcb20c.cu
#include <optix.h> #include "random.h" #include "LaunchParams7.h" // our launch params #include <vec_math.h> // NVIDIAs math utils extern "C" { __constant__ LaunchParams optixLaunchParams; } // a single ray type enum { PHONG=0, SHADOW, RAY_TYPE_COUNT }; struct colorPRD{ float3 color; unsigned int seed; } ; struct shadowPRD{ float shadowAtt; unsigned int seed; } ; // ------------------------------------------------------- // closest hit computes color based lolely on the triangle normal extern "C" __global__ void __closesthit__radiance(){ shadowPRD shadowAttPRD; shadowAttPRD.shadowAtt = 1.0f; uint32_t u0, u1; packPointer( &shadowAttPRD, u0, u1 ); // get the payload variable float3 &prd = *(float3*)getPRD<float3>(); const int ix = optixGetLaunchIndex().x; const int iy = optixGetLaunchIndex().y; // get mesh data const TriangleMeshSBTData &sbtData = *(const TriangleMeshSBTData*)optixGetSbtDataPointer(); // retrieve primitive id and indexes const int primID = optixGetPrimitiveIndex(); const uint3 index = sbtData.index[primID]; // get barycentric coordinates const float u = optixGetTriangleBarycentrics().x; const float v = optixGetTriangleBarycentrics().y; if (sbtData.hasTexture && sbtData.vertexD.texCoord0) { // compute pixel texture coordinate const float4 tc = (1.f-u-v) * sbtData.vertexD.texCoord0[index.x] + u * sbtData.vertexD.texCoord0[index.y] + v * sbtData.vertexD.texCoord0[index.z]; // fetch texture value float4 fromTexture = tex2D<float4>(sbtData.texture,tc.x,tc.y); prd= make_float3(fromTexture); } else prd = sbtData.color; // Normal do pixel float4 normal = (1.f-u-v) * sbtData.vertexD.normal[index.x] + u * sbtData.vertexD.normal[index.y] + v * sbtData.vertexD.normal[index.z]; float4 pos = (1.f-u-v) * sbtData.vertexD.position[index.x] + u * sbtData.vertexD.position[index.y] + v * sbtData.vertexD.position[index.z]; // direction towards light float3 lPos = make_float3(optixLaunchParams.global->lightPos); float3 lDir = normalize(lPos - make_float3(pos)); float3 nn 
= normalize(make_float3(normal)); float intensity = max(dot(lDir, nn),0.0f); float tmax = length(lPos - make_float3(pos)); optixTrace(optixLaunchParams.traversable, make_float3(pos), lDir, 0.001f, // tmins tmax, // tmax 0.0f, // rayTime OptixVisibilityMask( 255 ), OPTIX_RAY_FLAG_DISABLE_ANYHIT, SHADOW, // SBT offset RAY_TYPE_COUNT, // SBT stride SHADOW, // missSBTIndex u0, u1 ); prd = prd * min(intensity * shadowAttPRD.shadowAtt + 0.2, 1.0); } // nothing to do in here extern "C" __global__ void __anyhit__radiance() { } // miss sets the background color extern "C" __global__ void __miss__radiance() { float3 &prd = *(float3*)getPRD<float3>(); // set blue as background color prd = make_float3(0.0f, 0.7f, 1.0f); } // ----------------------------------------------- // Shadow rays // nothing to do in here extern "C" __global__ void __anyhit__shadow() { } // nothing to do in here extern "C" __global__ void __closesthit__shadow() { shadowPRD &prd = *(shadowPRD*)getPRD<shadowPRD>(); prd.shadowAtt = 0; } // miss sets the background color extern "C" __global__ void __miss__shadow() { } // ----------------------------------------------- // Primary Rays extern "C" __global__ void __raygen__renderFrame() { // compute a test pattern based on pixel ID const int ix = optixGetLaunchIndex().x; const int iy = optixGetLaunchIndex().y; const auto &camera = optixLaunchParams.camera; if (optixLaunchParams.frame.frame == 0 && ix == 0 && iy == 0) { // print info to console printf("===========================================\n"); printf("Nau Ray-Tracing Debug\n"); const float4 &ld = optixLaunchParams.global->lightPos; printf("LightPos: %f, %f %f %f\n", ld.x,ld.y,ld.z,ld.w); printf("Launch dim: %u %u\n", optixGetLaunchDimensions().x, optixGetLaunchDimensions().y); printf("Rays per pixel squared: %d \n", optixLaunchParams.frame.raysPerPixel); printf("===========================================\n"); } float lensDistance = optixLaunchParams.global->lensDistance; float focalDistance = 
optixLaunchParams.global->focalDistance * 100; float aperture = optixLaunchParams.global->aperture * 10; float3 frente = normalize(cross(camera.vertical,camera.horizontal)); float3 lensCentre = camera.position + frente*lensDistance; // ray payload colorPRD pixelColorPRD; pixelColorPRD.color = make_float3(1.f); float raysPerPixel = float(optixLaunchParams.frame.raysPerPixel); // half pixel float2 delta = make_float2(1.0f/raysPerPixel, 1.0f/raysPerPixel); // compute ray direction // normalized screen plane position, in [-1, 1]^2 float red = 0.0f, blue = 0.0f, green = 0.0f; for (int i = 0; i < raysPerPixel; ++i) { for (int j = 0; j < raysPerPixel; ++j) { uint32_t seed = tea<4>( ix * optixGetLaunchDimensions().x + iy, i*raysPerPixel + j ); pixelColorPRD.seed = seed; uint32_t u0, u1; packPointer( &pixelColorPRD, u0, u1 ); const float2 subpixel_jitter = make_float2(i * delta.x, j * delta.y); const float2 screen(make_float2(ix + subpixel_jitter.x, iy + subpixel_jitter.y) / make_float2(optixGetLaunchDimensions().x, optixGetLaunchDimensions().y) * 2.0 - 1.0); // note: nau already takes into account the field of view and ratio when computing // camera horizontal and vertival float3 cPos = camera.position+(-screen.x)*camera.horizontal + (-screen.y ) * camera.vertical; float3 rayDir = normalize(lensCentre - cPos); float3 proj_frente_rayDir = dot(rayDir,frente)*frente; // Vetor que vai do centro da lente para o ponto de foco no plano de foco float3 ray = rayDir * focalDistance / length(proj_frente_rayDir); float3 pFocal = lensCentre + ray; float randR = aperture * sqrt(rnd(seed)); float randA = rnd(seed) * 2 * M_PIf; float x = randR * cos(randA); float y = randR * sin(randA); float3 randAperture = lensCentre + camera.horizontal * x + camera.vertical * y; float3 rayDirection = pFocal - randAperture; // trace primary ray optixTrace(optixLaunchParams.traversable, randAperture, rayDirection, 0.f, // tmin 1e20f, // tmax 0.0f, // rayTime OptixVisibilityMask( 255 ), 
OPTIX_RAY_FLAG_NONE,//,OPTIX_RAY_FLAG_DISABLE_ANYHIT PHONG, // SBT offset RAY_TYPE_COUNT, // SBT stride PHONG, // missSBTIndex u0, u1 ); red += pixelColorPRD.color.x / (raysPerPixel*raysPerPixel); green += pixelColorPRD.color.y / (raysPerPixel*raysPerPixel); blue += pixelColorPRD.color.z / (raysPerPixel*raysPerPixel); } } //convert float (0-1) to int (0-255) const int r = int(255.0f*red); const int g = int(255.0f*green); const int b = int(255.0f*blue); // convert to 32-bit rgba value const uint32_t rgba = 0xff000000 | (r<<0) | (g<<8) | (b<<16); // compute index const uint32_t fbIndex = ix + iy*optixGetLaunchDimensions().x; // write to output buffer optixLaunchParams.frame.colorBuffer[fbIndex] = rgba; }
94cbd2b8684a43ececed3e0ade3715ad20c4c439.hip
// !!! This is a file automatically generated by hipify!!! #include "interpolate_directivity.h" #include "hip/hip_runtime.h" #include <cmath> __host__ __device__ float interpolate_directivity(float* data, cuda3DMatrix<float> data_prop, float azim, float elev) { //Bilinear 2D interpolation taken from //https://en.wikipedia.org/wiki/Bilinear_interpolation#Alternative_algorithm //NOTE:We had taken into account the fact that our "blocks" have length 1. size_t x1, x2, y1, y2; //Extremes of the block. float Q11, Q12, Q21, Q22; //Values at extremes float a0, a1, a2, a3; x1 = (size_t)floor(azim); x2 = (size_t)ceil(azim); y1 = (size_t)floor(elev); y2 = (size_t)ceil(elev); //We need some dimension on both axis. //This might be optimized somehow. if (x1 == x2) { x2++; } if (y1 == y2) { y2++; } Q11 = data_prop.at(data, y1, x1); Q12 = data_prop.at(data, y2, x1); Q21 = data_prop.at(data, y1, x2); Q22 = data_prop.at(data, y2, x2); a0 = Q11 * x2 * y2 - Q12 * x2 * y1 - Q21 * x1 * y2 + Q22 * x1 * y1; a1 = -Q11 * y2 + Q12 * y1 + Q21 * y2 - Q22 * y1; a2 = -Q11 * x2 + Q12 * x2 + Q21 * x1 - Q22 * x1; a3 = Q11 - Q12 - Q21 + Q22; return (a0 + a1 * azim + a2 * elev + a3 * azim * elev); }
94cbd2b8684a43ececed3e0ade3715ad20c4c439.cu
#include "interpolate_directivity.h" #include "cuda_runtime.h" #include <cmath> __host__ __device__ float interpolate_directivity(float* data, cuda3DMatrix<float> data_prop, float azim, float elev) { //Bilinear 2D interpolation taken from //https://en.wikipedia.org/wiki/Bilinear_interpolation#Alternative_algorithm //NOTE:We had taken into account the fact that our "blocks" have length 1. size_t x1, x2, y1, y2; //Extremes of the block. float Q11, Q12, Q21, Q22; //Values at extremes float a0, a1, a2, a3; x1 = (size_t)floor(azim); x2 = (size_t)ceil(azim); y1 = (size_t)floor(elev); y2 = (size_t)ceil(elev); //We need some dimension on both axis. //This might be optimized somehow. if (x1 == x2) { x2++; } if (y1 == y2) { y2++; } Q11 = data_prop.at(data, y1, x1); Q12 = data_prop.at(data, y2, x1); Q21 = data_prop.at(data, y1, x2); Q22 = data_prop.at(data, y2, x2); a0 = Q11 * x2 * y2 - Q12 * x2 * y1 - Q21 * x1 * y2 + Q22 * x1 * y1; a1 = -Q11 * y2 + Q12 * y1 + Q21 * y2 - Q22 * y1; a2 = -Q11 * x2 + Q12 * x2 + Q21 * x1 - Q22 * x1; a3 = Q11 - Q12 - Q21 + Q22; return (a0 + a1 * azim + a2 * elev + a3 * azim * elev); }
886c8cfd35d37d133d00765f87e7dfb0d3d1177f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* This is a automatically generated test. Do not modify */ #include <stdio.h> #include <stdlib.h> #include <math.h> __global__ void compute(float comp, int var_1,int var_2,int var_3,float var_4,float var_5,float* var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21) { for (int i=0; i < var_1; ++i) { for (int i=0; i < var_2; ++i) { comp += (var_4 / var_5); for (int i=0; i < var_3; ++i) { comp = (-1.4981E-27f * fabsf(+0.0f)); comp = var_7 * expf(-1.0115E-44f); var_6[i] = -1.6598E-41f * (-1.3358E34f - log10f((var_8 * sinhf(+1.3993E-37f + (-1.9420E4f * +1.4445E-43f / -1.5239E34f))))); comp = var_6[i] - var_9 * (+1.3838E-35f / (+0.0f + (-1.8751E34f / var_10))); } if (comp == (var_11 + +1.3706E-36f)) { float tmp_1 = asinf(-1.4726E23f + -0.0f * (+1.0269E-35f / var_12)); comp = tmp_1 + (-1.1191E36f * powf(-1.8951E-43f, var_13 / sqrtf((var_14 + (var_15 + logf(-1.1694E-3f - acosf((-1.0170E34f - (var_16 + (+1.0204E-12f * var_17)))))))))); comp = var_18 * var_19 + -1.8301E-10f - var_20 + var_21 * -1.8279E-4f; } } } printf("%.17g\n", comp); } float* initPointer(float v) { float *ret = (float*) malloc(sizeof(float)*10); for(int i=0; i < 10; ++i) ret[i] = v; return ret; } int main(int argc, char** argv) { /* Program variables */ float tmp_1 = atof(argv[1]); int tmp_2 = atoi(argv[2]); int tmp_3 = atoi(argv[3]); int tmp_4 = atoi(argv[4]); float tmp_5 = atof(argv[5]); float tmp_6 = atof(argv[6]); float* tmp_7 = initPointer( atof(argv[7]) ); float tmp_8 = atof(argv[8]); float tmp_9 = atof(argv[9]); float tmp_10 = atof(argv[10]); float tmp_11 = atof(argv[11]); float tmp_12 = atof(argv[12]); float tmp_13 = atof(argv[13]); float tmp_14 = atof(argv[14]); float tmp_15 = atof(argv[15]); float tmp_16 = atof(argv[16]); float tmp_17 = atof(argv[17]); float tmp_18 
= atof(argv[18]); float tmp_19 = atof(argv[19]); float tmp_20 = atof(argv[20]); float tmp_21 = atof(argv[21]); float tmp_22 = atof(argv[22]); hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22); hipDeviceSynchronize(); return 0; }
886c8cfd35d37d133d00765f87e7dfb0d3d1177f.cu
/* This is a automatically generated test. Do not modify */ #include <stdio.h> #include <stdlib.h> #include <math.h> __global__ void compute(float comp, int var_1,int var_2,int var_3,float var_4,float var_5,float* var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21) { for (int i=0; i < var_1; ++i) { for (int i=0; i < var_2; ++i) { comp += (var_4 / var_5); for (int i=0; i < var_3; ++i) { comp = (-1.4981E-27f * fabsf(+0.0f)); comp = var_7 * expf(-1.0115E-44f); var_6[i] = -1.6598E-41f * (-1.3358E34f - log10f((var_8 * sinhf(+1.3993E-37f + (-1.9420E4f * +1.4445E-43f / -1.5239E34f))))); comp = var_6[i] - var_9 * (+1.3838E-35f / (+0.0f + (-1.8751E34f / var_10))); } if (comp == (var_11 + +1.3706E-36f)) { float tmp_1 = asinf(-1.4726E23f + -0.0f * (+1.0269E-35f / var_12)); comp = tmp_1 + (-1.1191E36f * powf(-1.8951E-43f, var_13 / sqrtf((var_14 + (var_15 + logf(-1.1694E-3f - acosf((-1.0170E34f - (var_16 + (+1.0204E-12f * var_17)))))))))); comp = var_18 * var_19 + -1.8301E-10f - var_20 + var_21 * -1.8279E-4f; } } } printf("%.17g\n", comp); } float* initPointer(float v) { float *ret = (float*) malloc(sizeof(float)*10); for(int i=0; i < 10; ++i) ret[i] = v; return ret; } int main(int argc, char** argv) { /* Program variables */ float tmp_1 = atof(argv[1]); int tmp_2 = atoi(argv[2]); int tmp_3 = atoi(argv[3]); int tmp_4 = atoi(argv[4]); float tmp_5 = atof(argv[5]); float tmp_6 = atof(argv[6]); float* tmp_7 = initPointer( atof(argv[7]) ); float tmp_8 = atof(argv[8]); float tmp_9 = atof(argv[9]); float tmp_10 = atof(argv[10]); float tmp_11 = atof(argv[11]); float tmp_12 = atof(argv[12]); float tmp_13 = atof(argv[13]); float tmp_14 = atof(argv[14]); float tmp_15 = atof(argv[15]); float tmp_16 = atof(argv[16]); float tmp_17 = atof(argv[17]); float tmp_18 = atof(argv[18]); float tmp_19 = atof(argv[19]); float tmp_20 = atof(argv[20]); float 
tmp_21 = atof(argv[21]); float tmp_22 = atof(argv[22]); compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22); cudaDeviceSynchronize(); return 0; }
31d3803bb03f0972c61aca2b31fc7eeaa2860049.hip
// !!! This is a file automatically generated by hipify!!! #include<cuda.h> #include<stdio.h> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" int main(void) { void Roy_Floyd(int *, int); const int Width = 6; int a[Width*Width] = { 0, 2, 5, 999, 999, 999, 999, 0, 7, 1, 999 , 8, 999, 999, 0, 4, 999, 999, 999, 999, 999, 0, 3, 999, 999, 999, 2, 999, 0, 3, 999, 5, 999, 2, 4, 0 }; Roy_Floyd(a, Width); for (int i = 0; i < (Width*Width); i++) { printf("%d \t", a[i]); if ((i + 1) % Width == 0) { printf("\n"); } } int quit; scanf("%d", &quit); return 0; } //Matrix multiplication kernel - thread specification __global__ void Compute_Path(int *Md, int Width, int k) { //2D Thread ID int ROW = blockIdx.x; int COL = threadIdx.x; float tmpSum = 0; //Pvalue stores the Pd element that is computed by the thread if (Md[ROW * Width + COL] > Md[ROW * Width + k] + Md[k * Width + COL]) Md[ROW * Width + COL] = Md[ROW * Width + k] + Md[k * Width + COL]; } void Roy_Floyd(int *M, int Width) { int size = Width*Width * sizeof(int); int *Md; //Transfer M and N to device memory hipMalloc((void**)&Md, size); hipMemcpy(Md, M, size, hipMemcpyHostToDevice); //Launch the device computation threads! for (int k = 0; k < Width; k++) Compute_Path << <Width, Width >> >(Md, Width, k); //Transfer P from device to host hipMemcpy(M, Md, size, hipMemcpyDeviceToHost); //Free device matrices hipFree(Md); }
31d3803bb03f0972c61aca2b31fc7eeaa2860049.cu
#include<cuda.h> #include<stdio.h> #include "cuda_runtime.h" #include "device_launch_parameters.h" int main(void) { void Roy_Floyd(int *, int); const int Width = 6; int a[Width*Width] = { 0, 2, 5, 999, 999, 999, 999, 0, 7, 1, 999 , 8, 999, 999, 0, 4, 999, 999, 999, 999, 999, 0, 3, 999, 999, 999, 2, 999, 0, 3, 999, 5, 999, 2, 4, 0 }; Roy_Floyd(a, Width); for (int i = 0; i < (Width*Width); i++) { printf("%d \t", a[i]); if ((i + 1) % Width == 0) { printf("\n"); } } int quit; scanf("%d", &quit); return 0; } //Matrix multiplication kernel - thread specification __global__ void Compute_Path(int *Md, int Width, int k) { //2D Thread ID int ROW = blockIdx.x; int COL = threadIdx.x; float tmpSum = 0; //Pvalue stores the Pd element that is computed by the thread if (Md[ROW * Width + COL] > Md[ROW * Width + k] + Md[k * Width + COL]) Md[ROW * Width + COL] = Md[ROW * Width + k] + Md[k * Width + COL]; } void Roy_Floyd(int *M, int Width) { int size = Width*Width * sizeof(int); int *Md; //Transfer M and N to device memory cudaMalloc((void**)&Md, size); cudaMemcpy(Md, M, size, cudaMemcpyHostToDevice); //Launch the device computation threads! for (int k = 0; k < Width; k++) Compute_Path << <Width, Width >> >(Md, Width, k); //Transfer P from device to host cudaMemcpy(M, Md, size, cudaMemcpyDeviceToHost); //Free device matrices cudaFree(Md); }
195bb51bd72bac8c54721ae18cd145dfc919a62a.hip
// !!! This is a file automatically generated by hipify!!! // Vector addition: C = 1/A + 1/B // using multiple GPUs with OpenMP // Includes #include <stdio.h> #include <stdlib.h> #include <omp.h> // header for OpenMP #include <hip/hip_runtime.h> // Variables float* h_A; // host vectors float* h_B; float* h_C; float* h_D; // Functions void RandomInit(float*, int); // Device code __global__ void VecAdd(const float* A, const float* B, float* C, int N) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < N) C[i] = 1.0/A[i] + 1.0/B[i]; __syncthreads(); } // Host code int main(void) { printf("\n"); printf("Vector Addition with multiple GPUs \n"); int N, NGPU, cpu_thread_id=0; int *Dev; long mem = 1024*1024*1024; // 4 Giga for float data type. printf("Enter the number of GPUs: "); scanf("%d", &NGPU); printf("%d\n", NGPU); Dev = (int *)malloc(sizeof(int)*NGPU); int numDev = 0; printf("GPU device number: "); for(int i = 0; i < NGPU; i++) { scanf("%d", &Dev[i]); printf("%d ",Dev[i]); numDev++; if(getchar() == '\n') break; } printf("\n"); if(numDev != NGPU) { fprintf(stderr,"Should input %d GPU device numbers\n", NGPU); exit(1); } printf("Enter the size of the vectors: "); scanf("%d", &N); printf("%d\n", N); if (3*N > mem) { printf("The size of these 3 vectors cannot be fitted into 4 Gbyte\n"); exit(1); } long size = N*sizeof(float); // Set the sizes of threads and blocks int threadsPerBlock; printf("Enter the number of threads per block: "); scanf("%d", &threadsPerBlock); printf("%d\n", threadsPerBlock); if(threadsPerBlock > 1024) { printf("The number of threads per block must be less than 1024 ! \n"); exit(1); } int blocksPerGrid = (N + threadsPerBlock*NGPU - 1) / (threadsPerBlock*NGPU); printf("The number of blocks is %d\n", blocksPerGrid); if(blocksPerGrid > 2147483647) { printf("The number of blocks must be less than 2147483647 ! 
\n"); exit(1); } // Allocate input vectors h_A and h_B in host memory h_A = (float*)malloc(size); h_B = (float*)malloc(size); h_C = (float*)malloc(size); if (! h_A || ! h_B || ! h_C) { printf("!!! Not enough memory.\n"); exit(1); } // Initialize input vectors RandomInit(h_A, N); RandomInit(h_B, N); // declare cuda event for timer hipEvent_t start, stop; // hipEventCreate(&start); // events must be created after devices are set // hipEventCreate(&stop); float Intime,gputime,Outime; omp_set_num_threads(NGPU); #pragma omp parallel private(cpu_thread_id) { float *d_A, *d_B, *d_C; cpu_thread_id = omp_get_thread_num(); hipSetDevice(Dev[cpu_thread_id]); // hipSetDevice(cpu_thread_id); // start the timer if(cpu_thread_id == 0) { hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start,0); } // Allocate vectors in device memory hipMalloc((void**)&d_A, size/NGPU); hipMalloc((void**)&d_B, size/NGPU); hipMalloc((void**)&d_C, size/NGPU); // Copy vectors from host memory to device memory hipMemcpy(d_A, h_A+N/NGPU*cpu_thread_id, size/NGPU, hipMemcpyHostToDevice); hipMemcpy(d_B, h_B+N/NGPU*cpu_thread_id, size/NGPU, hipMemcpyHostToDevice); #pragma omp barrier // stop the timer if(cpu_thread_id == 0) { hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime( &Intime, start, stop); printf("Data input time for GPU: %f (ms) \n",Intime); } // start the timer if(cpu_thread_id == 0) hipEventRecord(start,0); hipLaunchKernelGGL(( VecAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, N/NGPU); hipDeviceSynchronize(); // stop the timer if(cpu_thread_id == 0) { hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime( &gputime, start, stop); printf("Processing time for GPU: %f (ms) \n",gputime); printf("GPU Gflops: %f\n",3*N/(1000000.0*gputime)); } // Copy result from device memory to host memory // h_C contains the result in host memory // start the timer if(cpu_thread_id == 0) hipEventRecord(start,0); 
hipMemcpy(h_C+N/NGPU*cpu_thread_id, d_C, size/NGPU, hipMemcpyDeviceToHost); hipFree(d_A); hipFree(d_B); hipFree(d_C); // stop the timer if(cpu_thread_id == 0) { hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime( &Outime, start, stop); printf("Data output time for GPU: %f (ms) \n",Outime); } } float gputime_tot; gputime_tot = Intime + gputime + Outime; printf("Total time for GPU: %f (ms) \n",gputime_tot); // start the timer hipEventRecord(start,0); h_D = (float*)malloc(size); // compute the reference solution for (int i = 0; i < N; ++i) h_D[i] = 1.0/h_A[i] + 1.0/h_B[i]; // stop the timer hipEventRecord(stop,0); hipEventSynchronize(stop); float cputime; hipEventElapsedTime( &cputime, start, stop); printf("Processing time for CPU: %f (ms) \n",cputime); printf("CPU Gflops: %f\n",3*N/(1000000.0*cputime)); printf("Speed up of GPU = %f\n", cputime/gputime_tot); // Destroy timer hipEventDestroy(start); hipEventDestroy(stop); // check result printf("Check result:\n"); double sum=0; double diff; for (int i = 0; i < N; ++i) { diff = abs(h_D[i] - h_C[i]); sum += diff*diff; } sum = sqrt(sum); printf("norm(h_C - h_D)=%20.15e\n",sum); for (int i=0; i < NGPU; i++) { hipSetDevice(i); hipDeviceReset(); } return 0; } // Allocates an array with random float entries. void RandomInit(float* data, int n) { for (int i = 0; i < n; ++i) data[i] = rand() / (float)RAND_MAX; }
195bb51bd72bac8c54721ae18cd145dfc919a62a.cu
// Vector addition: C = 1/A + 1/B // using multiple GPUs with OpenMP // Includes #include <stdio.h> #include <stdlib.h> #include <omp.h> // header for OpenMP #include <cuda_runtime.h> // Variables float* h_A; // host vectors float* h_B; float* h_C; float* h_D; // Functions void RandomInit(float*, int); // Device code __global__ void VecAdd(const float* A, const float* B, float* C, int N) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < N) C[i] = 1.0/A[i] + 1.0/B[i]; __syncthreads(); } // Host code int main(void) { printf("\n"); printf("Vector Addition with multiple GPUs \n"); int N, NGPU, cpu_thread_id=0; int *Dev; long mem = 1024*1024*1024; // 4 Giga for float data type. printf("Enter the number of GPUs: "); scanf("%d", &NGPU); printf("%d\n", NGPU); Dev = (int *)malloc(sizeof(int)*NGPU); int numDev = 0; printf("GPU device number: "); for(int i = 0; i < NGPU; i++) { scanf("%d", &Dev[i]); printf("%d ",Dev[i]); numDev++; if(getchar() == '\n') break; } printf("\n"); if(numDev != NGPU) { fprintf(stderr,"Should input %d GPU device numbers\n", NGPU); exit(1); } printf("Enter the size of the vectors: "); scanf("%d", &N); printf("%d\n", N); if (3*N > mem) { printf("The size of these 3 vectors cannot be fitted into 4 Gbyte\n"); exit(1); } long size = N*sizeof(float); // Set the sizes of threads and blocks int threadsPerBlock; printf("Enter the number of threads per block: "); scanf("%d", &threadsPerBlock); printf("%d\n", threadsPerBlock); if(threadsPerBlock > 1024) { printf("The number of threads per block must be less than 1024 ! \n"); exit(1); } int blocksPerGrid = (N + threadsPerBlock*NGPU - 1) / (threadsPerBlock*NGPU); printf("The number of blocks is %d\n", blocksPerGrid); if(blocksPerGrid > 2147483647) { printf("The number of blocks must be less than 2147483647 ! \n"); exit(1); } // Allocate input vectors h_A and h_B in host memory h_A = (float*)malloc(size); h_B = (float*)malloc(size); h_C = (float*)malloc(size); if (! h_A || ! h_B || ! h_C) { printf("!!! 
Not enough memory.\n"); exit(1); } // Initialize input vectors RandomInit(h_A, N); RandomInit(h_B, N); // declare cuda event for timer cudaEvent_t start, stop; // cudaEventCreate(&start); // events must be created after devices are set // cudaEventCreate(&stop); float Intime,gputime,Outime; omp_set_num_threads(NGPU); #pragma omp parallel private(cpu_thread_id) { float *d_A, *d_B, *d_C; cpu_thread_id = omp_get_thread_num(); cudaSetDevice(Dev[cpu_thread_id]); // cudaSetDevice(cpu_thread_id); // start the timer if(cpu_thread_id == 0) { cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start,0); } // Allocate vectors in device memory cudaMalloc((void**)&d_A, size/NGPU); cudaMalloc((void**)&d_B, size/NGPU); cudaMalloc((void**)&d_C, size/NGPU); // Copy vectors from host memory to device memory cudaMemcpy(d_A, h_A+N/NGPU*cpu_thread_id, size/NGPU, cudaMemcpyHostToDevice); cudaMemcpy(d_B, h_B+N/NGPU*cpu_thread_id, size/NGPU, cudaMemcpyHostToDevice); #pragma omp barrier // stop the timer if(cpu_thread_id == 0) { cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime( &Intime, start, stop); printf("Data input time for GPU: %f (ms) \n",Intime); } // start the timer if(cpu_thread_id == 0) cudaEventRecord(start,0); VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N/NGPU); cudaDeviceSynchronize(); // stop the timer if(cpu_thread_id == 0) { cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime( &gputime, start, stop); printf("Processing time for GPU: %f (ms) \n",gputime); printf("GPU Gflops: %f\n",3*N/(1000000.0*gputime)); } // Copy result from device memory to host memory // h_C contains the result in host memory // start the timer if(cpu_thread_id == 0) cudaEventRecord(start,0); cudaMemcpy(h_C+N/NGPU*cpu_thread_id, d_C, size/NGPU, cudaMemcpyDeviceToHost); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); // stop the timer if(cpu_thread_id == 0) { cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime( 
&Outime, start, stop); printf("Data output time for GPU: %f (ms) \n",Outime); } } float gputime_tot; gputime_tot = Intime + gputime + Outime; printf("Total time for GPU: %f (ms) \n",gputime_tot); // start the timer cudaEventRecord(start,0); h_D = (float*)malloc(size); // compute the reference solution for (int i = 0; i < N; ++i) h_D[i] = 1.0/h_A[i] + 1.0/h_B[i]; // stop the timer cudaEventRecord(stop,0); cudaEventSynchronize(stop); float cputime; cudaEventElapsedTime( &cputime, start, stop); printf("Processing time for CPU: %f (ms) \n",cputime); printf("CPU Gflops: %f\n",3*N/(1000000.0*cputime)); printf("Speed up of GPU = %f\n", cputime/gputime_tot); // Destroy timer cudaEventDestroy(start); cudaEventDestroy(stop); // check result printf("Check result:\n"); double sum=0; double diff; for (int i = 0; i < N; ++i) { diff = abs(h_D[i] - h_C[i]); sum += diff*diff; } sum = sqrt(sum); printf("norm(h_C - h_D)=%20.15e\n",sum); for (int i=0; i < NGPU; i++) { cudaSetDevice(i); cudaDeviceReset(); } return 0; } // Allocates an array with random float entries. void RandomInit(float* data, int n) { for (int i = 0; i < n; ++i) data[i] = rand() / (float)RAND_MAX; }
7a1079b61195b51733d7ec5d2268e19b7e4bcbd4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "THHUNN/generic/SpatialReplicationPadding.hip" #else void THNN_(SpatialReplicationPadding_updateOutput)( THCState *state, THCTensor *input, THCTensor *output, int padL, int padR, int padT, int padB) { THArgCheck(THCTensor_canUse32BitIndexMath(state, input), 2, "input tensor must fit into 32-bit index math"); int planeDim = 0; int dimh = 1; int dimw = 2; int numBatch = 1; int numInputDims = THCTensor_(nDimensionLegacyNoScalars)(state, input); THCUNN_argCheck(state, !input->is_empty() && (numInputDims == 3 || numInputDims == 4), 2, input, "non-empty 3D or 4D (batch mode) tensor expected for input, but got: %s") if (numInputDims == 4) { numBatch = THCTensor_(size)(state, input, 0); planeDim++; dimh++; dimw++; } int numPlanes = THCTensor_(size)(state, input, planeDim); int inputH = THCTensor_(size)(state, input, dimh); int inputW = THCTensor_(size)(state, input, dimw); int outputH = inputH + padT + padB; int outputW = inputW + padL + padR; THArgCheck(outputW >= 1 || outputH >= 1 , 2, "input (H: %d, W: %d)is too small." " Calculated output H: %d W: %d", inputH, inputW, outputH, outputW); THCDeviceTensor<scalar_t, 4> devInput; THCDeviceTensor<scalar_t, 4> devOutput; if (numInputDims == 3) { THCTensor_(resize3d)(state, output, numPlanes, outputH, outputW); devInput = toDeviceTensor<scalar_t, 3>(state, input).upcastOuter<4>(); devOutput = toDeviceTensor<scalar_t, 3>(state, output).upcastOuter<4>(); } else { THCTensor_(resize4d)(state, output, numBatch, numPlanes, outputH, outputW); devInput = toDeviceTensor<scalar_t, 4>(state, input); devOutput = toDeviceTensor<scalar_t, 4>(state, output); } int outputPlaneSize = devOutput.getSize(2) * devOutput.getSize(3); dim3 gridSize(THCCeilDiv(outputPlaneSize, 256), devOutput.getSize(1), devOutput.getSize(0)); dim3 blockSize(outputPlaneSize > 256 ? 
256 : outputPlaneSize); hipLaunchKernelGGL(( SpatialReplicationPadding_updateOutput), dim3(gridSize), dim3(blockSize), 0, THCState_getCurrentStream(state), devInput, devOutput, padT, padB, padL, padR); } void THNN_(SpatialReplicationPadding_updateGradInput)( THCState *state, THCTensor *input, THCTensor *gradOutput, THCTensor *gradInput, int padL, int padR, int padT, int padB) { THArgCheck(THCTensor_canUse32BitIndexMath(state, input), 2, "input tensor must fit into 32-bit index math"); THArgCheck(THCTensor_canUse32BitIndexMath(state, gradOutput), 3, "output gradient tensor must fit into 32-bit index math"); int planeDim = 0; int dimh = 1; int dimw = 2; int numInputDims = THCTensor_(nDimensionLegacyNoScalars)(state, input); if (numInputDims == 4) { planeDim++; dimh++; dimw++; } int iheight = input->size(dimh); int iwidth = input->size(dimw); int oheight = iheight + padT + padB; int owidth = iwidth + padL + padR; THArgCheck(owidth == THCTensor_(size)(state, gradOutput, dimw), 3, "gradOutput width unexpected. Expected: %d, Got: %d", owidth, THCTensor_(size)(state, gradOutput, dimw)); THArgCheck(oheight == THCTensor_(size)(state, gradOutput, dimh), 3, "gradOutput height unexpected. Expected: %d, Got: %d", oheight, THCTensor_(size)(state, gradOutput, dimh)); THCTensor_(resizeAs)(state, gradInput, input); THCTensor_(zero)(state, gradInput); THCDeviceTensor<scalar_t, 4> devGradInput; THCDeviceTensor<scalar_t, 4> devGradOutput; if (numInputDims == 3) { devGradInput = toDeviceTensor<scalar_t, 3>(state, gradInput).upcastOuter<4>(); devGradOutput = toDeviceTensor<scalar_t, 3>(state, gradOutput).upcastOuter<4>(); } else { devGradInput = toDeviceTensor<scalar_t, 4>(state, gradInput); devGradOutput = toDeviceTensor<scalar_t, 4>(state, gradOutput); } int outputPlaneSize = devGradOutput.getSize(2) * devGradOutput.getSize(3); dim3 gridSize(THCCeilDiv(outputPlaneSize, 256), devGradOutput.getSize(1), devGradOutput.getSize(0)); dim3 blockSize(outputPlaneSize > 256 ? 
256 : outputPlaneSize); hipLaunchKernelGGL(( SpatialReplicationPadding_updateGradInput), dim3(gridSize), dim3(blockSize), 0, THCState_getCurrentStream(state), devGradInput, devGradOutput, padT, padB, padL, padR); } #endif
7a1079b61195b51733d7ec5d2268e19b7e4bcbd4.cu
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THCUNN/generic/SpatialReplicationPadding.cu"
#else

// Forward pass of 2D replication ("edge") padding.
// Resizes `output` to (inputH + padT + padB) x (inputW + padL + padR) and
// launches one device thread per output element (grid.y = planes,
// grid.z = batch).
void THNN_(SpatialReplicationPadding_updateOutput)(
           THCState *state,
           THCTensor *input,
           THCTensor *output,
           int padL, int padR,
           int padT, int padB) {
  // Device kernels index with 32-bit ints; reject larger tensors up front.
  THArgCheck(THCTensor_canUse32BitIndexMath(state, input), 2,
             "input tensor must fit into 32-bit index math");

  int planeDim = 0;
  int dimh = 1;
  int dimw = 2;
  int numBatch = 1;

  int numInputDims = THCTensor_(nDimensionLegacyNoScalars)(state, input);
  THCUNN_argCheck(state, !input->is_empty() && (numInputDims == 3 || numInputDims == 4), 2, input,
                  "non-empty 3D or 4D (batch mode) tensor expected for input, but got: %s")

  if (numInputDims == 4) {
    // Batched layout: plane/height/width dims shift right by one.
    numBatch = THCTensor_(size)(state, input, 0);
    planeDim++;
    dimh++;
    dimw++;
  }

  int numPlanes = THCTensor_(size)(state, input, planeDim);
  int inputH = THCTensor_(size)(state, input, dimh);
  int inputW = THCTensor_(size)(state, input, dimw);
  int outputH = inputH + padT + padB;
  int outputW = inputW + padL + padR;

  // Bug fix: require BOTH output dims to be >= 1. The original used `||`,
  // which accepted a degenerate (<= 0) size in one dimension as long as the
  // other was valid — the error message below shows both were intended.
  THArgCheck(outputW >= 1 && outputH >= 1, 2,
             "input (H: %d, W: %d)is too small."
             " Calculated output H: %d W: %d",
             inputH, inputW, outputH, outputW);

  // View both tensors as 4D; 3D (unbatched) inputs get a size-1 outer dim so
  // a single kernel handles both cases.
  THCDeviceTensor<scalar_t, 4> devInput;
  THCDeviceTensor<scalar_t, 4> devOutput;

  if (numInputDims == 3) {
    THCTensor_(resize3d)(state, output, numPlanes, outputH, outputW);
    devInput = toDeviceTensor<scalar_t, 3>(state, input).upcastOuter<4>();
    devOutput = toDeviceTensor<scalar_t, 3>(state, output).upcastOuter<4>();
  } else {
    THCTensor_(resize4d)(state, output, numBatch, numPlanes, outputH, outputW);
    devInput = toDeviceTensor<scalar_t, 4>(state, input);
    devOutput = toDeviceTensor<scalar_t, 4>(state, output);
  }

  // Grid: ceil(planeSize / 256) blocks of up to 256 threads per (plane, batch).
  int outputPlaneSize = devOutput.getSize(2) * devOutput.getSize(3);
  dim3 gridSize(THCCeilDiv(outputPlaneSize, 256),
                devOutput.getSize(1),
                devOutput.getSize(0));
  dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize);

  SpatialReplicationPadding_updateOutput<<<gridSize, blockSize, 0,
    THCState_getCurrentStream(state)>>>(devInput, devOutput, padT, padB, padL, padR);
}

// Backward pass of 2D replication padding: validates gradOutput's shape,
// zeroes gradInput, and launches one device thread per gradOutput element.
void THNN_(SpatialReplicationPadding_updateGradInput)(
           THCState *state,
           THCTensor *input,
           THCTensor *gradOutput,
           THCTensor *gradInput,
           int padL, int padR,
           int padT, int padB) {
  THArgCheck(THCTensor_canUse32BitIndexMath(state, input), 2,
             "input tensor must fit into 32-bit index math");
  THArgCheck(THCTensor_canUse32BitIndexMath(state, gradOutput), 3,
             "output gradient tensor must fit into 32-bit index math");

  int planeDim = 0;
  int dimh = 1;
  int dimw = 2;

  int numInputDims = THCTensor_(nDimensionLegacyNoScalars)(state, input);
  if (numInputDims == 4) {
    planeDim++;
    dimh++;
    dimw++;
  }

  int iheight = input->size(dimh);
  int iwidth = input->size(dimw);
  int oheight = iheight + padT + padB;
  int owidth = iwidth + padL + padR;

  // gradOutput must already carry the padded spatial size.
  THArgCheck(owidth == THCTensor_(size)(state, gradOutput, dimw), 3,
             "gradOutput width unexpected. Expected: %d, Got: %d",
             owidth, THCTensor_(size)(state, gradOutput, dimw));
  THArgCheck(oheight == THCTensor_(size)(state, gradOutput, dimh), 3,
             "gradOutput height unexpected. Expected: %d, Got: %d",
             oheight, THCTensor_(size)(state, gradOutput, dimh));

  THCTensor_(resizeAs)(state, gradInput, input);
  // Zero first — the backward kernel presumably accumulates into gradInput
  // (kernel body is defined elsewhere in this generic file).
  THCTensor_(zero)(state, gradInput);

  THCDeviceTensor<scalar_t, 4> devGradInput;
  THCDeviceTensor<scalar_t, 4> devGradOutput;

  if (numInputDims == 3) {
    devGradInput = toDeviceTensor<scalar_t, 3>(state, gradInput).upcastOuter<4>();
    devGradOutput = toDeviceTensor<scalar_t, 3>(state, gradOutput).upcastOuter<4>();
  } else {
    devGradInput = toDeviceTensor<scalar_t, 4>(state, gradInput);
    devGradOutput = toDeviceTensor<scalar_t, 4>(state, gradOutput);
  }

  int outputPlaneSize = devGradOutput.getSize(2) * devGradOutput.getSize(3);
  dim3 gridSize(THCCeilDiv(outputPlaneSize, 256),
                devGradOutput.getSize(1),
                devGradOutput.getSize(0));
  dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize);

  SpatialReplicationPadding_updateGradInput<<<gridSize, blockSize, 0,
    THCState_getCurrentStream(state)>>>(devGradInput, devGradOutput, padT, padB, padL, padR);
}

#endif
74a534810236985180cab79189cfd201d43fb2fc.hip
// !!! This is a file automatically generated by hipify!!!
//Includes for IntelliSense
#define _SIZE_T_DEFINED

#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <texture_fetch_functions.h>
#include "float.h"
#include <builtin_types.h>
#include <vector_functions.h>
#include <math.h>

#include "ActivationFunction.cu"

extern "C"
{
	__constant__ int D_INPUT_UNITS;
	__constant__ int D_OUTPUT_UNITS;
	__constant__ ActivationFunctionEnum D_ACTIVATION_FUNCTION;

	//edited code from Brainsimulator for computation of
	//output from the hidden layer of RNN (recurrent neural network)
	//
	// Computes hiddenActivations = f(W_in * input + W_rec * previousHidden),
	// one thread per hidden unit. Dynamic shared memory must hold
	// (D_INPUT_UNITS + hiddenLayerSize) floats (pass the byte count as the
	// third launch argument).
	__global__ void ForwardPassHiddenKernel(
		float *input,
		float *hiddenActivations,
		float *previousHiddenActivations,
		float *inputWeights,
		float *recurrentWeights,
		int hiddenLayerSize
		)
	{
		int unitId = blockDim.x * blockIdx.y * gridDim.x //rows preceeding current row in grid
			+ blockDim.x * blockIdx.x //blocks preceeding current block
			+ threadIdx.x;

		extern __shared__ float sharedMem[];

		// Stage the input vector and previous hidden state in shared memory.
		// Fix: all threads cooperate in a strided copy; the original had
		// thread 0 copy everything serially while the block idled.
		for (int i = threadIdx.x; i < D_INPUT_UNITS; i += blockDim.x)
		{
			sharedMem[i] = input[i];
		}
		for (int i = threadIdx.x; i < hiddenLayerSize; i += blockDim.x)
		{
			sharedMem[D_INPUT_UNITS + i] = previousHiddenActivations[i];
		}
		__syncthreads(); // all threads reach this; no divergent path above

		if (unitId < hiddenLayerSize)
		{
			float weightedSum = 0;

			// Input-to-hidden contribution (row `unitId` of inputWeights).
			int weightId = unitId * D_INPUT_UNITS;
			for (int i = 0; i < D_INPUT_UNITS; i++)
			{
				weightedSum += inputWeights[weightId] * sharedMem[i];
				weightId++;
			}

			// Recurrent contribution (row `unitId` of recurrentWeights).
			weightId = unitId * hiddenLayerSize;
			for (int i = 0; i < hiddenLayerSize; i++)
			{
				weightedSum += recurrentWeights[weightId] * sharedMem[i + D_INPUT_UNITS];
				weightId++;
			}

			hiddenActivations[unitId] = Evaluate(D_ACTIVATION_FUNCTION, weightedSum);
		}
	}

	//edited code from Brainsimulator for computation of
	//output from the output layer of RNN (recurrent neural network)
	//
	// Computes outputActivations = f(W_out * hiddenActivations), one thread
	// per output unit. Dynamic shared memory must hold hiddenLayerSize floats.
	__global__ void ForwardPassOutputKernel(
		float *hiddenActivations,
		float *outputActivations,
		float *outputWeights,
		int hiddenLayerSize
		)
	{
		int unitId = blockDim.x * blockIdx.y * gridDim.x //rows preceeding current row in grid
			+ blockDim.x * blockIdx.x //blocks preceeding current block
			+ threadIdx.x;

		extern __shared__ float sharedMem[];

		// Cooperative strided copy of the hidden activations (fix: the
		// original copied serially from thread 0 only).
		for (int i = threadIdx.x; i < hiddenLayerSize; i += blockDim.x)
		{
			sharedMem[i] = hiddenActivations[i];
		}
		__syncthreads();

		if (unitId < D_OUTPUT_UNITS)
		{
			float weightedSum = 0;

			int weightId = unitId * hiddenLayerSize;
			for (int i = 0; i < hiddenLayerSize; i++)
			{
				weightedSum += outputWeights[weightId] * sharedMem[i];
				weightId++;
			}

			outputActivations[unitId] = Evaluate(D_ACTIVATION_FUNCTION, weightedSum);
		}
	}
}
74a534810236985180cab79189cfd201d43fb2fc.cu
//Includes for IntelliSense
#define _SIZE_T_DEFINED

#include <cuda.h>
#include <device_launch_parameters.h>
#include <texture_fetch_functions.h>
#include "float.h"
#include <builtin_types.h>
#include <vector_functions.h>
#include <math.h>

#include "ActivationFunction.cu"

extern "C"
{
	__constant__ int D_INPUT_UNITS;
	__constant__ int D_OUTPUT_UNITS;
	__constant__ ActivationFunctionEnum D_ACTIVATION_FUNCTION;

	//edited code from Brainsimulator for computation of
	//output from the hidden layer of RNN (recurrent neural network)
	//
	// Computes hiddenActivations = f(W_in * input + W_rec * previousHidden),
	// one thread per hidden unit. Dynamic shared memory must hold
	// (D_INPUT_UNITS + hiddenLayerSize) floats (pass the byte count as the
	// third launch argument).
	__global__ void ForwardPassHiddenKernel(
		float *input,
		float *hiddenActivations,
		float *previousHiddenActivations,
		float *inputWeights,
		float *recurrentWeights,
		int hiddenLayerSize
		)
	{
		int unitId = blockDim.x * blockIdx.y * gridDim.x //rows preceeding current row in grid
			+ blockDim.x * blockIdx.x //blocks preceeding current block
			+ threadIdx.x;

		extern __shared__ float sharedMem[];

		// Stage the input vector and previous hidden state in shared memory.
		// Fix: all threads cooperate in a strided copy; the original had
		// thread 0 copy everything serially while the block idled.
		for (int i = threadIdx.x; i < D_INPUT_UNITS; i += blockDim.x)
		{
			sharedMem[i] = input[i];
		}
		for (int i = threadIdx.x; i < hiddenLayerSize; i += blockDim.x)
		{
			sharedMem[D_INPUT_UNITS + i] = previousHiddenActivations[i];
		}
		__syncthreads(); // all threads reach this; no divergent path above

		if (unitId < hiddenLayerSize)
		{
			float weightedSum = 0;

			// Input-to-hidden contribution (row `unitId` of inputWeights).
			int weightId = unitId * D_INPUT_UNITS;
			for (int i = 0; i < D_INPUT_UNITS; i++)
			{
				weightedSum += inputWeights[weightId] * sharedMem[i];
				weightId++;
			}

			// Recurrent contribution (row `unitId` of recurrentWeights).
			weightId = unitId * hiddenLayerSize;
			for (int i = 0; i < hiddenLayerSize; i++)
			{
				weightedSum += recurrentWeights[weightId] * sharedMem[i + D_INPUT_UNITS];
				weightId++;
			}

			hiddenActivations[unitId] = Evaluate(D_ACTIVATION_FUNCTION, weightedSum);
		}
	}

	//edited code from Brainsimulator for computation of
	//output from the output layer of RNN (recurrent neural network)
	//
	// Computes outputActivations = f(W_out * hiddenActivations), one thread
	// per output unit. Dynamic shared memory must hold hiddenLayerSize floats.
	__global__ void ForwardPassOutputKernel(
		float *hiddenActivations,
		float *outputActivations,
		float *outputWeights,
		int hiddenLayerSize
		)
	{
		int unitId = blockDim.x * blockIdx.y * gridDim.x //rows preceeding current row in grid
			+ blockDim.x * blockIdx.x //blocks preceeding current block
			+ threadIdx.x;

		extern __shared__ float sharedMem[];

		// Cooperative strided copy of the hidden activations (fix: the
		// original copied serially from thread 0 only).
		for (int i = threadIdx.x; i < hiddenLayerSize; i += blockDim.x)
		{
			sharedMem[i] = hiddenActivations[i];
		}
		__syncthreads();

		if (unitId < D_OUTPUT_UNITS)
		{
			float weightedSum = 0;

			int weightId = unitId * hiddenLayerSize;
			for (int i = 0; i < hiddenLayerSize; i++)
			{
				weightedSum += outputWeights[weightId] * sharedMem[i];
				weightId++;
			}

			outputActivations[unitId] = Evaluate(D_ACTIVATION_FUNCTION, weightedSum);
		}
	}
}
64f8443e96e29176c786e65ce932f14f63c35172.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdio> #include <cmath> #include <cassert> #include <cstdlib> #include <cutil.h> #include <omp.h> #include "cuda_pointer.h" #define NTHREAD 64 // 64 or 128 //#define NJBLOCK 14 // for GTX 470 #define NJBLOCK 30 #define NIBLOCK 32 // 16 or 32 #define NIMAX (NTHREAD * NIBLOCK) // 2048 #define NXREDUCE 32 // must be >NJBLOCK #define NYREDUCE 8 #define NNB_PER_BLOCK 256 // NNB per block, must be power of 2 #define NB_BUF_SIZE (1<<20) // #define NNB_MAX 384 // total NNB at reduced #define MAX_CPU 32 #define MAX_GPU 8 // for clearity, for myself #define __out #define PROFILE #define NAN_CHECK(val) assert((val) == (val)); typedef unsigned short uint16; struct Jparticle{ float3 pos; float mass; float3 vel; float pad; Jparticle() {} Jparticle(const double mj, const double xj[3], const double vj[3]){ pos.x = xj[0]; pos.y = xj[1]; pos.z = xj[2]; mass = mj; vel.x = vj[0]; vel.y = vj[1]; vel.z = vj[2]; NAN_CHECK(xj[0]); NAN_CHECK(xj[1]); NAN_CHECK(xj[2]); NAN_CHECK(mj); NAN_CHECK(vj[0]); NAN_CHECK(vj[1]); NAN_CHECK(vj[2]); } }; struct Iparticle{ float3 pos; float h2; float3 vel; float dtr; Iparticle() {} Iparticle(const double h2i, const double dtri, const double xi[3], const double vi[3]){ pos.x = xi[0]; pos.y = xi[1]; pos.z = xi[2]; h2 = h2i; vel.x = vi[0]; vel.y = vi[1]; vel.z = vi[2]; dtr = dtri; NAN_CHECK(xi[0]); NAN_CHECK(xi[1]); NAN_CHECK(xi[2]); NAN_CHECK(h2i); NAN_CHECK(vi[0]); NAN_CHECK(vi[1]); NAN_CHECK(vi[2]); } }; struct Force{ float3 acc; float pot; float3 jrk; int nnb; // 8 words __device__ void clear(){ acc.x = acc.y = acc.z = 0.f; jrk.x = jrk.y = jrk.z = 0.f; pot = 0.f; nnb = 0; } __device__ void operator+=(const Force &rhs){ acc.x += rhs.acc.x; acc.y += rhs.acc.y; acc.z += rhs.acc.z; pot += rhs.pot; jrk.x += rhs.jrk.x; jrk.y += rhs.jrk.y; jrk.z += rhs.jrk.z; if(nnb>=0 && rhs.nnb>=0){ nnb += rhs.nnb; }else{ nnb = -1; } } }; __device__ void dev_gravity( const int 
jidx, const Iparticle &ip, const Jparticle &jp, __out Force &fo, __out uint16 nblist[]){ float dx = jp.pos.x - ip.pos.x; float dy = jp.pos.y - ip.pos.y; float dz = jp.pos.z - ip.pos.z; float dvx = jp.vel.x - ip.vel.x; float dvy = jp.vel.y - ip.vel.y; float dvz = jp.vel.z - ip.vel.z; float r2 = dx*dx + dy*dy + dz*dz; #if 1 float dxp = dx + ip.dtr * dvx; float dyp = dy + ip.dtr * dvy; float dzp = dz + ip.dtr * dvz; float r2p = dxp*dxp + dyp*dyp + dzp*dzp; #else float r2p = r2; #endif float rv = dx*dvx + dy*dvy + dz*dvz; float rinv1 = rsqrtf(r2); if(min(r2, r2p) < ip.h2){ // fo.neib[fo.nnb++ % NBMAX] = j; nblist[fo.nnb & (NNB_PER_BLOCK-1)] = (uint16)jidx; fo.nnb++; rinv1 = 0.f; } float rinv2 = rinv1 * rinv1; float mrinv1 = jp.mass * rinv1; float mrinv3 = mrinv1 * rinv2; rv *= -3.f * rinv2; #ifdef POTENTIAL fo.pot += mrinv1; #endif fo.acc.x += mrinv3 * dx; fo.acc.y += mrinv3 * dy; fo.acc.z += mrinv3 * dz; // fo.acc.z += 1.0; fo.jrk.x += mrinv3 * (dvx + rv * dx); fo.jrk.y += mrinv3 * (dvy + rv * dy); fo.jrk.z += mrinv3 * (dvz + rv * dz); } __global__ void gravity_kernel( const int nbody, const Iparticle ipbuf[], const Jparticle jpbuf[], __out Force fobuf[][NJBLOCK], __out uint16 nbbuf[][NJBLOCK][NNB_PER_BLOCK]){ int ibid = blockIdx.x; int jbid = blockIdx.y; int tid = threadIdx.x; int iaddr = tid + blockDim.x * ibid; int jstart = (nbody * (jbid )) / NJBLOCK; int jend = (nbody * (jbid+1)) / NJBLOCK; Iparticle ip = ipbuf[iaddr]; Force fo; fo.clear(); uint16 *nblist = nbbuf[iaddr][jbid]; for(int j=jstart; j<jend; j+=NTHREAD){ __shared__ Jparticle jpshare[NTHREAD]; __syncthreads(); #if 0 jpshare[tid] = jpbuf[j+tid]; #else float4 *src = (float4 *)&jpbuf[j]; float4 *dst = (float4 *)jpshare; dst[ tid] = src[ tid]; dst[NTHREAD+tid] = src[NTHREAD+tid]; #endif __syncthreads(); if(jend-j < NTHREAD){ #pragma unroll 4 for(int jj=0; jj<jend-j; jj++){ Jparticle jp = jpshare[jj]; dev_gravity(j-jstart+jj, ip, jp, fo, nblist); } }else{ #pragma unroll 4 for(int jj=0; jj<NTHREAD; jj++){ 
Jparticle jp = jpshare[jj]; dev_gravity(j-jstart+jj, ip, jp, fo, nblist); } } } if(fo.nnb > NNB_PER_BLOCK) fo.nnb = -1; fobuf[iaddr][jbid] = fo; } #if 0 __global__ void reduce_kernel_old( const int nbody, const int joff, // here's partial forces and nblists, const Force fpart [][NJBLOCK], const uint16 nbpart[][NJBLOCK][NNB_PER_BLOCK], // and these to be redeced Force ftot [], int nbtot [][NNB_MAX]){ const int ibid = blockIdx.x; int tid = threadIdx.x; const int iaddr = tid + blockDim.x * ibid; Force fo; fo.clear(); int *nbdst = nbtot[iaddr]; bool oveflow = false; for(int jb=0; jb<NJBLOCK; jb++){ const int jstart = (nbody * jb) / NJBLOCK; const Force &fsrc = fpart[iaddr][jb]; fo += fsrc; if(fsrc.nnb > NNB_PER_BLOCK) oveflow = true; if(fo.nnb > NNB_MAX ) oveflow = true; if(!oveflow){ const int klen = fsrc.nnb; for(int k=0; k<klen; k++){ const int nbid = (joff + jstart) + int(nbpart[iaddr][jb][k]); *nbdst++ = nbid; } } } if(oveflow) fo.nnb = -1; ftot[iaddr] = fo; } #endif __global__ void force_reduce_kernel( const int ni, const Force fpart[][NJBLOCK], __out Force ftot []){ const int xid = threadIdx.x; const int yid = threadIdx.y; const int bid = blockIdx.x; const int iaddr = yid + blockDim.y * bid; __shared__ Force fshare[NYREDUCE][NXREDUCE]; if(xid < NJBLOCK){ fshare[yid][xid] = fpart[iaddr][xid]; }else{ fshare[yid][xid].clear(); } Force *fs = fshare[yid]; #if NXREDUCE==32 if(xid < 16) fs[xid] += fs[xid + 16]; #endif if(xid < 8) fs[xid] += fs[xid + 8]; if(xid < 4) fs[xid] += fs[xid + 4]; if(xid < 2) fs[xid] += fs[xid + 2]; if(xid < 1) fs[xid] += fs[xid + 1]; if(iaddr < ni){ ftot[iaddr] = fs[0]; } } __global__ void gather_nb_kernel( const int ni, const int nj, const int joff, const Force fpart[][NJBLOCK], const Force ftot [], const int nboff[], const uint16 nbpart[][NJBLOCK][NNB_PER_BLOCK], __out int nblist[]) { const int xid = threadIdx.x; const int yid = threadIdx.y; const int bid = blockIdx.x; const int iaddr = yid + blockDim.y * bid; if(iaddr >= ni) return; 
if(ftot[iaddr].nnb < 0) return; const int mynnb = (xid < NJBLOCK) ? fpart[iaddr][xid].nnb : 0; // now performe prefix sum __shared__ int ishare[NYREDUCE][NXREDUCE]; ishare[yid][xid] = mynnb; int *ish = ishare[yid]; if(xid>=1) ish[xid] += ish[xid-1]; if(xid>=2) ish[xid] += ish[xid-2]; if(xid>=4) ish[xid] += ish[xid-4]; if(xid>=8) ish[xid] += ish[xid-8]; #if NXREDUCE==32 if(xid>=16) ish[xid] += ish[xid-16]; #endif const int off = (xid == 0) ? 0 : ish[xid-1]; int *nbdst = nblist + nboff[iaddr] + off; const int jstart = (nj * xid) / NJBLOCK; if(xid < NJBLOCK){ for(int k=0; k<mynnb; k++){ const int nbid = (joff + jstart) + int(nbpart[iaddr][xid][k]); // const int nbid = iaddr * 1000 + k; nbdst[k] = nbid; } } } // Host Part #ifdef PROFILE #include <sys/time.h> static double get_wtime(){ struct timeval tv; gettimeofday(&tv, NULL); return tv.tv_sec + 1.e-6 * tv.tv_usec; } #else static double get_wtime(){ return 0.0; } #endif static double time_send, time_grav, time_reduce; static long long numInter; static cudaPointer <Jparticle> jpbuf [MAX_GPU]; static cudaPointer <Iparticle> ipbuf [MAX_GPU]; static cudaPointer <Force[NJBLOCK]> fpart [MAX_GPU]; static cudaPointer <Force> ftot [MAX_GPU]; static cudaPointer <uint16[NJBLOCK][NNB_PER_BLOCK]> nbpart[MAX_GPU]; static cudaPointer <int> nblist[MAX_GPU]; static cudaPointer <int> nboff [MAX_GPU]; static int numCPU, numGPU; static int joff[MAX_GPU + 1]; static int nbody, nbodymax; static int devid[MAX_GPU]; static bool is_open = false; static bool devinit = false; static void GPUNB_devinit(){ if(devinit) return; assert(NXREDUCE >= NJBLOCK); assert(NXREDUCE <= 32); hipGetDeviceCount(&numGPU); assert(numGPU <= MAX_GPU); char *gpu_list = getenv("GPU_LIST"); if(gpu_list){ // get GPU list from environment variable numGPU = 0; char *p = strtok(gpu_list, " "); while(p){ devid[numGPU++] = atoi(p); p = strtok(NULL, " "); assert(numGPU <= MAX_GPU); } }else{ // use all GPUs for(int i=0; i<numGPU; i++){ devid[i] = i; } } // numGPU = 1; #pragma 
omp parallel { int tid = omp_get_thread_num(); if(tid == 0) numCPU = omp_get_num_threads(); } assert(numCPU <= MAX_CPU); assert(numGPU <= numCPU); #pragma omp parallel { int tid = omp_get_thread_num(); if(tid < numGPU){ hipSetDevice(devid[tid]); } } #ifdef PROFILE fprintf(stderr, "***********************\n"); fprintf(stderr, "Initializing NBODY6/GPU library\n"); fprintf(stderr, "#CPU %d, #GPU %d\n", numCPU, numGPU); fprintf(stderr, " device:"); for(int i=0; i<numGPU; i++){ fprintf(stderr, " %d", devid[i]); } fprintf(stderr, "\n"); #if 1 for(int i=0; i<numGPU; i++){ hipDeviceProp_t prop; hipGetDeviceProperties(&prop, devid[i]); fprintf(stderr, " device %d: %s\n", devid[i], prop.name); } #endif fprintf(stderr, "***********************\n"); #endif devinit = true; } static void GPUNB_open(int nbmax){ time_send = time_grav = time_reduce = 0.0; numInter = 0; nbodymax = nbmax; GPUNB_devinit(); if(is_open){ fprintf(stderr, "gpunb: it is already open\n"); return; } is_open = true; for(int id=0; id<numGPU + 1; id++){ joff[id] = (id * nbmax) / numGPU; } // omp_set_num_threads(numGPU); #pragma omp parallel { int tid = omp_get_thread_num(); if(tid < numGPU){ hipSetDevice(devid[tid]); int nj = joff[tid+1] - joff[tid]; jpbuf [tid].allocate(nj + NTHREAD); ipbuf [tid].allocate(NIMAX); fpart [tid].allocate(NIMAX); ftot [tid].allocate(NIMAX); nbpart[tid].allocate(NIMAX); nblist[tid].allocate(NB_BUF_SIZE); // total ganged nblist nboff [tid].allocate(NIMAX+1); } } #ifdef PROFILE fprintf(stderr, "***********************\n"); fprintf(stderr, "Opened NBODY6/GPU library\n"); fprintf(stderr, "#CPU %d, #GPU %d\n", numCPU, numGPU); fprintf(stderr, " device:"); for(int i=0; i<numGPU; i++){ fprintf(stderr, " %d", devid[i]); } fprintf(stderr, "\n"); for(int i=0; i<numGPU+1; i++){ fprintf(stderr, " %d", joff[i]); } fprintf(stderr, "\n"); fprintf(stderr, "nbmax = %d\n", nbmax); fprintf(stderr, "***********************\n"); #endif } void GPUNB_close(){ if(!is_open){ fprintf(stderr, "gpunb: it is 
already close\n"); return; } is_open = false; // omp_set_num_threads(numGPU); #pragma omp parallel { int tid = omp_get_thread_num(); if(tid < numGPU){ jpbuf [tid].free(); ipbuf [tid].free(); fpart [tid].free(); ftot [tid].free(); nbpart[tid].free(); nblist[tid].free(); nboff [tid].free(); } } // omp_set_num_threads(numCPU); nbodymax = 0; #ifdef PROFILE fprintf(stderr, "Closed NBODY6/GPU library\n"); fprintf(stderr, "***********************\n"); fprintf(stderr, "time send : %f sec\n", time_send); fprintf(stderr, "time grav : %f sec\n", time_grav); fprintf(stderr, "time reduce : %f sec\n", time_reduce); fprintf(stderr, "%f Gflops (gravity part only)\n", 60.e-9 * numInter / time_grav); fprintf(stderr, "***********************\n"); #endif } void GPUNB_send( const int _nbody, const double mj[], const double xj[][3], const double vj[][3]){ assert(is_open); nbody = _nbody; assert(nbody <= nbodymax); time_send -= get_wtime(); for(int id=0; id<numGPU + 1; id++){ joff[id] = (id * nbody) / numGPU; } #pragma omp parallel { int tid = omp_get_thread_num(); if(tid < numGPU){ int nj = joff[tid+1] - joff[tid]; for(int j=0; j<nj; j++){ int jj = j + joff[tid]; jpbuf[tid][j] = Jparticle(mj[jj], xj[jj], vj[jj]); } jpbuf[tid].htod(nj); } } time_send += get_wtime(); } #if 0 void GPUNB_regf( int ni, double h2[], double dtr[], double xi[][3], double vi[][3], double acc[][3], double jrk[][3], double pot[], int lmax, int nbmax, int *listbase){ assert(is_open); time_grav -= get_wtime(); numInter += ni * nbody; assert(0 < ni && ni <= NIMAX); // omp_set_num_threads(numGPU); #pragma omp parallel { int tid = omp_get_thread_num(); if(tid < numGPU){ // hipSetDevice(device_id[tid]); for(int i=0; i<ni; i++){ ipbuf[tid][i] = Iparticle(h2[i], dtr[i], xi[i], vi[i]); } // set i-particles ipbuf[tid].htod(ni); // gravity kernel int niblock = 1 + (ni-1) / NTHREAD; dim3 grid(niblock, NJBLOCK, 1); dim3 threads(NTHREAD, 1, 1); int nj = joff[tid+1] - joff[tid]; hipLaunchKernelGGL(( gravity_kernel) , dim3(grid), 
dim3(threads) , 0, 0, nj, ipbuf[tid], jpbuf[tid], fpart[tid], nbpart[tid]); // CUDA_SAFE_THREAD_SYNC(); #if 0 dim3 rgrid(niblock, 1, 1); hipLaunchKernelGGL(( reduce_kernel) , dim3(rgrid), dim3(threads) , 0, 0, nj, joff[tid], fpart[tid], nbpart[tid], ftot[tid], nbtot[tid]); #else const int ni8 = 1 + (ni-1) / NYREDUCE; dim3 rgrid (ni8, 1, 1); dim3 rthreads(NXREDUCE, NYREDUCE, 1); hipLaunchKernelGGL(( force_reduce_kernel) , dim3(rgrid), dim3(rthreads) , 0, 0, ni, fpart[tid], ftot[tid]); #endif // CUDA_SAFE_THREAD_SYNC(); ftot [tid].dtoh(ni); // now make prefix sum int nbsum = 0; for(int i=0; i<ni; i++){ nboff[tid][i] = nbsum; const int nnb = ftot[tid][i].nnb; // assert(nnb >= 0); if(nnb >= 0) nbsum += nnb; } assert(nbsum <= NB_BUF_SIZE); nboff[tid].htod(ni); // debugging // for(int k=0; k<nbsum; k++) nblist[tid][k] = -1; // nblist[tid].htod(nbsum); hipLaunchKernelGGL(( gather_nb_kernel) , dim3(rgrid), dim3(rthreads), 0, 0, ni, nj, joff[tid], fpart[tid], ftot[tid], nboff[tid], nbpart[tid], nblist[tid]); // CUDA_SAFE_THREAD_SYNC(); nblist[tid].dtoh(nbsum); } } const double wt = get_wtime(); time_grav += wt; time_reduce -= wt; // reduction phase // omp_set_num_threads(numCPU); #pragma omp parallel for for(int i=0; i<ni; i++){ double ax=0.0, ay=0.0, az=0.0; double jx=0.0, jy=0.0, jz=0.0; double po=0.0; for(int id=0; id<numGPU; id++){ Force &fo = ftot[id][i]; ax += fo.acc.x; ay += fo.acc.y; az += fo.acc.z; jx += fo.jrk.x; jy += fo.jrk.y; jz += fo.jrk.z; po += fo.pot; } acc[i][0] = ax; acc[i][1] = ay; acc[i][2] = az; jrk[i][0] = jx; jrk[i][1] = jy; jrk[i][2] = jz; pot[i] = po; } #pragma omp parallel for for(int i=0; i<ni; i++){ bool overflow = false; int *nnbp = listbase + lmax * i; int *nblistp = nnbp + 1; int nnb = 0; for(int id=0; id<numGPU; id++){ const int nnb_part = ftot[id][i].nnb; if(nnb_part < 0){ overflow = true; fprintf(stderr, "!!!overflow : i=%d, id=%d, nnb_part=%d\n", i, id, nnb_part); } // assert(!overflow); nnb += nnb_part; if(nnb > nbmax){ overflow = true; 
fprintf(stderr, "!!!overflow : i=%d, id=%d, nnb_tot =%d, nnbmax=%d\n", i, id, nnb, nbmax); } // assert(!overflow); if(!overflow){ const int off = nboff[id][i]; for(int k=0; k<nnb_part; k++){ *nblistp++ = nblist[id][off + k]; } } } if(overflow){ *nnbp = -1; }else{ *nnbp = nnb; } } time_reduce += get_wtime(); } #else void GPUNB_regf_1st( const int ni, const double h2[], const double dtr[], const double xi[][3], const double vi[][3]) { assert(is_open); time_grav -= get_wtime(); numInter += ni * nbody; #pragma omp parallel { int tid = omp_get_thread_num(); if(tid < numGPU){ // hipSetDevice(device_id[tid]); for(int i=0; i<ni; i++){ ipbuf[tid][i] = Iparticle(h2[i], dtr[i], xi[i], vi[i]); } // set i-particles ipbuf[tid].htod(ni); // gravity kernel const int niblock = 1 + (ni-1) / NTHREAD; dim3 grid(niblock, NJBLOCK, 1); dim3 threads(NTHREAD, 1, 1); const int nj = joff[tid+1] - joff[tid]; hipLaunchKernelGGL(( gravity_kernel) , dim3(grid), dim3(threads) , 0, 0, nj, ipbuf[tid], jpbuf[tid], fpart[tid], nbpart[tid]); } } // end omp parallel } void GPUNB_regf_2nd( const int ni, double acc[][3], double jrk[][3], double pot[], const int lmax, const int nbmax, int *listbase) { assert(is_open); #pragma omp parallel { int tid = omp_get_thread_num(); if(tid < numGPU){ const int ni8 = 1 + (ni-1) / NYREDUCE; dim3 rgrid (ni8, 1, 1); dim3 rthreads(NXREDUCE, NYREDUCE, 1); hipLaunchKernelGGL(( force_reduce_kernel) , dim3(rgrid), dim3(rthreads) , 0, 0, ni, fpart[tid], ftot[tid]); ftot [tid].dtoh(ni); // now make prefix sum int nbsum = 0; for(int i=0; i<ni; i++){ nboff[tid][i] = nbsum; const int nnb = ftot[tid][i].nnb; // assert(nnb >= 0); if(nnb >= 0) nbsum += nnb; } assert(nbsum <= NB_BUF_SIZE); nboff[tid].htod(ni); const int nj = joff[tid+1] - joff[tid]; hipLaunchKernelGGL(( gather_nb_kernel) , dim3(rgrid), dim3(rthreads) , 0, 0, ni, nj, joff[tid], fpart[tid], ftot[tid], nboff[tid], nbpart[tid], nblist[tid]); // CUDA_SAFE_THREAD_SYNC(); nblist[tid].dtoh(nbsum); } } // end omp parallel 
const double wt = get_wtime(); time_grav += wt; time_reduce -= wt; // reduction phase #pragma omp parallel for for(int i=0; i<ni; i++){ double ax=0.0, ay=0.0, az=0.0; double jx=0.0, jy=0.0, jz=0.0; double po=0.0; for(int id=0; id<numGPU; id++){ Force &fo = ftot[id][i]; ax += fo.acc.x; ay += fo.acc.y; az += fo.acc.z; jx += fo.jrk.x; jy += fo.jrk.y; jz += fo.jrk.z; po += fo.pot; } acc[i][0] = ax; acc[i][1] = ay; acc[i][2] = az; jrk[i][0] = jx; jrk[i][1] = jy; jrk[i][2] = jz; pot[i] = po; } #pragma omp parallel for for(int i=0; i<ni; i++){ bool overflow = false; int *nnbp = listbase + lmax * i; int *nblistp = nnbp + 1; int nnb = 0; for(int id=0; id<numGPU; id++){ const int nnb_part = ftot[id][i].nnb; if(nnb_part < 0){ overflow = true; fprintf(stderr, "!!!overflow : i=%d, id=%d, nnb_part=%d\n", i, id, nnb_part); } // assert(!overflow); nnb += nnb_part; if(nnb > nbmax){ overflow = true; fprintf(stderr, "!!!overflow : i=%d, id=%d, nnb_tot =%d, nnbmax=%d\n", i, id, nnb, nbmax); } // assert(!overflow); if(!overflow){ const int off = nboff[id][i]; for(int k=0; k<nnb_part; k++){ *nblistp++ = nblist[id][off + k]; } } } if(overflow){ *nnbp = -1; }else{ *nnbp = nnb; } } // end omp parallel for time_reduce += get_wtime(); } #endif extern "C" { void gpunb_devinit_(){ GPUNB_devinit(); } void gpunb_open_(int *nbmax){ GPUNB_open(*nbmax); } void gpunb_close_(){ GPUNB_close(); } void gpunb_send_( int *nj, double mj[], double xj[][3], double vj[][3]){ GPUNB_send(*nj, mj, xj, vj); } void gpunb_regf_( int *ni, double h2[], double dtr[], double xi[][3], double vi[][3], double acc[][3], double jrk[][3], double pot[], int *lmax, int *nbmax, int *list){ // list[][lmax] // GPUNB_regf(*ni, h2, dtr, xi, vi, acc, jrk, pot, *lmax, *nbmax, list); GPUNB_regf_1st(*ni, h2, dtr, xi, vi); GPUNB_regf_2nd(*ni, acc, jrk, pot, *lmax, *nbmax, list); } void gpunb_regf_1st_( int *ni, double h2[], double dtr[], double xi[][3], double vi[][3]) { GPUNB_regf_1st(*ni, h2, dtr, xi, vi); } void gpunb_regf_2nd_( int 
*ni, double acc[][3], double jrk[][3], double pot[], int *lmax, int *nbmax, int *list) { GPUNB_regf_2nd(*ni, acc, jrk, pot, *lmax, *nbmax, list); } }
64f8443e96e29176c786e65ce932f14f63c35172.cu
#include <cstdio> #include <cmath> #include <cassert> #include <cstdlib> #include <cutil.h> #include <omp.h> #include "cuda_pointer.h" #define NTHREAD 64 // 64 or 128 //#define NJBLOCK 14 // for GTX 470 #define NJBLOCK 30 #define NIBLOCK 32 // 16 or 32 #define NIMAX (NTHREAD * NIBLOCK) // 2048 #define NXREDUCE 32 // must be >NJBLOCK #define NYREDUCE 8 #define NNB_PER_BLOCK 256 // NNB per block, must be power of 2 #define NB_BUF_SIZE (1<<20) // #define NNB_MAX 384 // total NNB at reduced #define MAX_CPU 32 #define MAX_GPU 8 // for clearity, for myself #define __out #define PROFILE #define NAN_CHECK(val) assert((val) == (val)); typedef unsigned short uint16; struct Jparticle{ float3 pos; float mass; float3 vel; float pad; Jparticle() {} Jparticle(const double mj, const double xj[3], const double vj[3]){ pos.x = xj[0]; pos.y = xj[1]; pos.z = xj[2]; mass = mj; vel.x = vj[0]; vel.y = vj[1]; vel.z = vj[2]; NAN_CHECK(xj[0]); NAN_CHECK(xj[1]); NAN_CHECK(xj[2]); NAN_CHECK(mj); NAN_CHECK(vj[0]); NAN_CHECK(vj[1]); NAN_CHECK(vj[2]); } }; struct Iparticle{ float3 pos; float h2; float3 vel; float dtr; Iparticle() {} Iparticle(const double h2i, const double dtri, const double xi[3], const double vi[3]){ pos.x = xi[0]; pos.y = xi[1]; pos.z = xi[2]; h2 = h2i; vel.x = vi[0]; vel.y = vi[1]; vel.z = vi[2]; dtr = dtri; NAN_CHECK(xi[0]); NAN_CHECK(xi[1]); NAN_CHECK(xi[2]); NAN_CHECK(h2i); NAN_CHECK(vi[0]); NAN_CHECK(vi[1]); NAN_CHECK(vi[2]); } }; struct Force{ float3 acc; float pot; float3 jrk; int nnb; // 8 words __device__ void clear(){ acc.x = acc.y = acc.z = 0.f; jrk.x = jrk.y = jrk.z = 0.f; pot = 0.f; nnb = 0; } __device__ void operator+=(const Force &rhs){ acc.x += rhs.acc.x; acc.y += rhs.acc.y; acc.z += rhs.acc.z; pot += rhs.pot; jrk.x += rhs.jrk.x; jrk.y += rhs.jrk.y; jrk.z += rhs.jrk.z; if(nnb>=0 && rhs.nnb>=0){ nnb += rhs.nnb; }else{ nnb = -1; } } }; __device__ void dev_gravity( const int jidx, const Iparticle &ip, const Jparticle &jp, __out Force &fo, __out uint16 nblist[]){ 
float dx = jp.pos.x - ip.pos.x; float dy = jp.pos.y - ip.pos.y; float dz = jp.pos.z - ip.pos.z; float dvx = jp.vel.x - ip.vel.x; float dvy = jp.vel.y - ip.vel.y; float dvz = jp.vel.z - ip.vel.z; float r2 = dx*dx + dy*dy + dz*dz; #if 1 float dxp = dx + ip.dtr * dvx; float dyp = dy + ip.dtr * dvy; float dzp = dz + ip.dtr * dvz; float r2p = dxp*dxp + dyp*dyp + dzp*dzp; #else float r2p = r2; #endif float rv = dx*dvx + dy*dvy + dz*dvz; float rinv1 = rsqrtf(r2); if(min(r2, r2p) < ip.h2){ // fo.neib[fo.nnb++ % NBMAX] = j; nblist[fo.nnb & (NNB_PER_BLOCK-1)] = (uint16)jidx; fo.nnb++; rinv1 = 0.f; } float rinv2 = rinv1 * rinv1; float mrinv1 = jp.mass * rinv1; float mrinv3 = mrinv1 * rinv2; rv *= -3.f * rinv2; #ifdef POTENTIAL fo.pot += mrinv1; #endif fo.acc.x += mrinv3 * dx; fo.acc.y += mrinv3 * dy; fo.acc.z += mrinv3 * dz; // fo.acc.z += 1.0; fo.jrk.x += mrinv3 * (dvx + rv * dx); fo.jrk.y += mrinv3 * (dvy + rv * dy); fo.jrk.z += mrinv3 * (dvz + rv * dz); } __global__ void gravity_kernel( const int nbody, const Iparticle ipbuf[], const Jparticle jpbuf[], __out Force fobuf[][NJBLOCK], __out uint16 nbbuf[][NJBLOCK][NNB_PER_BLOCK]){ int ibid = blockIdx.x; int jbid = blockIdx.y; int tid = threadIdx.x; int iaddr = tid + blockDim.x * ibid; int jstart = (nbody * (jbid )) / NJBLOCK; int jend = (nbody * (jbid+1)) / NJBLOCK; Iparticle ip = ipbuf[iaddr]; Force fo; fo.clear(); uint16 *nblist = nbbuf[iaddr][jbid]; for(int j=jstart; j<jend; j+=NTHREAD){ __shared__ Jparticle jpshare[NTHREAD]; __syncthreads(); #if 0 jpshare[tid] = jpbuf[j+tid]; #else float4 *src = (float4 *)&jpbuf[j]; float4 *dst = (float4 *)jpshare; dst[ tid] = src[ tid]; dst[NTHREAD+tid] = src[NTHREAD+tid]; #endif __syncthreads(); if(jend-j < NTHREAD){ #pragma unroll 4 for(int jj=0; jj<jend-j; jj++){ Jparticle jp = jpshare[jj]; dev_gravity(j-jstart+jj, ip, jp, fo, nblist); } }else{ #pragma unroll 4 for(int jj=0; jj<NTHREAD; jj++){ Jparticle jp = jpshare[jj]; dev_gravity(j-jstart+jj, ip, jp, fo, nblist); } } } if(fo.nnb > 
NNB_PER_BLOCK) fo.nnb = -1; fobuf[iaddr][jbid] = fo; } #if 0 __global__ void reduce_kernel_old( const int nbody, const int joff, // here's partial forces and nblists, const Force fpart [][NJBLOCK], const uint16 nbpart[][NJBLOCK][NNB_PER_BLOCK], // and these to be redeced Force ftot [], int nbtot [][NNB_MAX]){ const int ibid = blockIdx.x; int tid = threadIdx.x; const int iaddr = tid + blockDim.x * ibid; Force fo; fo.clear(); int *nbdst = nbtot[iaddr]; bool oveflow = false; for(int jb=0; jb<NJBLOCK; jb++){ const int jstart = (nbody * jb) / NJBLOCK; const Force &fsrc = fpart[iaddr][jb]; fo += fsrc; if(fsrc.nnb > NNB_PER_BLOCK) oveflow = true; if(fo.nnb > NNB_MAX ) oveflow = true; if(!oveflow){ const int klen = fsrc.nnb; for(int k=0; k<klen; k++){ const int nbid = (joff + jstart) + int(nbpart[iaddr][jb][k]); *nbdst++ = nbid; } } } if(oveflow) fo.nnb = -1; ftot[iaddr] = fo; } #endif __global__ void force_reduce_kernel( const int ni, const Force fpart[][NJBLOCK], __out Force ftot []){ const int xid = threadIdx.x; const int yid = threadIdx.y; const int bid = blockIdx.x; const int iaddr = yid + blockDim.y * bid; __shared__ Force fshare[NYREDUCE][NXREDUCE]; if(xid < NJBLOCK){ fshare[yid][xid] = fpart[iaddr][xid]; }else{ fshare[yid][xid].clear(); } Force *fs = fshare[yid]; #if NXREDUCE==32 if(xid < 16) fs[xid] += fs[xid + 16]; #endif if(xid < 8) fs[xid] += fs[xid + 8]; if(xid < 4) fs[xid] += fs[xid + 4]; if(xid < 2) fs[xid] += fs[xid + 2]; if(xid < 1) fs[xid] += fs[xid + 1]; if(iaddr < ni){ ftot[iaddr] = fs[0]; } } __global__ void gather_nb_kernel( const int ni, const int nj, const int joff, const Force fpart[][NJBLOCK], const Force ftot [], const int nboff[], const uint16 nbpart[][NJBLOCK][NNB_PER_BLOCK], __out int nblist[]) { const int xid = threadIdx.x; const int yid = threadIdx.y; const int bid = blockIdx.x; const int iaddr = yid + blockDim.y * bid; if(iaddr >= ni) return; if(ftot[iaddr].nnb < 0) return; const int mynnb = (xid < NJBLOCK) ? 
fpart[iaddr][xid].nnb : 0; // now performe prefix sum __shared__ int ishare[NYREDUCE][NXREDUCE]; ishare[yid][xid] = mynnb; int *ish = ishare[yid]; if(xid>=1) ish[xid] += ish[xid-1]; if(xid>=2) ish[xid] += ish[xid-2]; if(xid>=4) ish[xid] += ish[xid-4]; if(xid>=8) ish[xid] += ish[xid-8]; #if NXREDUCE==32 if(xid>=16) ish[xid] += ish[xid-16]; #endif const int off = (xid == 0) ? 0 : ish[xid-1]; int *nbdst = nblist + nboff[iaddr] + off; const int jstart = (nj * xid) / NJBLOCK; if(xid < NJBLOCK){ for(int k=0; k<mynnb; k++){ const int nbid = (joff + jstart) + int(nbpart[iaddr][xid][k]); // const int nbid = iaddr * 1000 + k; nbdst[k] = nbid; } } } // Host Part #ifdef PROFILE #include <sys/time.h> static double get_wtime(){ struct timeval tv; gettimeofday(&tv, NULL); return tv.tv_sec + 1.e-6 * tv.tv_usec; } #else static double get_wtime(){ return 0.0; } #endif static double time_send, time_grav, time_reduce; static long long numInter; static cudaPointer <Jparticle> jpbuf [MAX_GPU]; static cudaPointer <Iparticle> ipbuf [MAX_GPU]; static cudaPointer <Force[NJBLOCK]> fpart [MAX_GPU]; static cudaPointer <Force> ftot [MAX_GPU]; static cudaPointer <uint16[NJBLOCK][NNB_PER_BLOCK]> nbpart[MAX_GPU]; static cudaPointer <int> nblist[MAX_GPU]; static cudaPointer <int> nboff [MAX_GPU]; static int numCPU, numGPU; static int joff[MAX_GPU + 1]; static int nbody, nbodymax; static int devid[MAX_GPU]; static bool is_open = false; static bool devinit = false; static void GPUNB_devinit(){ if(devinit) return; assert(NXREDUCE >= NJBLOCK); assert(NXREDUCE <= 32); cudaGetDeviceCount(&numGPU); assert(numGPU <= MAX_GPU); char *gpu_list = getenv("GPU_LIST"); if(gpu_list){ // get GPU list from environment variable numGPU = 0; char *p = strtok(gpu_list, " "); while(p){ devid[numGPU++] = atoi(p); p = strtok(NULL, " "); assert(numGPU <= MAX_GPU); } }else{ // use all GPUs for(int i=0; i<numGPU; i++){ devid[i] = i; } } // numGPU = 1; #pragma omp parallel { int tid = omp_get_thread_num(); if(tid == 0) numCPU 
= omp_get_num_threads(); } assert(numCPU <= MAX_CPU); assert(numGPU <= numCPU); #pragma omp parallel { int tid = omp_get_thread_num(); if(tid < numGPU){ cudaSetDevice(devid[tid]); } } #ifdef PROFILE fprintf(stderr, "***********************\n"); fprintf(stderr, "Initializing NBODY6/GPU library\n"); fprintf(stderr, "#CPU %d, #GPU %d\n", numCPU, numGPU); fprintf(stderr, " device:"); for(int i=0; i<numGPU; i++){ fprintf(stderr, " %d", devid[i]); } fprintf(stderr, "\n"); #if 1 for(int i=0; i<numGPU; i++){ cudaDeviceProp prop; cudaGetDeviceProperties(&prop, devid[i]); fprintf(stderr, " device %d: %s\n", devid[i], prop.name); } #endif fprintf(stderr, "***********************\n"); #endif devinit = true; } static void GPUNB_open(int nbmax){ time_send = time_grav = time_reduce = 0.0; numInter = 0; nbodymax = nbmax; GPUNB_devinit(); if(is_open){ fprintf(stderr, "gpunb: it is already open\n"); return; } is_open = true; for(int id=0; id<numGPU + 1; id++){ joff[id] = (id * nbmax) / numGPU; } // omp_set_num_threads(numGPU); #pragma omp parallel { int tid = omp_get_thread_num(); if(tid < numGPU){ cudaSetDevice(devid[tid]); int nj = joff[tid+1] - joff[tid]; jpbuf [tid].allocate(nj + NTHREAD); ipbuf [tid].allocate(NIMAX); fpart [tid].allocate(NIMAX); ftot [tid].allocate(NIMAX); nbpart[tid].allocate(NIMAX); nblist[tid].allocate(NB_BUF_SIZE); // total ganged nblist nboff [tid].allocate(NIMAX+1); } } #ifdef PROFILE fprintf(stderr, "***********************\n"); fprintf(stderr, "Opened NBODY6/GPU library\n"); fprintf(stderr, "#CPU %d, #GPU %d\n", numCPU, numGPU); fprintf(stderr, " device:"); for(int i=0; i<numGPU; i++){ fprintf(stderr, " %d", devid[i]); } fprintf(stderr, "\n"); for(int i=0; i<numGPU+1; i++){ fprintf(stderr, " %d", joff[i]); } fprintf(stderr, "\n"); fprintf(stderr, "nbmax = %d\n", nbmax); fprintf(stderr, "***********************\n"); #endif } void GPUNB_close(){ if(!is_open){ fprintf(stderr, "gpunb: it is already close\n"); return; } is_open = false; // 
omp_set_num_threads(numGPU); #pragma omp parallel { int tid = omp_get_thread_num(); if(tid < numGPU){ jpbuf [tid].free(); ipbuf [tid].free(); fpart [tid].free(); ftot [tid].free(); nbpart[tid].free(); nblist[tid].free(); nboff [tid].free(); } } // omp_set_num_threads(numCPU); nbodymax = 0; #ifdef PROFILE fprintf(stderr, "Closed NBODY6/GPU library\n"); fprintf(stderr, "***********************\n"); fprintf(stderr, "time send : %f sec\n", time_send); fprintf(stderr, "time grav : %f sec\n", time_grav); fprintf(stderr, "time reduce : %f sec\n", time_reduce); fprintf(stderr, "%f Gflops (gravity part only)\n", 60.e-9 * numInter / time_grav); fprintf(stderr, "***********************\n"); #endif } void GPUNB_send( const int _nbody, const double mj[], const double xj[][3], const double vj[][3]){ assert(is_open); nbody = _nbody; assert(nbody <= nbodymax); time_send -= get_wtime(); for(int id=0; id<numGPU + 1; id++){ joff[id] = (id * nbody) / numGPU; } #pragma omp parallel { int tid = omp_get_thread_num(); if(tid < numGPU){ int nj = joff[tid+1] - joff[tid]; for(int j=0; j<nj; j++){ int jj = j + joff[tid]; jpbuf[tid][j] = Jparticle(mj[jj], xj[jj], vj[jj]); } jpbuf[tid].htod(nj); } } time_send += get_wtime(); } #if 0 void GPUNB_regf( int ni, double h2[], double dtr[], double xi[][3], double vi[][3], double acc[][3], double jrk[][3], double pot[], int lmax, int nbmax, int *listbase){ assert(is_open); time_grav -= get_wtime(); numInter += ni * nbody; assert(0 < ni && ni <= NIMAX); // omp_set_num_threads(numGPU); #pragma omp parallel { int tid = omp_get_thread_num(); if(tid < numGPU){ // cudaSetDevice(device_id[tid]); for(int i=0; i<ni; i++){ ipbuf[tid][i] = Iparticle(h2[i], dtr[i], xi[i], vi[i]); } // set i-particles ipbuf[tid].htod(ni); // gravity kernel int niblock = 1 + (ni-1) / NTHREAD; dim3 grid(niblock, NJBLOCK, 1); dim3 threads(NTHREAD, 1, 1); int nj = joff[tid+1] - joff[tid]; gravity_kernel <<< grid, threads >>> (nj, ipbuf[tid], jpbuf[tid], fpart[tid], nbpart[tid]); // 
CUDA_SAFE_THREAD_SYNC(); #if 0 dim3 rgrid(niblock, 1, 1); reduce_kernel <<< rgrid, threads >>> (nj, joff[tid], fpart[tid], nbpart[tid], ftot[tid], nbtot[tid]); #else const int ni8 = 1 + (ni-1) / NYREDUCE; dim3 rgrid (ni8, 1, 1); dim3 rthreads(NXREDUCE, NYREDUCE, 1); force_reduce_kernel <<< rgrid, rthreads >>> (ni, fpart[tid], ftot[tid]); #endif // CUDA_SAFE_THREAD_SYNC(); ftot [tid].dtoh(ni); // now make prefix sum int nbsum = 0; for(int i=0; i<ni; i++){ nboff[tid][i] = nbsum; const int nnb = ftot[tid][i].nnb; // assert(nnb >= 0); if(nnb >= 0) nbsum += nnb; } assert(nbsum <= NB_BUF_SIZE); nboff[tid].htod(ni); // debugging // for(int k=0; k<nbsum; k++) nblist[tid][k] = -1; // nblist[tid].htod(nbsum); gather_nb_kernel <<< rgrid, rthreads>>> (ni, nj, joff[tid], fpart[tid], ftot[tid], nboff[tid], nbpart[tid], nblist[tid]); // CUDA_SAFE_THREAD_SYNC(); nblist[tid].dtoh(nbsum); } } const double wt = get_wtime(); time_grav += wt; time_reduce -= wt; // reduction phase // omp_set_num_threads(numCPU); #pragma omp parallel for for(int i=0; i<ni; i++){ double ax=0.0, ay=0.0, az=0.0; double jx=0.0, jy=0.0, jz=0.0; double po=0.0; for(int id=0; id<numGPU; id++){ Force &fo = ftot[id][i]; ax += fo.acc.x; ay += fo.acc.y; az += fo.acc.z; jx += fo.jrk.x; jy += fo.jrk.y; jz += fo.jrk.z; po += fo.pot; } acc[i][0] = ax; acc[i][1] = ay; acc[i][2] = az; jrk[i][0] = jx; jrk[i][1] = jy; jrk[i][2] = jz; pot[i] = po; } #pragma omp parallel for for(int i=0; i<ni; i++){ bool overflow = false; int *nnbp = listbase + lmax * i; int *nblistp = nnbp + 1; int nnb = 0; for(int id=0; id<numGPU; id++){ const int nnb_part = ftot[id][i].nnb; if(nnb_part < 0){ overflow = true; fprintf(stderr, "!!!overflow : i=%d, id=%d, nnb_part=%d\n", i, id, nnb_part); } // assert(!overflow); nnb += nnb_part; if(nnb > nbmax){ overflow = true; fprintf(stderr, "!!!overflow : i=%d, id=%d, nnb_tot =%d, nnbmax=%d\n", i, id, nnb, nbmax); } // assert(!overflow); if(!overflow){ const int off = nboff[id][i]; for(int k=0; k<nnb_part; 
k++){ *nblistp++ = nblist[id][off + k]; } } } if(overflow){ *nnbp = -1; }else{ *nnbp = nnb; } } time_reduce += get_wtime(); } #else void GPUNB_regf_1st( const int ni, const double h2[], const double dtr[], const double xi[][3], const double vi[][3]) { assert(is_open); time_grav -= get_wtime(); numInter += ni * nbody; #pragma omp parallel { int tid = omp_get_thread_num(); if(tid < numGPU){ // cudaSetDevice(device_id[tid]); for(int i=0; i<ni; i++){ ipbuf[tid][i] = Iparticle(h2[i], dtr[i], xi[i], vi[i]); } // set i-particles ipbuf[tid].htod(ni); // gravity kernel const int niblock = 1 + (ni-1) / NTHREAD; dim3 grid(niblock, NJBLOCK, 1); dim3 threads(NTHREAD, 1, 1); const int nj = joff[tid+1] - joff[tid]; gravity_kernel <<< grid, threads >>> (nj, ipbuf[tid], jpbuf[tid], fpart[tid], nbpart[tid]); } } // end omp parallel } void GPUNB_regf_2nd( const int ni, double acc[][3], double jrk[][3], double pot[], const int lmax, const int nbmax, int *listbase) { assert(is_open); #pragma omp parallel { int tid = omp_get_thread_num(); if(tid < numGPU){ const int ni8 = 1 + (ni-1) / NYREDUCE; dim3 rgrid (ni8, 1, 1); dim3 rthreads(NXREDUCE, NYREDUCE, 1); force_reduce_kernel <<< rgrid, rthreads >>> (ni, fpart[tid], ftot[tid]); ftot [tid].dtoh(ni); // now make prefix sum int nbsum = 0; for(int i=0; i<ni; i++){ nboff[tid][i] = nbsum; const int nnb = ftot[tid][i].nnb; // assert(nnb >= 0); if(nnb >= 0) nbsum += nnb; } assert(nbsum <= NB_BUF_SIZE); nboff[tid].htod(ni); const int nj = joff[tid+1] - joff[tid]; gather_nb_kernel <<< rgrid, rthreads >>> (ni, nj, joff[tid], fpart[tid], ftot[tid], nboff[tid], nbpart[tid], nblist[tid]); // CUDA_SAFE_THREAD_SYNC(); nblist[tid].dtoh(nbsum); } } // end omp parallel const double wt = get_wtime(); time_grav += wt; time_reduce -= wt; // reduction phase #pragma omp parallel for for(int i=0; i<ni; i++){ double ax=0.0, ay=0.0, az=0.0; double jx=0.0, jy=0.0, jz=0.0; double po=0.0; for(int id=0; id<numGPU; id++){ Force &fo = ftot[id][i]; ax += fo.acc.x; ay += 
fo.acc.y; az += fo.acc.z; jx += fo.jrk.x; jy += fo.jrk.y; jz += fo.jrk.z; po += fo.pot; } acc[i][0] = ax; acc[i][1] = ay; acc[i][2] = az; jrk[i][0] = jx; jrk[i][1] = jy; jrk[i][2] = jz; pot[i] = po; } #pragma omp parallel for for(int i=0; i<ni; i++){ bool overflow = false; int *nnbp = listbase + lmax * i; int *nblistp = nnbp + 1; int nnb = 0; for(int id=0; id<numGPU; id++){ const int nnb_part = ftot[id][i].nnb; if(nnb_part < 0){ overflow = true; fprintf(stderr, "!!!overflow : i=%d, id=%d, nnb_part=%d\n", i, id, nnb_part); } // assert(!overflow); nnb += nnb_part; if(nnb > nbmax){ overflow = true; fprintf(stderr, "!!!overflow : i=%d, id=%d, nnb_tot =%d, nnbmax=%d\n", i, id, nnb, nbmax); } // assert(!overflow); if(!overflow){ const int off = nboff[id][i]; for(int k=0; k<nnb_part; k++){ *nblistp++ = nblist[id][off + k]; } } } if(overflow){ *nnbp = -1; }else{ *nnbp = nnb; } } // end omp parallel for time_reduce += get_wtime(); } #endif extern "C" { void gpunb_devinit_(){ GPUNB_devinit(); } void gpunb_open_(int *nbmax){ GPUNB_open(*nbmax); } void gpunb_close_(){ GPUNB_close(); } void gpunb_send_( int *nj, double mj[], double xj[][3], double vj[][3]){ GPUNB_send(*nj, mj, xj, vj); } void gpunb_regf_( int *ni, double h2[], double dtr[], double xi[][3], double vi[][3], double acc[][3], double jrk[][3], double pot[], int *lmax, int *nbmax, int *list){ // list[][lmax] // GPUNB_regf(*ni, h2, dtr, xi, vi, acc, jrk, pot, *lmax, *nbmax, list); GPUNB_regf_1st(*ni, h2, dtr, xi, vi); GPUNB_regf_2nd(*ni, acc, jrk, pot, *lmax, *nbmax, list); } void gpunb_regf_1st_( int *ni, double h2[], double dtr[], double xi[][3], double vi[][3]) { GPUNB_regf_1st(*ni, h2, dtr, xi, vi); } void gpunb_regf_2nd_( int *ni, double acc[][3], double jrk[][3], double pot[], int *lmax, int *nbmax, int *list) { GPUNB_regf_2nd(*ni, acc, jrk, pot, *lmax, *nbmax, list); } }
44404c035e3a73620f36683b920e894c9cf81542.hip
// !!! This is a file automatically generated by hipify!!! /** * * bashCGPU/CUDA * https://suzukiiichiro.github.io/search/?keyword= * -arch=sm_13 or -arch=sm_61 CPU $ nvcc -O3 -arch=sm_61 05CUDA_CarryChain.cu && ./a.out -r CPU $ nvcc -O3 -arch=sm_61 05CUDA_CarryChain.cu && ./a.out -c GPU $ nvcc -O3 -arch=sm_61 05CUDA_CarryChain.cu && ./a.out -g GPU $ nvcc -O3 -arch=sm_61 05CUDA_CarryChain.cu && ./a.out -n */ #include <iostream> #include <vector> #include <stdio.h> #include <stdlib.h> #include <stdbool.h> #include <math.h> #include <string.h> #include <time.h> #include <sys/time.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #define THREAD_NUM 96 #define MAX 27 // //#define UINT64_C(c) c ## ULL // // unsigned long TOTAL=0; unsigned long UNIQUE=0; // // typedef struct { unsigned int size; unsigned int pres_a[930]; unsigned int pres_b[930]; // uint64_t COUNTER[3]; // // // unsigned int COUNT2; // unsigned int COUNT4; // unsigned int COUNT8; }Global; Global g; // typedef struct { uint64_t row; uint64_t down; uint64_t left; uint64_t right; long long x[MAX]; }Board ; typedef struct { Board B; Board nB; Board eB; Board sB; Board wB; unsigned n; unsigned e; unsigned s; unsigned w; uint64_t dimx; uint64_t dimy; uint64_t COUNTER[3]; // unsigned int COUNT2; unsigned int COUNT4; unsigned int COUNT8; }Local; /** CPU/CPUR */ // void listChain() { unsigned int idx=0; for(unsigned int a=0;a<(unsigned)g.size;++a){ for(unsigned int b=0;b<(unsigned)g.size;++b){ if(((a>=b)&&(a-b)<=1)||((b>a)&&(b-a)<=1)){ continue; } g.pres_a[idx]=a; g.pres_b[idx]=b; ++idx; } } } /** CPU */ // bool placement(void* args) { Local *l=(Local *)args; if(l->B.x[l->dimx]==l->dimy){ return true; } if (l->B.x[0]==0){ if (l->B.x[1]!=(uint64_t)-1){ if((l->B.x[1]>=l->dimx)&&(l->dimy==1)){ return false; } } }else{ if( (l->B.x[0]!=(uint64_t)-1) ){ if(( (l->dimx<l->B.x[0]||l->dimx>=g.size-l->B.x[0]) && (l->dimy==0 || l->dimy==g.size-1) )){ return 0; } if (( 
(l->dimx==g.size-1)&&((l->dimy<=l->B.x[0])|| l->dimy>=g.size-l->B.x[0]))){ return 0; } } } l->B.x[l->dimx]=l->dimy; //x y uint64_t row=UINT64_C(1)<<l->dimx; uint64_t down=UINT64_C(1)<<l->dimy; uint64_t left=UINT64_C(1)<<(g.size-1-l->dimx+l->dimy); // uint64_t right=UINT64_C(1)<<(l->dimx+l->dimy); // if((l->B.row&row)||(l->B.down&down)||(l->B.left&left)||(l->B.right&right)){ return false; } l->B.row|=row; l->B.down|=down; l->B.left|=left; l->B.right|=right; return true; } // uint64_t solve(int size,int current,uint64_t row,uint64_t left,uint64_t down,uint64_t right) { uint64_t row_a[MAX]; uint64_t right_a[MAX]; uint64_t left_a[MAX]; uint64_t down_a[MAX]; uint64_t bitmap_a[MAX]; for (int i=0;i<size;i++){ row_a[i]=0; left_a[i]=0; down_a[i]=0; right_a[i]=0; bitmap_a[i]=0; } row_a[current]=row; left_a[current]=left; down_a[current]=down; right_a[current]=right; uint64_t bitmap=bitmap_a[current]=~(left_a[current]|down_a[current]|right_a[current]); uint64_t total=0; uint64_t bit; while(current>-1){ if((bitmap!=0||row&1)&&current<size){ if(!(down+1)){ total++; current--; row=row_a[current]; left=left_a[current]; right=right_a[current]; down=down_a[current]; bitmap=bitmap_a[current]; continue; }else if(row&1){ while( row&1 ){ row>>=1; left<<=1; right>>=1; } bitmap=~(left|down|right); // continue; }else{ bit=-bitmap&bitmap; bitmap=bitmap^bit; if(current<size){ row_a[current]=row; left_a[current]=left; down_a[current]=down; right_a[current]=right; bitmap_a[current]=bitmap; current++; } row>>=1; // left=(left|bit)<<1; down=down|bit; right=(right|bit)>>1; bitmap=~(left|down|right); // } }else{ current--; row=row_a[current]; left=left_a[current]; right=right_a[current]; down=down_a[current]; bitmap=bitmap_a[current]; } } return total; } // void carryChain_symmetry(void* args) { Local *l=(Local *)args; // unsigned const int ww=(g.size-2)*(g.size-1)-1-l->w; unsigned const int w2=(g.size-2)*(g.size-1)-1; // # if((l->s==ww)&&(l->n<(w2-l->e))){ return ; } // # 
if((l->e==ww)&&(l->n>(w2-l->n))){ return; } // # if((l->n==ww)&&(l->e>(w2-l->s))){ return; } // COUNT8 if(l->B.x[0]==0){ l->COUNTER[l->COUNT8]+=solve(g.size,0,l->B.row>>2, l->B.left>>4,((((l->B.down>>2)|(~0<<(g.size-4)))+1)<<(g.size-5))-1,(l->B.right>>4)<<(g.size-5)); return ; } // n,e,s==w // w=n=e=sskip w=n=e=s90 if(l->s==l->w){ if((l->n!=l->w)||(l->e!=l->w)){ return; } l->COUNTER[l->COUNT2]+=solve(g.size,0,l->B.row>>2, l->B.left>>4,((((l->B.down>>2)|(~0<<(g.size-4)))+1)<<(g.size-5))-1,(l->B.right>>4)<<(g.size-5)); return; } // e==w180 180n>=ssmaller? if((l->e==l->w)&&(l->n>=l->s)){ if(l->n>l->s){ return; } l->COUNTER[l->COUNT4]+=solve(g.size,0,l->B.row>>2, l->B.left>>4,((((l->B.down>>2)|(~0<<(g.size-4)))+1)<<(g.size-5))-1,(l->B.right>>4)<<(g.size-5)); return; } l->COUNTER[l->COUNT8]+=solve(g.size,0,l->B.row>>2, l->B.left>>4,((((l->B.down>>2)|(~0<<(g.size-4)))+1)<<(g.size-5))-1,(l->B.right>>4)<<(g.size-5)); return; } // pthread run() void thread_run(void* args) { Local *l=(Local *)args; // memcpy(&l->B,&l->wB,sizeof(Board)); // B=wB; l->B=l->wB; l->dimx=0; l->dimy=g.pres_a[l->w]; //if(!placement(l)){ continue; } if(!placement(l)){ return; } l->dimx=1; l->dimy=g.pres_b[l->w]; // if(!placement(l)){ continue; } if(!placement(l)){ return; } // // memcpy(&l->nB,&l->B,sizeof(Board)); // nB=B; l->nB=l->B; for(l->n=l->w;l->n<(g.size-2)*(g.size-1)-l->w;++l->n){ // memcpy(&l->B,&l->nB,sizeof(Board)); // B=nB; l->B=l->nB; l->dimx=g.pres_a[l->n]; l->dimy=g.size-1; if(!placement(l)){ continue; } l->dimx=g.pres_b[l->n]; l->dimy=g.size-2; if(!placement(l)){ continue; } // // memcpy(&l->eB,&l->B,sizeof(Board)); // eB=B; l->eB=l->B; for(l->e=l->w;l->e<(g.size-2)*(g.size-1)-l->w;++l->e){ // memcpy(&l->B,&l->eB,sizeof(Board)); // B=eB; l->B=l->eB; l->dimx=g.size-1; l->dimy=g.size-1-g.pres_a[l->e]; if(!placement(l)){ continue; } l->dimx=g.size-2; l->dimy=g.size-1-g.pres_b[l->e]; if(!placement(l)){ continue; } // // memcpy(&l->sB,&l->B,sizeof(Board)); // sB=B; l->sB=l->B; 
for(l->s=l->w;l->s<(g.size-2)*(g.size-1)-l->w;++l->s){ // memcpy(&l->B,&l->sB,sizeof(Board)); // B=sB; l->B=l->sB; l->dimx=g.size-1-g.pres_a[l->s]; l->dimy=0; if(!placement(l)){ continue; } l->dimx=g.size-1-g.pres_b[l->s]; l->dimy=1; if(!placement(l)){ continue; } // carryChain_symmetry(l); } //w } //e } //n } // void buildChain() { Local l[(g.size/2)*(g.size-3)]; // l->COUNT2=0; l->COUNT4=1; l->COUNT8=2; l->COUNTER[l->COUNT2]=l->COUNTER[l->COUNT4]=l->COUNTER[l->COUNT8]=0; // Board nB,eB,sB,wB; l->B.row=l->B.down=l->B.left=l->B.right=0; // Board x[] for(unsigned int i=0;i<g.size;++i){ l->B.x[i]=-1; } // // memcpy(&l->wB,&l->B,sizeof(Board)); // wB=B; l->wB=l->B; for(l->w=0;l->w<=(unsigned)(g.size/2)*(g.size-3);++l->w){ thread_run(&l); } //w /** * */ UNIQUE= l->COUNTER[l->COUNT2]+ l->COUNTER[l->COUNT4]+ l->COUNTER[l->COUNT8]; TOTAL= l->COUNTER[l->COUNT2]*2+ l->COUNTER[l->COUNT4]*4+ l->COUNTER[l->COUNT8]*8; } // void carryChain() { listChain(); // buildChain(); // // calcChain(&l); // } /** CPUR */ // uint64_t solveR(uint64_t row,uint64_t left,uint64_t down,uint64_t right) { if(down+1==0){ return 1; } while((row&1)!=0) { row>>=1; left<<=1; right>>=1; } row>>=1; uint64_t total=0; for(uint64_t carryChain=~(left|down|right);carryChain!=0;){ uint64_t const bit=carryChain&-carryChain; total+=solveR(row,(left|bit)<<1,down|bit,(right|bit)>>1); carryChain^=bit; } return total; } // void carryChain_symmetryR(void* args) { Local *l=(Local *)args; // unsigned const int ww=(g.size-2)*(g.size-1)-1-l->w; unsigned const int w2=(g.size-2)*(g.size-1)-1; // # if((l->s==ww)&&(l->n<(w2-l->e))){ return ; } // # if((l->e==ww)&&(l->n>(w2-l->n))){ return; } // # if((l->n==ww)&&(l->e>(w2-l->s))){ return; } // COUNT8 if(l->B.x[0]==0){ l->COUNTER[l->COUNT8]+=solveR(l->B.row>>2, l->B.left>>4,((((l->B.down>>2)|(~0<<(g.size-4)))+1)<<(g.size-5))-1,(l->B.right>>4)<<(g.size-5)); return ; } // n,e,s==w // w=n=e=sskip w=n=e=s90 if(l->s==l->w){ if((l->n!=l->w)||(l->e!=l->w)){ return; } 
l->COUNTER[l->COUNT2]+=solveR(l->B.row>>2, l->B.left>>4,((((l->B.down>>2)|(~0<<(g.size-4)))+1)<<(g.size-5))-1,(l->B.right>>4)<<(g.size-5)); return; } // e==w180 180n>=ssmaller? if((l->e==l->w)&&(l->n>=l->s)){ if(l->n>l->s){ return; } l->COUNTER[l->COUNT4]+=solveR(l->B.row>>2, l->B.left>>4,((((l->B.down>>2)|(~0<<(g.size-4)))+1)<<(g.size-5))-1,(l->B.right>>4)<<(g.size-5)); return; } l->COUNTER[l->COUNT8]+=solveR(l->B.row>>2, l->B.left>>4,((((l->B.down>>2)|(~0<<(g.size-4)))+1)<<(g.size-5))-1,(l->B.right>>4)<<(g.size-5)); return; } // pthread run() void thread_runR(void* args) { Local *l=(Local *)args; // memcpy(&l->B,&l->wB,sizeof(Board)); // B=wB; l->B=l->wB; l->dimx=0; l->dimy=g.pres_a[l->w]; //if(!placement(l)){ continue; } if(!placement(l)){ return; } l->dimx=1; l->dimy=g.pres_b[l->w]; // if(!placement(l)){ continue; } if(!placement(l)){ return; } // // memcpy(&l->nB,&l->B,sizeof(Board)); // nB=B; l->nB=l->B; for(l->n=l->w;l->n<(g.size-2)*(g.size-1)-l->w;++l->n){ // memcpy(&l->B,&l->nB,sizeof(Board)); // B=nB; l->B=l->nB; l->dimx=g.pres_a[l->n]; l->dimy=g.size-1; if(!placement(l)){ continue; } l->dimx=g.pres_b[l->n]; l->dimy=g.size-2; if(!placement(l)){ continue; } // // memcpy(&l->eB,&l->B,sizeof(Board)); // eB=B; l->eB=l->B; for(l->e=l->w;l->e<(g.size-2)*(g.size-1)-l->w;++l->e){ // memcpy(&l->B,&l->eB,sizeof(Board)); // B=eB; l->B=l->eB; l->dimx=g.size-1; l->dimy=g.size-1-g.pres_a[l->e]; if(!placement(l)){ continue; } l->dimx=g.size-2; l->dimy=g.size-1-g.pres_b[l->e]; if(!placement(l)){ continue; } // // memcpy(&l->sB,&l->B,sizeof(Board)); // sB=B; l->sB=l->B; for(l->s=l->w;l->s<(g.size-2)*(g.size-1)-l->w;++l->s){ // memcpy(&l->B,&l->sB,sizeof(Board)); // B=sB; l->B=l->sB; l->dimx=g.size-1-g.pres_a[l->s]; l->dimy=0; if(!placement(l)){ continue; } l->dimx=g.size-1-g.pres_b[l->s]; l->dimy=1; if(!placement(l)){ continue; } // carryChain_symmetryR(l); } //w } //e } //n } // void buildChainR() { Local l[(g.size/2)*(g.size-3)]; // l->COUNT2=0; l->COUNT4=1; 
l->COUNT8=2; l->COUNTER[l->COUNT2]=l->COUNTER[l->COUNT4]=l->COUNTER[l->COUNT8]=0; // Board nB,eB,sB,wB; l->B.row=l->B.down=l->B.left=l->B.right=0; // Board x[] for(unsigned int i=0;i<g.size;++i){ l->B.x[i]=-1; } // // memcpy(&l->wB,&l->B,sizeof(Board)); // wB=B; l->wB=l->B; for(l->w=0;l->w<=(unsigned)(g.size/2)*(g.size-3);++l->w){ thread_runR(&l); } //w /** * */ UNIQUE= l->COUNTER[l->COUNT2]+ l->COUNTER[l->COUNT4]+ l->COUNTER[l->COUNT8]; TOTAL= l->COUNTER[l->COUNT2]*2+ l->COUNTER[l->COUNT4]*4+ l->COUNTER[l->COUNT8]*8; } // void carryChainR() { listChain(); // buildChainR(); // // calcChain(&l); // } /** GPU */ // CUDA bool InitCUDA() { int count; hipGetDeviceCount(&count); if(count==0){fprintf(stderr,"There is no device.\n");return false;} int i; for(i=0;i<count;i++){ struct hipDeviceProp_t prop; if(hipGetDeviceProperties(&prop,i)==hipSuccess){if(prop.major>=1){break;} } } if(i==count){fprintf(stderr,"There is no device supporting CUDA 1.x.\n");return false;} hipSetDevice(i); return true; } // int main(int argc,char** argv) { bool cpu=false,cpur=false,gpu=false,gpuNodeLayer=false; int argstart=2; if(argc>=2&&argv[1][0]=='-'){ if(argv[1][1]=='c'||argv[1][1]=='C'){cpu=true;} else if(argv[1][1]=='r'||argv[1][1]=='R'){cpur=true;} else if(argv[1][1]=='c'||argv[1][1]=='C'){cpu=true;} else if(argv[1][1]=='g'||argv[1][1]=='G'){gpu=true;} else if(argv[1][1]=='n'||argv[1][1]=='N'){gpuNodeLayer=true;} else{ gpuNodeLayer=true; } //gpu argstart=2; } if(argc<argstart){ printf("Usage: %s [-c|-g|-r|-s] n steps\n",argv[0]); printf(" -r: CPU \n"); printf(" -c: CPU \n"); printf(" -g: GPU \n"); printf(" -n: GPU \n"); } if(cpur){ printf("\n\nCPU \n"); } else if(cpu){ printf("\n\nCPU \n"); } else if(gpu){ printf("\n\nGPU \n"); } else if(gpuNodeLayer){ printf("\n\nGPU \n"); } if(cpu||cpur) { int min=4; int targetN=17; struct timeval t0; struct timeval t1; printf("%s\n"," N: Total Unique dd:hh:mm:ss.ms"); for(int size=min;size<=targetN;size++){ TOTAL=UNIQUE=0; gettimeofday(&t0, NULL);// 
if(cpur){ // g.size=size; carryChainR(); } if(cpu){ // g.size=size; carryChain(); } // gettimeofday(&t1, NULL);// int ss;int ms;int dd; if(t1.tv_usec<t0.tv_usec) { dd=(t1.tv_sec-t0.tv_sec-1)/86400; ss=(t1.tv_sec-t0.tv_sec-1)%86400; ms=(1000000+t1.tv_usec-t0.tv_usec+500)/10000; }else { dd=(t1.tv_sec-t0.tv_sec)/86400; ss=(t1.tv_sec-t0.tv_sec)%86400; ms=(t1.tv_usec-t0.tv_usec+500)/10000; }//end if int hh=ss/3600; int mm=(ss-hh*3600)/60; ss%=60; printf("%2d:%13ld%12ld%8.2d:%02d:%02d:%02d.%02d\n",size,TOTAL,UNIQUE,dd,hh,mm,ss,ms); } //end for }//end if if(gpu||gpuNodeLayer) { if(!InitCUDA()){return 0;} /* int steps=24576; */ int min=4; int targetN=21; struct timeval t0; struct timeval t1; printf("%s\n"," N: Total Unique dd:hh:mm:ss.ms"); for(int size=min;size<=targetN;size++){ gettimeofday(&t0,NULL); // if(gpu){ TOTAL=UNIQUE=0; g.size=size; TOTAL=carryChain_solve_nodeLayer(size,0,0,0); // }else if(gpuNodeLayer){ TOTAL=UNIQUE=0; g.size=size; carryChain_build_nodeLayer(size); // } gettimeofday(&t1,NULL); // int ss;int ms;int dd; if (t1.tv_usec<t0.tv_usec) { dd=(int)(t1.tv_sec-t0.tv_sec-1)/86400; ss=(t1.tv_sec-t0.tv_sec-1)%86400; ms=(1000000+t1.tv_usec-t0.tv_usec+500)/10000; } else { dd=(int)(t1.tv_sec-t0.tv_sec)/86400; ss=(t1.tv_sec-t0.tv_sec)%86400; ms=(t1.tv_usec-t0.tv_usec+500)/10000; }//end if int hh=ss/3600; int mm=(ss-hh*3600)/60; ss%=60; printf("%2d:%13ld%12ld%8.2d:%02d:%02d:%02d.%02d\n",size,TOTAL,UNIQUE,dd,hh,mm,ss,ms); }//end for }//end if return 0; }
44404c035e3a73620f36683b920e894c9cf81542.cu
/** * * bash版キャリーチェーンのC言語版のGPU/CUDA移植版 * 詳しい説明はこちらをどうぞ https://suzukiiichiro.github.io/search/?keyword=Nクイーン問題 * アーキテクチャの指定(なくても問題なし、あれば高速) -arch=sm_13 or -arch=sm_61 CPUの再帰での実行 $ nvcc -O3 -arch=sm_61 05CUDA_CarryChain.cu && ./a.out -r CPUの非再帰での実行 $ nvcc -O3 -arch=sm_61 05CUDA_CarryChain.cu && ./a.out -c GPUのシングルスレッド $ nvcc -O3 -arch=sm_61 05CUDA_CarryChain.cu && ./a.out -g GPUのマルチスレッド $ nvcc -O3 -arch=sm_61 05CUDA_CarryChain.cu && ./a.out -n */ #include <iostream> #include <vector> #include <stdio.h> #include <stdlib.h> #include <stdbool.h> #include <math.h> #include <string.h> #include <time.h> #include <sys/time.h> #include <cuda.h> #include <cuda_runtime.h> #include <device_launch_parameters.h> #define THREAD_NUM 96 #define MAX 27 // システムによって以下のマクロが必要であればコメントを外してください。 //#define UINT64_C(c) c ## ULL // // グローバル変数 unsigned long TOTAL=0; unsigned long UNIQUE=0; // キャリーチェーン 非再帰版 // 構造体 typedef struct { unsigned int size; unsigned int pres_a[930]; unsigned int pres_b[930]; // uint64_t COUNTER[3]; // //カウンター配列 // unsigned int COUNT2; // unsigned int COUNT4; // unsigned int COUNT8; }Global; Global g; // 構造体 typedef struct { uint64_t row; uint64_t down; uint64_t left; uint64_t right; long long x[MAX]; }Board ; typedef struct { Board B; Board nB; Board eB; Board sB; Board wB; unsigned n; unsigned e; unsigned s; unsigned w; uint64_t dimx; uint64_t dimy; uint64_t COUNTER[3]; //カウンター配列 unsigned int COUNT2; unsigned int COUNT4; unsigned int COUNT8; }Local; /** CPU/CPUR 再帰・非再帰共通 */ // チェーンのリストを作成 void listChain() { unsigned int idx=0; for(unsigned int a=0;a<(unsigned)g.size;++a){ for(unsigned int b=0;b<(unsigned)g.size;++b){ if(((a>=b)&&(a-b)<=1)||((b>a)&&(b-a)<=1)){ continue; } g.pres_a[idx]=a; g.pres_b[idx]=b; ++idx; } } } /** CPU 非再帰 */ // クイーンの効きをチェック bool placement(void* args) { Local *l=(Local *)args; if(l->B.x[l->dimx]==l->dimy){ return true; } if (l->B.x[0]==0){ if (l->B.x[1]!=(uint64_t)-1){ if((l->B.x[1]>=l->dimx)&&(l->dimy==1)){ return false; } } }else{ if( 
(l->B.x[0]!=(uint64_t)-1) ){ if(( (l->dimx<l->B.x[0]||l->dimx>=g.size-l->B.x[0]) && (l->dimy==0 || l->dimy==g.size-1) )){ return 0; } if (( (l->dimx==g.size-1)&&((l->dimy<=l->B.x[0])|| l->dimy>=g.size-l->B.x[0]))){ return 0; } } } l->B.x[l->dimx]=l->dimy; //xは行 yは列 uint64_t row=UINT64_C(1)<<l->dimx; uint64_t down=UINT64_C(1)<<l->dimy; uint64_t left=UINT64_C(1)<<(g.size-1-l->dimx+l->dimy); //右上から左下 uint64_t right=UINT64_C(1)<<(l->dimx+l->dimy); // 左上から右下 if((l->B.row&row)||(l->B.down&down)||(l->B.left&left)||(l->B.right&right)){ return false; } l->B.row|=row; l->B.down|=down; l->B.left|=left; l->B.right|=right; return true; } //非再帰 uint64_t solve(int size,int current,uint64_t row,uint64_t left,uint64_t down,uint64_t right) { uint64_t row_a[MAX]; uint64_t right_a[MAX]; uint64_t left_a[MAX]; uint64_t down_a[MAX]; uint64_t bitmap_a[MAX]; for (int i=0;i<size;i++){ row_a[i]=0; left_a[i]=0; down_a[i]=0; right_a[i]=0; bitmap_a[i]=0; } row_a[current]=row; left_a[current]=left; down_a[current]=down; right_a[current]=right; uint64_t bitmap=bitmap_a[current]=~(left_a[current]|down_a[current]|right_a[current]); uint64_t total=0; uint64_t bit; while(current>-1){ if((bitmap!=0||row&1)&&current<size){ if(!(down+1)){ total++; current--; row=row_a[current]; left=left_a[current]; right=right_a[current]; down=down_a[current]; bitmap=bitmap_a[current]; continue; }else if(row&1){ while( row&1 ){ row>>=1; left<<=1; right>>=1; } bitmap=~(left|down|right); //再帰に必要な変数は必ず定義する必要があります。 continue; }else{ bit=-bitmap&bitmap; bitmap=bitmap^bit; if(current<size){ row_a[current]=row; left_a[current]=left; down_a[current]=down; right_a[current]=right; bitmap_a[current]=bitmap; current++; } row>>=1; //1行下に移動する left=(left|bit)<<1; down=down|bit; right=(right|bit)>>1; bitmap=~(left|down|right); //再帰に必要な変数は必ず定義する必要があります。 } }else{ current--; row=row_a[current]; left=left_a[current]; right=right_a[current]; down=down_a[current]; bitmap=bitmap_a[current]; } } return total; } //非再帰 対称解除法 void 
carryChain_symmetry(void* args) { Local *l=(Local *)args; // 対称解除法 unsigned const int ww=(g.size-2)*(g.size-1)-1-l->w; unsigned const int w2=(g.size-2)*(g.size-1)-1; // # 対角線上の反転が小さいかどうか確認する if((l->s==ww)&&(l->n<(w2-l->e))){ return ; } // # 垂直方向の中心に対する反転が小さいかを確認 if((l->e==ww)&&(l->n>(w2-l->n))){ return; } // # 斜め下方向への反転が小さいかをチェックする if((l->n==ww)&&(l->e>(w2-l->s))){ return; } // 枝刈り 1行目が角の場合回転対称チェックせずCOUNT8にする if(l->B.x[0]==0){ l->COUNTER[l->COUNT8]+=solve(g.size,0,l->B.row>>2, l->B.left>>4,((((l->B.down>>2)|(~0<<(g.size-4)))+1)<<(g.size-5))-1,(l->B.right>>4)<<(g.size-5)); return ; } // n,e,s==w の場合は最小値を確認する。右回転で同じ場合は、 // w=n=e=sでなければ値が小さいのでskip w=n=e=sであれば90度回転で同じ可能性 if(l->s==l->w){ if((l->n!=l->w)||(l->e!=l->w)){ return; } l->COUNTER[l->COUNT2]+=solve(g.size,0,l->B.row>>2, l->B.left>>4,((((l->B.down>>2)|(~0<<(g.size-4)))+1)<<(g.size-5))-1,(l->B.right>>4)<<(g.size-5)); return; } // e==wは180度回転して同じ 180度回転して同じ時n>=sの時はsmaller? if((l->e==l->w)&&(l->n>=l->s)){ if(l->n>l->s){ return; } l->COUNTER[l->COUNT4]+=solve(g.size,0,l->B.row>>2, l->B.left>>4,((((l->B.down>>2)|(~0<<(g.size-4)))+1)<<(g.size-5))-1,(l->B.right>>4)<<(g.size-5)); return; } l->COUNTER[l->COUNT8]+=solve(g.size,0,l->B.row>>2, l->B.left>>4,((((l->B.down>>2)|(~0<<(g.size-4)))+1)<<(g.size-5))-1,(l->B.right>>4)<<(g.size-5)); return; } //非再帰 pthread run() void thread_run(void* args) { Local *l=(Local *)args; // memcpy(&l->B,&l->wB,sizeof(Board)); // B=wB; l->B=l->wB; l->dimx=0; l->dimy=g.pres_a[l->w]; //if(!placement(l)){ continue; } if(!placement(l)){ return; } l->dimx=1; l->dimy=g.pres_b[l->w]; // if(!placement(l)){ continue; } if(!placement(l)){ return; } //2 左2行に置く // memcpy(&l->nB,&l->B,sizeof(Board)); // nB=B; l->nB=l->B; for(l->n=l->w;l->n<(g.size-2)*(g.size-1)-l->w;++l->n){ // memcpy(&l->B,&l->nB,sizeof(Board)); // B=nB; l->B=l->nB; l->dimx=g.pres_a[l->n]; l->dimy=g.size-1; if(!placement(l)){ continue; } l->dimx=g.pres_b[l->n]; l->dimy=g.size-2; if(!placement(l)){ continue; } // 3 下2行に置く // 
memcpy(&l->eB,&l->B,sizeof(Board)); // eB=B; l->eB=l->B; for(l->e=l->w;l->e<(g.size-2)*(g.size-1)-l->w;++l->e){ // memcpy(&l->B,&l->eB,sizeof(Board)); // B=eB; l->B=l->eB; l->dimx=g.size-1; l->dimy=g.size-1-g.pres_a[l->e]; if(!placement(l)){ continue; } l->dimx=g.size-2; l->dimy=g.size-1-g.pres_b[l->e]; if(!placement(l)){ continue; } // 4 右2列に置く // memcpy(&l->sB,&l->B,sizeof(Board)); // sB=B; l->sB=l->B; for(l->s=l->w;l->s<(g.size-2)*(g.size-1)-l->w;++l->s){ // memcpy(&l->B,&l->sB,sizeof(Board)); // B=sB; l->B=l->sB; l->dimx=g.size-1-g.pres_a[l->s]; l->dimy=0; if(!placement(l)){ continue; } l->dimx=g.size-1-g.pres_b[l->s]; l->dimy=1; if(!placement(l)){ continue; } // 対称解除法 carryChain_symmetry(l); } //w } //e } //n } //非再帰 チェーンのビルド void buildChain() { Local l[(g.size/2)*(g.size-3)]; // カウンターの初期化 l->COUNT2=0; l->COUNT4=1; l->COUNT8=2; l->COUNTER[l->COUNT2]=l->COUNTER[l->COUNT4]=l->COUNTER[l->COUNT8]=0; // Board の初期化 nB,eB,sB,wB; l->B.row=l->B.down=l->B.left=l->B.right=0; // Board x[]の初期化 for(unsigned int i=0;i<g.size;++i){ l->B.x[i]=-1; } //1 上2行に置く // memcpy(&l->wB,&l->B,sizeof(Board)); // wB=B; l->wB=l->B; for(l->w=0;l->w<=(unsigned)(g.size/2)*(g.size-3);++l->w){ thread_run(&l); } //w /** * 集計 */ UNIQUE= l->COUNTER[l->COUNT2]+ l->COUNTER[l->COUNT4]+ l->COUNTER[l->COUNT8]; TOTAL= l->COUNTER[l->COUNT2]*2+ l->COUNTER[l->COUNT4]*4+ l->COUNTER[l->COUNT8]*8; } //非再帰 キャリーチェーン void carryChain() { listChain(); //チェーンのリストを作成 buildChain(); // チェーンのビルド // calcChain(&l); // 集計 } /** CPUR 再帰 */ //再帰 ボード外側2列を除く内側のクイーン配置処理 uint64_t solveR(uint64_t row,uint64_t left,uint64_t down,uint64_t right) { if(down+1==0){ return 1; } while((row&1)!=0) { row>>=1; left<<=1; right>>=1; } row>>=1; uint64_t total=0; for(uint64_t carryChain=~(left|down|right);carryChain!=0;){ uint64_t const bit=carryChain&-carryChain; total+=solveR(row,(left|bit)<<1,down|bit,(right|bit)>>1); carryChain^=bit; } return total; } //再帰 対称解除法 void carryChain_symmetryR(void* args) { Local *l=(Local *)args; // 対称解除法 
unsigned const int ww=(g.size-2)*(g.size-1)-1-l->w; unsigned const int w2=(g.size-2)*(g.size-1)-1; // # 対角線上の反転が小さいかどうか確認する if((l->s==ww)&&(l->n<(w2-l->e))){ return ; } // # 垂直方向の中心に対する反転が小さいかを確認 if((l->e==ww)&&(l->n>(w2-l->n))){ return; } // # 斜め下方向への反転が小さいかをチェックする if((l->n==ww)&&(l->e>(w2-l->s))){ return; } // 枝刈り 1行目が角の場合回転対称チェックせずCOUNT8にする if(l->B.x[0]==0){ l->COUNTER[l->COUNT8]+=solveR(l->B.row>>2, l->B.left>>4,((((l->B.down>>2)|(~0<<(g.size-4)))+1)<<(g.size-5))-1,(l->B.right>>4)<<(g.size-5)); return ; } // n,e,s==w の場合は最小値を確認する。右回転で同じ場合は、 // w=n=e=sでなければ値が小さいのでskip w=n=e=sであれば90度回転で同じ可能性 if(l->s==l->w){ if((l->n!=l->w)||(l->e!=l->w)){ return; } l->COUNTER[l->COUNT2]+=solveR(l->B.row>>2, l->B.left>>4,((((l->B.down>>2)|(~0<<(g.size-4)))+1)<<(g.size-5))-1,(l->B.right>>4)<<(g.size-5)); return; } // e==wは180度回転して同じ 180度回転して同じ時n>=sの時はsmaller? if((l->e==l->w)&&(l->n>=l->s)){ if(l->n>l->s){ return; } l->COUNTER[l->COUNT4]+=solveR(l->B.row>>2, l->B.left>>4,((((l->B.down>>2)|(~0<<(g.size-4)))+1)<<(g.size-5))-1,(l->B.right>>4)<<(g.size-5)); return; } l->COUNTER[l->COUNT8]+=solveR(l->B.row>>2, l->B.left>>4,((((l->B.down>>2)|(~0<<(g.size-4)))+1)<<(g.size-5))-1,(l->B.right>>4)<<(g.size-5)); return; } //再帰 pthread run() void thread_runR(void* args) { Local *l=(Local *)args; // memcpy(&l->B,&l->wB,sizeof(Board)); // B=wB; l->B=l->wB; l->dimx=0; l->dimy=g.pres_a[l->w]; //if(!placement(l)){ continue; } if(!placement(l)){ return; } l->dimx=1; l->dimy=g.pres_b[l->w]; // if(!placement(l)){ continue; } if(!placement(l)){ return; } //2 左2行に置く // memcpy(&l->nB,&l->B,sizeof(Board)); // nB=B; l->nB=l->B; for(l->n=l->w;l->n<(g.size-2)*(g.size-1)-l->w;++l->n){ // memcpy(&l->B,&l->nB,sizeof(Board)); // B=nB; l->B=l->nB; l->dimx=g.pres_a[l->n]; l->dimy=g.size-1; if(!placement(l)){ continue; } l->dimx=g.pres_b[l->n]; l->dimy=g.size-2; if(!placement(l)){ continue; } // 3 下2行に置く // memcpy(&l->eB,&l->B,sizeof(Board)); // eB=B; l->eB=l->B; for(l->e=l->w;l->e<(g.size-2)*(g.size-1)-l->w;++l->e){ 
// memcpy(&l->B,&l->eB,sizeof(Board)); // B=eB; l->B=l->eB; l->dimx=g.size-1; l->dimy=g.size-1-g.pres_a[l->e]; if(!placement(l)){ continue; } l->dimx=g.size-2; l->dimy=g.size-1-g.pres_b[l->e]; if(!placement(l)){ continue; } // 4 右2列に置く // memcpy(&l->sB,&l->B,sizeof(Board)); // sB=B; l->sB=l->B; for(l->s=l->w;l->s<(g.size-2)*(g.size-1)-l->w;++l->s){ // memcpy(&l->B,&l->sB,sizeof(Board)); // B=sB; l->B=l->sB; l->dimx=g.size-1-g.pres_a[l->s]; l->dimy=0; if(!placement(l)){ continue; } l->dimx=g.size-1-g.pres_b[l->s]; l->dimy=1; if(!placement(l)){ continue; } // 対称解除法 carryChain_symmetryR(l); } //w } //e } //n } //再帰 チェーンのビルド void buildChainR() { Local l[(g.size/2)*(g.size-3)]; // カウンターの初期化 l->COUNT2=0; l->COUNT4=1; l->COUNT8=2; l->COUNTER[l->COUNT2]=l->COUNTER[l->COUNT4]=l->COUNTER[l->COUNT8]=0; // Board の初期化 nB,eB,sB,wB; l->B.row=l->B.down=l->B.left=l->B.right=0; // Board x[]の初期化 for(unsigned int i=0;i<g.size;++i){ l->B.x[i]=-1; } //1 上2行に置く // memcpy(&l->wB,&l->B,sizeof(Board)); // wB=B; l->wB=l->B; for(l->w=0;l->w<=(unsigned)(g.size/2)*(g.size-3);++l->w){ thread_runR(&l); } //w /** * 集計 */ UNIQUE= l->COUNTER[l->COUNT2]+ l->COUNTER[l->COUNT4]+ l->COUNTER[l->COUNT8]; TOTAL= l->COUNTER[l->COUNT2]*2+ l->COUNTER[l->COUNT4]*4+ l->COUNTER[l->COUNT8]*8; } //再帰 キャリーチェーン void carryChainR() { listChain(); //チェーンのリストを作成 buildChainR(); // チェーンのビルド // calcChain(&l); // 集計 } /** GPU */ // CUDA 初期化 bool InitCUDA() { int count; cudaGetDeviceCount(&count); if(count==0){fprintf(stderr,"There is no device.\n");return false;} int i; for(i=0;i<count;i++){ struct cudaDeviceProp prop; if(cudaGetDeviceProperties(&prop,i)==cudaSuccess){if(prop.major>=1){break;} } } if(i==count){fprintf(stderr,"There is no device supporting CUDA 1.x.\n");return false;} cudaSetDevice(i); return true; } //メイン int main(int argc,char** argv) { bool cpu=false,cpur=false,gpu=false,gpuNodeLayer=false; int argstart=2; if(argc>=2&&argv[1][0]=='-'){ if(argv[1][1]=='c'||argv[1][1]=='C'){cpu=true;} else 
if(argv[1][1]=='r'||argv[1][1]=='R'){cpur=true;} else if(argv[1][1]=='c'||argv[1][1]=='C'){cpu=true;} else if(argv[1][1]=='g'||argv[1][1]=='G'){gpu=true;} else if(argv[1][1]=='n'||argv[1][1]=='N'){gpuNodeLayer=true;} else{ gpuNodeLayer=true; } //デフォルトをgpuとする argstart=2; } if(argc<argstart){ printf("Usage: %s [-c|-g|-r|-s] n steps\n",argv[0]); printf(" -r: CPU 再帰\n"); printf(" -c: CPU 非再帰\n"); printf(" -g: GPU 再帰\n"); printf(" -n: GPU キャリーチェーン\n"); } if(cpur){ printf("\n\nCPU キャリーチェーン 再帰 \n"); } else if(cpu){ printf("\n\nCPU キャリーチェーン 非再帰 \n"); } else if(gpu){ printf("\n\nGPU キャリーチェーン シングルスレッド\n"); } else if(gpuNodeLayer){ printf("\n\nGPU キャリーチェーン マルチスレッド\n"); } if(cpu||cpur) { int min=4; int targetN=17; struct timeval t0; struct timeval t1; printf("%s\n"," N: Total Unique dd:hh:mm:ss.ms"); for(int size=min;size<=targetN;size++){ TOTAL=UNIQUE=0; gettimeofday(&t0, NULL);//計測開始 if(cpur){ //再帰 g.size=size; carryChainR(); } if(cpu){ //非再帰 g.size=size; carryChain(); } // gettimeofday(&t1, NULL);//計測終了 int ss;int ms;int dd; if(t1.tv_usec<t0.tv_usec) { dd=(t1.tv_sec-t0.tv_sec-1)/86400; ss=(t1.tv_sec-t0.tv_sec-1)%86400; ms=(1000000+t1.tv_usec-t0.tv_usec+500)/10000; }else { dd=(t1.tv_sec-t0.tv_sec)/86400; ss=(t1.tv_sec-t0.tv_sec)%86400; ms=(t1.tv_usec-t0.tv_usec+500)/10000; }//end if int hh=ss/3600; int mm=(ss-hh*3600)/60; ss%=60; printf("%2d:%13ld%12ld%8.2d:%02d:%02d:%02d.%02d\n",size,TOTAL,UNIQUE,dd,hh,mm,ss,ms); } //end for }//end if if(gpu||gpuNodeLayer) { if(!InitCUDA()){return 0;} /* int steps=24576; */ int min=4; int targetN=21; struct timeval t0; struct timeval t1; printf("%s\n"," N: Total Unique dd:hh:mm:ss.ms"); for(int size=min;size<=targetN;size++){ gettimeofday(&t0,NULL); // 計測開始 if(gpu){ TOTAL=UNIQUE=0; g.size=size; TOTAL=carryChain_solve_nodeLayer(size,0,0,0); //キャリーチェーン }else if(gpuNodeLayer){ TOTAL=UNIQUE=0; g.size=size; carryChain_build_nodeLayer(size); // キャリーチェーン } gettimeofday(&t1,NULL); // 計測終了 int ss;int ms;int dd; if (t1.tv_usec<t0.tv_usec) { 
dd=(int)(t1.tv_sec-t0.tv_sec-1)/86400; ss=(t1.tv_sec-t0.tv_sec-1)%86400; ms=(1000000+t1.tv_usec-t0.tv_usec+500)/10000; } else { dd=(int)(t1.tv_sec-t0.tv_sec)/86400; ss=(t1.tv_sec-t0.tv_sec)%86400; ms=(t1.tv_usec-t0.tv_usec+500)/10000; }//end if int hh=ss/3600; int mm=(ss-hh*3600)/60; ss%=60; printf("%2d:%13ld%12ld%8.2d:%02d:%02d:%02d.%02d\n",size,TOTAL,UNIQUE,dd,hh,mm,ss,ms); }//end for }//end if return 0; }
4c7ba1ee119db1109022cc945285b53181b36531.hip
// !!! This is a file automatically generated by hipify!!! #define _POSIX_C_SOURCE 200809L #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <unistd.h> #include <string.h> #include <time.h> #include <sys/time.h> #include <hip/hip_runtime.h> #include <omp.h> #include "libcxl.h" #include "defines.h" #include "batch.h" #include "utils.h" #include "psl.h" #include "batch.c" #include "utils.c" #define BILLION 1000000000L //****************codes below added by shanshan*************// typedef struct struct_NUM_ADD { short read_number; short haplotype_number; int address_array; } NUM_ADD; typedef struct struct_parameters{ float distm_simi[32]; float distm_diff[32]; float alpha[32]; float beta[32]; float delta[32]; float upsilon[32]; float eta[32]; float zeta[32]; } t_parameters; __global__ void pairHMM( int size, char * data,NUM_ADD * num_add, float * result) // what is the maximum number of parameters? { int offset=blockIdx.x; while(offset<size) { //as each time it will deal with 2 read&haplotype pairs // each block deal with one pairs of haplotype & read NUM_ADD number_address; number_address=num_add[offset]; int read_number=number_address.read_number; int haplotype_number=number_address.haplotype_number; char * read_base_array=(char *)(data+number_address.address_array); // to caculate the address of read_base_array. 
char4 * haplotype_base_array=(char4 * )(read_base_array+(read_number+127)/128*128); int aa=(haplotype_number+3)/4; t_parameters *parameter_array=(t_parameters *) (read_base_array+(read_number+127)/128*128+ (aa*4+127)/128*128); __shared__ char haplotype_base_in_char[350]; int hh=(haplotype_number+4-1)/4; int tt=(hh+blockDim.x-1)/blockDim.x; for(int ii=0;ii<tt;ii++) { int aa=threadIdx.x+ii*blockDim.x; if(aa< hh) { char4 haplotype_base_in_thread; haplotype_base_in_thread=haplotype_base_array[aa]; //Is it right to get data from global memory haplotype_base_in_char[aa*4]=haplotype_base_in_thread.x; haplotype_base_in_char[aa*4+1]=haplotype_base_in_thread.y; haplotype_base_in_char[aa*4+2]=haplotype_base_in_thread.z; haplotype_base_in_char[aa*4+3]=haplotype_base_in_thread.w; } } __syncthreads(); float MM, DD,II; float Qm,Qm_1,alpha,beta,delta,epsion,xiksi,thet; float D_0=(ldexpf(1.f, 120))/(float)haplotype_number; __shared__ float MM_stored[270];// as long as the haplotype __shared__ float DD_stored[270]; __shared__ float II_stored[270]; float result_block=0; int round=(read_number+blockDim.x-1)/blockDim.x; int round_size; char read_base; for(int i=0;i<round;i++) { round_size=(read_number>blockDim.x)?blockDim.x: read_number; read_number=(read_number>blockDim.x)?read_number-blockDim.x:0; // read_num is the remaining length at this round if(threadIdx.x<round_size ) // tid is from 0 ~ round_size-1 { read_base=read_base_array[threadIdx.x+blockDim.x*i]; Qm_1=parameter_array[i].distm_simi[threadIdx.x]; Qm=parameter_array[i].distm_diff[threadIdx.x]; alpha=parameter_array[i].alpha[threadIdx.x]; beta=parameter_array[i].beta[threadIdx.x]; delta=parameter_array[i].delta[threadIdx.x]; epsion=parameter_array[i].upsilon[threadIdx.x]; xiksi=parameter_array[i].eta[threadIdx.x]; thet=parameter_array[i].zeta[threadIdx.x]; } float M=0; //now float I=0; //now float D=0; //now float MMM=0;//up left float DDD=0;//up left float III=0;//up left if(threadIdx.x==0&&i==0) DDD=D_0; // Just in the 
first round, it need to be D_0 int current_haplotype_id=0; for(int j=0;j<round_size+haplotype_number-1;j++) { int aa=j-threadIdx.x; if( aa>=0 && (current_haplotype_id<haplotype_number)) { if(threadIdx.x==0) // if it is the second or more round { if(i>0) { MM=MM_stored[current_haplotype_id]; II=II_stored[current_haplotype_id]; DD=DD_stored[current_haplotype_id]; } else { MM=0; II=0; DD=D_0; } } float MID=__fadd_rn(III,DDD); DDD=DD; III=II; float DDM=__fmul_rn(M,xiksi); float IIMI=__fmul_rn(II,epsion); float MIIDD=__fmul_rn(beta,MID); char haplotype_base_each=haplotype_base_in_char[current_haplotype_id]; float aa=(haplotype_base_each==read_base)? Qm_1:Qm; D=__fmaf_rn(D,thet,DDM); I=__fmaf_rn(MM,delta,IIMI); float MMID=__fmaf_rn(alpha,MMM,MIIDD); MMM=MM; current_haplotype_id++; M=__fmul_rn(aa,MMID); II=I; DD=D; MM=M; } if(threadIdx.x==round_size-1 && i<round-1) // tid is the last thread but there are more round { MM_stored[current_haplotype_id-1]=M; II_stored[current_haplotype_id-1]=I; DD_stored[current_haplotype_id-1]=D; } if(threadIdx.x==round_size-1 && i==round-1) result_block=__fadd_rn(result_block,__fadd_rn(M,I)); MM=__shfl_up(MM,1); II=__shfl_up(II,1); DD=__shfl_up(DD,1); } } if(threadIdx.x==round_size-1) { result[offset]=result_block; } offset+=gridDim.x; } } //****************above codes added by shanshan*************// int main (int argc, char *argv[]) { struct timespec hwstart, hwend; //struct cxl_afu_h *afu; void *batch; t_result *result_hw; t_result *result_sw; t_workload *workload; t_batch *batches; unsigned char show_table = 0; unsigned char show_results = 0; unsigned char calculate_sw = 0; double clock_sw; double clock_hw; uint64_t threads = 1; DEBUG_PRINT("Parsing input arguments...\n"); if (argc < 5) { fprintf(stderr, "ERROR: Correct usage is: %s <-f = file, -m = manual> ... \n-m: <pairs> <X> <Y> ... \n-f: <input file>\n... 
<number of threads*> <sw solve?*> <show results?*> <show MID table?*> (* is optional)\n", APP_NAME); return -1; } else { if (strncmp(argv[1],"-f",2)==0) { if ((workload = load_workload(argv[2])) == NULL) { fprintf(stderr, "ERROR: %s cannot be opened.\n", argv[2]); return -1; } if (argc >= 4) threads = strtoul(argv[3], NULL, 0); if (argc >= 5) calculate_sw = strtoul(argv[4], NULL, 0); if (argc >= 6) show_results = strtoul(argv[5], NULL, 0); if (argc >= 7) show_table = strtoul(argv[6], NULL, 0); if (threads <= 0) threads = omp_get_max_threads(); BENCH_PRINT("%s, ", argv[2]); BENCH_PRINT("%8d, ", (int) workload->pairs); BENCH_PRINT("%8d, ", (int) threads); } else if (strncmp(argv[1],"-m",2)==0) { DEBUG_PRINT("Manual input mode selected. %d arguments supplied.\n", argc); int pairs = strtoul(argv[2], NULL, 0); int x = strtoul(argv[3], NULL, 0); int y = strtoul(argv[4], NULL, 0); workload = gen_workload(pairs, x, y); if (argc >= 6) threads = strtoul(argv[5], NULL, 0); if (argc >= 7) calculate_sw = strtoul(argv[6], NULL, 0); if (argc >= 8) show_results = strtoul(argv[7], NULL, 0); if (argc >= 9) show_table = strtoul(argv[8], NULL, 0); if (threads <= 0) threads = omp_get_max_threads(); BENCH_PRINT("M, "); BENCH_PRINT("%8d, %8d, %8d, ", workload->pairs, x, y); BENCH_PRINT("%8d, ", (int) threads); } else { fprintf(stderr, "ERROR: Correct usage is: %s <-f = file, -m = manual> ... \n-m: <pairs> <X> <Y> ... \n-f: <input file>\n... 
<number of threads*> <sw solve?*> <show results?*> <show MID table?*> (* is optional)\n", APP_NAME); return EXIT_FAILURE; } } BENCH_PRINT("%16lu, ",workload->cups_req); DEBUG_PRINT("Total workload bytes: %17d \n", (unsigned int) workload->bytes); DEBUG_PRINT("CUPS required : %17lu \n", workload->cups_req); DEBUG_PRINT("Allocating memory for %d batches and %d results...\n", (unsigned int) workload->batches, (unsigned int) workload->pairs); if (posix_memalign( (void **) &batch, CACHELINE_BYTES, workload->bytes)) { perror("Could not allocate memory to store the batches.\n"); return -1; } if (posix_memalign( (void **) &result_hw, CACHELINE_BYTES, sizeof(t_result) * workload->batches * PIPE_DEPTH)) { perror("Could not allocate memory to store hardware results.\n"); return -1; } if (posix_memalign( (void **) &result_sw, CACHELINE_BYTES, sizeof(t_result) * workload->batches * PIPE_DEPTH)) { perror("Could not allocate memory to store software results.\n"); return -1; } DEBUG_PRINT("Clearing batch and host result memory ...\n"); memset(result_sw, 0xFF, sizeof(t_result) * workload->batches * PIPE_DEPTH); memset(batch, 0x00, workload->bytes); DEBUG_PRINT("Filling batches...\n"); clock_sw = omp_get_wtime(); void * batch_cur = batch; batches = (t_batch*) malloc(sizeof(t_batch) * workload->batches); for (int q = 0; q < workload->batches; q++) { init_batch_address(&batches[q], batch_cur, workload->bx[q], workload->by[q]); fill_batch(&batches[q], workload->bx[q], workload->by[q], 1.0); print_batch_info(&batches[q]); batch_cur = (void*) ((uint64_t) batch_cur + (uint64_t) workload->bbytes[q]); } clock_sw = omp_get_wtime() - clock_sw; BENCH_PRINT("%16f,",clock_sw); DEBUG_PRINT("Calculating on host...\n"); // printf("\n software start \n") ; clock_sw = omp_get_wtime(); //print_batch_memory(batch, workload->bbytes[0] + workload->bbytes[1]); if (calculate_sw) { omp_set_num_threads(threads); #pragma omp parallel for for (int q = 0; q < workload->batches; q++) { int x = workload->bx[q]; 
int y = workload->by[q]; float * M = (float*)malloc(sizeof(float) * (y+1) * (x+1)); float * I = (float*)malloc(sizeof(float) * (y+1) * (x+1)); float * D = (float*)malloc(sizeof(float) * (y+1) * (x+1)); // Calculate results for (int p = 0; p < PIPE_DEPTH; p++) { calculate_mids(&batches[q], p, x, y, M, I, D); result_sw[q*PIPE_DEPTH+p].values[0] = 0.0; for (int c = 0; c < y+1; c++) { // WARNING: THIS IS BECAUSE FLOATING POINT ADDITION IS NOT ASSOCIATIVE result_sw[q*PIPE_DEPTH+p].values[0] += M[(y+1)*x+c]; result_sw[q*PIPE_DEPTH+p].values[0] += I[(y+1)*x+c]; } //printf("software result %e\n", result_sw[q*PIPE_DEPTH+p].values[0]); if (show_table != 0) { print_mid_table(&batches[q], p, x, y, M, I, D); fflush(stdout); } } free(M); free(I); free(D); } } clock_sw = omp_get_wtime() - clock_sw; if (calculate_sw) { BENCH_PRINT("%16f, ", clock_sw); BENCH_PRINT("%16f, ", workload->cups_req / clock_sw / 1000000); } else { BENCH_PRINT("%16f,",0.0); BENCH_PRINT("%16f,",0.0); } DEBUG_PRINT("%d %d\n",calculate_sw, show_results); if (calculate_sw && (show_results > 0)) { print_results(result_sw, workload->batches); } DEBUG_PRINT("Clearing result memory\n"); memset(result_hw, 0xFF, sizeof(t_result) * workload->batches * PIPE_DEPTH); //printf("\nSoftware end\n") ; // DEBUG_PRINT("Opening device: %s ...\n", DEVICE); // afu = cxl_afu_open_dev ((char*) (DEVICE)); // if (!afu) { perror ("cxl_afu_open_dev"); return -1; } hipSetDevice(0); //start GPU programming //change data format //**********in each batch, there is only one pair of read and haplotype. 
I change the value of PIPE_DEPTH in define.h file //memory on host struct timespec start,finish; double computation_time=0; int size=workload->batches; // how many pairs in the workloads char * data_h_total; data_h_total=(char*) malloc(size*10000*sizeof(char)+sizeof(NUM_ADD)*size); NUM_ADD * data_num_add=(NUM_ADD *) (data_h_total); char * data_h=data_h_total+(size*sizeof(NUM_ADD)+127)/128*128; // to make sure the address is aligned //memory on GPU char * result_d_total; hipMalloc( (char **) &result_d_total, size*10000*sizeof(char)+(size*sizeof(NUM_ADD)+127)/128*128+(size*sizeof(float)+127)/128*128); char * data_d_total=result_d_total+(size*sizeof(float)+127)/128*128; // to make sure the address is aligned. int data_size=0; //for each pair for(int q=0;q<workload->batches;q++) { int read_size_new=workload->bx[q]; int haplotype_size_new=workload->by[q]; //change read char read_base_new[500]; for(int i=0;i<read_size_new;i++) { read_base_new[i]=batches[q].read[i].base[0]; } //change haplotype int haplotype_size_new_new=(haplotype_size_new+3)/4; char4 haplotype_base_new[150]; for(int i=0;i<haplotype_size_new_new;i++) { haplotype_base_new[i].x=batches[q].hapl[i*4].base[0]; if(i*4+1<haplotype_size_new) { haplotype_base_new[i].y=batches[q].hapl[i*4+1].base[0]; } if(i*4+2<haplotype_size_new) { haplotype_base_new[i].z=batches[q].hapl[i*4+2].base[0]; } if(i*4+3<haplotype_size_new) { haplotype_base_new[i].w=batches[q].hapl[i*4+3].base[0]; } } //change parameter t_parameters pa[20]; int aa=(read_size_new+31)/32; for(int i=0;i<aa;i++) { for(int j=0;j<32;j++) { if(i*32+j<read_size_new) { pa[i].distm_simi[j]=batches[q].prob[i*32+j].p[7].f; pa[i].distm_diff[j]=batches[q].prob[i*32+j].p[6].f; pa[i].alpha[j]=batches[q].prob[i*32+j].p[5].f; pa[i].beta[j]=batches[q].prob[i*32+j].p[4].f; pa[i].delta[j]=batches[q].prob[i*32+j].p[3].f; pa[i].upsilon[j]=batches[q].prob[i*32+j].p[2].f; pa[i].eta[j]=batches[q].prob[i*32+j].p[1].f; pa[i].zeta[j]=batches[q].prob[i*32+j].p[0].f; } } } 
data_num_add[q].read_number=read_size_new; data_num_add[q].haplotype_number=haplotype_size_new; data_num_add[q].address_array=data_size; memcpy(data_h,read_base_new,sizeof(char)*read_size_new); data_h+=(read_size_new+127)/128*128; data_size+=(read_size_new+127)/128*128; memcpy(data_h,haplotype_base_new,sizeof(char4)* haplotype_size_new_new); data_h+=(haplotype_size_new_new*sizeof(char4)+127)/128*128; data_size+=(haplotype_size_new_new*sizeof(char4)+127)/128*128; memcpy(data_h,pa,sizeof(t_parameters) *aa); data_h+=sizeof(t_parameters)*aa; data_size+=sizeof(t_parameters)*aa; } int data_size_to_copy=data_size+(size*sizeof(NUM_ADD)+127)/128*128; float * result_h=(float *) malloc(sizeof(float)*size); clock_hw = omp_get_wtime(); clock_gettime(CLOCK_MONOTONIC_RAW, &hwstart); /* mark start time */ hipMemcpy(data_d_total,data_h_total,data_size_to_copy,hipMemcpyHostToDevice); NUM_ADD * num_add_d=(NUM_ADD *) (data_d_total); char * data_d=data_d_total+(sizeof(NUM_ADD)*size+127)/128*128; // call kernel dim3 block(32); dim3 grid(size); hipLaunchKernelGGL(( pairHMM), dim3(grid),dim3(block), 0, 0, size,data_d, num_add_d,(float *)result_d_total); hipMemcpy(result_h,result_d_total,size*sizeof(float),hipMemcpyDeviceToHost); // for(int i=0;i<size;i++) // printf("GPU result i=%d %e\n",i, result_h[i]); //hipDeviceReset(); clock_gettime(CLOCK_MONOTONIC_RAW, &hwend); clock_hw =omp_get_wtime() - clock_hw; uint64_t diff = BILLION * (hwend.tv_sec - hwstart.tv_sec) + hwend.tv_nsec - hwstart.tv_nsec; free(result_h); free(data_h_total); hipFree(result_d_total); int errs = 0; if (calculate_sw) { errs = count_errors((uint32_t *)result_hw,(uint32_t *)result_sw,workload->batches); } DEBUG_PRINT("Errors: %d\n",errs); BENCH_PRINT(" %16f,",clock_hw); BENCH_PRINT(" %16llu,", (long long unsigned int) diff); BENCH_PRINT(" %16f,", ((double)workload->cups_req / (double)clock_hw) / 1000000); if (calculate_sw) { BENCH_PRINT("%16f,",clock_sw / clock_hw); } else BENCH_PRINT(" %16f,",0.0); 
BENCH_PRINT("%16d",errs); BENCH_PRINT("\n"); free(workload); free(result_sw); free(batch); return 0; }
4c7ba1ee119db1109022cc945285b53181b36531.cu
#define _POSIX_C_SOURCE 200809L #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <unistd.h> #include <string.h> #include <time.h> #include <sys/time.h> #include <cuda.h> #include <omp.h> #include "libcxl.h" #include "defines.h" #include "batch.h" #include "utils.h" #include "psl.h" #include "batch.c" #include "utils.c" #define BILLION 1000000000L //****************codes below added by shanshan*************// typedef struct struct_NUM_ADD { short read_number; short haplotype_number; int address_array; } NUM_ADD; typedef struct struct_parameters{ float distm_simi[32]; float distm_diff[32]; float alpha[32]; float beta[32]; float delta[32]; float upsilon[32]; float eta[32]; float zeta[32]; } t_parameters; __global__ void pairHMM( int size, char * data,NUM_ADD * num_add, float * result) // what is the maximum number of parameters? { int offset=blockIdx.x; while(offset<size) { //as each time it will deal with 2 read&haplotype pairs // each block deal with one pairs of haplotype & read NUM_ADD number_address; number_address=num_add[offset]; int read_number=number_address.read_number; int haplotype_number=number_address.haplotype_number; char * read_base_array=(char *)(data+number_address.address_array); // to caculate the address of read_base_array. 
char4 * haplotype_base_array=(char4 * )(read_base_array+(read_number+127)/128*128); int aa=(haplotype_number+3)/4; t_parameters *parameter_array=(t_parameters *) (read_base_array+(read_number+127)/128*128+ (aa*4+127)/128*128); __shared__ char haplotype_base_in_char[350]; int hh=(haplotype_number+4-1)/4; int tt=(hh+blockDim.x-1)/blockDim.x; for(int ii=0;ii<tt;ii++) { int aa=threadIdx.x+ii*blockDim.x; if(aa< hh) { char4 haplotype_base_in_thread; haplotype_base_in_thread=haplotype_base_array[aa]; //Is it right to get data from global memory haplotype_base_in_char[aa*4]=haplotype_base_in_thread.x; haplotype_base_in_char[aa*4+1]=haplotype_base_in_thread.y; haplotype_base_in_char[aa*4+2]=haplotype_base_in_thread.z; haplotype_base_in_char[aa*4+3]=haplotype_base_in_thread.w; } } __syncthreads(); float MM, DD,II; float Qm,Qm_1,alpha,beta,delta,epsion,xiksi,thet; float D_0=(ldexpf(1.f, 120))/(float)haplotype_number; __shared__ float MM_stored[270];// as long as the haplotype __shared__ float DD_stored[270]; __shared__ float II_stored[270]; float result_block=0; int round=(read_number+blockDim.x-1)/blockDim.x; int round_size; char read_base; for(int i=0;i<round;i++) { round_size=(read_number>blockDim.x)?blockDim.x: read_number; read_number=(read_number>blockDim.x)?read_number-blockDim.x:0; // read_num is the remaining length at this round if(threadIdx.x<round_size ) // tid is from 0 ~ round_size-1 { read_base=read_base_array[threadIdx.x+blockDim.x*i]; Qm_1=parameter_array[i].distm_simi[threadIdx.x]; Qm=parameter_array[i].distm_diff[threadIdx.x]; alpha=parameter_array[i].alpha[threadIdx.x]; beta=parameter_array[i].beta[threadIdx.x]; delta=parameter_array[i].delta[threadIdx.x]; epsion=parameter_array[i].upsilon[threadIdx.x]; xiksi=parameter_array[i].eta[threadIdx.x]; thet=parameter_array[i].zeta[threadIdx.x]; } float M=0; //now float I=0; //now float D=0; //now float MMM=0;//up left float DDD=0;//up left float III=0;//up left if(threadIdx.x==0&&i==0) DDD=D_0; // Just in the 
first round, it need to be D_0 int current_haplotype_id=0; for(int j=0;j<round_size+haplotype_number-1;j++) { int aa=j-threadIdx.x; if( aa>=0 && (current_haplotype_id<haplotype_number)) { if(threadIdx.x==0) // if it is the second or more round { if(i>0) { MM=MM_stored[current_haplotype_id]; II=II_stored[current_haplotype_id]; DD=DD_stored[current_haplotype_id]; } else { MM=0; II=0; DD=D_0; } } float MID=__fadd_rn(III,DDD); DDD=DD; III=II; float DDM=__fmul_rn(M,xiksi); float IIMI=__fmul_rn(II,epsion); float MIIDD=__fmul_rn(beta,MID); char haplotype_base_each=haplotype_base_in_char[current_haplotype_id]; float aa=(haplotype_base_each==read_base)? Qm_1:Qm; D=__fmaf_rn(D,thet,DDM); I=__fmaf_rn(MM,delta,IIMI); float MMID=__fmaf_rn(alpha,MMM,MIIDD); MMM=MM; current_haplotype_id++; M=__fmul_rn(aa,MMID); II=I; DD=D; MM=M; } if(threadIdx.x==round_size-1 && i<round-1) // tid is the last thread but there are more round { MM_stored[current_haplotype_id-1]=M; II_stored[current_haplotype_id-1]=I; DD_stored[current_haplotype_id-1]=D; } if(threadIdx.x==round_size-1 && i==round-1) result_block=__fadd_rn(result_block,__fadd_rn(M,I)); MM=__shfl_up(MM,1); II=__shfl_up(II,1); DD=__shfl_up(DD,1); } } if(threadIdx.x==round_size-1) { result[offset]=result_block; } offset+=gridDim.x; } } //****************above codes added by shanshan*************// int main (int argc, char *argv[]) { struct timespec hwstart, hwend; //struct cxl_afu_h *afu; void *batch; t_result *result_hw; t_result *result_sw; t_workload *workload; t_batch *batches; unsigned char show_table = 0; unsigned char show_results = 0; unsigned char calculate_sw = 0; double clock_sw; double clock_hw; uint64_t threads = 1; DEBUG_PRINT("Parsing input arguments...\n"); if (argc < 5) { fprintf(stderr, "ERROR: Correct usage is: %s <-f = file, -m = manual> ... \n-m: <pairs> <X> <Y> ... \n-f: <input file>\n... 
<number of threads*> <sw solve?*> <show results?*> <show MID table?*> (* is optional)\n", APP_NAME); return -1; } else { if (strncmp(argv[1],"-f",2)==0) { if ((workload = load_workload(argv[2])) == NULL) { fprintf(stderr, "ERROR: %s cannot be opened.\n", argv[2]); return -1; } if (argc >= 4) threads = strtoul(argv[3], NULL, 0); if (argc >= 5) calculate_sw = strtoul(argv[4], NULL, 0); if (argc >= 6) show_results = strtoul(argv[5], NULL, 0); if (argc >= 7) show_table = strtoul(argv[6], NULL, 0); if (threads <= 0) threads = omp_get_max_threads(); BENCH_PRINT("%s, ", argv[2]); BENCH_PRINT("%8d, ", (int) workload->pairs); BENCH_PRINT("%8d, ", (int) threads); } else if (strncmp(argv[1],"-m",2)==0) { DEBUG_PRINT("Manual input mode selected. %d arguments supplied.\n", argc); int pairs = strtoul(argv[2], NULL, 0); int x = strtoul(argv[3], NULL, 0); int y = strtoul(argv[4], NULL, 0); workload = gen_workload(pairs, x, y); if (argc >= 6) threads = strtoul(argv[5], NULL, 0); if (argc >= 7) calculate_sw = strtoul(argv[6], NULL, 0); if (argc >= 8) show_results = strtoul(argv[7], NULL, 0); if (argc >= 9) show_table = strtoul(argv[8], NULL, 0); if (threads <= 0) threads = omp_get_max_threads(); BENCH_PRINT("M, "); BENCH_PRINT("%8d, %8d, %8d, ", workload->pairs, x, y); BENCH_PRINT("%8d, ", (int) threads); } else { fprintf(stderr, "ERROR: Correct usage is: %s <-f = file, -m = manual> ... \n-m: <pairs> <X> <Y> ... \n-f: <input file>\n... 
<number of threads*> <sw solve?*> <show results?*> <show MID table?*> (* is optional)\n", APP_NAME); return EXIT_FAILURE; } } BENCH_PRINT("%16lu, ",workload->cups_req); DEBUG_PRINT("Total workload bytes: %17d \n", (unsigned int) workload->bytes); DEBUG_PRINT("CUPS required : %17lu \n", workload->cups_req); DEBUG_PRINT("Allocating memory for %d batches and %d results...\n", (unsigned int) workload->batches, (unsigned int) workload->pairs); if (posix_memalign( (void **) &batch, CACHELINE_BYTES, workload->bytes)) { perror("Could not allocate memory to store the batches.\n"); return -1; } if (posix_memalign( (void **) &result_hw, CACHELINE_BYTES, sizeof(t_result) * workload->batches * PIPE_DEPTH)) { perror("Could not allocate memory to store hardware results.\n"); return -1; } if (posix_memalign( (void **) &result_sw, CACHELINE_BYTES, sizeof(t_result) * workload->batches * PIPE_DEPTH)) { perror("Could not allocate memory to store software results.\n"); return -1; } DEBUG_PRINT("Clearing batch and host result memory ...\n"); memset(result_sw, 0xFF, sizeof(t_result) * workload->batches * PIPE_DEPTH); memset(batch, 0x00, workload->bytes); DEBUG_PRINT("Filling batches...\n"); clock_sw = omp_get_wtime(); void * batch_cur = batch; batches = (t_batch*) malloc(sizeof(t_batch) * workload->batches); for (int q = 0; q < workload->batches; q++) { init_batch_address(&batches[q], batch_cur, workload->bx[q], workload->by[q]); fill_batch(&batches[q], workload->bx[q], workload->by[q], 1.0); print_batch_info(&batches[q]); batch_cur = (void*) ((uint64_t) batch_cur + (uint64_t) workload->bbytes[q]); } clock_sw = omp_get_wtime() - clock_sw; BENCH_PRINT("%16f,",clock_sw); DEBUG_PRINT("Calculating on host...\n"); // printf("\n software start \n") ; clock_sw = omp_get_wtime(); //print_batch_memory(batch, workload->bbytes[0] + workload->bbytes[1]); if (calculate_sw) { omp_set_num_threads(threads); #pragma omp parallel for for (int q = 0; q < workload->batches; q++) { int x = workload->bx[q]; 
int y = workload->by[q]; float * M = (float*)malloc(sizeof(float) * (y+1) * (x+1)); float * I = (float*)malloc(sizeof(float) * (y+1) * (x+1)); float * D = (float*)malloc(sizeof(float) * (y+1) * (x+1)); // Calculate results for (int p = 0; p < PIPE_DEPTH; p++) { calculate_mids(&batches[q], p, x, y, M, I, D); result_sw[q*PIPE_DEPTH+p].values[0] = 0.0; for (int c = 0; c < y+1; c++) { // WARNING: THIS IS BECAUSE FLOATING POINT ADDITION IS NOT ASSOCIATIVE result_sw[q*PIPE_DEPTH+p].values[0] += M[(y+1)*x+c]; result_sw[q*PIPE_DEPTH+p].values[0] += I[(y+1)*x+c]; } //printf("software result %e\n", result_sw[q*PIPE_DEPTH+p].values[0]); if (show_table != 0) { print_mid_table(&batches[q], p, x, y, M, I, D); fflush(stdout); } } free(M); free(I); free(D); } } clock_sw = omp_get_wtime() - clock_sw; if (calculate_sw) { BENCH_PRINT("%16f, ", clock_sw); BENCH_PRINT("%16f, ", workload->cups_req / clock_sw / 1000000); } else { BENCH_PRINT("%16f,",0.0); BENCH_PRINT("%16f,",0.0); } DEBUG_PRINT("%d %d\n",calculate_sw, show_results); if (calculate_sw && (show_results > 0)) { print_results(result_sw, workload->batches); } DEBUG_PRINT("Clearing result memory\n"); memset(result_hw, 0xFF, sizeof(t_result) * workload->batches * PIPE_DEPTH); //printf("\nSoftware end\n") ; // DEBUG_PRINT("Opening device: %s ...\n", DEVICE); // afu = cxl_afu_open_dev ((char*) (DEVICE)); // if (!afu) { perror ("cxl_afu_open_dev"); return -1; } cudaSetDevice(0); //start GPU programming //change data format //**********in each batch, there is only one pair of read and haplotype. 
I change the value of PIPE_DEPTH in define.h file //memory on host struct timespec start,finish; double computation_time=0; int size=workload->batches; // how many pairs in the workloads char * data_h_total; data_h_total=(char*) malloc(size*10000*sizeof(char)+sizeof(NUM_ADD)*size); NUM_ADD * data_num_add=(NUM_ADD *) (data_h_total); char * data_h=data_h_total+(size*sizeof(NUM_ADD)+127)/128*128; // to make sure the address is aligned //memory on GPU char * result_d_total; cudaMalloc( (char **) &result_d_total, size*10000*sizeof(char)+(size*sizeof(NUM_ADD)+127)/128*128+(size*sizeof(float)+127)/128*128); char * data_d_total=result_d_total+(size*sizeof(float)+127)/128*128; // to make sure the address is aligned. int data_size=0; //for each pair for(int q=0;q<workload->batches;q++) { int read_size_new=workload->bx[q]; int haplotype_size_new=workload->by[q]; //change read char read_base_new[500]; for(int i=0;i<read_size_new;i++) { read_base_new[i]=batches[q].read[i].base[0]; } //change haplotype int haplotype_size_new_new=(haplotype_size_new+3)/4; char4 haplotype_base_new[150]; for(int i=0;i<haplotype_size_new_new;i++) { haplotype_base_new[i].x=batches[q].hapl[i*4].base[0]; if(i*4+1<haplotype_size_new) { haplotype_base_new[i].y=batches[q].hapl[i*4+1].base[0]; } if(i*4+2<haplotype_size_new) { haplotype_base_new[i].z=batches[q].hapl[i*4+2].base[0]; } if(i*4+3<haplotype_size_new) { haplotype_base_new[i].w=batches[q].hapl[i*4+3].base[0]; } } //change parameter t_parameters pa[20]; int aa=(read_size_new+31)/32; for(int i=0;i<aa;i++) { for(int j=0;j<32;j++) { if(i*32+j<read_size_new) { pa[i].distm_simi[j]=batches[q].prob[i*32+j].p[7].f; pa[i].distm_diff[j]=batches[q].prob[i*32+j].p[6].f; pa[i].alpha[j]=batches[q].prob[i*32+j].p[5].f; pa[i].beta[j]=batches[q].prob[i*32+j].p[4].f; pa[i].delta[j]=batches[q].prob[i*32+j].p[3].f; pa[i].upsilon[j]=batches[q].prob[i*32+j].p[2].f; pa[i].eta[j]=batches[q].prob[i*32+j].p[1].f; pa[i].zeta[j]=batches[q].prob[i*32+j].p[0].f; } } } 
data_num_add[q].read_number=read_size_new; data_num_add[q].haplotype_number=haplotype_size_new; data_num_add[q].address_array=data_size; memcpy(data_h,read_base_new,sizeof(char)*read_size_new); data_h+=(read_size_new+127)/128*128; data_size+=(read_size_new+127)/128*128; memcpy(data_h,haplotype_base_new,sizeof(char4)* haplotype_size_new_new); data_h+=(haplotype_size_new_new*sizeof(char4)+127)/128*128; data_size+=(haplotype_size_new_new*sizeof(char4)+127)/128*128; memcpy(data_h,pa,sizeof(t_parameters) *aa); data_h+=sizeof(t_parameters)*aa; data_size+=sizeof(t_parameters)*aa; } int data_size_to_copy=data_size+(size*sizeof(NUM_ADD)+127)/128*128; float * result_h=(float *) malloc(sizeof(float)*size); clock_hw = omp_get_wtime(); clock_gettime(CLOCK_MONOTONIC_RAW, &hwstart); /* mark start time */ cudaMemcpy(data_d_total,data_h_total,data_size_to_copy,cudaMemcpyHostToDevice); NUM_ADD * num_add_d=(NUM_ADD *) (data_d_total); char * data_d=data_d_total+(sizeof(NUM_ADD)*size+127)/128*128; // call kernel dim3 block(32); dim3 grid(size); pairHMM<<<grid,block>>> (size,data_d, num_add_d,(float *)result_d_total); cudaMemcpy(result_h,result_d_total,size*sizeof(float),cudaMemcpyDeviceToHost); // for(int i=0;i<size;i++) // printf("GPU result i=%d %e\n",i, result_h[i]); //cudaDeviceReset(); clock_gettime(CLOCK_MONOTONIC_RAW, &hwend); clock_hw =omp_get_wtime() - clock_hw; uint64_t diff = BILLION * (hwend.tv_sec - hwstart.tv_sec) + hwend.tv_nsec - hwstart.tv_nsec; free(result_h); free(data_h_total); cudaFree(result_d_total); int errs = 0; if (calculate_sw) { errs = count_errors((uint32_t *)result_hw,(uint32_t *)result_sw,workload->batches); } DEBUG_PRINT("Errors: %d\n",errs); BENCH_PRINT(" %16f,",clock_hw); BENCH_PRINT(" %16llu,", (long long unsigned int) diff); BENCH_PRINT(" %16f,", ((double)workload->cups_req / (double)clock_hw) / 1000000); if (calculate_sw) { BENCH_PRINT("%16f,",clock_sw / clock_hw); } else BENCH_PRINT(" %16f,",0.0); BENCH_PRINT("%16d",errs); BENCH_PRINT("\n"); 
free(workload); free(result_sw); free(batch); return 0; }
ed71075e4865c86fccce3b1ff1d27abe57e8bdd6.hip
// !!! This is a file automatically generated by hipify!!! // TO DO: // 1. coalesce memory accesses for m and nm // 2. put FP_ptr_copy into pointer form // 3. split kernels into edges and middle blocks (middle do not if checks in flow) -- started // 4. try arrays for more coalesced accesses // 5. reduce if statements #include "hip/hip_runtime.h" #include "hip/hip_runtime.h" #include "hip/hip_runtime_api.h" #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include "cPFkernel_ptr.cuh" #include "utils.h" #include "common.h" #include <glew.h> #include <freeglut.h> #include "book.h" #include "gpu_anim.h" #include "hipsparse.h" #include <opencv2/core/core.hpp> #include <opencv2/highgui/highgui.hpp> #define SoF sizeof(float) #define CI(x,y,z,width,height) ((x) + (y)*(width) + (z) * (height) * (width)) #define WWAL_DIMx 4 #define WWAL_DIMy WWAL_DIMx #define W_DIMx 4 #define W_DIMy W_DIMx #define THREADx 16 #define THREADy 16 #define BLOCK_DIMx ((MATRIX_DIM>THREADx)?THREADx:MATRIX_DIM) // vary this #define BLOCK_DIMy ((MATRIX_DIM>THREADy)?THREADy:MATRIX_DIM) #define GRID_DIMx ((MATRIX_DIM + BLOCK_DIMx - 1)/BLOCK_DIMx) #define GRID_DIMy ((MATRIX_DIM + BLOCK_DIMy - 1)/BLOCK_DIMy) byte * host_Wall; float * host_WWall; float * host_W; double coef = 1.0; int gpu_iterations; float *m_host; double src_amplitude; double src_frequency; dim3 src_loc; texture<float,2,hipReadModeElementType> tex_m0; texture<float,2,hipReadModeElementType> tex_m1; texture<float,2,hipReadModeElementType> tex_m2; texture<float,2,hipReadModeElementType> tex_m3; texture<float,2,hipReadModeElementType> tex_nm0; texture<float,2,hipReadModeElementType> tex_nm1; texture<float,2,hipReadModeElementType> tex_nm2; texture<float,2,hipReadModeElementType> tex_nm3; texture<float,2,hipReadModeElementType> tex_WWall; texture<float,2,hipReadModeElementType> tex_W; texture<float,2,hipReadModeElementType> tex_avg_m; hipStream_t v_stream1, v_stream2, v_stream3, v_stream4; dim3 
v_threads(BLOCK_DIMx,BLOCK_DIMy,1); dim3 v_grids(GRID_DIMx,GRID_DIMy,1); dim3 v_matdim; float * v_p_src_m0; float * v_p_src_m1; float * v_p_src_m2; float * v_p_src_m3; size_t v_pitch; int v_shared_mem_size; float * dev_m0, *dev_m1, *dev_m2, *dev_m3, *dev_avg_m; float * dev_nm0, *dev_nm1, *dev_nm2, *dev_nm3; float * dev_WWall, * dev_W; byte * dev_wall; byte * host_src; byte * dev_src; void addSrc(int x, int y) { if (!(host_src[x + (MATRIX_DIM - 1 - y) * MATRIX_DIM])) { host_src[x + (MATRIX_DIM - 1 - y) * MATRIX_DIM] = 1; checkCudaErrors(hipMemcpy(dev_src, host_src, MATRIX_DIM*MATRIX_DIM*sizeof(byte), hipMemcpyHostToDevice)); } } #define DEL_MIN -10 #define DEL_MAX 10 #define WALL_R 2 void addWall(int x, int y) { int ry = (MATRIX_DIM - 1 - y); for (int iy = -WALL_R; iy < WALL_R; iy++) { for (int ix = -WALL_R; ix < WALL_R; ix++) { if (((x + ix)>0) && ((ry + iy) > 0) && ((x + ix)<MATRIX_DIM) && ((y + iy)<MATRIX_DIM)) { host_Wall[x + ix + (ry + iy) * MATRIX_DIM] = 1; } } } checkCudaErrors(hipMemcpy(dev_wall, host_Wall, MATRIX_DIM*MATRIX_DIM*sizeof(byte), hipMemcpyHostToDevice)); } void removeWall(int x, int y) { int ry = (MATRIX_DIM - 1 - y); for (int iy = -WALL_R*2; iy < WALL_R*2; iy++) { for (int ix = -WALL_R*2; ix < WALL_R*2; ix++) { if (((x + ix)>0) && ((ry + iy) > 0) && ((x + ix)<MATRIX_DIM) && ((y + iy)<MATRIX_DIM)) { host_Wall[x + ix + (ry + iy) * MATRIX_DIM] = 0; } } } checkCudaErrors(hipMemcpy(dev_wall, host_Wall, MATRIX_DIM*MATRIX_DIM*sizeof(byte), hipMemcpyHostToDevice)); } void removeSrc(int x, int y) { int ry = (MATRIX_DIM - 1 - y); for (int iy = DEL_MIN; iy < DEL_MAX; iy++) { for (int ix = DEL_MIN; ix < DEL_MAX; ix++) { if (((x + ix)>0) && ((ry + iy) > 0)) { host_src[x + ix + (ry + iy) * MATRIX_DIM] = 0; //checkCudaErrors(hipMemset2D(dev_m0 + (ry + iy) * v_pitch/sizeof(float) + x + ix, v_pitch, 0, sizeof(float), 1)); //checkCudaErrors(hipMemset2D(dev_m1 + (ry + iy) * v_pitch/sizeof(float) + x + ix, v_pitch, 0, sizeof(float), 1)); 
//checkCudaErrors(hipMemset2D(dev_m2 + (ry + iy) * v_pitch/sizeof(float) + x + ix, v_pitch, 0, sizeof(float), 1)); //checkCudaErrors(hipMemset2D(dev_m3 + (ry + iy) * v_pitch/sizeof(float) + x + ix, v_pitch, 0, sizeof(float), 1)); //checkCudaErrors(hipMemset2D(dev_nm0 + (ry + iy) * v_pitch/sizeof(float) + x + ix, v_pitch, 0, sizeof(float), 1)); //checkCudaErrors(hipMemset2D(dev_nm1 + (ry + iy) * v_pitch/sizeof(float) + x + ix, v_pitch, 0, sizeof(float), 1)); //checkCudaErrors(hipMemset2D(dev_nm2 + (ry + iy) * v_pitch/sizeof(float) + x + ix, v_pitch, 0, sizeof(float), 1)); //checkCudaErrors(hipMemset2D(dev_nm3 + (ry + iy) * v_pitch/sizeof(float) + x + ix, v_pitch, 0, sizeof(float), 1)); } } } checkCudaErrors(hipMemcpy(dev_src, host_src, MATRIX_DIM*MATRIX_DIM*sizeof(byte), hipMemcpyHostToDevice)); //if (host_src[x + ry * MATRIX_DIM]) //{ // host_src[x + ry * MATRIX_DIM] = 0; // checkCudaErrors(hipMemcpy(dev_src, host_src, MATRIX_DIM*MATRIX_DIM*sizeof(byte), hipMemcpyHostToDevice)); checkCudaErrors(hipMemset2D(dev_m0, v_pitch, 0, MATRIX_DIM*sizeof(float), MATRIX_DIM)); checkCudaErrors(hipMemset2D(dev_m1, v_pitch, 0, MATRIX_DIM*sizeof(float), MATRIX_DIM)); checkCudaErrors(hipMemset2D(dev_m2, v_pitch, 0, MATRIX_DIM*sizeof(float), MATRIX_DIM)); checkCudaErrors(hipMemset2D(dev_m3, v_pitch, 0, MATRIX_DIM*sizeof(float), MATRIX_DIM)); // /*checkCudaErrors(hipMemset((dev_m0 + ry * v_pitch/sizeof(float) + x), 0, sizeof(float))); // checkCudaErrors(hipMemset((dev_m1 + ry * v_pitch/sizeof(float) + x), 0, sizeof(float))); // checkCudaErrors(hipMemset((dev_m2 + ry * v_pitch/sizeof(float) + x), 0, sizeof(float))); // checkCudaErrors(hipMemset((dev_m3 + ry * v_pitch/sizeof(float) + x), 0, sizeof(float))); // checkCudaErrors(hipMemset((dev_nm0 + ry * v_pitch/sizeof(float) + x), 0, sizeof(float))); // checkCudaErrors(hipMemset((dev_nm1 + ry * v_pitch/sizeof(float) + x), 0, sizeof(float))); // checkCudaErrors(hipMemset((dev_nm2 + ry * v_pitch/sizeof(float) + x), 0, sizeof(float))); // 
checkCudaErrors(hipMemset((dev_nm3 + ry * v_pitch/sizeof(float) + x), 0, sizeof(float)));*/ //} } void removeAllSrc(void) { memset(host_src, 0, MATRIX_DIM*MATRIX_DIM*sizeof(byte)); checkCudaErrors(hipMemset(dev_src, 0, MATRIX_DIM*MATRIX_DIM*sizeof(byte))); checkCudaErrors(hipMemset2D(dev_m0, v_pitch, 0, MATRIX_DIM*sizeof(float), MATRIX_DIM)); checkCudaErrors(hipMemset2D(dev_m1, v_pitch, 0, MATRIX_DIM*sizeof(float), MATRIX_DIM)); checkCudaErrors(hipMemset2D(dev_m2, v_pitch, 0, MATRIX_DIM*sizeof(float), MATRIX_DIM)); checkCudaErrors(hipMemset2D(dev_m3, v_pitch, 0, MATRIX_DIM*sizeof(float), MATRIX_DIM)); //checkCudaErrors(hipMemset2D(dev_avg_m, v_pitch, 0, MATRIX_DIM*sizeof(float), MATRIX_DIM)); //checkCudaErrors(hipMemcpy(dev_src, host_src, MATRIX_DIM*MATRIX_DIM*sizeof(byte), hipMemcpyHostToDevice)); } __global__ void PF_ptr_copy(hipPitchedPtr mPtr, hipPitchedPtr nmPtr, hipExtent mExt, dim3 matdim) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; float *m = (float*)mPtr.ptr; float *nm = (float*)nmPtr.ptr; size_t pitch = mPtr.pitch; unsigned int e_per_row = pitch / SoF; size_t slice_pitch = pitch*mExt.height; if ((x < MATRIX_DIM) && (y < MATRIX_DIM)) { m[CI(x, y, 0, e_per_row, matdim.y)] = nm[CI(x, y, 0, e_per_row, matdim.y)]; m[CI(x, y, 1, e_per_row, matdim.y)] = nm[CI(x, y, 1, e_per_row, matdim.y)]; m[CI(x, y, 2, e_per_row, matdim.y)] = nm[CI(x, y, 2, e_per_row, matdim.y)]; m[CI(x, y, 3, e_per_row, matdim.y)] = nm[CI(x, y, 3, e_per_row, matdim.y)]; //__syncthreads(); // Edge Cases if (x == 0) { if (nm[CI(0, y, 0, e_per_row, matdim.y)] == 0) { m[CI(0, y, 0, e_per_row, matdim.y)] = nm[CI(1, y, 0, e_per_row, matdim.y)]; } } if (x == MATRIX_DIM-1) { if (nm[CI(MATRIX_DIM-1, y, 1, e_per_row, matdim.y)] == 0) { m[CI(MATRIX_DIM-1, y, 1, e_per_row, matdim.y)] = nm[CI(MATRIX_DIM-2, y, 1, e_per_row, matdim.y)]; } } if (y == 0) { if (nm[CI(x, 0, 2, e_per_row, matdim.y)] == 0) { m[CI(x, 0, 2, e_per_row, matdim.y)] = nm[CI(x, 1, 2, 
e_per_row, matdim.y)]; } } if (y == MATRIX_DIM-1) { if (nm[CI(x, MATRIX_DIM-1, 3, e_per_row, matdim.y)] == 0) { m[CI(x, MATRIX_DIM-1, 3, e_per_row, matdim.y)] = nm[CI(x, MATRIX_DIM-2, 3, e_per_row, matdim.y)]; } } } } __global__ void PF_copy_withWall(float*m0, float*m1, float*m2, float*m3, byte * wall, dim3 matdim, size_t pitch, byte * src, float source_val) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int loc = x + y * pitch/sizeof(float); if ((x < MATRIX_DIM) && (y < MATRIX_DIM)) // Make sure cell is within the environment grid { float t0 = tex2D(tex_nm0, x+0.5f, y+0.5f); float t1 = tex2D(tex_nm1, x+0.5f, y+0.5f); float t2 = tex2D(tex_nm2, x+0.5f, y+0.5f); float t3 = tex2D(tex_nm3, x+0.5f, y+0.5f); // edge cases if ((x == 0) && (t0 == 0)) { t0 = tex2D(tex_nm0, x+1.5f, y+0.5f); } if ((x == MATRIX_DIM-1) && (t1 == 0)) { t1 = tex2D(tex_nm1, x-1.0f+0.5f, y+0.5f); } if ((y == 0) && (t2 == 0)) { t2 = tex2D(tex_nm2, x+0.5f, y+1.0f+0.5f); } if ((y == MATRIX_DIM-1) && (t3 == 0)) { t3 = tex2D(tex_nm3, x+0.5f, y-1.0f+0.5f); } // write values if (wall[x + y * MATRIX_DIM] == 1) { m0[loc] = WALL_DEC*t0; m1[loc] = WALL_DEC*t1; m2[loc] = WALL_DEC*t2; m3[loc] = WALL_DEC*t3; //printf("wall!\n"); } else if (src[x + y * MATRIX_DIM] == 1) { //printf("source at %d, %d", x, y); m0[loc] = source_val + t0; m1[loc] = source_val + t1; m2[loc] = source_val + t2; m3[loc] = source_val + t3; } else { m0[loc] = t0; m1[loc] = t1; m2[loc] = t2; m3[loc] = t3; } } } __global__ void PF_padded_texture_copy(float*m0, float*m1, float*m2, float*m3, dim3 matdim, size_t pitch) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int loc = x + y * pitch/sizeof(float); if ((x < MATRIX_DIM) && (y < MATRIX_DIM)) // Make sure cell is within the environment grid { float t0 = tex2D(tex_nm0, x+0.5f, y+0.5f); float t1 = tex2D(tex_nm1, x+0.5f, y+0.5f); float t2 = tex2D(tex_nm2, x+0.5f, y+0.5f); float t3 = tex2D(tex_nm3, 
x+0.5f, y+0.5f); // edge cases if ((x == 0) && (t0 == 0)) { t0 = tex2D(tex_nm0, x+1.5f, y+0.5f); } if ((x == MATRIX_DIM-1) && (t1 == 0)) { t1 = tex2D(tex_nm1, x-1.0f+0.5f, y+0.5f); } if ((y == 0) && (t2 == 0)) { t2 = tex2D(tex_nm2, x+0.5f, y+1.0f+0.5f); } if ((y == MATRIX_DIM-1) && (t3 == 0)) { t3 = tex2D(tex_nm3, x+0.5f, y-1.0f+0.5f); } // write values m0[loc] = t0; m1[loc] = t1; m2[loc] = t2; m3[loc] = t3; } } __constant__ float cW[16]; #define STR 0.0 #define BND 0.5 #define INC_EAST tex_m1 #define INC_WEST tex_m0 #define INC_NORTH tex_m2 #define INC_SOUTH tex_m3 __global__ void PF_roundscatter(float *nm0, float *nm1, float *nm2, float *nm3, size_t pitch) { int y = threadIdx.y + blockIdx.y * blockDim.y; int x = threadIdx.x + blockIdx.x * blockDim.x; float xt = x + 0.5f; float yt = y + 0.5f; //ScatteredNorth[x, y-1] = 0.5m * (IEast - INorth + IWest + ISouth); float sn = 0.5 * ( tex2D(INC_EAST, xt, yt - 1) - tex2D(INC_NORTH, xt, yt - 1) + tex2D(INC_WEST, xt, yt - 1) + tex2D(INC_SOUTH, xt, yt - 1)); //ScatteredEast[x-1, y] = 0.5m * (-IEast + INorth + IWest + ISouth); float se = 0.5 * (0-tex2D(INC_EAST, xt - 1, yt) + tex2D(INC_NORTH, xt - 1, yt) + tex2D(INC_WEST, xt - 1, yt) + tex2D(INC_SOUTH, xt - 1, yt)); //ScatteredWest[x+1, y] = 0.5m * (IEast + INorth - IWest + ISouth); float sw = 0.5 * ( tex2D(INC_EAST, xt + 1, yt) + tex2D(INC_NORTH, xt + 1, yt) - tex2D(INC_WEST, xt + 1, yt) + tex2D(INC_SOUTH, xt + 1, yt)); //ScatteredSouth[x, y+1] = 0.5m * (IEast + INorth + IWest - ISouth); float ss = 0.5 * ( tex2D(INC_EAST, xt, yt + 1) + tex2D(INC_NORTH, xt, yt + 1) + tex2D(INC_WEST, xt, yt + 1) - tex2D(INC_SOUTH, xt, yt + 1)); //IncomingEast[x, y] = ScatteredWest[x + 1, y]; nm1[x + y * pitch/sizeof(float)] = sw; //IncomingNorth[x, y] = ScatteredSouth[x, y + 1]; nm2[x + y * pitch/sizeof(float)] = ss; //IncomingWest[x, y] = ScatteredEast[x - 1, y]; nm0[x + y * pitch/sizeof(float)] = se; //IncomingSouth[x, y] = ScatteredNorth[x, y - 1];*/ nm3[x + y * pitch/sizeof(float)] = sn; 
} __global__ void PF_texture_slideright(float *nm0, size_t pitch) { int y = threadIdx.y + blockIdx.y * blockDim.y; int x = threadIdx.x + blockIdx.x * blockDim.x; #if 0 nm0[x + y * pitch/sizeof(float)] = tex2D(tex_m0, (float)(x) - 0.5f, (float)(y) + 0.5f)*cW[4] + tex2D(tex_m1, (float)(x) - 0.5f, (float)(y) + 0.5f)*cW[5] + tex2D(tex_m2, (float)(x) - 0.5f, (float)(y) + 0.5f)*cW[6] + tex2D(tex_m3, (float)(x) - 0.5f, (float)(y) + 0.5f)*cW[7]; //printf("nm0[%d] = %f \n", x + y * pitch/sizeof(float), nm0[x + y * pitch/sizeof(float)]); #else //nm0[x + y * pitch/sizeof(float)] = tex2D(tex_m0, (float)(x) - 0.5f, (float)(y) + 0.5f) *(-STR) + // tex2D(tex_m1, (float)(x) - 0.5f, (float)(y) + 0.5f) *(STR) + // tex2D(tex_m2, (float)(x) - 0.5f, (float)(y) + 0.5f) *(STR) + // tex2D(tex_m3, (float)(x) - 0.5f, (float)(y) + 0.5f) *(STR) + // (0 + // tex2D(tex_m2, (float)(x) - 0.5f, (float)(y) + 1.5f) - // tex2D(tex_m0, (float)(x) - 0.5f, (float)(y) + 1.5f) - // tex2D(tex_m0, (float)(x) - 0.5f, (float)(y) - 0.5f) + // tex2D(tex_m3, (float)(x) - 0.5f, (float)(y) - 0.5f) // ) * BND; #endif //printf("x %d, y %d \n", x, y); } __global__ void PF_texture_slideleft(float *nm1, size_t pitch) { int y = threadIdx.y + blockIdx.y * blockDim.y; int x = threadIdx.x + blockIdx.x * blockDim.x; #if 0 nm1[x + y * pitch/sizeof(float)] = tex2D(tex_m0, (float)(x) + 1.5f, (float)(y) + 0.5f)*cW[0] + tex2D(tex_m1, (float)(x) + 1.5f, (float)(y) + 0.5f)*cW[1] + tex2D(tex_m2, (float)(x) + 1.5f, (float)(y) + 0.5f)*cW[2] + tex2D(tex_m3, (float)(x) + 1.5f, (float)(y) + 0.5f)*cW[3]; # else nm1[x + y * pitch/sizeof(float)] = tex2D(tex_m0, (float)(x) + 1.5f, (float)(y) + 0.5f)*(STR) + tex2D(tex_m1, (float)(x) + 1.5f, (float)(y) + 0.5f)*(-STR) + tex2D(tex_m2, (float)(x) + 1.5f, (float)(y) + 0.5f)*STR + tex2D(tex_m3, (float)(x) + 1.5f, (float)(y) + 0.5f)*STR + (0 + tex2D(tex_m3, (float)(x) + 1.5f, (float)(y) - 0.5f) - tex2D(tex_m1, (float)(x) + 1.5f, (float)(y) - 0.5f) - tex2D(tex_m1, (float)(x) + 1.5f, (float)(y) + 
1.5f) + tex2D(tex_m2, (float)(x) + 1.5f, (float)(y) + 1.5f) ) * BND; #endif } __global__ void PF_texture_slideup(float *nm3, size_t pitch) { int y = threadIdx.y + blockIdx.y * blockDim.y; int x = threadIdx.x + blockIdx.x * blockDim.x; #if 0 nm3[x + y * pitch/sizeof(float)] = tex2D(tex_m0, (float)(x) + 0.5f, (float)(y) - 0.5f)*cW[8] + tex2D(tex_m1, (float)(x) + 0.5f, (float)(y) - 0.5f)*cW[9] + tex2D(tex_m2, (float)(x) + 0.5f, (float)(y) - 0.5f)*cW[10] + tex2D(tex_m3, (float)(x) + 0.5f, (float)(y) - 0.5f)*cW[11]; #else nm3[x + y * pitch/sizeof(float)] = tex2D(tex_m0, (float)(x) + 0.5f, (float)(y) + 1.5f)*STR + tex2D(tex_m1, (float)(x) + 0.5f, (float)(y) + 1.5f)*STR + tex2D(tex_m2, (float)(x) + 0.5f, (float)(y) + 1.5f)*STR + tex2D(tex_m3, (float)(x) + 0.5f, (float)(y) + 1.5f)*(-STR)+ (0 + tex2D(tex_m0, (float)(x) + 1.5f, (float)(y) + 1.5f) - tex2D(tex_m3, (float)(x) + 1.5f, (float)(y) + 1.5f) + tex2D(tex_m1, (float)(x) - 0.5f, (float)(y) - 0.5f) - tex2D(tex_m3, (float)(x) - 0.5f, (float)(y) - 0.5f) ) * BND; #endif } __global__ void PF_texture_slidedown(float *nm2, size_t pitch) { int y = threadIdx.y + blockIdx.y * blockDim.y; int x = threadIdx.x + blockIdx.x * blockDim.x; #if 0 nm2[x + y * pitch/sizeof(float)] = tex2D(tex_m0, (float)(x) + 0.5f, (float)(y) + 1.5f)*cW[12] + tex2D(tex_m1, (float)(x) + 0.5f, (float)(y) + 1.5f)*cW[13] + tex2D(tex_m2, (float)(x) + 0.5f, (float)(y) + 1.5f)*cW[14] + tex2D(tex_m3, (float)(x) + 0.5f, (float)(y) + 1.5f)*cW[15]; #else nm2[x + y * pitch/sizeof(float)] = tex2D(tex_m0, (float)(x) + 0.5f, (float)(y) - 0.5f)*STR + tex2D(tex_m1, (float)(x) + 0.5f, (float)(y) - 0.5f)*STR + tex2D(tex_m2, (float)(x) + 0.5f, (float)(y) - 0.5f)*(-STR) + tex2D(tex_m3, (float)(x) + 0.5f, (float)(y) - 0.5f)*STR + (0 + tex2D(tex_m1, (float)(x) - 0.5f, (float)(y) - 0.5f) - tex2D(tex_m2, (float)(x) - 0.5f, (float)(y) - 0.5f) + tex2D(tex_m0, (float)(x) + 1.5f, (float)(y) - 0.5f) - tex2D(tex_m2, (float)(x) + 1.5f, (float)(y) - 0.5f) ) * BND; #endif } __global__ 
void PF_registers_texture_flow(float * nm0, float * nm1, float * nm2, float * nm3, float * W, size_t pitch) { __shared__ float sW[16]; float t0, t1, t2, t3; float x = threadIdx.x + blockIdx.x * blockDim.x; float y = threadIdx.y + blockIdx.y * blockDim.y; int loc = x + y * pitch/sizeof(float); x += 0.5f; y += 0.5f; if ((threadIdx.x < 4) && (threadIdx.y < 4)) { sW[threadIdx.x + threadIdx.y * 4] = W[threadIdx.x + threadIdx.y * 4]; } if ((x < MATRIX_DIM) && (y < MATRIX_DIM)) { //t0 = tex2D(tex_m0, x-1, y); t1 = tex2D(tex_m1, x-1, y); t2 = tex2D(tex_m2, x-1, y); t3 = tex2D(tex_m3, x-1, y); //nm0[loc] = t0*W[4] + t1*W[5] + t2*W[6] + t3*W[7]; nm0[loc] = tex2D(tex_m0, x-1,y)*sW[4] + tex2D(tex_m1, x-1, y)*sW[5] + tex2D(tex_m2, x-1, y)*sW[6] + tex2D(tex_m3, x-1, y)*sW[7]; nm1[loc] = tex2D(tex_m0, x+1,y)*sW[0] + tex2D(tex_m1, x+1, y)*sW[1] + tex2D(tex_m2, x+1, y)*sW[2] + tex2D(tex_m3, x+1, y)*sW[3]; nm2[loc] = tex2D(tex_m0, x,y-1)*sW[12] + tex2D(tex_m1, x, y-1)*sW[13] + tex2D(tex_m2, x, y-1)*sW[14] + tex2D(tex_m3, x, y-1)*sW[15]; nm3[loc] = tex2D(tex_m0, x,y+1)*sW[8] + tex2D(tex_m1, x, y+1)*sW[9] + tex2D(tex_m2, x, y+1)*sW[10] + tex2D(tex_m3, x, y+1)*sW[11]; //t0 = tex2D(tex_m0, x+1, y); t1 = tex2D(tex_m1, x+1, y); t2 = tex2D(tex_m2, x+1, y); t3 = tex2D(tex_m3, x+1, y); //nm1[loc] = t0*W[0] + t1*W[1] + t2*W[2] + t3*W[3]; //t0 = tex2D(tex_m0, x, y-1); t1 = tex2D(tex_m1, x, y-1); t2 = tex2D(tex_m2, x, y-1); t3 = tex2D(tex_m3, x, y-1); //nm0[loc] = t0*W[12] + t1*W[13] + t2*W[14] + t3*W[15]; //t0 = tex2D(tex_m0, x, y+1); t1 = tex2D(tex_m1, x, y+1); t2 = tex2D(tex_m2, x, y+1); t3 = tex2D(tex_m3, x, y+1); //nm0[loc] = t0*W[8] + t1*W[9] + t2*W[10] + t3*W[11]; } } __global__ void PF_mindlesspadded_texture_flow(dim3 srcloc, float src, bool* wallLoc, float*nm0, float*nm1, float* nm2, float* nm3, dim3 matdim, float * WWall, float *W, size_t pitch) { __shared__ float sW[16]; __shared__ float sMemM0[BLOCK_DIMx][BLOCK_DIMy]; // old block dimension + 1 on each side __shared__ float 
sMemM1[BLOCK_DIMx][BLOCK_DIMy]; // old block dimension + 1 on each side __shared__ float sMemM2[BLOCK_DIMx][BLOCK_DIMy]; // old block dimension + 1 on each side __shared__ float sMemM3[BLOCK_DIMx][BLOCK_DIMy]; // old block dimension + 1 on each side float x = threadIdx.x + blockIdx.x * blockDim.x + 0.5f; float y = threadIdx.y + blockIdx.y * blockDim.y + 0.5f; unsigned int shX = threadIdx.x + 1; unsigned int shY = threadIdx.y + 1; int loc = x + y * pitch/sizeof(float); } __global__ void PF_padded_texture_flow(dim3 srcloc, float src, bool* wallLoc, float*nm0, float*nm1, float* nm2, float* nm3, dim3 matdim, float * WWall, float *W, size_t pitch) { __shared__ float sWWall[16]; __shared__ float sW[16]; __shared__ float sMemM0[BLOCK_DIMx+2][BLOCK_DIMy+2]; __shared__ float sMemM1[BLOCK_DIMx+2][BLOCK_DIMy+2]; __shared__ float sMemM2[BLOCK_DIMx+2][BLOCK_DIMy+2]; __shared__ float sMemM3[BLOCK_DIMx+2][BLOCK_DIMy+2]; float x = threadIdx.x + blockIdx.x * blockDim.x + 0.5f; float y = threadIdx.y + blockIdx.y * blockDim.y + 0.5f; unsigned int shX = threadIdx.x + 1; unsigned int shY = threadIdx.y + 1; int loc = x + y * pitch/sizeof(float); // copy coefficients to shared memory: if ((threadIdx.x < 4) && (threadIdx.y < 4)) { sWWall[threadIdx.x + threadIdx.y * 4] = WWall[threadIdx.x + threadIdx.y * 4]; sW[threadIdx.x + threadIdx.y * 4] = W[threadIdx.x + threadIdx.y * 4]; } //__syncthreads(); if ((x < MATRIX_DIM) && (y < MATRIX_DIM)) // Make sure cell is within the environment grid { sMemM0[shX][shY] = tex2D(tex_m0, x, y); sMemM1[shX][shY] = tex2D(tex_m1, x, y); sMemM2[shX][shY] = tex2D(tex_m2, x, y); sMemM3[shX][shY] = tex2D(tex_m3, x, y); // handle edges if (threadIdx.x == 0) // left { sMemM0[0][shY] = tex2D(tex_m0, x-1.0f, y); sMemM1[0][shY] = tex2D(tex_m1, x-1.0f, y); sMemM2[0][shY] = tex2D(tex_m2, x-1.0f, y); sMemM3[0][shY] = tex2D(tex_m3, x-1.0f, y); } else if (threadIdx.x == (BLOCK_DIMx - 1)) // right { sMemM0[BLOCK_DIMx+1][shY] = tex2D(tex_m0, x+1.0f, y); 
sMemM1[BLOCK_DIMx+1][shY] = tex2D(tex_m1, x+1.0f, y); sMemM2[BLOCK_DIMx+1][shY] = tex2D(tex_m2, x+1.0f, y); sMemM3[BLOCK_DIMx+1][shY] = tex2D(tex_m3, x+1.0f, y); } // MISSING THE CORNER BLOCK~ FIX IT if (threadIdx.y == 0) // up { sMemM0[shX][0] = tex2D(tex_m0, x, y-1); sMemM1[shX][0] = tex2D(tex_m1, x, y-1); sMemM2[shX][0] = tex2D(tex_m2, x, y-1); sMemM3[shX][0] = tex2D(tex_m3, x, y-1); } else if (threadIdx.y == (BLOCK_DIMy - 1)) // down { sMemM0[shX][BLOCK_DIMy+1] = tex2D(tex_m0, x, y+1); sMemM1[shX][BLOCK_DIMy+1] = tex2D(tex_m1, x, y+1); sMemM2[shX][BLOCK_DIMy+1] = tex2D(tex_m2, x, y+1); sMemM3[shX][BLOCK_DIMy+1] = tex2D(tex_m3, x, y+1); } } __syncthreads(); // sync the shared memory writes if ((x < MATRIX_DIM) && (y < MATRIX_DIM)) // Make sure cell is within the environment grid { // calculate nm nm0[loc] = sW[4]*sMemM0[shX-1][shY] + sW[5]*sMemM1[shX-1][shY] + sW[6]*sMemM2[shX-1][shY] + sW[7]*sMemM3[shX-1][shY]; nm1[loc] = sW[0]*sMemM0[shX+1][shY] + sW[1]*sMemM1[shX+1][shY] + sW[2]*sMemM2[shX+1][shY] + sW[3]*sMemM3[shX+1][shY]; nm2[loc] = sW[12]*sMemM0[shX][shY-1] + sW[13]*sMemM1[shX][shY-1] + sW[14]*sMemM2[shX][shY-1] + sW[15]*sMemM3[shX][shY-1]; nm3[loc] = sW[8]*sMemM0[shX][shY+1] + sW[9]*sMemM1[shX][shY+1] + sW[10]*sMemM2[shX][shY+1] + sW[11]*sMemM3[shX][shY+1]; } //__syncthreads(); //printf("loc %d, %d, val %f, %f, %f, %f. 
\n", x, y, (nm0[loc]), (float)(nm1[loc]), (float)(nm2[loc]), (float)(nm3[loc])); } __global__ void PF_texture_flow(dim3 srcloc, float src, bool* wallLoc, float* nm0, float* nm1, float* nm2, float* nm3, dim3 matdim, float * WWall, float * W) { __shared__ float sWWall[16]; __shared__ float sW[16]; int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * blockDim.x * gridDim.x; // copy coefficients to shared memory: if ((threadIdx.x < 4) && (threadIdx.y < 4)) { sWWall[threadIdx.x + threadIdx.y * 4] = WWall[threadIdx.x + threadIdx.y * 4]; sW[threadIdx.x + threadIdx.y * 4] = W[threadIdx.x + threadIdx.y * 4]; } __syncthreads(); float m0, m1, m2, m3; if ((x < MATRIX_DIM-1) && (y < MATRIX_DIM-1) && (x > 0) && (y > 0)) // Make sure cell is within the environment grid { m0 = tex2D(tex_m0, x, y); m1 = tex2D(tex_m1, x, y); m2 = tex2D(tex_m2, x, y); m3 = tex2D(tex_m3, x, y); float newF[4] = {0}; if ((x == srcloc.x) && (y == srcloc.y)) { m0 = src; m1 = src; m2 = src; m3 = src; } // Check if wall bool isWall = wallLoc[x + y * matdim.x]; //bool isWall = *(wallLoc + x * sof if (isWall) { // prefetch WWall into __shared__ -- commented out above (maybe textures are faster, check though) /*newF[0] = tex2D(tex_WWall,0,0)*m0 + tex2D(tex_WWall,1,0)*m1 + tex2D(tex_WWall,2,0)*m2 + tex2D(tex_WWall,3,0)*m3; newF[1] = tex2D(tex_WWall,0,1)*m0 + tex2D(tex_WWall,1,1)*m1 + tex2D(tex_WWall,2,1)*m2 + tex2D(tex_WWall,3,1)*m3; newF[2] = tex2D(tex_WWall,0,2)*m0 + tex2D(tex_WWall,1,2)*m1 + tex2D(tex_WWall,2,2)*m2 + tex2D(tex_WWall,3,2)*m3; newF[3] = tex2D(tex_WWall,0,3)*m0 + tex2D(tex_WWall,1,3)*m1 + tex2D(tex_WWall,2,3)*m2 + tex2D(tex_WWall,3,3)*m3;*/ newF[0] = sWWall[0] *m0 + sWWall[1] *m1 + sWWall[2] *m2 + sWWall[3] *m3; newF[1] = sWWall[4] *m0 + sWWall[5] *m1 + sWWall[6] *m2 + sWWall[7] *m3; newF[2] = sWWall[8] *m0 + sWWall[9] *m1 + sWWall[10]*m2 + sWWall[11]*m3; newF[3] = sWWall[12]*m0 + sWWall[13]*m1 + sWWall[14]*m2 + sWWall[15]*m3; } else 
{ // prefetch W into __shared__ /*newF[0] = tex2D(tex_W,0,0)*m0 + tex2D(tex_W,1,0)*m1 + tex2D(tex_W,2,0)*m2 + tex2D(tex_W,3,0)*m3; newF[1] = tex2D(tex_W,0,1)*m0 + tex2D(tex_W,1,1)*m1 + tex2D(tex_W,2,1)*m2 + tex2D(tex_W,3,1)*m3; newF[2] = tex2D(tex_W,0,2)*m0 + tex2D(tex_W,1,2)*m1 + tex2D(tex_W,2,2)*m2 + tex2D(tex_W,3,2)*m3; newF[3] = tex2D(tex_W,0,3)*m0 + tex2D(tex_W,1,3)*m1 + tex2D(tex_W,2,3)*m2 + tex2D(tex_W,3,3)*m3;*/ newF[0] = sW[0]*m0 + sW[1]*m1 + sW[2]*m2 + sW[3]*m3; newF[1] = sW[4]*m0 + sW[5]*m1 + sW[6]*m2 + sW[7]*m3; newF[2] = sW[8]*m0 + sW[9]*m1 + sW[10]*m2 + sW[11]*m3; newF[3] = sW[12]*m0 + sW[13]*m1 + sW[14]*m2 + sW[15]*m3; } // if (x < MATRIX_DIM-1) nm0[x+1][y] = newF[1]; if (x < MATRIX_DIM - 1) nm0[offset + 1] = newF[1]; // if (x > 0) nm1[x-1][y] = newF[0]; if (x > 0) nm1[offset - 1] = newF[0]; // if (y < MATRIX_DIM-1) nm2[x][y+1] = newF[3]; if (y < MATRIX_DIM - 1) nm2[offset + blockDim.x * gridDim.x] = newF[3]; // if (y > 0) nm3[x][y-1] = newF[2]; if (y > 0) nm3[offset - blockDim.x * gridDim.x] = newF[2]; } } __global__ void float_to_color_power_dBm( uchar4 *optr, size_t pitch, byte* walls) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * MATRIX_DIM; //int poffset = x + y * pitch/sizeof(float); float l = tex2D(tex_avg_m, (x)+0.5f, (y)+0.5f); // Convert l to dBm: l = 10 * log10f(abs(l)); // abs == 0 -l when negative, faster? 
if (l < -100) { optr[offset].x = 255; optr[offset].y = 0; optr[offset].z = 0; } else if (l < -90) { optr[offset].x = 0; optr[offset].y = 9; optr[offset].z = 255; } else if (l < -80) { optr[offset].x = 255; optr[offset].y = 154; optr[offset].z = 0; } else if (l < -70) { optr[offset].x = 255; optr[offset].y = 247; optr[offset].z = 0; } else { optr[offset].x = 40; optr[offset].y = 172; optr[offset].z = 7; } if (walls[offset]) { optr[offset].x = 0; optr[offset].y = 0; optr[offset].z = 0; } } __global__ void clean_bitmap(uchar4 *optr) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y*512; optr[offset].w = 0xff; optr[offset].x = 255; optr[offset].y = 255; optr[offset].z = 255; } __global__ void float_to_color_dBm( uchar4 *optr, size_t pitch) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * MATRIX_DIM; int poffset = x + y * pitch/sizeof(float); float l = (tex2D(tex_m0, (x)+0.5f, (y)+0.5f))+ (tex2D(tex_m1, (x)+0.5f, (y)+0.5f))+ (tex2D(tex_m2, (x)+0.5f, (y)+0.5f))+ (tex2D(tex_m3, (x)+0.5f, (y)+0.5f)); // Convert l to dBm: l = 20 * log10f(abs(l/4)); // abs == 0 -l when negative, faster? 
if (l < -100) { optr[offset].x = 255; optr[offset].y = 0; optr[offset].z = 0; } else if (l < -90) { optr[offset].x = 0; optr[offset].y = 9; optr[offset].z = 255; } else if (l < -80) { optr[offset].x = 255; optr[offset].y = 154; optr[offset].z = 0; } else if (l < -70) { optr[offset].x = 255; optr[offset].y = 247; optr[offset].z = 0; } else { optr[offset].x = 40; optr[offset].y = 172; optr[offset].z = 7; } //l += 120; // put l between 0 and 100dBm (offset of 100dBm) //l /= 120; // divide by 100 to put between 0 and 1 //optr[offset].w = 255; //if (l < 0) //{ // optr[offset].x = 10; // optr[offset].y = 0; // optr[offset].z = 155; //} //else if (l < 0.125) // { // optr[offset].x = (unsigned char)(10.0f - 80.0f*l); // optr[offset].y = (unsigned char)(1000.0f * l); // optr[offset].z = 155; // } // else if (l < 0.375) // { // optr[offset].x = 0; // optr[offset].y = (unsigned char)(125.0f + (l - 0.125f) * 120.0f); // optr[offset].z = (unsigned char)(155.0f - (l - 0.125f) * 476.0f); // } // else if (l < 0.625) // { // optr[offset].x = (unsigned char)(820 * (l - 0.375f)); // optr[offset].y = (unsigned char)(155 + (l - 0.375f)*400.0f); // optr[offset].z = (unsigned char)(36 - 144 * (l - 0.375f)); // } // else if (l < 0.875) // { // optr[offset].x = (unsigned char)(205 + 200 * (l - 0.625f)); // optr[offset].y = (unsigned char)(255 - 472 * (l - 0.625f)); // optr[offset].z = 0; // } // else if (l <= 1) // { // optr[offset].x = 255; // optr[offset].y = (unsigned char)(137 - (l -0.875) * 1096); // optr[offset].z = 0; // } // else // { // optr[offset].x = 255; // optr[offset].y = 255; // optr[offset].z = 255; // } } __global__ void add_and_average_signal(size_t pitch, int iter, float * avg_m) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int poffset = x + y * pitch/sizeof(float); float total=((tex2D(tex_m0, (x)+0.5f, (y)+0.5f))+ (tex2D(tex_m1, (x)+0.5f, (y)+0.5f))+ (tex2D(tex_m2, (x)+0.5f, (y)+0.5f))+ (tex2D(tex_m3, (x)+0.5f, 
(y)+0.5f)))*0.5; total = total*total; // square for power //if (iter % SAMPLES_TO_AVERAGE) if (1) { float oldavg = tex2D(tex_avg_m, (x)+0.5, (y)+0.5); total = oldavg*(SAMPLES_TO_AVERAGE-1) + total; avg_m[poffset] = total/SAMPLES_TO_AVERAGE; } else { avg_m[poffset] = total; } /*if (iter != 0) total += tex2D(tex_avg_m, (x)+0.5f, (y)+0.5f); if (iter == SAMPLES_TO_AVERAGE-1) { total = total / SAMPLES_TO_AVERAGE; } avg_m[poffset] = total;*/ } __global__ void float_to_color_dBm_pixelate( uchar4 *optr, size_t pitch, int ticks ) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * 512; int poffset = x + y * pitch/sizeof(float); float l = (tex2D(tex_m0, (blockIdx.x)+0.5f, (blockIdx.y)+0.5f))+ (tex2D(tex_m1, (blockIdx.x)+0.5f, (blockIdx.y)+0.5f))+ (tex2D(tex_m2, (blockIdx.x)+0.5f, (blockIdx.y)+0.5f))+ (tex2D(tex_m3, (blockIdx.x)+0.5f, (blockIdx.y)+0.5f)); // Convert l to dBm: l = 20 * log10f(abs(l/4)); // abs == 0 -l when negative, faster? 
l += 100; // put l between 0 and 100dBm (offset of 100dBm) l /= 100; // divide by 100 to put between 0 and 1 optr[offset].w = 255; if (l < 0) { optr[offset].x = 10; optr[offset].y = 0; optr[offset].z = 155; } else if (l < 0.125) { optr[offset].x = (unsigned char)(10.0f - 80.0f*l); optr[offset].y = (unsigned char)(1000.0f * l); optr[offset].z = 155; } else if (l < 0.375) { optr[offset].x = 0; optr[offset].y = (unsigned char)(125.0f + (l - 0.125f) * 120.0f); optr[offset].z = (unsigned char)(155.0f - (l - 0.125f) * 476.0f); } else if (l < 0.625) { optr[offset].x = (unsigned char)(820 * (l - 0.375f)); optr[offset].y = (unsigned char)(155 + (l - 0.375f)*400.0f); optr[offset].z = (unsigned char)(36 - 144 * (l - 0.375f)); } else if (l < 0.875) { optr[offset].x = (unsigned char)(205 + 200 * (l - 0.625f)); optr[offset].y = (unsigned char)(255 - 472 * (l - 0.625f)); optr[offset].z = 0; } else if (l <= 1) { optr[offset].x = 255; optr[offset].y = (unsigned char)(137 - (l -0.875) * 1096); optr[offset].z = 0; } else { optr[offset].x = 255; optr[offset].y = 255; optr[offset].z = 255; } } __global__ void float_to_color_pitched( uchar4 *optr, size_t pitch, int ticks ) { // map from threadIdx/BlockIdx to pixel position //int x = threadIdx.x + blockIdx.x * blockDim.x; //int y = threadIdx.y + blockIdx.y * blockDim.y; //int offset = x + y * blockDim.x * gridDim.x; int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; //int offset = x + y * blockDim.x * gridDim.x; int offset = x + y * 512; int poffset = x + y * pitch/sizeof(float); //if ((threadIdx.x==0) && (threadIdx.y == 0)) //{ // //printf("Blockdim.x: %d, blockdim.y: %d\n", blockDim.x, blockDim.y); //} //x = blockIdx.x; y = blockIdx.y; //float l = (tex2D(tex_m0, x+0.5f, y+0.5f)); float l = (tex2D(tex_m0, (blockIdx.x)+0.5f, (blockIdx.y)+0.5f)); //printf("blockidx = %d, blockidy = %d \n", blockIdx.x, blockIdx.y); if ((blockIdx.x==0) && (blockIdx.y == 0)) { //printf("Blockdim.x: %d, blockdim.y: 
%d\n", blockDim.x, blockDim.y); //printf("x = %d, y = %d, val = %f \n", x, y, l); } //printf("x = %d, y = %d, val = %f \n", x, y, l); //l = (tex2D(tex_m0, x+0.5f, y+0.5f)+1.0f)/2.0f; //l = (l+1.0f)*0.5f; /*if ((x < MATRIX_DIM) && (y < MATRIX_DIM)) {*/ optr[offset].w = 256; l = (l + SRC_MAG)/(2 * SRC_MAG); if (l < 0.125) { optr[offset].x = (unsigned char)(10.0f - 80.0f*l); optr[offset].y = (unsigned char)(1000.0f * l); optr[offset].z = 155; } else if (l < 0.375) { optr[offset].x = 0; optr[offset].y = (unsigned char)(125.0f + (l - 0.125f) * 120.0f); optr[offset].z = (unsigned char)(155.0f - (l - 0.125f) * 476.0f); } else if (l < 0.625) { optr[offset].x = (unsigned char)(820 * (l - 0.375f)); optr[offset].y = (unsigned char)(155 + (l - 0.375f)*400.0f); optr[offset].z = (unsigned char)(36 - 144 * (l - 0.375f)); } else if (l < 0.875) { optr[offset].x = (unsigned char)(205 + 200 * (l - 0.625f)); optr[offset].y = (unsigned char)(255 - 472 * (l - 0.625f)); optr[offset].z = 0; } else if (l <= 1) { optr[offset].x = 255; optr[offset].y = (unsigned char)(137 - (l -0.875) * 1096); optr[offset].z = 0; } else { optr[offset].x = 255; optr[offset].y = 255; optr[offset].z = 255; } /*if (l < 0.0f) { optr[offset].y = (unsigned char)(255.0f*(1.0f+l)); optr[offset].z = (unsigned char)(255.0f*(1.0f+l));optr[offset].x = 0; } else { optr[offset].y = 0; optr[offset].z = (unsigned char)(255.0f*(1.0f-l)); optr[offset].x = (unsigned char)(255.0f*l); }*/ /*optr[offset].x = (unsigned char)(255.0f * l); optr[offset].y = (unsigned char)(255.0f * 0.67f*l); optr[offset].z = (unsigned char)(255.0f * (1.0f-l));*/ /*}*/ //float s = 1; //int h = (180 + (int)(360.0f * outSrc[poffset])) % 360; //float m1, m2; //if (l <= 0.5f) // m2 = l * (1 + s); //else // m2 = l + s - l * s; //m1 = 2 * l - m2; /*optr[offset].x = value( m1, m2, h+120 ); optr[offset].y = value( m1, m2, h ); optr[offset].z = value( m1, m2, h -120 ); optr[offset].w = 255;*/ } __global__ void testTexturesLoop(void) { for (int x = 0; x < 
MATRIX_DIM; x++) { for (int y = 0; y < MATRIX_DIM; y++) { printf("%f, ",(float)(tex2D(tex_m0, (float)(x)+0.5f, (float)(y)+0.5f))); } printf("\n"); } } __global__ void PF_ptr_flow(hipPitchedPtr mPtr, hipExtent mExt, dim3 matrix_dimensions, double src, dim3 srcloc, bool * wallLoc, float * WWall, float * W, hipPitchedPtr nmPtr) { __shared__ float sWWall[16]; __shared__ float sW[16]; int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if ((threadIdx.x < 4) && (threadIdx.y < 4)) { sWWall[threadIdx.x + threadIdx.y * 4] = WWall[threadIdx.x + threadIdx.y * 4]; sW[threadIdx.x + threadIdx.y * 4] = W[threadIdx.x + threadIdx.y * 4]; //if ((threadIdx.x == 0) && (threadIdx.y == 0)) printf("Block %d,%d \n", blockIdx.x,blockIdx.y); //printf("x=%d, y=%d, WWall %f, sWWall %f, W %f, sW %f \n", x, y, WWall[x + y * 4], sWWall[x+y * 4], W[x+y*4], sW[x+y*4]); } __syncthreads(); if ((x < MATRIX_DIM-1) && (y < MATRIX_DIM-1) && (x > 0) && (y > 0)) // Make sure cell is within the environment grid { // Find location within the pitched memory float *m = (float*)mPtr.ptr; float *nm = (float*)nmPtr.ptr; size_t pitch = mPtr.pitch; unsigned int e_per_row = pitch / SoF; size_t slice_pitch = e_per_row * matrix_dimensions.y; size_t one_sp = 1 * slice_pitch; size_t two_sp = 2 * slice_pitch; size_t three_sp = 3 * slice_pitch; size_t yep = y * e_per_row; float *mxy = m + x + yep; float *nmxy = nm + x + yep; //float m0 = m[CI(x, y, 0, e_per_row, matrix_dimensions.y)]; //float * m0ptr = (m + x + y * e_per_row + 0 * slice_pitch); //printf("m0ptr, x %d, y %d, is %d \n", x, y, m0ptr); //float m0 = *m0ptr; float m0 = *(mxy); //float m1 = m[CI(x, y, 1, e_per_row, matrix_dimensions.y)]; float m1 = *(mxy + one_sp); //float m2 = m[CI(x, y, 2, e_per_row, matrix_dimensions.y)]; float m2 = *(mxy + two_sp); //float m3 = m[CI(x, y, 3, e_per_row, matrix_dimensions.y)]; float m3 = *(mxy + three_sp); float newF[4] = {0}; // Check if source, assign value if it is if ((x == 
srcloc.x) && (y == srcloc.y)) { m0 = src; m1 = src; m2 = src; m3 = src; } // Check if wall bool isWall = wallLoc[x + y * matrix_dimensions.x]; //bool isWall = *(wallLoc + x * sof if (isWall) { // prefetch WWall into __shared__ -- done newF[0] = sWWall[0] *m0 + sWWall[1] *m1 + sWWall[2] *m2 + sWWall[3] *m3; newF[1] = sWWall[4] *m0 + sWWall[5] *m1 + sWWall[6] *m2 + sWWall[7] *m3; newF[2] = sWWall[8] *m0 + sWWall[9] *m1 + sWWall[10]*m2 + sWWall[11]*m3; newF[3] = sWWall[12]*m0 + sWWall[13]*m1 + sWWall[14]*m2 + sWWall[15]*m3; } else { // prefetch W into __shared__ -- done newF[0] = sW[0]*m0 + sW[1]*m1 + sW[2]*m2 + sW[3]*m3; newF[1] = sW[4]*m0 + sW[5]*m1 + sW[6]*m2 + sW[7]*m3; newF[2] = sW[8]*m0 + sW[9]*m1 + sW[10]*m2 + sW[11]*m3; newF[3] = sW[12]*m0 + sW[13]*m1 + sW[14]*m2 + sW[15]*m3; } //if (x < MATRIX_DIM-1) nm[CI(x + 1, y, 0, e_per_row, matrix_dimensions.y)] = newF[1]; // if (x < MATRIX_DIM-1) nm0[x+1][y] = newF[1]; if (x < MATRIX_DIM - 1) *(nmxy + 1) = newF[1]; //if (x > 0) nm[CI(x - 1, y, 1, e_per_row, matrix_dimensions.y)] = newF[0]; // if (x > 0) nm1[x-1][y] = newF[0]; if (x > 0) *(nmxy - 1 + one_sp) = newF[0]; //if (y < MATRIX_DIM-1) nm[CI(x, y + 1, 2, e_per_row, matrix_dimensions.y)] = newF[3]; // if (y < MATRIX_DIM-1) nm2[x][y+1] = newF[3]; if (y < MATRIX_DIM - 1) *(nmxy + e_per_row + two_sp) = newF[3]; //if (y > 0) nm[CI(x, y - 1, 3, e_per_row, matrix_dimensions.y)] = newF[2]; // if (y > 0) nm3[x][y-1] = newF[2]; if (y > 0) *(nmxy - e_per_row + three_sp) = newF[2]; } } //PFNGLBINDBUFFERARBPROC glBindBuffer = NULL; //PFNGLDELETEBUFFERSARBPROC glDeleteBuffers = NULL; //PFNGLGENBUFFERSARBPROC glGenBuffers = NULL; //PFNGLBUFFERDATAARBPROC glBufferData = NULL; //GLuint bufferObj; //cudaGraphicsResource *resource; //GLuint disp_texture; // //void cPFsetupDisplay(void) //{ // // Initialize the CUDA context // hipDeviceProp_t prop; // memset(&prop, 0, sizeof(hipDeviceProp_t)); // prop.major = 1; prop.minor = 0; // checkCudaErrors(hipChooseDevice(&dev, &prop)); // 
checkCudaErrors(hipGLSetGLDevice(dev)); // // glGenTextures(1, &disp_texture); // glBindTexture(GL_TEXTURE_2D, disp_texture); // // int dev; int c = 1; // char* dummy = ""; // glutInit(&c, &dummy); // glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA); // glutInitWindowSize(DIM, DIM); // glutCreateWindow("PixelFlow"); // // glBindBuffer = (PFNGLBINDBUFFERARBPROC)GET_PROC_ADDRESS("glBindBuffer"); // glDeleteBuffers = (PFNGLDELETEBUFFERSARBPROC)GET_PROC_ADDRESS("glDeleteBuffers"); // glGenBuffers = (PFNGLGENBUFFERSARBPROC)GET_PROC_ADDRESS("glGenBuffers"); // glBufferData = (PFNGLBUFFERDATAARBPROC)GET_PROC_ADDRESS("glBufferData"); // // /*glGenBuffers(1, &bufferObj); // glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, bufferObj); // glBufferData(GL_PIXEL_UNPACK_BUFFER_ARB, DIM*DIM*4, NULL, GL_DYNAMIC_DRAW_ARB); // // memset(&prop, 0, sizeof(hipDeviceProp_t)); // prop.major = 1; prop.minor = 0; // checkCudaErrors(hipChooseDevice(&dev, &prop)); // checkCudaErrors(hipGLSetGLDevice(dev)); // checkCudaErrors(hipGraphicsGLRegisterBuffer(&resource, bufferObj, hipGraphicsRegisterFlagsWriteDiscard));*/ // //} void cPFcaller_display_exit(void) { // Free all allocated memory (move into separate delete function later) hipUnbindTexture(tex_m0); hipUnbindTexture(tex_m1); hipUnbindTexture(tex_m2); hipUnbindTexture(tex_m3); hipUnbindTexture(tex_nm0); hipUnbindTexture(tex_nm1); hipUnbindTexture(tex_nm2); hipUnbindTexture(tex_nm3); hipUnbindTexture(tex_WWall); hipUnbindTexture(tex_W); hipFree(dev_m0); hipFree(dev_m1); hipFree(dev_m2); hipFree(dev_m3); hipFree(dev_nm0); hipFree(dev_nm1); hipFree(dev_nm2); hipFree(dev_nm3); hipFree(dev_WWall); hipFree(dev_W); } #if (MATRIX_DIM < 512) #define BIT_DIM 512 #else #define BIT_DIM MATRIX_DIM #endif void cPFcaller_display(unsigned int num_iterations, float * &m_ptr) { //#if (MATRIX_DIM < 512) GPUAnimBitmap bitmap(512, 512, NULL); //#else GPUAnimBitmap bitmap(BIT_DIM, BIT_DIM, NULL); //#endif uchar4 * devPtr; size_t size; // init textures gpu_iterations = 
num_iterations; hipError_t status = hipSuccess; float source = 0.0f; dim3 matdim; matdim.x = MATRIX_DIM; matdim.y = MATRIX_DIM; matdim.z = 4; dim3 threads(BLOCK_DIMx,BLOCK_DIMy,1); dim3 grids(GRID_DIMx,GRID_DIMy,1); size_t pitch; checkCudaErrors(hipMallocPitch((void**)&dev_m0, &pitch, MATRIX_DIM*sizeof(float), MATRIX_DIM)); checkCudaErrors(hipMallocPitch((void**)&dev_m1, &pitch, MATRIX_DIM*sizeof(float), MATRIX_DIM)); checkCudaErrors(hipMallocPitch((void**)&dev_m2, &pitch, MATRIX_DIM*sizeof(float), MATRIX_DIM)); checkCudaErrors(hipMallocPitch((void**)&dev_m3, &pitch, MATRIX_DIM*sizeof(float), MATRIX_DIM)); checkCudaErrors(hipMallocPitch((void**)&dev_nm0, &pitch, MATRIX_DIM*sizeof(float), MATRIX_DIM)); checkCudaErrors(hipMallocPitch((void**)&dev_nm1, &pitch, MATRIX_DIM*sizeof(float), MATRIX_DIM)); checkCudaErrors(hipMallocPitch((void**)&dev_nm2, &pitch, MATRIX_DIM*sizeof(float), MATRIX_DIM)); checkCudaErrors(hipMallocPitch((void**)&dev_nm3, &pitch, MATRIX_DIM*sizeof(float), MATRIX_DIM)); checkCudaErrors(hipMallocPitch((void**)&dev_avg_m, &pitch, MATRIX_DIM*sizeof(float), MATRIX_DIM)); checkCudaErrors(hipMalloc( (void**)&dev_WWall, WWAL_DIMx*WWAL_DIMy*sizeof(float))); // WWall checkCudaErrors(hipMalloc( (void**)&dev_W, W_DIMx*W_DIMy*sizeof(float))); // W checkCudaErrors(hipMemset2D(dev_m0, pitch, 0, MATRIX_DIM*sizeof(float), MATRIX_DIM)); // set 0 to every BYTE checkCudaErrors(hipMemset2D(dev_m1, pitch, 0, MATRIX_DIM*sizeof(float), MATRIX_DIM)); // set 0 to every BYTE checkCudaErrors(hipMemset2D(dev_m2, pitch, 0, MATRIX_DIM*sizeof(float), MATRIX_DIM)); // set 0 to every BYTE checkCudaErrors(hipMemset2D(dev_m3, pitch, 0, MATRIX_DIM*sizeof(float), MATRIX_DIM)); // set 0 to every BYTE checkCudaErrors(hipMemset2D(dev_nm0, pitch, 0, MATRIX_DIM*sizeof(float), MATRIX_DIM)); // set 0 to every BYTE checkCudaErrors(hipMemset2D(dev_nm1, pitch, 0, MATRIX_DIM*sizeof(float), MATRIX_DIM)); // set 0 to every BYTE checkCudaErrors(hipMemset2D(dev_nm2, pitch, 0, 
MATRIX_DIM*sizeof(float), MATRIX_DIM)); // set 0 to every BYTE checkCudaErrors(hipMemset2D(dev_nm3, pitch, 0, MATRIX_DIM*sizeof(float), MATRIX_DIM)); // set 0 to every BYTE checkCudaErrors(hipMemset2D(dev_avg_m, pitch, 0, MATRIX_DIM*sizeof(float), MATRIX_DIM)); checkCudaErrors(hipMemcpy(dev_WWall, host_WWall, WWAL_DIMx*WWAL_DIMy*sizeof(float), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(dev_W, host_W, W_DIMx*W_DIMy*sizeof(float), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpyToSymbol(cW, host_W, W_DIMx*W_DIMy*sizeof(float), 0U, hipMemcpyHostToDevice)); checkCudaErrors(hipMalloc( (void**)&dev_wall, MATRIX_DIM*MATRIX_DIM*sizeof(byte))); checkCudaErrors(hipMemcpy(dev_wall, host_Wall, MATRIX_DIM*MATRIX_DIM*sizeof(byte), hipMemcpyHostToDevice)); checkCudaErrors(hipMalloc((void**)&dev_src, MATRIX_DIM*MATRIX_DIM*sizeof(byte))); checkCudaErrors(hipMemcpy(dev_src, host_src, MATRIX_DIM*MATRIX_DIM*sizeof(byte), hipMemcpyHostToDevice)); hipChannelFormatDesc desc = hipCreateChannelDesc<float>(); // not happy? 
tex_m0.normalized = false; tex_m0.filterMode = hipFilterModeLinear; tex_m0.addressMode[0] = hipAddressModeBorder; tex_m1.normalized = false; tex_m1.filterMode = hipFilterModeLinear;tex_m1.addressMode[0] = hipAddressModeBorder; tex_m2.normalized = false; tex_m2.filterMode = hipFilterModeLinear;tex_m2.addressMode[0] = hipAddressModeBorder; tex_m3.normalized = false; tex_m3.filterMode = hipFilterModeLinear;tex_m3.addressMode[0] = hipAddressModeBorder; tex_nm0.normalized = false; tex_nm0.filterMode = hipFilterModeLinear;tex_nm0.addressMode[0] = hipAddressModeBorder; tex_nm1.normalized = false; tex_nm1.filterMode = hipFilterModeLinear;tex_nm1.addressMode[0] = hipAddressModeBorder; tex_nm2.normalized = false; tex_nm2.filterMode = hipFilterModeLinear;tex_nm2.addressMode[0] = hipAddressModeBorder; tex_nm3.normalized = false; tex_nm3.filterMode = hipFilterModeLinear;tex_nm3.addressMode[0] = hipAddressModeBorder; tex_avg_m.normalized = false; tex_avg_m.filterMode = hipFilterModeLinear;tex_avg_m.addressMode[0] = hipAddressModeBorder; checkCudaErrors(hipBindTexture2D(NULL, tex_m0, dev_m0, desc, MATRIX_DIM, MATRIX_DIM, pitch)); checkCudaErrors(hipBindTexture2D(NULL, tex_m1, dev_m1, desc, MATRIX_DIM, MATRIX_DIM, pitch)); checkCudaErrors(hipBindTexture2D(NULL, tex_m2, dev_m2, desc, MATRIX_DIM, MATRIX_DIM, pitch)); checkCudaErrors(hipBindTexture2D(NULL, tex_m3, dev_m3, desc, MATRIX_DIM, MATRIX_DIM, pitch)); checkCudaErrors(hipBindTexture2D(NULL, tex_nm0, dev_nm0, desc, MATRIX_DIM, MATRIX_DIM, pitch)); checkCudaErrors(hipBindTexture2D(NULL, tex_nm1, dev_nm1, desc, MATRIX_DIM, MATRIX_DIM, pitch)); checkCudaErrors(hipBindTexture2D(NULL, tex_nm2, dev_nm2, desc, MATRIX_DIM, MATRIX_DIM, pitch)); checkCudaErrors(hipBindTexture2D(NULL, tex_nm3, dev_nm3, desc, MATRIX_DIM, MATRIX_DIM, pitch)); checkCudaErrors(hipBindTexture2D(NULL, tex_avg_m, dev_avg_m, desc, MATRIX_DIM, MATRIX_DIM, pitch)); // Allocate 2D array for wall (unrolled to 1D) -- implement hash table 
//checkCudaErrors(hipMalloc((void**)&dev_wall, matdim.x*matdim.y*sizeof(bool))); // x*y elements in a 1D array //checkCudaErrors(hipMemcpy(dev_wall, host_Wall, matdim.x*matdim.y*sizeof(bool), hipMemcpyHostToDevice)); source = 0.0f; checkCudaErrors(hipDeviceSynchronize()); v_shared_mem_size = 2 * WWAL_DIMx * WWAL_DIMy * sizeof(float) + BLOCK_DIMx*BLOCK_DIMy*4*sizeof(float); v_p_src_m0 = dev_m0 + src_loc.y * pitch/sizeof(float) + src_loc.x; v_p_src_m1 = dev_m1 + src_loc.y * pitch/sizeof(float) + src_loc.x; v_p_src_m2 = dev_m2 + src_loc.y * pitch/sizeof(float) + src_loc.x; v_p_src_m3 = dev_m3 + src_loc.y * pitch/sizeof(float) + src_loc.x; hipStreamCreate(&v_stream1); hipStreamCreate(&v_stream2); hipStreamCreate(&v_stream3); hipStreamCreate(&v_stream4); dim3 stream_threads; dim3 stream_blocks; stream_threads.x = 1; stream_threads.y = 256; stream_threads.z = 1; stream_blocks.x = 1; stream_blocks.y = (MATRIX_DIM + stream_threads.y - 1) /stream_threads.y; //((MATRIX_DIM + BLOCK_DIMy - 1)/BLOCK_DIMy) v_pitch = pitch; v_matdim.x = matdim.x; v_matdim.y = matdim.y; v_matdim.z = matdim.z; hipEvent_t start,stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start,0); /* HANDLE_ERROR( hipGraphicsMapResources( 1, &(bitmap->resource), NULL ) ); HANDLE_ERROR( hipGraphicsResourceGetMappedPointer( (void**)&devPtr, &size, bitmap->resource) ); bitmap->fAnim( devPtr, bitmap->dataBlock, ticks++ ); // HANDLE_ERROR( hipGraphicsUnmapResources( 1, &(bitmap->resource), NULL ) );*/ //hipGraphicsMapResources( 1, &(bitmap.resource), NULL ) ; //hipGraphicsResourceGetMappedPointer( (void**)&devPtr, &size, bitmap.resource); ////clean_bitmap<<<(32,32,1),(16,16,1)>>>(devPtr); ////bitmap.Draw(); //hipGraphicsUnmapResources( 1, &(bitmap.resource), NULL ); //glClearColor( 0.0, 0.0, 0.0, 1.0 ); // glClear( GL_COLOR_BUFFER_BIT ); //glutSwapBuffers(); bitmap.anim_and_exit((void(*)(uchar4*,void*,int))cPFcaller_generateFrame, (void(*)(void*))cPFcaller_display_exit); bitmap.free_resources(); } 
dim3 colorgrid(MATRIX_DIM,MATRIX_DIM,1); dim3 colorthreads(512/MATRIX_DIM,512/MATRIX_DIM,1); void cPFcaller_generateFrame(uchar4 * dispPixels, void*, int ticks) { static int t = 0; hipEvent_t start, stop; checkCudaErrors(hipEventCreate(&start)); checkCudaErrors(hipEventCreate(&stop)); checkCudaErrors(hipEventRecord(start, 0)); float source = 0.0f; for (int i = 0; i < SAMPLING; i++) { //__global__ void PF_roundscatter(float *nm0, float *nm1, float *nm2, float *nm3, size_t pitch) hipLaunchKernelGGL(( PF_roundscatter), dim3(v_grids), dim3(v_threads), 0, 0, dev_nm0, dev_nm1, dev_nm2, dev_nm3, v_pitch); /*PF_texture_slideright<<<v_grids, v_threads, 0, v_stream1>>>(dev_nm0, v_pitch); PF_texture_slideleft<<<v_grids, v_threads, 0, v_stream2>>>(dev_nm1, v_pitch); PF_texture_slidedown<<<v_grids, v_threads, 0, v_stream3>>>(dev_nm2, v_pitch); PF_texture_slideup<<<v_grids, v_threads, 0, v_stream4>>>(dev_nm3, v_pitch);*/ //PF_padded_texture_flow<<<v_grids,v_threads,v_shared_mem_size>>>(src_loc, source, dev_wall, dev_nm0, dev_nm1, dev_nm2, dev_nm3, v_matdim, dev_WWall, dev_W, v_pitch); hipDeviceSynchronize(); source = SRC_MAG * sin(PI * (i+t) * DELTA_LENGTH * SRC_FREQ/CT); hipLaunchKernelGGL(( PF_copy_withWall), dim3(v_grids),dim3(v_threads), 0, 0, dev_m0, dev_m1, dev_m2, dev_m3, dev_wall, v_matdim, v_pitch, dev_src, source); hipLaunchKernelGGL(( add_and_average_signal), dim3(v_grids), dim3(v_threads), 0, 0, v_pitch, i, dev_avg_m); //(size_t pitch, int iter, float * avg_m) hipDeviceSynchronize(); //float source = SRC_MAG * sin(2 * PI * 1 * (float)(i+t) / SAMPLING); //float source = 1.0; /*source = SRC_MAG * sin(PI * (i+t) * DELTA_LENGTH * SRC_FREQ/CT);*/ //float zero= 0; //hipMemcpy(v_p_src_m0, &source, sizeof(float), hipMemcpyHostToDevice); //hipMemcpy(v_p_src_m1, &source, sizeof(float), hipMemcpyHostToDevice); //hipMemcpy(v_p_src_m2, &source, sizeof(float), hipMemcpyHostToDevice); ////hipMemcpy(v_p_src_m2++, &source, sizeof(float), hipMemcpyHostToDevice); 
////hipMemcpy(v_p_src_m2++, &source, sizeof(float), hipMemcpyHostToDevice); ////hipMemcpy(v_p_src_m2++, &source, sizeof(float), hipMemcpyHostToDevice); //hipMemcpy(v_p_src_m3, &source, sizeof(float), hipMemcpyHostToDevice); //hipDeviceSynchronize(); } checkCudaErrors(hipEventRecord(stop, 0)); hipEventSynchronize(stop); //v_p_src_m2 = dev_m2 + src_loc.y * v_pitch/sizeof(float) + src_loc.x; float elapsed; hipEventElapsedTime(&elapsed, start, stop); //printf("Time for frame: %3.1f ms \n", elapsed); t += SAMPLING; //printf("source at %d is %f\n", (t), source); if (MATRIX_DIM<512) { hipLaunchKernelGGL(( float_to_color_dBm_pixelate), dim3(colorgrid), dim3(colorthreads), 0, 0, dispPixels, v_pitch, ticks); } else { //float_to_color_dBm<<<v_grids,v_threads>>>(dispPixels, v_pitch); hipLaunchKernelGGL(( float_to_color_power_dBm), dim3(v_grids), dim3(v_threads), 0, 0, dispPixels, v_pitch, dev_wall); } } void cPFcaller(unsigned int num_iterations, float * &m_ptr) { gpu_iterations = num_iterations; hipError_t status = hipSuccess; float source = 0.0f; dim3 matdim; matdim.x = MATRIX_DIM; matdim.y = MATRIX_DIM; matdim.z = 4; dim3 threads(BLOCK_DIMx,BLOCK_DIMy,1); dim3 grids(GRID_DIMx,GRID_DIMy,1); size_t pitch; checkCudaErrors(hipMallocPitch((void**)&dev_m0, &pitch, MATRIX_DIM*sizeof(float), MATRIX_DIM)); checkCudaErrors(hipMallocPitch((void**)&dev_m1, &pitch, MATRIX_DIM*sizeof(float), MATRIX_DIM)); checkCudaErrors(hipMallocPitch((void**)&dev_m2, &pitch, MATRIX_DIM*sizeof(float), MATRIX_DIM)); checkCudaErrors(hipMallocPitch((void**)&dev_m3, &pitch, MATRIX_DIM*sizeof(float), MATRIX_DIM)); checkCudaErrors(hipMallocPitch((void**)&dev_nm0, &pitch, MATRIX_DIM*sizeof(float), MATRIX_DIM)); checkCudaErrors(hipMallocPitch((void**)&dev_nm1, &pitch, MATRIX_DIM*sizeof(float), MATRIX_DIM)); checkCudaErrors(hipMallocPitch((void**)&dev_nm2, &pitch, MATRIX_DIM*sizeof(float), MATRIX_DIM)); checkCudaErrors(hipMallocPitch((void**)&dev_nm3, &pitch, MATRIX_DIM*sizeof(float), MATRIX_DIM)); 
checkCudaErrors(hipMalloc( (void**)&dev_WWall, WWAL_DIMx*WWAL_DIMy*sizeof(float))); // WWall checkCudaErrors(hipMalloc( (void**)&dev_W, W_DIMx*W_DIMy*sizeof(float))); // W checkCudaErrors(hipMemset2D(dev_m0, pitch, 0, MATRIX_DIM*sizeof(float), MATRIX_DIM)); // set 0 to every BYTE checkCudaErrors(hipMemset2D(dev_m1, pitch, 0, MATRIX_DIM*sizeof(float), MATRIX_DIM)); // set 0 to every BYTE checkCudaErrors(hipMemset2D(dev_m2, pitch, 0, MATRIX_DIM*sizeof(float), MATRIX_DIM)); // set 0 to every BYTE checkCudaErrors(hipMemset2D(dev_m3, pitch, 0, MATRIX_DIM*sizeof(float), MATRIX_DIM)); // set 0 to every BYTE checkCudaErrors(hipMemset2D(dev_nm0, pitch, 0, MATRIX_DIM*sizeof(float), MATRIX_DIM)); // set 0 to every BYTE checkCudaErrors(hipMemset2D(dev_nm1, pitch, 0, MATRIX_DIM*sizeof(float), MATRIX_DIM)); // set 0 to every BYTE checkCudaErrors(hipMemset2D(dev_nm2, pitch, 0, MATRIX_DIM*sizeof(float), MATRIX_DIM)); // set 0 to every BYTE checkCudaErrors(hipMemset2D(dev_nm3, pitch, 0, MATRIX_DIM*sizeof(float), MATRIX_DIM)); // set 0 to every BYTE checkCudaErrors(hipMemcpy(dev_WWall, host_WWall, WWAL_DIMx*WWAL_DIMy*sizeof(float), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(dev_W, host_W, W_DIMx*W_DIMy*sizeof(float), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpyToSymbol(cW, host_W, W_DIMx*W_DIMy*sizeof(float), 0U, hipMemcpyHostToDevice)); hipChannelFormatDesc desc = hipCreateChannelDesc<float>(); // not happy? 
tex_m0.normalized = false; tex_m0.filterMode = hipFilterModeLinear; tex_m0.addressMode[0] = hipAddressModeBorder; tex_m1.normalized = false; tex_m1.filterMode = hipFilterModeLinear;tex_m1.addressMode[0] = hipAddressModeBorder; tex_m2.normalized = false; tex_m2.filterMode = hipFilterModeLinear;tex_m2.addressMode[0] = hipAddressModeBorder; tex_m3.normalized = false; tex_m3.filterMode = hipFilterModeLinear;tex_m3.addressMode[0] = hipAddressModeBorder; tex_nm0.normalized = false; tex_nm0.filterMode = hipFilterModeLinear;tex_nm0.addressMode[0] = hipAddressModeBorder; tex_nm1.normalized = false; tex_nm1.filterMode = hipFilterModeLinear;tex_nm1.addressMode[0] = hipAddressModeBorder; tex_nm2.normalized = false; tex_nm2.filterMode = hipFilterModeLinear;tex_nm2.addressMode[0] = hipAddressModeBorder; tex_nm3.normalized = false; tex_nm3.filterMode = hipFilterModeLinear;tex_nm3.addressMode[0] = hipAddressModeBorder; checkCudaErrors(hipBindTexture2D(NULL, tex_m0, dev_m0, desc, MATRIX_DIM, MATRIX_DIM, pitch)); checkCudaErrors(hipBindTexture2D(NULL, tex_m1, dev_m1, desc, MATRIX_DIM, MATRIX_DIM, pitch)); checkCudaErrors(hipBindTexture2D(NULL, tex_m2, dev_m2, desc, MATRIX_DIM, MATRIX_DIM, pitch)); checkCudaErrors(hipBindTexture2D(NULL, tex_m3, dev_m3, desc, MATRIX_DIM, MATRIX_DIM, pitch)); checkCudaErrors(hipBindTexture2D(NULL, tex_nm0, dev_nm0, desc, MATRIX_DIM, MATRIX_DIM, pitch)); checkCudaErrors(hipBindTexture2D(NULL, tex_nm1, dev_nm1, desc, MATRIX_DIM, MATRIX_DIM, pitch)); checkCudaErrors(hipBindTexture2D(NULL, tex_nm2, dev_nm2, desc, MATRIX_DIM, MATRIX_DIM, pitch)); checkCudaErrors(hipBindTexture2D(NULL, tex_nm3, dev_nm3, desc, MATRIX_DIM, MATRIX_DIM, pitch)); // Allocate 2D array for wall (unrolled to 1D) -- implement hash table checkCudaErrors(hipMalloc((void**)&dev_wall, matdim.x*matdim.y*sizeof(bool))); // x*y elements in a 1D array checkCudaErrors(hipMemcpy(dev_wall, host_Wall, matdim.x*matdim.y*sizeof(bool), hipMemcpyHostToDevice)); source = 0.0f; 
checkCudaErrors(hipDeviceSynchronize()); int shared_mem_size = 2 * WWAL_DIMx * WWAL_DIMy * sizeof(float) + BLOCK_DIMx*BLOCK_DIMy*4*sizeof(float); float * p_src_m0 = dev_m0 + src_loc.y * pitch/sizeof(float) + src_loc.x; float * p_src_m1 = dev_m1 + src_loc.y * pitch/sizeof(float) + src_loc.y; float * p_src_m2 = dev_m2 + src_loc.y * pitch/sizeof(float) + src_loc.x; float * p_src_m3 = dev_m3 + src_loc.y * pitch/sizeof(float) + src_loc.y; hipStream_t stream1, stream2, stream3, stream4; hipStreamCreate(&stream1); hipStreamCreate(&stream2); hipStreamCreate(&stream3); hipStreamCreate(&stream4); dim3 stream_threads; dim3 stream_blocks; stream_threads.x = 1; stream_threads.y = 256; stream_threads.z = 1; stream_blocks.x = 1; stream_blocks.y = (MATRIX_DIM + stream_threads.y - 1) /stream_threads.y; //((MATRIX_DIM + BLOCK_DIMy - 1)/BLOCK_DIMy) hipEvent_t start,stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start,0); clock_t t2; t2=clock(); // begin timing for (int iter = 0; iter < gpu_iterations; iter++) { source = src_amplitude * sin(2 * PI * src_frequency * (double)(iter) * 0.01); hipMemcpy(p_src_m0, &source, sizeof(float), hipMemcpyHostToDevice); hipMemcpy(p_src_m1, &source, sizeof(float), hipMemcpyHostToDevice); hipMemcpy(p_src_m2, &source, sizeof(float), hipMemcpyHostToDevice); hipMemcpy(p_src_m3, &source, sizeof(float), hipMemcpyHostToDevice); hipDeviceSynchronize(); /*checkCudaErrors(hipMemcpy(p_src_m0, &source, sizeof(float), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(p_src_m1, &source, sizeof(float), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(p_src_m2, &source, sizeof(float), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(p_src_m3, &source, sizeof(float), hipMemcpyHostToDevice)); checkCudaErrors(hipDeviceSynchronize());*/ //printf("Calculation \n"); //PF_padded_texture_flow<<<grids,threads,shared_mem_size>>>(src_loc, source, dev_wall, dev_nm0, dev_nm1, dev_nm2, dev_nm3, matdim, dev_WWall, dev_W, pitch); // 
PF_padded_texture_flow(dim3 srcloc, float src, bool* wallLoc, float*nm0, float*nm1, float* nm2, float* nm3, dim3 matdim, float * WWall, float *W) //PF_registers_texture_flow<<<grids,threads, (W_DIMx*W_DIMy*sizeof(float))>>>(dev_nm0, dev_nm1, dev_nm2, dev_nm3, dev_W, pitch); //__global__ void PF_registers_texture_flow(float * nm0, float * nm1, float * nm2, float * nm3, float * W, size_t pitch) //checkCudaErrors(hipPeekAtLastError()); /*PF_texture_slideright<<<stream_blocks, stream_threads, 0, stream1>>>(dev_nm0, pitch); PF_texture_slideleft<<<stream_blocks, stream_threads, 0, stream2>>>(dev_nm1, pitch); PF_texture_slidedown<<<stream_blocks, stream_threads, 0, stream3>>>(dev_nm2, pitch); PF_texture_slideup<<<stream_blocks, stream_threads, 0, stream4>>>(dev_nm3, pitch);*/ hipLaunchKernelGGL(( PF_texture_slideright), dim3(grids), dim3(threads), 0, stream1, dev_nm0, pitch); hipLaunchKernelGGL(( PF_texture_slideleft), dim3(grids), dim3(threads), 0, stream2, dev_nm1, pitch); hipLaunchKernelGGL(( PF_texture_slidedown), dim3(grids), dim3(threads), 0, stream3, dev_nm2, pitch); hipLaunchKernelGGL(( PF_texture_slideup), dim3(grids), dim3(threads), 0, stream4, dev_nm3, pitch); //checkCudaErrors(hipDeviceSynchronize()); hipDeviceSynchronize(); /*printf("NM texture values \n"); testTexturesLoop<<<1,1>>>(); hipDeviceSynchronize();*/ hipLaunchKernelGGL(( PF_padded_texture_copy), dim3(grids),dim3(threads), 0, 0, dev_m0, dev_m1, dev_m2, dev_m3, matdim, pitch); hipDeviceSynchronize(); /*printf("M texture values \n"); testTexturesLoop<<<1,1>>>(); hipDeviceSynchronize();*/ } hipEventRecord(stop,0); hipEventSynchronize(stop); float elapsedtime; hipEventElapsedTime(&elapsedtime, start, stop); printf("CUDA measured: %3.1f ms \n", elapsedtime); hipEventDestroy(start); hipEventDestroy(stop); long int final=clock()-t2; printf("GPU iterations took %li ticks (%f seconds) \n", final, ((float)final)/CLOCKS_PER_SEC); m_host = (float *)malloc(sizeof(float)*MATRIX_DIM*MATRIX_DIM); m_ptr = m_host; // 
So that the class can access M values //checkCudaErrors(hipMemcpy(m_host, dev_m0, MATRIX_DIM*MATRIX_DIM*sizeof(float), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy2D(m_host, MATRIX_DIM*sizeof(float), dev_m0, pitch, MATRIX_DIM*sizeof(float), MATRIX_DIM, hipMemcpyDeviceToHost)); hipDeviceSynchronize(); //status = hipMemcpy3D(&hm_p); //if (status != hipSuccess){printf("Uhoh: %s \n", hipGetErrorString(status));} // Free all allocated memory (move into separate delete function later) hipFree(dev_m0); hipFree(dev_m1); hipFree(dev_m2); hipFree(dev_m3); hipFree(dev_nm0); hipFree(dev_nm1); hipFree(dev_nm2); hipFree(dev_nm3); hipFree(dev_WWall); hipFree(dev_W); hipUnbindTexture(tex_m0); hipUnbindTexture(tex_m1); hipUnbindTexture(tex_m2); hipUnbindTexture(tex_m3); hipUnbindTexture(tex_nm0); hipUnbindTexture(tex_nm1); hipUnbindTexture(tex_nm2); hipUnbindTexture(tex_nm3); hipUnbindTexture(tex_WWall); hipUnbindTexture(tex_W); //hipFree(m_device.ptr); //hipFree(nm_device.ptr); //hipFree(dev_wall); //hipFree(dev_WWall); //hipFree(dev_W); } using namespace cv; void cPFinit(float matrixFlow[][4], float matrixWall[][4], float in_sourceLoc[]) { // Initialize some values coef = 1; src_amplitude = 1.0; src_frequency = 1.0; Mat image; image = imread("test.bmp", CV_LOAD_IMAGE_GRAYSCALE); if(! 
image.data ) // Check for invalid input { printf("invalid file :( \n"); return; } //namedWindow("Imported environment", WINDOW_AUTOSIZE); //imshow("Imported environment", image); //printf("Image has %d columns and %d rows \n", image.cols, image.rows); host_Wall = (byte *)malloc(sizeof(byte)*MATRIX_DIM*MATRIX_DIM); memset(host_Wall, 0, MATRIX_DIM*MATRIX_DIM*sizeof(byte)); host_src = (byte*) malloc(sizeof(byte)*MATRIX_DIM*MATRIX_DIM); memset(host_src, 0, MATRIX_DIM*MATRIX_DIM*sizeof(byte)); for (int r = 0; r < image.rows ; r++) { for (int c = 0; c < image.cols; c++) { if (image.at<uchar>(r, c) == 0) // 0 is black, { //host_Wall[c + r * MATRIX_DIM] = 1; host_Wall[c + (MATRIX_DIM - 1 - r) * MATRIX_DIM] = 1; } //else //{ // host_Wall[c + r * MATRIX_DIM] = 1; // //printf("wall at %d, %d\n", c, r); //} } } host_WWall = (float *)malloc(sizeof(float)*WWAL_DIMx*WWAL_DIMy); host_W = (float *)malloc(sizeof(float)*W_DIMx*W_DIMy); for (int y = 0; y < WWAL_DIMy; y++) { for (int x = 0; x < WWAL_DIMx; x++) { host_WWall[x+y*WWAL_DIMx] = matrixWall[x][y]* (coef/2.0); host_W[x+y*W_DIMx] = matrixFlow[x][y]* (coef/2.0); } } // copy source loc: //src_loc.x = in_sourceLoc[0]; //src_loc.y = in_sourceLoc[1]; } void cPFaddWallLocation(int x, int y, bool val) { if (host_Wall != NULL) host_Wall[x+y*MATRIX_DIM] = val; } void cPFdelete(void) { ///*if (host_W != NULL) */free(host_W); ///*if (host_WWall != NULL) */free(host_WWall); ///*if (host_Wall != NULL) */free(host_Wall); //free(m_host); }
ed71075e4865c86fccce3b1ff1d27abe57e8bdd6.cu
// TO DO: // 1. coalesce memory accesses for m and nm // 2. put FP_ptr_copy into pointer form // 3. split kernels into edges and middle blocks (middle do not if checks in flow) -- started // 4. try arrays for more coalesced accesses // 5. reduce if statements #include "cuda_runtime.h" #include "cuda.h" #include "cuda_runtime_api.h" #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include "cPFkernel_ptr.cuh" #include "utils.h" #include "common.h" #include <glew.h> #include <freeglut.h> #include "book.h" #include "gpu_anim.h" #include "cusparse.h" #include <opencv2/core/core.hpp> #include <opencv2/highgui/highgui.hpp> #define SoF sizeof(float) #define CI(x,y,z,width,height) ((x) + (y)*(width) + (z) * (height) * (width)) #define WWAL_DIMx 4 #define WWAL_DIMy WWAL_DIMx #define W_DIMx 4 #define W_DIMy W_DIMx #define THREADx 16 #define THREADy 16 #define BLOCK_DIMx ((MATRIX_DIM>THREADx)?THREADx:MATRIX_DIM) // vary this #define BLOCK_DIMy ((MATRIX_DIM>THREADy)?THREADy:MATRIX_DIM) #define GRID_DIMx ((MATRIX_DIM + BLOCK_DIMx - 1)/BLOCK_DIMx) #define GRID_DIMy ((MATRIX_DIM + BLOCK_DIMy - 1)/BLOCK_DIMy) byte * host_Wall; float * host_WWall; float * host_W; double coef = 1.0; int gpu_iterations; float *m_host; double src_amplitude; double src_frequency; dim3 src_loc; texture<float,2,cudaReadModeElementType> tex_m0; texture<float,2,cudaReadModeElementType> tex_m1; texture<float,2,cudaReadModeElementType> tex_m2; texture<float,2,cudaReadModeElementType> tex_m3; texture<float,2,cudaReadModeElementType> tex_nm0; texture<float,2,cudaReadModeElementType> tex_nm1; texture<float,2,cudaReadModeElementType> tex_nm2; texture<float,2,cudaReadModeElementType> tex_nm3; texture<float,2,cudaReadModeElementType> tex_WWall; texture<float,2,cudaReadModeElementType> tex_W; texture<float,2,cudaReadModeElementType> tex_avg_m; cudaStream_t v_stream1, v_stream2, v_stream3, v_stream4; dim3 v_threads(BLOCK_DIMx,BLOCK_DIMy,1); dim3 v_grids(GRID_DIMx,GRID_DIMy,1); dim3 
v_matdim;
float * v_p_src_m0;
float * v_p_src_m1;
float * v_p_src_m2;
float * v_p_src_m3;
size_t v_pitch;
int v_shared_mem_size;

float * dev_m0, *dev_m1, *dev_m2, *dev_m3, *dev_avg_m;
float * dev_nm0, *dev_nm1, *dev_nm2, *dev_nm3;
float * dev_WWall, * dev_W;
byte * dev_wall;
byte * host_src;
byte * dev_src;

// Mark (x, y) as a signal source (y flipped: row 0 of the UI is the top of
// the grid) and upload the updated source map to the device.
void addSrc(int x, int y)
{
    if (!(host_src[x + (MATRIX_DIM - 1 - y) * MATRIX_DIM]))
    {
        host_src[x + (MATRIX_DIM - 1 - y) * MATRIX_DIM] = 1;
        checkCudaErrors(cudaMemcpy(dev_src, host_src, MATRIX_DIM*MATRIX_DIM*sizeof(byte), cudaMemcpyHostToDevice));
    }
}

#define DEL_MIN -10
#define DEL_MAX 10
#define WALL_R 2

// Paint a (2*WALL_R)-wide square of wall cells around (x, y) in the host map
// and upload the wall map to the device.
void addWall(int x, int y)
{
    int ry = (MATRIX_DIM - 1 - y);  // flip y: UI row 0 is the top of the grid
    for (int iy = -WALL_R; iy < WALL_R; iy++)
    {
        for (int ix = -WALL_R; ix < WALL_R; ix++)
        {
            // BUGFIX: the upper bound must test the flipped row (ry + iy);
            // the original tested (y + iy) and could index past the last row.
            if (((x + ix) > 0) && ((ry + iy) > 0) &&
                ((x + ix) < MATRIX_DIM) && ((ry + iy) < MATRIX_DIM))
            {
                host_Wall[x + ix + (ry + iy) * MATRIX_DIM] = 1;
            }
        }
    }
    checkCudaErrors(cudaMemcpy(dev_wall, host_Wall, MATRIX_DIM*MATRIX_DIM*sizeof(byte), cudaMemcpyHostToDevice));
}

// Clear wall cells in a double-radius square around (x, y) and upload.
void removeWall(int x, int y)
{
    int ry = (MATRIX_DIM - 1 - y);
    for (int iy = -WALL_R*2; iy < WALL_R*2; iy++)
    {
        for (int ix = -WALL_R*2; ix < WALL_R*2; ix++)
        {
            // BUGFIX: same flipped-row upper-bound fix as addWall().
            if (((x + ix) > 0) && ((ry + iy) > 0) &&
                ((x + ix) < MATRIX_DIM) && ((ry + iy) < MATRIX_DIM))
            {
                host_Wall[x + ix + (ry + iy) * MATRIX_DIM] = 0;
            }
        }
    }
    checkCudaErrors(cudaMemcpy(dev_wall, host_Wall, MATRIX_DIM*MATRIX_DIM*sizeof(byte), cudaMemcpyHostToDevice));
}

// Remove sources in a DEL_MIN..DEL_MAX square around (x, y), upload the source
// map, and zero the whole field so the removed source's energy disappears.
void removeSrc(int x, int y)
{
    int ry = (MATRIX_DIM - 1 - y);
    for (int iy = DEL_MIN; iy < DEL_MAX; iy++)
    {
        for (int ix = DEL_MIN; ix < DEL_MAX; ix++)
        {
            // BUGFIX: also check the upper bound against MATRIX_DIM (the
            // original only checked > 0 and could write out of range).
            if (((x + ix) > 0) && ((ry + iy) > 0) &&
                ((x + ix) < MATRIX_DIM) && ((ry + iy) < MATRIX_DIM))
            {
                host_src[x + ix + (ry + iy) * MATRIX_DIM] = 0;
                // (per-cell cudaMemset2D of the m/nm fields was tried here and
                // abandoned in favour of the full-field clear below)
            }
        }
    }
    checkCudaErrors(cudaMemcpy(dev_src, host_src, MATRIX_DIM*MATRIX_DIM*sizeof(byte), cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemset2D(dev_m0, v_pitch, 0, MATRIX_DIM*sizeof(float), MATRIX_DIM));
    checkCudaErrors(cudaMemset2D(dev_m1, v_pitch, 0, MATRIX_DIM*sizeof(float), MATRIX_DIM));
    checkCudaErrors(cudaMemset2D(dev_m2, v_pitch, 0, MATRIX_DIM*sizeof(float), MATRIX_DIM));
    checkCudaErrors(cudaMemset2D(dev_m3, v_pitch, 0, MATRIX_DIM*sizeof(float), MATRIX_DIM));
}

// Clear every source and zero all field matrices on host and device.
void removeAllSrc(void)
{
    memset(host_src, 0, MATRIX_DIM*MATRIX_DIM*sizeof(byte));
    checkCudaErrors(cudaMemset(dev_src, 0, MATRIX_DIM*MATRIX_DIM*sizeof(byte)));
    checkCudaErrors(cudaMemset2D(dev_m0, v_pitch, 0, MATRIX_DIM*sizeof(float), MATRIX_DIM));
    checkCudaErrors(cudaMemset2D(dev_m1, v_pitch, 0, MATRIX_DIM*sizeof(float), MATRIX_DIM));
    checkCudaErrors(cudaMemset2D(dev_m2, v_pitch, 0, MATRIX_DIM*sizeof(float), MATRIX_DIM));
    checkCudaErrors(cudaMemset2D(dev_m3, v_pitch, 0, MATRIX_DIM*sizeof(float), MATRIX_DIM));
    //checkCudaErrors(cudaMemset2D(dev_avg_m, v_pitch, 0, MATRIX_DIM*sizeof(float), MATRIX_DIM));
    //checkCudaErrors(cudaMemcpy(dev_src, host_src, MATRIX_DIM*MATRIX_DIM*sizeof(byte), cudaMemcpyHostToDevice));
}

// Copy nm -> m in the 3D pitched layout (one thread per cell, 4 direction
// slices), patching boundary cells: a zero incoming value on an outer edge is
// replaced by its inward neighbour's value.
__global__ void PF_ptr_copy(cudaPitchedPtr mPtr, cudaPitchedPtr nmPtr, cudaExtent mExt, dim3 matdim)
{
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;

    float *m = (float*)mPtr.ptr;
    float *nm = (float*)nmPtr.ptr;
    size_t pitch = mPtr.pitch;
    unsigned int e_per_row = pitch / SoF;
    size_t slice_pitch = pitch*mExt.height;

    if ((x < MATRIX_DIM) && (y < MATRIX_DIM))
    {
        m[CI(x, y, 0, e_per_row, matdim.y)] = nm[CI(x, y, 0, e_per_row, matdim.y)];
        m[CI(x, y, 1, e_per_row, matdim.y)] = nm[CI(x, y, 1, e_per_row, matdim.y)];
        m[CI(x, y, 2, e_per_row, matdim.y)] = nm[CI(x, y, 2, e_per_row, matdim.y)];
        m[CI(x, y, 3, e_per_row, matdim.y)] = nm[CI(x, y, 3, e_per_row, matdim.y)];
        //__syncthreads();

        // Edge Cases
        if (x == 0)
        {
            if (nm[CI(0, y, 0, e_per_row, matdim.y)] == 0)
            {
                m[CI(0, y, 0, e_per_row, matdim.y)] = nm[CI(1, y, 0, e_per_row, matdim.y)];
            }
        }
        if (x == MATRIX_DIM-1)
        {
            if (nm[CI(MATRIX_DIM-1, y, 1, e_per_row, matdim.y)] == 0)
            {
                m[CI(MATRIX_DIM-1, y, 1, e_per_row, matdim.y)] = nm[CI(MATRIX_DIM-2, y, 1, e_per_row, matdim.y)];
            }
        }
        if (y == 0)
        {
            if (nm[CI(x, 0, 2, e_per_row, matdim.y)] == 0)
            {
                m[CI(x, 0, 2, e_per_row, matdim.y)] = nm[CI(x, 1, 2, e_per_row, matdim.y)];
            }
        }
        if (y == MATRIX_DIM-1)
        {
            if
(nm[CI(x, MATRIX_DIM-1, 3, e_per_row, matdim.y)] == 0) { m[CI(x, MATRIX_DIM-1, 3, e_per_row, matdim.y)] = nm[CI(x, MATRIX_DIM-2, 3, e_per_row, matdim.y)]; } } } } __global__ void PF_copy_withWall(float*m0, float*m1, float*m2, float*m3, byte * wall, dim3 matdim, size_t pitch, byte * src, float source_val) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int loc = x + y * pitch/sizeof(float); if ((x < MATRIX_DIM) && (y < MATRIX_DIM)) // Make sure cell is within the environment grid { float t0 = tex2D(tex_nm0, x+0.5f, y+0.5f); float t1 = tex2D(tex_nm1, x+0.5f, y+0.5f); float t2 = tex2D(tex_nm2, x+0.5f, y+0.5f); float t3 = tex2D(tex_nm3, x+0.5f, y+0.5f); // edge cases if ((x == 0) && (t0 == 0)) { t0 = tex2D(tex_nm0, x+1.5f, y+0.5f); } if ((x == MATRIX_DIM-1) && (t1 == 0)) { t1 = tex2D(tex_nm1, x-1.0f+0.5f, y+0.5f); } if ((y == 0) && (t2 == 0)) { t2 = tex2D(tex_nm2, x+0.5f, y+1.0f+0.5f); } if ((y == MATRIX_DIM-1) && (t3 == 0)) { t3 = tex2D(tex_nm3, x+0.5f, y-1.0f+0.5f); } // write values if (wall[x + y * MATRIX_DIM] == 1) { m0[loc] = WALL_DEC*t0; m1[loc] = WALL_DEC*t1; m2[loc] = WALL_DEC*t2; m3[loc] = WALL_DEC*t3; //printf("wall!\n"); } else if (src[x + y * MATRIX_DIM] == 1) { //printf("source at %d, %d", x, y); m0[loc] = source_val + t0; m1[loc] = source_val + t1; m2[loc] = source_val + t2; m3[loc] = source_val + t3; } else { m0[loc] = t0; m1[loc] = t1; m2[loc] = t2; m3[loc] = t3; } } } __global__ void PF_padded_texture_copy(float*m0, float*m1, float*m2, float*m3, dim3 matdim, size_t pitch) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int loc = x + y * pitch/sizeof(float); if ((x < MATRIX_DIM) && (y < MATRIX_DIM)) // Make sure cell is within the environment grid { float t0 = tex2D(tex_nm0, x+0.5f, y+0.5f); float t1 = tex2D(tex_nm1, x+0.5f, y+0.5f); float t2 = tex2D(tex_nm2, x+0.5f, y+0.5f); float t3 = tex2D(tex_nm3, x+0.5f, y+0.5f); // edge cases if ((x == 0) && (t0 == 0)) 
{ t0 = tex2D(tex_nm0, x+1.5f, y+0.5f); } if ((x == MATRIX_DIM-1) && (t1 == 0)) { t1 = tex2D(tex_nm1, x-1.0f+0.5f, y+0.5f); } if ((y == 0) && (t2 == 0)) { t2 = tex2D(tex_nm2, x+0.5f, y+1.0f+0.5f); } if ((y == MATRIX_DIM-1) && (t3 == 0)) { t3 = tex2D(tex_nm3, x+0.5f, y-1.0f+0.5f); } // write values m0[loc] = t0; m1[loc] = t1; m2[loc] = t2; m3[loc] = t3; } } __constant__ float cW[16]; #define STR 0.0 #define BND 0.5 #define INC_EAST tex_m1 #define INC_WEST tex_m0 #define INC_NORTH tex_m2 #define INC_SOUTH tex_m3 __global__ void PF_roundscatter(float *nm0, float *nm1, float *nm2, float *nm3, size_t pitch) { int y = threadIdx.y + blockIdx.y * blockDim.y; int x = threadIdx.x + blockIdx.x * blockDim.x; float xt = x + 0.5f; float yt = y + 0.5f; //ScatteredNorth[x, y-1] = 0.5m * (IEast - INorth + IWest + ISouth); float sn = 0.5 * ( tex2D(INC_EAST, xt, yt - 1) - tex2D(INC_NORTH, xt, yt - 1) + tex2D(INC_WEST, xt, yt - 1) + tex2D(INC_SOUTH, xt, yt - 1)); //ScatteredEast[x-1, y] = 0.5m * (-IEast + INorth + IWest + ISouth); float se = 0.5 * (0-tex2D(INC_EAST, xt - 1, yt) + tex2D(INC_NORTH, xt - 1, yt) + tex2D(INC_WEST, xt - 1, yt) + tex2D(INC_SOUTH, xt - 1, yt)); //ScatteredWest[x+1, y] = 0.5m * (IEast + INorth - IWest + ISouth); float sw = 0.5 * ( tex2D(INC_EAST, xt + 1, yt) + tex2D(INC_NORTH, xt + 1, yt) - tex2D(INC_WEST, xt + 1, yt) + tex2D(INC_SOUTH, xt + 1, yt)); //ScatteredSouth[x, y+1] = 0.5m * (IEast + INorth + IWest - ISouth); float ss = 0.5 * ( tex2D(INC_EAST, xt, yt + 1) + tex2D(INC_NORTH, xt, yt + 1) + tex2D(INC_WEST, xt, yt + 1) - tex2D(INC_SOUTH, xt, yt + 1)); //IncomingEast[x, y] = ScatteredWest[x + 1, y]; nm1[x + y * pitch/sizeof(float)] = sw; //IncomingNorth[x, y] = ScatteredSouth[x, y + 1]; nm2[x + y * pitch/sizeof(float)] = ss; //IncomingWest[x, y] = ScatteredEast[x - 1, y]; nm0[x + y * pitch/sizeof(float)] = se; //IncomingSouth[x, y] = ScatteredNorth[x, y - 1];*/ nm3[x + y * pitch/sizeof(float)] = sn; } __global__ void PF_texture_slideright(float *nm0, size_t 
pitch) { int y = threadIdx.y + blockIdx.y * blockDim.y; int x = threadIdx.x + blockIdx.x * blockDim.x; #if 0 nm0[x + y * pitch/sizeof(float)] = tex2D(tex_m0, (float)(x) - 0.5f, (float)(y) + 0.5f)*cW[4] + tex2D(tex_m1, (float)(x) - 0.5f, (float)(y) + 0.5f)*cW[5] + tex2D(tex_m2, (float)(x) - 0.5f, (float)(y) + 0.5f)*cW[6] + tex2D(tex_m3, (float)(x) - 0.5f, (float)(y) + 0.5f)*cW[7]; //printf("nm0[%d] = %f \n", x + y * pitch/sizeof(float), nm0[x + y * pitch/sizeof(float)]); #else //nm0[x + y * pitch/sizeof(float)] = tex2D(tex_m0, (float)(x) - 0.5f, (float)(y) + 0.5f) *(-STR) + // tex2D(tex_m1, (float)(x) - 0.5f, (float)(y) + 0.5f) *(STR) + // tex2D(tex_m2, (float)(x) - 0.5f, (float)(y) + 0.5f) *(STR) + // tex2D(tex_m3, (float)(x) - 0.5f, (float)(y) + 0.5f) *(STR) + // (0 + // tex2D(tex_m2, (float)(x) - 0.5f, (float)(y) + 1.5f) - // tex2D(tex_m0, (float)(x) - 0.5f, (float)(y) + 1.5f) - // tex2D(tex_m0, (float)(x) - 0.5f, (float)(y) - 0.5f) + // tex2D(tex_m3, (float)(x) - 0.5f, (float)(y) - 0.5f) // ) * BND; #endif //printf("x %d, y %d \n", x, y); } __global__ void PF_texture_slideleft(float *nm1, size_t pitch) { int y = threadIdx.y + blockIdx.y * blockDim.y; int x = threadIdx.x + blockIdx.x * blockDim.x; #if 0 nm1[x + y * pitch/sizeof(float)] = tex2D(tex_m0, (float)(x) + 1.5f, (float)(y) + 0.5f)*cW[0] + tex2D(tex_m1, (float)(x) + 1.5f, (float)(y) + 0.5f)*cW[1] + tex2D(tex_m2, (float)(x) + 1.5f, (float)(y) + 0.5f)*cW[2] + tex2D(tex_m3, (float)(x) + 1.5f, (float)(y) + 0.5f)*cW[3]; # else nm1[x + y * pitch/sizeof(float)] = tex2D(tex_m0, (float)(x) + 1.5f, (float)(y) + 0.5f)*(STR) + tex2D(tex_m1, (float)(x) + 1.5f, (float)(y) + 0.5f)*(-STR) + tex2D(tex_m2, (float)(x) + 1.5f, (float)(y) + 0.5f)*STR + tex2D(tex_m3, (float)(x) + 1.5f, (float)(y) + 0.5f)*STR + (0 + tex2D(tex_m3, (float)(x) + 1.5f, (float)(y) - 0.5f) - tex2D(tex_m1, (float)(x) + 1.5f, (float)(y) - 0.5f) - tex2D(tex_m1, (float)(x) + 1.5f, (float)(y) + 1.5f) + tex2D(tex_m2, (float)(x) + 1.5f, (float)(y) + 1.5f) ) 
* BND;
#endif
}

// Vertical flow step: compute the new south-incoming field nm3 from the m
// textures. The #if 0 branch is the general 4x4-coefficient form; the active
// branch uses the fixed STR/BND weights.
__global__ void PF_texture_slideup(float *nm3, size_t pitch)
{
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    int x = threadIdx.x + blockIdx.x * blockDim.x;
#if 0
    nm3[x + y * pitch/sizeof(float)] =
        tex2D(tex_m0, (float)(x) + 0.5f, (float)(y) - 0.5f)*cW[8] +
        tex2D(tex_m1, (float)(x) + 0.5f, (float)(y) - 0.5f)*cW[9] +
        tex2D(tex_m2, (float)(x) + 0.5f, (float)(y) - 0.5f)*cW[10] +
        tex2D(tex_m3, (float)(x) + 0.5f, (float)(y) - 0.5f)*cW[11];
#else
    nm3[x + y * pitch/sizeof(float)] =
        tex2D(tex_m0, (float)(x) + 0.5f, (float)(y) + 1.5f)*STR +
        tex2D(tex_m1, (float)(x) + 0.5f, (float)(y) + 1.5f)*STR +
        tex2D(tex_m2, (float)(x) + 0.5f, (float)(y) + 1.5f)*STR +
        tex2D(tex_m3, (float)(x) + 0.5f, (float)(y) + 1.5f)*(-STR) +
        (0 +
         tex2D(tex_m0, (float)(x) + 1.5f, (float)(y) + 1.5f) -
         tex2D(tex_m3, (float)(x) + 1.5f, (float)(y) + 1.5f) +
         tex2D(tex_m1, (float)(x) - 0.5f, (float)(y) - 0.5f) -
         tex2D(tex_m3, (float)(x) - 0.5f, (float)(y) - 0.5f)
        ) * BND;
#endif
}

// Vertical flow step: compute the new north-incoming field nm2 (mirror of
// PF_texture_slideup).
__global__ void PF_texture_slidedown(float *nm2, size_t pitch)
{
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    int x = threadIdx.x + blockIdx.x * blockDim.x;
#if 0
    nm2[x + y * pitch/sizeof(float)] =
        tex2D(tex_m0, (float)(x) + 0.5f, (float)(y) + 1.5f)*cW[12] +
        tex2D(tex_m1, (float)(x) + 0.5f, (float)(y) + 1.5f)*cW[13] +
        tex2D(tex_m2, (float)(x) + 0.5f, (float)(y) + 1.5f)*cW[14] +
        tex2D(tex_m3, (float)(x) + 0.5f, (float)(y) + 1.5f)*cW[15];
#else
    nm2[x + y * pitch/sizeof(float)] =
        tex2D(tex_m0, (float)(x) + 0.5f, (float)(y) - 0.5f)*STR +
        tex2D(tex_m1, (float)(x) + 0.5f, (float)(y) - 0.5f)*STR +
        tex2D(tex_m2, (float)(x) + 0.5f, (float)(y) - 0.5f)*(-STR) +
        tex2D(tex_m3, (float)(x) + 0.5f, (float)(y) - 0.5f)*STR +
        (0 +
         tex2D(tex_m1, (float)(x) - 0.5f, (float)(y) - 0.5f) -
         tex2D(tex_m2, (float)(x) - 0.5f, (float)(y) - 0.5f) +
         tex2D(tex_m0, (float)(x) + 1.5f, (float)(y) - 0.5f) -
         tex2D(tex_m2, (float)(x) + 1.5f, (float)(y) - 0.5f)
        ) * BND;
#endif
}

// Flow step computed directly from the m-field textures into the nm arrays.
// One thread per cell; sW caches the 4x4 flow matrix W in shared memory.
// BUGFIX: the source as found declared parameters (nm0, nm2, nm3, W, pitch)
// while the body also writes nm1[loc]; the missing nm1 parameter is restored.
__global__ void PF_registers_texture_flow(float * nm0, float * nm1, float * nm2, float * nm3, float * W, size_t pitch)
{
    __shared__ float sW[16];

    float x = threadIdx.x + blockIdx.x * blockDim.x;
    float y = threadIdx.y + blockIdx.y * blockDim.y;
    int loc = x + y * pitch/sizeof(float);
    x += 0.5f;  // texel-center offset for tex2D sampling
    y += 0.5f;

    if ((threadIdx.x < 4) && (threadIdx.y < 4))
    {
        sW[threadIdx.x + threadIdx.y * 4] = W[threadIdx.x + threadIdx.y * 4];
    }
    // BUGFIX: barrier so no thread reads sW before the first 4x4 threads have
    // filled it (the original had no synchronization here -> data race).
    // Placed outside the divergent guard below so all threads reach it.
    __syncthreads();

    if ((x < MATRIX_DIM) && (y < MATRIX_DIM))
    {
        nm0[loc] = tex2D(tex_m0, x-1, y)*sW[4]  + tex2D(tex_m1, x-1, y)*sW[5]  + tex2D(tex_m2, x-1, y)*sW[6]  + tex2D(tex_m3, x-1, y)*sW[7];
        nm1[loc] = tex2D(tex_m0, x+1, y)*sW[0]  + tex2D(tex_m1, x+1, y)*sW[1]  + tex2D(tex_m2, x+1, y)*sW[2]  + tex2D(tex_m3, x+1, y)*sW[3];
        nm2[loc] = tex2D(tex_m0, x, y-1)*sW[12] + tex2D(tex_m1, x, y-1)*sW[13] + tex2D(tex_m2, x, y-1)*sW[14] + tex2D(tex_m3, x, y-1)*sW[15];
        nm3[loc] = tex2D(tex_m0, x, y+1)*sW[8]  + tex2D(tex_m1, x, y+1)*sW[9]  + tex2D(tex_m2, x, y+1)*sW[10] + tex2D(tex_m3, x, y+1)*sW[11];
    }
}

__global__ void PF_mindlesspadded_texture_flow(dim3 srcloc, float src, bool* wallLoc, float*nm0, float*nm1, float* nm2, float* nm3, dim3 matdim, float * WWall, float *W, size_t pitch)
{
    __shared__ float sW[16];
    __shared__ float sMemM0[BLOCK_DIMx][BLOCK_DIMy]; // old block dimension + 1 on each side
    __shared__ float sMemM1[BLOCK_DIMx][BLOCK_DIMy]; // old block dimension + 1 on each side
__shared__ float sMemM2[BLOCK_DIMx][BLOCK_DIMy]; // old block dimension + 1 on each side __shared__ float sMemM3[BLOCK_DIMx][BLOCK_DIMy]; // old block dimension + 1 on each side float x = threadIdx.x + blockIdx.x * blockDim.x + 0.5f; float y = threadIdx.y + blockIdx.y * blockDim.y + 0.5f; unsigned int shX = threadIdx.x + 1; unsigned int shY = threadIdx.y + 1; int loc = x + y * pitch/sizeof(float); } __global__ void PF_padded_texture_flow(dim3 srcloc, float src, bool* wallLoc, float*nm0, float*nm1, float* nm2, float* nm3, dim3 matdim, float * WWall, float *W, size_t pitch) { __shared__ float sWWall[16]; __shared__ float sW[16]; __shared__ float sMemM0[BLOCK_DIMx+2][BLOCK_DIMy+2]; __shared__ float sMemM1[BLOCK_DIMx+2][BLOCK_DIMy+2]; __shared__ float sMemM2[BLOCK_DIMx+2][BLOCK_DIMy+2]; __shared__ float sMemM3[BLOCK_DIMx+2][BLOCK_DIMy+2]; float x = threadIdx.x + blockIdx.x * blockDim.x + 0.5f; float y = threadIdx.y + blockIdx.y * blockDim.y + 0.5f; unsigned int shX = threadIdx.x + 1; unsigned int shY = threadIdx.y + 1; int loc = x + y * pitch/sizeof(float); // copy coefficients to shared memory: if ((threadIdx.x < 4) && (threadIdx.y < 4)) { sWWall[threadIdx.x + threadIdx.y * 4] = WWall[threadIdx.x + threadIdx.y * 4]; sW[threadIdx.x + threadIdx.y * 4] = W[threadIdx.x + threadIdx.y * 4]; } //__syncthreads(); if ((x < MATRIX_DIM) && (y < MATRIX_DIM)) // Make sure cell is within the environment grid { sMemM0[shX][shY] = tex2D(tex_m0, x, y); sMemM1[shX][shY] = tex2D(tex_m1, x, y); sMemM2[shX][shY] = tex2D(tex_m2, x, y); sMemM3[shX][shY] = tex2D(tex_m3, x, y); // handle edges if (threadIdx.x == 0) // left { sMemM0[0][shY] = tex2D(tex_m0, x-1.0f, y); sMemM1[0][shY] = tex2D(tex_m1, x-1.0f, y); sMemM2[0][shY] = tex2D(tex_m2, x-1.0f, y); sMemM3[0][shY] = tex2D(tex_m3, x-1.0f, y); } else if (threadIdx.x == (BLOCK_DIMx - 1)) // right { sMemM0[BLOCK_DIMx+1][shY] = tex2D(tex_m0, x+1.0f, y); sMemM1[BLOCK_DIMx+1][shY] = tex2D(tex_m1, x+1.0f, y); sMemM2[BLOCK_DIMx+1][shY] = 
tex2D(tex_m2, x+1.0f, y); sMemM3[BLOCK_DIMx+1][shY] = tex2D(tex_m3, x+1.0f, y); } // MISSING THE CORNER BLOCK~ FIX IT if (threadIdx.y == 0) // up { sMemM0[shX][0] = tex2D(tex_m0, x, y-1); sMemM1[shX][0] = tex2D(tex_m1, x, y-1); sMemM2[shX][0] = tex2D(tex_m2, x, y-1); sMemM3[shX][0] = tex2D(tex_m3, x, y-1); } else if (threadIdx.y == (BLOCK_DIMy - 1)) // down { sMemM0[shX][BLOCK_DIMy+1] = tex2D(tex_m0, x, y+1); sMemM1[shX][BLOCK_DIMy+1] = tex2D(tex_m1, x, y+1); sMemM2[shX][BLOCK_DIMy+1] = tex2D(tex_m2, x, y+1); sMemM3[shX][BLOCK_DIMy+1] = tex2D(tex_m3, x, y+1); } } __syncthreads(); // sync the shared memory writes if ((x < MATRIX_DIM) && (y < MATRIX_DIM)) // Make sure cell is within the environment grid { // calculate nm nm0[loc] = sW[4]*sMemM0[shX-1][shY] + sW[5]*sMemM1[shX-1][shY] + sW[6]*sMemM2[shX-1][shY] + sW[7]*sMemM3[shX-1][shY]; nm1[loc] = sW[0]*sMemM0[shX+1][shY] + sW[1]*sMemM1[shX+1][shY] + sW[2]*sMemM2[shX+1][shY] + sW[3]*sMemM3[shX+1][shY]; nm2[loc] = sW[12]*sMemM0[shX][shY-1] + sW[13]*sMemM1[shX][shY-1] + sW[14]*sMemM2[shX][shY-1] + sW[15]*sMemM3[shX][shY-1]; nm3[loc] = sW[8]*sMemM0[shX][shY+1] + sW[9]*sMemM1[shX][shY+1] + sW[10]*sMemM2[shX][shY+1] + sW[11]*sMemM3[shX][shY+1]; } //__syncthreads(); //printf("loc %d, %d, val %f, %f, %f, %f. 
\n", x, y, (nm0[loc]), (float)(nm1[loc]), (float)(nm2[loc]), (float)(nm3[loc])); } __global__ void PF_texture_flow(dim3 srcloc, float src, bool* wallLoc, float* nm0, float* nm1, float* nm2, float* nm3, dim3 matdim, float * WWall, float * W) { __shared__ float sWWall[16]; __shared__ float sW[16]; int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * blockDim.x * gridDim.x; // copy coefficients to shared memory: if ((threadIdx.x < 4) && (threadIdx.y < 4)) { sWWall[threadIdx.x + threadIdx.y * 4] = WWall[threadIdx.x + threadIdx.y * 4]; sW[threadIdx.x + threadIdx.y * 4] = W[threadIdx.x + threadIdx.y * 4]; } __syncthreads(); float m0, m1, m2, m3; if ((x < MATRIX_DIM-1) && (y < MATRIX_DIM-1) && (x > 0) && (y > 0)) // Make sure cell is within the environment grid { m0 = tex2D(tex_m0, x, y); m1 = tex2D(tex_m1, x, y); m2 = tex2D(tex_m2, x, y); m3 = tex2D(tex_m3, x, y); float newF[4] = {0}; if ((x == srcloc.x) && (y == srcloc.y)) { m0 = src; m1 = src; m2 = src; m3 = src; } // Check if wall bool isWall = wallLoc[x + y * matdim.x]; //bool isWall = *(wallLoc + x * sof if (isWall) { // prefetch WWall into __shared__ -- commented out above (maybe textures are faster, check though) /*newF[0] = tex2D(tex_WWall,0,0)*m0 + tex2D(tex_WWall,1,0)*m1 + tex2D(tex_WWall,2,0)*m2 + tex2D(tex_WWall,3,0)*m3; newF[1] = tex2D(tex_WWall,0,1)*m0 + tex2D(tex_WWall,1,1)*m1 + tex2D(tex_WWall,2,1)*m2 + tex2D(tex_WWall,3,1)*m3; newF[2] = tex2D(tex_WWall,0,2)*m0 + tex2D(tex_WWall,1,2)*m1 + tex2D(tex_WWall,2,2)*m2 + tex2D(tex_WWall,3,2)*m3; newF[3] = tex2D(tex_WWall,0,3)*m0 + tex2D(tex_WWall,1,3)*m1 + tex2D(tex_WWall,2,3)*m2 + tex2D(tex_WWall,3,3)*m3;*/ newF[0] = sWWall[0] *m0 + sWWall[1] *m1 + sWWall[2] *m2 + sWWall[3] *m3; newF[1] = sWWall[4] *m0 + sWWall[5] *m1 + sWWall[6] *m2 + sWWall[7] *m3; newF[2] = sWWall[8] *m0 + sWWall[9] *m1 + sWWall[10]*m2 + sWWall[11]*m3; newF[3] = sWWall[12]*m0 + sWWall[13]*m1 + sWWall[14]*m2 + sWWall[15]*m3; } else 
{ // prefetch W into __shared__ /*newF[0] = tex2D(tex_W,0,0)*m0 + tex2D(tex_W,1,0)*m1 + tex2D(tex_W,2,0)*m2 + tex2D(tex_W,3,0)*m3; newF[1] = tex2D(tex_W,0,1)*m0 + tex2D(tex_W,1,1)*m1 + tex2D(tex_W,2,1)*m2 + tex2D(tex_W,3,1)*m3; newF[2] = tex2D(tex_W,0,2)*m0 + tex2D(tex_W,1,2)*m1 + tex2D(tex_W,2,2)*m2 + tex2D(tex_W,3,2)*m3; newF[3] = tex2D(tex_W,0,3)*m0 + tex2D(tex_W,1,3)*m1 + tex2D(tex_W,2,3)*m2 + tex2D(tex_W,3,3)*m3;*/ newF[0] = sW[0]*m0 + sW[1]*m1 + sW[2]*m2 + sW[3]*m3; newF[1] = sW[4]*m0 + sW[5]*m1 + sW[6]*m2 + sW[7]*m3; newF[2] = sW[8]*m0 + sW[9]*m1 + sW[10]*m2 + sW[11]*m3; newF[3] = sW[12]*m0 + sW[13]*m1 + sW[14]*m2 + sW[15]*m3; } // if (x < MATRIX_DIM-1) nm0[x+1][y] = newF[1]; if (x < MATRIX_DIM - 1) nm0[offset + 1] = newF[1]; // if (x > 0) nm1[x-1][y] = newF[0]; if (x > 0) nm1[offset - 1] = newF[0]; // if (y < MATRIX_DIM-1) nm2[x][y+1] = newF[3]; if (y < MATRIX_DIM - 1) nm2[offset + blockDim.x * gridDim.x] = newF[3]; // if (y > 0) nm3[x][y-1] = newF[2]; if (y > 0) nm3[offset - blockDim.x * gridDim.x] = newF[2]; } } __global__ void float_to_color_power_dBm( uchar4 *optr, size_t pitch, byte* walls) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * MATRIX_DIM; //int poffset = x + y * pitch/sizeof(float); float l = tex2D(tex_avg_m, (x)+0.5f, (y)+0.5f); // Convert l to dBm: l = 10 * log10f(abs(l)); // abs == 0 -l when negative, faster? 
if (l < -100) { optr[offset].x = 255; optr[offset].y = 0; optr[offset].z = 0; } else if (l < -90) { optr[offset].x = 0; optr[offset].y = 9; optr[offset].z = 255; } else if (l < -80) { optr[offset].x = 255; optr[offset].y = 154; optr[offset].z = 0; } else if (l < -70) { optr[offset].x = 255; optr[offset].y = 247; optr[offset].z = 0; } else { optr[offset].x = 40; optr[offset].y = 172; optr[offset].z = 7; } if (walls[offset]) { optr[offset].x = 0; optr[offset].y = 0; optr[offset].z = 0; } } __global__ void clean_bitmap(uchar4 *optr) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y*512; optr[offset].w = 0xff; optr[offset].x = 255; optr[offset].y = 255; optr[offset].z = 255; } __global__ void float_to_color_dBm( uchar4 *optr, size_t pitch) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * MATRIX_DIM; int poffset = x + y * pitch/sizeof(float); float l = (tex2D(tex_m0, (x)+0.5f, (y)+0.5f))+ (tex2D(tex_m1, (x)+0.5f, (y)+0.5f))+ (tex2D(tex_m2, (x)+0.5f, (y)+0.5f))+ (tex2D(tex_m3, (x)+0.5f, (y)+0.5f)); // Convert l to dBm: l = 20 * log10f(abs(l/4)); // abs == 0 -l when negative, faster? 
if (l < -100) { optr[offset].x = 255; optr[offset].y = 0; optr[offset].z = 0; } else if (l < -90) { optr[offset].x = 0; optr[offset].y = 9; optr[offset].z = 255; } else if (l < -80) { optr[offset].x = 255; optr[offset].y = 154; optr[offset].z = 0; } else if (l < -70) { optr[offset].x = 255; optr[offset].y = 247; optr[offset].z = 0; } else { optr[offset].x = 40; optr[offset].y = 172; optr[offset].z = 7; } //l += 120; // put l between 0 and 100dBm (offset of 100dBm) //l /= 120; // divide by 100 to put between 0 and 1 //optr[offset].w = 255; //if (l < 0) //{ // optr[offset].x = 10; // optr[offset].y = 0; // optr[offset].z = 155; //} //else if (l < 0.125) // { // optr[offset].x = (unsigned char)(10.0f - 80.0f*l); // optr[offset].y = (unsigned char)(1000.0f * l); // optr[offset].z = 155; // } // else if (l < 0.375) // { // optr[offset].x = 0; // optr[offset].y = (unsigned char)(125.0f + (l - 0.125f) * 120.0f); // optr[offset].z = (unsigned char)(155.0f - (l - 0.125f) * 476.0f); // } // else if (l < 0.625) // { // optr[offset].x = (unsigned char)(820 * (l - 0.375f)); // optr[offset].y = (unsigned char)(155 + (l - 0.375f)*400.0f); // optr[offset].z = (unsigned char)(36 - 144 * (l - 0.375f)); // } // else if (l < 0.875) // { // optr[offset].x = (unsigned char)(205 + 200 * (l - 0.625f)); // optr[offset].y = (unsigned char)(255 - 472 * (l - 0.625f)); // optr[offset].z = 0; // } // else if (l <= 1) // { // optr[offset].x = 255; // optr[offset].y = (unsigned char)(137 - (l -0.875) * 1096); // optr[offset].z = 0; // } // else // { // optr[offset].x = 255; // optr[offset].y = 255; // optr[offset].z = 255; // } } __global__ void add_and_average_signal(size_t pitch, int iter, float * avg_m) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int poffset = x + y * pitch/sizeof(float); float total=((tex2D(tex_m0, (x)+0.5f, (y)+0.5f))+ (tex2D(tex_m1, (x)+0.5f, (y)+0.5f))+ (tex2D(tex_m2, (x)+0.5f, (y)+0.5f))+ (tex2D(tex_m3, (x)+0.5f, 
(y)+0.5f)))*0.5; total = total*total; // square for power //if (iter % SAMPLES_TO_AVERAGE) if (1) { float oldavg = tex2D(tex_avg_m, (x)+0.5, (y)+0.5); total = oldavg*(SAMPLES_TO_AVERAGE-1) + total; avg_m[poffset] = total/SAMPLES_TO_AVERAGE; } else { avg_m[poffset] = total; } /*if (iter != 0) total += tex2D(tex_avg_m, (x)+0.5f, (y)+0.5f); if (iter == SAMPLES_TO_AVERAGE-1) { total = total / SAMPLES_TO_AVERAGE; } avg_m[poffset] = total;*/ } __global__ void float_to_color_dBm_pixelate( uchar4 *optr, size_t pitch, int ticks ) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * 512; int poffset = x + y * pitch/sizeof(float); float l = (tex2D(tex_m0, (blockIdx.x)+0.5f, (blockIdx.y)+0.5f))+ (tex2D(tex_m1, (blockIdx.x)+0.5f, (blockIdx.y)+0.5f))+ (tex2D(tex_m2, (blockIdx.x)+0.5f, (blockIdx.y)+0.5f))+ (tex2D(tex_m3, (blockIdx.x)+0.5f, (blockIdx.y)+0.5f)); // Convert l to dBm: l = 20 * log10f(abs(l/4)); // abs == 0 -l when negative, faster? 
l += 100; // put l between 0 and 100dBm (offset of 100dBm) l /= 100; // divide by 100 to put between 0 and 1 optr[offset].w = 255; if (l < 0) { optr[offset].x = 10; optr[offset].y = 0; optr[offset].z = 155; } else if (l < 0.125) { optr[offset].x = (unsigned char)(10.0f - 80.0f*l); optr[offset].y = (unsigned char)(1000.0f * l); optr[offset].z = 155; } else if (l < 0.375) { optr[offset].x = 0; optr[offset].y = (unsigned char)(125.0f + (l - 0.125f) * 120.0f); optr[offset].z = (unsigned char)(155.0f - (l - 0.125f) * 476.0f); } else if (l < 0.625) { optr[offset].x = (unsigned char)(820 * (l - 0.375f)); optr[offset].y = (unsigned char)(155 + (l - 0.375f)*400.0f); optr[offset].z = (unsigned char)(36 - 144 * (l - 0.375f)); } else if (l < 0.875) { optr[offset].x = (unsigned char)(205 + 200 * (l - 0.625f)); optr[offset].y = (unsigned char)(255 - 472 * (l - 0.625f)); optr[offset].z = 0; } else if (l <= 1) { optr[offset].x = 255; optr[offset].y = (unsigned char)(137 - (l -0.875) * 1096); optr[offset].z = 0; } else { optr[offset].x = 255; optr[offset].y = 255; optr[offset].z = 255; } } __global__ void float_to_color_pitched( uchar4 *optr, size_t pitch, int ticks ) { // map from threadIdx/BlockIdx to pixel position //int x = threadIdx.x + blockIdx.x * blockDim.x; //int y = threadIdx.y + blockIdx.y * blockDim.y; //int offset = x + y * blockDim.x * gridDim.x; int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; //int offset = x + y * blockDim.x * gridDim.x; int offset = x + y * 512; int poffset = x + y * pitch/sizeof(float); //if ((threadIdx.x==0) && (threadIdx.y == 0)) //{ // //printf("Blockdim.x: %d, blockdim.y: %d\n", blockDim.x, blockDim.y); //} //x = blockIdx.x; y = blockIdx.y; //float l = (tex2D(tex_m0, x+0.5f, y+0.5f)); float l = (tex2D(tex_m0, (blockIdx.x)+0.5f, (blockIdx.y)+0.5f)); //printf("blockidx = %d, blockidy = %d \n", blockIdx.x, blockIdx.y); if ((blockIdx.x==0) && (blockIdx.y == 0)) { //printf("Blockdim.x: %d, blockdim.y: 
%d\n", blockDim.x, blockDim.y); //printf("x = %d, y = %d, val = %f \n", x, y, l); } //printf("x = %d, y = %d, val = %f \n", x, y, l); //l = (tex2D(tex_m0, x+0.5f, y+0.5f)+1.0f)/2.0f; //l = (l+1.0f)*0.5f; /*if ((x < MATRIX_DIM) && (y < MATRIX_DIM)) {*/ optr[offset].w = 256; l = (l + SRC_MAG)/(2 * SRC_MAG); if (l < 0.125) { optr[offset].x = (unsigned char)(10.0f - 80.0f*l); optr[offset].y = (unsigned char)(1000.0f * l); optr[offset].z = 155; } else if (l < 0.375) { optr[offset].x = 0; optr[offset].y = (unsigned char)(125.0f + (l - 0.125f) * 120.0f); optr[offset].z = (unsigned char)(155.0f - (l - 0.125f) * 476.0f); } else if (l < 0.625) { optr[offset].x = (unsigned char)(820 * (l - 0.375f)); optr[offset].y = (unsigned char)(155 + (l - 0.375f)*400.0f); optr[offset].z = (unsigned char)(36 - 144 * (l - 0.375f)); } else if (l < 0.875) { optr[offset].x = (unsigned char)(205 + 200 * (l - 0.625f)); optr[offset].y = (unsigned char)(255 - 472 * (l - 0.625f)); optr[offset].z = 0; } else if (l <= 1) { optr[offset].x = 255; optr[offset].y = (unsigned char)(137 - (l -0.875) * 1096); optr[offset].z = 0; } else { optr[offset].x = 255; optr[offset].y = 255; optr[offset].z = 255; } /*if (l < 0.0f) { optr[offset].y = (unsigned char)(255.0f*(1.0f+l)); optr[offset].z = (unsigned char)(255.0f*(1.0f+l));optr[offset].x = 0; } else { optr[offset].y = 0; optr[offset].z = (unsigned char)(255.0f*(1.0f-l)); optr[offset].x = (unsigned char)(255.0f*l); }*/ /*optr[offset].x = (unsigned char)(255.0f * l); optr[offset].y = (unsigned char)(255.0f * 0.67f*l); optr[offset].z = (unsigned char)(255.0f * (1.0f-l));*/ /*}*/ //float s = 1; //int h = (180 + (int)(360.0f * outSrc[poffset])) % 360; //float m1, m2; //if (l <= 0.5f) // m2 = l * (1 + s); //else // m2 = l + s - l * s; //m1 = 2 * l - m2; /*optr[offset].x = value( m1, m2, h+120 ); optr[offset].y = value( m1, m2, h ); optr[offset].z = value( m1, m2, h -120 ); optr[offset].w = 255;*/ } __global__ void testTexturesLoop(void) { for (int x = 0; x < 
MATRIX_DIM; x++) { for (int y = 0; y < MATRIX_DIM; y++) { printf("%f, ",(float)(tex2D(tex_m0, (float)(x)+0.5f, (float)(y)+0.5f))); } printf("\n"); } } __global__ void PF_ptr_flow(cudaPitchedPtr mPtr, cudaExtent mExt, dim3 matrix_dimensions, double src, dim3 srcloc, bool * wallLoc, float * WWall, float * W, cudaPitchedPtr nmPtr) { __shared__ float sWWall[16]; __shared__ float sW[16]; int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if ((threadIdx.x < 4) && (threadIdx.y < 4)) { sWWall[threadIdx.x + threadIdx.y * 4] = WWall[threadIdx.x + threadIdx.y * 4]; sW[threadIdx.x + threadIdx.y * 4] = W[threadIdx.x + threadIdx.y * 4]; //if ((threadIdx.x == 0) && (threadIdx.y == 0)) printf("Block %d,%d \n", blockIdx.x,blockIdx.y); //printf("x=%d, y=%d, WWall %f, sWWall %f, W %f, sW %f \n", x, y, WWall[x + y * 4], sWWall[x+y * 4], W[x+y*4], sW[x+y*4]); } __syncthreads(); if ((x < MATRIX_DIM-1) && (y < MATRIX_DIM-1) && (x > 0) && (y > 0)) // Make sure cell is within the environment grid { // Find location within the pitched memory float *m = (float*)mPtr.ptr; float *nm = (float*)nmPtr.ptr; size_t pitch = mPtr.pitch; unsigned int e_per_row = pitch / SoF; size_t slice_pitch = e_per_row * matrix_dimensions.y; size_t one_sp = 1 * slice_pitch; size_t two_sp = 2 * slice_pitch; size_t three_sp = 3 * slice_pitch; size_t yep = y * e_per_row; float *mxy = m + x + yep; float *nmxy = nm + x + yep; //float m0 = m[CI(x, y, 0, e_per_row, matrix_dimensions.y)]; //float * m0ptr = (m + x + y * e_per_row + 0 * slice_pitch); //printf("m0ptr, x %d, y %d, is %d \n", x, y, m0ptr); //float m0 = *m0ptr; float m0 = *(mxy); //float m1 = m[CI(x, y, 1, e_per_row, matrix_dimensions.y)]; float m1 = *(mxy + one_sp); //float m2 = m[CI(x, y, 2, e_per_row, matrix_dimensions.y)]; float m2 = *(mxy + two_sp); //float m3 = m[CI(x, y, 3, e_per_row, matrix_dimensions.y)]; float m3 = *(mxy + three_sp); float newF[4] = {0}; // Check if source, assign value if it is if ((x == 
srcloc.x) && (y == srcloc.y)) { m0 = src; m1 = src; m2 = src; m3 = src; } // Check if wall bool isWall = wallLoc[x + y * matrix_dimensions.x]; //bool isWall = *(wallLoc + x * sof if (isWall) { // prefetch WWall into __shared__ -- done newF[0] = sWWall[0] *m0 + sWWall[1] *m1 + sWWall[2] *m2 + sWWall[3] *m3; newF[1] = sWWall[4] *m0 + sWWall[5] *m1 + sWWall[6] *m2 + sWWall[7] *m3; newF[2] = sWWall[8] *m0 + sWWall[9] *m1 + sWWall[10]*m2 + sWWall[11]*m3; newF[3] = sWWall[12]*m0 + sWWall[13]*m1 + sWWall[14]*m2 + sWWall[15]*m3; } else { // prefetch W into __shared__ -- done newF[0] = sW[0]*m0 + sW[1]*m1 + sW[2]*m2 + sW[3]*m3; newF[1] = sW[4]*m0 + sW[5]*m1 + sW[6]*m2 + sW[7]*m3; newF[2] = sW[8]*m0 + sW[9]*m1 + sW[10]*m2 + sW[11]*m3; newF[3] = sW[12]*m0 + sW[13]*m1 + sW[14]*m2 + sW[15]*m3; } //if (x < MATRIX_DIM-1) nm[CI(x + 1, y, 0, e_per_row, matrix_dimensions.y)] = newF[1]; // if (x < MATRIX_DIM-1) nm0[x+1][y] = newF[1]; if (x < MATRIX_DIM - 1) *(nmxy + 1) = newF[1]; //if (x > 0) nm[CI(x - 1, y, 1, e_per_row, matrix_dimensions.y)] = newF[0]; // if (x > 0) nm1[x-1][y] = newF[0]; if (x > 0) *(nmxy - 1 + one_sp) = newF[0]; //if (y < MATRIX_DIM-1) nm[CI(x, y + 1, 2, e_per_row, matrix_dimensions.y)] = newF[3]; // if (y < MATRIX_DIM-1) nm2[x][y+1] = newF[3]; if (y < MATRIX_DIM - 1) *(nmxy + e_per_row + two_sp) = newF[3]; //if (y > 0) nm[CI(x, y - 1, 3, e_per_row, matrix_dimensions.y)] = newF[2]; // if (y > 0) nm3[x][y-1] = newF[2]; if (y > 0) *(nmxy - e_per_row + three_sp) = newF[2]; } } //PFNGLBINDBUFFERARBPROC glBindBuffer = NULL; //PFNGLDELETEBUFFERSARBPROC glDeleteBuffers = NULL; //PFNGLGENBUFFERSARBPROC glGenBuffers = NULL; //PFNGLBUFFERDATAARBPROC glBufferData = NULL; //GLuint bufferObj; //cudaGraphicsResource *resource; //GLuint disp_texture; // //void cPFsetupDisplay(void) //{ // // Initialize the CUDA context // cudaDeviceProp prop; // memset(&prop, 0, sizeof(cudaDeviceProp)); // prop.major = 1; prop.minor = 0; // checkCudaErrors(cudaChooseDevice(&dev, &prop)); // 
checkCudaErrors(cudaGLSetGLDevice(dev)); // // glGenTextures(1, &disp_texture); // glBindTexture(GL_TEXTURE_2D, disp_texture); // // int dev; int c = 1; // char* dummy = ""; // glutInit(&c, &dummy); // glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA); // glutInitWindowSize(DIM, DIM); // glutCreateWindow("PixelFlow"); // // glBindBuffer = (PFNGLBINDBUFFERARBPROC)GET_PROC_ADDRESS("glBindBuffer"); // glDeleteBuffers = (PFNGLDELETEBUFFERSARBPROC)GET_PROC_ADDRESS("glDeleteBuffers"); // glGenBuffers = (PFNGLGENBUFFERSARBPROC)GET_PROC_ADDRESS("glGenBuffers"); // glBufferData = (PFNGLBUFFERDATAARBPROC)GET_PROC_ADDRESS("glBufferData"); // // /*glGenBuffers(1, &bufferObj); // glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, bufferObj); // glBufferData(GL_PIXEL_UNPACK_BUFFER_ARB, DIM*DIM*4, NULL, GL_DYNAMIC_DRAW_ARB); // // memset(&prop, 0, sizeof(cudaDeviceProp)); // prop.major = 1; prop.minor = 0; // checkCudaErrors(cudaChooseDevice(&dev, &prop)); // checkCudaErrors(cudaGLSetGLDevice(dev)); // checkCudaErrors(cudaGraphicsGLRegisterBuffer(&resource, bufferObj, cudaGraphicsRegisterFlagsWriteDiscard));*/ // //} void cPFcaller_display_exit(void) { // Free all allocated memory (move into separate delete function later) cudaUnbindTexture(tex_m0); cudaUnbindTexture(tex_m1); cudaUnbindTexture(tex_m2); cudaUnbindTexture(tex_m3); cudaUnbindTexture(tex_nm0); cudaUnbindTexture(tex_nm1); cudaUnbindTexture(tex_nm2); cudaUnbindTexture(tex_nm3); cudaUnbindTexture(tex_WWall); cudaUnbindTexture(tex_W); cudaFree(dev_m0); cudaFree(dev_m1); cudaFree(dev_m2); cudaFree(dev_m3); cudaFree(dev_nm0); cudaFree(dev_nm1); cudaFree(dev_nm2); cudaFree(dev_nm3); cudaFree(dev_WWall); cudaFree(dev_W); } #if (MATRIX_DIM < 512) #define BIT_DIM 512 #else #define BIT_DIM MATRIX_DIM #endif void cPFcaller_display(unsigned int num_iterations, float * &m_ptr) { //#if (MATRIX_DIM < 512) GPUAnimBitmap bitmap(512, 512, NULL); //#else GPUAnimBitmap bitmap(BIT_DIM, BIT_DIM, NULL); //#endif uchar4 * devPtr; size_t size; // init 
textures gpu_iterations = num_iterations; cudaError_t status = cudaSuccess; float source = 0.0f; dim3 matdim; matdim.x = MATRIX_DIM; matdim.y = MATRIX_DIM; matdim.z = 4; dim3 threads(BLOCK_DIMx,BLOCK_DIMy,1); dim3 grids(GRID_DIMx,GRID_DIMy,1); size_t pitch; checkCudaErrors(cudaMallocPitch((void**)&dev_m0, &pitch, MATRIX_DIM*sizeof(float), MATRIX_DIM)); checkCudaErrors(cudaMallocPitch((void**)&dev_m1, &pitch, MATRIX_DIM*sizeof(float), MATRIX_DIM)); checkCudaErrors(cudaMallocPitch((void**)&dev_m2, &pitch, MATRIX_DIM*sizeof(float), MATRIX_DIM)); checkCudaErrors(cudaMallocPitch((void**)&dev_m3, &pitch, MATRIX_DIM*sizeof(float), MATRIX_DIM)); checkCudaErrors(cudaMallocPitch((void**)&dev_nm0, &pitch, MATRIX_DIM*sizeof(float), MATRIX_DIM)); checkCudaErrors(cudaMallocPitch((void**)&dev_nm1, &pitch, MATRIX_DIM*sizeof(float), MATRIX_DIM)); checkCudaErrors(cudaMallocPitch((void**)&dev_nm2, &pitch, MATRIX_DIM*sizeof(float), MATRIX_DIM)); checkCudaErrors(cudaMallocPitch((void**)&dev_nm3, &pitch, MATRIX_DIM*sizeof(float), MATRIX_DIM)); checkCudaErrors(cudaMallocPitch((void**)&dev_avg_m, &pitch, MATRIX_DIM*sizeof(float), MATRIX_DIM)); checkCudaErrors(cudaMalloc( (void**)&dev_WWall, WWAL_DIMx*WWAL_DIMy*sizeof(float))); // WWall checkCudaErrors(cudaMalloc( (void**)&dev_W, W_DIMx*W_DIMy*sizeof(float))); // W checkCudaErrors(cudaMemset2D(dev_m0, pitch, 0, MATRIX_DIM*sizeof(float), MATRIX_DIM)); // set 0 to every BYTE checkCudaErrors(cudaMemset2D(dev_m1, pitch, 0, MATRIX_DIM*sizeof(float), MATRIX_DIM)); // set 0 to every BYTE checkCudaErrors(cudaMemset2D(dev_m2, pitch, 0, MATRIX_DIM*sizeof(float), MATRIX_DIM)); // set 0 to every BYTE checkCudaErrors(cudaMemset2D(dev_m3, pitch, 0, MATRIX_DIM*sizeof(float), MATRIX_DIM)); // set 0 to every BYTE checkCudaErrors(cudaMemset2D(dev_nm0, pitch, 0, MATRIX_DIM*sizeof(float), MATRIX_DIM)); // set 0 to every BYTE checkCudaErrors(cudaMemset2D(dev_nm1, pitch, 0, MATRIX_DIM*sizeof(float), MATRIX_DIM)); // set 0 to every BYTE 
checkCudaErrors(cudaMemset2D(dev_nm2, pitch, 0, MATRIX_DIM*sizeof(float), MATRIX_DIM)); // set 0 to every BYTE checkCudaErrors(cudaMemset2D(dev_nm3, pitch, 0, MATRIX_DIM*sizeof(float), MATRIX_DIM)); // set 0 to every BYTE checkCudaErrors(cudaMemset2D(dev_avg_m, pitch, 0, MATRIX_DIM*sizeof(float), MATRIX_DIM)); checkCudaErrors(cudaMemcpy(dev_WWall, host_WWall, WWAL_DIMx*WWAL_DIMy*sizeof(float), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(dev_W, host_W, W_DIMx*W_DIMy*sizeof(float), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpyToSymbol(cW, host_W, W_DIMx*W_DIMy*sizeof(float), 0U, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMalloc( (void**)&dev_wall, MATRIX_DIM*MATRIX_DIM*sizeof(byte))); checkCudaErrors(cudaMemcpy(dev_wall, host_Wall, MATRIX_DIM*MATRIX_DIM*sizeof(byte), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMalloc((void**)&dev_src, MATRIX_DIM*MATRIX_DIM*sizeof(byte))); checkCudaErrors(cudaMemcpy(dev_src, host_src, MATRIX_DIM*MATRIX_DIM*sizeof(byte), cudaMemcpyHostToDevice)); cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>(); // not happy? 
tex_m0.normalized = false; tex_m0.filterMode = cudaFilterModeLinear; tex_m0.addressMode[0] = cudaAddressModeBorder; tex_m1.normalized = false; tex_m1.filterMode = cudaFilterModeLinear;tex_m1.addressMode[0] = cudaAddressModeBorder; tex_m2.normalized = false; tex_m2.filterMode = cudaFilterModeLinear;tex_m2.addressMode[0] = cudaAddressModeBorder; tex_m3.normalized = false; tex_m3.filterMode = cudaFilterModeLinear;tex_m3.addressMode[0] = cudaAddressModeBorder; tex_nm0.normalized = false; tex_nm0.filterMode = cudaFilterModeLinear;tex_nm0.addressMode[0] = cudaAddressModeBorder; tex_nm1.normalized = false; tex_nm1.filterMode = cudaFilterModeLinear;tex_nm1.addressMode[0] = cudaAddressModeBorder; tex_nm2.normalized = false; tex_nm2.filterMode = cudaFilterModeLinear;tex_nm2.addressMode[0] = cudaAddressModeBorder; tex_nm3.normalized = false; tex_nm3.filterMode = cudaFilterModeLinear;tex_nm3.addressMode[0] = cudaAddressModeBorder; tex_avg_m.normalized = false; tex_avg_m.filterMode = cudaFilterModeLinear;tex_avg_m.addressMode[0] = cudaAddressModeBorder; checkCudaErrors(cudaBindTexture2D(NULL, tex_m0, dev_m0, desc, MATRIX_DIM, MATRIX_DIM, pitch)); checkCudaErrors(cudaBindTexture2D(NULL, tex_m1, dev_m1, desc, MATRIX_DIM, MATRIX_DIM, pitch)); checkCudaErrors(cudaBindTexture2D(NULL, tex_m2, dev_m2, desc, MATRIX_DIM, MATRIX_DIM, pitch)); checkCudaErrors(cudaBindTexture2D(NULL, tex_m3, dev_m3, desc, MATRIX_DIM, MATRIX_DIM, pitch)); checkCudaErrors(cudaBindTexture2D(NULL, tex_nm0, dev_nm0, desc, MATRIX_DIM, MATRIX_DIM, pitch)); checkCudaErrors(cudaBindTexture2D(NULL, tex_nm1, dev_nm1, desc, MATRIX_DIM, MATRIX_DIM, pitch)); checkCudaErrors(cudaBindTexture2D(NULL, tex_nm2, dev_nm2, desc, MATRIX_DIM, MATRIX_DIM, pitch)); checkCudaErrors(cudaBindTexture2D(NULL, tex_nm3, dev_nm3, desc, MATRIX_DIM, MATRIX_DIM, pitch)); checkCudaErrors(cudaBindTexture2D(NULL, tex_avg_m, dev_avg_m, desc, MATRIX_DIM, MATRIX_DIM, pitch)); // Allocate 2D array for wall (unrolled to 1D) -- implement hash table 
//checkCudaErrors(cudaMalloc((void**)&dev_wall, matdim.x*matdim.y*sizeof(bool))); // x*y elements in a 1D array //checkCudaErrors(cudaMemcpy(dev_wall, host_Wall, matdim.x*matdim.y*sizeof(bool), cudaMemcpyHostToDevice)); source = 0.0f; checkCudaErrors(cudaDeviceSynchronize()); v_shared_mem_size = 2 * WWAL_DIMx * WWAL_DIMy * sizeof(float) + BLOCK_DIMx*BLOCK_DIMy*4*sizeof(float); v_p_src_m0 = dev_m0 + src_loc.y * pitch/sizeof(float) + src_loc.x; v_p_src_m1 = dev_m1 + src_loc.y * pitch/sizeof(float) + src_loc.x; v_p_src_m2 = dev_m2 + src_loc.y * pitch/sizeof(float) + src_loc.x; v_p_src_m3 = dev_m3 + src_loc.y * pitch/sizeof(float) + src_loc.x; cudaStreamCreate(&v_stream1); cudaStreamCreate(&v_stream2); cudaStreamCreate(&v_stream3); cudaStreamCreate(&v_stream4); dim3 stream_threads; dim3 stream_blocks; stream_threads.x = 1; stream_threads.y = 256; stream_threads.z = 1; stream_blocks.x = 1; stream_blocks.y = (MATRIX_DIM + stream_threads.y - 1) /stream_threads.y; //((MATRIX_DIM + BLOCK_DIMy - 1)/BLOCK_DIMy) v_pitch = pitch; v_matdim.x = matdim.x; v_matdim.y = matdim.y; v_matdim.z = matdim.z; cudaEvent_t start,stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start,0); /* HANDLE_ERROR( cudaGraphicsMapResources( 1, &(bitmap->resource), NULL ) ); HANDLE_ERROR( cudaGraphicsResourceGetMappedPointer( (void**)&devPtr, &size, bitmap->resource) ); bitmap->fAnim( devPtr, bitmap->dataBlock, ticks++ ); // HANDLE_ERROR( cudaGraphicsUnmapResources( 1, &(bitmap->resource), NULL ) );*/ //cudaGraphicsMapResources( 1, &(bitmap.resource), NULL ) ; //cudaGraphicsResourceGetMappedPointer( (void**)&devPtr, &size, bitmap.resource); ////clean_bitmap<<<(32,32,1),(16,16,1)>>>(devPtr); ////bitmap.Draw(); //cudaGraphicsUnmapResources( 1, &(bitmap.resource), NULL ); //glClearColor( 0.0, 0.0, 0.0, 1.0 ); // glClear( GL_COLOR_BUFFER_BIT ); //glutSwapBuffers(); bitmap.anim_and_exit((void(*)(uchar4*,void*,int))cPFcaller_generateFrame, (void(*)(void*))cPFcaller_display_exit); 
bitmap.free_resources(); } dim3 colorgrid(MATRIX_DIM,MATRIX_DIM,1); dim3 colorthreads(512/MATRIX_DIM,512/MATRIX_DIM,1); void cPFcaller_generateFrame(uchar4 * dispPixels, void*, int ticks) { static int t = 0; cudaEvent_t start, stop; checkCudaErrors(cudaEventCreate(&start)); checkCudaErrors(cudaEventCreate(&stop)); checkCudaErrors(cudaEventRecord(start, 0)); float source = 0.0f; for (int i = 0; i < SAMPLING; i++) { //__global__ void PF_roundscatter(float *nm0, float *nm1, float *nm2, float *nm3, size_t pitch) PF_roundscatter<<<v_grids, v_threads>>>(dev_nm0, dev_nm1, dev_nm2, dev_nm3, v_pitch); /*PF_texture_slideright<<<v_grids, v_threads, 0, v_stream1>>>(dev_nm0, v_pitch); PF_texture_slideleft<<<v_grids, v_threads, 0, v_stream2>>>(dev_nm1, v_pitch); PF_texture_slidedown<<<v_grids, v_threads, 0, v_stream3>>>(dev_nm2, v_pitch); PF_texture_slideup<<<v_grids, v_threads, 0, v_stream4>>>(dev_nm3, v_pitch);*/ //PF_padded_texture_flow<<<v_grids,v_threads,v_shared_mem_size>>>(src_loc, source, dev_wall, dev_nm0, dev_nm1, dev_nm2, dev_nm3, v_matdim, dev_WWall, dev_W, v_pitch); cudaDeviceSynchronize(); source = SRC_MAG * sin(PI * (i+t) * DELTA_LENGTH * SRC_FREQ/CT); PF_copy_withWall<<<v_grids,v_threads>>>(dev_m0, dev_m1, dev_m2, dev_m3, dev_wall, v_matdim, v_pitch, dev_src, source); add_and_average_signal<<<v_grids, v_threads>>>(v_pitch, i, dev_avg_m); //(size_t pitch, int iter, float * avg_m) cudaDeviceSynchronize(); //float source = SRC_MAG * sin(2 * PI * 1 * (float)(i+t) / SAMPLING); //float source = 1.0; /*source = SRC_MAG * sin(PI * (i+t) * DELTA_LENGTH * SRC_FREQ/CT);*/ //float zero= 0; //cudaMemcpy(v_p_src_m0, &source, sizeof(float), cudaMemcpyHostToDevice); //cudaMemcpy(v_p_src_m1, &source, sizeof(float), cudaMemcpyHostToDevice); //cudaMemcpy(v_p_src_m2, &source, sizeof(float), cudaMemcpyHostToDevice); ////cudaMemcpy(v_p_src_m2++, &source, sizeof(float), cudaMemcpyHostToDevice); ////cudaMemcpy(v_p_src_m2++, &source, sizeof(float), cudaMemcpyHostToDevice); 
////cudaMemcpy(v_p_src_m2++, &source, sizeof(float), cudaMemcpyHostToDevice); //cudaMemcpy(v_p_src_m3, &source, sizeof(float), cudaMemcpyHostToDevice); //cudaDeviceSynchronize(); } checkCudaErrors(cudaEventRecord(stop, 0)); cudaEventSynchronize(stop); //v_p_src_m2 = dev_m2 + src_loc.y * v_pitch/sizeof(float) + src_loc.x; float elapsed; cudaEventElapsedTime(&elapsed, start, stop); //printf("Time for frame: %3.1f ms \n", elapsed); t += SAMPLING; //printf("source at %d is %f\n", (t), source); if (MATRIX_DIM<512) { float_to_color_dBm_pixelate<<<colorgrid, colorthreads>>>(dispPixels, v_pitch, ticks); } else { //float_to_color_dBm<<<v_grids,v_threads>>>(dispPixels, v_pitch); float_to_color_power_dBm<<<v_grids, v_threads>>>(dispPixels, v_pitch, dev_wall); } } void cPFcaller(unsigned int num_iterations, float * &m_ptr) { gpu_iterations = num_iterations; cudaError_t status = cudaSuccess; float source = 0.0f; dim3 matdim; matdim.x = MATRIX_DIM; matdim.y = MATRIX_DIM; matdim.z = 4; dim3 threads(BLOCK_DIMx,BLOCK_DIMy,1); dim3 grids(GRID_DIMx,GRID_DIMy,1); size_t pitch; checkCudaErrors(cudaMallocPitch((void**)&dev_m0, &pitch, MATRIX_DIM*sizeof(float), MATRIX_DIM)); checkCudaErrors(cudaMallocPitch((void**)&dev_m1, &pitch, MATRIX_DIM*sizeof(float), MATRIX_DIM)); checkCudaErrors(cudaMallocPitch((void**)&dev_m2, &pitch, MATRIX_DIM*sizeof(float), MATRIX_DIM)); checkCudaErrors(cudaMallocPitch((void**)&dev_m3, &pitch, MATRIX_DIM*sizeof(float), MATRIX_DIM)); checkCudaErrors(cudaMallocPitch((void**)&dev_nm0, &pitch, MATRIX_DIM*sizeof(float), MATRIX_DIM)); checkCudaErrors(cudaMallocPitch((void**)&dev_nm1, &pitch, MATRIX_DIM*sizeof(float), MATRIX_DIM)); checkCudaErrors(cudaMallocPitch((void**)&dev_nm2, &pitch, MATRIX_DIM*sizeof(float), MATRIX_DIM)); checkCudaErrors(cudaMallocPitch((void**)&dev_nm3, &pitch, MATRIX_DIM*sizeof(float), MATRIX_DIM)); checkCudaErrors(cudaMalloc( (void**)&dev_WWall, WWAL_DIMx*WWAL_DIMy*sizeof(float))); // WWall checkCudaErrors(cudaMalloc( (void**)&dev_W, 
W_DIMx*W_DIMy*sizeof(float))); // W checkCudaErrors(cudaMemset2D(dev_m0, pitch, 0, MATRIX_DIM*sizeof(float), MATRIX_DIM)); // set 0 to every BYTE checkCudaErrors(cudaMemset2D(dev_m1, pitch, 0, MATRIX_DIM*sizeof(float), MATRIX_DIM)); // set 0 to every BYTE checkCudaErrors(cudaMemset2D(dev_m2, pitch, 0, MATRIX_DIM*sizeof(float), MATRIX_DIM)); // set 0 to every BYTE checkCudaErrors(cudaMemset2D(dev_m3, pitch, 0, MATRIX_DIM*sizeof(float), MATRIX_DIM)); // set 0 to every BYTE checkCudaErrors(cudaMemset2D(dev_nm0, pitch, 0, MATRIX_DIM*sizeof(float), MATRIX_DIM)); // set 0 to every BYTE checkCudaErrors(cudaMemset2D(dev_nm1, pitch, 0, MATRIX_DIM*sizeof(float), MATRIX_DIM)); // set 0 to every BYTE checkCudaErrors(cudaMemset2D(dev_nm2, pitch, 0, MATRIX_DIM*sizeof(float), MATRIX_DIM)); // set 0 to every BYTE checkCudaErrors(cudaMemset2D(dev_nm3, pitch, 0, MATRIX_DIM*sizeof(float), MATRIX_DIM)); // set 0 to every BYTE checkCudaErrors(cudaMemcpy(dev_WWall, host_WWall, WWAL_DIMx*WWAL_DIMy*sizeof(float), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(dev_W, host_W, W_DIMx*W_DIMy*sizeof(float), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpyToSymbol(cW, host_W, W_DIMx*W_DIMy*sizeof(float), 0U, cudaMemcpyHostToDevice)); cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>(); // not happy? 
tex_m0.normalized = false; tex_m0.filterMode = cudaFilterModeLinear; tex_m0.addressMode[0] = cudaAddressModeBorder; tex_m1.normalized = false; tex_m1.filterMode = cudaFilterModeLinear;tex_m1.addressMode[0] = cudaAddressModeBorder; tex_m2.normalized = false; tex_m2.filterMode = cudaFilterModeLinear;tex_m2.addressMode[0] = cudaAddressModeBorder; tex_m3.normalized = false; tex_m3.filterMode = cudaFilterModeLinear;tex_m3.addressMode[0] = cudaAddressModeBorder; tex_nm0.normalized = false; tex_nm0.filterMode = cudaFilterModeLinear;tex_nm0.addressMode[0] = cudaAddressModeBorder; tex_nm1.normalized = false; tex_nm1.filterMode = cudaFilterModeLinear;tex_nm1.addressMode[0] = cudaAddressModeBorder; tex_nm2.normalized = false; tex_nm2.filterMode = cudaFilterModeLinear;tex_nm2.addressMode[0] = cudaAddressModeBorder; tex_nm3.normalized = false; tex_nm3.filterMode = cudaFilterModeLinear;tex_nm3.addressMode[0] = cudaAddressModeBorder; checkCudaErrors(cudaBindTexture2D(NULL, tex_m0, dev_m0, desc, MATRIX_DIM, MATRIX_DIM, pitch)); checkCudaErrors(cudaBindTexture2D(NULL, tex_m1, dev_m1, desc, MATRIX_DIM, MATRIX_DIM, pitch)); checkCudaErrors(cudaBindTexture2D(NULL, tex_m2, dev_m2, desc, MATRIX_DIM, MATRIX_DIM, pitch)); checkCudaErrors(cudaBindTexture2D(NULL, tex_m3, dev_m3, desc, MATRIX_DIM, MATRIX_DIM, pitch)); checkCudaErrors(cudaBindTexture2D(NULL, tex_nm0, dev_nm0, desc, MATRIX_DIM, MATRIX_DIM, pitch)); checkCudaErrors(cudaBindTexture2D(NULL, tex_nm1, dev_nm1, desc, MATRIX_DIM, MATRIX_DIM, pitch)); checkCudaErrors(cudaBindTexture2D(NULL, tex_nm2, dev_nm2, desc, MATRIX_DIM, MATRIX_DIM, pitch)); checkCudaErrors(cudaBindTexture2D(NULL, tex_nm3, dev_nm3, desc, MATRIX_DIM, MATRIX_DIM, pitch)); // Allocate 2D array for wall (unrolled to 1D) -- implement hash table checkCudaErrors(cudaMalloc((void**)&dev_wall, matdim.x*matdim.y*sizeof(bool))); // x*y elements in a 1D array checkCudaErrors(cudaMemcpy(dev_wall, host_Wall, matdim.x*matdim.y*sizeof(bool), cudaMemcpyHostToDevice)); source = 
0.0f; checkCudaErrors(cudaDeviceSynchronize()); int shared_mem_size = 2 * WWAL_DIMx * WWAL_DIMy * sizeof(float) + BLOCK_DIMx*BLOCK_DIMy*4*sizeof(float); float * p_src_m0 = dev_m0 + src_loc.y * pitch/sizeof(float) + src_loc.x; float * p_src_m1 = dev_m1 + src_loc.y * pitch/sizeof(float) + src_loc.y; float * p_src_m2 = dev_m2 + src_loc.y * pitch/sizeof(float) + src_loc.x; float * p_src_m3 = dev_m3 + src_loc.y * pitch/sizeof(float) + src_loc.y; cudaStream_t stream1, stream2, stream3, stream4; cudaStreamCreate(&stream1); cudaStreamCreate(&stream2); cudaStreamCreate(&stream3); cudaStreamCreate(&stream4); dim3 stream_threads; dim3 stream_blocks; stream_threads.x = 1; stream_threads.y = 256; stream_threads.z = 1; stream_blocks.x = 1; stream_blocks.y = (MATRIX_DIM + stream_threads.y - 1) /stream_threads.y; //((MATRIX_DIM + BLOCK_DIMy - 1)/BLOCK_DIMy) cudaEvent_t start,stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start,0); clock_t t2; t2=clock(); // begin timing for (int iter = 0; iter < gpu_iterations; iter++) { source = src_amplitude * sin(2 * PI * src_frequency * (double)(iter) * 0.01); cudaMemcpy(p_src_m0, &source, sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(p_src_m1, &source, sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(p_src_m2, &source, sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(p_src_m3, &source, sizeof(float), cudaMemcpyHostToDevice); cudaDeviceSynchronize(); /*checkCudaErrors(cudaMemcpy(p_src_m0, &source, sizeof(float), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(p_src_m1, &source, sizeof(float), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(p_src_m2, &source, sizeof(float), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(p_src_m3, &source, sizeof(float), cudaMemcpyHostToDevice)); checkCudaErrors(cudaDeviceSynchronize());*/ //printf("Calculation \n"); //PF_padded_texture_flow<<<grids,threads,shared_mem_size>>>(src_loc, source, dev_wall, dev_nm0, dev_nm1, dev_nm2, dev_nm3, matdim, dev_WWall, dev_W, 
pitch); // PF_padded_texture_flow(dim3 srcloc, float src, bool* wallLoc, float*nm0, float*nm1, float* nm2, float* nm3, dim3 matdim, float * WWall, float *W) //PF_registers_texture_flow<<<grids,threads, (W_DIMx*W_DIMy*sizeof(float))>>>(dev_nm0, dev_nm1, dev_nm2, dev_nm3, dev_W, pitch); //__global__ void PF_registers_texture_flow(float * nm0, float * nm1, float * nm2, float * nm3, float * W, size_t pitch) //checkCudaErrors(cudaPeekAtLastError()); /*PF_texture_slideright<<<stream_blocks, stream_threads, 0, stream1>>>(dev_nm0, pitch); PF_texture_slideleft<<<stream_blocks, stream_threads, 0, stream2>>>(dev_nm1, pitch); PF_texture_slidedown<<<stream_blocks, stream_threads, 0, stream3>>>(dev_nm2, pitch); PF_texture_slideup<<<stream_blocks, stream_threads, 0, stream4>>>(dev_nm3, pitch);*/ PF_texture_slideright<<<grids, threads, 0, stream1>>>(dev_nm0, pitch); PF_texture_slideleft<<<grids, threads, 0, stream2>>>(dev_nm1, pitch); PF_texture_slidedown<<<grids, threads, 0, stream3>>>(dev_nm2, pitch); PF_texture_slideup<<<grids, threads, 0, stream4>>>(dev_nm3, pitch); //checkCudaErrors(cudaDeviceSynchronize()); cudaDeviceSynchronize(); /*printf("NM texture values \n"); testTexturesLoop<<<1,1>>>(); cudaDeviceSynchronize();*/ PF_padded_texture_copy<<<grids,threads>>>(dev_m0, dev_m1, dev_m2, dev_m3, matdim, pitch); cudaDeviceSynchronize(); /*printf("M texture values \n"); testTexturesLoop<<<1,1>>>(); cudaDeviceSynchronize();*/ } cudaEventRecord(stop,0); cudaEventSynchronize(stop); float elapsedtime; cudaEventElapsedTime(&elapsedtime, start, stop); printf("CUDA measured: %3.1f ms \n", elapsedtime); cudaEventDestroy(start); cudaEventDestroy(stop); long int final=clock()-t2; printf("GPU iterations took %li ticks (%f seconds) \n", final, ((float)final)/CLOCKS_PER_SEC); m_host = (float *)malloc(sizeof(float)*MATRIX_DIM*MATRIX_DIM); m_ptr = m_host; // So that the class can access M values //checkCudaErrors(cudaMemcpy(m_host, dev_m0, MATRIX_DIM*MATRIX_DIM*sizeof(float), 
cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy2D(m_host, MATRIX_DIM*sizeof(float), dev_m0, pitch, MATRIX_DIM*sizeof(float), MATRIX_DIM, cudaMemcpyDeviceToHost)); cudaDeviceSynchronize(); //status = cudaMemcpy3D(&hm_p); //if (status != cudaSuccess){printf("Uhoh: %s \n", cudaGetErrorString(status));} // Free all allocated memory (move into separate delete function later) cudaFree(dev_m0); cudaFree(dev_m1); cudaFree(dev_m2); cudaFree(dev_m3); cudaFree(dev_nm0); cudaFree(dev_nm1); cudaFree(dev_nm2); cudaFree(dev_nm3); cudaFree(dev_WWall); cudaFree(dev_W); cudaUnbindTexture(tex_m0); cudaUnbindTexture(tex_m1); cudaUnbindTexture(tex_m2); cudaUnbindTexture(tex_m3); cudaUnbindTexture(tex_nm0); cudaUnbindTexture(tex_nm1); cudaUnbindTexture(tex_nm2); cudaUnbindTexture(tex_nm3); cudaUnbindTexture(tex_WWall); cudaUnbindTexture(tex_W); //cudaFree(m_device.ptr); //cudaFree(nm_device.ptr); //cudaFree(dev_wall); //cudaFree(dev_WWall); //cudaFree(dev_W); } using namespace cv; void cPFinit(float matrixFlow[][4], float matrixWall[][4], float in_sourceLoc[]) { // Initialize some values coef = 1; src_amplitude = 1.0; src_frequency = 1.0; Mat image; image = imread("test.bmp", CV_LOAD_IMAGE_GRAYSCALE); if(! 
image.data ) // Check for invalid input { printf("invalid file :( \n"); return; } //namedWindow("Imported environment", WINDOW_AUTOSIZE); //imshow("Imported environment", image); //printf("Image has %d columns and %d rows \n", image.cols, image.rows); host_Wall = (byte *)malloc(sizeof(byte)*MATRIX_DIM*MATRIX_DIM); memset(host_Wall, 0, MATRIX_DIM*MATRIX_DIM*sizeof(byte)); host_src = (byte*) malloc(sizeof(byte)*MATRIX_DIM*MATRIX_DIM); memset(host_src, 0, MATRIX_DIM*MATRIX_DIM*sizeof(byte)); for (int r = 0; r < image.rows ; r++) { for (int c = 0; c < image.cols; c++) { if (image.at<uchar>(r, c) == 0) // 0 is black, { //host_Wall[c + r * MATRIX_DIM] = 1; host_Wall[c + (MATRIX_DIM - 1 - r) * MATRIX_DIM] = 1; } //else //{ // host_Wall[c + r * MATRIX_DIM] = 1; // //printf("wall at %d, %d\n", c, r); //} } } host_WWall = (float *)malloc(sizeof(float)*WWAL_DIMx*WWAL_DIMy); host_W = (float *)malloc(sizeof(float)*W_DIMx*W_DIMy); for (int y = 0; y < WWAL_DIMy; y++) { for (int x = 0; x < WWAL_DIMx; x++) { host_WWall[x+y*WWAL_DIMx] = matrixWall[x][y]* (coef/2.0); host_W[x+y*W_DIMx] = matrixFlow[x][y]* (coef/2.0); } } // copy source loc: //src_loc.x = in_sourceLoc[0]; //src_loc.y = in_sourceLoc[1]; } void cPFaddWallLocation(int x, int y, bool val) { if (host_Wall != NULL) host_Wall[x+y*MATRIX_DIM] = val; } void cPFdelete(void) { ///*if (host_W != NULL) */free(host_W); ///*if (host_WWall != NULL) */free(host_WWall); ///*if (host_Wall != NULL) */free(host_Wall); //free(m_host); }
487e353b57c5369869bd8a25a239a901f976fb0f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ // Utilities and system includes #include <helper_cuda.h> #ifndef USE_TEXTURE_RGBA8UI texture<float4, 2, hipReadModeElementType> inTex; #else texture<uchar4, 2, hipReadModeElementType> inTex; #endif // clamp x to range [a, b] __device__ float clamp(float x, float a, float b) { return max(a, min(b, x)); } __device__ int clamp(int x, int a, int b) { return max(a, min(b, x)); } // convert floating point rgb color to 8-bit integer __device__ int rgbToInt(float r, float g, float b) { r = clamp(r, 0.0f, 255.0f); g = clamp(g, 0.0f, 255.0f); b = clamp(b, 0.0f, 255.0f); return (int(b)<<16) | (int(g)<<8) | int(r); } // get pixel from 2D image, with clamping to border __device__ uchar4 getPixel(int x, int y) { #ifndef USE_TEXTURE_RGBA8UI float4 res = tex2D(inTex, x, y); uchar4 ucres = make_uchar4(res.x*255.0f, res.y*255.0f, res.z*255.0f, res.w*255.0f); #else uchar4 ucres = tex2D(inTex, x, y); #endif return ucres; } // macros to make indexing shared memory easier #define SMEM(X, Y) sdata[(Y)*tilew+(X)] /* 2D convolution using shared memory - operates on 8-bit RGB data stored in 32-bit int - assumes kernel radius is less than or equal to block size - not optimized for performance _____________ | : : | |_ _:_____:_ _| | | | | | | | | |_ _|_____|_ _| r | : : | |___:_____:___| r bw r <----tilew----> */ //__device__ unsigned int *g_odataCopy = NULL; __global__ void cudaProcess(unsigned int *g_odata, int imgw, int imgh, int tilew, int r, float threshold, float highlight, int motionBlur, unsigned int * g_odataCopy) 
{ extern __shared__ uchar4 sdata[]; int tx = threadIdx.x; int ty = threadIdx.y; int bw = blockDim.x; int bh = blockDim.y; int x = blockIdx.x*bw + tx; int y = blockIdx.y*bh + ty; #if 0 uchar4 c4 = getPixel(x, y); g_odata[y*imgw+x] = rgbToInt(c4.z, c4.y, c4.x); #else // copy tile to shared memory // center region SMEM(r + tx, r + ty) = getPixel(x, y); // borders if (threadIdx.x < r) { // left SMEM(tx, r + ty) = getPixel(x - r, y); // right SMEM(r + bw + tx, r + ty) = getPixel(x + bw, y); } if (threadIdx.y < r) { // top SMEM(r + tx, ty) = getPixel(x, y - r); // bottom SMEM(r + tx, r + bh + ty) = getPixel(x, y + bh); } // load corners if ((threadIdx.x < r) && (threadIdx.y < r)) { // tl SMEM(tx, ty) = getPixel(x - r, y - r); // bl SMEM(tx, r + bh + ty) = getPixel(x - r, y + bh); // tr SMEM(r + bw + tx, ty) = getPixel(x + bh, y - r); // br SMEM(r + bw + tx, r + bh + ty) = getPixel(x + bw, y + bh); } // wait for loads to complete __syncthreads(); // perform convolution float rsum = 0.0f; float gsum = 0.0f; float bsum = 0.0f; float samples = 0.0f; for (int dy=-r; dy<=r; dy++) { for (int dx=-r; dx<=r; dx++) { #if 0 // try this to see the benefit of using shared memory uchar4 pixel = getPixel(x+dx, y+dy); #else uchar4 pixel = SMEM(r+tx+dx, r+ty+dy); #endif // only sum pixels within disc-shaped kernel float l = dx*dx + dy*dy; if (l <= r*r) { float r = float(pixel.x); float g = float(pixel.y); float b = float(pixel.z); #if 1 // brighten highlights float lum = (r + g + b) / (255*3); if (lum > threshold) { r *= highlight; g *= highlight; b *= highlight; } #endif rsum += r; gsum += g; bsum += b; samples += 1.0f; } } } rsum /= samples; gsum /= samples; bsum /= samples; // ABGR g_odata[y*imgw+x] = rgbToInt(rsum, gsum, bsum)+0.05*motionBlur*g_odataCopy[y*imgw+x]; g_odataCopy[y*imgw+x] = g_odata[y*imgw+x]; //g_odata[y*imgw+x] = rgbToInt(x,y,0); #endif } extern "C" void launch_cudaProcess(dim3 grid, dim3 block, int sbytes, hipArray *g_data_array, unsigned int *g_odata, int imgw, int 
imgh, int tilew, int radius, float threshold, float highlight, int motionBlur, unsigned int*g_odataCopy) { /*if (!g_odataCopy) { const size_t sz = 512 * sizeof(unsigned int); unsigned int *g_odataTemp; hipMalloc((void **)&g_odataTemp, sz); hipMemcpyToSymbol("g_odataCopy", &g_odataTemp, sizeof(unsigned int *), size_t(0),hipMemcpyHostToDevice); }*/ checkCudaErrors(hipBindTextureToArray(inTex, g_data_array)); struct hipChannelFormatDesc desc; checkCudaErrors(hipGetChannelDesc(&desc, g_data_array)); #if 0 printf("CUDA Array channel descriptor, bits per component:\n"); printf("X %d Y %d Z %d W %d, kind %d\n", desc.x,desc.y,desc.z,desc.w,desc.f); printf("Possible values for channel format kind: i %d, u%d, f%d:\n", hipChannelFormatKindSigned, hipChannelFormatKindUnsigned, hipChannelFormatKindFloat); #endif //printf("\n"); #ifdef GPU_PROFILING StopWatchInterface *timer = 0; sdkCreateTimer(&timer); int nIter = 30; for (int i = -1; i < nIter; ++i) { if (i == 0) { sdkStartTimer(&timer); } #endif hipLaunchKernelGGL(( cudaProcess), dim3(grid), dim3(block), sbytes , 0, g_odata, imgw, imgh, block.x+(2*radius), radius, 0.8f, 4.0f, motionBlur, g_odataCopy); #ifdef GPU_PROFILING } hipDeviceSynchronize(); sdkStopTimer(&timer); double dSeconds = sdkGetTimerValue(&timer)/((double)nIter * 1000.0); double dNumTexels = (double)imgw * (double)imgh; double mtexps = 1.0e-6 * dNumTexels/dSeconds; if (radius == 4) { printf("\n"); printf("postprocessGL, Throughput = %.4f MTexels/s, Time = %.5f s, Size = %.0f Texels, NumDevsUsed = %d, Workgroup = %u\n", mtexps, dSeconds, dNumTexels, 1, block.x * block.y); } #endif }
487e353b57c5369869bd8a25a239a901f976fb0f.cu
/* * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ // Utilities and system includes #include <helper_cuda.h> #ifndef USE_TEXTURE_RGBA8UI texture<float4, 2, cudaReadModeElementType> inTex; #else texture<uchar4, 2, cudaReadModeElementType> inTex; #endif // clamp x to range [a, b] __device__ float clamp(float x, float a, float b) { return max(a, min(b, x)); } __device__ int clamp(int x, int a, int b) { return max(a, min(b, x)); } // convert floating point rgb color to 8-bit integer __device__ int rgbToInt(float r, float g, float b) { r = clamp(r, 0.0f, 255.0f); g = clamp(g, 0.0f, 255.0f); b = clamp(b, 0.0f, 255.0f); return (int(b)<<16) | (int(g)<<8) | int(r); } // get pixel from 2D image, with clamping to border __device__ uchar4 getPixel(int x, int y) { #ifndef USE_TEXTURE_RGBA8UI float4 res = tex2D(inTex, x, y); uchar4 ucres = make_uchar4(res.x*255.0f, res.y*255.0f, res.z*255.0f, res.w*255.0f); #else uchar4 ucres = tex2D(inTex, x, y); #endif return ucres; } // macros to make indexing shared memory easier #define SMEM(X, Y) sdata[(Y)*tilew+(X)] /* 2D convolution using shared memory - operates on 8-bit RGB data stored in 32-bit int - assumes kernel radius is less than or equal to block size - not optimized for performance _____________ | : : | |_ _:_____:_ _| | | | | | | | | |_ _|_____|_ _| r | : : | |___:_____:___| r bw r <----tilew----> */ //__device__ unsigned int *g_odataCopy = NULL; __global__ void cudaProcess(unsigned int *g_odata, int imgw, int imgh, int tilew, int r, float threshold, float highlight, int motionBlur, unsigned int * g_odataCopy) { extern __shared__ uchar4 sdata[]; int tx = threadIdx.x; int ty = threadIdx.y; int bw 
= blockDim.x; int bh = blockDim.y; int x = blockIdx.x*bw + tx; int y = blockIdx.y*bh + ty; #if 0 uchar4 c4 = getPixel(x, y); g_odata[y*imgw+x] = rgbToInt(c4.z, c4.y, c4.x); #else // copy tile to shared memory // center region SMEM(r + tx, r + ty) = getPixel(x, y); // borders if (threadIdx.x < r) { // left SMEM(tx, r + ty) = getPixel(x - r, y); // right SMEM(r + bw + tx, r + ty) = getPixel(x + bw, y); } if (threadIdx.y < r) { // top SMEM(r + tx, ty) = getPixel(x, y - r); // bottom SMEM(r + tx, r + bh + ty) = getPixel(x, y + bh); } // load corners if ((threadIdx.x < r) && (threadIdx.y < r)) { // tl SMEM(tx, ty) = getPixel(x - r, y - r); // bl SMEM(tx, r + bh + ty) = getPixel(x - r, y + bh); // tr SMEM(r + bw + tx, ty) = getPixel(x + bh, y - r); // br SMEM(r + bw + tx, r + bh + ty) = getPixel(x + bw, y + bh); } // wait for loads to complete __syncthreads(); // perform convolution float rsum = 0.0f; float gsum = 0.0f; float bsum = 0.0f; float samples = 0.0f; for (int dy=-r; dy<=r; dy++) { for (int dx=-r; dx<=r; dx++) { #if 0 // try this to see the benefit of using shared memory uchar4 pixel = getPixel(x+dx, y+dy); #else uchar4 pixel = SMEM(r+tx+dx, r+ty+dy); #endif // only sum pixels within disc-shaped kernel float l = dx*dx + dy*dy; if (l <= r*r) { float r = float(pixel.x); float g = float(pixel.y); float b = float(pixel.z); #if 1 // brighten highlights float lum = (r + g + b) / (255*3); if (lum > threshold) { r *= highlight; g *= highlight; b *= highlight; } #endif rsum += r; gsum += g; bsum += b; samples += 1.0f; } } } rsum /= samples; gsum /= samples; bsum /= samples; // ABGR g_odata[y*imgw+x] = rgbToInt(rsum, gsum, bsum)+0.05*motionBlur*g_odataCopy[y*imgw+x]; g_odataCopy[y*imgw+x] = g_odata[y*imgw+x]; //g_odata[y*imgw+x] = rgbToInt(x,y,0); #endif } extern "C" void launch_cudaProcess(dim3 grid, dim3 block, int sbytes, cudaArray *g_data_array, unsigned int *g_odata, int imgw, int imgh, int tilew, int radius, float threshold, float highlight, int motionBlur, unsigned 
int*g_odataCopy) { /*if (!g_odataCopy) { const size_t sz = 512 * sizeof(unsigned int); unsigned int *g_odataTemp; cudaMalloc((void **)&g_odataTemp, sz); cudaMemcpyToSymbol("g_odataCopy", &g_odataTemp, sizeof(unsigned int *), size_t(0),cudaMemcpyHostToDevice); }*/ checkCudaErrors(cudaBindTextureToArray(inTex, g_data_array)); struct cudaChannelFormatDesc desc; checkCudaErrors(cudaGetChannelDesc(&desc, g_data_array)); #if 0 printf("CUDA Array channel descriptor, bits per component:\n"); printf("X %d Y %d Z %d W %d, kind %d\n", desc.x,desc.y,desc.z,desc.w,desc.f); printf("Possible values for channel format kind: i %d, u%d, f%d:\n", cudaChannelFormatKindSigned, cudaChannelFormatKindUnsigned, cudaChannelFormatKindFloat); #endif //printf("\n"); #ifdef GPU_PROFILING StopWatchInterface *timer = 0; sdkCreateTimer(&timer); int nIter = 30; for (int i = -1; i < nIter; ++i) { if (i == 0) { sdkStartTimer(&timer); } #endif cudaProcess<<< grid, block, sbytes >>>(g_odata, imgw, imgh, block.x+(2*radius), radius, 0.8f, 4.0f, motionBlur, g_odataCopy); #ifdef GPU_PROFILING } cudaDeviceSynchronize(); sdkStopTimer(&timer); double dSeconds = sdkGetTimerValue(&timer)/((double)nIter * 1000.0); double dNumTexels = (double)imgw * (double)imgh; double mtexps = 1.0e-6 * dNumTexels/dSeconds; if (radius == 4) { printf("\n"); printf("postprocessGL, Throughput = %.4f MTexels/s, Time = %.5f s, Size = %.0f Texels, NumDevsUsed = %d, Workgroup = %u\n", mtexps, dSeconds, dNumTexels, 1, block.x * block.y); } #endif }
f884586f58b3699af69109340682f49f2e2a5dae.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "backProp2.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *layer1 = NULL; hipMalloc(&layer1, XSIZE*YSIZE); float *dsyn2 = NULL; hipMalloc(&dsyn2, XSIZE*YSIZE); float *label = NULL; hipMalloc(&label, XSIZE*YSIZE); float *out = NULL; hipMalloc(&out, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( backProp2), dim3(gridBlock),dim3(threadBlock), 0, 0, layer1,dsyn2,label,out); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( backProp2), dim3(gridBlock),dim3(threadBlock), 0, 0, layer1,dsyn2,label,out); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( backProp2), dim3(gridBlock),dim3(threadBlock), 0, 0, layer1,dsyn2,label,out); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> 
>(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
f884586f58b3699af69109340682f49f2e2a5dae.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "backProp2.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *layer1 = NULL; cudaMalloc(&layer1, XSIZE*YSIZE); float *dsyn2 = NULL; cudaMalloc(&dsyn2, XSIZE*YSIZE); float *label = NULL; cudaMalloc(&label, XSIZE*YSIZE); float *out = NULL; cudaMalloc(&out, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); backProp2<<<gridBlock,threadBlock>>>(layer1,dsyn2,label,out); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { backProp2<<<gridBlock,threadBlock>>>(layer1,dsyn2,label,out); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { backProp2<<<gridBlock,threadBlock>>>(layer1,dsyn2,label,out); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
3e484d554bf598acdcc5d91c86a2640d4b6fac15.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <infiniband/verbs.h> #include <sys/types.h> #include <sys/socket.h> #include <string.h> #include <assert.h> #include <netinet/in.h> #include <arpa/inet.h> #include <unistd.h> #include "common.h" ///////////////////////////////////////////////// DO NOT CHANGE /////////////////////////////////////// #define TCP_PORT_OFFSET 23456 #define TCP_PORT_RANGE 1000 #define SINGLE_QUEUE_SIZE 10 #define CUDA_CHECK(f) do { \ hipError_t e = f; \ if (e != hipSuccess) { \ printf("Cuda failure %s:%d: '%s'\n", __FILE__, __LINE__, hipGetErrorString(e)); \ exit(1); \ } \ } while (0) __device__ int arr_min(int arr[], int arr_size) { int tid = threadIdx.x; int rhs, lhs; for (int stride = 1; stride < arr_size; stride *= 2) { if (tid >= stride && tid < arr_size) { rhs = arr[tid - stride]; } __syncthreads(); if (tid >= stride && tid < arr_size) { lhs = arr[tid]; if (rhs != 0) { if (lhs == 0) arr[tid] = rhs; else arr[tid] = min(arr[tid], rhs); } } __syncthreads(); } int ret = arr[arr_size - 1]; return ret; } __device__ void prefix_sum(int arr[], int arr_size) { int tid = threadIdx.x; int increment; for (int stride = 1; stride < min(blockDim.x, arr_size); stride *= 2) { if (tid >= stride && tid < arr_size) { increment = arr[tid - stride]; } __syncthreads(); if (tid >= stride && tid < arr_size) { arr[tid] += increment; } __syncthreads(); } } __device__ void gpu_process_image_device(uchar *in, uchar *out) { __shared__ int histogram[256]; __shared__ int hist_min[256]; int tid = threadIdx.x; if (tid < 256) { histogram[tid] = 0; } __syncthreads(); for (int i = tid; i < SQR(IMG_DIMENSION); i += blockDim.x) atomicAdd(&histogram[in[i]], 1); __syncthreads(); prefix_sum(histogram, 256); if (tid < 256) { hist_min[tid] = histogram[tid]; } __syncthreads(); int cdf_min = arr_min(hist_min, 256); __shared__ uchar map[256]; if (tid < 256) { int map_value = (float)(histogram[tid] - cdf_min) / (SQR(IMG_DIMENSION) - 
cdf_min) * 255; map[tid] = (uchar)map_value; } __syncthreads(); for (int i = tid; i < SQR(IMG_DIMENSION); i += blockDim.x) { out[i] = map[in[i]]; } return; } __global__ void gpu_process_image(uchar *in, uchar *out) { __shared__ int histogram[256]; __shared__ int hist_min[256]; int tid = threadIdx.x; if (tid < 256) { histogram[tid] = 0; } __syncthreads(); for (int i = tid; i < SQR(IMG_DIMENSION); i += blockDim.x) atomicAdd(&histogram[in[i]], 1); __syncthreads(); prefix_sum(histogram, 256); if (tid < 256) { hist_min[tid] = histogram[tid]; } __syncthreads(); int cdf_min = arr_min(hist_min, 256); __shared__ uchar map[256]; if (tid < 256) { int map_value = (float)(histogram[tid] - cdf_min) / (SQR(IMG_DIMENSION) - cdf_min) * 255; map[tid] = (uchar)map_value; } __syncthreads(); for (int i = tid; i < SQR(IMG_DIMENSION); i += blockDim.x) { out[i] = map[in[i]]; } return; } /* TODO: copy queue-based GPU kernel from hw2 */ ///////////// GPU ///////////////// __device__ void dequeue_request(volatile uchar* queue, volatile int* flags, uchar* image_out, int* image_id){ int tid = threadIdx.x; __shared__ int index; if(tid==0){ index = -1; for(int i=0; i<SINGLE_QUEUE_SIZE; i++){ if(flags[i] != -1){ index = i; *image_id = flags[i]; break; } } } __syncthreads(); __threadfence_system(); if(index != -1){ for (int i = tid; i < SQR(IMG_DIMENSION); i += blockDim.x){ image_out[i] = queue[index*SQR(IMG_DIMENSION)+i]; } } __syncthreads(); __threadfence_system(); if(tid==0 && index != -1){ flags[index] = -1; } __threadfence_system(); } __device__ void enqueue_response(volatile uchar* queue, volatile int* flags, uchar* image_out, int image_id){ int tid = threadIdx.x; __shared__ int index; if(tid==0){ index = -1; for(int i=0; index==-1; i=(i+1)%SINGLE_QUEUE_SIZE){ if(flags[i] == -1){ index = i; } } } __syncthreads(); __threadfence_system(); if(index != -1){ for (int i = tid; i < SQR(IMG_DIMENSION); i += blockDim.x){ queue[index*SQR(IMG_DIMENSION)+i] = image_out[i] ; } } __syncthreads(); 
__threadfence_system(); if(tid==0 && index != -1){ flags[index] = image_id; } __syncthreads(); __threadfence_system(); } __global__ void test_kernel(volatile uchar* cpu_gpu_queue, volatile int* cpu_gpu_flags, volatile uchar* gpu_cpu_queue, volatile int* gpu_cpu_flags, volatile int* running){ int tid = threadIdx.x; int bid = blockIdx.x; int num_blocks = gridDim.x; __shared__ uchar image_in[SQR(IMG_DIMENSION)]; __shared__ uchar image_out[SQR(IMG_DIMENSION)]; __shared__ int image_id; __shared__ int queue_index; __shared__ int flags_index; __shared__ bool started; if(tid==0){ started = false; queue_index = bid * SINGLE_QUEUE_SIZE * SQR(IMG_DIMENSION); flags_index = bid * SINGLE_QUEUE_SIZE; } __syncthreads(); __threadfence_system(); while(*running < num_blocks+1){ if(tid==0){ if(!started){ started = true; atomicAdd((int*)running, 1); } image_id = -1; } __syncthreads(); __threadfence_system(); dequeue_request(cpu_gpu_queue + queue_index, cpu_gpu_flags + flags_index, (uchar*)image_in, &image_id); __syncthreads(); __threadfence_system(); if(image_id != -1){ gpu_process_image_device((uchar*)image_in, (uchar*)image_out); } __syncthreads(); __threadfence_system(); if(image_id != -1){ enqueue_response(gpu_cpu_queue + queue_index, gpu_cpu_flags + flags_index, (uchar*)image_out, image_id); } __syncthreads(); __threadfence_system(); } } /* TODO: end */ void process_image_on_gpu(uchar *img_in, uchar *img_out) { uchar *gpu_image_in, *gpu_image_out; CUDA_CHECK(hipMalloc(&gpu_image_in, SQR(IMG_DIMENSION))); CUDA_CHECK(hipMalloc(&gpu_image_out, SQR(IMG_DIMENSION))); CUDA_CHECK(hipMemcpy(gpu_image_in, img_in, SQR(IMG_DIMENSION), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( gpu_process_image), dim3(1), dim3(1024), 0, 0, gpu_image_in, gpu_image_out); CUDA_CHECK(hipMemcpy(img_out, gpu_image_out, SQR(IMG_DIMENSION), hipMemcpyDeviceToHost)); CUDA_CHECK(hipDeviceSynchronize()); CUDA_CHECK(hipFree(gpu_image_in)); CUDA_CHECK(hipFree(gpu_image_out)); } void print_usage_and_die(char *progname) 
{ printf("usage: [port]\n"); exit(1); } struct server_context { mode_enum mode; int tcp_port; int listen_fd; /* Listening socket for TCP connection */ int socket_fd; /* Connected socket for TCP connection */ rpc_request *requests; /* Array of outstanding requests received from the network */ uchar *images_in; /* Input images for all outstanding requests */ uchar *images_out; /* Output images for all outstanding requests */ /* InfiniBand/verbs resources */ struct ibv_context *context; struct ibv_cq *cq; struct ibv_pd *pd; struct ibv_qp *qp; struct ibv_mr *mr_requests; /* Memory region for RPC requests */ struct ibv_mr *mr_images_in; /* Memory region for input images */ struct ibv_mr *mr_images_out; /* Memory region for output images */ /* TODO: add pointers and memory region(s) for CPU-GPU queues */ volatile uchar *cpu_gpu_queue; volatile uchar *gpu_cpu_queue; volatile int *cpu_gpu_flags; volatile int *gpu_cpu_flags; volatile int *running; struct ibv_mr *mr_cpu_gpu_queue; struct ibv_mr *mr_gpu_cpu_queue; struct ibv_mr *mr_cpu_gpu_flags; struct ibv_mr *mr_gpu_cpu_flags; struct ibv_mr *mr_running; }; int max_thread_blocks(int threads_num){ struct hipDeviceProp_t devProp; CUDA_CHECK(hipGetDeviceProperties(&devProp, 0)); int regs_per_thread = 32; int threads_per_threadblock = threads_num; int shared_mem_per_threadblock = sizeof(uchar)*SINGLE_QUEUE_SIZE*SQR(IMG_DIMENSION); int bound1 = devProp.sharedMemPerMultiprocessor/shared_mem_per_threadblock; int bound2 = devProp.sharedMemPerMultiprocessor/shared_mem_per_threadblock; int bound3 = devProp.regsPerMultiprocessor/regs_per_thread/threads_per_threadblock; int tmp = bound1 < bound2 ? bound1 : bound2; int min = tmp < bound3 ? 
tmp : bound3; int max_threadblocks = devProp.multiProcessorCount * min; return max_threadblocks; } void allocate_memory(server_context *ctx) { CUDA_CHECK(hipHostMalloc(&ctx->images_in, OUTSTANDING_REQUESTS * SQR(IMG_DIMENSION), 0)); CUDA_CHECK(hipHostMalloc(&ctx->images_out, OUTSTANDING_REQUESTS * SQR(IMG_DIMENSION), 0)); ctx->requests = (rpc_request *)calloc(OUTSTANDING_REQUESTS, sizeof(rpc_request)); /* TODO take CPU-GPU stream allocation code from hw2 */ int num_blocks = max_thread_blocks(1024); size_t queue_size_bytes = num_blocks * SINGLE_QUEUE_SIZE * SQR(IMG_DIMENSION) * sizeof(uchar); size_t flags_size_bytes = num_blocks * SINGLE_QUEUE_SIZE * sizeof(int); CUDA_CHECK(hipHostMalloc(&ctx->running, sizeof(int), hipHostMallocMapped)); CUDA_CHECK(hipHostMalloc(&ctx->cpu_gpu_queue, queue_size_bytes, hipHostMallocMapped)); CUDA_CHECK(hipHostMalloc(&ctx->gpu_cpu_queue, queue_size_bytes, hipHostMallocMapped)); CUDA_CHECK(hipHostMalloc(&ctx->cpu_gpu_flags, flags_size_bytes, hipHostMallocMapped)); CUDA_CHECK(hipHostMalloc(&ctx->gpu_cpu_flags, flags_size_bytes, hipHostMallocMapped)); *(ctx->running) = 0; for(int i=0; i<num_blocks * SINGLE_QUEUE_SIZE; i++){ ctx->cpu_gpu_flags[i] = -1; ctx->gpu_cpu_flags[i] = -1; } __sync_synchronize(); } void tcp_connection(server_context *ctx) { /* setup a TCP connection for initial negotiation with client */ int lfd = socket(AF_INET, SOCK_STREAM, 0); if (lfd < 0) { perror("socket"); exit(1); } ctx->listen_fd = lfd; struct sockaddr_in server_addr; memset(&server_addr, 0, sizeof(struct sockaddr_in)); server_addr.sin_family = AF_INET; server_addr.sin_addr.s_addr = INADDR_ANY; server_addr.sin_port = htons(ctx->tcp_port); if (bind(lfd, (struct sockaddr *)&server_addr, sizeof(struct sockaddr_in)) < 0) { perror("bind"); exit(1); } if (listen(lfd, 1)) { perror("listen"); exit(1); } printf("Server waiting on port %d. 
Client can connect\n", ctx->tcp_port); int sfd = accept(lfd, NULL, NULL); if (sfd < 0) { perror("accept"); exit(1); } printf("client connected\n"); ctx->socket_fd = sfd; } void initialize_verbs(server_context *ctx) { /* get device list */ struct ibv_device **device_list = ibv_get_device_list(NULL); if (!device_list) { printf("ERROR: ibv_get_device_list failed\n"); exit(1); } /* select first (and only) device to work with */ ctx->context = ibv_open_device(device_list[0]); /* create protection domain (PD) */ ctx->pd = ibv_alloc_pd(ctx->context); if (!ctx->pd) { printf("ERROR: ibv_alloc_pd() failed\n"); exit(1); } ctx->mr_requests = ibv_reg_mr(ctx->pd, ctx->requests, sizeof(rpc_request) * OUTSTANDING_REQUESTS, IBV_ACCESS_LOCAL_WRITE); if (!ctx->mr_requests) { printf("ibv_reg_mr() failed for requests\n"); exit(1); } ctx->mr_images_in = ibv_reg_mr(ctx->pd, ctx->images_in, OUTSTANDING_REQUESTS * SQR(IMG_DIMENSION), IBV_ACCESS_LOCAL_WRITE); if (!ctx->mr_images_in) { printf("ibv_reg_mr() failed for input images\n"); exit(1); } ctx->mr_images_out = ibv_reg_mr(ctx->pd, ctx->images_out, OUTSTANDING_REQUESTS * SQR(IMG_DIMENSION), IBV_ACCESS_LOCAL_WRITE); if (!ctx->mr_images_out) { printf("ibv_reg_mr() failed for output images\n"); exit(1); } /* TODO register additional memory regions for CPU-GPU queues */ int thread_blocks_num = max_thread_blocks(1024); ctx->mr_running = ibv_reg_mr(ctx->pd, (void*)ctx->running, sizeof(int), IBV_ACCESS_REMOTE_READ | IBV_ACCESS_REMOTE_WRITE | IBV_ACCESS_LOCAL_WRITE); if (!ctx->mr_running) { printf("ibv_reg_mr() failed for running\n"); exit(1); } ctx->mr_cpu_gpu_queue = ibv_reg_mr(ctx->pd, (void*)ctx->cpu_gpu_queue, sizeof(uchar) * SQR(IMG_DIMENSION) * SINGLE_QUEUE_SIZE * thread_blocks_num, IBV_ACCESS_REMOTE_READ | IBV_ACCESS_REMOTE_WRITE | IBV_ACCESS_LOCAL_WRITE); if (!ctx->mr_cpu_gpu_queue) { printf("ibv_reg_mr() failed for cpu_gpu_queue\n"); exit(1); } ctx->mr_cpu_gpu_flags = ibv_reg_mr(ctx->pd, (void*)ctx->cpu_gpu_flags, sizeof(int) * 
SINGLE_QUEUE_SIZE * thread_blocks_num, IBV_ACCESS_REMOTE_READ | IBV_ACCESS_REMOTE_WRITE | IBV_ACCESS_LOCAL_WRITE); if (!ctx->mr_cpu_gpu_flags) { printf("ibv_reg_mr() failed for cpu_gpu_flags\n"); exit(1); } ctx->mr_gpu_cpu_queue = ibv_reg_mr(ctx->pd, (void*)ctx->gpu_cpu_queue, sizeof(uchar) * SQR(IMG_DIMENSION) * SINGLE_QUEUE_SIZE * thread_blocks_num, IBV_ACCESS_REMOTE_READ | IBV_ACCESS_REMOTE_WRITE | IBV_ACCESS_LOCAL_WRITE); if (!ctx->mr_gpu_cpu_queue) { printf("ibv_reg_mr() failed for gpu_cpu_queue\n"); exit(1); } ctx->mr_gpu_cpu_flags = ibv_reg_mr(ctx->pd, (void*)ctx->gpu_cpu_flags, sizeof(int) * SINGLE_QUEUE_SIZE * thread_blocks_num, IBV_ACCESS_REMOTE_READ | IBV_ACCESS_REMOTE_WRITE | IBV_ACCESS_LOCAL_WRITE); if (!ctx->mr_gpu_cpu_flags) { printf("ibv_reg_mr() failed for gpu_cpu_flags\n"); exit(1); } /* create completion queue (CQ). We'll use same CQ for both send and receive parts of the QP */ ctx->cq = ibv_create_cq(ctx->context, 2 * OUTSTANDING_REQUESTS, NULL, NULL, 0); /* create a CQ with place for two completions per request */ if (!ctx->cq) { printf("ERROR: ibv_create_cq() failed\n"); exit(1); } /* create QP */ struct ibv_qp_init_attr qp_init_attr; memset(&qp_init_attr, 0, sizeof(struct ibv_qp_init_attr)); qp_init_attr.send_cq = ctx->cq; qp_init_attr.recv_cq = ctx->cq; qp_init_attr.qp_type = IBV_QPT_RC; /* we'll use RC transport service, which supports RDMA */ qp_init_attr.cap.max_send_wr = OUTSTANDING_REQUESTS; /* max of 1 WQE in-flight in SQ per request. that's enough for us */ qp_init_attr.cap.max_recv_wr = OUTSTANDING_REQUESTS; /* max of 1 WQE in-flight in RQ per request. 
that's enough for us */ qp_init_attr.cap.max_send_sge = 1; /* 1 SGE in each send WQE */ qp_init_attr.cap.max_recv_sge = 1; /* 1 SGE in each recv WQE */ ctx->qp = ibv_create_qp(ctx->pd, &qp_init_attr); if (!ctx->qp) { printf("ERROR: ibv_create_qp() failed\n"); exit(1); } } void exchange_parameters(server_context *ctx, ib_info_t *client_info) { /* ok, before we continue we need to get info about the client' QP, and send it info about ours. * namely: QP number, and LID. * we'll use the TCP socket for that */ /* first query port for its LID (L2 address) */ int ret; struct ibv_port_attr port_attr; ret = ibv_query_port(ctx->context, IB_PORT_SERVER, &port_attr); if (ret) { printf("ERROR: ibv_query_port() failed\n"); exit(1); } /* now send our info to client */ struct ib_info_t my_info; my_info.lid = port_attr.lid; my_info.qpn = ctx->qp->qp_num; /* TODO add additional server rkeys / addresses here if needed */ my_info.blocks_num = max_thread_blocks(1024); my_info.rkey_running = (int)ctx->mr_running->rkey; my_info.addr_running = (uint64_t)ctx->mr_running->addr; my_info.rkey_gpu_cpu_queue = (int)ctx->mr_gpu_cpu_queue->rkey; my_info.addr_gpu_cpu_queue = (uint64_t)ctx->mr_gpu_cpu_queue->addr; my_info.rkey_cpu_gpu_queue = (int)ctx->mr_cpu_gpu_queue->rkey; my_info.addr_cpu_gpu_queue = (uint64_t)ctx->mr_cpu_gpu_queue->addr; my_info.rkey_gpu_cpu_flags = (int)ctx->mr_gpu_cpu_flags->rkey; my_info.addr_gpu_cpu_flags = (uint64_t)ctx->mr_gpu_cpu_flags->addr; my_info.rkey_cpu_gpu_flags = (int)ctx->mr_cpu_gpu_flags->rkey; my_info.addr_cpu_gpu_flags = (uint64_t)ctx->mr_cpu_gpu_flags->addr; ret = send(ctx->socket_fd, &my_info, sizeof(struct ib_info_t), 0); if (ret < 0) { perror("send"); exit(1); } /* get client's info */ recv(ctx->socket_fd, client_info, sizeof(struct ib_info_t), 0); if (ret < 0) { perror("recv"); exit(1); } /* we don't need TCP anymore. 
kill the socket */ close(ctx->socket_fd); close(ctx->listen_fd); ctx->socket_fd = ctx->listen_fd = 0; } /* Post a receive buffer of the given index (from the requests array) to the receive queue */ void post_recv(server_context *ctx, int index) { struct ibv_recv_wr recv_wr = {}; /* this is the receive work request (the verb's representation for receive WQE) */ ibv_sge sgl; recv_wr.wr_id = index; sgl.addr = (uintptr_t)&ctx->requests[index]; sgl.length = sizeof(ctx->requests[0]); sgl.lkey = ctx->mr_requests->lkey; recv_wr.sg_list = &sgl; recv_wr.num_sge = 1; if (ibv_post_recv(ctx->qp, &recv_wr, NULL)) { printf("ERROR: ibv_post_recv() failed\n"); exit(1); } } void connect_qp(server_context *ctx, ib_info_t *client_info) { /* this is a multi-phase process, moving the state machine of the QP step by step * until we are ready */ struct ibv_qp_attr qp_attr; /*QP state: RESET -> INIT */ memset(&qp_attr, 0, sizeof(struct ibv_qp_attr)); qp_attr.qp_state = IBV_QPS_INIT; qp_attr.pkey_index = 0; qp_attr.port_num = IB_PORT_SERVER; qp_attr.qp_access_flags = IBV_ACCESS_REMOTE_WRITE | IBV_ACCESS_REMOTE_READ; /* we'll allow client to RDMA write and read on this QP */ int ret = ibv_modify_qp(ctx->qp, &qp_attr, IBV_QP_STATE | IBV_QP_PKEY_INDEX | IBV_QP_PORT | IBV_QP_ACCESS_FLAGS); if (ret) { printf("ERROR: ibv_modify_qp() to INIT failed\n"); exit(1); } /*QP: state: INIT -> RTR (Ready to Receive) */ memset(&qp_attr, 0, sizeof(struct ibv_qp_attr)); qp_attr.qp_state = IBV_QPS_RTR; qp_attr.path_mtu = IBV_MTU_4096; qp_attr.dest_qp_num = client_info->qpn; /* qp number of client */ qp_attr.rq_psn = 0 ; qp_attr.max_dest_rd_atomic = 1; /* max in-flight RDMA reads */ qp_attr.min_rnr_timer = 12; qp_attr.ah_attr.is_global = 0; /* No Network Layer (L3) */ qp_attr.ah_attr.dlid = client_info->lid; /* LID (L2 Address) of client */ qp_attr.ah_attr.sl = 0; qp_attr.ah_attr.src_path_bits = 0; qp_attr.ah_attr.port_num = IB_PORT_SERVER; ret = ibv_modify_qp(ctx->qp, &qp_attr, IBV_QP_STATE | IBV_QP_AV | 
IBV_QP_PATH_MTU | IBV_QP_DEST_QPN | IBV_QP_RQ_PSN | IBV_QP_MAX_DEST_RD_ATOMIC | IBV_QP_MIN_RNR_TIMER); if (ret) { printf("ERROR: ibv_modify_qp() to RTR failed\n"); exit(1); } /*QP: state: RTR -> RTS (Ready to Send) */ memset(&qp_attr, 0, sizeof(struct ibv_qp_attr)); qp_attr.qp_state = IBV_QPS_RTS; qp_attr.sq_psn = 0; qp_attr.timeout = 14; qp_attr.retry_cnt = 7; qp_attr.rnr_retry = 7; qp_attr.max_rd_atomic = 1; ret = ibv_modify_qp(ctx->qp, &qp_attr, IBV_QP_STATE | IBV_QP_TIMEOUT | IBV_QP_RETRY_CNT | IBV_QP_RNR_RETRY | IBV_QP_SQ_PSN | IBV_QP_MAX_QP_RD_ATOMIC); if (ret) { printf("ERROR: ibv_modify_qp() to RTS failed\n"); exit(1); } /* now let's populate the receive QP with recv WQEs */ for (int i = 0; i < OUTSTANDING_REQUESTS; i++) { post_recv(ctx, i); } } void event_loop(server_context *ctx) { /* so the protocol goes like this: * 1. we'll wait for a CQE indicating that we got an Send request from the client. * this tells us we have new work to do. The wr_id we used in post_recv tells us * where the request is. * 2. now we send an RDMA Read to the client to retrieve the request. * we will get a completion indicating the read has completed. * 3. we process the request on the GPU. * 4. upon completion, we send an RDMA Write with immediate to the client with * the results. 
*/ struct ibv_send_wr send_wr; struct ibv_send_wr *bad_send_wr; rpc_request* req; uchar *img_in; uchar *img_out; ibv_sge sgl; bool terminate = false; while (!terminate) { /*step 1: poll for CQE */ struct ibv_wc wc; int ncqes; do { ncqes = ibv_poll_cq(ctx->cq, 1, &wc); } while (ncqes == 0); if (ncqes < 0) { printf("ERROR: ibv_poll_cq() failed\n"); exit(1); } if (wc.status != IBV_WC_SUCCESS) { printf("ERROR: got CQE with error '%s' (%d) (line %d)\n", ibv_wc_status_str(wc.status), wc.status, __LINE__); exit(1); } switch (wc.opcode) { case IBV_WC_RECV: /* Received a new request from the client */ req = &ctx->requests[wc.wr_id]; img_in = &ctx->images_in[wc.wr_id * SQR(IMG_DIMENSION)]; /* Terminate signal */ if (req->request_id == -1) { printf("Terminating...\n"); terminate = true; break; } if (ctx->mode != MODE_RPC_SERVER) { printf("Got client RPC request when running in queue mode.\n"); exit(1); } /* send RDMA Read to client to read the input */ memset(&send_wr, 0, sizeof(struct ibv_send_wr)); send_wr.wr_id = wc.wr_id; sgl.addr = (uintptr_t)img_in; sgl.length = req->input_length; sgl.lkey = ctx->mr_images_in->lkey; send_wr.sg_list = &sgl; send_wr.num_sge = 1; send_wr.opcode = IBV_WR_RDMA_READ; send_wr.send_flags = IBV_SEND_SIGNALED; send_wr.wr.rdma.remote_addr = req->input_addr; send_wr.wr.rdma.rkey = req->input_rkey; if (ibv_post_send(ctx->qp, &send_wr, &bad_send_wr)) { printf("ERROR: ibv_post_send() failed\n"); exit(1); } break; case IBV_WC_RDMA_READ: /* Completed RDMA read for a request */ req = &ctx->requests[wc.wr_id]; img_in = &ctx->images_in[wc.wr_id * SQR(IMG_DIMENSION)]; img_out = &ctx->images_out[wc.wr_id * SQR(IMG_DIMENSION)]; process_image_on_gpu(img_in, img_out); /* send RDMA Write with immediate to client with the response */ memset(&send_wr, 0, sizeof(struct ibv_send_wr)); send_wr.wr_id = wc.wr_id; ibv_sge sgl; sgl.addr = (uintptr_t)img_out; sgl.length = req->output_length; sgl.lkey = ctx->mr_images_out->lkey; send_wr.sg_list = &sgl; send_wr.num_sge = 1; 
send_wr.opcode = IBV_WR_RDMA_WRITE_WITH_IMM; send_wr.send_flags = IBV_SEND_SIGNALED; send_wr.wr.rdma.remote_addr = req->output_addr; send_wr.wr.rdma.rkey = req->output_rkey; send_wr.imm_data = req->request_id; if (ibv_post_send(ctx->qp, &send_wr, &bad_send_wr)) { printf("ERROR: ibv_post_send() failed\n"); exit(1); } break; case IBV_WC_RDMA_WRITE: /* Completed RDMA Write - reuse buffers for receiving the next requests */ post_recv(ctx, wc.wr_id); break; default: printf("Unexpected completion\n"); assert(false); } } } void teardown_context(server_context *ctx) { /* cleanup */ ibv_destroy_qp(ctx->qp); ibv_destroy_cq(ctx->cq); ibv_dereg_mr(ctx->mr_requests); ibv_dereg_mr(ctx->mr_images_in); ibv_dereg_mr(ctx->mr_images_out); /* TODO destroy the additional server MRs here if needed */ CUDA_CHECK(hipHostFree((void *)ctx->cpu_gpu_queue)); CUDA_CHECK(hipHostFree((void *)ctx->gpu_cpu_queue)); CUDA_CHECK(hipHostFree((void *)ctx->cpu_gpu_flags)); CUDA_CHECK(hipHostFree((void *)ctx->gpu_cpu_flags)); CUDA_CHECK(hipHostFree((void *)ctx->running)); ibv_dereg_mr(ctx->mr_cpu_gpu_queue); ibv_dereg_mr(ctx->mr_gpu_cpu_queue); ibv_dereg_mr(ctx->mr_cpu_gpu_flags); ibv_dereg_mr(ctx->mr_gpu_cpu_flags); ibv_dereg_mr(ctx->mr_running); ibv_dealloc_pd(ctx->pd); ibv_close_device(ctx->context); } int main(int argc, char *argv[]) { server_context ctx; parse_arguments(argc, argv, &ctx.mode, &ctx.tcp_port); if (!ctx.tcp_port) { srand(time(NULL)); ctx.tcp_port = TCP_PORT_OFFSET + (rand() % TCP_PORT_RANGE); /* to avoid conflicts with other users of the machine */ } /* Initialize memory and CUDA resources */ allocate_memory(&ctx); /* Create a TCP connection with the client to exchange InfiniBand parameters */ tcp_connection(&ctx); /* now that client has connected to us via TCP we'll open up some Infiniband resources and send it the parameters */ initialize_verbs(&ctx); /* exchange InfiniBand parameters with the client */ ib_info_t client_info; exchange_parameters(&ctx, &client_info); /* now need to 
connect the QP to the client's QP. */ connect_qp(&ctx, &client_info); if (ctx.mode == MODE_QUEUE) { /* TODO run the GPU persistent kernel from hw2, for 1024 threads per block */ int thread_blocks_num = max_thread_blocks(1024); hipLaunchKernelGGL(( test_kernel), dim3(thread_blocks_num), dim3(1024), 0, 0, ctx.cpu_gpu_queue, ctx.cpu_gpu_flags, ctx.gpu_cpu_queue, ctx.gpu_cpu_flags, ctx.running); CUDA_CHECK(hipDeviceSynchronize()); } /* now finally we get to the actual work, in the event loop */ /* The event loop can be used for queue mode for the termination message */ event_loop(&ctx); printf("Done\n"); teardown_context(&ctx); return 0; }
3e484d554bf598acdcc5d91c86a2640d4b6fac15.cu
#include <infiniband/verbs.h> #include <sys/types.h> #include <sys/socket.h> #include <string.h> #include <assert.h> #include <netinet/in.h> #include <arpa/inet.h> #include <unistd.h> #include "common.h" ///////////////////////////////////////////////// DO NOT CHANGE /////////////////////////////////////// #define TCP_PORT_OFFSET 23456 #define TCP_PORT_RANGE 1000 #define SINGLE_QUEUE_SIZE 10 #define CUDA_CHECK(f) do { \ cudaError_t e = f; \ if (e != cudaSuccess) { \ printf("Cuda failure %s:%d: '%s'\n", __FILE__, __LINE__, cudaGetErrorString(e)); \ exit(1); \ } \ } while (0) __device__ int arr_min(int arr[], int arr_size) { int tid = threadIdx.x; int rhs, lhs; for (int stride = 1; stride < arr_size; stride *= 2) { if (tid >= stride && tid < arr_size) { rhs = arr[tid - stride]; } __syncthreads(); if (tid >= stride && tid < arr_size) { lhs = arr[tid]; if (rhs != 0) { if (lhs == 0) arr[tid] = rhs; else arr[tid] = min(arr[tid], rhs); } } __syncthreads(); } int ret = arr[arr_size - 1]; return ret; } __device__ void prefix_sum(int arr[], int arr_size) { int tid = threadIdx.x; int increment; for (int stride = 1; stride < min(blockDim.x, arr_size); stride *= 2) { if (tid >= stride && tid < arr_size) { increment = arr[tid - stride]; } __syncthreads(); if (tid >= stride && tid < arr_size) { arr[tid] += increment; } __syncthreads(); } } __device__ void gpu_process_image_device(uchar *in, uchar *out) { __shared__ int histogram[256]; __shared__ int hist_min[256]; int tid = threadIdx.x; if (tid < 256) { histogram[tid] = 0; } __syncthreads(); for (int i = tid; i < SQR(IMG_DIMENSION); i += blockDim.x) atomicAdd(&histogram[in[i]], 1); __syncthreads(); prefix_sum(histogram, 256); if (tid < 256) { hist_min[tid] = histogram[tid]; } __syncthreads(); int cdf_min = arr_min(hist_min, 256); __shared__ uchar map[256]; if (tid < 256) { int map_value = (float)(histogram[tid] - cdf_min) / (SQR(IMG_DIMENSION) - cdf_min) * 255; map[tid] = (uchar)map_value; } __syncthreads(); for (int i = tid; i < 
SQR(IMG_DIMENSION); i += blockDim.x) { out[i] = map[in[i]]; } return; } __global__ void gpu_process_image(uchar *in, uchar *out) { __shared__ int histogram[256]; __shared__ int hist_min[256]; int tid = threadIdx.x; if (tid < 256) { histogram[tid] = 0; } __syncthreads(); for (int i = tid; i < SQR(IMG_DIMENSION); i += blockDim.x) atomicAdd(&histogram[in[i]], 1); __syncthreads(); prefix_sum(histogram, 256); if (tid < 256) { hist_min[tid] = histogram[tid]; } __syncthreads(); int cdf_min = arr_min(hist_min, 256); __shared__ uchar map[256]; if (tid < 256) { int map_value = (float)(histogram[tid] - cdf_min) / (SQR(IMG_DIMENSION) - cdf_min) * 255; map[tid] = (uchar)map_value; } __syncthreads(); for (int i = tid; i < SQR(IMG_DIMENSION); i += blockDim.x) { out[i] = map[in[i]]; } return; } /* TODO: copy queue-based GPU kernel from hw2 */ ///////////// GPU ///////////////// __device__ void dequeue_request(volatile uchar* queue, volatile int* flags, uchar* image_out, int* image_id){ int tid = threadIdx.x; __shared__ int index; if(tid==0){ index = -1; for(int i=0; i<SINGLE_QUEUE_SIZE; i++){ if(flags[i] != -1){ index = i; *image_id = flags[i]; break; } } } __syncthreads(); __threadfence_system(); if(index != -1){ for (int i = tid; i < SQR(IMG_DIMENSION); i += blockDim.x){ image_out[i] = queue[index*SQR(IMG_DIMENSION)+i]; } } __syncthreads(); __threadfence_system(); if(tid==0 && index != -1){ flags[index] = -1; } __threadfence_system(); } __device__ void enqueue_response(volatile uchar* queue, volatile int* flags, uchar* image_out, int image_id){ int tid = threadIdx.x; __shared__ int index; if(tid==0){ index = -1; for(int i=0; index==-1; i=(i+1)%SINGLE_QUEUE_SIZE){ if(flags[i] == -1){ index = i; } } } __syncthreads(); __threadfence_system(); if(index != -1){ for (int i = tid; i < SQR(IMG_DIMENSION); i += blockDim.x){ queue[index*SQR(IMG_DIMENSION)+i] = image_out[i] ; } } __syncthreads(); __threadfence_system(); if(tid==0 && index != -1){ flags[index] = image_id; } __syncthreads(); 
__threadfence_system(); } __global__ void test_kernel(volatile uchar* cpu_gpu_queue, volatile int* cpu_gpu_flags, volatile uchar* gpu_cpu_queue, volatile int* gpu_cpu_flags, volatile int* running){ int tid = threadIdx.x; int bid = blockIdx.x; int num_blocks = gridDim.x; __shared__ uchar image_in[SQR(IMG_DIMENSION)]; __shared__ uchar image_out[SQR(IMG_DIMENSION)]; __shared__ int image_id; __shared__ int queue_index; __shared__ int flags_index; __shared__ bool started; if(tid==0){ started = false; queue_index = bid * SINGLE_QUEUE_SIZE * SQR(IMG_DIMENSION); flags_index = bid * SINGLE_QUEUE_SIZE; } __syncthreads(); __threadfence_system(); while(*running < num_blocks+1){ if(tid==0){ if(!started){ started = true; atomicAdd((int*)running, 1); } image_id = -1; } __syncthreads(); __threadfence_system(); dequeue_request(cpu_gpu_queue + queue_index, cpu_gpu_flags + flags_index, (uchar*)image_in, &image_id); __syncthreads(); __threadfence_system(); if(image_id != -1){ gpu_process_image_device((uchar*)image_in, (uchar*)image_out); } __syncthreads(); __threadfence_system(); if(image_id != -1){ enqueue_response(gpu_cpu_queue + queue_index, gpu_cpu_flags + flags_index, (uchar*)image_out, image_id); } __syncthreads(); __threadfence_system(); } } /* TODO: end */ void process_image_on_gpu(uchar *img_in, uchar *img_out) { uchar *gpu_image_in, *gpu_image_out; CUDA_CHECK(cudaMalloc(&gpu_image_in, SQR(IMG_DIMENSION))); CUDA_CHECK(cudaMalloc(&gpu_image_out, SQR(IMG_DIMENSION))); CUDA_CHECK(cudaMemcpy(gpu_image_in, img_in, SQR(IMG_DIMENSION), cudaMemcpyHostToDevice)); gpu_process_image<<<1, 1024>>>(gpu_image_in, gpu_image_out); CUDA_CHECK(cudaMemcpy(img_out, gpu_image_out, SQR(IMG_DIMENSION), cudaMemcpyDeviceToHost)); CUDA_CHECK(cudaDeviceSynchronize()); CUDA_CHECK(cudaFree(gpu_image_in)); CUDA_CHECK(cudaFree(gpu_image_out)); } void print_usage_and_die(char *progname) { printf("usage: [port]\n"); exit(1); } struct server_context { mode_enum mode; int tcp_port; int listen_fd; /* Listening 
socket for TCP connection */ int socket_fd; /* Connected socket for TCP connection */ rpc_request *requests; /* Array of outstanding requests received from the network */ uchar *images_in; /* Input images for all outstanding requests */ uchar *images_out; /* Output images for all outstanding requests */ /* InfiniBand/verbs resources */ struct ibv_context *context; struct ibv_cq *cq; struct ibv_pd *pd; struct ibv_qp *qp; struct ibv_mr *mr_requests; /* Memory region for RPC requests */ struct ibv_mr *mr_images_in; /* Memory region for input images */ struct ibv_mr *mr_images_out; /* Memory region for output images */ /* TODO: add pointers and memory region(s) for CPU-GPU queues */ volatile uchar *cpu_gpu_queue; volatile uchar *gpu_cpu_queue; volatile int *cpu_gpu_flags; volatile int *gpu_cpu_flags; volatile int *running; struct ibv_mr *mr_cpu_gpu_queue; struct ibv_mr *mr_gpu_cpu_queue; struct ibv_mr *mr_cpu_gpu_flags; struct ibv_mr *mr_gpu_cpu_flags; struct ibv_mr *mr_running; }; int max_thread_blocks(int threads_num){ struct cudaDeviceProp devProp; CUDA_CHECK(cudaGetDeviceProperties(&devProp, 0)); int regs_per_thread = 32; int threads_per_threadblock = threads_num; int shared_mem_per_threadblock = sizeof(uchar)*SINGLE_QUEUE_SIZE*SQR(IMG_DIMENSION); int bound1 = devProp.sharedMemPerMultiprocessor/shared_mem_per_threadblock; int bound2 = devProp.sharedMemPerMultiprocessor/shared_mem_per_threadblock; int bound3 = devProp.regsPerMultiprocessor/regs_per_thread/threads_per_threadblock; int tmp = bound1 < bound2 ? bound1 : bound2; int min = tmp < bound3 ? 
tmp : bound3; int max_threadblocks = devProp.multiProcessorCount * min; return max_threadblocks; } void allocate_memory(server_context *ctx) { CUDA_CHECK(cudaHostAlloc(&ctx->images_in, OUTSTANDING_REQUESTS * SQR(IMG_DIMENSION), 0)); CUDA_CHECK(cudaHostAlloc(&ctx->images_out, OUTSTANDING_REQUESTS * SQR(IMG_DIMENSION), 0)); ctx->requests = (rpc_request *)calloc(OUTSTANDING_REQUESTS, sizeof(rpc_request)); /* TODO take CPU-GPU stream allocation code from hw2 */ int num_blocks = max_thread_blocks(1024); size_t queue_size_bytes = num_blocks * SINGLE_QUEUE_SIZE * SQR(IMG_DIMENSION) * sizeof(uchar); size_t flags_size_bytes = num_blocks * SINGLE_QUEUE_SIZE * sizeof(int); CUDA_CHECK(cudaHostAlloc(&ctx->running, sizeof(int), cudaHostAllocMapped)); CUDA_CHECK(cudaHostAlloc(&ctx->cpu_gpu_queue, queue_size_bytes, cudaHostAllocMapped)); CUDA_CHECK(cudaHostAlloc(&ctx->gpu_cpu_queue, queue_size_bytes, cudaHostAllocMapped)); CUDA_CHECK(cudaHostAlloc(&ctx->cpu_gpu_flags, flags_size_bytes, cudaHostAllocMapped)); CUDA_CHECK(cudaHostAlloc(&ctx->gpu_cpu_flags, flags_size_bytes, cudaHostAllocMapped)); *(ctx->running) = 0; for(int i=0; i<num_blocks * SINGLE_QUEUE_SIZE; i++){ ctx->cpu_gpu_flags[i] = -1; ctx->gpu_cpu_flags[i] = -1; } __sync_synchronize(); } void tcp_connection(server_context *ctx) { /* setup a TCP connection for initial negotiation with client */ int lfd = socket(AF_INET, SOCK_STREAM, 0); if (lfd < 0) { perror("socket"); exit(1); } ctx->listen_fd = lfd; struct sockaddr_in server_addr; memset(&server_addr, 0, sizeof(struct sockaddr_in)); server_addr.sin_family = AF_INET; server_addr.sin_addr.s_addr = INADDR_ANY; server_addr.sin_port = htons(ctx->tcp_port); if (bind(lfd, (struct sockaddr *)&server_addr, sizeof(struct sockaddr_in)) < 0) { perror("bind"); exit(1); } if (listen(lfd, 1)) { perror("listen"); exit(1); } printf("Server waiting on port %d. 
Client can connect\n", ctx->tcp_port); int sfd = accept(lfd, NULL, NULL); if (sfd < 0) { perror("accept"); exit(1); } printf("client connected\n"); ctx->socket_fd = sfd; } void initialize_verbs(server_context *ctx) { /* get device list */ struct ibv_device **device_list = ibv_get_device_list(NULL); if (!device_list) { printf("ERROR: ibv_get_device_list failed\n"); exit(1); } /* select first (and only) device to work with */ ctx->context = ibv_open_device(device_list[0]); /* create protection domain (PD) */ ctx->pd = ibv_alloc_pd(ctx->context); if (!ctx->pd) { printf("ERROR: ibv_alloc_pd() failed\n"); exit(1); } ctx->mr_requests = ibv_reg_mr(ctx->pd, ctx->requests, sizeof(rpc_request) * OUTSTANDING_REQUESTS, IBV_ACCESS_LOCAL_WRITE); if (!ctx->mr_requests) { printf("ibv_reg_mr() failed for requests\n"); exit(1); } ctx->mr_images_in = ibv_reg_mr(ctx->pd, ctx->images_in, OUTSTANDING_REQUESTS * SQR(IMG_DIMENSION), IBV_ACCESS_LOCAL_WRITE); if (!ctx->mr_images_in) { printf("ibv_reg_mr() failed for input images\n"); exit(1); } ctx->mr_images_out = ibv_reg_mr(ctx->pd, ctx->images_out, OUTSTANDING_REQUESTS * SQR(IMG_DIMENSION), IBV_ACCESS_LOCAL_WRITE); if (!ctx->mr_images_out) { printf("ibv_reg_mr() failed for output images\n"); exit(1); } /* TODO register additional memory regions for CPU-GPU queues */ int thread_blocks_num = max_thread_blocks(1024); ctx->mr_running = ibv_reg_mr(ctx->pd, (void*)ctx->running, sizeof(int), IBV_ACCESS_REMOTE_READ | IBV_ACCESS_REMOTE_WRITE | IBV_ACCESS_LOCAL_WRITE); if (!ctx->mr_running) { printf("ibv_reg_mr() failed for running\n"); exit(1); } ctx->mr_cpu_gpu_queue = ibv_reg_mr(ctx->pd, (void*)ctx->cpu_gpu_queue, sizeof(uchar) * SQR(IMG_DIMENSION) * SINGLE_QUEUE_SIZE * thread_blocks_num, IBV_ACCESS_REMOTE_READ | IBV_ACCESS_REMOTE_WRITE | IBV_ACCESS_LOCAL_WRITE); if (!ctx->mr_cpu_gpu_queue) { printf("ibv_reg_mr() failed for cpu_gpu_queue\n"); exit(1); } ctx->mr_cpu_gpu_flags = ibv_reg_mr(ctx->pd, (void*)ctx->cpu_gpu_flags, sizeof(int) * 
SINGLE_QUEUE_SIZE * thread_blocks_num, IBV_ACCESS_REMOTE_READ | IBV_ACCESS_REMOTE_WRITE | IBV_ACCESS_LOCAL_WRITE); if (!ctx->mr_cpu_gpu_flags) { printf("ibv_reg_mr() failed for cpu_gpu_flags\n"); exit(1); } ctx->mr_gpu_cpu_queue = ibv_reg_mr(ctx->pd, (void*)ctx->gpu_cpu_queue, sizeof(uchar) * SQR(IMG_DIMENSION) * SINGLE_QUEUE_SIZE * thread_blocks_num, IBV_ACCESS_REMOTE_READ | IBV_ACCESS_REMOTE_WRITE | IBV_ACCESS_LOCAL_WRITE); if (!ctx->mr_gpu_cpu_queue) { printf("ibv_reg_mr() failed for gpu_cpu_queue\n"); exit(1); } ctx->mr_gpu_cpu_flags = ibv_reg_mr(ctx->pd, (void*)ctx->gpu_cpu_flags, sizeof(int) * SINGLE_QUEUE_SIZE * thread_blocks_num, IBV_ACCESS_REMOTE_READ | IBV_ACCESS_REMOTE_WRITE | IBV_ACCESS_LOCAL_WRITE); if (!ctx->mr_gpu_cpu_flags) { printf("ibv_reg_mr() failed for gpu_cpu_flags\n"); exit(1); } /* create completion queue (CQ). We'll use same CQ for both send and receive parts of the QP */ ctx->cq = ibv_create_cq(ctx->context, 2 * OUTSTANDING_REQUESTS, NULL, NULL, 0); /* create a CQ with place for two completions per request */ if (!ctx->cq) { printf("ERROR: ibv_create_cq() failed\n"); exit(1); } /* create QP */ struct ibv_qp_init_attr qp_init_attr; memset(&qp_init_attr, 0, sizeof(struct ibv_qp_init_attr)); qp_init_attr.send_cq = ctx->cq; qp_init_attr.recv_cq = ctx->cq; qp_init_attr.qp_type = IBV_QPT_RC; /* we'll use RC transport service, which supports RDMA */ qp_init_attr.cap.max_send_wr = OUTSTANDING_REQUESTS; /* max of 1 WQE in-flight in SQ per request. that's enough for us */ qp_init_attr.cap.max_recv_wr = OUTSTANDING_REQUESTS; /* max of 1 WQE in-flight in RQ per request. 
that's enough for us */ qp_init_attr.cap.max_send_sge = 1; /* 1 SGE in each send WQE */ qp_init_attr.cap.max_recv_sge = 1; /* 1 SGE in each recv WQE */ ctx->qp = ibv_create_qp(ctx->pd, &qp_init_attr); if (!ctx->qp) { printf("ERROR: ibv_create_qp() failed\n"); exit(1); } } void exchange_parameters(server_context *ctx, ib_info_t *client_info) { /* ok, before we continue we need to get info about the client' QP, and send it info about ours. * namely: QP number, and LID. * we'll use the TCP socket for that */ /* first query port for its LID (L2 address) */ int ret; struct ibv_port_attr port_attr; ret = ibv_query_port(ctx->context, IB_PORT_SERVER, &port_attr); if (ret) { printf("ERROR: ibv_query_port() failed\n"); exit(1); } /* now send our info to client */ struct ib_info_t my_info; my_info.lid = port_attr.lid; my_info.qpn = ctx->qp->qp_num; /* TODO add additional server rkeys / addresses here if needed */ my_info.blocks_num = max_thread_blocks(1024); my_info.rkey_running = (int)ctx->mr_running->rkey; my_info.addr_running = (uint64_t)ctx->mr_running->addr; my_info.rkey_gpu_cpu_queue = (int)ctx->mr_gpu_cpu_queue->rkey; my_info.addr_gpu_cpu_queue = (uint64_t)ctx->mr_gpu_cpu_queue->addr; my_info.rkey_cpu_gpu_queue = (int)ctx->mr_cpu_gpu_queue->rkey; my_info.addr_cpu_gpu_queue = (uint64_t)ctx->mr_cpu_gpu_queue->addr; my_info.rkey_gpu_cpu_flags = (int)ctx->mr_gpu_cpu_flags->rkey; my_info.addr_gpu_cpu_flags = (uint64_t)ctx->mr_gpu_cpu_flags->addr; my_info.rkey_cpu_gpu_flags = (int)ctx->mr_cpu_gpu_flags->rkey; my_info.addr_cpu_gpu_flags = (uint64_t)ctx->mr_cpu_gpu_flags->addr; ret = send(ctx->socket_fd, &my_info, sizeof(struct ib_info_t), 0); if (ret < 0) { perror("send"); exit(1); } /* get client's info */ recv(ctx->socket_fd, client_info, sizeof(struct ib_info_t), 0); if (ret < 0) { perror("recv"); exit(1); } /* we don't need TCP anymore. 
kill the socket */ close(ctx->socket_fd); close(ctx->listen_fd); ctx->socket_fd = ctx->listen_fd = 0; } /* Post a receive buffer of the given index (from the requests array) to the receive queue */ void post_recv(server_context *ctx, int index) { struct ibv_recv_wr recv_wr = {}; /* this is the receive work request (the verb's representation for receive WQE) */ ibv_sge sgl; recv_wr.wr_id = index; sgl.addr = (uintptr_t)&ctx->requests[index]; sgl.length = sizeof(ctx->requests[0]); sgl.lkey = ctx->mr_requests->lkey; recv_wr.sg_list = &sgl; recv_wr.num_sge = 1; if (ibv_post_recv(ctx->qp, &recv_wr, NULL)) { printf("ERROR: ibv_post_recv() failed\n"); exit(1); } } void connect_qp(server_context *ctx, ib_info_t *client_info) { /* this is a multi-phase process, moving the state machine of the QP step by step * until we are ready */ struct ibv_qp_attr qp_attr; /*QP state: RESET -> INIT */ memset(&qp_attr, 0, sizeof(struct ibv_qp_attr)); qp_attr.qp_state = IBV_QPS_INIT; qp_attr.pkey_index = 0; qp_attr.port_num = IB_PORT_SERVER; qp_attr.qp_access_flags = IBV_ACCESS_REMOTE_WRITE | IBV_ACCESS_REMOTE_READ; /* we'll allow client to RDMA write and read on this QP */ int ret = ibv_modify_qp(ctx->qp, &qp_attr, IBV_QP_STATE | IBV_QP_PKEY_INDEX | IBV_QP_PORT | IBV_QP_ACCESS_FLAGS); if (ret) { printf("ERROR: ibv_modify_qp() to INIT failed\n"); exit(1); } /*QP: state: INIT -> RTR (Ready to Receive) */ memset(&qp_attr, 0, sizeof(struct ibv_qp_attr)); qp_attr.qp_state = IBV_QPS_RTR; qp_attr.path_mtu = IBV_MTU_4096; qp_attr.dest_qp_num = client_info->qpn; /* qp number of client */ qp_attr.rq_psn = 0 ; qp_attr.max_dest_rd_atomic = 1; /* max in-flight RDMA reads */ qp_attr.min_rnr_timer = 12; qp_attr.ah_attr.is_global = 0; /* No Network Layer (L3) */ qp_attr.ah_attr.dlid = client_info->lid; /* LID (L2 Address) of client */ qp_attr.ah_attr.sl = 0; qp_attr.ah_attr.src_path_bits = 0; qp_attr.ah_attr.port_num = IB_PORT_SERVER; ret = ibv_modify_qp(ctx->qp, &qp_attr, IBV_QP_STATE | IBV_QP_AV | 
IBV_QP_PATH_MTU | IBV_QP_DEST_QPN | IBV_QP_RQ_PSN | IBV_QP_MAX_DEST_RD_ATOMIC | IBV_QP_MIN_RNR_TIMER); if (ret) { printf("ERROR: ibv_modify_qp() to RTR failed\n"); exit(1); } /*QP: state: RTR -> RTS (Ready to Send) */ memset(&qp_attr, 0, sizeof(struct ibv_qp_attr)); qp_attr.qp_state = IBV_QPS_RTS; qp_attr.sq_psn = 0; qp_attr.timeout = 14; qp_attr.retry_cnt = 7; qp_attr.rnr_retry = 7; qp_attr.max_rd_atomic = 1; ret = ibv_modify_qp(ctx->qp, &qp_attr, IBV_QP_STATE | IBV_QP_TIMEOUT | IBV_QP_RETRY_CNT | IBV_QP_RNR_RETRY | IBV_QP_SQ_PSN | IBV_QP_MAX_QP_RD_ATOMIC); if (ret) { printf("ERROR: ibv_modify_qp() to RTS failed\n"); exit(1); } /* now let's populate the receive QP with recv WQEs */ for (int i = 0; i < OUTSTANDING_REQUESTS; i++) { post_recv(ctx, i); } } void event_loop(server_context *ctx) { /* so the protocol goes like this: * 1. we'll wait for a CQE indicating that we got an Send request from the client. * this tells us we have new work to do. The wr_id we used in post_recv tells us * where the request is. * 2. now we send an RDMA Read to the client to retrieve the request. * we will get a completion indicating the read has completed. * 3. we process the request on the GPU. * 4. upon completion, we send an RDMA Write with immediate to the client with * the results. 
*/ struct ibv_send_wr send_wr; struct ibv_send_wr *bad_send_wr; rpc_request* req; uchar *img_in; uchar *img_out; ibv_sge sgl; bool terminate = false; while (!terminate) { /*step 1: poll for CQE */ struct ibv_wc wc; int ncqes; do { ncqes = ibv_poll_cq(ctx->cq, 1, &wc); } while (ncqes == 0); if (ncqes < 0) { printf("ERROR: ibv_poll_cq() failed\n"); exit(1); } if (wc.status != IBV_WC_SUCCESS) { printf("ERROR: got CQE with error '%s' (%d) (line %d)\n", ibv_wc_status_str(wc.status), wc.status, __LINE__); exit(1); } switch (wc.opcode) { case IBV_WC_RECV: /* Received a new request from the client */ req = &ctx->requests[wc.wr_id]; img_in = &ctx->images_in[wc.wr_id * SQR(IMG_DIMENSION)]; /* Terminate signal */ if (req->request_id == -1) { printf("Terminating...\n"); terminate = true; break; } if (ctx->mode != MODE_RPC_SERVER) { printf("Got client RPC request when running in queue mode.\n"); exit(1); } /* send RDMA Read to client to read the input */ memset(&send_wr, 0, sizeof(struct ibv_send_wr)); send_wr.wr_id = wc.wr_id; sgl.addr = (uintptr_t)img_in; sgl.length = req->input_length; sgl.lkey = ctx->mr_images_in->lkey; send_wr.sg_list = &sgl; send_wr.num_sge = 1; send_wr.opcode = IBV_WR_RDMA_READ; send_wr.send_flags = IBV_SEND_SIGNALED; send_wr.wr.rdma.remote_addr = req->input_addr; send_wr.wr.rdma.rkey = req->input_rkey; if (ibv_post_send(ctx->qp, &send_wr, &bad_send_wr)) { printf("ERROR: ibv_post_send() failed\n"); exit(1); } break; case IBV_WC_RDMA_READ: /* Completed RDMA read for a request */ req = &ctx->requests[wc.wr_id]; img_in = &ctx->images_in[wc.wr_id * SQR(IMG_DIMENSION)]; img_out = &ctx->images_out[wc.wr_id * SQR(IMG_DIMENSION)]; process_image_on_gpu(img_in, img_out); /* send RDMA Write with immediate to client with the response */ memset(&send_wr, 0, sizeof(struct ibv_send_wr)); send_wr.wr_id = wc.wr_id; ibv_sge sgl; sgl.addr = (uintptr_t)img_out; sgl.length = req->output_length; sgl.lkey = ctx->mr_images_out->lkey; send_wr.sg_list = &sgl; send_wr.num_sge = 1; 
send_wr.opcode = IBV_WR_RDMA_WRITE_WITH_IMM; send_wr.send_flags = IBV_SEND_SIGNALED; send_wr.wr.rdma.remote_addr = req->output_addr; send_wr.wr.rdma.rkey = req->output_rkey; send_wr.imm_data = req->request_id; if (ibv_post_send(ctx->qp, &send_wr, &bad_send_wr)) { printf("ERROR: ibv_post_send() failed\n"); exit(1); } break; case IBV_WC_RDMA_WRITE: /* Completed RDMA Write - reuse buffers for receiving the next requests */ post_recv(ctx, wc.wr_id); break; default: printf("Unexpected completion\n"); assert(false); } } } void teardown_context(server_context *ctx) { /* cleanup */ ibv_destroy_qp(ctx->qp); ibv_destroy_cq(ctx->cq); ibv_dereg_mr(ctx->mr_requests); ibv_dereg_mr(ctx->mr_images_in); ibv_dereg_mr(ctx->mr_images_out); /* TODO destroy the additional server MRs here if needed */ CUDA_CHECK(cudaFreeHost((void *)ctx->cpu_gpu_queue)); CUDA_CHECK(cudaFreeHost((void *)ctx->gpu_cpu_queue)); CUDA_CHECK(cudaFreeHost((void *)ctx->cpu_gpu_flags)); CUDA_CHECK(cudaFreeHost((void *)ctx->gpu_cpu_flags)); CUDA_CHECK(cudaFreeHost((void *)ctx->running)); ibv_dereg_mr(ctx->mr_cpu_gpu_queue); ibv_dereg_mr(ctx->mr_gpu_cpu_queue); ibv_dereg_mr(ctx->mr_cpu_gpu_flags); ibv_dereg_mr(ctx->mr_gpu_cpu_flags); ibv_dereg_mr(ctx->mr_running); ibv_dealloc_pd(ctx->pd); ibv_close_device(ctx->context); } int main(int argc, char *argv[]) { server_context ctx; parse_arguments(argc, argv, &ctx.mode, &ctx.tcp_port); if (!ctx.tcp_port) { srand(time(NULL)); ctx.tcp_port = TCP_PORT_OFFSET + (rand() % TCP_PORT_RANGE); /* to avoid conflicts with other users of the machine */ } /* Initialize memory and CUDA resources */ allocate_memory(&ctx); /* Create a TCP connection with the client to exchange InfiniBand parameters */ tcp_connection(&ctx); /* now that client has connected to us via TCP we'll open up some Infiniband resources and send it the parameters */ initialize_verbs(&ctx); /* exchange InfiniBand parameters with the client */ ib_info_t client_info; exchange_parameters(&ctx, &client_info); /* now need 
to connect the QP to the client's QP. */ connect_qp(&ctx, &client_info); if (ctx.mode == MODE_QUEUE) { /* TODO run the GPU persistent kernel from hw2, for 1024 threads per block */ int thread_blocks_num = max_thread_blocks(1024); test_kernel<<<thread_blocks_num, 1024>>>(ctx.cpu_gpu_queue, ctx.cpu_gpu_flags, ctx.gpu_cpu_queue, ctx.gpu_cpu_flags, ctx.running); CUDA_CHECK(cudaDeviceSynchronize()); } /* now finally we get to the actual work, in the event loop */ /* The event loop can be used for queue mode for the termination message */ event_loop(&ctx); printf("Done\n"); teardown_context(&ctx); return 0; }
04dc2703f86e8c34bc1ffe0c24bf5372ffb3bb51.hip
// !!! This is a file automatically generated by hipify!!! #include <matazure/tensor> using namespace matazure; __constant__ static_tensor<float, dim< 3, 3>> mask; MATAZURE_CUDA_PUZZEL_CONV_GLOBAL(conv_global, mask) MATAZURE_CUDA_PUZZEL_CONV_BLOCK_ALIGNED(conv_block, mask) MATAZURE_CUDA_PUZZEL_CONV_BLOCK_CRACK(conv_block_crack, mask) MATAZURE_CUDA_PUZZEL_CONV_BLOCK_OVERLAP(conv_block_overlap, mask) int main() { static_tensor<float, dim< 3, 3>> host_mask; fill(host_mask, 1.0f / host_mask.size()); cuda::copy_symbol(host_mask, mask); tensor<byte, 2> gray(512, 512); io::read_raw_data("data/lena_gray8_512x512.raw_data", gray); auto cu_gray = mem_clone(gray, device_t{}); auto lcts_conv = cuda::puzzle::conv_global(tensor_cast<float>(clamp_zero(cu_gray))); auto cts_conv = apply(lcts_conv, op::saturate_convert<byte>{}).persist(); hip::device_synchronize(); auto ts_conv = mem_clone(cts_conv, host_t{}); io::write_raw_data("data/lena_gray8_conv_512x512.raw_data", ts_conv); cuda::tensor<float, 2> cts_conv_block(cu_gray.shape()); cuda::puzzle::conv_block<dim<16, 16>>(tensor_cast<float>(cu_gray), cts_conv_block); auto cts_byte_conv_block = apply(cts_conv_block, op::saturate_convert<byte>{}).persist(); hip::device_synchronize(); auto ts_byte_conv_block = mem_clone(cts_byte_conv_block, host_t{}); io::write_raw_data("data/lena_gray8_conv_block_512x512.raw_data", ts_byte_conv_block); cuda::tensor<float, 2> cts_conv_block_crack(cu_gray.shape()); cuda::puzzle::conv_block_crack<dim<32, 32>>(tensor_cast<float>(clamp_zero(cu_gray)), cts_conv_block_crack); auto cts_byte_conv_block_crack = apply(cts_conv_block_crack, op::saturate_convert<byte>{}).persist(); hip::device_synchronize(); auto ts_byte_conv_block_crack = mem_clone(cts_byte_conv_block_crack, host_t{}); io::write_raw_data("data/lena_gray8_conv_block_crack_512x512.raw_data", ts_byte_conv_block_crack); cuda::tensor<float, 2> cts_conv_block_overlap(cu_gray.shape()); cuda::puzzle::conv_block_overlap<dim<16, 
16>>(tensor_cast<float>(clamp_zero(cu_gray)), cts_conv_block_overlap); auto cts_byte_conv_block_overlap = apply(cts_conv_block_overlap, op::saturate_convert<byte>{}).persist(); hip::device_synchronize(); auto ts_byte_conv_block_overlap = mem_clone(cts_byte_conv_block_overlap, host_t{}); io::write_raw_data("data/lena_gray8_conv_block_overlap_512x512.raw_data", ts_byte_conv_block_overlap); return 0; }
04dc2703f86e8c34bc1ffe0c24bf5372ffb3bb51.cu
#include <matazure/tensor> using namespace matazure; __constant__ static_tensor<float, dim< 3, 3>> mask; MATAZURE_CUDA_PUZZEL_CONV_GLOBAL(conv_global, mask) MATAZURE_CUDA_PUZZEL_CONV_BLOCK_ALIGNED(conv_block, mask) MATAZURE_CUDA_PUZZEL_CONV_BLOCK_CRACK(conv_block_crack, mask) MATAZURE_CUDA_PUZZEL_CONV_BLOCK_OVERLAP(conv_block_overlap, mask) int main() { static_tensor<float, dim< 3, 3>> host_mask; fill(host_mask, 1.0f / host_mask.size()); cuda::copy_symbol(host_mask, mask); tensor<byte, 2> gray(512, 512); io::read_raw_data("data/lena_gray8_512x512.raw_data", gray); auto cu_gray = mem_clone(gray, device_t{}); auto lcts_conv = cuda::puzzle::conv_global(tensor_cast<float>(clamp_zero(cu_gray))); auto cts_conv = apply(lcts_conv, op::saturate_convert<byte>{}).persist(); cuda::device_synchronize(); auto ts_conv = mem_clone(cts_conv, host_t{}); io::write_raw_data("data/lena_gray8_conv_512x512.raw_data", ts_conv); cuda::tensor<float, 2> cts_conv_block(cu_gray.shape()); cuda::puzzle::conv_block<dim<16, 16>>(tensor_cast<float>(cu_gray), cts_conv_block); auto cts_byte_conv_block = apply(cts_conv_block, op::saturate_convert<byte>{}).persist(); cuda::device_synchronize(); auto ts_byte_conv_block = mem_clone(cts_byte_conv_block, host_t{}); io::write_raw_data("data/lena_gray8_conv_block_512x512.raw_data", ts_byte_conv_block); cuda::tensor<float, 2> cts_conv_block_crack(cu_gray.shape()); cuda::puzzle::conv_block_crack<dim<32, 32>>(tensor_cast<float>(clamp_zero(cu_gray)), cts_conv_block_crack); auto cts_byte_conv_block_crack = apply(cts_conv_block_crack, op::saturate_convert<byte>{}).persist(); cuda::device_synchronize(); auto ts_byte_conv_block_crack = mem_clone(cts_byte_conv_block_crack, host_t{}); io::write_raw_data("data/lena_gray8_conv_block_crack_512x512.raw_data", ts_byte_conv_block_crack); cuda::tensor<float, 2> cts_conv_block_overlap(cu_gray.shape()); cuda::puzzle::conv_block_overlap<dim<16, 16>>(tensor_cast<float>(clamp_zero(cu_gray)), cts_conv_block_overlap); auto 
cts_byte_conv_block_overlap = apply(cts_conv_block_overlap, op::saturate_convert<byte>{}).persist(); cuda::device_synchronize(); auto ts_byte_conv_block_overlap = mem_clone(cts_byte_conv_block_overlap, host_t{}); io::write_raw_data("data/lena_gray8_conv_block_overlap_512x512.raw_data", ts_byte_conv_block_overlap); return 0; }
b6136af2ecc471ffc8b8d55ac89c8587ad9d92c8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "CChanelAWGN2.h" #define CUDA_CALL(x) do { if((x) != hipSuccess) { \ printf("Error at %s:%d\n",__FILE__,__LINE__); \ exit(0);}} while(0) #define CURAND_CALL(x) do { if((x) != HIPRAND_STATUS_SUCCESS) { \ printf("Error (%d) at %s:%d\n", x, __FILE__,__LINE__); \ exit(0);}} while(0) __global__ void vectNoise(const int *IN, const float*A, const float *B, float *C, float SigB, int N) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < N) { float x = sqrt(-2.0 * log( A[i] )); float y = B[i]; float Ph = x * sin(_2pi * y); float Qu = x * cos(_2pi * y); #if 0 C[i] = ((float)(2 * IN[i] ) + Ph * SigB) * (2.0f / (SigB * SigB)); C[i+N] = ((float)(2 * IN[i+N]) + Qu * SigB) * (2.0f / (SigB * SigB)); #else const float s2 = (2.0f * SigB * SigB); const float b1 = (float)(2 * IN[i] ) - 1.0f + Ph * SigB; const float b2 = (float)(2 * IN[i+N]) - 1.0f + Qu * SigB; C[i] = (( (b1 - 1.0f) * (b1 - 1.0f) ) - ( (b1 + 1.0f) * (b1 + 1.0f) )) / s2; C[i+N] = (( (b2 - 1.0f) * (b2 - 1.0f) ) - ( (b2 + 1.0f) * (b2 + 1.0f) )) / s2; #endif } } CChanelAWGN2::CChanelAWGN2(CTrame *t, int _BITS_LLR, bool QPSK, bool Es_N0) : CChanel(t, _BITS_LLR, QPSK, Es_N0){ hiprandStatus_t Status; Status = hiprandCreateGenerator(&generator, HIPRAND_RNG_PSEUDO_DEFAULT); CURAND_CALL(Status); Status = hiprandSetPseudoRandomGeneratorSeed(generator, 1234ULL); CURAND_CALL(Status); CUDA_MALLOC_DEVICE(&d_IN, _data); CUDA_MALLOC_DEVICE(&device_A, _data); CUDA_MALLOC_DEVICE(&device_B, _data); CUDA_MALLOC_DEVICE(&device_R, _data); } CChanelAWGN2::~CChanelAWGN2(){ hipError_t Status; Status = hipFree(d_IN); ERROR_CHECK(Status, (char*)__FILE__, __LINE__); Status = hipFree(device_A); ERROR_CHECK(Status, (char*)__FILE__, __LINE__); Status = hipFree(device_B); ERROR_CHECK(Status, (char*)__FILE__, __LINE__); Status = hipFree(device_R); ERROR_CHECK(Status, (char*)__FILE__, __LINE__); hiprandStatus_t eStatus; eStatus = 
hiprandDestroyGenerator(generator); CURAND_CALL(eStatus); } void CChanelAWGN2::configure(double _Eb_N0) { rendement = (float) (_vars) / (float) (_data); if (es_n0) { Eb_N0 = _Eb_N0 - 10.0 * log10(2 * rendement); } else { Eb_N0 = _Eb_N0; } double interm = 10.0 * log10(rendement); interm = -0.1*((double)Eb_N0+interm); SigB = sqrt(pow(10.0,interm)/2); } #include <limits.h> #define MAX_RANDOM LONG_MAX /* Maximum value of random() */ double CChanelAWGN2::awgn(double amp) { return 0.00; } #define QPSK 0.707106781 #define BPSK 1.0 void CChanelAWGN2::generate( ) { hiprandStatus_t Status; hipError_t eStatus; eStatus = hipMemcpy(d_IN, t_coded_bits, _data * sizeof(int), hipMemcpyHostToDevice); Status = hiprandGenerateUniform( generator, device_A, _data ); CURAND_CALL(Status); Status = hiprandGenerateUniform( generator, device_B, _data ); CURAND_CALL(Status); int threadsPerBlock = 256; int blocksPerGrid = (_data + threadsPerBlock - 1) / threadsPerBlock; hipLaunchKernelGGL(( vectNoise), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_IN, device_A, device_B, device_R, (float)SigB, _data); eStatus = hipMemcpy(t_noise_data, device_R, _data * sizeof(float), hipMemcpyDeviceToHost); /* FILE* f1 = fopen("t_noise_data.json", "w"); for (int m = 0; m < _data ; m++){ fprintf(f1, "\n "); fprintf(f1, " m = %d ", m); fprintf(f1, " %4f ", t_noise_data[m]); fprintf(f1, "\n "); } fclose( f1 );*/ CUDA_CALL(eStatus); }
b6136af2ecc471ffc8b8d55ac89c8587ad9d92c8.cu
#include "CChanelAWGN2.h" #define CUDA_CALL(x) do { if((x) != cudaSuccess) { \ printf("Error at %s:%d\n",__FILE__,__LINE__); \ exit(0);}} while(0) #define CURAND_CALL(x) do { if((x) != CURAND_STATUS_SUCCESS) { \ printf("Error (%d) at %s:%d\n", x, __FILE__,__LINE__); \ exit(0);}} while(0) __global__ void vectNoise(const int *IN, const float*A, const float *B, float *C, float SigB, int N) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < N) { float x = sqrt(-2.0 * log( A[i] )); float y = B[i]; float Ph = x * sin(_2pi * y); float Qu = x * cos(_2pi * y); #if 0 C[i] = ((float)(2 * IN[i] ) + Ph * SigB) * (2.0f / (SigB * SigB)); C[i+N] = ((float)(2 * IN[i+N]) + Qu * SigB) * (2.0f / (SigB * SigB)); #else const float s2 = (2.0f * SigB * SigB); const float b1 = (float)(2 * IN[i] ) - 1.0f + Ph * SigB; const float b2 = (float)(2 * IN[i+N]) - 1.0f + Qu * SigB; C[i] = (( (b1 - 1.0f) * (b1 - 1.0f) ) - ( (b1 + 1.0f) * (b1 + 1.0f) )) / s2; C[i+N] = (( (b2 - 1.0f) * (b2 - 1.0f) ) - ( (b2 + 1.0f) * (b2 + 1.0f) )) / s2; #endif } } CChanelAWGN2::CChanelAWGN2(CTrame *t, int _BITS_LLR, bool QPSK, bool Es_N0) : CChanel(t, _BITS_LLR, QPSK, Es_N0){ curandStatus_t Status; Status = curandCreateGenerator(&generator, CURAND_RNG_PSEUDO_DEFAULT); CURAND_CALL(Status); Status = curandSetPseudoRandomGeneratorSeed(generator, 1234ULL); CURAND_CALL(Status); CUDA_MALLOC_DEVICE(&d_IN, _data); CUDA_MALLOC_DEVICE(&device_A, _data); CUDA_MALLOC_DEVICE(&device_B, _data); CUDA_MALLOC_DEVICE(&device_R, _data); } CChanelAWGN2::~CChanelAWGN2(){ cudaError_t Status; Status = cudaFree(d_IN); ERROR_CHECK(Status, (char*)__FILE__, __LINE__); Status = cudaFree(device_A); ERROR_CHECK(Status, (char*)__FILE__, __LINE__); Status = cudaFree(device_B); ERROR_CHECK(Status, (char*)__FILE__, __LINE__); Status = cudaFree(device_R); ERROR_CHECK(Status, (char*)__FILE__, __LINE__); curandStatus_t eStatus; eStatus = curandDestroyGenerator(generator); CURAND_CALL(eStatus); } void CChanelAWGN2::configure(double _Eb_N0) { 
rendement = (float) (_vars) / (float) (_data); if (es_n0) { Eb_N0 = _Eb_N0 - 10.0 * log10(2 * rendement); } else { Eb_N0 = _Eb_N0; } double interm = 10.0 * log10(rendement); interm = -0.1*((double)Eb_N0+interm); SigB = sqrt(pow(10.0,interm)/2); } #include <limits.h> #define MAX_RANDOM LONG_MAX /* Maximum value of random() */ double CChanelAWGN2::awgn(double amp) { return 0.00; } #define QPSK 0.707106781 #define BPSK 1.0 void CChanelAWGN2::generate( ) { curandStatus_t Status; cudaError_t eStatus; eStatus = cudaMemcpy(d_IN, t_coded_bits, _data * sizeof(int), cudaMemcpyHostToDevice); Status = curandGenerateUniform( generator, device_A, _data ); CURAND_CALL(Status); Status = curandGenerateUniform( generator, device_B, _data ); CURAND_CALL(Status); int threadsPerBlock = 256; int blocksPerGrid = (_data + threadsPerBlock - 1) / threadsPerBlock; vectNoise<<<blocksPerGrid, threadsPerBlock>>>(d_IN, device_A, device_B, device_R, (float)SigB, _data); eStatus = cudaMemcpy(t_noise_data, device_R, _data * sizeof(float), cudaMemcpyDeviceToHost); /* FILE* f1 = fopen("t_noise_data.json", "w"); for (int m = 0; m < _data ; m++){ fprintf(f1, "\n "); fprintf(f1, " m = %d ", m); fprintf(f1, " %4f ", t_noise_data[m]); fprintf(f1, "\n "); } fclose( f1 );*/ CUDA_CALL(eStatus); }
2d0c54835b2da296fe3cddce55de6223f1229145.hip
// !!! This is a file automatically generated by hipify!!! // includes system #include <cmath> #include <ctime> #include <iomanip> #include <iostream> #include <fstream> #include <memory> // includes CUDA #include "hip/hip_runtime.h" #include "helper_cuda.h" #include "hip/hip_runtime.h" #include "device_launch_parameters.h" // includes Thrust #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/reduce.h> // includes project #include "config.h" #include "file_util.h" #include "nbody.h" #include "nbody_exception.h" #include "ode.h" #include "pp_disk.h" #include "options.h" #include "tools.h" using namespace std; /////////////////////////////// /* * Copyright 1993-2013 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* This sample queries the properties of the CUDA devices present in the system via CUDA Runtime API. */ // Shared Utilities (QA Testing) // This function wraps the CUDA Driver API into a template function template <class T> inline void getCudaAttribute(T *attribute, hipDeviceAttribute_t device_attribute, int device) { hipError_t error = hipDeviceGetAttribute(attribute, device_attribute, device); if (hipSuccess != error) { fprintf(stderr, "cuSafeCallNoSync() Driver API error = %04d from file <%s>, line %i.\n", error, __FILE__, __LINE__); exit(EXIT_FAILURE); } } inline bool IsGPUCapableP2P(hipDeviceProp_t *pProp) { #ifdef _WIN32 return (bool)(pProp->tccDriver ? 
true : false); #else return (bool)(pProp->major >= 2); #endif } inline bool IsAppBuiltAs64() { #if defined(__x86_64) || defined(AMD64) || defined(_M_AMD64) return 1; #else return 0; #endif } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int device_query(int argc, const char **argv) { printf("%s Starting...\n\n", argv[0]); printf(" CUDA Device Query (Runtime API) version (CUDART static linking)\n\n"); int deviceCount = 0; hipError_t error_id = hipGetDeviceCount(&deviceCount); if (error_id != hipSuccess) { printf("hipGetDeviceCount returned %d\n-> %s\n", (int)error_id, hipGetErrorString(error_id)); printf("Result = FAIL\n"); exit(EXIT_FAILURE); } // This function call returns 0 if there are no CUDA capable devices. if (deviceCount == 0) { printf("There are no available device(s) that support CUDA\n"); } else { printf("Detected %d CUDA Capable device(s)\n", deviceCount); } int dev, driverVersion = 0, runtimeVersion = 0; for (dev = 0; dev < deviceCount; ++dev) { hipSetDevice(dev); hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, dev); printf("\nDevice %d: \"%s\"\n", dev, deviceProp.name); // Console log hipDriverGetVersion(&driverVersion); hipRuntimeGetVersion(&runtimeVersion); printf(" CUDA Driver Version / Runtime Version %d.%d / %d.%d\n", driverVersion/1000, (driverVersion%100)/10, runtimeVersion/1000, (runtimeVersion%100)/10); printf(" CUDA Capability Major/Minor version number: %d.%d\n", deviceProp.major, deviceProp.minor); printf(" Total amount of global memory: %.0f MBytes (%llu bytes)\n", (float)deviceProp.totalGlobalMem/1048576.0f, (unsigned long long) deviceProp.totalGlobalMem); printf(" (%2d) Multiprocessors, (%3d) CUDA Cores/MP: %d CUDA Cores\n", deviceProp.multiProcessorCount, _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor), _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * 
deviceProp.multiProcessorCount); printf(" GPU Clock rate: %.0f MHz (%0.2f GHz)\n", deviceProp.clockRate * 1e-3f, deviceProp.clockRate * 1e-6f); #if CUDART_VERSION >= 5000 // This is supported in CUDA 5.0 (runtime API device properties) printf(" Memory Clock rate: %.0f Mhz\n", deviceProp.memoryClockRate * 1e-3f); printf(" Memory Bus Width: %d-bit\n", deviceProp.memoryBusWidth); if (deviceProp.l2CacheSize) { printf(" L2 Cache Size: %d bytes\n", deviceProp.l2CacheSize); } #else // This only available in CUDA 4.0-4.2 (but these were only exposed in the CUDA Driver API) int memoryClock; getCudaAttribute<int>(&memoryClock, hipDeviceAttributeMemoryClockRate, dev); printf(" Memory Clock rate: %.0f Mhz\n", memoryClock * 1e-3f); int memBusWidth; getCudaAttribute<int>(&memBusWidth, hipDeviceAttributeMemoryBusWidth, dev); printf(" Memory Bus Width: %d-bit\n", memBusWidth); int L2CacheSize; getCudaAttribute<int>(&L2CacheSize, hipDeviceAttributeL2CacheSize, dev); if (L2CacheSize) { printf(" L2 Cache Size: %d bytes\n", L2CacheSize); } #endif printf(" Maximum Texture Dimension Size (x,y,z) 1D=(%d), 2D=(%d, %d), 3D=(%d, %d, %d)\n", deviceProp.maxTexture1D , deviceProp.maxTexture2D[0], deviceProp.maxTexture2D[1], deviceProp.maxTexture3D[0], deviceProp.maxTexture3D[1], deviceProp.maxTexture3D[2]); printf(" Maximum Layered 1D Texture Size, (num) layers 1D=(%d), %d layers\n", deviceProp.maxTexture1DLayered[0], deviceProp.maxTexture1DLayered[1]); printf(" Maximum Layered 2D Texture Size, (num) layers 2D=(%d, %d), %d layers\n", deviceProp.maxTexture2DLayered[0], deviceProp.maxTexture2DLayered[1], deviceProp.maxTexture2DLayered[2]); printf(" Total amount of constant memory: %lu bytes\n", deviceProp.totalConstMem); printf(" Total amount of shared memory per block: %lu bytes\n", deviceProp.sharedMemPerBlock); printf(" Total number of registers available per block: %d\n", deviceProp.regsPerBlock); printf(" Warp size: %d\n", deviceProp.warpSize); printf(" Maximum number of threads per 
multiprocessor: %d\n", deviceProp.maxThreadsPerMultiProcessor); printf(" Maximum number of threads per block: %d\n", deviceProp.maxThreadsPerBlock); printf(" Max dimension size of a thread block (x,y,z): (%d, %d, %d)\n", deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]); printf(" Max dimension size of a grid size (x,y,z): (%d, %d, %d)\n", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]); printf(" Maximum memory pitch: %lu bytes\n", deviceProp.memPitch); printf(" Texture alignment: %lu bytes\n", deviceProp.textureAlignment); printf(" Concurrent copy and kernel execution: %s with %d copy engine(s)\n", (deviceProp.deviceOverlap ? "Yes" : "No"), deviceProp.asyncEngineCount); printf(" Run time limit on kernels: %s\n", deviceProp.kernelExecTimeoutEnabled ? "Yes" : "No"); printf(" Integrated GPU sharing Host Memory: %s\n", deviceProp.integrated ? "Yes" : "No"); printf(" Support host page-locked memory mapping: %s\n", deviceProp.canMapHostMemory ? "Yes" : "No"); printf(" Alignment requirement for Surfaces: %s\n", deviceProp.surfaceAlignment ? "Yes" : "No"); printf(" Device has ECC support: %s\n", deviceProp.ECCEnabled ? "Enabled" : "Disabled"); #ifdef WIN32 printf(" CUDA Device Driver Mode (TCC or WDDM): %s\n", deviceProp.tccDriver ? "TCC (Tesla Compute Cluster Driver)" : "WDDM (Windows Display Driver Model)"); #endif printf(" Device supports Unified Addressing (UVA): %s\n", deviceProp.unifiedAddressing ? 
"Yes" : "No"); printf(" Device PCI Bus ID / PCI location ID: %d / %d\n", deviceProp.pciBusID, deviceProp.pciDeviceID); const char *sComputeMode[] = { "Default (multiple host threads can use ::hipSetDevice() with device simultaneously)", "Exclusive (only one host thread in one process is able to use ::hipSetDevice() with this device)", "Prohibited (no host thread can use ::hipSetDevice() with this device)", "Exclusive Process (many threads in one process is able to use ::hipSetDevice() with this device)", "Unknown", NULL }; printf(" Compute Mode:\n"); printf(" < %s >\n", sComputeMode[deviceProp.computeMode]); } // csv masterlog info // ***************************** // exe and CUDA driver name printf("\n"); std::string sProfileString = "deviceQuery, CUDA Driver = CUDART"; char cTemp[16]; // driver version sProfileString += ", CUDA Driver Version = "; #ifdef WIN32 sprintf_s(cTemp, 10, "%d.%d", driverVersion/1000, (driverVersion%100)/10); #else sprintf(cTemp, "%d.%d", driverVersion/1000, (driverVersion%100)/10); #endif sProfileString += cTemp; // Runtime version sProfileString += ", CUDA Runtime Version = "; #ifdef WIN32 sprintf_s(cTemp, 10, "%d.%d", runtimeVersion/1000, (runtimeVersion%100)/10); #else sprintf(cTemp, "%d.%d", runtimeVersion/1000, (runtimeVersion%100)/10); #endif sProfileString += cTemp; // Device count sProfileString += ", NumDevs = "; #ifdef WIN32 sprintf_s(cTemp, 10, "%d", deviceCount); #else sprintf(cTemp, "%d", deviceCount); #endif sProfileString += cTemp; // Print Out all device Names for (dev = 0; dev < deviceCount; ++dev) { #ifdef _WIN32 sprintf_s(cTemp, 13, ", Device%d = ", dev); #else sprintf(cTemp, ", Device%d = ", dev); #endif hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, dev); sProfileString += cTemp; sProfileString += deviceProp.name; } sProfileString += "\n"; printf("%s", sProfileString.c_str()); printf("Result = PASS\n"); // finish return (EXIT_SUCCESS); } void print_step_stat(pp_disk *ppd, options *opt, integrator* 
intgr, ttt_t dt_try, ostream& log_f) { char time_stamp[20]; get_time_stamp(time_stamp); int n_failed_step = intgr->get_n_failed_step(); int n_passed_step = intgr->get_n_passed_step(); int n_tried_step = intgr->get_n_tried_step(); ttt_t t = ppd->get_currt(); ttt_t avg_dt = (t - opt->start_time)/(var_t)n_passed_step; log_f << time_stamp << ' '; log_f << "n_tried_step: " << n_tried_step << ", n_failed_step: " << n_failed_step << ", n_passed_step: " << n_passed_step << " until " << t << " [day]. dt_try: " << setprecision(8) << setw(14) << dt_try << " [d], avg_dt: " << avg_dt << " [d]\t"; log_f << setprecision(5) << setw(6) << (t/opt->stop_time)*100.0 << " % done" << endl; log_f.flush(); } void print_msg(string& msg, ostream& log_f) { char time_stamp[20]; get_time_stamp(time_stamp); log_f << time_stamp << ' ' << msg << endl; log_f.flush(); } // --verbose -o C:\Work\Projects\solaris.cuda\TestRun\Dvorak -ip parameters.txt -ibl Dvorak_disk.txt // --verbose -o C:\Work\Projects\solaris.cuda\TestRun\Stepsize\100_Stored_dt_ver0 -ip parameters.txt -ibl Dvorak_disk.txt // --verbose -o C:\Work\Projects\solaris.cuda\TestRun\Dvorak_disk_Emese\Factor_5 -ip parameters.txt -ibl collision-testdata-N10001-vecelem-binary.txt int main(int argc, const char** argv) { cout << "Solaris.NBody.Cuda.Test main.cu started" << endl; device_query(argc, argv); time_t start = time(NULL); var_t sum_time_of_steps = 0.0; int_t n_step = 0; // Integrate the pp_disk ode try { options opt(argc, argv); pp_disk* ppd = opt.create_pp_disk(); integrator* intgr = opt.create_integrator(ppd); ttt_t currt = ppd->get_currt(); ttt_t ps = 0; ttt_t dt = 0; string path = combine_path(opt.printoutDir, "position.txt"); ostream* pos_f = new ofstream(path.c_str(), ios::out); path = combine_path(opt.printoutDir, "event.txt"); ostream* event_f = new ofstream(path.c_str(), ios::out); path = combine_path(opt.printoutDir, "log.txt"); ostream* log_f = new ofstream(path.c_str(), ios::out); // Save initial conditions to the output 
file ppd->print_positions(*pos_f); while (currt <= opt.stop_time) { ppd->call_check_hit_centrum_ejection_kernel(); if (ppd->get_n_event() > 0) { ppd->print_event_data(*event_f, *log_f); ppd->handle_hit_centrum_ejection(); } ppd->call_check_collision_kernel(); if (ppd->get_n_event() > 0) { ppd->print_event_data(*event_f, *log_f); ppd->handle_collision(); } clock_t start_of_step = clock(); dt = intgr->step(); n_step++; clock_t end_of_step = clock(); sum_time_of_steps += (end_of_step - start_of_step); cout << "Time for one step: " << (end_of_step - start_of_step) / (double)CLOCKS_PER_SEC << " s, avg: " << sum_time_of_steps / (double)CLOCKS_PER_SEC / n_step << " s" << endl; ps += fabs(dt); currt = ppd->get_currt(); if (fabs(ps) >= opt.output_interval) { ps = 0.0; ppd->copy_to_host(); ppd->print_positions(*pos_f); } if (opt.verbose && (intgr->get_n_passed_step()) % 100 == 0) { print_step_stat(ppd, &opt, intgr, dt, *log_f); } } // Save final conditions to the output file ppd->print_positions(*pos_f); } /* try */ catch (nbody_exception& ex) { cerr << "Error: " << ex.what() << endl; } cout << "Total time: " << time(NULL) - start << " s" << endl; return 0; } #if 0 // -nBodies 1 1 0 10000 0 100000 0 -i RKF78 -a 1.0e-10 -t 1000 -dt 10.0 -p 10 10 10 -o C:\Work\Solaris.Cuda.TestRuns\2MStar_5MJupiter_Disc65-270_01\GPU -f C:\Work\Solaris.Cuda.TestRuns\2MStar_5MJupiter_Disc65-270_01\GPU\nBodies_1_1_0_10000_0_100000_0.txt int main(int argc, const char** argv) { cout << "Solaris.NBody.Cuda.Test main.cu started" << endl; device_query(argc, argv); time_t start = time(NULL); // Integrate the pp_disk ode try { options opt(argc, argv); pp_disk* ppd = opt.create_pp_disk(); integrator* intgr = opt.create_integrator(ppd); ttt_t pp = 0; ttt_t ps = 0; ttt_t dt = 0; ostream* positionsf = 0; ostream* orbelemf = 0; //ostream* collisionsf= 0; int pcount = 0; int ccount = 0; if (!opt.printoutToFile) { positionsf = &cout; orbelemf = &cout; //collisionsf = &cerr; } else { //collisionsf = new 
ofstream(combine_path(opt.printoutDir, "col.txt").c_str()); //positionsf = new ofstream(get_printout_file(opt, pcount++).c_str()); string filename = get_filename_without_ext(opt.filename) + '.' + intgr->get_name() + '.' + (opt.gasDisk == 0 ? "" : "gas.CONSTANT."); string filenameWithExt = filename + get_extension(opt.filename); string path = combine_path(opt.printoutDir, filenameWithExt); //char *c_path = new char[path.length() + 1]; //strcpy(c_path, path.c_str()); positionsf = new ofstream(path.c_str(), ios::app); //filenameWithExt = filename + "oe." + get_extension(opt.filename); //orbelemf = new ofstream(combine_path(opt.printoutDir, filenameWithExt), std::ios::app); } while (ppd->t < opt.stop_time) { if (opt.printout) { if (pp >= opt.printoutPeriod) { pp = 0; } // Start of a print-out period, create new file if necessary if (pp == 0 && intgr->get_n_step() > 0) { var_t avg_dt = (ppd->t - opt.start_time) / intgr->get_n_step(); cout << intgr->get_n_failed_step() << " step(s) failed out of " << intgr->get_n_step() << " steps until " << ppd->t << " [day]\naverage dt: " << setprecision(10) << setw(16) << avg_dt << " [d]" << endl; cerr << setprecision(5) << setw(6) << ((ppd->t - opt.start_time)/opt.stop_time*100) << " %" << endl; } //var_t avg_dt = (ppd->t - opt.start_time) / intgr->get_n_step(); //cout << intgr->get_n_failed_step() << " step(s) failed out of " << intgr->get_n_step() << " steps until " << ppd->t << " [day]\naverage dt: " << setprecision(10) << setw(16) << avg_dt << " [d]" << endl; //cerr << setprecision(5) << setw(6) << ((ppd->t - opt.start_time)/opt.stop_time*100) << " %" << endl; if (0 <= pp && pp <= opt.printoutLength) { if (ps >= opt.printoutStep) { ps = 0; } if (ps == 0) { // Print out positions ppd->copy_to_host(); ppd->print_positions(*positionsf); //pp_disk::h_orbelem_t orbelem = ppd->calculate_orbelem(0); //ppd->print_orbelem(*orbelemf); } } } dt = intgr->step(); cerr << "t: " << setw(15) << (ppd->t) << ", dt: " << setw(15) << dt << " [d]" << 
endl; pp += dt; ps += dt; } delete ppd; delete intgr; delete positionsf; delete orbelemf; } /* try */ catch (nbody_exception& ex) { cerr << "Error: " << ex.what() << endl; } cout << "Total time: " << time(NULL) - start << " s" << endl; return 0; } #endif
2d0c54835b2da296fe3cddce55de6223f1229145.cu
// includes system #include <cmath> #include <ctime> #include <iomanip> #include <iostream> #include <fstream> #include <memory> // includes CUDA #include "cuda.h" #include "helper_cuda.h" #include "cuda_runtime.h" #include "device_launch_parameters.h" // includes Thrust #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/reduce.h> // includes project #include "config.h" #include "file_util.h" #include "nbody.h" #include "nbody_exception.h" #include "ode.h" #include "pp_disk.h" #include "options.h" #include "tools.h" using namespace std; /////////////////////////////// /* * Copyright 1993-2013 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* This sample queries the properties of the CUDA devices present in the system via CUDA Runtime API. */ // Shared Utilities (QA Testing) // This function wraps the CUDA Driver API into a template function template <class T> inline void getCudaAttribute(T *attribute, CUdevice_attribute device_attribute, int device) { CUresult error = cuDeviceGetAttribute(attribute, device_attribute, device); if (CUDA_SUCCESS != error) { fprintf(stderr, "cuSafeCallNoSync() Driver API error = %04d from file <%s>, line %i.\n", error, __FILE__, __LINE__); exit(EXIT_FAILURE); } } inline bool IsGPUCapableP2P(cudaDeviceProp *pProp) { #ifdef _WIN32 return (bool)(pProp->tccDriver ? 
true : false); #else return (bool)(pProp->major >= 2); #endif } inline bool IsAppBuiltAs64() { #if defined(__x86_64) || defined(AMD64) || defined(_M_AMD64) return 1; #else return 0; #endif } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int device_query(int argc, const char **argv) { printf("%s Starting...\n\n", argv[0]); printf(" CUDA Device Query (Runtime API) version (CUDART static linking)\n\n"); int deviceCount = 0; cudaError_t error_id = cudaGetDeviceCount(&deviceCount); if (error_id != cudaSuccess) { printf("cudaGetDeviceCount returned %d\n-> %s\n", (int)error_id, cudaGetErrorString(error_id)); printf("Result = FAIL\n"); exit(EXIT_FAILURE); } // This function call returns 0 if there are no CUDA capable devices. if (deviceCount == 0) { printf("There are no available device(s) that support CUDA\n"); } else { printf("Detected %d CUDA Capable device(s)\n", deviceCount); } int dev, driverVersion = 0, runtimeVersion = 0; for (dev = 0; dev < deviceCount; ++dev) { cudaSetDevice(dev); cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, dev); printf("\nDevice %d: \"%s\"\n", dev, deviceProp.name); // Console log cudaDriverGetVersion(&driverVersion); cudaRuntimeGetVersion(&runtimeVersion); printf(" CUDA Driver Version / Runtime Version %d.%d / %d.%d\n", driverVersion/1000, (driverVersion%100)/10, runtimeVersion/1000, (runtimeVersion%100)/10); printf(" CUDA Capability Major/Minor version number: %d.%d\n", deviceProp.major, deviceProp.minor); printf(" Total amount of global memory: %.0f MBytes (%llu bytes)\n", (float)deviceProp.totalGlobalMem/1048576.0f, (unsigned long long) deviceProp.totalGlobalMem); printf(" (%2d) Multiprocessors, (%3d) CUDA Cores/MP: %d CUDA Cores\n", deviceProp.multiProcessorCount, _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor), _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * 
deviceProp.multiProcessorCount); printf(" GPU Clock rate: %.0f MHz (%0.2f GHz)\n", deviceProp.clockRate * 1e-3f, deviceProp.clockRate * 1e-6f); #if CUDART_VERSION >= 5000 // This is supported in CUDA 5.0 (runtime API device properties) printf(" Memory Clock rate: %.0f Mhz\n", deviceProp.memoryClockRate * 1e-3f); printf(" Memory Bus Width: %d-bit\n", deviceProp.memoryBusWidth); if (deviceProp.l2CacheSize) { printf(" L2 Cache Size: %d bytes\n", deviceProp.l2CacheSize); } #else // This only available in CUDA 4.0-4.2 (but these were only exposed in the CUDA Driver API) int memoryClock; getCudaAttribute<int>(&memoryClock, CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE, dev); printf(" Memory Clock rate: %.0f Mhz\n", memoryClock * 1e-3f); int memBusWidth; getCudaAttribute<int>(&memBusWidth, CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH, dev); printf(" Memory Bus Width: %d-bit\n", memBusWidth); int L2CacheSize; getCudaAttribute<int>(&L2CacheSize, CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE, dev); if (L2CacheSize) { printf(" L2 Cache Size: %d bytes\n", L2CacheSize); } #endif printf(" Maximum Texture Dimension Size (x,y,z) 1D=(%d), 2D=(%d, %d), 3D=(%d, %d, %d)\n", deviceProp.maxTexture1D , deviceProp.maxTexture2D[0], deviceProp.maxTexture2D[1], deviceProp.maxTexture3D[0], deviceProp.maxTexture3D[1], deviceProp.maxTexture3D[2]); printf(" Maximum Layered 1D Texture Size, (num) layers 1D=(%d), %d layers\n", deviceProp.maxTexture1DLayered[0], deviceProp.maxTexture1DLayered[1]); printf(" Maximum Layered 2D Texture Size, (num) layers 2D=(%d, %d), %d layers\n", deviceProp.maxTexture2DLayered[0], deviceProp.maxTexture2DLayered[1], deviceProp.maxTexture2DLayered[2]); printf(" Total amount of constant memory: %lu bytes\n", deviceProp.totalConstMem); printf(" Total amount of shared memory per block: %lu bytes\n", deviceProp.sharedMemPerBlock); printf(" Total number of registers available per block: %d\n", deviceProp.regsPerBlock); printf(" Warp size: %d\n", deviceProp.warpSize); printf(" Maximum number of 
threads per multiprocessor: %d\n", deviceProp.maxThreadsPerMultiProcessor); printf(" Maximum number of threads per block: %d\n", deviceProp.maxThreadsPerBlock); printf(" Max dimension size of a thread block (x,y,z): (%d, %d, %d)\n", deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]); printf(" Max dimension size of a grid size (x,y,z): (%d, %d, %d)\n", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]); printf(" Maximum memory pitch: %lu bytes\n", deviceProp.memPitch); printf(" Texture alignment: %lu bytes\n", deviceProp.textureAlignment); printf(" Concurrent copy and kernel execution: %s with %d copy engine(s)\n", (deviceProp.deviceOverlap ? "Yes" : "No"), deviceProp.asyncEngineCount); printf(" Run time limit on kernels: %s\n", deviceProp.kernelExecTimeoutEnabled ? "Yes" : "No"); printf(" Integrated GPU sharing Host Memory: %s\n", deviceProp.integrated ? "Yes" : "No"); printf(" Support host page-locked memory mapping: %s\n", deviceProp.canMapHostMemory ? "Yes" : "No"); printf(" Alignment requirement for Surfaces: %s\n", deviceProp.surfaceAlignment ? "Yes" : "No"); printf(" Device has ECC support: %s\n", deviceProp.ECCEnabled ? "Enabled" : "Disabled"); #ifdef WIN32 printf(" CUDA Device Driver Mode (TCC or WDDM): %s\n", deviceProp.tccDriver ? "TCC (Tesla Compute Cluster Driver)" : "WDDM (Windows Display Driver Model)"); #endif printf(" Device supports Unified Addressing (UVA): %s\n", deviceProp.unifiedAddressing ? 
"Yes" : "No"); printf(" Device PCI Bus ID / PCI location ID: %d / %d\n", deviceProp.pciBusID, deviceProp.pciDeviceID); const char *sComputeMode[] = { "Default (multiple host threads can use ::cudaSetDevice() with device simultaneously)", "Exclusive (only one host thread in one process is able to use ::cudaSetDevice() with this device)", "Prohibited (no host thread can use ::cudaSetDevice() with this device)", "Exclusive Process (many threads in one process is able to use ::cudaSetDevice() with this device)", "Unknown", NULL }; printf(" Compute Mode:\n"); printf(" < %s >\n", sComputeMode[deviceProp.computeMode]); } // csv masterlog info // ***************************** // exe and CUDA driver name printf("\n"); std::string sProfileString = "deviceQuery, CUDA Driver = CUDART"; char cTemp[16]; // driver version sProfileString += ", CUDA Driver Version = "; #ifdef WIN32 sprintf_s(cTemp, 10, "%d.%d", driverVersion/1000, (driverVersion%100)/10); #else sprintf(cTemp, "%d.%d", driverVersion/1000, (driverVersion%100)/10); #endif sProfileString += cTemp; // Runtime version sProfileString += ", CUDA Runtime Version = "; #ifdef WIN32 sprintf_s(cTemp, 10, "%d.%d", runtimeVersion/1000, (runtimeVersion%100)/10); #else sprintf(cTemp, "%d.%d", runtimeVersion/1000, (runtimeVersion%100)/10); #endif sProfileString += cTemp; // Device count sProfileString += ", NumDevs = "; #ifdef WIN32 sprintf_s(cTemp, 10, "%d", deviceCount); #else sprintf(cTemp, "%d", deviceCount); #endif sProfileString += cTemp; // Print Out all device Names for (dev = 0; dev < deviceCount; ++dev) { #ifdef _WIN32 sprintf_s(cTemp, 13, ", Device%d = ", dev); #else sprintf(cTemp, ", Device%d = ", dev); #endif cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, dev); sProfileString += cTemp; sProfileString += deviceProp.name; } sProfileString += "\n"; printf("%s", sProfileString.c_str()); printf("Result = PASS\n"); // finish return (EXIT_SUCCESS); } void print_step_stat(pp_disk *ppd, options *opt, integrator* 
intgr, ttt_t dt_try, ostream& log_f) { char time_stamp[20]; get_time_stamp(time_stamp); int n_failed_step = intgr->get_n_failed_step(); int n_passed_step = intgr->get_n_passed_step(); int n_tried_step = intgr->get_n_tried_step(); ttt_t t = ppd->get_currt(); ttt_t avg_dt = (t - opt->start_time)/(var_t)n_passed_step; log_f << time_stamp << ' '; log_f << "n_tried_step: " << n_tried_step << ", n_failed_step: " << n_failed_step << ", n_passed_step: " << n_passed_step << " until " << t << " [day]. dt_try: " << setprecision(8) << setw(14) << dt_try << " [d], avg_dt: " << avg_dt << " [d]\t"; log_f << setprecision(5) << setw(6) << (t/opt->stop_time)*100.0 << " % done" << endl; log_f.flush(); } void print_msg(string& msg, ostream& log_f) { char time_stamp[20]; get_time_stamp(time_stamp); log_f << time_stamp << ' ' << msg << endl; log_f.flush(); } // --verbose -o C:\Work\Projects\solaris.cuda\TestRun\Dvorak -ip parameters.txt -ibl Dvorak_disk.txt // --verbose -o C:\Work\Projects\solaris.cuda\TestRun\Stepsize\100_Stored_dt_ver0 -ip parameters.txt -ibl Dvorak_disk.txt // --verbose -o C:\Work\Projects\solaris.cuda\TestRun\Dvorak_disk_Emese\Factor_5 -ip parameters.txt -ibl collision-testdata-N10001-vecelem-binary.txt int main(int argc, const char** argv) { cout << "Solaris.NBody.Cuda.Test main.cu started" << endl; device_query(argc, argv); time_t start = time(NULL); var_t sum_time_of_steps = 0.0; int_t n_step = 0; // Integrate the pp_disk ode try { options opt(argc, argv); pp_disk* ppd = opt.create_pp_disk(); integrator* intgr = opt.create_integrator(ppd); ttt_t currt = ppd->get_currt(); ttt_t ps = 0; ttt_t dt = 0; string path = combine_path(opt.printoutDir, "position.txt"); ostream* pos_f = new ofstream(path.c_str(), ios::out); path = combine_path(opt.printoutDir, "event.txt"); ostream* event_f = new ofstream(path.c_str(), ios::out); path = combine_path(opt.printoutDir, "log.txt"); ostream* log_f = new ofstream(path.c_str(), ios::out); // Save initial conditions to the output 
file ppd->print_positions(*pos_f); while (currt <= opt.stop_time) { ppd->call_check_hit_centrum_ejection_kernel(); if (ppd->get_n_event() > 0) { ppd->print_event_data(*event_f, *log_f); ppd->handle_hit_centrum_ejection(); } ppd->call_check_collision_kernel(); if (ppd->get_n_event() > 0) { ppd->print_event_data(*event_f, *log_f); ppd->handle_collision(); } clock_t start_of_step = clock(); dt = intgr->step(); n_step++; clock_t end_of_step = clock(); sum_time_of_steps += (end_of_step - start_of_step); cout << "Time for one step: " << (end_of_step - start_of_step) / (double)CLOCKS_PER_SEC << " s, avg: " << sum_time_of_steps / (double)CLOCKS_PER_SEC / n_step << " s" << endl; ps += fabs(dt); currt = ppd->get_currt(); if (fabs(ps) >= opt.output_interval) { ps = 0.0; ppd->copy_to_host(); ppd->print_positions(*pos_f); } if (opt.verbose && (intgr->get_n_passed_step()) % 100 == 0) { print_step_stat(ppd, &opt, intgr, dt, *log_f); } } // Save final conditions to the output file ppd->print_positions(*pos_f); } /* try */ catch (nbody_exception& ex) { cerr << "Error: " << ex.what() << endl; } cout << "Total time: " << time(NULL) - start << " s" << endl; return 0; } #if 0 // -nBodies 1 1 0 10000 0 100000 0 -i RKF78 -a 1.0e-10 -t 1000 -dt 10.0 -p 10 10 10 -o C:\Work\Solaris.Cuda.TestRuns\2MStar_5MJupiter_Disc65-270_01\GPU -f C:\Work\Solaris.Cuda.TestRuns\2MStar_5MJupiter_Disc65-270_01\GPU\nBodies_1_1_0_10000_0_100000_0.txt int main(int argc, const char** argv) { cout << "Solaris.NBody.Cuda.Test main.cu started" << endl; device_query(argc, argv); time_t start = time(NULL); // Integrate the pp_disk ode try { options opt(argc, argv); pp_disk* ppd = opt.create_pp_disk(); integrator* intgr = opt.create_integrator(ppd); ttt_t pp = 0; ttt_t ps = 0; ttt_t dt = 0; ostream* positionsf = 0; ostream* orbelemf = 0; //ostream* collisionsf= 0; int pcount = 0; int ccount = 0; if (!opt.printoutToFile) { positionsf = &cout; orbelemf = &cout; //collisionsf = &cerr; } else { //collisionsf = new 
ofstream(combine_path(opt.printoutDir, "col.txt").c_str()); //positionsf = new ofstream(get_printout_file(opt, pcount++).c_str()); string filename = get_filename_without_ext(opt.filename) + '.' + intgr->get_name() + '.' + (opt.gasDisk == 0 ? "" : "gas.CONSTANT."); string filenameWithExt = filename + get_extension(opt.filename); string path = combine_path(opt.printoutDir, filenameWithExt); //char *c_path = new char[path.length() + 1]; //strcpy(c_path, path.c_str()); positionsf = new ofstream(path.c_str(), ios::app); //filenameWithExt = filename + "oe." + get_extension(opt.filename); //orbelemf = new ofstream(combine_path(opt.printoutDir, filenameWithExt), std::ios::app); } while (ppd->t < opt.stop_time) { if (opt.printout) { if (pp >= opt.printoutPeriod) { pp = 0; } // Start of a print-out period, create new file if necessary if (pp == 0 && intgr->get_n_step() > 0) { var_t avg_dt = (ppd->t - opt.start_time) / intgr->get_n_step(); cout << intgr->get_n_failed_step() << " step(s) failed out of " << intgr->get_n_step() << " steps until " << ppd->t << " [day]\naverage dt: " << setprecision(10) << setw(16) << avg_dt << " [d]" << endl; cerr << setprecision(5) << setw(6) << ((ppd->t - opt.start_time)/opt.stop_time*100) << " %" << endl; } //var_t avg_dt = (ppd->t - opt.start_time) / intgr->get_n_step(); //cout << intgr->get_n_failed_step() << " step(s) failed out of " << intgr->get_n_step() << " steps until " << ppd->t << " [day]\naverage dt: " << setprecision(10) << setw(16) << avg_dt << " [d]" << endl; //cerr << setprecision(5) << setw(6) << ((ppd->t - opt.start_time)/opt.stop_time*100) << " %" << endl; if (0 <= pp && pp <= opt.printoutLength) { if (ps >= opt.printoutStep) { ps = 0; } if (ps == 0) { // Print out positions ppd->copy_to_host(); ppd->print_positions(*positionsf); //pp_disk::h_orbelem_t orbelem = ppd->calculate_orbelem(0); //ppd->print_orbelem(*orbelemf); } } } dt = intgr->step(); cerr << "t: " << setw(15) << (ppd->t) << ", dt: " << setw(15) << dt << " [d]" << 
endl; pp += dt; ps += dt; } delete ppd; delete intgr; delete positionsf; delete orbelemf; } /* try */ catch (nbody_exception& ex) { cerr << "Error: " << ex.what() << endl; } cout << "Total time: " << time(NULL) - start << " s" << endl; return 0; } #endif
363016ed9e47e0809130cc76f55f735e186bd190.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Array with backwards dependencies. Order Violation. Data Race in line 37. Inter Region Data Race. */ #include <stdio.h> // Macro for checking errors in CUDA API calls #define cudaErrorCheck(call) \ do{ \ hipError_t cuErr = call; \ if(hipSuccess != cuErr){ \ printf("CUDA Error - %s:%d: '%s'\n", __FILE__, __LINE__, hipGetErrorString(cuErr));\ exit(0); \ } \ }while(0) // Grid dimension #define B 100 // Iterations per block #define T 512 // Host pointer int *countervar; // Initialization int init(){ for(int i=0; i<B*T; i++){ countervar[i]=0; } return 0; } // Kernel __global__ void count(int *countervar){ for(int i = blockIdx.x * T; i < blockIdx.x * T + T; i++){ if(i!=0){ countervar[i] = countervar[i-1] + 1; } } } // Verifying result int check(){ bool test = false; for(int i=0; i<B*T; i++){ if(countervar[i]!=i){ test = true; } } printf("Memory Access Issue visible: %s\n",test ? "true" : "false"); return 0; } // Main program int main(){ // Device pointer for counter variable int *d_count; // Allocation of host counter variable countervar = (int *) malloc(B*T*sizeof(int)); // Initialization of the counter variable init(); // Allocation of GPU memory cudaErrorCheck( hipMalloc(&d_count, B*T*sizeof(int))); // Copying the counter variable from the host to the device cudaErrorCheck( hipMemcpy(d_count,countervar,B*T*sizeof(int),hipMemcpyHostToDevice)); //Launch Kernel hipLaunchKernelGGL(( count), dim3(B),dim3(1), 0, 0, d_count); // Check for errors in kernel launch (e.g. 
invalid execution configuration paramters) cudaErrorCheck( hipGetLastError()); // Check for errors on the GPU after control is returned to CPU cudaErrorCheck( hipDeviceSynchronize()); // Copying the counter variable from the device to the host cudaErrorCheck( hipMemcpy(countervar,d_count,B*T*sizeof(int),hipMemcpyDeviceToHost)); // Verifying result check(); // Freeing GPU memory cudaErrorCheck( hipFree(d_count)); // Freeing CPU memory free(countervar); return 0; }
363016ed9e47e0809130cc76f55f735e186bd190.cu
/* Array with backwards dependencies. Order Violation. Data Race in line 37. Inter Region Data Race. */ #include <stdio.h> // Macro for checking errors in CUDA API calls #define cudaErrorCheck(call) \ do{ \ cudaError_t cuErr = call; \ if(cudaSuccess != cuErr){ \ printf("CUDA Error - %s:%d: '%s'\n", __FILE__, __LINE__, cudaGetErrorString(cuErr));\ exit(0); \ } \ }while(0) // Grid dimension #define B 100 // Iterations per block #define T 512 // Host pointer int *countervar; // Initialization int init(){ for(int i=0; i<B*T; i++){ countervar[i]=0; } return 0; } // Kernel __global__ void count(int *countervar){ for(int i = blockIdx.x * T; i < blockIdx.x * T + T; i++){ if(i!=0){ countervar[i] = countervar[i-1] + 1; } } } // Verifying result int check(){ bool test = false; for(int i=0; i<B*T; i++){ if(countervar[i]!=i){ test = true; } } printf("Memory Access Issue visible: %s\n",test ? "true" : "false"); return 0; } // Main program int main(){ // Device pointer for counter variable int *d_count; // Allocation of host counter variable countervar = (int *) malloc(B*T*sizeof(int)); // Initialization of the counter variable init(); // Allocation of GPU memory cudaErrorCheck( cudaMalloc(&d_count, B*T*sizeof(int))); // Copying the counter variable from the host to the device cudaErrorCheck( cudaMemcpy(d_count,countervar,B*T*sizeof(int),cudaMemcpyHostToDevice)); //Launch Kernel count<<<B,1>>>(d_count); // Check for errors in kernel launch (e.g. invalid execution configuration paramters) cudaErrorCheck( cudaGetLastError()); // Check for errors on the GPU after control is returned to CPU cudaErrorCheck( cudaDeviceSynchronize()); // Copying the counter variable from the device to the host cudaErrorCheck( cudaMemcpy(countervar,d_count,B*T*sizeof(int),cudaMemcpyDeviceToHost)); // Verifying result check(); // Freeing GPU memory cudaErrorCheck( cudaFree(d_count)); // Freeing CPU memory free(countervar); return 0; }
c629ab5cb6bddfb3bf5adfc362a35d97c0cb4f53.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "ctranslate2/ops/dequantize.h" #include "cuda/helpers.h" namespace ctranslate2 { namespace ops { template <typename InT, typename OutT> struct dequantize_func { __device__ __forceinline__ OutT operator()(float scale, InT x) const { return __fdividef(static_cast<float>(x), scale); } }; template <Device D, typename InT, typename OutT> void Dequantize::dequantize(const StorageView& input, const StorageView& scale, StorageView& output) const { const dim_t depth = input.dim(-1); cuda::binary_transform(scale.data<float>(), input.data<InT>(), output.data<OutT>(), input.size(), dequantize_func<InT, cuda::device_type<OutT>>(), cuda::repeat_vec_depth<dim_t>(depth)); } template void Dequantize::dequantize<Device::CUDA, int8_t, float>(const StorageView&, const StorageView&, StorageView&) const; template void Dequantize::dequantize<Device::CUDA, int8_t, float16_t>(const StorageView&, const StorageView&, StorageView&) const; template <typename Epilogue, typename T> __global__ void dequantize_gemm_output_kernel(const int32_t* c, const float* a_scales, const float* b_scales, const bool transpose_a, const bool transpose_b, const T* bias, const Epilogue& epilogue, T* y, dim_t depth) { // y = c / (expand_dims(a_scales, trans_a ? 0 : 1) * expand_dims(b_scales, trans_b ? 0 : 1) // if bias: y += expand_dims(bias, 0) // y = epilogue(y) const auto add_func = cuda::plus<T>(); const auto rescale_func = dequantize_func<int32_t, T>(); const dim_t i = blockIdx.x; for (dim_t j = threadIdx.x; j < depth; j += blockDim.x) { const dim_t index = i * depth + j; const float scale = a_scales[transpose_a ? j : i] * b_scales[transpose_b ? 
j : i]; T v = rescale_func(scale, c[index]); if (bias) v = add_func(v, bias[j]); y[index] = epilogue(v); } } template <typename T> static void dequantize_gemm_output_kernel_wrapper(const int32_t* c, const float* a_scales, const float* b_scales, const bool transpose_a, const bool transpose_b, const T* bias, const ActivationType* activation_type, T* y, dim_t batch_size, dim_t depth) { const dim_t blocks = ::min(batch_size, cuda::max_blocks); const dim_t threads = ::min(depth, cuda::max_threads); if (!activation_type) { hipLaunchKernelGGL(( dequantize_gemm_output_kernel), dim3(blocks), dim3(threads), 0, cuda::get_cuda_stream(), c, a_scales, b_scales, transpose_a, transpose_b, bias, thrust::identity<T>(), y, depth); } else { switch (*activation_type) { case ActivationType::ReLU: { hipLaunchKernelGGL(( dequantize_gemm_output_kernel), dim3(blocks), dim3(threads), 0, cuda::get_cuda_stream(), c, a_scales, b_scales, transpose_a, transpose_b, bias, cuda::relu_func<T>(), y, depth); break; } case ActivationType::GELU: { hipLaunchKernelGGL(( dequantize_gemm_output_kernel), dim3(blocks), dim3(threads), 0, cuda::get_cuda_stream(), c, a_scales, b_scales, transpose_a, transpose_b, bias, cuda::gelu_func<T>(), y, depth); break; } } } } template <Device D, typename T> void Dequantize::dequantize_gemm_output(const StorageView& c, const StorageView& a_scale, const StorageView& b_scale, const bool transpose_a, const bool transpose_b, const StorageView* bias, StorageView& y) const { const dim_t batch_size = a_scale.size(); const dim_t depth = c.dim(-1); dequantize_gemm_output_kernel_wrapper( c.data<int32_t>(), a_scale.data<float>(), b_scale.data<float>(), transpose_a, transpose_b, bias ? 
cuda::device_cast<T>(bias->data<T>()) : nullptr, _activation_type, cuda::device_cast<T>(y.data<T>()), batch_size, depth); } template void Dequantize::dequantize_gemm_output<Device::CUDA, float>(const StorageView&, const StorageView&, const StorageView&, const bool, const bool, const StorageView*, StorageView&) const; template void Dequantize::dequantize_gemm_output<Device::CUDA, float16_t>(const StorageView&, const StorageView&, const StorageView&, const bool, const bool, const StorageView*, StorageView&) const; } }
c629ab5cb6bddfb3bf5adfc362a35d97c0cb4f53.cu
#include "ctranslate2/ops/dequantize.h" #include "cuda/helpers.h" namespace ctranslate2 { namespace ops { template <typename InT, typename OutT> struct dequantize_func { __device__ __forceinline__ OutT operator()(float scale, InT x) const { return __fdividef(static_cast<float>(x), scale); } }; template <Device D, typename InT, typename OutT> void Dequantize::dequantize(const StorageView& input, const StorageView& scale, StorageView& output) const { const dim_t depth = input.dim(-1); cuda::binary_transform(scale.data<float>(), input.data<InT>(), output.data<OutT>(), input.size(), dequantize_func<InT, cuda::device_type<OutT>>(), cuda::repeat_vec_depth<dim_t>(depth)); } template void Dequantize::dequantize<Device::CUDA, int8_t, float>(const StorageView&, const StorageView&, StorageView&) const; template void Dequantize::dequantize<Device::CUDA, int8_t, float16_t>(const StorageView&, const StorageView&, StorageView&) const; template <typename Epilogue, typename T> __global__ void dequantize_gemm_output_kernel(const int32_t* c, const float* a_scales, const float* b_scales, const bool transpose_a, const bool transpose_b, const T* bias, const Epilogue& epilogue, T* y, dim_t depth) { // y = c / (expand_dims(a_scales, trans_a ? 0 : 1) * expand_dims(b_scales, trans_b ? 0 : 1) // if bias: y += expand_dims(bias, 0) // y = epilogue(y) const auto add_func = cuda::plus<T>(); const auto rescale_func = dequantize_func<int32_t, T>(); const dim_t i = blockIdx.x; for (dim_t j = threadIdx.x; j < depth; j += blockDim.x) { const dim_t index = i * depth + j; const float scale = a_scales[transpose_a ? j : i] * b_scales[transpose_b ? 
j : i]; T v = rescale_func(scale, c[index]); if (bias) v = add_func(v, bias[j]); y[index] = epilogue(v); } } template <typename T> static void dequantize_gemm_output_kernel_wrapper(const int32_t* c, const float* a_scales, const float* b_scales, const bool transpose_a, const bool transpose_b, const T* bias, const ActivationType* activation_type, T* y, dim_t batch_size, dim_t depth) { const dim_t blocks = std::min(batch_size, cuda::max_blocks); const dim_t threads = std::min(depth, cuda::max_threads); if (!activation_type) { dequantize_gemm_output_kernel<<<blocks, threads, 0, cuda::get_cuda_stream()>>>( c, a_scales, b_scales, transpose_a, transpose_b, bias, thrust::identity<T>(), y, depth); } else { switch (*activation_type) { case ActivationType::ReLU: { dequantize_gemm_output_kernel<<<blocks, threads, 0, cuda::get_cuda_stream()>>>( c, a_scales, b_scales, transpose_a, transpose_b, bias, cuda::relu_func<T>(), y, depth); break; } case ActivationType::GELU: { dequantize_gemm_output_kernel<<<blocks, threads, 0, cuda::get_cuda_stream()>>>( c, a_scales, b_scales, transpose_a, transpose_b, bias, cuda::gelu_func<T>(), y, depth); break; } } } } template <Device D, typename T> void Dequantize::dequantize_gemm_output(const StorageView& c, const StorageView& a_scale, const StorageView& b_scale, const bool transpose_a, const bool transpose_b, const StorageView* bias, StorageView& y) const { const dim_t batch_size = a_scale.size(); const dim_t depth = c.dim(-1); dequantize_gemm_output_kernel_wrapper( c.data<int32_t>(), a_scale.data<float>(), b_scale.data<float>(), transpose_a, transpose_b, bias ? 
cuda::device_cast<T>(bias->data<T>()) : nullptr, _activation_type, cuda::device_cast<T>(y.data<T>()), batch_size, depth); } template void Dequantize::dequantize_gemm_output<Device::CUDA, float>(const StorageView&, const StorageView&, const StorageView&, const bool, const bool, const StorageView*, StorageView&) const; template void Dequantize::dequantize_gemm_output<Device::CUDA, float16_t>(const StorageView&, const StorageView&, const StorageView&, const bool, const bool, const StorageView*, StorageView&) const; } }
638f693062dd8a371eed54f8bc72487ff38256c9.hip
// !!! This is a file automatically generated by hipify!!! /** * NASA Advanced Supercomputing Parallel Benchmarks C++ * * based on NPB 3.3.1 * * original version and technical report: * http://www.nas.nasa.gov/Software/NPB/ * * Authors: * E. Barszcz * P. Frederickson * A. Woo * M. Yarrow * * C++ version: * Dalvan Griebler <dalvangriebler@gmail.com> * Jnior Lff <loffjh@gmail.com> * Gabriell Araujo <hexenoften@gmail.com> * * CUDA version: * Gabriell Araujo <hexenoften@gmail.com> */ /* NO CAST VERSION 2 */ #include <hip/hip_runtime.h> #include "../common/npb-CPP.hpp" #include "npbparams.hpp" #define NM (2+(1<<LM)) /* actual dimension including ghost cells for communications */ #define NV (ONE*(2+(1<<NDIM1))*(2+(1<<NDIM2))*(2+(1<<NDIM3))) /* size of rhs array */ #define NR (((NV+NM*NM+5*NM+7*LM+6)/7)*8) /* size of residual array */ #define MAXLEVEL (LT_DEFAULT+1) /* maximum number of levels */ #define M (NM+1) /* set at m=1024, can handle cases up to 1024^3 case */ #define MM (10) #define A (pow(5.0,13.0)) #define X (314159265.0) #define T_INIT (0) #define T_BENCH (1) #define T_MG3P (2) #define T_PSINV (3) #define T_RESID (4) #define T_RESID2 (5) #define T_RPRJ3 (6) #define T_INTERP (7) #define T_NORM2 (8) #define T_COMM3 (9) #define T_LAST (10) #define THREADS_PER_BLOCK (1024) //1024 #define THREADS_PER_BLOCK_ON_NORM2U3 (128) //128 #define THREADS_PER_BLOCK_ON_COMM3 (32) //32 #define THREADS_PER_BLOCK_ON_ZERO3 (1024) //1024 //#define SHARED_2_M (2*M*sizeof(double)) //#define SHARED_3_M (3*M*sizeof(double)) //#define SHARED_2_NORM (2*THREADS_PER_BLOCK_ON_NORM2U3*sizeof(double)) /* global variables */ #if defined(DO_NOT_ALLOCATE_ARRAYS_WITH_DYNAMIC_MEMORY_AND_AS_SINGLE_DIMENSION) static int nx[MAXLEVEL+1]; static int ny[MAXLEVEL+1]; static int nz[MAXLEVEL+1]; static int m1[MAXLEVEL+1]; static int m2[MAXLEVEL+1]; static int m3[MAXLEVEL+1]; static int ir[MAXLEVEL+1]; static int debug_vec[8]; static double u[NR]; static double v[NV]; static double r[NR]; #else static int 
(*nx)=(int*)malloc(sizeof(int)*(MAXLEVEL+1)); static int (*ny)=(int*)malloc(sizeof(int)*(MAXLEVEL+1)); static int (*nz)=(int*)malloc(sizeof(int)*(MAXLEVEL+1)); static int (*m1)=(int*)malloc(sizeof(int)*(MAXLEVEL+1)); static int (*m2)=(int*)malloc(sizeof(int)*(MAXLEVEL+1)); static int (*m3)=(int*)malloc(sizeof(int)*(MAXLEVEL+1)); static int (*ir)=(int*)malloc(sizeof(int)*(MAXLEVEL+1)); static int (*debug_vec)=(int*)malloc(sizeof(int)*(8)); static double (*u)=(double*)malloc(sizeof(double)*(NR)); static double (*v)=(double*)malloc(sizeof(double)*(NV)); static double (*r)=(double*)malloc(sizeof(double)*(NR)); #endif static int is1, is2, is3, ie1, ie2, ie3, lt, lb; static boolean timeron; /* gpu variables */ int threads_per_block; int blocks_per_grid; int amount_of_work; size_t size_a_device; size_t size_c_device; size_t size_u_device; size_t size_v_device; size_t size_r_device; double* a_device; double* c_device; double* u_device; double* v_device; double* r_device; //extern __shared__ double extern_share_data[]; /* function prototypes */ static void bubble(double ten[][MM], int j1[][MM], int j2[][MM], int j3[][MM], int m, int ind); static void comm3(void* pointer_u, int n1, int n2, int n3, int kk); static void comm3_gpu(double* u_device, int n1, int n2, int n3, int kk); __global__ void comm3_gpu_kernel_1(double* u, int n1, int n2, int n3, int amount_of_work); __global__ void comm3_gpu_kernel_2(double* u, int n1, int n2, int n3, int amount_of_work); __global__ void comm3_gpu_kernel_3(double* u, int n1, int n2, int n3, int amount_of_work); static void interp(void* pointer_z, int mm1, int mm2, int mm3, void* pointer_u, int n1, int n2, int n3, int k); static void interp_gpu(double* z_device, int mm1, int mm2, int mm3, double* u_device, int n1, int n2, int n3, int k); __global__ void interp_gpu_kernel(double* base_z, double* base_u, int mm1, int mm2, int mm3, int n1, int n2, int n3, int amount_of_work); static void mg3P(double u[], double v[], double r[], double a[4], 
double c[4], int n1, int n2, int n3, int k); static void mg3P_gpu(double* u_device, double* v_device, double* r_device, double a[4], double c[4], int n1, int n2, int n3, int k); static void norm2u3(void* pointer_r, int n1, int n2, int n3, double* rnm2, double* rnmu, int nx, int ny, int nz); static void norm2u3_gpu(double* r_device, int n1, int n2, int n3, double* rnm2, double* rnmu, int nx, int ny, int nz); __global__ void norm2u3_gpu_kernel(double* r, const int n1, const int n2, const int n3, double* res_sum, double* res_max, int number_of_blocks, int amount_of_work); static double power(double a, int n); static void psinv(void* pointer_r, void* pointer_u, int n1, int n2, int n3, double c[4], int k); static void psinv_gpu(double* r_device, double* u_device, int n1, int n2, int n3, double* c_device, int k); __global__ void psinv_gpu_kernel(double* r, double* u, double* c, int n1, int n2, int n3, int amount_of_work); static void release_gpu(); static void rep_nrm(void* pointer_u, int n1, int n2, int n3, char* title, int kk); static void resid(void* pointer_u, void* pointer_v, void* pointer_r, int n1, int n2, int n3, double a[4], int k); static void resid_gpu(double* u_device, double* v_device, double* r_device, int n1, int n2, int n3, double* a_device, int k); __global__ void resid_gpu_kernel(double* r, double* u, double* v, double* a, int n1, int n2, int n3, int amount_of_work); static void rprj3(void* pointer_r, int m1k, int m2k, int m3k, void* pointer_s, int m1j, int m2j, int m3j, int k); static void rprj3_gpu(double* r_device, int m1k, int m2k, int m3k, double* s_device, int m1j, int m2j, int m3j, int k); __global__ void rprj3_gpu_kernel(double* base_r, double* base_s, int m1k, int m2k, int m3k, int m1j, int m2j, int m3j, int d1, int d2, int d3, int amount_of_work); static void setup(int* n1, int* n2, int* n3, int k); static void setup_gpu(double* a, double* c); static void showall(void* pointer_z, int n1, int n2, int n3); static void zero3_gpu(double* z_device, 
int n1, int n2, int n3); __global__ void zero3_gpu_kernel(double* z, int n1, int n2, int n3, int amount_of_work); static void zero3(void* pointer_z, int n1, int n2, int n3); static void zran3(void* pointer_z, int n1, int n2, int n3, int nx, int ny, int k); /* mg */ int main(int argc, char** argv){ #if defined(DO_NOT_ALLOCATE_ARRAYS_WITH_DYNAMIC_MEMORY_AND_AS_SINGLE_DIMENSION) printf(" DO_NOT_ALLOCATE_ARRAYS_WITH_DYNAMIC_MEMORY_AND_AS_SINGLE_DIMENSION mode on\n"); #endif /* * ------------------------------------------------------------------------- * k is the current level. it is passed down through subroutine args * and is not global. it is the current iteration * ------------------------------------------------------------------------- */ int k, it; double t, tinit, mflops; double a[4], c[4]; double rnm2, rnmu, epsilon; int n1, n2, n3, nit; double nn, verify_value, err; boolean verified; char class_npb; int i; char* t_names[T_LAST]; double tmax; for(i=T_INIT; i<T_LAST; i++){ timer_clear(i); } timer_start(T_INIT); /* * ---------------------------------------------------------------------- * read in and broadcast input data * ---------------------------------------------------------------------- */ FILE* fp; if((fp = fopen("timer.flag", "r")) != NULL){ timeron = TRUE; t_names[T_INIT] = (char*) "init"; t_names[T_BENCH] = (char*) "benchmk"; t_names[T_MG3P] = (char*) "mg3P"; t_names[T_PSINV] = (char*) "psinv"; t_names[T_RESID] = (char*) "resid"; t_names[T_RPRJ3] = (char*) "rprj3"; t_names[T_INTERP] = (char*) "interp"; t_names[T_NORM2] = (char*) "norm2"; t_names[T_COMM3] = (char*) "comm3"; fclose(fp); }else{ timeron = FALSE; } fp = fopen("mg.input", "r"); if(fp != NULL){ printf(" Reading from input file mg.input\n"); if(fscanf(fp, "%d", &lt) != 1){ printf(" Error in reading elements\n"); exit(1); } while(fgetc(fp) != '\n'); if(fscanf(fp, "%d%d%d", &nx[lt], &ny[lt], &nz[lt]) != 3){ printf(" Error in reading elements\n"); exit(1); } while(fgetc(fp) != '\n'); if(fscanf(fp, 
"%d", &nit) != 1){ printf(" Error in reading elements\n"); exit(1); } while(fgetc(fp) != '\n'); for(i = 0; i <= 7; i++) { if(fscanf(fp, "%d", &debug_vec[i]) != 1){ printf(" Error in reading elements\n"); exit(1); } } fclose(fp); }else{ printf(" No input file. Using compiled defaults\n"); lt = LT_DEFAULT; nit = NIT_DEFAULT; nx[lt] = NX_DEFAULT; ny[lt] = NY_DEFAULT; nz[lt] = NZ_DEFAULT; for(i = 0; i <= 7; i++){ debug_vec[i] = DEBUG_DEFAULT; } } if((nx[lt] != ny[lt]) || (nx[lt] != nz[lt])){ class_npb = 'U'; }else if(nx[lt] == 32 && nit == 4){ class_npb = 'S'; }else if(nx[lt] == 128 && nit == 4){ class_npb = 'W'; }else if(nx[lt] == 256 && nit == 4){ class_npb = 'A'; }else if(nx[lt] == 256 && nit == 20){ class_npb = 'B'; }else if(nx[lt] == 512 && nit == 20){ class_npb = 'C'; }else if(nx[lt] == 1024 && nit == 50){ class_npb = 'D'; }else if(nx[lt] == 2048 && nit == 50){ class_npb = 'E'; }else{ class_npb = 'U'; } /* * --------------------------------------------------------------------- * use these for debug info: * --------------------------------------------------------------------- * debug_vec(0) = 1 !=> report all norms * debug_vec(1) = 1 !=> some setup information * debug_vec(1) = 2 !=> more setup information * debug_vec(2) = k => at level k or below, show result of resid * debug_vec(3) = k => at level k or below, show result of psinv * debug_vec(4) = k => at level k or below, show result of rprj * debug_vec(5) = k => at level k or below, show result of interp * debug_vec(6) = 1 => (unused) * debug_vec(7) = 1 => (unused) * --------------------------------------------------------------------- */ a[0] = -8.0/3.0; a[1] = 0.0; a[2] = 1.0/6.0; a[3] = 1.0/12.0; if(class_npb == 'A' || class_npb == 'S' || class_npb =='W'){ /* coefficients for the s(a) smoother */ c[0] = -3.0/8.0; c[1] = +1.0/32.0; c[2] = -1.0/64.0; c[3] = 0.0; }else{ /* coefficients for the s(b) smoother */ c[0] = -3.0/17.0; c[1] = +1.0/33.0; c[2] = -1.0/61.0; c[3] = 0.0; } lb = 1; k = lt; 
setup(&n1,&n2,&n3,k); zero3(u,n1,n2,n3); zran3(v,n1,n2,n3,nx[lt],ny[lt],k); norm2u3(v,n1,n2,n3,&rnm2,&rnmu,nx[lt],ny[lt],nz[lt]); printf("\n\n NAS Parallel Benchmarks 4.1 CUDA C++ version - MG Benchmark\n\n"); printf(" Size: %3dx%3dx%3d (class_npb %1c)\n", nx[lt], ny[lt], nz[lt], class_npb); printf(" Iterations: %3d\n", nit); resid(u,v,r,n1,n2,n3,a,k); norm2u3(r,n1,n2,n3,&rnm2,&rnmu,nx[lt],ny[lt],nz[lt]); /* * --------------------------------------------------------------------- * one iteration for startup * --------------------------------------------------------------------- */ mg3P(u,v,r,a,c,n1,n2,n3,k); resid(u,v,r,n1,n2,n3,a,k); setup(&n1,&n2,&n3,k); zero3(u,n1,n2,n3); zran3(v,n1,n2,n3,nx[lt],ny[lt],k); timer_stop(T_INIT); tinit = timer_read(T_INIT); printf(" Initialization time: %15.3f seconds\n", tinit); for(i=T_BENCH; i<T_LAST; i++){ timer_clear(i); } setup_gpu(a,c); timer_start(T_BENCH); if(timeron){timer_start(T_RESID2);} resid_gpu(u_device,v_device,r_device,n1,n2,n3,a_device,k); if(timeron){timer_stop(T_RESID2);} norm2u3_gpu(r_device,n1,n2,n3,&rnm2,&rnmu,nx[lt],ny[lt],nz[lt]); for(it = 1; it <= nit; it++){ //if((it==1)||(it==nit)||((it%5)==0)){printf(" iter %3d\n",it);} if(timeron){timer_start(T_MG3P);} mg3P_gpu(u_device,v_device,r_device,a_device,c_device,n1,n2,n3,k); if(timeron){timer_stop(T_MG3P);} if(timeron){timer_start(T_RESID2);} resid_gpu(u_device,v_device,r_device,n1,n2,n3,a_device,k); if(timeron){timer_stop(T_RESID2);} } norm2u3_gpu(r_device,n1,n2,n3,&rnm2,&rnmu,nx[lt],ny[lt],nz[lt]); timer_stop(T_BENCH); t = timer_read(T_BENCH); verified = FALSE; verify_value = 0.0; printf(" Benchmark completed\n"); epsilon = 1.0e-8; if(class_npb != 'U'){ if(class_npb == 'S'){ verify_value = 0.5307707005734e-04; }else if(class_npb == 'W'){ verify_value = 0.6467329375339e-05; }else if(class_npb == 'A'){ verify_value = 0.2433365309069e-05; }else if(class_npb == 'B'){ verify_value = 0.1800564401355e-05; }else if(class_npb == 'C'){ verify_value = 
0.5706732285740e-06; }else if(class_npb == 'D'){ verify_value = 0.1583275060440e-09; }else if(class_npb == 'E'){ verify_value = 0.8157592357404e-10; } err = fabs(rnm2-verify_value) / verify_value; if(err <= epsilon){ verified = TRUE; printf(" VERIFICATION SUCCESSFUL\n"); printf(" L2 Norm is %20.13e\n", rnm2); printf(" Error is %20.13e\n", err); }else{ verified = FALSE; printf(" VERIFICATION FAILED\n"); printf(" L2 Norm is %20.13e\n", rnm2); printf(" The correct L2 Norm is %20.13e\n", verify_value); } }else{ verified = FALSE; printf(" Problem size unknown\n"); printf(" NO VERIFICATION PERFORMED\n"); } nn = 1.0*nx[lt]*ny[lt]*nz[lt]; if(t!=0.0){ mflops = 58.0*nit*nn*1.0e-6/t; }else{ mflops = 0.0; } c_print_results((char*)"MG", class_npb, nx[lt], ny[lt], nz[lt], nit, t, mflops, (char*)" floating point", verified, (char*)NPBVERSION, (char*)COMPILETIME, (char*)CS1, (char*)CS2, (char*)CS3, (char*)CS4, (char*)CS5, (char*)CS6, (char*)CS7); /* * --------------------------------------------------------------------- * more timers * --------------------------------------------------------------------- */ if(timeron){ tmax = timer_read(T_BENCH); if(tmax==0.0){tmax=1.0;} printf(" SECTION Time (secs)\n"); for(i=T_BENCH; i<T_LAST; i++){ t = timer_read(i); if(i==T_RESID2){ t = timer_read(T_RESID) - t; printf(" --> %8s:%9.3f (%6.2f%%)\n", "mg-resid", t, t*100.0/tmax); }else{ printf(" %-8s:%9.3f (%6.2f%%)\n", t_names[i], t, t*100.0/tmax); } } } release_gpu(); return 0; } /* * --------------------------------------------------------------------- * bubble does a bubble sort in direction dir * --------------------------------------------------------------------- */ static void bubble(double ten[][MM], int j1[][MM], int j2[][MM], int j3[][MM], int m, int ind){ double temp; int i, j_temp; if(ind == 1){ for(i = 0; i < m-1; i++){ if(ten[ind][i] > ten[ind][i+1]){ temp = ten[ind][i+1]; ten[ind][i+1] = ten[ind][i]; ten[ind][i] = temp; j_temp = j1[ind][i+1]; j1[ind][i+1] = j1[ind][i]; j1[ind][i] 
= j_temp; j_temp = j2[ind][i+1]; j2[ind][i+1] = j2[ind][i]; j2[ind][i] = j_temp; j_temp = j3[ind][i+1]; j3[ind][i+1] = j3[ind][i]; j3[ind][i] = j_temp; }else{ return; } } }else{ for(i = 0; i < m-1; i++){ if(ten[ind][i] < ten[ind][i+1]){ temp = ten[ind][i+1]; ten[ind][i+1] = ten[ind][i]; ten[ind][i] = temp; j_temp = j1[ind][i+1]; j1[ind][i+1] = j1[ind][i]; j1[ind][i] = j_temp; j_temp = j2[ind][i+1]; j2[ind][i+1] = j2[ind][i]; j2[ind][i] = j_temp; j_temp = j3[ind][i+1]; j3[ind][i+1] = j3[ind][i]; j3[ind][i] = j_temp; }else{ return; } } } } /* * --------------------------------------------------------------------- * comm3 organizes the communication on all borders * --------------------------------------------------------------------- */ static void comm3(void* pointer_u, int n1, int n2, int n3, int kk){ //double (*u)[n2][n1] = (double (*)[n2][n1])pointer_u; double* pointer_aux_u = (double*)pointer_u; int i1, i2, i3; if(timeron){timer_start(T_COMM3);} /* axis = 1 */ for(i3 = 1; i3 < n3-1; i3++){ for(i2 = 1; i2 < n2-1; i2++){ //u[i3][i2][0] = u[i3][i2][n1-2]; pointer_aux_u[(i3)*n1*n2 + (i2)*n1 + (0)] = u[(i3)*n1*n2 + (i2)*n1 + (n1-2)]; //u[i3][i2][n1-1] = u[i3][i2][1]; pointer_aux_u[(i3)*n1*n2 + (i2)*n1 + (n1-1)] = pointer_aux_u[(i3)*n1*n2 + (i2)*n1 + (1)]; } } /* axis = 2 */ for(i3 = 1; i3 < n3-1; i3++){ for(i1 = 0; i1 < n1; i1++){ //u[i3][0][i1] = u[i3][n2-2][i1]; pointer_aux_u[(i3)*n1*n2 + (0)*n1 + (i1)] = pointer_aux_u[(i3)*n1*n2 + (n2-2)*n1 + (i1)]; //u[i3][n2-1][i1] = u[i3][1][i1]; pointer_aux_u[(i3)*n1*n2 + (n2-1)*n1 + (i1)] = pointer_aux_u[(i3)*n1*n2 + (1)*n1 + (i1)]; } } /* axis = 3 */ for(i2 = 0; i2 < n2; i2++){ for(i1 = 0; i1 < n1; i1++){ //u[0][i2][i1] = u[n3-2][i2][i1]; pointer_aux_u[(0)*n1*n2 + (i2)*n1 + (i1)] = pointer_aux_u[(n3-2)*n1*n2 + (i2)*n1 + (i1)]; //u[n3-1][i2][i1] = u[1][i2][i1]; pointer_aux_u[(n3-1)*n1*n2 + (i2)*n1 + (i1)] = pointer_aux_u[(1)*n1*n2 + (i2)*n1 + (i1)]; } } if(timeron){timer_stop(T_COMM3);} } static void comm3_gpu(double* 
u_device, int n1, int n2, int n3, int kk){
	/* GPU counterpart of comm3: wraps the grid faces with the opposite
	   interior planes, one kernel per axis, synchronizing after each launch */
	if(timeron){timer_start(T_COMM3);}
	/* axis = 1: one block per interior i3 plane; threads stride over i2 */
	int threads_per_block = THREADS_PER_BLOCK_ON_COMM3;
	int amount_of_work = (n3-2) * THREADS_PER_BLOCK_ON_COMM3;
	int blocks_per_grid = (ceil((double)(amount_of_work)/(double)(threads_per_block)));
	hipLaunchKernelGGL(( comm3_gpu_kernel_1), dim3(blocks_per_grid),dim3(threads_per_block), 0, 0, u_device, n1, n2, n3, amount_of_work);
	hipDeviceSynchronize();
	/* axis = 2: one block per interior i3 plane; threads stride over i1 */
	threads_per_block = THREADS_PER_BLOCK_ON_COMM3;
	amount_of_work = (n3-2) * THREADS_PER_BLOCK_ON_COMM3;
	blocks_per_grid = (ceil((double)(amount_of_work)/(double)(threads_per_block)));
	hipLaunchKernelGGL(( comm3_gpu_kernel_2), dim3(blocks_per_grid),dim3(threads_per_block), 0, 0, u_device, n1, n2, n3, amount_of_work);
	hipDeviceSynchronize();
	/* axis = 3: one block per i2 row (including borders); threads stride over i1 */
	threads_per_block = THREADS_PER_BLOCK_ON_COMM3;
	amount_of_work = n2 * THREADS_PER_BLOCK_ON_COMM3;
	blocks_per_grid = (ceil((double)(amount_of_work)/(double)(threads_per_block)));
	hipLaunchKernelGGL(( comm3_gpu_kernel_3), dim3(blocks_per_grid),dim3(threads_per_block), 0, 0, u_device, n1, n2, n3, amount_of_work);
	hipDeviceSynchronize();
	if(timeron){timer_stop(T_COMM3);}
}
/* copies the i1=0 / i1=n1-1 faces from the opposite interior planes;
   blockIdx.x selects i3 in [1, n3-2], threads cover i2 in steps of blockDim */
__global__ void comm3_gpu_kernel_1(double* u, int n1, int n2, int n3, int amount_of_work){
	int check=blockIdx.x*blockDim.x+threadIdx.x;
	if(check>=amount_of_work){return;}
	int i3=blockIdx.x+1;
	int i2=threadIdx.x+1;
	while(i2<n2-1){
		u[i3*n2*n1+i2*n1+0]=u[i3*n2*n1+i2*n1+n1-2];
		u[i3*n2*n1+i2*n1+n1-1]=u[i3*n2*n1+i2*n1+1];
		i2+=THREADS_PER_BLOCK_ON_COMM3;
	}
}
/* copies the i2=0 / i2=n2-1 faces from the opposite interior planes;
   blockIdx.x selects i3 in [1, n3-2], threads cover the full i1 range */
__global__ void comm3_gpu_kernel_2(double* u, int n1, int n2, int n3, int amount_of_work){
	int check=blockIdx.x*blockDim.x+threadIdx.x;
	if(check>=amount_of_work){return;}
	int i3=blockIdx.x + 1;
	int i1=threadIdx.x;
	while(i1<n1){
		u[i3*n2*n1+0*n1+i1]=u[i3*n2*n1+(n2-2)*n1+i1];
		u[i3*n2*n1+(n2-1)*n1+i1]=u[i3*n2*n1+1*n1+i1];
		i1+=THREADS_PER_BLOCK_ON_COMM3;
	}
}
/* copies the i3=0 / i3=n3-1 faces from the opposite interior planes */
__global__ void comm3_gpu_kernel_3(double* u, int n1, int n2, int n3, int amount_of_work){ int check=blockIdx.x*blockDim.x+threadIdx.x;
if(check>=amount_of_work){return;} int i2=blockIdx.x; int i1=threadIdx.x; while(i1<n1){ u[0*n2*n1+i2*n1+i1]=u[(n3-2)*n2*n1+i2*n1+i1]; u[(n3-1)*n2*n1+i2*n1+i1]=u[1*n2*n1+i2*n1+i1]; i1+=THREADS_PER_BLOCK_ON_COMM3; } } /* * -------------------------------------------------------------------- * interp adds the trilinear interpolation of the correction * from the coarser grid to the current approximation: u = u + Qu' * * observe that this implementation costs 16A + 4M, where * A and M denote the costs of addition and multiplication. * note that this vectorizes, and is also fine for cache * based machines. vector machines may get slightly better * performance however, with 8 separate "do i1" loops, rather than 4. * -------------------------------------------------------------------- */ static void interp(void* pointer_z, int mm1, int mm2, int mm3, void* pointer_u, int n1, int n2, int n3, int k){ //double (*z)[mm2][mm1] = (double (*)[mm2][mm1])pointer_z; //double (*u)[n2][n1] = (double (*)[n2][n1])pointer_u; double* pointer_aux_z = (double*)pointer_z; double* pointer_aux_u = (double*)pointer_u; int i3, i2, i1, d1, d2, d3, t1, t2, t3; /* * -------------------------------------------------------------------- * note that m = 1037 in globals.h but for this only need to be * 535 to handle up to 1024^3 * integer m * parameter( m=535 ) * -------------------------------------------------------------------- */ double z1[M], z2[M], z3[M]; if(timeron){timer_start(T_INTERP);} if(n1 != 3 && n2 != 3 && n3 != 3){ for(i3 = 0; i3 < mm3-1; i3++){ for(i2 = 0; i2 < mm2-1; i2++){ for(i1 = 0; i1 < mm1; i1++){ //z1[i1] = z[i3][i2+1][i1] + z[i3][i2][i1]; z1[i1] = pointer_aux_z[(i3)*mm1*mm2 + (i2+1)*mm1 + (i1)] + pointer_aux_z[(i3)*mm1*mm2 + (i2)*mm1 + (i1)]; //z2[i1] = z[i3+1][i2][i1] + z[i3][i2][i1]; z2[i1] = pointer_aux_z[(i3+1)*mm1*mm2 + (i2)*mm1 + (i1)] + pointer_aux_z[(i3)*mm1*mm2 + (i2)*mm1 + (i1)]; //z3[i1] = z[i3+1][i2+1][i1] + z[i3+1][i2][i1] + z1[i1]; z3[i1] = 
pointer_aux_z[(i3+1)*mm1*mm2 + (i2+1)*mm1 + (i1)] + pointer_aux_z[(i3+1)*mm1*mm2 + (i2)*mm1 + (i1)] + z1[i1]; } for(i1 = 0; i1 < mm1-1; i1++){ //u[2*i3][2*i2][2*i1] = u[2*i3][2*i2][2*i1] + z[i3][i2][i1]; pointer_aux_u[(2*i3)*n1*n2 +(2*i2)*n1 + (2*i1)] = pointer_aux_u[(2*i3)*n1*n2 +(2*i2)*n1 + (2*i1)] + pointer_aux_z[(i3)*mm1*mm2 + (i2)*mm1 + (i1)]; //u[2*i3][2*i2][2*i1+1] = u[2*i3][2*i2][2*i1+1] + 0.5 * (z[i3][i2][i1+1] + z[i3][i2][i1]); pointer_aux_u[(2*i3)*n1*n2 + (2*i2)*n1 + (2*i1+1)] = pointer_aux_u[(2*i3)*n1*n2 + (2*i2)*n1 + (2*i1+1)] + 0.5 * (pointer_aux_z[(i3)*mm1*mm2 + (i2)*mm1 + (i1+1)] + pointer_aux_z[(i3)*mm1*mm2 + (i2)*mm1 + (i1)]); } for(i1 = 0; i1 < mm1-1; i1++){ //u[2*i3][2*i2+1][2*i1] = u[2*i3][2*i2+1][2*i1] + 0.5 * z1[i1]; pointer_aux_u[(2*i3)*n1*n2 + (2*i2+1)*n1 + (2*i1)] = pointer_aux_u[(2*i3)*n1*n2 + (2*i2+1)*n1 + (2*i1)] + 0.5 * z1[i1]; //u[2*i3][2*i2+1][2*i1+1] = u[2*i3][2*i2+1][2*i1+1] + 0.25 * ( z1[i1] + z1[i1+1] ); pointer_aux_u[(2*i3)*n1*n2 + (2*i2+1)*n1 + (2*i1+1)] = pointer_aux_u[(2*i3)*n1*n2 + (2*i2+1)*n1 + (2*i1+1)] + 0.25 * ( z1[i1] + z1[i1+1] ); } for(i1 = 0; i1 < mm1-1; i1++){ //u[2*i3+1][2*i2][2*i1] = u[2*i3+1][2*i2][2*i1] + 0.5 * z2[i1]; pointer_aux_u[(2*i3+1)*n1*n2 + (2*i2)*n1 + (2*i1)] = pointer_aux_u[(2*i3+1)*n1*n2 + (2*i2)*n1 + (2*i1)] + 0.5 * z2[i1]; //u[2*i3+1][2*i2][2*i1+1] = u[2*i3+1][2*i2][2*i1+1] + 0.25 * ( z2[i1] + z2[i1+1] ); pointer_aux_u[(2*i3+1)*n1*n2 + (2*i2)*n1 + (2*i1+1)] = pointer_aux_u[(2*i3+1)*n1*n2 + (2*i2)*n1 + (2*i1+1)] + 0.25 * ( z2[i1] + z2[i1+1] ); } for(i1 = 0; i1 < mm1-1; i1++){ //u[2*i3+1][2*i2+1][2*i1] = u[2*i3+1][2*i2+1][2*i1] + 0.25 * z3[i1]; pointer_aux_u[(2*i3+1)*n1*n2 + (2*i2+1)*n1 + (2*i1)] = pointer_aux_u[(2*i3+1)*n1*n2 + (2*i2+1)*n1 + (2*i1)] + 0.25 * z3[i1]; //u[2*i3+1][2*i2+1][2*i1+1] = u[2*i3+1][2*i2+1][2*i1+1] + 0.125 * ( z3[i1] + z3[i1+1] ); pointer_aux_u[(2*i3+1)*n1*n2 + (2*i2+1)*n1 + (2*i1+1)] = pointer_aux_u[(2*i3+1)*n1*n2 + (2*i2+1)*n1 + (2*i1+1)] + 0.125 * ( z3[i1] + z3[i1+1] ); } } 
} }else{ if(n1 == 3){ d1 = 2; t1 = 1; }else{ d1 = 1; t1 = 0; } if(n2 == 3){ d2 = 2; t2 = 1; }else{ d2 = 1; t2 = 0; } if(n3 == 3){ d3 = 2; t3 = 1; }else{ d3 = 1; t3 = 0; } for(i3 = d3; i3 <= mm3-1; i3++){ for(i2 = d2; i2 <= mm2-1; i2++){ for(i1 = d1; i1 <= mm1-1; i1++){ //u[2*i3-d3-1][2*i2-d2-1][2*i1-d1-1] = u[2*i3-d3-1][2*i2-d2-1][2*i1-d1-1] + z[i3-1][i2-1][i1-1]; pointer_aux_u[(2*i3-d3-1)*n1*n2 + (2*i2-d2-1)*n1 + (2*i1-d1-1)] = pointer_aux_u[(2*i3-d3-1)*n1*n2 + (2*i2-d2-1)*n1 + (2*i1-d1-1)] + pointer_aux_z[(i3-1)*mm1*mm2 + (i2-1)*mm1 + (i1-1)]; } for(i1 = 1; i1 <= mm1-1; i1++){ //u[2*i3-d3-1][2*i2-d2-1][2*i1-t1-1] = u[2*i3-d3-1][2*i2-d2-1][2*i1-t1-1] + 0.5 * (z[i3-1][i2-1][i1] + z[i3-1][i2-1][i1-1]); pointer_aux_u[(2*i3-d3-1)*n1*n2 + (2*i2-d2-1)*n1 + (2*i1-t1-1)] = pointer_aux_u[(2*i3-d3-1)*n1*n2 + (2*i2-d2-1)*n1 + (2*i1-t1-1)] + 0.5 * (pointer_aux_z[(i3-1)*mm1*mm2 + (i2-1)*mm1 + (i1)] + pointer_aux_z[(i3-1)*mm1*mm2 + (i2-1)*mm1 + (i1-1)]); } } for(i2 = 1; i2 <= mm2-1; i2++){ for( i1 = d1; i1 <= mm1-1; i1++){ //u[2*i3-d3-1][2*i2-t2-1][2*i1-d1-1] = u[2*i3-d3-1][2*i2-t2-1][2*i1-d1-1] + 0.5 * (z[i3-1][i2][i1-1] + z[i3-1][i2-1][i1-1]); pointer_aux_u[(2*i3-d3-1)*n1*n2 + (2*i2-t2-1)*n1 + (2*i1-d1-1)] = pointer_aux_u[(2*i3-d3-1)*n1*n2 + (2*i2-t2-1)*n1 + (2*i1-d1-1)] + 0.5 * (pointer_aux_z[(i3-1)*mm1*mm2 + (i2)*mm1 + (i1-1)] + pointer_aux_z[(i3-1)*mm1*mm2 + (i2-1)*mm1 + (i1-1)]); } for(i1 = 1; i1 <= mm1-1; i1++){ //u[2*i3-d3-1][2*i2-t2-1][2*i1-t1-1] = u[2*i3-d3-1][2*i2-t2-1][2*i1-t1-1] + 0.25 * (z[i3-1][i2][i1] + z[i3-1][i2-1][i1] + z[i3-1][i2][i1-1] + z[i3-1][i2-1][i1-1]); pointer_aux_u[(2*i3-d3-1)*n1*n2 + (2*i2-t2-1)*n1 + (2*i1-t1-1)] = pointer_aux_u[(2*i3-d3-1)*n1*n2 + (2*i2-t2-1)*n1 + (2*i1-t1-1)] + 0.25 * (pointer_aux_z[(i3-1)*mm1*mm2 + (i2)*mm1 + (i1)] + pointer_aux_z[(i3-1)*mm1*mm2 + (i2-1)*mm1 + (i1)] + pointer_aux_z[(i3-1)*mm1*mm2 + (i2)*mm1 + (i1-1)] + pointer_aux_z[(i3-1)*mm1*mm2 + (i2-1)*mm1 + (i1-1)]); } } } for(i3 = 1; i3 <= mm3-1; i3++){ for(i2 = d2; i2 <= 
mm2-1; i2++){ for(i1 = d1; i1 <= mm1-1; i1++){ //u[2*i3-t3-1][2*i2-d2-1][2*i1-d1-1] = u[2*i3-t3-1][2*i2-d2-1][2*i1-d1-1] + 0.5 * (z[i3][i2-1][i1-1] + z[i3-1][i2-1][i1-1]); pointer_aux_u[(2*i3-t3-1)*n1*n2 + (2*i2-d2-1)*n1 + (2*i1-d1-1)] = pointer_aux_u[(2*i3-t3-1)*n1*n2 + (2*i2-d2-1)*n1 + (2*i1-d1-1)] + 0.5 * (pointer_aux_z[(i3)*mm1*mm2 + (i2-1)*mm1 + (i1-1)] + pointer_aux_z[(i3-1)*mm1*mm2 + (i2-1)*mm1 + (i1-1)]); } for(i1 = 1; i1 <= mm1-1; i1++){ //u[2*i3-t3-1][2*i2-d2-1][2*i1-t1-1] = u[2*i3-t3-1][2*i2-d2-1][2*i1-t1-1] + 0.25 * (z[i3][i2-1][i1] + z[i3][i2-1][i1-1] + z[i3-1][i2-1][i1] + z[i3-1][i2-1][i1-1]); pointer_aux_u[(2*i3-t3-1)*n1*n2 + (2*i2-d2-1)*n1 + (2*i1-t1-1)] = pointer_aux_u[(2*i3-t3-1)*n1*n2 + (2*i2-d2-1)*n1 + (2*i1-t1-1)] + 0.25 * (pointer_aux_z[(i3)*mm1*mm2 + (i2-1)*mm1 + (i1)] + pointer_aux_z[(i3)*mm1*mm2 + (i2-1)*mm1 + (i1-1)] + pointer_aux_z[(i3-1)*mm1*mm2 + (i2-1)*mm1 + (i1)] + pointer_aux_z[(i3-1)*mm1*mm2 + (i2-1)*mm1 + (i1-1)]); } } for(i2 = 1; i2 <= mm2-1; i2++){ for (i1 = d1; i1 <= mm1-1; i1++){ //u[2*i3-t3-1][2*i2-t2-1][2*i1-d1-1] = u[2*i3-t3-1][2*i2-t2-1][2*i1-d1-1] + 0.25 * (z[i3][i2][i1-1] + z[i3][i2-1][i1-1] + z[i3-1][i2][i1-1] + z[i3-1][i2-1][i1-1]); pointer_aux_u[(2*i3-t3-1)*n1*n2 + (2*i2-t2-1)*n1 + (2*i1-d1-1)] = pointer_aux_u[(2*i3-t3-1)*n1*n2 + (2*i2-t2-1)*n1 + (2*i1-d1-1)] + 0.25 * (pointer_aux_z[(i3)*mm1*mm2 + (i2)*mm1 + (i1-1)] + pointer_aux_z[(i3)*mm1*mm2 + (i2-1)*mm1 + (i1-1)] + pointer_aux_z[(i3-1)*mm1*mm2 + (i2)*mm1 + (i1-1)] + pointer_aux_z[(i3-1)*mm1*mm2 + (i2-1)*mm1 + (i1-1)]); } for(i1 = 1; i1 <= mm1-1; i1++){ //u[2*i3-t3-1][2*i2-t2-1][2*i1-t1-1] = u[2*i3-t3-1][2*i2-t2-1][2*i1-t1-1] + 0.125 * (z[i3][i2][i1] + z[i3][i2-1][i1] + z[i3][i2][i1-1] + z[i3][i2-1][i1-1] + z[i3-1][i2][i1] + z[i3-1][i2-1][i1] + z[i3-1][i2][i1-1] + z[i3-1][i2-1][i1-1]); pointer_aux_u[(2*i3-t3-1)*n1*n2 + (2*i2-t2-1)*n1 + (2*i1-t1-1)] = pointer_aux_u[(2*i3-t3-1)*n1*n2 + (2*i2-t2-1)*n1 + (2*i1-t1-1)] + 0.125 * (pointer_aux_z[(i3)*mm1*mm2 + (i2)*mm1 + 
(i1)] + pointer_aux_z[(i3)*mm1*mm2 + (i2-1)*mm1 + (i1)] + pointer_aux_z[(i3)*mm1*mm2 + (i2)*mm1 + (i1-1)] + pointer_aux_z[(i3)*mm1*mm2 + (i2-1)*mm1 + (i1-1)] + pointer_aux_z[(i3-1)*mm1*mm2 + (i2)*mm1 + (i1)] + pointer_aux_z[(i3-1)*mm1*mm2 + (i2-1)*mm1 + (i1)] + pointer_aux_z[(i3-1)*mm1*mm2 + (i2)*mm1 + (i1-1)] + pointer_aux_z[(i3-1)*mm1*mm2 + (i2-1)*mm1 + (i1-1)]); } } } } if(timeron){timer_stop(T_INTERP);} if(debug_vec[0] >= 1){ rep_nrm(pointer_aux_z,mm1,mm2,mm3,(char*)"z: inter",k-1); rep_nrm(pointer_aux_u,n1,n2,n3,(char*)"u: inter",k); } if(debug_vec[5] >= k){ showall(pointer_aux_z,mm1,mm2,mm3); showall(pointer_aux_u,n1,n2,n3); } } static void interp_gpu(double* z_device, int mm1, int mm2, int mm3, double* u_device, int n1, int n2, int n3, int k){ if(timeron){timer_start(T_INTERP);} if(n1 != 3 && n2 != 3 && n3 != 3){ threads_per_block = mm1; amount_of_work = (mm3-1) * (mm2-1) * mm1; blocks_per_grid = (ceil((double)(amount_of_work)/(double)(threads_per_block))); hipLaunchKernelGGL(( interp_gpu_kernel), dim3(blocks_per_grid), threads_per_block //,SHARED_3_M , 0, 0, z_device, u_device, mm1, mm2, mm3, n1, n2, n3, amount_of_work); hipDeviceSynchronize(); } if(timeron){timer_stop(T_INTERP);} } __global__ void interp_gpu_kernel(double* base_z, double* base_u, int mm1, int mm2, int mm3, int n1, int n2, int n3, int amount_of_work){ int check=blockIdx.x*blockDim.x+threadIdx.x; if(check>=amount_of_work){return;} int i3,i2,i1; __shared__ double z1[M],z2[M],z3[M]; //double* z1 = (double*)(extern_share_data); //double* z2 = (double*)(&z1[M]); //double* z3 = (double*)(&z2[M]); double (*z)=base_z; double (*u)=base_u; i3=blockIdx.x/(mm2-1); i2=blockIdx.x%(mm2-1); i1=threadIdx.x; z1[i1]=z[i3*mm2*mm1+(i2+1)*mm1+i1]+z[i3*mm2*mm1+i2*mm1+i1]; z2[i1]=z[(i3+1)*mm2*mm1+i2*mm1+i1]+z[i3*mm2*mm1+i2*mm1+i1]; z3[i1]=z[(i3+1)*mm2*mm1+(i2+1)*mm1+i1] +z[(i3+1)*mm2*mm1+i2*mm1+i1]+z1[i1]; __syncthreads(); if(i1<mm1-1){ double z321=z[i3*mm2*mm1+i2*mm1+i1]; u[2*i3*n2*n1+2*i2*n1+2*i1]+=z321; 
u[2*i3*n2*n1+2*i2*n1+2*i1+1]+=0.5*(z[i3*mm2*mm1+i2*mm1+i1+1]+z321); u[2*i3*n2*n1+(2*i2+1)*n1+2*i1]+=0.5*z1[i1]; u[2*i3*n2*n1+(2*i2+1)*n1+2*i1+1]+=0.25*(z1[i1]+z1[i1+1]); u[(2*i3+1)*n2*n1+2*i2*n1+2*i1]+=0.5*z2[i1]; u[(2*i3+1)*n2*n1+2*i2*n1+2*i1+1]+=0.25*(z2[i1]+z2[i1+1]); u[(2*i3+1)*n2*n1+(2*i2+1)*n1+2*i1]+=0.25*z3[i1]; u[(2*i3+1)*n2*n1+(2*i2+1)*n1+2*i1+1]+=0.125*(z3[i1]+z3[i1+1]); } } /* * -------------------------------------------------------------------- * multigrid v-cycle routine * -------------------------------------------------------------------- */ static void mg3P(double u[], double v[], double r[], double a[4], double c[4], int n1, int n2, int n3, int k){ int j; /* * -------------------------------------------------------------------- * down cycle. * restrict the residual from the find grid to the coarse * ------------------------------------------------------------------- */ for(k = lt; k >= lb+1; k--){ j = k-1; rprj3(&r[ir[k]], m1[k], m2[k], m3[k], &r[ir[j]], m1[j], m2[j], m3[j], k); } k = lb; /* * -------------------------------------------------------------------- * compute an approximate solution on the coarsest grid * -------------------------------------------------------------------- */ zero3(&u[ir[k]], m1[k], m2[k], m3[k]); psinv(&r[ir[k]], &u[ir[k]], m1[k], m2[k], m3[k], c, k); for(k = lb+1; k <= lt-1; k++){ j = k-1; /* * -------------------------------------------------------------------- * prolongate from level k-1 to k * ------------------------------------------------------------------- */ zero3(&u[ir[k]], m1[k], m2[k], m3[k]); interp(&u[ir[j]], m1[j], m2[j], m3[j], &u[ir[k]], m1[k], m2[k], m3[k], k); /* * -------------------------------------------------------------------- * compute residual for level k * -------------------------------------------------------------------- */ resid(&u[ir[k]], &r[ir[k]], &r[ir[k]], m1[k], m2[k], m3[k], a, k); /* * -------------------------------------------------------------------- * apply smoother * 
-------------------------------------------------------------------- */ psinv(&r[ir[k]], &u[ir[k]], m1[k], m2[k], m3[k], c, k); } j = lt - 1; k = lt; interp(&u[ir[j]], m1[j], m2[j], m3[j], u, n1, n2, n3, k); resid(u, v, r, n1, n2, n3, a, k); psinv(r, u, n1, n2, n3, c, k); } static void mg3P_gpu(double* u_device, double* v_device, double* r_device, double* a_device, double* c_device, int n1, int n2, int n3, int k){ int j; /* * -------------------------------------------------------------------- * down cycle. * restrict the residual from the find grid to the coarse * ------------------------------------------------------------------- */ for(k = lt; k >= lb+1; k--){ j = k-1; rprj3_gpu(r_device+ir[k], m1[k], m2[k], m3[k], r_device+ir[j], m1[j], m2[j], m3[j], k); } k = lb; /* * -------------------------------------------------------------------- * compute an approximate solution on the coarsest grid * -------------------------------------------------------------------- */ zero3_gpu(u_device+ir[k], m1[k], m2[k], m3[k]); psinv_gpu(r_device+ir[k], u_device+ir[k], m1[k], m2[k], m3[k], c_device, k); for(k = lb+1; k <= lt-1; k++){ j = k-1; /* * -------------------------------------------------------------------- * prolongate from level k-1 to k * ------------------------------------------------------------------- */ zero3_gpu(u_device+ir[k], m1[k], m2[k], m3[k]); interp_gpu(u_device+ir[j], m1[j], m2[j], m3[j], u_device+ir[k], m1[k], m2[k], m3[k], k); /* * -------------------------------------------------------------------- * compute residual for level k * -------------------------------------------------------------------- */ resid_gpu(u_device+ir[k], r_device+ir[k], r_device+ir[k], m1[k], m2[k], m3[k], a_device, k); /* * -------------------------------------------------------------------- * apply smoother * -------------------------------------------------------------------- */ psinv_gpu(r_device+ir[k], u_device+ir[k], m1[k], m2[k], m3[k], c_device, k); } j = lt - 1; k = lt; 
interp_gpu(u_device+ir[j], m1[j], m2[j], m3[j], u_device, n1, n2, n3, k); resid_gpu(u_device, v_device, r_device, n1, n2, n3, a_device, k); psinv_gpu(r_device, u_device, n1, n2, n3, c_device, k); } /* * --------------------------------------------------------------------- * norm2u3 evaluates approximations to the l2 norm and the * uniform (or l-infinity or chebyshev) norm, under the * assumption that the boundaries are periodic or zero. add the * boundaries in with half weight (quarter weight on the edges * and eighth weight at the corners) for inhomogeneous boundaries. * --------------------------------------------------------------------- */ static void norm2u3(void* pointer_r, int n1, int n2, int n3, double* rnm2, double* rnmu, int nx, int ny, int nz){ //double (*r)[n2][n1] = (double (*)[n2][n1])pointer_r; double* pointer_aux_r = (double*)pointer_r; double s, a; int i3, i2, i1; double dn; if(timeron){timer_start(T_NORM2);} dn = 1.0*nx*ny*nz; s = 0.0; *rnmu = 0.0; for(i3 = 1; i3 < n3-1; i3++){ for(i2 = 1; i2 < n2-1; i2++){ for(i1 = 1; i1 < n1-1; i1++){ //s = s + r[i3][i2][i1] * r[i3][i2][i1]; s = s + pointer_aux_r[(i3)*n1*n2 + (i2)*n1 + (i1)] * pointer_aux_r[(i3)*n1*n2 + (i2)*n1 + (i1)]; //a = fabs(r[i3][i2][i1]); a = fabs(pointer_aux_r[(i3)*n1*n2 + (i2)*n1 + (i1)]); if(a > *rnmu){*rnmu = a;} } } } *rnm2 = sqrt(s/dn); if(timeron){timer_stop(T_NORM2);} } static void norm2u3_gpu(double* r_device, int n1, int n2, int n3, double* rnm2, double* rnmu, int nx, int ny, int nz){ if(timeron){timer_start(T_NORM2);} double s; double dn, max_rnmu; int temp_size, j; dn=1.0*nx*ny*nz; s=0.0; max_rnmu=0.0; threads_per_block = THREADS_PER_BLOCK_ON_NORM2U3; amount_of_work = (n2-2) * (n3-2) * threads_per_block; blocks_per_grid = (ceil((double)(amount_of_work)/(double)(threads_per_block))); temp_size = amount_of_work / threads_per_block; double (*sum_host)=(double*)malloc(temp_size*sizeof(double)); double (*max_host)=(double*)malloc(temp_size*sizeof(double)); double* sum_device; 
double* max_device; hipMalloc(&sum_device,temp_size*sizeof(double)); hipMalloc(&max_device,temp_size*sizeof(double)); hipLaunchKernelGGL(( norm2u3_gpu_kernel), dim3(blocks_per_grid), threads_per_block //,SHARED_2_NORM , 0, 0, r_device, n1, n2, n3, sum_device, max_device, blocks_per_grid, amount_of_work); hipDeviceSynchronize(); hipMemcpy(sum_host, sum_device, temp_size*sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(max_host, max_device, temp_size*sizeof(double), hipMemcpyDeviceToHost); for(j=0; j<temp_size; j++){ s=s+sum_host[j]; if(max_rnmu<max_host[j]){max_rnmu=max_host[j];} } hipFree(sum_device); hipFree(max_device); free(sum_host); free(max_host); *rnmu=max_rnmu; *rnm2=sqrt(s/dn); if(timeron){timer_stop(T_NORM2);} } __global__ void norm2u3_gpu_kernel(double* r, const int n1, const int n2, const int n3, double* res_sum, double* res_max, int number_of_blocks, int amount_of_work){ int check=blockIdx.x*blockDim.x+threadIdx.x; if(check>=amount_of_work){return;} __shared__ double scratch_sum[THREADS_PER_BLOCK_ON_NORM2U3]; __shared__ double scratch_max[THREADS_PER_BLOCK_ON_NORM2U3]; //double* scratch_sum = (double*)(extern_share_data); //double* scratch_max = (double*)(&scratch_sum[THREADS_PER_BLOCK_ON_NORM2U3]); int i3=blockIdx.x/(n2-2)+1; int i2=blockIdx.x%(n2-2)+1; int i1=threadIdx.x+1; double s=0.0; double my_rnmu=0.0; double a; while(i1<n1-1){ double r321=r[i3*n2*n1+i2*n1+i1]; s=s+r321*r321; a=fabs(r321); my_rnmu=(a>my_rnmu)?a:my_rnmu; i1+=THREADS_PER_BLOCK_ON_NORM2U3; } int lid=threadIdx.x; scratch_sum[lid]=s; scratch_max[lid]=my_rnmu; __syncthreads(); for(int i=THREADS_PER_BLOCK_ON_NORM2U3/2; i>0; i>>=1){ if(lid<i){ scratch_sum[lid]+=scratch_sum[lid+i]; scratch_max[lid]=(scratch_max[lid]>scratch_max[lid+i])?scratch_max[lid]:scratch_max[lid+i]; } __syncthreads(); } if(lid == 0){ int idx=blockIdx.x; res_sum[idx]=scratch_sum[0]; res_max[idx]=scratch_max[0]; } } /* * --------------------------------------------------------------------- * power raises an integer, 
disguised as a double * precision real, to an integer power * --------------------------------------------------------------------- */ static double power(double a, int n){ double aj; int nj; double power; power = 1.0; nj = n; aj = a; while(nj != 0){ if((nj%2)==1){randlc(&power, aj);} randlc(&aj, aj); nj = nj/2; } return power; } /* * -------------------------------------------------------------------- * psinv applies an approximate inverse as smoother: u = u + Cr * * this implementation costs 15A + 4M per result, where * A and M denote the costs of Addition and Multiplication. * presuming coefficient c(3) is zero (the NPB assumes this, * but it is thus not a general case), 2A + 1M may be eliminated, * resulting in 13A + 3M. * note that this vectorizes, and is also fine for cache * based machines. * -------------------------------------------------------------------- */ static void psinv(void* pointer_r, void* pointer_u, int n1, int n2, int n3, double c[4], int k){ //double (*r)[n2][n1] = (double (*)[n2][n1])pointer_r; //double (*u)[n2][n1] = (double (*)[n2][n1])pointer_u; double* pointer_aux_r = (double*)pointer_r; double* pointer_aux_u = (double*)pointer_u; int i3, i2, i1; double r1[M], r2[M]; if(timeron){timer_start(T_PSINV);} for(i3 = 1; i3 < n3-1; i3++){ for(i2 = 1; i2 < n2-1; i2++){ for(i1 = 0; i1 < n1; i1++){ //r1[i1] = r[i3][i2-1][i1] + r[i3][i2+1][i1] + r[i3-1][i2][i1] + r[i3+1][i2][i1]; r1[i1] = pointer_aux_r[(i3)*n1*n2 + (i2-1)*n1 + (i1)] + pointer_aux_r[(i3)*n1*n2 + (i2+1)*n1 + (i1)] + pointer_aux_r[(i3-1)*n1*n2 + (i2)*n1 + (i1)] + pointer_aux_r[(i3+1)*n1*n2 + (i2)*n1 + (i1)]; //r2[i1] = r[i3-1][i2-1][i1] + r[i3-1][i2+1][i1] + r[i3+1][i2-1][i1] + r[i3+1][i2+1][i1]; r2[i1] = pointer_aux_r[(i3-1)*n1*n2 + (i2-1)*n1 + (i1)] + pointer_aux_r[(i3-1)*n1*n2 + (i2+1)*n1 + (i1)] + pointer_aux_r[(i3+1)*n1*n2 + (i2-1)*n1 + (i1)] + pointer_aux_r[(i3+1)*n1*n2 + (i2+1)*n1 + (i1)]; } for(i1 = 1; i1 < n1-1; i1++){ //u[i3][i2][i1] = u[i3][i2][i1] + c[0] * r[i3][i2][i1] + 
c[1] * ( r[i3][i2][i1-1] + r[i3][i2][i1+1] + r1[i1] ) + c[2] * ( r2[i1] + r1[i1-1] + r1[i1+1] ); pointer_aux_u[(i3)*n1*n2 + (i2)*n1 + (i1)] = pointer_aux_u[(i3)*n1*n2 + (i2)*n1 + (i1)] + c[0] * pointer_aux_r[(i3)*n1*n2 + (i2)*n1 + (i1)] + c[1] * ( pointer_aux_r[(i3)*n1*n2 + (i2)*n1 + (i1-1)] + pointer_aux_r[(i3)*n1*n2 + (i2)*n1 + (i1+1)] + r1[i1] ) + c[2] * ( r2[i1] + r1[i1-1] + r1[i1+1] ); /* * -------------------------------------------------------------------- * assume c(3) = 0 (enable line below if c(3) not= 0) * -------------------------------------------------------------------- * > + c(3) * ( r2(i1-1) + r2(i1+1) ) * -------------------------------------------------------------------- */ } } } if(timeron){timer_stop(T_PSINV);} /* * -------------------------------------------------------------------- * exchange boundary points * -------------------------------------------------------------------- */ comm3(pointer_aux_u,n1,n2,n3,k); if(debug_vec[0] >= 1){ rep_nrm(pointer_aux_u,n1,n2,n3,(char*)" psinv",k); } if(debug_vec[3] >= k){ showall(pointer_aux_u,n1,n2,n3); } } static void psinv_gpu(double* r_device, double* u_device, int n1, int n2, int n3, double* c_device, int k){ threads_per_block = n1 > THREADS_PER_BLOCK ? 
THREADS_PER_BLOCK : n1; amount_of_work = (n3-2) * (n2-2) * threads_per_block; blocks_per_grid = (ceil((double)(amount_of_work)/(double)(threads_per_block))); if(timeron){timer_start(T_PSINV);} hipLaunchKernelGGL(( psinv_gpu_kernel), dim3(blocks_per_grid), threads_per_block //,SHARED_2_M , 0, 0, r_device, u_device, c_device, n1, n2, n3, amount_of_work); hipDeviceSynchronize(); if(timeron){timer_stop(T_PSINV);} /* * -------------------------------------------------------------------- * exchange boundary points * -------------------------------------------------------------------- */ comm3_gpu(u_device,n1,n2,n3,k); } __global__ void psinv_gpu_kernel(double* r, double* u, double* c, int n1, int n2, int n3, int amount_of_work){ int check=blockIdx.x*blockDim.x+threadIdx.x; if(check>=amount_of_work){return;} __shared__ double r1[M],r2[M]; //double* r1 = (double*)(extern_share_data); //double* r2 = (double*)(&r1[M]); int i3=blockIdx.x/(n2-2)+1; int i2=blockIdx.x%(n2-2)+1; int lid=threadIdx.x; int i1; for(i1=lid; i1<n1; i1+=THREADS_PER_BLOCK){ r1[i1]=r[i3*n2*n1+(i2-1)*n2+i1] +r[i3*n2*n1+(i2+1)*n1+i1] +r[(i3-1)*n2*n1+i2*n1+i1] +r[(i3+1)*n2*n1+i2*n1+i1]; r2[i1]=r[(i3-1)*n2*n1+(i2-1)*n1+i1] +r[(i3-1)*n2*n1+(i2+1)*n1+i1] +r[(i3+1)*n2*n1+(i2-1)*n1+i1] +r[(i3+1)*n2*n1+(i2+1)*n1+i1]; } __syncthreads(); for(i1=lid+1; i1<n1-1; i1+=THREADS_PER_BLOCK){ u[i3*n2*n1+i2*n1+i1]=u[i3*n2*n1+i2*n1+i1] +c[0]*r[i3*n2*n1+i2*n1+i1] +c[1]*(r[i3*n2*n1+i2*n1+i1-1] +r[i3*n2*n1+i2*n1+i1+1] +r1[i1]) +c[2]*(r2[i1]+r1[i1-1]+r1[i1+1] ); } } static void release_gpu(){ hipFree(a_device); hipFree(c_device); hipFree(u_device); hipFree(v_device); hipFree(r_device); } /* * --------------------------------------------------------------------- * report on norm * --------------------------------------------------------------------- */ static void rep_nrm(void* pointer_u, int n1, int n2, int n3, char* title, int kk){ double rnm2, rnmu; norm2u3(pointer_u,n1,n2,n3,&rnm2,&rnmu,nx[kk],ny[kk],nz[kk]); printf(" Level%2d 
in %8s: norms =%21.14e%21.14e\n", kk, title, rnm2, rnmu); } /* * -------------------------------------------------------------------- * resid computes the residual: r = v - Au * * this implementation costs 15A + 4M per result, where * A and M denote the costs of addition (or subtraction) and * multiplication, respectively. * presuming coefficient a(1) is zero (the NPB assumes this, * but it is thus not a general case), 3A + 1M may be eliminated, * resulting in 12A + 3M. * note that this vectorizes, and is also fine for cache * based machines. * -------------------------------------------------------------------- */ static void resid(void* pointer_u, void* pointer_v, void* pointer_r, int n1, int n2, int n3, double a[4], int k){ //double (*u)[n2][n1] = (double (*)[n2][n1])pointer_u; //double (*v)[n2][n1] = (double (*)[n2][n1])pointer_v; //double (*r)[n2][n1] = (double (*)[n2][n1])pointer_r; double* pointer_aux_u = (double*)pointer_u; double* pointer_aux_v = (double*)pointer_v; double* pointer_aux_r = (double*)pointer_r; int i3, i2, i1; double u1[M], u2[M]; if(timeron){timer_start(T_RESID);} for(i3 = 1; i3 < n3-1; i3++){ for(i2 = 1; i2 < n2-1; i2++){ for(i1 = 0; i1 < n1; i1++){ //u1[i1] = u[i3][i2-1][i1] + u[i3][i2+1][i1] + u[i3-1][i2][i1] + u[i3+1][i2][i1]; u1[i1] = pointer_aux_u[(i3)*n1*n2 + (i2-1)*n1 + (i1)] + pointer_aux_u[(i3)*n1*n2 + (i2+1)*n1 + (i1)] + pointer_aux_u[(i3-1)*n1*n2 + (i2)*n1 + (i1)] + pointer_aux_u[(i3+1)*n1*n2 + (i2)*n1 + (i1)]; //u2[i1] = u[i3-1][i2-1][i1] + u[i3-1][i2+1][i1] + u[i3+1][i2-1][i1] + u[i3+1][i2+1][i1]; u2[i1] = pointer_aux_u[(i3-1)*n1*n2 + (i2-1)*n1 + (i1)] + pointer_aux_u[(i3-1)*n1*n2 + (i2+1)*n1 + (i1)] + pointer_aux_u[(i3+1)*n1*n2 + (i2-1)*n1 + (i1)] + pointer_aux_u[(i3+1)*n1*n2 + (i2+1)*n1 + (i1)]; } for(i1 = 1; i1 < n1-1; i1++){ /* * --------------------------------------------------------------------- * assume a(1) = 0 (enable 2 lines below if a(1) not= 0) * 
--------------------------------------------------------------------- * > - a(1) * ( u(i1-1,i2,i3) + u(i1+1,i2,i3) * > + u1(i1) ) * --------------------------------------------------------------------- */ //r[i3][i2][i1] = v[i3][i2][i1] - a[0] * u[i3][i2][i1] - a[2] * ( u2[i1] + u1[i1-1] + u1[i1+1] ) - a[3] * ( u2[i1-1] + u2[i1+1] ); pointer_aux_r[(i3)*n1*n2 + (i2)*n1 + (i1)] = pointer_aux_v[(i3)*n1*n2 + (i2)*n1 + (i1)] - a[0] * pointer_aux_u[(i3)*n1*n2 + (i2)*n1 + (i1)] - a[2] * ( u2[i1] + u1[i1-1] + u1[i1+1] ) - a[3] * ( u2[i1-1] + u2[i1+1] ); } } } if(timeron){timer_stop(T_RESID);} /* * -------------------------------------------------------------------- * exchange boundary data * -------------------------------------------------------------------- */ comm3(pointer_aux_r,n1,n2,n3,k); if(debug_vec[0] >= 1){ rep_nrm(pointer_aux_r,n1,n2,n3,(char*)" resid",k); } if(debug_vec[2] >= k){ showall(pointer_aux_r,n1,n2,n3); } } static void resid_gpu(double* u_device, double* v_device, double* r_device, int n1, int n2, int n3, double* a_device, int k){ threads_per_block = n1 > THREADS_PER_BLOCK ? 
THREADS_PER_BLOCK : n1; amount_of_work = (n3-2) * (n2-2) * threads_per_block; blocks_per_grid = (ceil((double)(amount_of_work)/(double)(threads_per_block))); if(timeron){timer_start(T_RESID);} hipLaunchKernelGGL(( resid_gpu_kernel), dim3(blocks_per_grid), threads_per_block //,SHARED_2_M , 0, 0, u_device, v_device, r_device, a_device, n1, n2, n3, amount_of_work); hipDeviceSynchronize(); if(timeron){timer_stop(T_RESID);} /* * -------------------------------------------------------------------- * exchange boundary data * -------------------------------------------------------------------- */ comm3_gpu(r_device,n1,n2,n3,k); } __global__ void resid_gpu_kernel(double* u, double* v, double* r, double* a, int n1, int n2, int n3, int amount_of_work){ int check=blockIdx.x*blockDim.x+threadIdx.x; if(check>=amount_of_work){return;} __shared__ double u1[M], u2[M]; //double* u1 = (double*)(extern_share_data); //double* u2 = (double*)(&u1[M]); int i3=blockIdx.x/(n2-2)+1; int i2=blockIdx.x%(n2-2)+1; int lid=threadIdx.x; int i1; for(i1=lid; i1<n1; i1+=THREADS_PER_BLOCK){ u1[i1]=u[i3*n2*n1+(i2-1)*n1+i1] +u[i3*n2*n1+(i2+1)*n1+i1] +u[(i3-1)*n2*n1+i2*n1+i1] +u[(i3+1)*n2*n1+i2*n1+i1]; u2[i1]=u[(i3-1)*n2*n1+(i2-1)*n1+i1] +u[(i3-1)*n2*n1+(i2+1)*n1+i1] +u[(i3+1)*n2*n1+(i2-1)*n1+i1] +u[(i3+1)*n2*n1+(i2+1)*n1+i1]; } __syncthreads(); for(i1=lid+1; i1<n1-1; i1+=THREADS_PER_BLOCK){ r[i3*n2*n1+i2*n1+i1]=v[i3*n2*n1+i2*n1+i1] -a[0]*u[i3*n2*n1+i2*n1+i1] -a[2]*(u2[i1]+u1[i1-1]+u1[i1+1]) -a[3]*(u2[i1-1]+u2[i1+1] ); } } /* * -------------------------------------------------------------------- * rprj3 projects onto the next coarser grid, * using a trilinear finite element projection: s = r' = P r * * this implementation costs 20A + 4M per result, where * A and M denote the costs of addition and multiplication. * note that this vectorizes, and is also fine for cache * based machines. 
* -------------------------------------------------------------------- */ static void rprj3(void* pointer_r, int m1k, int m2k, int m3k, void* pointer_s, int m1j, int m2j, int m3j, int k){ //double (*r)[m2k][m1k] = (double (*)[m2k][m1k])pointer_r; //double (*s)[m2j][m1j] = (double (*)[m2j][m1j])pointer_s; double* pointer_aux_r = (double*)pointer_r; double* pointer_aux_s = (double*)pointer_s; int j3, j2, j1, i3, i2, i1, d1, d2, d3, j; double x1[M], y1[M], x2, y2; if(timeron){timer_start(T_RPRJ3);} if(m1k == 3){ d1 = 2; }else{ d1 = 1; } if(m2k == 3){ d2 = 2; }else{ d2 = 1; } if(m3k == 3){ d3 = 2; }else{ d3 = 1; } for(j3 = 1; j3 < m3j-1; j3++){ i3 = 2*j3-d3; for(j2 = 1; j2 < m2j-1; j2++){ i2 = 2*j2-d2; for(j1 = 1; j1 < m1j; j1++){ i1 = 2*j1-d1; //x1[i1] = r[i3+1][i2][i1] + r[i3+1][i2+2][i1] + r[i3][i2+1][i1] + r[i3+2][i2+1][i1]; x1[i1] = pointer_aux_r[(i3+1)*m1k*m2k + (i2)*m1k + (i1)] + pointer_aux_r[(i3+1)*m1k*m2k + (i2+2)*m1k + (i1)] + pointer_aux_r[(i3)*m1k*m2k + (i2+1)*m1k + (i1)] + pointer_aux_r[(i3+2)*m1k*m2k + (i2+1)*m1k + (i1)]; //y1[i1] = r[i3][i2][i1] + r[i3+2][i2][i1] + r[i3][i2+2][i1] + r[i3+2][i2+2][i1]; y1[i1] = pointer_aux_r[(i3)*m1k*m2k + (i2)*m1k + (i1)] + pointer_aux_r[(i3+2)*m1k*m2k + (i2)*m1k + (i1)] + pointer_aux_r[(i3)*m1k*m2k + (i2+2)*m1k + (i1)] + pointer_aux_r[(i3+2)*m1k*m2k + (i2+2)*m1k + (i1)]; } for(j1 = 1; j1 < m1j-1; j1++){ i1 = 2*j1-d1; //y2 = r[i3][i2][i1+1] + r[i3+2][i2][i1+1] + r[i3][i2+2][i1+1] + r[i3+2][i2+2][i1+1]; y2 = pointer_aux_r[(i3)*m1k*m2k + (i2)*m1k + (i1+1)] + pointer_aux_r[(i3+2)*m1k*m2k + (i2)*m1k + (i1+1)] + pointer_aux_r[(i3)*m1k*m2k + (i2+2)*m1k + (i1+1)] + pointer_aux_r[(i3+2)*m1k*m2k + (i2+2)*m1k + (i1+1)]; //x2 = r[i3+1][i2][i1+1] + r[i3+1][i2+2][i1+1] + r[i3][i2+1][i1+1] + r[i3+2][i2+1][i1+1]; x2 = pointer_aux_r[(i3+1)*m1k*m2k + (i2)*m1k + (i1+1)] + pointer_aux_r[(i3+1)*m1k*m2k + (i2+2)*m1k + (i1+1)] + pointer_aux_r[(i3)*m1k*m2k + (i2+1)*m1k + (i1+1)] + pointer_aux_r[(i3+2)*m1k*m2k + (i2+1)*m1k + (i1+1)]; 
//s[j3][j2][j1] = 0.5 * r[i3+1][i2+1][i1+1] + 0.25 * ( r[i3+1][i2+1][i1] + r[i3+1][i2+1][i1+2] + x2) + 0.125 * ( x1[i1] + x1[i1+2] + y2) + 0.0625 * ( y1[i1] + y1[i1+2] ); pointer_aux_s[(j3)*m1j*m2j + (j2)*m1j + (j1)] = 0.5 * pointer_aux_r[(i3+1)*m1k*m2k + (i2+1)*m1k + (i1+1)] + 0.25 * ( pointer_aux_r[(i3+1)*m1k*m2k + (i2+1)*m1k + (i1)] + pointer_aux_r[(i3+1)*m1k*m2k + (i2+1)*m1k + (i1+2)] + x2) + 0.125 * ( x1[i1] + x1[i1+2] + y2) + 0.0625 * ( y1[i1] + y1[i1+2] ); } } } if(timeron){timer_stop(T_RPRJ3);} j=k-1; comm3(pointer_aux_s,m1j,m2j,m3j,j); if(debug_vec[0] >= 1){ rep_nrm(pointer_aux_s,m1j,m2j,m3j,(char*)" rprj3",k-1); } if(debug_vec[4] >= k){ showall(pointer_aux_s,m1j,m2j,m3j); } } static void rprj3_gpu(double* r_device, int m1k, int m2k, int m3k, double* s_device, int m1j, int m2j, int m3j, int k){ int d1,d2,d3,j; if(m1k==3){ d1=2; }else{ d1=1; } if(m2k==3){ d2=2; }else{ d2=1; } if(m3k==3){ d3=2; }else{ d3=1; } threads_per_block = m1j-1; amount_of_work = (m3j-2) * (m2j-2) * (m1j-1); blocks_per_grid = (ceil((double)(amount_of_work)/(double)(threads_per_block))); if(timeron){timer_start(T_RPRJ3);} hipLaunchKernelGGL(( rprj3_gpu_kernel), dim3(blocks_per_grid), threads_per_block //,SHARED_2_M , 0, 0, r_device, s_device, m1k, m2k, m3k, m1j, m2j, m3j, d1, d2, d3, amount_of_work); hipDeviceSynchronize(); if(timeron){timer_stop(T_RPRJ3);} j=k-1; comm3_gpu(s_device,m1j,m2j,m3j,j); } __global__ void rprj3_gpu_kernel(double* base_r, double* base_s, int m1k, int m2k, int m3k, int m1j, int m2j, int m3j, int d1, int d2, int d3, int amount_of_work){ int check=blockIdx.x*blockDim.x+threadIdx.x; if(check>=amount_of_work){return;} int j3,j2,j1,i3,i2,i1; double x2,y2; __shared__ double x1[M],y1[M]; //double* x1 = (double*)(extern_share_data); //double* y1 = (double*)(&x1[M]); double (*r)=base_r; double (*s)=base_s; j3=blockIdx.x/(m2j-2)+1; j2=blockIdx.x%(m2j-2)+1; j1=threadIdx.x+1; i3=2*j3-d3; i2=2*j2-d2; i1=2*j1-d1; x1[i1]=r[(i3+1)*m2k*m1k+i2*m1k+i1] 
+r[(i3+1)*m2k*m1k+(i2+2)*m1k+i1] +r[i3*m2k*m1k+(i2+1)*m1k+i1] +r[(i3+2)*m2k*m1k+(i2+1)*m1k+i1]; y1[i1]=r[i3*m2k*m1k+i2*m1k+i1] +r[(i3+2)*m2k*m1k+i2*m1k+i1] +r[i3*m2k*m1k+(i2+2)*m1k+i1] +r[(i3+2)*m2k*m1k+(i2+2)*m1k+i1]; __syncthreads(); if(j1<m1j-1){ i1=2*j1-d1; y2=r[i3*m2k*m1k+i2*m1k+i1+1] +r[(i3+2)*m2k*m1k+i2*m1k+i1+1] +r[i3*m2k*m1k+(i2+2)*m1k+i1+1] +r[(i3+2)*m2k*m1k+(i2+2)*m1k+i1+1]; x2=r[(i3+1)*m2k*m1k+i2*m1k+i1+1] +r[(i3+1)*m2k*m1k+(i2+2)*m1k+i1+1] +r[i3*m2k*m1k+(i2+1)*m1k+i1+1] +r[(i3+2)*m2k*m1k+(i2+1)*m1k+i1+1]; s[j3*m2j*m1j+j2*m1j+j1]= 0.5*r[(i3+1)*m2k*m1k+(i2+1)*m1k+i1+1] +0.25*(r[(i3+1)*m2k*m1k+(i2+1)*m1k+i1] +r[(i3+1)*m2k*m1k+(i2+1)*m1k+i1+2]+x2) +0.125*(x1[i1]+x1[i1+2]+y2) +0.0625*(y1[i1]+y1[i1+2]); } } static void setup(int* n1, int* n2, int* n3, int k){ int j; int ax, mi[MAXLEVEL+1][3]; int ng[MAXLEVEL+1][3]; ng[lt][0] = nx[lt]; ng[lt][1] = ny[lt]; ng[lt][2] = nz[lt]; for(ax = 0; ax < 3; ax++){ for(k = lt-1; k >= 1; k--){ ng[k][ax] = ng[k+1][ax]/2; } } for(k = lt; k >= 1; k--){ nx[k] = ng[k][0]; ny[k] = ng[k][1]; nz[k] = ng[k][2]; } for(k = lt; k >= 1; k--){ for (ax = 0; ax < 3; ax++){ mi[k][ax] = 2 + ng[k][ax]; } m1[k] = mi[k][0]; m2[k] = mi[k][1]; m3[k] = mi[k][2]; } k = lt; is1 = 2 + ng[k][0] - ng[lt][0]; ie1 = 1 + ng[k][0]; *n1 = 3 + ie1 - is1; is2 = 2 + ng[k][1] - ng[lt][1]; ie2 = 1 + ng[k][1]; *n2 = 3 + ie2 - is2; is3 = 2 + ng[k][2] - ng[lt][2]; ie3 = 1 + ng[k][2]; *n3 = 3 + ie3 - is3; ir[lt] = 0; for(j = lt-1; j >= 1; j--){ ir[j] = ir[j+1]+ONE*m1[j+1]*m2[j+1]*m3[j+1]; } if(debug_vec[1] >= 1){ printf(" in setup, \n"); printf(" k lt nx ny nz n1 n2 n3 is1 is2 is3 ie1 ie2 ie3\n"); printf("%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d\n", k,lt,ng[k][0],ng[k][1],ng[k][2],*n1,*n2,*n3,is1,is2,is3,ie1,ie2,ie3); } } static void setup_gpu(double* a, double* c){ size_a_device=sizeof(double)*(4); size_c_device=sizeof(double)*(4); size_u_device=sizeof(double)*(NR); size_v_device=sizeof(double)*(NV); size_r_device=sizeof(double)*(NR); hipMalloc(&a_device, 
size_a_device); hipMalloc(&c_device, size_c_device); hipMalloc(&u_device, size_u_device); hipMalloc(&v_device, size_v_device); hipMalloc(&r_device, size_r_device); hipMemcpy(a_device, a, size_a_device, hipMemcpyHostToDevice); hipMemcpy(c_device, c, size_c_device, hipMemcpyHostToDevice); hipMemcpy(u_device, u, size_u_device, hipMemcpyHostToDevice); hipMemcpy(v_device, v, size_v_device, hipMemcpyHostToDevice); hipMemcpy(r_device, r, size_r_device, hipMemcpyHostToDevice); } static void showall(void* pointer_z, int n1, int n2, int n3){ //double (*z)[n2][n1] = (double (*)[n2][n1])pointer_z; double* pointer_aux_z = (double*)pointer_z; int i1,i2,i3; int m1, m2, m3; m1 = min(n1,18); m2 = min(n2,14); m3 = min(n3,18); printf("\n"); for(i3 = 0; i3 < m3; i3++){ for(i2 = 0; i2 < m2; i2++){ for(i1 = 0; i1 < m1; i1++){ //printf("%6.3f", z[i3][i2][i1]); printf("%6.3f", pointer_aux_z[(i3)*n1*n2 + (i2)*n1 + (i1)]); } printf("\n"); } printf(" - - - - - - - \n"); } printf("\n"); } static void zero3(void* pointer_z, int n1, int n2, int n3){ //double (*z)[n2][n1] = (double (*)[n2][n1])pointer_z; double* pointer_aux_z = (double*)pointer_z; int i1, i2, i3; for(i3 = 0;i3 < n3; i3++){ for(i2 = 0; i2 < n2; i2++){ for(i1 = 0; i1 < n1; i1++){ //z[i3][i2][i1] = 0.0; pointer_aux_z[(i3)*n1*n2 + (i2)*n1 + (i1)] = 0.0; } } } } static void zero3_gpu(double* z_device, int n1, int n2, int n3){ threads_per_block = THREADS_PER_BLOCK_ON_ZERO3; amount_of_work = n1*n2*n3; blocks_per_grid = (ceil((double)(amount_of_work)/(double)(threads_per_block))); hipLaunchKernelGGL(( zero3_gpu_kernel), dim3(blocks_per_grid), dim3(threads_per_block), 0, 0, z_device, n1, n2, n3, amount_of_work); } __global__ void zero3_gpu_kernel(double* z, int n1, int n2, int n3, int amount_of_work){ int thread_id=blockIdx.x*blockDim.x+threadIdx.x; if(thread_id>=(n1*n2*n3)){return;} z[thread_id]=0.0; } /* * --------------------------------------------------------------------- * zran3 loads +1 at ten randomly chosen points, * loads -1 at 
a different ten random points, * and zero elsewhere. * --------------------------------------------------------------------- */ static void zran3(void* pointer_z, int n1, int n2, int n3, int nx, int ny, int k){ //double (*z)[n2][n1] = (double (*)[n2][n1])pointer_z; double* pointer_aux_z = (double*)pointer_z; int i0, m0, m1; int i1, i2, i3, d1, e2, e3; double xx, x0, x1, a1, a2, ai; double ten[2][MM], best; int i, j1[2][MM], j2[2][MM], j3[2][MM]; int jg[2][MM][4]; a1 = power(A, nx); a2 = power(A, nx*ny); //zero3(z, n1, n2, n3); zero3(pointer_aux_z, n1, n2, n3); i = is1-2+nx*(is2-2+ny*(is3-2)); ai = power(A, i); d1 = ie1 - is1 + 1; e2 = ie2 - is2 + 2; e3 = ie3 - is3 + 2; x0 = X; randlc(&x0, ai); for(i3 = 1; i3 < e3; i3++){ x1 = x0; for(i2 = 1; i2 < e2; i2++){ xx = x1; //vranlc(d1, &xx, A, &(z[i3][i2][1])); vranlc(d1, &xx, A, &(pointer_aux_z[(i3)*n1*n2 + (i2)*n1 + (1)])); randlc(&x1,a1); } randlc(&x0, a2); } /* * --------------------------------------------------------------------- * each processor looks for twenty candidates * --------------------------------------------------------------------- */ for(i = 0; i < MM; i++){ ten[1][i] = 0.0; j1[1][i] = 0; j2[1][i] = 0; j3[1][i] = 0; ten[0][i] = 1.0; j1[0][i] = 0; j2[0][i] = 0; j3[0][i] = 0; } for(i3 = 1; i3 < n3-1; i3++){ for(i2 = 1; i2 < n2-1; i2++){ for(i1 = 1; i1 < n1-1; i1++){ //if(z[i3][i2][i1] > ten[1][0]){ if(pointer_aux_z[(i3)*n1*n2 + (i2)*n1 + (i1)] > ten[1][0]){ //ten[1][0] = z[i3][i2][i1]; ten[1][0] = pointer_aux_z[(i3)*n1*n2 + (i2)*n1 + (i1)]; j1[1][0] = i1; j2[1][0] = i2; j3[1][0] = i3; bubble(ten, j1, j2, j3, MM, 1); } //if(z[i3][i2][i1] < ten[0][0]){ if(pointer_aux_z[(i3)*n1*n2 + (i2)*n1 + (i1)] < ten[0][0]){ //ten[0][0] = z[i3][i2][i1]; ten[0][0] = pointer_aux_z[(i3)*n1*n2 + (i2)*n1 + (i1)]; j1[0][0] = i1; j2[0][0] = i2; j3[0][0] = i3; bubble(ten, j1, j2, j3, MM, 0); } } } } /* * --------------------------------------------------------------------- * now which of these are globally best? 
* --------------------------------------------------------------------- */ i1 = MM - 1; i0 = MM - 1; for(i = MM - 1; i >= 0; i--){ best = 0.0; if(best < ten[1][i1]){ jg[1][i][0] = 0; jg[1][i][1] = is1 - 2 + j1[1][i1]; jg[1][i][2] = is2 - 2 + j2[1][i1]; jg[1][i][3] = is3 - 2 + j3[1][i1]; i1 = i1-1; }else{ jg[1][i][0] = 0; jg[1][i][1] = 0; jg[1][i][2] = 0; jg[1][i][3] = 0; } best = 1.0; if(best > ten[0][i0]){ jg[0][i][0] = 0; jg[0][i][1] = is1 - 2 + j1[0][i0]; jg[0][i][2] = is2 - 2 + j2[0][i0]; jg[0][i][3] = is3 - 2 + j3[0][i0]; i0 = i0-1; }else{ jg[0][i][0] = 0; jg[0][i][1] = 0; jg[0][i][2] = 0; jg[0][i][3] = 0; } } m1 = 0; m0 = 0; for(i3 = 0; i3 < n3; i3++){ for(i2 = 0; i2 < n2; i2++){ for(i1 = 0; i1 < n1; i1++){ //z[i3][i2][i1] = 0.0; pointer_aux_z[(i3)*n1*n2 + (i2)*n1 + (i1)] = 0.0; } } } for (i = MM-1; i >= m0; i--){ //z[jg[0][i][3]][jg[0][i][2]][jg[0][i][1]] = -1.0; pointer_aux_z[(jg[0][i][3])*n1*n2 + (jg[0][i][2])*n1 + (jg[0][i][1])] = -1.0; } for(i = MM-1; i >= m1; i--){ //z[jg[1][i][3]][jg[1][i][2]][jg[1][i][1]] = +1.0; pointer_aux_z[(jg[1][i][3])*n1*n2 + (jg[1][i][2])*n1 + (jg[1][i][1])] = +1.0; } comm3(pointer_aux_z, n1, n2, n3, k); }
638f693062dd8a371eed54f8bc72487ff38256c9.cu
/** * NASA Advanced Supercomputing Parallel Benchmarks C++ * * based on NPB 3.3.1 * * original version and technical report: * http://www.nas.nasa.gov/Software/NPB/ * * Authors: * E. Barszcz * P. Frederickson * A. Woo * M. Yarrow * * C++ version: * Dalvan Griebler <dalvangriebler@gmail.com> * Júnior Löff <loffjh@gmail.com> * Gabriell Araujo <hexenoften@gmail.com> * * CUDA version: * Gabriell Araujo <hexenoften@gmail.com> */ /* NO CAST VERSION 2 */ #include <cuda.h> #include "../common/npb-CPP.hpp" #include "npbparams.hpp" #define NM (2+(1<<LM)) /* actual dimension including ghost cells for communications */ #define NV (ONE*(2+(1<<NDIM1))*(2+(1<<NDIM2))*(2+(1<<NDIM3))) /* size of rhs array */ #define NR (((NV+NM*NM+5*NM+7*LM+6)/7)*8) /* size of residual array */ #define MAXLEVEL (LT_DEFAULT+1) /* maximum number of levels */ #define M (NM+1) /* set at m=1024, can handle cases up to 1024^3 case */ #define MM (10) #define A (pow(5.0,13.0)) #define X (314159265.0) #define T_INIT (0) #define T_BENCH (1) #define T_MG3P (2) #define T_PSINV (3) #define T_RESID (4) #define T_RESID2 (5) #define T_RPRJ3 (6) #define T_INTERP (7) #define T_NORM2 (8) #define T_COMM3 (9) #define T_LAST (10) #define THREADS_PER_BLOCK (1024) //1024 #define THREADS_PER_BLOCK_ON_NORM2U3 (128) //128 #define THREADS_PER_BLOCK_ON_COMM3 (32) //32 #define THREADS_PER_BLOCK_ON_ZERO3 (1024) //1024 //#define SHARED_2_M (2*M*sizeof(double)) //#define SHARED_3_M (3*M*sizeof(double)) //#define SHARED_2_NORM (2*THREADS_PER_BLOCK_ON_NORM2U3*sizeof(double)) /* global variables */ #if defined(DO_NOT_ALLOCATE_ARRAYS_WITH_DYNAMIC_MEMORY_AND_AS_SINGLE_DIMENSION) static int nx[MAXLEVEL+1]; static int ny[MAXLEVEL+1]; static int nz[MAXLEVEL+1]; static int m1[MAXLEVEL+1]; static int m2[MAXLEVEL+1]; static int m3[MAXLEVEL+1]; static int ir[MAXLEVEL+1]; static int debug_vec[8]; static double u[NR]; static double v[NV]; static double r[NR]; #else static int (*nx)=(int*)malloc(sizeof(int)*(MAXLEVEL+1)); static int 
(*ny)=(int*)malloc(sizeof(int)*(MAXLEVEL+1)); static int (*nz)=(int*)malloc(sizeof(int)*(MAXLEVEL+1)); static int (*m1)=(int*)malloc(sizeof(int)*(MAXLEVEL+1)); static int (*m2)=(int*)malloc(sizeof(int)*(MAXLEVEL+1)); static int (*m3)=(int*)malloc(sizeof(int)*(MAXLEVEL+1)); static int (*ir)=(int*)malloc(sizeof(int)*(MAXLEVEL+1)); static int (*debug_vec)=(int*)malloc(sizeof(int)*(8)); static double (*u)=(double*)malloc(sizeof(double)*(NR)); static double (*v)=(double*)malloc(sizeof(double)*(NV)); static double (*r)=(double*)malloc(sizeof(double)*(NR)); #endif static int is1, is2, is3, ie1, ie2, ie3, lt, lb; static boolean timeron; /* gpu variables */ int threads_per_block; int blocks_per_grid; int amount_of_work; size_t size_a_device; size_t size_c_device; size_t size_u_device; size_t size_v_device; size_t size_r_device; double* a_device; double* c_device; double* u_device; double* v_device; double* r_device; //extern __shared__ double extern_share_data[]; /* function prototypes */ static void bubble(double ten[][MM], int j1[][MM], int j2[][MM], int j3[][MM], int m, int ind); static void comm3(void* pointer_u, int n1, int n2, int n3, int kk); static void comm3_gpu(double* u_device, int n1, int n2, int n3, int kk); __global__ void comm3_gpu_kernel_1(double* u, int n1, int n2, int n3, int amount_of_work); __global__ void comm3_gpu_kernel_2(double* u, int n1, int n2, int n3, int amount_of_work); __global__ void comm3_gpu_kernel_3(double* u, int n1, int n2, int n3, int amount_of_work); static void interp(void* pointer_z, int mm1, int mm2, int mm3, void* pointer_u, int n1, int n2, int n3, int k); static void interp_gpu(double* z_device, int mm1, int mm2, int mm3, double* u_device, int n1, int n2, int n3, int k); __global__ void interp_gpu_kernel(double* base_z, double* base_u, int mm1, int mm2, int mm3, int n1, int n2, int n3, int amount_of_work); static void mg3P(double u[], double v[], double r[], double a[4], double c[4], int n1, int n2, int n3, int k); static void 
mg3P_gpu(double* u_device, double* v_device, double* r_device, double a[4], double c[4], int n1, int n2, int n3, int k); static void norm2u3(void* pointer_r, int n1, int n2, int n3, double* rnm2, double* rnmu, int nx, int ny, int nz); static void norm2u3_gpu(double* r_device, int n1, int n2, int n3, double* rnm2, double* rnmu, int nx, int ny, int nz); __global__ void norm2u3_gpu_kernel(double* r, const int n1, const int n2, const int n3, double* res_sum, double* res_max, int number_of_blocks, int amount_of_work); static double power(double a, int n); static void psinv(void* pointer_r, void* pointer_u, int n1, int n2, int n3, double c[4], int k); static void psinv_gpu(double* r_device, double* u_device, int n1, int n2, int n3, double* c_device, int k); __global__ void psinv_gpu_kernel(double* r, double* u, double* c, int n1, int n2, int n3, int amount_of_work); static void release_gpu(); static void rep_nrm(void* pointer_u, int n1, int n2, int n3, char* title, int kk); static void resid(void* pointer_u, void* pointer_v, void* pointer_r, int n1, int n2, int n3, double a[4], int k); static void resid_gpu(double* u_device, double* v_device, double* r_device, int n1, int n2, int n3, double* a_device, int k); __global__ void resid_gpu_kernel(double* r, double* u, double* v, double* a, int n1, int n2, int n3, int amount_of_work); static void rprj3(void* pointer_r, int m1k, int m2k, int m3k, void* pointer_s, int m1j, int m2j, int m3j, int k); static void rprj3_gpu(double* r_device, int m1k, int m2k, int m3k, double* s_device, int m1j, int m2j, int m3j, int k); __global__ void rprj3_gpu_kernel(double* base_r, double* base_s, int m1k, int m2k, int m3k, int m1j, int m2j, int m3j, int d1, int d2, int d3, int amount_of_work); static void setup(int* n1, int* n2, int* n3, int k); static void setup_gpu(double* a, double* c); static void showall(void* pointer_z, int n1, int n2, int n3); static void zero3_gpu(double* z_device, int n1, int n2, int n3); __global__ void 
zero3_gpu_kernel(double* z, int n1, int n2, int n3, int amount_of_work); static void zero3(void* pointer_z, int n1, int n2, int n3); static void zran3(void* pointer_z, int n1, int n2, int n3, int nx, int ny, int k); /* mg */ int main(int argc, char** argv){ #if defined(DO_NOT_ALLOCATE_ARRAYS_WITH_DYNAMIC_MEMORY_AND_AS_SINGLE_DIMENSION) printf(" DO_NOT_ALLOCATE_ARRAYS_WITH_DYNAMIC_MEMORY_AND_AS_SINGLE_DIMENSION mode on\n"); #endif /* * ------------------------------------------------------------------------- * k is the current level. it is passed down through subroutine args * and is not global. it is the current iteration * ------------------------------------------------------------------------- */ int k, it; double t, tinit, mflops; double a[4], c[4]; double rnm2, rnmu, epsilon; int n1, n2, n3, nit; double nn, verify_value, err; boolean verified; char class_npb; int i; char* t_names[T_LAST]; double tmax; for(i=T_INIT; i<T_LAST; i++){ timer_clear(i); } timer_start(T_INIT); /* * ---------------------------------------------------------------------- * read in and broadcast input data * ---------------------------------------------------------------------- */ FILE* fp; if((fp = fopen("timer.flag", "r")) != NULL){ timeron = TRUE; t_names[T_INIT] = (char*) "init"; t_names[T_BENCH] = (char*) "benchmk"; t_names[T_MG3P] = (char*) "mg3P"; t_names[T_PSINV] = (char*) "psinv"; t_names[T_RESID] = (char*) "resid"; t_names[T_RPRJ3] = (char*) "rprj3"; t_names[T_INTERP] = (char*) "interp"; t_names[T_NORM2] = (char*) "norm2"; t_names[T_COMM3] = (char*) "comm3"; fclose(fp); }else{ timeron = FALSE; } fp = fopen("mg.input", "r"); if(fp != NULL){ printf(" Reading from input file mg.input\n"); if(fscanf(fp, "%d", &lt) != 1){ printf(" Error in reading elements\n"); exit(1); } while(fgetc(fp) != '\n'); if(fscanf(fp, "%d%d%d", &nx[lt], &ny[lt], &nz[lt]) != 3){ printf(" Error in reading elements\n"); exit(1); } while(fgetc(fp) != '\n'); if(fscanf(fp, "%d", &nit) != 1){ printf(" Error in 
reading elements\n"); exit(1); } while(fgetc(fp) != '\n'); for(i = 0; i <= 7; i++) { if(fscanf(fp, "%d", &debug_vec[i]) != 1){ printf(" Error in reading elements\n"); exit(1); } } fclose(fp); }else{ printf(" No input file. Using compiled defaults\n"); lt = LT_DEFAULT; nit = NIT_DEFAULT; nx[lt] = NX_DEFAULT; ny[lt] = NY_DEFAULT; nz[lt] = NZ_DEFAULT; for(i = 0; i <= 7; i++){ debug_vec[i] = DEBUG_DEFAULT; } } if((nx[lt] != ny[lt]) || (nx[lt] != nz[lt])){ class_npb = 'U'; }else if(nx[lt] == 32 && nit == 4){ class_npb = 'S'; }else if(nx[lt] == 128 && nit == 4){ class_npb = 'W'; }else if(nx[lt] == 256 && nit == 4){ class_npb = 'A'; }else if(nx[lt] == 256 && nit == 20){ class_npb = 'B'; }else if(nx[lt] == 512 && nit == 20){ class_npb = 'C'; }else if(nx[lt] == 1024 && nit == 50){ class_npb = 'D'; }else if(nx[lt] == 2048 && nit == 50){ class_npb = 'E'; }else{ class_npb = 'U'; } /* * --------------------------------------------------------------------- * use these for debug info: * --------------------------------------------------------------------- * debug_vec(0) = 1 !=> report all norms * debug_vec(1) = 1 !=> some setup information * debug_vec(1) = 2 !=> more setup information * debug_vec(2) = k => at level k or below, show result of resid * debug_vec(3) = k => at level k or below, show result of psinv * debug_vec(4) = k => at level k or below, show result of rprj * debug_vec(5) = k => at level k or below, show result of interp * debug_vec(6) = 1 => (unused) * debug_vec(7) = 1 => (unused) * --------------------------------------------------------------------- */ a[0] = -8.0/3.0; a[1] = 0.0; a[2] = 1.0/6.0; a[3] = 1.0/12.0; if(class_npb == 'A' || class_npb == 'S' || class_npb =='W'){ /* coefficients for the s(a) smoother */ c[0] = -3.0/8.0; c[1] = +1.0/32.0; c[2] = -1.0/64.0; c[3] = 0.0; }else{ /* coefficients for the s(b) smoother */ c[0] = -3.0/17.0; c[1] = +1.0/33.0; c[2] = -1.0/61.0; c[3] = 0.0; } lb = 1; k = lt; setup(&n1,&n2,&n3,k); zero3(u,n1,n2,n3); 
zran3(v,n1,n2,n3,nx[lt],ny[lt],k); norm2u3(v,n1,n2,n3,&rnm2,&rnmu,nx[lt],ny[lt],nz[lt]); printf("\n\n NAS Parallel Benchmarks 4.1 CUDA C++ version - MG Benchmark\n\n"); printf(" Size: %3dx%3dx%3d (class_npb %1c)\n", nx[lt], ny[lt], nz[lt], class_npb); printf(" Iterations: %3d\n", nit); resid(u,v,r,n1,n2,n3,a,k); norm2u3(r,n1,n2,n3,&rnm2,&rnmu,nx[lt],ny[lt],nz[lt]); /* * --------------------------------------------------------------------- * one iteration for startup * --------------------------------------------------------------------- */ mg3P(u,v,r,a,c,n1,n2,n3,k); resid(u,v,r,n1,n2,n3,a,k); setup(&n1,&n2,&n3,k); zero3(u,n1,n2,n3); zran3(v,n1,n2,n3,nx[lt],ny[lt],k); timer_stop(T_INIT); tinit = timer_read(T_INIT); printf(" Initialization time: %15.3f seconds\n", tinit); for(i=T_BENCH; i<T_LAST; i++){ timer_clear(i); } setup_gpu(a,c); timer_start(T_BENCH); if(timeron){timer_start(T_RESID2);} resid_gpu(u_device,v_device,r_device,n1,n2,n3,a_device,k); if(timeron){timer_stop(T_RESID2);} norm2u3_gpu(r_device,n1,n2,n3,&rnm2,&rnmu,nx[lt],ny[lt],nz[lt]); for(it = 1; it <= nit; it++){ //if((it==1)||(it==nit)||((it%5)==0)){printf(" iter %3d\n",it);} if(timeron){timer_start(T_MG3P);} mg3P_gpu(u_device,v_device,r_device,a_device,c_device,n1,n2,n3,k); if(timeron){timer_stop(T_MG3P);} if(timeron){timer_start(T_RESID2);} resid_gpu(u_device,v_device,r_device,n1,n2,n3,a_device,k); if(timeron){timer_stop(T_RESID2);} } norm2u3_gpu(r_device,n1,n2,n3,&rnm2,&rnmu,nx[lt],ny[lt],nz[lt]); timer_stop(T_BENCH); t = timer_read(T_BENCH); verified = FALSE; verify_value = 0.0; printf(" Benchmark completed\n"); epsilon = 1.0e-8; if(class_npb != 'U'){ if(class_npb == 'S'){ verify_value = 0.5307707005734e-04; }else if(class_npb == 'W'){ verify_value = 0.6467329375339e-05; }else if(class_npb == 'A'){ verify_value = 0.2433365309069e-05; }else if(class_npb == 'B'){ verify_value = 0.1800564401355e-05; }else if(class_npb == 'C'){ verify_value = 0.5706732285740e-06; }else if(class_npb == 'D'){ 
verify_value = 0.1583275060440e-09;
		}else if(class_npb == 'E'){
			verify_value = 0.8157592357404e-10;
		}
		/* relative error of the computed L2 norm against the class reference */
		err = fabs(rnm2-verify_value) / verify_value;
		if(err <= epsilon){
			verified = TRUE;
			printf(" VERIFICATION SUCCESSFUL\n");
			printf(" L2 Norm is %20.13e\n", rnm2);
			printf(" Error is %20.13e\n", err);
		}else{
			verified = FALSE;
			printf(" VERIFICATION FAILED\n");
			printf(" L2 Norm is %20.13e\n", rnm2);
			printf(" The correct L2 Norm is %20.13e\n", verify_value);
		}
	}else{
		verified = FALSE;
		printf(" Problem size unknown\n");
		printf(" NO VERIFICATION PERFORMED\n");
	}
	/* MFLOPS: 58 floating point operations per grid point per iteration */
	nn = 1.0*nx[lt]*ny[lt]*nz[lt];
	if(t!=0.0){
		mflops = 58.0*nit*nn*1.0e-6/t;
	}else{
		mflops = 0.0;
	}
	c_print_results((char*)"MG",
			class_npb,
			nx[lt],
			ny[lt],
			nz[lt],
			nit,
			t,
			mflops,
			(char*)" floating point",
			verified,
			(char*)NPBVERSION,
			(char*)COMPILETIME,
			(char*)CS1,
			(char*)CS2,
			(char*)CS3,
			(char*)CS4,
			(char*)CS5,
			(char*)CS6,
			(char*)CS7);
	/*
	 * ---------------------------------------------------------------------
	 * more timers
	 * ---------------------------------------------------------------------
	 */
	if(timeron){
		tmax = timer_read(T_BENCH);
		if(tmax==0.0){tmax=1.0;}
		printf(" SECTION Time (secs)\n");
		for(i=T_BENCH; i<T_LAST; i++){
			t = timer_read(i);
			if(i==T_RESID2){
				/* report resid time spent outside mg3P separately */
				t = timer_read(T_RESID) - t;
				printf(" --> %8s:%9.3f (%6.2f%%)\n", "mg-resid", t, t*100.0/tmax);
			}else{
				printf(" %-8s:%9.3f (%6.2f%%)\n", t_names[i], t, t*100.0/tmax);
			}
		}
	}
	release_gpu();
	return 0;
}

/*
 * ---------------------------------------------------------------------
 * bubble does a bubble sort in direction dir
 * ---------------------------------------------------------------------
 * ind == 1 sorts row `ind` of ten descending, otherwise ascending;
 * the parallel index arrays j1/j2/j3 are swapped along with the keys.
 * The sort stops at the first already-ordered pair (single pass).
 */
static void bubble(double ten[][MM],
		int j1[][MM],
		int j2[][MM],
		int j3[][MM],
		int m,
		int ind){
	double temp;
	int i, j_temp;
	if(ind == 1){
		for(i = 0; i < m-1; i++){
			if(ten[ind][i] > ten[ind][i+1]){
				temp = ten[ind][i+1];
				ten[ind][i+1] = ten[ind][i];
				ten[ind][i] = temp;
				j_temp = j1[ind][i+1];
				j1[ind][i+1] = j1[ind][i];
				j1[ind][i] = j_temp;
				j_temp = j2[ind][i+1];
				j2[ind][i+1] = j2[ind][i];
				j2[ind][i] = j_temp;
				j_temp = j3[ind][i+1];
				j3[ind][i+1] = j3[ind][i];
				j3[ind][i] = j_temp;
			}else{
				return;
			}
		}
	}else{
		for(i = 0; i < m-1; i++){
			if(ten[ind][i] < ten[ind][i+1]){
				temp = ten[ind][i+1];
				ten[ind][i+1] = ten[ind][i];
				ten[ind][i] = temp;
				j_temp = j1[ind][i+1];
				j1[ind][i+1] = j1[ind][i];
				j1[ind][i] = j_temp;
				j_temp = j2[ind][i+1];
				j2[ind][i+1] = j2[ind][i];
				j2[ind][i] = j_temp;
				j_temp = j3[ind][i+1];
				j3[ind][i+1] = j3[ind][i];
				j3[ind][i] = j_temp;
			}else{
				return;
			}
		}
	}
}

/*
 * ---------------------------------------------------------------------
 * comm3 organizes the communication on all borders
 * ---------------------------------------------------------------------
 * Applies periodic boundary copies on the n1 x n2 x n3 grid held at
 * pointer_u (flat row-major layout, row stride n1, plane stride n1*n2).
 */
static void comm3(void* pointer_u, int n1, int n2, int n3, int kk){
	//double (*u)[n2][n1] = (double (*)[n2][n1])pointer_u;
	double* pointer_aux_u = (double*)pointer_u;
	int i1, i2, i3;
	if(timeron){timer_start(T_COMM3);}
	/* axis = 1 */
	for(i3 = 1; i3 < n3-1; i3++){
		for(i2 = 1; i2 < n2-1; i2++){
			//u[i3][i2][0] = u[i3][i2][n1-2];
			/*
			 * FIX: the source operand previously read the file-scope array
			 * `u` instead of pointer_aux_u. comm3 is called on per-level
			 * sub-grids (offset pointers, level-specific n1/n2), so the
			 * periodic copy must read the same grid it writes — as the
			 * reference line above and every sibling statement show.
			 */
			pointer_aux_u[(i3)*n1*n2 + (i2)*n1 + (0)] = pointer_aux_u[(i3)*n1*n2 + (i2)*n1 + (n1-2)];
			//u[i3][i2][n1-1] = u[i3][i2][1];
			pointer_aux_u[(i3)*n1*n2 + (i2)*n1 + (n1-1)] = pointer_aux_u[(i3)*n1*n2 + (i2)*n1 + (1)];
		}
	}
	/* axis = 2 */
	for(i3 = 1; i3 < n3-1; i3++){
		for(i1 = 0; i1 < n1; i1++){
			//u[i3][0][i1] = u[i3][n2-2][i1];
			pointer_aux_u[(i3)*n1*n2 + (0)*n1 + (i1)] = pointer_aux_u[(i3)*n1*n2 + (n2-2)*n1 + (i1)];
			//u[i3][n2-1][i1] = u[i3][1][i1];
			pointer_aux_u[(i3)*n1*n2 + (n2-1)*n1 + (i1)] = pointer_aux_u[(i3)*n1*n2 + (1)*n1 + (i1)];
		}
	}
	/* axis = 3 */
	for(i2 = 0; i2 < n2; i2++){
		for(i1 = 0; i1 < n1; i1++){
			//u[0][i2][i1] = u[n3-2][i2][i1];
			pointer_aux_u[(0)*n1*n2 + (i2)*n1 + (i1)] = pointer_aux_u[(n3-2)*n1*n2 + (i2)*n1 + (i1)];
			//u[n3-1][i2][i1] = u[1][i2][i1];
			pointer_aux_u[(n3-1)*n1*n2 + (i2)*n1 + (i1)] = pointer_aux_u[(1)*n1*n2 + (i2)*n1 + (i1)];
		}
	}
	if(timeron){timer_stop(T_COMM3);}
}

static void comm3_gpu(double* u_device,
		int n1,
		int n2,
		int n3,
		int kk){
if(timeron){timer_start(T_COMM3);}
	/* device-side comm3: one kernel per axis, one block per i3 plane (or i2 row for axis 3) */
	int threads_per_block = THREADS_PER_BLOCK_ON_COMM3;
	int amount_of_work = (n3-2) * THREADS_PER_BLOCK_ON_COMM3;
	int blocks_per_grid = (ceil((double)(amount_of_work)/(double)(threads_per_block)));
	comm3_gpu_kernel_1<<<blocks_per_grid,threads_per_block>>>(u_device, n1, n2, n3, amount_of_work);
	cudaDeviceSynchronize();
	threads_per_block = THREADS_PER_BLOCK_ON_COMM3;
	amount_of_work = (n3-2) * THREADS_PER_BLOCK_ON_COMM3;
	blocks_per_grid = (ceil((double)(amount_of_work)/(double)(threads_per_block)));
	comm3_gpu_kernel_2<<<blocks_per_grid,threads_per_block>>>(u_device, n1, n2, n3, amount_of_work);
	cudaDeviceSynchronize();
	threads_per_block = THREADS_PER_BLOCK_ON_COMM3;
	amount_of_work = n2 * THREADS_PER_BLOCK_ON_COMM3;
	blocks_per_grid = (ceil((double)(amount_of_work)/(double)(threads_per_block)));
	comm3_gpu_kernel_3<<<blocks_per_grid,threads_per_block>>>(u_device, n1, n2, n3, amount_of_work);
	cudaDeviceSynchronize();
	if(timeron){timer_stop(T_COMM3);}
}

/* axis = 1 periodic exchange: block = interior i3 plane, threads stride i2 */
__global__ void comm3_gpu_kernel_1(double* u,
		int n1,
		int n2,
		int n3,
		int amount_of_work){
	int check=blockIdx.x*blockDim.x+threadIdx.x;
	if(check>=amount_of_work){return;}
	int i3=blockIdx.x+1;
	int i2=threadIdx.x+1;
	while(i2<n2-1){
		u[i3*n2*n1+i2*n1+0]=u[i3*n2*n1+i2*n1+n1-2];
		u[i3*n2*n1+i2*n1+n1-1]=u[i3*n2*n1+i2*n1+1];
		i2+=THREADS_PER_BLOCK_ON_COMM3;
	}
}

/* axis = 2 periodic exchange: block = interior i3 plane, threads stride i1 */
__global__ void comm3_gpu_kernel_2(double* u,
		int n1,
		int n2,
		int n3,
		int amount_of_work){
	int check=blockIdx.x*blockDim.x+threadIdx.x;
	if(check>=amount_of_work){return;}
	int i3=blockIdx.x + 1;
	int i1=threadIdx.x;
	while(i1<n1){
		u[i3*n2*n1+0*n1+i1]=u[i3*n2*n1+(n2-2)*n1+i1];
		u[i3*n2*n1+(n2-1)*n1+i1]=u[i3*n2*n1+1*n1+i1];
		i1+=THREADS_PER_BLOCK_ON_COMM3;
	}
}

/* axis = 3 periodic exchange: block = i2 row, threads stride i1 */
__global__ void comm3_gpu_kernel_3(double* u,
		int n1,
		int n2,
		int n3,
		int amount_of_work){
	int check=blockIdx.x*blockDim.x+threadIdx.x;
	if(check>=amount_of_work){return;}
	int i2=blockIdx.x;
	int i1=threadIdx.x;
	while(i1<n1){
		u[0*n2*n1+i2*n1+i1]=u[(n3-2)*n2*n1+i2*n1+i1];
		u[(n3-1)*n2*n1+i2*n1+i1]=u[1*n2*n1+i2*n1+i1];
		i1+=THREADS_PER_BLOCK_ON_COMM3;
	}
}

/*
 * --------------------------------------------------------------------
 * interp adds the trilinear interpolation of the correction
 * from the coarser grid to the current approximation: u = u + Qu'
 *
 * observe that this implementation costs 16A + 4M, where
 * A and M denote the costs of addition and multiplication.
 * note that this vectorizes, and is also fine for cache
 * based machines. vector machines may get slightly better
 * performance however, with 8 separate "do i1" loops, rather than 4.
 * --------------------------------------------------------------------
 * z is the coarse (mm1 x mm2 x mm3) grid; u is the fine (n1 x n2 x n3)
 * grid. Both are flat row-major arrays.
 */
static void interp(void* pointer_z,
		int mm1,
		int mm2,
		int mm3,
		void* pointer_u,
		int n1,
		int n2,
		int n3,
		int k){
	//double (*z)[mm2][mm1] = (double (*)[mm2][mm1])pointer_z;
	//double (*u)[n2][n1] = (double (*)[n2][n1])pointer_u;
	double* pointer_aux_z = (double*)pointer_z;
	double* pointer_aux_u = (double*)pointer_u;
	int i3, i2, i1, d1, d2, d3, t1, t2, t3;
	/*
	 * --------------------------------------------------------------------
	 * note that m = 1037 in globals.h but for this only need to be
	 * 535 to handle up to 1024^3
	 * integer m
	 * parameter( m=535 )
	 * --------------------------------------------------------------------
	 */
	double z1[M], z2[M], z3[M];
	if(timeron){timer_start(T_INTERP);}
	if(n1 != 3 && n2 != 3 && n3 != 3){
		for(i3 = 0; i3 < mm3-1; i3++){
			for(i2 = 0; i2 < mm2-1; i2++){
				/* stage the three coarse-grid edge/face sums for this (i2,i3) line */
				for(i1 = 0; i1 < mm1; i1++){
					//z1[i1] = z[i3][i2+1][i1] + z[i3][i2][i1];
					z1[i1] = pointer_aux_z[(i3)*mm1*mm2 + (i2+1)*mm1 + (i1)] + pointer_aux_z[(i3)*mm1*mm2 + (i2)*mm1 + (i1)];
					//z2[i1] = z[i3+1][i2][i1] + z[i3][i2][i1];
					z2[i1] = pointer_aux_z[(i3+1)*mm1*mm2 + (i2)*mm1 + (i1)] + pointer_aux_z[(i3)*mm1*mm2 + (i2)*mm1 + (i1)];
					//z3[i1] = z[i3+1][i2+1][i1] + z[i3+1][i2][i1] + z1[i1];
					z3[i1] = pointer_aux_z[(i3+1)*mm1*mm2 + (i2+1)*mm1 + (i1)] + pointer_aux_z[(i3+1)*mm1*mm2 + (i2)*mm1 + (i1)] + z1[i1];
				}
				for(i1 = 0; i1 < mm1-1; i1++){
					//u[2*i3][2*i2][2*i1] = u[2*i3][2*i2][2*i1] + z[i3][i2][i1];
					pointer_aux_u[(2*i3)*n1*n2 +(2*i2)*n1 + (2*i1)] = pointer_aux_u[(2*i3)*n1*n2 +(2*i2)*n1 + (2*i1)] + pointer_aux_z[(i3)*mm1*mm2 + (i2)*mm1 + (i1)];
					//u[2*i3][2*i2][2*i1+1] = u[2*i3][2*i2][2*i1+1] + 0.5 * (z[i3][i2][i1+1] + z[i3][i2][i1]);
					pointer_aux_u[(2*i3)*n1*n2 + (2*i2)*n1 + (2*i1+1)] = pointer_aux_u[(2*i3)*n1*n2 + (2*i2)*n1 + (2*i1+1)] + 0.5 * (pointer_aux_z[(i3)*mm1*mm2 + (i2)*mm1 + (i1+1)] + pointer_aux_z[(i3)*mm1*mm2 + (i2)*mm1 + (i1)]);
				}
				for(i1 = 0; i1 < mm1-1; i1++){
					//u[2*i3][2*i2+1][2*i1] = u[2*i3][2*i2+1][2*i1] + 0.5 * z1[i1];
					pointer_aux_u[(2*i3)*n1*n2 + (2*i2+1)*n1 + (2*i1)] = pointer_aux_u[(2*i3)*n1*n2 + (2*i2+1)*n1 + (2*i1)] + 0.5 * z1[i1];
					//u[2*i3][2*i2+1][2*i1+1] = u[2*i3][2*i2+1][2*i1+1] + 0.25 * ( z1[i1] + z1[i1+1] );
					pointer_aux_u[(2*i3)*n1*n2 + (2*i2+1)*n1 + (2*i1+1)] = pointer_aux_u[(2*i3)*n1*n2 + (2*i2+1)*n1 + (2*i1+1)] + 0.25 * ( z1[i1] + z1[i1+1] );
				}
				for(i1 = 0; i1 < mm1-1; i1++){
					//u[2*i3+1][2*i2][2*i1] = u[2*i3+1][2*i2][2*i1] + 0.5 * z2[i1];
					pointer_aux_u[(2*i3+1)*n1*n2 + (2*i2)*n1 + (2*i1)] = pointer_aux_u[(2*i3+1)*n1*n2 + (2*i2)*n1 + (2*i1)] + 0.5 * z2[i1];
					//u[2*i3+1][2*i2][2*i1+1] = u[2*i3+1][2*i2][2*i1+1] + 0.25 * ( z2[i1] + z2[i1+1] );
					pointer_aux_u[(2*i3+1)*n1*n2 + (2*i2)*n1 + (2*i1+1)] = pointer_aux_u[(2*i3+1)*n1*n2 + (2*i2)*n1 + (2*i1+1)] + 0.25 * ( z2[i1] + z2[i1+1] );
				}
				for(i1 = 0; i1 < mm1-1; i1++){
					//u[2*i3+1][2*i2+1][2*i1] = u[2*i3+1][2*i2+1][2*i1] + 0.25 * z3[i1];
					pointer_aux_u[(2*i3+1)*n1*n2 + (2*i2+1)*n1 + (2*i1)] = pointer_aux_u[(2*i3+1)*n1*n2 + (2*i2+1)*n1 + (2*i1)] + 0.25 * z3[i1];
					//u[2*i3+1][2*i2+1][2*i1+1] = u[2*i3+1][2*i2+1][2*i1+1] + 0.125 * ( z3[i1] + z3[i1+1] );
					pointer_aux_u[(2*i3+1)*n1*n2 + (2*i2+1)*n1 + (2*i1+1)] = pointer_aux_u[(2*i3+1)*n1*n2 + (2*i2+1)*n1 + (2*i1+1)] + 0.125 * ( z3[i1] + z3[i1+1] );
				}
			}
		}
	}else{
		/* coarsest-grid special case: offsets d*/t* select the stencil shift per axis */
		if(n1 == 3){
			d1 = 2;
			t1 = 1;
		}else{
			d1 = 1;
			t1 = 0;
		}
		if(n2 == 3){
			d2 = 2;
			t2 = 1;
		}else{
			d2 = 1;
			t2 = 0;
		}
		if(n3 == 3){
			d3 = 2;
			t3 = 1;
		}else{
			d3 = 1;
			t3 = 0;
		}
		for(i3 = d3; i3 <= mm3-1; i3++){
			for(i2 = d2; i2 <= mm2-1; i2++){
				for(i1 = d1; i1 <= mm1-1; i1++){
					//u[2*i3-d3-1][2*i2-d2-1][2*i1-d1-1] += z[i3-1][i2-1][i1-1];
					pointer_aux_u[(2*i3-d3-1)*n1*n2 + (2*i2-d2-1)*n1 + (2*i1-d1-1)] = pointer_aux_u[(2*i3-d3-1)*n1*n2 + (2*i2-d2-1)*n1 + (2*i1-d1-1)] + pointer_aux_z[(i3-1)*mm1*mm2 + (i2-1)*mm1 + (i1-1)];
				}
				for(i1 = 1; i1 <= mm1-1; i1++){
					//u[2*i3-d3-1][2*i2-d2-1][2*i1-t1-1] += 0.5 * (z[i3-1][i2-1][i1] + z[i3-1][i2-1][i1-1]);
					pointer_aux_u[(2*i3-d3-1)*n1*n2 + (2*i2-d2-1)*n1 + (2*i1-t1-1)] = pointer_aux_u[(2*i3-d3-1)*n1*n2 + (2*i2-d2-1)*n1 + (2*i1-t1-1)] + 0.5 * (pointer_aux_z[(i3-1)*mm1*mm2 + (i2-1)*mm1 + (i1)] + pointer_aux_z[(i3-1)*mm1*mm2 + (i2-1)*mm1 + (i1-1)]);
				}
			}
			for(i2 = 1; i2 <= mm2-1; i2++){
				for( i1 = d1; i1 <= mm1-1; i1++){
					//u[2*i3-d3-1][2*i2-t2-1][2*i1-d1-1] += 0.5 * (z[i3-1][i2][i1-1] + z[i3-1][i2-1][i1-1]);
					pointer_aux_u[(2*i3-d3-1)*n1*n2 + (2*i2-t2-1)*n1 + (2*i1-d1-1)] = pointer_aux_u[(2*i3-d3-1)*n1*n2 + (2*i2-t2-1)*n1 + (2*i1-d1-1)] + 0.5 * (pointer_aux_z[(i3-1)*mm1*mm2 + (i2)*mm1 + (i1-1)] + pointer_aux_z[(i3-1)*mm1*mm2 + (i2-1)*mm1 + (i1-1)]);
				}
				for(i1 = 1; i1 <= mm1-1; i1++){
					//u[2*i3-d3-1][2*i2-t2-1][2*i1-t1-1] += 0.25 * (z[i3-1][i2][i1] + z[i3-1][i2-1][i1] + z[i3-1][i2][i1-1] + z[i3-1][i2-1][i1-1]);
					pointer_aux_u[(2*i3-d3-1)*n1*n2 + (2*i2-t2-1)*n1 + (2*i1-t1-1)] = pointer_aux_u[(2*i3-d3-1)*n1*n2 + (2*i2-t2-1)*n1 + (2*i1-t1-1)] + 0.25 * (pointer_aux_z[(i3-1)*mm1*mm2 + (i2)*mm1 + (i1)] + pointer_aux_z[(i3-1)*mm1*mm2 + (i2-1)*mm1 + (i1)] + pointer_aux_z[(i3-1)*mm1*mm2 + (i2)*mm1 + (i1-1)] + pointer_aux_z[(i3-1)*mm1*mm2 + (i2-1)*mm1 + (i1-1)]);
				}
			}
		}
		for(i3 = 1; i3 <= mm3-1; i3++){
			for(i2 = d2; i2 <= mm2-1; i2++){
				for(i1 = d1; i1 <= mm1-1; i1++){
					//u[2*i3-t3-1][2*i2-d2-1][2*i1-d1-1] += 0.5 * (z[i3][i2-1][i1-1] + z[i3-1][i2-1][i1-1]);
					pointer_aux_u[(2*i3-t3-1)*n1*n2 + (2*i2-d2-1)*n1 + (2*i1-d1-1)] = pointer_aux_u[(2*i3-t3-1)*n1*n2 + (2*i2-d2-1)*n1 + (2*i1-d1-1)] + 0.5 * (pointer_aux_z[(i3)*mm1*mm2 + (i2-1)*mm1 + (i1-1)] + pointer_aux_z[(i3-1)*mm1*mm2 + (i2-1)*mm1 + (i1-1)]);
				}
				for(i1 = 1; i1 <= mm1-1; i1++){
					//u[2*i3-t3-1][2*i2-d2-1][2*i1-t1-1] += 0.25 * (z[i3][i2-1][i1] + z[i3][i2-1][i1-1] + z[i3-1][i2-1][i1] + z[i3-1][i2-1][i1-1]);
					pointer_aux_u[(2*i3-t3-1)*n1*n2 + (2*i2-d2-1)*n1 + (2*i1-t1-1)] = pointer_aux_u[(2*i3-t3-1)*n1*n2 + (2*i2-d2-1)*n1 + (2*i1-t1-1)] + 0.25 * (pointer_aux_z[(i3)*mm1*mm2 + (i2-1)*mm1 + (i1)] + pointer_aux_z[(i3)*mm1*mm2 + (i2-1)*mm1 + (i1-1)] + pointer_aux_z[(i3-1)*mm1*mm2 + (i2-1)*mm1 + (i1)] + pointer_aux_z[(i3-1)*mm1*mm2 + (i2-1)*mm1 + (i1-1)]);
				}
			}
			for(i2 = 1; i2 <= mm2-1; i2++){
				for (i1 = d1; i1 <= mm1-1; i1++){
					//u[2*i3-t3-1][2*i2-t2-1][2*i1-d1-1] += 0.25 * (z[i3][i2][i1-1] + z[i3][i2-1][i1-1] + z[i3-1][i2][i1-1] + z[i3-1][i2-1][i1-1]);
					pointer_aux_u[(2*i3-t3-1)*n1*n2 + (2*i2-t2-1)*n1 + (2*i1-d1-1)] = pointer_aux_u[(2*i3-t3-1)*n1*n2 + (2*i2-t2-1)*n1 + (2*i1-d1-1)] + 0.25 * (pointer_aux_z[(i3)*mm1*mm2 + (i2)*mm1 + (i1-1)] + pointer_aux_z[(i3)*mm1*mm2 + (i2-1)*mm1 + (i1-1)] + pointer_aux_z[(i3-1)*mm1*mm2 + (i2)*mm1 + (i1-1)] + pointer_aux_z[(i3-1)*mm1*mm2 + (i2-1)*mm1 + (i1-1)]);
				}
				for(i1 = 1; i1 <= mm1-1; i1++){
					//u[2*i3-t3-1][2*i2-t2-1][2*i1-t1-1] += 0.125 * (8-point coarse-cell sum);
					pointer_aux_u[(2*i3-t3-1)*n1*n2 + (2*i2-t2-1)*n1 + (2*i1-t1-1)] = pointer_aux_u[(2*i3-t3-1)*n1*n2 + (2*i2-t2-1)*n1 + (2*i1-t1-1)] + 0.125 * (pointer_aux_z[(i3)*mm1*mm2 + (i2)*mm1 + (i1)] + pointer_aux_z[(i3)*mm1*mm2 + (i2-1)*mm1 + (i1)] + pointer_aux_z[(i3)*mm1*mm2 + (i2)*mm1 + (i1-1)] + pointer_aux_z[(i3)*mm1*mm2 + (i2-1)*mm1 + (i1-1)] + pointer_aux_z[(i3-1)*mm1*mm2 + (i2)*mm1 + (i1)] + pointer_aux_z[(i3-1)*mm1*mm2 + (i2-1)*mm1 + (i1)] + pointer_aux_z[(i3-1)*mm1*mm2 + (i2)*mm1 + (i1-1)] + pointer_aux_z[(i3-1)*mm1*mm2 + (i2-1)*mm1 + (i1-1)]);
				}
			}
		}
	}
	if(timeron){timer_stop(T_INTERP);}
	if(debug_vec[0] >= 1){
		rep_nrm(pointer_aux_z,mm1,mm2,mm3,(char*)"z: inter",k-1);
		rep_nrm(pointer_aux_u,n1,n2,n3,(char*)"u: inter",k);
	}
	if(debug_vec[5] >= k){
		showall(pointer_aux_z,mm1,mm2,mm3);
		showall(pointer_aux_u,n1,n2,n3);
	}
}

/*
 * device-side interp; NOTE(review): only the general (n != 3) case is
 * offloaded — the coarsest-grid special case has no GPU path here,
 * presumably because mg3P_gpu never reaches it. TODO confirm.
 */
static void interp_gpu(double* z_device,
		int mm1,
		int mm2,
		int mm3,
		double* u_device,
		int n1,
		int n2,
		int n3,
		int k){
	if(timeron){timer_start(T_INTERP);}
	if(n1 != 3 && n2 != 3 && n3 != 3){
		threads_per_block = mm1;
		amount_of_work = (mm3-1) * (mm2-1) * mm1;
		blocks_per_grid = (ceil((double)(amount_of_work)/(double)(threads_per_block)));
		interp_gpu_kernel<<<blocks_per_grid,
			threads_per_block //,SHARED_3_M
			>>>(z_device,
					u_device,
					mm1,
					mm2,
					mm3,
					n1,
					n2,
					n3,
					amount_of_work);
		cudaDeviceSynchronize();
	}
	if(timeron){timer_stop(T_INTERP);}
}

/* one block per coarse (i2,i3) line; z1/z2/z3 staged in shared memory */
__global__ void interp_gpu_kernel(double* base_z,
		double* base_u,
		int mm1,
		int mm2,
		int mm3,
		int n1,
		int n2,
		int n3,
		int amount_of_work){
	int check=blockIdx.x*blockDim.x+threadIdx.x;
	if(check>=amount_of_work){return;}
	int i3,i2,i1;
	__shared__ double z1[M],z2[M],z3[M];
	//double* z1 = (double*)(extern_share_data);
	//double* z2 = (double*)(&z1[M]);
	//double* z3 = (double*)(&z2[M]);
	double (*z)=base_z;
	double (*u)=base_u;
	i3=blockIdx.x/(mm2-1);
	i2=blockIdx.x%(mm2-1);
	i1=threadIdx.x;
	z1[i1]=z[i3*mm2*mm1+(i2+1)*mm1+i1]+z[i3*mm2*mm1+i2*mm1+i1];
	z2[i1]=z[(i3+1)*mm2*mm1+i2*mm1+i1]+z[i3*mm2*mm1+i2*mm1+i1];
	z3[i1]=z[(i3+1)*mm2*mm1+(i2+1)*mm1+i1]
		+z[(i3+1)*mm2*mm1+i2*mm1+i1]+z1[i1];
	__syncthreads();
	if(i1<mm1-1){
		double z321=z[i3*mm2*mm1+i2*mm1+i1];
		u[2*i3*n2*n1+2*i2*n1+2*i1]+=z321;
		u[2*i3*n2*n1+2*i2*n1+2*i1+1]+=0.5*(z[i3*mm2*mm1+i2*mm1+i1+1]+z321);
		u[2*i3*n2*n1+(2*i2+1)*n1+2*i1]+=0.5*z1[i1];
u[2*i3*n2*n1+(2*i2+1)*n1+2*i1+1]+=0.25*(z1[i1]+z1[i1+1]);
		u[(2*i3+1)*n2*n1+2*i2*n1+2*i1]+=0.5*z2[i1];
		u[(2*i3+1)*n2*n1+2*i2*n1+2*i1+1]+=0.25*(z2[i1]+z2[i1+1]);
		u[(2*i3+1)*n2*n1+(2*i2+1)*n1+2*i1]+=0.25*z3[i1];
		u[(2*i3+1)*n2*n1+(2*i2+1)*n1+2*i1+1]+=0.125*(z3[i1]+z3[i1+1]);
	}
}

/*
 * --------------------------------------------------------------------
 * multigrid v-cycle routine
 * --------------------------------------------------------------------
 * Host reference version: per-level grid sizes come from m1/m2/m3 and
 * per-level offsets into the flat arrays from ir (set up in setup()).
 */
static void mg3P(double u[],
		double v[],
		double r[],
		double a[4],
		double c[4],
		int n1,
		int n2,
		int n3,
		int k){
	int j;
	/*
	 * --------------------------------------------------------------------
	 * down cycle.
	 * restrict the residual from the find grid to the coarse
	 * -------------------------------------------------------------------
	 */
	for(k = lt; k >= lb+1; k--){
		j = k-1;
		rprj3(&r[ir[k]], m1[k], m2[k], m3[k], &r[ir[j]], m1[j], m2[j], m3[j], k);
	}
	k = lb;
	/*
	 * --------------------------------------------------------------------
	 * compute an approximate solution on the coarsest grid
	 * --------------------------------------------------------------------
	 */
	zero3(&u[ir[k]], m1[k], m2[k], m3[k]);
	psinv(&r[ir[k]], &u[ir[k]], m1[k], m2[k], m3[k], c, k);
	for(k = lb+1; k <= lt-1; k++){
		j = k-1;
		/*
		 * --------------------------------------------------------------------
		 * prolongate from level k-1 to k
		 * -------------------------------------------------------------------
		 */
		zero3(&u[ir[k]], m1[k], m2[k], m3[k]);
		interp(&u[ir[j]], m1[j], m2[j], m3[j], &u[ir[k]], m1[k], m2[k], m3[k], k);
		/*
		 * --------------------------------------------------------------------
		 * compute residual for level k
		 * --------------------------------------------------------------------
		 */
		resid(&u[ir[k]], &r[ir[k]], &r[ir[k]], m1[k], m2[k], m3[k], a, k);
		/*
		 * --------------------------------------------------------------------
		 * apply smoother
		 * --------------------------------------------------------------------
		 */
		psinv(&r[ir[k]], &u[ir[k]], m1[k], m2[k], m3[k], c, k);
	}
	j = lt - 1;
	k = lt;
	interp(&u[ir[j]], m1[j], m2[j], m3[j], u, n1, n2, n3, k);
	resid(u, v, r, n1, n2, n3, a, k);
	psinv(r, u, n1, n2, n3, c, k);
}

/* device-side v-cycle: same structure as mg3P, all operands resident on the GPU */
static void mg3P_gpu(double* u_device,
		double* v_device,
		double* r_device,
		double* a_device,
		double* c_device,
		int n1,
		int n2,
		int n3,
		int k){
	int j;
	/*
	 * --------------------------------------------------------------------
	 * down cycle.
	 * restrict the residual from the find grid to the coarse
	 * -------------------------------------------------------------------
	 */
	for(k = lt; k >= lb+1; k--){
		j = k-1;
		rprj3_gpu(r_device+ir[k], m1[k], m2[k], m3[k], r_device+ir[j], m1[j], m2[j], m3[j], k);
	}
	k = lb;
	/*
	 * --------------------------------------------------------------------
	 * compute an approximate solution on the coarsest grid
	 * --------------------------------------------------------------------
	 */
	zero3_gpu(u_device+ir[k], m1[k], m2[k], m3[k]);
	psinv_gpu(r_device+ir[k], u_device+ir[k], m1[k], m2[k], m3[k], c_device, k);
	for(k = lb+1; k <= lt-1; k++){
		j = k-1;
		/*
		 * --------------------------------------------------------------------
		 * prolongate from level k-1 to k
		 * -------------------------------------------------------------------
		 */
		zero3_gpu(u_device+ir[k], m1[k], m2[k], m3[k]);
		interp_gpu(u_device+ir[j], m1[j], m2[j], m3[j], u_device+ir[k], m1[k], m2[k], m3[k], k);
		/*
		 * --------------------------------------------------------------------
		 * compute residual for level k
		 * --------------------------------------------------------------------
		 */
		resid_gpu(u_device+ir[k], r_device+ir[k], r_device+ir[k], m1[k], m2[k], m3[k], a_device, k);
		/*
		 * --------------------------------------------------------------------
		 * apply smoother
		 * --------------------------------------------------------------------
		 */
		psinv_gpu(r_device+ir[k], u_device+ir[k], m1[k], m2[k], m3[k], c_device, k);
	}
	j = lt - 1;
	k = lt;
	interp_gpu(u_device+ir[j], m1[j], m2[j], m3[j], u_device, n1, n2, n3, k);
	resid_gpu(u_device, v_device, r_device, n1, n2, n3, a_device, k);
	psinv_gpu(r_device, u_device, n1, n2, n3, c_device, k);
}

/*
 * ---------------------------------------------------------------------
 * norm2u3 evaluates approximations to the l2 norm and the
 * uniform (or l-infinity or chebyshev) norm, under the
 * assumption that the boundaries are periodic or zero. add the
 * boundaries in with half weight (quarter weight on the edges
 * and eighth weight at the corners) for inhomogeneous boundaries.
 * ---------------------------------------------------------------------
 * Outputs: *rnm2 = sqrt(sum(r^2)/(nx*ny*nz)); *rnmu = max |r| over the
 * interior points.
 */
static void norm2u3(void* pointer_r,
		int n1,
		int n2,
		int n3,
		double* rnm2,
		double* rnmu,
		int nx,
		int ny,
		int nz){
	//double (*r)[n2][n1] = (double (*)[n2][n1])pointer_r;
	double* pointer_aux_r = (double*)pointer_r;
	double s, a;
	int i3, i2, i1;
	double dn;
	if(timeron){timer_start(T_NORM2);}
	dn = 1.0*nx*ny*nz;
	s = 0.0;
	*rnmu = 0.0;
	for(i3 = 1; i3 < n3-1; i3++){
		for(i2 = 1; i2 < n2-1; i2++){
			for(i1 = 1; i1 < n1-1; i1++){
				//s = s + r[i3][i2][i1] * r[i3][i2][i1];
				s = s + pointer_aux_r[(i3)*n1*n2 + (i2)*n1 + (i1)] * pointer_aux_r[(i3)*n1*n2 + (i2)*n1 + (i1)];
				//a = fabs(r[i3][i2][i1]);
				a = fabs(pointer_aux_r[(i3)*n1*n2 + (i2)*n1 + (i1)]);
				if(a > *rnmu){*rnmu = a;}
			}
		}
	}
	*rnm2 = sqrt(s/dn);
	if(timeron){timer_stop(T_NORM2);}
}

/*
 * device-side norm2u3: per-block partial sums/maxima are copied back and
 * finished on the host.
 */
static void norm2u3_gpu(double* r_device,
		int n1,
		int n2,
		int n3,
		double* rnm2,
		double* rnmu,
		int nx,
		int ny,
		int nz){
	if(timeron){timer_start(T_NORM2);}
	double s;
	double dn, max_rnmu;
	int temp_size, j;
	dn=1.0*nx*ny*nz;
	s=0.0;
	max_rnmu=0.0;
	threads_per_block = THREADS_PER_BLOCK_ON_NORM2U3;
	amount_of_work = (n2-2) * (n3-2) * threads_per_block;
	blocks_per_grid = (ceil((double)(amount_of_work)/(double)(threads_per_block)));
	/* one partial result per block */
	temp_size = amount_of_work / threads_per_block;
	double (*sum_host)=(double*)malloc(temp_size*sizeof(double));
	double (*max_host)=(double*)malloc(temp_size*sizeof(double));
	double* sum_device;
	double* max_device;
	cudaMalloc(&sum_device,temp_size*sizeof(double));
	cudaMalloc(&max_device,temp_size*sizeof(double));
	norm2u3_gpu_kernel<<<blocks_per_grid,
		threads_per_block //,SHARED_2_NORM
		>>>(r_device,
				n1,
				n2,
				n3,
				sum_device,
				max_device,
				blocks_per_grid,
				amount_of_work);
	cudaDeviceSynchronize();
	cudaMemcpy(sum_host, sum_device, temp_size*sizeof(double), cudaMemcpyDeviceToHost);
	cudaMemcpy(max_host, max_device, temp_size*sizeof(double), cudaMemcpyDeviceToHost);
	for(j=0; j<temp_size; j++){
		s=s+sum_host[j];
		if(max_rnmu<max_host[j]){max_rnmu=max_host[j];}
	}
	cudaFree(sum_device);
	cudaFree(max_device);
	free(sum_host);
	free(max_host);
	*rnmu=max_rnmu;
	*rnm2=sqrt(s/dn);
	if(timeron){timer_stop(T_NORM2);}
}

/* block = interior (i2,i3) line; shared-memory tree reduction of sum and max */
__global__ void norm2u3_gpu_kernel(double* r,
		const int n1,
		const int n2,
		const int n3,
		double* res_sum,
		double* res_max,
		int number_of_blocks,
		int amount_of_work){
	int check=blockIdx.x*blockDim.x+threadIdx.x;
	if(check>=amount_of_work){return;}
	__shared__ double scratch_sum[THREADS_PER_BLOCK_ON_NORM2U3];
	__shared__ double scratch_max[THREADS_PER_BLOCK_ON_NORM2U3];
	//double* scratch_sum = (double*)(extern_share_data);
	//double* scratch_max = (double*)(&scratch_sum[THREADS_PER_BLOCK_ON_NORM2U3]);
	int i3=blockIdx.x/(n2-2)+1;
	int i2=blockIdx.x%(n2-2)+1;
	int i1=threadIdx.x+1;
	double s=0.0;
	double my_rnmu=0.0;
	double a;
	while(i1<n1-1){
		double r321=r[i3*n2*n1+i2*n1+i1];
		s=s+r321*r321;
		a=fabs(r321);
		my_rnmu=(a>my_rnmu)?a:my_rnmu;
		i1+=THREADS_PER_BLOCK_ON_NORM2U3;
	}
	int lid=threadIdx.x;
	scratch_sum[lid]=s;
	scratch_max[lid]=my_rnmu;
	__syncthreads();
	for(int i=THREADS_PER_BLOCK_ON_NORM2U3/2; i>0; i>>=1){
		if(lid<i){
			scratch_sum[lid]+=scratch_sum[lid+i];
			scratch_max[lid]=(scratch_max[lid]>scratch_max[lid+i])?scratch_max[lid]:scratch_max[lid+i];
		}
		__syncthreads();
	}
	if(lid == 0){
		int idx=blockIdx.x;
		res_sum[idx]=scratch_sum[0];
		res_max[idx]=scratch_max[0];
	}
}

/*
 * ---------------------------------------------------------------------
 * power raises an integer, disguised as a double
 * precision real, to an integer power
 *
--------------------------------------------------------------------- */
static double power(double a, int n){
	double aj;
	int nj;
	double power;
	power = 1.0;
	nj = n;
	aj = a;
	/* exponentiation by squaring in the NPB LCG: randlc(&x, y) sets x = x*y mod 2^46 */
	while(nj != 0){
		if((nj%2)==1){randlc(&power, aj);}
		randlc(&aj, aj);
		nj = nj/2;
	}
	return power;
}

/*
 * --------------------------------------------------------------------
 * psinv applies an approximate inverse as smoother: u = u + Cr
 *
 * this implementation costs 15A + 4M per result, where
 * A and M denote the costs of Addition and Multiplication.
 * presuming coefficient c(3) is zero (the NPB assumes this,
 * but it is thus not a general case), 2A + 1M may be eliminated,
 * resulting in 13A + 3M.
 * note that this vectorizes, and is also fine for cache
 * based machines.
 * --------------------------------------------------------------------
 * r and u are flat n1 x n2 x n3 grids (row stride n1, plane stride n1*n2).
 */
static void psinv(void* pointer_r,
		void* pointer_u,
		int n1,
		int n2,
		int n3,
		double c[4],
		int k){
	//double (*r)[n2][n1] = (double (*)[n2][n1])pointer_r;
	//double (*u)[n2][n1] = (double (*)[n2][n1])pointer_u;
	double* pointer_aux_r = (double*)pointer_r;
	double* pointer_aux_u = (double*)pointer_u;
	int i3, i2, i1;
	double r1[M], r2[M];
	if(timeron){timer_start(T_PSINV);}
	for(i3 = 1; i3 < n3-1; i3++){
		for(i2 = 1; i2 < n2-1; i2++){
			/* stage face (r1) and edge (r2) neighbor sums for this (i2,i3) line */
			for(i1 = 0; i1 < n1; i1++){
				//r1[i1] = r[i3][i2-1][i1] + r[i3][i2+1][i1] + r[i3-1][i2][i1] + r[i3+1][i2][i1];
				r1[i1] = pointer_aux_r[(i3)*n1*n2 + (i2-1)*n1 + (i1)] + pointer_aux_r[(i3)*n1*n2 + (i2+1)*n1 + (i1)] + pointer_aux_r[(i3-1)*n1*n2 + (i2)*n1 + (i1)] + pointer_aux_r[(i3+1)*n1*n2 + (i2)*n1 + (i1)];
				//r2[i1] = r[i3-1][i2-1][i1] + r[i3-1][i2+1][i1] + r[i3+1][i2-1][i1] + r[i3+1][i2+1][i1];
				r2[i1] = pointer_aux_r[(i3-1)*n1*n2 + (i2-1)*n1 + (i1)] + pointer_aux_r[(i3-1)*n1*n2 + (i2+1)*n1 + (i1)] + pointer_aux_r[(i3+1)*n1*n2 + (i2-1)*n1 + (i1)] + pointer_aux_r[(i3+1)*n1*n2 + (i2+1)*n1 + (i1)];
			}
			for(i1 = 1; i1 < n1-1; i1++){
				//u[i3][i2][i1] += c[0]*r[i3][i2][i1] + c[1]*(r[i3][i2][i1-1] + r[i3][i2][i1+1] + r1[i1]) + c[2]*(r2[i1] + r1[i1-1] + r1[i1+1]);
				pointer_aux_u[(i3)*n1*n2 + (i2)*n1 + (i1)] = pointer_aux_u[(i3)*n1*n2 + (i2)*n1 + (i1)] + c[0] * pointer_aux_r[(i3)*n1*n2 + (i2)*n1 + (i1)] + c[1] * ( pointer_aux_r[(i3)*n1*n2 + (i2)*n1 + (i1-1)] + pointer_aux_r[(i3)*n1*n2 + (i2)*n1 + (i1+1)] + r1[i1] ) + c[2] * ( r2[i1] + r1[i1-1] + r1[i1+1] );
				/*
				 * --------------------------------------------------------------------
				 * assume c(3) = 0 (enable line below if c(3) not= 0)
				 * --------------------------------------------------------------------
				 * > + c(3) * ( r2(i1-1) + r2(i1+1) )
				 * --------------------------------------------------------------------
				 */
			}
		}
	}
	if(timeron){timer_stop(T_PSINV);}
	/*
	 * --------------------------------------------------------------------
	 * exchange boundary points
	 * --------------------------------------------------------------------
	 */
	comm3(pointer_aux_u,n1,n2,n3,k);
	if(debug_vec[0] >= 1){
		rep_nrm(pointer_aux_u,n1,n2,n3,(char*)" psinv",k);
	}
	if(debug_vec[3] >= k){
		showall(pointer_aux_u,n1,n2,n3);
	}
}

/* device-side smoother wrapper: launches psinv_gpu_kernel then exchanges borders */
static void psinv_gpu(double* r_device,
		double* u_device,
		int n1,
		int n2,
		int n3,
		double* c_device,
		int k){
	threads_per_block = n1 > THREADS_PER_BLOCK ?
THREADS_PER_BLOCK : n1;
	amount_of_work = (n3-2) * (n2-2) * threads_per_block;
	blocks_per_grid = (ceil((double)(amount_of_work)/(double)(threads_per_block)));
	if(timeron){timer_start(T_PSINV);}
	psinv_gpu_kernel<<<blocks_per_grid,
		threads_per_block //,SHARED_2_M
		>>>(r_device,
				u_device,
				c_device,
				n1,
				n2,
				n3,
				amount_of_work);
	cudaDeviceSynchronize();
	if(timeron){timer_stop(T_PSINV);}
	/*
	 * --------------------------------------------------------------------
	 * exchange boundary points
	 * --------------------------------------------------------------------
	 */
	comm3_gpu(u_device,n1,n2,n3,k);
}

/*
 * smoother kernel: one block per interior (i2,i3) line; the face (r1)
 * and edge (r2) neighbor sums are staged in shared memory, then each
 * thread strides the i1 direction in steps of THREADS_PER_BLOCK.
 * Layout of r/u is flat row-major with row stride n1, plane stride n1*n2
 * (mirrors the host psinv).
 */
__global__ void psinv_gpu_kernel(double* r,
		double* u,
		double* c,
		int n1,
		int n2,
		int n3,
		int amount_of_work){
	int check=blockIdx.x*blockDim.x+threadIdx.x;
	if(check>=amount_of_work){return;}
	__shared__ double r1[M],r2[M];
	//double* r1 = (double*)(extern_share_data);
	//double* r2 = (double*)(&r1[M]);
	int i3=blockIdx.x/(n2-2)+1;
	int i2=blockIdx.x%(n2-2)+1;
	int lid=threadIdx.x;
	int i1;
	for(i1=lid; i1<n1; i1+=THREADS_PER_BLOCK){
		/*
		 * FIX: the first neighbor term previously indexed (i2-1)*n2; the
		 * row stride of r is n1 (see host psinv and resid_gpu_kernel), so
		 * it must be (i2-1)*n1. The two only coincide when n1 == n2.
		 */
		r1[i1]=r[i3*n2*n1+(i2-1)*n1+i1]
			+r[i3*n2*n1+(i2+1)*n1+i1]
			+r[(i3-1)*n2*n1+i2*n1+i1]
			+r[(i3+1)*n2*n1+i2*n1+i1];
		r2[i1]=r[(i3-1)*n2*n1+(i2-1)*n1+i1]
			+r[(i3-1)*n2*n1+(i2+1)*n1+i1]
			+r[(i3+1)*n2*n1+(i2-1)*n1+i1]
			+r[(i3+1)*n2*n1+(i2+1)*n1+i1];
	}
	__syncthreads();
	for(i1=lid+1; i1<n1-1; i1+=THREADS_PER_BLOCK){
		u[i3*n2*n1+i2*n1+i1]=u[i3*n2*n1+i2*n1+i1]
			+c[0]*r[i3*n2*n1+i2*n1+i1]
			+c[1]*(r[i3*n2*n1+i2*n1+i1-1]
					+r[i3*n2*n1+i2*n1+i1+1]
					+r1[i1])
			+c[2]*(r2[i1]+r1[i1-1]+r1[i1+1] );
	}
}

/* frees all device-side benchmark buffers allocated in setup_gpu */
static void release_gpu(){
	cudaFree(a_device);
	cudaFree(c_device);
	cudaFree(u_device);
	cudaFree(v_device);
	cudaFree(r_device);
}

/*
 * ---------------------------------------------------------------------
 * report on norm
 * ---------------------------------------------------------------------
 */
static void rep_nrm(void* pointer_u,
		int n1,
		int n2,
		int n3,
		char* title,
		int kk){
	double rnm2, rnmu;
	norm2u3(pointer_u,n1,n2,n3,&rnm2,&rnmu,nx[kk],ny[kk],nz[kk]);
	printf(" Level%2d in %8s: norms 
=%21.14e%21.14e\n", kk, title, rnm2, rnmu);
}

/*
 * --------------------------------------------------------------------
 * resid computes the residual: r = v - Au
 *
 * this implementation costs 15A + 4M per result, where
 * A and M denote the costs of addition (or subtraction) and
 * multiplication, respectively.
 * presuming coefficient a(1) is zero (the NPB assumes this,
 * but it is thus not a general case), 3A + 1M may be eliminated,
 * resulting in 12A + 3M.
 * note that this vectorizes, and is also fine for cache
 * based machines.
 * --------------------------------------------------------------------
 * u, v, r are flat n1 x n2 x n3 grids (row stride n1, plane stride n1*n2).
 */
static void resid(void* pointer_u,
		void* pointer_v,
		void* pointer_r,
		int n1,
		int n2,
		int n3,
		double a[4],
		int k){
	//double (*u)[n2][n1] = (double (*)[n2][n1])pointer_u;
	//double (*v)[n2][n1] = (double (*)[n2][n1])pointer_v;
	//double (*r)[n2][n1] = (double (*)[n2][n1])pointer_r;
	double* pointer_aux_u = (double*)pointer_u;
	double* pointer_aux_v = (double*)pointer_v;
	double* pointer_aux_r = (double*)pointer_r;
	int i3, i2, i1;
	double u1[M], u2[M];
	if(timeron){timer_start(T_RESID);}
	for(i3 = 1; i3 < n3-1; i3++){
		for(i2 = 1; i2 < n2-1; i2++){
			/* stage face (u1) and edge (u2) neighbor sums for this (i2,i3) line */
			for(i1 = 0; i1 < n1; i1++){
				//u1[i1] = u[i3][i2-1][i1] + u[i3][i2+1][i1] + u[i3-1][i2][i1] + u[i3+1][i2][i1];
				u1[i1] = pointer_aux_u[(i3)*n1*n2 + (i2-1)*n1 + (i1)] + pointer_aux_u[(i3)*n1*n2 + (i2+1)*n1 + (i1)] + pointer_aux_u[(i3-1)*n1*n2 + (i2)*n1 + (i1)] + pointer_aux_u[(i3+1)*n1*n2 + (i2)*n1 + (i1)];
				//u2[i1] = u[i3-1][i2-1][i1] + u[i3-1][i2+1][i1] + u[i3+1][i2-1][i1] + u[i3+1][i2+1][i1];
				u2[i1] = pointer_aux_u[(i3-1)*n1*n2 + (i2-1)*n1 + (i1)] + pointer_aux_u[(i3-1)*n1*n2 + (i2+1)*n1 + (i1)] + pointer_aux_u[(i3+1)*n1*n2 + (i2-1)*n1 + (i1)] + pointer_aux_u[(i3+1)*n1*n2 + (i2+1)*n1 + (i1)];
			}
			for(i1 = 1; i1 < n1-1; i1++){
				/*
				 * ---------------------------------------------------------------------
				 * assume a(1) = 0 (enable 2 lines below if a(1) not= 0)
				 * ---------------------------------------------------------------------
				 * > - a(1) * ( u(i1-1,i2,i3) + u(i1+1,i2,i3)
				 * > + u1(i1) )
				 * ---------------------------------------------------------------------
				 */
				//r[i3][i2][i1] = v[i3][i2][i1] - a[0]*u[i3][i2][i1] - a[2]*(u2[i1] + u1[i1-1] + u1[i1+1]) - a[3]*(u2[i1-1] + u2[i1+1]);
				pointer_aux_r[(i3)*n1*n2 + (i2)*n1 + (i1)] = pointer_aux_v[(i3)*n1*n2 + (i2)*n1 + (i1)] - a[0] * pointer_aux_u[(i3)*n1*n2 + (i2)*n1 + (i1)] - a[2] * ( u2[i1] + u1[i1-1] + u1[i1+1] ) - a[3] * ( u2[i1-1] + u2[i1+1] );
			}
		}
	}
	if(timeron){timer_stop(T_RESID);}
	/*
	 * --------------------------------------------------------------------
	 * exchange boundary data
	 * --------------------------------------------------------------------
	 */
	comm3(pointer_aux_r,n1,n2,n3,k);
	if(debug_vec[0] >= 1){
		rep_nrm(pointer_aux_r,n1,n2,n3,(char*)" resid",k);
	}
	if(debug_vec[2] >= k){
		showall(pointer_aux_r,n1,n2,n3);
	}
}

/* device-side residual wrapper: launches resid_gpu_kernel then exchanges borders */
static void resid_gpu(double* u_device,
		double* v_device,
		double* r_device,
		int n1,
		int n2,
		int n3,
		double* a_device,
		int k){
	threads_per_block = n1 > THREADS_PER_BLOCK ?
		THREADS_PER_BLOCK : n1;
	amount_of_work = (n3-2) * (n2-2) * threads_per_block;
	blocks_per_grid = (ceil((double)(amount_of_work)/(double)(threads_per_block)));
	if(timeron){timer_start(T_RESID);}
	resid_gpu_kernel<<<blocks_per_grid,
		threads_per_block //,SHARED_2_M
		>>>(u_device,
				v_device,
				r_device,
				a_device,
				n1,
				n2,
				n3,
				amount_of_work);
	cudaDeviceSynchronize();
	if(timeron){timer_stop(T_RESID);}
	/*
	 * --------------------------------------------------------------------
	 * exchange boundary data
	 * --------------------------------------------------------------------
	 */
	comm3_gpu(r_device,n1,n2,n3,k);
}

/*
 * residual kernel: one block per interior (i2,i3) line; u1/u2 neighbor
 * sums staged in shared memory, threads stride i1 by THREADS_PER_BLOCK.
 */
__global__ void resid_gpu_kernel(double* u,
		double* v,
		double* r,
		double* a,
		int n1,
		int n2,
		int n3,
		int amount_of_work){
	int check=blockIdx.x*blockDim.x+threadIdx.x;
	if(check>=amount_of_work){return;}
	__shared__ double u1[M], u2[M];
	//double* u1 = (double*)(extern_share_data);
	//double* u2 = (double*)(&u1[M]);
	int i3=blockIdx.x/(n2-2)+1;
	int i2=blockIdx.x%(n2-2)+1;
	int lid=threadIdx.x;
	int i1;
	for(i1=lid; i1<n1; i1+=THREADS_PER_BLOCK){
		u1[i1]=u[i3*n2*n1+(i2-1)*n1+i1]
			+u[i3*n2*n1+(i2+1)*n1+i1]
			+u[(i3-1)*n2*n1+i2*n1+i1]
			+u[(i3+1)*n2*n1+i2*n1+i1];
		u2[i1]=u[(i3-1)*n2*n1+(i2-1)*n1+i1]
			+u[(i3-1)*n2*n1+(i2+1)*n1+i1]
			+u[(i3+1)*n2*n1+(i2-1)*n1+i1]
			+u[(i3+1)*n2*n1+(i2+1)*n1+i1];
	}
	__syncthreads();
	for(i1=lid+1; i1<n1-1; i1+=THREADS_PER_BLOCK){
		r[i3*n2*n1+i2*n1+i1]=v[i3*n2*n1+i2*n1+i1]
			-a[0]*u[i3*n2*n1+i2*n1+i1]
			-a[2]*(u2[i1]+u1[i1-1]+u1[i1+1])
			-a[3]*(u2[i1-1]+u2[i1+1] );
	}
}

/*
 * --------------------------------------------------------------------
 * rprj3 projects onto the next coarser grid,
 * using a trilinear finite element projection: s = r' = P r
 *
 * this implementation costs 20A + 4M per result, where
 * A and M denote the costs of addition and multiplication.
 * note that this vectorizes, and is also fine for cache
 * based machines.
* -------------------------------------------------------------------- */
static void rprj3(void* pointer_r,
		int m1k,
		int m2k,
		int m3k,
		void* pointer_s,
		int m1j,
		int m2j,
		int m3j,
		int k){
	//double (*r)[m2k][m1k] = (double (*)[m2k][m1k])pointer_r;
	//double (*s)[m2j][m1j] = (double (*)[m2j][m1j])pointer_s;
	double* pointer_aux_r = (double*)pointer_r;
	double* pointer_aux_s = (double*)pointer_s;
	int j3, j2, j1, i3, i2, i1, d1, d2, d3, j;
	double x1[M], y1[M], x2, y2;
	if(timeron){timer_start(T_RPRJ3);}
	/* d* = 2 on a degenerate (size-3) axis, else 1 */
	if(m1k == 3){
		d1 = 2;
	}else{
		d1 = 1;
	}
	if(m2k == 3){
		d2 = 2;
	}else{
		d2 = 1;
	}
	if(m3k == 3){
		d3 = 2;
	}else{
		d3 = 1;
	}
	for(j3 = 1; j3 < m3j-1; j3++){
		i3 = 2*j3-d3;
		for(j2 = 1; j2 < m2j-1; j2++){
			i2 = 2*j2-d2;
			/* stage edge (x1) and corner (y1) sums for this coarse line */
			for(j1 = 1; j1 < m1j; j1++){
				i1 = 2*j1-d1;
				//x1[i1] = r[i3+1][i2][i1] + r[i3+1][i2+2][i1] + r[i3][i2+1][i1] + r[i3+2][i2+1][i1];
				x1[i1] = pointer_aux_r[(i3+1)*m1k*m2k + (i2)*m1k + (i1)] + pointer_aux_r[(i3+1)*m1k*m2k + (i2+2)*m1k + (i1)] + pointer_aux_r[(i3)*m1k*m2k + (i2+1)*m1k + (i1)] + pointer_aux_r[(i3+2)*m1k*m2k + (i2+1)*m1k + (i1)];
				//y1[i1] = r[i3][i2][i1] + r[i3+2][i2][i1] + r[i3][i2+2][i1] + r[i3+2][i2+2][i1];
				y1[i1] = pointer_aux_r[(i3)*m1k*m2k + (i2)*m1k + (i1)] + pointer_aux_r[(i3+2)*m1k*m2k + (i2)*m1k + (i1)] + pointer_aux_r[(i3)*m1k*m2k + (i2+2)*m1k + (i1)] + pointer_aux_r[(i3+2)*m1k*m2k + (i2+2)*m1k + (i1)];
			}
			for(j1 = 1; j1 < m1j-1; j1++){
				i1 = 2*j1-d1;
				//y2 = r[i3][i2][i1+1] + r[i3+2][i2][i1+1] + r[i3][i2+2][i1+1] + r[i3+2][i2+2][i1+1];
				y2 = pointer_aux_r[(i3)*m1k*m2k + (i2)*m1k + (i1+1)] + pointer_aux_r[(i3+2)*m1k*m2k + (i2)*m1k + (i1+1)] + pointer_aux_r[(i3)*m1k*m2k + (i2+2)*m1k + (i1+1)] + pointer_aux_r[(i3+2)*m1k*m2k + (i2+2)*m1k + (i1+1)];
				//x2 = r[i3+1][i2][i1+1] + r[i3+1][i2+2][i1+1] + r[i3][i2+1][i1+1] + r[i3+2][i2+1][i1+1];
				x2 = pointer_aux_r[(i3+1)*m1k*m2k + (i2)*m1k + (i1+1)] + pointer_aux_r[(i3+1)*m1k*m2k + (i2+2)*m1k + (i1+1)] + pointer_aux_r[(i3)*m1k*m2k + (i2+1)*m1k + (i1+1)] + pointer_aux_r[(i3+2)*m1k*m2k + (i2+1)*m1k + (i1+1)];
				//s[j3][j2][j1] = weighted 27-point sum (0.5 center, 0.25 faces, 0.125 edges, 0.0625 corners)
				pointer_aux_s[(j3)*m1j*m2j + (j2)*m1j + (j1)] = 0.5 * pointer_aux_r[(i3+1)*m1k*m2k + (i2+1)*m1k + (i1+1)] + 0.25 * ( pointer_aux_r[(i3+1)*m1k*m2k + (i2+1)*m1k + (i1)] + pointer_aux_r[(i3+1)*m1k*m2k + (i2+1)*m1k + (i1+2)] + x2) + 0.125 * ( x1[i1] + x1[i1+2] + y2) + 0.0625 * ( y1[i1] + y1[i1+2] );
			}
		}
	}
	if(timeron){timer_stop(T_RPRJ3);}
	j=k-1;
	comm3(pointer_aux_s,m1j,m2j,m3j,j);
	if(debug_vec[0] >= 1){
		rep_nrm(pointer_aux_s,m1j,m2j,m3j,(char*)" rprj3",k-1);
	}
	if(debug_vec[4] >= k){
		showall(pointer_aux_s,m1j,m2j,m3j);
	}
}

/* device-side restriction wrapper: launches rprj3_gpu_kernel then exchanges borders */
static void rprj3_gpu(double* r_device,
		int m1k,
		int m2k,
		int m3k,
		double* s_device,
		int m1j,
		int m2j,
		int m3j,
		int k){
	int d1,d2,d3,j;
	if(m1k==3){
		d1=2;
	}else{
		d1=1;
	}
	if(m2k==3){
		d2=2;
	}else{
		d2=1;
	}
	if(m3k==3){
		d3=2;
	}else{
		d3=1;
	}
	threads_per_block = m1j-1;
	amount_of_work = (m3j-2) * (m2j-2) * (m1j-1);
	blocks_per_grid = (ceil((double)(amount_of_work)/(double)(threads_per_block)));
	if(timeron){timer_start(T_RPRJ3);}
	rprj3_gpu_kernel<<<blocks_per_grid,
		threads_per_block //,SHARED_2_M
		>>>(r_device,
				s_device,
				m1k,
				m2k,
				m3k,
				m1j,
				m2j,
				m3j,
				d1,
				d2,
				d3,
				amount_of_work);
	cudaDeviceSynchronize();
	if(timeron){timer_stop(T_RPRJ3);}
	j=k-1;
	comm3_gpu(s_device,m1j,m2j,m3j,j);
}

/* restriction kernel: one block per coarse (j2,j3) line; x1/y1 staged in shared memory */
__global__ void rprj3_gpu_kernel(double* base_r,
		double* base_s,
		int m1k,
		int m2k,
		int m3k,
		int m1j,
		int m2j,
		int m3j,
		int d1,
		int d2,
		int d3,
		int amount_of_work){
	int check=blockIdx.x*blockDim.x+threadIdx.x;
	if(check>=amount_of_work){return;}
	int j3,j2,j1,i3,i2,i1;
	double x2,y2;
	__shared__ double x1[M],y1[M];
	//double* x1 = (double*)(extern_share_data);
	//double* y1 = (double*)(&x1[M]);
	double (*r)=base_r;
	double (*s)=base_s;
	j3=blockIdx.x/(m2j-2)+1;
	j2=blockIdx.x%(m2j-2)+1;
	j1=threadIdx.x+1;
	i3=2*j3-d3;
	i2=2*j2-d2;
	i1=2*j1-d1;
	x1[i1]=r[(i3+1)*m2k*m1k+i2*m1k+i1]
		+r[(i3+1)*m2k*m1k+(i2+2)*m1k+i1]
		+r[i3*m2k*m1k+(i2+1)*m1k+i1]
		+r[(i3+2)*m2k*m1k+(i2+1)*m1k+i1];
	y1[i1]=r[i3*m2k*m1k+i2*m1k+i1]
		+r[(i3+2)*m2k*m1k+i2*m1k+i1]
		+r[i3*m2k*m1k+(i2+2)*m1k+i1]
		+r[(i3+2)*m2k*m1k+(i2+2)*m1k+i1];
	__syncthreads();
	if(j1<m1j-1){
		i1=2*j1-d1;
		y2=r[i3*m2k*m1k+i2*m1k+i1+1]
			+r[(i3+2)*m2k*m1k+i2*m1k+i1+1]
			+r[i3*m2k*m1k+(i2+2)*m1k+i1+1]
			+r[(i3+2)*m2k*m1k+(i2+2)*m1k+i1+1];
		x2=r[(i3+1)*m2k*m1k+i2*m1k+i1+1]
			+r[(i3+1)*m2k*m1k+(i2+2)*m1k+i1+1]
			+r[i3*m2k*m1k+(i2+1)*m1k+i1+1]
			+r[(i3+2)*m2k*m1k+(i2+1)*m1k+i1+1];
		s[j3*m2j*m1j+j2*m1j+j1]=
			0.5*r[(i3+1)*m2k*m1k+(i2+1)*m1k+i1+1]
			+0.25*(r[(i3+1)*m2k*m1k+(i2+1)*m1k+i1]
					+r[(i3+1)*m2k*m1k+(i2+1)*m1k+i1+2]+x2)
			+0.125*(x1[i1]+x1[i1+2]+y2)
			+0.0625*(y1[i1]+y1[i1+2]);
	}
}

/*
 * computes per-level grid dimensions (nx/ny/nz, m1/m2/m3), the active
 * index bounds (is*/ie*) and the flat-array offset of every level (ir).
 */
static void setup(int* n1, int* n2, int* n3, int k){
	int j;
	int ax, mi[MAXLEVEL+1][3];
	int ng[MAXLEVEL+1][3];
	ng[lt][0] = nx[lt];
	ng[lt][1] = ny[lt];
	ng[lt][2] = nz[lt];
	/* halve the grid on every axis from the finest level down */
	for(ax = 0; ax < 3; ax++){
		for(k = lt-1; k >= 1; k--){
			ng[k][ax] = ng[k+1][ax]/2;
		}
	}
	for(k = lt; k >= 1; k--){
		nx[k] = ng[k][0];
		ny[k] = ng[k][1];
		nz[k] = ng[k][2];
	}
	for(k = lt; k >= 1; k--){
		for (ax = 0; ax < 3; ax++){
			mi[k][ax] = 2 + ng[k][ax];
		}
		m1[k] = mi[k][0];
		m2[k] = mi[k][1];
		m3[k] = mi[k][2];
	}
	k = lt;
	is1 = 2 + ng[k][0] - ng[lt][0];
	ie1 = 1 + ng[k][0];
	*n1 = 3 + ie1 - is1;
	is2 = 2 + ng[k][1] - ng[lt][1];
	ie2 = 1 + ng[k][1];
	*n2 = 3 + ie2 - is2;
	is3 = 2 + ng[k][2] - ng[lt][2];
	ie3 = 1 + ng[k][2];
	*n3 = 3 + ie3 - is3;
	ir[lt] = 0;
	for(j = lt-1; j >= 1; j--){
		ir[j] = ir[j+1]+ONE*m1[j+1]*m2[j+1]*m3[j+1];
	}
	if(debug_vec[1] >= 1){
		printf(" in setup, \n");
		printf(" k lt nx ny nz n1 n2 n3 is1 is2 is3 ie1 ie2 ie3\n");
		printf("%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d\n",
				k,lt,ng[k][0],ng[k][1],ng[k][2],*n1,*n2,*n3,is1,is2,is3,ie1,ie2,ie3);
	}
}

/* allocates and initializes all device-side buffers (continues past this chunk) */
static void setup_gpu(double* a, double* c){
	size_a_device=sizeof(double)*(4);
	size_c_device=sizeof(double)*(4);
	size_u_device=sizeof(double)*(NR);
	size_v_device=sizeof(double)*(NV);
	size_r_device=sizeof(double)*(NR);
	cudaMalloc(&a_device, size_a_device);
	cudaMalloc(&c_device,
size_c_device); cudaMalloc(&u_device, size_u_device); cudaMalloc(&v_device, size_v_device); cudaMalloc(&r_device, size_r_device); cudaMemcpy(a_device, a, size_a_device, cudaMemcpyHostToDevice); cudaMemcpy(c_device, c, size_c_device, cudaMemcpyHostToDevice); cudaMemcpy(u_device, u, size_u_device, cudaMemcpyHostToDevice); cudaMemcpy(v_device, v, size_v_device, cudaMemcpyHostToDevice); cudaMemcpy(r_device, r, size_r_device, cudaMemcpyHostToDevice); } static void showall(void* pointer_z, int n1, int n2, int n3){ //double (*z)[n2][n1] = (double (*)[n2][n1])pointer_z; double* pointer_aux_z = (double*)pointer_z; int i1,i2,i3; int m1, m2, m3; m1 = min(n1,18); m2 = min(n2,14); m3 = min(n3,18); printf("\n"); for(i3 = 0; i3 < m3; i3++){ for(i2 = 0; i2 < m2; i2++){ for(i1 = 0; i1 < m1; i1++){ //printf("%6.3f", z[i3][i2][i1]); printf("%6.3f", pointer_aux_z[(i3)*n1*n2 + (i2)*n1 + (i1)]); } printf("\n"); } printf(" - - - - - - - \n"); } printf("\n"); } static void zero3(void* pointer_z, int n1, int n2, int n3){ //double (*z)[n2][n1] = (double (*)[n2][n1])pointer_z; double* pointer_aux_z = (double*)pointer_z; int i1, i2, i3; for(i3 = 0;i3 < n3; i3++){ for(i2 = 0; i2 < n2; i2++){ for(i1 = 0; i1 < n1; i1++){ //z[i3][i2][i1] = 0.0; pointer_aux_z[(i3)*n1*n2 + (i2)*n1 + (i1)] = 0.0; } } } } static void zero3_gpu(double* z_device, int n1, int n2, int n3){ threads_per_block = THREADS_PER_BLOCK_ON_ZERO3; amount_of_work = n1*n2*n3; blocks_per_grid = (ceil((double)(amount_of_work)/(double)(threads_per_block))); zero3_gpu_kernel<<<blocks_per_grid, threads_per_block>>>(z_device, n1, n2, n3, amount_of_work); } __global__ void zero3_gpu_kernel(double* z, int n1, int n2, int n3, int amount_of_work){ int thread_id=blockIdx.x*blockDim.x+threadIdx.x; if(thread_id>=(n1*n2*n3)){return;} z[thread_id]=0.0; } /* * --------------------------------------------------------------------- * zran3 loads +1 at ten randomly chosen points, * loads -1 at a different ten random points, * and zero elsewhere. 
* --------------------------------------------------------------------- */ static void zran3(void* pointer_z, int n1, int n2, int n3, int nx, int ny, int k){ //double (*z)[n2][n1] = (double (*)[n2][n1])pointer_z; double* pointer_aux_z = (double*)pointer_z; int i0, m0, m1; int i1, i2, i3, d1, e2, e3; double xx, x0, x1, a1, a2, ai; double ten[2][MM], best; int i, j1[2][MM], j2[2][MM], j3[2][MM]; int jg[2][MM][4]; a1 = power(A, nx); a2 = power(A, nx*ny); //zero3(z, n1, n2, n3); zero3(pointer_aux_z, n1, n2, n3); i = is1-2+nx*(is2-2+ny*(is3-2)); ai = power(A, i); d1 = ie1 - is1 + 1; e2 = ie2 - is2 + 2; e3 = ie3 - is3 + 2; x0 = X; randlc(&x0, ai); for(i3 = 1; i3 < e3; i3++){ x1 = x0; for(i2 = 1; i2 < e2; i2++){ xx = x1; //vranlc(d1, &xx, A, &(z[i3][i2][1])); vranlc(d1, &xx, A, &(pointer_aux_z[(i3)*n1*n2 + (i2)*n1 + (1)])); randlc(&x1,a1); } randlc(&x0, a2); } /* * --------------------------------------------------------------------- * each processor looks for twenty candidates * --------------------------------------------------------------------- */ for(i = 0; i < MM; i++){ ten[1][i] = 0.0; j1[1][i] = 0; j2[1][i] = 0; j3[1][i] = 0; ten[0][i] = 1.0; j1[0][i] = 0; j2[0][i] = 0; j3[0][i] = 0; } for(i3 = 1; i3 < n3-1; i3++){ for(i2 = 1; i2 < n2-1; i2++){ for(i1 = 1; i1 < n1-1; i1++){ //if(z[i3][i2][i1] > ten[1][0]){ if(pointer_aux_z[(i3)*n1*n2 + (i2)*n1 + (i1)] > ten[1][0]){ //ten[1][0] = z[i3][i2][i1]; ten[1][0] = pointer_aux_z[(i3)*n1*n2 + (i2)*n1 + (i1)]; j1[1][0] = i1; j2[1][0] = i2; j3[1][0] = i3; bubble(ten, j1, j2, j3, MM, 1); } //if(z[i3][i2][i1] < ten[0][0]){ if(pointer_aux_z[(i3)*n1*n2 + (i2)*n1 + (i1)] < ten[0][0]){ //ten[0][0] = z[i3][i2][i1]; ten[0][0] = pointer_aux_z[(i3)*n1*n2 + (i2)*n1 + (i1)]; j1[0][0] = i1; j2[0][0] = i2; j3[0][0] = i3; bubble(ten, j1, j2, j3, MM, 0); } } } } /* * --------------------------------------------------------------------- * now which of these are globally best? 
* --------------------------------------------------------------------- */ i1 = MM - 1; i0 = MM - 1; for(i = MM - 1; i >= 0; i--){ best = 0.0; if(best < ten[1][i1]){ jg[1][i][0] = 0; jg[1][i][1] = is1 - 2 + j1[1][i1]; jg[1][i][2] = is2 - 2 + j2[1][i1]; jg[1][i][3] = is3 - 2 + j3[1][i1]; i1 = i1-1; }else{ jg[1][i][0] = 0; jg[1][i][1] = 0; jg[1][i][2] = 0; jg[1][i][3] = 0; } best = 1.0; if(best > ten[0][i0]){ jg[0][i][0] = 0; jg[0][i][1] = is1 - 2 + j1[0][i0]; jg[0][i][2] = is2 - 2 + j2[0][i0]; jg[0][i][3] = is3 - 2 + j3[0][i0]; i0 = i0-1; }else{ jg[0][i][0] = 0; jg[0][i][1] = 0; jg[0][i][2] = 0; jg[0][i][3] = 0; } } m1 = 0; m0 = 0; for(i3 = 0; i3 < n3; i3++){ for(i2 = 0; i2 < n2; i2++){ for(i1 = 0; i1 < n1; i1++){ //z[i3][i2][i1] = 0.0; pointer_aux_z[(i3)*n1*n2 + (i2)*n1 + (i1)] = 0.0; } } } for (i = MM-1; i >= m0; i--){ //z[jg[0][i][3]][jg[0][i][2]][jg[0][i][1]] = -1.0; pointer_aux_z[(jg[0][i][3])*n1*n2 + (jg[0][i][2])*n1 + (jg[0][i][1])] = -1.0; } for(i = MM-1; i >= m1; i--){ //z[jg[1][i][3]][jg[1][i][2]][jg[1][i][1]] = +1.0; pointer_aux_z[(jg[1][i][3])*n1*n2 + (jg[1][i][2])*n1 + (jg[1][i][1])] = +1.0; } comm3(pointer_aux_z, n1, n2, n3, k); }
b88fd27357506d80aca0c4df89e6e354d25d0b48.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "taylor_shift_conf.h" #include "taylor_shift_cpu.h" #include "taylor_shift_kernel.h" #include "inlines.h" // fast multiplication of two polynomials, created by Sardar Haque, I modify just a line to use it in my code __global__ void listPlainMulGpu_and_right_shift_GPU(int *Mgpu1, int *Mgpu2 , int length_poly, int poly_on_layer, int threadsForAmul, int mulInThreadBlock, int p, double pinv) { __shared__ int sM[2*Tmul]; /* sM is the shared memory where the all the coefficients and intermediate multiplications results are stored. For each multiplication it reserve 4*length_poly -1 spaces. mulID is the multiplication ID. It refers to the poly in Mgpu2 on which it will work. mulID must be less than (poly_on_layer/2). */ int mulID= ((threadIdx.x/threadsForAmul) + blockIdx.x*mulInThreadBlock); if (mulID < (poly_on_layer/2) && threadIdx.x < threadsForAmul*mulInThreadBlock) { /* The next 10 lines of code copy the polynomials in Mgpu1 from global memory to shared memory. Each thread is responsible of copying one coefficient. A thread will copy a coefficient from Mgpu1[( mulID* length_poly*2)...( mulID* length_poly*2) + length_poly*2 -1] j+u gives the right index of the coefficient in Mgpu1. In sM, the coefficients are stored at the lower part. t will find the right (4*length_poly-1) spaced slot for it. s gives the start index of its right slot. s+u gives right position for the index. */ int j = ( mulID* length_poly*2); int q = ( mulID*(2*length_poly)); // modified, clean the -1 int t = (threadIdx.x/threadsForAmul); int u = threadIdx.x % threadsForAmul; int s = t*(4*length_poly-1); int k = s + length_poly; int l = k + length_poly; int c = l+u; int a, b, i; sM[s+u] = Mgpu1[j + u]; __syncthreads(); if(u != (2*length_poly-1) ) { /* In the multiplication space, the half of the leading coefficients are computed differently than the last half. Here the computation of first half are shown. 
the last half is shown in else statement. In both cases sM[c] is the cofficient on which this thread will work on. sM[a] is the coefficient of one poly. sM[b] is the coefficient of the other poly. */ if(u < length_poly) { a = s; b = k + u; sM[c] = mul_mod(sM[a],sM[b],p,pinv); ++a; --b; for(i = 0; i < u; ++i, ++a, --b) sM[c] = add_mod(mul_mod(sM[a],sM[b],p,pinv),sM[c] ,p); Mgpu2[q+u+1] = sM[c]; //+1 added } else { b = l - 1; a = (u - length_poly) + 1 + s; sM[c] = mul_mod(sM[a],sM[b],p,pinv); ++a; --b; int tempU = u; u = (2*length_poly-2) - u; for(i = 0; i < u; ++i, ++a, --b) sM[c] = add_mod(mul_mod(sM[a],sM[b],p,pinv),sM[c] ,p); Mgpu2[q+tempU+1] = sM[c]; //+1 added } } else Mgpu2[q] = 0; // added for put 0 at position } } // create array identity (initialization of the array Fact) __global__ void identity_GPU(int *T, int n) { int k = blockIdx.x * blockDim.x + threadIdx.x; int boolean = (int) (k == 0); if (k < n+1) T[k] = k + boolean; } // create all the elements of Factorial (%p) __global__ void create_factorial_GPU(int *Fact, int n, int e, int p, double pinv) // warning : n+1 is the size of Fact but we will just full the n last element, not the first one { int k = blockIdx.x * blockDim.x + threadIdx.x; int i, j, part, pos, base; int L = 2; int B = 2; // suite if (k < n/2) { // step 1 Fact[2*k+1] = mul_mod(Fact[2*k], Fact[2*k+1], p, pinv); // next steps for (i=1; i<e; i++) { // L *= 2; B *= 2; // B = 2 * L; part = k / L; pos = k % L; base = Fact[L + part*B - 1]; j = L + part*B + pos; Fact[j] = mul_mod(base, Fact[j], p, pinv); L *= 2; } } } // create an array of the inverse numbers in Z/pZ __global__ void inverse_p_GPU(int *T, int p, double pinv) { int i; int k = blockIdx.x * blockDim.x + threadIdx.x; if (k < p) { if (k > 1) for (i=2; i<p; i++) { if (mul_mod(k, i, p, pinv) == 1) { T[k] = i; i = p; // to stop the loop } } else if (k == 1) T[1] = 1; else // (k == 0) T[0] = 0; } } // create the inverse of a number in Z/pZ __device__ int inverse_GPU(int k, int p, double 
pinv) { int i, res; if (k > 1) for (i=2; i<p; i++) { if (mul_mod(k, i, p, pinv) == 1) { res = i; i = p; // to stop the loop } } else if (k == 1) res = 1; else // (k == 0) res = 0; return res; } // creates an array of the Newton's Binomials until n modulo p (! size of the array = n+1) __device__ int create_binomial_GPU(int *Factorial, int *Inverse_p, int n, int p, double pinv, int id) { int l = n - id; int temp = mul_mod(Factorial[id], Factorial[l], p, pinv); return mul_mod(Factorial[n], Inverse_p[temp], p, pinv); } // create the Newton's Binomial coefficient "n choose id" modulo p // return "n choose id" = n! / [id!(n-id)!] mod p __device__ int create_binomial2_GPU(int *Factorial, int n, int p, double pinv, int id) { int l = n - id; int prod = mul_mod(Factorial[id], Factorial[l], p, pinv); return quo_mod(Factorial[n], prod, p, pinv); } // create the array of the coefficients of (x+1)^k for k in (1,2^(e-1)) __global__ void develop_xshift_GPU(int *T, int n, int *Factorial, int p, double pinv) { int k = blockIdx.x * blockDim.x + threadIdx.x; int m; int pow2 = 1; if (k < n) { // if (k > 1) { m = (k+1)/2; //k/2 while (m != 0) { m /= 2; pow2 *= 2; } T[k] = create_binomial2_GPU(Factorial, pow2, p, pinv, k+1 - pow2); //k-pow2 } /* else if (k == 1) { T[1] = 1; // for (x+1)^1 T[n] = 1; // last element = 1 } else // (k == 0) T[0] = 1; // for (x+1)^0 */ } } // create the product of two arrays representing polynomials __device__ void conv_prod_GPU(int *res, int *T1, int *T2, int m, int p, int local_n) { int i, j; int K = blockIdx.x * blockDim.x + threadIdx.x; if (K < m) { for (j=0; j<K; j++) { i = K - j; // K = i+j if ((i < local_n+1) && (j < local_n)) // if i < local_n + 1 then T1[i] != 0, else T1[i] = 0 so useless computations res[K] = (res[K] + T1[i]*T2[j]) % p; } for (j=K+1; j<m; j++) { i = K + m - j; if ((i < local_n+1) && (j < local_n)) res[K] = (res[K] + T1[i]*T2[j]) % p; } } } // addition of two arrays __global__ void add_arrays_GPU(int *res, int *T1, int *T2, int size, 
int p) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i<size) res[i] = add_mod(T1[i], T2[i], p); } // creates an array of zeros __global__ void Zeros_GPU(int *T, int n) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) T[i] = 0; } // initialize Polynomial_shift __global__ void init_polynomial_shift_GPU(int *Polynomial, int *Polynomial_shift, int n, int p) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = 2*i; if (i < n/2) { // if (i % 2 == 0) Polynomial_shift[j] = add_mod(Polynomial[j], Polynomial[j+1], p); // else // (i % 2 == 1) Polynomial_shift[j+1] = Polynomial[j+1]; } /* EXAMPLE for n=8 : after this procedure, Polynomial_shift = [f0+f1, f1, f2+f3, f3, f4+f5, f5, f6+f7, f7] */ } // transfer at each step the polynomials which need to be multiplicated __global__ void transfert_array_GPU(int *Mgpu, int *Polynomial_shift, int *Monomial_shift, int n, int local_n) { int i = blockIdx.x * blockDim.x + threadIdx.x; int B = 2*local_n; int pos, part, PART, bool1, bool2; /* EXAMPLE -------------------------------------------------------- ARRAY Polynomial_shift_device[i-1] considered _________ _________ _________ _________ | | | | | | | X | | Y | |_________|_________|_________|_________| part=0 part=1 part=2 part=3 local_n = size of a part -------------------------------------------------------- ARRAY Mgpu[i] considered ___________________ ___________________ | | | | X (x+1)^m | Y (x+1)^m | |___________________|___________________| PART=0 PART=1 B = 2 * local_n = size of a PART m = local_n We want to fill the array Mgpu[i] like this : the polynomials which need to be multiplicated by (x+1)^m are of odd part and we store them at the beginning of each PART of Mgpu[i]. The end of each part doesn't really contain (x+1)^m as we need arrays to be multiplicated, so we avoid the multiplication by 1. Thus the end of each PART contains exactly : [(x+1)^m - 1] / x = m + ... 
+ x^(m-1) {m elements} */ if (i < n) { part = i / local_n; pos = i % local_n; // i = part * local_n + pos PART = part / 2; bool2 = part % 2; // = 0 or 1 bool1 = 1 - bool2; // = 1 or 0, bool1 and bool2 are contraries // What we want to do /* if (part % 2 == 0) Mgpu[PART * B + local_n + pos] = Monomial_shift[local_n + pos];// + 1]; else // (part % 2 == 1) Mgpu[PART * B + pos] = Polynomial_shift[i]; */ // What we do (faster) Mgpu[PART * B + local_n * bool1 + pos] = bool1 * Monomial_shift[local_n + pos] + bool2 * Polynomial_shift[i]; } } __global__ void right_shift_GPU(int *T, int n) { int i = blockIdx.x * blockDim.x + threadIdx.x; int a; if (i < n) { a = T[i]; __syncthreads(); if (i < n-1) T[i+1] = a; else T[0] = 0; } } __global__ void semi_add_GPU(int *NewPol, int *PrevPol1, int *PrevPol2, int n, int local_n, int p) { int i = blockIdx.x * blockDim.x + threadIdx.x; int part = i / local_n; int pos = i % local_n; int j = 2 * local_n * part + pos; int res; if (i < n/2) { /* if (part % 2 == 0) NewPol[i] = add_mod(NewPol[i], PrevPol[i], p);*/ res = add_mod(PrevPol1[j], PrevPol2[j], p); NewPol[j] = add_mod(NewPol[j], res, p); } } /* ================================================================================ PARALLELIZE !!!!!!! ================================================================================ */ // Horner's method to compute g(x) = f(x+1) (equivalent to Shaw & Traub's method for a=1) /* void horner_shift_GPU(int *Polynomial, int *Polynomial_shift, int n, int p) { int i; int *temp; temp = (int*) calloc (n, sizeof(int)); Polynomial_shift[0] = Polynomial[n-1]; for (i=1; i<n; i++) { memcpy(temp+1, Polynomial_shift, i*sizeof(int)); add_arrays(Polynomial_shift, Polynomial_shift, temp, n, p); Polynomial_shift[0] = (Polynomial_shift[0] + Polynomial[n-1-i]) % p; } free(temp); } */
b88fd27357506d80aca0c4df89e6e354d25d0b48.cu
#include "taylor_shift_conf.h" #include "taylor_shift_cpu.h" #include "taylor_shift_kernel.h" #include "inlines.h" // fast multiplication of two polynomials, created by Sardar Haque, I modify just a line to use it in my code __global__ void listPlainMulGpu_and_right_shift_GPU(int *Mgpu1, int *Mgpu2 , int length_poly, int poly_on_layer, int threadsForAmul, int mulInThreadBlock, int p, double pinv) { __shared__ int sM[2*Tmul]; /* sM is the shared memory where the all the coefficients and intermediate multiplications results are stored. For each multiplication it reserve 4*length_poly -1 spaces. mulID is the multiplication ID. It refers to the poly in Mgpu2 on which it will work. mulID must be less than (poly_on_layer/2). */ int mulID= ((threadIdx.x/threadsForAmul) + blockIdx.x*mulInThreadBlock); if (mulID < (poly_on_layer/2) && threadIdx.x < threadsForAmul*mulInThreadBlock) { /* The next 10 lines of code copy the polynomials in Mgpu1 from global memory to shared memory. Each thread is responsible of copying one coefficient. A thread will copy a coefficient from Mgpu1[( mulID* length_poly*2)...( mulID* length_poly*2) + length_poly*2 -1] j+u gives the right index of the coefficient in Mgpu1. In sM, the coefficients are stored at the lower part. t will find the right (4*length_poly-1) spaced slot for it. s gives the start index of its right slot. s+u gives right position for the index. */ int j = ( mulID* length_poly*2); int q = ( mulID*(2*length_poly)); // modified, clean the -1 int t = (threadIdx.x/threadsForAmul); int u = threadIdx.x % threadsForAmul; int s = t*(4*length_poly-1); int k = s + length_poly; int l = k + length_poly; int c = l+u; int a, b, i; sM[s+u] = Mgpu1[j + u]; __syncthreads(); if(u != (2*length_poly-1) ) { /* In the multiplication space, the half of the leading coefficients are computed differently than the last half. Here the computation of first half are shown. the last half is shown in else statement. 
In both cases sM[c] is the cofficient on which this thread will work on. sM[a] is the coefficient of one poly. sM[b] is the coefficient of the other poly. */ if(u < length_poly) { a = s; b = k + u; sM[c] = mul_mod(sM[a],sM[b],p,pinv); ++a; --b; for(i = 0; i < u; ++i, ++a, --b) sM[c] = add_mod(mul_mod(sM[a],sM[b],p,pinv),sM[c] ,p); Mgpu2[q+u+1] = sM[c]; //+1 added } else { b = l - 1; a = (u - length_poly) + 1 + s; sM[c] = mul_mod(sM[a],sM[b],p,pinv); ++a; --b; int tempU = u; u = (2*length_poly-2) - u; for(i = 0; i < u; ++i, ++a, --b) sM[c] = add_mod(mul_mod(sM[a],sM[b],p,pinv),sM[c] ,p); Mgpu2[q+tempU+1] = sM[c]; //+1 added } } else Mgpu2[q] = 0; // added for put 0 at position } } // create array identity (initialization of the array Fact) __global__ void identity_GPU(int *T, int n) { int k = blockIdx.x * blockDim.x + threadIdx.x; int boolean = (int) (k == 0); if (k < n+1) T[k] = k + boolean; } // create all the elements of Factorial (%p) __global__ void create_factorial_GPU(int *Fact, int n, int e, int p, double pinv) // warning : n+1 is the size of Fact but we will just full the n last element, not the first one { int k = blockIdx.x * blockDim.x + threadIdx.x; int i, j, part, pos, base; int L = 2; int B = 2; // suite if (k < n/2) { // step 1 Fact[2*k+1] = mul_mod(Fact[2*k], Fact[2*k+1], p, pinv); // next steps for (i=1; i<e; i++) { // L *= 2; B *= 2; // B = 2 * L; part = k / L; pos = k % L; base = Fact[L + part*B - 1]; j = L + part*B + pos; Fact[j] = mul_mod(base, Fact[j], p, pinv); L *= 2; } } } // create an array of the inverse numbers in Z/pZ __global__ void inverse_p_GPU(int *T, int p, double pinv) { int i; int k = blockIdx.x * blockDim.x + threadIdx.x; if (k < p) { if (k > 1) for (i=2; i<p; i++) { if (mul_mod(k, i, p, pinv) == 1) { T[k] = i; i = p; // to stop the loop } } else if (k == 1) T[1] = 1; else // (k == 0) T[0] = 0; } } // create the inverse of a number in Z/pZ __device__ int inverse_GPU(int k, int p, double pinv) { int i, res; if (k > 1) for (i=2; 
i<p; i++) { if (mul_mod(k, i, p, pinv) == 1) { res = i; i = p; // to stop the loop } } else if (k == 1) res = 1; else // (k == 0) res = 0; return res; } // creates an array of the Newton's Binomials until n modulo p (! size of the array = n+1) __device__ int create_binomial_GPU(int *Factorial, int *Inverse_p, int n, int p, double pinv, int id) { int l = n - id; int temp = mul_mod(Factorial[id], Factorial[l], p, pinv); return mul_mod(Factorial[n], Inverse_p[temp], p, pinv); } // create the Newton's Binomial coefficient "n choose id" modulo p // return "n choose id" = n! / [id!(n-id)!] mod p __device__ int create_binomial2_GPU(int *Factorial, int n, int p, double pinv, int id) { int l = n - id; int prod = mul_mod(Factorial[id], Factorial[l], p, pinv); return quo_mod(Factorial[n], prod, p, pinv); } // create the array of the coefficients of (x+1)^k for k in (1,2^(e-1)) __global__ void develop_xshift_GPU(int *T, int n, int *Factorial, int p, double pinv) { int k = blockIdx.x * blockDim.x + threadIdx.x; int m; int pow2 = 1; if (k < n) { // if (k > 1) { m = (k+1)/2; //k/2 while (m != 0) { m /= 2; pow2 *= 2; } T[k] = create_binomial2_GPU(Factorial, pow2, p, pinv, k+1 - pow2); //k-pow2 } /* else if (k == 1) { T[1] = 1; // for (x+1)^1 T[n] = 1; // last element = 1 } else // (k == 0) T[0] = 1; // for (x+1)^0 */ } } // create the product of two arrays representing polynomials __device__ void conv_prod_GPU(int *res, int *T1, int *T2, int m, int p, int local_n) { int i, j; int K = blockIdx.x * blockDim.x + threadIdx.x; if (K < m) { for (j=0; j<K; j++) { i = K - j; // K = i+j if ((i < local_n+1) && (j < local_n)) // if i < local_n + 1 then T1[i] != 0, else T1[i] = 0 so useless computations res[K] = (res[K] + T1[i]*T2[j]) % p; } for (j=K+1; j<m; j++) { i = K + m - j; if ((i < local_n+1) && (j < local_n)) res[K] = (res[K] + T1[i]*T2[j]) % p; } } } // addition of two arrays __global__ void add_arrays_GPU(int *res, int *T1, int *T2, int size, int p) { int i = blockIdx.x * blockDim.x 
+ threadIdx.x; if (i<size) res[i] = add_mod(T1[i], T2[i], p); } // creates an array of zeros __global__ void Zeros_GPU(int *T, int n) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) T[i] = 0; } // initialize Polynomial_shift __global__ void init_polynomial_shift_GPU(int *Polynomial, int *Polynomial_shift, int n, int p) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = 2*i; if (i < n/2) { // if (i % 2 == 0) Polynomial_shift[j] = add_mod(Polynomial[j], Polynomial[j+1], p); // else // (i % 2 == 1) Polynomial_shift[j+1] = Polynomial[j+1]; } /* EXAMPLE for n=8 : after this procedure, Polynomial_shift = [f0+f1, f1, f2+f3, f3, f4+f5, f5, f6+f7, f7] */ } // transfer at each step the polynomials which need to be multiplicated __global__ void transfert_array_GPU(int *Mgpu, int *Polynomial_shift, int *Monomial_shift, int n, int local_n) { int i = blockIdx.x * blockDim.x + threadIdx.x; int B = 2*local_n; int pos, part, PART, bool1, bool2; /* EXAMPLE -------------------------------------------------------- ARRAY Polynomial_shift_device[i-1] considered _________ _________ _________ _________ | | | | | | | X | | Y | |_________|_________|_________|_________| part=0 part=1 part=2 part=3 local_n = size of a part -------------------------------------------------------- ARRAY Mgpu[i] considered ___________________ ___________________ | | | | X (x+1)^m | Y (x+1)^m | |___________________|___________________| PART=0 PART=1 B = 2 * local_n = size of a PART m = local_n We want to fill the array Mgpu[i] like this : the polynomials which need to be multiplicated by (x+1)^m are of odd part and we store them at the beginning of each PART of Mgpu[i]. The end of each part doesn't really contain (x+1)^m as we need arrays to be multiplicated, so we avoid the multiplication by 1. Thus the end of each PART contains exactly : [(x+1)^m - 1] / x = m + ... 
+ x^(m-1) {m elements} */ if (i < n) { part = i / local_n; pos = i % local_n; // i = part * local_n + pos PART = part / 2; bool2 = part % 2; // = 0 or 1 bool1 = 1 - bool2; // = 1 or 0, bool1 and bool2 are contraries // What we want to do /* if (part % 2 == 0) Mgpu[PART * B + local_n + pos] = Monomial_shift[local_n + pos];// + 1]; else // (part % 2 == 1) Mgpu[PART * B + pos] = Polynomial_shift[i]; */ // What we do (faster) Mgpu[PART * B + local_n * bool1 + pos] = bool1 * Monomial_shift[local_n + pos] + bool2 * Polynomial_shift[i]; } } __global__ void right_shift_GPU(int *T, int n) { int i = blockIdx.x * blockDim.x + threadIdx.x; int a; if (i < n) { a = T[i]; __syncthreads(); if (i < n-1) T[i+1] = a; else T[0] = 0; } } __global__ void semi_add_GPU(int *NewPol, int *PrevPol1, int *PrevPol2, int n, int local_n, int p) { int i = blockIdx.x * blockDim.x + threadIdx.x; int part = i / local_n; int pos = i % local_n; int j = 2 * local_n * part + pos; int res; if (i < n/2) { /* if (part % 2 == 0) NewPol[i] = add_mod(NewPol[i], PrevPol[i], p);*/ res = add_mod(PrevPol1[j], PrevPol2[j], p); NewPol[j] = add_mod(NewPol[j], res, p); } } /* ================================================================================ PARALLELIZE !!!!!!! ================================================================================ */ // Horner's method to compute g(x) = f(x+1) (equivalent to Shaw & Traub's method for a=1) /* void horner_shift_GPU(int *Polynomial, int *Polynomial_shift, int n, int p) { int i; int *temp; temp = (int*) calloc (n, sizeof(int)); Polynomial_shift[0] = Polynomial[n-1]; for (i=1; i<n; i++) { memcpy(temp+1, Polynomial_shift, i*sizeof(int)); add_arrays(Polynomial_shift, Polynomial_shift, temp, n, p); Polynomial_shift[0] = (Polynomial_shift[0] + Polynomial[n-1-i]) % p; } free(temp); } */
97541e3e5071011243c46d7119e2d7ef4a61f946.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <driver_functions.h> #include <cstdio> static inline int nextPow2(int n) { n--; n |= n >> 1; n |= n >> 2; n |= n >> 4; n |= n >> 8; n |= n >> 16; n++; return n; } #define TH_PER_BLOCK 64 // Sets keys with idx [length, nextPow2(length) ) as -1 // Sets keys with idx [0, length - 1] with its idx // -1 signifies maximum possible weight __global__ void cudaSortInit(int* keys, int length, int maxSize) { if (maxSize & (maxSize - 1)) { printf("WARINING: Length is not a power of two."); return; } int idx = blockDim.x * blockIdx.x + threadIdx.x; // Only going to modify ones past max length if (idx >= maxSize) { return; } else if (idx < length) { keys[idx] = idx; } else { keys[idx] = -1; } } // Requires length to be a power of 2 __global__ void cudaBitonicKernel( int* keys, float* value, int length, int step, int maxStep ) { if (length & (length - 1)) { printf("WARINING: Length is not a power of two."); return; } int threadId = blockDim.x * blockIdx.x + threadIdx.x; if (threadId >= length / 2) return; // The whole range is subdivided into regions of 2 * $step size. 
// Each region will use $step number of threads int regionSize = 2 * step; // Each region contains exactly $step threads int regionId = threadId / step; int idxInRegion = threadId % step; int thisIdx = regionId * regionSize + idxInRegion; int otherIdx = regionId * regionSize + step + idxInRegion; int thisKey = keys[thisIdx], otherKey = keys[otherIdx]; // Each maxStep region contains exactly maxStep active threads int maxStepRegionId = threadId / maxStep; bool needSwap = false; if (maxStepRegionId % 2 == 1) { // If we are in the upsweep phase, and region is odd, // We need to ensure larger one is in lower index needSwap = (otherKey == -1 || (thisKey != -1 && value[thisKey] < value[otherKey])); } else { // We need to ensure larger one is in higher index needSwap = (thisKey == -1 || (otherKey != -1 && value[thisKey] > value[otherKey])); } if (needSwap) { keys[thisIdx] = otherKey; keys[otherIdx] = thisKey; } } __global__ void cudaRandomData(float* values, int length) { int threadId = blockDim.x * blockIdx.x + threadIdx.x; if (threadId >= length) return; values[threadId] = (threadId * 17167 + (17183 - threadIdx.x) * 15271) % 32768; } void generateRandomData(float* values, int length) { int nBlocks = (length + TH_PER_BLOCK - 1) / TH_PER_BLOCK; hipLaunchKernelGGL(( cudaRandomData), dim3(nBlocks), dim3(TH_PER_BLOCK), 0, 0, values, length); } void prepareIndiciesForSort(int* keys, int length) { int p2len = nextPow2(length); // Power-of-2 length if (p2len > length) { // Needs to pad the keys int nBlocks = (p2len + TH_PER_BLOCK - 1) / TH_PER_BLOCK; hipLaunchKernelGGL(( cudaSortInit), dim3(nBlocks), dim3(TH_PER_BLOCK), 0, 0, keys, length, p2len); } } void cudaSortKV(int* keys, float* values, int length) { int p2len = nextPow2(length); // Power-of-2 length // Only p2len / 2 of swaps may be required int nBlocks = (p2len / 2 + TH_PER_BLOCK - 1) / TH_PER_BLOCK; // Up sweep pahse for (int maxStep = 1; maxStep < p2len; maxStep *= 2) { for (int step = maxStep; step >= 1; step /= 2) { 
// printf("nblocks = %d, TH_PER_BLOCK = %d.\n", nBlocks, TH_PER_BLOCK); hipLaunchKernelGGL(( cudaBitonicKernel), dim3(nBlocks), dim3(TH_PER_BLOCK), 0, 0, keys, values, p2len, step, maxStep ); } } }
97541e3e5071011243c46d7119e2d7ef4a61f946.cu
#include <cuda.h> #include <cuda_runtime.h> #include <driver_functions.h> #include <cstdio> static inline int nextPow2(int n) { n--; n |= n >> 1; n |= n >> 2; n |= n >> 4; n |= n >> 8; n |= n >> 16; n++; return n; } #define TH_PER_BLOCK 64 // Sets keys with idx [length, nextPow2(length) ) as -1 // Sets keys with idx [0, length - 1] with its idx // -1 signifies maximum possible weight __global__ void cudaSortInit(int* keys, int length, int maxSize) { if (maxSize & (maxSize - 1)) { printf("WARINING: Length is not a power of two."); return; } int idx = blockDim.x * blockIdx.x + threadIdx.x; // Only going to modify ones past max length if (idx >= maxSize) { return; } else if (idx < length) { keys[idx] = idx; } else { keys[idx] = -1; } } // Requires length to be a power of 2 __global__ void cudaBitonicKernel( int* keys, float* value, int length, int step, int maxStep ) { if (length & (length - 1)) { printf("WARINING: Length is not a power of two."); return; } int threadId = blockDim.x * blockIdx.x + threadIdx.x; if (threadId >= length / 2) return; // The whole range is subdivided into regions of 2 * $step size. 
// Each region will use $step number of threads int regionSize = 2 * step; // Each region contains exactly $step threads int regionId = threadId / step; int idxInRegion = threadId % step; int thisIdx = regionId * regionSize + idxInRegion; int otherIdx = regionId * regionSize + step + idxInRegion; int thisKey = keys[thisIdx], otherKey = keys[otherIdx]; // Each maxStep region contains exactly maxStep active threads int maxStepRegionId = threadId / maxStep; bool needSwap = false; if (maxStepRegionId % 2 == 1) { // If we are in the upsweep phase, and region is odd, // We need to ensure larger one is in lower index needSwap = (otherKey == -1 || (thisKey != -1 && value[thisKey] < value[otherKey])); } else { // We need to ensure larger one is in higher index needSwap = (thisKey == -1 || (otherKey != -1 && value[thisKey] > value[otherKey])); } if (needSwap) { keys[thisIdx] = otherKey; keys[otherIdx] = thisKey; } } __global__ void cudaRandomData(float* values, int length) { int threadId = blockDim.x * blockIdx.x + threadIdx.x; if (threadId >= length) return; values[threadId] = (threadId * 17167 + (17183 - threadIdx.x) * 15271) % 32768; } void generateRandomData(float* values, int length) { int nBlocks = (length + TH_PER_BLOCK - 1) / TH_PER_BLOCK; cudaRandomData<<<nBlocks, TH_PER_BLOCK>>>(values, length); } void prepareIndiciesForSort(int* keys, int length) { int p2len = nextPow2(length); // Power-of-2 length if (p2len > length) { // Needs to pad the keys int nBlocks = (p2len + TH_PER_BLOCK - 1) / TH_PER_BLOCK; cudaSortInit<<<nBlocks, TH_PER_BLOCK>>>(keys, length, p2len); } } void cudaSortKV(int* keys, float* values, int length) { int p2len = nextPow2(length); // Power-of-2 length // Only p2len / 2 of swaps may be required int nBlocks = (p2len / 2 + TH_PER_BLOCK - 1) / TH_PER_BLOCK; // Up sweep pahse for (int maxStep = 1; maxStep < p2len; maxStep *= 2) { for (int step = maxStep; step >= 1; step /= 2) { // printf("nblocks = %d, TH_PER_BLOCK = %d.\n", nBlocks, TH_PER_BLOCK); 
cudaBitonicKernel<<<nBlocks, TH_PER_BLOCK>>>( keys, values, p2len, step, maxStep ); } } }
2f1e6ba64029ae7879fa2a16ebf9edd2dbc9fa7b.hip
// !!! This is a file automatically generated by hipify!!! /******************************************************* * Copyright (c) 2014, ArrayFire * All rights reserved. * * This file is distributed under 3-clause BSD license. * The complete license agreement can be obtained at: * http://arrayfire.com/licenses/BSD-3-Clause ********************************************************/ #if defined (WITH_GRAPHICS) #include <interopManager.hpp> #include <Array.hpp> #include <surface.hpp> #include <err_cuda.hpp> #include <debug_cuda.hpp> #include <join.hpp> #include <reduce.hpp> #include <reorder.hpp> using af::dim4; namespace cuda { template<typename T> void copy_surface(const Array<T> &P, fg::Surface* surface) { const T *d_P = P.get(); InteropManager& intrpMngr = InteropManager::getInstance(); cudaGraphicsResource *cudaVBOResource = intrpMngr.getBufferResource(surface); // Map resource. Copy data to VBO. Unmap resource. size_t num_bytes = surface->size(); T* d_vbo = NULL; hipGraphicsMapResources(1, &cudaVBOResource, 0); hipGraphicsResourceGetMappedPointer((void **)&d_vbo, &num_bytes, cudaVBOResource); hipMemcpyAsync(d_vbo, d_P, num_bytes, hipMemcpyDeviceToDevice, cuda::getStream(cuda::getActiveDeviceId())); hipGraphicsUnmapResources(1, &cudaVBOResource, 0); CheckGL("After cuda resource copy"); POST_LAUNCH_CHECK(); } #define INSTANTIATE(T) \ template void copy_surface<T>(const Array<T> &P, fg::Surface* surface); INSTANTIATE(float) INSTANTIATE(double) INSTANTIATE(int) INSTANTIATE(uint) INSTANTIATE(short) INSTANTIATE(ushort) INSTANTIATE(uchar) } #endif // WITH_GRAPHICS
2f1e6ba64029ae7879fa2a16ebf9edd2dbc9fa7b.cu
/******************************************************* * Copyright (c) 2014, ArrayFire * All rights reserved. * * This file is distributed under 3-clause BSD license. * The complete license agreement can be obtained at: * http://arrayfire.com/licenses/BSD-3-Clause ********************************************************/ #if defined (WITH_GRAPHICS) #include <interopManager.hpp> #include <Array.hpp> #include <surface.hpp> #include <err_cuda.hpp> #include <debug_cuda.hpp> #include <join.hpp> #include <reduce.hpp> #include <reorder.hpp> using af::dim4; namespace cuda { template<typename T> void copy_surface(const Array<T> &P, fg::Surface* surface) { const T *d_P = P.get(); InteropManager& intrpMngr = InteropManager::getInstance(); cudaGraphicsResource *cudaVBOResource = intrpMngr.getBufferResource(surface); // Map resource. Copy data to VBO. Unmap resource. size_t num_bytes = surface->size(); T* d_vbo = NULL; cudaGraphicsMapResources(1, &cudaVBOResource, 0); cudaGraphicsResourceGetMappedPointer((void **)&d_vbo, &num_bytes, cudaVBOResource); cudaMemcpyAsync(d_vbo, d_P, num_bytes, cudaMemcpyDeviceToDevice, cuda::getStream(cuda::getActiveDeviceId())); cudaGraphicsUnmapResources(1, &cudaVBOResource, 0); CheckGL("After cuda resource copy"); POST_LAUNCH_CHECK(); } #define INSTANTIATE(T) \ template void copy_surface<T>(const Array<T> &P, fg::Surface* surface); INSTANTIATE(float) INSTANTIATE(double) INSTANTIATE(int) INSTANTIATE(uint) INSTANTIATE(short) INSTANTIATE(ushort) INSTANTIATE(uchar) } #endif // WITH_GRAPHICS
flow_color.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "flow_color.h" #include <math.h> #define PI 3.1415926535f #define EPSILON 0.0001 // compute angle in radians between motion vector v and (0, 1) // the component v2 is assumed to be normalized w.r.t. the original vector v __device__ float d_getAngleFromVector(float v1, float v2) { float angle = acosf(v2); if (v1 < 0) { angle = 2 * PI - angle; } return angle; } // compute a color coding for the given flow field adding a colored border indicating the direction __global__ void createColorCoding(float* d_v1, float* d_v2, float* d_out, int w, int h, int border) { // get current thread index (x, y) int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; // return if coordinate (x, y) not inside image if (x >= w || y >= h) { return; } // index for access image pixel of image with border int idxb = x + w * y; // index for access image pixel inside image without border int idx = (x-border) + (w-2*border) * (y-border); // scale factor float scale = 0.4f; // compute vector length float v1, v2; if (x < border || x >= w - border || y < border || y >= h - border) { v1 = (x - w / 2.0f) / (fminf(w, h) * scale / 3.0f); v2 = (y - h / 2.0f) / (fminf(w, h) * scale / 3.0f); } else { v1 = d_v1[idx]; v2 = d_v2[idx]; } float v_len = sqrtf(v1*v1 + v2*v2); if (v_len > EPSILON) { // compute angle float angle = d_getAngleFromVector(v1, v2 / v_len); // use weighted v_len for speed v_len *= scale; // get color index and color interpolant float colorInterp = angle * 3 / PI; int colorIdx = static_cast<int>(colorInterp); colorInterp -= colorIdx; // apply color scheme to output image const float intensities[] = { 1.0f, 1.0f, 0.0f, 0.0f, 0.0f, 1.0f }; float red = intensities[colorIdx] + colorInterp * (intensities[(colorIdx + 1) % 6] - intensities[colorIdx]); float green = intensities[(colorIdx + 2) % 6] + colorInterp * (intensities[(colorIdx + 3) % 6] - 
intensities[(colorIdx + 2) % 6]); float blue = intensities[(colorIdx + 4) % 6] + colorInterp * (intensities[(colorIdx + 5) % 6] - intensities[(colorIdx + 4) % 6]); d_out[idxb] = fminf(1.0f, v_len*red); d_out[idxb + w*h] = fminf(1.0f, v_len*green); d_out[idxb + 2 * w*h] = fminf(1.0f, v_len*blue); } else { // vector to short for beeing color coded d_out[idxb] = 0.0f; d_out[idxb + w*h] = 0.0f; d_out[idxb + 2 * w*h] = 0.0f; } } // compute a color coding for the given flow field adding a colored border indicating the direction + blend with the input image by alpha = 0.5 __global__ void createColorCoding(float* d_in, float* d_v1, float* d_v2, float* d_out, int w, int h, int nc, int border) { // get current thread index (x, y) int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; // return if coordinate (x, y) not inside image if (x >= w || y >= h) { return; } bool isBorder = (x < border || x >= w - border || y < border || y >= h - border); // width without border int wfree = w - 2 * border; // height without border int hfree = h - 2 * border; // index for access image pixel of image with border int idxb = x + w * y; // index for access image pixel inside image without border int idx = (x - border) + wfree * (y - border); // scale factor float scale = 0.4f; // compute vector length float v1, v2; if (isBorder) { v1 = (x - w / 2.0f) / (fminf(w, h) * scale / 3.0f); v2 = (y - h / 2.0f) / (fminf(w, h) * scale / 3.0f); } else { v1 = d_v1[idx]; v2 = d_v2[idx]; } float v_len = sqrtf(v1*v1 + v2*v2); // get input image color values float in_r, in_g, in_b; if (!isBorder) { in_r = d_in[idx]; in_g = in_r; in_b = in_r; if (nc == 3) { in_g = d_in[idx + wfree*hfree]; in_b = d_in[idx + 2 * wfree*hfree]; } } if (v_len > EPSILON) { // compute angle float angle = d_getAngleFromVector(v1, v2 / v_len); // use weighted v_len for speed v_len *= scale; // get color index and color interpolant float colorInterp = angle * 3 / PI; int colorIdx = 
static_cast<int>(colorInterp); colorInterp -= colorIdx; // apply color scheme to output image (merge with input image data) const float intensities[] = { 1.0f, 1.0f, 0.0f, 0.0f, 0.0f, 1.0f }; float red = intensities[colorIdx] + colorInterp * (intensities[(colorIdx + 1) % 6] - intensities[colorIdx]); float green = intensities[(colorIdx + 2) % 6] + colorInterp * (intensities[(colorIdx + 3) % 6] - intensities[(colorIdx + 2) % 6]); float blue = intensities[(colorIdx + 4) % 6] + colorInterp * (intensities[(colorIdx + 5) % 6] - intensities[(colorIdx + 4) % 6]); if (isBorder) { d_out[idxb] = fminf(1.0f, v_len*red); d_out[idxb + w*h] = fminf(1.0f, v_len*green); d_out[idxb + 2 * w*h] = fminf(1.0f, v_len*blue); } else { d_out[idxb] = fminf(1.0f, 0.5f*v_len*red + 0.5f*in_r); d_out[idxb + w*h] = fminf(1.0f, 0.5f*v_len*green + 0.5f*in_g); d_out[idxb + 2 * w*h] = fminf(1.0f, 0.5f*v_len*blue + 0.5f*in_b); } } else { // vector is to short for being color coded d_out[idxb] = 0.5f * in_r; d_out[idxb + w*h] = 0.5f * in_g; d_out[idxb + 2 * w*h] = 0.5f * in_b; } }
flow_color.cu
#include "flow_color.h" #include <math.h> #define PI 3.1415926535f #define EPSILON 0.0001 // compute angle in radians between motion vector v and (0, 1) // the component v2 is assumed to be normalized w.r.t. the original vector v __device__ float d_getAngleFromVector(float v1, float v2) { float angle = acosf(v2); if (v1 < 0) { angle = 2 * PI - angle; } return angle; } // compute a color coding for the given flow field adding a colored border indicating the direction __global__ void createColorCoding(float* d_v1, float* d_v2, float* d_out, int w, int h, int border) { // get current thread index (x, y) int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; // return if coordinate (x, y) not inside image if (x >= w || y >= h) { return; } // index for access image pixel of image with border int idxb = x + w * y; // index for access image pixel inside image without border int idx = (x-border) + (w-2*border) * (y-border); // scale factor float scale = 0.4f; // compute vector length float v1, v2; if (x < border || x >= w - border || y < border || y >= h - border) { v1 = (x - w / 2.0f) / (fminf(w, h) * scale / 3.0f); v2 = (y - h / 2.0f) / (fminf(w, h) * scale / 3.0f); } else { v1 = d_v1[idx]; v2 = d_v2[idx]; } float v_len = sqrtf(v1*v1 + v2*v2); if (v_len > EPSILON) { // compute angle float angle = d_getAngleFromVector(v1, v2 / v_len); // use weighted v_len for speed v_len *= scale; // get color index and color interpolant float colorInterp = angle * 3 / PI; int colorIdx = static_cast<int>(colorInterp); colorInterp -= colorIdx; // apply color scheme to output image const float intensities[] = { 1.0f, 1.0f, 0.0f, 0.0f, 0.0f, 1.0f }; float red = intensities[colorIdx] + colorInterp * (intensities[(colorIdx + 1) % 6] - intensities[colorIdx]); float green = intensities[(colorIdx + 2) % 6] + colorInterp * (intensities[(colorIdx + 3) % 6] - intensities[(colorIdx + 2) % 6]); float blue = intensities[(colorIdx + 4) % 6] + colorInterp * 
(intensities[(colorIdx + 5) % 6] - intensities[(colorIdx + 4) % 6]); d_out[idxb] = fminf(1.0f, v_len*red); d_out[idxb + w*h] = fminf(1.0f, v_len*green); d_out[idxb + 2 * w*h] = fminf(1.0f, v_len*blue); } else { // vector to short for beeing color coded d_out[idxb] = 0.0f; d_out[idxb + w*h] = 0.0f; d_out[idxb + 2 * w*h] = 0.0f; } } // compute a color coding for the given flow field adding a colored border indicating the direction + blend with the input image by alpha = 0.5 __global__ void createColorCoding(float* d_in, float* d_v1, float* d_v2, float* d_out, int w, int h, int nc, int border) { // get current thread index (x, y) int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; // return if coordinate (x, y) not inside image if (x >= w || y >= h) { return; } bool isBorder = (x < border || x >= w - border || y < border || y >= h - border); // width without border int wfree = w - 2 * border; // height without border int hfree = h - 2 * border; // index for access image pixel of image with border int idxb = x + w * y; // index for access image pixel inside image without border int idx = (x - border) + wfree * (y - border); // scale factor float scale = 0.4f; // compute vector length float v1, v2; if (isBorder) { v1 = (x - w / 2.0f) / (fminf(w, h) * scale / 3.0f); v2 = (y - h / 2.0f) / (fminf(w, h) * scale / 3.0f); } else { v1 = d_v1[idx]; v2 = d_v2[idx]; } float v_len = sqrtf(v1*v1 + v2*v2); // get input image color values float in_r, in_g, in_b; if (!isBorder) { in_r = d_in[idx]; in_g = in_r; in_b = in_r; if (nc == 3) { in_g = d_in[idx + wfree*hfree]; in_b = d_in[idx + 2 * wfree*hfree]; } } if (v_len > EPSILON) { // compute angle float angle = d_getAngleFromVector(v1, v2 / v_len); // use weighted v_len for speed v_len *= scale; // get color index and color interpolant float colorInterp = angle * 3 / PI; int colorIdx = static_cast<int>(colorInterp); colorInterp -= colorIdx; // apply color scheme to output image (merge with 
input image data) const float intensities[] = { 1.0f, 1.0f, 0.0f, 0.0f, 0.0f, 1.0f }; float red = intensities[colorIdx] + colorInterp * (intensities[(colorIdx + 1) % 6] - intensities[colorIdx]); float green = intensities[(colorIdx + 2) % 6] + colorInterp * (intensities[(colorIdx + 3) % 6] - intensities[(colorIdx + 2) % 6]); float blue = intensities[(colorIdx + 4) % 6] + colorInterp * (intensities[(colorIdx + 5) % 6] - intensities[(colorIdx + 4) % 6]); if (isBorder) { d_out[idxb] = fminf(1.0f, v_len*red); d_out[idxb + w*h] = fminf(1.0f, v_len*green); d_out[idxb + 2 * w*h] = fminf(1.0f, v_len*blue); } else { d_out[idxb] = fminf(1.0f, 0.5f*v_len*red + 0.5f*in_r); d_out[idxb + w*h] = fminf(1.0f, 0.5f*v_len*green + 0.5f*in_g); d_out[idxb + 2 * w*h] = fminf(1.0f, 0.5f*v_len*blue + 0.5f*in_b); } } else { // vector is to short for being color coded d_out[idxb] = 0.5f * in_r; d_out[idxb + w*h] = 0.5f * in_g; d_out[idxb + 2 * w*h] = 0.5f * in_b; } }
66860a333f149731c02c4d056eff7197e8232182.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #pragma once #include <gasal2/gasal.h> template <algo_type T> __global__ void gasal_get_tb( uint8_t *cigar, uint32_t *query_batch_lens, uint32_t *target_batch_lens, uint32_t *cigar_offset, uint4 *packed_tb_matrices, gasal_res_t *device_res, int n_tasks ){ int total_score __attribute__((unused)); int curr_score __attribute__((unused)); const uint32_t tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid >= n_tasks) return; int offset = cigar_offset[tid]; int i, j; if (T==algo_type::LOCAL) { i = device_res->target_batch_end[tid]; j = device_res->query_batch_end[tid]; total_score = device_res->aln_score[tid]; curr_score = 0; } else if (T==algo_type::GLOBAL) { i = target_batch_lens[tid]; j = query_batch_lens[tid]; } uint32_t prev_op_to_fill = 0; int read_len_8 = query_batch_lens[tid]%8 ? query_batch_lens[tid] + (8 - (query_batch_lens[tid]%8)) : query_batch_lens[tid]; int n_ops = 0; int prev_tile_no = -1; uint4 tile = make_uint4(0, 0, 0, 0); int op_select = 3; int op_shift = 0; int count = 0; uint32_t op_to_fill; while(i >= 0 && j >= 0) { const int cell = (((i >> 3) * read_len_8) << 3) + (j << 3) + (i&7); int tile_no = cell>>5; tile = tile_no != prev_tile_no ? packed_tb_matrices[(tile_no*n_tasks) + tid] : tile; prev_tile_no = tile_no; int cell_no_in_tile = cell - (tile_no<<5); int reg_no_in_tile = cell_no_in_tile >> 3; int cell_no_in_reg = cell_no_in_tile - (reg_no_in_tile << 3); uint32_t reg = reg_no_in_tile == 0 ? tile.x : (reg_no_in_tile == 1 ? tile.y : (reg_no_in_tile == 2 ? tile.z : tile.w)); uint32_t cell_op = (reg >> (28 - (cell_no_in_reg << 2))) & 15; uint32_t op = (cell_op >> op_shift) & op_select; op_to_fill = op == 0 || op_select == 3 ? op : op_shift ; op_select = op == 0 || (op == 1 && op_select == 3) ? 3 : 1; op_shift = op == 0 || ( op == 1 && op_select == 3) ? 0 : ((op == 2 || op == 3) ? 
op : op_shift); if(count < 63 && op_to_fill == prev_op_to_fill) { count++; } else { if (count > 0) { uint8_t reg_out = 0; reg_out |= prev_op_to_fill; reg_out |= (uint8_t)(count << 2); cigar[offset++] = reg_out; n_ops++; } count = 1; } if (T==algo_type::LOCAL) { curr_score += ((op_to_fill == 2 || op_to_fill == 3) && prev_op_to_fill != op_to_fill) ? -_cudaGapOE : ((op_to_fill == 2 || op_to_fill == 3) ? - _cudaGapExtend : (op_to_fill == 1 ? -_cudaMismatchScore : _cudaMatchScore)); if (curr_score == total_score) break; } prev_op_to_fill = op_to_fill; i = op_to_fill == 0 || op_to_fill == 1 || op_to_fill == 2 ? i - 1 : i; j = op_to_fill == 0 || op_to_fill == 1 || op_to_fill == 3 ? j - 1 : j; } uint8_t reg_out = 0; reg_out |= prev_op_to_fill; reg_out |= (uint8_t)(count << 2); cigar[offset++] = reg_out; n_ops++; if (T==algo_type::GLOBAL) { while (i >= 0) { uint32_t reg_out = 0; uint8_t resd_count = (i+1) <= 63 ? (i+1) : 63; reg_out |= 2; reg_out |= (uint8_t)(resd_count << 2); cigar[offset++] = reg_out; n_ops++; i = i - 63; } while (j >= 0) { uint32_t reg_out = 0; uint8_t resd_count = (j+1) <= 63 ? (j+1) : 63; reg_out |= 3; reg_out |= (uint8_t)(resd_count << 2); cigar[offset++] = reg_out; n_ops++; j = j - 63; } } if (T==algo_type::LOCAL) { device_res->target_batch_start[tid] = i; device_res->query_batch_start[tid] = j; } query_batch_lens[tid] = n_ops; }
66860a333f149731c02c4d056eff7197e8232182.cu
#pragma once #include <gasal2/gasal.h> template <algo_type T> __global__ void gasal_get_tb( uint8_t *cigar, uint32_t *query_batch_lens, uint32_t *target_batch_lens, uint32_t *cigar_offset, uint4 *packed_tb_matrices, gasal_res_t *device_res, int n_tasks ){ int total_score __attribute__((unused)); int curr_score __attribute__((unused)); const uint32_t tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid >= n_tasks) return; int offset = cigar_offset[tid]; int i, j; if (T==algo_type::LOCAL) { i = device_res->target_batch_end[tid]; j = device_res->query_batch_end[tid]; total_score = device_res->aln_score[tid]; curr_score = 0; } else if (T==algo_type::GLOBAL) { i = target_batch_lens[tid]; j = query_batch_lens[tid]; } uint32_t prev_op_to_fill = 0; int read_len_8 = query_batch_lens[tid]%8 ? query_batch_lens[tid] + (8 - (query_batch_lens[tid]%8)) : query_batch_lens[tid]; int n_ops = 0; int prev_tile_no = -1; uint4 tile = make_uint4(0, 0, 0, 0); int op_select = 3; int op_shift = 0; int count = 0; uint32_t op_to_fill; while(i >= 0 && j >= 0) { const int cell = (((i >> 3) * read_len_8) << 3) + (j << 3) + (i&7); int tile_no = cell>>5; tile = tile_no != prev_tile_no ? packed_tb_matrices[(tile_no*n_tasks) + tid] : tile; prev_tile_no = tile_no; int cell_no_in_tile = cell - (tile_no<<5); int reg_no_in_tile = cell_no_in_tile >> 3; int cell_no_in_reg = cell_no_in_tile - (reg_no_in_tile << 3); uint32_t reg = reg_no_in_tile == 0 ? tile.x : (reg_no_in_tile == 1 ? tile.y : (reg_no_in_tile == 2 ? tile.z : tile.w)); uint32_t cell_op = (reg >> (28 - (cell_no_in_reg << 2))) & 15; uint32_t op = (cell_op >> op_shift) & op_select; op_to_fill = op == 0 || op_select == 3 ? op : op_shift ; op_select = op == 0 || (op == 1 && op_select == 3) ? 3 : 1; op_shift = op == 0 || ( op == 1 && op_select == 3) ? 0 : ((op == 2 || op == 3) ? 
op : op_shift); if(count < 63 && op_to_fill == prev_op_to_fill) { count++; } else { if (count > 0) { uint8_t reg_out = 0; reg_out |= prev_op_to_fill; reg_out |= (uint8_t)(count << 2); cigar[offset++] = reg_out; n_ops++; } count = 1; } if (T==algo_type::LOCAL) { curr_score += ((op_to_fill == 2 || op_to_fill == 3) && prev_op_to_fill != op_to_fill) ? -_cudaGapOE : ((op_to_fill == 2 || op_to_fill == 3) ? - _cudaGapExtend : (op_to_fill == 1 ? -_cudaMismatchScore : _cudaMatchScore)); if (curr_score == total_score) break; } prev_op_to_fill = op_to_fill; i = op_to_fill == 0 || op_to_fill == 1 || op_to_fill == 2 ? i - 1 : i; j = op_to_fill == 0 || op_to_fill == 1 || op_to_fill == 3 ? j - 1 : j; } uint8_t reg_out = 0; reg_out |= prev_op_to_fill; reg_out |= (uint8_t)(count << 2); cigar[offset++] = reg_out; n_ops++; if (T==algo_type::GLOBAL) { while (i >= 0) { uint32_t reg_out = 0; uint8_t resd_count = (i+1) <= 63 ? (i+1) : 63; reg_out |= 2; reg_out |= (uint8_t)(resd_count << 2); cigar[offset++] = reg_out; n_ops++; i = i - 63; } while (j >= 0) { uint32_t reg_out = 0; uint8_t resd_count = (j+1) <= 63 ? (j+1) : 63; reg_out |= 3; reg_out |= (uint8_t)(resd_count << 2); cigar[offset++] = reg_out; n_ops++; j = j - 63; } } if (T==algo_type::LOCAL) { device_res->target_batch_start[tid] = i; device_res->query_batch_start[tid] = j; } query_batch_lens[tid] = n_ops; }
a7717fc4fbf7e1214164be84044ce0f7ec00fa95.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Indice2D.h" #include "Indice1D.h" #include "cudaTools.h" #include <stdio.h> /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Imported *| \*-------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __global__ void addVector(float* ptrDevV1, float* ptrDevV2, float* ptrDevW, int n); /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ static __device__ void secondaire(float* ptrDevV1, float* ptrDevV2, float* ptrDevW,int n); /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ /** * output : void required !! 
*/ __global__ void addVector(float* ptrDevV1, float* ptrDevV2, float* ptrDevW, int n) { secondaire(ptrDevV1, ptrDevV2, ptrDevW, n); // pas necessaire, just for fun } /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ __device__ void secondaire(float* ptrDevV1, float* ptrDevV2, float* ptrDevW, int n) { const int NB_THREAD = Indice2D::nbThread(); const int TID = Indice2D::tid(); // Debug, facultatif //if (TID == 0) //{ //printf("Coucou from device tid = %d", TID); //required Device::synchronize(); after the call of kernel //} // pattern 1-1 if (TID < n) // facultatif mais plus sr ptrDevW[TID] = ptrDevV1[TID] + ptrDevV2[TID]; // pattern entrelacement // int s = TID; // while (s < n) // { // ptrDevW[s] = ptrDevV1[s] + ptrDevV2[s]; // s += NB_THREAD; // } } /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
a7717fc4fbf7e1214164be84044ce0f7ec00fa95.cu
#include "Indice2D.h" #include "Indice1D.h" #include "cudaTools.h" #include <stdio.h> /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Imported *| \*-------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __global__ void addVector(float* ptrDevV1, float* ptrDevV2, float* ptrDevW, int n); /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ static __device__ void secondaire(float* ptrDevV1, float* ptrDevV2, float* ptrDevW,int n); /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ /** * output : void required !! */ __global__ void addVector(float* ptrDevV1, float* ptrDevV2, float* ptrDevW, int n) { secondaire(ptrDevV1, ptrDevV2, ptrDevW, n); // pas necessaire, just for fun } /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ __device__ void secondaire(float* ptrDevV1, float* ptrDevV2, float* ptrDevW, int n) { const int NB_THREAD = Indice2D::nbThread(); const int TID = Indice2D::tid(); // Debug, facultatif //if (TID == 0) //{ //printf("Coucou from device tid = %d", TID); //required Device::synchronize(); after the call of kernel //} // pattern 1-1 if (TID < n) // facultatif mais plus sûr ptrDevW[TID] = ptrDevV1[TID] + ptrDevV2[TID]; // pattern entrelacement // int s = TID; // while (s < n) // { // ptrDevW[s] = ptrDevV1[s] + ptrDevV2[s]; // s += NB_THREAD; // } } /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
e73c69b499e9038fc3d7ec658c14e00b18ddadbf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // final comparison: c++, parallel mv multiplication, sparse mv multiplication with fft // CentOS compiling: // export LD_LIBRARY_PATH="/usr/local/cuda-11.0/targets/x86_64-linux/lib/" // nvcc -std=c++11 resultgen.cu -o resultgen -I/usr/local/cuda/targets/x86_64-linux/include/ -L/usr/local/cuda/targets/x86_64-linux/lib/ -lcufft // ./resultgen // Windows compiling: // nvcc resultgen.cu -o resultgen -I/usr/local/cuda/targets/x86_64-linux/include/ -L/usr/local/cuda/targets/x86_64-linux/lib/ -lcufft // resultgen.exe #include <iostream> #include <thread> #include <chrono> #include <hipfft.h> #include "make_print.h" #include "hip/hip_complex.h" #define M 500 // number of Matrices we want to multiply #define V 10 // number of Vectors we want to multiply #define dimU 16 // number of senders = rows of Matrix h_m_W #define dimB 128 // number of receivers = columns of Matrix h_m_W #define K 12 // number of zero values per row #define MATRIX_SIZE dimU*dimB #define MATRIXW_BYTES MATRIX_SIZE*sizeof(hipDoubleComplex) #define VECTORY_BYTES dimB*sizeof(hipDoubleComplex) #define VECTORR_BYTES dimU*sizeof(hipDoubleComplex) dim3 getDimGrid(const int m, const int n) { dim3 dimGrid(m, n, 1); return dimGrid; } dim3 getDimBlock(const int m, const int n) { dim3 dimBlock(m, n, 1); return dimBlock; } /********************************** normal MV *********************************/ // matrix/vector multiplication, one thread per row // hipDoubleComplex is complex number for cuda (cuFloatComplex does NOT work/exist) __global__ void matrixVectorMultiplication(hipDoubleComplex *d_m_W, hipDoubleComplex *d_v_y, hipDoubleComplex *d_v_r) { // each thread represents one row in one of the Matrices int threadId = ((gridDim.x * blockIdx.y + blockIdx.x) * blockDim.x * 1) + threadIdx.x; hipDoubleComplex sum = make_cuDoubleComplex(0, 0); // each thread does one row of multiplications for(int i = 0; i < dimB; i++) { sum = 
cuCadd(sum, cuCmul(d_m_W[(threadId%(M*dimU))* dimB + i], d_v_y[ i + (threadId / (M * dimU)) * dimB])); } d_v_r[threadId] = sum; } /********************************** normal MV *********************************/ /************************************ SPMV ************************************/ // matrix/vector multiplication, one thread per row // hipDoubleComplex is complex number for cuda (cuFloatComplex does NOT work/exist) __global__ void sparseMatrixVectorMultiplication(hipDoubleComplex* d_a, int* d_ia, int* d_ja, hipDoubleComplex *d_v_y, hipDoubleComplex* d_a_r) { // each thread represents one row in one of the Matrices int threadId = ((gridDim.x * blockIdx.y + blockIdx.x) * blockDim.x * 1) + threadIdx.x; hipDoubleComplex sum = make_cuDoubleComplex(0, 0); // each thread does one row of non zero multiplications for (int i = d_ia[(threadId%(M*dimU))]; i < d_ia[(threadId%(M*dimU)) +1]; i++) { sum = cuCadd(sum, cuCmul(d_a[i], d_v_y[d_ja[i]+(threadId / (M * dimU)) * dimB])); } d_a_r[threadId] = sum; } /************************************ SPMV ************************************/ int main() { for (int zeros = 0; zeros < 16; zeros++) { for (int iteration = 0; iteration < 3; iteration++) { // declare host matrices and vectors hipDoubleComplex* h_m_W, * h_v_y, * h_v_r, * h_a_r, * h_a_rfft, * h_a; int* h_ia, * h_ja; int total_nnz = 0; // allocate Memory h_m_W = (hipDoubleComplex*)malloc(MATRIXW_BYTES * M); h_v_y = (hipDoubleComplex*)malloc(VECTORY_BYTES * V); h_v_r = (hipDoubleComplex*)malloc(VECTORR_BYTES * M * V); h_a_r = (hipDoubleComplex*)malloc(VECTORR_BYTES * M * V); h_a_rfft = (hipDoubleComplex*)malloc(VECTORR_BYTES * M * V); h_a = (hipDoubleComplex*)malloc(MATRIXW_BYTES * M); h_ia = (int*)malloc((dimU * M + 1) * sizeof(int)); h_ja = (int*)malloc(M * dimU * dimB * sizeof(int)); // declare GPU memory pointers hipDoubleComplex* d_m_W, * d_v_y, * d_v_r, * d_a_r, * d_a_rfft, * d_a; int* d_ia, * d_ja; // filling matrices with rendom amount of zeros per row 
//fillMatrix(h_m_W, MATRIX_SIZE * M); //printMatrices(h_m_W, MATRIX_SIZE, dimB, M); // fill matrices with k zeros per row fillStructuredMatrix(h_m_W, dimU * M, dimB, zeros); //printMatrices(h_m_W, MATRIX_SIZE, dimB, M); fillVector(h_v_y, dimB * V); //printVectors(h_v_y, dimB, V); // make matrices CSR makeCSR(h_m_W, dimU, dimB, h_a, h_ia, h_ja, &total_nnz, M); //printCSR(h_a, h_ia, h_ja, dimU, &total_nnz, M); // allocate GPU memory pointers hipMalloc((void**)&d_m_W, MATRIXW_BYTES * M); hipMalloc((void**)&d_v_y, VECTORY_BYTES * V); hipMalloc((void**)&d_v_r, VECTORR_BYTES * M * V); hipMalloc((void**)&d_a_r, VECTORR_BYTES * M * V); hipMalloc((void**)&d_a_rfft, VECTORR_BYTES * M * V); hipMalloc((void**)&d_a, total_nnz * sizeof(hipDoubleComplex)); hipMalloc((void**)&d_ia, (dimU * M + 1) * sizeof(int)); hipMalloc((void**)&d_ja, total_nnz * sizeof(int)); // calculate the necessary space (same fo MV and SPMV) dim3 dimGrid = getDimGrid(M, V); dim3 dimBlock = getDimBlock(dimU, 1); /********************************** normal MV *********************************/ // transfer the array to the GPU hipMemcpy(d_m_W, h_m_W, M * MATRIXW_BYTES, hipMemcpyHostToDevice); hipMemcpy(d_v_y, h_v_y, V * VECTORY_BYTES, hipMemcpyHostToDevice); hipDeviceSynchronize(); auto start_mv = std::chrono::high_resolution_clock::now(); // launch the kernel matrixVectorMultiplication << <dimGrid, dimBlock >> > (d_m_W, d_v_y, d_v_r); auto finish_mv = std::chrono::high_resolution_clock::now(); std::chrono::duration<double> elapsed_mv = finish_mv - start_mv; // copy back the result array to the CPU hipMemcpy(h_v_r, d_v_r, M * V * VECTORR_BYTES, hipMemcpyDeviceToHost); // free GPU memory allocation hipFree(d_m_W); hipFree(d_v_y); hipFree(d_v_r); /********************************** normal MV *********************************/ hipDeviceSynchronize(); std::this_thread::sleep_for(std::chrono::seconds(5)); /************************************ SPMV ************************************/ //transfer the array to the 
GPU hipMemcpy(d_v_y, h_v_y, V * VECTORY_BYTES, hipMemcpyHostToDevice); hipMemcpy(d_a, h_a, total_nnz * sizeof(hipDoubleComplex), hipMemcpyHostToDevice); hipMemcpy(d_ia, h_ia, (dimU * M + 1) * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_ja, h_ja, total_nnz * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_a_r, h_a_r, VECTORR_BYTES * M * V, hipMemcpyHostToDevice); // create plan for FFT hipfftHandle plan; hipfftPlan1d(&plan, dimU, HIPFFT_Z2Z, M * V); hipDeviceSynchronize(); auto start_spmv = std::chrono::high_resolution_clock::now(); // launch the kernel sparseMatrixVectorMultiplication << <dimGrid, dimBlock >> > (d_a, d_ia, d_ja, d_v_y, d_a_r); auto finish_spmv = std::chrono::high_resolution_clock::now(); // FFT hipfftExecZ2Z(plan, d_a_r, d_a_rfft, HIPFFT_FORWARD); auto finish_spmvfft = std::chrono::high_resolution_clock::now(); hipfftDestroy(plan); // copy back the result array to the CPU hipMemcpy(h_a_r, d_a_r, M * V * VECTORR_BYTES, hipMemcpyDeviceToHost); hipMemcpy(h_a_rfft, d_a_rfft, VECTORR_BYTES * M * V, hipMemcpyDeviceToHost); //printVectors(h_a_r, dimU, M*V); //printVectors(h_a_rfft, dimU, M*V); std::chrono::duration<double> elapsed_spmv = finish_spmv - start_spmv; std::chrono::duration<double> elapsed_spmvfft = finish_spmvfft - start_spmv; // free GPU memory allocation hipFree(d_a_rfft); hipFree(d_a_r); hipFree(d_a); hipFree(d_ia); hipFree(d_ja); /************************************ SPMV ************************************/ hipDeviceSynchronize(); std::this_thread::sleep_for(std::chrono::seconds(5)); /********************************** c++ part ********************************** hipDoubleComplex* cppResult; cppResult = (hipDoubleComplex*)malloc(M * V * VECTORR_BYTES); auto start_cpp = std::chrono::high_resolution_clock::now(); // loops through the result Array, same memory access as cuda implementation for (int i = 0; i < dimU * M * V; i++) { cppResult[i] = make_cuDoubleComplex(0, 0); for (int j = 0; j < dimB; j++) { cppResult[i] = 
cuCadd(cppResult[i], cuCmul(h_m_W[((i % (dimU * M)) * dimB) + j], h_v_y[((i / (dimU * M))) * dimB + j])); } } auto finish_cpp = std::chrono::high_resolution_clock::now(); std::chrono::duration<double> elapsed_cpp = finish_cpp - start_cpp; ********************************** c++ part **********************************/ std::cout << "\n\nRESULTS:\n\n"; //printVectors(a, dimU, V*M); //printVectors(cppResult, dimU, V * M); // Check if MV == SPMV result int errorCount = 0; for (int i = 0; i < dimU * M * V; i++) { if (cuCreal(h_v_r[i]) == cuCreal(h_a_r[i]) && cuCimag(h_v_r[i]) == cuCimag(h_a_r[i])) { continue; } else { errorCount++; } } if (errorCount == 0) { std::cout << "SPMV results equal MV result \n"; } else { std::cout << "SPMV result not equal to MV, number of Errors: " << errorCount << " \n"; } std::cout << "\nComputation time:\nMV: " << elapsed_mv.count() << " s\nSPMV: " << elapsed_spmv.count() << " s\nSPMV+FFT: " << elapsed_spmvfft.count() << " s\n"; //std::cout <<"c++: " << elapsed_cpp.count() << " s\n"; std::cout << "rows: " << dimU << ", cols: " << dimB << ", matrices: " << M << ", vectors: " << V << ", zeros: " << zeros << "\n"; std::this_thread::sleep_for(std::chrono::seconds(5)); } } return 0; }
e73c69b499e9038fc3d7ec658c14e00b18ddadbf.cu
// final comparison: c++, parallel mv multiplication, sparse mv multiplication with fft // CentOS compiling: // export LD_LIBRARY_PATH="/usr/local/cuda-11.0/targets/x86_64-linux/lib/" // nvcc -std=c++11 resultgen.cu -o resultgen -I/usr/local/cuda/targets/x86_64-linux/include/ -L/usr/local/cuda/targets/x86_64-linux/lib/ -lcufft // ./resultgen // Windows compiling: // nvcc resultgen.cu -o resultgen -I/usr/local/cuda/targets/x86_64-linux/include/ -L/usr/local/cuda/targets/x86_64-linux/lib/ -lcufft // resultgen.exe #include <iostream> #include <thread> #include <chrono> #include <cufft.h> #include "make_print.h" #include "cuComplex.h" #define M 500 // number of Matrices we want to multiply #define V 10 // number of Vectors we want to multiply #define dimU 16 // number of senders = rows of Matrix h_m_W #define dimB 128 // number of receivers = columns of Matrix h_m_W #define K 12 // number of zero values per row #define MATRIX_SIZE dimU*dimB #define MATRIXW_BYTES MATRIX_SIZE*sizeof(cuDoubleComplex) #define VECTORY_BYTES dimB*sizeof(cuDoubleComplex) #define VECTORR_BYTES dimU*sizeof(cuDoubleComplex) dim3 getDimGrid(const int m, const int n) { dim3 dimGrid(m, n, 1); return dimGrid; } dim3 getDimBlock(const int m, const int n) { dim3 dimBlock(m, n, 1); return dimBlock; } /********************************** normal MV *********************************/ // matrix/vector multiplication, one thread per row // cuDoubleComplex is complex number for cuda (cuFloatComplex does NOT work/exist) __global__ void matrixVectorMultiplication(cuDoubleComplex *d_m_W, cuDoubleComplex *d_v_y, cuDoubleComplex *d_v_r) { // each thread represents one row in one of the Matrices int threadId = ((gridDim.x * blockIdx.y + blockIdx.x) * blockDim.x * 1) + threadIdx.x; cuDoubleComplex sum = make_cuDoubleComplex(0, 0); // each thread does one row of multiplications for(int i = 0; i < dimB; i++) { sum = cuCadd(sum, cuCmul(d_m_W[(threadId%(M*dimU))* dimB + i], d_v_y[ i + (threadId / (M * dimU)) * dimB])); 
} d_v_r[threadId] = sum; } /********************************** normal MV *********************************/ /************************************ SPMV ************************************/ // matrix/vector multiplication, one thread per row // cuDoubleComplex is complex number for cuda (cuFloatComplex does NOT work/exist) __global__ void sparseMatrixVectorMultiplication(cuDoubleComplex* d_a, int* d_ia, int* d_ja, cuDoubleComplex *d_v_y, cuDoubleComplex* d_a_r) { // each thread represents one row in one of the Matrices int threadId = ((gridDim.x * blockIdx.y + blockIdx.x) * blockDim.x * 1) + threadIdx.x; cuDoubleComplex sum = make_cuDoubleComplex(0, 0); // each thread does one row of non zero multiplications for (int i = d_ia[(threadId%(M*dimU))]; i < d_ia[(threadId%(M*dimU)) +1]; i++) { sum = cuCadd(sum, cuCmul(d_a[i], d_v_y[d_ja[i]+(threadId / (M * dimU)) * dimB])); } d_a_r[threadId] = sum; } /************************************ SPMV ************************************/ int main() { for (int zeros = 0; zeros < 16; zeros++) { for (int iteration = 0; iteration < 3; iteration++) { // declare host matrices and vectors cuDoubleComplex* h_m_W, * h_v_y, * h_v_r, * h_a_r, * h_a_rfft, * h_a; int* h_ia, * h_ja; int total_nnz = 0; // allocate Memory h_m_W = (cuDoubleComplex*)malloc(MATRIXW_BYTES * M); h_v_y = (cuDoubleComplex*)malloc(VECTORY_BYTES * V); h_v_r = (cuDoubleComplex*)malloc(VECTORR_BYTES * M * V); h_a_r = (cuDoubleComplex*)malloc(VECTORR_BYTES * M * V); h_a_rfft = (cuDoubleComplex*)malloc(VECTORR_BYTES * M * V); h_a = (cuDoubleComplex*)malloc(MATRIXW_BYTES * M); h_ia = (int*)malloc((dimU * M + 1) * sizeof(int)); h_ja = (int*)malloc(M * dimU * dimB * sizeof(int)); // declare GPU memory pointers cuDoubleComplex* d_m_W, * d_v_y, * d_v_r, * d_a_r, * d_a_rfft, * d_a; int* d_ia, * d_ja; // filling matrices with rendom amount of zeros per row //fillMatrix(h_m_W, MATRIX_SIZE * M); //printMatrices(h_m_W, MATRIX_SIZE, dimB, M); // fill matrices with k zeros per row 
fillStructuredMatrix(h_m_W, dimU * M, dimB, zeros); //printMatrices(h_m_W, MATRIX_SIZE, dimB, M); fillVector(h_v_y, dimB * V); //printVectors(h_v_y, dimB, V); // make matrices CSR makeCSR(h_m_W, dimU, dimB, h_a, h_ia, h_ja, &total_nnz, M); //printCSR(h_a, h_ia, h_ja, dimU, &total_nnz, M); // allocate GPU memory pointers cudaMalloc((void**)&d_m_W, MATRIXW_BYTES * M); cudaMalloc((void**)&d_v_y, VECTORY_BYTES * V); cudaMalloc((void**)&d_v_r, VECTORR_BYTES * M * V); cudaMalloc((void**)&d_a_r, VECTORR_BYTES * M * V); cudaMalloc((void**)&d_a_rfft, VECTORR_BYTES * M * V); cudaMalloc((void**)&d_a, total_nnz * sizeof(cuDoubleComplex)); cudaMalloc((void**)&d_ia, (dimU * M + 1) * sizeof(int)); cudaMalloc((void**)&d_ja, total_nnz * sizeof(int)); // calculate the necessary space (same fo MV and SPMV) dim3 dimGrid = getDimGrid(M, V); dim3 dimBlock = getDimBlock(dimU, 1); /********************************** normal MV *********************************/ // transfer the array to the GPU cudaMemcpy(d_m_W, h_m_W, M * MATRIXW_BYTES, cudaMemcpyHostToDevice); cudaMemcpy(d_v_y, h_v_y, V * VECTORY_BYTES, cudaMemcpyHostToDevice); cudaDeviceSynchronize(); auto start_mv = std::chrono::high_resolution_clock::now(); // launch the kernel matrixVectorMultiplication << <dimGrid, dimBlock >> > (d_m_W, d_v_y, d_v_r); auto finish_mv = std::chrono::high_resolution_clock::now(); std::chrono::duration<double> elapsed_mv = finish_mv - start_mv; // copy back the result array to the CPU cudaMemcpy(h_v_r, d_v_r, M * V * VECTORR_BYTES, cudaMemcpyDeviceToHost); // free GPU memory allocation cudaFree(d_m_W); cudaFree(d_v_y); cudaFree(d_v_r); /********************************** normal MV *********************************/ cudaDeviceSynchronize(); std::this_thread::sleep_for(std::chrono::seconds(5)); /************************************ SPMV ************************************/ //transfer the array to the GPU cudaMemcpy(d_v_y, h_v_y, V * VECTORY_BYTES, cudaMemcpyHostToDevice); cudaMemcpy(d_a, h_a, total_nnz * 
sizeof(cuDoubleComplex), cudaMemcpyHostToDevice); cudaMemcpy(d_ia, h_ia, (dimU * M + 1) * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_ja, h_ja, total_nnz * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_a_r, h_a_r, VECTORR_BYTES * M * V, cudaMemcpyHostToDevice); // create plan for FFT cufftHandle plan; cufftPlan1d(&plan, dimU, CUFFT_Z2Z, M * V); cudaDeviceSynchronize(); auto start_spmv = std::chrono::high_resolution_clock::now(); // launch the kernel sparseMatrixVectorMultiplication << <dimGrid, dimBlock >> > (d_a, d_ia, d_ja, d_v_y, d_a_r); auto finish_spmv = std::chrono::high_resolution_clock::now(); // FFT cufftExecZ2Z(plan, d_a_r, d_a_rfft, CUFFT_FORWARD); auto finish_spmvfft = std::chrono::high_resolution_clock::now(); cufftDestroy(plan); // copy back the result array to the CPU cudaMemcpy(h_a_r, d_a_r, M * V * VECTORR_BYTES, cudaMemcpyDeviceToHost); cudaMemcpy(h_a_rfft, d_a_rfft, VECTORR_BYTES * M * V, cudaMemcpyDeviceToHost); //printVectors(h_a_r, dimU, M*V); //printVectors(h_a_rfft, dimU, M*V); std::chrono::duration<double> elapsed_spmv = finish_spmv - start_spmv; std::chrono::duration<double> elapsed_spmvfft = finish_spmvfft - start_spmv; // free GPU memory allocation cudaFree(d_a_rfft); cudaFree(d_a_r); cudaFree(d_a); cudaFree(d_ia); cudaFree(d_ja); /************************************ SPMV ************************************/ cudaDeviceSynchronize(); std::this_thread::sleep_for(std::chrono::seconds(5)); /********************************** c++ part ********************************** cuDoubleComplex* cppResult; cppResult = (cuDoubleComplex*)malloc(M * V * VECTORR_BYTES); auto start_cpp = std::chrono::high_resolution_clock::now(); // loops through the result Array, same memory access as cuda implementation for (int i = 0; i < dimU * M * V; i++) { cppResult[i] = make_cuDoubleComplex(0, 0); for (int j = 0; j < dimB; j++) { cppResult[i] = cuCadd(cppResult[i], cuCmul(h_m_W[((i % (dimU * M)) * dimB) + j], h_v_y[((i / (dimU * M))) * dimB + j])); } } 
auto finish_cpp = std::chrono::high_resolution_clock::now(); std::chrono::duration<double> elapsed_cpp = finish_cpp - start_cpp; ********************************** c++ part **********************************/ std::cout << "\n\nRESULTS:\n\n"; //printVectors(a, dimU, V*M); //printVectors(cppResult, dimU, V * M); // Check if MV == SPMV result int errorCount = 0; for (int i = 0; i < dimU * M * V; i++) { if (cuCreal(h_v_r[i]) == cuCreal(h_a_r[i]) && cuCimag(h_v_r[i]) == cuCimag(h_a_r[i])) { continue; } else { errorCount++; } } if (errorCount == 0) { std::cout << "SPMV results equal MV result \n"; } else { std::cout << "SPMV result not equal to MV, number of Errors: " << errorCount << " \n"; } std::cout << "\nComputation time:\nMV: " << elapsed_mv.count() << " s\nSPMV: " << elapsed_spmv.count() << " s\nSPMV+FFT: " << elapsed_spmvfft.count() << " s\n"; //std::cout <<"c++: " << elapsed_cpp.count() << " s\n"; std::cout << "rows: " << dimU << ", cols: " << dimB << ", matrices: " << M << ", vectors: " << V << ", zeros: " << zeros << "\n"; std::this_thread::sleep_for(std::chrono::seconds(5)); } } return 0; }
8c082e5b82bddffb00e5f118d14d0e90d7a55e0f.hip
// !!! This is a file automatically generated by hipify!!! #include "chainerx/cuda/cuda_device.h" #include <cmath> #include <cstdint> #include <hip/hip_runtime.h> #include "chainerx/array.h" #include "chainerx/axes.h" #include "chainerx/cuda/cuda_runtime.h" #include "chainerx/cuda/cuda_set_device_scope.h" #include "chainerx/cuda/data_type.cuh" #include "chainerx/cuda/kernel_regist.h" #include "chainerx/cuda/numeric.cuh" #include "chainerx/cuda/numeric_limits.cuh" #include "chainerx/cuda/reduce.cuh" #include "chainerx/device.h" #include "chainerx/dtype.h" #include "chainerx/kernels/math.h" #include "chainerx/kernels/sorting.h" #include "chainerx/macro.h" #include "chainerx/numeric_limits.h" #include "chainerx/reduction_kernel_arg.h" #include "chainerx/routines/math.h" #include "chainerx/shape.h" namespace chainerx { namespace cuda { namespace { template <typename T> struct ArgMaxImpl { using CudaType = cuda_internal::DataType<T>; struct MaxAndArgMax { CudaType max; int64_t argmax; }; __device__ MaxAndArgMax Identity() { return {CudaType{}, -1}; } __device__ MaxAndArgMax MapIn(CudaType in, int64_t index) { return {in, index}; } __device__ void Reduce(MaxAndArgMax next, MaxAndArgMax& accum) { // Note that `next` can be the return value of `Identity()` in which case `accum` should not be updated. 
if (next.argmax != -1 && (accum.argmax == -1 || accum.max < next.max)) { accum = next; } } __device__ int64_t MapOut(MaxAndArgMax accum) { return accum.argmax; } }; class CudaArgMaxKernel : public ArgMaxKernel { public: void Call(const Array& a, const Axes& axis, const Array& out) override { Device& device = a.device(); device.CheckDevicesCompatible(a, out); CudaSetDeviceScope scope{device.index()}; VisitDtype(a.dtype(), [&](auto pt) { using T = typename decltype(pt)::type; Reduce<T, int64_t>(a, axis, out, ArgMaxImpl<T>{}); }); } }; CHAINERX_CUDA_REGISTER_KERNEL(ArgMaxKernel, CudaArgMaxKernel); } // namespace namespace { template <typename T> struct ArgMinImpl { using CudaType = cuda_internal::DataType<T>; struct MinAndArgMin { CudaType min; int64_t argmin; }; __device__ MinAndArgMin Identity() { return {CudaType{}, -1}; } __device__ MinAndArgMin MapIn(CudaType in, int64_t index) { return {in, index}; } __device__ void Reduce(MinAndArgMin next, MinAndArgMin& accum) { // Note that `next` can be the return value of `Identity()` in which case `accum` should not be updated. 
if (next.argmin != -1 && (accum.argmin == -1 || accum.min > next.min)) { accum = next; } } __device__ int64_t MapOut(MinAndArgMin accum) { return accum.argmin; } }; class CudaArgMinKernel : public ArgMinKernel { public: void Call(const Array& a, const Axes& axis, const Array& out) override { Device& device = a.device(); device.CheckDevicesCompatible(a, out); CudaSetDeviceScope scope{device.index()}; VisitDtype(a.dtype(), [&](auto pt) { using T = typename decltype(pt)::type; Reduce<T, int64_t>(a, axis, out, ArgMinImpl<T>{}); }); } }; CHAINERX_CUDA_REGISTER_KERNEL(ArgMinKernel, CudaArgMinKernel); } // namespace namespace { template <typename In, typename Out> struct SumImpl { using InCudaType = cuda_internal::DataType<In>; using OutCudaType = cuda_internal::DataType<Out>; __device__ OutCudaType Identity() { return OutCudaType{0}; } __device__ OutCudaType MapIn(InCudaType in, int64_t /*index*/) { return static_cast<OutCudaType>(in); } __device__ void Reduce(OutCudaType next, OutCudaType& accum) { accum += next; } __device__ OutCudaType MapOut(OutCudaType accum) { return accum; } }; class CudaSumKernel : public SumKernel { public: void Call(const Array& a, const Axes& axis, const Array& out) override { Device& device = a.device(); CHAINERX_ASSERT(internal::IsValidReductionShape(a.shape(), axis, out.shape(), true)); device.CheckDevicesCompatible(a, out); CudaSetDeviceScope scope{device.index()}; auto do_sum = [&a, &axis, &out](auto in_pt, auto out_pt) { using In = typename decltype(in_pt)::type; using Out = typename decltype(out_pt)::type; Reduce<In, Out>(a, axis, out, SumImpl<In, Out>{}); }; VisitDtype(out.dtype(), [a_dtype = a.dtype(), &do_sum](auto out_pt) { VisitDtype(a_dtype, do_sum, out_pt); }); } }; CHAINERX_CUDA_REGISTER_KERNEL(SumKernel, CudaSumKernel); } // namespace } // namespace cuda } // namespace chainerx
8c082e5b82bddffb00e5f118d14d0e90d7a55e0f.cu
#include "chainerx/cuda/cuda_device.h" #include <cmath> #include <cstdint> #include <cuda_runtime.h> #include "chainerx/array.h" #include "chainerx/axes.h" #include "chainerx/cuda/cuda_runtime.h" #include "chainerx/cuda/cuda_set_device_scope.h" #include "chainerx/cuda/data_type.cuh" #include "chainerx/cuda/kernel_regist.h" #include "chainerx/cuda/numeric.cuh" #include "chainerx/cuda/numeric_limits.cuh" #include "chainerx/cuda/reduce.cuh" #include "chainerx/device.h" #include "chainerx/dtype.h" #include "chainerx/kernels/math.h" #include "chainerx/kernels/sorting.h" #include "chainerx/macro.h" #include "chainerx/numeric_limits.h" #include "chainerx/reduction_kernel_arg.h" #include "chainerx/routines/math.h" #include "chainerx/shape.h" namespace chainerx { namespace cuda { namespace { template <typename T> struct ArgMaxImpl { using CudaType = cuda_internal::DataType<T>; struct MaxAndArgMax { CudaType max; int64_t argmax; }; __device__ MaxAndArgMax Identity() { return {CudaType{}, -1}; } __device__ MaxAndArgMax MapIn(CudaType in, int64_t index) { return {in, index}; } __device__ void Reduce(MaxAndArgMax next, MaxAndArgMax& accum) { // Note that `next` can be the return value of `Identity()` in which case `accum` should not be updated. 
if (next.argmax != -1 && (accum.argmax == -1 || accum.max < next.max)) { accum = next; } } __device__ int64_t MapOut(MaxAndArgMax accum) { return accum.argmax; } }; class CudaArgMaxKernel : public ArgMaxKernel { public: void Call(const Array& a, const Axes& axis, const Array& out) override { Device& device = a.device(); device.CheckDevicesCompatible(a, out); CudaSetDeviceScope scope{device.index()}; VisitDtype(a.dtype(), [&](auto pt) { using T = typename decltype(pt)::type; Reduce<T, int64_t>(a, axis, out, ArgMaxImpl<T>{}); }); } }; CHAINERX_CUDA_REGISTER_KERNEL(ArgMaxKernel, CudaArgMaxKernel); } // namespace namespace { template <typename T> struct ArgMinImpl { using CudaType = cuda_internal::DataType<T>; struct MinAndArgMin { CudaType min; int64_t argmin; }; __device__ MinAndArgMin Identity() { return {CudaType{}, -1}; } __device__ MinAndArgMin MapIn(CudaType in, int64_t index) { return {in, index}; } __device__ void Reduce(MinAndArgMin next, MinAndArgMin& accum) { // Note that `next` can be the return value of `Identity()` in which case `accum` should not be updated. 
if (next.argmin != -1 && (accum.argmin == -1 || accum.min > next.min)) { accum = next; } } __device__ int64_t MapOut(MinAndArgMin accum) { return accum.argmin; } }; class CudaArgMinKernel : public ArgMinKernel { public: void Call(const Array& a, const Axes& axis, const Array& out) override { Device& device = a.device(); device.CheckDevicesCompatible(a, out); CudaSetDeviceScope scope{device.index()}; VisitDtype(a.dtype(), [&](auto pt) { using T = typename decltype(pt)::type; Reduce<T, int64_t>(a, axis, out, ArgMinImpl<T>{}); }); } }; CHAINERX_CUDA_REGISTER_KERNEL(ArgMinKernel, CudaArgMinKernel); } // namespace namespace { template <typename In, typename Out> struct SumImpl { using InCudaType = cuda_internal::DataType<In>; using OutCudaType = cuda_internal::DataType<Out>; __device__ OutCudaType Identity() { return OutCudaType{0}; } __device__ OutCudaType MapIn(InCudaType in, int64_t /*index*/) { return static_cast<OutCudaType>(in); } __device__ void Reduce(OutCudaType next, OutCudaType& accum) { accum += next; } __device__ OutCudaType MapOut(OutCudaType accum) { return accum; } }; class CudaSumKernel : public SumKernel { public: void Call(const Array& a, const Axes& axis, const Array& out) override { Device& device = a.device(); CHAINERX_ASSERT(internal::IsValidReductionShape(a.shape(), axis, out.shape(), true)); device.CheckDevicesCompatible(a, out); CudaSetDeviceScope scope{device.index()}; auto do_sum = [&a, &axis, &out](auto in_pt, auto out_pt) { using In = typename decltype(in_pt)::type; using Out = typename decltype(out_pt)::type; Reduce<In, Out>(a, axis, out, SumImpl<In, Out>{}); }; VisitDtype(out.dtype(), [a_dtype = a.dtype(), &do_sum](auto out_pt) { VisitDtype(a_dtype, do_sum, out_pt); }); } }; CHAINERX_CUDA_REGISTER_KERNEL(SumKernel, CudaSumKernel); } // namespace } // namespace cuda } // namespace chainerx
3dda27e38d5cffe6511d6bf2d54a0973a37a925f.hip
// !!! This is a file automatically generated by hipify!!! /* * PAVLE - Parallel Variable-Length Encoder for CUDA. Main file. * * Copyright (C) 2009 Ana Balevic <ana.balevic@gmail.com> * All rights reserved. * * This program is free software; you can redistribute it and/or modify it under the terms of the * MIT License. Read the full licence: http://www.opensource.org/licenses/mit-license.php * * If you find this program useful, please contact me and reference PAVLE home page in your work. * */ #include <hip/hip_runtime.h> #include "helper_cuda.h" #include "print_helpers.h" #include "comparison_helpers.h" #include "stats_logger.h" #include "load_data.h" #include <sys/time.h> #include "vlc_kernel_sm64huff.cu" #include "scan.hip" #include "pack_kernels.cu" #include "cpuencode.h" #include "../timing.h" void runVLCTest(char *file_name, uint num_block_threads, bool unified=false, uint num_blocks=1); extern "C" void cpu_vlc_encode(unsigned int* indata, unsigned int num_elements, unsigned int* outdata, unsigned int *outsize, unsigned int *codewords, unsigned int* codewordlens); int main(int argc, char* argv[]){ unsigned int num_block_threads = 256; if (argc > 2) { bool unified = (bool) (atoi(argv[1]) != 0); for (int i=2; i<argc; i++) { runVLCTest(argv[i], num_block_threads, unified); } } else if (argc == 2) { bool unified = (bool) (atoi(argv[1]) != 0); runVLCTest(NULL, num_block_threads, 1024, unified); } checkCudaErrors(hipDeviceReset()); return 0; } void runVLCTest(char *file_name, uint num_block_threads, bool unified, uint num_blocks) { float time_pre = 0; float time_post = 0; float time_serial = 0; float time_copy_in = 0; float time_copy_out = 0; float time_kernel = 0; float time_malloc = 0; float time_free = 0; printf("CUDA! 
Starting VLC Tests!\n"); unsigned int num_elements; //uint num_elements = num_blocks * num_block_threads; unsigned int mem_size; //uint mem_size = num_elements * sizeof(int); unsigned int symbol_type_size = sizeof(int); //////// LOAD DATA /////////////// double H; // entropy TIMESTAMP(t0); initParams(file_name, num_block_threads, num_blocks, num_elements, mem_size, symbol_type_size); printf("Parameters: num_elements: %d, num_blocks: %d, num_block_threads: %d\n----------------------------\n", num_elements, num_blocks, num_block_threads); TIMESTAMP(t1); time_pre += ELAPSED(t0, t1); ////////LOAD DATA /////////////// uint *sourceData; uint *destData; uint *crefData; crefData= (uint*) malloc(mem_size); uint *codewords; uint *codewordlens; if (unified) { checkCudaErrors(hipMallocManaged(&sourceData, mem_size)); checkCudaErrors(hipMallocManaged(&destData, mem_size)); checkCudaErrors(hipMallocManaged(&codewords, NUM_SYMBOLS * symbol_type_size)); checkCudaErrors(hipMallocManaged(&codewordlens, NUM_SYMBOLS * symbol_type_size)); } else { sourceData = (uint*) malloc(mem_size); destData = (uint*) malloc(mem_size); codewords = (uint*) malloc(NUM_SYMBOLS*symbol_type_size); codewordlens = (uint*) malloc(NUM_SYMBOLS*symbol_type_size); } uint *cw32 = (uint*) malloc(mem_size); uint *cw32len = (uint*) malloc(mem_size); uint *cw32idx = (uint*) malloc(mem_size); uint *cindex2= (uint*) malloc(num_blocks*sizeof(int)); TIMESTAMP(t2); time_malloc += ELAPSED(t1, t2); memset(sourceData, 0, mem_size); memset(destData, 0, mem_size); memset(crefData, 0, mem_size); memset(cw32, 0, mem_size); memset(cw32len, 0, mem_size); memset(cw32idx, 0, mem_size); memset(codewords, 0, NUM_SYMBOLS*symbol_type_size); memset(codewordlens, 0, NUM_SYMBOLS*symbol_type_size); memset(cindex2, 0, num_blocks*sizeof(int)); TIMESTAMP(t3); time_pre += ELAPSED(t2, t3); //////// LOAD DATA /////////////// loadData(file_name, sourceData, codewords, codewordlens, num_elements, mem_size, H); //////// LOAD DATA /////////////// 
TIMESTAMP(t3p); unsigned int *d_sourceData, *d_destData, *d_destDataPacked; unsigned int *d_codewords, *d_codewordlens; unsigned int *d_cw32, *d_cw32len, *d_cw32idx, *d_cindex, *d_cindex2; if (unified) { checkCudaErrors(hipMallocManaged((void**) &d_destDataPacked, mem_size)); } else { checkCudaErrors(hipMalloc((void**) &d_sourceData, mem_size)); checkCudaErrors(hipMalloc((void**) &d_destData, mem_size)); checkCudaErrors(hipMalloc((void**) &d_destDataPacked, mem_size)); checkCudaErrors(hipMalloc((void**) &d_codewords, NUM_SYMBOLS*symbol_type_size)); checkCudaErrors(hipMalloc((void**) &d_codewordlens, NUM_SYMBOLS*symbol_type_size)); } checkCudaErrors(hipMalloc((void**) &d_cw32, mem_size)); checkCudaErrors(hipMalloc((void**) &d_cw32len, mem_size)); checkCudaErrors(hipMalloc((void**) &d_cw32idx, mem_size)); checkCudaErrors(hipMalloc((void**)&d_cindex, num_blocks*sizeof(unsigned int))); checkCudaErrors(hipMalloc((void**)&d_cindex2, num_blocks*sizeof(unsigned int))); TIMESTAMP(t4); time_malloc += ELAPSED(t3, t4); if (unified) { d_sourceData = sourceData; d_codewords = codewords; d_codewordlens = codewordlens; d_destData = destData; } else { checkCudaErrors(hipMemcpy(d_sourceData, sourceData, mem_size, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_codewords, codewords, NUM_SYMBOLS*symbol_type_size, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_codewordlens, codewordlens, NUM_SYMBOLS*symbol_type_size, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_destData, destData, mem_size, hipMemcpyHostToDevice)); } TIMESTAMP(t5); time_copy_in += ELAPSED(t4, t5); dim3 grid_size(num_blocks,1,1); dim3 block_size(num_block_threads, 1, 1); unsigned int sm_size; unsigned int NT = 10; //number of runs for each execution time //////////////////* CPU ENCODER */////////////////////////////////// unsigned int refbytesize; cpu_vlc_encode((unsigned int*)sourceData, num_elements, (unsigned int*)crefData, &refbytesize, codewords, codewordlens); unsigned int num_ints = 
refbytesize/4 + ((refbytesize%4 ==0)?0:1); //////////////////* END CPU */////////////////////////////////// //////////////////* SM64HUFF KERNEL */////////////////////////////////// grid_size.x = num_blocks; block_size.x = num_block_threads; sm_size = block_size.x*sizeof(unsigned int); #ifdef CACHECWLUT sm_size = 2*NUM_SYMBOLS*sizeof(int) + block_size.x*sizeof(unsigned int); #endif hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); TIMESTAMP(t6); time_serial += ELAPSED(t5, t6); hipEventRecord( start, 0 ); for (int i=0; i<NT; i++) { hipLaunchKernelGGL(( vlc_encode_kernel_sm64huff), dim3(grid_size), dim3(block_size), sm_size, 0, d_sourceData, d_codewords, d_codewordlens, d_cw32, d_cw32len, d_cw32idx, d_destData, d_cindex); //testedOK2 } hipDeviceSynchronize(); hipEventRecord( stop, 0 ) ; hipEventSynchronize( stop ) ; TIMESTAMP(t7); time_kernel += ELAPSED(t6, t7); float elapsedTime; hipEventElapsedTime( &elapsedTime, start, stop ) ; printf("CUDA-reported GPU Encoding time (SM64HUFF): %f (ms)\n", elapsedTime/NT); //////////////////* END KERNEL */////////////////////////////////// unsigned int num_scan_elements = grid_size.x; preallocBlockSums(num_scan_elements); hipMemset(d_destDataPacked, 0, mem_size); printf("Num_blocks to be passed to scan is %d.\n", num_scan_elements); hipStream_t stream; checkCudaErrors(hipStreamCreate(&stream)); prescanArray(d_cindex2, d_cindex, num_scan_elements, stream); hipLaunchKernelGGL(( pack2), dim3(num_scan_elements/16), dim3(16), 0, stream, (unsigned int*)d_destData, d_cindex, d_cindex2, (unsigned int*)d_destDataPacked, num_elements/num_scan_elements); checkCudaErrors(hipStreamSynchronize(stream)); TIMESTAMP(t8); time_kernel += ELAPSED(t7, t8); deallocBlockSums(); TIMESTAMP(t9); time_free += ELAPSED(t8, t9); checkCudaErrors(hipMemcpy(destData, d_destDataPacked, mem_size, hipMemcpyDeviceToHost)); TIMESTAMP(t10); time_copy_out += ELAPSED(t9, t10); compare_vectors((unsigned int*)crefData, (unsigned int*)destData, 
num_ints); TIMESTAMP(t11); time_post += ELAPSED(t10, t11); if (unified) { checkCudaErrors(hipFree(sourceData)); checkCudaErrors(hipFree(destData)); checkCudaErrors(hipFree(codewords)); checkCudaErrors(hipFree(codewordlens)); } else { free(sourceData); free(destData); free(codewords); free(codewordlens); free(cw32); free(cw32len); free(crefData); checkCudaErrors(hipFree(d_sourceData)); checkCudaErrors(hipFree(d_destData)); checkCudaErrors(hipFree(d_destDataPacked)); checkCudaErrors(hipFree(d_codewords)); checkCudaErrors(hipFree(d_codewordlens)); } checkCudaErrors(hipFree(d_cw32)); checkCudaErrors(hipFree(d_cw32len)); checkCudaErrors(hipFree(d_cw32idx)); checkCudaErrors(hipFree(d_cindex)); checkCudaErrors(hipFree(d_cindex2)); free(cindex2); TIMESTAMP(t12); time_free += ELAPSED(t11, t12); printf("====Timing info====\n"); printf("time malloc = %f ms\n", time_malloc); printf("time pre = %f ms\n", time_pre); printf("time copyIn = %f ms\n", time_copy_in); printf("time kernel = %f ms\n", time_kernel); printf("time serial = %f ms\n", time_serial); printf("time copyOut = %f ms\n", time_copy_out); printf("time post = %f ms\n", time_post); printf("time free = %f ms\n", time_free); printf("time end-to-end = %f ms\n", ELAPSED(t0, t12)); exit(EXIT_SUCCESS); }
3dda27e38d5cffe6511d6bf2d54a0973a37a925f.cu
/* * PAVLE - Parallel Variable-Length Encoder for CUDA. Main file. * * Copyright (C) 2009 Ana Balevic <ana.balevic@gmail.com> * All rights reserved. * * This program is free software; you can redistribute it and/or modify it under the terms of the * MIT License. Read the full licence: http://www.opensource.org/licenses/mit-license.php * * If you find this program useful, please contact me and reference PAVLE home page in your work. * */ #include <cuda_runtime.h> #include "helper_cuda.h" #include "print_helpers.h" #include "comparison_helpers.h" #include "stats_logger.h" #include "load_data.h" #include <sys/time.h> #include "vlc_kernel_sm64huff.cu" #include "scan.cu" #include "pack_kernels.cu" #include "cpuencode.h" #include "../timing.h" void runVLCTest(char *file_name, uint num_block_threads, bool unified=false, uint num_blocks=1); extern "C" void cpu_vlc_encode(unsigned int* indata, unsigned int num_elements, unsigned int* outdata, unsigned int *outsize, unsigned int *codewords, unsigned int* codewordlens); int main(int argc, char* argv[]){ unsigned int num_block_threads = 256; if (argc > 2) { bool unified = (bool) (atoi(argv[1]) != 0); for (int i=2; i<argc; i++) { runVLCTest(argv[i], num_block_threads, unified); } } else if (argc == 2) { bool unified = (bool) (atoi(argv[1]) != 0); runVLCTest(NULL, num_block_threads, 1024, unified); } checkCudaErrors(cudaThreadExit()); return 0; } void runVLCTest(char *file_name, uint num_block_threads, bool unified, uint num_blocks) { float time_pre = 0; float time_post = 0; float time_serial = 0; float time_copy_in = 0; float time_copy_out = 0; float time_kernel = 0; float time_malloc = 0; float time_free = 0; printf("CUDA! 
Starting VLC Tests!\n"); unsigned int num_elements; //uint num_elements = num_blocks * num_block_threads; unsigned int mem_size; //uint mem_size = num_elements * sizeof(int); unsigned int symbol_type_size = sizeof(int); //////// LOAD DATA /////////////// double H; // entropy TIMESTAMP(t0); initParams(file_name, num_block_threads, num_blocks, num_elements, mem_size, symbol_type_size); printf("Parameters: num_elements: %d, num_blocks: %d, num_block_threads: %d\n----------------------------\n", num_elements, num_blocks, num_block_threads); TIMESTAMP(t1); time_pre += ELAPSED(t0, t1); ////////LOAD DATA /////////////// uint *sourceData; uint *destData; uint *crefData; crefData= (uint*) malloc(mem_size); uint *codewords; uint *codewordlens; if (unified) { checkCudaErrors(cudaMallocManaged(&sourceData, mem_size)); checkCudaErrors(cudaMallocManaged(&destData, mem_size)); checkCudaErrors(cudaMallocManaged(&codewords, NUM_SYMBOLS * symbol_type_size)); checkCudaErrors(cudaMallocManaged(&codewordlens, NUM_SYMBOLS * symbol_type_size)); } else { sourceData = (uint*) malloc(mem_size); destData = (uint*) malloc(mem_size); codewords = (uint*) malloc(NUM_SYMBOLS*symbol_type_size); codewordlens = (uint*) malloc(NUM_SYMBOLS*symbol_type_size); } uint *cw32 = (uint*) malloc(mem_size); uint *cw32len = (uint*) malloc(mem_size); uint *cw32idx = (uint*) malloc(mem_size); uint *cindex2= (uint*) malloc(num_blocks*sizeof(int)); TIMESTAMP(t2); time_malloc += ELAPSED(t1, t2); memset(sourceData, 0, mem_size); memset(destData, 0, mem_size); memset(crefData, 0, mem_size); memset(cw32, 0, mem_size); memset(cw32len, 0, mem_size); memset(cw32idx, 0, mem_size); memset(codewords, 0, NUM_SYMBOLS*symbol_type_size); memset(codewordlens, 0, NUM_SYMBOLS*symbol_type_size); memset(cindex2, 0, num_blocks*sizeof(int)); TIMESTAMP(t3); time_pre += ELAPSED(t2, t3); //////// LOAD DATA /////////////// loadData(file_name, sourceData, codewords, codewordlens, num_elements, mem_size, H); //////// LOAD DATA 
/////////////// TIMESTAMP(t3p); unsigned int *d_sourceData, *d_destData, *d_destDataPacked; unsigned int *d_codewords, *d_codewordlens; unsigned int *d_cw32, *d_cw32len, *d_cw32idx, *d_cindex, *d_cindex2; if (unified) { checkCudaErrors(cudaMallocManaged((void**) &d_destDataPacked, mem_size)); } else { checkCudaErrors(cudaMalloc((void**) &d_sourceData, mem_size)); checkCudaErrors(cudaMalloc((void**) &d_destData, mem_size)); checkCudaErrors(cudaMalloc((void**) &d_destDataPacked, mem_size)); checkCudaErrors(cudaMalloc((void**) &d_codewords, NUM_SYMBOLS*symbol_type_size)); checkCudaErrors(cudaMalloc((void**) &d_codewordlens, NUM_SYMBOLS*symbol_type_size)); } checkCudaErrors(cudaMalloc((void**) &d_cw32, mem_size)); checkCudaErrors(cudaMalloc((void**) &d_cw32len, mem_size)); checkCudaErrors(cudaMalloc((void**) &d_cw32idx, mem_size)); checkCudaErrors(cudaMalloc((void**)&d_cindex, num_blocks*sizeof(unsigned int))); checkCudaErrors(cudaMalloc((void**)&d_cindex2, num_blocks*sizeof(unsigned int))); TIMESTAMP(t4); time_malloc += ELAPSED(t3, t4); if (unified) { d_sourceData = sourceData; d_codewords = codewords; d_codewordlens = codewordlens; d_destData = destData; } else { checkCudaErrors(cudaMemcpy(d_sourceData, sourceData, mem_size, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_codewords, codewords, NUM_SYMBOLS*symbol_type_size, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_codewordlens, codewordlens, NUM_SYMBOLS*symbol_type_size, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_destData, destData, mem_size, cudaMemcpyHostToDevice)); } TIMESTAMP(t5); time_copy_in += ELAPSED(t4, t5); dim3 grid_size(num_blocks,1,1); dim3 block_size(num_block_threads, 1, 1); unsigned int sm_size; unsigned int NT = 10; //number of runs for each execution time //////////////////* CPU ENCODER */////////////////////////////////// unsigned int refbytesize; cpu_vlc_encode((unsigned int*)sourceData, num_elements, (unsigned int*)crefData, &refbytesize, codewords, codewordlens); 
unsigned int num_ints = refbytesize/4 + ((refbytesize%4 ==0)?0:1); //////////////////* END CPU */////////////////////////////////// //////////////////* SM64HUFF KERNEL */////////////////////////////////// grid_size.x = num_blocks; block_size.x = num_block_threads; sm_size = block_size.x*sizeof(unsigned int); #ifdef CACHECWLUT sm_size = 2*NUM_SYMBOLS*sizeof(int) + block_size.x*sizeof(unsigned int); #endif cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); TIMESTAMP(t6); time_serial += ELAPSED(t5, t6); cudaEventRecord( start, 0 ); for (int i=0; i<NT; i++) { vlc_encode_kernel_sm64huff<<<grid_size, block_size, sm_size>>>(d_sourceData, d_codewords, d_codewordlens, d_cw32, d_cw32len, d_cw32idx, d_destData, d_cindex); //testedOK2 } cudaThreadSynchronize(); cudaEventRecord( stop, 0 ) ; cudaEventSynchronize( stop ) ; TIMESTAMP(t7); time_kernel += ELAPSED(t6, t7); float elapsedTime; cudaEventElapsedTime( &elapsedTime, start, stop ) ; printf("CUDA-reported GPU Encoding time (SM64HUFF): %f (ms)\n", elapsedTime/NT); //////////////////* END KERNEL */////////////////////////////////// unsigned int num_scan_elements = grid_size.x; preallocBlockSums(num_scan_elements); cudaMemset(d_destDataPacked, 0, mem_size); printf("Num_blocks to be passed to scan is %d.\n", num_scan_elements); cudaStream_t stream; checkCudaErrors(cudaStreamCreate(&stream)); prescanArray(d_cindex2, d_cindex, num_scan_elements, stream); pack2<<< num_scan_elements/16, 16, 0, stream>>>((unsigned int*)d_destData, d_cindex, d_cindex2, (unsigned int*)d_destDataPacked, num_elements/num_scan_elements); checkCudaErrors(cudaStreamSynchronize(stream)); TIMESTAMP(t8); time_kernel += ELAPSED(t7, t8); deallocBlockSums(); TIMESTAMP(t9); time_free += ELAPSED(t8, t9); checkCudaErrors(cudaMemcpy(destData, d_destDataPacked, mem_size, cudaMemcpyDeviceToHost)); TIMESTAMP(t10); time_copy_out += ELAPSED(t9, t10); compare_vectors((unsigned int*)crefData, (unsigned int*)destData, num_ints); TIMESTAMP(t11); 
time_post += ELAPSED(t10, t11); if (unified) { checkCudaErrors(cudaFree(sourceData)); checkCudaErrors(cudaFree(destData)); checkCudaErrors(cudaFree(codewords)); checkCudaErrors(cudaFree(codewordlens)); } else { free(sourceData); free(destData); free(codewords); free(codewordlens); free(cw32); free(cw32len); free(crefData); checkCudaErrors(cudaFree(d_sourceData)); checkCudaErrors(cudaFree(d_destData)); checkCudaErrors(cudaFree(d_destDataPacked)); checkCudaErrors(cudaFree(d_codewords)); checkCudaErrors(cudaFree(d_codewordlens)); } checkCudaErrors(cudaFree(d_cw32)); checkCudaErrors(cudaFree(d_cw32len)); checkCudaErrors(cudaFree(d_cw32idx)); checkCudaErrors(cudaFree(d_cindex)); checkCudaErrors(cudaFree(d_cindex2)); free(cindex2); TIMESTAMP(t12); time_free += ELAPSED(t11, t12); printf("====Timing info====\n"); printf("time malloc = %f ms\n", time_malloc); printf("time pre = %f ms\n", time_pre); printf("time copyIn = %f ms\n", time_copy_in); printf("time kernel = %f ms\n", time_kernel); printf("time serial = %f ms\n", time_serial); printf("time copyOut = %f ms\n", time_copy_out); printf("time post = %f ms\n", time_post); printf("time free = %f ms\n", time_free); printf("time end-to-end = %f ms\n", ELAPSED(t0, t12)); exit(EXIT_SUCCESS); }
a1c12edf583824006aa99bbe81b2736483d2d198.hip
// !!! This is a file automatically generated by hipify!!! // CIS565 CUDA Raytracer: A parallel raytracer for Patrick Cozzi's CIS565: GPU Computing at the University of Pennsylvania // Written by Yining Karl Li, Copyright (c) 2012 University of Pennsylvania // This file includes code from: // Rob Farber for CUDA-GL interop, from CUDA Supercomputing For The Masses: http://www.drdobbs.com/architecture-and-design/cuda-supercomputing-for-the-masses-part/222600097 // Peter Kutz and Yining Karl Li's GPU Pathtracer: http://gpupathtracer.blogspot.com/ // Yining Karl Li's TAKUA Render, a massively parallel pathtracing renderer: http://www.yiningkarlli.com #include <stdio.h> #include <hip/hip_runtime.h> #include <cmath> #include "sceneStructs.h" #include <cutil_math.h> #include "glm/glm.hpp" #include "utilities.h" #include "raytraceKernel.h" #include "intersections.h" #include "interactions.h" #include <vector> void checkCUDAError(const char *msg) { hipError_t err = hipGetLastError(); if( hipSuccess != err) { fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) ); exit(EXIT_FAILURE); } } //LOOK: This function demonstrates how to use thrust for random number generation on the GPU! //Function that generates static. 
__host__ __device__ glm::vec3 generateRandomNumberFromThread(glm::vec2 resolution, float time, int x, int y){ int index = x + (y * resolution.x); thrust::default_random_engine rng(hash(index*time)); thrust::uniform_real_distribution<float> u01(0,1); return glm::vec3((float) u01(rng), (float) u01(rng), (float) u01(rng)); } //TODO: IMPLEMENT THIS FUNCTION //Function that does the initial raycast from the camera __host__ __device__ ray raycastFromCameraKernel(glm::vec2 resolution, float time, int x, int y, glm::vec3 eye, glm::vec3 view, glm::vec3 up, glm::vec2 fov) { ray r; r.origin = eye; glm::vec3 AVEC,BVEC,MVEC,HVEC,VVEC,Ppoint;//from CIS560 float Sx = x / (resolution.x ); float Sy = y / (resolution.y ); AVEC = glm::cross(view, up);//view is the CVEC, up is UVEC BVEC = glm::cross(AVEC, view); MVEC = eye + view;//Midpoint of screen HVEC = view.length() * tan(fov.x) * glm::normalize(AVEC); VVEC = view.length() * tan(fov.y) * glm::normalize(BVEC); Ppoint = MVEC + ( 2*Sx - 1 ) * HVEC + ( 2*Sy -1 ) * VVEC; r.direction = glm::normalize(Ppoint - eye); return r; } //Kernel that blacks out a given image buffer __global__ void clearImage(glm::vec2 resolution, glm::vec3* image){ int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * resolution.x); if(x<=resolution.x && y<=resolution.y){ image[index] = glm::vec3(0,0,0); } } //Kernel that writes the image to the OpenGL PBO directly. 
__global__ void sendImageToPBO(uchar4* PBOpos, glm::vec2 resolution, glm::vec3* image){ int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * resolution.x); if(x<=resolution.x && y<=resolution.y){ glm::vec3 color; color.x = image[index].x*255.0; color.y = image[index].y*255.0; color.z = image[index].z*255.0; if(color.x>255){ color.x = 255; } if(color.y>255){ color.y = 255; } if(color.z>255){ color.z = 255; } // Each thread writes one pixel location in the texture (textel) PBOpos[index].w = 0; PBOpos[index].x = color.x; PBOpos[index].y = color.y; PBOpos[index].z = color.z; } } //generate rays for further ray tracing __global__ void generateRay(ray *rays, cameraData cam) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * cam.resolution.x); if(x <= cam.resolution.x && y <= cam.resolution.y) rays[index] = raycastFromCameraKernel(cam.resolution, 0.0f, x, y, cam.position, cam.view, cam.up, cam.fov); __syncthreads(); } //TODO: IMPLEMENT THIS FUNCTION //Core raytracer kernel __global__ void raytraceRay(glm::vec2 resolution, float time, cameraData cam, int rayDepth, glm::vec3* colors, staticGeom* geoms, int numberOfGeoms, material* materials, ray* rays)//Added cudaMaterial { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * resolution.x);//pixel if(x <= cam.resolution.x && y <= cam.resolution.y) {//ray r = raycastFromCameraKernel(resolution,time,x,y,cam.position,cam.view,cam.up,cam.fov); ray r = rays[index]; int currentDepth; bool continueFlag = true; int hitCounter = 0; if(currentDepth > 4) return; glm::vec3 intersecP, norm, colResult; float d = -1;//distance to intersection float min_d = 1000000.0;//the distance to closest object int closestGeomIndex= -1; glm::vec3 lightPos; float lightEmi; int lightGeoIndex=-1;//store the index of the light source, Only one currently 
float amibient = 0.5, diffuse = 0.5, specular = 0.75; for( int i = 0; i < numberOfGeoms; ++i ) {//this loop find out the closest object and light source if(geoms[i].type == SPHERE) d = sphereIntersectionTest(geoms[i],r,intersecP,norm); else if(geoms[i].type == CUBE ) d = boxIntersectionTest(geoms[i],r,intersecP,norm); if( d > 0 && d < min_d ) {//find out the closest geometry's Index min_d = d; closestGeomIndex = i; } if( (lightEmi = materials[ geoms[i].materialid ].emittance) > 0 ) {//this object is a light source lightPos = geoms[i].translation; colResult = materials[ geoms[i].materialid ].color; lightGeoIndex = i; continueFlag = false; } else lightEmi = 0; } ray light; light.origin = intersecP; light.direction = glm::normalize(lightPos - intersecP); glm::vec3 lightCol = materials[ geoms[lightGeoIndex].materialid ].color; light.origin += light.direction*0.1f;//move back 0.1 to change the intersection test float dLignt = -1; float min_dlight = 10000; float fordifuse = glm::dot(norm,light.direction); int geoIndex; glm::vec3 lightIntersecP, lightIntersecNorm; glm::vec3 geoCol; if(closestGeomIndex >= 0 ) {//intersection occurred //if(rayDepth == 0) colResult = glm::vec3(0,0,0); if(materials[ geoms[closestGeomIndex].materialid ].emittance > 1) {//the object is light source colResult = materials[ geoms[closestGeomIndex].materialid ].color; continueFlag = false;//don't need to keep going } //for(int i = 0; (i < numberOfGeoms) && (i != lightGeoIndex); ++i) else {//if the object is not light source object geoCol = materials[ geoms[closestGeomIndex].materialid ].color; colResult = amibient * materials[ geoms[closestGeomIndex].materialid ].color ; for(int i = 0; i < numberOfGeoms;++i ) {//Shadow light intersection test if(geoms[i].type == SPHERE) dLignt = sphereIntersectionTest(geoms[i], light, lightIntersecP, lightIntersecNorm); else if(geoms[i].type == CUBE) dLignt = boxIntersectionTest(geoms[i],light, lightIntersecP, lightIntersecNorm); if(dLignt > 0 && dLignt < 
min_dlight) { min_dlight = dLignt; geoIndex = i; } } if(geoIndex == lightGeoIndex ||geoIndex ==closestGeomIndex )//Only hit the light { colResult += diffuse * fordifuse ; if( materials[geoms[closestGeomIndex].materialid].specularExponent != 0 ) { //colResult = glm::vec3(0,0,1); glm::vec3 reflectionRay = calculateReflectionDirection(norm,r.direction); //specular colResult += specular * materials[geoms[closestGeomIndex].materialid].specularColor * pow( fabs (glm::dot( r.direction, reflectionRay )), materials[geoms[closestGeomIndex].materialid].specularExponent); r.origin = intersecP; r.direction = reflectionRay; } if(materials[geoms[closestGeomIndex].materialid].hasReflective != 0) { r.direction = calculateReflectionDirection(norm, r.direction);//normalized in function r.origin = intersecP + r.direction * 0.01f; continueFlag = true; } } //else if( geoIndex ==closestGeomIndex ) colResult += diffuse * fordifuse ; else colResult=glm::vec3(0,0,0); //else colResult=glm::vec3(0,0,0);//test which part goes to "else" } } else {//does not intersect with anything colResult= glm::vec3(0.0, 0.0, 1.0); continueFlag = false; } colors[index] = colResult; }//this is for the if(x <= cam.resolution.x && y <= cam.resolution.y) } //TODO: FINISH THIS FUNCTION // Wrapper for the __global__ call that sets up the kernel calls and does a ton of memory management void cudaRaytraceCore(uchar4* PBOpos, camera* renderCam, int frame, int iterations, material* materials, int numberOfMaterials, geom* geoms, int numberOfGeoms){ int traceDepth = 4; //determines how many bounces the raytracer traces // set up crucial magic int tileSize = 8; dim3 threadsPerBlock(tileSize, tileSize); dim3 fullBlocksPerGrid((int)ceil(float(renderCam->resolution.x)/float(tileSize)), (int)ceil(float(renderCam->resolution.y)/float(tileSize))); //send image to GPU glm::vec3* cudaimage = NULL; hipMalloc((void**)&cudaimage, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3)); hipMemcpy( cudaimage, 
renderCam->image, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3), hipMemcpyHostToDevice); //package geometry and materials and sent to GPU staticGeom* geomList = new staticGeom[numberOfGeoms]; for(int i=0; i<numberOfGeoms; i++){ staticGeom newStaticGeom; newStaticGeom.type = geoms[i].type; newStaticGeom.materialid = geoms[i].materialid; newStaticGeom.translation = geoms[i].translations[frame]; newStaticGeom.rotation = geoms[i].rotations[frame]; newStaticGeom.scale = geoms[i].scales[frame]; newStaticGeom.transform = geoms[i].transforms[frame]; newStaticGeom.inverseTransform = geoms[i].inverseTransforms[frame]; geomList[i] = newStaticGeom; } staticGeom* cudageoms = NULL; hipMalloc((void**)&cudageoms, numberOfGeoms*sizeof(staticGeom)); hipMemcpy( cudageoms, geomList, numberOfGeoms*sizeof(staticGeom), hipMemcpyHostToDevice); //package materials and sent to GPU material* materialsList = new material[numberOfMaterials]; for(int i=0; i<numberOfMaterials; i++){ material newStaticMaterial; newStaticMaterial.color = materials[i].color; newStaticMaterial.specularExponent = materials[i].specularExponent; newStaticMaterial.specularColor = materials[i].specularColor; newStaticMaterial.hasReflective = materials[i].hasReflective; newStaticMaterial.hasRefractive = materials[i].hasRefractive; newStaticMaterial.indexOfRefraction = materials[i].indexOfRefraction; newStaticMaterial.hasScatter = materials[i].hasScatter; newStaticMaterial.absorptionCoefficient = materials[i].absorptionCoefficient; newStaticMaterial.reducedScatterCoefficient = materials[i].reducedScatterCoefficient; newStaticMaterial.emittance = materials[i].emittance; materialsList[i] = newStaticMaterial; } material* cudaMaterials = NULL; hipMalloc((void**)&cudaMaterials, numberOfMaterials*sizeof(material)); hipMemcpy( cudaMaterials, materialsList, numberOfMaterials*sizeof(material), hipMemcpyHostToDevice); //package camera cameraData cam; cam.resolution = renderCam->resolution; cam.position 
= renderCam->positions[frame]; cam.view = renderCam->views[frame]; cam.up = renderCam->ups[frame]; cam.fov = renderCam->fov; //Package rays int numOfRays = cam.resolution.x * cam.resolution.y; ray *rays = new ray[numOfRays]; ray *cudarays = NULL; hipMalloc((void**)&cudarays, numOfRays * sizeof(ray)); hipMemcpy(cudarays, rays, numOfRays * sizeof(ray), hipMemcpyHostToDevice); hipLaunchKernelGGL(( generateRay), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, cudarays,cam); //kernel launches //traceDepth = 4; //for(int i; i < traceDepth; ++i) //{ hipLaunchKernelGGL(( raytraceRay), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, renderCam->resolution, (float)iterations, cam, traceDepth, cudaimage, cudageoms, numberOfGeoms,cudaMaterials,cudarays); //} hipLaunchKernelGGL(( sendImageToPBO), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, PBOpos, renderCam->resolution, cudaimage); //retrieve image from GPU hipMemcpy( renderCam->image, cudaimage, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3), hipMemcpyDeviceToHost); //free up stuff, or else we'll leak memory like a madman hipFree( cudaimage ); hipFree( cudageoms ); hipFree( cudaMaterials ); hipFree(cudarays); delete geomList; delete materialsList; delete rays; // make certain the kernel has completed hipDeviceSynchronize(); checkCUDAError("Kernel failed!"); }
a1c12edf583824006aa99bbe81b2736483d2d198.cu
// CIS565 CUDA Raytracer: A parallel raytracer for Patrick Cozzi's CIS565: GPU Computing at the University of Pennsylvania // Written by Yining Karl Li, Copyright (c) 2012 University of Pennsylvania // This file includes code from: // Rob Farber for CUDA-GL interop, from CUDA Supercomputing For The Masses: http://www.drdobbs.com/architecture-and-design/cuda-supercomputing-for-the-masses-part/222600097 // Peter Kutz and Yining Karl Li's GPU Pathtracer: http://gpupathtracer.blogspot.com/ // Yining Karl Li's TAKUA Render, a massively parallel pathtracing renderer: http://www.yiningkarlli.com #include <stdio.h> #include <cuda.h> #include <cmath> #include "sceneStructs.h" #include <cutil_math.h> #include "glm/glm.hpp" #include "utilities.h" #include "raytraceKernel.h" #include "intersections.h" #include "interactions.h" #include <vector> void checkCUDAError(const char *msg) { cudaError_t err = cudaGetLastError(); if( cudaSuccess != err) { fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) ); exit(EXIT_FAILURE); } } //LOOK: This function demonstrates how to use thrust for random number generation on the GPU! //Function that generates static. 
__host__ __device__ glm::vec3 generateRandomNumberFromThread(glm::vec2 resolution, float time, int x, int y){ int index = x + (y * resolution.x); thrust::default_random_engine rng(hash(index*time)); thrust::uniform_real_distribution<float> u01(0,1); return glm::vec3((float) u01(rng), (float) u01(rng), (float) u01(rng)); } //TODO: IMPLEMENT THIS FUNCTION //Function that does the initial raycast from the camera __host__ __device__ ray raycastFromCameraKernel(glm::vec2 resolution, float time, int x, int y, glm::vec3 eye, glm::vec3 view, glm::vec3 up, glm::vec2 fov) { ray r; r.origin = eye; glm::vec3 AVEC,BVEC,MVEC,HVEC,VVEC,Ppoint;//from CIS560 float Sx = x / (resolution.x ); float Sy = y / (resolution.y ); AVEC = glm::cross(view, up);//view is the CVEC, up is UVEC BVEC = glm::cross(AVEC, view); MVEC = eye + view;//Midpoint of screen HVEC = view.length() * tan(fov.x) * glm::normalize(AVEC); VVEC = view.length() * tan(fov.y) * glm::normalize(BVEC); Ppoint = MVEC + ( 2*Sx - 1 ) * HVEC + ( 2*Sy -1 ) * VVEC; r.direction = glm::normalize(Ppoint - eye); return r; } //Kernel that blacks out a given image buffer __global__ void clearImage(glm::vec2 resolution, glm::vec3* image){ int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * resolution.x); if(x<=resolution.x && y<=resolution.y){ image[index] = glm::vec3(0,0,0); } } //Kernel that writes the image to the OpenGL PBO directly. 
__global__ void sendImageToPBO(uchar4* PBOpos, glm::vec2 resolution, glm::vec3* image){ int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * resolution.x); if(x<=resolution.x && y<=resolution.y){ glm::vec3 color; color.x = image[index].x*255.0; color.y = image[index].y*255.0; color.z = image[index].z*255.0; if(color.x>255){ color.x = 255; } if(color.y>255){ color.y = 255; } if(color.z>255){ color.z = 255; } // Each thread writes one pixel location in the texture (textel) PBOpos[index].w = 0; PBOpos[index].x = color.x; PBOpos[index].y = color.y; PBOpos[index].z = color.z; } } //generate rays for further ray tracing __global__ void generateRay(ray *rays, cameraData cam) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * cam.resolution.x); if(x <= cam.resolution.x && y <= cam.resolution.y) rays[index] = raycastFromCameraKernel(cam.resolution, 0.0f, x, y, cam.position, cam.view, cam.up, cam.fov); __syncthreads(); } //TODO: IMPLEMENT THIS FUNCTION //Core raytracer kernel __global__ void raytraceRay(glm::vec2 resolution, float time, cameraData cam, int rayDepth, glm::vec3* colors, staticGeom* geoms, int numberOfGeoms, material* materials, ray* rays)//Added cudaMaterial { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * resolution.x);//pixel if(x <= cam.resolution.x && y <= cam.resolution.y) {//ray r = raycastFromCameraKernel(resolution,time,x,y,cam.position,cam.view,cam.up,cam.fov); ray r = rays[index]; int currentDepth; bool continueFlag = true; int hitCounter = 0; if(currentDepth > 4) return; glm::vec3 intersecP, norm, colResult; float d = -1;//distance to intersection float min_d = 1000000.0;//the distance to closest object int closestGeomIndex= -1; glm::vec3 lightPos; float lightEmi; int lightGeoIndex=-1;//store the index of the light source, Only one currently 
float amibient = 0.5, diffuse = 0.5, specular = 0.75; for( int i = 0; i < numberOfGeoms; ++i ) {//this loop find out the closest object and light source if(geoms[i].type == SPHERE) d = sphereIntersectionTest(geoms[i],r,intersecP,norm); else if(geoms[i].type == CUBE ) d = boxIntersectionTest(geoms[i],r,intersecP,norm); if( d > 0 && d < min_d ) {//find out the closest geometry's Index min_d = d; closestGeomIndex = i; } if( (lightEmi = materials[ geoms[i].materialid ].emittance) > 0 ) {//this object is a light source lightPos = geoms[i].translation; colResult = materials[ geoms[i].materialid ].color; lightGeoIndex = i; continueFlag = false; } else lightEmi = 0; } ray light; light.origin = intersecP; light.direction = glm::normalize(lightPos - intersecP); glm::vec3 lightCol = materials[ geoms[lightGeoIndex].materialid ].color; light.origin += light.direction*0.1f;//move back 0.1 to change the intersection test float dLignt = -1; float min_dlight = 10000; float fordifuse = glm::dot(norm,light.direction); int geoIndex; glm::vec3 lightIntersecP, lightIntersecNorm; glm::vec3 geoCol; if(closestGeomIndex >= 0 ) {//intersection occurred //if(rayDepth == 0) colResult = glm::vec3(0,0,0); if(materials[ geoms[closestGeomIndex].materialid ].emittance > 1) {//the object is light source colResult = materials[ geoms[closestGeomIndex].materialid ].color; continueFlag = false;//don't need to keep going } //for(int i = 0; (i < numberOfGeoms) && (i != lightGeoIndex); ++i) else {//if the object is not light source object geoCol = materials[ geoms[closestGeomIndex].materialid ].color; colResult = amibient * materials[ geoms[closestGeomIndex].materialid ].color ; for(int i = 0; i < numberOfGeoms;++i ) {//Shadow light intersection test if(geoms[i].type == SPHERE) dLignt = sphereIntersectionTest(geoms[i], light, lightIntersecP, lightIntersecNorm); else if(geoms[i].type == CUBE) dLignt = boxIntersectionTest(geoms[i],light, lightIntersecP, lightIntersecNorm); if(dLignt > 0 && dLignt < 
min_dlight) { min_dlight = dLignt; geoIndex = i; } } if(geoIndex == lightGeoIndex ||geoIndex ==closestGeomIndex )//Only hit the light { colResult += diffuse * fordifuse ; if( materials[geoms[closestGeomIndex].materialid].specularExponent != 0 ) { //colResult = glm::vec3(0,0,1); glm::vec3 reflectionRay = calculateReflectionDirection(norm,r.direction); //specular colResult += specular * materials[geoms[closestGeomIndex].materialid].specularColor * pow( fabs (glm::dot( r.direction, reflectionRay )), materials[geoms[closestGeomIndex].materialid].specularExponent); r.origin = intersecP; r.direction = reflectionRay; } if(materials[geoms[closestGeomIndex].materialid].hasReflective != 0) { r.direction = calculateReflectionDirection(norm, r.direction);//normalized in function r.origin = intersecP + r.direction * 0.01f; continueFlag = true; } } //else if( geoIndex ==closestGeomIndex ) colResult += diffuse * fordifuse ; else colResult=glm::vec3(0,0,0); //else colResult=glm::vec3(0,0,0);//test which part goes to "else" } } else {//does not intersect with anything colResult= glm::vec3(0.0, 0.0, 1.0); continueFlag = false; } colors[index] = colResult; }//this is for the if(x <= cam.resolution.x && y <= cam.resolution.y) } //TODO: FINISH THIS FUNCTION // Wrapper for the __global__ call that sets up the kernel calls and does a ton of memory management void cudaRaytraceCore(uchar4* PBOpos, camera* renderCam, int frame, int iterations, material* materials, int numberOfMaterials, geom* geoms, int numberOfGeoms){ int traceDepth = 4; //determines how many bounces the raytracer traces // set up crucial magic int tileSize = 8; dim3 threadsPerBlock(tileSize, tileSize); dim3 fullBlocksPerGrid((int)ceil(float(renderCam->resolution.x)/float(tileSize)), (int)ceil(float(renderCam->resolution.y)/float(tileSize))); //send image to GPU glm::vec3* cudaimage = NULL; cudaMalloc((void**)&cudaimage, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3)); cudaMemcpy( cudaimage, 
renderCam->image, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3), cudaMemcpyHostToDevice); //package geometry and materials and sent to GPU staticGeom* geomList = new staticGeom[numberOfGeoms]; for(int i=0; i<numberOfGeoms; i++){ staticGeom newStaticGeom; newStaticGeom.type = geoms[i].type; newStaticGeom.materialid = geoms[i].materialid; newStaticGeom.translation = geoms[i].translations[frame]; newStaticGeom.rotation = geoms[i].rotations[frame]; newStaticGeom.scale = geoms[i].scales[frame]; newStaticGeom.transform = geoms[i].transforms[frame]; newStaticGeom.inverseTransform = geoms[i].inverseTransforms[frame]; geomList[i] = newStaticGeom; } staticGeom* cudageoms = NULL; cudaMalloc((void**)&cudageoms, numberOfGeoms*sizeof(staticGeom)); cudaMemcpy( cudageoms, geomList, numberOfGeoms*sizeof(staticGeom), cudaMemcpyHostToDevice); //package materials and sent to GPU material* materialsList = new material[numberOfMaterials]; for(int i=0; i<numberOfMaterials; i++){ material newStaticMaterial; newStaticMaterial.color = materials[i].color; newStaticMaterial.specularExponent = materials[i].specularExponent; newStaticMaterial.specularColor = materials[i].specularColor; newStaticMaterial.hasReflective = materials[i].hasReflective; newStaticMaterial.hasRefractive = materials[i].hasRefractive; newStaticMaterial.indexOfRefraction = materials[i].indexOfRefraction; newStaticMaterial.hasScatter = materials[i].hasScatter; newStaticMaterial.absorptionCoefficient = materials[i].absorptionCoefficient; newStaticMaterial.reducedScatterCoefficient = materials[i].reducedScatterCoefficient; newStaticMaterial.emittance = materials[i].emittance; materialsList[i] = newStaticMaterial; } material* cudaMaterials = NULL; cudaMalloc((void**)&cudaMaterials, numberOfMaterials*sizeof(material)); cudaMemcpy( cudaMaterials, materialsList, numberOfMaterials*sizeof(material), cudaMemcpyHostToDevice); //package camera cameraData cam; cam.resolution = renderCam->resolution; 
cam.position = renderCam->positions[frame]; cam.view = renderCam->views[frame]; cam.up = renderCam->ups[frame]; cam.fov = renderCam->fov; //Package rays int numOfRays = cam.resolution.x * cam.resolution.y; ray *rays = new ray[numOfRays]; ray *cudarays = NULL; cudaMalloc((void**)&cudarays, numOfRays * sizeof(ray)); cudaMemcpy(cudarays, rays, numOfRays * sizeof(ray), cudaMemcpyHostToDevice); generateRay<<<fullBlocksPerGrid, threadsPerBlock>>>(cudarays,cam); //kernel launches //traceDepth = 4; //for(int i; i < traceDepth; ++i) //{ raytraceRay<<<fullBlocksPerGrid, threadsPerBlock>>>(renderCam->resolution, (float)iterations, cam, traceDepth, cudaimage, cudageoms, numberOfGeoms,cudaMaterials,cudarays); //} sendImageToPBO<<<fullBlocksPerGrid, threadsPerBlock>>>(PBOpos, renderCam->resolution, cudaimage); //retrieve image from GPU cudaMemcpy( renderCam->image, cudaimage, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3), cudaMemcpyDeviceToHost); //free up stuff, or else we'll leak memory like a madman cudaFree( cudaimage ); cudaFree( cudageoms ); cudaFree( cudaMaterials ); cudaFree(cudarays); delete geomList; delete materialsList; delete rays; // make certain the kernel has completed cudaThreadSynchronize(); checkCUDAError("Kernel failed!"); }
f51821dc72fb9b7de097dcef370fabd9938084c7.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <hip/hip_runtime.h> #define mycout cout<<"["<<__FILE__<<":"<<__LINE__<<"] " #define CHECK(res) if(res!=hipSuccess){exit(-1);} #define rows 5 #define cols 3 using namespace std; typedef float FLOAT; // __global__ void vec_add(FLOAT **a,const int rows,const int cols) __global__ void vec_add(FLOAT **a) { __shared__ float A[rows][cols]; int x=threadIdx.x; int y=threadIdx.y; if(x>=cols || y>=rows) return; A[y][x]=a[y][x]+2; __syncthreads(); a[y][x]=A[y][x]; // a[y][x]+=2; } int main() { mycout<<"(CPUGPU) \n"<< "()"<<endl; // int rows=5,cols=3; FLOAT **a=nullptr; // // a=(FLOAT**)malloc(rows*sizeof(FLOAT*)); CHECK(hipMallocManaged((void**)&a,rows*sizeof(FLOAT*))); for(int i=0;i<rows;++i) { // a[i]=(FLOAT *)malloc(cols*sizeof(FLOAT)); CHECK(hipMallocManaged((void**)&a[i],cols*sizeof(FLOAT))); } // for(int i=0;i<rows;++i) { for(int j=0;j<cols;++j) { a[i][j]=j+i*cols; } } // dim3 threads(32,32); //hipLaunchKernelGGL(( vec_add), dim3(1),dim3(threads), 0, 0, a,rows,cols); hipLaunchKernelGGL(( vec_add), dim3(1),dim3(threads), 0, 0, a); hipDeviceSynchronize(); //GPU // for(int i=0;i<rows;++i) { for(int j=0;j<cols;++j) { cout<<a[i][j]<<" "; } cout<<endl; } // free for(int i=0;i<rows;++i) { if(a[i]!=NULL) // free(a[i]); CHECK(hipFree(a[i])); } if(a!=NULL) // free(a); CHECK(hipFree(a)); return 0; }
f51821dc72fb9b7de097dcef370fabd9938084c7.cu
#include <iostream> #include <cuda.h> #define mycout cout<<"["<<__FILE__<<":"<<__LINE__<<"] " #define CHECK(res) if(res!=cudaSuccess){exit(-1);} #define rows 5 #define cols 3 using namespace std; typedef float FLOAT; // __global__ void vec_add(FLOAT **a,const int rows,const int cols) __global__ void vec_add(FLOAT **a) { __shared__ float A[rows][cols]; int x=threadIdx.x; int y=threadIdx.y; if(x>=cols || y>=rows) return; A[y][x]=a[y][x]+2; __syncthreads(); a[y][x]=A[y][x]; // a[y][x]+=2; } int main() { mycout<<"虚拟统一内存使用(CPU与GPU都能访问) 并使用共享内存\n"<< "使用二维数组(二维数组其实可以展开为一维数组处理)"<<endl; // int rows=5,cols=3; FLOAT **a=nullptr; // 分配内存 // a=(FLOAT**)malloc(rows*sizeof(FLOAT*)); CHECK(cudaMallocManaged((void**)&a,rows*sizeof(FLOAT*))); for(int i=0;i<rows;++i) { // a[i]=(FLOAT *)malloc(cols*sizeof(FLOAT)); CHECK(cudaMallocManaged((void**)&a[i],cols*sizeof(FLOAT))); } // 赋值 for(int i=0;i<rows;++i) { for(int j=0;j<cols;++j) { a[i][j]=j+i*cols; } } // 启动核函数 dim3 threads(32,32); // vec_add<<<1,threads>>>(a,rows,cols); vec_add<<<1,threads>>>(a); cudaDeviceSynchronize(); //等待GPU执行完成, 有多种方式 // 打印 for(int i=0;i<rows;++i) { for(int j=0;j<cols;++j) { cout<<a[i][j]<<" "; } cout<<endl; } // free for(int i=0;i<rows;++i) { if(a[i]!=NULL) // free(a[i]); CHECK(cudaFree(a[i])); } if(a!=NULL) // free(a); CHECK(cudaFree(a)); return 0; }
1a159b74019cf8ad8b566d4645ebc517bd70e034.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Discrete Cosine Transform in row wise (DCT one) * DCT_I_Row * This CUDA code can handle/work with any type of the input mxArrays, * GPUarray or standard matlab CPU array as input {prhs[0] := mxGPUArray or CPU Array} * gpuArray output, B=DCT_I_Row(A)=mexFunction(A). * Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London * Wellcome Trust Centre for Neuroimaging * Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm) * Copyright 2018 * Kevin Bronik */ #include "matrix.h" #include "mex.h" #include "gpu/mxGPUArray.h" #define DEFAULT_DIM 32 #define DELTA(i, j) ((i==j)?1:0) const double PI_d = 3.141592653589793238462643383279502884; //pi __global__ void DCTI_Row_Kernel_GPUA(double const * const A, double const * const B, double * const C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { double CValue = 0.0; int Row = blockIdx.y*DEFAULT_DIM + threadIdx.y; int Col = blockIdx.x*DEFAULT_DIM + threadIdx.x; for (int k = 0; k < (DEFAULT_DIM + numAColumns - 1) / DEFAULT_DIM; k++) { for (int n = 0; n < DEFAULT_DIM; ++n) if ((k*DEFAULT_DIM + n < numAColumns && Row < numARows) && (k*DEFAULT_DIM + n < numBRows && Col < numBColumns)) CValue += A[Row*numAColumns + k*DEFAULT_DIM + n] * B[(k*DEFAULT_DIM + n)*numBColumns + Col]; } if (Row < numCRows && Col < numCColumns) C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue; } __global__ void DCTI_Row_Kernel(double *A, double *B, double *C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { double CValue = 0.0; int Row = blockIdx.y*DEFAULT_DIM + threadIdx.y; int Col = blockIdx.x*DEFAULT_DIM + threadIdx.x; for (int k = 0; k < (DEFAULT_DIM + numAColumns - 1) / DEFAULT_DIM; k++) { for (int n = 0; n < DEFAULT_DIM; ++n) if ((k*DEFAULT_DIM + n < numAColumns && Row < numARows) && 
(k*DEFAULT_DIM + n < numBRows && Col < numBColumns)) CValue += A[Row*numAColumns + k*DEFAULT_DIM + n] * B[(k*DEFAULT_DIM + n)*numBColumns + Col]; } if (Row < numCRows && Col < numCColumns) C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue; } // Matrix multiplication - Host code // Matrix dimensions are assumed to be multiples of BLOCK_SIZE void CalculateTransform(double * A, double * B, double * C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { double * hostA = A; // The A matrix double * hostB = B; // The B matrix double * hostC = C; // The output C matrix //double * hostComputedC; double * deviceA=0; double * deviceB=0; double * deviceC=0; //hostA = (double *)malloc(sizeof(float)*numARows*numAColumns); //hostB = (v *)malloc(sizeof(float)*numBRows*numBColumns); // Setting numCRows and numCColumns numCRows = numARows; numCColumns = numBColumns; // Allocate GPU buffers for three vectors (two input, one output) . 
//hostC = (float *)malloc(sizeof(float)*numCRows*numCColumns); //hostComputedC = (float *)malloc(sizeof(float)*numCRows*numCColumns); hipMalloc((void **)&deviceA, sizeof(double )*numARows*numAColumns); hipMalloc((void **)&deviceB, sizeof(double )*numBRows*numBColumns); hipMalloc((void **)&deviceC, sizeof(double )*numCRows*numCColumns); hipMemcpy(deviceA, hostA, sizeof(double )*numARows*numAColumns, hipMemcpyHostToDevice); hipMemcpy(deviceB, hostB, sizeof(double )*numBRows*numBColumns, hipMemcpyHostToDevice); dim3 dimBlock(DEFAULT_DIM, DEFAULT_DIM, 1); dim3 dimGrid; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; DCTI_Row_Kernel << <dimGrid, dimBlock >> >(deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns); hipDeviceSynchronize();//To synchronize the device // Copy the results in GPU memory back to the CPU hipMemcpy(hostC, deviceC, sizeof(double)*numCRows*numCColumns, hipMemcpyDeviceToHost); C = hostC; hipFree(deviceA); hipFree(deviceB); hipFree(deviceC); } void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]) { int nDevices; hipError_t errCode =hipGetDeviceCount(&nDevices); //int nDevices; //hipGetDeviceCount(&nDevices); if (errCode != hipSuccess){ printf("Error! No CUDA devices found! \n"); return; } /// input standard GPUarray if (mxIsGPUArray(prhs[0])) { //mexErrMsgIdAndTxt(errId, errMsg); /* Declare all variables.*/ mxGPUArray const *A; mxGPUArray const *DCOS; mxGPUArray *B; double const *d_A, *d_DCOS; double *d_B; // mxArray * hostcos; //test // double * hostcos, *pointer; double *pointer; //int N; int numARows, numAColumns, numDCOSRows, numDCOSColumns, numCRows, numCColumns; char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput"; char const * const errMsg = "Invalid input to MEX file."; /* Initialize the MathWorks GPU API. */ mxInitGPU(); /* Throw an error if the input is not a GPU array. 
*/ if ((nrhs!=1) || !(mxIsGPUArray(prhs[0]))) { mexErrMsgIdAndTxt(errId, errMsg); } A = mxGPUCreateFromMxArray(prhs[0]); const mwSize *dims; dims=mxGPUGetDimensions(A); numARows = (int)dims[0]; /* gets number of rows of A */ numAColumns = (int)dims[1]; /* gets number of columns of A */ if (numAColumns==1) { printf("Attention, this is a column vector, please try Discrete Cosine Transform in column wise \n"); return; } numDCOSRows=numDCOSColumns=numAColumns; numCRows = numARows; numCColumns = numDCOSColumns; mxArray *COS= mxCreateNumericMatrix(numDCOSRows, numDCOSColumns, mxDOUBLE_CLASS, mxREAL); pointer = mxGetPr(COS); for (int i = 0; i < numDCOSRows; i++){ for (int j = 0; j < numDCOSColumns; j++){ //hostB[i * numBColumns + j] = i + j* numAColumns; //hostB[i * numBColumns + j] = 1; //cosvalx[i * numBColumns + j] = cos(((2 * j + 1) / (2.0 * numBColumns))*3.14*i)*sqrt(1.0 / numBColumns); //hostB[i * numBColumns + j] = cosvalx[i + j* numAColumns]; if (numDCOSColumns != 1){ pointer[i + j* numDCOSColumns] = cos((j*PI_d*i / (numDCOSColumns - 1)))*sqrt(1.0 / (1 + DELTA(i + 1, 1) + DELTA(i + 1, numDCOSRows)))*sqrt(1.0 / (1 + DELTA(1, j + 1) + DELTA(numDCOSColumns, j + 1)))*sqrt(2.0 / numDCOSColumns); //hostB[i + j* numBColumns] = 1; } else{ pointer[i + j* numDCOSColumns] =1; } } } DCOS=mxGPUCreateFromMxArray(COS); // DCOS=mxGPUCreateFromMxArray(hostcos); if (mxGPUGetClassID(A) != mxDOUBLE_CLASS) { mexErrMsgIdAndTxt(errId, errMsg); } d_A = (double const *)(mxGPUGetDataReadOnly(A)); d_DCOS=(double const *)(mxGPUGetDataReadOnly(DCOS)); B = mxGPUCreateGPUArray(mxGPUGetNumberOfDimensions(A), mxGPUGetDimensions(A), mxGPUGetClassID(A), mxGPUGetComplexity(A), MX_GPU_DO_NOT_INITIALIZE); d_B = (double *)(mxGPUGetData(B)); dim3 dimBlock(DEFAULT_DIM, DEFAULT_DIM, 1); dim3 dimGrid; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; DCTI_Row_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_DCOS, d_B, numARows, numAColumns, 
numDCOSRows, numDCOSColumns, numCRows, numCColumns); // hipError_t err1 = hipPeekAtLastError();//To capture last error in function call //hipDeviceSynchronize();//To synchronize the device plhs[0] = mxGPUCreateMxArrayOnGPU(B); mxGPUDestroyGPUArray(A); mxGPUDestroyGPUArray(DCOS); mxGPUDestroyGPUArray(B); } /// input standard array else if (!(mxIsGPUArray(prhs[0]))){ int numARows = (int)mxGetM(prhs[0]); // number of rows in the matrix A int numAColumns = (int)mxGetN(prhs[0]); // number of columns in the matrix A int numBRows; // number of rows in the matrix B int numBColumns; // number of columns in the matrix B int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in the matrix C (you have to set this) numBRows = numBColumns = numAColumns; numCRows = numARows; numCColumns = numBColumns; //char const * const errId = "parallel:gpu:DCTTWO:InvalidInput"; //char const * const errMsg = "Invalid input to MEX file."; double * hostA ; // The A matrix double * hostB ; // The B matrix if (numAColumns==1) { printf("Attention, this is a column vector, please try Discrete Cosine Transform in column wise \n"); return; } /* Initialize the MathWorks GPU API. */ //mxInitGPU(); /* Throw an error if the input is not a GPU array. 
*/ //if ((nrhs != 1) || !(mxIsGPUArray(prhs[0]))) { //mexErrMsgIdAndTxt(errId, errMsg); //} //hostA = (double *)malloc(sizeof(double)*numARows*numAColumns); //hostAx = (double *)malloc(sizeof(double)*numARows*numAColumns); //hostAy = (double *)malloc(sizeof(double)*numARows*numAColumns); hostB = (double *)malloc(sizeof(double)*numBRows*numBColumns); //const mxArray *G =prhs[0]; // if ((nrhs != 1) || (mxIsGPUArray(G))) { //mexErrMsgIdAndTxt(errId, errMsg); // G = gather(G); // } hostA = (double *)mxGetData(prhs[0]); // hostA = (double *)mxGetData(G); //Discrete Cosine Transform in row wise for (int i = 0; i < numBRows; i++){ for (int j = 0; j < numBColumns; j++){ //hostB[i * numBColumns + j] = i + j* numAColumns; //hostB[i * numBColumns + j] = 1; //cosvalx[i * numBColumns + j] = cos(((2 * j + 1) / (2.0 * numBColumns))*3.14*i)*sqrt(1.0 / numBColumns); //hostB[i * numBColumns + j] = cosvalx[i + j* numAColumns]; if (numBColumns != 1){ hostB[i + j* numBColumns] = cos((j*PI_d*i / (numBColumns - 1)))*sqrt(1.0 / (1 + DELTA(i + 1, 1) + DELTA(i + 1, numBRows)))*sqrt(1.0 / (1 + DELTA(1, j + 1) + DELTA(numBColumns, j + 1)))*sqrt(2.0 / numBColumns); //hostB[i + j* numBColumns] = 1; } else{ hostB[i + j* numBColumns] =1; } } } //plhs[0] = mxCreateNumericMatrix(numARows, numBColumns, mxDOUBLE_CLASS, mxREAL); //hostC = (double*)mxGetData(plhs[0]); plhs[0] = mxCreateNumericMatrix(numCRows, numCColumns, mxDOUBLE_CLASS, mxREAL); double *pointer = mxGetPr(plhs[0]); //CalculateTransform(hostA, hostB, hostC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns); CalculateTransform(hostA, hostB, pointer, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns); //memcpy(pointer, hostC, numCRows*numCColumns*sizeof(double)); free(hostB); } }
1a159b74019cf8ad8b566d4645ebc517bd70e034.cu
/* * Discrete Cosine Transform in row wise (DCT one) * DCT_I_Row * This CUDA code can handle/work with any type of the input mxArrays, * GPUarray or standard matlab CPU array as input {prhs[0] := mxGPUArray or CPU Array} * gpuArray output, B=DCT_I_Row(A)=mexFunction(A). * Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London * Wellcome Trust Centre for Neuroimaging * Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm) * Copyright 2018 * Kevin Bronik */ #include "matrix.h" #include "mex.h" #include "gpu/mxGPUArray.h" #define DEFAULT_DIM 32 #define DELTA(i, j) ((i==j)?1:0) const double PI_d = 3.141592653589793238462643383279502884; //pi __global__ void DCTI_Row_Kernel_GPUA(double const * const A, double const * const B, double * const C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { double CValue = 0.0; int Row = blockIdx.y*DEFAULT_DIM + threadIdx.y; int Col = blockIdx.x*DEFAULT_DIM + threadIdx.x; for (int k = 0; k < (DEFAULT_DIM + numAColumns - 1) / DEFAULT_DIM; k++) { for (int n = 0; n < DEFAULT_DIM; ++n) if ((k*DEFAULT_DIM + n < numAColumns && Row < numARows) && (k*DEFAULT_DIM + n < numBRows && Col < numBColumns)) CValue += A[Row*numAColumns + k*DEFAULT_DIM + n] * B[(k*DEFAULT_DIM + n)*numBColumns + Col]; } if (Row < numCRows && Col < numCColumns) C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue; } __global__ void DCTI_Row_Kernel(double *A, double *B, double *C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { double CValue = 0.0; int Row = blockIdx.y*DEFAULT_DIM + threadIdx.y; int Col = blockIdx.x*DEFAULT_DIM + threadIdx.x; for (int k = 0; k < (DEFAULT_DIM + numAColumns - 1) / DEFAULT_DIM; k++) { for (int n = 0; n < DEFAULT_DIM; ++n) if ((k*DEFAULT_DIM + n < numAColumns && Row < numARows) && (k*DEFAULT_DIM + n < numBRows && Col < numBColumns)) CValue += A[Row*numAColumns + k*DEFAULT_DIM 
+ n] * B[(k*DEFAULT_DIM + n)*numBColumns + Col]; } if (Row < numCRows && Col < numCColumns) C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue; } // Matrix multiplication - Host code // Matrix dimensions are assumed to be multiples of BLOCK_SIZE void CalculateTransform(double * A, double * B, double * C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { double * hostA = A; // The A matrix double * hostB = B; // The B matrix double * hostC = C; // The output C matrix //double * hostComputedC; double * deviceA=0; double * deviceB=0; double * deviceC=0; //hostA = (double *)malloc(sizeof(float)*numARows*numAColumns); //hostB = (v *)malloc(sizeof(float)*numBRows*numBColumns); // Setting numCRows and numCColumns numCRows = numARows; numCColumns = numBColumns; // Allocate GPU buffers for three vectors (two input, one output) . //hostC = (float *)malloc(sizeof(float)*numCRows*numCColumns); //hostComputedC = (float *)malloc(sizeof(float)*numCRows*numCColumns); cudaMalloc((void **)&deviceA, sizeof(double )*numARows*numAColumns); cudaMalloc((void **)&deviceB, sizeof(double )*numBRows*numBColumns); cudaMalloc((void **)&deviceC, sizeof(double )*numCRows*numCColumns); cudaMemcpy(deviceA, hostA, sizeof(double )*numARows*numAColumns, cudaMemcpyHostToDevice); cudaMemcpy(deviceB, hostB, sizeof(double )*numBRows*numBColumns, cudaMemcpyHostToDevice); dim3 dimBlock(DEFAULT_DIM, DEFAULT_DIM, 1); dim3 dimGrid; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; DCTI_Row_Kernel << <dimGrid, dimBlock >> >(deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns); cudaDeviceSynchronize();//To synchronize the device // Copy the results in GPU memory back to the CPU cudaMemcpy(hostC, deviceC, sizeof(double)*numCRows*numCColumns, cudaMemcpyDeviceToHost); C = hostC; cudaFree(deviceA); 
cudaFree(deviceB); cudaFree(deviceC); } void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]) { int nDevices; cudaError_t errCode =cudaGetDeviceCount(&nDevices); //int nDevices; //cudaGetDeviceCount(&nDevices); if (errCode != cudaSuccess){ printf("Error! No CUDA devices found! \n"); return; } /// input standard GPUarray if (mxIsGPUArray(prhs[0])) { //mexErrMsgIdAndTxt(errId, errMsg); /* Declare all variables.*/ mxGPUArray const *A; mxGPUArray const *DCOS; mxGPUArray *B; double const *d_A, *d_DCOS; double *d_B; // mxArray * hostcos; //test // double * hostcos, *pointer; double *pointer; //int N; int numARows, numAColumns, numDCOSRows, numDCOSColumns, numCRows, numCColumns; char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput"; char const * const errMsg = "Invalid input to MEX file."; /* Initialize the MathWorks GPU API. */ mxInitGPU(); /* Throw an error if the input is not a GPU array. */ if ((nrhs!=1) || !(mxIsGPUArray(prhs[0]))) { mexErrMsgIdAndTxt(errId, errMsg); } A = mxGPUCreateFromMxArray(prhs[0]); const mwSize *dims; dims=mxGPUGetDimensions(A); numARows = (int)dims[0]; /* gets number of rows of A */ numAColumns = (int)dims[1]; /* gets number of columns of A */ if (numAColumns==1) { printf("Attention, this is a column vector, please try Discrete Cosine Transform in column wise \n"); return; } numDCOSRows=numDCOSColumns=numAColumns; numCRows = numARows; numCColumns = numDCOSColumns; mxArray *COS= mxCreateNumericMatrix(numDCOSRows, numDCOSColumns, mxDOUBLE_CLASS, mxREAL); pointer = mxGetPr(COS); for (int i = 0; i < numDCOSRows; i++){ for (int j = 0; j < numDCOSColumns; j++){ //hostB[i * numBColumns + j] = i + j* numAColumns; //hostB[i * numBColumns + j] = 1; //cosvalx[i * numBColumns + j] = cos(((2 * j + 1) / (2.0 * numBColumns))*3.14*i)*sqrt(1.0 / numBColumns); //hostB[i * numBColumns + j] = cosvalx[i + j* numAColumns]; if (numDCOSColumns != 1){ pointer[i + j* numDCOSColumns] = cos((j*PI_d*i / (numDCOSColumns - 
1)))*sqrt(1.0 / (1 + DELTA(i + 1, 1) + DELTA(i + 1, numDCOSRows)))*sqrt(1.0 / (1 + DELTA(1, j + 1) + DELTA(numDCOSColumns, j + 1)))*sqrt(2.0 / numDCOSColumns); //hostB[i + j* numBColumns] = 1; } else{ pointer[i + j* numDCOSColumns] =1; } } } DCOS=mxGPUCreateFromMxArray(COS); // DCOS=mxGPUCreateFromMxArray(hostcos); if (mxGPUGetClassID(A) != mxDOUBLE_CLASS) { mexErrMsgIdAndTxt(errId, errMsg); } d_A = (double const *)(mxGPUGetDataReadOnly(A)); d_DCOS=(double const *)(mxGPUGetDataReadOnly(DCOS)); B = mxGPUCreateGPUArray(mxGPUGetNumberOfDimensions(A), mxGPUGetDimensions(A), mxGPUGetClassID(A), mxGPUGetComplexity(A), MX_GPU_DO_NOT_INITIALIZE); d_B = (double *)(mxGPUGetData(B)); dim3 dimBlock(DEFAULT_DIM, DEFAULT_DIM, 1); dim3 dimGrid; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; DCTI_Row_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_DCOS, d_B, numARows, numAColumns, numDCOSRows, numDCOSColumns, numCRows, numCColumns); // cudaError_t err1 = cudaPeekAtLastError();//To capture last error in function call //cudaDeviceSynchronize();//To synchronize the device plhs[0] = mxGPUCreateMxArrayOnGPU(B); mxGPUDestroyGPUArray(A); mxGPUDestroyGPUArray(DCOS); mxGPUDestroyGPUArray(B); } /// input standard array else if (!(mxIsGPUArray(prhs[0]))){ int numARows = (int)mxGetM(prhs[0]); // number of rows in the matrix A int numAColumns = (int)mxGetN(prhs[0]); // number of columns in the matrix A int numBRows; // number of rows in the matrix B int numBColumns; // number of columns in the matrix B int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in the matrix C (you have to set this) numBRows = numBColumns = numAColumns; numCRows = numARows; numCColumns = numBColumns; //char const * const errId = "parallel:gpu:DCTTWO:InvalidInput"; //char const * const errMsg = "Invalid input to MEX file."; double * hostA ; // The A matrix double * hostB ; // The B matrix if 
(numAColumns==1) { printf("Attention, this is a column vector, please try Discrete Cosine Transform in column wise \n"); return; } /* Initialize the MathWorks GPU API. */ //mxInitGPU(); /* Throw an error if the input is not a GPU array. */ //if ((nrhs != 1) || !(mxIsGPUArray(prhs[0]))) { //mexErrMsgIdAndTxt(errId, errMsg); //} //hostA = (double *)malloc(sizeof(double)*numARows*numAColumns); //hostAx = (double *)malloc(sizeof(double)*numARows*numAColumns); //hostAy = (double *)malloc(sizeof(double)*numARows*numAColumns); hostB = (double *)malloc(sizeof(double)*numBRows*numBColumns); //const mxArray *G =prhs[0]; // if ((nrhs != 1) || (mxIsGPUArray(G))) { //mexErrMsgIdAndTxt(errId, errMsg); // G = gather(G); // } hostA = (double *)mxGetData(prhs[0]); // hostA = (double *)mxGetData(G); //Discrete Cosine Transform in row wise for (int i = 0; i < numBRows; i++){ for (int j = 0; j < numBColumns; j++){ //hostB[i * numBColumns + j] = i + j* numAColumns; //hostB[i * numBColumns + j] = 1; //cosvalx[i * numBColumns + j] = cos(((2 * j + 1) / (2.0 * numBColumns))*3.14*i)*sqrt(1.0 / numBColumns); //hostB[i * numBColumns + j] = cosvalx[i + j* numAColumns]; if (numBColumns != 1){ hostB[i + j* numBColumns] = cos((j*PI_d*i / (numBColumns - 1)))*sqrt(1.0 / (1 + DELTA(i + 1, 1) + DELTA(i + 1, numBRows)))*sqrt(1.0 / (1 + DELTA(1, j + 1) + DELTA(numBColumns, j + 1)))*sqrt(2.0 / numBColumns); //hostB[i + j* numBColumns] = 1; } else{ hostB[i + j* numBColumns] =1; } } } //plhs[0] = mxCreateNumericMatrix(numARows, numBColumns, mxDOUBLE_CLASS, mxREAL); //hostC = (double*)mxGetData(plhs[0]); plhs[0] = mxCreateNumericMatrix(numCRows, numCColumns, mxDOUBLE_CLASS, mxREAL); double *pointer = mxGetPr(plhs[0]); //CalculateTransform(hostA, hostB, hostC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns); CalculateTransform(hostA, hostB, pointer, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns); //memcpy(pointer, hostC, numCRows*numCColumns*sizeof(double)); 
free(hostB); } }
e49022214850c633ee31ad10ca5b14ac808ae459.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cfloat> #include <vector> #include "itf/engine/layer.hpp" #include "itf/engine/util/math_functions.hpp" #include "itf/engine/vision_layers.hpp" namespace itf { template <typename Dtype> __global__ void MaxPoolForward(const int nthreads, const Dtype* bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* top_data, int* mask, Dtype* top_mask) { CUDA_KERNEL_LOOP(index, nthreads) { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); Dtype maxval = -FLT_MAX; int maxidx = -1; bottom_data += (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { if (bottom_data[h * width + w] > maxval) { maxidx = h * width + w; maxval = bottom_data[maxidx]; } } } top_data[index] = maxval; if (mask) { mask[index] = maxidx; } else { top_mask[index] = maxidx; } } } template <typename Dtype> __global__ void AvePoolForward(const int nthreads, const Dtype* bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* top_data) { CUDA_KERNEL_LOOP(index, nthreads) { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % 
channels; int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); int pool_size = (hend - hstart) * (wend - wstart); hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, height); wend = min(wend, width); Dtype aveval = 0; bottom_data += (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { aveval += bottom_data[h * width + w]; } } top_data[index] = aveval / pool_size; } } template <typename Dtype> __global__ void StoPoolForwardTrain(const int nthreads, const Dtype* bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, Dtype* rand_idx, Dtype* top_data) { CUDA_KERNEL_LOOP(index, nthreads) { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h; int hend = min(hstart + kernel_h, height); int wstart = pw * stride_w; int wend = min(wstart + kernel_w, width); Dtype cumsum = 0.; bottom_data += (n * channels + c) * height * width; // First pass: get sum for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { cumsum += bottom_data[h * width + w]; } } float thres = rand_idx[index] * cumsum; // Second pass: get value, and set index. 
cumsum = 0; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { cumsum += bottom_data[h * width + w]; if (cumsum >= thres) { rand_idx[index] = ((n * channels + c) * height + h) * width + w; top_data[index] = bottom_data[h * width + w]; return; } } } } } template <typename Dtype> __global__ void StoPoolForwardTest(const int nthreads, const Dtype* bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, Dtype* top_data) { CUDA_KERNEL_LOOP(index, nthreads) { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h; int hend = min(hstart + kernel_h, height); int wstart = pw * stride_w; int wend = min(wstart + kernel_w, width); // We set cumsum to be 0 to avoid divide-by-zero problems Dtype cumsum = FLT_MIN; Dtype cumvalues = 0.; bottom_data += (n * channels + c) * height * width; // First pass: get sum for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { cumsum += bottom_data[h * width + w]; cumvalues += bottom_data[h * width + w] * bottom_data[h * width + w]; } } top_data[index] = cumvalues / cumsum; } } template <typename Dtype> void PoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int count = top[0]->count(); // We'll output the mask to top[1] if it's of size >1. 
const bool use_top_mask = top.size() > 1; int* mask = NULL; Dtype* top_mask = NULL; switch (this->layer_param_.pooling_param().pool()) { case PoolingParameter_PoolMethod_MAX: if (use_top_mask) { top_mask = top[1]->mutable_gpu_data(); } else { mask = max_idx_.mutable_gpu_data(); } // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( MaxPoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data, mask, top_mask); break; case PoolingParameter_PoolMethod_AVE: // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( AvePoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data); break; case PoolingParameter_PoolMethod_STOCHASTIC: if (this->phase_ == TRAIN) { // We need to create the random index as well. 
caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1), rand_idx_.mutable_gpu_data()); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( StoPoolForwardTrain<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, rand_idx_.mutable_gpu_data(), top_data); } else { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( StoPoolForwardTest<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, top_data); } break; default: LOG(FATAL) << "Unknown pooling method."; } CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void MaxPoolBackward(const int nthreads, const Dtype* top_diff, const int* mask, const Dtype* top_mask, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset int w = index % width; int h = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; int phstart = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; int phend = min((h + pad_h) / stride_h + 1, pooled_height); int pwstart = (w + pad_w < kernel_w) ? 
0 : (w + pad_w - kernel_w) / stride_w + 1; int pwend = min((w + pad_w) / stride_w + 1, pooled_width); Dtype gradient = 0; int offset = (n * channels + c) * pooled_height * pooled_width; top_diff += offset; if (mask) { mask += offset; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (mask[ph * pooled_width + pw] == h * width + w) { gradient += top_diff[ph * pooled_width + pw]; } } } } else { top_mask += offset; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (top_mask[ph * pooled_width + pw] == h * width + w) { gradient += top_diff[ph * pooled_width + pw]; } } } } bottom_diff[index] = gradient; } } template <typename Dtype> __global__ void AvePoolBackward(const int nthreads, const Dtype* top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset int w = index % width + pad_w; int h = (index / width) % height + pad_h; int c = (index / width / height) % channels; int n = index / width / height / channels; int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; int phend = min(h / stride_h + 1, pooled_height); int pwstart = (w < kernel_w) ? 
0 : (w - kernel_w) / stride_w + 1; int pwend = min(w / stride_w + 1, pooled_width); Dtype gradient = 0; top_diff += (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); int pool_size = (hend - hstart) * (wend - wstart); gradient += top_diff[ph * pooled_width + pw] / pool_size; } } bottom_diff[index] = gradient; } } template <typename Dtype> __global__ void StoPoolBackward(const int nthreads, const Dtype* rand_idx, const Dtype* top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset int w = index % width; int h = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; int phend = min(h / stride_h + 1, pooled_height); int pwstart = (w < kernel_w) ? 
0 : (w - kernel_w) / stride_w + 1; int pwend = min(w / stride_w + 1, pooled_width); Dtype gradient = 0; rand_idx += (n * channels + c) * pooled_height * pooled_width; top_diff += (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { gradient += top_diff[ph * pooled_width + pw] * (index == static_cast<int>(rand_idx[ph * pooled_width + pw])); } } bottom_diff[index] = gradient; } } template <typename Dtype> void PoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (!propagate_down[0]) { return; } const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); caffe_gpu_set(count, Dtype(0.), bottom_diff); // We'll output the mask to top[1] if it's of size >1. const bool use_top_mask = top.size() > 1; const int* mask = NULL; const Dtype* top_mask = NULL; switch (this->layer_param_.pooling_param().pool()) { case PoolingParameter_PoolMethod_MAX: if (use_top_mask) { top_mask = top[1]->gpu_data(); } else { mask = max_idx_.gpu_data(); } // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( MaxPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_diff, mask, top_mask, top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff); break; case PoolingParameter_PoolMethod_AVE: // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( AvePoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_diff, top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff); break; case PoolingParameter_PoolMethod_STOCHASTIC: // NOLINT_NEXT_LINE(whitespace/operators) 
hipLaunchKernelGGL(( StoPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, rand_idx_.gpu_data(), top_diff, top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, bottom_diff); break; default: LOG(FATAL) << "Unknown pooling method."; } CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(PoolingLayer); } // namespace itf
e49022214850c633ee31ad10ca5b14ac808ae459.cu
#include <algorithm> #include <cfloat> #include <vector> #include "itf/engine/layer.hpp" #include "itf/engine/util/math_functions.hpp" #include "itf/engine/vision_layers.hpp" namespace itf { template <typename Dtype> __global__ void MaxPoolForward(const int nthreads, const Dtype* bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* top_data, int* mask, Dtype* top_mask) { CUDA_KERNEL_LOOP(index, nthreads) { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); Dtype maxval = -FLT_MAX; int maxidx = -1; bottom_data += (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { if (bottom_data[h * width + w] > maxval) { maxidx = h * width + w; maxval = bottom_data[maxidx]; } } } top_data[index] = maxval; if (mask) { mask[index] = maxidx; } else { top_mask[index] = maxidx; } } } template <typename Dtype> __global__ void AvePoolForward(const int nthreads, const Dtype* bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* top_data) { CUDA_KERNEL_LOOP(index, nthreads) { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; int hstart = ph * 
stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); int pool_size = (hend - hstart) * (wend - wstart); hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, height); wend = min(wend, width); Dtype aveval = 0; bottom_data += (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { aveval += bottom_data[h * width + w]; } } top_data[index] = aveval / pool_size; } } template <typename Dtype> __global__ void StoPoolForwardTrain(const int nthreads, const Dtype* bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, Dtype* rand_idx, Dtype* top_data) { CUDA_KERNEL_LOOP(index, nthreads) { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h; int hend = min(hstart + kernel_h, height); int wstart = pw * stride_w; int wend = min(wstart + kernel_w, width); Dtype cumsum = 0.; bottom_data += (n * channels + c) * height * width; // First pass: get sum for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { cumsum += bottom_data[h * width + w]; } } float thres = rand_idx[index] * cumsum; // Second pass: get value, and set index. 
cumsum = 0; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { cumsum += bottom_data[h * width + w]; if (cumsum >= thres) { rand_idx[index] = ((n * channels + c) * height + h) * width + w; top_data[index] = bottom_data[h * width + w]; return; } } } } } template <typename Dtype> __global__ void StoPoolForwardTest(const int nthreads, const Dtype* bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, Dtype* top_data) { CUDA_KERNEL_LOOP(index, nthreads) { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h; int hend = min(hstart + kernel_h, height); int wstart = pw * stride_w; int wend = min(wstart + kernel_w, width); // We set cumsum to be 0 to avoid divide-by-zero problems Dtype cumsum = FLT_MIN; Dtype cumvalues = 0.; bottom_data += (n * channels + c) * height * width; // First pass: get sum for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { cumsum += bottom_data[h * width + w]; cumvalues += bottom_data[h * width + w] * bottom_data[h * width + w]; } } top_data[index] = cumvalues / cumsum; } } template <typename Dtype> void PoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int count = top[0]->count(); // We'll output the mask to top[1] if it's of size >1. 
const bool use_top_mask = top.size() > 1; int* mask = NULL; Dtype* top_mask = NULL; switch (this->layer_param_.pooling_param().pool()) { case PoolingParameter_PoolMethod_MAX: if (use_top_mask) { top_mask = top[1]->mutable_gpu_data(); } else { mask = max_idx_.mutable_gpu_data(); } // NOLINT_NEXT_LINE(whitespace/operators) MaxPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data, mask, top_mask); break; case PoolingParameter_PoolMethod_AVE: // NOLINT_NEXT_LINE(whitespace/operators) AvePoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data); break; case PoolingParameter_PoolMethod_STOCHASTIC: if (this->phase_ == TRAIN) { // We need to create the random index as well. 
caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1), rand_idx_.mutable_gpu_data()); // NOLINT_NEXT_LINE(whitespace/operators) StoPoolForwardTrain<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, rand_idx_.mutable_gpu_data(), top_data); } else { // NOLINT_NEXT_LINE(whitespace/operators) StoPoolForwardTest<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, top_data); } break; default: LOG(FATAL) << "Unknown pooling method."; } CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void MaxPoolBackward(const int nthreads, const Dtype* top_diff, const int* mask, const Dtype* top_mask, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset int w = index % width; int h = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; int phstart = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; int phend = min((h + pad_h) / stride_h + 1, pooled_height); int pwstart = (w + pad_w < kernel_w) ? 
0 : (w + pad_w - kernel_w) / stride_w + 1; int pwend = min((w + pad_w) / stride_w + 1, pooled_width); Dtype gradient = 0; int offset = (n * channels + c) * pooled_height * pooled_width; top_diff += offset; if (mask) { mask += offset; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (mask[ph * pooled_width + pw] == h * width + w) { gradient += top_diff[ph * pooled_width + pw]; } } } } else { top_mask += offset; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (top_mask[ph * pooled_width + pw] == h * width + w) { gradient += top_diff[ph * pooled_width + pw]; } } } } bottom_diff[index] = gradient; } } template <typename Dtype> __global__ void AvePoolBackward(const int nthreads, const Dtype* top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset int w = index % width + pad_w; int h = (index / width) % height + pad_h; int c = (index / width / height) % channels; int n = index / width / height / channels; int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; int phend = min(h / stride_h + 1, pooled_height); int pwstart = (w < kernel_w) ? 
0 : (w - kernel_w) / stride_w + 1; int pwend = min(w / stride_w + 1, pooled_width); Dtype gradient = 0; top_diff += (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); int pool_size = (hend - hstart) * (wend - wstart); gradient += top_diff[ph * pooled_width + pw] / pool_size; } } bottom_diff[index] = gradient; } } template <typename Dtype> __global__ void StoPoolBackward(const int nthreads, const Dtype* rand_idx, const Dtype* top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset int w = index % width; int h = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; int phend = min(h / stride_h + 1, pooled_height); int pwstart = (w < kernel_w) ? 
0 : (w - kernel_w) / stride_w + 1; int pwend = min(w / stride_w + 1, pooled_width); Dtype gradient = 0; rand_idx += (n * channels + c) * pooled_height * pooled_width; top_diff += (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { gradient += top_diff[ph * pooled_width + pw] * (index == static_cast<int>(rand_idx[ph * pooled_width + pw])); } } bottom_diff[index] = gradient; } } template <typename Dtype> void PoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (!propagate_down[0]) { return; } const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); caffe_gpu_set(count, Dtype(0.), bottom_diff); // We'll output the mask to top[1] if it's of size >1. const bool use_top_mask = top.size() > 1; const int* mask = NULL; const Dtype* top_mask = NULL; switch (this->layer_param_.pooling_param().pool()) { case PoolingParameter_PoolMethod_MAX: if (use_top_mask) { top_mask = top[1]->gpu_data(); } else { mask = max_idx_.gpu_data(); } // NOLINT_NEXT_LINE(whitespace/operators) MaxPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_diff, mask, top_mask, top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff); break; case PoolingParameter_PoolMethod_AVE: // NOLINT_NEXT_LINE(whitespace/operators) AvePoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_diff, top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff); break; case PoolingParameter_PoolMethod_STOCHASTIC: // NOLINT_NEXT_LINE(whitespace/operators) StoPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, 
rand_idx_.gpu_data(), top_diff, top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, bottom_diff); break; default: LOG(FATAL) << "Unknown pooling method."; } CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(PoolingLayer); } // namespace itf
cud.hip
// !!! This is a file automatically generated by hipify!!! size_t free, total; printf("\n"); hipMemGetInfo(&free,&total); printf("%d KB free of total %d KB\n",free/1024,total/1024);
cud.cu
size_t free, total; printf("\n"); cudaMemGetInfo(&free,&total); printf("%d KB free of total %d KB\n",free/1024,total/1024);
926bbe2313219af76afbe7a5d9aace9d3be5b3fc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <opencv2/gpu/device/vec_math.hpp> #include <opencv2/gpu/device/functional.hpp> #include "detector.h" void writeMatToFile(const Mat &image, char* file_name) { ofstream fout(file_name); float *ptr; ptr=(float *)image.data; for(int i=0; i< image.rows; i++){ for(int j=0; j<image.cols; j++){ fout<<*ptr<<"\t"; ptr++; } fout<<endl; } fout.close(); } void read_yml(GpuMat& result, string yml_name, string var_name) { Mat temp; FileStorage fin( yml_name, FileStorage::READ); fin[var_name] >> temp; temp.convertTo(temp, CV_32FC1); result.upload(temp); } void show_gpu_result(const GpuMat& image, int index, int height, string yml_name, string image_name) { gpu::GpuMat d_score(Size(image.cols,1), CV_32FC1, (void*)(image.ptr<float>(index)), image.step); Mat score(d_score); if(yml_name!=""){ FileStorage fb(yml_name, FileStorage::WRITE); fb << "score" << score; fb.release(); } if(image_name!=""){ score=score.reshape(0, height); normalize( score, score, 0, 255, NORM_MINMAX, -1); imwrite(image_name,score); } } void write_gpu_result(const GpuMat& image, int index, int height, string txt_name, string image_name) { gpu::GpuMat d_score(Size(image.cols,1), CV_32FC1, (void*)(image.ptr<float>(index)), image.step); Mat score(d_score); score=score.reshape(0, height); if(txt_name!=""){ char file_name[txt_name.size()+1]; strcpy(file_name, txt_name.c_str()); ofstream fout(file_name); float *ptr; ptr=(float *)score.data; for(int i=0; i< score.rows; i++){ for(int j=0; j<score.cols; j++){ fout<<*ptr<<"\t"; ptr++; } fout<<endl; } fout.close(); } if(image_name!=""){ normalize( score, score, 0, 255, NORM_MINMAX, -1); imwrite(image_name,score); } } //vectorized normalized cross-correlation void compute_response_map(const GpuMat& d_image, const GpuMat& temp_dst, const GpuMat& d_log, int temp_w, int temp_h, GpuMat& level_score) { dim3 threads(16, 8); gpu::GpuMat d_img, log_img; log_img.create(d_image.rows, 
d_image.cols, CV_32FC1); int img_w = d_image.cols - d_log.cols + 1; int img_h = d_image.rows - d_log.rows + 1; d_img.create(img_h, img_w, CV_32FC1); // prepare for transforming the image gpu::GpuMat img_dst; int bound_rows= img_h - temp_h + 1; int bound_cols= img_w - temp_w + 1; img_dst.create(temp_w * temp_h, bound_cols * bound_rows, CV_32FC1); gpu::GpuMat image_mean, image_stdev; image_mean.create( bound_rows, bound_cols, CV_32FC1); image_stdev.create( bound_rows, bound_cols, CV_32FC1); gpu::GpuMat d_test; //d_test.create(temp_dst.rows, img_dst.cols, CV_32FC1); // convolve the image with the given LOG kernel dim3 grid3(divUp(d_image.cols, threads.x), divUp(d_image.rows, threads.y)); hipLaunchKernelGGL(( convolution), dim3(grid3), dim3(threads), 0, 0, d_log.cols, d_log.rows, img_w, img_h, d_image, d_log, d_img); //transpose the image dim3 grid4(divUp(bound_cols, threads.x), divUp(bound_rows, threads.y)); hipLaunchKernelGGL(( mean_stdev), dim3(grid4), dim3(threads), 0, 0, temp_w, temp_h, bound_cols, bound_rows, d_img, image_mean, image_stdev, img_dst); // matrix multiplication gpu::gemm( temp_dst, img_dst, 1, d_test, 0, level_score, 0); /*hipFree(log_img.ptr()); hipFree(d_img.ptr()); hipFree(image_mean.ptr()); hipFree(image_stdev.ptr()); hipFree(img_dst.ptr());*/ } //visulize the alignment result void visualize_alignment(Mat &depth, float position[][3], int angle[][3], map<float, Output, greater<float> > max_map, vector<int>& temp_num, int m) { Mat templ, image; char filename[50]; int ROW_NUM = 1080; int COL_NUM = 1920; float temp_scale=0.5; Output output=max_map.begin()->second; //calculate the 2d object center int rows= (output.rows + 45/output.scale)*2.5; int cols= (output.cols + 45/output.scale)*2.5; //cout<< "rows" << rows << " " << cols << endl; //convert 2d position back to 3d position sprintf(filename, "1/frame%04d.txt", m); std::ifstream file(filename); for(int row=0; row<ROW_NUM; row++) for (int col=0; col<COL_NUM; col++) file >> depth.at<float>(row,col); 
//file >> depth[row][col]; //resize( depth, depth, Size(depth.cols*0.4, depth.rows*0.4) ); position[m][2]=depth.at<float>(rows, cols)/1000; position[m][0]=(cols-944.9)*(position[m][2]/1068.5); position[m][1]=(rows-549.5)*(position[m][2]/1275.9); cout << position[m][2] << " " << output.scale << endl; double min, max; minMaxIdx(depth, &min, &max); //calculate the angles int t=( output.id/6 ) % 3; int j=( output.id )%6; angle[m][0]= (t-1)*15; angle[m][1]= 10*j+20; angle[m][2]= 0; //cout<< angle[m][0] << " " << angle[m][1] << " " << angle[m][2] << " " << endl; temp_num.push_back(output.id+1); map < float, Output>::iterator iter= max_map.begin(); for(int top=0; top<5; top++) { rows= iter->second.rows; cols= iter->second.cols; //printf("cols=%d,rows=%d\t", cols, rows); float img_scale = iter->second.scale; sprintf(filename, "model/template%03d.png",iter->second.id); templ = imread(filename,1); resize( templ, templ, Size(templ.cols*temp_scale/img_scale,templ.rows*temp_scale/img_scale) ); sprintf(filename, "1/frame%04d.jpg", m); image = imread(filename,1); resize( image, image, Size(image.cols*0.4,image.rows*0.4) ); //resize( image, image, Size(image.cols*0.8,image.rows*0.8) ); for(int row = 0; row < templ.rows; row++) for (int col = 0; col < templ.cols; col++){ if(templ.at<Vec3b>(row, col)[0]==0 && templ.at<Vec3b>(row, col)[1]==0 && templ.at<Vec3b>(row, col)[2]==0) continue; else{ image.at<Vec3b>(row+rows, col+cols)[0]=templ.at<Vec3b>(row, col)[0]; image.at<Vec3b>(row+rows, col+cols)[1]=templ.at<Vec3b>(row, col)[1]; image.at<Vec3b>(row+rows, col+cols)[2]=templ.at<Vec3b>(row, col)[2]; //printf("%d",templ.at<Vec3b>(row, col)[0]); } } //sprintf(filename, "frame%03d_top%d",m,top+1); //imshow(filename, image); sprintf(filename, "top5/frame%03d_top%d.png",m,top+1); imwrite(filename,image); //depth alignment sprintf(filename, "model/template%03d.png",iter->second.id); templ = imread(filename,0); templ.convertTo(templ, CV_8UC1); resize( templ, templ, 
Size(templ.cols*temp_scale/img_scale,templ.rows*temp_scale/img_scale) ); Mat adjMap; depth.convertTo(adjMap, CV_8UC1, 255 / (max-min), -min); sprintf(filename, "1/depth%04d.png",m); imwrite(filename, adjMap); resize( adjMap, adjMap, Size(adjMap.cols*0.4,adjMap.rows*0.4) ); for(int row = 0; row < templ.rows; row++) for (int col = 0; col < templ.cols; col++){ if(templ.at<uchar>(row, col)==0) continue; else{ adjMap.at<uchar>(row+rows, col+cols)=templ.at<uchar>(row, col); //printf("%d",templ.at<Vec3b>(row, col)[0]); } } //sprintf(filename, "frame%03d_top%d",m,top+1); //imshow(filename, image); sprintf(filename, "top5/depth%03d_top%d.png",m,top+1); imwrite(filename,adjMap); iter++; } } // convert 2d position back to 3D position void convert2Dto3D( Mat depth_image, Mat intrinsics, map<float, Output, greater<float> > max_map, string object_name, cv::Vec3f& position, cv::Vec3f& angle){ Output output=max_map.begin()->second; //calculate the 2d object center int rows= (output.rows + 45/output.scale)*2.5; int cols= (output.cols + 45/output.scale)*2.5; float p_3d[3]; p_3d[2] =depth_image.at<float>(rows, cols)/1000; //cout<<"Depth reading at bowl is "<<p_3d[2]<<endl; p_3d[0]=(cols-intrinsics.at<float>(0,2))*(p_3d[2]/intrinsics.at<float>(0,0)); //cout<<"Intrinsics val "<<intrinsics.at<float>(1,0)<<endl; p_3d[1]=(rows-intrinsics.at<float>(1,2))*(p_3d[2]/intrinsics.at<float>(1,1)); position= Vec3f(p_3d[0], p_3d[1], p_3d[2]); //cout << position[m][2] << " " << output.scale << endl; //calculate the angles int t=( output.id/6 ) % 3; int j=( output.id )%6; angle = Vec3f( (t-1)*15, 10*j+20, 0); } // the controller of the program bool DetectObject(Mat color_image, Mat depth_image, Mat intrinsics, string training_file_path, string object_name, cv::Vec3f& position, cv::Vec3f& angle){ Mat image, img, templ, img_display; gpu::GpuMat d_image; double maxVal; Point maxLoc; vector<int> temp_num; dim3 threads(16, 8); size_t time = clock(); gpu::GpuMat d_log; read_yml(d_log, 
training_file_path+"pre/log_19.yml", "c"); gpu::GpuMat temp_level1; string filename=training_file_path+"pre/"+object_name+".yml"; cout<<filename<<endl; read_yml(temp_level1, filename, "temp_level1"); //read_yml(temp_level1, "pre/temp_level1.yml", "temp_level1"); int temp_w=72; int temp_h=72; vector<float> best_scale; cvtColor( color_image, image, CV_BGR2GRAY ); resize( image, image, Size(image.cols*0.4,image.rows*0.4) ); map<float, Output, greater<float> > max_map; // eight scales for(int n=0; n<8; n++){ //process the image float img_scale = 0.6 + 0.05*n; resize( image, img, Size(image.cols*img_scale,image.rows*img_scale) ); img.copyTo( img_display ); d_image.upload(img); int img_w = img.cols - d_log.cols - temp_w + 2; int img_h = img.rows - d_log.rows - temp_h + 2; //calculate the first level score gpu::GpuMat firstlevel_score; firstlevel_score.create(18, img_w*img_h, CV_32FC1); compute_response_map(d_image, temp_level1, d_log, temp_w, temp_h, firstlevel_score); // multiple hypotheses for(int v=0;v<9;v++){ // number of contours int interval=firstlevel_score.rows/9; int t=v*interval; gpu::GpuMat vec_t(Size(firstlevel_score.cols,interval), CV_32FC1, (void*)(firstlevel_score.ptr<float>(t)), firstlevel_score.step); gpu::minMaxLoc( vec_t, NULL, &maxVal, NULL, &maxLoc); Output output; //cout<< "max" << maxLoc.x << endl; output.rows= (maxLoc.x/ img_w) /img_scale; output.cols= (maxLoc.x % img_w) /img_scale; output.id = maxLoc.y+t; output.scale = img_scale; //if(maxVal>0.98) max_map[maxVal] = output; } } //set a threshold for object detection if((max_map.begin()->first) > 0.55){ //convert 2d position back to 3D position convert2Dto3D( depth_image, intrinsics, max_map, object_name, position, angle); printf("Runtime: %f ms\n", (double(clock() - time)/CLOCKS_PER_SEC*1000.0)); return 1; } else return 0; }
926bbe2313219af76afbe7a5d9aace9d3be5b3fc.cu
#include <opencv2/gpu/device/vec_math.hpp> #include <opencv2/gpu/device/functional.hpp> #include "detector.h" void writeMatToFile(const Mat &image, char* file_name) { ofstream fout(file_name); float *ptr; ptr=(float *)image.data; for(int i=0; i< image.rows; i++){ for(int j=0; j<image.cols; j++){ fout<<*ptr<<"\t"; ptr++; } fout<<endl; } fout.close(); } void read_yml(GpuMat& result, string yml_name, string var_name) { Mat temp; FileStorage fin( yml_name, FileStorage::READ); fin[var_name] >> temp; temp.convertTo(temp, CV_32FC1); result.upload(temp); } void show_gpu_result(const GpuMat& image, int index, int height, string yml_name, string image_name) { gpu::GpuMat d_score(Size(image.cols,1), CV_32FC1, (void*)(image.ptr<float>(index)), image.step); Mat score(d_score); if(yml_name!=""){ FileStorage fb(yml_name, FileStorage::WRITE); fb << "score" << score; fb.release(); } if(image_name!=""){ score=score.reshape(0, height); normalize( score, score, 0, 255, NORM_MINMAX, -1); imwrite(image_name,score); } } void write_gpu_result(const GpuMat& image, int index, int height, string txt_name, string image_name) { gpu::GpuMat d_score(Size(image.cols,1), CV_32FC1, (void*)(image.ptr<float>(index)), image.step); Mat score(d_score); score=score.reshape(0, height); if(txt_name!=""){ char file_name[txt_name.size()+1]; strcpy(file_name, txt_name.c_str()); ofstream fout(file_name); float *ptr; ptr=(float *)score.data; for(int i=0; i< score.rows; i++){ for(int j=0; j<score.cols; j++){ fout<<*ptr<<"\t"; ptr++; } fout<<endl; } fout.close(); } if(image_name!=""){ normalize( score, score, 0, 255, NORM_MINMAX, -1); imwrite(image_name,score); } } //vectorized normalized cross-correlation void compute_response_map(const GpuMat& d_image, const GpuMat& temp_dst, const GpuMat& d_log, int temp_w, int temp_h, GpuMat& level_score) { dim3 threads(16, 8); gpu::GpuMat d_img, log_img; log_img.create(d_image.rows, d_image.cols, CV_32FC1); int img_w = d_image.cols - d_log.cols + 1; int img_h = d_image.rows 
- d_log.rows + 1; d_img.create(img_h, img_w, CV_32FC1); // prepare for transforming the image gpu::GpuMat img_dst; int bound_rows= img_h - temp_h + 1; int bound_cols= img_w - temp_w + 1; img_dst.create(temp_w * temp_h, bound_cols * bound_rows, CV_32FC1); gpu::GpuMat image_mean, image_stdev; image_mean.create( bound_rows, bound_cols, CV_32FC1); image_stdev.create( bound_rows, bound_cols, CV_32FC1); gpu::GpuMat d_test; //d_test.create(temp_dst.rows, img_dst.cols, CV_32FC1); // convolve the image with the given LOG kernel dim3 grid3(divUp(d_image.cols, threads.x), divUp(d_image.rows, threads.y)); convolution<<<grid3, threads, 0>>>(d_log.cols, d_log.rows, img_w, img_h, d_image, d_log, d_img); //transpose the image dim3 grid4(divUp(bound_cols, threads.x), divUp(bound_rows, threads.y)); mean_stdev<<<grid4, threads, 0>>>(temp_w, temp_h, bound_cols, bound_rows, d_img, image_mean, image_stdev, img_dst); // matrix multiplication gpu::gemm( temp_dst, img_dst, 1, d_test, 0, level_score, 0); /*cudaFree(log_img.ptr()); cudaFree(d_img.ptr()); cudaFree(image_mean.ptr()); cudaFree(image_stdev.ptr()); cudaFree(img_dst.ptr());*/ } //visulize the alignment result void visualize_alignment(Mat &depth, float position[][3], int angle[][3], map<float, Output, greater<float> > max_map, vector<int>& temp_num, int m) { Mat templ, image; char filename[50]; int ROW_NUM = 1080; int COL_NUM = 1920; float temp_scale=0.5; Output output=max_map.begin()->second; //calculate the 2d object center int rows= (output.rows + 45/output.scale)*2.5; int cols= (output.cols + 45/output.scale)*2.5; //cout<< "rows" << rows << " " << cols << endl; //convert 2d position back to 3d position sprintf(filename, "1/frame%04d.txt", m); std::ifstream file(filename); for(int row=0; row<ROW_NUM; row++) for (int col=0; col<COL_NUM; col++) file >> depth.at<float>(row,col); //file >> depth[row][col]; //resize( depth, depth, Size(depth.cols*0.4, depth.rows*0.4) ); position[m][2]=depth.at<float>(rows, cols)/1000; 
position[m][0]=(cols-944.9)*(position[m][2]/1068.5); position[m][1]=(rows-549.5)*(position[m][2]/1275.9); cout << position[m][2] << " " << output.scale << endl; double min, max; minMaxIdx(depth, &min, &max); //calculate the angles int t=( output.id/6 ) % 3; int j=( output.id )%6; angle[m][0]= (t-1)*15; angle[m][1]= 10*j+20; angle[m][2]= 0; //cout<< angle[m][0] << " " << angle[m][1] << " " << angle[m][2] << " " << endl; temp_num.push_back(output.id+1); map < float, Output>::iterator iter= max_map.begin(); for(int top=0; top<5; top++) { rows= iter->second.rows; cols= iter->second.cols; //printf("cols=%d,rows=%d\t", cols, rows); float img_scale = iter->second.scale; sprintf(filename, "model/template%03d.png",iter->second.id); templ = imread(filename,1); resize( templ, templ, Size(templ.cols*temp_scale/img_scale,templ.rows*temp_scale/img_scale) ); sprintf(filename, "1/frame%04d.jpg", m); image = imread(filename,1); resize( image, image, Size(image.cols*0.4,image.rows*0.4) ); //resize( image, image, Size(image.cols*0.8,image.rows*0.8) ); for(int row = 0; row < templ.rows; row++) for (int col = 0; col < templ.cols; col++){ if(templ.at<Vec3b>(row, col)[0]==0 && templ.at<Vec3b>(row, col)[1]==0 && templ.at<Vec3b>(row, col)[2]==0) continue; else{ image.at<Vec3b>(row+rows, col+cols)[0]=templ.at<Vec3b>(row, col)[0]; image.at<Vec3b>(row+rows, col+cols)[1]=templ.at<Vec3b>(row, col)[1]; image.at<Vec3b>(row+rows, col+cols)[2]=templ.at<Vec3b>(row, col)[2]; //printf("%d",templ.at<Vec3b>(row, col)[0]); } } //sprintf(filename, "frame%03d_top%d",m,top+1); //imshow(filename, image); sprintf(filename, "top5/frame%03d_top%d.png",m,top+1); imwrite(filename,image); //depth alignment sprintf(filename, "model/template%03d.png",iter->second.id); templ = imread(filename,0); templ.convertTo(templ, CV_8UC1); resize( templ, templ, Size(templ.cols*temp_scale/img_scale,templ.rows*temp_scale/img_scale) ); Mat adjMap; depth.convertTo(adjMap, CV_8UC1, 255 / (max-min), -min); sprintf(filename, 
"1/depth%04d.png",m); imwrite(filename, adjMap); resize( adjMap, adjMap, Size(adjMap.cols*0.4,adjMap.rows*0.4) ); for(int row = 0; row < templ.rows; row++) for (int col = 0; col < templ.cols; col++){ if(templ.at<uchar>(row, col)==0) continue; else{ adjMap.at<uchar>(row+rows, col+cols)=templ.at<uchar>(row, col); //printf("%d",templ.at<Vec3b>(row, col)[0]); } } //sprintf(filename, "frame%03d_top%d",m,top+1); //imshow(filename, image); sprintf(filename, "top5/depth%03d_top%d.png",m,top+1); imwrite(filename,adjMap); iter++; } } // convert 2d position back to 3D position void convert2Dto3D( Mat depth_image, Mat intrinsics, map<float, Output, greater<float> > max_map, string object_name, cv::Vec3f& position, cv::Vec3f& angle){ Output output=max_map.begin()->second; //calculate the 2d object center int rows= (output.rows + 45/output.scale)*2.5; int cols= (output.cols + 45/output.scale)*2.5; float p_3d[3]; p_3d[2] =depth_image.at<float>(rows, cols)/1000; //cout<<"Depth reading at bowl is "<<p_3d[2]<<endl; p_3d[0]=(cols-intrinsics.at<float>(0,2))*(p_3d[2]/intrinsics.at<float>(0,0)); //cout<<"Intrinsics val "<<intrinsics.at<float>(1,0)<<endl; p_3d[1]=(rows-intrinsics.at<float>(1,2))*(p_3d[2]/intrinsics.at<float>(1,1)); position= Vec3f(p_3d[0], p_3d[1], p_3d[2]); //cout << position[m][2] << " " << output.scale << endl; //calculate the angles int t=( output.id/6 ) % 3; int j=( output.id )%6; angle = Vec3f( (t-1)*15, 10*j+20, 0); } // the controller of the program bool DetectObject(Mat color_image, Mat depth_image, Mat intrinsics, string training_file_path, string object_name, cv::Vec3f& position, cv::Vec3f& angle){ Mat image, img, templ, img_display; gpu::GpuMat d_image; double maxVal; Point maxLoc; vector<int> temp_num; dim3 threads(16, 8); size_t time = clock(); gpu::GpuMat d_log; read_yml(d_log, training_file_path+"pre/log_19.yml", "c"); gpu::GpuMat temp_level1; string filename=training_file_path+"pre/"+object_name+".yml"; cout<<filename<<endl; read_yml(temp_level1, 
filename, "temp_level1"); //read_yml(temp_level1, "pre/temp_level1.yml", "temp_level1"); int temp_w=72; int temp_h=72; vector<float> best_scale; cvtColor( color_image, image, CV_BGR2GRAY ); resize( image, image, Size(image.cols*0.4,image.rows*0.4) ); map<float, Output, greater<float> > max_map; // eight scales for(int n=0; n<8; n++){ //process the image float img_scale = 0.6 + 0.05*n; resize( image, img, Size(image.cols*img_scale,image.rows*img_scale) ); img.copyTo( img_display ); d_image.upload(img); int img_w = img.cols - d_log.cols - temp_w + 2; int img_h = img.rows - d_log.rows - temp_h + 2; //calculate the first level score gpu::GpuMat firstlevel_score; firstlevel_score.create(18, img_w*img_h, CV_32FC1); compute_response_map(d_image, temp_level1, d_log, temp_w, temp_h, firstlevel_score); // multiple hypotheses for(int v=0;v<9;v++){ // number of contours int interval=firstlevel_score.rows/9; int t=v*interval; gpu::GpuMat vec_t(Size(firstlevel_score.cols,interval), CV_32FC1, (void*)(firstlevel_score.ptr<float>(t)), firstlevel_score.step); gpu::minMaxLoc( vec_t, NULL, &maxVal, NULL, &maxLoc); Output output; //cout<< "max" << maxLoc.x << endl; output.rows= (maxLoc.x/ img_w) /img_scale; output.cols= (maxLoc.x % img_w) /img_scale; output.id = maxLoc.y+t; output.scale = img_scale; //if(maxVal>0.98) max_map[maxVal] = output; } } //set a threshold for object detection if((max_map.begin()->first) > 0.55){ //convert 2d position back to 3D position convert2Dto3D( depth_image, intrinsics, max_map, object_name, position, angle); printf("Runtime: %f ms\n", (double(clock() - time)/CLOCKS_PER_SEC*1000.0)); return 1; } else return 0; }
047af0f38417549058c620bd709b018ebb8aece6.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *  * Neither the name of NVIDIA CORPORATION nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <cmath>
#include <iostream>
#include <cstdio>
#include <cstdlib>
#include <cstring>   // memset
#include <utility>   // std::swap

#include "nvtx_macros.h"

// Report (but do not abort on) any error returned by a HIP runtime call.
#define CUDA_CALL( call ) \
{ \
    hipError_t err = call; \
    if ( hipSuccess != err) \
        fprintf(stderr, "CUDA error for %s in %d of %s : %s.\n", #call , __LINE__ , __FILE__ ,hipGetErrorString(err));\
}

const float PI = 2.0f*std::asin(1.0f);

// One weighted Jacobi sweep over the interior points of the nx x ny grid:
//   a_new[i] = weight * (4-point average of a) + (1 - weight) * a[i]
// Boundary rows/columns are untouched. Uses a 2D grid-stride loop, so any
// grid/block configuration produces a correct (if slower) result.
__global__ void jacobi_iteration(float * __restrict__ const a_new,
                                 float const * __restrict__ const a,
                                 const int nx, const int ny,
                                 const float weight)
{
    for (int iy = 1+blockIdx.y*blockDim.y+threadIdx.y; iy < (ny-1); iy += gridDim.y*blockDim.y)
    {
        for (int ix = 1+blockIdx.x*blockDim.x+threadIdx.x; ix < (nx-1); ix += gridDim.x*blockDim.x)
        {
            const float a_new_val = 0.25f * ( a[(iy+0)*nx+(ix+1)] + a[(iy+0)*nx+(ix-1)]
                                            + a[(iy+1)*nx+(ix+0)] + a[(iy-1)*nx+(ix+0)] );
            a_new[iy*nx+ix] = weight*a_new_val + (1.0f-weight)*a[iy*nx+ix];
        }
    }
}

// Make the field periodic in y: copy the second-to-last interior row into
// row 0 and the first interior row into row ny-1. 1D grid-stride loop over x.
__global__ void apply_periodic_bc(float * __restrict__ const a,
                                  const int nx, const int ny)
{
    for (int ix = blockIdx.x*blockDim.x+threadIdx.x; ix < nx; ix += gridDim.x*blockDim.x)
    {
        a[     0*nx+ix] = a[(ny-2)*nx+ix];
        a[(ny-1)*nx+ix] = a[     1*nx+ix];
    }
}

// Zero both host fields, impose a fixed sinusoidal profile on the left and
// right columns (Dirichlet boundary), and fill all relaxation weights
// with 2/3.
void init(float * __restrict__ const a, float * __restrict__ const a_new,
          const int nx, const int ny,
          float* __restrict__ const weights, const int n_weights)
{
    memset(a,     0, nx*ny*sizeof(float));
    memset(a_new, 0, nx*ny*sizeof(float));

    // set boundary conditions
    for (int iy = 0; iy < ny; ++iy)
    {
        const float y0 = std::sin( 2.0f * PI * iy / (ny-1) );
        a    [iy*nx+0]      = y0;
        a    [iy*nx+(nx-1)] = y0;
        a_new[iy*nx+0]      = y0;
        a_new[iy*nx+(nx-1)] = y0;
    }
    for (int i = 0; i < n_weights; ++i)
    {
        weights[i] = 2.0f/3.0f;
    }
}

int main()
{
    int nx = 512;
    int ny = 512;
    int n_weights = 16;
    const int iter_max = 1000;

    float * a;
    float * a_new;
    float * weights;
    float * d_a;
    float * d_a_new;
    float * d_weights;

    // Host buffers on the heap, device buffers via hipMalloc.
    a       = (float*) malloc (nx * ny * sizeof(float));
    a_new   = (float*) malloc (nx * ny * sizeof(float));
    weights = (float*) malloc (n_weights * sizeof(float));
    if ( !a || !a_new || !weights )
    {
        fprintf(stderr, "Host allocation failed.\n");
        return 1;
    }

    CUDA_CALL(hipMalloc((void **) &d_a,       nx * ny * sizeof(float)));
    CUDA_CALL(hipMalloc((void **) &d_a_new,   nx * ny * sizeof(float)));
    CUDA_CALL(hipMalloc((void **) &d_weights, n_weights * sizeof(float)));

    init(a,a_new,nx,ny,weights,n_weights);

    hipEvent_t start,stop;
    CUDA_CALL(hipEventCreate(&start));
    CUDA_CALL(hipEventCreate(&stop));

    CUDA_CALL(hipDeviceSynchronize());
    CUDA_CALL(hipEventRecord(start));
    PUSH_RANGE("while loop",0)

    int iter = 0;
    const float weight = weights[0];

    // Upload the initialized fields (boundary values included) once,
    // before the iteration loop.
    CUDA_CALL(hipMemcpy(d_a,       a,       nx * ny * sizeof(float), hipMemcpyHostToDevice));
    CUDA_CALL(hipMemcpy(d_a_new,   a_new,   nx * ny * sizeof(float), hipMemcpyHostToDevice));
    CUDA_CALL(hipMemcpy(d_weights, weights, n_weights * sizeof(float), hipMemcpyHostToDevice));

    while ( iter <= iter_max )
    {
        PUSH_RANGE("jacobi step",1)
        hipLaunchKernelGGL(( jacobi_iteration), dim3(dim3(nx/32,ny/4)),dim3(dim3(32,4)), 0, 0, d_a_new,d_a,nx,ny,weight);
        CUDA_CALL(hipGetLastError());
#ifndef NO_SYNC
        CUDA_CALL(hipDeviceSynchronize());
#endif
        POP_RANGE

        // Ping-pong the *device* buffers so the freshly computed field feeds
        // the next sweep. The previous code swapped the host pointers
        // (a, a_new), which the kernels never see, so the iteration never
        // actually advanced. std::swap on raw device pointers is fine: it
        // exchanges the pointer values, not the pointed-to memory.
        std::swap(d_a, d_a_new);

        PUSH_RANGE("periodic boundary conditions",2)
        // Apply periodic boundary conditions to the just-computed field.
        hipLaunchKernelGGL(( apply_periodic_bc), dim3(dim3(nx/128)),dim3(dim3(128)), 0, 0, d_a,nx,ny);
        CUDA_CALL(hipGetLastError());
#ifndef NO_SYNC
        CUDA_CALL(hipDeviceSynchronize());
#endif
        POP_RANGE

        if ( 0 == iter%100 )
        {
#ifdef NO_SYNC
            CUDA_CALL(hipDeviceSynchronize());
#endif
            std::cout<<iter<<std::endl;
        }
        iter++;
    }
    CUDA_CALL(hipEventRecord(stop));
    CUDA_CALL(hipDeviceSynchronize());
    POP_RANGE

    float runtime = 0.0f;
    CUDA_CALL(hipEventElapsedTime(&runtime,start,stop));
    std::cout<<"Runtime "<<runtime/1000.0f<<" seconds."<<std::endl;

    CUDA_CALL(hipEventDestroy(stop));
    CUDA_CALL(hipEventDestroy(start));

    free(a);
    free(a_new);
    free(weights);
    CUDA_CALL(hipFree(d_weights));
    CUDA_CALL(hipFree(d_a_new));
    CUDA_CALL(hipFree(d_a));
    CUDA_CALL(hipDeviceReset());
    return 0;
}
047af0f38417549058c620bd709b018ebb8aece6.cu
/* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *  * Neither the name of NVIDIA CORPORATION nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <cmath>
#include <iostream>
#include <cstdio>
#include <cstdlib>
#include <cstring>   // memset
#include <utility>   // std::swap

#include "nvtx_macros.h"

// Report (but do not abort on) any error returned by a CUDA runtime call.
#define CUDA_CALL( call ) \
{ \
    cudaError_t err = call; \
    if ( cudaSuccess != err) \
        fprintf(stderr, "CUDA error for %s in %d of %s : %s.\n", #call , __LINE__ , __FILE__ ,cudaGetErrorString(err));\
}

const float PI = 2.0f*std::asin(1.0f);

// One weighted Jacobi sweep over the interior points of the nx x ny grid:
//   a_new[i] = weight * (4-point average of a) + (1 - weight) * a[i]
// Boundary rows/columns are untouched. Uses a 2D grid-stride loop, so any
// grid/block configuration produces a correct (if slower) result.
__global__ void jacobi_iteration(float * __restrict__ const a_new,
                                 float const * __restrict__ const a,
                                 const int nx, const int ny,
                                 const float weight)
{
    for (int iy = 1+blockIdx.y*blockDim.y+threadIdx.y; iy < (ny-1); iy += gridDim.y*blockDim.y)
    {
        for (int ix = 1+blockIdx.x*blockDim.x+threadIdx.x; ix < (nx-1); ix += gridDim.x*blockDim.x)
        {
            const float a_new_val = 0.25f * ( a[(iy+0)*nx+(ix+1)] + a[(iy+0)*nx+(ix-1)]
                                            + a[(iy+1)*nx+(ix+0)] + a[(iy-1)*nx+(ix+0)] );
            a_new[iy*nx+ix] = weight*a_new_val + (1.0f-weight)*a[iy*nx+ix];
        }
    }
}

// Make the field periodic in y: copy the second-to-last interior row into
// row 0 and the first interior row into row ny-1. 1D grid-stride loop over x.
__global__ void apply_periodic_bc(float * __restrict__ const a,
                                  const int nx, const int ny)
{
    for (int ix = blockIdx.x*blockDim.x+threadIdx.x; ix < nx; ix += gridDim.x*blockDim.x)
    {
        a[     0*nx+ix] = a[(ny-2)*nx+ix];
        a[(ny-1)*nx+ix] = a[     1*nx+ix];
    }
}

// Zero both host fields, impose a fixed sinusoidal profile on the left and
// right columns (Dirichlet boundary), and fill all relaxation weights
// with 2/3.
void init(float * __restrict__ const a, float * __restrict__ const a_new,
          const int nx, const int ny,
          float* __restrict__ const weights, const int n_weights)
{
    memset(a,     0, nx*ny*sizeof(float));
    memset(a_new, 0, nx*ny*sizeof(float));

    // set boundary conditions
    for (int iy = 0; iy < ny; ++iy)
    {
        const float y0 = std::sin( 2.0f * PI * iy / (ny-1) );
        a    [iy*nx+0]      = y0;
        a    [iy*nx+(nx-1)] = y0;
        a_new[iy*nx+0]      = y0;
        a_new[iy*nx+(nx-1)] = y0;
    }
    for (int i = 0; i < n_weights; ++i)
    {
        weights[i] = 2.0f/3.0f;
    }
}

int main()
{
    int nx = 512;
    int ny = 512;
    int n_weights = 16;
    const int iter_max = 1000;

    float * a;
    float * a_new;
    float * weights;
    float * d_a;
    float * d_a_new;
    float * d_weights;

    // Host buffers on the heap, device buffers via cudaMalloc.
    a       = (float*) malloc (nx * ny * sizeof(float));
    a_new   = (float*) malloc (nx * ny * sizeof(float));
    weights = (float*) malloc (n_weights * sizeof(float));
    if ( !a || !a_new || !weights )
    {
        fprintf(stderr, "Host allocation failed.\n");
        return 1;
    }

    CUDA_CALL(cudaMalloc((void **) &d_a,       nx * ny * sizeof(float)));
    CUDA_CALL(cudaMalloc((void **) &d_a_new,   nx * ny * sizeof(float)));
    CUDA_CALL(cudaMalloc((void **) &d_weights, n_weights * sizeof(float)));

    init(a,a_new,nx,ny,weights,n_weights);

    cudaEvent_t start,stop;
    CUDA_CALL(cudaEventCreate(&start));
    CUDA_CALL(cudaEventCreate(&stop));

    CUDA_CALL(cudaDeviceSynchronize());
    CUDA_CALL(cudaEventRecord(start));
    PUSH_RANGE("while loop",0)

    int iter = 0;
    const float weight = weights[0];

    // Upload the initialized fields (boundary values included) once,
    // before the iteration loop.
    CUDA_CALL(cudaMemcpy(d_a,       a,       nx * ny * sizeof(float), cudaMemcpyHostToDevice));
    CUDA_CALL(cudaMemcpy(d_a_new,   a_new,   nx * ny * sizeof(float), cudaMemcpyHostToDevice));
    CUDA_CALL(cudaMemcpy(d_weights, weights, n_weights * sizeof(float), cudaMemcpyHostToDevice));

    while ( iter <= iter_max )
    {
        PUSH_RANGE("jacobi step",1)
        jacobi_iteration<<<dim3(nx/32,ny/4),dim3(32,4)>>>(d_a_new,d_a,nx,ny,weight);
        CUDA_CALL(cudaGetLastError());
#ifndef NO_SYNC
        CUDA_CALL(cudaDeviceSynchronize());
#endif
        POP_RANGE

        // Ping-pong the *device* buffers so the freshly computed field feeds
        // the next sweep. The previous code swapped the host pointers
        // (a, a_new), which the kernels never see, so the iteration never
        // actually advanced. std::swap on raw device pointers is fine: it
        // exchanges the pointer values, not the pointed-to memory.
        std::swap(d_a, d_a_new);

        PUSH_RANGE("periodic boundary conditions",2)
        // Apply periodic boundary conditions to the just-computed field.
        apply_periodic_bc<<<dim3(nx/128),dim3(128)>>>(d_a,nx,ny);
        CUDA_CALL(cudaGetLastError());
#ifndef NO_SYNC
        CUDA_CALL(cudaDeviceSynchronize());
#endif
        POP_RANGE

        if ( 0 == iter%100 )
        {
#ifdef NO_SYNC
            CUDA_CALL(cudaDeviceSynchronize());
#endif
            std::cout<<iter<<std::endl;
        }
        iter++;
    }
    CUDA_CALL(cudaEventRecord(stop));
    CUDA_CALL(cudaDeviceSynchronize());
    POP_RANGE

    float runtime = 0.0f;
    CUDA_CALL(cudaEventElapsedTime(&runtime,start,stop));
    std::cout<<"Runtime "<<runtime/1000.0f<<" seconds."<<std::endl;

    CUDA_CALL(cudaEventDestroy(stop));
    CUDA_CALL(cudaEventDestroy(start));

    free(a);
    free(a_new);
    free(weights);
    CUDA_CALL(cudaFree(d_weights));
    CUDA_CALL(cudaFree(d_a_new));
    CUDA_CALL(cudaFree(d_a));
    CUDA_CALL(cudaDeviceReset());
    return 0;
}
8b457bab1ee79118cd4f0e5656ebbb2b7c8953ab.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <ATen/hip/HIPApplyUtils.cuh>

// TODO make it in a common file
// Grid-stride loop over a flat index range [0, n).
#define CUDA_1D_KERNEL_LOOP(i, n)                            \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
       i += blockDim.x * gridDim.x)

// Read tensor[idx][v][u][y][x] from a contiguous (idx, V, U, H, W) layout,
// returning pad_val whenever any of (y, x, v, u) is outside its valid range.
template <typename T>
__device__ inline T get_pixel_val(
    const T* tensor,
    const int idx,
    const int H,
    const int W,
    const int y,
    const int x,
    const int V,
    const int U,
    const int v,
    const int u,
    const T pad_val) {
  if ((y < 0) || (y >= H) || (x < 0) || (x >= W) || (v < 0) || (v >= V) ||
      (u < 0) || (u >= U)) {
    return pad_val;
  } else {
    return tensor[(((idx * V + v) * U + u) * H + y) * W + x];
  }
}

// Atomically accumulate val into tensor[idx][v][u][y][x] (same layout as
// get_pixel_val). Zero contributions and out-of-range coordinates are
// skipped, which also avoids needless atomic traffic.
template <typename T>
__device__ inline void add_pixel_val(
    T* tensor,
    const T val,
    const int idx,
    const int H,
    const int W,
    const int y,
    const int x,
    const int V,
    const int U,
    const int v,
    const int u) {
  if ((val == 0.) || (y < 0) || (y >= H) || (x < 0) || (x >= W) || (v < 0) ||
      (v >= V) || (u < 0) || (u >= U)) {
    return;
  } else {
    atomicAdd(tensor + ((((idx * V + v) * U + u) * H + y) * W + x), val);
  }
}

// Forward kernel: one thread per output element. The flat output index is
// decoded into (batch idx, v, u, y, x); the value is a quadrilinear
// interpolation of the input over the four axes (y, x, v, u): 16 corner
// samples, each weighted by the product of per-axis floor/ceil fractions.
// Samples outside the input read pad_val via get_pixel_val.
// NOTE(review): 0.5 and 1. are double literals, so the coordinate math is
// done in double before truncation to float — presumably harmless here,
// but confirm before "fixing".
template <typename T>
__global__ void SwapAlign2NatForwardFeat(
    const int nthreads,
    const T* bottom_data,
    const int Vout,
    const int Uout,
    const float hVout,
    const float hUout,
    const int Vin,
    const int Uin,
    const float lambda,
    const int Hin,
    const int Win,
    const int Hout,
    const int Wout,
    const T pad_val,
    T* top_data) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // Decode the flat output index into (idx, v, u, y, x).
    int idx = index;
    const int x = idx % Wout;
    idx /= Wout;
    const int y = idx % Hout;
    idx /= Hout;
    const int u = idx % Uout;
    idx /= Uout;
    const int v = idx % Vout;
    idx /= Vout;
    // Source coordinate along x, and its floor/ceil interpolation weights.
    const float ox = x * lambda + u - hUout + 0.5;
    const int xf = static_cast<int>(floor(ox));
    const int xc = static_cast<int>(ceil(ox));
    const float xwc = ox - xf;
    const float xwf = 1. - xwc;
    // Same along y.
    const float oy = y * lambda + v - hVout + 0.5;
    const int yf = static_cast<int>(floor(oy));
    const int yc = static_cast<int>(ceil(oy));
    const float ywc = oy - yf;
    const float ywf = 1. - ywc;
    // Same along u (input channel grid is lambda x finer than output's).
    const float ou = (u + 0.5) / lambda - 0.5;
    const int uf = static_cast<int>(floor(ou));
    const int uc = static_cast<int>(ceil(ou));
    const float uwc = ou - uf;
    const float uwf = 1. - uwc;
    // Same along v.
    const float ov = (v + 0.5) / lambda - 0.5;
    const int vf = static_cast<int>(floor(ov));
    const int vc = static_cast<int>(ceil(ov));
    const float vwc = ov - vf;
    const float vwf = 1. - vwc;
    // Sum of the 16 corner samples (every floor/ceil combination of
    // y, x, v, u), each weighted by the product of its per-axis weights.
    T val =
        ywf * xwf * vwf * uwf * get_pixel_val(bottom_data, idx, Hin, Win, yf, xf, Vin, Uin, vf, uf, pad_val) +
        ywf * xwf * vwf * uwc * get_pixel_val(bottom_data, idx, Hin, Win, yf, xf, Vin, Uin, vf, uc, pad_val) +
        ywf * xwf * vwc * uwf * get_pixel_val(bottom_data, idx, Hin, Win, yf, xf, Vin, Uin, vc, uf, pad_val) +
        ywf * xwf * vwc * uwc * get_pixel_val(bottom_data, idx, Hin, Win, yf, xf, Vin, Uin, vc, uc, pad_val) +
        ywf * xwc * vwf * uwf * get_pixel_val(bottom_data, idx, Hin, Win, yf, xc, Vin, Uin, vf, uf, pad_val) +
        ywf * xwc * vwf * uwc * get_pixel_val(bottom_data, idx, Hin, Win, yf, xc, Vin, Uin, vf, uc, pad_val) +
        ywf * xwc * vwc * uwf * get_pixel_val(bottom_data, idx, Hin, Win, yf, xc, Vin, Uin, vc, uf, pad_val) +
        ywf * xwc * vwc * uwc * get_pixel_val(bottom_data, idx, Hin, Win, yf, xc, Vin, Uin, vc, uc, pad_val) +
        ywc * xwf * vwf * uwf * get_pixel_val(bottom_data, idx, Hin, Win, yc, xf, Vin, Uin, vf, uf, pad_val) +
        ywc * xwf * vwf * uwc * get_pixel_val(bottom_data, idx, Hin, Win, yc, xf, Vin, Uin, vf, uc, pad_val) +
        ywc * xwf * vwc * uwf * get_pixel_val(bottom_data, idx, Hin, Win, yc, xf, Vin, Uin, vc, uf, pad_val) +
        ywc * xwf * vwc * uwc * get_pixel_val(bottom_data, idx, Hin, Win, yc, xf, Vin, Uin, vc, uc, pad_val) +
        ywc * xwc * vwf * uwf * get_pixel_val(bottom_data, idx, Hin, Win, yc, xc, Vin, Uin, vf, uf, pad_val) +
        ywc * xwc * vwf * uwc * get_pixel_val(bottom_data, idx, Hin, Win, yc, xc, Vin, Uin, vf, uc, pad_val) +
        ywc * xwc * vwc * uwf * get_pixel_val(bottom_data, idx, Hin, Win, yc, xc, Vin, Uin, vc, uf, pad_val) +
        ywc * xwc * vwc * uwc * get_pixel_val(bottom_data, idx, Hin, Win, yc, xc, Vin, Uin, vc, uc, pad_val);
    top_data[index] = val;
  }
}

// Backward kernel: exact adjoint of the forward pass. Each thread reads one
// output gradient and scatters it (atomicAdd via add_pixel_val) to the same
// 16 input corners with the same weights the forward pass used.
template <typename T>
__global__ void SwapAlign2NatBackwardFeat(
    const int nthreads,
    const T* top_diff,
    const int Vout,
    const int Uout,
    const float hVout,
    const float hUout,
    const int Vin,
    const int Uin,
    const float lambda,
    const int Hin,
    const int Win,
    const int Hout,
    const int Wout,
    T* bottom_diff) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // Decode the flat output index into (idx, v, u, y, x).
    int idx = index;
    const int x = idx % Wout;
    idx /= Wout;
    const int y = idx % Hout;
    idx /= Hout;
    const int u = idx % Uout;
    idx /= Uout;
    const int v = idx % Vout;
    idx /= Vout;
    // Per-axis source coordinates and floor/ceil weights (mirrors forward).
    const float ox = x * lambda + u - hUout + 0.5;
    const int xf = static_cast<int>(floor(ox));
    const int xc = static_cast<int>(ceil(ox));
    const float xwc = ox - xf;
    const float xwf = 1. - xwc;
    const float oy = y * lambda + v - hVout + 0.5;
    const int yf = static_cast<int>(floor(oy));
    const int yc = static_cast<int>(ceil(oy));
    const float ywc = oy - yf;
    const float ywf = 1. - ywc;
    const float ou = (u + 0.5) / lambda - 0.5;
    const int uf = static_cast<int>(floor(ou));
    const int uc = static_cast<int>(ceil(ou));
    const float uwc = ou - uf;
    const float uwf = 1. - uwc;
    const float ov = (v + 0.5) / lambda - 0.5;
    const int vf = static_cast<int>(floor(ov));
    const int vc = static_cast<int>(ceil(ov));
    const float vwc = ov - vf;
    const float vwf = 1. - vwc;
    const T grad = top_diff[index];
    // Scatter the weighted gradient to all 16 corners.
    add_pixel_val(bottom_diff, ywf * xwf * vwf * uwf * grad, idx, Hin, Win, yf, xf, Vin, Uin, vf, uf);
    add_pixel_val(bottom_diff, ywf * xwf * vwf * uwc * grad, idx, Hin, Win, yf, xf, Vin, Uin, vf, uc);
    add_pixel_val(bottom_diff, ywf * xwf * vwc * uwf * grad, idx, Hin, Win, yf, xf, Vin, Uin, vc, uf);
    add_pixel_val(bottom_diff, ywf * xwf * vwc * uwc * grad, idx, Hin, Win, yf, xf, Vin, Uin, vc, uc);
    add_pixel_val(bottom_diff, ywf * xwc * vwf * uwf * grad, idx, Hin, Win, yf, xc, Vin, Uin, vf, uf);
    add_pixel_val(bottom_diff, ywf * xwc * vwf * uwc * grad, idx, Hin, Win, yf, xc, Vin, Uin, vf, uc);
    add_pixel_val(bottom_diff, ywf * xwc * vwc * uwf * grad, idx, Hin, Win, yf, xc, Vin, Uin, vc, uf);
    add_pixel_val(bottom_diff, ywf * xwc * vwc * uwc * grad, idx, Hin, Win, yf, xc, Vin, Uin, vc, uc);
    add_pixel_val(bottom_diff, ywc * xwf * vwf * uwf * grad, idx, Hin, Win, yc, xf, Vin, Uin, vf, uf);
    add_pixel_val(bottom_diff, ywc * xwf * vwf * uwc * grad, idx, Hin, Win, yc, xf, Vin, Uin, vf, uc);
    add_pixel_val(bottom_diff, ywc * xwf * vwc * uwf * grad, idx, Hin, Win, yc, xf, Vin, Uin, vc, uf);
    add_pixel_val(bottom_diff, ywc * xwf * vwc * uwc * grad, idx, Hin, Win, yc, xf, Vin, Uin, vc, uc);
    add_pixel_val(bottom_diff, ywc * xwc * vwf * uwf * grad, idx, Hin, Win, yc, xc, Vin, Uin, vf, uf);
    add_pixel_val(bottom_diff, ywc * xwc * vwf * uwc * grad, idx, Hin, Win, yc, xc, Vin, Uin, vf, uc);
    add_pixel_val(bottom_diff, ywc * xwc * vwc * uwf * grad, idx, Hin, Win, yc, xc, Vin, Uin, vc, uf);
    add_pixel_val(bottom_diff, ywc * xwc * vwc * uwc * grad, idx, Hin, Win, yc, xc, Vin, Uin, vc, uc);
  }
}

namespace cvpods {

// Forward entry point. X is (N, C, Hin, Win) with C = Vin*Uin and Vin == Uin
// (C must be a perfect square). Returns Y of shape
// (N, (lambda*Vin)*(lambda*Uin), ceil(Hin/lambda), ceil(Win/lambda)).
// Launches one thread per output element on the current HIP stream.
at::Tensor SwapAlign2Nat_forward_cuda(
    const at::Tensor& X,
    const int lambda_val,
    const float pad_val) {
  AT_ASSERTM(X.device().is_cuda(), "input must be a CUDA tensor");
  AT_ASSERTM(X.ndimension() == 4, "input must be a 4D tensor");
  AT_ASSERTM(lambda_val >= 1, "lambda should be greater or equal to 1");
  const int N = X.size(0);
  const int C = X.size(1);
  // Split the channel dimension into a (Vin, Uin) grid.
  const int Vin = static_cast<int>(sqrt(static_cast<float>(C)));
  const int Uin = C / Vin;
  AT_ASSERTM(
      C == Vin * Uin && Vin == Uin, "#channels should be a square number");
  const int Vout = lambda_val * Vin;
  const int Uout = lambda_val * Uin;
  const int Hin = X.size(2);
  const int Win = X.size(3);
  const float lambda = static_cast<float>(lambda_val);
  const int Hout = static_cast<int>(ceil(Hin / lambda));
  const int Wout = static_cast<int>(ceil(Win / lambda));
  const float hVout = Vout / 2.;
  const float hUout = Uout / 2.;
  at::hip::HIPGuardMasqueradingAsCUDA device_guard(X.device());
  at::Tensor Y = at::empty({N, Vout * Uout, Hout, Wout}, X.options());
  hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
  // At most 4096 blocks of 512 threads; the grid-stride loop covers the rest.
  dim3 grid(::min(at::cuda::ATenCeilDiv(Y.numel(), 512L), 4096L));
  dim3 block(512);
  if (Y.numel() == 0) {
    AT_CUDA_CHECK(hipGetLastError());
    return Y;
  }
  AT_DISPATCH_FLOATING_TYPES(X.scalar_type(), "SwapAlign2Nat_forward", [&] {
    hipLaunchKernelGGL(( SwapAlign2NatForwardFeat<scalar_t>), dim3(grid), dim3(block), 0, stream,
        Y.numel(),
        X.contiguous().data_ptr<scalar_t>(),
        Vout,
        Uout,
        hVout,
        hUout,
        Vin,
        Uin,
        lambda,
        Hin,
        Win,
        Hout,
        Wout,
        pad_val,
        Y.data_ptr<scalar_t>());
  });
  // NOTE(review): unconditional device-wide sync after the launch; the
  // backward pass below does not do this. Presumably here to surface kernel
  // errors eagerly, but it serializes the stream — confirm whether it can
  // be dropped.
  hipDeviceSynchronize();
  AT_CUDA_CHECK(hipGetLastError());
  return Y;
}

// Backward entry point: allocates a zero-initialized gX of the original
// input shape (batch_size, channel, height, width) and atomically scatters
// the output gradient gY into it (adjoint of the forward interpolation).
at::Tensor SwapAlign2Nat_backward_cuda(
    const at::Tensor& gY,
    const int lambda_val,
    const int batch_size,
    const int channel,
    const int height,
    const int width) {
  AT_ASSERTM(gY.device().is_cuda(), "input gradient must be a CUDA tensor");
  AT_ASSERTM(gY.ndimension() == 4, "input gradient must be a 4D tensor");
  AT_ASSERTM(lambda_val >= 1, "lambda should be greater or equal to 1");
  const int Vin = static_cast<int>(sqrt(static_cast<float>(channel)));
  const int Uin = channel / Vin;
  const int Vout = lambda_val * Vin;
  const int Uout = lambda_val * Uin;
  const float hVout = Vout / 2.;
  const float hUout = Uout / 2.;
  const int Hout = gY.size(2);
  const int Wout = gY.size(3);
  at::hip::HIPGuardMasqueradingAsCUDA device_guard(gY.device());
  // Zeros, because the kernel accumulates into gX.
  at::Tensor gX = at::zeros({batch_size, channel, height, width}, gY.options());
  hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
  dim3 grid(::min(at::cuda::ATenCeilDiv(gY.numel(), 512L), 4096L));
  dim3 block(512);
  // handle possibly empty gradients
  if (gY.numel() == 0) {
    AT_CUDA_CHECK(hipGetLastError());
    return gX;
  }
  AT_DISPATCH_FLOATING_TYPES(gY.scalar_type(), "SwapAlign2Nat_backward", [&] {
    hipLaunchKernelGGL(( SwapAlign2NatBackwardFeat<scalar_t>), dim3(grid), dim3(block), 0, stream,
        gY.numel(),
        gY.contiguous().data_ptr<scalar_t>(),
        Vout,
        Uout,
        hVout,
        hUout,
        Vin,
        Uin,
        static_cast<float>(lambda_val),
        height,
        width,
        Hout,
        Wout,
        gX.data_ptr<scalar_t>());
  });
  AT_CUDA_CHECK(hipGetLastError());
  return gX;
}

} // namespace cvpods
8b457bab1ee79118cd4f0e5656ebbb2b7c8953ab.cu
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>

// TODO make it in a common file
// Grid-stride loop over a flat index range [0, n).
#define CUDA_1D_KERNEL_LOOP(i, n)                            \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
       i += blockDim.x * gridDim.x)

// Read tensor[idx][v][u][y][x] from a contiguous (idx, V, U, H, W) layout,
// returning pad_val whenever any of (y, x, v, u) is outside its valid range.
template <typename T>
__device__ inline T get_pixel_val(
    const T* tensor,
    const int idx,
    const int H,
    const int W,
    const int y,
    const int x,
    const int V,
    const int U,
    const int v,
    const int u,
    const T pad_val) {
  if ((y < 0) || (y >= H) || (x < 0) || (x >= W) || (v < 0) || (v >= V) ||
      (u < 0) || (u >= U)) {
    return pad_val;
  } else {
    return tensor[(((idx * V + v) * U + u) * H + y) * W + x];
  }
}

// Atomically accumulate val into tensor[idx][v][u][y][x] (same layout as
// get_pixel_val). Zero contributions and out-of-range coordinates are
// skipped, which also avoids needless atomic traffic.
template <typename T>
__device__ inline void add_pixel_val(
    T* tensor,
    const T val,
    const int idx,
    const int H,
    const int W,
    const int y,
    const int x,
    const int V,
    const int U,
    const int v,
    const int u) {
  if ((val == 0.) || (y < 0) || (y >= H) || (x < 0) || (x >= W) || (v < 0) ||
      (v >= V) || (u < 0) || (u >= U)) {
    return;
  } else {
    atomicAdd(tensor + ((((idx * V + v) * U + u) * H + y) * W + x), val);
  }
}

// Forward kernel: one thread per output element. The flat output index is
// decoded into (batch idx, v, u, y, x); the value is a quadrilinear
// interpolation of the input over the four axes (y, x, v, u): 16 corner
// samples, each weighted by the product of per-axis floor/ceil fractions.
// Samples outside the input read pad_val via get_pixel_val.
// NOTE(review): 0.5 and 1. are double literals, so the coordinate math is
// done in double before truncation to float — presumably harmless here,
// but confirm before "fixing".
template <typename T>
__global__ void SwapAlign2NatForwardFeat(
    const int nthreads,
    const T* bottom_data,
    const int Vout,
    const int Uout,
    const float hVout,
    const float hUout,
    const int Vin,
    const int Uin,
    const float lambda,
    const int Hin,
    const int Win,
    const int Hout,
    const int Wout,
    const T pad_val,
    T* top_data) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // Decode the flat output index into (idx, v, u, y, x).
    int idx = index;
    const int x = idx % Wout;
    idx /= Wout;
    const int y = idx % Hout;
    idx /= Hout;
    const int u = idx % Uout;
    idx /= Uout;
    const int v = idx % Vout;
    idx /= Vout;
    // Source coordinate along x, and its floor/ceil interpolation weights.
    const float ox = x * lambda + u - hUout + 0.5;
    const int xf = static_cast<int>(floor(ox));
    const int xc = static_cast<int>(ceil(ox));
    const float xwc = ox - xf;
    const float xwf = 1. - xwc;
    // Same along y.
    const float oy = y * lambda + v - hVout + 0.5;
    const int yf = static_cast<int>(floor(oy));
    const int yc = static_cast<int>(ceil(oy));
    const float ywc = oy - yf;
    const float ywf = 1. - ywc;
    // Same along u (input channel grid is lambda x finer than output's).
    const float ou = (u + 0.5) / lambda - 0.5;
    const int uf = static_cast<int>(floor(ou));
    const int uc = static_cast<int>(ceil(ou));
    const float uwc = ou - uf;
    const float uwf = 1. - uwc;
    // Same along v.
    const float ov = (v + 0.5) / lambda - 0.5;
    const int vf = static_cast<int>(floor(ov));
    const int vc = static_cast<int>(ceil(ov));
    const float vwc = ov - vf;
    const float vwf = 1. - vwc;
    // Sum of the 16 corner samples (every floor/ceil combination of
    // y, x, v, u), each weighted by the product of its per-axis weights.
    T val =
        ywf * xwf * vwf * uwf * get_pixel_val(bottom_data, idx, Hin, Win, yf, xf, Vin, Uin, vf, uf, pad_val) +
        ywf * xwf * vwf * uwc * get_pixel_val(bottom_data, idx, Hin, Win, yf, xf, Vin, Uin, vf, uc, pad_val) +
        ywf * xwf * vwc * uwf * get_pixel_val(bottom_data, idx, Hin, Win, yf, xf, Vin, Uin, vc, uf, pad_val) +
        ywf * xwf * vwc * uwc * get_pixel_val(bottom_data, idx, Hin, Win, yf, xf, Vin, Uin, vc, uc, pad_val) +
        ywf * xwc * vwf * uwf * get_pixel_val(bottom_data, idx, Hin, Win, yf, xc, Vin, Uin, vf, uf, pad_val) +
        ywf * xwc * vwf * uwc * get_pixel_val(bottom_data, idx, Hin, Win, yf, xc, Vin, Uin, vf, uc, pad_val) +
        ywf * xwc * vwc * uwf * get_pixel_val(bottom_data, idx, Hin, Win, yf, xc, Vin, Uin, vc, uf, pad_val) +
        ywf * xwc * vwc * uwc * get_pixel_val(bottom_data, idx, Hin, Win, yf, xc, Vin, Uin, vc, uc, pad_val) +
        ywc * xwf * vwf * uwf * get_pixel_val(bottom_data, idx, Hin, Win, yc, xf, Vin, Uin, vf, uf, pad_val) +
        ywc * xwf * vwf * uwc * get_pixel_val(bottom_data, idx, Hin, Win, yc, xf, Vin, Uin, vf, uc, pad_val) +
        ywc * xwf * vwc * uwf * get_pixel_val(bottom_data, idx, Hin, Win, yc, xf, Vin, Uin, vc, uf, pad_val) +
        ywc * xwf * vwc * uwc * get_pixel_val(bottom_data, idx, Hin, Win, yc, xf, Vin, Uin, vc, uc, pad_val) +
        ywc * xwc * vwf * uwf * get_pixel_val(bottom_data, idx, Hin, Win, yc, xc, Vin, Uin, vf, uf, pad_val) +
        ywc * xwc * vwf * uwc * get_pixel_val(bottom_data, idx, Hin, Win, yc, xc, Vin, Uin, vf, uc, pad_val) +
        ywc * xwc * vwc * uwf * get_pixel_val(bottom_data, idx, Hin, Win, yc, xc, Vin, Uin, vc, uf, pad_val) +
        ywc * xwc * vwc * uwc * get_pixel_val(bottom_data, idx, Hin, Win, yc, xc, Vin, Uin, vc, uc, pad_val);
    top_data[index] = val;
  }
}

// Backward kernel: exact adjoint of the forward pass. Each thread reads one
// output gradient and scatters it (atomicAdd via add_pixel_val) to the same
// 16 input corners with the same weights the forward pass used.
template <typename T>
__global__ void SwapAlign2NatBackwardFeat(
    const int nthreads,
    const T* top_diff,
    const int Vout,
    const int Uout,
    const float hVout,
    const float hUout,
    const int Vin,
    const int Uin,
    const float lambda,
    const int Hin,
    const int Win,
    const int Hout,
    const int Wout,
    T* bottom_diff) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // Decode the flat output index into (idx, v, u, y, x).
    int idx = index;
    const int x = idx % Wout;
    idx /= Wout;
    const int y = idx % Hout;
    idx /= Hout;
    const int u = idx % Uout;
    idx /= Uout;
    const int v = idx % Vout;
    idx /= Vout;
    // Per-axis source coordinates and floor/ceil weights (mirrors forward).
    const float ox = x * lambda + u - hUout + 0.5;
    const int xf = static_cast<int>(floor(ox));
    const int xc = static_cast<int>(ceil(ox));
    const float xwc = ox - xf;
    const float xwf = 1. - xwc;
    const float oy = y * lambda + v - hVout + 0.5;
    const int yf = static_cast<int>(floor(oy));
    const int yc = static_cast<int>(ceil(oy));
    const float ywc = oy - yf;
    const float ywf = 1. - ywc;
    const float ou = (u + 0.5) / lambda - 0.5;
    const int uf = static_cast<int>(floor(ou));
    const int uc = static_cast<int>(ceil(ou));
    const float uwc = ou - uf;
    const float uwf = 1. - uwc;
    const float ov = (v + 0.5) / lambda - 0.5;
    const int vf = static_cast<int>(floor(ov));
    const int vc = static_cast<int>(ceil(ov));
    const float vwc = ov - vf;
    const float vwf = 1. - vwc;
    const T grad = top_diff[index];
    // Scatter the weighted gradient to all 16 corners.
    add_pixel_val(bottom_diff, ywf * xwf * vwf * uwf * grad, idx, Hin, Win, yf, xf, Vin, Uin, vf, uf);
    add_pixel_val(bottom_diff, ywf * xwf * vwf * uwc * grad, idx, Hin, Win, yf, xf, Vin, Uin, vf, uc);
    add_pixel_val(bottom_diff, ywf * xwf * vwc * uwf * grad, idx, Hin, Win, yf, xf, Vin, Uin, vc, uf);
    add_pixel_val(bottom_diff, ywf * xwf * vwc * uwc * grad, idx, Hin, Win, yf, xf, Vin, Uin, vc, uc);
    add_pixel_val(bottom_diff, ywf * xwc * vwf * uwf * grad, idx, Hin, Win, yf, xc, Vin, Uin, vf, uf);
    add_pixel_val(bottom_diff, ywf * xwc * vwf * uwc * grad, idx, Hin, Win, yf, xc, Vin, Uin, vf, uc);
    add_pixel_val(bottom_diff, ywf * xwc * vwc * uwf * grad, idx, Hin, Win, yf, xc, Vin, Uin, vc, uf);
    add_pixel_val(bottom_diff, ywf * xwc * vwc * uwc * grad, idx, Hin, Win, yf, xc, Vin, Uin, vc, uc);
    add_pixel_val(bottom_diff, ywc * xwf * vwf * uwf * grad, idx, Hin, Win, yc, xf, Vin, Uin, vf, uf);
    add_pixel_val(bottom_diff, ywc * xwf * vwf * uwc * grad, idx, Hin, Win, yc, xf, Vin, Uin, vf, uc);
    add_pixel_val(bottom_diff, ywc * xwf * vwc * uwf * grad, idx, Hin, Win, yc, xf, Vin, Uin, vc, uf);
    add_pixel_val(bottom_diff, ywc * xwf * vwc * uwc * grad, idx, Hin, Win, yc, xf, Vin, Uin, vc, uc);
    add_pixel_val(bottom_diff, ywc * xwc * vwf * uwf * grad, idx, Hin, Win, yc, xc, Vin, Uin, vf, uf);
    add_pixel_val(bottom_diff, ywc * xwc * vwf * uwc * grad, idx, Hin, Win, yc, xc, Vin, Uin, vf, uc);
    add_pixel_val(bottom_diff, ywc * xwc * vwc * uwf * grad, idx, Hin, Win, yc, xc, Vin, Uin, vc, uf);
    add_pixel_val(bottom_diff, ywc * xwc * vwc * uwc * grad, idx, Hin, Win, yc, xc, Vin, Uin, vc, uc);
  }
}

namespace cvpods {

// Forward entry point. X is (N, C, Hin, Win) with C = Vin*Uin and Vin == Uin
// (C must be a perfect square). Returns Y of shape
// (N, (lambda*Vin)*(lambda*Uin), ceil(Hin/lambda), ceil(Win/lambda)).
// Launches one thread per output element on the current CUDA stream.
at::Tensor SwapAlign2Nat_forward_cuda(
    const at::Tensor& X,
    const int lambda_val,
    const float pad_val) {
  AT_ASSERTM(X.device().is_cuda(), "input must be a CUDA tensor");
  AT_ASSERTM(X.ndimension() == 4, "input must be a 4D tensor");
  AT_ASSERTM(lambda_val >= 1, "lambda should be greater or equal to 1");
  const int N = X.size(0);
  const int C = X.size(1);
  // Split the channel dimension into a (Vin, Uin) grid.
  const int Vin = static_cast<int>(sqrt(static_cast<float>(C)));
  const int Uin = C / Vin;
  AT_ASSERTM(
      C == Vin * Uin && Vin == Uin, "#channels should be a square number");
  const int Vout = lambda_val * Vin;
  const int Uout = lambda_val * Uin;
  const int Hin = X.size(2);
  const int Win = X.size(3);
  const float lambda = static_cast<float>(lambda_val);
  const int Hout = static_cast<int>(ceil(Hin / lambda));
  const int Wout = static_cast<int>(ceil(Win / lambda));
  const float hVout = Vout / 2.;
  const float hUout = Uout / 2.;
  at::cuda::CUDAGuard device_guard(X.device());
  at::Tensor Y = at::empty({N, Vout * Uout, Hout, Wout}, X.options());
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  // At most 4096 blocks of 512 threads; the grid-stride loop covers the rest.
  dim3 grid(std::min(at::cuda::ATenCeilDiv(Y.numel(), 512L), 4096L));
  dim3 block(512);
  if (Y.numel() == 0) {
    AT_CUDA_CHECK(cudaGetLastError());
    return Y;
  }
  AT_DISPATCH_FLOATING_TYPES(X.scalar_type(), "SwapAlign2Nat_forward", [&] {
    SwapAlign2NatForwardFeat<scalar_t><<<grid, block, 0, stream>>>(
        Y.numel(),
        X.contiguous().data_ptr<scalar_t>(),
        Vout,
        Uout,
        hVout,
        hUout,
        Vin,
        Uin,
        lambda,
        Hin,
        Win,
        Hout,
        Wout,
        pad_val,
        Y.data_ptr<scalar_t>());
  });
  // NOTE(review): unconditional device-wide sync after the launch; the
  // backward pass below does not do this. Presumably here to surface kernel
  // errors eagerly, but it serializes the stream — confirm whether it can
  // be dropped.
  cudaDeviceSynchronize();
  AT_CUDA_CHECK(cudaGetLastError());
  return Y;
}

// Backward entry point: allocates a zero-initialized gX of the original
// input shape (batch_size, channel, height, width) and atomically scatters
// the output gradient gY into it (adjoint of the forward interpolation).
at::Tensor SwapAlign2Nat_backward_cuda(
    const at::Tensor& gY,
    const int lambda_val,
    const int batch_size,
    const int channel,
    const int height,
    const int width) {
  AT_ASSERTM(gY.device().is_cuda(), "input gradient must be a CUDA tensor");
  AT_ASSERTM(gY.ndimension() == 4, "input gradient must be a 4D tensor");
  AT_ASSERTM(lambda_val >= 1, "lambda should be greater or equal to 1");
  const int Vin = static_cast<int>(sqrt(static_cast<float>(channel)));
  const int Uin = channel / Vin;
  const int Vout = lambda_val * Vin;
  const int Uout = lambda_val * Uin;
  const float hVout = Vout / 2.;
  const float hUout = Uout / 2.;
  const int Hout = gY.size(2);
  const int Wout = gY.size(3);
  at::cuda::CUDAGuard device_guard(gY.device());
  // Zeros, because the kernel accumulates into gX.
  at::Tensor gX = at::zeros({batch_size, channel, height, width}, gY.options());
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  dim3 grid(std::min(at::cuda::ATenCeilDiv(gY.numel(), 512L), 4096L));
  dim3 block(512);
  // handle possibly empty gradients
  if (gY.numel() == 0) {
    AT_CUDA_CHECK(cudaGetLastError());
    return gX;
  }
  AT_DISPATCH_FLOATING_TYPES(gY.scalar_type(), "SwapAlign2Nat_backward", [&] {
    SwapAlign2NatBackwardFeat<scalar_t><<<grid, block, 0, stream>>>(
        gY.numel(),
        gY.contiguous().data_ptr<scalar_t>(),
        Vout,
        Uout,
        hVout,
        hUout,
        Vin,
        Uin,
        static_cast<float>(lambda_val),
        height,
        width,
        Hout,
        Wout,
        gX.data_ptr<scalar_t>());
  });
  AT_CUDA_CHECK(cudaGetLastError());
  return gX;
}

} // namespace cvpods
3af048974d9fceb5e56170ab948c901d7fd874fa.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * cudinv.cu * * Created on: February 12, 2015 * Author: Denis Tananaev */ #include "CTensor.h" #include "timer.h" #include <string> typedef uchar4 Color; /* * CTensor stores the three colors separately in a 3D array * We want to store the image in a 2D matrix where each element holds the RGB value */ void CTensorToColorCMatrix(CMatrix<Color>& out, const CTensor<unsigned char>& in) { out.setSize(in.xSize(), in.ySize()); for( int y = 0; y < out.ySize(); ++y ) for( int x = 0; x < out.xSize(); ++x ) { out(x,y).x = in(x,y,0); // R out(x,y).y = in(x,y,1); // G out(x,y).z = in(x,y,2); // B } } /* * The inverse function to CTensorToColorMatrix() */ void ColorCMatrixToCTensor(CTensor<unsigned char>& out, const CMatrix<Color>& in) { out.setSize(in.xSize(), in.ySize(), 3); for( int y = 0; y < out.ySize(); ++y ) for( int x = 0; x < out.xSize(); ++x ) { out(x,y,0) = in(x,y).x; // R out(x,y,1) = in(x,y).y; // G out(x,y,2) = in(x,y).z; // B } } /* * Inverts a RGB color image * * img The input and output image * x_size image width in px * y_size image height in px * pitch row size in bytes */ __global__ void invert_kernel( Color* img, int x_size, int y_size, int pitch ) { int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; if( x >= x_size || y >= y_size ) return; Color* ptr = (Color*)((char*)img + y * pitch) + x; Color color = *ptr; color.x = 255-color.x; color.y = 255-color.y; color.z = 255-color.z; *ptr = color; } int main( int argc, char** argv ) { std::string fileNameInput; if (argc==2){ fileNameInput=argv[1]; }else{ std::cout<<"!!!WRONG INPUT!!!"<<"\n"; std::cout<<"Usage: cuinv inputfile "<<"\n"; std::cout<<"The command should contain input file name."<<"\n"; return 0; } // Read the image from disk R G B CTensor<unsigned char> tmp; tmp.readFromPPM((fileNameInput+".ppm").c_str()); // Store the image in an appropriate format CMatrix<Color> img; 
CTensorToColorCMatrix(img, tmp); // Copy image to the device Color* d_img; hipMalloc((void**)&d_img, sizeof(Color)*img.size()); hipMemcpy((void*)d_img, (void*)img.data(), sizeof(Color)*img.size(), hipMemcpyHostToDevice); // Setup kernel launch dim3 block(16,16,1); dim3 grid; grid.x = ::ceil( img.xSize()/(float)block.x ); grid.y = ::ceil( img.ySize()/(float)block.y ); timer::start("uncoalesced global memory access"); hipLaunchKernelGGL(( invert_kernel), dim3(grid),dim3(block), 0, 0, d_img, img.xSize(), img.ySize(), sizeof(Color)*img.xSize()); timer::stop("uncoalesced global memory access"); // Copy result back hipMemcpy((void*)img.data(), (void*)d_img, sizeof(Color)*img.size(), hipMemcpyDeviceToHost); // Write inverted image to the disk ColorCMatrixToCTensor(tmp, img); tmp.writeToPPM((fileNameInput+"_cuinv.ppm").c_str()); Color* d_img_aligned; size_t pitch; /* * * Allocate memory here in d_img_aligned using hipMallocPitch() * */ hipMallocPitch((void**)&d_img_aligned, &pitch, sizeof(Color)*img.xSize(),img.ySize()); std::cout << "Image row size " << sizeof(Color)*img.xSize() << std::endl; std::cout << "Pitch " << pitch << std::endl; /* * * Copy the image in 'img' to the device with hipMemcpy2D() * Make sure you use the right pitch for 'img' and 'd_img_aligned' * */ hipMemcpy2D((void*)d_img_aligned, pitch, (void*)img.data(), sizeof(Color)*img.xSize(),sizeof(Color)*img.xSize(),img.ySize() , hipMemcpyHostToDevice); //timer timer::start("coalesced global memory access"); hipLaunchKernelGGL(( invert_kernel), dim3(grid),dim3(block), 0, 0, d_img_aligned, img.xSize(), img.ySize(), pitch); timer::stop("coalesced global memory access"); /* Copy the image back to the host with hipMemcpy2D() */ hipMemcpy2D((void*)img.data(),sizeof(Color)*img.xSize(), (void*)d_img_aligned,pitch,sizeof(Color)*img.xSize(),img.ySize(), hipMemcpyDeviceToHost); // Write the inverted inverted image to the disk ColorCMatrixToCTensor(tmp, img); 
tmp.writeToPPM((fileNameInput+"_cuinv_restored.ppm").c_str()); timer::printToScreen(); hipFree((void*)d_img); hipFree((void*)d_img_aligned); return 0; }
3af048974d9fceb5e56170ab948c901d7fd874fa.cu
/* * cudinv.cu * * Created on: February 12, 2015 * Author: Denis Tananaev */ #include "CTensor.h" #include "timer.h" #include <string> typedef uchar4 Color; /* * CTensor stores the three colors separately in a 3D array * We want to store the image in a 2D matrix where each element holds the RGB value */ void CTensorToColorCMatrix(CMatrix<Color>& out, const CTensor<unsigned char>& in) { out.setSize(in.xSize(), in.ySize()); for( int y = 0; y < out.ySize(); ++y ) for( int x = 0; x < out.xSize(); ++x ) { out(x,y).x = in(x,y,0); // R out(x,y).y = in(x,y,1); // G out(x,y).z = in(x,y,2); // B } } /* * The inverse function to CTensorToColorMatrix() */ void ColorCMatrixToCTensor(CTensor<unsigned char>& out, const CMatrix<Color>& in) { out.setSize(in.xSize(), in.ySize(), 3); for( int y = 0; y < out.ySize(); ++y ) for( int x = 0; x < out.xSize(); ++x ) { out(x,y,0) = in(x,y).x; // R out(x,y,1) = in(x,y).y; // G out(x,y,2) = in(x,y).z; // B } } /* * Inverts a RGB color image * * img The input and output image * x_size image width in px * y_size image height in px * pitch row size in bytes */ __global__ void invert_kernel( Color* img, int x_size, int y_size, int pitch ) { int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; if( x >= x_size || y >= y_size ) return; Color* ptr = (Color*)((char*)img + y * pitch) + x; Color color = *ptr; color.x = 255-color.x; color.y = 255-color.y; color.z = 255-color.z; *ptr = color; } int main( int argc, char** argv ) { std::string fileNameInput; if (argc==2){ fileNameInput=argv[1]; }else{ std::cout<<"!!!WRONG INPUT!!!"<<"\n"; std::cout<<"Usage: cuinv inputfile "<<"\n"; std::cout<<"The command should contain input file name."<<"\n"; return 0; } // Read the image from disk R G B CTensor<unsigned char> tmp; tmp.readFromPPM((fileNameInput+".ppm").c_str()); // Store the image in an appropriate format CMatrix<Color> img; CTensorToColorCMatrix(img, tmp); // Copy image to the device Color* d_img; 
cudaMalloc((void**)&d_img, sizeof(Color)*img.size()); cudaMemcpy((void*)d_img, (void*)img.data(), sizeof(Color)*img.size(), cudaMemcpyHostToDevice); // Setup kernel launch dim3 block(16,16,1); dim3 grid; grid.x = std::ceil( img.xSize()/(float)block.x ); grid.y = std::ceil( img.ySize()/(float)block.y ); timer::start("uncoalesced global memory access"); invert_kernel<<<grid,block>>>(d_img, img.xSize(), img.ySize(), sizeof(Color)*img.xSize()); timer::stop("uncoalesced global memory access"); // Copy result back cudaMemcpy((void*)img.data(), (void*)d_img, sizeof(Color)*img.size(), cudaMemcpyDeviceToHost); // Write inverted image to the disk ColorCMatrixToCTensor(tmp, img); tmp.writeToPPM((fileNameInput+"_cuinv.ppm").c_str()); Color* d_img_aligned; size_t pitch; /* * * Allocate memory here in d_img_aligned using cudaMallocPitch() * */ cudaMallocPitch((void**)&d_img_aligned, &pitch, sizeof(Color)*img.xSize(),img.ySize()); std::cout << "Image row size " << sizeof(Color)*img.xSize() << std::endl; std::cout << "Pitch " << pitch << std::endl; /* * * Copy the image in 'img' to the device with cudaMemcpy2D() * Make sure you use the right pitch for 'img' and 'd_img_aligned' * */ cudaMemcpy2D((void*)d_img_aligned, pitch, (void*)img.data(), sizeof(Color)*img.xSize(),sizeof(Color)*img.xSize(),img.ySize() , cudaMemcpyHostToDevice); //timer timer::start("coalesced global memory access"); invert_kernel<<<grid,block>>>(d_img_aligned, img.xSize(), img.ySize(), pitch); timer::stop("coalesced global memory access"); /* Copy the image back to the host with cudaMemcpy2D() */ cudaMemcpy2D((void*)img.data(),sizeof(Color)*img.xSize(), (void*)d_img_aligned,pitch,sizeof(Color)*img.xSize(),img.ySize(), cudaMemcpyDeviceToHost); // Write the inverted inverted image to the disk ColorCMatrixToCTensor(tmp, img); tmp.writeToPPM((fileNameInput+"_cuinv_restored.ppm").c_str()); timer::printToScreen(); cudaFree((void*)d_img); cudaFree((void*)d_img_aligned); return 0; }