hip_filename stringlengths 5 84 | hip_content stringlengths 79 9.69M | cuda_filename stringlengths 4 83 | cuda_content stringlengths 19 9.69M |
|---|---|---|---|
368c715dbd285e60ffada671249d733adc6ca7ff.hip | // !!! This is a file automatically generated by hipify!!!
/**
MIT License
Copyright (c) 2018 NVIDIA CORPORATION. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*
*/
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
// Device-side logistic sigmoid, 1 / (1 + e^-x), using the fast __expf intrinsic.
inline __device__ float sigmoidGPU(const float& x)
{
    const float e = __expf(-x);
    return 1.0f / (1.0f + e);
}
// YOLOv3 decode kernel: applies sigmoid to x/y/objectness/class channels and
// exp to w/h channels for one batch item.
// Data layout is channel-major: element(cell, channel) = buf[cell + numGridCells * channel],
// with (5 + numOutputClasses) channels per anchor box.
// Launch: 3D grid covering (gridSize_W, gridSize_H, numBBoxes); grid may be
// over-allocated, so every thread bounds-checks.
__global__ void gpuYoloLayerV3(const float* input, float* output, const uint gridSize_H, const uint gridSize_W, const uint numOutputClasses,
    const uint numBBoxes)
{
    uint x_id = blockIdx.x * blockDim.x + threadIdx.x;
    uint y_id = blockIdx.y * blockDim.y + threadIdx.y;
    uint z_id = blockIdx.z * blockDim.z + threadIdx.z;
    if ((x_id >= gridSize_W) || (y_id >= gridSize_H) || (z_id >= numBBoxes))
    {
        return;
    }
    const int numGridCells = gridSize_H * gridSize_W;
    const int bbindex = y_id * gridSize_W + x_id;
    // Hoist the per-anchor base offset instead of recomputing
    // bbindex + numGridCells * (z_id * (5 + numOutputClasses) + k) for every channel k.
    const int base = bbindex + numGridCells * (z_id * (5 + numOutputClasses));
    output[base + numGridCells * 0] = sigmoidGPU(input[base + numGridCells * 0]); // tx
    output[base + numGridCells * 1] = sigmoidGPU(input[base + numGridCells * 1]); // ty
    output[base + numGridCells * 2] = __expf(input[base + numGridCells * 2]);     // tw
    output[base + numGridCells * 3] = __expf(input[base + numGridCells * 3]);     // th
    output[base + numGridCells * 4] = sigmoidGPU(input[base + numGridCells * 4]); // objectness
    // Per-class confidences.
    for (uint i = 0; i < numOutputClasses; ++i)
    {
        output[base + numGridCells * (5 + i)]
            = sigmoidGPU(input[base + numGridCells * (5 + i)]);
    }
}
// Host-side launcher: decodes one YOLOv3 head for every image in the batch.
// `outputSize` is the per-image element count (floats) used to step through the
// batched buffers. Launches are asynchronous on `stream`; only the error status
// after the final launch is reported.
hipError_t cudaYoloLayerV3(const void* input, void* output, const uint& batchSize, const uint& gridSize_H, const uint& gridSize_W,
const uint& numOutputClasses, const uint& numBBoxes,
uint64_t outputSize, hipStream_t stream)
{
dim3 threads_per_block(16, 16, 4);
// "+ 1" rounding over-allocates one extra block when the size divides evenly;
// the kernel's bounds check absorbs the surplus threads.
dim3 number_of_blocks((gridSize_W / threads_per_block.x) + 1,
(gridSize_H / threads_per_block.y) + 1,
(numBBoxes / threads_per_block.z) + 1);
for (int batch = 0; batch < batchSize; ++batch)
{
hipLaunchKernelGGL(( gpuYoloLayerV3), dim3(number_of_blocks), dim3(threads_per_block), 0, stream,
reinterpret_cast<const float*>(input) + (batch * outputSize),
reinterpret_cast<float*>(output) + (batch * outputSize), gridSize_H, gridSize_W, numOutputClasses,
numBBoxes);
}
return hipGetLastError();
} | 368c715dbd285e60ffada671249d733adc6ca7ff.cu | /**
MIT License
Copyright (c) 2018 NVIDIA CORPORATION. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*
*/
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
// Device-side logistic sigmoid, 1 / (1 + e^-x); __expf is the reduced-precision
// fast intrinsic, adequate for confidence scores.
inline __device__ float sigmoidGPU(const float& x) { return 1.0f / (1.0f + __expf(-x)); }
// YOLOv3 decode kernel: applies sigmoid to x/y/objectness/class channels and
// exp to w/h channels for one batch item.
// Data layout is channel-major: element(cell, channel) = buf[cell + numGridCells * channel],
// with (5 + numOutputClasses) channels per anchor box.
// Launch: 3D grid covering (gridSize_W, gridSize_H, numBBoxes); grid may be
// over-allocated, so every thread bounds-checks.
__global__ void gpuYoloLayerV3(const float* input, float* output, const uint gridSize_H, const uint gridSize_W, const uint numOutputClasses,
    const uint numBBoxes)
{
    uint x_id = blockIdx.x * blockDim.x + threadIdx.x;
    uint y_id = blockIdx.y * blockDim.y + threadIdx.y;
    uint z_id = blockIdx.z * blockDim.z + threadIdx.z;
    if ((x_id >= gridSize_W) || (y_id >= gridSize_H) || (z_id >= numBBoxes))
    {
        return;
    }
    const int numGridCells = gridSize_H * gridSize_W;
    const int bbindex = y_id * gridSize_W + x_id;
    // Hoist the per-anchor base offset instead of recomputing
    // bbindex + numGridCells * (z_id * (5 + numOutputClasses) + k) for every channel k.
    const int base = bbindex + numGridCells * (z_id * (5 + numOutputClasses));
    output[base + numGridCells * 0] = sigmoidGPU(input[base + numGridCells * 0]); // tx
    output[base + numGridCells * 1] = sigmoidGPU(input[base + numGridCells * 1]); // ty
    output[base + numGridCells * 2] = __expf(input[base + numGridCells * 2]);     // tw
    output[base + numGridCells * 3] = __expf(input[base + numGridCells * 3]);     // th
    output[base + numGridCells * 4] = sigmoidGPU(input[base + numGridCells * 4]); // objectness
    // Per-class confidences.
    for (uint i = 0; i < numOutputClasses; ++i)
    {
        output[base + numGridCells * (5 + i)]
            = sigmoidGPU(input[base + numGridCells * (5 + i)]);
    }
}
// Host-side launcher: decodes one YOLOv3 head for every image in the batch.
// `outputSize` is the per-image element count (floats) used to step through the
// batched buffers. Launches are asynchronous on `stream`; only the error status
// after the final launch is reported.
cudaError_t cudaYoloLayerV3(const void* input, void* output, const uint& batchSize, const uint& gridSize_H, const uint& gridSize_W,
const uint& numOutputClasses, const uint& numBBoxes,
uint64_t outputSize, cudaStream_t stream)
{
dim3 threads_per_block(16, 16, 4);
// "+ 1" rounding over-allocates one extra block when the size divides evenly;
// the kernel's bounds check absorbs the surplus threads.
dim3 number_of_blocks((gridSize_W / threads_per_block.x) + 1,
(gridSize_H / threads_per_block.y) + 1,
(numBBoxes / threads_per_block.z) + 1);
for (int batch = 0; batch < batchSize; ++batch)
{
gpuYoloLayerV3<<<number_of_blocks, threads_per_block, 0, stream>>>(
reinterpret_cast<const float*>(input) + (batch * outputSize),
reinterpret_cast<float*>(output) + (batch * outputSize), gridSize_H, gridSize_W, numOutputClasses,
numBBoxes);
}
return cudaGetLastError();
} |
93e93d2b8a0fc47a6cc371e44267e989435b39af.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@precisions normal z -> s d c
*/
#include "common_magma.h"
#include "magma_templates.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
// size of work for a thread block
#define BLK_M 16
#define BLK_N 16
// BLK_K gets defined in magmablas_zgemm_reduce,
// because it depends on the CUDA architecture at runtime.
//==============================================================================
// BLK_K size is templated, as it depends on CUDA architecture at runtime.
// Hmm... how to compile for both CUDA arch 1.x and 2.x?
// Reduction-style GEMM kernel tuned for tall-skinny operands (m, n << k):
// C(i,j) = beta*C(i,j) + alpha * sum_k conj(A(k,i)) * B(k,j)   (column-major).
// Launch shape: threads = (BLK_K, BLK_M, BLK_N); each (threadIdx.y, threadIdx.z)
// pair owns one C entry and its BLK_K x-lanes cooperatively reduce over k.
template< int BLK_K >
__global__
void zgemm_reduce_kernel(
int m, int n, int k,
magmaDoubleComplex alpha,
const magmaDoubleComplex* __restrict__ dA, int lda,
const magmaDoubleComplex* __restrict__ dB, int ldb,
magmaDoubleComplex beta,
magmaDoubleComplex * __restrict__ dC, int ldc)
{
#if (__CUDA_ARCH__ >= 200)
const int tx = threadIdx.x;
// Skip threads mapped past the matrix edge.
if (blockIdx.x*BLK_M + threadIdx.y < m && blockIdx.y*BLK_N + threadIdx.z < n) {
// Advance to this thread's column of A and B (columns hold the k-vectors).
dA += (blockIdx.x*BLK_M + threadIdx.y) * lda;
dB += (blockIdx.y*BLK_N + threadIdx.z) * ldb;
// Column-major offset of C element (row = blockIdx.x*BLK_M, col = blockIdx.y*BLK_N).
dC += blockIdx.x*BLK_M + blockIdx.y*BLK_N * ldc;
// was: sum[BLK_M][BLK_N+1][BLK_K+1];
// moved 3rd dimension to 1st dimension to make magma_sum_reduce_3d interface nicer.
// (+1 padding on the inner dims avoids shared-memory bank conflicts.)
__shared__ magmaDoubleComplex sum[BLK_K][BLK_M+1][BLK_N+1];
magmaDoubleComplex lsum;
/* partial dot product conj(A(:,i)) . B(:,j), strided over k by BLK_K lanes */
lsum = MAGMA_Z_ZERO;
for( int j = tx; j < k; j += BLK_K )
lsum += MAGMA_Z_CNJG( dA[j] )* dB[j];
sum[tx][threadIdx.y][threadIdx.z] = lsum;
magma_sum_reduce_3d< BLK_K, BLK_M+1, BLK_N+1 >( tx, threadIdx.y, threadIdx.z, sum );
/* write back: C = beta*C + alpha*sum (lane 0 holds the reduced value) */
__syncthreads();
if (threadIdx.x == 0) {
// beta == 0 handled separately so an uninitialized C (NaN/Inf) is not propagated.
if (MAGMA_Z_EQUAL(beta, MAGMA_Z_ZERO))
dC[threadIdx.y + threadIdx.z*ldc] = alpha*sum[0][threadIdx.y][threadIdx.z];
else
dC[threadIdx.y + threadIdx.z*ldc] = beta* dC[threadIdx.y + threadIdx.z*ldc] +
alpha*sum[0][threadIdx.y][threadIdx.z];
}
}
#endif
}
//==============================================================================
/**
Purpose
-------
ZGEMM_REDUCE performs one of the matrix-matrix operations
C := alpha*A^T*B + beta*C,
where alpha and beta are scalars, and A, B and C are matrices, with A
a k-by-m matrix, B a k-by-n matrix, and C an m-by-n matrix.
This routine is tuned for m, n << k. Typically, m and n are expected
to be less than 128.
@ingroup magma_zblas3
********************************************************************/
// Host wrapper: validates arguments, then dispatches zgemm_reduce_kernel with a
// BLK_K sized to the device's max threads per block (512 on arch 1.x, 1024 on
// 2.x+). The two branches are identical except for NUM_THREADS/BLK_K, which
// must be compile-time constants because BLK_K is a template parameter.
extern "C" void
magmablas_zgemm_reduce(
magma_int_t m, magma_int_t n, magma_int_t k,
magmaDoubleComplex alpha,
magmaDoubleComplex_const_ptr dA, magma_int_t ldda,
magmaDoubleComplex_const_ptr dB, magma_int_t lddb,
magmaDoubleComplex beta,
magmaDoubleComplex_ptr dC, magma_int_t lddc )
{
// Negative info encodes the 1-based position of the offending argument.
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( k < 0 )
info = -3;
else if ( ldda < m )
info = -6;
else if ( lddb < k )
info = -8;
else if ( lddc < m )
info = -11;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
// --------------------
// call CUDA ARCH 1.x -- maximum 512 threads
const int NUM_THREADS = 512;
const int BLK_K = (NUM_THREADS / (BLK_M * BLK_N)); // == 2
dim3 blocks( magma_ceildiv( m, BLK_M ), magma_ceildiv( n, BLK_N ) );
dim3 threads( BLK_K, BLK_M, BLK_N );
hipLaunchKernelGGL(( zgemm_reduce_kernel<BLK_K>) , dim3(blocks), dim3(threads), 0, magma_stream ,
m, n, k, alpha, dA, ldda, dB, lddb, beta, dC, lddc );
}
else {
// --------------------
// call CUDA ARCH 2.x -- maximum 1024 threads
const int NUM_THREADS = 1024;
const int BLK_K = (NUM_THREADS / (BLK_M * BLK_N)); // == 4
dim3 blocks( magma_ceildiv( m, BLK_M ), magma_ceildiv( n, BLK_N ) );
dim3 threads( BLK_K, BLK_M, BLK_N );
hipLaunchKernelGGL(( zgemm_reduce_kernel<BLK_K>) , dim3(blocks), dim3(threads), 0, magma_stream ,
m, n, k, alpha, dA, ldda, dB, lddb, beta, dC, lddc );
}
}
//==============================================================================
| 93e93d2b8a0fc47a6cc371e44267e989435b39af.cu | /*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@precisions normal z -> s d c
*/
#include "common_magma.h"
#include "magma_templates.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
// size of work for a thread block
#define BLK_M 16
#define BLK_N 16
// BLK_K gets defined in magmablas_zgemm_reduce,
// because it depends on the CUDA architecture at runtime.
//==============================================================================
// BLK_K size is templated, as it depends on CUDA architecture at runtime.
// Hmm... how to compile for both CUDA arch 1.x and 2.x?
// Reduction-style GEMM kernel tuned for tall-skinny operands (m, n << k):
// C(i,j) = beta*C(i,j) + alpha * sum_k conj(A(k,i)) * B(k,j)   (column-major).
// Launch shape: threads = (BLK_K, BLK_M, BLK_N); each (threadIdx.y, threadIdx.z)
// pair owns one C entry and its BLK_K x-lanes cooperatively reduce over k.
template< int BLK_K >
__global__
void zgemm_reduce_kernel(
int m, int n, int k,
magmaDoubleComplex alpha,
const magmaDoubleComplex* __restrict__ dA, int lda,
const magmaDoubleComplex* __restrict__ dB, int ldb,
magmaDoubleComplex beta,
magmaDoubleComplex * __restrict__ dC, int ldc)
{
#if (__CUDA_ARCH__ >= 200)
const int tx = threadIdx.x;
// Skip threads mapped past the matrix edge.
if (blockIdx.x*BLK_M + threadIdx.y < m && blockIdx.y*BLK_N + threadIdx.z < n) {
// Advance to this thread's column of A and B (columns hold the k-vectors).
dA += (blockIdx.x*BLK_M + threadIdx.y) * lda;
dB += (blockIdx.y*BLK_N + threadIdx.z) * ldb;
// Column-major offset of C element (row = blockIdx.x*BLK_M, col = blockIdx.y*BLK_N).
dC += blockIdx.x*BLK_M + blockIdx.y*BLK_N * ldc;
// was: sum[BLK_M][BLK_N+1][BLK_K+1];
// moved 3rd dimension to 1st dimension to make magma_sum_reduce_3d interface nicer.
// (+1 padding on the inner dims avoids shared-memory bank conflicts.)
__shared__ magmaDoubleComplex sum[BLK_K][BLK_M+1][BLK_N+1];
magmaDoubleComplex lsum;
/* partial dot product conj(A(:,i)) . B(:,j), strided over k by BLK_K lanes */
lsum = MAGMA_Z_ZERO;
for( int j = tx; j < k; j += BLK_K )
lsum += MAGMA_Z_CNJG( dA[j] )* dB[j];
sum[tx][threadIdx.y][threadIdx.z] = lsum;
magma_sum_reduce_3d< BLK_K, BLK_M+1, BLK_N+1 >( tx, threadIdx.y, threadIdx.z, sum );
/* write back: C = beta*C + alpha*sum (lane 0 holds the reduced value) */
__syncthreads();
if (threadIdx.x == 0) {
// beta == 0 handled separately so an uninitialized C (NaN/Inf) is not propagated.
if (MAGMA_Z_EQUAL(beta, MAGMA_Z_ZERO))
dC[threadIdx.y + threadIdx.z*ldc] = alpha*sum[0][threadIdx.y][threadIdx.z];
else
dC[threadIdx.y + threadIdx.z*ldc] = beta* dC[threadIdx.y + threadIdx.z*ldc] +
alpha*sum[0][threadIdx.y][threadIdx.z];
}
}
#endif
}
//==============================================================================
/**
Purpose
-------
ZGEMM_REDUCE performs one of the matrix-matrix operations
C := alpha*A^T*B + beta*C,
where alpha and beta are scalars, and A, B and C are matrices, with A
a k-by-m matrix, B a k-by-n matrix, and C an m-by-n matrix.
This routine is tuned for m, n << k. Typically, m and n are expected
to be less than 128.
@ingroup magma_zblas3
********************************************************************/
// Host wrapper: validates arguments, then dispatches zgemm_reduce_kernel with a
// BLK_K sized to the device's max threads per block (512 on arch 1.x, 1024 on
// 2.x+). The two branches are identical except for NUM_THREADS/BLK_K, which
// must be compile-time constants because BLK_K is a template parameter.
extern "C" void
magmablas_zgemm_reduce(
magma_int_t m, magma_int_t n, magma_int_t k,
magmaDoubleComplex alpha,
magmaDoubleComplex_const_ptr dA, magma_int_t ldda,
magmaDoubleComplex_const_ptr dB, magma_int_t lddb,
magmaDoubleComplex beta,
magmaDoubleComplex_ptr dC, magma_int_t lddc )
{
// Negative info encodes the 1-based position of the offending argument.
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( k < 0 )
info = -3;
else if ( ldda < m )
info = -6;
else if ( lddb < k )
info = -8;
else if ( lddc < m )
info = -11;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
// --------------------
// call CUDA ARCH 1.x -- maximum 512 threads
const int NUM_THREADS = 512;
const int BLK_K = (NUM_THREADS / (BLK_M * BLK_N)); // == 2
dim3 blocks( magma_ceildiv( m, BLK_M ), magma_ceildiv( n, BLK_N ) );
dim3 threads( BLK_K, BLK_M, BLK_N );
zgemm_reduce_kernel<BLK_K> <<< blocks, threads, 0, magma_stream >>>
( m, n, k, alpha, dA, ldda, dB, lddb, beta, dC, lddc );
}
else {
// --------------------
// call CUDA ARCH 2.x -- maximum 1024 threads
const int NUM_THREADS = 1024;
const int BLK_K = (NUM_THREADS / (BLK_M * BLK_N)); // == 4
dim3 blocks( magma_ceildiv( m, BLK_M ), magma_ceildiv( n, BLK_N ) );
dim3 threads( BLK_K, BLK_M, BLK_N );
zgemm_reduce_kernel<BLK_K> <<< blocks, threads, 0, magma_stream >>>
( m, n, k, alpha, dA, ldda, dB, lddb, beta, dC, lddc );
}
}
//==============================================================================
|
1bf2a331492634acc795b1046b5b60495d9a5a72.hip | // !!! This is a file automatically generated by hipify!!!
#include "dense_matrix.h"
#include "cuda_rand_kernel.cuh"
#include "cuda_unary_kernel.cuh"
#include "cuda_binary_kernel.cuh"
#include "cuda_helper.h"
#include "sparse_matrix.h"
#include <thrust/extrema.h>
#include <hip/hip_runtime.h>
#include <iostream>
#define min(x, y) (x < y ? x : y)
// Destructor: waits for any in-flight async work on this matrix's stream before
// releasing the device buffer, so pending copies/kernels don't touch freed memory.
template<typename Dtype>
DenseMat<GPU, Dtype>::~DenseMat()
{
pointer_buf.clear();
hipStreamSynchronize(GPUHandle::streams[streamid]);
MatUtils<GPU>::DelArr(data);
}
// Default constructor: empty 0x0 matrix bound to stream `_streamid`; no device
// memory is allocated until Resize/CopyFrom.
template<typename Dtype>
DenseMat<GPU, Dtype>::DenseMat(unsigned int _streamid)
{
mem_size = this->count = this->rows = this->cols = 0U;
streamid = _streamid;
data = nullptr;
pointer_buf.clear();
}
// Sized constructor: allocates a zero-initialized _rows x _cols device buffer.
template<typename Dtype>
DenseMat<GPU, Dtype>::DenseMat(size_t _rows, size_t _cols, unsigned int _streamid)
{
this->rows = _rows;
this->cols = _cols;
this->count = _rows * _cols;
// Capacity rounded up to an even element count (presumably for alignment — TODO confirm).
mem_size = this->count + (this->count & 1);
MatUtils<GPU>::MallocArr(data, sizeof(Dtype) * mem_size);
hipMemset(data, 0, sizeof(Dtype) * mem_size);
dev_ptr = thrust::device_pointer_cast(data);
pointer_buf.clear();
streamid = _streamid;
}
// Changes the logical shape. Reallocates only when the new element count exceeds
// the current capacity; in that case the OLD CONTENTS ARE DISCARDED (free +
// fresh zeroed allocation, not a preserving realloc). Shrinking keeps the buffer.
template<typename Dtype>
void DenseMat<GPU, Dtype>::Resize(size_t _newRows, size_t _newCols)
{
// Drain pending async work before potentially freeing the buffer it uses.
hipStreamSynchronize(GPUHandle::streams[streamid]);
this->rows = _newRows;
this->cols = _newCols;
this->count = this->rows * this->cols;
if (this->count > mem_size)
{
mem_size = this->count + (this->count & 1);
MatUtils<GPU>::DelArr(data);
MatUtils<GPU>::MallocArr(data, sizeof(Dtype) * mem_size);
dev_ptr = thrust::device_pointer_cast(data);
hipMemset(data, 0, sizeof(Dtype) * mem_size);
}
}
// Copies a host-side dense matrix into this device matrix (async H2D on this
// matrix's stream; caller must not free/alter src.data before the stream drains).
template<typename Dtype>
void DenseMat<GPU, Dtype>::CopyFrom(DenseMat<CPU, Dtype>& src)
{
Resize(src.rows, src.cols);
hipMemcpyAsync(data, src.data, sizeof(Dtype) * this->count, hipMemcpyHostToDevice, GPUHandle::streams[streamid]);
}
// Copies another device dense matrix (async D2D on this matrix's stream).
template<typename Dtype>
void DenseMat<GPU, Dtype>::CopyFrom(DenseMat<GPU, Dtype>& src)
{
Resize(src.rows, src.cols);
hipMemcpyAsync(data, src.data, sizeof(Dtype) * this->count, hipMemcpyDeviceToDevice, GPUHandle::streams[streamid]);
}
// Densify a host sparse matrix into this device matrix — not implemented yet;
// only resizes, then throws.
template<typename Dtype>
void DenseMat<GPU, Dtype>::CopyFrom(SparseMat<CPU, Dtype>& src)
{
Resize(src.rows, src.cols);
throw "not implemented";
//memcpy(data, src.data, sizeof(Dtype) * this->count);
}
// Densify a device sparse matrix — not implemented yet; only resizes, then throws.
template<typename Dtype>
void DenseMat<GPU, Dtype>::CopyFrom(SparseMat<GPU, Dtype>& src)
{
Resize(src.rows, src.cols);
throw "not implemented";
//memcpy(data, src.data, sizeof(Dtype) * this->count);
}
// Resize to _rows x _cols and zero the contents (synchronous hipMemset).
template<typename Dtype>
void DenseMat<GPU, Dtype>::Zeros(size_t _rows, size_t _cols)
{
Resize(_rows, _cols);
hipMemset(data, 0, this->count * sizeof(Dtype));
}
// Zero the current contents in place; no-op on an empty matrix.
template<typename Dtype>
void DenseMat<GPU, Dtype>::Zeros()
{
if (this->count)
hipMemset(data, 0, this->count * sizeof(Dtype));
}
// Fill with N(mean, std) samples; optionally resizes first when both dims are nonzero.
template<typename Dtype>
void DenseMat<GPU, Dtype>::SetRandN(Dtype mean, Dtype std, size_t _rows, size_t _cols)
{
if (_rows && _cols)
{
Resize(_rows, _cols);
}
SetRand(this->data, this->count, NormalRandomizer<Dtype>(mean, std), streamid);
}
// Fill with Uniform(lb, ub) samples; optionally resizes first.
template<typename Dtype>
void DenseMat<GPU, Dtype>::SetRandU(Dtype lb, Dtype ub, size_t _rows, size_t _cols)
{
if (_rows && _cols)
{
Resize(_rows, _cols);
}
SetRand(this->data, this->count, UniformRandomizer<Dtype>(lb, ub), streamid);
}
// Fill with random signs (BinomialRandomizer); optionally resizes first.
template<typename Dtype>
void DenseMat<GPU, Dtype>::SetRandSign(size_t _rows, size_t _cols)
{
if (_rows && _cols)
{
Resize(_rows, _cols);
}
SetRand(this->data, this->count, BinomialRandomizer<Dtype>(), streamid);
}
// Fill with chi-square(degree) samples; optionally resizes first.
template<typename Dtype>
void DenseMat<GPU, Dtype>::SetRandChi2(Dtype degree, size_t _rows, size_t _cols)
{
if (_rows && _cols)
{
Resize(_rows, _cols);
}
SetRand(this->data, this->count, ChisquareRandomizer<Dtype>(degree), streamid);
}
// Element-wise: x += scalar.
template<typename Dtype>
void DenseMat<GPU, Dtype>::Add(Dtype scalar)
{
UnaryOp(this->data, this->count, UnaryAdd<Dtype>(scalar), streamid);
}
// Element-wise: x = scalar.
template<typename Dtype>
void DenseMat<GPU, Dtype>::Fill(Dtype scalar)
{
UnaryOp(data, this->count, UnarySet<Dtype>(scalar), streamid);
}
// Element-wise: x *= scalar, with fast paths for 0 (Fill) and 1 (no-op).
template<typename Dtype>
void DenseMat<GPU, Dtype>::Scale(Dtype scalar)
{
if (scalar == 0.0)
{
Fill(0);
} else if (scalar != 1)
UnaryOp(data, this->count, UnaryScale<Dtype>(scalar), streamid);
}
// Element-wise power — not implemented yet; always throws.
template<typename Dtype>
void DenseMat<GPU, Dtype>::Power(Dtype scalar)
{
throw "not implemented";
}
// Element-wise: x = 1/x.
template<typename Dtype>
void DenseMat<GPU, Dtype>::Inv()
{
UnaryOp(this->data, this->count, UnaryInv<Dtype>(), streamid);
}
// Element-wise: x = 1/sqrt(x).
template<typename Dtype>
void DenseMat<GPU, Dtype>::InvSqrt()
{
UnaryOp(this->data, this->count, UnaryInvSqrt<Dtype>(), streamid);
}
// Copied from https://github.com/torch/cunn/blob/master/SoftMax.cu
// In-place row softmax. One block per row (blockIdx.x); blockDim.x threads
// stride over the `dim` columns. Numerically stabilized by subtracting the row
// max before exponentiating. Shared buffer slot [SOFTMAX_THREADS] carries the
// block-wide reduction result (max, then sum).
template<typename Dtype>
__global__ void cunn_SoftMax_updateOutput_kernel(Dtype *orig_ptr, int batch_size, int dim)
{
__shared__ Dtype buffer[SOFTMAX_THREADS + 1];
// blockIdx.y is an extra column offset; launched with gridDim.y == 1 so it is 0 here.
Dtype* dst = orig_ptr + blockIdx.x * dim + blockIdx.y;
int i_start = threadIdx.x;
int i_end = dim;
int i_step = blockDim.x;
Dtype z;
// Phase 1: per-thread running max over this thread's strided elements.
if (i_start < dim)
buffer[threadIdx.x] = dst[i_start];
for (int i = i_start; i < i_end; i += i_step)
{
z = dst[i];
if(buffer[threadIdx.x] < z)
buffer[threadIdx.x] = z;
}
__syncthreads();
// Serial reduction of per-thread maxima by thread 0; min(dim, blockDim.x)
// bounds the read to slots that were actually initialized.
if (threadIdx.x == 0)
{
z = buffer[0];
for (int i = 1; i < min(dim, blockDim.x); i++)
{
if(z < buffer[i])
z = buffer[i];
}
buffer[SOFTMAX_THREADS] = z;
}
__syncthreads();
// Phase 2: exponentiate shifted values in place and accumulate per-thread sums.
Dtype max_k = buffer[SOFTMAX_THREADS];
buffer[threadIdx.x] = 0;
for (int i = i_start; i < i_end; i += i_step)
{
z = cuda_exp(dst[i] - max_k);
buffer[threadIdx.x] += z;
dst[i] = z;
}
__syncthreads();
// Serial reduction of per-thread sums by thread 0 (all slots were zeroed above).
if (threadIdx.x == 0)
{
z = 0;
for (int i = 0; i < blockDim.x; i++)
z += buffer[i];
buffer[SOFTMAX_THREADS] = z;
}
__syncthreads();
// Phase 3: normalize.
Dtype sum_k = buffer[SOFTMAX_THREADS];
for (int i = i_start; i < i_end; i += i_step)
dst[i] /= sum_k;
}
// In-place row-wise softmax: one block per row, SOFTMAX_THREADS threads per block.
template<typename Dtype>
void DenseMat<GPU, Dtype>::Softmax()
{
dim3 blocks(this->rows, 1);
dim3 threads(SOFTMAX_THREADS);
hipLaunchKernelGGL(( cunn_SoftMax_updateOutput_kernel) , dim3(blocks), dim3(threads), 0, GPUHandle::streams[streamid] , this->data, this->rows, this->cols);
}
// Writes 1.0 on the main diagonal of a dim x dim row-major matrix; one thread
// per diagonal element, surplus threads exit early.
template<typename Dtype>
__global__ void IdentityKernel(Dtype* dst, int dim)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= dim)
        return;
    dst[idx * dim + idx] = 1.0;
}
// Turns this matrix into a dim x dim identity (resizing first when dim != 0;
// otherwise the current shape must already be square). Zeros everything, then
// launches one thread per diagonal element.
template<typename Dtype>
void DenseMat<GPU, Dtype>::Identity(size_t dim)
{
if (dim)
Resize(dim, dim);
assert(this->rows == this->cols);
Fill(0.0);
int thread_num, blocksPerGrid;
// Small matrices fit in a single block; otherwise ceil-divide rows over blocks.
if (this->rows < c_uCudaThreadNum)
{
thread_num = this->rows;
blocksPerGrid = 1;
} else
{
thread_num = c_uCudaThreadNum;
blocksPerGrid = (this->rows + thread_num - 1) / thread_num;
}
hipLaunchKernelGGL(( IdentityKernel) , dim3(blocksPerGrid), dim3(thread_num), 0, GPUHandle::streams[streamid], this->data, this->cols);
}
// this = log(src), resizing to match src.
template<typename Dtype>
void DenseMat<GPU, Dtype>::Log(DenseMat<GPU, Dtype>& src)
{
Resize(src.rows, src.cols);
UnaryOp(this->data, src.data, this->count, UnaryLog<Dtype>(), streamid);
}
// In-place element-wise log.
template<typename Dtype>
void DenseMat<GPU, Dtype>::Log()
{
UnaryOp(this->data, this->count, UnaryLog<Dtype>(), streamid);
}
// this = exp(src), resizing to match src.
template<typename Dtype>
void DenseMat<GPU, Dtype>::Exp(DenseMat<GPU, Dtype>& src)
{
Resize(src.rows, src.cols);
UnaryOp(this->data, src.data, this->count, UnaryExp<Dtype>(), streamid);
}
// In-place element-wise exp.
template<typename Dtype>
void DenseMat<GPU, Dtype>::Exp()
{
UnaryOp(this->data, this->count, UnaryExp<Dtype>(), streamid);
}
// this = sin(src), resizing to match src.
template<typename Dtype>
void DenseMat<GPU, Dtype>::Sin(DenseMat<GPU, Dtype>& src)
{
Resize(src.rows, src.cols);
UnaryOp(this->data, src.data, this->count, UnarySin<Dtype>(), streamid);
}
// In-place element-wise sin.
template<typename Dtype>
void DenseMat<GPU, Dtype>::Sin()
{
UnaryOp(this->data, this->count, UnarySin<Dtype>(), streamid);
}
// this = cos(src), resizing to match src.
template<typename Dtype>
void DenseMat<GPU, Dtype>::Cos(DenseMat<GPU, Dtype>& src)
{
Resize(src.rows, src.cols);
UnaryOp(this->data, src.data, this->count, UnaryCos<Dtype>(), streamid);
}
// In-place element-wise cos.
template<typename Dtype>
void DenseMat<GPU, Dtype>::Cos()
{
UnaryOp(this->data, this->count, UnaryCos<Dtype>(), streamid);
}
// In-place element-wise sqrt.
template<typename Dtype>
void DenseMat<GPU, Dtype>::Sqrt()
{
UnaryOp(data, this->count, UnarySqrt<Dtype>(), streamid);
}
// In-place element-wise square.
template<typename Dtype>
void DenseMat<GPU, Dtype>::Square()
{
UnaryOp(this->data, this->count, UnarySquare<Dtype>(), streamid);
}
// Euclidean norm of all elements (blocking: drains the stream, then calls cublas nrm2).
template<typename Dtype>
Dtype DenseMat<GPU, Dtype>::Norm2()
{
hipStreamSynchronize(GPUHandle::streams[streamid]);
return CudaHelper_Norm2(GPUHandle::cublashandle, this->count, data);
}
// Sum of absolute values (blocking, cublas asum).
template<typename Dtype>
Dtype DenseMat<GPU, Dtype>::Asum()
{
hipStreamSynchronize(GPUHandle::streams[streamid]);
return CudaHelper_Asum(GPUHandle::cublashandle, this->count, data);
}
// Plain sum of all elements (blocking, thrust::reduce on the device pointer).
template<typename Dtype>
Dtype DenseMat<GPU, Dtype>::Sum()
{
hipStreamSynchronize(GPUHandle::streams[streamid]);
return thrust::reduce(dev_ptr, dev_ptr + this->count);
}
// Clamps every element of dst into [-max_abs, max_abs]; one thread per element.
template<typename Dtype>
__global__ void ClipKernel(Dtype *dst, Dtype max_abs, int numElements)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < numElements)
    {
        if (dst[i] < -max_abs)
            dst[i] = -max_abs;
        // BUG FIX: the original read "if (dst[i] > max_abs);" — the stray
        // semicolon made the following assignment unconditional, overwriting
        // EVERY element with max_abs instead of clamping only the upper tail.
        if (dst[i] > max_abs)
            dst[i] = max_abs;
    }
}
// Clamps all elements into [-max_abs, max_abs] via ClipKernel on this stream.
// NOTE(review): thread_num would be 0 for an empty matrix, dividing by zero in
// the block count — callers presumably never clip empty matrices; verify.
template<typename Dtype>
void DenseMat<GPU, Dtype>::Clip(Dtype max_abs)
{
assert(max_abs >= 0);
int thread_num = min(c_uCudaThreadNum, this->count);
int blocksPerGrid = (this->count + thread_num - 1) / thread_num;
hipLaunchKernelGGL(( ClipKernel) , dim3(blocksPerGrid), dim3(thread_num), 0, GPUHandle::streams[streamid] , this->data, max_abs, this->count);
}
// Largest absolute value over the whole matrix. cublas amax returns a 1-based
// index, hence the "pos - 1" when copying the element back to the host.
template<typename Dtype>
Dtype DenseMat<GPU, Dtype>::Amax()
{
hipStreamSynchronize(GPUHandle::streams[streamid]);
int pos;
CudaHelper_Amax(GPUHandle::cublashandle, this->count, data, &pos);
Dtype result;
hipMemcpy(&result, this->data + pos - 1, sizeof(Dtype), hipMemcpyDeviceToHost);
return fabs(result);
}
// Maximum value of one row (blocking thrust scan over that row's span).
template<typename Dtype>
Dtype DenseMat<GPU, Dtype>::GetRowMax(size_t row_idx)
{
assert(row_idx < this->rows);
hipStreamSynchronize(GPUHandle::streams[streamid]);
size_t offset = row_idx * this->cols;
auto iter = thrust::max_element(dev_ptr + offset, dev_ptr + offset + this->cols);
return *iter;
}
// Column index (0-based) of the maximum value of one row.
template<typename Dtype>
size_t DenseMat<GPU, Dtype>::GetRowMaxIdx(size_t row_idx)
{
assert(row_idx < this->rows);
hipStreamSynchronize(GPUHandle::streams[streamid]);
size_t offset = row_idx * this->cols;
auto iter = thrust::max_element(dev_ptr + offset, dev_ptr + offset + this->cols);
return iter - dev_ptr - offset;
}
// dst[i] += src[i] * factor[col(i)]: scales each column of src by the matching
// entry of a row vector `factor` and accumulates into dst.
template<typename Dtype>
__global__ void MulRowKernel(Dtype *dst, Dtype* src, Dtype* factor, int cols, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i < numElements)
{
dst[i] += src[i] * factor[i % cols];
}
}
// this = beta * this + src .* rowvec(x): Scale(beta) first, then the kernel adds
// the column-scaled src.
template<typename Dtype>
void DenseMat<GPU, Dtype>::MulRowVec(DenseMat<GPU, Dtype>& src, DenseMat<GPU, Dtype>& x, Dtype beta)
{
assert(&src != this);
Resize(src.rows, src.cols);
assert(x.count == this->cols);
Scale(beta);
int thread_num = min(c_uCudaThreadNum, this->count);
int blocksPerGrid = (this->count + thread_num - 1) / thread_num;
hipLaunchKernelGGL(( MulRowKernel) , dim3(blocksPerGrid), dim3(thread_num), 0, GPUHandle::streams[streamid] , this->data, src.data, x.data, this->cols, this->count);
}
// In-place variant: dst[i] *= factor[col(i)].
template<typename Dtype>
__global__ void MulRowKernel(Dtype *dst, Dtype* factor, int cols, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i < numElements)
{
dst[i] = dst[i] * factor[i % cols];
}
}
// In-place: scales each column of this matrix by the matching entry of row vector x.
template<typename Dtype>
void DenseMat<GPU, Dtype>::MulRowVec(DenseMat<GPU, Dtype>& x)
{
assert(x.count == this->cols);
int thread_num = min(c_uCudaThreadNum, this->count);
int blocksPerGrid = (this->count + thread_num - 1) / thread_num;
hipLaunchKernelGGL(( MulRowKernel) , dim3(blocksPerGrid), dim3(thread_num), 0, GPUHandle::streams[streamid] , this->data, x.data, this->cols, this->count);
}
// this = column-wise mean of src (a 1 x cols row vector), computed as a GeMV
// against a vector filled with 1/rows. cublas is column-major, so src (row-major
// rows x cols) is treated as a cols x rows column-major matrix and multiplied
// without transpose.
template<typename Dtype>
void DenseMat<GPU, Dtype>::Mean(DenseMat<GPU, Dtype>& src)
{
Resize(1, src.cols);
if (bias_mult.count < src.rows)
bias_mult.Resize(src.rows);
bias_mult.Fill(1.0 / src.rows);
Dtype alpha = 1.0, beta = 0.0;
hipStreamSynchronize(GPUHandle::streams[streamid]);
CudaHelper_GeMV(GPUHandle::cublashandle, GPU_T(Trans::N),
src.cols, src.rows,
&alpha, src.data, src.cols,
bias_mult.data, 1,
&beta, this->data, 1);
}
// Adds alpha * rowvec(x) to every row, via a rank-1 update against a ones
// vector (lazily grown and cached in bias_mult).
template<typename Dtype>
void DenseMat<GPU, Dtype>::AddRowVec(DenseMat<GPU, Dtype>& x, Dtype alpha)
{
assert(x.count == this->cols)
;
if (bias_mult.count < this->rows)
{
bias_mult.Resize(this->rows);
bias_mult.Fill(1.0);
}
hipStreamSynchronize(GPUHandle::streams[streamid]);
// cublas is col-major
CudaHelper_Ger(GPUHandle::cublashandle,
this->cols, this->rows,
&alpha,
x.data, bias_mult.data, data);
}
// Adds alpha * colvec(x) to every column, the symmetric rank-1 update.
template<typename Dtype>
void DenseMat<GPU, Dtype>::AddColVec(DenseMat<GPU, Dtype>& x, Dtype alpha)
{
assert(x.count == this->rows);
if (bias_mult.count < this->cols)
{
bias_mult.Resize(this->cols);
bias_mult.Fill(1.0);
}
hipStreamSynchronize(GPUHandle::streams[streamid]);
// cublas is col-major
CudaHelper_Ger(GPUHandle::cublashandle,
this->cols, this->rows,
&alpha,
bias_mult.data, x.data, data);
}
// One thread per nonzero: binary-searches the CSR row_ptr array to find which
// row owns nonzero i, then writes val[i] at (row, col_idx[i]) of dst.
// NOTE(review): if the search loop ever exits without `break`, `row` is used
// uninitialized — this relies on row_ptr being a well-formed CSR offset array.
template<typename Dtype>
__global__ void CSRSubmatAddKernel(Dtype* dst, int* row_ptr, int* col_idx, Dtype* val, int nnz, int n_rows, int dst_cols)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < nnz)
{
int l = 0, r = n_rows - 1, row;
while (l <= r)
{
row = (l + r) / 2;
if (row_ptr[row] <= i)
{
if (row_ptr[row + 1] > i)
break;
else
l = row + 1;
} else r = row - 1;
}
dst[row * dst_cols + col_idx[i]] = val[i];
}
}
// Scales the WHOLE matrix by beta, then writes (not adds — despite the name,
// the kernel assigns) src's nonzeros into the submatrix at (row_start, col_start).
template<typename Dtype>
void DenseMat<GPU, Dtype>::SubmatAdd(size_t row_start, size_t col_start, SparseMat<GPU, Dtype>& src, Dtype beta)
{
assert(row_start + src.rows <= this->rows);
assert(col_start + src.cols <= this->cols);
Scale(beta);
int thread_num = min(c_uCudaThreadNum, src.data->nnz);
int blocksPerGrid = (src.data->nnz + thread_num - 1) / thread_num;
hipLaunchKernelGGL(( CSRSubmatAddKernel), dim3(blocksPerGrid), dim3(thread_num), 0, GPUHandle::streams[streamid] , this->data + row_start * this->cols + col_start, src.data->ptr, src.data->col_idx, src.data->val, src.data->nnz, src.rows, this->cols);
}
// Dense variant: scales the whole matrix by beta, then GeaM computes
// submat = alpha*src + beta*submat over the target region.
// NOTE(review): the submatrix region therefore receives beta TWICE (once from
// Scale, once from GeaM), i.e. beta^2 * original + src — compare the sparse
// overload, which scales once and then assigns. Looks like a bug; confirm the
// intended semantics before changing.
template<typename Dtype>
void DenseMat<GPU, Dtype>::SubmatAdd(size_t row_start, size_t col_start, DenseMat<GPU, Dtype>& src, Dtype beta)
{
assert(row_start + src.rows <= this->rows);
assert(col_start + src.cols <= this->cols);
Scale(beta);
Dtype alpha = 1.0;
Dtype* dst_ptr = this->data + row_start * this->cols + col_start;
hipStreamSynchronize(GPUHandle::streams[streamid]);
CudaHelper_GeaM(GPUHandle::cublashandle,
GPU_T(Trans::N), GPU_T(Trans::N),
src.cols, src.rows,
&alpha, src.data, src.cols,
&beta, dst_ptr, this->cols,
dst_ptr, this->cols);
}
// Caches the raw data pointers of mat_list into the member pointer_buf so a
// kernel can receive a Dtype** (via thrust::raw_pointer_cast at the call site).
// The buffer only grows; stale tail entries beyond mat_list.size() are ignored.
template<typename Dtype>
void DenseMat<GPU, Dtype>::GetPointerBuf(std::vector< DenseMat<GPU, Dtype>* >& mat_list)
{
if (mat_list.size() > pointer_buf.size())
{
pointer_buf.resize(mat_list.size());
}
for (size_t i = 0; i < mat_list.size(); ++i)
pointer_buf[i] = mat_list[i]->data;
}
// Splits a (rows x this_cols) matrix column-wise into equal chunks of
// other_cols columns each; element i goes to destination chunk
// cur_col / other_cols at the corresponding (row, local column) slot.
template<typename Dtype>
__global__ void ScatterColsKernel(Dtype** dst_list, Dtype* src, const int other_cols, const int this_cols, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
int cur_col = i % this_cols;
Dtype* dst = dst_list[cur_col / other_cols];
int dst_offset = (i / this_cols) * other_cols + cur_col % other_cols;
dst[dst_offset] = src[i];
}
}
// Scatters this matrix's columns evenly across dst_list (cols must divide
// evenly); resizes each destination, gathers their raw pointers, then launches.
template<typename Dtype>
void DenseMat<GPU, Dtype>::ScatterCols(std::vector< DenseMat<GPU, Dtype>* >& dst_list)
{
assert(dst_list.size() > 0);
assert(this->cols % dst_list.size() == 0);
for (size_t i = 0; i < dst_list.size(); ++i)
dst_list[i]->Resize(this->rows, this->cols / dst_list.size());
GetPointerBuf(dst_list);
int thread_num = min(c_uCudaThreadNum, this->count);
int blocksPerGrid = (this->count + thread_num - 1) / thread_num;
hipLaunchKernelGGL(( ScatterColsKernel) , dim3(blocksPerGrid), dim3(thread_num), 0, GPUHandle::streams[streamid] , thrust::raw_pointer_cast(&pointer_buf[0]), this->data, dst_list[0]->cols, this->cols, this->count);
}
// Tiles a single (rows x src_cols) matrix num_parts times side by side into a
// (rows x dst_cols) destination: dst row r is src row r repeated num_parts times.
template<typename Dtype>
__global__ void ConcatColsKernel(Dtype* dst, Dtype* src, const int src_cols, const int dst_cols, const int num_parts, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
int cur_col = i % dst_cols;
// (i - cur_col) / num_parts == row * src_cols, the start of src's row.
int src_offset = (i - cur_col) / num_parts;
dst[i] = src[src_offset + cur_col % src_cols];
}
}
// Fills this matrix by horizontally tiling src (this->cols must be a multiple
// of src.cols); does not resize — the shapes are asserted, not adjusted.
template<typename Dtype>
void DenseMat<GPU, Dtype>::ConcatCols(DenseMat<GPU, Dtype>& src)
{
assert(this->rows == src.rows);
assert(this->cols % src.cols == 0);
int num_parts = this->cols / src.cols;
int thread_num = min(c_uCudaThreadNum, this->count);
int blocksPerGrid = (this->count + thread_num - 1) / thread_num;
hipLaunchKernelGGL(( ConcatColsKernel) , dim3(blocksPerGrid), dim3(thread_num), 0, GPUHandle::streams[streamid] , this->data, src.data, src.cols, this->cols, num_parts, this->count);
}
// Inverse of ScatterColsKernel: gathers element (row, local col) from source
// chunk cur_col / other_cols into column-concatenated dst.
template<typename Dtype>
__global__ void ConcatColsKernel(Dtype* dst, Dtype** src_list, const int other_cols, const int this_cols, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
int cur_col = i % this_cols;
Dtype* src = src_list[cur_col / other_cols];
int src_offset = (i / this_cols) * other_cols + cur_col % other_cols;
dst[i] = src[src_offset];
}
}
// Concatenates equally-shaped matrices from src_list side by side into this
// matrix (shape must already match src_list.size() * src_list[0]->cols).
template<typename Dtype>
void DenseMat<GPU, Dtype>::ConcatCols(std::vector< DenseMat<GPU, Dtype>* >& src_list)
{
assert(src_list.size() > 0);
assert(src_list.size() * src_list[0]->cols == this->cols);
GetPointerBuf(src_list);
int thread_num = min(c_uCudaThreadNum, this->count);
int blocksPerGrid = (this->count + thread_num - 1) / thread_num;
hipLaunchKernelGGL(( ConcatColsKernel) , dim3(blocksPerGrid), dim3(thread_num), 0, GPUHandle::streams[streamid] , this->data, thrust::raw_pointer_cast(&pointer_buf[0]), src_list[0]->cols, this->cols, this->count);
}
// Element-wise multiply of a row-major dense matrix (dst) by a CSR sparse
// matrix (row_ptr / col_idx / val). One thread per dense element: it binary
// searches the column indices of its row for a matching nonzero. Entries of
// dst with no matching nonzero in the sparse pattern are set to zero.
template<typename Dtype>
__global__ void SparseEleWiseMulKernel(Dtype* dst, int* row_ptr, int* col_idx, Dtype* val, int n_cols, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
int cur_row = i / n_cols;
int cur_col = i % n_cols;
// Binary search for cur_col within [row_ptr[cur_row], row_ptr[cur_row+1])
// — assumes col_idx is sorted within each row (standard CSR).
int l = row_ptr[cur_row], r = row_ptr[cur_row + 1] - 1, idx;
while (l <= r)
{
idx = (l + r) / 2;
if (col_idx[idx] < cur_col)
l = idx + 1;
else if (col_idx[idx] > cur_col)
r = idx - 1;
else {
dst[i] *= val[idx];
return;
}
}
// No nonzero at (cur_row, cur_col): the product is zero.
dst[i] = 0;
}
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::EleWiseMul(SparseMat<GPU, Dtype>& src)
{
assert(this->rows == src.rows && this->cols == src.cols);
int thread_num = min(c_uCudaThreadNum, this->count);
int blocksPerGrid = (this->count + thread_num - 1) / thread_num;
hipLaunchKernelGGL(( SparseEleWiseMulKernel) , dim3(blocksPerGrid), dim3(thread_num), 0, GPUHandle::streams[streamid] , this->data, src.data->ptr, src.data->col_idx, src.data->val, this->cols, this->count);
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::EleWiseMul(DenseMat<GPU, Dtype>& src)
{
assert(this->rows == src.rows && this->cols == src.cols);
BinaryOp(this->data, src.data, this->count, BinaryMul<Dtype>(), streamid);
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::EleWiseMul(DenseMat<GPU, Dtype>& lhs, DenseMat<GPU, Dtype>& rhs)
{
assert(lhs.rows == rhs.rows && lhs.cols == rhs.cols);
Resize(lhs.rows, lhs.cols);
BinaryOp(this->data, lhs.data, rhs.data, this->count, BinaryMul<Dtype>(), streamid);
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::EleWiseDiv(DenseMat<GPU, Dtype>& src)
{
assert(this->rows == src.rows && this->cols == src.cols);
BinaryOp(this->data, src.data, this->count, BinaryDiv<Dtype>(), streamid);
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::EleWiseDiv(DenseMat<GPU, Dtype>& lhs, DenseMat<GPU, Dtype>& rhs)
{
assert(lhs.rows == rhs.rows && lhs.cols == rhs.cols);
Resize(lhs.rows, lhs.cols);
BinaryOp(this->data, lhs.data, rhs.data, this->count, BinaryDiv<Dtype>(), streamid);
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::AddSubmat(DenseMat<GPU, Dtype>& src, size_t row_start, size_t col_start, Dtype beta)
{
assert(row_start + this->rows <= src.rows);
assert(col_start + this->cols <= src.cols);
Dtype alpha = 1.0;
Dtype* src_ptr = src.data + row_start * src.cols + col_start;
hipStreamSynchronize(GPUHandle::streams[streamid]);
CudaHelper_GeaM(GPUHandle::cublashandle,
GPU_T(Trans::N), GPU_T(Trans::N),
this->cols, this->rows,
&alpha, src_ptr, src.cols,
&beta, this->data, this->cols,
this->data, this->cols);
}
// Permutes the columns of a row-major matrix: element (r, c) of dst is taken
// from column perm[c] of the same row of src. One thread per element.
template<typename Dtype>
__global__ void ShuffleColsKernel(Dtype* dst, Dtype* src, const int* perm, int cols, int numElements)
{
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= numElements)
        return;
    int col = tid % cols;
    // tid - col is the start of this row; pick the permuted source column.
    dst[tid] = src[tid - col + perm[col]];
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::ShuffleCols(DenseMat<GPU, Dtype>& src, const int* perm)
{
Resize(src.rows, src.cols);
int thread_num = min(c_uCudaThreadNum, this->count);
int blocksPerGrid = (this->count + thread_num - 1) / thread_num;
hipLaunchKernelGGL(( ShuffleColsKernel) , dim3(blocksPerGrid), dim3(thread_num), 0, GPUHandle::streams[streamid] , this->data, src.data, perm, this->cols, this->count);
}
// Folds a wide matrix into a narrow one: src has (cols * num_parts) columns
// per row; dst (same row count, `cols` columns) receives, at column c, the
// sum of src columns { c, c + cols, c + 2*cols, ... }. One thread per dst
// element, serial loop over the num_parts chunks.
template<typename Dtype>
__global__ void ReduceColsKernel(Dtype* dst, Dtype* src, const int cols, const int num_parts, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i < numElements)
{
// i = row * cols + c; src_offset becomes row * (cols * num_parts) + c.
int src_offset = i % cols, j;
src_offset += (i - src_offset) * num_parts;
dst[i] = 0;
for (j = 0; j < num_parts; ++j)
{
dst[i] += src[src_offset];
src_offset += cols;  // step to the same column of the next chunk
}
}
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::ReduceCols(DenseMat<GPU, Dtype>& src)
{
assert(src.cols % this->cols == 0);
int num_parts = src.cols / this->cols;
int thread_num = min(c_uCudaThreadNum, this->count);
int blocksPerGrid = (this->count + thread_num - 1) / thread_num;
hipLaunchKernelGGL(( ReduceColsKernel) , dim3(blocksPerGrid), dim3(thread_num), 0, GPUHandle::streams[streamid] , this->data, src.data, this->cols, num_parts, this->count);
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::GeaM(Dtype alpha, Trans transa, DenseMat<GPU, Dtype>& A, Dtype beta, Trans transb, DenseMat<GPU, Dtype>& B)
{
if (transa == Trans::N)
Resize(A.rows, A.cols);
else
Resize(A.cols, A.rows);
CudaHelper_GeaM(GPUHandle::cublashandle,
GPU_T(transa), GPU_T(transb),
this->cols, this->rows,
&alpha, A.data, A.cols,
&beta, B.data, B.cols,
data, this->cols);
}
template<typename Dtype>
__global__ void AxpbyKernel(Dtype* y, Dtype* x, Dtype a, Dtype b, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
y[i] = y[i] * b + a * x[i];
}
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::Axpby(Dtype a, DenseMat<GPU, Dtype>& x, Dtype b)
{
assert(x.count == this->count);
Scale(b);
Axpy(a, x);
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::Axpy(Dtype alpha, DenseMat<GPU, Dtype>& x)
{
assert(x.rows == this->rows && x.cols == this->cols);
hipStreamSynchronize(GPUHandle::streams[streamid]);
CudaHelper_Axpy(GPUHandle::cublashandle, this->count, &alpha, x.data, data);
}
// dst += alpha * sparse, where sparse is CSR (row_ptr/col_idx/val) and dst
// is row-major n_rows x n_cols. One thread per nonzero: thread i binary
// searches row_ptr for the row that owns nonzero i (the row r with
// row_ptr[r] <= i < row_ptr[r+1]), then accumulates into the dense element.
// NOTE(review): `row` is only guaranteed to be set when the CSR pointers are
// well-formed (every i < nnz falls inside some row) — assumed, not checked.
template<typename Dtype>
__global__ void SpAxpyKernel(Dtype* dst, int* row_ptr, int* col_idx, Dtype* val, int nnz, int n_rows, int n_cols, Dtype alpha)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < nnz)
{
int l = 0, r = n_rows - 1, row;
while (l <= r)
{
row = (l + r) / 2;
if (row_ptr[row] <= i)
{
if (row_ptr[row + 1] > i)
break;  // found: nonzero i belongs to this row
else
l = row + 1;
} else r = row - 1;
}
dst[row * n_cols + col_idx[i]] += val[i] * alpha;
}
}
// this += alpha * x, where x is a CSR sparse matrix with identical shape.
// One kernel thread per nonzero of x.
template<typename Dtype>
void DenseMat<GPU, Dtype>::Axpy(Dtype alpha, SparseMat<GPU, Dtype>& x)
{
    assert(x.rows == this->rows && x.cols == this->cols);
    // BUG FIX: when x has no nonzeros, thread_num would be 0 and the launch
    // below would use an invalid configuration (0 threads / divide-by-zero
    // in the block count). Adding zero elements is a no-op anyway.
    if (x.data->nnz <= 0)
        return;
    int thread_num = min(c_uCudaThreadNum, x.data->nnz);
    int blocksPerGrid = (x.data->nnz + thread_num - 1) / thread_num;
    hipLaunchKernelGGL(( SpAxpyKernel) , dim3(blocksPerGrid), dim3(thread_num), 0, GPUHandle::streams[streamid] , this->data, x.data->ptr, x.data->col_idx, x.data->val, x.data->nnz, this->rows, this->cols, alpha);
}
// C = alpha * op(A) * op(B) + beta * C for double matrices, using cuBLAS.
// cuBLAS is column-major while these matrices are row-major, so the call
// computes C^T = op(B)^T * op(A)^T: operands, transposes, and the m/n
// dimensions are swapped, and the row-major result lands in `data` directly.
template<>
void DenseMat<GPU, double>::GeMM(DenseMat<GPU, double>& A, DenseMat<GPU, double>& B, Trans transa, Trans transb, double alpha, double beta)
{
size_t m, n, k;
// m x n output, shared inner dimension k, per the requested transposes.
GetDims(A.rows, A.cols, transa, B.rows, B.cols, transb, m, n, k);
Resize(m, n);
hipblasDgemm(GPUHandle::cublashandle,
GPU_T(transb), GPU_T(transa),
n, m, k,
&alpha, B.data, B.cols, A.data, A.cols,
&beta, data, this->cols);
}
// C = alpha * op(A) * op(B) + beta * C for float matrices. Same row-major /
// column-major operand swap as the double specialization above: computes
// C^T = op(B)^T * op(A)^T so the row-major result is written in place.
template<>
void DenseMat<GPU, float>::GeMM(DenseMat<GPU, float>& A, DenseMat<GPU, float>& B, Trans transa, Trans transb, float alpha, float beta)
{
size_t m, n, k;
GetDims(A.rows, A.cols, transa, B.rows, B.cols, transb, m, n, k);
Resize(m, n);
hipblasSgemm(GPUHandle::cublashandle,
GPU_T(transb), GPU_T(transa),
n, m, k,
&alpha, B.data, B.cols, A.data, A.cols,
&beta, data, this->cols);
}
// dst += alpha * A * B where A is CSR and B / dst are row-major dense.
// One thread per dst element (i, j): walks CSR row i of A and accumulates
// alpha * A(i, t_col) * B(t_col, j). Assumes dst already holds the
// beta-scaled previous value (SparseMM calls Scale(beta) before launching).
template<typename Dtype>
__global__ void CSRMMKernel(Dtype alpha, int* ptr, int* col_idx, Dtype* val, Dtype* dense_data, int src_cols, Dtype* dst, int dst_cols, int numElements)
{
int offset = blockDim.x * blockIdx.x + threadIdx.x;
if (offset < numElements)
{
int i = offset / dst_cols, j = offset % dst_cols;
for (int t = ptr[i]; t < ptr[i + 1]; ++t)
{
dst[offset] += alpha * val[t] * dense_data[col_idx[t] * src_cols + j];
}
}
}
// dst += alpha * A^T * B where A is CSR. One thread per output COLUMN
// (not per element): each thread serially scans every CSR row x of A; a
// nonzero A(x, r) contributes to output row r (hence the parameter is named
// row_idx — under the transpose, A's column indices index dst's rows).
// Distinct threads write distinct columns, so there are no write races.
template<typename Dtype>
__global__ void CSRMMKernel_T(Dtype alpha, int n_ptr, int* ptr, int* row_idx, Dtype* val, Dtype* dense_data, int src_cols, Dtype* dst, int dst_cols)
{
int cur_col = blockDim.x * blockIdx.x + threadIdx.x;
if (cur_col < dst_cols)
{
// n_ptr is the length of the CSR pointer array, i.e. (rows of A) + 1.
for (int x = 0; x < n_ptr - 1; ++x)
{
for (int t = ptr[x]; t < ptr[x + 1]; ++t)
{
dst[row_idx[t] * dst_cols + cur_col] += alpha * val[t] * dense_data[x * src_cols + cur_col];
}
}
}
}
// this = alpha * op(A) * B + beta * this, with A sparse (CSR) and B dense.
// Only transb == Trans::N is supported. beta is applied up front via
// Scale(beta); the kernels then accumulate the alpha term on top.
template<typename Dtype>
void DenseMat<GPU, Dtype>::SparseMM(SparseMat<GPU, Dtype>& A, DenseMat<GPU, Dtype>& B, Trans transa, Trans transb, Dtype alpha, Dtype beta)
{
assert(transb == Trans::N);
size_t m, n, k;
GetDims(A.rows, A.cols, transa, B.rows, B.cols, transb, m, n, k);
Resize(m, n);
this->Scale(beta);
if (transa == Trans::N)
{
// Non-transposed path: one thread per output element.
int thread_num = min(c_uCudaThreadNum, this->count);
int blocksPerGrid = (this->count + thread_num - 1) / thread_num;
hipLaunchKernelGGL(( CSRMMKernel) , dim3(blocksPerGrid), dim3(thread_num), 0, GPUHandle::streams[streamid] , alpha, A.data->ptr, A.data->col_idx, A.data->val, B.data, B.cols, this->data, this->cols, this->count);
} else
{
// Transposed path: one thread per output column (avoids write races,
// since A's column indices become destination row indices).
int thread_num = min(c_uCudaThreadNum, this->cols);
int blocksPerGrid = (this->cols + thread_num - 1) / thread_num;
hipLaunchKernelGGL(( CSRMMKernel_T) , dim3(blocksPerGrid), dim3(thread_num), 0, GPUHandle::streams[streamid] , alpha, A.data->len_ptr, A.data->ptr, A.data->col_idx, A.data->val, B.data, B.cols, this->data, this->cols);
}
}
// Writes the matrix to an already-open file: base-class fields, then
// mem_size (the allocated capacity in elements, which may exceed count),
// then the raw device buffer staged through a host copy, then is_submat,
// then the bias_mult helper vector.
template<typename Dtype>
void DenseMat<GPU, Dtype>::Serialize(FILE* fid)
{
// Ensure any pending async work on our stream has finished before reading.
hipStreamSynchronize(GPUHandle::streams[streamid]);
IMatrix<GPU, Dtype>::Serialize(fid);
assert(fwrite(&mem_size, sizeof(size_t), 1, fid) == 1);
// Stage device memory through a temporary host buffer (blocking copy).
Dtype* buf = new Dtype[mem_size];
hipMemcpy(buf, data, sizeof(Dtype) * mem_size, hipMemcpyDeviceToHost);
assert(fwrite(buf, sizeof(Dtype), mem_size, fid) == mem_size);
delete[] buf;
assert(fwrite(&is_submat, sizeof(bool), 1, fid) == 1);
bias_mult.Serialize(fid);
}
// Reads the matrix back in the exact layout written by Serialize: base-class
// fields, mem_size, raw element buffer, is_submat, bias_mult. Reallocates
// the device buffer to the serialized capacity and rebinds dev_ptr to it.
template<typename Dtype>
void DenseMat<GPU, Dtype>::Deserialize(FILE* fid)
{
hipStreamSynchronize(GPUHandle::streams[streamid]);
IMatrix<GPU, Dtype>::Deserialize(fid);
assert(fread(&mem_size, sizeof(size_t), 1, fid) == 1);
// Replace the existing allocation with one of the serialized capacity.
MatUtils<GPU>::DelArr(data);
MatUtils<GPU>::MallocArr(data, sizeof(Dtype) * mem_size);
dev_ptr = thrust::device_pointer_cast(data);
// Stage through a host buffer, then push to the device (blocking copy).
Dtype* buf = new Dtype[mem_size];
assert(fread(buf, sizeof(Dtype), mem_size, fid) == mem_size);
hipMemcpy(data, buf, sizeof(Dtype) * mem_size, hipMemcpyHostToDevice);
delete[] buf;
assert(fread(&is_submat, sizeof(bool), 1, fid) == 1);
bias_mult.Deserialize(fid);
}
// Debug helper: copies the matrix to the host (blocking) and dumps it to
// stderr, one row per line. Not intended for production use.
template<typename Dtype>
void DenseMat<GPU, Dtype>::Print2Screen() //debug only
{
hipStreamSynchronize(GPUHandle::streams[streamid]);
Dtype* cpu_mem = new Dtype[this->count];
hipMemcpy(cpu_mem, data, sizeof(Dtype) * this->count, hipMemcpyDeviceToHost);
std::cerr << "========mat content========" << std::endl;
for (size_t i = 0; i < this->rows; ++i)
{
for (size_t j = 0; j < this->cols; ++j)
std::cerr << cpu_mem[i * this->cols + j] << " ";
std::cerr << std::endl;
}
std::cerr << "======== end ========" << std::endl;
delete[] cpu_mem;
}
template class DenseMat<GPU, double>;
template class DenseMat<GPU, float>;
| 1bf2a331492634acc795b1046b5b60495d9a5a72.cu | #include "dense_matrix.h"
#include "cuda_rand_kernel.cuh"
#include "cuda_unary_kernel.cuh"
#include "cuda_binary_kernel.cuh"
#include "cuda_helper.h"
#include "sparse_matrix.h"
#include <thrust/extrema.h>
#include <cuda_runtime.h>
#include <iostream>
#define min(x, y) (x < y ? x : y)
template<typename Dtype>
DenseMat<GPU, Dtype>::~DenseMat()
{
pointer_buf.clear();
cudaStreamSynchronize(GPUHandle::streams[streamid]);
MatUtils<GPU>::DelArr(data);
}
template<typename Dtype>
DenseMat<GPU, Dtype>::DenseMat(unsigned int _streamid)
{
mem_size = this->count = this->rows = this->cols = 0U;
streamid = _streamid;
data = nullptr;
pointer_buf.clear();
}
template<typename Dtype>
DenseMat<GPU, Dtype>::DenseMat(size_t _rows, size_t _cols, unsigned int _streamid)
{
this->rows = _rows;
this->cols = _cols;
this->count = _rows * _cols;
mem_size = this->count + (this->count & 1);
MatUtils<GPU>::MallocArr(data, sizeof(Dtype) * mem_size);
cudaMemset(data, 0, sizeof(Dtype) * mem_size);
dev_ptr = thrust::device_pointer_cast(data);
pointer_buf.clear();
streamid = _streamid;
}
// Changes the logical shape to _newRows x _newCols. Only reallocates when
// the new element count exceeds the current capacity (mem_size); in that
// case the OLD CONTENTS ARE DISCARDED (free + fresh zeroed allocation).
// Capacity is rounded up to an even element count.
template<typename Dtype>
void DenseMat<GPU, Dtype>::Resize(size_t _newRows, size_t _newCols)
{
// Wait for in-flight work on this stream before touching the allocation.
cudaStreamSynchronize(GPUHandle::streams[streamid]);
this->rows = _newRows;
this->cols = _newCols;
this->count = this->rows * this->cols;
if (this->count > mem_size)
{
mem_size = this->count + (this->count & 1);
MatUtils<GPU>::DelArr(data);
MatUtils<GPU>::MallocArr(data, sizeof(Dtype) * mem_size);
dev_ptr = thrust::device_pointer_cast(data);
cudaMemset(data, 0, sizeof(Dtype) * mem_size);
}
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::CopyFrom(DenseMat<CPU, Dtype>& src)
{
Resize(src.rows, src.cols);
cudaMemcpyAsync(data, src.data, sizeof(Dtype) * this->count, cudaMemcpyHostToDevice, GPUHandle::streams[streamid]);
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::CopyFrom(DenseMat<GPU, Dtype>& src)
{
Resize(src.rows, src.cols);
cudaMemcpyAsync(data, src.data, sizeof(Dtype) * this->count, cudaMemcpyDeviceToDevice, GPUHandle::streams[streamid]);
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::CopyFrom(SparseMat<CPU, Dtype>& src)
{
Resize(src.rows, src.cols);
throw "not implemented";
//memcpy(data, src.data, sizeof(Dtype) * this->count);
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::CopyFrom(SparseMat<GPU, Dtype>& src)
{
Resize(src.rows, src.cols);
throw "not implemented";
//memcpy(data, src.data, sizeof(Dtype) * this->count);
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::Zeros(size_t _rows, size_t _cols)
{
Resize(_rows, _cols);
cudaMemset(data, 0, this->count * sizeof(Dtype));
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::Zeros()
{
if (this->count)
cudaMemset(data, 0, this->count * sizeof(Dtype));
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::SetRandN(Dtype mean, Dtype std, size_t _rows, size_t _cols)
{
if (_rows && _cols)
{
Resize(_rows, _cols);
}
SetRand(this->data, this->count, NormalRandomizer<Dtype>(mean, std), streamid);
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::SetRandU(Dtype lb, Dtype ub, size_t _rows, size_t _cols)
{
if (_rows && _cols)
{
Resize(_rows, _cols);
}
SetRand(this->data, this->count, UniformRandomizer<Dtype>(lb, ub), streamid);
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::SetRandSign(size_t _rows, size_t _cols)
{
if (_rows && _cols)
{
Resize(_rows, _cols);
}
SetRand(this->data, this->count, BinomialRandomizer<Dtype>(), streamid);
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::SetRandChi2(Dtype degree, size_t _rows, size_t _cols)
{
if (_rows && _cols)
{
Resize(_rows, _cols);
}
SetRand(this->data, this->count, ChisquareRandomizer<Dtype>(degree), streamid);
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::Add(Dtype scalar)
{
UnaryOp(this->data, this->count, UnaryAdd<Dtype>(scalar), streamid);
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::Fill(Dtype scalar)
{
UnaryOp(data, this->count, UnarySet<Dtype>(scalar), streamid);
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::Scale(Dtype scalar)
{
if (scalar == 0.0)
{
Fill(0);
} else if (scalar != 1)
UnaryOp(data, this->count, UnaryScale<Dtype>(scalar), streamid);
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::Power(Dtype scalar)
{
throw "not implemented";
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::Inv()
{
UnaryOp(this->data, this->count, UnaryInv<Dtype>(), streamid);
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::InvSqrt()
{
UnaryOp(this->data, this->count, UnaryInvSqrt<Dtype>(), streamid);
}
// Copied from https://github.com/torch/cunn/blob/master/SoftMax.cu
// In-place row-wise softmax. One block per row (blockIdx.x selects the row);
// threads cooperate through a shared buffer whose last slot
// (buffer[SOFTMAX_THREADS]) carries the block-wide max, then the block-wide
// sum. Three phases: per-thread max + reduce, per-thread exp/sum + reduce,
// then normalization. blockIdx.y is 0 under the (rows, 1) grid used by
// Softmax() below.
template<typename Dtype>
__global__ void cunn_SoftMax_updateOutput_kernel(Dtype *orig_ptr, int batch_size, int dim)
{
__shared__ Dtype buffer[SOFTMAX_THREADS + 1];
Dtype* dst = orig_ptr + blockIdx.x * dim + blockIdx.y;
int i_start = threadIdx.x;
int i_end = dim;
int i_step = blockDim.x;
Dtype z;
// Phase 1: each thread computes the max over its strided slice of the row.
// Threads with i_start >= dim leave their slot untouched; the reduction
// below only reads the first min(dim, blockDim.x) slots, so that is safe.
if (i_start < dim)
buffer[threadIdx.x] = dst[i_start];
for (int i = i_start; i < i_end; i += i_step)
{
z = dst[i];
if(buffer[threadIdx.x] < z)
buffer[threadIdx.x] = z;
}
__syncthreads();
// Thread 0 reduces the per-thread maxima into buffer[SOFTMAX_THREADS].
if (threadIdx.x == 0)
{
z = buffer[0];
for (int i = 1; i < min(dim, blockDim.x); i++)
{
if(z < buffer[i])
z = buffer[i];
}
buffer[SOFTMAX_THREADS] = z;
}
__syncthreads();
// Phase 2: exponentiate (shifted by the max for numerical stability),
// writing exp values back in place and accumulating per-thread sums.
Dtype max_k = buffer[SOFTMAX_THREADS];
buffer[threadIdx.x] = 0;
for (int i = i_start; i < i_end; i += i_step)
{
z = cuda_exp(dst[i] - max_k);
buffer[threadIdx.x] += z;
dst[i] = z;
}
__syncthreads();
// Thread 0 reduces the per-thread partial sums (all slots were zeroed,
// so reading the full blockDim.x range is safe here).
if (threadIdx.x == 0)
{
z = 0;
for (int i = 0; i < blockDim.x; i++)
z += buffer[i];
buffer[SOFTMAX_THREADS] = z;
}
__syncthreads();
// Phase 3: normalize by the row sum.
Dtype sum_k = buffer[SOFTMAX_THREADS];
for (int i = i_start; i < i_end; i += i_step)
dst[i] /= sum_k;
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::Softmax()
{
dim3 blocks(this->rows, 1);
dim3 threads(SOFTMAX_THREADS);
cunn_SoftMax_updateOutput_kernel <<< blocks, threads, 0, GPUHandle::streams[streamid] >>> (this->data, this->rows, this->cols);
}
template<typename Dtype>
__global__ void IdentityKernel(Dtype* dst, int dim)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < dim)
{
dst[i * dim + i] = 1.0;
}
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::Identity(size_t dim)
{
if (dim)
Resize(dim, dim);
assert(this->rows == this->cols);
Fill(0.0);
int thread_num, blocksPerGrid;
if (this->rows < c_uCudaThreadNum)
{
thread_num = this->rows;
blocksPerGrid = 1;
} else
{
thread_num = c_uCudaThreadNum;
blocksPerGrid = (this->rows + thread_num - 1) / thread_num;
}
IdentityKernel <<< blocksPerGrid, thread_num, 0, GPUHandle::streams[streamid]>>>(this->data, this->cols);
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::Log(DenseMat<GPU, Dtype>& src)
{
Resize(src.rows, src.cols);
UnaryOp(this->data, src.data, this->count, UnaryLog<Dtype>(), streamid);
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::Log()
{
UnaryOp(this->data, this->count, UnaryLog<Dtype>(), streamid);
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::Exp(DenseMat<GPU, Dtype>& src)
{
Resize(src.rows, src.cols);
UnaryOp(this->data, src.data, this->count, UnaryExp<Dtype>(), streamid);
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::Exp()
{
UnaryOp(this->data, this->count, UnaryExp<Dtype>(), streamid);
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::Sin(DenseMat<GPU, Dtype>& src)
{
Resize(src.rows, src.cols);
UnaryOp(this->data, src.data, this->count, UnarySin<Dtype>(), streamid);
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::Sin()
{
UnaryOp(this->data, this->count, UnarySin<Dtype>(), streamid);
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::Cos(DenseMat<GPU, Dtype>& src)
{
Resize(src.rows, src.cols);
UnaryOp(this->data, src.data, this->count, UnaryCos<Dtype>(), streamid);
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::Cos()
{
UnaryOp(this->data, this->count, UnaryCos<Dtype>(), streamid);
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::Sqrt()
{
UnaryOp(data, this->count, UnarySqrt<Dtype>(), streamid);
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::Square()
{
UnaryOp(this->data, this->count, UnarySquare<Dtype>(), streamid);
}
template<typename Dtype>
Dtype DenseMat<GPU, Dtype>::Norm2()
{
cudaStreamSynchronize(GPUHandle::streams[streamid]);
return CudaHelper_Norm2(GPUHandle::cublashandle, this->count, data);
}
template<typename Dtype>
Dtype DenseMat<GPU, Dtype>::Asum()
{
cudaStreamSynchronize(GPUHandle::streams[streamid]);
return CudaHelper_Asum(GPUHandle::cublashandle, this->count, data);
}
template<typename Dtype>
Dtype DenseMat<GPU, Dtype>::Sum()
{
cudaStreamSynchronize(GPUHandle::streams[streamid]);
return thrust::reduce(dev_ptr, dev_ptr + this->count);
}
// Clamps every element of dst into [-max_abs, max_abs]. One thread per
// element with the usual tail guard.
template<typename Dtype>
__global__ void ClipKernel(Dtype *dst, Dtype max_abs, int numElements)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < numElements)
    {
        if (dst[i] < -max_abs)
            dst[i] = -max_abs;
        // BUG FIX: the original read `if (dst[i] > max_abs);` — the stray
        // semicolon made the assignment below unconditional, overwriting
        // EVERY element (including in-range ones) with max_abs.
        else if (dst[i] > max_abs)
            dst[i] = max_abs;
    }
}
// Clamps every element of this matrix into [-max_abs, max_abs] on the GPU.
// NOTE(review): count == 0 would make thread_num 0 and the launch config
// invalid — assumes callers never clip an empty matrix; TODO confirm.
template<typename Dtype>
void DenseMat<GPU, Dtype>::Clip(Dtype max_abs)
{
assert(max_abs >= 0);
int thread_num = min(c_uCudaThreadNum, this->count);
int blocksPerGrid = (this->count + thread_num - 1) / thread_num;
ClipKernel <<< blocksPerGrid, thread_num, 0, GPUHandle::streams[streamid] >>>(this->data, max_abs, this->count);
}
// Returns the largest absolute value among all elements. Uses the cuBLAS
// I<t>amax routine, which reports a 1-BASED index — hence `pos - 1` when
// fetching the element — then takes fabs on the host.
template<typename Dtype>
Dtype DenseMat<GPU, Dtype>::Amax()
{
cudaStreamSynchronize(GPUHandle::streams[streamid]);
int pos;
CudaHelper_Amax(GPUHandle::cublashandle, this->count, data, &pos);
Dtype result;
// Blocking single-element copy of the located entry back to the host.
cudaMemcpy(&result, this->data + pos - 1, sizeof(Dtype), cudaMemcpyDeviceToHost);
return fabs(result);
}
template<typename Dtype>
Dtype DenseMat<GPU, Dtype>::GetRowMax(size_t row_idx)
{
assert(row_idx < this->rows);
cudaStreamSynchronize(GPUHandle::streams[streamid]);
size_t offset = row_idx * this->cols;
auto iter = thrust::max_element(dev_ptr + offset, dev_ptr + offset + this->cols);
return *iter;
}
template<typename Dtype>
size_t DenseMat<GPU, Dtype>::GetRowMaxIdx(size_t row_idx)
{
assert(row_idx < this->rows);
cudaStreamSynchronize(GPUHandle::streams[streamid]);
size_t offset = row_idx * this->cols;
auto iter = thrust::max_element(dev_ptr + offset, dev_ptr + offset + this->cols);
return iter - dev_ptr - offset;
}
template<typename Dtype>
__global__ void MulRowKernel(Dtype *dst, Dtype* src, Dtype* factor, int cols, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i < numElements)
{
dst[i] += src[i] * factor[i % cols];
}
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::MulRowVec(DenseMat<GPU, Dtype>& src, DenseMat<GPU, Dtype>& x, Dtype beta)
{
assert(&src != this);
Resize(src.rows, src.cols);
assert(x.count == this->cols);
Scale(beta);
int thread_num = min(c_uCudaThreadNum, this->count);
int blocksPerGrid = (this->count + thread_num - 1) / thread_num;
MulRowKernel <<< blocksPerGrid, thread_num, 0, GPUHandle::streams[streamid] >>> (this->data, src.data, x.data, this->cols, this->count);
}
template<typename Dtype>
__global__ void MulRowKernel(Dtype *dst, Dtype* factor, int cols, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i < numElements)
{
dst[i] = dst[i] * factor[i % cols];
}
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::MulRowVec(DenseMat<GPU, Dtype>& x)
{
assert(x.count == this->cols);
int thread_num = min(c_uCudaThreadNum, this->count);
int blocksPerGrid = (this->count + thread_num - 1) / thread_num;
MulRowKernel <<< blocksPerGrid, thread_num, 0, GPUHandle::streams[streamid] >>> (this->data, x.data, this->cols, this->count);
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::Mean(DenseMat<GPU, Dtype>& src)
{
Resize(1, src.cols);
if (bias_mult.count < src.rows)
bias_mult.Resize(src.rows);
bias_mult.Fill(1.0 / src.rows);
Dtype alpha = 1.0, beta = 0.0;
cudaStreamSynchronize(GPUHandle::streams[streamid]);
CudaHelper_GeMV(GPUHandle::cublashandle, GPU_T(Trans::N),
src.cols, src.rows,
&alpha, src.data, src.cols,
bias_mult.data, 1,
&beta, this->data, 1);
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::AddRowVec(DenseMat<GPU, Dtype>& x, Dtype alpha)
{
assert(x.count == this->cols);
if (bias_mult.count < this->rows)
{
bias_mult.Resize(this->rows);
bias_mult.Fill(1.0);
}
cudaStreamSynchronize(GPUHandle::streams[streamid]);
// cublas is col-major
CudaHelper_Ger(GPUHandle::cublashandle,
this->cols, this->rows,
&alpha,
x.data, bias_mult.data, data);
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::AddColVec(DenseMat<GPU, Dtype>& x, Dtype alpha)
{
assert(x.count == this->rows);
if (bias_mult.count < this->cols)
{
bias_mult.Resize(this->cols);
bias_mult.Fill(1.0);
}
cudaStreamSynchronize(GPUHandle::streams[streamid]);
// cublas is col-major
CudaHelper_Ger(GPUHandle::cublashandle,
this->cols, this->rows,
&alpha,
bias_mult.data, x.data, data);
}
template<typename Dtype>
__global__ void CSRSubmatAddKernel(Dtype* dst, int* row_ptr, int* col_idx, Dtype* val, int nnz, int n_rows, int dst_cols)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < nnz)
{
int l = 0, r = n_rows - 1, row;
while (l <= r)
{
row = (l + r) / 2;
if (row_ptr[row] <= i)
{
if (row_ptr[row + 1] > i)
break;
else
l = row + 1;
} else r = row - 1;
}
dst[row * dst_cols + col_idx[i]] = val[i];
}
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::SubmatAdd(size_t row_start, size_t col_start, SparseMat<GPU, Dtype>& src, Dtype beta)
{
assert(row_start + src.rows <= this->rows);
assert(col_start + src.cols <= this->cols);
Scale(beta);
int thread_num = min(c_uCudaThreadNum, src.data->nnz);
int blocksPerGrid = (src.data->nnz + thread_num - 1) / thread_num;
CSRSubmatAddKernel<<< blocksPerGrid, thread_num, 0, GPUHandle::streams[streamid] >>>(this->data + row_start * this->cols + col_start, src.data->ptr, src.data->col_idx, src.data->val, src.data->nnz, src.rows, this->cols);
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::SubmatAdd(size_t row_start, size_t col_start, DenseMat<GPU, Dtype>& src, Dtype beta)
{
assert(row_start + src.rows <= this->rows);
assert(col_start + src.cols <= this->cols);
Scale(beta);
Dtype alpha = 1.0;
Dtype* dst_ptr = this->data + row_start * this->cols + col_start;
cudaStreamSynchronize(GPUHandle::streams[streamid]);
CudaHelper_GeaM(GPUHandle::cublashandle,
GPU_T(Trans::N), GPU_T(Trans::N),
src.cols, src.rows,
&alpha, src.data, src.cols,
&beta, dst_ptr, this->cols,
dst_ptr, this->cols);
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::GetPointerBuf(std::vector< DenseMat<GPU, Dtype>* >& mat_list)
{
if (mat_list.size() > pointer_buf.size())
{
pointer_buf.resize(mat_list.size());
}
for (size_t i = 0; i < mat_list.size(); ++i)
pointer_buf[i] = mat_list[i]->data;
}
template<typename Dtype>
__global__ void ScatterColsKernel(Dtype** dst_list, Dtype* src, const int other_cols, const int this_cols, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
int cur_col = i % this_cols;
Dtype* dst = dst_list[cur_col / other_cols];
int dst_offset = (i / this_cols) * other_cols + cur_col % other_cols;
dst[dst_offset] = src[i];
}
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::ScatterCols(std::vector< DenseMat<GPU, Dtype>* >& dst_list)
{
assert(dst_list.size() > 0);
assert(this->cols % dst_list.size() == 0);
for (size_t i = 0; i < dst_list.size(); ++i)
dst_list[i]->Resize(this->rows, this->cols / dst_list.size());
GetPointerBuf(dst_list);
int thread_num = min(c_uCudaThreadNum, this->count);
int blocksPerGrid = (this->count + thread_num - 1) / thread_num;
ScatterColsKernel <<< blocksPerGrid, thread_num, 0, GPUHandle::streams[streamid] >>>(thrust::raw_pointer_cast(&pointer_buf[0]), this->data, dst_list[0]->cols, this->cols, this->count);
}
template<typename Dtype>
__global__ void ConcatColsKernel(Dtype* dst, Dtype* src, const int src_cols, const int dst_cols, const int num_parts, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
int cur_col = i % dst_cols;
int src_offset = (i - cur_col) / num_parts;
dst[i] = src[src_offset + cur_col % src_cols];
}
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::ConcatCols(DenseMat<GPU, Dtype>& src)
{
assert(this->rows == src.rows);
assert(this->cols % src.cols == 0);
int num_parts = this->cols / src.cols;
int thread_num = min(c_uCudaThreadNum, this->count);
int blocksPerGrid = (this->count + thread_num - 1) / thread_num;
ConcatColsKernel <<< blocksPerGrid, thread_num, 0, GPUHandle::streams[streamid] >>> (this->data, src.data, src.cols, this->cols, num_parts, this->count);
}
template<typename Dtype>
__global__ void ConcatColsKernel(Dtype* dst, Dtype** src_list, const int other_cols, const int this_cols, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
int cur_col = i % this_cols;
Dtype* src = src_list[cur_col / other_cols];
int src_offset = (i / this_cols) * other_cols + cur_col % other_cols;
dst[i] = src[src_offset];
}
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::ConcatCols(std::vector< DenseMat<GPU, Dtype>* >& src_list)
{
assert(src_list.size() > 0);
assert(src_list.size() * src_list[0]->cols == this->cols);
GetPointerBuf(src_list);
int thread_num = min(c_uCudaThreadNum, this->count);
int blocksPerGrid = (this->count + thread_num - 1) / thread_num;
ConcatColsKernel <<< blocksPerGrid, thread_num, 0, GPUHandle::streams[streamid] >>> (this->data, thrust::raw_pointer_cast(&pointer_buf[0]), src_list[0]->cols, this->cols, this->count);
}
template<typename Dtype>
__global__ void SparseEleWiseMulKernel(Dtype* dst, int* row_ptr, int* col_idx, Dtype* val, int n_cols, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
int cur_row = i / n_cols;
int cur_col = i % n_cols;
int l = row_ptr[cur_row], r = row_ptr[cur_row + 1] - 1, idx;
while (l <= r)
{
idx = (l + r) / 2;
if (col_idx[idx] < cur_col)
l = idx + 1;
else if (col_idx[idx] > cur_col)
r = idx - 1;
else {
dst[i] *= val[idx];
return;
}
}
dst[i] = 0;
}
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::EleWiseMul(SparseMat<GPU, Dtype>& src)
{
assert(this->rows == src.rows && this->cols == src.cols);
int thread_num = min(c_uCudaThreadNum, this->count);
int blocksPerGrid = (this->count + thread_num - 1) / thread_num;
SparseEleWiseMulKernel <<< blocksPerGrid, thread_num, 0, GPUHandle::streams[streamid] >>> (this->data, src.data->ptr, src.data->col_idx, src.data->val, this->cols, this->count);
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::EleWiseMul(DenseMat<GPU, Dtype>& src)
{
assert(this->rows == src.rows && this->cols == src.cols);
BinaryOp(this->data, src.data, this->count, BinaryMul<Dtype>(), streamid);
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::EleWiseMul(DenseMat<GPU, Dtype>& lhs, DenseMat<GPU, Dtype>& rhs)
{
assert(lhs.rows == rhs.rows && lhs.cols == rhs.cols);
Resize(lhs.rows, lhs.cols);
BinaryOp(this->data, lhs.data, rhs.data, this->count, BinaryMul<Dtype>(), streamid);
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::EleWiseDiv(DenseMat<GPU, Dtype>& src)
{
assert(this->rows == src.rows && this->cols == src.cols);
BinaryOp(this->data, src.data, this->count, BinaryDiv<Dtype>(), streamid);
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::EleWiseDiv(DenseMat<GPU, Dtype>& lhs, DenseMat<GPU, Dtype>& rhs)
{
assert(lhs.rows == rhs.rows && lhs.cols == rhs.cols);
Resize(lhs.rows, lhs.cols);
BinaryOp(this->data, lhs.data, rhs.data, this->count, BinaryDiv<Dtype>(), streamid);
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::AddSubmat(DenseMat<GPU, Dtype>& src, size_t row_start, size_t col_start, Dtype beta)
{
assert(row_start + this->rows <= src.rows);
assert(col_start + this->cols <= src.cols);
Dtype alpha = 1.0;
Dtype* src_ptr = src.data + row_start * src.cols + col_start;
cudaStreamSynchronize(GPUHandle::streams[streamid]);
CudaHelper_GeaM(GPUHandle::cublashandle,
GPU_T(Trans::N), GPU_T(Trans::N),
this->cols, this->rows,
&alpha, src_ptr, src.cols,
&beta, this->data, this->cols,
this->data, this->cols);
}
// Permutes the columns of a row-major matrix: element (r, c) of dst is taken
// from column perm[c] of the same row of src. One thread per element.
template<typename Dtype>
__global__ void ShuffleColsKernel(Dtype* dst, Dtype* src, const int* perm, int cols, int numElements)
{
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= numElements)
        return;
    int col = tid % cols;
    // tid - col is the start of this row; pick the permuted source column.
    dst[tid] = src[tid - col + perm[col]];
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::ShuffleCols(DenseMat<GPU, Dtype>& src, const int* perm)
{
    // Permute the columns of src into this according to perm (device pointer).
    Resize(src.rows, src.cols);
    int threads = min(c_uCudaThreadNum, this->count);
    int blocks = (this->count + threads - 1) / threads;   // ceil-div
    ShuffleColsKernel <<< blocks, threads, 0, GPUHandle::streams[streamid] >>> (this->data, src.data, perm, this->cols, this->count);
}
// Fold num_parts horizontal slices of src (each `cols` wide) into dst:
// dst(r, c) = sum_j src(r, c + j * cols).  One thread per dst element.
template<typename Dtype>
__global__ void ReduceColsKernel(Dtype* dst, Dtype* src, const int cols, const int num_parts, int numElements)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= numElements)
        return;
    int c = idx % cols;
    // Offset of src(r, c): src rows are num_parts times wider than dst rows.
    int offset = c + (idx - c) * num_parts;
    Dtype acc = 0;
    for (int j = 0; j < num_parts; ++j, offset += cols)
        acc += src[offset];
    dst[idx] = acc;
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::ReduceCols(DenseMat<GPU, Dtype>& src)
{
    // Sum the src.cols / this->cols column groups of src into this.
    assert(src.cols % this->cols == 0);
    int num_parts = src.cols / this->cols;
    int threads = min(c_uCudaThreadNum, this->count);
    int blocks = (this->count + threads - 1) / threads;   // ceil-div
    ReduceColsKernel <<< blocks, threads, 0, GPUHandle::streams[streamid] >>> (this->data, src.data, this->cols, num_parts, this->count);
}
// this = alpha * op(A) + beta * op(B), where op applies the requested transpose.
// The result takes the shape of op(A); op(B) is assumed to match -- not asserted.
template<typename Dtype>
void DenseMat<GPU, Dtype>::GeaM(Dtype alpha, Trans transa, DenseMat<GPU, Dtype>& A, Dtype beta, Trans transb, DenseMat<GPU, Dtype>& B)
{
    if (transa == Trans::N)
        Resize(A.rows, A.cols);
    else
        Resize(A.cols, A.rows);
    // cuBLAS is column-major, so (cols, rows) are swapped to operate on the
    // row-major buffers; each matrix's leading dimension is its storage width.
    // NOTE(review): unlike Axpy/AddSubmat there is no stream synchronization
    // before this cuBLAS call -- confirm this ordering is intentional.
    CudaHelper_GeaM(GPUHandle::cublashandle,
        GPU_T(transa), GPU_T(transb),
        this->cols, this->rows,
        &alpha, A.data, A.cols,
        &beta, B.data, B.cols,
        data, this->cols);
}
// y[i] = a * x[i] + b * y[i] (BLAS-style axpby), one thread per element.
template<typename Dtype>
__global__ void AxpbyKernel(Dtype* y, Dtype* x, Dtype a, Dtype b, int numElements)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= numElements)
        return;
    y[idx] = a * x[idx] + b * y[idx];
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::Axpby(Dtype a, DenseMat<GPU, Dtype>& x, Dtype b)
{
    // this = a * x + b * this, implemented as a scale followed by an axpy
    // (the AxpbyKernel above is not used on this path).
    assert(x.count == this->count);
    Scale(b);
    Axpy(a, x);
}
// this += alpha * x (dense), delegated to cuBLAS axpy.
template<typename Dtype>
void DenseMat<GPU, Dtype>::Axpy(Dtype alpha, DenseMat<GPU, Dtype>& x)
{
    assert(x.rows == this->rows && x.cols == this->cols);
    // cuBLAS runs on its own stream: drain this matrix's stream first so the
    // axpy observes any writes still queued on it.
    cudaStreamSynchronize(GPUHandle::streams[streamid]);
    CudaHelper_Axpy(GPUHandle::cublashandle, this->count, &alpha, x.data, data);
}
// dst += alpha * sparse(x): one thread per stored nonzero.  CSR keeps only
// row_ptr, so each thread binary-searches row_ptr to recover the row of its
// nonzero -- the row satisfying row_ptr[row] <= i < row_ptr[row + 1].
template<typename Dtype>
__global__ void SpAxpyKernel(Dtype* dst, int* row_ptr, int* col_idx, Dtype* val, int nnz, int n_rows, int n_cols, Dtype alpha)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < nnz)
    {
        int l = 0, r = n_rows - 1, row;
        while (l <= r)
        {
            row = (l + r) / 2;
            if (row_ptr[row] <= i)
            {
                if (row_ptr[row + 1] > i)
                    break;          // row found
                else
                    l = row + 1;    // nonzero lives in a later row
            } else r = row - 1;     // nonzero lives in an earlier row
        }
        // Plain (non-atomic) accumulation: CSR stores each (row, col) at most
        // once, so no two threads ever target the same dst element.
        dst[row * n_cols + col_idx[i]] += val[i] * alpha;
    }
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::Axpy(Dtype alpha, SparseMat<GPU, Dtype>& x)
{
    // this += alpha * x for CSR-sparse x; one thread per stored nonzero.
    assert(x.rows == this->rows);
    assert(x.cols == this->cols);
    int threads = min(c_uCudaThreadNum, x.data->nnz);
    int blocks = (x.data->nnz + threads - 1) / threads;   // ceil-div
    SpAxpyKernel <<< blocks, threads, 0, GPUHandle::streams[streamid] >>> (this->data, x.data->ptr, x.data->col_idx, x.data->val, x.data->nnz, this->rows, this->cols, alpha);
}
// C = alpha * op(A) * op(B) + beta * C, double precision.
template<>
void DenseMat<GPU, double>::GeMM(DenseMat<GPU, double>& A, DenseMat<GPU, double>& B, Trans transa, Trans transb, double alpha, double beta)
{
    size_t m, n, k;
    GetDims(A.rows, A.cols, transa, B.rows, B.cols, transb, m, n, k);
    Resize(m, n);
    // Row-major trick for column-major cuBLAS: compute C^T = op(B)^T * op(A)^T
    // by passing B first with the operand order and dimensions swapped; each
    // leading dimension is the matrix's storage width.
    cublasDgemm(GPUHandle::cublashandle,
        GPU_T(transb), GPU_T(transa),
        n, m, k,
        &alpha, B.data, B.cols, A.data, A.cols,
        &beta, data, this->cols);
}
// C = alpha * op(A) * op(B) + beta * C, single precision.
template<>
void DenseMat<GPU, float>::GeMM(DenseMat<GPU, float>& A, DenseMat<GPU, float>& B, Trans transa, Trans transb, float alpha, float beta)
{
    size_t m, n, k;
    GetDims(A.rows, A.cols, transa, B.rows, B.cols, transb, m, n, k);
    Resize(m, n);
    // Same row-major-via-transpose trick as the double specialization:
    // C^T = op(B)^T * op(A)^T with operand order and dimensions swapped.
    cublasSgemm(GPUHandle::cublashandle,
        GPU_T(transb), GPU_T(transa),
        n, m, k,
        &alpha, B.data, B.cols, A.data, A.cols,
        &beta, data, this->cols);
}
// dst += alpha * A * B for CSR A and row-major dense B; one thread per
// output element (row, col).
template<typename Dtype>
__global__ void CSRMMKernel(Dtype alpha, int* ptr, int* col_idx, Dtype* val, Dtype* dense_data, int src_cols, Dtype* dst, int dst_cols, int numElements)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= numElements)
        return;
    int row = idx / dst_cols;
    int col = idx - row * dst_cols;
    // Accumulate in a register; each thread owns exactly one dst element.
    Dtype acc = dst[idx];
    for (int t = ptr[row]; t < ptr[row + 1]; ++t)
        acc += alpha * val[t] * dense_data[col_idx[t] * src_cols + col];
    dst[idx] = acc;
}
// dst += alpha * A^T * B for CSR A: one thread per output COLUMN.  Each
// thread walks every stored row x of A; an entry stored at (x, row_idx[t])
// contributes to output row row_idx[t].  Threads own distinct columns, so
// the scattered "+=" stores never race.  n_ptr is the length of ptr
// (number of rows of A plus one).
template<typename Dtype>
__global__ void CSRMMKernel_T(Dtype alpha, int n_ptr, int* ptr, int* row_idx, Dtype* val, Dtype* dense_data, int src_cols, Dtype* dst, int dst_cols)
{
    int cur_col = blockDim.x * blockIdx.x + threadIdx.x;
    if (cur_col < dst_cols)
    {
        for (int x = 0; x < n_ptr - 1; ++x)
        {
            for (int t = ptr[x]; t < ptr[x + 1]; ++t)
            {
                dst[row_idx[t] * dst_cols + cur_col] += alpha * val[t] * dense_data[x * src_cols + cur_col];
            }
        }
    }
}
// this = alpha * op(A) * B + beta * this for CSR-sparse A and dense B.
// Only transb == N is supported.
template<typename Dtype>
void DenseMat<GPU, Dtype>::SparseMM(SparseMat<GPU, Dtype>& A, DenseMat<GPU, Dtype>& B, Trans transa, Trans transb, Dtype alpha, Dtype beta)
{
    assert(transb == Trans::N);
    size_t m, n, k;
    GetDims(A.rows, A.cols, transa, B.rows, B.cols, transb, m, n, k);
    Resize(m, n);
    // Apply beta up front; both kernels then accumulate with "+=".
    this->Scale(beta);
    if (transa == Trans::N)
    {
        // One thread per output element.
        int thread_num = min(c_uCudaThreadNum, this->count);
        int blocksPerGrid = (this->count + thread_num - 1) / thread_num;
        CSRMMKernel <<< blocksPerGrid, thread_num, 0, GPUHandle::streams[streamid] >>> (alpha, A.data->ptr, A.data->col_idx, A.data->val, B.data, B.cols, this->data, this->cols, this->count);
    } else
    {
        // Transposed product: one thread per output column; A's col_idx
        // becomes the destination row index (row_idx parameter) of A^T.
        int thread_num = min(c_uCudaThreadNum, this->cols);
        int blocksPerGrid = (this->cols + thread_num - 1) / thread_num;
        CSRMMKernel_T <<< blocksPerGrid, thread_num, 0, GPUHandle::streams[streamid] >>> (alpha, A.data->len_ptr, A.data->ptr, A.data->col_idx, A.data->val, B.data, B.cols, this->data, this->cols);
    }
}
// Write this matrix to fid: base-class metadata, buffer size, the device
// buffer contents (staged through a host copy), the submatrix flag, and the
// bias multiplier.
template<typename Dtype>
void DenseMat<GPU, Dtype>::Serialize(FILE* fid)
{
    cudaStreamSynchronize(GPUHandle::streams[streamid]);
    IMatrix<GPU, Dtype>::Serialize(fid);
    // BUGFIX: the fwrite calls must not live inside assert() -- with NDEBUG
    // the whole assert expression is compiled out and nothing gets written.
    size_t n_written = fwrite(&mem_size, sizeof(size_t), 1, fid);
    assert(n_written == 1);
    Dtype* buf = new Dtype[mem_size];
    cudaMemcpy(buf, data, sizeof(Dtype) * mem_size, cudaMemcpyDeviceToHost);
    n_written = fwrite(buf, sizeof(Dtype), mem_size, fid);
    assert(n_written == mem_size);
    delete[] buf;
    n_written = fwrite(&is_submat, sizeof(bool), 1, fid);
    assert(n_written == 1);
    bias_mult.Serialize(fid);
}
// Inverse of Serialize: read metadata, reallocate the device buffer to the
// stored size, and restore its contents through a host staging buffer.
template<typename Dtype>
void DenseMat<GPU, Dtype>::Deserialize(FILE* fid)
{
    cudaStreamSynchronize(GPUHandle::streams[streamid]);
    IMatrix<GPU, Dtype>::Deserialize(fid);
    // BUGFIX: the fread calls must not live inside assert() -- with NDEBUG
    // the whole assert expression is compiled out and nothing gets read.
    size_t n_read = fread(&mem_size, sizeof(size_t), 1, fid);
    assert(n_read == 1);
    MatUtils<GPU>::DelArr(data);
    MatUtils<GPU>::MallocArr(data, sizeof(Dtype) * mem_size);
    dev_ptr = thrust::device_pointer_cast(data);
    Dtype* buf = new Dtype[mem_size];
    n_read = fread(buf, sizeof(Dtype), mem_size, fid);
    assert(n_read == mem_size);
    cudaMemcpy(data, buf, sizeof(Dtype) * mem_size, cudaMemcpyHostToDevice);
    delete[] buf;
    n_read = fread(&is_submat, sizeof(bool), 1, fid);
    assert(n_read == 1);
    bias_mult.Deserialize(fid);
}
template<typename Dtype>
void DenseMat<GPU, Dtype>::Print2Screen() //debug only
{
    // Copy the matrix to the host and dump it to stderr row by row.
    cudaStreamSynchronize(GPUHandle::streams[streamid]);
    Dtype* host_buf = new Dtype[this->count];
    cudaMemcpy(host_buf, data, sizeof(Dtype) * this->count, cudaMemcpyDeviceToHost);
    std::cerr << "========mat content========" << std::endl;
    for (size_t r = 0; r < this->rows; ++r)
    {
        for (size_t c = 0; c < this->cols; ++c)
            std::cerr << host_buf[r * this->cols + c] << " ";
        std::cerr << std::endl;
    }
    std::cerr << "======== end ========" << std::endl;
    delete[] host_buf;
}
template class DenseMat<GPU, double>;
template class DenseMat<GPU, float>;
|
ff41c305e012bf1b891427963ef3ba9a30d1d021.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
#define N (2048*2048)
#define N_THREADS_PER_BLOCK 512
// Adapt vector addition to use both blocks and threads
// Vector addition using both blocks and threads: c[i] = a[i] + b[i].
// Expects a grid covering at least N elements.
__global__ void addByCombine(int *a, int *b, int *c)
{
    // use the built-in variable blockDim.x for threads per block
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    // BUGFIX: bounds guard.  Grids are rounded up to a multiple of the block
    // size, so surplus threads must not read/write past N elements.
    if (index < N)
        c[index] = a[index] + b[index];
} | ff41c305e012bf1b891427963ef3ba9a30d1d021.cu | #include "includes.h"
#define N (2048*2048)
#define N_THREADS_PER_BLOCK 512
// Adapt vector addition to use both blocks and threads
// Vector addition using both blocks and threads: c[i] = a[i] + b[i].
// Expects a grid covering at least N elements.
__global__ void addByCombine(int *a, int *b, int *c)
{
    // use the built-in variable blockDim.x for threads per block
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    // BUGFIX: bounds guard.  Grids are rounded up to a multiple of the block
    // size, so surplus threads must not read/write past N elements.
    if (index < N)
        c[index] = a[index] + b[index];
} |
a55019dd90d8a2b50233596ab105569615bde9a3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Example inspired by:
// https://devblogs.nvidia.com/even-easier-introduction-cuda/
//
#include <iostream>
#include <math.h>
// CUDA *kernel* function to add the elements of two arrays
//
// The __global__ specifier tells the CUDA C++ compiler that
// this is a function that runs on the GPU and can be called
// from CPU code
//
// These __global__ functions are known as *kernels*, and code
// that runs on the GPU is often called *device code*, while code
// that runs on the CPU is *host code*
//
// Here we'll *spread* calculations over *multiple threads*
// AND *multiple block* which will reduce the execution time
// even further
//
// Elementwise addition: y[i] = x[i] + y[i] for all n elements.
//
// Grid-stride loop: every thread starts at its global index and hops by the
// total number of launched threads, so any grid size -- even a single block
// -- covers all n elements correctly.
__global__ void add(int n, float* x, float* y)
{
    int first = blockIdx.x * blockDim.x + threadIdx.x;
    int step = blockDim.x * gridDim.x;
    for (int i = first; i < n; i += step)
    {
        y[i] = x[i] + y[i];
    }
}
int main(void)
{
    const int N = 1 << 20;  // ~1M elements

    // Unified (managed) memory: reachable from both host and device.
    float* x;
    float* y;
    hipMallocManaged(&x, N * sizeof(float));
    hipMallocManaged(&y, N * sizeof(float));

    // Host-side initialization.
    for (int i = 0; i < N; i++)
    {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }

    // Launch enough 256-thread blocks to cover N elements (rounded up).
    const int block_size = 256;
    const int num_blocks = (N + block_size - 1) / block_size;
    hipLaunchKernelGGL(add, dim3(num_blocks), dim3(block_size), 0, 0, N, x, y);

    // Kernel launches are asynchronous: wait before reading the results.
    hipDeviceSynchronize();

    // Every element should now be 1.0f + 2.0f = 3.0f.
    float maxError = 0.0f;
    for (int i = 0; i < N; i++)
    {
        maxError = fmax(maxError, fabs(y[i]-3.0));
    }
    std::cout << "Max error: " << maxError << std::endl;

    hipFree(x);
    hipFree(y);
    return 0;
}
| a55019dd90d8a2b50233596ab105569615bde9a3.cu | //
// Example inspired by:
// https://devblogs.nvidia.com/even-easier-introduction-cuda/
//
#include <iostream>
#include <math.h>
// CUDA *kernel* function to add the elements of two arrays
//
// The __global__ specifier tells the CUDA C++ compiler that
// this is a function that runs on the GPU and can be called
// from CPU code
//
// These __global__ functions are known as *kernels*, and code
// that runs on the GPU is often called *device code*, while code
// that runs on the CPU is *host code*
//
// Here we'll *spread* calculations over *multiple threads*
// AND *multiple block* which will reduce the execution time
// even further
//
// Elementwise addition: y[i] = x[i] + y[i] for all n elements.
//
// Grid-stride loop: every thread starts at its global index and hops by the
// total number of launched threads, so any grid size -- even a single block
// -- covers all n elements correctly.
__global__ void add(int n, float* x, float* y)
{
    int first = blockIdx.x * blockDim.x + threadIdx.x;
    int step = blockDim.x * gridDim.x;
    for (int i = first; i < n; i += step)
    {
        y[i] = x[i] + y[i];
    }
}
int main(void)
{
    const int N = 1 << 20;  // ~1M elements

    // Unified (managed) memory: reachable from both host and device.
    float* x;
    float* y;
    cudaMallocManaged(&x, N * sizeof(float));
    cudaMallocManaged(&y, N * sizeof(float));

    // Host-side initialization.
    for (int i = 0; i < N; i++)
    {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }

    // Launch enough 256-thread blocks to cover N elements (rounded up).
    const int block_size = 256;
    const int num_blocks = (N + block_size - 1) / block_size;
    add<<<num_blocks, block_size>>>(N, x, y);

    // Kernel launches are asynchronous: wait before reading the results.
    cudaDeviceSynchronize();

    // Every element should now be 1.0f + 2.0f = 3.0f.
    float maxError = 0.0f;
    for (int i = 0; i < N; i++)
    {
        maxError = fmax(maxError, fabs(y[i]-3.0));
    }
    std::cout << "Max error: " << maxError << std::endl;

    cudaFree(x);
    cudaFree(y);
    return 0;
}
|
8419fbbf9aa68bd13d9f7e1ef7c51f7aea89a4e0.hip | // !!! This is a file automatically generated by hipify!!!
#include "chainerx/cuda/cuda_device.h"
#include <cstdint>
#include <hip/hip_runtime.h>
#include "chainerx/array.h"
#include "chainerx/cuda/cuda_runtime.h"
#include "chainerx/cuda/cuda_set_device_scope.h"
#include "chainerx/cuda/data_type.cuh"
#include "chainerx/cuda/elementwise.cuh"
#include "chainerx/device.h"
#include "chainerx/dtype.h"
namespace chainerx {
namespace cuda {
namespace {
// Per-element functor for Elementwise: out = (x1 == x2); index is unused.
template <typename T>
struct EqualImpl {
    using CudaType = cuda_internal::DataType<T>;
    __device__ void operator()(int64_t /*i*/, CudaType x1, CudaType x2, bool& out) { out = x1 == x2; }
};
}  // namespace
// Elementwise equality; out has dtype bool.
void CudaDevice::Equal(const Array& x1, const Array& x2, const Array& out) {
    CheckDevicesCompatible(x1, x2, out);
    CudaSetDeviceScope scope{index()};
    // Dispatch on x1's dtype; x2 presumably shares it -- confirm caller contract.
    VisitDtype(x1.dtype(), [&](auto pt) {
        using T = typename decltype(pt)::type;
        Elementwise<const T, const T, bool>(EqualImpl<T>{}, x1, x2, out);
    });
}
namespace {
// Per-element functor for Elementwise: out = (x1 != x2); index is unused.
template <typename T>
struct NotEqualImpl {
    using CudaType = cuda_internal::DataType<T>;
    __device__ void operator()(int64_t /*i*/, CudaType x1, CudaType x2, bool& out) { out = x1 != x2; }
};
}  // namespace
// Elementwise inequality; out has dtype bool.
void CudaDevice::NotEqual(const Array& x1, const Array& x2, const Array& out) {
    CheckDevicesCompatible(x1, x2, out);
    CudaSetDeviceScope scope{index()};
    // Dispatch on x1's dtype; x2 presumably shares it -- confirm caller contract.
    VisitDtype(x1.dtype(), [&](auto pt) {
        using T = typename decltype(pt)::type;
        Elementwise<const T, const T, bool>(NotEqualImpl<T>{}, x1, x2, out);
    });
}
namespace {
// Per-element functor for Elementwise: out = (x1 > x2); index is unused.
template <typename T>
struct GreaterImpl {
    using CudaType = cuda_internal::DataType<T>;
    __device__ void operator()(int64_t /*i*/, CudaType x1, CudaType x2, bool& out) { out = x1 > x2; }
};
}  // namespace
// Elementwise strict greater-than; out has dtype bool.
void CudaDevice::Greater(const Array& x1, const Array& x2, const Array& out) {
    CheckDevicesCompatible(x1, x2, out);
    CudaSetDeviceScope scope{index()};
    // Dispatch on x1's dtype; x2 presumably shares it -- confirm caller contract.
    VisitDtype(x1.dtype(), [&](auto pt) {
        using T = typename decltype(pt)::type;
        Elementwise<const T, const T, bool>(GreaterImpl<T>{}, x1, x2, out);
    });
}
namespace {
// Per-element functor for Elementwise: out = (x1 >= x2); index is unused.
template <typename T>
struct GreaterEqualImpl {
    using CudaType = cuda_internal::DataType<T>;
    __device__ void operator()(int64_t /*i*/, CudaType x1, CudaType x2, bool& out) { out = x1 >= x2; }
};
}  // namespace
// Elementwise greater-or-equal; out has dtype bool.
void CudaDevice::GreaterEqual(const Array& x1, const Array& x2, const Array& out) {
    CheckDevicesCompatible(x1, x2, out);
    CudaSetDeviceScope scope{index()};
    // Dispatch on x1's dtype; x2 presumably shares it -- confirm caller contract.
    VisitDtype(x1.dtype(), [&](auto pt) {
        using T = typename decltype(pt)::type;
        Elementwise<const T, const T, bool>(GreaterEqualImpl<T>{}, x1, x2, out);
    });
}
namespace {
// Per-element functor for Elementwise: out = !x; index is unused.
template <typename T>
struct LogicalNotImpl {
    using CudaType = cuda_internal::DataType<T>;
    __device__ void operator()(int64_t /*i*/, CudaType x, bool& out) { out = !x; }
};
}  // namespace
// Elementwise logical negation; out has dtype bool.
void CudaDevice::LogicalNot(const Array& x, const Array& out) {
    CheckDevicesCompatible(x, out);
    CudaSetDeviceScope scope{index()};
    VisitDtype(x.dtype(), [&](auto pt) {
        using T = typename decltype(pt)::type;
        Elementwise<const T, bool>(LogicalNotImpl<T>{}, x, out);
    });
}
} // namespace cuda
} // namespace chainerx
| 8419fbbf9aa68bd13d9f7e1ef7c51f7aea89a4e0.cu | #include "chainerx/cuda/cuda_device.h"
#include <cstdint>
#include <cuda_runtime.h>
#include "chainerx/array.h"
#include "chainerx/cuda/cuda_runtime.h"
#include "chainerx/cuda/cuda_set_device_scope.h"
#include "chainerx/cuda/data_type.cuh"
#include "chainerx/cuda/elementwise.cuh"
#include "chainerx/device.h"
#include "chainerx/dtype.h"
namespace chainerx {
namespace cuda {
namespace {
// Per-element functor for Elementwise: out = (x1 == x2); index is unused.
template <typename T>
struct EqualImpl {
    using CudaType = cuda_internal::DataType<T>;
    __device__ void operator()(int64_t /*i*/, CudaType x1, CudaType x2, bool& out) { out = x1 == x2; }
};
}  // namespace
// Elementwise equality; out has dtype bool.
void CudaDevice::Equal(const Array& x1, const Array& x2, const Array& out) {
    CheckDevicesCompatible(x1, x2, out);
    CudaSetDeviceScope scope{index()};
    // Dispatch on x1's dtype; x2 presumably shares it -- confirm caller contract.
    VisitDtype(x1.dtype(), [&](auto pt) {
        using T = typename decltype(pt)::type;
        Elementwise<const T, const T, bool>(EqualImpl<T>{}, x1, x2, out);
    });
}
namespace {
// Per-element functor for Elementwise: out = (x1 != x2); index is unused.
template <typename T>
struct NotEqualImpl {
    using CudaType = cuda_internal::DataType<T>;
    __device__ void operator()(int64_t /*i*/, CudaType x1, CudaType x2, bool& out) { out = x1 != x2; }
};
}  // namespace
// Elementwise inequality; out has dtype bool.
void CudaDevice::NotEqual(const Array& x1, const Array& x2, const Array& out) {
    CheckDevicesCompatible(x1, x2, out);
    CudaSetDeviceScope scope{index()};
    // Dispatch on x1's dtype; x2 presumably shares it -- confirm caller contract.
    VisitDtype(x1.dtype(), [&](auto pt) {
        using T = typename decltype(pt)::type;
        Elementwise<const T, const T, bool>(NotEqualImpl<T>{}, x1, x2, out);
    });
}
namespace {
// Per-element functor for Elementwise: out = (x1 > x2); index is unused.
template <typename T>
struct GreaterImpl {
    using CudaType = cuda_internal::DataType<T>;
    __device__ void operator()(int64_t /*i*/, CudaType x1, CudaType x2, bool& out) { out = x1 > x2; }
};
}  // namespace
// Elementwise strict greater-than; out has dtype bool.
void CudaDevice::Greater(const Array& x1, const Array& x2, const Array& out) {
    CheckDevicesCompatible(x1, x2, out);
    CudaSetDeviceScope scope{index()};
    // Dispatch on x1's dtype; x2 presumably shares it -- confirm caller contract.
    VisitDtype(x1.dtype(), [&](auto pt) {
        using T = typename decltype(pt)::type;
        Elementwise<const T, const T, bool>(GreaterImpl<T>{}, x1, x2, out);
    });
}
namespace {
// Per-element functor for Elementwise: out = (x1 >= x2); index is unused.
template <typename T>
struct GreaterEqualImpl {
    using CudaType = cuda_internal::DataType<T>;
    __device__ void operator()(int64_t /*i*/, CudaType x1, CudaType x2, bool& out) { out = x1 >= x2; }
};
}  // namespace
// Elementwise greater-or-equal; out has dtype bool.
void CudaDevice::GreaterEqual(const Array& x1, const Array& x2, const Array& out) {
    CheckDevicesCompatible(x1, x2, out);
    CudaSetDeviceScope scope{index()};
    // Dispatch on x1's dtype; x2 presumably shares it -- confirm caller contract.
    VisitDtype(x1.dtype(), [&](auto pt) {
        using T = typename decltype(pt)::type;
        Elementwise<const T, const T, bool>(GreaterEqualImpl<T>{}, x1, x2, out);
    });
}
namespace {
// Per-element functor for Elementwise: out = !x; index is unused.
template <typename T>
struct LogicalNotImpl {
    using CudaType = cuda_internal::DataType<T>;
    __device__ void operator()(int64_t /*i*/, CudaType x, bool& out) { out = !x; }
};
}  // namespace
// Elementwise logical negation; out has dtype bool.
void CudaDevice::LogicalNot(const Array& x, const Array& out) {
    CheckDevicesCompatible(x, out);
    CudaSetDeviceScope scope{index()};
    VisitDtype(x.dtype(), [&](auto pt) {
        using T = typename decltype(pt)::type;
        Elementwise<const T, bool>(LogicalNotImpl<T>{}, x, out);
    });
}
} // namespace cuda
} // namespace chainerx
|
1bf394085b64e9b747ad2227c867fbdb69a1f197.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2019-2023, Lawrence Livermore National Security, LLC and
// other Serac Project Developers. See the top-level LICENSE file for
// details.
//
// SPDX-License-Identifier: (BSD-3-Clause)
#include <gtest/gtest.h>
#include "serac/numerics/functional/tensor.hpp"
using namespace serac;
static constexpr double tolerance = 4.0e-16;
// this is intended to mimic GTEST's EXPECT_LT macro, except
// that it works inside a cuda kernel. This macro prints the error message
// on a failed test, and sets an error flag (requires an `int * error` to be
// in the scope where the macro is used)
#define CUDA_EXPECT_LT(value, threshold) \
if (value >= threshold) { \
printf("%s:%d: Failure\n", __FILE__, __LINE__); \
printf("Expected less than %f, actual: %f\n", threshold, value); \
*error = 1; \
}
// Device-side test body: exercises basic tensor algebra (squared_norm, sym,
// dev, inv, dot) against hand-computed expected values.  On any mismatch
// CUDA_EXPECT_LT prints a gtest-style message and sets *error.
__global__ void basic_operations(int* error)
{
    auto I = Identity<3>();
    // Device-safe absolute value for the comparisons below.
    auto abs = [](auto x) { return (x < 0) ? -x : x; };
    tensor<double, 3> u = {1, 2, 3};
    tensor<double, 4> v = {4, 5, 6, 7};
    // for some reason make_tensor(...) is producing a compiler error about
    // "calling constexpr __device__ function from __host__ __device__".
    // I have a minimal reproducer for NVIDIA to investigate
    tensor<double, 3, 3> A = {{
        {0.0, 2.0, 4.0},
        {1.0, 3.0, 5.0},
        {2.0, 4.0, 6.0},
    }};
    double squared_normA = 111.0;
    CUDA_EXPECT_LT(abs(squared_norm(A) - squared_normA), tolerance);
    tensor<double, 3, 3> symA = {{{0, 1.5, 3}, {1.5, 3, 4.5}, {3, 4.5, 6}}};
    CUDA_EXPECT_LT(abs(squared_norm(sym(A) - symA)), tolerance);
    tensor<double, 3, 3> devA = {{{-3, 2, 4}, {1, 0, 5}, {2, 4, 3}}};
    CUDA_EXPECT_LT(abs(squared_norm(dev(A) - devA)), tolerance);
    tensor<double, 3, 3> invAp1 = {{{-4, -1, 3}, {-1.5, 0.5, 0.5}, {2, 0, -1}}};
    CUDA_EXPECT_LT(abs(squared_norm(inv(A + I) - invAp1)), tolerance);
    tensor<double, 3> Au = {16, 22, 28};
    CUDA_EXPECT_LT(abs(squared_norm(dot(A, u) - Au)), tolerance);
    tensor<double, 3> uA = {8, 20, 32};
    CUDA_EXPECT_LT(abs(squared_norm(dot(u, A) - uA)), tolerance);
    double uAu = 144;
    CUDA_EXPECT_LT(abs(dot(u, A, u) - uAu), tolerance);
    tensor<double, 3, 4> B = {{{0.0, -1.0, -2.0, -3.0}, {3.0, 2.0, 1.0, 0.0}, {6.0, 5.0, 4.0, 3.0}}};
    double uBv = 300;
    CUDA_EXPECT_LT(abs(dot(u, B, v) - uBv), tolerance);
}
// Host-side gtest wrapper: runs the device checks on a single thread and
// reads the error flag back through managed memory.
TEST(Tensor, BasicOperations)
{
    int* error;
    hipMallocManaged(&error, sizeof(int));
    *error = 0;
    hipLaunchKernelGGL(( basic_operations), dim3(1), dim3(1), 0, 0, error);
    // Synchronize so the managed flag is safe to read on the host.
    hipDeviceSynchronize();
    EXPECT_EQ(*error, 0);
    hipFree(error);
}
// Device-side test body: builds the isotropic elasticity stiffness tensor C
// explicitly, checks that C : sym(grad_u) matches the closed-form Hooke's-law
// stress, and that the AD gradient of the stress function recovers C.
__global__ void elasticity(int* error)
{
    auto I = Identity<3>();
    static auto abs = [](auto x) { return (x < 0) ? -x : x; };
    // Lame parameters.
    double lambda = 5.0;
    double mu = 3.0;
    tensor<double, 3, 3, 3, 3> C;
    for (int i = 0; i < 3; i++) {
        for (int j = 0; j < 3; j++) {
            for (int k = 0; k < 3; k++) {
                for (int l = 0; l < 3; l++) {
                    C(i, j, k, l) = lambda * (i == j) * (k == l) + mu * ((i == k) * (j == l) + (i == l) * (j == k));
                }
            }
        }
    }
    // Hooke's law: sigma = lambda tr(eps) I + 2 mu eps.
    auto sigma = [=](auto epsilon) { return lambda * tr(epsilon) * I + 2.0 * mu * epsilon; };
    tensor<double, 3, 3> grad_u = {{
        {0.0, 2.0, 4.0},
        {1.0, 3.0, 5.0},
        {2.0, 4.0, 6.0},
    }};
    CUDA_EXPECT_LT(abs(squared_norm(double_dot(C, sym(grad_u)) - sigma(sym(grad_u)))), tolerance);
    // Automatic differentiation: d(sigma)/d(epsilon) should equal C.
    auto epsilon = sym(make_dual(grad_u));
    tensor dsigma_depsilon = get_gradient(sigma(epsilon));
    CUDA_EXPECT_LT(abs(squared_norm(dsigma_depsilon - C)), tolerance);
}
// Host-side gtest wrapper: single-thread launch, managed error flag.
TEST(Tensor, Elasticity)
{
    int* error;
    hipMallocManaged(&error, sizeof(int));
    *error = 0;
    hipLaunchKernelGGL(( elasticity), dim3(1), dim3(1), 0, 0, error);
    hipDeviceSynchronize();
    EXPECT_EQ(*error, 0);
    hipFree(error);
}
// Device-side test body: for the Navier-Stokes-style stress
// sigma(p, v, L) = rho v (x) v + 2 mu sym(L) - p I, checks the hand-derived
// partial derivatives w.r.t. p, v, and L against automatic differentiation.
__global__ void navier_stokes(int* error)
{
    auto I = Identity<3>();
    static auto abs = [](auto x) { return (x < 0) ? -x : x; };
    static constexpr double rho = 3.0;
    static constexpr double mu = 2.0;
    auto sigma = [&](auto p, auto v, auto L) { return rho * outer(v, v) + 2.0 * mu * sym(L) - p * I; };
    // Analytic derivative w.r.t. pressure: -I.
    auto dsigma_dp = [&](auto /*p*/, auto /*v*/, auto /*L*/) { return -1.0 * I; };
    // Analytic derivative w.r.t. velocity: rho * d(v (x) v)/dv.
    auto dsigma_dv = [&](auto /*p*/, auto v, auto /*L*/) {
        tensor<double, 3, 3, 3> A{};
        for (int i = 0; i < 3; i++) {
            for (int j = 0; j < 3; j++) {
                for (int k = 0; k < 3; k++) {
                    A(i, j, k) = rho * ((i == k) * v[j] + (j == k) * v[i]);
                }
            }
        }
        return A;
    };
    // Analytic derivative w.r.t. the velocity gradient: 2 mu * sym projector.
    auto dsigma_dL = [&](auto /*p*/, auto /*v*/, auto /*L*/) {
        tensor<double, 3, 3, 3, 3> A{};
        for (int i = 0; i < 3; i++) {
            for (int j = 0; j < 3; j++) {
                for (int k = 0; k < 3; k++) {
                    for (int l = 0; l < 3; l++) {
                        A(i, j, k, l) = mu * ((i == k) * (j == l) + (i == l) * (j == k));
                    }
                }
            }
        }
        return A;
    };
    double p = 3.14;
    tensor v = {{1.0, 2.0, 3.0}};
    tensor<double, 3, 3> L = {{{1.0, 2.0, 3.0}, {4.0, 5.0, 6.0}, {7.0, 8.0, 9.0}}};
    {
        auto exact = dsigma_dp(p, v, L);
        auto ad = get_gradient(sigma(make_dual(p), v, L));
        CUDA_EXPECT_LT(abs(squared_norm(exact - ad)), tolerance);
    }
    {
        auto exact = dsigma_dv(p, v, L);
        auto ad = get_gradient(sigma(p, make_dual(v), L));
        CUDA_EXPECT_LT(abs(squared_norm(exact - ad)), tolerance);
    }
    {
        auto exact = dsigma_dL(p, v, L);
        auto ad = get_gradient(sigma(p, v, make_dual(L)));
        CUDA_EXPECT_LT(abs(squared_norm(exact - ad)), tolerance);
    }
}
// Host-side gtest wrapper: single-thread launch, managed error flag.
TEST(Tensor, NavierStokes)
{
    int* error;
    hipMallocManaged(&error, sizeof(int));
    *error = 0;
    hipLaunchKernelGGL(( navier_stokes), dim3(1), dim3(1), 0, 0, error);
    hipDeviceSynchronize();
    EXPECT_EQ(*error, 0);
    hipFree(error);
}
// Device-side test body: checks isotropic-tensor shortcuts (identity dot
// products, isotropic stiffness double-dot, det/tr/sym of I) against the
// general-tensor implementations.
// NOTE(review): unlike the other kernels here, this one does not define a
// local `abs` lambda and relies on an `abs` overload visible in device code
// for double arguments -- confirm it does not resolve to the int-only ::abs.
__global__ void isotropic_operations(int* error)
{
    auto I = Identity<3>();
    // Lame parameters for the isotropic stiffness comparison below.
    double lambda = 5.0;
    double mu = 3.0;
    tensor<double, 3> u = {1, 2, 3};
    tensor<double, 3, 3> A = {{
        {0.0, 2.0, 4.0},
        {1.0, 3.0, 5.0},
        {2.0, 4.0, 6.0},
    }};
    // Identity should act as a no-op under dot from either side.
    CUDA_EXPECT_LT(abs(squared_norm(dot(I, u) - u)), tolerance);
    CUDA_EXPECT_LT(abs(squared_norm(dot(u, I) - u)), tolerance);
    CUDA_EXPECT_LT(abs(squared_norm(dot(I, A) - A)), tolerance);
    CUDA_EXPECT_LT(abs(squared_norm(dot(A, I) - A)), tolerance);
    CUDA_EXPECT_LT(double_dot(I, A) - tr(A), tolerance);
    // Isotropic stiffness applied via double_dot must match Hooke's law.
    auto sigma = [=](auto epsilon) { return lambda * tr(epsilon) * I + 2.0 * mu * epsilon; };
    isotropic_tensor<double, 3, 3, 3, 3> C{lambda, 2 * mu, 0.0};
    auto strain = sym(A);
    CUDA_EXPECT_LT(squared_norm(double_dot(C, strain) - sigma(strain)), tolerance);
    CUDA_EXPECT_LT(det(I) - 1, tolerance);
    CUDA_EXPECT_LT(tr(I) - 3, tolerance);
    CUDA_EXPECT_LT(squared_norm(sym(I) - I), tolerance);
}
// Host-side gtest wrapper: single-thread launch, managed error flag.
TEST(Tensor, IsotropicOperations)
{
    int* error;
    hipMallocManaged(&error, sizeof(int));
    *error = 0;
    hipLaunchKernelGGL(( isotropic_operations), dim3(1), dim3(1), 0, 0, error);
    hipDeviceSynchronize();
    EXPECT_EQ(*error, 0);
    hipFree(error);
}
| 1bf394085b64e9b747ad2227c867fbdb69a1f197.cu | // Copyright (c) 2019-2023, Lawrence Livermore National Security, LLC and
// other Serac Project Developers. See the top-level LICENSE file for
// details.
//
// SPDX-License-Identifier: (BSD-3-Clause)
#include <gtest/gtest.h>
#include "serac/numerics/functional/tensor.hpp"
using namespace serac;
static constexpr double tolerance = 4.0e-16;
// this is intended to mimic GTEST's EXPECT_LT macro, except
// that it works inside a cuda kernel. This macro prints the error message
// on a failed test, and sets an error flag (requires an `int * error` to be
// in the scope where the macro is used)
#define CUDA_EXPECT_LT(value, threshold) \
if (value >= threshold) { \
printf("%s:%d: Failure\n", __FILE__, __LINE__); \
printf("Expected less than %f, actual: %f\n", threshold, value); \
*error = 1; \
}
// Device-side test body: exercises basic tensor algebra (squared_norm, sym,
// dev, inv, dot) against hand-computed expected values.  On any mismatch
// CUDA_EXPECT_LT prints a gtest-style message and sets *error.
__global__ void basic_operations(int* error)
{
    auto I = Identity<3>();
    // Device-safe absolute value for the comparisons below.
    auto abs = [](auto x) { return (x < 0) ? -x : x; };
    tensor<double, 3> u = {1, 2, 3};
    tensor<double, 4> v = {4, 5, 6, 7};
    // for some reason make_tensor(...) is producing a compiler error about
    // "calling constexpr __device__ function from __host__ __device__".
    // I have a minimal reproducer for NVIDIA to investigate
    tensor<double, 3, 3> A = {{
        {0.0, 2.0, 4.0},
        {1.0, 3.0, 5.0},
        {2.0, 4.0, 6.0},
    }};
    double squared_normA = 111.0;
    CUDA_EXPECT_LT(abs(squared_norm(A) - squared_normA), tolerance);
    tensor<double, 3, 3> symA = {{{0, 1.5, 3}, {1.5, 3, 4.5}, {3, 4.5, 6}}};
    CUDA_EXPECT_LT(abs(squared_norm(sym(A) - symA)), tolerance);
    tensor<double, 3, 3> devA = {{{-3, 2, 4}, {1, 0, 5}, {2, 4, 3}}};
    CUDA_EXPECT_LT(abs(squared_norm(dev(A) - devA)), tolerance);
    tensor<double, 3, 3> invAp1 = {{{-4, -1, 3}, {-1.5, 0.5, 0.5}, {2, 0, -1}}};
    CUDA_EXPECT_LT(abs(squared_norm(inv(A + I) - invAp1)), tolerance);
    tensor<double, 3> Au = {16, 22, 28};
    CUDA_EXPECT_LT(abs(squared_norm(dot(A, u) - Au)), tolerance);
    tensor<double, 3> uA = {8, 20, 32};
    CUDA_EXPECT_LT(abs(squared_norm(dot(u, A) - uA)), tolerance);
    double uAu = 144;
    CUDA_EXPECT_LT(abs(dot(u, A, u) - uAu), tolerance);
    tensor<double, 3, 4> B = {{{0.0, -1.0, -2.0, -3.0}, {3.0, 2.0, 1.0, 0.0}, {6.0, 5.0, 4.0, 3.0}}};
    double uBv = 300;
    CUDA_EXPECT_LT(abs(dot(u, B, v) - uBv), tolerance);
}
// Host-side gtest wrapper: runs the device checks on a single thread and
// reads the error flag back through managed memory.
TEST(Tensor, BasicOperations)
{
    int* error;
    cudaMallocManaged(&error, sizeof(int));
    *error = 0;
    basic_operations<<<1, 1>>>(error);
    // Synchronize so the managed flag is safe to read on the host.
    cudaDeviceSynchronize();
    EXPECT_EQ(*error, 0);
    cudaFree(error);
}
// Device-side test body: builds the isotropic elasticity stiffness tensor C
// explicitly, checks that C : sym(grad_u) matches the closed-form Hooke's-law
// stress, and that the AD gradient of the stress function recovers C.
__global__ void elasticity(int* error)
{
    auto I = Identity<3>();
    static auto abs = [](auto x) { return (x < 0) ? -x : x; };
    // Lame parameters.
    double lambda = 5.0;
    double mu = 3.0;
    tensor<double, 3, 3, 3, 3> C;
    for (int i = 0; i < 3; i++) {
        for (int j = 0; j < 3; j++) {
            for (int k = 0; k < 3; k++) {
                for (int l = 0; l < 3; l++) {
                    C(i, j, k, l) = lambda * (i == j) * (k == l) + mu * ((i == k) * (j == l) + (i == l) * (j == k));
                }
            }
        }
    }
    // Hooke's law: sigma = lambda tr(eps) I + 2 mu eps.
    auto sigma = [=](auto epsilon) { return lambda * tr(epsilon) * I + 2.0 * mu * epsilon; };
    tensor<double, 3, 3> grad_u = {{
        {0.0, 2.0, 4.0},
        {1.0, 3.0, 5.0},
        {2.0, 4.0, 6.0},
    }};
    CUDA_EXPECT_LT(abs(squared_norm(double_dot(C, sym(grad_u)) - sigma(sym(grad_u)))), tolerance);
    // Automatic differentiation: d(sigma)/d(epsilon) should equal C.
    auto epsilon = sym(make_dual(grad_u));
    tensor dsigma_depsilon = get_gradient(sigma(epsilon));
    CUDA_EXPECT_LT(abs(squared_norm(dsigma_depsilon - C)), tolerance);
}
// Host-side gtest wrapper: single-thread launch, managed error flag.
TEST(Tensor, Elasticity)
{
    int* error;
    cudaMallocManaged(&error, sizeof(int));
    *error = 0;
    elasticity<<<1, 1>>>(error);
    cudaDeviceSynchronize();
    EXPECT_EQ(*error, 0);
    cudaFree(error);
}
// Device-side test of the Navier-Stokes momentum-flux tensor
// sigma(p, v, L) = rho v (x) v + 2 mu sym(L) - p I, comparing hand-derived
// partial derivatives against forward-mode AD for each argument in turn.
// Sets *error on the first failing CUDA_EXPECT_LT.
__global__ void navier_stokes(int* error)
{
  auto I = Identity<3>();
  static auto abs = [](auto x) { return (x < 0) ? -x : x; };
  static constexpr double rho = 3.0;  // density
  static constexpr double mu = 2.0;   // dynamic viscosity
  auto sigma = [&](auto p, auto v, auto L) { return rho * outer(v, v) + 2.0 * mu * sym(L) - p * I; };
  // d(sigma)/dp = -I
  auto dsigma_dp = [&](auto /*p*/, auto /*v*/, auto /*L*/) { return -1.0 * I; };
  // d(sigma)/dv: derivative of rho v_i v_j w.r.t. v_k.
  auto dsigma_dv = [&](auto /*p*/, auto v, auto /*L*/) {
    tensor<double, 3, 3, 3> A{};
    for (int i = 0; i < 3; i++) {
      for (int j = 0; j < 3; j++) {
        for (int k = 0; k < 3; k++) {
          A(i, j, k) = rho * ((i == k) * v[j] + (j == k) * v[i]);
        }
      }
    }
    return A;
  };
  // d(sigma)/dL: derivative of 2 mu sym(L), i.e. the symmetrizing 4th-order tensor.
  auto dsigma_dL = [&](auto /*p*/, auto /*v*/, auto /*L*/) {
    tensor<double, 3, 3, 3, 3> A{};
    for (int i = 0; i < 3; i++) {
      for (int j = 0; j < 3; j++) {
        for (int k = 0; k < 3; k++) {
          for (int l = 0; l < 3; l++) {
            A(i, j, k, l) = mu * ((i == k) * (j == l) + (i == l) * (j == k));
          }
        }
      }
    }
    return A;
  };
  // Sample evaluation point.
  double p = 3.14;
  tensor v = {{1.0, 2.0, 3.0}};
  tensor<double, 3, 3> L = {{{1.0, 2.0, 3.0}, {4.0, 5.0, 6.0}, {7.0, 8.0, 9.0}}};
  {
    // AD w.r.t. the pressure argument only.
    auto exact = dsigma_dp(p, v, L);
    auto ad = get_gradient(sigma(make_dual(p), v, L));
    CUDA_EXPECT_LT(abs(squared_norm(exact - ad)), tolerance);
  }
  {
    // AD w.r.t. the velocity argument only.
    auto exact = dsigma_dv(p, v, L);
    auto ad = get_gradient(sigma(p, make_dual(v), L));
    CUDA_EXPECT_LT(abs(squared_norm(exact - ad)), tolerance);
  }
  {
    // AD w.r.t. the velocity-gradient argument only.
    auto exact = dsigma_dL(p, v, L);
    auto ad = get_gradient(sigma(p, v, make_dual(L)));
    CUDA_EXPECT_LT(abs(squared_norm(exact - ad)), tolerance);
  }
}
// Runs the navier_stokes device checks; nonzero *error means a device-side
// expectation failed.
TEST(Tensor, NavierStokes)
{
  int* error;
  ASSERT_EQ(cudaMallocManaged(&error, sizeof(int)), cudaSuccess);
  *error = 0;
  navier_stokes<<<1, 1>>>(error);
  // Check both launch errors and asynchronous kernel faults.
  EXPECT_EQ(cudaGetLastError(), cudaSuccess);
  EXPECT_EQ(cudaDeviceSynchronize(), cudaSuccess);
  EXPECT_EQ(*error, 0);
  cudaFree(error);
}
// Device-side test of isotropic-tensor shortcuts (Identity, isotropic_tensor):
// dot/double_dot with I, isotropic stiffness vs. the explicit constitutive law,
// and det/tr/sym invariants of the identity.
// Sets *error on the first failing CUDA_EXPECT_LT.
__global__ void isotropic_operations(int* error)
{
  auto I = Identity<3>();
  // Define abs locally like the sibling kernels do, so the scalar comparisons
  // below cannot silently bind to an integer overload and truncate.
  static auto abs = [](auto x) { return (x < 0) ? -x : x; };
  double lambda = 5.0;
  double mu = 3.0;
  tensor<double, 3> u = {1, 2, 3};
  tensor<double, 3, 3> A = {{
      {0.0, 2.0, 4.0},
      {1.0, 3.0, 5.0},
      {2.0, 4.0, 6.0},
  }};
  // Identity must act as a no-op under contraction from either side.
  CUDA_EXPECT_LT(abs(squared_norm(dot(I, u) - u)), tolerance);
  CUDA_EXPECT_LT(abs(squared_norm(dot(u, I) - u)), tolerance);
  CUDA_EXPECT_LT(abs(squared_norm(dot(I, A) - A)), tolerance);
  CUDA_EXPECT_LT(abs(squared_norm(dot(A, I) - A)), tolerance);
  // double_dot(I, A) == tr(A); wrap in abs so a negative error cannot pass.
  CUDA_EXPECT_LT(abs(double_dot(I, A) - tr(A)), tolerance);
  auto sigma = [=](auto epsilon) { return lambda * tr(epsilon) * I + 2.0 * mu * epsilon; };
  // Isotropic stiffness {lambda, 2 mu, 0} must match the explicit law.
  isotropic_tensor<double, 3, 3, 3, 3> C{lambda, 2 * mu, 0.0};
  auto strain = sym(A);
  CUDA_EXPECT_LT(squared_norm(double_dot(C, strain) - sigma(strain)), tolerance);
  // Scalar invariants of the identity (abs guards against negative error).
  CUDA_EXPECT_LT(abs(det(I) - 1), tolerance);
  CUDA_EXPECT_LT(abs(tr(I) - 3), tolerance);
  CUDA_EXPECT_LT(squared_norm(sym(I) - I), tolerance);
}
// Runs the isotropic_operations device checks; nonzero *error means a
// device-side expectation failed.
TEST(Tensor, IsotropicOperations)
{
  int* error;
  ASSERT_EQ(cudaMallocManaged(&error, sizeof(int)), cudaSuccess);
  *error = 0;
  isotropic_operations<<<1, 1>>>(error);
  // Check both launch errors and asynchronous kernel faults.
  EXPECT_EQ(cudaGetLastError(), cudaSuccess);
  EXPECT_EQ(cudaDeviceSynchronize(), cudaSuccess);
  EXPECT_EQ(*error, 0);
  cudaFree(error);
}
|
bde1f57b74b41a4a1af13fe060a8358b6afe2336.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* From PyTorch:
*
* Copyright (c) 2016- Facebook, Inc (Adam Paszke)
* Copyright (c) 2014- Facebook, Inc (Soumith Chintala)
* Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)
* Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
* Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
* Copyright (c) 2011-2013 NYU (Clement Farabet)
* Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston)
* Copyright (c) 2006 Idiap Research Institute (Samy Bengio)
* Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz)
*
* From Caffe2:
*
* Copyright (c) 2016-present, Facebook Inc. All rights reserved.
*
* All contributions by Facebook:
* Copyright (c) 2016 Facebook Inc.
*
* All contributions by Google:
* Copyright (c) 2015 Google Inc.
* All rights reserved.
*
* All contributions by Yangqing Jia:
* Copyright (c) 2015 Yangqing Jia
* All rights reserved.
*
* All contributions from Caffe:
* Copyright(c) 2013, 2014, 2015, the respective contributors
* All rights reserved.
*
* All other contributions:
* Copyright(c) 2015, 2016 the respective contributors
* All rights reserved.
*
* Caffe2 uses a copyright model similar to Caffe: each contributor holds
* copyright over their contributions to Caffe2. The project versioning records
* all such contribution and copyright details. If a contributor wants to further
* mark their specific copyright on a particular contribution, they should
* indicate their copyright solely in the commit message of the change when it is
* committed.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America
* and IDIAP Research Institute nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/AccumulateType.h>
#include <ATen/hip/NumericLimits.cuh>
#include <THH/THH.h>
#include <THH/THHGeneral.h>
#include <THH/THHThrustAllocator.cuh>
#include "type_shim.h"
#include "compat.h"
#define ALIGN_BYTES 16
using Tensor = at::Tensor;
using TensorList = at::TensorList;
using ScalarType = at::ScalarType;
using at::acc_type;
// Forward epilogue: converts a raw input value into a log-probability,
// input - (max + log(sum(exp(input - max)))), using a precomputed log-sum-exp.
template<typename T, typename AccumT, typename OutT>
struct LogSoftMaxForwardEpilogue {
  // Construct from the separately-reduced max and sum-of-exponentials.
  __device__ __forceinline__ LogSoftMaxForwardEpilogue(AccumT max_input, AccumT sum)
    : logsum(max_input + ::log(sum)) {}
  // Construct from an already-fused max + log(sum) value (used in backward).
  __device__ __forceinline__ LogSoftMaxForwardEpilogue(AccumT max_log_sum_exp)
    : logsum(max_log_sum_exp) {}
  __device__ __forceinline__ OutT operator()(T input) const {
    return static_cast<OutT>(input - logsum);
  }
  const AccumT logsum;
};
// Backward epilogue for log-softmax: grad - exp(output) * sum(grad).
template<typename T, typename AccumT, typename OutT>
struct LogSoftMaxBackwardEpilogue {
  __device__ __forceinline__ LogSoftMaxBackwardEpilogue(AccumT sum)
    : sum(sum) {}
  __device__ __forceinline__ T operator()(OutT gradOutput, OutT output) const {
    return static_cast<T>(gradOutput - ::exp(static_cast<AccumT>(output)) * sum);
  }
  const AccumT sum;  // sum of incoming gradients over the softmax dimension
};
const int max_threads = 1024;
// Choose a 1-D block size for the softmax kernels: the largest power of two
// strictly below half of min(dim_size / ILP, max_threads), clamped to at
// least one full warp (the reduction kernels assume >= 32 threads).
inline dim3 SoftMax_getBlockSize(int ILP, uint64_t dim_size) {
  const uint64_t upper = ::min(dim_size / ILP, static_cast<uint64_t>(max_threads));
  uint64_t threads = 1;
  while (threads < upper / 2) {
    threads <<= 1;
  }
  // Launch at least a single warp - the kernel assumes that.
  threads = ::max(threads, static_cast<uint64_t>(32));
  return dim3(threads);
}
// Binary reduction operator: addition (used for block-level sum reductions).
template<typename T>
struct Add {
  __device__ __forceinline__ T operator()(T a, T b) const {
    return a + b;
  }
};
// Binary reduction operator: maximum (used for block-level max reductions).
template<typename T>
struct Max {
  __device__ __forceinline__ T operator()(T a, T b) const {
    return a < b ? b : a;
  }
};
////////////////////////////////////////////////////////////////////////////////
// Regular kernel (fast when dim_size is large; requires inner_size == 1)
////////////////////////////////////////////////////////////////////////////////
// Fold operator: running max over values widened to the accumulator type.
template <typename T, typename AccumT>
struct MaxFloat
{
  __device__ __forceinline__ AccumT operator()(AccumT max, T v) const {
    return ::max(max, (AccumT)v);
  }
};
// Fold operator: running sum over values widened to the accumulator type.
template<typename T, typename AccumT>
struct AddFloat
{
  __device__ __forceinline__ AccumT operator()(AccumT sum, T v) const {
    return sum + v;
  }
};
// Fold operator: running sum of exp(v - max_k), the numerically stable
// second pass of a log-sum-exp computation.
template<typename T, typename AccumT>
struct SumExpFloat
{
  __device__ __forceinline__ SumExpFloat(AccumT v)
    : max_k(v) {}
  __device__ __forceinline__ AccumT operator()(AccumT sum, T v) const {
    return sum + ::exp(v - max_k);
  }
  const AccumT max_k;  // row maximum, subtracted before exponentiation
};
// Block-wide reduction of one value per thread using shared memory `smem`
// (needs blockDim.x elements). Strategy: every thread stores its value, the
// first warp reduces 32-element strips into per-warp partials, then thread 0
// reduces the partials. All threads return the same result via smem[0].
// NOTE(review): appears to assume blockDim.x is a multiple of 32 — the strip
// loop reads lane*32+i for a full 32 elements; confirm callers guarantee it.
template <template<typename> class Reduction, typename AccumT>
__device__ __forceinline__ AccumT
blockReduce(AccumT* smem, AccumT val,
            const Reduction<AccumT>& r,
            AccumT defaultVal)
{
  // To avoid RaW races from chaining blockReduce calls together, we need a sync here
  __syncthreads();
  smem[threadIdx.x] = val;
  __syncthreads();
  AccumT warpVal = defaultVal;
  // First warp will perform per-warp reductions for the remaining warps
  // Mask has one bit per participating lane (one lane per warp of the block).
  uint32_t mask = (((uint64_t)1) << (blockDim.x / 32)) - 1;
  if (threadIdx.x < 32) {
    int lane = threadIdx.x % 32;
    if (lane < blockDim.x / 32) {
      #pragma unroll
      for (int i = 0; i < 32; ++i) {
        warpVal = r(warpVal, smem[lane * 32 + i]);
      }
      __syncwarp(mask);
      smem[lane] = warpVal;
    }
  }
  __syncthreads();
  // First thread will perform a reduction of the above per-warp reductions
  AccumT blockVal = defaultVal;
  if (threadIdx.x == 0) {
    for (int i = 0; i < blockDim.x / 32; ++i) {
      blockVal = r(blockVal, smem[i]);
    }
    smem[0] = blockVal;
  }
  // Sync and broadcast
  __syncthreads();
  return smem[0];
}
// Fused variant: reduces two values per thread with two different operators
// in a single pass (e.g. max and sum for log-sum-exp). Requires smem of
// 2 * blockDim.x elements; results are written through reducVal1/reducVal2.
template <template<typename> class Reduction1, template<typename> class Reduction2, typename AccumT>
__device__ __forceinline__ void
blockReduce(AccumT* smem,
            AccumT* reducVal1,
            AccumT val1,
            const Reduction1<AccumT>& r1,
            AccumT defaultVal1,
            AccumT* reducVal2,
            AccumT val2,
            const Reduction2<AccumT>& r2,
            AccumT defaultVal2)
{
  // To avoid RaW races from chaining blockReduce calls together, we need a sync here
  __syncthreads();
  // First half of smem holds val1, second half holds val2.
  smem[threadIdx.x] = val1;
  smem[blockDim.x + threadIdx.x] = val2;
  __syncthreads();
  AccumT warpVal1 = defaultVal1;
  AccumT warpVal2 = defaultVal2;
  // First warp will perform per-warp reductions for the remaining warps
  uint32_t mask = (((uint64_t)1) << (blockDim.x / 32)) - 1;
  if (threadIdx.x < 32) {
    int lane = threadIdx.x % 32;
    if (lane < blockDim.x / 32) {
      #pragma unroll
      for (int i = 0; i < 32; ++i) {
        warpVal1 = r1(warpVal1, smem[lane * 32 + i]);
        warpVal2 = r2(warpVal2, smem[lane * 32 + i + blockDim.x]);
      }
      __syncwarp(mask);
      smem[lane] = warpVal1;
      smem[lane + blockDim.x] = warpVal2;
    }
  }
  __syncthreads();
  // First thread will perform a reduction of the above per-warp reductions
  AccumT blockVal1 = defaultVal1;
  AccumT blockVal2 = defaultVal2;
  if (threadIdx.x == 0) {
    for (int i = 0; i < blockDim.x / 32; ++i) {
      blockVal1 = r1(blockVal1, smem[i]);
      blockVal2 = r2(blockVal2, smem[i + blockDim.x]);
    }
    smem[0] = blockVal1;
    smem[blockDim.x] = blockVal2;
  }
  // Sync and broadcast
  __syncthreads();
  *reducVal1 = smem[0];
  *reducVal2 = smem[blockDim.x];
  // Keep smem stable until every thread has read both results.
  __syncthreads();
}
// Per-thread strided reduction over `data[0..size)` with vectorized
// (ILP-wide, 16-byte) loads. `shift` is the element misalignment of `data`
// relative to ALIGN_BYTES; the first partial vector is handled scalar-wise,
// the aligned middle with ILP-wide loads, and the tail scalar-wise again.
// Returns this thread's partial result (combine with blockReduce afterwards).
template <template<typename, typename> class Reduction, int ILP, typename T, typename AccumT>
__device__ __forceinline__ AccumT
ilpReduce(int shift,
          T* data,
          int size,
          const Reduction<T, AccumT>& r,
          AccumT defaultVal)
{
  // LoadT is an ILP-element aligned chunk loaded in a single instruction.
  typedef typename std::aligned_storage<ILP*sizeof(T), ILP*alignof(T)>::type LoadT;
  AccumT threadVal = defaultVal;
  int offset = threadIdx.x;
  // shift and do 1
  if(shift > 0){
    // Rewind the pointer to the aligned boundary; threads below `shift`
    // would fall before the real start, so only threads >= shift contribute.
    data -= shift;
    size += shift;
    if(threadIdx.x >= shift){
      threadVal = r(threadVal, data[offset]);
    }
    size -= blockDim.x;
    data += blockDim.x;
  }
  // `last` elements at the end do not fill a whole ILP*blockDim.x stripe.
  int last = size % (ILP * blockDim.x);
  T v[ILP];
  LoadT* value = reinterpret_cast<LoadT*>(&v);
  for (; offset * ILP < (size - last); offset += blockDim.x) {
    *value = reinterpret_cast<LoadT*>(data)[offset];
    for (int j = 0; j < ILP; ++j) {
      threadVal = r(threadVal, v[j]);
    }
  }
  offset = size - last + threadIdx.x;
  // Epilogue
  for (; offset < size; offset += blockDim.x)
    threadVal = r(threadVal, data[offset]);
  return threadVal;
}
// Fused variant: folds the same data through two reduction operators in one
// pass over memory (e.g. max and sum), writing both partials through
// reducVal1/reducVal2. Same alignment/tail handling as above.
template <template<typename, typename> class Reduction1, template<typename, typename> class Reduction2, int ILP, typename T, typename AccumT>
__device__ __forceinline__ void
ilpReduce(int shift,
          T* data,
          int size,
          AccumT* reducVal1,
          const Reduction1<T, AccumT>& r1,
          AccumT defaultVal1,
          AccumT* reducVal2,
          const Reduction2<T, AccumT>& r2,
          AccumT defaultVal2)
{
  typedef typename std::aligned_storage<ILP*sizeof(T), ILP*alignof(T)>::type LoadT;
  AccumT threadVal1 = defaultVal1;
  AccumT threadVal2 = defaultVal2;
  int offset = threadIdx.x;
  // shift and do 1
  if(shift > 0){
    data -= shift;
    size += shift;
    if(threadIdx.x >= shift){
      threadVal1 = r1(threadVal1, data[offset]);
      threadVal2 = r2(threadVal2, data[offset]);
    }
    size -= blockDim.x;
    data += blockDim.x;
  }
  int last = size % (ILP * blockDim.x);
  T v[ILP];
  LoadT* value = reinterpret_cast<LoadT*>(&v);
  for (; offset * ILP < (size - last); offset += blockDim.x) {
    *value = reinterpret_cast<LoadT*>(data)[offset];
    for (int j = 0; j < ILP; ++j) {
      threadVal1 = r1(threadVal1, v[j]);
      threadVal2 = r2(threadVal2, v[j]);
    }
  }
  offset = size - last + threadIdx.x;
  // Epilogue
  for (; offset < size; offset += blockDim.x) {
    threadVal1 = r1(threadVal1, data[offset]);
    threadVal2 = r2(threadVal2, data[offset]);
  }
  *reducVal1 = threadVal1;
  *reducVal2 = threadVal2;
}
// Fused softmax + cross-entropy forward with label smoothing.
// Launch: one block per batch row; dynamic shared memory must be
// 2 * blockDim.x * sizeof(accscalar_t) (two-way block reduction).
// Writes the per-row loss and saves max + log(sum(exp)) for the backward pass.
template <int ILP, typename scalar_t, typename accscalar_t, typename outscalar_t, template <typename, typename, typename> class Epilogue>
__global__ void
cunn_SoftMaxXEntropyForward(
    accscalar_t *losses,
    outscalar_t *max_log_sum_exp,
    scalar_t *input,
    int64_t *labels,
    int64_t classes,
    const float smoothing)
{
  extern __shared__ unsigned char smem[];
  auto sdata = reinterpret_cast<accscalar_t*>(smem);
  // forward pointers to batch[blockIdx.x]
  // each block handles a sample in the mini-batch
  input += blockIdx.x * classes;
  //output += blockIdx.x * classes;
  // Element misalignment of this row w.r.t. the 16-byte vector-load boundary.
  const int shift = ((uint64_t)input) % ALIGN_BYTES / sizeof(scalar_t);
  int64_t label = labels[blockIdx.x];
  // find the max and sum
  accscalar_t threadMax, threadSum, max_k, sum_k;
  ilpReduce<MaxFloat, AddFloat, ILP, scalar_t, accscalar_t>(
      shift, input, classes,
      &threadMax, MaxFloat<scalar_t, accscalar_t>(),
      -at::numeric_limits<accscalar_t>::max(),
      &threadSum, AddFloat<scalar_t, accscalar_t>(),
      static_cast<accscalar_t>(0));
  blockReduce<Max, Add, accscalar_t>(
      sdata,
      &max_k, threadMax, Max<accscalar_t>(),
      -at::numeric_limits<accscalar_t>::max(),
      &sum_k, threadSum, Add<accscalar_t>(),
      static_cast<accscalar_t>(0));
  // Second pass: sum of exp(x - max) for a numerically stable log-sum-exp.
  accscalar_t threadExp = ilpReduce<SumExpFloat, ILP, scalar_t, accscalar_t>(shift, input, classes, SumExpFloat<scalar_t, accscalar_t>(max_k), static_cast<accscalar_t>(0));
  accscalar_t sumAll = blockReduce<Add, accscalar_t>(
      sdata, threadExp, Add<accscalar_t>(), static_cast<accscalar_t>(0));
  Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(max_k, sumAll);
  // calculate per element loss with label smoothing
  // reserve max + log_sum_exp for bprop
  if (threadIdx.x == 0) {
    accscalar_t log_prob = epilogue(static_cast<accscalar_t>(input[label]));
    losses[blockIdx.x] = (max_k + ::log(sumAll) - sum_k / classes) \
        * smoothing - log_prob * (1 - smoothing);
    max_log_sum_exp[blockIdx.x] = max_k + ::log(sumAll);
  }
}
// Scalar (non-vectorized) backward body for one batch row:
// gradInput = gradOutput * (softmax(logit) - smoothed one-hot target).
// Used when gradInput and logits have different alignment offsets, which
// makes vectorized loads/stores unsafe.
template <int ILP, typename scalar_t, typename accscalar_t, typename outscalar_t>
__device__ __forceinline__ void
apply(scalar_t *gradInput,
      scalar_t *logits,
      outscalar_t *max_log_sum_exp,
      outscalar_t *gradOutput,
      int64_t *labels,
      const float smoothing,
      int classes)
{
  // Smoothed target: (1 - smoothing) on the true class, smoothing/classes elsewhere.
  accscalar_t smooth_positives = 1.0 - smoothing;
  accscalar_t smooth_negatives = smoothing / classes;
  accscalar_t tmpGradOutput = gradOutput[blockIdx.x];
  int64_t label = labels[blockIdx.x];
  // coeff = max + log(sum(exp)); exp(logit - coeff) is the softmax probability.
  accscalar_t coeff = max_log_sum_exp[blockIdx.x];
  int offset = threadIdx.x;
  int last = classes % (ILP * blockDim.x);
  // Main body: manually unrolled ILP-wide strips (scalar loads, ILP registers).
  for (; offset < classes - last; offset += blockDim.x * ILP) {
    accscalar_t tmpLogits[ILP];
    #pragma unroll
    for (int j = 0; j < ILP; ++j) {
      tmpLogits[j] = static_cast<accscalar_t>(logits[offset + j * blockDim.x]);
    }
    #pragma unroll
    for (int j = 0; j < ILP; ++j)
      gradInput[offset + j * blockDim.x] = tmpGradOutput * (
          ::exp(tmpLogits[j] - coeff) - static_cast<accscalar_t>(
          (offset + j * blockDim.x == label) ? 1 : 0) *
          smooth_positives - smooth_negatives);
  }
  // Tail: remaining elements handled one at a time.
  for (; offset < classes; offset += blockDim.x)
    gradInput[offset] = tmpGradOutput * (::exp(
        static_cast<accscalar_t>(logits[offset]) - coeff) -
        static_cast<accscalar_t>((offset == label) ? 1 : 0) *
        smooth_positives - smooth_negatives);
}
// Vectorized backward body for one batch row, used when gradInput and logits
// share the same alignment offset `shift` (in elements) from the 16-byte
// boundary. Handles the misaligned head scalar-wise, the aligned middle with
// ILP-wide vector loads/stores, and the tail scalar-wise.
// After the head is consumed, `shift` goes negative and re-biases the
// label comparison so `offset - shift` is the original class index.
template <int ILP, typename scalar_t, typename accscalar_t, typename outscalar_t>
__device__ __forceinline__ void
aligned_apply(int shift,
              scalar_t *gradInput,
              scalar_t *logits,
              outscalar_t *max_log_sum_exp,
              outscalar_t *gradOutput,
              int64_t *labels,
              const float smoothing,
              int classes)
{
  // Smoothed target: (1 - smoothing) on the true class, smoothing/classes elsewhere.
  accscalar_t smooth_positives = 1.0 - smoothing;
  accscalar_t smooth_negatives = smoothing / classes;
  accscalar_t tmpGradOutput = gradOutput[blockIdx.x];
  int64_t label = labels[blockIdx.x];
  // coeff = max + log(sum(exp)); exp(logit - coeff) is the softmax probability.
  accscalar_t coeff = max_log_sum_exp[blockIdx.x];
  int offset = threadIdx.x;
  // shift and do 1
  if(shift > 0){
    // Rewind both pointers to the aligned boundary; only threads past the
    // misaligned gap write real elements.
    logits -= shift;
    gradInput -= shift;
    classes += shift;
    if(threadIdx.x >= shift){
      gradInput[offset] = tmpGradOutput * (::exp(
          static_cast<accscalar_t>(logits[offset]) - coeff) -
          static_cast<accscalar_t>(((offset - shift) == label) ? 1 : 0) *
          smooth_positives - smooth_negatives);
    }
    classes -= blockDim.x;
    gradInput += blockDim.x;
    logits += blockDim.x;
    shift -= blockDim.x;
  }
  int last = classes % (ILP * blockDim.x);
  typedef typename std::aligned_storage<ILP*sizeof(scalar_t), ILP*alignof(scalar_t)>::type LoadT;
  // input
  scalar_t v[ILP];
  LoadT* value = reinterpret_cast<LoadT*>(&v);
  // output
  scalar_t r[ILP];
  LoadT* result = reinterpret_cast<LoadT*>(&r);
  for (; offset * ILP < (classes - last); offset += blockDim.x) {
    *value = reinterpret_cast<LoadT*>(logits)[offset];
    #pragma unroll
    for (int j = 0; j < ILP; ++j) {
      r[j] = tmpGradOutput * (::exp(
          static_cast<accscalar_t>(v[j]) - coeff) -
          static_cast<accscalar_t>(((ILP * offset + j - shift) == label) ? 1 : 0) *
          smooth_positives - smooth_negatives);
    }
    reinterpret_cast<LoadT*>(gradInput)[offset] = *result;
  }
  offset = classes - last + threadIdx.x;
  // Tail: remaining elements handled one at a time.
  for (; offset < classes; offset += blockDim.x)
    gradInput[offset] = tmpGradOutput * (::exp(
        static_cast<accscalar_t>(logits[offset]) - coeff) -
        static_cast<accscalar_t>(((offset - shift) == label) ? 1 : 0) *
        smooth_positives - smooth_negatives);
}
// Backward kernel for the fused softmax-xentropy: one block per batch row.
// Dispatches to the vectorized path when gradInput and logits have matching
// alignment offsets, otherwise falls back to the scalar path.
template <int ILP, typename scalar_t, typename accscalar_t, typename outscalar_t, template<typename, typename, typename> class Epilogue>
__global__ void
cunn_SoftMaxXEntropyBackward(
    scalar_t *gradInput,
    scalar_t *logits,
    outscalar_t *max_log_sum_exp,
    outscalar_t *gradOutput,
    int64_t *labels,
    const float smoothing,
    int classes)
{
  // Advance both pointers to this block's row.
  gradInput += blockIdx.x * classes;
  logits += blockIdx.x * classes;
  // Do vectorized load/store when input/output have same alignment
  const int shift = ((uint64_t)logits) % ALIGN_BYTES / sizeof(scalar_t);
  const int shift_ = ((uint64_t)gradInput) % ALIGN_BYTES / sizeof(scalar_t);
  if (shift == shift_){
    aligned_apply<ILP, scalar_t, accscalar_t, outscalar_t>(shift, gradInput, logits, max_log_sum_exp, gradOutput, labels, smoothing, classes);
  }
  else {
    apply<ILP, scalar_t, accscalar_t, outscalar_t>(gradInput, logits, max_log_sum_exp, gradOutput, labels, smoothing, classes);
  }
}
// Host launcher for the fused softmax-xentropy forward.
// input_: [batch, classes] logits; labels_: [batch] int64 class indices.
// Returns {losses (float, per-row), max_log_sum_exp (saved for backward)}.
// half_to_float selects fp32 outputs for fp16 inputs.
template<template<typename, typename, typename> class Epilogue>
std::vector<Tensor> host_softmax_xentropy(
    const Tensor & input_,
    const Tensor & labels_,
    const float smoothing,
    const bool half_to_float){
  // Use the non-deprecated scalar_type() accessor, consistent with the
  // DISPATCH below and with host_softmax_xentropy_backward.
  if (half_to_float) AT_ASSERTM(input_.scalar_type() == ScalarType::Half,"conversion is supported for Half type only");
  AT_ASSERTM(labels_.scalar_type() == ScalarType::Long,"Label type should be CUDA Long");
  auto input = input_.contiguous();
  Tensor max_log_sum_exp = at::empty_like(labels_, half_to_float ? input.options().dtype(ScalarType::Float) : input.options());
  Tensor losses = at::empty_like(labels_, input_.options().dtype(ScalarType::Float));
  static_assert(std::is_same<acc_type<at::Half, true>, float>::value ||
                std::is_same<acc_type<at::Half, true>, double>::value,
                "accscalar_t for half should be float or double");
  AT_ASSERTM(input.dim() == 2, "Currently only 2 dim input supported");
  AT_ASSERTM(labels_.dim() == 1, "Labels should be 1 dimensional");
  AT_ASSERTM(input.size(0) == labels_.size(0), "Input and label should have same number of examples");
  AT_ASSERTM(input.numel() > 0, "Number of classes in input should not be 0");
  // Softmax is taken over dim 1; outer_size is the batch, inner_size must be 1.
  const int64_t dim = 1;
  int64_t outer_size = 1;
  int64_t dim_size = input.size(dim);
  int64_t inner_size = 1;
  hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
  for (int64_t i = 0; i < dim; ++i)
    outer_size *= input.size(i);
  for (int64_t i = dim + 1; i < input.dim(); ++i)
    inner_size *= input.size(i);
  // This kernel spawns a block per each element in the batch.
  // XXX: it assumes that inner_size == 1
  TORCH_CHECK(inner_size == 1, "Currently only inner size 1 supported");
  dim3 grid(outer_size);
  using namespace at;
  DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "host_softmax_xentropy",
    using accscalar_t = at::acc_type<scalar_t_0, true>;
    // ILP sized so one vector load is 16 bytes (float4-equivalent).
    const int ILP = sizeof(float4)/sizeof(scalar_t_0);
    dim3 block = SoftMax_getBlockSize(ILP, dim_size);
    // Shared memory: 2 * block.x accumulators for the fused max+sum reduce.
    if (!half_to_float) {
      hipLaunchKernelGGL(( cunn_SoftMaxXEntropyForward<ILP, scalar_t_0, accscalar_t, scalar_t_0, Epilogue>)
        , dim3(grid), dim3(block), 2 * block.x * sizeof(accscalar_t), stream,
        losses.DATA_PTR<accscalar_t>(), max_log_sum_exp.DATA_PTR<scalar_t_0>(),
        input.DATA_PTR<scalar_t_0>(), labels_.DATA_PTR<int64_t>(),
        dim_size, smoothing
      );
    } else {
      hipLaunchKernelGGL(( cunn_SoftMaxXEntropyForward<ILP, scalar_t_0, accscalar_t, accscalar_t, Epilogue>)
        , dim3(grid), dim3(block), 2 * block.x * sizeof(accscalar_t), stream,
        losses.DATA_PTR<accscalar_t>(), max_log_sum_exp.DATA_PTR<accscalar_t>(),
        input.DATA_PTR<scalar_t_0>(), labels_.DATA_PTR<int64_t>(),
        dim_size, smoothing
      );
    }
  );
  THCudaCheck(hipGetLastError());
  std::vector<at::Tensor> ret = {losses, max_log_sum_exp};
  return ret;
}
// Host launcher for the fused softmax-xentropy backward.
// Returns gradInput with the same shape/dtype as logits_; a zero-size
// grad_loss short-circuits to an (uninitialized) empty-like result.
template<template<typename, typename, typename> class Epilogue>
Tensor host_softmax_xentropy_backward(
    const at::Tensor &grad_loss,
    const at::Tensor &logits_,
    const at::Tensor &max_log_sum_exp,
    const at::Tensor &labels,
    const float smoothing,
    bool half_to_float) {
  // Gradient is taken along dim 1 of a [batch, classes] tensor.
  const int64_t dim = 1;
  Tensor gI = at::empty_like(logits_);
  if (grad_loss.numel() == 0) {
    return gI;
  }
  auto grad = grad_loss.contiguous();
  auto logits = logits_.contiguous();
  static_assert(std::is_same<acc_type<at::Half, true>, float>::value ||
                std::is_same<acc_type<at::Half, true>, double>::value,
                "accscalar_t for half should be float or double");
  // A 0-dim loss gradient (scalar) is treated as a 1-element batch.
  if (grad.dim() == 0) grad = grad.view(1);
  AT_ASSERTM(logits_.dim() == 2, "Currently only 2 dim input supported");
  AT_ASSERTM(labels.dim() == 1, "Labels should be 1 dimensional");
  AT_ASSERTM(logits_.numel() > 0, "Number of classes in input should not be 0");
  AT_ASSERTM(logits_.size(0) == labels.size(0), "Input and label should have same number of examples");
  AT_ASSERTM(labels.size(0) == grad.size(0), "Label and loss should have same number of examples");
  int64_t outer_size = 1;
  int64_t dim_size = logits.size(dim);
  int64_t inner_size = 1;
  for (int64_t i = 0; i < dim; ++i)
    outer_size *= logits.size(i);
  for (int64_t i = dim + 1; i < logits.dim(); ++i)
    inner_size *= logits.size(i);
  // See descriptions of kernels above.
  hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
  TORCH_CHECK(inner_size == 1, "Currently only inner size 1 supported");
  dim3 grid(outer_size);
  DISPATCH_FLOAT_AND_HALF(gI.scalar_type(), 0, "host_softmax_xentropy_backward",
    using accscalar_t = acc_type<scalar_t_0, true>;
    // ILP sized so one vector load is 16 bytes (float4-equivalent).
    const int ILP = sizeof(float4)/sizeof(scalar_t_0);
    dim3 block = SoftMax_getBlockSize(ILP, dim_size);
    if (!half_to_float) {
      hipLaunchKernelGGL(( cunn_SoftMaxXEntropyBackward<ILP, scalar_t_0, accscalar_t, scalar_t_0, Epilogue>)
        , dim3(grid), dim3(block), block.x * sizeof(accscalar_t), stream,
        gI.DATA_PTR<scalar_t_0>(), logits.DATA_PTR<scalar_t_0>(),
        max_log_sum_exp.DATA_PTR<scalar_t_0>(),
        grad.DATA_PTR<scalar_t_0>(), labels.DATA_PTR<int64_t>(),
        smoothing, dim_size
      );
    } else {
      hipLaunchKernelGGL(( cunn_SoftMaxXEntropyBackward<ILP, scalar_t_0, accscalar_t, accscalar_t, Epilogue>)
        , dim3(grid), dim3(block), block.x * sizeof(accscalar_t), stream,
        gI.DATA_PTR<scalar_t_0>(), logits.DATA_PTR<scalar_t_0>(),
        max_log_sum_exp.DATA_PTR<accscalar_t>(),
        grad.DATA_PTR<accscalar_t>(), labels.DATA_PTR<int64_t>(),
        smoothing, dim_size
      );
    }
  );
  THCudaCheck(hipGetLastError());
  return gI;
}
// Public entry point (forward): binds the log-softmax forward epilogue.
std::vector<Tensor> softmax_xentropy_cuda(const Tensor &input, const Tensor &labels, const float smoothing, const bool half_to_float){
  return host_softmax_xentropy<LogSoftMaxForwardEpilogue>(input, labels, smoothing, half_to_float);
}
// Public entry point (backward). A dtype mismatch between grad_loss and
// logits signals the mixed-precision path (fp32 grads for fp16 logits).
at::Tensor softmax_xentropy_backward_cuda(
    const at::Tensor &grad_loss,
    const at::Tensor &logits,
    const at::Tensor &max_log_sum_exp,
    const at::Tensor &labels,
    const float smoothing) {
  // Use the non-deprecated scalar_type() accessor, consistent with the
  // dispatch macros used elsewhere in this file.
  bool half_to_float = grad_loss.scalar_type() != logits.scalar_type();
  if (half_to_float) {
    AT_ASSERTM((grad_loss.scalar_type() == ScalarType::Float && logits.scalar_type() == ScalarType::Half), "expected input and grad types to match, or input to be at::Half and grad to be at::Float");
  }
  return host_softmax_xentropy_backward<LogSoftMaxBackwardEpilogue>(grad_loss, logits, max_log_sum_exp, labels, smoothing, half_to_float);
}
| bde1f57b74b41a4a1af13fe060a8358b6afe2336.cu | /**
* From PyTorch:
*
* Copyright (c) 2016- Facebook, Inc (Adam Paszke)
* Copyright (c) 2014- Facebook, Inc (Soumith Chintala)
* Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)
* Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
* Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
* Copyright (c) 2011-2013 NYU (Clement Farabet)
* Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston)
* Copyright (c) 2006 Idiap Research Institute (Samy Bengio)
* Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz)
*
* From Caffe2:
*
* Copyright (c) 2016-present, Facebook Inc. All rights reserved.
*
* All contributions by Facebook:
* Copyright (c) 2016 Facebook Inc.
*
* All contributions by Google:
* Copyright (c) 2015 Google Inc.
* All rights reserved.
*
* All contributions by Yangqing Jia:
* Copyright (c) 2015 Yangqing Jia
* All rights reserved.
*
* All contributions from Caffe:
* Copyright(c) 2013, 2014, 2015, the respective contributors
* All rights reserved.
*
* All other contributions:
* Copyright(c) 2015, 2016 the respective contributors
* All rights reserved.
*
* Caffe2 uses a copyright model similar to Caffe: each contributor holds
* copyright over their contributions to Caffe2. The project versioning records
* all such contribution and copyright details. If a contributor wants to further
* mark their specific copyright on a particular contribution, they should
* indicate their copyright solely in the commit message of the change when it is
* committed.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America
* and IDIAP Research Institute nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/NumericLimits.cuh>
#include <THC/THC.h>
#include <THC/THCGeneral.h>
#include <THC/THCThrustAllocator.cuh>
#include "type_shim.h"
#include "compat.h"
#define ALIGN_BYTES 16
using Tensor = at::Tensor;
using TensorList = at::TensorList;
using ScalarType = at::ScalarType;
using at::acc_type;
// Forward epilogue: converts a raw input value into a log-probability,
// input - (max + log(sum(exp(input - max)))), using a precomputed log-sum-exp.
template<typename T, typename AccumT, typename OutT>
struct LogSoftMaxForwardEpilogue {
  // Construct from the separately-reduced max and sum-of-exponentials.
  __device__ __forceinline__ LogSoftMaxForwardEpilogue(AccumT max_input, AccumT sum)
    : logsum(max_input + std::log(sum)) {}
  // Construct from an already-fused max + log(sum) value (used in backward).
  __device__ __forceinline__ LogSoftMaxForwardEpilogue(AccumT max_log_sum_exp)
    : logsum(max_log_sum_exp) {}
  __device__ __forceinline__ OutT operator()(T input) const {
    return static_cast<OutT>(input - logsum);
  }
  const AccumT logsum;
};
// Backward epilogue for log-softmax: grad - exp(output) * sum(grad).
template<typename T, typename AccumT, typename OutT>
struct LogSoftMaxBackwardEpilogue {
  __device__ __forceinline__ LogSoftMaxBackwardEpilogue(AccumT sum)
    : sum(sum) {}
  __device__ __forceinline__ T operator()(OutT gradOutput, OutT output) const {
    return static_cast<T>(gradOutput - std::exp(static_cast<AccumT>(output)) * sum);
  }
  const AccumT sum;  // sum of incoming gradients over the softmax dimension
};
const int max_threads = 1024;
// Choose a 1-D block size for the softmax kernels: the largest power of two
// strictly below half of min(dim_size / ILP, max_threads), clamped to at
// least one full warp (the reduction kernels assume >= 32 threads).
inline dim3 SoftMax_getBlockSize(int ILP, uint64_t dim_size) {
  const uint64_t cap = std::min(dim_size / ILP, static_cast<uint64_t>(max_threads));
  uint64_t threads = 1;
  while (threads < cap / 2) {
    threads <<= 1;
  }
  // Launch at least a single warp - the kernel assumes that.
  if (threads < 32) {
    threads = 32;
  }
  return dim3(threads);
}
// Binary reduction operator: addition (used for block-level sum reductions).
template<typename T>
struct Add {
  __device__ __forceinline__ T operator()(T a, T b) const {
    return a + b;
  }
};
// Binary reduction operator: maximum (used for block-level max reductions).
template<typename T>
struct Max {
  __device__ __forceinline__ T operator()(T a, T b) const {
    return a < b ? b : a;
  }
};
////////////////////////////////////////////////////////////////////////////////
// Regular kernel (fast when dim_size is large; requires inner_size == 1)
////////////////////////////////////////////////////////////////////////////////
// Fold operator: running max over values widened to the accumulator type.
template <typename T, typename AccumT>
struct MaxFloat
{
  __device__ __forceinline__ AccumT operator()(AccumT max, T v) const {
    return ::max(max, (AccumT)v);
  }
};
// Fold operator: running sum over values widened to the accumulator type.
template<typename T, typename AccumT>
struct AddFloat
{
  __device__ __forceinline__ AccumT operator()(AccumT sum, T v) const {
    return sum + v;
  }
};
// Fold operator: running sum of exp(v - max_k), the numerically stable
// second pass of a log-sum-exp computation.
template<typename T, typename AccumT>
struct SumExpFloat
{
  __device__ __forceinline__ SumExpFloat(AccumT v)
    : max_k(v) {}
  __device__ __forceinline__ AccumT operator()(AccumT sum, T v) const {
    return sum + std::exp(v - max_k);
  }
  const AccumT max_k;  // row maximum, subtracted before exponentiation
};
// Block-wide reduction over one value per thread.
//
// smem must hold at least blockDim.x elements of AccumT. The reduced value
// is broadcast to every thread through smem[0]. Assumes blockDim.x is a
// multiple of 32 (guaranteed by SoftMax_getBlockSize) and at most 1024.
template <template<typename> class Reduction, typename AccumT>
__device__ __forceinline__ AccumT
blockReduce(AccumT* smem, AccumT val,
            const Reduction<AccumT>& r,
            AccumT defaultVal)
{
  // To avoid RaW races from chaining blockReduce calls together, we need a sync here
  __syncthreads();
  smem[threadIdx.x] = val;
  __syncthreads();
  AccumT warpVal = defaultVal;
  // First warp will perform per-warp reductions for the remaining warps
  // One mask bit per warp in the block (blockDim.x / 32 warps total).
  uint32_t mask = (((uint64_t)1) << (blockDim.x / 32)) - 1;
  if (threadIdx.x < 32) {
    int lane = threadIdx.x % 32;
    if (lane < blockDim.x / 32) {
      // Lane i of the first warp folds the 32 values produced by warp i.
#pragma unroll
      for (int i = 0; i < 32; ++i) {
        warpVal = r(warpVal, smem[lane * 32 + i]);
      }
      __syncwarp(mask);
      smem[lane] = warpVal;
    }
  }
  __syncthreads();
  // First thread will perform a reduction of the above per-warp reductions
  AccumT blockVal = defaultVal;
  if (threadIdx.x == 0) {
    for (int i = 0; i < blockDim.x / 32; ++i) {
      blockVal = r(blockVal, smem[i]);
    }
    smem[0] = blockVal;
  }
  // Sync and broadcast
  __syncthreads();
  return smem[0];
}
// Block-wide reduction of two values per thread in a single pass (fuses the
// max- and sum-reductions of the forward kernel to save one round trip).
//
// smem must hold at least 2 * blockDim.x AccumT elements: the first half
// carries val1/r1, the second half val2/r2. Results are broadcast to every
// thread through *reducVal1 / *reducVal2. Assumes blockDim.x is a multiple
// of 32 and at most 1024.
template <template<typename> class Reduction1, template<typename> class Reduction2, typename AccumT>
__device__ __forceinline__ void
blockReduce(AccumT* smem,
            AccumT* reducVal1,
            AccumT val1,
            const Reduction1<AccumT>& r1,
            AccumT defaultVal1,
            AccumT* reducVal2,
            AccumT val2,
            const Reduction2<AccumT>& r2,
            AccumT defaultVal2)
{
  // To avoid RaW races from chaining blockReduce calls together, we need a sync here
  __syncthreads();
  smem[threadIdx.x] = val1;
  smem[blockDim.x + threadIdx.x] = val2;
  __syncthreads();
  AccumT warpVal1 = defaultVal1;
  AccumT warpVal2 = defaultVal2;
  // First warp will perform per-warp reductions for the remaining warps
  // One mask bit per warp in the block.
  uint32_t mask = (((uint64_t)1) << (blockDim.x / 32)) - 1;
  if (threadIdx.x < 32) {
    int lane = threadIdx.x % 32;
    if (lane < blockDim.x / 32) {
      // Lane i of the first warp folds the 32 values produced by warp i,
      // for both reductions at once.
#pragma unroll
      for (int i = 0; i < 32; ++i) {
        warpVal1 = r1(warpVal1, smem[lane * 32 + i]);
        warpVal2 = r2(warpVal2, smem[lane * 32 + i + blockDim.x]);
      }
      __syncwarp(mask);
      smem[lane] = warpVal1;
      smem[lane + blockDim.x] = warpVal2;
    }
  }
  __syncthreads();
  // First thread will perform a reduction of the above per-warp reductions
  AccumT blockVal1 = defaultVal1;
  AccumT blockVal2 = defaultVal2;
  if (threadIdx.x == 0) {
    for (int i = 0; i < blockDim.x / 32; ++i) {
      blockVal1 = r1(blockVal1, smem[i]);
      blockVal2 = r2(blockVal2, smem[i + blockDim.x]);
    }
    smem[0] = blockVal1;
    smem[blockDim.x] = blockVal2;
  }
  // Sync and broadcast
  __syncthreads();
  *reducVal1 = smem[0];
  *reducVal2 = smem[blockDim.x];
  // Keep smem stable until every thread has read the broadcast values.
  __syncthreads();
}
// Per-thread strided reduction with vectorized (ILP-wide) loads.
//
// `shift` is the element misalignment of `data` relative to ALIGN_BYTES;
// the ragged head is handled with scalar accesses so the main loop can use
// aligned vector loads of ILP elements. Returns only this thread's partial
// result — combine across the block with blockReduce afterwards.
template <template<typename, typename> class Reduction, int ILP, typename T, typename AccumT>
__device__ __forceinline__ AccumT
ilpReduce(int shift,
          T* data,
          int size,
          const Reduction<T, AccumT>& r,
          AccumT defaultVal)
{
  // Storage type for one aligned, ILP-wide vector load.
  typedef typename std::aligned_storage<ILP*sizeof(T), ILP*alignof(T)>::type LoadT;
  AccumT threadVal = defaultVal;
  int offset = threadIdx.x;
  // shift and do 1
  if(shift > 0){
    // Rewind to the aligned base; threads with index < shift would fall
    // before the real start, so only threads >= shift touch an element here.
    data -= shift;
    size += shift;
    if(threadIdx.x >= shift){
      threadVal = r(threadVal, data[offset]);
    }
    size -= blockDim.x;
    data += blockDim.x;
  }
  // `last` = leftover elements that do not fill a whole ILP*blockDim.x chunk.
  int last = size % (ILP * blockDim.x);
  T v[ILP];
  LoadT* value = reinterpret_cast<LoadT*>(&v);
  // Main loop: each iteration loads ILP contiguous elements per thread.
  for (; offset * ILP < (size - last); offset += blockDim.x) {
    *value = reinterpret_cast<LoadT*>(data)[offset];
    for (int j = 0; j < ILP; ++j) {
      threadVal = r(threadVal, v[j]);
    }
  }
  offset = size - last + threadIdx.x;
  // Epilogue
  for (; offset < size; offset += blockDim.x)
    threadVal = r(threadVal, data[offset]);
  return threadVal;
}
// Per-thread strided reduction computing two reductions in one pass over the
// data (e.g. max and sum). Same alignment handling as the single-reduction
// ilpReduce; partial results are written to *reducVal1 / *reducVal2 and must
// be combined across the block with blockReduce afterwards.
template <template<typename, typename> class Reduction1, template<typename, typename> class Reduction2, int ILP, typename T, typename AccumT>
__device__ __forceinline__ void
ilpReduce(int shift,
          T* data,
          int size,
          AccumT* reducVal1,
          const Reduction1<T, AccumT>& r1,
          AccumT defaultVal1,
          AccumT* reducVal2,
          const Reduction2<T, AccumT>& r2,
          AccumT defaultVal2)
{
  // Storage type for one aligned, ILP-wide vector load.
  typedef typename std::aligned_storage<ILP*sizeof(T), ILP*alignof(T)>::type LoadT;
  AccumT threadVal1 = defaultVal1;
  AccumT threadVal2 = defaultVal2;
  int offset = threadIdx.x;
  // shift and do 1
  if(shift > 0){
    // Rewind to the aligned base; only threads >= shift touch a real element.
    data -= shift;
    size += shift;
    if(threadIdx.x >= shift){
      threadVal1 = r1(threadVal1, data[offset]);
      threadVal2 = r2(threadVal2, data[offset]);
    }
    size -= blockDim.x;
    data += blockDim.x;
  }
  int last = size % (ILP * blockDim.x);
  T v[ILP];
  LoadT* value = reinterpret_cast<LoadT*>(&v);
  // Main loop: one ILP-wide vector load feeds both reductions.
  for (; offset * ILP < (size - last); offset += blockDim.x) {
    *value = reinterpret_cast<LoadT*>(data)[offset];
    for (int j = 0; j < ILP; ++j) {
      threadVal1 = r1(threadVal1, v[j]);
      threadVal2 = r2(threadVal2, v[j]);
    }
  }
  offset = size - last + threadIdx.x;
  // Epilogue
  for (; offset < size; offset += blockDim.x) {
    threadVal1 = r1(threadVal1, data[offset]);
    threadVal2 = r2(threadVal2, data[offset]);
  }
  *reducVal1 = threadVal1;
  *reducVal2 = threadVal2;
}
// Fused softmax + cross-entropy forward with label smoothing.
//
// One block per sample (grid = batch size); dynamic shared memory must be
// 2 * blockDim.x * sizeof(accscalar_t). Writes the per-sample loss and
// saves max + log(sum(exp)) per sample for the backward pass.
// ALIGN_BYTES, at::numeric_limits and the Epilogue template are defined
// elsewhere in this file/project.
template <int ILP, typename scalar_t, typename accscalar_t, typename outscalar_t, template <typename, typename, typename> class Epilogue>
__global__ void
cunn_SoftMaxXEntropyForward(
    accscalar_t *losses,
    outscalar_t *max_log_sum_exp,
    scalar_t *input,
    int64_t *labels,
    int64_t classes,
    const float smoothing)
{
  extern __shared__ unsigned char smem[];
  auto sdata = reinterpret_cast<accscalar_t*>(smem);
  // forward pointers to batch[blockIdx.x]
  // each block handles a sample in the mini-batch
  input += blockIdx.x * classes;
  //output += blockIdx.x * classes;
  // Element misalignment of this row relative to ALIGN_BYTES (vector loads).
  const int shift = ((uint64_t)input) % ALIGN_BYTES / sizeof(scalar_t);
  int64_t label = labels[blockIdx.x];
  // find the max and sum
  // First fused pass: per-thread max of the logits and sum of the raw logits.
  accscalar_t threadMax, threadSum, max_k, sum_k;
  ilpReduce<MaxFloat, AddFloat, ILP, scalar_t, accscalar_t>(
      shift, input, classes,
      &threadMax, MaxFloat<scalar_t, accscalar_t>(),
      -at::numeric_limits<accscalar_t>::max(),
      &threadSum, AddFloat<scalar_t, accscalar_t>(),
      static_cast<accscalar_t>(0));
  blockReduce<Max, Add, accscalar_t>(
      sdata,
      &max_k, threadMax, Max<accscalar_t>(),
      -at::numeric_limits<accscalar_t>::max(),
      &sum_k, threadSum, Add<accscalar_t>(),
      static_cast<accscalar_t>(0));
  // Second pass: sum of exp(x - max) for a numerically stable log-sum-exp.
  accscalar_t threadExp = ilpReduce<SumExpFloat, ILP, scalar_t, accscalar_t>(shift, input, classes, SumExpFloat<scalar_t, accscalar_t>(max_k), static_cast<accscalar_t>(0));
  accscalar_t sumAll = blockReduce<Add, accscalar_t>(
      sdata, threadExp, Add<accscalar_t>(), static_cast<accscalar_t>(0));
  Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(max_k, sumAll);
  // calculate per element loss with label smoothing
  // reserve max + log_sum_exp for bprop
  // loss = smoothing * (logsumexp - mean(logits)) - (1 - smoothing) * log_prob[label]
  if (threadIdx.x == 0) {
    accscalar_t log_prob = epilogue(static_cast<accscalar_t>(input[label]));
    losses[blockIdx.x] = (max_k + std::log(sumAll) - sum_k / classes) \
      * smoothing - log_prob * (1 - smoothing);
    max_log_sum_exp[blockIdx.x] = max_k + std::log(sumAll);
  }
}
// Scalar (non-vectorized) backward path, used when gradInput and logits do
// not share the same alignment. Per element:
//   grad = gradOutput * (softmax(logit) - smoothed_one_hot)
// where the smoothed one-hot target is (1 - smoothing) at `label` and
// smoothing / classes everywhere.
template <int ILP, typename scalar_t, typename accscalar_t, typename outscalar_t>
__device__ __forceinline__ void
apply(scalar_t *gradInput,
      scalar_t *logits,
      outscalar_t *max_log_sum_exp,
      outscalar_t *gradOutput,
      int64_t *labels,
      const float smoothing,
      int classes)
{
  accscalar_t smooth_positives = 1.0 - smoothing;
  accscalar_t smooth_negatives = smoothing / classes;
  accscalar_t tmpGradOutput = gradOutput[blockIdx.x];
  int64_t label = labels[blockIdx.x];
  // coeff = max + log(sum(exp)) saved by the forward pass, so
  // exp(logit - coeff) below is exactly the softmax probability.
  accscalar_t coeff = max_log_sum_exp[blockIdx.x];
  int offset = threadIdx.x;
  // `last` = elements left over after the unrolled ILP chunks.
  int last = classes % (ILP * blockDim.x);
  // Main loop: each thread handles ILP strided elements per iteration.
  for (; offset < classes - last; offset += blockDim.x * ILP) {
    accscalar_t tmpLogits[ILP];
#pragma unroll
    for (int j = 0; j < ILP; ++j) {
      tmpLogits[j] = static_cast<accscalar_t>(logits[offset + j * blockDim.x]);
    }
#pragma unroll
    for (int j = 0; j < ILP; ++j)
      gradInput[offset + j * blockDim.x] = tmpGradOutput * (
        std::exp(tmpLogits[j] - coeff) - static_cast<accscalar_t>(
        (offset + j * blockDim.x == label) ? 1 : 0) *
        smooth_positives - smooth_negatives);
  }
  // Tail elements.
  for (; offset < classes; offset += blockDim.x)
    gradInput[offset] = tmpGradOutput * (std::exp(
      static_cast<accscalar_t>(logits[offset]) - coeff) -
      static_cast<accscalar_t>((offset == label) ? 1 : 0) *
      smooth_positives - smooth_negatives);
}
// Vectorized backward path: same math as apply(), but with ILP-wide aligned
// loads/stores. Requires gradInput and logits to share the same element
// misalignment `shift` relative to ALIGN_BYTES (checked by the caller).
template <int ILP, typename scalar_t, typename accscalar_t, typename outscalar_t>
__device__ __forceinline__ void
aligned_apply(int shift,
              scalar_t *gradInput,
              scalar_t *logits,
              outscalar_t *max_log_sum_exp,
              outscalar_t *gradOutput,
              int64_t *labels,
              const float smoothing,
              int classes)
{
  accscalar_t smooth_positives = 1.0 - smoothing;
  accscalar_t smooth_negatives = smoothing / classes;
  accscalar_t tmpGradOutput = gradOutput[blockIdx.x];
  int64_t label = labels[blockIdx.x];
  // coeff = max + log(sum(exp)) from the forward pass; exp(logit - coeff)
  // is the softmax probability.
  accscalar_t coeff = max_log_sum_exp[blockIdx.x];
  int offset = threadIdx.x;
  // shift and do 1
  if(shift > 0){
    // Rewind both pointers to the aligned base and handle the ragged head
    // with scalar accesses; (offset - shift) restores the logical index.
    logits -= shift;
    gradInput -= shift;
    classes += shift;
    if(threadIdx.x >= shift){
      gradInput[offset] = tmpGradOutput * (std::exp(
        static_cast<accscalar_t>(logits[offset]) - coeff) -
        static_cast<accscalar_t>(((offset - shift) == label) ? 1 : 0) *
        smooth_positives - smooth_negatives);
    }
    classes -= blockDim.x;
    gradInput += blockDim.x;
    logits += blockDim.x;
    // shift goes negative here; subtracting it later re-adds the pointer
    // advance so the original element index can be compared against label.
    shift -= blockDim.x;
  }
  int last = classes % (ILP * blockDim.x);
  typedef typename std::aligned_storage<ILP*sizeof(scalar_t), ILP*alignof(scalar_t)>::type LoadT;
  // input
  scalar_t v[ILP];
  LoadT* value = reinterpret_cast<LoadT*>(&v);
  // output
  scalar_t r[ILP];
  LoadT* result = reinterpret_cast<LoadT*>(&r);
  // Main loop: one vector load, ILP gradient computations, one vector store.
  for (; offset * ILP < (classes - last); offset += blockDim.x) {
    *value = reinterpret_cast<LoadT*>(logits)[offset];
#pragma unroll
    for (int j = 0; j < ILP; ++j) {
      r[j] = tmpGradOutput * (std::exp(
        static_cast<accscalar_t>(v[j]) - coeff) -
        static_cast<accscalar_t>(((ILP * offset + j - shift) == label) ? 1 : 0) *
        smooth_positives - smooth_negatives);
    }
    reinterpret_cast<LoadT*>(gradInput)[offset] = *result;
  }
  offset = classes - last + threadIdx.x;
  // Tail elements after the last full vector chunk.
  for (; offset < classes; offset += blockDim.x)
    gradInput[offset] = tmpGradOutput * (std::exp(
      static_cast<accscalar_t>(logits[offset]) - coeff) -
      static_cast<accscalar_t>(((offset - shift) == label) ? 1 : 0) *
      smooth_positives - smooth_negatives);
}
// Backward kernel for the fused softmax cross-entropy: one block per sample.
// Picks the vectorized path only when input and output rows share the same
// misalignment, otherwise falls back to the scalar path.
template <int ILP, typename scalar_t, typename accscalar_t, typename outscalar_t, template<typename, typename, typename> class Epilogue>
__global__ void
cunn_SoftMaxXEntropyBackward(
    scalar_t *gradInput,
    scalar_t *logits,
    outscalar_t *max_log_sum_exp,
    outscalar_t *gradOutput,
    int64_t *labels,
    const float smoothing,
    int classes)
{
  // Advance both row pointers to this block's sample.
  logits += blockIdx.x * classes;
  gradInput += blockIdx.x * classes;
  // Do vectorized load/store when input/output have same alignment
  const int in_shift  = ((uint64_t)logits)    % ALIGN_BYTES / sizeof(scalar_t);
  const int out_shift = ((uint64_t)gradInput) % ALIGN_BYTES / sizeof(scalar_t);
  if (in_shift != out_shift) {
    apply<ILP, scalar_t, accscalar_t, outscalar_t>(
        gradInput, logits, max_log_sum_exp, gradOutput, labels, smoothing, classes);
  } else {
    aligned_apply<ILP, scalar_t, accscalar_t, outscalar_t>(
        in_shift, gradInput, logits, max_log_sum_exp, gradOutput, labels, smoothing, classes);
  }
}
// Host-side launcher for the fused softmax cross-entropy forward.
//
// Returns {losses (always float), max_log_sum_exp (float when
// half_to_float, else input dtype)}; the second tensor is consumed by the
// backward pass. Only 2-D input (batch x classes), reduced over dim 1, is
// supported. Tensor, DISPATCH_FLOAT_AND_HALF, DATA_PTR, THCudaCheck etc.
// come from the surrounding ATen-based project.
template<template<typename, typename, typename> class Epilogue>
std::vector<Tensor> host_softmax_xentropy(
    const Tensor & input_,
    const Tensor & labels_,
    const float smoothing,
    const bool half_to_float){
  if (half_to_float) AT_ASSERTM(input_.type().scalarType() == ScalarType::Half,"conversion is supported for Half type only");
  AT_ASSERTM(labels_.type().scalarType() == ScalarType::Long,"Label type should be CUDA Long");
  auto input = input_.contiguous();
  Tensor max_log_sum_exp = at::empty_like(labels_, half_to_float ? input.options().dtype(ScalarType::Float) : input.options());
  // Per-sample losses are always accumulated and stored in float.
  Tensor losses = at::empty_like(labels_, input_.options().dtype(ScalarType::Float));
  static_assert(std::is_same<acc_type<at::Half, true>, float>::value ||
                std::is_same<acc_type<at::Half, true>, double>::value,
                "accscalar_t for half should be float or double");
  AT_ASSERTM(input.dim() == 2, "Currently only 2 dim input supported");
  AT_ASSERTM(labels_.dim() == 1, "Labels should be 1 dimensional");
  AT_ASSERTM(input.size(0) == labels_.size(0), "Input and label should have same number of examples");
  AT_ASSERTM(input.numel() > 0, "Number of classes in input should not be 0");
  const int64_t dim = 1;
  int64_t outer_size = 1;
  int64_t dim_size = input.size(dim);
  int64_t inner_size = 1;
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  for (int64_t i = 0; i < dim; ++i)
    outer_size *= input.size(i);
  for (int64_t i = dim + 1; i < input.dim(); ++i)
    inner_size *= input.size(i);
  // This kernel spawns a block per each element in the batch.
  // XXX: it assumes that inner_size == 1
  TORCH_CHECK(inner_size == 1, "Currently only inner size 1 supported");
  dim3 grid(outer_size);
  using namespace at;
  DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "host_softmax_xentropy",
    using accscalar_t = at::acc_type<scalar_t_0, true>;
    // Vector width: one float4-sized load per thread per iteration.
    const int ILP = sizeof(float4)/sizeof(scalar_t_0);
    dim3 block = SoftMax_getBlockSize(ILP, dim_size);
    // Dynamic shared memory: two reduction buffers of block.x accumulators
    // (see the two-value blockReduce).
    if (!half_to_float) {
      cunn_SoftMaxXEntropyForward<ILP, scalar_t_0, accscalar_t, scalar_t_0, Epilogue>
        <<<grid, block, 2 * block.x * sizeof(accscalar_t), stream>>>(
          losses.DATA_PTR<accscalar_t>(), max_log_sum_exp.DATA_PTR<scalar_t_0>(),
          input.DATA_PTR<scalar_t_0>(), labels_.DATA_PTR<int64_t>(),
          dim_size, smoothing
      );
    } else {
      cunn_SoftMaxXEntropyForward<ILP, scalar_t_0, accscalar_t, accscalar_t, Epilogue>
        <<<grid, block, 2 * block.x * sizeof(accscalar_t), stream>>>(
          losses.DATA_PTR<accscalar_t>(), max_log_sum_exp.DATA_PTR<accscalar_t>(),
          input.DATA_PTR<scalar_t_0>(), labels_.DATA_PTR<int64_t>(),
          dim_size, smoothing
      );
    }
  );
  // Surface any launch-configuration error immediately.
  THCudaCheck(cudaGetLastError());
  std::vector<at::Tensor> ret = {losses, max_log_sum_exp};
  return ret;
}
// Host-side launcher for the fused softmax cross-entropy backward.
//
// Returns the gradient w.r.t. the logits (same dtype/shape as logits_).
// grad_loss is the upstream per-sample gradient; max_log_sum_exp is the
// tensor saved by the forward pass. Only 2-D logits are supported.
template<template<typename, typename, typename> class Epilogue>
Tensor host_softmax_xentropy_backward(
    const at::Tensor &grad_loss,
    const at::Tensor &logits_,
    const at::Tensor &max_log_sum_exp,
    const at::Tensor &labels,
    const float smoothing,
    bool half_to_float) {
  const int64_t dim = 1;
  Tensor gI = at::empty_like(logits_);
  // Empty upstream gradient: nothing to compute.
  if (grad_loss.numel() == 0) {
    return gI;
  }
  auto grad = grad_loss.contiguous();
  auto logits = logits_.contiguous();
  static_assert(std::is_same<acc_type<at::Half, true>, float>::value ||
                std::is_same<acc_type<at::Half, true>, double>::value,
                "accscalar_t for half should be float or double");
  if (grad.dim() == 0) grad = grad.view(1);
  AT_ASSERTM(logits_.dim() == 2, "Currently only 2 dim input supported");
  AT_ASSERTM(labels.dim() == 1, "Labels should be 1 dimensional");
  AT_ASSERTM(logits_.numel() > 0, "Number of classes in input should not be 0");
  AT_ASSERTM(logits_.size(0) == labels.size(0), "Input and label should have same number of examples");
  AT_ASSERTM(labels.size(0) == grad.size(0), "Label and loss should have same number of examples");
  int64_t outer_size = 1;
  int64_t dim_size = logits.size(dim);
  int64_t inner_size = 1;
  for (int64_t i = 0; i < dim; ++i)
    outer_size *= logits.size(i);
  for (int64_t i = dim + 1; i < logits.dim(); ++i)
    inner_size *= logits.size(i);
  // See descriptions of kernels above.
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  TORCH_CHECK(inner_size == 1, "Currently only inner size 1 supported");
  // One block per sample.
  dim3 grid(outer_size);
  DISPATCH_FLOAT_AND_HALF(gI.scalar_type(), 0, "host_softmax_xentropy_backward",
    using accscalar_t = acc_type<scalar_t_0, true>;
    // Vector width: one float4-sized load per thread per iteration.
    const int ILP = sizeof(float4)/sizeof(scalar_t_0);
    dim3 block = SoftMax_getBlockSize(ILP, dim_size);
    if (!half_to_float) {
      cunn_SoftMaxXEntropyBackward<ILP, scalar_t_0, accscalar_t, scalar_t_0, Epilogue>
        <<<grid, block, block.x * sizeof(accscalar_t), stream>>>(
          gI.DATA_PTR<scalar_t_0>(), logits.DATA_PTR<scalar_t_0>(),
          max_log_sum_exp.DATA_PTR<scalar_t_0>(),
          grad.DATA_PTR<scalar_t_0>(), labels.DATA_PTR<int64_t>(),
          smoothing, dim_size
      );
    } else {
      cunn_SoftMaxXEntropyBackward<ILP, scalar_t_0, accscalar_t, accscalar_t, Epilogue>
        <<<grid, block, block.x * sizeof(accscalar_t), stream>>>(
          gI.DATA_PTR<scalar_t_0>(), logits.DATA_PTR<scalar_t_0>(),
          max_log_sum_exp.DATA_PTR<accscalar_t>(),
          grad.DATA_PTR<accscalar_t>(), labels.DATA_PTR<int64_t>(),
          smoothing, dim_size
      );
    }
  );
  // Surface any launch-configuration error immediately.
  THCudaCheck(cudaGetLastError());
  return gI;
}
// Public entry point: forward softmax cross-entropy with the log-softmax
// epilogue. Thin dispatch wrapper around host_softmax_xentropy.
std::vector<Tensor> softmax_xentropy_cuda(const Tensor &input, const Tensor &labels, const float smoothing, const bool half_to_float){
    return host_softmax_xentropy<LogSoftMaxForwardEpilogue>(
        input, labels, smoothing, half_to_float);
}
// Public entry point: backward softmax cross-entropy. Detects the mixed
// precision case (fp16 logits with fp32 loss gradients) from the dtypes.
at::Tensor softmax_xentropy_backward_cuda(
    const at::Tensor &grad_loss,
    const at::Tensor &logits,
    const at::Tensor &max_log_sum_exp,
    const at::Tensor &labels,
    const float smoothing) {
    const auto grad_type  = grad_loss.type().scalarType();
    const auto logit_type = logits.type().scalarType();
    const bool half_to_float = grad_type != logit_type;
    if (half_to_float) {
        // Only fp32-grad / fp16-logits mixing is allowed.
        AT_ASSERTM((grad_type == ScalarType::Float && logit_type == ScalarType::Half), "expected input and grad types to match, or input to be at::Half and grad to be at::Float");
    }
    return host_softmax_xentropy_backward<LogSoftMaxBackwardEpilogue>(grad_loss, logits, max_log_sum_exp, labels, smoothing, half_to_float);
}
|
bfe2a521082b574d65b06a0c965dddcf35eab47d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// adaptation of Pavel's imreconstruction code for openCV
#include "change_kernel.cuh"
#include <sys/time.h>
#include <stdio.h>
#define MAX_THREADS 256
#define YX_THREADS 64
#define YY_THREADS 4
#define X_THREADS 32
#define Y_THREADS 64
#define XX_THREADS 4
#define XY_THREADS 64
#define NEQ(a,b) ( (a) != (b) )
#define WARP_SIZE 32
/**
 * Wall-clock time in milliseconds since the Unix epoch.
 *
 * Widens tv_sec to 64 bits before the microsecond multiply so the
 * intermediate cannot overflow on platforms where time_t/long are 32-bit
 * (the original `ts.tv_sec * 1000000` overflowed there).
 */
long ClockGetTime()
{
	struct timeval ts;
	gettimeofday(&ts, NULL);
	// timespec ts;
	// clock_gettime(CLOCK_REALTIME, &ts);
	return (long)(((long long)ts.tv_sec * 1000000LL + (long long)ts.tv_usec) / 1000LL);
	// return (uint64_t)ts.tv_sec * 1000000LL + (uint64_t)ts.tv_nsec / 1000LL;
}
namespace nscale { namespace gpu {
////////////////////////////////////////////////////////////////////////////////
// RECONSTRUCTION BY DILATION
////////////////////////////////////////////////////////////////////////////////
/*
* fast code
*/
//template <typename T>
//__global__ void
//iRec1DForward_X_dilation2 (T* __restrict__ marker, const T* __restrict__ mask, const int sx, const int sy, bool* __restrict__ change )
//{
//
// const int ty = threadIdx.x;
// const int by = blockIdx.x * blockDim.x;
//
// volatile __shared__ T s_marker[Y_THREADS][Y_THREADS+1];
// volatile __shared__ T s_mask [Y_THREADS][Y_THREADS+1];
// bool s_change = false;
//
//
//
// int startx, iy, ix;
//
// T s_old;
// // the increment allows overlap by 1 between iterations to move the data to next block.
// for (startx = 0; startx < sx - Y_THREADS; startx += Y_THREADS - 1) {
// // copy part of marker and mask to shared memory
// for (iy = 0; iy < Y_THREADS && by+iy<sy; ++iy) {
// // now treat ty as x, and iy as y, so global mem acccess is closer.
// s_marker[ty][iy] = marker[(by + iy)*sx + startx + ty];
// s_mask [ty][iy] = mask [(by + iy)*sx + startx + ty];
// }
// __syncthreads();
//
// // perform iteration all X threads do the same operations, so there may be read/write hazards. but the output is the same.
// // this is looping for BLOCK_SIZE times, and each iteration the final results are propagated 1 step closer to tx.
// if (by + ty < sy) {
// for (ix = 1; ix < Y_THREADS; ++ix) {
// s_old = s_marker[ix][ty];
// s_marker[ix][ty] = max( s_marker[ix][ty], s_marker[ix-1][ty] );
// s_marker[ix][ty] = min( s_marker[ix][ty], s_mask [ix] [ty] );
// s_change |= NEQ( s_old, s_marker[ix][ty] );
// }
//} __syncthreads();
//
// // output result back to global memory
// for (iy = 0; iy < Y_THREADS && by+iy<sy; ++iy) {
// // now treat ty as x, and iy as y, so global mem acccess is closer.
// marker[(by + iy)*sx + startx + ty] = s_marker[ty][iy];
// }
// __syncthreads();
//
// }
//
// startx = sx - Y_THREADS;
//
// // copy part of marker and mask to shared memory
// for (iy = 0; iy < Y_THREADS && by+iy<sy; ++iy) {
// // now treat ty as x, and iy as y, so global mem acccess is closer.
// s_marker[ty][iy] = marker[(by + iy)*sx + startx + ty];
// s_mask [ty][iy] = mask [(by + iy)*sx + startx + ty];
// }
// __syncthreads();
//
// // perform iteration
// if (by + ty < sy) {
// for (ix = 1; ix < Y_THREADS; ++ix) {
// s_old = s_marker[ix][ty];
// s_marker[ix][ty] = max( s_marker[ix][ty], s_marker[ix-1][ty] );
// s_marker[ix][ty] = min( s_marker[ix][ty], s_mask [ix] [ty] );
// s_change |= NEQ( s_old, s_marker[ix][ty] );
// }
//} __syncthreads();
//
// // output result back to global memory
// for (iy = 0; iy < Y_THREADS && by+iy<sy; ++iy) {
// // now treat ty as x, and iy as y, so global mem acccess is closer.
// marker[(by + iy)*sx + startx + ty] = s_marker[ty][iy];
// if (s_change) *change = true;
// }
// __syncthreads();
//
//
//}
//
//template <typename T>
//__global__ void
//iRec1DBackward_X_dilation2 (T* __restrict__ marker, const T* __restrict__ mask, const int sx, const int sy, bool* __restrict__ change )
//{
//
// const int ty = threadIdx.x;
// const int by = blockIdx.x * Y_THREADS;
// // always 0. const int bz = blockIdx.y;
//
// volatile __shared__ T s_marker[Y_THREADS][Y_THREADS+1];
// volatile __shared__ T s_mask [Y_THREADS][Y_THREADS+1];
// bool s_change = false;
//
//
//
// int startx;
//
// T s_old;
// for (startx = sx - Y_THREADS; startx > 0; startx -= Y_THREADS - 1) {
//
// // copy part of marker and mask to shared memory
// for (int iy = 0; iy < Y_THREADS && by+iy<sy; iy++) {
// // now treat ty as x, and iy as y, so global mem acccess is closer.
// s_marker[ty][iy] = marker[(by + iy)*sx + startx + ty];
// s_mask [ty][iy] = mask [(by + iy)*sx + startx + ty];
// }
// __syncthreads();
//
// // perform iteration
// if (by + ty < sy) {
// for (int ix = Y_THREADS - 2; ix >= 0; ix--) {
// s_old = s_marker[ix][ty];
// s_marker[ix][ty] = max( s_marker[ix][ty], s_marker[ix+1][ty] );
// s_marker[ix][ty] = min( s_marker[ix][ty], s_mask [ix] [ty] );
// s_change |= NEQ( s_old, s_marker[ix][ty] );
// }
//} __syncthreads();
//
// // output result back to global memory
// for (int iy = 0; iy < Y_THREADS && by+iy<sy; iy++) {
// // now treat ty as x, and iy as y, so global mem acccess is closer.
// marker[(by + iy)*sx + startx + ty] = s_marker[ty][iy];
// }
// __syncthreads();
//
// }
//
// startx = 0;
//
// // copy part of marker and mask to shared memory
// for (int iy = 0; iy < Y_THREADS && by+iy<sy; iy++) {
// // now treat ty as x, and iy as y, so global mem acccess is closer.
// s_marker[ty][iy] = marker[(by + iy)*sx + startx + ty];
// s_mask [ty][iy] = mask [(by + iy)*sx + startx + ty];
// }
// __syncthreads();
//
// // perform iteration
// if (by + ty < sy) {
// for (int ix = Y_THREADS - 2; ix >= 0; ix--) {
// s_old = s_marker[ix][ty];
// s_marker[ix][ty] = max( s_marker[ix][ty], s_marker[ix+1][ty] );
// s_marker[ix][ty] = min( s_marker[ix][ty], s_mask [ix] [ty] );
// s_change |= NEQ( s_old, s_marker[ix][ty] );
// }
//} __syncthreads();
//
// // output result back to global memory
// for (int iy = 0; iy < Y_THREADS && by+iy<sy; iy++) {
// // now treat ty as x, and iy as y, so global mem acccess is closer.
// marker[(by + iy)*sx + startx + ty] = s_marker[ty][iy];
// if (s_change) *change = true;
// }
// __syncthreads();
//
//
//
//}
////////////////////////////////////////////////////////////////////////////////
// RECONSTRUCTION BY DILATION
////////////////////////////////////////////////////////////////////////////////
/*
* warp = 32. shared memory in banks of 32, each 32 bits (128 bytes wide) - interleave of 4 for rows? no need. compute 2 has no conflict for read/write bytes.
* global memory in partitions of 256 bytes. 1 warp at a time at 1, 2, 4, 8, or 16 bytes. width of array and threadblock = warpsize * c,
* try to remove syncthreads by making sure warps do not diverge(and use volatile)
* thread id = x + y * Dx. so this means if x and y are swapped between mem and compute steps, must have sync...
* IF 32 x 8 threads, repeat 4 times in y. read single char from global, then swap x and y to process 32 y at a time, would need to syncthread inside iterations. can use 1 warp to go through all shared mem iteratively, or have each warp compute 4 bytes 4 columns (warps are ordered)
* IF 8x4 or 4x8 threads for a warp, read 1 bytes from global (linearize the warp thread id (e.g. x + y*8 or x+y*4) to read from global sequentially, and repeat 4 or 8 times) then process the memory for this warp 4 y or 8 y iteratively, repeat for all x chunks. essentially the original algorithm. then create threadblock that is just multiplied in y to reach 192 or 256. avoids syncthreads completely.
* or alternatively, treat each warp as 4x8, and each x process columns 8 apart. each warp then do 4 bytes, (8 warps), to generate 8x8 blocks that are completed. - no synthreads needed. - no... would require more kernel iterations
for backward: thread ids should map to the data - so first thread has the last data.... ( for correctness)
for y, similar to this...
for register usage: use unsigned int where possible. maybe use 1D shared array would be better too...
*/
// Forward (left-to-right) pass of 1-D grayscale reconstruction-by-dilation
// along x: marker values are propagated rightward and clamped by the mask;
// *change is set when any pixel was modified so the caller can iterate to a
// fixpoint. T must be an integer pixel type (s_new ^ s_old below).
//
// Launch layout: blockDim = (XX_THREADS, XY_THREADS); each block covers
// XY_THREADS rows. The thread id is re-mapped so each warp loads WARP_SIZE
// consecutive x-pixels (coalesced). Assumes sx >= WARP_SIZE.
// NOTE(review): there are no __syncthreads between the shared-memory load,
// compute and store phases — correctness appears to rely on warp-synchronous
// execution of each row (pre-Volta semantics); confirm on newer hardware.
template <typename T>
__global__ void
iRec1DForward_X_dilation ( T* marker, const T* mask, const unsigned int sx, const unsigned int sy, bool* change )
{
	// Re-linearize the (XX_THREADS x XY_THREADS) block into warp-aligned
	// coordinates: x in [0, WARP_SIZE), y selects a chunk of rows.
	const unsigned int x = (threadIdx.x + threadIdx.y * XX_THREADS) % WARP_SIZE;
	const unsigned int y = (threadIdx.x + threadIdx.y * XX_THREADS) / WARP_SIZE;
	const unsigned int ychunk = WARP_SIZE / XX_THREADS;
	const unsigned int xstop = sx - WARP_SIZE;
	// printf("(tx, ty) -> (x, y) : (%d, %d)->(%d,%d)\n", threadIdx.x, threadIdx.y, x, y);
	// XY_THREADS should be 32==warpSize, XX_THREADS should be 4 or 8.
	// init to 0...
	// Column 0 of s_marker is the carry-in from the previous x-chunk;
	// columns 1..WARP_SIZE hold the current chunk.
	volatile __shared__ T s_marker[XY_THREADS][WARP_SIZE+1];
	volatile __shared__ T s_mask [XY_THREADS][WARP_SIZE+1];
	// NOTE(review): despite the s_ prefix this is a per-thread local, not
	// shared memory; each thread accumulates its own change flag.
	volatile unsigned int s_change = 0;
	T s_old, s_new;
	unsigned int startx;
	unsigned int start;
	s_marker[threadIdx.y][WARP_SIZE] = 0;  // only need x=0 to be 0
	// the increment allows overlap by 1 between iterations to move the data to next block.
	for (startx = 0; startx < xstop; startx += WARP_SIZE) {
		start = (blockIdx.x * XY_THREADS + y * ychunk) * sx + startx + x;
		// printf("tx: %d, ty: %d, x: %d, y: %d, startx: %d, start: %d", threadIdx.x, threadIdx.y, x, y, startx, start);
		// Seed the carry-in with the last column of the previous chunk.
		s_marker[threadIdx.y][0] = s_marker[threadIdx.y][WARP_SIZE];
		// copy part of marker and mask to shared memory. works for 1 warp at a time...
		//#pragma unroll
		for (unsigned int i = 0; i < ychunk && y*ychunk+i < sy; ++i) {
			s_marker[y * ychunk+i][x+1] = marker[start + i*sx];
			s_mask [y * ychunk+i][x+1] = mask[start + i*sx];
		}
		// perform iteration all X threads do the same operations, so there may be read/write hazards. but the output is the same.
		// this is looping for BLOCK_SIZE times, and each iteration the final results are propagated 1 step closer to tx.
		// if (threadIdx.x == 0) { // have all threads do the same work
		//#pragma unroll
		if (threadIdx.y + blockIdx.x * XY_THREADS < sy) { //require dimension to be perfectly padded.
			// Sequential left-to-right scan of this row's WARP_SIZE pixels:
			// dilate from the left neighbour, clamp by the mask.
			for (unsigned int i = 1; i <= WARP_SIZE; ++i) {
				s_old = s_marker[threadIdx.y][i];
				s_new = min( max( s_marker[threadIdx.y][i-1], s_old ), s_mask[threadIdx.y][i] );
				s_change |= s_new ^ s_old;
				s_marker[threadIdx.y][i] = s_new;
			}
		}
		// output result back to global memory and set up for next x chunk
		//#pragma unroll
		for (unsigned int i = 0; i < ychunk && y*ychunk+i < sy; ++i) {
			marker[start + i*sx] = s_marker[y * ychunk+i][x+1];
		}
		// printf("startx: %d, change = %d\n", startx, s_change);
	}
	// Remainder: re-process the last (possibly overlapping) WARP_SIZE-wide chunk.
	if (startx < sx) {
		s_marker[threadIdx.y][0] = s_marker[threadIdx.y][sx-startx];  // getting ix-1st entry, which has been offsetted by 1 in s_marker
		// shared mem copy
		startx = sx - WARP_SIZE;
		start = (blockIdx.x * XY_THREADS + y * ychunk) * sx + startx + x;
		// printf("tx: %d, ty: %d, x: %d, y: %d, startx: %d, start: %d", threadIdx.x, threadIdx.y, x, y, startx, start);
		// copy part of marker and mask to shared memory. works for 1 warp at a time...
		//#pragma unroll
		for (unsigned int i = 0; i < ychunk && y*ychunk+i < sy; ++i) {
			s_marker[y * ychunk+i][x+1] = marker[start + i*sx];
			s_mask [y * ychunk+i][x+1] = mask[start + i*sx];
		}
		// perform iteration all X threads do the same operations, so there may be read/write hazards. but the output is the same.
		// this is looping for BLOCK_SIZE times, and each iteration the final results are propagated 1 step closer to tx.
		//#pragma unroll
		if (threadIdx.y + blockIdx.x * XY_THREADS < sy) { //require dimension to be perfectly padded.
			for (unsigned int i = 1; i <= WARP_SIZE; ++i) {
				s_old = s_marker[threadIdx.y][i];
				s_new = min( max( s_marker[threadIdx.y][i-1], s_old ), s_mask[threadIdx.y][i] );
				s_change |= s_new ^ s_old;
				s_marker[threadIdx.y][i] = s_new;
			}
		}
		// output result back to global memory and set up for next x chunk
		//#pragma unroll
		for (unsigned int i = 0; i < ychunk && y*ychunk+i < sy; ++i) {
			marker[start + i*sx] = s_marker[y * ychunk+i][x+1];
		}
	}
	// __syncthreads();
	if (s_change > 0) *change = true;
	// __syncthreads();
}
// Backward (right-to-left) pass of 1-D grayscale reconstruction-by-dilation
// along x: mirror image of iRec1DForward_X_dilation. Marker values are
// propagated leftward, clamped by the mask; *change is set when any pixel
// was modified. T must be an integer pixel type (s_new ^ s_old below).
//
// Same launch layout and warp re-mapping as the forward kernel; here column
// WARP_SIZE of s_marker is the carry-in from the chunk to the right.
// NOTE(review): like the forward kernel, there are no __syncthreads between
// the load/compute/store phases — relies on warp-synchronous execution.
template <typename T>
__global__ void
iRec1DBackward_X_dilation ( T* __restrict__ marker, const T* __restrict__ mask, const int sx, const int sy, bool* __restrict__ change )
{
	// Re-linearize the (XX_THREADS x XY_THREADS) block into warp-aligned
	// coordinates: x in [0, WARP_SIZE), y selects a chunk of rows.
	const unsigned int x = (threadIdx.x + threadIdx.y * XX_THREADS) % WARP_SIZE;
	const unsigned int y = (threadIdx.x + threadIdx.y * XX_THREADS) / WARP_SIZE;
	const unsigned int ychunk = WARP_SIZE / XX_THREADS;
	const unsigned int xstop = sx - WARP_SIZE;
	// printf("(tx, ty) -> (x, y) : (%d, %d)->(%d,%d)\n", threadIdx.x, threadIdx.y, x, y);
	// XY_THREADS should be 32==warpSize, XX_THREADS should be 4 or 8.
	// init to 0...
	volatile __shared__ T s_marker[XY_THREADS][WARP_SIZE+1];
	volatile __shared__ T s_mask [XY_THREADS][WARP_SIZE+1];
	// NOTE(review): per-thread local despite the s_ prefix (not shared memory).
	volatile unsigned int s_change = 0;
	T s_old, s_new;
	// startx is signed: the loop below terminates by going <= 0.
	int startx;
	unsigned int start;
	s_marker[threadIdx.y][0] = 0;  // only need x=WARPSIZE to be 0
	// the increment allows overlap by 1 between iterations to move the data to next block.
	for (startx = xstop; startx > 0; startx -= WARP_SIZE) {
		start = (blockIdx.x * XY_THREADS + y * ychunk) * sx + startx + x;
		// printf("tx: %d, ty: %d, x: %d, y: %d, startx: %d, start: %d", threadIdx.x, threadIdx.y, x, y, startx, start);
		// Seed the right-hand carry-in with column 0 of the previous chunk.
		s_marker[threadIdx.y][WARP_SIZE] = s_marker[threadIdx.y][0];
		// copy part of marker and mask to shared memory. works for 1 warp at a time...
		//#pragma unroll
		for (unsigned int i = 0; i < ychunk && y*ychunk+i < sy; ++i) {
			s_marker[y * ychunk+i][x] = marker[start + i*sx];
			s_mask [y * ychunk+i][x] = mask[start + i*sx];
		}
		// perform iteration all X threads do the same operations, so there may be read/write hazards. but the output is the same.
		// this is looping for BLOCK_SIZE times, and each iteration the final results are propagated 1 step closer to tx.
		// if (threadIdx.x == 0) { // have all threads do the same work
		//#pragma unroll
		if (threadIdx.y + blockIdx.x * XY_THREADS < sy) { //require dimension to be perfectly padded.
			// Sequential right-to-left scan: dilate from the right neighbour,
			// clamp by the mask.
			for (int i = WARP_SIZE - 1; i >= 0; --i) {
				s_old = s_marker[threadIdx.y][i];
				s_new = min( max( s_marker[threadIdx.y][i+1], s_old ), s_mask[threadIdx.y][i] );
				s_change |= s_new ^ s_old;
				s_marker[threadIdx.y][i] = s_new;
			}
		}
		// output result back to global memory and set up for next x chunk
		//#pragma unroll
		for (unsigned int i = 0; i < ychunk && y*ychunk+i < sy; ++i) {
			marker[start + i*sx] = s_marker[y * ychunk+i][x];
		}
		// printf("startx: %d, change = %d\n", startx, s_change);
	}
	// Remainder: re-process the leftmost (possibly overlapping) chunk at x=0.
	if (startx <= 0) {
		// startx is <= 0 here, so -startx is a valid non-negative column index.
		s_marker[threadIdx.y][WARP_SIZE] = s_marker[threadIdx.y][-startx];  // getting ix-1st entry, which has been offsetted by 1 in s_marker
		// shared mem copy
		startx = 0;
		start = (blockIdx.x * XY_THREADS + y * ychunk) * sx + startx + x;
		// printf("tx: %d, ty: %d, x: %d, y: %d, startx: %d, start: %d", threadIdx.x, threadIdx.y, x, y, startx, start);
		// copy part of marker and mask to shared memory. works for 1 warp at a time...
		//#pragma unroll
		for (unsigned int i = 0; i < ychunk && y*ychunk+i < sy; ++i) {
			s_marker[y * ychunk+i][x] = marker[start + i*sx];
			s_mask [y * ychunk+i][x] = mask[start + i*sx];
		}
		// perform iteration all X threads do the same operations, so there may be read/write hazards. but the output is the same.
		// this is looping for BLOCK_SIZE times, and each iteration the final results are propagated 1 step closer to tx.
		//#pragma unroll
		if (threadIdx.y + blockIdx.x * XY_THREADS < sy) { //require dimension to be perfectly padded.
			for (int i = WARP_SIZE - 1; i >= 0; --i) {
				s_old = s_marker[threadIdx.y][i];
				s_new = min( max( s_marker[threadIdx.y][i+1], s_old ), s_mask[threadIdx.y][i] );
				s_change |= s_new ^ s_old;
				s_marker[threadIdx.y][i] = s_new;
			}
		}
		// output result back to global memory and set up for next x chunk
		//#pragma unroll
		for (unsigned int i = 0; i < ychunk && y*ychunk+i < sy; ++i) {
			marker[start + i*sx] = s_marker[y * ychunk+i][x];
		}
	}
	// __syncthreads();
	if (s_change > 0) *change = true;
	// __syncthreads();
}
// Forward (top-to-bottom) pass of reconstruction-by-dilation along y.
// One thread per image column: propagate the marker downward, clamped by
// the mask; *change is set when any pixel was modified. T must be an
// integer pixel type (old ^ new below).
template <typename T>
__global__ void
iRec1DForward_Y_dilation ( T* __restrict__ marker, const T* __restrict__ mask, const int sx, const int sy, bool* __restrict__ change )
{
	// parallelize along x.
	const int col = blockIdx.x * MAX_THREADS + threadIdx.x;
	unsigned int diff = 0;
	if (col < sx) {
		T prev = 0;
		int idx = col;
		for (int iy = 0; iy < sy; ++iy, idx += sx) {
			const T old_val = marker[idx];
			// dilate from the row above, clamp by the mask
			const T new_val = min( max( prev, old_val ), mask[idx] );
			diff |= old_val ^ new_val;
			prev = new_val;
			marker[idx] = new_val;
		}
	}
	if (diff != 0) *change = true;
}
template <typename T>
__global__ void
iRec1DBackward_Y_dilation ( T* __restrict__ marker, const T* __restrict__ mask, const unsigned int sx, const unsigned int sy, bool* __restrict__ change )
{
	// 4-connected backward Y pass: one thread per image column, scanning
	// bottom-to-top.  Mirror image of iRec1DForward_Y_dilation.
	const int col = blockIdx.x * MAX_THREADS + threadIdx.x;
	unsigned int modified = 0;	// XOR-accumulated: nonzero iff any pixel changed

	if (col < sx) {
		T carry = 0;	// value propagated from the row below (0 below the last row)
		for (int row = sy - 1; row >= 0; --row) {
			const int idx = row * sx + col;
			const T before = marker[idx];
			const T after  = min( max( carry, before ), mask[idx] );
			modified |= before ^ after;
			carry = after;
			marker[idx] = after;	// write result back to global memory
		}
	}
	// any thread that changed a pixel raises the global convergence flag.
	if (modified != 0) *change = true;
}
template <typename T>
__global__ void
iRec1DForward_Y_dilation_8 ( T* __restrict__ marker, const T* __restrict__ mask, const unsigned int sx, const unsigned int sy, bool* __restrict__ change )
{
	// 8-connected forward (top-to-bottom) Y pass: the value propagated to the
	// next row is the max over the three pixels above it (x-1, x, x+1), so each
	// row is staged in shared memory with a one-pixel halo on both sides.
	// best thing to do is to use linear arrays.  each warp does a column of 32.
	// parallelize along x.
	const unsigned int tx = threadIdx.x;
	const unsigned int bx = blockIdx.x * MAX_THREADS;
	// current marker row, with halo cells at [0] and [MAX_THREADS+1].
	volatile __shared__ T s_marker_B[MAX_THREADS+2];
	// volatile T* s_marker = s_marker_B + 1;
	unsigned int s_change = 0;	// XOR-accumulated per-thread change flag
	int tx1 = tx + 1;	// index into s_marker_B, offset past the left halo cell
	T s_new, s_old, s_prev;
	// NOTE(review): the __syncthreads() calls below sit inside this guard, so
	// they become divergent (undefined behavior) when sx is not a multiple of
	// MAX_THREADS; the inline comment records the required precondition --
	// confirm all callers pad sx accordingly.
	if ( bx + tx < sx ) { // make sure number of threads is a divisor of sx.
		s_prev = 0;	// value propagated from the row above (0 above row 0)
		for (int iy = 0; iy < sy; ++iy) {
			// copy part of marker and mask to shared memory
			if (tx == 0) {
				// halo cells: pixels owned by the neighboring blocks (0 at image edges)
				s_marker_B[0] = (bx == 0) ? 0 : marker[iy*sx + bx - 1];
				s_marker_B[MAX_THREADS + 1] = (bx + MAX_THREADS >= sx) ? 0 : marker[iy*sx + bx + MAX_THREADS];
			}
			if (tx < WARP_SIZE) {
				// first warp, get extra stuff
				s_marker_B[tx1] = marker[iy*sx + bx + tx];
			}
			if (tx < MAX_THREADS - WARP_SIZE) {
				// remaining threads load the rest of the row (shifted by one warp)
				s_marker_B[tx1 + WARP_SIZE] = marker[iy*sx + bx + tx + WARP_SIZE];
			}
			__syncthreads();
			// perform iteration
			s_old = s_marker_B[tx1];
			s_new = min( max( s_prev, s_old ), mask[iy*sx + bx + tx]);
			s_change |= s_old ^ s_new;
			// output result back to global memory
			s_marker_B[tx1] = s_new;
			marker[iy*sx + bx + tx] = s_new;
			__syncthreads();
			// 8-connectivity: the next row sees the max of the three pixels above it.
			s_prev = max( max(s_marker_B[tx1-1], s_marker_B[tx1]), s_marker_B[tx1+1]);
		}
	}
	// any thread that changed a pixel raises the global convergence flag.
	if (s_change != 0) *change = true;
}
template <typename T>
__global__ void
iRec1DBackward_Y_dilation_8 ( T* __restrict__ marker, const T* __restrict__ mask, const int sx, const int sy, bool* __restrict__ change )
{
	// 8-connected backward (bottom-to-top) Y pass: the value propagated to the
	// previous row is the max over the three pixels below it (x-1, x, x+1);
	// each row is staged in shared memory with a one-pixel halo on both sides.
	const int tx = threadIdx.x;
	const int bx = blockIdx.x * MAX_THREADS;
	// current marker row, with halo cells at [0] and [MAX_THREADS+1].
	volatile __shared__ T s_marker_B[MAX_THREADS+2];
	// volatile T* s_marker = s_marker_B + 1;
	unsigned int s_change = 0;	// XOR-accumulated per-thread change flag
	int tx1 = tx + 1; // for accessing s_marker_B
	T s_new, s_old, s_prev;
	// NOTE(review): the __syncthreads() calls below sit inside this guard, so
	// they become divergent (undefined behavior) when sx is not a multiple of
	// MAX_THREADS; the inline comment records the required precondition --
	// confirm all callers pad sx accordingly.
	if ( bx + tx < sx ) { //make sure number of threads is a divisor of sx.
		s_prev = 0;	// value propagated from the row below (0 below the last row)
		for (int iy = sy - 1; iy >= 0; --iy) {
			if (tx == 0) {
				// halo cells: pixels owned by the neighboring blocks (0 at image edges)
				s_marker_B[0] = (bx == 0) ? 0 : marker[iy*sx + bx - 1];
				s_marker_B[MAX_THREADS+1] = (bx + MAX_THREADS >= sx) ? 0 : marker[iy*sx + bx + MAX_THREADS];
			}
			if (tx < WARP_SIZE) {
				// first warp, get extra stuff
				s_marker_B[tx1] = marker[iy*sx + bx + tx];
			}
			if (tx < MAX_THREADS - WARP_SIZE) {
				// remaining threads load the rest of the row (shifted by one warp)
				s_marker_B[tx1 + WARP_SIZE] = marker[iy*sx + bx + tx + WARP_SIZE];
			}
			__syncthreads();
			// perform iteration
			s_old = s_marker_B[tx1];
			s_new = min( max( s_prev, s_old ), mask[iy*sx + bx + tx]);
			s_change |= s_old ^ s_new;
			// output result back to global memory
			s_marker_B[tx1] = s_new;
			marker[iy*sx + bx + tx] = s_new;
			__syncthreads();
			// 8-connectivity: the next (upper) row sees the max of the three pixels below it.
			s_prev = max( max(s_marker_B[tx1-1], s_marker_B[tx1]), s_marker_B[tx1+1]);
		}
	}
	// any thread that changed a pixel raises the global convergence flag.
	if (s_change != 0) *change = true;
}
// connectivity: if 8 conn, need to have border.
template <typename T>
unsigned int imreconstructIntCaller(T* __restrict__ marker, const T* __restrict__ mask, const int sx, const int sy,
		const int connectivity, hipStream_t stream) {

	// Morphological reconstruction of `marker` under `mask` by iterated raster
	// sweeps (forward/backward along X and Y) until no pixel changes, bounded
	// at 100000 iterations.  Both images are sx*sy row-major device buffers;
	// `connectivity` selects 4- or 8-connected propagation (8-conn uses the
	// *_8 Y kernels that read diagonal neighbors through a shared-memory halo).
	// Returns the number of iterations performed.
	//
	// Refactor note: the two connectivity variants previously duplicated the
	// entire convergence loop; they differ only in which Y kernels are
	// launched, so the choice is made inline instead.
	bool conn8 = (connectivity == 8);

	// X kernels: XX_THREADS x XY_THREADS block per XY_THREADS image rows.
	dim3 threadsx( XX_THREADS, XY_THREADS );
	dim3 blocksx( (sy + threadsx.y - 1) / threadsx.y );
	// Y kernels: one thread per image column.
	dim3 threadsy( MAX_THREADS );
	dim3 blocksy( (sx + threadsy.x - 1) / threadsy.x );

	// stability detection: device-side change flag, copied back every pass.
	unsigned int iter = 0;
	bool *h_change, *d_change;
	h_change = (bool*) malloc( sizeof(bool) );
	hipMalloc( (void**) &d_change, sizeof(bool) ) ;
	*h_change = true;

	while ( (*h_change) && (iter < 100000) )  // repeat until stability (bounded)
	{
		iter++;
		*h_change = false;
		hipLaunchKernelGGL(( init_change), dim3(1), dim3(1), 0, stream, d_change );
		// forward pass along the X axis
		hipLaunchKernelGGL(( iRec1DForward_X_dilation) , dim3(blocksx), dim3(threadsx), 0, stream , marker, mask, sx, sy, d_change );
		// forward pass along the Y axis
		if (conn8) {
			hipLaunchKernelGGL(( iRec1DForward_Y_dilation_8), dim3(blocksy), dim3(threadsy), 0, stream , marker, mask, sx, sy, d_change );
		} else {
			hipLaunchKernelGGL(( iRec1DForward_Y_dilation) , dim3(blocksy), dim3(threadsy), 0, stream , marker, mask, sx, sy, d_change );
		}
		// backward pass along the X axis
		hipLaunchKernelGGL(( iRec1DBackward_X_dilation), dim3(blocksx), dim3(threadsx), 0, stream , marker, mask, sx, sy, d_change );
		// backward pass along the Y axis
		if (conn8) {
			hipLaunchKernelGGL(( iRec1DBackward_Y_dilation_8), dim3(blocksy), dim3(threadsy), 0, stream , marker, mask, sx, sy, d_change );
		} else {
			hipLaunchKernelGGL(( iRec1DBackward_Y_dilation), dim3(blocksy), dim3(threadsy), 0, stream , marker, mask, sx, sy, d_change );
		}
		// wait for all four sweeps before reading the change flag on the host.
		if (stream == 0) hipDeviceSynchronize();
		else hipStreamSynchronize(stream);
		hipMemcpy( h_change, d_change, sizeof(bool), hipMemcpyDeviceToHost ) ;
	}

	hipFree(d_change) ;
	free(h_change);

	hipGetLastError();	// clear any sticky launch error before returning
	return iter;
}
//
//__device__ bool checkCandidateNeighbor4(unsigned char *marker, const unsigned char *mask, int x, int y, int ncols, int nrows,unsigned char pval){
// bool isCandidate = false;
// int index = 0;
//
// unsigned char markerXYval;
// unsigned char maskXYval;
// if(x < (ncols-1)){
// // check right pixel
// index = y * ncols + (x+1);
//
// markerXYval = marker[index];
// maskXYval = mask[index];
// if( (markerXYval < min(pval, maskXYval)) ){
// isCandidate = true;
// }
// }
//
// if(y < (nrows-1)){
// // check pixel bellow current
// index = (y+1) * ncols + x;
//
// markerXYval = marker[index];
// maskXYval = mask[index];
// if( (markerXYval < min(pval,maskXYval)) ){
// isCandidate = true;
// }
// }
//
// // check left pixel
// if(x > 0){
// index = y * ncols + (x-1);
//
// markerXYval = marker[index];
// maskXYval = mask[index];
// if( (markerXYval < min(pval,maskXYval)) ){
// isCandidate = true;
// }
// }
//
// if(y > 0){
// // check up pixel
// index = (y-1) * ncols + x;
//
// markerXYval = marker[index];
// maskXYval = mask[index];
// if( (markerXYval < min(pval,maskXYval)) ){
// isCandidate = true;
// }
// }
// return isCandidate;
//}
//
//__device__ bool checkCandidateNeighbor8(unsigned char *marker, const unsigned char *mask, int x, int y, int ncols, int nrows,unsigned char pval){
// int index = 0;
// bool isCandidate = checkCandidateNeighbor4(marker, mask, x, y, ncols, nrows, pval);
//// if(threadIdx.x == 0 && threadIdx.y == 0 && blockIdx.x == 0 && blockIdx.y == 0){
//// printf("checkCandidateNeighbor8\n");
//// }
//
// unsigned char markerXYval;
// unsigned char maskXYval;
//
// // check up right corner
// if(x < (ncols-1) && y > 0){
// // check right pixel
// index = (y-1) * ncols + (x+1);
//
// markerXYval = marker[index];
// maskXYval = mask[index];
// if( (markerXYval < min(pval, maskXYval)) ){
// isCandidate = true;
// }
// }
//
// // check up left corner
// if(x> 0 && y > 0){
// // check pixel bellow current
// index = (y-1) * ncols + (x-1);
//
// markerXYval = marker[index];
// maskXYval = mask[index];
// if( (markerXYval < min(pval,maskXYval)) ){
// isCandidate = true;
// }
// }
//
// // check bottom left pixel
// if(x > 0 && y < (nrows-1)){
// index = (y+1) * ncols + (x-1);
//
// markerXYval = marker[index];
// maskXYval = mask[index];
// if( (markerXYval < min(pval,maskXYval)) ){
// isCandidate = true;
// }
// }
//
// // check bottom right
// if(x < (ncols-1) && y < (nrows-1)){
// index = (y+1) * ncols + (x+1);
//
// markerXYval = marker[index];
// maskXYval = mask[index];
// if( (markerXYval < min(pval,maskXYval)) ){
// isCandidate = true;
// }
// }
// return isCandidate;
//}
//
//
//__global__ void initQueuePixels(unsigned char *marker, const unsigned char *mask, int sx, int sy, bool conn8, int *d_queue, int *d_queue_size){
// int x = blockIdx.x * blockDim.x + threadIdx.x;
// int y = blockIdx.y * blockDim.y + threadIdx.y;
//
// // if it is inside image without right/bottom borders
// if(y < (sy) && x < (sx)){
// int input_index = y * sy + x;
// unsigned char pval = marker[input_index];
// bool isCandidate = false;
// if(conn8){
// // connectivity 8
// isCandidate = checkCandidateNeighbor8(marker, mask, x, y, sx, sy, pval);
// }else{
// // connectivity 4
// isCandidate = checkCandidateNeighbor4(marker, mask, x, y, sx, sy, pval);
// }
// if(isCandidate){
// int queuePos = atomicAdd((unsigned int*)d_queue_size, 1);
// d_queue[queuePos] = input_index;
// }
// }
//}
//
template <typename T>
__device__ bool checkCandidateNeighbor4(T *marker, const T *mask, int x, int y, int ncols, int nrows, T pval){
	// A 4-neighbor makes (x, y) a "candidate" when the neighbor's marker value
	// is below min(pval, its mask value), i.e. pval could still raise it.
	// Neighbor offsets: right, below, left, above.
	const int dx[4] = { 1, 0, -1,  0 };
	const int dy[4] = { 0, 1,  0, -1 };
	bool isCandidate = false;
	for (int k = 0; k < 4; ++k) {
		const int nx = x + dx[k];
		const int ny = y + dy[k];
		// skip neighbors outside the image bounds
		if (nx < 0 || nx >= ncols || ny < 0 || ny >= nrows) continue;
		const int index = ny * ncols + nx;
		if (marker[index] < min(pval, mask[index])) {
			isCandidate = true;
		}
	}
	return isCandidate;
}
template <typename T>
__device__ bool checkCandidateNeighbor8(T *marker, const T *mask, int x, int y, int ncols, int nrows,T pval){
	// 8-connectivity candidate test: the four edge neighbors (delegated to
	// checkCandidateNeighbor4) plus the four diagonals.
	bool isCandidate = checkCandidateNeighbor4(marker, mask, x, y, ncols, nrows, pval);
	// Diagonal offsets: up-right, up-left, down-left, down-right.
	const int dx[4] = {  1, -1, -1,  1 };
	const int dy[4] = { -1, -1,  1,  1 };
	for (int k = 0; k < 4; ++k) {
		const int nx = x + dx[k];
		const int ny = y + dy[k];
		// skip diagonals outside the image bounds
		if (nx < 0 || nx >= ncols || ny < 0 || ny >= nrows) continue;
		const int index = ny * ncols + nx;
		if (marker[index] < min(pval, mask[index])) {
			isCandidate = true;
		}
	}
	return isCandidate;
}
template <typename T>
__global__ void initQueuePixels(T *marker, const T *mask, int sx, int sy, bool conn8, int *d_queue, int *d_queue_size){
	// One thread per pixel: append to d_queue the linear index of every pixel
	// that could still propagate its value to a neighbor.  This seeds the
	// queue-based reconstruction phase.  d_queue must hold sx*sy ints and
	// *d_queue_size must be zeroed before launch.
	int x = blockIdx.x * blockDim.x + threadIdx.x;
	int y = blockIdx.y * blockDim.y + threadIdx.y;
	// if it is inside image without right/bottom borders
	if(y < (sy) && x < (sx)){
		// BUGFIX: the row stride of a row-major sx*sy image is sx (ncols),
		// not sy.  The previous "y * sy + x" was only correct for square
		// images and disagreed with the "y * ncols + x" indexing used by the
		// checkCandidateNeighbor* helpers called below.
		int input_index = y * sx + x;
		T pval = marker[input_index];
		bool isCandidate = false;
		if(conn8){
			// connectivity 8
			isCandidate = checkCandidateNeighbor8(marker, mask, x, y, sx, sy, pval);
		}else{
			// connectivity 4
			isCandidate = checkCandidateNeighbor4(marker, mask, x, y, sx, sy, pval);
		}
		if(isCandidate){
			// atomically reserve the next free slot in the output queue
			int queuePos = atomicAdd((unsigned int*)d_queue_size, 1);
			d_queue[queuePos] = input_index;
		}
	}
}
// connectivity: if 8 conn, need to have border.
template <typename T> int *imreconstructIntCallerBuildQueue(T* __restrict__ marker, const T* __restrict__ mask, const int sx, const int sy, const int connectivity, int &queueSize, int num_iterations, hipStream_t stream) {
	// Runs a fixed number of raster-scan reconstruction sweeps (no convergence
	// test), then scans the image once to build the seed worklist for a
	// queue-based propagation phase: linear indices of all pixels that can
	// still raise a neighbor.  Returns a device pointer to the compacted
	// queue (ownership transfers to the caller, who must hipFree it) and
	// stores the number of valid entries in queueSize.
	// setup execution parameters
	bool conn8 = (connectivity == 8);
	dim3 threadsx( XX_THREADS, XY_THREADS );
	dim3 blocksx( (sy + threadsx.y - 1) / threadsx.y );
	dim3 threadsy( MAX_THREADS );
	dim3 blocksy( (sx + threadsy.x - 1) / threadsy.x );
	// required by the sweep kernels' signatures but never read back here,
	// since the sweep count is fixed by num_iterations.
	bool *d_change;
	hipMalloc( (void**) &d_change, sizeof(bool) ) ;
	// alloc it with the same size as the input image (worst case: every pixel queued).
	int *d_queue = NULL;
	hipMalloc( (void**) &d_queue, sizeof(int) * sx * sy ) ;
	int *d_queue_size;
	hipMalloc( (void**) &d_queue_size, sizeof(int)) ;
	hipMemset( (void*) d_queue_size, 0, sizeof(int)) ;
	// stability detection
	unsigned int iter = 0;
	//long t1, t2, t3;
	// t1 = ClockGetTime();
	if (conn8) {
		while(iter < num_iterations){
			iter++;
			// forward pass along the X axis
			hipLaunchKernelGGL(( iRec1DForward_X_dilation) , dim3(blocksx), dim3(threadsx), 0, stream , marker, mask, sx, sy, d_change );
			// forward pass along the Y axis (8-connectivity)
			hipLaunchKernelGGL(( iRec1DForward_Y_dilation_8), dim3(blocksy), dim3(threadsy), 0, stream , marker, mask, sx, sy, d_change );
			// backward pass along the X axis
			hipLaunchKernelGGL(( iRec1DBackward_X_dilation), dim3(blocksx), dim3(threadsx), 0, stream , marker, mask, sx, sy, d_change );
			// backward pass along the Y axis (8-connectivity)
			hipLaunchKernelGGL(( iRec1DBackward_Y_dilation_8), dim3(blocksy), dim3(threadsy), 0, stream , marker, mask, sx, sy, d_change );
			if (stream == 0) hipDeviceSynchronize();
			else hipStreamSynchronize(stream);
		}
	} else {
		while(iter < num_iterations){
			iter++;
			// forward pass along the X axis
			hipLaunchKernelGGL(( iRec1DForward_X_dilation) , dim3(blocksx), dim3(threadsx), 0, stream , marker, mask, sx, sy, d_change );
			// forward pass along the Y axis (4-connectivity)
			hipLaunchKernelGGL(( iRec1DForward_Y_dilation) , dim3(blocksy), dim3(threadsy), 0, stream , marker, mask, sx, sy, d_change );
			// backward pass along the X axis
			hipLaunchKernelGGL(( iRec1DBackward_X_dilation), dim3(blocksx), dim3(threadsx), 0, stream , marker, mask, sx, sy, d_change );
			// backward pass along the Y axis (4-connectivity)
			hipLaunchKernelGGL(( iRec1DBackward_Y_dilation), dim3(blocksy), dim3(threadsy), 0, stream , marker, mask, sx, sy, d_change );
			if (stream == 0) hipDeviceSynchronize();
			else hipStreamSynchronize(stream);
		}
	}
	// t2 = ClockGetTime();
	// printf("first pass 4conn == scan, %lu ms\n", t2-t1);
	hipFree(d_change) ;
	// This is now a per pixel operation where we build the
	// first queue of pixels that may propagate their values.
	// Creating a single thread per-pixel in the input image
	dim3 threads(16, 16);
	dim3 grid((sx + threads.x - 1) / threads.x, (sy + threads.y - 1) / threads.y);
	//
	hipLaunchKernelGGL(( initQueuePixels), dim3(grid), dim3(threads), 0, stream , marker, mask, sx, sy, conn8, d_queue, d_queue_size);
	if (stream == 0) hipDeviceSynchronize();
	else hipStreamSynchronize(stream);
	// number of candidate pixels found by initQueuePixels
	int h_compact_queue_size;
	hipMemcpy( &h_compact_queue_size, d_queue_size, sizeof(int), hipMemcpyDeviceToHost ) ;
	//t3 = ClockGetTime();
	// printf(" compactQueueSize %d, time to generate %lu ms\n", h_compact_queue_size, t3-t2);
	int *d_queue_fit = NULL;
	// alloc current size +1000 (magic number), doubled -- headroom so the queue
	// can grow during the propagation phase.  NOTE(review): no overflow check
	// is visible here; confirm the consumer respects this capacity.
	hipMalloc( (void**) &d_queue_fit, sizeof(int) * (h_compact_queue_size+1000)*2 ) ;
	// Copy content of the d_queue (which has the size of the image x*y) to a more compact form (d_queue_fit).
	// This should save a lot of memory, since the compact queue is usually much smaller than the image size
	hipMemcpy( d_queue_fit, d_queue, sizeof(int) * h_compact_queue_size, hipMemcpyDeviceToDevice ) ;
	// This is the int containing the size of the queue
	hipFree(d_queue_size) ;
	// Cleanup the temporary memory use to store the queue
	hipFree(d_queue) ;
	queueSize = h_compact_queue_size;
	return d_queue_fit;
}
// Explicit template instantiations for the image element types used by callers
// (8-bit grayscale and 32-bit integer images).
template unsigned int imreconstructIntCaller<unsigned char>(unsigned char*, const unsigned char*, const int, const int,
		const int, hipStream_t);
template unsigned int imreconstructIntCaller<int>(int*, const int*, const int, const int,
		const int, hipStream_t);
//,unsigned char*h_markerFistPass );
template int *imreconstructIntCallerBuildQueue<unsigned char>(unsigned char*, const unsigned char*, const int, const int, const int, int&, int, hipStream_t);
template int *imreconstructIntCallerBuildQueue<int>(int*, const int*, const int, const int, const int, int&, int, hipStream_t);
}}
| bfe2a521082b574d65b06a0c965dddcf35eab47d.cu | // adaptation of Pavel's imreconstruction code for openCV
#include "change_kernel.cuh"
#include <sys/time.h>
#include <stdio.h>
#define MAX_THREADS 256
#define YX_THREADS 64
#define YY_THREADS 4
#define X_THREADS 32
#define Y_THREADS 64
#define XX_THREADS 4
#define XY_THREADS 64
#define NEQ(a,b) ( (a) != (b) )
#define WARP_SIZE 32
long ClockGetTime()
{
	// Wall-clock timestamp in whole milliseconds since the Unix epoch,
	// taken with POSIX gettimeofday (microsecond source, truncated to ms).
	struct timeval now;
	gettimeofday(&now, NULL);
	long micros = now.tv_sec * 1000000 + now.tv_usec;
	return micros / 1000LL;
}
namespace nscale { namespace gpu {
////////////////////////////////////////////////////////////////////////////////
// RECONSTRUCTION BY DILATION
////////////////////////////////////////////////////////////////////////////////
/*
* fast code
*/
//template <typename T>
//__global__ void
//iRec1DForward_X_dilation2 (T* __restrict__ marker, const T* __restrict__ mask, const int sx, const int sy, bool* __restrict__ change )
//{
//
// const int ty = threadIdx.x;
// const int by = blockIdx.x * blockDim.x;
//
// volatile __shared__ T s_marker[Y_THREADS][Y_THREADS+1];
// volatile __shared__ T s_mask [Y_THREADS][Y_THREADS+1];
// bool s_change = false;
//
//
//
// int startx, iy, ix;
//
// T s_old;
// // the increment allows overlap by 1 between iterations to move the data to next block.
// for (startx = 0; startx < sx - Y_THREADS; startx += Y_THREADS - 1) {
// // copy part of marker and mask to shared memory
// for (iy = 0; iy < Y_THREADS && by+iy<sy; ++iy) {
// // now treat ty as x, and iy as y, so global mem acccess is closer.
// s_marker[ty][iy] = marker[(by + iy)*sx + startx + ty];
// s_mask [ty][iy] = mask [(by + iy)*sx + startx + ty];
// }
// __syncthreads();
//
// // perform iteration all X threads do the same operations, so there may be read/write hazards. but the output is the same.
// // this is looping for BLOCK_SIZE times, and each iteration the final results are propagated 1 step closer to tx.
// if (by + ty < sy) {
// for (ix = 1; ix < Y_THREADS; ++ix) {
// s_old = s_marker[ix][ty];
// s_marker[ix][ty] = max( s_marker[ix][ty], s_marker[ix-1][ty] );
// s_marker[ix][ty] = min( s_marker[ix][ty], s_mask [ix] [ty] );
// s_change |= NEQ( s_old, s_marker[ix][ty] );
// }
//} __syncthreads();
//
// // output result back to global memory
// for (iy = 0; iy < Y_THREADS && by+iy<sy; ++iy) {
// // now treat ty as x, and iy as y, so global mem acccess is closer.
// marker[(by + iy)*sx + startx + ty] = s_marker[ty][iy];
// }
// __syncthreads();
//
// }
//
// startx = sx - Y_THREADS;
//
// // copy part of marker and mask to shared memory
// for (iy = 0; iy < Y_THREADS && by+iy<sy; ++iy) {
// // now treat ty as x, and iy as y, so global mem acccess is closer.
// s_marker[ty][iy] = marker[(by + iy)*sx + startx + ty];
// s_mask [ty][iy] = mask [(by + iy)*sx + startx + ty];
// }
// __syncthreads();
//
// // perform iteration
// if (by + ty < sy) {
// for (ix = 1; ix < Y_THREADS; ++ix) {
// s_old = s_marker[ix][ty];
// s_marker[ix][ty] = max( s_marker[ix][ty], s_marker[ix-1][ty] );
// s_marker[ix][ty] = min( s_marker[ix][ty], s_mask [ix] [ty] );
// s_change |= NEQ( s_old, s_marker[ix][ty] );
// }
//} __syncthreads();
//
// // output result back to global memory
// for (iy = 0; iy < Y_THREADS && by+iy<sy; ++iy) {
// // now treat ty as x, and iy as y, so global mem acccess is closer.
// marker[(by + iy)*sx + startx + ty] = s_marker[ty][iy];
// if (s_change) *change = true;
// }
// __syncthreads();
//
//
//}
//
//template <typename T>
//__global__ void
//iRec1DBackward_X_dilation2 (T* __restrict__ marker, const T* __restrict__ mask, const int sx, const int sy, bool* __restrict__ change )
//{
//
// const int ty = threadIdx.x;
// const int by = blockIdx.x * Y_THREADS;
// // always 0. const int bz = blockIdx.y;
//
// volatile __shared__ T s_marker[Y_THREADS][Y_THREADS+1];
// volatile __shared__ T s_mask [Y_THREADS][Y_THREADS+1];
// bool s_change = false;
//
//
//
// int startx;
//
// T s_old;
// for (startx = sx - Y_THREADS; startx > 0; startx -= Y_THREADS - 1) {
//
// // copy part of marker and mask to shared memory
// for (int iy = 0; iy < Y_THREADS && by+iy<sy; iy++) {
// // now treat ty as x, and iy as y, so global mem acccess is closer.
// s_marker[ty][iy] = marker[(by + iy)*sx + startx + ty];
// s_mask [ty][iy] = mask [(by + iy)*sx + startx + ty];
// }
// __syncthreads();
//
// // perform iteration
// if (by + ty < sy) {
// for (int ix = Y_THREADS - 2; ix >= 0; ix--) {
// s_old = s_marker[ix][ty];
// s_marker[ix][ty] = max( s_marker[ix][ty], s_marker[ix+1][ty] );
// s_marker[ix][ty] = min( s_marker[ix][ty], s_mask [ix] [ty] );
// s_change |= NEQ( s_old, s_marker[ix][ty] );
// }
//} __syncthreads();
//
// // output result back to global memory
// for (int iy = 0; iy < Y_THREADS && by+iy<sy; iy++) {
// // now treat ty as x, and iy as y, so global mem acccess is closer.
// marker[(by + iy)*sx + startx + ty] = s_marker[ty][iy];
// }
// __syncthreads();
//
// }
//
// startx = 0;
//
// // copy part of marker and mask to shared memory
// for (int iy = 0; iy < Y_THREADS && by+iy<sy; iy++) {
// // now treat ty as x, and iy as y, so global mem acccess is closer.
// s_marker[ty][iy] = marker[(by + iy)*sx + startx + ty];
// s_mask [ty][iy] = mask [(by + iy)*sx + startx + ty];
// }
// __syncthreads();
//
// // perform iteration
// if (by + ty < sy) {
// for (int ix = Y_THREADS - 2; ix >= 0; ix--) {
// s_old = s_marker[ix][ty];
// s_marker[ix][ty] = max( s_marker[ix][ty], s_marker[ix+1][ty] );
// s_marker[ix][ty] = min( s_marker[ix][ty], s_mask [ix] [ty] );
// s_change |= NEQ( s_old, s_marker[ix][ty] );
// }
//} __syncthreads();
//
// // output result back to global memory
// for (int iy = 0; iy < Y_THREADS && by+iy<sy; iy++) {
// // now treat ty as x, and iy as y, so global mem acccess is closer.
// marker[(by + iy)*sx + startx + ty] = s_marker[ty][iy];
// if (s_change) *change = true;
// }
// __syncthreads();
//
//
//
//}
////////////////////////////////////////////////////////////////////////////////
// RECONSTRUCTION BY DILATION
////////////////////////////////////////////////////////////////////////////////
/*
* warp = 32. shared memory in banks of 32, each 32 bits (128 bytes wide) - interleave of 4 for rows? no need. compute 2 has no conflict for read/write bytes.
* global memory in partitions of 256 bytes. 1 warp at a time at 1, 2, 4, 8, or 16 bytes. width of array and threadblock = warpsize * c,
* try to remove syncthreads by making sure warps do not diverge(and use volatile)
* thread id = x + y * Dx. so this means if x and y are swapped between mem and compute steps, must have sync...
* IF 32 x 8 theads, repeat 4 times in y. read single char from global, then swap x and y to process 32 y at a time, would need to syncthread inside iterations. can use 1 warp to go through all shared mem iteratively, or have each warp compute 4 bytes 4 columns (warps are ordered)
* IF 8x4 or 4x8 threads for a warp, read 1 bytes from global (linearize the warp thread id (e.g. x + y*8 or x+y*4) to read from global sequentially, and repeat 4 or 8 times) then process the memory for this warp 4 y or 8 y iteratively, repeat for all x chunks. essentially the original algorithm. then create threadblock that is just multiplied in y to reach 192 or 256. avoids syncthreads completely.
* or alternatively, treat each warp as 4x8, and each x process columns 8 apart. each warp then do 4 bytes, (8 warps), to generate 8x8 blocks that are completed. - no syncthreads needed. - no... would require more kernel iterations
for backward: thread ids should map to the data - so first thread has the last data.... ( for correctness)
for y, similar to this...
for register usage: use unsigned int where possible. maybe use 1D shared array would be better too...
*/
template <typename T>
__global__ void
iRec1DForward_X_dilation ( T* marker, const T* mask, const unsigned int sx, const unsigned int sy, bool* change )
{
	// Forward (left-to-right) 1-D reconstruction sweep along X.
	// Each block handles XY_THREADS image rows; the (threadIdx.x, threadIdx.y)
	// lattice is re-linearized so that x is the lane within a WARP_SIZE-wide
	// row chunk and y selects which group of `ychunk` rows the lane loads.
	const unsigned int x = (threadIdx.x + threadIdx.y * XX_THREADS) % WARP_SIZE;
	const unsigned int y = (threadIdx.x + threadIdx.y * XX_THREADS) / WARP_SIZE;
	const unsigned int ychunk = WARP_SIZE / XX_THREADS;	// rows loaded per lane
	const unsigned int xstop = sx - WARP_SIZE;	// NOTE(review): assumes sx >= WARP_SIZE -- confirm
	// printf("(tx, ty) -> (x, y) : (%d, %d)->(%d,%d)\n", threadIdx.x, threadIdx.y, x, y);
	// XY_THREADS should be 32==warpSize, XX_THREADS should be 4 or 8.
	// init to 0...
	// column layout of s_marker: [0] is a carry-in slot holding the last pixel
	// of the previous chunk, so values propagate across chunk boundaries.
	volatile __shared__ T s_marker[XY_THREADS][WARP_SIZE+1];
	volatile __shared__ T s_mask [XY_THREADS][WARP_SIZE+1];
	volatile unsigned int s_change = 0;	// XOR-accumulated per-thread change flag
	T s_old, s_new;
	unsigned int startx;
	unsigned int start;
	s_marker[threadIdx.y][WARP_SIZE] = 0; // only need x=0 to be 0
	// the increment allows overlap by 1 between iterations to move the data to next block.
	// NOTE(review): there is no __syncthreads() between the cooperative loads
	// and the per-row scan; correctness appears to rely on warp-synchronous
	// execution -- confirm on hardware with independent thread scheduling.
	for (startx = 0; startx < xstop; startx += WARP_SIZE) {
		start = (blockIdx.x * XY_THREADS + y * ychunk) * sx + startx + x;
		// printf("tx: %d, ty: %d, x: %d, y: %d, startx: %d, start: %d", threadIdx.x, threadIdx.y, x, y, startx, start);
		// carry the last column of the previous chunk into the carry-in slot.
		s_marker[threadIdx.y][0] = s_marker[threadIdx.y][WARP_SIZE];
		// copy part of marker and mask to shared memory.  works for 1 warp at a time...
		//#pragma unroll
		for (unsigned int i = 0; i < ychunk && y*ychunk+i < sy; ++i) {
			s_marker[y * ychunk+i][x+1] = marker[start + i*sx];
			s_mask [y * ychunk+i][x+1] = mask[start + i*sx];
		}
		// perform iteration   all X threads do the same operations, so there may be read/write hazards.  but the output is the same.
		// this is looping for BLOCK_SIZE times, and each iteration the final results are propagated 1 step closer to tx.
		// if (threadIdx.x == 0) {  // have all threads do the same work
		//#pragma unroll
		if (threadIdx.y + blockIdx.x * XY_THREADS < sy) { //require dimension to be perfectly padded.
			// sequential left-to-right scan of this thread's row in shared memory:
			// each pixel becomes min(max(left neighbor, pixel), mask).
			for (unsigned int i = 1; i <= WARP_SIZE; ++i) {
				s_old = s_marker[threadIdx.y][i];
				s_new = min( max( s_marker[threadIdx.y][i-1], s_old ), s_mask[threadIdx.y][i] );
				s_change |= s_new ^ s_old;
				s_marker[threadIdx.y][i] = s_new;
			}
		}
		// output result back to global memory and set up for next x chunk
		//#pragma unroll
		for (unsigned int i = 0; i < ychunk && y*ychunk+i < sy; ++i) {
			marker[start + i*sx] = s_marker[y * ychunk+i][x+1];
		}
		// printf("startx: %d, change = %d\n", startx, s_change);
	}
	// tail chunk: re-process the final WARP_SIZE columns (this window may
	// overlap the previous chunk when sx is not a multiple of WARP_SIZE).
	if (startx < sx) {
		s_marker[threadIdx.y][0] = s_marker[threadIdx.y][sx-startx]; // getting ix-1st entry, which has been offsetted by 1 in s_marker
		// shared mem copy
		startx = sx - WARP_SIZE;
		start = (blockIdx.x * XY_THREADS + y * ychunk) * sx + startx + x;
		// printf("tx: %d, ty: %d, x: %d, y: %d, startx: %d, start: %d", threadIdx.x, threadIdx.y, x, y, startx, start);
		// copy part of marker and mask to shared memory.  works for 1 warp at a time...
		//#pragma unroll
		for (unsigned int i = 0; i < ychunk && y*ychunk+i < sy; ++i) {
			s_marker[y * ychunk+i][x+1] = marker[start + i*sx];
			s_mask [y * ychunk+i][x+1] = mask[start + i*sx];
		}
		// perform iteration  all X threads do the same operations, so there may be read/write hazards.  but the output is the same.
		// this is looping for BLOCK_SIZE times, and each iteration the final results are propagated 1 step closer to tx.
		//#pragma unroll
		if (threadIdx.y + blockIdx.x * XY_THREADS < sy) { //require dimension to be perfectly padded.
			for (unsigned int i = 1; i <= WARP_SIZE; ++i) {
				s_old = s_marker[threadIdx.y][i];
				s_new = min( max( s_marker[threadIdx.y][i-1], s_old ), s_mask[threadIdx.y][i] );
				s_change |= s_new ^ s_old;
				s_marker[threadIdx.y][i] = s_new;
			}
		}
		// output result back to global memory and set up for next x chunk
		//#pragma unroll
		for (unsigned int i = 0; i < ychunk && y*ychunk+i < sy; ++i) {
			marker[start + i*sx] = s_marker[y * ychunk+i][x+1];
		}
	}
	// __syncthreads();
	if (s_change > 0) *change = true;
	// __syncthreads();
}
// Backward (right-to-left) horizontal pass of iterative grayscale
// reconstruction by dilation.  Each marker pixel is updated as
//     marker[x] = min( max( marker[x+1], marker[x] ), mask[x] )
// and the inner loop repeats this WARP_SIZE times inside a shared-memory
// tile so values propagate across the whole tile per load/store cycle.
// Thread remapping: the XX_THREADS x XY_THREADS block is re-linearised so
// `x` is a lane position within a warp and `y` selects a group of `ychunk`
// consecutive image rows; blockIdx.x selects a band of XY_THREADS rows.
// Sets *change to true when any marker value was modified.
template <typename T>
__global__ void
iRec1DBackward_X_dilation ( T* __restrict__ marker, const T* __restrict__ mask, const int sx, const int sy, bool* __restrict__ change )
{
    const unsigned int x = (threadIdx.x + threadIdx.y * XX_THREADS) % WARP_SIZE;
    const unsigned int y = (threadIdx.x + threadIdx.y * XX_THREADS) / WARP_SIZE;
    const unsigned int ychunk = WARP_SIZE / XX_THREADS;
    const unsigned int xstop = sx - WARP_SIZE;
    // printf("(tx, ty) -> (x, y) : (%d, %d)->(%d,%d)\n", threadIdx.x, threadIdx.y, x, y);
    // XY_THREADS should be 32==warpSize, XX_THREADS should be 4 or 8.
    // init to 0...
    // Tiles are WARP_SIZE+1 wide: the extra column carries the boundary value
    // from the previously processed (right-hand) chunk into this one.
    volatile __shared__ T s_marker[XY_THREADS][WARP_SIZE+1];
    volatile __shared__ T s_mask  [XY_THREADS][WARP_SIZE+1];
    // Per-thread change accumulator (thread-local despite the s_ prefix).
    volatile unsigned int s_change = 0;
    T s_old, s_new;
    int startx;
    unsigned int start;
    s_marker[threadIdx.y][0] = 0;  // only need x=WARPSIZE to be 0
    // the increment allows overlap by 1 between iterations to move the data to next block.
    // Walk the row right-to-left in WARP_SIZE-wide chunks.
    for (startx = xstop; startx > 0; startx -= WARP_SIZE) {
        start = (blockIdx.x * XY_THREADS + y * ychunk) * sx + startx + x;
        // printf("tx: %d, ty: %d, x: %d, y: %d, startx: %d, start: %d", threadIdx.x, threadIdx.y, x, y, startx, start);
        // Carry the leftmost result of the previous chunk into the sentinel column.
        s_marker[threadIdx.y][WARP_SIZE] = s_marker[threadIdx.y][0];
        // copy part of marker and mask to shared memory. works for 1 warp at a time...
        //#pragma unroll
        for (unsigned int i = 0; i < ychunk && y*ychunk+i < sy; ++i) {
            s_marker[y * ychunk+i][x] = marker[start + i*sx];
            s_mask  [y * ychunk+i][x] = mask[start + i*sx];
        }
        // perform iteration all X threads do the same operations, so there may be read/write hazards. but the output is the same.
        // this is looping for BLOCK_SIZE times, and each iteration the final results are propagated 1 step closer to tx.
        // if (threadIdx.x == 0) { // have all threads do the same work
        //#pragma unroll
        if (threadIdx.y + blockIdx.x * XY_THREADS < sy) {  //require dimension to be perfectly padded.
            for (int i = WARP_SIZE - 1; i >= 0; --i) {
                s_old = s_marker[threadIdx.y][i];
                s_new = min( max( s_marker[threadIdx.y][i+1], s_old ), s_mask[threadIdx.y][i] );
                s_change |= s_new ^ s_old;
                s_marker[threadIdx.y][i] = s_new;
            }
        }
        // output result back to global memory and set up for next x chunk
        //#pragma unroll
        for (unsigned int i = 0; i < ychunk && y*ychunk+i < sy; ++i) {
            marker[start + i*sx] = s_marker[y * ychunk+i][x];
        }
        // printf("startx: %d, change = %d\n", startx, s_change);
    }
    // Leftover partial chunk at the left image edge (sx not a multiple of
    // WARP_SIZE).  Here startx is in (-WARP_SIZE, 0], so -startx is a valid
    // column index into the previous tile.
    if (startx <= 0) {
        s_marker[threadIdx.y][WARP_SIZE] = s_marker[threadIdx.y][-startx];  // getting ix-1st entry, which has been offsetted by 1 in s_marker
        // shared mem copy
        startx = 0;
        start = (blockIdx.x * XY_THREADS + y * ychunk) * sx + startx + x;
        // printf("tx: %d, ty: %d, x: %d, y: %d, startx: %d, start: %d", threadIdx.x, threadIdx.y, x, y, startx, start);
        // copy part of marker and mask to shared memory. works for 1 warp at a time...
        //#pragma unroll
        for (unsigned int i = 0; i < ychunk && y*ychunk+i < sy; ++i) {
            s_marker[y * ychunk+i][x] = marker[start + i*sx];
            s_mask  [y * ychunk+i][x] = mask[start + i*sx];
        }
        // perform iteration all X threads do the same operations, so there may be read/write hazards. but the output is the same.
        // this is looping for BLOCK_SIZE times, and each iteration the final results are propagated 1 step closer to tx.
        //#pragma unroll
        if (threadIdx.y + blockIdx.x * XY_THREADS < sy) {  //require dimension to be perfectly padded.
            for (int i = WARP_SIZE - 1; i >= 0; --i) {
                s_old = s_marker[threadIdx.y][i];
                s_new = min( max( s_marker[threadIdx.y][i+1], s_old ), s_mask[threadIdx.y][i] );
                s_change |= s_new ^ s_old;
                s_marker[threadIdx.y][i] = s_new;
            }
        }
        // output result back to global memory and set up for next x chunk
        //#pragma unroll
        for (unsigned int i = 0; i < ychunk && y*ychunk+i < sy; ++i) {
            marker[start + i*sx] = s_marker[y * ychunk+i][x];
        }
    }
    // __syncthreads();
    if (s_change > 0) *change = true;
    // __syncthreads();
}
// Forward (top-to-bottom) vertical pass of reconstruction by dilation.
// One thread per image column: a running maximum is carried down the
// column and clamped by the mask at every pixel.  Sets *change to true
// when any marker value was modified.
template <typename T>
__global__ void
iRec1DForward_Y_dilation ( T* __restrict__ marker, const T* __restrict__ mask, const int sx, const int sy, bool* __restrict__ change )
{
    const int col = blockIdx.x * MAX_THREADS + threadIdx.x;
    unsigned int modified = 0;
    if (col < sx) {
        T carry = 0;
        int idx = col;  // linear index of (iy, col); advanced by one row (sx) per step
        for (int iy = 0; iy < sy; ++iy, idx += sx) {
            const T before = marker[idx];
            // geodesic dilation step: propagate the max, never exceed the mask
            const T after = min( max( carry, before ), mask[idx] );
            modified |= before ^ after;
            carry = after;
            marker[idx] = after;
        }
    }
    if (modified != 0) *change = true;
}
// Backward (bottom-to-top) vertical pass of reconstruction by dilation.
// One thread per image column: a running maximum is carried up the column
// and clamped by the mask at every pixel.  Sets *change to true when any
// marker value was modified.
template <typename T>
__global__ void
iRec1DBackward_Y_dilation ( T* __restrict__ marker, const T* __restrict__ mask, const unsigned int sx, const unsigned int sy, bool* __restrict__ change )
{
    const int col = blockIdx.x * MAX_THREADS + threadIdx.x;
    unsigned int modified = 0;
    if (col < sx) {
        T carry = 0;
        for (int iy = sy - 1; iy >= 0; --iy) {
            const unsigned int idx = iy * sx + col;
            const T before = marker[idx];
            // geodesic dilation step: propagate the max, never exceed the mask
            const T after = min( max( carry, before ), mask[idx] );
            modified |= before ^ after;
            carry = after;
            marker[idx] = after;
        }
    }
    if (modified != 0) *change = true;
}
// Forward (top-to-bottom) vertical pass for 8-connectivity: like
// iRec1DForward_Y_dilation, but the value carried to the next row is the
// max over the three pixels above (left-diag, straight, right-diag),
// exchanged through shared memory with a halo element on each side.
// NOTE(review): the __syncthreads() calls sit inside the
// `bx + tx < sx` branch, so sx must be an exact multiple of MAX_THREADS
// (as the inline comment demands) or the barrier becomes divergent.
template <typename T>
__global__ void
iRec1DForward_Y_dilation_8 ( T* __restrict__ marker, const T* __restrict__ mask, const unsigned int sx, const unsigned int sy, bool* __restrict__ change )
{
    // best thing to do is to use linear arrays. each warp does a column of 32.
    // parallelize along x.
    const unsigned int tx = threadIdx.x;
    const unsigned int bx = blockIdx.x * MAX_THREADS;
    // MAX_THREADS row pixels plus one halo element on each side
    volatile __shared__ T s_marker_B[MAX_THREADS+2];
    // volatile T* s_marker = s_marker_B + 1;
    unsigned int s_change = 0;  // per-thread change accumulator
    int tx1 = tx + 1;  // shifted index into s_marker_B (slot 0 is the left halo)
    T s_new, s_old, s_prev;
    if ( bx + tx < sx ) {  // make sure number of threads is a divisor of sx.
        s_prev = 0;
        for (int iy = 0; iy < sy; ++iy) {
            // copy part of marker and mask to shared memory
            if (tx == 0) {
                // load the halo pixels (zero at the image border)
                s_marker_B[0] = (bx == 0) ? 0 : marker[iy*sx + bx - 1];
                s_marker_B[MAX_THREADS + 1] = (bx + MAX_THREADS >= sx) ? 0 : marker[iy*sx + bx + MAX_THREADS];
            }
            if (tx < WARP_SIZE) {
                // first warp, get extra stuff
                s_marker_B[tx1] = marker[iy*sx + bx + tx];
            }
            if (tx < MAX_THREADS - WARP_SIZE) {
                s_marker_B[tx1 + WARP_SIZE] = marker[iy*sx + bx + tx + WARP_SIZE];
            }
            __syncthreads();
            // perform iteration
            s_old = s_marker_B[tx1];
            s_new = min( max( s_prev, s_old ), mask[iy*sx + bx + tx]);
            s_change |= s_old ^ s_new;
            // output result back to global memory
            s_marker_B[tx1] = s_new;
            marker[iy*sx + bx + tx] = s_new;
            __syncthreads();
            // carry for the next row: max over the three diagonal/straight parents
            s_prev = max( max(s_marker_B[tx1-1], s_marker_B[tx1]), s_marker_B[tx1+1]);
        }
    }
    if (s_change != 0) *change = true;
}
// Backward (bottom-to-top) vertical pass for 8-connectivity: mirror of
// iRec1DForward_Y_dilation_8 — the carry for the next row up is the max
// over the three pixels below, exchanged through shared memory with a
// one-element halo on each side.
// NOTE(review): as in the forward variant, the __syncthreads() calls are
// inside the `bx + tx < sx` branch; sx must be an exact multiple of
// MAX_THREADS or the barrier becomes divergent.
template <typename T>
__global__ void
iRec1DBackward_Y_dilation_8 ( T* __restrict__ marker, const T* __restrict__ mask, const int sx, const int sy, bool* __restrict__ change )
{
    const int tx = threadIdx.x;
    const int bx = blockIdx.x * MAX_THREADS;
    // MAX_THREADS row pixels plus one halo element on each side
    volatile __shared__ T s_marker_B[MAX_THREADS+2];
    // volatile T* s_marker = s_marker_B + 1;
    unsigned int s_change = 0;  // per-thread change accumulator
    int tx1 = tx + 1;  // for accessing s_marker_B
    T s_new, s_old, s_prev;
    if ( bx + tx < sx ) {  //make sure number of threads is a divisor of sx.
        s_prev = 0;
        for (int iy = sy - 1; iy >= 0; --iy) {
            if (tx == 0) {
                // load the halo pixels (zero at the image border)
                s_marker_B[0] = (bx == 0) ? 0 : marker[iy*sx + bx - 1];
                s_marker_B[MAX_THREADS+1] = (bx + MAX_THREADS >= sx) ? 0 : marker[iy*sx + bx + MAX_THREADS];
            }
            if (tx < WARP_SIZE) {
                // first warp, get extra stuff
                s_marker_B[tx1] = marker[iy*sx + bx + tx];
            }
            if (tx < MAX_THREADS - WARP_SIZE) {
                s_marker_B[tx1 + WARP_SIZE] = marker[iy*sx + bx + tx + WARP_SIZE];
            }
            __syncthreads();
            // perform iteration
            s_old = s_marker_B[tx1];
            s_new = min( max( s_prev, s_old ), mask[iy*sx + bx + tx]);
            s_change |= s_old ^ s_new;
            // output result back to global memory
            s_marker_B[tx1] = s_new;
            marker[iy*sx + bx + tx] = s_new;
            __syncthreads();
            // carry for the next row up: max over the three parents below
            s_prev = max( max(s_marker_B[tx1-1], s_marker_B[tx1]), s_marker_B[tx1+1]);
        }
    }
    if (s_change != 0) *change = true;
}
// connectivity: if 8 conn, need to have border.
// Host driver for iterative grayscale morphological reconstruction by
// dilation on an sx-by-sy image (row stride sx).  Repeats a four-sweep
// cycle (forward X, forward Y, backward X, backward Y) until a device-side
// change flag stays false or 100000 iterations elapse, then returns the
// number of iterations performed.  The Y sweeps use the 8-connectivity
// kernels when connectivity == 8, the 4-connectivity ones otherwise.
// NOTE(review): CUDA API return codes (cudaMalloc/cudaMemcpy/launches) are
// not checked, and the trailing cudaGetLastError() discards its result —
// failures would go unnoticed by callers.
template <typename T>
unsigned int imreconstructIntCaller(T* __restrict__ marker, const T* __restrict__ mask, const int sx, const int sy,
const int connectivity, cudaStream_t stream) {
    //, unsigned char*h_markerFistPass) {
    // here because we are not using streams inside.
    // if (stream == 0) cudaDeviceSynchronize();
    // else  cudaStreamSynchronize(stream);
    // printf("entering imrecon int caller with conn=%d\n", connectivity);
    // setup execution parameters
    bool conn8 = (connectivity == 8);
    // X sweeps: one block per band of XY_THREADS rows
    dim3 threadsx( XX_THREADS, XY_THREADS );
    dim3 blocksx( (sy + threadsx.y - 1) / threadsx.y );
    // dim3 threadsx2( Y_THREADS );
    // dim3 blocksx2( (sy + threadsx2.y - 1) / threadsx2.y );
    // Y sweeps: one thread per column
    dim3 threadsy( MAX_THREADS );
    dim3 blocksy( (sx + threadsy.x - 1) / threadsy.x );
    // dim3 threadsy2( YX_THREADS, YY_THREADS );
    // dim3 blocksy2( (sx + threadsy2.x - 1) / threadsy2.x, (sy + threadsy2.y - 1) / threadsy2.y );
    // size_t Nsy = (threadsy.x * 3 + 2) * sizeof(uchar4);
    // stability detection
    unsigned int iter = 0;
    bool *h_change, *d_change;
    h_change = (bool*) malloc( sizeof(bool) );
    cudaMalloc( (void**) &d_change, sizeof(bool) ) ;
    *h_change = true;
    // printf("completed setup for imrecon int caller \n");
    //long t1, t2;
    if (conn8) {
        while ( (*h_change) && (iter < 100000) )  // repeat until stability
        {
            // t1 = ClockGetTime();
            iter++;
            *h_change = false;
            // reset the device-side change flag before the sweeps
            init_change<<< 1, 1, 0, stream>>>( d_change );
            // forward pass along the X axis
            iRec1DForward_X_dilation <<< blocksx, threadsx, 0, stream >>> ( marker, mask, sx, sy, d_change );
            // iRec1DForward_X_dilation2<<< blocksx2, threadsx2, 0, stream >>> ( marker, mask, sx, sy, d_change );
            // forward pass along the Y axis
            iRec1DForward_Y_dilation_8<<< blocksy, threadsy, 0, stream >>> ( marker, mask, sx, sy, d_change );
            // backward pass along the X axis
            iRec1DBackward_X_dilation<<< blocksx, threadsx, 0, stream >>> ( marker, mask, sx, sy, d_change );
            // iRec1DBackward_X_dilation2<<< blocksx2, threadsx2, 0, stream >>> ( marker, mask, sx, sy, d_change );
            // backward pass along the Y axis
            iRec1DBackward_Y_dilation_8<<< blocksy, threadsy, 0, stream >>> ( marker, mask, sx, sy, d_change );
            if (stream == 0) cudaDeviceSynchronize();
            else  cudaStreamSynchronize(stream);
            // printf("%d sync \n", iter);
            cudaMemcpy( h_change, d_change, sizeof(bool), cudaMemcpyDeviceToHost ) ;
            // printf("%d read flag : value %s\n", iter, (*h_change ? "true" : "false"));
            // t2 = ClockGetTime();
            //
            // if (iter == 1) {
            //
            // cudaMemcpy( h_markerFistPass, marker, sizeof(unsigned char) * sx * sy, cudaMemcpyDeviceToHost ) ;
            // printf("first pass 8conn == scan, %lu ms\n", t2-t1);
            // }
        }
    } else {
        while ( (*h_change) && (iter < 100000) )  // repeat until stability
        {
            // t1 = ClockGetTime();
            iter++;
            *h_change = false;
            // reset the device-side change flag before the sweeps
            init_change<<< 1, 1, 0, stream>>>( d_change );
            // forward pass along the X axis
            iRec1DForward_X_dilation <<< blocksx, threadsx, 0, stream >>> ( marker, mask, sx, sy, d_change );
            // iRec1DForward_X_dilation2<<< blocksx2, threadsx2, 0, stream >>> ( marker, mask, sx, sy, d_change );
            // forward pass along the Y axis
            iRec1DForward_Y_dilation <<< blocksy, threadsy, 0, stream >>> ( marker, mask, sx, sy, d_change );
            // backward pass along the X axis
            iRec1DBackward_X_dilation<<< blocksx, threadsx, 0, stream >>> ( marker, mask, sx, sy, d_change );
            // iRec1DBackward_X_dilation2<<< blocksx2, threadsx2, 0, stream >>> ( marker, mask, sx, sy, d_change );
            // backward pass along the Y axis
            iRec1DBackward_Y_dilation<<< blocksy, threadsy, 0, stream >>> ( marker, mask, sx, sy, d_change );
            if (stream == 0) cudaDeviceSynchronize();
            else  cudaStreamSynchronize(stream);
            // printf("%d sync \n", iter);
            cudaMemcpy( h_change, d_change, sizeof(bool), cudaMemcpyDeviceToHost ) ;
            // printf("%d read flag : value %s\n", iter, (*h_change ? "true" : "false"));
            // t2 = ClockGetTime();
            // if (iter == 1) {
            // //   cudaMemcpy( h_markerFistPass, marker, sizeof(unsigned char) * sx * sy, cudaMemcpyDeviceToDevice ) ;
            // printf("first pass 4conn == scan, %lu ms\n", t2-t1);
            //// break;
            // }
        }
    }
    cudaFree(d_change) ;
    free(h_change);
    // printf("Number of iterations: %d\n", iter);
    cudaGetLastError();
    return iter;
}
//
//__device__ bool checkCandidateNeighbor4(unsigned char *marker, const unsigned char *mask, int x, int y, int ncols, int nrows,unsigned char pval){
// bool isCandidate = false;
// int index = 0;
//
// unsigned char markerXYval;
// unsigned char maskXYval;
// if(x < (ncols-1)){
// // check right pixel
// index = y * ncols + (x+1);
//
// markerXYval = marker[index];
// maskXYval = mask[index];
// if( (markerXYval < min(pval, maskXYval)) ){
// isCandidate = true;
// }
// }
//
// if(y < (nrows-1)){
// // check pixel bellow current
// index = (y+1) * ncols + x;
//
// markerXYval = marker[index];
// maskXYval = mask[index];
// if( (markerXYval < min(pval,maskXYval)) ){
// isCandidate = true;
// }
// }
//
// // check left pixel
// if(x > 0){
// index = y * ncols + (x-1);
//
// markerXYval = marker[index];
// maskXYval = mask[index];
// if( (markerXYval < min(pval,maskXYval)) ){
// isCandidate = true;
// }
// }
//
// if(y > 0){
// // check up pixel
// index = (y-1) * ncols + x;
//
// markerXYval = marker[index];
// maskXYval = mask[index];
// if( (markerXYval < min(pval,maskXYval)) ){
// isCandidate = true;
// }
// }
// return isCandidate;
//}
//
//__device__ bool checkCandidateNeighbor8(unsigned char *marker, const unsigned char *mask, int x, int y, int ncols, int nrows,unsigned char pval){
// int index = 0;
// bool isCandidate = checkCandidateNeighbor4(marker, mask, x, y, ncols, nrows, pval);
//// if(threadIdx.x == 0 && threadIdx.y == 0 && blockIdx.x == 0 && blockIdx.y == 0){
//// printf("checkCandidateNeighbor8\n");
//// }
//
// unsigned char markerXYval;
// unsigned char maskXYval;
//
// // check up right corner
// if(x < (ncols-1) && y > 0){
// // check right pixel
// index = (y-1) * ncols + (x+1);
//
// markerXYval = marker[index];
// maskXYval = mask[index];
// if( (markerXYval < min(pval, maskXYval)) ){
// isCandidate = true;
// }
// }
//
// // check up left corner
// if(x> 0 && y > 0){
// // check pixel bellow current
// index = (y-1) * ncols + (x-1);
//
// markerXYval = marker[index];
// maskXYval = mask[index];
// if( (markerXYval < min(pval,maskXYval)) ){
// isCandidate = true;
// }
// }
//
// // check bottom left pixel
// if(x > 0 && y < (nrows-1)){
// index = (y+1) * ncols + (x-1);
//
// markerXYval = marker[index];
// maskXYval = mask[index];
// if( (markerXYval < min(pval,maskXYval)) ){
// isCandidate = true;
// }
// }
//
// // check bottom right
// if(x < (ncols-1) && y < (nrows-1)){
// index = (y+1) * ncols + (x+1);
//
// markerXYval = marker[index];
// maskXYval = mask[index];
// if( (markerXYval < min(pval,maskXYval)) ){
// isCandidate = true;
// }
// }
// return isCandidate;
//}
//
//
//__global__ void initQueuePixels(unsigned char *marker, const unsigned char *mask, int sx, int sy, bool conn8, int *d_queue, int *d_queue_size){
// int x = blockIdx.x * blockDim.x + threadIdx.x;
// int y = blockIdx.y * blockDim.y + threadIdx.y;
//
// // if it is inside image without right/bottom borders
// if(y < (sy) && x < (sx)){
// int input_index = y * sy + x;
// unsigned char pval = marker[input_index];
// bool isCandidate = false;
// if(conn8){
// // connectivity 8
// isCandidate = checkCandidateNeighbor8(marker, mask, x, y, sx, sy, pval);
// }else{
// // connectivity 4
// isCandidate = checkCandidateNeighbor4(marker, mask, x, y, sx, sy, pval);
// }
// if(isCandidate){
// int queuePos = atomicAdd((unsigned int*)d_queue_size, 1);
// d_queue[queuePos] = input_index;
// }
// }
//}
//
// Returns true when any of the four axis-aligned neighbours of (x, y) is a
// propagation candidate, i.e. its marker value is still below
// min(pval, its own mask value), meaning this pixel could raise it.
// Image is row-major with row stride ncols; border neighbours are skipped.
template <typename T>
__device__ bool checkCandidateNeighbor4(T *marker, const T *mask, int x, int y, int ncols, int nrows, T pval){
    bool candidate = false;
    int idx;
    // right neighbour
    if(x < (ncols-1)){
        idx = y * ncols + (x+1);
        candidate |= (marker[idx] < min(pval, mask[idx]));
    }
    // neighbour below
    if(y < (nrows-1)){
        idx = (y+1) * ncols + x;
        candidate |= (marker[idx] < min(pval, mask[idx]));
    }
    // left neighbour
    if(x > 0){
        idx = y * ncols + (x-1);
        candidate |= (marker[idx] < min(pval, mask[idx]));
    }
    // neighbour above
    if(y > 0){
        idx = (y-1) * ncols + x;
        candidate |= (marker[idx] < min(pval, mask[idx]));
    }
    return candidate;
}
// 8-connectivity candidate test: the four axis-aligned neighbours (via
// checkCandidateNeighbor4) plus the four diagonals.  A neighbour is a
// candidate when its marker value is still below min(pval, its mask value).
// Image is row-major with row stride ncols; border neighbours are skipped.
template <typename T>
__device__ bool checkCandidateNeighbor8(T *marker, const T *mask, int x, int y, int ncols, int nrows, T pval){
    bool candidate = checkCandidateNeighbor4(marker, mask, x, y, ncols, nrows, pval);
    int idx;
    // upper-right diagonal
    if(x < (ncols-1) && y > 0){
        idx = (y-1) * ncols + (x+1);
        candidate |= (marker[idx] < min(pval, mask[idx]));
    }
    // upper-left diagonal
    if(x > 0 && y > 0){
        idx = (y-1) * ncols + (x-1);
        candidate |= (marker[idx] < min(pval, mask[idx]));
    }
    // lower-left diagonal
    if(x > 0 && y < (nrows-1)){
        idx = (y+1) * ncols + (x-1);
        candidate |= (marker[idx] < min(pval, mask[idx]));
    }
    // lower-right diagonal
    if(x < (ncols-1) && y < (nrows-1)){
        idx = (y+1) * ncols + (x+1);
        candidate |= (marker[idx] < min(pval, mask[idx]));
    }
    return candidate;
}
// One thread per pixel: append to d_queue the linear index of every pixel
// that has at least one neighbour whose marker value could still be raised
// (a propagation candidate), using 8- or 4-connectivity per `conn8`.
// d_queue must have room for sx*sy entries and *d_queue_size must be zero
// on entry; the queue is filled via an atomic counter, so entry order is
// nondeterministic.
template <typename T>
__global__ void initQueuePixels(T *marker, const T *mask, int sx, int sy, bool conn8, int *d_queue, int *d_queue_size){
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    // if it is inside image without right/bottom borders
    if(y < (sy) && x < (sx)){
        // BUG FIX: the row stride of a row-major sx-by-sy image is sx (the
        // column count), not sy.  The previous "y * sy + x" read pval from
        // the wrong pixel and queued the wrong index for non-square images,
        // inconsistent with the "y * ncols + x" indexing used by the
        // checkCandidateNeighbor helpers and every scan kernel in this file.
        int input_index = y * sx + x;
        T pval = marker[input_index];
        bool isCandidate = false;
        if(conn8){
            // connectivity 8
            isCandidate = checkCandidateNeighbor8(marker, mask, x, y, sx, sy, pval);
        }else{
            // connectivity 4
            isCandidate = checkCandidateNeighbor4(marker, mask, x, y, sx, sy, pval);
        }
        if(isCandidate){
            // reserve one slot; cast matches atomicAdd's unsigned overload
            int queuePos = atomicAdd((unsigned int*)d_queue_size, 1);
            d_queue[queuePos] = input_index;
        }
    }
}
// connectivity: if 8 conn, need to have border.
// Runs a fixed number (`num_iterations`) of four-sweep reconstruction
// cycles, then scans the partially reconstructed image to build a compact
// device queue of candidate pixel indices for a subsequent queue-based
// phase.  Returns the device pointer to the compact queue (caller owns and
// must cudaFree it) and reports its length through `queueSize`.
// NOTE(review): d_change is allocated and written by the kernels but never
// read or initialized here — it only serves as a dummy flag.  CUDA API
// return codes are not checked.
template <typename T> int *imreconstructIntCallerBuildQueue(T* __restrict__ marker, const T* __restrict__ mask, const int sx, const int sy, const int connectivity, int &queueSize, int num_iterations, cudaStream_t stream) {
    // setup execution parameters
    bool conn8 = (connectivity == 8);
    // X sweeps: one block per band of XY_THREADS rows; Y sweeps: one thread per column
    dim3 threadsx( XX_THREADS, XY_THREADS );
    dim3 blocksx( (sy + threadsx.y - 1) / threadsx.y );
    dim3 threadsy( MAX_THREADS );
    dim3 blocksy( (sx + threadsy.x - 1) / threadsy.x );
    bool *d_change;
    cudaMalloc( (void**) &d_change, sizeof(bool) ) ;
    // alloc it with the same size as the input image.
    int *d_queue = NULL;
    cudaMalloc( (void**) &d_queue, sizeof(int) * sx * sy ) ;
    int *d_queue_size;
    cudaMalloc( (void**) &d_queue_size, sizeof(int)) ;
    cudaMemset( (void*) d_queue_size, 0, sizeof(int)) ;
    // stability detection
    unsigned int iter = 0;
    //long t1, t2, t3;
    // t1 = ClockGetTime();
    if (conn8) {
        while(iter < num_iterations){
            iter++;
            // forward pass along the X axis
            iRec1DForward_X_dilation <<< blocksx, threadsx, 0, stream >>> ( marker, mask, sx, sy, d_change );
            // forward pass along the Y axis
            iRec1DForward_Y_dilation_8<<< blocksy, threadsy, 0, stream >>> ( marker, mask, sx, sy, d_change );
            // backward pass along the X axis
            iRec1DBackward_X_dilation<<< blocksx, threadsx, 0, stream >>> ( marker, mask, sx, sy, d_change );
            // backward pass along the Y axis
            iRec1DBackward_Y_dilation_8<<< blocksy, threadsy, 0, stream >>> ( marker, mask, sx, sy, d_change );
            if (stream == 0) cudaDeviceSynchronize();
            else  cudaStreamSynchronize(stream);
        }
    } else {
        while(iter < num_iterations){
            iter++;
            // forward pass along the X axis
            iRec1DForward_X_dilation <<< blocksx, threadsx, 0, stream >>> ( marker, mask, sx, sy, d_change );
            // forward pass along the Y axis
            iRec1DForward_Y_dilation <<< blocksy, threadsy, 0, stream >>> ( marker, mask, sx, sy, d_change );
            // backward pass along the X axis
            iRec1DBackward_X_dilation<<< blocksx, threadsx, 0, stream >>> ( marker, mask, sx, sy, d_change );
            // backward pass along the Y axis
            iRec1DBackward_Y_dilation<<< blocksy, threadsy, 0, stream >>> ( marker, mask, sx, sy, d_change );
            if (stream == 0) cudaDeviceSynchronize();
            else  cudaStreamSynchronize(stream);
        }
    }
    // t2 = ClockGetTime();
    // printf("first pass 4conn == scan, %lu ms\n", t2-t1);
    cudaFree(d_change) ;
    // This is now a per pixel operation where we build the
    // first queue of pixels that may propagate their values.
    // Creating a single thread per-pixel in the input image
    dim3 threads(16, 16);
    dim3 grid((sx + threads.x - 1) / threads.x, (sy + threads.y - 1) / threads.y);
    //
    initQueuePixels<<< grid, threads, 0, stream >>>(marker, mask, sx, sy, conn8, d_queue, d_queue_size);
    if (stream == 0) cudaDeviceSynchronize();
    else  cudaStreamSynchronize(stream);
    int h_compact_queue_size;
    cudaMemcpy( &h_compact_queue_size, d_queue_size, sizeof(int), cudaMemcpyDeviceToHost ) ;
    //t3 = ClockGetTime();
    // printf(" compactQueueSize %d, time to generate %lu ms\n", h_compact_queue_size, t3-t2);
    int *d_queue_fit = NULL;
    // alloc current size +1000 (magic number)
    // (x2 to leave headroom for growth in the queue-based propagation phase)
    cudaMalloc( (void**) &d_queue_fit, sizeof(int) * (h_compact_queue_size+1000)*2 ) ;
    // Copy content of the d_queue (which has the size of the image x*y) to a more compact for (d_queue_fit).
    // This should save a lot of memory, since the compact queue is usually much smaller than the image size
    cudaMemcpy( d_queue_fit, d_queue, sizeof(int) * h_compact_queue_size, cudaMemcpyDeviceToDevice ) ;
    // This is the int containing the size of the queue
    cudaFree(d_queue_size) ;
    // Cleanup the temporary memory use to store the queue
    cudaFree(d_queue) ;
    queueSize = h_compact_queue_size;
    return d_queue_fit;
}
template unsigned int imreconstructIntCaller<unsigned char>(unsigned char*, const unsigned char*, const int, const int,
const int, cudaStream_t);
template unsigned int imreconstructIntCaller<int>(int*, const int*, const int, const int,
const int, cudaStream_t);
//,unsigned char*h_markerFistPass );
template int *imreconstructIntCallerBuildQueue<unsigned char>(unsigned char*, const unsigned char*, const int, const int, const int, int&, int, cudaStream_t);
template int *imreconstructIntCallerBuildQueue<int>(int*, const int*, const int, const int, const int, int&, int, cudaStream_t);
}}
|
34a2680c5f928fdb8fa9cc39a2c3d7ec9b2bcfd3.hip | // !!! This is a file automatically generated by hipify!!!
#include "util.h"
#include "data.h"
#include "spmm.h"
#include "sample.h"
#include "aggr_gcn.h"
#include "dense.h"
// Symbolic indices into the `ptr` vector of device buffers allocated in
// main(); the order matches the `sizes` vector there.
enum PTRS
{
    x,
    y,
    y2,
    val,
    weight,
    transformed,
    transformed2,
    tmp
};
// Benchmark driver (HIP build): loads a graph in CSR form, fills feature /
// weight buffers with random normals, then times `times` repetitions of
// (a) the baseline pipeline — GCN aggregation followed by a separate dense
// transform — against (b) the fused aggregation+NN kernel (run_with_nn).
// Average per-iteration durations are printed via dbg().
int main(int argc, char ** argv)
{
    const int times = 10;  // repetitions per timed section
    argParse(argc, argv);
    const int out_feature_len = outfea;
    assert(out_feature_len > 0);
    assert(GPUNUM == 1);  // single-GPU benchmark only
    // RNG for initializing device buffers (fixed seed for reproducibility)
    hiprandGenerator_t hiprand;
    hiprandCreateGenerator(&hiprand, HIPRAND_RNG_PSEUDO_DEFAULT);
    hiprandSetPseudoRandomGeneratorSeed(hiprand, 123ULL);
    hipblasCreate(&cublasHs[0]);
    // load CSR graph (n nodes, m edges) into host arrays, then upload
    int* tmp1 = NULL;
    int* tmp2 = NULL;
    load_graph(inputgraph, n, m, tmp1, tmp2);
    gptrs = new int*[1];
    gidxs = new int*[1];
    checkCudaErrors(cudaMalloc2((void**)gptrs, (n + 1) * sizeof(int)));
    checkCudaErrors(cudaMalloc2((void**)gidxs, m * sizeof(int)));
    checkCudaErrors(hipMemcpy(gptrs[0], tmp1, sizeof(int) * (n + 1), hipMemcpyHostToDevice));
    checkCudaErrors(hipMemcpy(gidxs[0], tmp2, sizeof(int) * m, hipMemcpyHostToDevice));
    vector<float*> ptr;
    vector<double> p_time(100, 10);
    // buffer sizes, indexed by the PTRS enum (x, y, y2, val, weight, ...)
    vector<int> sizes =
    {
        n * feature_len,
        n * out_feature_len,
        n * out_feature_len,
        m,
        feature_len * out_feature_len,
        n * out_feature_len,
        n * out_feature_len,
        n * out_feature_len,
    };
    // allocate each device buffer and fill it with N(0, 1) random values
    for(auto item : sizes)
    {
        float* tmp = NULL;
        checkCudaErrors(cudaMalloc2((void**)&tmp, sizeof(float) * item));
        hiprandGenerateNormal(hiprand, tmp, item, 0.f, 1.00);
        ptr.push_back(tmp);
    }
    int NEIGHBOR_NUM = 16;
    if(NEINUM != -1) NEIGHBOR_NUM = NEINUM;  // command-line override
    int BLOCK_SIZE = 128;
    dbg(NEIGHBOR_NUM);
    dbg(BLOCK_SIZE);
    auto g = fullGraph(gptrs[0], gidxs[0]);
    Aggregator_GCN * atgcn = new Aggregator_GCN(g, feature_len, feature_len, ptr[val]);
    int tmparr[] = {NEIGHBOR_NUM};
    atgcn->schedule(neighbor_grouping, tmparr);
    // warm up
    for(int i = 0; i < times; ++i)
    {
        atgcn->run(ptr[x], ptr[y], BLOCK_SIZE, 0);
        checkCudaErrors(hipDeviceSynchronize());
    }
    // base implementation
    checkCudaErrors(hipDeviceSynchronize());
    timestamp(t_base0);
    for(int i = 0; i < times; ++i)
    {
        // aggregate, then dense transform as two separate steps
        atgcn->run(ptr[x], ptr[y2], BLOCK_SIZE, 1);
        matmul_NN(ptr[y2], ptr[weight], ptr[transformed2], n, out_feature_len, feature_len, ptr[tmp]);
        checkCudaErrors(hipDeviceSynchronize());
    }
    timestamp(t_base1);
    dbg(getDuration(t_base0, t_base1) / times);
    timestamp(t_linear0);
    for(int i = 0; i < times; ++i)
    {
        // fused aggregation + transform
        atgcn->run_with_nn(ptr[x], ptr[y], ptr[weight], ptr[transformed], BLOCK_SIZE);
        checkCudaErrors(hipDeviceSynchronize());
    }
    timestamp(t_linear1);
    dbg(getDuration(t_linear0, t_linear1) / times);
}
| 34a2680c5f928fdb8fa9cc39a2c3d7ec9b2bcfd3.cu | #include "util.h"
#include "data.h"
#include "spmm.h"
#include "sample.h"
#include "aggr_gcn.h"
#include "dense.h"
// Symbolic indices into the `ptr` vector of device buffers allocated in
// main(); the order matches the `sizes` vector there.
enum PTRS
{
    x,
    y,
    y2,
    val,
    weight,
    transformed,
    transformed2,
    tmp
};
// Benchmark driver (CUDA build): loads a graph in CSR form, fills feature /
// weight buffers with random normals, then times `times` repetitions of
// (a) the baseline pipeline — GCN aggregation followed by a separate dense
// transform — against (b) the fused aggregation+NN kernel (run_with_nn).
// Average per-iteration durations are printed via dbg().
int main(int argc, char ** argv)
{
    const int times = 10;  // repetitions per timed section
    argParse(argc, argv);
    const int out_feature_len = outfea;
    assert(out_feature_len > 0);
    assert(GPUNUM == 1);  // single-GPU benchmark only
    // RNG for initializing device buffers (fixed seed for reproducibility)
    curandGenerator_t curand;
    curandCreateGenerator(&curand, CURAND_RNG_PSEUDO_DEFAULT);
    curandSetPseudoRandomGeneratorSeed(curand, 123ULL);
    cublasCreate(&cublasHs[0]);
    // load CSR graph (n nodes, m edges) into host arrays, then upload
    int* tmp1 = NULL;
    int* tmp2 = NULL;
    load_graph(inputgraph, n, m, tmp1, tmp2);
    gptrs = new int*[1];
    gidxs = new int*[1];
    checkCudaErrors(cudaMalloc2((void**)gptrs, (n + 1) * sizeof(int)));
    checkCudaErrors(cudaMalloc2((void**)gidxs, m * sizeof(int)));
    checkCudaErrors(cudaMemcpy(gptrs[0], tmp1, sizeof(int) * (n + 1), cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy(gidxs[0], tmp2, sizeof(int) * m, cudaMemcpyHostToDevice));
    vector<float*> ptr;
    vector<double> p_time(100, 10);
    // buffer sizes, indexed by the PTRS enum (x, y, y2, val, weight, ...)
    vector<int> sizes =
    {
        n * feature_len,
        n * out_feature_len,
        n * out_feature_len,
        m,
        feature_len * out_feature_len,
        n * out_feature_len,
        n * out_feature_len,
        n * out_feature_len,
    };
    // allocate each device buffer and fill it with N(0, 1) random values
    for(auto item : sizes)
    {
        float* tmp = NULL;
        checkCudaErrors(cudaMalloc2((void**)&tmp, sizeof(float) * item));
        curandGenerateNormal(curand, tmp, item, 0.f, 1.00);
        ptr.push_back(tmp);
    }
    int NEIGHBOR_NUM = 16;
    if(NEINUM != -1) NEIGHBOR_NUM = NEINUM;  // command-line override
    int BLOCK_SIZE = 128;
    dbg(NEIGHBOR_NUM);
    dbg(BLOCK_SIZE);
    auto g = fullGraph(gptrs[0], gidxs[0]);
    Aggregator_GCN * atgcn = new Aggregator_GCN(g, feature_len, feature_len, ptr[val]);
    int tmparr[] = {NEIGHBOR_NUM};
    atgcn->schedule(neighbor_grouping, tmparr);
    // warm up
    for(int i = 0; i < times; ++i)
    {
        atgcn->run(ptr[x], ptr[y], BLOCK_SIZE, 0);
        checkCudaErrors(cudaDeviceSynchronize());
    }
    // base implementation
    checkCudaErrors(cudaDeviceSynchronize());
    timestamp(t_base0);
    for(int i = 0; i < times; ++i)
    {
        // aggregate, then dense transform as two separate steps
        atgcn->run(ptr[x], ptr[y2], BLOCK_SIZE, 1);
        matmul_NN(ptr[y2], ptr[weight], ptr[transformed2], n, out_feature_len, feature_len, ptr[tmp]);
        checkCudaErrors(cudaDeviceSynchronize());
    }
    timestamp(t_base1);
    dbg(getDuration(t_base0, t_base1) / times);
    timestamp(t_linear0);
    for(int i = 0; i < times; ++i)
    {
        // fused aggregation + transform
        atgcn->run_with_nn(ptr[x], ptr[y], ptr[weight], ptr[transformed], BLOCK_SIZE);
        checkCudaErrors(cudaDeviceSynchronize());
    }
    timestamp(t_linear1);
    dbg(getDuration(t_linear0, t_linear1) / times);
}
|
577408a17a621c72c7493ccaea347cde705424fc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
//resolvi usar o mesmo tamanho de "bloco" que foi usado no exemplo do slide (slide 68, "CudaBasics")
#define TILE_WIDTH 16
// Element-wise addition C = A + B of a (*rows) x (*cols) row-major matrix.
// One thread per element, launched on a 2-D grid of TILE_WIDTH x TILE_WIDTH
// blocks; rows/cols are passed as device pointers.
__global__ void addMatrix(int *A, int *B,int *C, int *rows, int *cols){
    // compute this thread's column and row in the matrix
    int col = (blockDim.x * blockIdx.x) + threadIdx.x;
    int row = (blockDim.y * blockIdx.y) + threadIdx.y;
    /*
     * Since a matrix dimension may not be divisible by the block size
     * (TILE_WIDTH = 16), some threads in edge blocks map to no element;
     * this guard keeps them from touching out-of-bounds memory.
     * */
    if(col < (*cols) && row < (*rows)){
        /*
         * The expression
         *
         *     (row * (*cols)) + col
         *
         * computes the linear (row-major) index of the element.
         * */
        C[ (row * (*cols)) + col] = A[ (row * (*cols)) + col] + B[ (row * (*cols)) + col] ;
    }
}
// Reads matrix dimensions from stdin, builds two matrices with A[i][j] =
// B[i][j] = i+j, adds them on the GPU (HIP build), then sums the result on
// the CPU and prints the total.
// NOTE(review): malloc and all HIP API return values are unchecked here.
int main()
{
    // host copies
    int *A, *B, *C;
    // device copies
    int *D_A, *D_B, *D_C;
    int *D_ROWS, *D_COLS;
    int i, j;
    // input: matrix dimensions
    int linhas, colunas;
    scanf("%d", &linhas);
    scanf("%d", &colunas);
    int size = sizeof(int) * linhas * colunas;
    // allocate host memory
    A = (int *)malloc(size);
    B = (int *)malloc(size);
    C = (int *)malloc(size);
    // allocate device memory
    hipMalloc( (void **)&D_A, size);
    hipMalloc( (void **)&D_B, size);
    hipMalloc( (void **)&D_C, size);
    hipMalloc( (void **)&D_ROWS, sizeof(int));
    hipMalloc( (void **)&D_COLS, sizeof(int));
    // initialize inputs: A[i][j] = B[i][j] = i + j
    for(i = 0; i < linhas; i++){
        for(j = 0; j < colunas; j++){
            A[i*colunas+j] = B[i*colunas+j] = i+j;
        }
    }
    // send the input data to the GPU
    hipMemcpy(D_A, A, size, hipMemcpyHostToDevice);
    hipMemcpy(D_B, B, size, hipMemcpyHostToDevice);
    hipMemcpy(D_ROWS, &linhas, sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(D_COLS, &colunas, sizeof(int), hipMemcpyHostToDevice);
    // 2-D launch configuration: one TILE_WIDTH x TILE_WIDTH block per tile,
    // rounding the grid up to cover the whole matrix
    dim3 numeroDeBlocosNaMatriz( ceil((float)colunas/TILE_WIDTH),ceil((float)linhas/TILE_WIDTH), 1 );
    dim3 numeroDeThreadsPorBloco( TILE_WIDTH,TILE_WIDTH, 1 );
    // run the matrix addition on the GPU
    hipLaunchKernelGGL(( addMatrix), dim3(numeroDeBlocosNaMatriz),dim3(numeroDeThreadsPorBloco), 0, 0, D_A,D_B,D_C, D_ROWS, D_COLS);
    // copy the result back to the host (hipMemcpy blocks until the kernel finishes)
    hipMemcpy(C, D_C, size, hipMemcpyDeviceToHost);
    long long int somador=0;
    // keep this reduction on the CPU
    for(i = 0; i < linhas; i++){
        for(j = 0; j < colunas; j++){
            somador+=C[i*colunas+j];
        }
    }
    printf("%lli\n", somador);
    free(A);
    free(B);
    free(C);
    // free device memory
    hipFree(D_A);
    hipFree(D_B);
    hipFree(D_C);
    hipFree(D_ROWS);
    hipFree(D_COLS);
    return 0;
}
| 577408a17a621c72c7493ccaea347cde705424fc.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
//resolvi usar o mesmo tamanho de "bloco" que foi usado no exemplo do slide (slide 68, "CudaBasics")
#define TILE_WIDTH 16
// Element-wise addition C = A + B of a (*rows) x (*cols) row-major matrix.
// One thread per element on a 2-D grid; threads mapped past the matrix edge
// (the grid is rounded up to TILE_WIDTH multiples) exit early, so no
// out-of-bounds access occurs.  rows/cols are passed as device pointers.
__global__ void addMatrix(int *A, int *B,int *C, int *rows, int *cols){
    const int c = blockDim.x * blockIdx.x + threadIdx.x;
    const int r = blockDim.y * blockIdx.y + threadIdx.y;
    const int w = *cols;
    // guard: a matrix dimension may not divide evenly by the block size
    if (r >= *rows || c >= w) {
        return;
    }
    // linear row-major index of this thread's element
    const int idx = r * w + c;
    C[idx] = A[idx] + B[idx];
}
// Reads matrix dimensions from stdin, builds two matrices with A[i][j] =
// B[i][j] = i+j, adds them on the GPU (CUDA build), then sums the result on
// the CPU and prints the total.
// NOTE(review): malloc and all CUDA API return values are unchecked here.
int main()
{
    // host copies
    int *A, *B, *C;
    // device copies
    int *D_A, *D_B, *D_C;
    int *D_ROWS, *D_COLS;
    int i, j;
    // input: matrix dimensions
    int linhas, colunas;
    scanf("%d", &linhas);
    scanf("%d", &colunas);
    int size = sizeof(int) * linhas * colunas;
    // allocate host memory
    A = (int *)malloc(size);
    B = (int *)malloc(size);
    C = (int *)malloc(size);
    // allocate device memory
    cudaMalloc( (void **)&D_A, size);
    cudaMalloc( (void **)&D_B, size);
    cudaMalloc( (void **)&D_C, size);
    cudaMalloc( (void **)&D_ROWS, sizeof(int));
    cudaMalloc( (void **)&D_COLS, sizeof(int));
    // initialize inputs: A[i][j] = B[i][j] = i + j
    for(i = 0; i < linhas; i++){
        for(j = 0; j < colunas; j++){
            A[i*colunas+j] = B[i*colunas+j] = i+j;
        }
    }
    // send the input data to the GPU
    cudaMemcpy(D_A, A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(D_B, B, size, cudaMemcpyHostToDevice);
    cudaMemcpy(D_ROWS, &linhas, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(D_COLS, &colunas, sizeof(int), cudaMemcpyHostToDevice);
    // 2-D launch configuration: one TILE_WIDTH x TILE_WIDTH block per tile,
    // rounding the grid up to cover the whole matrix
    dim3 numeroDeBlocosNaMatriz( ceil((float)colunas/TILE_WIDTH),ceil((float)linhas/TILE_WIDTH), 1 );
    dim3 numeroDeThreadsPorBloco( TILE_WIDTH,TILE_WIDTH, 1 );
    // run the matrix addition on the GPU
    addMatrix<<<numeroDeBlocosNaMatriz,numeroDeThreadsPorBloco>>>(D_A,D_B,D_C, D_ROWS, D_COLS);
    // copy the result back to the host (cudaMemcpy blocks until the kernel finishes)
    cudaMemcpy(C, D_C, size, cudaMemcpyDeviceToHost);
    long long int somador=0;
    // keep this reduction on the CPU
    for(i = 0; i < linhas; i++){
        for(j = 0; j < colunas; j++){
            somador+=C[i*colunas+j];
        }
    }
    printf("%lli\n", somador);
    free(A);
    free(B);
    free(C);
    // free device memory
    cudaFree(D_A);
    cudaFree(D_B);
    cudaFree(D_C);
    cudaFree(D_ROWS);
    cudaFree(D_COLS);
    return 0;
}
|
cd9838fbcf1000e326e307592963bb5586f3ef29.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include <doctest.h>
#include <taskflow/taskflow.hpp>
// ----------------------------------------------------------------------------
// kernel helper
// ----------------------------------------------------------------------------
// Fill ptr[0..N) with `value`, one element per thread.
// Launch with at least N total threads; the bound check guards the padded tail.
template <typename T>
__global__ void k_set(T* ptr, size_t N, T value) {
// Flat 1D global index.
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < N) {
ptr[i] = value;
}
}
// Write `value` into the single element ptr[i]. No bounds check; every
// launched thread performs the same store.
template <typename T>
__global__ void k_single_set(T* ptr, int i, T value) {
ptr[i] = value;
}
// Element-wise ptr[i] += value for i in [0, N), one element per thread.
// Launch with at least N total threads; the bound check guards the padded tail.
template <typename T>
__global__ void k_add(T* ptr, size_t N, T value) {
// Flat 1D global index.
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < N) {
ptr[i] += value;
}
}
// Add `value` to the single element ptr[i]. No bounds check; every launched
// thread performs the same read-modify-write.
template <typename T>
__global__ void k_single_add(T* ptr, int i, T value) {
ptr[i] += value;
}
// --------------------------------------------------------
// Testcase: Builder
// --------------------------------------------------------
// Verifies cudaFlow task construction: task naming, non-emptiness, and the
// successor counts produced by precede/succeed wiring
// (copy1 -> copy2 and copy3 -> copy2).
TEST_CASE("Builder" * doctest::timeout(300)) {
tf::cudaGraph G;
tf::cudaFlow cf(G);
int source = 1;
int target = 1;
auto copy1 = cf.copy(&target, &source, 1).name("copy1");
auto copy2 = cf.copy(&target, &source, 1).name("copy2");
auto copy3 = cf.copy(&target, &source, 1).name("copy3");
REQUIRE(copy1.name() == "copy1");
REQUIRE(copy2.name() == "copy2");
REQUIRE(copy3.name() == "copy3");
REQUIRE(!copy1.empty());
REQUIRE(!copy2.empty());
REQUIRE(!copy3.empty());
copy1.precede(copy2);
// succeed() makes copy3 a predecessor of copy2, i.e. copy3 -> copy2.
copy2.succeed(copy3);
// copy2 is a sink: both edges point into it.
REQUIRE(copy1.num_successors() == 1);
REQUIRE(copy2.num_successors() == 0);
REQUIRE(copy3.num_successors() == 1);
}
// --------------------------------------------------------
// Testcase: Set
// --------------------------------------------------------
// Test driver: for a sweep of sizes n (1, 3, 7, ... up to 123456), build a
// two-task taskflow — a CPU task that allocates an n-element host/device
// buffer pair, then a cudaFlow task (H2D copy -> k_set(17) -> D2H copy) —
// and verify every element comes back as 17.
template <typename T>
void set() {
for(unsigned n=1; n<=123456; n = n*2 + 1) {
tf::Taskflow taskflow;
tf::Executor executor;
T* cpu = nullptr;
T* gpu = nullptr;
// Allocation happens inside a task so it runs as part of the graph.
auto cputask = taskflow.emplace([&](){
cpu = static_cast<T*>(std::calloc(n, sizeof(T)));
REQUIRE(hipMalloc(&gpu, n*sizeof(T)) == hipSuccess);
});
auto gputask = taskflow.emplace([&](tf::cudaFlow& cf) {
// 256 threads per block, enough blocks to cover n elements.
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto kernel = cf.kernel(g, b, 0, k_set<T>, gpu, n, (T)17);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(kernel);
kernel.precede(d2h);
});
// The GPU task must not run before the buffers exist.
cputask.precede(gputask);
executor.run(taskflow).wait();
for(unsigned i=0; i<n; ++i) {
REQUIRE(cpu[i] == (T)17);
}
std::free(cpu);
REQUIRE(hipFree(gpu) == hipSuccess);
}
}
TEST_CASE("Set.i8" * doctest::timeout(300)) {
set<int8_t>();
}
TEST_CASE("Set.i16" * doctest::timeout(300)) {
set<int16_t>();
}
TEST_CASE("Set.i32" * doctest::timeout(300)) {
set<int32_t>();
}
// --------------------------------------------------------
// Testcase: Add
// --------------------------------------------------------
// Test driver: for a sweep of sizes n, run a cudaFlow that chains four
// k_add kernels (+1, +2, +3, +4) between an H2D and a D2H copy. The host
// buffer starts zeroed (calloc), so every element must end up as 10.
template <typename T>
void add() {
for(unsigned n=1; n<=123456; n = n*2 + 1) {
tf::Taskflow taskflow;
tf::Executor executor;
T* cpu = nullptr;
T* gpu = nullptr;
// Allocate the host/device pair inside a task so it runs as part of the graph.
auto cputask = taskflow.emplace([&](){
cpu = static_cast<T*>(std::calloc(n, sizeof(T)));
REQUIRE(hipMalloc(&gpu, n*sizeof(T)) == hipSuccess);
});
auto gputask = taskflow.emplace([&](tf::cudaFlow& cf){
// 256 threads per block, enough blocks to cover n elements.
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto ad1 = cf.kernel(g, b, 0, k_add<T>, gpu, n, 1);
auto ad2 = cf.kernel(g, b, 0, k_add<T>, gpu, n, 2);
auto ad3 = cf.kernel(g, b, 0, k_add<T>, gpu, n, 3);
auto ad4 = cf.kernel(g, b, 0, k_add<T>, gpu, n, 4);
auto d2h = cf.copy(cpu, gpu, n);
// Strictly serialized chain: each add must see the previous result.
h2d.precede(ad1);
ad1.precede(ad2);
ad2.precede(ad3);
ad3.precede(ad4);
ad4.precede(d2h);
});
cputask.precede(gputask);
executor.run(taskflow).wait();
for(unsigned i=0; i<n; ++i) {
// 0 + 1 + 2 + 3 + 4 = 10.
REQUIRE(cpu[i] == 10);
}
std::free(cpu);
REQUIRE(hipFree(gpu) == hipSuccess);
}
}
TEST_CASE("Add.i8" * doctest::timeout(300)) {
add<int8_t>();
}
TEST_CASE("Add.i16" * doctest::timeout(300)) {
add<int16_t>();
}
TEST_CASE("Add.i32" * doctest::timeout(300)) {
add<int32_t>();
}
// TODO: 64-bit fail?
//TEST_CASE("Add.i64" * doctest::timeout(300)) {
// add<int64_t>();
//}
// --------------------------------------------------------
// Testcase: Binary Set
// --------------------------------------------------------
// Test driver: launch n single-element k_single_set kernels (one per index)
// arranged in a binary-tree dependency (task i/2 precedes task i), all fenced
// between one H2D copy and one D2H copy. Every element must end up as 17.
template <typename T>
void bset() {
const unsigned n = 10000;
tf::Taskflow taskflow;
tf::Executor executor;
T* cpu = nullptr;
T* gpu = nullptr;
// Allocate the host/device pair inside a task so it runs as part of the graph.
auto cputask = taskflow.emplace([&](){
cpu = static_cast<T*>(std::calloc(n, sizeof(T)));
REQUIRE(hipMalloc(&gpu, n*sizeof(T)) == hipSuccess);
});
auto gputask = taskflow.emplace([&](tf::cudaFlow& cf) {
// Each kernel touches exactly one element, so a single thread suffices.
dim3 g = {1, 1, 1};
dim3 b = {1, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto d2h = cf.copy(cpu, gpu, n);
// tasks[1..n]; tasks[0] is unused so the heap-style parent i/2 works.
std::vector<tf::cudaTask> tasks(n+1);
for(unsigned i=1; i<=n; ++i) {
tasks[i] = cf.kernel(g, b, 0, k_single_set<T>, gpu, i-1, (T)17);
// Binary-tree edge: parent i/2 precedes child i (root has no parent).
auto p = i/2;
if(p != 0) {
tasks[p].precede(tasks[i]);
}
tasks[i].precede(d2h);
h2d.precede(tasks[i]);
}
});
cputask.precede(gputask);
executor.run(taskflow).wait();
for(unsigned i=0; i<n; ++i) {
REQUIRE(cpu[i] == (T)17);
}
std::free(cpu);
REQUIRE(hipFree(gpu) == hipSuccess);
}
TEST_CASE("BSet.i8" * doctest::timeout(300)) {
bset<int8_t>();
}
TEST_CASE("BSet.i16" * doctest::timeout(300)) {
bset<int16_t>();
}
TEST_CASE("BSet.i32" * doctest::timeout(300)) {
bset<int32_t>();
}
// --------------------------------------------------------
// Testcase: Memset
// --------------------------------------------------------
// Fill gpu[0..N) with 123 via k_set, then overwrite the tail [start, N) with
// the memset byte 0x3f (each 4-byte int becomes 0x3f3f3f3f), copy back and
// check both regions. Note the task named `zero` does NOT write zeros — it
// writes the 0x3f fill pattern.
TEST_CASE("Memset") {
tf::Taskflow taskflow;
tf::Executor executor;
const int N = 100;
int* cpu = new int [N];
int* gpu = nullptr;
REQUIRE(hipMalloc(&gpu, N*sizeof(int)) == hipSuccess);
for(int r=1; r<=100; ++r) {
// Random split point; elements before `start` keep 123, the rest get 0x3f fill.
int start = ::rand() % N;
for(int i=0; i<N; ++i) {
cpu[i] = 999;
}
// NOTE(review): emplace() inside the loop keeps adding tasks to the same
// taskflow, so round r re-runs all r accumulated (identical) cudaFlows.
// The result is the same, but the work grows quadratically over rounds.
taskflow.emplace([&](tf::cudaFlow& cf){
dim3 g = {(unsigned)(N+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto kset = cf.kernel(g, b, 0, k_set<int>, gpu, N, 123);
auto zero = cf.memset(gpu+start, 0x3f, (N-start)*sizeof(int));
auto copy = cf.copy(cpu, gpu, N);
kset.precede(zero);
zero.precede(copy);
});
executor.run(taskflow).wait();
for(int i=0; i<start; ++i) {
REQUIRE(cpu[i] == 123);
}
for(int i=start; i<N; ++i) {
REQUIRE(cpu[i] == 0x3f3f3f3f);
}
}
delete [] cpu;
REQUIRE(hipFree(gpu) == hipSuccess);
}
// --------------------------------------------------------
// Testcase: Barrier
// --------------------------------------------------------
// Test driver: use no-op tasks as barriers between two waves of per-element
// kernels. Wave 1 sets each element to 17, wave 2 adds 3 — kernels within a
// wave are unordered, but br1/br2/br3 force wave-by-wave execution.
// Every element must end up as 20.
template <typename T>
void barrier() {
const unsigned n = 1000;
tf::Taskflow taskflow;
tf::Executor executor;
T* cpu = nullptr;
T* gpu = nullptr;
// Allocate the host/device pair inside a task so it runs as part of the graph.
auto cputask = taskflow.emplace([&](){
cpu = static_cast<T*>(std::calloc(n, sizeof(T)));
REQUIRE(hipMalloc(&gpu, n*sizeof(T)) == hipSuccess);
});
auto gputask = taskflow.emplace([&](tf::cudaFlow& cf) {
// Each kernel touches one element; single-thread launches.
dim3 g = {1, 1, 1};
dim3 b = {1, 1, 1};
// Barrier tasks: h2d -> br1 -> (sets) -> br2 -> (adds) -> br3 -> d2h.
auto br1 = cf.noop();
auto br2 = cf.noop();
auto br3 = cf.noop();
auto h2d = cf.copy(gpu, cpu, n);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(br1);
for(unsigned i=0; i<n; ++i) {
auto k1 = cf.kernel(g, b, 0, k_single_set<T>, gpu, i, (T)17);
k1.succeed(br1)
.precede(br2);
auto k2 = cf.kernel(g, b, 0, k_single_add<T>, gpu, i, (T)3);
k2.succeed(br2)
.precede(br3);
}
br3.precede(d2h);
});
cputask.precede(gputask);
executor.run(taskflow).wait();
for(unsigned i=0; i<n; ++i) {
// 17 (set) + 3 (add) = 20.
REQUIRE(cpu[i] == (T)20);
}
std::free(cpu);
REQUIRE(hipFree(gpu) == hipSuccess);
}
TEST_CASE("Barrier.i8" * doctest::timeout(300)) {
barrier<int8_t>();
}
TEST_CASE("Barrier.i16" * doctest::timeout(300)) {
barrier<int16_t>();
}
TEST_CASE("Barrier.i32" * doctest::timeout(300)) {
barrier<int32_t>();
}
// ----------------------------------------------------------------------------
// Conditional GPU tasking
// ----------------------------------------------------------------------------
// Conditional tasking: the cudaFlow adds 1 to every element per iteration;
// the condition task counts rounds, verifies the expected value, and loops
// back into the GPU task until 100 rounds, then routes to the cleanup task.
TEST_CASE("Loop") {
tf::Taskflow taskflow;
tf::Executor executor;
const unsigned n = 1000;
int* cpu = nullptr;
int* gpu = nullptr;
// Allocate the host/device pair inside a task so it runs as part of the graph.
auto cputask = taskflow.emplace([&](){
cpu = static_cast<int*>(std::calloc(n, sizeof(int)));
REQUIRE(hipMalloc(&gpu, n*sizeof(int)) == hipSuccess);
});
auto gputask = taskflow.emplace([&](tf::cudaFlow& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(kernel);
kernel.precede(d2h);
});
// `round` is per-lambda mutable state; `n` is const and only read, so it
// needs no explicit capture here.
auto condition = taskflow.emplace([&cpu, round=0] () mutable {
++round;
for(unsigned i=0; i<n; ++i) {
REQUIRE(cpu[i] == round);
}
// Return index 0 (loop back to gputask) until 100 rounds, then 1 (freetask).
return round >= 100;
});
auto freetask = taskflow.emplace([&](){
REQUIRE(hipFree(gpu) == hipSuccess);
std::free(cpu);
});
cputask.precede(gputask);
gputask.precede(condition);
condition.precede(gputask, freetask);
executor.run(taskflow).wait();
}
// ----------------------------------------------------------------------------
// Subflow
// ----------------------------------------------------------------------------
// A joined subflow spawns the alloc task and the cudaFlow (add 1) task at
// runtime; the parent task completes only after the subflow joins, so the
// check task can safely read the result.
TEST_CASE("Subflow") {
tf::Taskflow taskflow;
tf::Executor executor;
int* cpu = nullptr;
int* gpu = nullptr;
const unsigned n = 1000;
auto partask = taskflow.emplace([&](tf::Subflow& sf){
// Allocation runs inside the subflow, before the GPU task.
auto cputask = sf.emplace([&](){
cpu = static_cast<int*>(std::calloc(n, sizeof(int)));
REQUIRE(hipMalloc(&gpu, n*sizeof(int)) == hipSuccess);
});
auto gputask = sf.emplace([&](tf::cudaFlow& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(kernel);
kernel.precede(d2h);
});
cputask.precede(gputask);
});
// One +1 pass over a zeroed buffer: every element must be 1.
auto chktask = taskflow.emplace([&](){
for(unsigned i=0; i<n ;++i){
REQUIRE(cpu[i] == 1);
}
REQUIRE(hipFree(gpu) == hipSuccess);
std::free(cpu);
});
partask.precede(chktask);
executor.run(taskflow).wait();
}
// ----------------------------------------------------------------------------
// NestedSubflow
// ----------------------------------------------------------------------------
// Two levels of nested subflows, each running an identical cudaFlow that
// adds 1 to every element; three passes total over a zeroed buffer, so the
// final check expects 3 everywhere.
TEST_CASE("NestedSubflow") {
tf::Taskflow taskflow;
tf::Executor executor;
int* cpu = nullptr;
int* gpu = nullptr;
const unsigned n = 1000;
// Top-level allocation task (runs before the subflow chain).
auto cputask = taskflow.emplace([&](){
cpu = static_cast<int*>(std::calloc(n, sizeof(int)));
REQUIRE(hipMalloc(&gpu, n*sizeof(int)) == hipSuccess);
});
auto partask = taskflow.emplace([&](tf::Subflow& sf){
// Pass 1: +1 at the first subflow level.
auto gputask1 = sf.emplace([&](tf::cudaFlow& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(kernel);
kernel.precede(d2h);
});
auto subtask1 = sf.emplace([&](tf::Subflow& sf) {
// Pass 2: +1 at the second subflow level.
auto gputask2 = sf.emplace([&](tf::cudaFlow& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(kernel);
kernel.precede(d2h);
});
auto subtask2 = sf.emplace([&](tf::Subflow& sf){
// Pass 3: +1 at the third nesting level.
sf.emplace([&](tf::cudaFlow& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(kernel);
kernel.precede(d2h);
});
});
gputask2.precede(subtask2);
});
gputask1.precede(subtask1);
});
// After three serialized +1 passes every element must be 3.
auto chktask = taskflow.emplace([&](){
for(unsigned i=0; i<n ;++i){
REQUIRE(cpu[i] == 3);
}
REQUIRE(hipFree(gpu) == hipSuccess);
std::free(cpu);
});
partask.precede(chktask)
.succeed(cputask);
executor.run(taskflow).wait();
}
// ----------------------------------------------------------------------------
// DetachedSubflow
// ----------------------------------------------------------------------------
// Detached subflow: the inner graph (alloc -> cudaFlow add-1) detaches from
// its parent task instead of joining it.
// NOTE(review): the checks after wait() assume the detached subflow has
// completed by the time run() is satisfied — confirm against the executor's
// detach semantics.
TEST_CASE("DetachedSubflow") {
tf::Taskflow taskflow;
tf::Executor executor;
int* cpu = nullptr;
int* gpu = nullptr;
const unsigned n = 1000;
taskflow.emplace([&](tf::Subflow& sf){
auto cputask = sf.emplace([&](){
cpu = static_cast<int*>(std::calloc(n, sizeof(int)));
REQUIRE(hipMalloc(&gpu, n*sizeof(int)) == hipSuccess);
});
auto gputask = sf.emplace([&](tf::cudaFlow& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(kernel);
kernel.precede(d2h);
});
cputask.precede(gputask);
// Detach: the parent task finishes without waiting for the subflow.
sf.detach();
});
executor.run(taskflow).wait();
// One +1 pass over a zeroed buffer.
for(unsigned i=0; i<n ;++i){
REQUIRE(cpu[i] == 1);
}
REQUIRE(hipFree(gpu) == hipSuccess);
std::free(cpu);
}
| cd9838fbcf1000e326e307592963bb5586f3ef29.cu | #define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include <doctest.h>
#include <taskflow/taskflow.hpp>
// ----------------------------------------------------------------------------
// kernel helper
// ----------------------------------------------------------------------------
// Fill ptr[0..N) with `value`, one element per thread.
// Launch with at least N total threads; the bound check guards the padded tail.
template <typename T>
__global__ void k_set(T* ptr, size_t N, T value) {
// Flat 1D global index.
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < N) {
ptr[i] = value;
}
}
// Write `value` into the single element ptr[i]. No bounds check; every
// launched thread performs the same store.
template <typename T>
__global__ void k_single_set(T* ptr, int i, T value) {
ptr[i] = value;
}
// Element-wise ptr[i] += value for i in [0, N), one element per thread.
// Launch with at least N total threads; the bound check guards the padded tail.
template <typename T>
__global__ void k_add(T* ptr, size_t N, T value) {
// Flat 1D global index.
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < N) {
ptr[i] += value;
}
}
// Add `value` to the single element ptr[i]. No bounds check; every launched
// thread performs the same read-modify-write.
template <typename T>
__global__ void k_single_add(T* ptr, int i, T value) {
ptr[i] += value;
}
// --------------------------------------------------------
// Testcase: Builder
// --------------------------------------------------------
TEST_CASE("Builder" * doctest::timeout(300)) {
tf::cudaGraph G;
tf::cudaFlow cf(G);
int source = 1;
int target = 1;
auto copy1 = cf.copy(&target, &source, 1).name("copy1");
auto copy2 = cf.copy(&target, &source, 1).name("copy2");
auto copy3 = cf.copy(&target, &source, 1).name("copy3");
REQUIRE(copy1.name() == "copy1");
REQUIRE(copy2.name() == "copy2");
REQUIRE(copy3.name() == "copy3");
REQUIRE(!copy1.empty());
REQUIRE(!copy2.empty());
REQUIRE(!copy3.empty());
copy1.precede(copy2);
copy2.succeed(copy3);
REQUIRE(copy1.num_successors() == 1);
REQUIRE(copy2.num_successors() == 0);
REQUIRE(copy3.num_successors() == 1);
}
// --------------------------------------------------------
// Testcase: Set
// --------------------------------------------------------
template <typename T>
void set() {
for(unsigned n=1; n<=123456; n = n*2 + 1) {
tf::Taskflow taskflow;
tf::Executor executor;
T* cpu = nullptr;
T* gpu = nullptr;
auto cputask = taskflow.emplace([&](){
cpu = static_cast<T*>(std::calloc(n, sizeof(T)));
REQUIRE(cudaMalloc(&gpu, n*sizeof(T)) == cudaSuccess);
});
auto gputask = taskflow.emplace([&](tf::cudaFlow& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto kernel = cf.kernel(g, b, 0, k_set<T>, gpu, n, (T)17);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(kernel);
kernel.precede(d2h);
});
cputask.precede(gputask);
executor.run(taskflow).wait();
for(unsigned i=0; i<n; ++i) {
REQUIRE(cpu[i] == (T)17);
}
std::free(cpu);
REQUIRE(cudaFree(gpu) == cudaSuccess);
}
}
TEST_CASE("Set.i8" * doctest::timeout(300)) {
set<int8_t>();
}
TEST_CASE("Set.i16" * doctest::timeout(300)) {
set<int16_t>();
}
TEST_CASE("Set.i32" * doctest::timeout(300)) {
set<int32_t>();
}
// --------------------------------------------------------
// Testcase: Add
// --------------------------------------------------------
template <typename T>
void add() {
for(unsigned n=1; n<=123456; n = n*2 + 1) {
tf::Taskflow taskflow;
tf::Executor executor;
T* cpu = nullptr;
T* gpu = nullptr;
auto cputask = taskflow.emplace([&](){
cpu = static_cast<T*>(std::calloc(n, sizeof(T)));
REQUIRE(cudaMalloc(&gpu, n*sizeof(T)) == cudaSuccess);
});
auto gputask = taskflow.emplace([&](tf::cudaFlow& cf){
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto ad1 = cf.kernel(g, b, 0, k_add<T>, gpu, n, 1);
auto ad2 = cf.kernel(g, b, 0, k_add<T>, gpu, n, 2);
auto ad3 = cf.kernel(g, b, 0, k_add<T>, gpu, n, 3);
auto ad4 = cf.kernel(g, b, 0, k_add<T>, gpu, n, 4);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(ad1);
ad1.precede(ad2);
ad2.precede(ad3);
ad3.precede(ad4);
ad4.precede(d2h);
});
cputask.precede(gputask);
executor.run(taskflow).wait();
for(unsigned i=0; i<n; ++i) {
REQUIRE(cpu[i] == 10);
}
std::free(cpu);
REQUIRE(cudaFree(gpu) == cudaSuccess);
}
}
TEST_CASE("Add.i8" * doctest::timeout(300)) {
add<int8_t>();
}
TEST_CASE("Add.i16" * doctest::timeout(300)) {
add<int16_t>();
}
TEST_CASE("Add.i32" * doctest::timeout(300)) {
add<int32_t>();
}
// TODO: 64-bit fail?
//TEST_CASE("Add.i64" * doctest::timeout(300)) {
// add<int64_t>();
//}
// --------------------------------------------------------
// Testcase: Binary Set
// --------------------------------------------------------
template <typename T>
void bset() {
const unsigned n = 10000;
tf::Taskflow taskflow;
tf::Executor executor;
T* cpu = nullptr;
T* gpu = nullptr;
auto cputask = taskflow.emplace([&](){
cpu = static_cast<T*>(std::calloc(n, sizeof(T)));
REQUIRE(cudaMalloc(&gpu, n*sizeof(T)) == cudaSuccess);
});
auto gputask = taskflow.emplace([&](tf::cudaFlow& cf) {
dim3 g = {1, 1, 1};
dim3 b = {1, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto d2h = cf.copy(cpu, gpu, n);
std::vector<tf::cudaTask> tasks(n+1);
for(unsigned i=1; i<=n; ++i) {
tasks[i] = cf.kernel(g, b, 0, k_single_set<T>, gpu, i-1, (T)17);
auto p = i/2;
if(p != 0) {
tasks[p].precede(tasks[i]);
}
tasks[i].precede(d2h);
h2d.precede(tasks[i]);
}
});
cputask.precede(gputask);
executor.run(taskflow).wait();
for(unsigned i=0; i<n; ++i) {
REQUIRE(cpu[i] == (T)17);
}
std::free(cpu);
REQUIRE(cudaFree(gpu) == cudaSuccess);
}
TEST_CASE("BSet.i8" * doctest::timeout(300)) {
bset<int8_t>();
}
TEST_CASE("BSet.i16" * doctest::timeout(300)) {
bset<int16_t>();
}
TEST_CASE("BSet.i32" * doctest::timeout(300)) {
bset<int32_t>();
}
// --------------------------------------------------------
// Testcase: Memset
// --------------------------------------------------------
TEST_CASE("Memset") {
tf::Taskflow taskflow;
tf::Executor executor;
const int N = 100;
int* cpu = new int [N];
int* gpu = nullptr;
REQUIRE(cudaMalloc(&gpu, N*sizeof(int)) == cudaSuccess);
for(int r=1; r<=100; ++r) {
int start = ::rand() % N;
for(int i=0; i<N; ++i) {
cpu[i] = 999;
}
taskflow.emplace([&](tf::cudaFlow& cf){
dim3 g = {(unsigned)(N+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto kset = cf.kernel(g, b, 0, k_set<int>, gpu, N, 123);
auto zero = cf.memset(gpu+start, 0x3f, (N-start)*sizeof(int));
auto copy = cf.copy(cpu, gpu, N);
kset.precede(zero);
zero.precede(copy);
});
executor.run(taskflow).wait();
for(int i=0; i<start; ++i) {
REQUIRE(cpu[i] == 123);
}
for(int i=start; i<N; ++i) {
REQUIRE(cpu[i] == 0x3f3f3f3f);
}
}
delete [] cpu;
REQUIRE(cudaFree(gpu) == cudaSuccess);
}
// --------------------------------------------------------
// Testcase: Barrier
// --------------------------------------------------------
template <typename T>
void barrier() {
const unsigned n = 1000;
tf::Taskflow taskflow;
tf::Executor executor;
T* cpu = nullptr;
T* gpu = nullptr;
auto cputask = taskflow.emplace([&](){
cpu = static_cast<T*>(std::calloc(n, sizeof(T)));
REQUIRE(cudaMalloc(&gpu, n*sizeof(T)) == cudaSuccess);
});
auto gputask = taskflow.emplace([&](tf::cudaFlow& cf) {
dim3 g = {1, 1, 1};
dim3 b = {1, 1, 1};
auto br1 = cf.noop();
auto br2 = cf.noop();
auto br3 = cf.noop();
auto h2d = cf.copy(gpu, cpu, n);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(br1);
for(unsigned i=0; i<n; ++i) {
auto k1 = cf.kernel(g, b, 0, k_single_set<T>, gpu, i, (T)17);
k1.succeed(br1)
.precede(br2);
auto k2 = cf.kernel(g, b, 0, k_single_add<T>, gpu, i, (T)3);
k2.succeed(br2)
.precede(br3);
}
br3.precede(d2h);
});
cputask.precede(gputask);
executor.run(taskflow).wait();
for(unsigned i=0; i<n; ++i) {
REQUIRE(cpu[i] == (T)20);
}
std::free(cpu);
REQUIRE(cudaFree(gpu) == cudaSuccess);
}
TEST_CASE("Barrier.i8" * doctest::timeout(300)) {
barrier<int8_t>();
}
TEST_CASE("Barrier.i16" * doctest::timeout(300)) {
barrier<int16_t>();
}
TEST_CASE("Barrier.i32" * doctest::timeout(300)) {
barrier<int32_t>();
}
// ----------------------------------------------------------------------------
// Conditional GPU tasking
// ----------------------------------------------------------------------------
TEST_CASE("Loop") {
tf::Taskflow taskflow;
tf::Executor executor;
const unsigned n = 1000;
int* cpu = nullptr;
int* gpu = nullptr;
auto cputask = taskflow.emplace([&](){
cpu = static_cast<int*>(std::calloc(n, sizeof(int)));
REQUIRE(cudaMalloc(&gpu, n*sizeof(int)) == cudaSuccess);
});
auto gputask = taskflow.emplace([&](tf::cudaFlow& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(kernel);
kernel.precede(d2h);
});
auto condition = taskflow.emplace([&cpu, round=0] () mutable {
++round;
for(unsigned i=0; i<n; ++i) {
REQUIRE(cpu[i] == round);
}
return round >= 100;
});
auto freetask = taskflow.emplace([&](){
REQUIRE(cudaFree(gpu) == cudaSuccess);
std::free(cpu);
});
cputask.precede(gputask);
gputask.precede(condition);
condition.precede(gputask, freetask);
executor.run(taskflow).wait();
}
// ----------------------------------------------------------------------------
// Subflow
// ----------------------------------------------------------------------------
TEST_CASE("Subflow") {
tf::Taskflow taskflow;
tf::Executor executor;
int* cpu = nullptr;
int* gpu = nullptr;
const unsigned n = 1000;
auto partask = taskflow.emplace([&](tf::Subflow& sf){
auto cputask = sf.emplace([&](){
cpu = static_cast<int*>(std::calloc(n, sizeof(int)));
REQUIRE(cudaMalloc(&gpu, n*sizeof(int)) == cudaSuccess);
});
auto gputask = sf.emplace([&](tf::cudaFlow& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(kernel);
kernel.precede(d2h);
});
cputask.precede(gputask);
});
auto chktask = taskflow.emplace([&](){
for(unsigned i=0; i<n ;++i){
REQUIRE(cpu[i] == 1);
}
REQUIRE(cudaFree(gpu) == cudaSuccess);
std::free(cpu);
});
partask.precede(chktask);
executor.run(taskflow).wait();
}
// ----------------------------------------------------------------------------
// NestedSubflow
// ----------------------------------------------------------------------------
TEST_CASE("NestedSubflow") {
tf::Taskflow taskflow;
tf::Executor executor;
int* cpu = nullptr;
int* gpu = nullptr;
const unsigned n = 1000;
auto cputask = taskflow.emplace([&](){
cpu = static_cast<int*>(std::calloc(n, sizeof(int)));
REQUIRE(cudaMalloc(&gpu, n*sizeof(int)) == cudaSuccess);
});
auto partask = taskflow.emplace([&](tf::Subflow& sf){
auto gputask1 = sf.emplace([&](tf::cudaFlow& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(kernel);
kernel.precede(d2h);
});
auto subtask1 = sf.emplace([&](tf::Subflow& sf) {
auto gputask2 = sf.emplace([&](tf::cudaFlow& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(kernel);
kernel.precede(d2h);
});
auto subtask2 = sf.emplace([&](tf::Subflow& sf){
sf.emplace([&](tf::cudaFlow& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(kernel);
kernel.precede(d2h);
});
});
gputask2.precede(subtask2);
});
gputask1.precede(subtask1);
});
auto chktask = taskflow.emplace([&](){
for(unsigned i=0; i<n ;++i){
REQUIRE(cpu[i] == 3);
}
REQUIRE(cudaFree(gpu) == cudaSuccess);
std::free(cpu);
});
partask.precede(chktask)
.succeed(cputask);
executor.run(taskflow).wait();
}
// ----------------------------------------------------------------------------
// DetachedSubflow
// ----------------------------------------------------------------------------
TEST_CASE("DetachedSubflow") {
tf::Taskflow taskflow;
tf::Executor executor;
int* cpu = nullptr;
int* gpu = nullptr;
const unsigned n = 1000;
taskflow.emplace([&](tf::Subflow& sf){
auto cputask = sf.emplace([&](){
cpu = static_cast<int*>(std::calloc(n, sizeof(int)));
REQUIRE(cudaMalloc(&gpu, n*sizeof(int)) == cudaSuccess);
});
auto gputask = sf.emplace([&](tf::cudaFlow& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(kernel);
kernel.precede(d2h);
});
cputask.precede(gputask);
sf.detach();
});
executor.run(taskflow).wait();
for(unsigned i=0; i<n ;++i){
REQUIRE(cpu[i] == 1);
}
REQUIRE(cudaFree(gpu) == cudaSuccess);
std::free(cpu);
}
|
dabd3ec35013662ed30e5c8f9db2123d035df422.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//: nvcc mm.cu -o mm
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
// Naive dense matrix multiply: the block at (blockIdx.y, blockIdx.x) computes
// one element of p = m * n (all size x size, row-major) as a dot product.
// Indexing uses blockIdx only, so the launch must provide a size x size grid
// with a single thread per block (see multiply() below).
__global__ void mm_kernel(float *d_m, float *d_n, float *d_p, int size)
{
const int row = blockIdx.y;
const int col = blockIdx.x;
float val = 0.0;
// Dot product of row `row` of m with column `col` of n.
for (int i = 0; i < size; ++i)
{
val += d_m[row * size + i] * d_n[i * size + col];
}
d_p[row * size + col] = val;
}
/*
* En declarant une variable comme __shared
* tous les threads dans un bloc peuvent se partager la valeur
* donner la valeur dans
*
* __shared_int sum;
* if (threadIdx.x == 0)
* {
* sum = 0; // ne peut etre initialisee que par le premier, par de maniere globale
* }
* __syncthread();
* atomicAdd(&sum, 5); // la memoire partagee n'est pas changee par plusieurs threads
* __syncthread(); // on attend que les autres threads soient OK pour passer a la suite
*
* atomicAdd est VRAIMENT lent
*/
// Multiply two size x size row-major matrices on the GPU: p = m * n.
// Uploads both operands, launches mm_kernel with one single-thread block per
// output element (the kernel indexes rows/columns with blockIdx), and
// downloads the product. Each matrix lives in one contiguous allocation.
void multiply(float *m, float *n, float *p, int size)
{
    const int nbytes = (size * size) * sizeof(float);
    float *d_m = NULL;
    float *d_n = NULL;
    float *d_p = NULL;
    hipMalloc((void**)&d_m, nbytes);
    hipMalloc((void**)&d_n, nbytes);
    hipMalloc((void**)&d_p, nbytes);
    // Upload both operands.
    hipMemcpy(d_m, m, nbytes, hipMemcpyHostToDevice);
    hipMemcpy(d_n, n, nbytes, hipMemcpyHostToDevice);
    // size x size blocks in the grid, one thread per block — this matches the
    // kernel's blockIdx-only indexing.
    dim3 cells(size, size);
    dim3 singleThread(1, 1);
    hipLaunchKernelGGL(mm_kernel, cells, singleThread, 0, 0, d_m, d_n, d_p, size);
    // hipMemcpy is synchronous, so it also waits for the kernel to finish.
    hipMemcpy(p, d_p, nbytes, hipMemcpyDeviceToHost);
    hipFree(d_m);
    hipFree(d_n);
    hipFree(d_p);
}
// Write the size x size row-major matrix `m` to `out`, one row per line,
// each entry formatted as "%8.4f " (8-wide, 4 decimals, trailing space).
void print_matrix(float *m, FILE *out, int size)
{
    for (int row = 0; row < size; ++row)
    {
        const float *line = m + row * size;
        for (int col = 0; col < size; ++col)
        {
            fprintf(out, "%8.4f ", line[col]);
        }
        fprintf(out, "\n");
    }
}
// Entry point: build two size x size test matrices, multiply them on the GPU
// and print the product. The edge length comes from the first CLI argument
// (default 10).
int main(int argc, char **argv)
{
    // atoi, not atof: the size is an integer, parsing it as a double and
    // implicitly truncating was misleading.
    const int size = (argc == 2)? atoi(argv[1]) : 10;
    if (size <= 0)
    {
        fprintf(stderr, "invalid matrix size\n");
        return EXIT_FAILURE;
    }
    const int nbytes = (size * size) * sizeof(float);
    float *m = (float*)malloc(nbytes);
    float *n = (float*)malloc(nbytes);
    float *p = (float*)malloc(nbytes);
    if (m == NULL || n == NULL || p == NULL)
    {
        fprintf(stderr, "allocation failed\n");
        return EXIT_FAILURE;
    }
    // Fill matrices m & n with deterministic trigonometric test data.
    for (int i = 0; i < size; ++i)
    {
        for (int j = 0; j < size; ++j)
        {
            m[i * size + j] = cos(j) * sqrt(i);
            n[i * size + j] = sin(j) + 0.1 * i;
        }
    }
    multiply(m, n, p, size);
    print_matrix(p, stdout, size);
    free(m);
    free(n);
    free(p);
    return EXIT_SUCCESS;
}
| dabd3ec35013662ed30e5c8f9db2123d035df422.cu | //: nvcc mm.cu -o mm
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
// Naive dense matrix multiply: the block at (blockIdx.y, blockIdx.x) computes
// one element of p = m * n (all size x size, row-major) as a dot product.
// Indexing uses blockIdx only, so the launch must provide a size x size grid
// with a single thread per block (see multiply() below).
__global__ void mm_kernel(float *d_m, float *d_n, float *d_p, int size)
{
const int row = blockIdx.y;
const int col = blockIdx.x;
float val = 0.0;
// Dot product of row `row` of m with column `col` of n.
for (int i = 0; i < size; ++i)
{
val += d_m[row * size + i] * d_n[i * size + col];
}
d_p[row * size + col] = val;
}
/*
* En declarant une variable comme __shared
* tous les threads dans un bloc peuvent se partager la valeur
* donner la valeur dans
*
* __shared_int sum;
* if (threadIdx.x == 0)
* {
* sum = 0; // ne peut etre initialisee que par le premier, par de maniere globale
* }
* __syncthread();
* atomicAdd(&sum, 5); // la memoire partagee n'est pas changee par plusieurs threads
* __syncthread(); // on attend que les autres threads soient OK pour passer a la suite
*
* atomicAdd est VRAIMENT lent
*/
// Multiply two size x size row-major matrices on the GPU: p = m * n.
// Uploads both operands, launches mm_kernel with one single-thread block per
// output element (the kernel indexes rows/columns with blockIdx), and
// downloads the product. Each matrix lives in one contiguous allocation.
void multiply(float *m, float *n, float *p, int size)
{
    const int nbytes = (size * size) * sizeof(float);
    float *d_m = NULL;
    float *d_n = NULL;
    float *d_p = NULL;
    cudaMalloc((void**)&d_m, nbytes);
    cudaMalloc((void**)&d_n, nbytes);
    cudaMalloc((void**)&d_p, nbytes);
    // Upload both operands.
    cudaMemcpy(d_m, m, nbytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_n, n, nbytes, cudaMemcpyHostToDevice);
    // size x size blocks in the grid, one thread per block — this matches the
    // kernel's blockIdx-only indexing.
    dim3 cells(size, size);
    dim3 singleThread(1, 1);
    mm_kernel<<<cells, singleThread>>>(d_m, d_n, d_p, size);
    // cudaMemcpy is synchronous, so it also waits for the kernel to finish.
    cudaMemcpy(p, d_p, nbytes, cudaMemcpyDeviceToHost);
    cudaFree(d_m);
    cudaFree(d_n);
    cudaFree(d_p);
}
// Write the size x size row-major matrix `m` to `out`, one row per line,
// each entry formatted as "%8.4f " (8-wide, 4 decimals, trailing space).
void print_matrix(float *m, FILE *out, int size)
{
    for (int row = 0; row < size; ++row)
    {
        const float *line = m + row * size;
        for (int col = 0; col < size; ++col)
        {
            fprintf(out, "%8.4f ", line[col]);
        }
        fprintf(out, "\n");
    }
}
// Entry point: build two size x size test matrices, multiply them on the GPU
// and print the product. The edge length comes from the first CLI argument
// (default 10).
int main(int argc, char **argv)
{
    // atoi, not atof: the size is an integer, parsing it as a double and
    // implicitly truncating was misleading.
    const int size = (argc == 2)? atoi(argv[1]) : 10;
    if (size <= 0)
    {
        fprintf(stderr, "invalid matrix size\n");
        return EXIT_FAILURE;
    }
    const int nbytes = (size * size) * sizeof(float);
    float *m = (float*)malloc(nbytes);
    float *n = (float*)malloc(nbytes);
    float *p = (float*)malloc(nbytes);
    if (m == NULL || n == NULL || p == NULL)
    {
        fprintf(stderr, "allocation failed\n");
        return EXIT_FAILURE;
    }
    // Fill matrices m & n with deterministic trigonometric test data.
    for (int i = 0; i < size; ++i)
    {
        for (int j = 0; j < size; ++j)
        {
            m[i * size + j] = cos(j) * sqrt(i);
            n[i * size + j] = sin(j) + 0.1 * i;
        }
    }
    multiply(m, n, p, size);
    print_matrix(p, stdout, size);
    free(m);
    free(n);
    free(p);
    return EXIT_SUCCESS;
}
|
8ef2585e1b70983eb3bbf7f62fa22452dae06a23.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* _reg_blockMatching_gpu.cu
*
*
* Created by Marc Modat and Pankaj Daga on 24/03/2009.
* Copyright (c) 2009-2018, University College London
* Copyright (c) 2018, NiftyReg Developers.
* All rights reserved.
*
*/
#ifndef _REG_BLOCKMATCHING_GPU_CU
#define _REG_BLOCKMATCHING_GPU_CU
#include "blockMatchingKernel.h"
#include "_reg_ReadWriteImage.h"
#include "_reg_tools.h"
#include <vector>
#include "_reg_maths.h"
//#define USE_TEST_KERNEL
////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////
/*
* before it was in the file _reg_blockMatching_kernels.cu
*
*
* Created by Marc Modat and Pankaj Daga on 24/03/2009.
* Copyright (c) 2009-2018, University College London
* Copyright (c) 2018, NiftyReg Developers.
* All rights reserved.
* See the LICENSE.txt file in the nifty_reg root folder
*
*/
// Some parameters that we need for the kernel execution.
// The caller is supposed to ensure that the values are set
// Number of blocks in each dimension
__device__ __constant__ int3 c_BlockDim;
__device__ __constant__ uint3 c_ImageSize;
// Transformation matrix from nifti header
__device__ __constant__ float4 t_m_a;
__device__ __constant__ float4 t_m_b;
__device__ __constant__ float4 t_m_c;
#define BLOCK_WIDTH 4
#define BLOCK_SIZE 64
#define OVERLAP_SIZE 3
#define STEP_SIZE 1
texture<float, 1, hipReadModeElementType> referenceImageArray_texture;
texture<float, 1, hipReadModeElementType> warpedImageArray_texture;
texture<int, 1, hipReadModeElementType> totalBlock_texture;
/* *************************************************************** */
// 2D point transform: out = mat * (in[0], in[1], 0, 1), computed in double
// precision and cast back to DTYPE. `mat` is a 4x4 matrix stored row-major
// as 16 floats; only the first two rows are evaluated.
template<class DTYPE>
__inline__ __device__
void reg2D_mat44_mul_cuda(float* mat, DTYPE const* in, DTYPE *out)
{
out[0] = (DTYPE)((double)mat[0 * 4 + 0] * (double)in[0] + (double)mat[0 * 4 + 1] * (double)in[1] + (double)mat[0 * 4 + 2] * 0 + (double)mat[0 * 4 + 3]);
out[1] = (DTYPE)((double)mat[1 * 4 + 0] * (double)in[0] + (double)mat[1 * 4 + 1] * (double)in[1] + (double)mat[1 * 4 + 2] * 0 + (double)mat[1 * 4 + 3]);
return;
}
template<class DTYPE>
__device__ __inline__ void reg_mat44_mul_cuda(float* mat, DTYPE const* in, DTYPE *out)
{
   // Apply the first three rows of a row-major 4x4 matrix to a 3D point,
   // promoting everything to double precision before the multiply-adds.
   const double x = (double)in[0];
   const double y = (double)in[1];
   const double z = (double)in[2];
   out[0] = (DTYPE)((double)mat[0] * x + (double)mat[1] * y + (double)mat[2]  * z + (double)mat[3]);
   out[1] = (DTYPE)((double)mat[4] * x + (double)mat[5] * y + (double)mat[6]  * z + (double)mat[7]);
   out[2] = (DTYPE)((double)mat[8] * x + (double)mat[9] * y + (double)mat[10] * z + (double)mat[11]);
}
// Apply the transformation matrix
// Apply the affine transformation stored in constant memory (one float4
// per matrix row: t_m_a, t_m_b, t_m_c) to the point pt.
__device__ inline void apply_affine(const float4 &pt, float * result)
{
   const float4 row0 = t_m_a;
   const float4 row1 = t_m_b;
   const float4 row2 = t_m_c;
   result[0] = (row0.x * pt.x) + (row0.y * pt.y) + (row0.z * pt.z) + (row0.w);
   result[1] = (row1.x * pt.x) + (row1.y * pt.y) + (row1.z * pt.z) + (row1.w);
   result[2] = (row2.x * pt.x) + (row2.y * pt.y) + (row2.z * pt.z) + (row2.w);
}
/* *************************************************************** */
__inline__ __device__
float blockReduce2DSum(float val, int tid)
{
   // Tree reduction over the 16 threads of a 4x4 block.
   // tid must be the linear thread index in [0,16) and ALL threads of the
   // block must call this function (it contains barriers).
   static __shared__ float shared[16];
   shared[tid] = val;
   __syncthreads();
   for (unsigned int i = 8; i > 0; i >>= 1){
      if (tid < i) {
         shared[tid] += shared[tid + i];
      }
      __syncthreads();
   }
   // Read the result, then barrier once more: without this final barrier a
   // subsequent reduction could overwrite shared[] before every thread has
   // read shared[0] (write-after-read race between back-to-back calls).
   const float sum = shared[0];
   __syncthreads();
   return sum;
}
/* *************************************************************** */
__inline__ __device__
float blockReduceSum(float val, int tid)
{
   // Tree reduction over the 64 threads of a 4x4x4 block.
   // tid must be the linear thread index in [0,64) and ALL threads of the
   // block must call this function (it contains barriers).
   static __shared__ float shared[64];
   shared[tid] = val;
   __syncthreads();
   for (unsigned int i = 32; i > 0; i >>= 1){
      if (tid < i) {
         shared[tid] += shared[tid + i];
      }
      __syncthreads();
   }
   // Read the result, then barrier once more: without this final barrier a
   // subsequent reduction could overwrite shared[] before every thread has
   // read shared[0] (write-after-read race between back-to-back calls).
   const float sum = shared[0];
   __syncthreads();
   return sum;
}
/* *************************************************************** */
// 2D block matching: one CUDA block of 4x4 threads processes one 4x4
// image block, searching a +/-3 voxel neighbourhood in the warped image
// for the displacement maximising the normalised cross-correlation.
// Dynamic shared memory must hold the 12x12 warped neighbourhood
// (144 floats). Every thread executes the reductions, so control flow up
// to them must be uniform across the block.
__global__ void blockMatchingKernel2D(float *warpedPosition,
                                      float *referencePosition,
                                      int *mask,
                                      float* referenceMatrix_xyz,
                                      unsigned int *definedBlock)
{
   extern __shared__ float sWarpedValues[];
   // Compute the current block index
   const unsigned int bid = blockIdx.y * gridDim.x + blockIdx.x;
   // -1 marks an inactive block (pruned on the host side)
   const int currentBlockIndex = tex1Dfetch(totalBlock_texture, bid);
   if (currentBlockIndex > -1) {
      // NOTE(review): idy comes from threadIdx.x and idx from threadIdx.y,
      // the opposite of the 3D kernel — presumably harmless for a square
      // block, but confirm before changing.
      const unsigned int idy = threadIdx.x;
      const unsigned int idx = threadIdx.y;
      const unsigned int tid = idy * 4 + idx;
      const unsigned int xImage = blockIdx.x * 4 + idx;
      const unsigned int yImage = blockIdx.y * 4 + idy;
      //populate shared memory with resultImageArray's values
      // 3x3 tiles of 4x4 voxels -> 12x12 neighbourhood; out-of-bounds or
      // masked voxels are stored as NaN so they are excluded later on.
      for (int y=-1; y<2; ++y) {
         const int yImageIn = yImage + y * 4;
         for (int x=-1; x<2; ++x) {
            const int xImageIn = xImage + x * 4;
            const int sharedIndex = ((y+1)*4+idy)*12+(x+1)*4+idx;
            const int indexXYIn = yImageIn * c_ImageSize.x + xImageIn;
            const bool valid =
                  (xImageIn > -1 && xImageIn < (int)c_ImageSize.x) &&
                  (yImageIn > -1 && yImageIn < (int)c_ImageSize.y);
            sWarpedValues[sharedIndex] = (valid && mask[indexXYIn] > -1) ?
                  tex1Dfetch(warpedImageArray_texture, indexXYIn) : nanf("sNaN");
         }
      }
      //for most cases we need this out of the loop
      //value if the block is 4x4 NaN otherwise
      const unsigned long voxIndex = yImage * c_ImageSize.x + xImage;
      const bool referenceInBounds =
            xImage < c_ImageSize.x &&
            yImage < c_ImageSize.y;
      float rReferenceValue = (referenceInBounds && mask[voxIndex] > -1) ?
            tex1Dfetch(referenceImageArray_texture, voxIndex) : nanf("sNaN");
      const bool finiteReference = isfinite(rReferenceValue);
      rReferenceValue = finiteReference ? rReferenceValue : 0.f;
      // Number of valid (finite, unmasked) reference voxels in this block.
      const unsigned int referenceSize = __syncthreads_count(finiteReference);
      // bestDisplacement[0] stays NaN when no acceptable match is found.
      float bestDisplacement[2] = {nanf("sNaN"), 0.0f};
      float bestCC = 0.0;
      // Require more than half of the 16 voxels to be valid.
      if (referenceSize > 8) {
         //the target values must remain constant throughout the block matching process
         const float referenceMean = __fdividef(blockReduce2DSum(rReferenceValue, tid), referenceSize);
         const float referenceTemp = finiteReference ? rReferenceValue - referenceMean : 0.f;
         const float referenceVar = blockReduce2DSum(referenceTemp * referenceTemp, tid);
         // iteration over the result blocks (block matching part)
         // (x,y) in [1,8) scans displacements of -3..+3 voxels around the block.
         for (unsigned int y=1; y<8; ++y) {
            for (unsigned int x=1; x<8; ++x) {
               const unsigned int sharedIndex = ( y + idy ) * 12 + x + idx;
               const float rWarpedValue = sWarpedValues[sharedIndex];
               const bool overlap = isfinite(rWarpedValue) && finiteReference;
               // Uniform across the block, so the branch below is non-divergent
               // and the reductions inside are safe.
               const unsigned int currentWarpedSize = __syncthreads_count(overlap);
               if (currentWarpedSize > 8) {
                  //the reference values must remain intact at each loop, so please do not touch this!
                  float newreferenceTemp = referenceTemp;
                  float newreferenceVar = referenceVar;
                  if (currentWarpedSize != referenceSize){
                     // Recompute mean/variance over the overlapping voxels only.
                     const float newReferenceValue = overlap ? rReferenceValue : 0.0f;
                     const float newReferenceMean = __fdividef(blockReduce2DSum(newReferenceValue, tid), currentWarpedSize);
                     newreferenceTemp = overlap ? newReferenceValue - newReferenceMean : 0.0f;
                     newreferenceVar = blockReduce2DSum(newreferenceTemp * newreferenceTemp, tid);
                  }
                  const float rChecked = overlap ? rWarpedValue : 0.0f;
                  const float warpedMean = __fdividef(blockReduce2DSum(rChecked, tid), currentWarpedSize);
                  const float warpedTemp = overlap ? rChecked - warpedMean : 0.0f;
                  const float warpedVar = blockReduce2DSum(warpedTemp * warpedTemp, tid);
                  const float sumTargetResult = blockReduce2DSum((newreferenceTemp)* (warpedTemp), tid);
                  // Absolute normalised cross-correlation for this displacement.
                  const float localCC = (newreferenceVar * warpedVar) > 0.0 ? fabs((sumTargetResult) / sqrt(newreferenceVar * warpedVar)) : 0.0;
                  // Only thread 0 tracks the best candidate; the epsilon biases
                  // later ties away from replacing an equal earlier match.
                  if (tid == 0 && localCC > bestCC) {
                     bestCC = localCC + 1.0e-7f;
                     bestDisplacement[0] = x - 4.f;
                     bestDisplacement[1] = y - 4.f;
                  }
               }
            }
         }
      }
      // Thread 0 converts block coordinates to world space and stores the
      // matched pair; definedBlock counts blocks with a valid match.
      if (tid==0){
         const unsigned int posIdx = 2 * currentBlockIndex;
         const float referencePosition_temp[2] = {(float)xImage, (float)yImage};
         bestDisplacement[0] += referencePosition_temp[0];
         bestDisplacement[1] += referencePosition_temp[1];
         reg2D_mat44_mul_cuda<float>(referenceMatrix_xyz, referencePosition_temp, &referencePosition[posIdx]);
         reg2D_mat44_mul_cuda<float>(referenceMatrix_xyz, bestDisplacement, &warpedPosition[posIdx]);
         if (isfinite(bestDisplacement[0])) {
            atomicAdd(definedBlock, 1);
         }
      }
   }
}
/* *************************************************************** */
#ifdef USE_TEST_KERNEL
// Performs two independent 64-element tree reductions in a single call:
// threads 0-63 reduce their values into sData[0], threads 64-127 into
// sData[64]. sData must hold at least 128 floats and every thread of the
// (4x4x8) block must call this function (it contains barriers).
__inline__ __device__
float2 REDUCE_TEST(float* sData,
                   float data,
                   unsigned int tid)
{
   sData[tid] = data;
   __syncthreads();
   // Threads 64-127 belong to the second half / second image block.
   bool seconHalf = tid > 63 ? true : false;
   for (unsigned int i = 32; i > 0; i >>= 1){
      if (tid < i) sData[tid] += sData[tid + i];
      // Mirror of the reduction above, offset by 64 for the second half.
      if (seconHalf && tid < 64 + i) sData[tid] += sData[tid + i];
      __syncthreads();
   }
   // .x = sum of the first half, .y = sum of the second half.
   const float2 temp = make_float2(sData[0], sData[64]);
   // Barrier so the next call cannot overwrite sData before all reads.
   __syncthreads();
   return temp;
}
/* *************************************************************** */
// Experimental 3D block matching (USE_TEST_KERNEL): each 4x4x8 thread
// block processes TWO vertically stacked 4x4x4 image blocks at once
// (threads 0-63 handle the lower block, 64-127 the upper one) using the
// paired reduction REDUCE_TEST. Shared memory layout: 12x12x16 warped
// neighbourhood followed by 128 floats of reduction scratch.
__global__ void blockMatchingKernel3D(float *warpedPosition,
                                      float *referencePosition,
                                      int *mask,
                                      float* referenceMatrix_xyz,
                                      unsigned int *definedBlock)
{
   extern __shared__ float sWarpedValues[];
   float *sData = &sWarpedValues[12*12*16];
   // Compute the current block index
   const unsigned int bid0 = (2*blockIdx.z * gridDim.y + blockIdx.y) *
         gridDim.x + blockIdx.x;
   const unsigned int bid1 = bid0 + gridDim.x * gridDim.y;
   int currentBlockIndex[2] = {tex1Dfetch(totalBlock_texture, bid0),
                               tex1Dfetch(totalBlock_texture, bid1)};
   // The upper block may not exist when the z block count is odd.
   currentBlockIndex[1] = (2*blockIdx.z+1)<c_BlockDim.z ? currentBlockIndex[1] : -1;
   if (currentBlockIndex[0] > -1 || currentBlockIndex[1] > -1) {
      const unsigned int idx = threadIdx.x;
      const unsigned int idy = threadIdx.y;
      const unsigned int idz = threadIdx.z;
      const unsigned int tid = (idz*4+idy)*4+idx;
      const unsigned int xImage = blockIdx.x * 4 + idx;
      const unsigned int yImage = blockIdx.y * 4 + idy;
      const unsigned int zImage = blockIdx.z * 8 + idz;
      //populate shared memory with resultImageArray's values
      // NaN marks out-of-bounds or masked voxels.
      for (int z=-1 ; z<2; z+=2) {
         const int zImageIn = zImage + z * 4;
         for (int y=-1; y<2; ++y) {
            const int yImageIn = yImage + y * 4;
            for (int x=-1; x<2; ++x) {
               const int xImageIn = xImage + x * 4;
               const int sharedIndex = (((z+1)*4+idz)*12+(y+1)*4+idy)*12+(x+1)*4+idx;
               const unsigned int indexXYZIn = xImageIn + c_ImageSize.x *
                     (yImageIn + zImageIn * c_ImageSize.y);
               const bool valid =
                     (xImageIn > -1 && xImageIn < (int)c_ImageSize.x) &&
                     (yImageIn > -1 && yImageIn < (int)c_ImageSize.y) &&
                     (zImageIn > -1 && zImageIn < (int)c_ImageSize.z);
               sWarpedValues[sharedIndex] = (valid && mask[indexXYZIn] > -1) ?
                     tex1Dfetch(warpedImageArray_texture, indexXYZIn) : nanf("sNaN");
            }
         }
      }
      const unsigned int voxIndex = ( zImage * c_ImageSize.y + yImage ) *
            c_ImageSize.x + xImage;
      const bool referenceInBounds =
            xImage < c_ImageSize.x &&
            yImage < c_ImageSize.y &&
            zImage < c_ImageSize.z;
      float rReferenceValue = (referenceInBounds && mask[voxIndex] > -1) ?
            tex1Dfetch(referenceImageArray_texture, voxIndex) : nanf("sNaN");
      const bool finiteReference = isfinite(rReferenceValue);
      rReferenceValue = finiteReference ? rReferenceValue : 0.f;
      // Count valid voxels per half-block (x: lower block, y: upper block).
      float2 tempVal = REDUCE_TEST(sData, finiteReference ? 1.0f : 0.0f, tid);
      const uint2 referenceSize = make_uint2((uint)tempVal.x, (uint)tempVal.y);
      float2 bestValue = make_float2(0.f, 0.f);
      float bestDisp[2][3];
      bestDisp[0][0] = bestDisp[1][0] = nanf("sNaN");
      if (referenceSize.x > 32 || referenceSize.y > 32) {
         float2 referenceMean=REDUCE_TEST(sData, rReferenceValue, tid);
         referenceMean.x /= (float)referenceSize.x;
         referenceMean.y /= (float)referenceSize.y;
         float referenceTemp;
         if(tid>63)
            referenceTemp = finiteReference ? rReferenceValue - referenceMean.y : 0.f;
         else referenceTemp = finiteReference ? rReferenceValue - referenceMean.x : 0.f;
         float2 referenceVar = REDUCE_TEST(sData, referenceTemp*referenceTemp, tid);
         // iteration over the result blocks (block matching part)
         for (unsigned int z=1; z<8; ++z) {
            for (unsigned int y=1; y<8; ++y) {
               for (unsigned int x=1; x<8; ++x) {
                  const unsigned int sharedIndex = ( (z+idz) * 12 + y + idy ) * 12 + x + idx;
                  const float rWarpedValue = sWarpedValues[sharedIndex];
                  const bool overlap = isfinite(rWarpedValue) && finiteReference;
                  tempVal = REDUCE_TEST(sData, overlap ? 1.0f : 0.0f, tid);
                  const uint2 currentWarpedSize = make_uint2((uint)tempVal.x, (uint)tempVal.y);
                  if (currentWarpedSize.x > 32 || currentWarpedSize.y > 32) {
                     float newreferenceTemp = referenceTemp;
                     float2 newreferenceVar = referenceVar;
                     if (currentWarpedSize.x!=referenceSize.x || currentWarpedSize.y!=referenceSize.y){
                        const float newReferenceValue = overlap ? rReferenceValue : 0.0f;
                        float2 newReferenceMean = REDUCE_TEST(sData, newReferenceValue, tid);
                        newReferenceMean.x /= (float)currentWarpedSize.x;
                        newReferenceMean.y /= (float)currentWarpedSize.y;
                        // BUG FIX: these assignments previously targeted
                        // referenceTemp, clobbering the loop-invariant value for
                        // all later iterations while newreferenceTemp kept the
                        // stale copy; the recomputed mean must feed
                        // newreferenceTemp, mirroring the non-test kernel.
                        if(tid>63)
                           newreferenceTemp = overlap ? newReferenceValue - newReferenceMean.y : 0.f;
                        else newreferenceTemp = overlap ? newReferenceValue - newReferenceMean.x : 0.f;
                        newreferenceVar = REDUCE_TEST(sData, newreferenceTemp * newreferenceTemp, tid);
                     }
                     const float rChecked = overlap ? rWarpedValue : 0.0f;
                     float2 warpedMean = REDUCE_TEST(sData, rChecked, tid);
                     warpedMean.x /= (float)currentWarpedSize.x;
                     warpedMean.y /= (float)currentWarpedSize.y;
                     float warpedTemp;
                     if(tid>63)
                        warpedTemp = overlap ? rChecked - warpedMean.y : 0.f;
                     else warpedTemp = overlap ? rChecked - warpedMean.x : 0.f;
                     const float2 warpedVar = REDUCE_TEST(sData, warpedTemp*warpedTemp, tid);
                     const float2 sumTargetResult = REDUCE_TEST(sData, newreferenceTemp*warpedTemp, tid);
                     // Thread 0 tracks the best match of the lower block,
                     // thread 64 that of the upper block.
                     if (tid==0 && currentWarpedSize.x > 32 ){
                        const float localCC = fabs(sumTargetResult.x *
                                                   rsqrtf(newreferenceVar.x * warpedVar.x));
                        if(localCC > bestValue.x) {
                           bestValue.x = localCC;
                           bestDisp[0][0] = x - 4.f;
                           bestDisp[0][1] = y - 4.f;
                           bestDisp[0][2] = z - 4.f;
                        }
                     }
                     if (tid==64 && currentWarpedSize.y > 32 ){
                        const float localCC = fabs(sumTargetResult.y *
                                                   rsqrtf(newreferenceVar.y * warpedVar.y));
                        if(localCC > bestValue.y) {
                           bestValue.y = localCC;
                           bestDisp[1][0] = x - 4.f;
                           bestDisp[1][1] = y - 4.f;
                           bestDisp[1][2] = z - 4.f;
                        }
                     }
                     __syncthreads();
                  }
               }
            }
         }
      }
      // Store the matched pair of the lower block (thread 0 owns it).
      if(tid==0 && currentBlockIndex[0]>-1){
         const unsigned int posIdx = 3 * currentBlockIndex[0];
         warpedPosition[posIdx] = NAN;
         if (isfinite(bestDisp[0][0])){
            const float referencePosition_temp[3] = { (float)xImage,
                                                      (float)yImage,
                                                      (float)zImage};
            bestDisp[0][0] += referencePosition_temp[0];
            bestDisp[0][1] += referencePosition_temp[1];
            bestDisp[0][2] += referencePosition_temp[2];
            reg_mat44_mul_cuda<float>(referenceMatrix_xyz,
                                      referencePosition_temp,
                                      &referencePosition[posIdx]);
            reg_mat44_mul_cuda<float>(referenceMatrix_xyz,
                                      bestDisp[0],
                                      &warpedPosition[posIdx]);
            atomicAdd(definedBlock, 1);
         }
      }
      // Store the matched pair of the upper block (thread 64 owns it).
      if(tid==64 && currentBlockIndex[1]>-1){
         const unsigned int posIdx = 3 * currentBlockIndex[1];
         warpedPosition[posIdx] = NAN;
         if (isfinite(bestDisp[1][0])){
            const float referencePosition_temp[3] = {(float)xImage,
                                                     (float)yImage,
                                                     (float)zImage};
            bestDisp[1][0] += referencePosition_temp[0];
            bestDisp[1][1] += referencePosition_temp[1];
            bestDisp[1][2] += referencePosition_temp[2];
            reg_mat44_mul_cuda<float>(referenceMatrix_xyz,
                                      referencePosition_temp,
                                      &referencePosition[posIdx]);
            reg_mat44_mul_cuda<float>(referenceMatrix_xyz,
                                      bestDisp[1],
                                      &warpedPosition[posIdx]);
            atomicAdd(definedBlock, 1);
         }
      }
   }
}
#else
/* *************************************************************** */
// 3D block matching: one CUDA block of 4x4x4 threads processes one 4x4x4
// image block, searching a +/-3 voxel neighbourhood in the warped image
// for the displacement maximising the normalised cross-correlation.
// Dynamic shared memory must hold the 12x12x12 warped neighbourhood.
// Every thread executes the reductions, so control flow up to them must
// be uniform across the block.
__global__ void blockMatchingKernel3D(float *warpedPosition,
                                      float *referencePosition,
                                      int *mask,
                                      float* referenceMatrix_xyz,
                                      unsigned int *definedBlock)
{
   extern __shared__ float sWarpedValues[];
   // Compute the current block index
   const unsigned int bid = (blockIdx.z * gridDim.y + blockIdx.y) * gridDim.x + blockIdx.x ;
   // -1 marks an inactive block (pruned on the host side)
   const int currentBlockIndex = tex1Dfetch(totalBlock_texture, bid);
   if (currentBlockIndex > -1) {
      const unsigned int idx = threadIdx.x;
      const unsigned int idy = threadIdx.y;
      const unsigned int idz = threadIdx.z;
      const unsigned int tid = (idz*4+idy)*4+idx;
      const unsigned int xImage = blockIdx.x * 4 + idx;
      const unsigned int yImage = blockIdx.y * 4 + idy;
      const unsigned int zImage = blockIdx.z * 4 + idz;
      //populate shared memory with resultImageArray's values
      // 3x3x3 tiles of 4x4x4 voxels -> 12x12x12 neighbourhood; out-of-bounds
      // or masked voxels are stored as NaN so they are excluded later on.
      for (int z=-1 ; z<2; ++z) {
         const int zImageIn = zImage + z * 4;
         for (int y=-1; y<2; ++y) {
            const int yImageIn = yImage + y * 4;
            for (int x=-1; x<2; ++x) {
               const int xImageIn = xImage + x * 4;
               const int sharedIndex = (((z+1)*4+idz)*12+(y+1)*4+idy)*12+(x+1)*4+idx;
               const unsigned int indexXYZIn = xImageIn + c_ImageSize.x *
                     (yImageIn + zImageIn * c_ImageSize.y);
               const bool valid =
                     (xImageIn > -1 && xImageIn < (int)c_ImageSize.x) &&
                     (yImageIn > -1 && yImageIn < (int)c_ImageSize.y) &&
                     (zImageIn > -1 && zImageIn < (int)c_ImageSize.z);
               sWarpedValues[sharedIndex] = (valid && mask[indexXYZIn] > -1) ?
                     tex1Dfetch(warpedImageArray_texture, indexXYZIn) : nanf("sNaN"); //for some reason the mask here creates probs
            }
         }
      }
      //for most cases we need this out of the loop
      //value if the block is 4x4x4 NaN otherwise
      const unsigned int voxIndex = ( zImage * c_ImageSize.y + yImage ) *
            c_ImageSize.x + xImage;
      const bool referenceInBounds =
            xImage < c_ImageSize.x &&
            yImage < c_ImageSize.y &&
            zImage < c_ImageSize.z;
      float rReferenceValue = (referenceInBounds && mask[voxIndex] > -1) ?
            tex1Dfetch(referenceImageArray_texture, voxIndex) : nanf("sNaN");
      const bool finiteReference = isfinite(rReferenceValue);
      rReferenceValue = finiteReference ? rReferenceValue : 0.f;
      // Number of valid (finite, unmasked) reference voxels in this block.
      const unsigned int referenceSize = __syncthreads_count(finiteReference);
      // bestDisplacement[0] stays NaN when no acceptable match is found.
      float bestDisplacement[3] = {nanf("sNaN"), 0.0f, 0.0f };
      float bestCC = 0.0f;
      // Require more than half of the 64 voxels to be valid.
      if (referenceSize > 32) {
         //the target values must remain constant throughout the block matching process
         const float referenceMean = __fdividef(blockReduceSum(rReferenceValue, tid), referenceSize);
         const float referenceTemp = finiteReference ? rReferenceValue - referenceMean : 0.f;
         const float referenceVar = blockReduceSum(referenceTemp * referenceTemp, tid);
         // iteration over the result blocks (block matching part)
         // (x,y,z) in [1,8) scans displacements of -3..+3 voxels per axis.
         for (unsigned int z=1; z<8; ++z) {
            for (unsigned int y=1; y<8; ++y) {
               for (unsigned int x=1; x<8; ++x) {
                  const unsigned int sharedIndex = ( (z+idz) * 12 + y + idy ) * 12 + x + idx;
                  const float rWarpedValue = sWarpedValues[sharedIndex];
                  const bool overlap = isfinite(rWarpedValue) && finiteReference;
                  // Uniform across the block, so the branch below is
                  // non-divergent and the reductions inside are safe.
                  const unsigned int currentWarpedSize = __syncthreads_count(overlap);
                  if (currentWarpedSize > 32) {
                     //the target values must remain intact at each loop, so please do not touch this!
                     float newreferenceTemp = referenceTemp;
                     float newreferenceVar = referenceVar;
                     if (currentWarpedSize != referenceSize){
                        // Recompute mean/variance over the overlapping voxels only.
                        const float newReferenceValue = overlap ? rReferenceValue : 0.0f;
                        const float newReferenceMean = __fdividef(blockReduceSum(newReferenceValue, tid), currentWarpedSize);
                        newreferenceTemp = overlap ? newReferenceValue - newReferenceMean : 0.0f;
                        newreferenceVar = blockReduceSum(newreferenceTemp * newreferenceTemp, tid);
                     }
                     const float rChecked = overlap ? rWarpedValue : 0.0f;
                     const float warpedMean = __fdividef(blockReduceSum(rChecked, tid), currentWarpedSize);
                     const float warpedTemp = overlap ? rChecked - warpedMean : 0.0f;
                     const float warpedVar = blockReduceSum(warpedTemp * warpedTemp, tid);
                     const float sumTargetResult = blockReduceSum((newreferenceTemp)* (warpedTemp), tid);
                     // Absolute normalised cross-correlation for this displacement.
                     const float localCC = (newreferenceVar * warpedVar) > 0.0 ? fabs((sumTargetResult) / sqrt(newreferenceVar * warpedVar)) : 0.0;
                     // Only thread 0 tracks the best candidate; the epsilon
                     // biases later ties away from replacing an earlier match.
                     if (tid == 0 && localCC > bestCC) {
                        bestCC = localCC + 1.0e-7f;
                        bestDisplacement[0] = x - 4.f;
                        bestDisplacement[1] = y - 4.f;
                        bestDisplacement[2] = z - 4.f;
                     }
                  }
               }
            }
         }
      }
      // Thread 0 converts block coordinates to world space and stores the
      // matched pair; definedBlock counts blocks with a valid match.
      if (tid==0) {
         const unsigned int posIdx = 3 * currentBlockIndex;
         const float referencePosition_temp[3] = { (float)xImage, (float)yImage, (float)zImage };
         bestDisplacement[0] += referencePosition_temp[0];
         bestDisplacement[1] += referencePosition_temp[1];
         bestDisplacement[2] += referencePosition_temp[2];
         reg_mat44_mul_cuda<float>(referenceMatrix_xyz, referencePosition_temp, &referencePosition[posIdx]);
         reg_mat44_mul_cuda<float>(referenceMatrix_xyz, bestDisplacement, &warpedPosition[posIdx]);
         if (isfinite(bestDisplacement[0])) {
            atomicAdd(definedBlock, 1);
         }
      }
   }
}
#endif
/* *************************************************************** */
/**
 * Host driver for the GPU block-matching step.
 * Uploads the image/block dimensions to constant memory, binds the
 * reference/warped/active-block textures, launches the 2D or 3D kernel
 * (one thread block per image block) and retrieves the number of blocks
 * for which a valid correspondence was found into
 * params->definedActiveBlockNumber.
 * All device pointers are assumed to be already allocated and populated.
 */
void block_matching_method_gpu(nifti_image *targetImage,
                               _reg_blockMatchingParam *params,
                               float **targetImageArray_d,
                               float **resultImageArray_d,
                               float **referencePosition_d,
                               float **warpedPosition_d,
                               int **totalBlock_d,
                               int **mask_d,
                               float** referenceMat_d)
{
   // Validate the parameters before any device allocation so an
   // unsupported configuration does not leave device memory allocated.
   if (params->stepSize!=1 || params->voxelCaptureRange!=3){
      reg_print_msg_error("The block matching CUDA kernel supports only a step size of 1 and a voxel capture range of 3");
      reg_exit();
   }
   // Copy some required parameters over to the device
   uint3 imageSize = make_uint3(targetImage->nx,
                                targetImage->ny,
                                targetImage->nz);
   uint3 blockSize = make_uint3(params->blockNumber[0],
                                params->blockNumber[1],
                                params->blockNumber[2]);
   NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ImageSize,&imageSize,sizeof(uint3)));
   // NOTE(review): c_BlockDim is declared as int3 but is filled from a
   // uint3 of identical size/layout — works, but the types should agree.
   NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_BlockDim,&blockSize,sizeof(uint3)));
   // Texture binding
   const unsigned int numBlocks = params->blockNumber[0] * params->blockNumber[1] * params->blockNumber[2];
   NR_CUDA_SAFE_CALL(hipBindTexture(0, referenceImageArray_texture, *targetImageArray_d, targetImage->nvox * sizeof(float)));
   NR_CUDA_SAFE_CALL(hipBindTexture(0, warpedImageArray_texture, *resultImageArray_d, targetImage->nvox * sizeof(float)));
   NR_CUDA_SAFE_CALL(hipBindTexture(0, totalBlock_texture, *totalBlock_d, numBlocks * sizeof(int)));
   // Device counter for the number of blocks with a valid match.
   unsigned int *definedBlock_d;
   unsigned int *definedBlock_h = (unsigned int*) malloc(sizeof(unsigned int));
   *definedBlock_h = 0;
   NR_CUDA_SAFE_CALL(hipMalloc((void** )(&definedBlock_d), sizeof(unsigned int)));
   NR_CUDA_SAFE_CALL(hipMemcpy(definedBlock_d, definedBlock_h, sizeof(unsigned int), hipMemcpyHostToDevice));
#ifdef USE_TEST_KERNEL
   // Test kernel: 4x4x8 threads handle two stacked 4x4x4 image blocks.
   dim3 BlockDims1D(4,4,8);
   dim3 BlocksGrid3D(
            params->blockNumber[0],
            params->blockNumber[1],
            (unsigned int)reg_ceil((float)params->blockNumber[2]/2.f));
   // 12x12x16 warped neighbourhood + 128 floats of reduction scratch.
   unsigned int sMem = (128 + 4*3 * 4*3 * 4*4) * sizeof(float);
#else
   dim3 BlockDims1D(4,4,4);
   dim3 BlocksGrid3D(
            params->blockNumber[0],
            params->blockNumber[1],
            params->blockNumber[2]);
   unsigned int sMem = (64 + 4*3 * 4*3 * 4*3) * sizeof(float); // (3*4)^3
#endif
   if (targetImage->nz == 1){
      // 2D image: flatten the launch and shrink shared memory to (3*4)^2.
      BlockDims1D.z=1;
      BlocksGrid3D.z=1;
      sMem = (16 + 144) * sizeof(float); // (3*4)^2
      blockMatchingKernel2D << <BlocksGrid3D, BlockDims1D, sMem >> >(*warpedPosition_d,
                                                                     *referencePosition_d,
                                                                     *mask_d,
                                                                     *referenceMat_d,
                                                                     definedBlock_d);
   }
   else {
      hipLaunchKernelGGL(( blockMatchingKernel3D) , dim3(BlocksGrid3D), dim3(BlockDims1D), sMem, 0, *warpedPosition_d,
                                                                     *referencePosition_d,
                                                                     *mask_d,
                                                                     *referenceMat_d,
                                                                     definedBlock_d);
   }
#ifndef NDEBUG
   NR_CUDA_CHECK_KERNEL(BlocksGrid3D, BlockDims1D);
#else
   NR_CUDA_SAFE_CALL(hipDeviceSynchronize());
#endif
   NR_CUDA_SAFE_CALL(hipMemcpy((void * )definedBlock_h, (void * )definedBlock_d, sizeof(unsigned int), hipMemcpyDeviceToHost));
   params->definedActiveBlockNumber = *definedBlock_h;
   NR_CUDA_SAFE_CALL(hipUnbindTexture(referenceImageArray_texture));
   NR_CUDA_SAFE_CALL(hipUnbindTexture(warpedImageArray_texture));
   NR_CUDA_SAFE_CALL(hipUnbindTexture(totalBlock_texture));
   free(definedBlock_h);
   // Checked like every other device call for consistency.
   NR_CUDA_SAFE_CALL(hipFree(definedBlock_d));
}
/* *************************************************************** */
#endif //_REG_BLOCKMATCHING_GPU_CU
| 8ef2585e1b70983eb3bbf7f62fa22452dae06a23.cu | /*
* _reg_blockMatching_gpu.cu
*
*
* Created by Marc Modat and Pankaj Daga on 24/03/2009.
* Copyright (c) 2009-2018, University College London
* Copyright (c) 2018, NiftyReg Developers.
* All rights reserved.
*
*/
#ifndef _REG_BLOCKMATCHING_GPU_CU
#define _REG_BLOCKMATCHING_GPU_CU
#include "blockMatchingKernel.h"
#include "_reg_ReadWriteImage.h"
#include "_reg_tools.h"
#include <vector>
#include "_reg_maths.h"
//#define USE_TEST_KERNEL
////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////
/*
* before it was in the file _reg_blockMatching_kernels.cu
*
*
* Created by Marc Modat and Pankaj Daga on 24/03/2009.
* Copyright (c) 2009-2018, University College London
* Copyright (c) 2018, NiftyReg Developers.
* All rights reserved.
* See the LICENSE.txt file in the nifty_reg root folder
*
*/
// Some parameters that we need for the kernel execution.
// The caller is supposed to ensure that the values are set
// Number of blocks in each dimension
__device__ __constant__ int3 c_BlockDim;
__device__ __constant__ uint3 c_ImageSize;
// Transformation matrix from nifti header
__device__ __constant__ float4 t_m_a;
__device__ __constant__ float4 t_m_b;
__device__ __constant__ float4 t_m_c;
#define BLOCK_WIDTH 4
#define BLOCK_SIZE 64
#define OVERLAP_SIZE 3
#define STEP_SIZE 1
texture<float, 1, cudaReadModeElementType> referenceImageArray_texture;
texture<float, 1, cudaReadModeElementType> warpedImageArray_texture;
texture<int, 1, cudaReadModeElementType> totalBlock_texture;
/* *************************************************************** */
template<class DTYPE>
__inline__ __device__
void reg2D_mat44_mul_cuda(float* mat, DTYPE const* in, DTYPE *out)
{
   // Apply the first two rows of a row-major 4x4 matrix to a 2D point.
   // All arithmetic is done in double precision; the implicit third
   // coordinate is zero (the *0 term is kept so NaN/inf entries in the
   // matrix propagate exactly as before).
   const double x = (double)in[0];
   const double y = (double)in[1];
   out[0] = (DTYPE)((double)mat[0] * x + (double)mat[1] * y + (double)mat[2] * 0 + (double)mat[3]);
   out[1] = (DTYPE)((double)mat[4] * x + (double)mat[5] * y + (double)mat[6] * 0 + (double)mat[7]);
}
template<class DTYPE>
__device__ __inline__ void reg_mat44_mul_cuda(float* mat, DTYPE const* in, DTYPE *out)
{
   // Apply the first three rows of a row-major 4x4 matrix to a 3D point,
   // promoting everything to double precision before the multiply-adds.
   const double x = (double)in[0];
   const double y = (double)in[1];
   const double z = (double)in[2];
   out[0] = (DTYPE)((double)mat[0] * x + (double)mat[1] * y + (double)mat[2]  * z + (double)mat[3]);
   out[1] = (DTYPE)((double)mat[4] * x + (double)mat[5] * y + (double)mat[6]  * z + (double)mat[7]);
   out[2] = (DTYPE)((double)mat[8] * x + (double)mat[9] * y + (double)mat[10] * z + (double)mat[11]);
}
// Apply the transformation matrix
// Apply the affine transformation stored in constant memory (one float4
// per matrix row: t_m_a, t_m_b, t_m_c) to the point pt.
__device__ inline void apply_affine(const float4 &pt, float * result)
{
   const float4 row0 = t_m_a;
   const float4 row1 = t_m_b;
   const float4 row2 = t_m_c;
   result[0] = (row0.x * pt.x) + (row0.y * pt.y) + (row0.z * pt.z) + (row0.w);
   result[1] = (row1.x * pt.x) + (row1.y * pt.y) + (row1.z * pt.z) + (row1.w);
   result[2] = (row2.x * pt.x) + (row2.y * pt.y) + (row2.z * pt.z) + (row2.w);
}
/* *************************************************************** */
__inline__ __device__
float blockReduce2DSum(float val, int tid)
{
   // Tree reduction over the 16 threads of a 4x4 block.
   // tid must be the linear thread index in [0,16) and ALL threads of the
   // block must call this function (it contains barriers).
   static __shared__ float shared[16];
   shared[tid] = val;
   __syncthreads();
   for (unsigned int i = 8; i > 0; i >>= 1){
      if (tid < i) {
         shared[tid] += shared[tid + i];
      }
      __syncthreads();
   }
   // Read the result, then barrier once more: without this final barrier a
   // subsequent reduction could overwrite shared[] before every thread has
   // read shared[0] (write-after-read race between back-to-back calls).
   const float sum = shared[0];
   __syncthreads();
   return sum;
}
/* *************************************************************** */
__inline__ __device__
float blockReduceSum(float val, int tid)
{
   // Tree reduction over the 64 threads of a 4x4x4 block.
   // tid must be the linear thread index in [0,64) and ALL threads of the
   // block must call this function (it contains barriers).
   static __shared__ float shared[64];
   shared[tid] = val;
   __syncthreads();
   for (unsigned int i = 32; i > 0; i >>= 1){
      if (tid < i) {
         shared[tid] += shared[tid + i];
      }
      __syncthreads();
   }
   // Read the result, then barrier once more: without this final barrier a
   // subsequent reduction could overwrite shared[] before every thread has
   // read shared[0] (write-after-read race between back-to-back calls).
   const float sum = shared[0];
   __syncthreads();
   return sum;
}
/* *************************************************************** */
// 2D block matching: one CUDA block of 4x4 threads processes one 4x4
// image block, searching a +/-3 voxel neighbourhood in the warped image
// for the displacement maximising the normalised cross-correlation.
// Dynamic shared memory must hold the 12x12 warped neighbourhood
// (144 floats). Every thread executes the reductions, so control flow up
// to them must be uniform across the block.
__global__ void blockMatchingKernel2D(float *warpedPosition,
                                      float *referencePosition,
                                      int *mask,
                                      float* referenceMatrix_xyz,
                                      unsigned int *definedBlock)
{
   extern __shared__ float sWarpedValues[];
   // Compute the current block index
   const unsigned int bid = blockIdx.y * gridDim.x + blockIdx.x;
   // -1 marks an inactive block (pruned on the host side)
   const int currentBlockIndex = tex1Dfetch(totalBlock_texture, bid);
   if (currentBlockIndex > -1) {
      // NOTE(review): idy comes from threadIdx.x and idx from threadIdx.y,
      // the opposite of the 3D kernel — presumably harmless for a square
      // block, but confirm before changing.
      const unsigned int idy = threadIdx.x;
      const unsigned int idx = threadIdx.y;
      const unsigned int tid = idy * 4 + idx;
      const unsigned int xImage = blockIdx.x * 4 + idx;
      const unsigned int yImage = blockIdx.y * 4 + idy;
      //populate shared memory with resultImageArray's values
      // 3x3 tiles of 4x4 voxels -> 12x12 neighbourhood; out-of-bounds or
      // masked voxels are stored as NaN so they are excluded later on.
      for (int y=-1; y<2; ++y) {
         const int yImageIn = yImage + y * 4;
         for (int x=-1; x<2; ++x) {
            const int xImageIn = xImage + x * 4;
            const int sharedIndex = ((y+1)*4+idy)*12+(x+1)*4+idx;
            const int indexXYIn = yImageIn * c_ImageSize.x + xImageIn;
            const bool valid =
                  (xImageIn > -1 && xImageIn < (int)c_ImageSize.x) &&
                  (yImageIn > -1 && yImageIn < (int)c_ImageSize.y);
            sWarpedValues[sharedIndex] = (valid && mask[indexXYIn] > -1) ?
                  tex1Dfetch(warpedImageArray_texture, indexXYIn) : nanf("sNaN");
         }
      }
      //for most cases we need this out of the loop
      //value if the block is 4x4 NaN otherwise
      const unsigned long voxIndex = yImage * c_ImageSize.x + xImage;
      const bool referenceInBounds =
            xImage < c_ImageSize.x &&
            yImage < c_ImageSize.y;
      float rReferenceValue = (referenceInBounds && mask[voxIndex] > -1) ?
            tex1Dfetch(referenceImageArray_texture, voxIndex) : nanf("sNaN");
      const bool finiteReference = isfinite(rReferenceValue);
      rReferenceValue = finiteReference ? rReferenceValue : 0.f;
      // Number of valid (finite, unmasked) reference voxels in this block.
      const unsigned int referenceSize = __syncthreads_count(finiteReference);
      // bestDisplacement[0] stays NaN when no acceptable match is found.
      float bestDisplacement[2] = {nanf("sNaN"), 0.0f};
      float bestCC = 0.0;
      // Require more than half of the 16 voxels to be valid.
      if (referenceSize > 8) {
         //the target values must remain constant throughout the block matching process
         const float referenceMean = __fdividef(blockReduce2DSum(rReferenceValue, tid), referenceSize);
         const float referenceTemp = finiteReference ? rReferenceValue - referenceMean : 0.f;
         const float referenceVar = blockReduce2DSum(referenceTemp * referenceTemp, tid);
         // iteration over the result blocks (block matching part)
         // (x,y) in [1,8) scans displacements of -3..+3 voxels around the block.
         for (unsigned int y=1; y<8; ++y) {
            for (unsigned int x=1; x<8; ++x) {
               const unsigned int sharedIndex = ( y + idy ) * 12 + x + idx;
               const float rWarpedValue = sWarpedValues[sharedIndex];
               const bool overlap = isfinite(rWarpedValue) && finiteReference;
               // Uniform across the block, so the branch below is non-divergent
               // and the reductions inside are safe.
               const unsigned int currentWarpedSize = __syncthreads_count(overlap);
               if (currentWarpedSize > 8) {
                  //the reference values must remain intact at each loop, so please do not touch this!
                  float newreferenceTemp = referenceTemp;
                  float newreferenceVar = referenceVar;
                  if (currentWarpedSize != referenceSize){
                     // Recompute mean/variance over the overlapping voxels only.
                     const float newReferenceValue = overlap ? rReferenceValue : 0.0f;
                     const float newReferenceMean = __fdividef(blockReduce2DSum(newReferenceValue, tid), currentWarpedSize);
                     newreferenceTemp = overlap ? newReferenceValue - newReferenceMean : 0.0f;
                     newreferenceVar = blockReduce2DSum(newreferenceTemp * newreferenceTemp, tid);
                  }
                  const float rChecked = overlap ? rWarpedValue : 0.0f;
                  const float warpedMean = __fdividef(blockReduce2DSum(rChecked, tid), currentWarpedSize);
                  const float warpedTemp = overlap ? rChecked - warpedMean : 0.0f;
                  const float warpedVar = blockReduce2DSum(warpedTemp * warpedTemp, tid);
                  const float sumTargetResult = blockReduce2DSum((newreferenceTemp)* (warpedTemp), tid);
                  // Absolute normalised cross-correlation for this displacement.
                  const float localCC = (newreferenceVar * warpedVar) > 0.0 ? fabs((sumTargetResult) / sqrt(newreferenceVar * warpedVar)) : 0.0;
                  // Only thread 0 tracks the best candidate; the epsilon biases
                  // later ties away from replacing an equal earlier match.
                  if (tid == 0 && localCC > bestCC) {
                     bestCC = localCC + 1.0e-7f;
                     bestDisplacement[0] = x - 4.f;
                     bestDisplacement[1] = y - 4.f;
                  }
               }
            }
         }
      }
      // Thread 0 converts block coordinates to world space and stores the
      // matched pair; definedBlock counts blocks with a valid match.
      if (tid==0){
         const unsigned int posIdx = 2 * currentBlockIndex;
         const float referencePosition_temp[2] = {(float)xImage, (float)yImage};
         bestDisplacement[0] += referencePosition_temp[0];
         bestDisplacement[1] += referencePosition_temp[1];
         reg2D_mat44_mul_cuda<float>(referenceMatrix_xyz, referencePosition_temp, &referencePosition[posIdx]);
         reg2D_mat44_mul_cuda<float>(referenceMatrix_xyz, bestDisplacement, &warpedPosition[posIdx]);
         if (isfinite(bestDisplacement[0])) {
            atomicAdd(definedBlock, 1);
         }
      }
   }
}
/* *************************************************************** */
#ifdef USE_TEST_KERNEL
// Performs two independent 64-element tree reductions in a single call:
// threads 0-63 reduce their values into sData[0], threads 64-127 into
// sData[64]. sData must hold at least 128 floats and every thread of the
// (4x4x8) block must call this function (it contains barriers).
__inline__ __device__
float2 REDUCE_TEST(float* sData,
                   float data,
                   unsigned int tid)
{
   sData[tid] = data;
   __syncthreads();
   // Threads 64-127 belong to the second half / second image block.
   bool seconHalf = tid > 63 ? true : false;
   for (unsigned int i = 32; i > 0; i >>= 1){
      if (tid < i) sData[tid] += sData[tid + i];
      // Mirror of the reduction above, offset by 64 for the second half.
      if (seconHalf && tid < 64 + i) sData[tid] += sData[tid + i];
      __syncthreads();
   }
   // .x = sum of the first half, .y = sum of the second half.
   const float2 temp = make_float2(sData[0], sData[64]);
   // Barrier so the next call cannot overwrite sData before all reads.
   __syncthreads();
   return temp;
}
/* *************************************************************** */
__global__ void blockMatchingKernel3D(float *warpedPosition,
float *referencePosition,
int *mask,
float* referenceMatrix_xyz,
unsigned int *definedBlock)
{
extern __shared__ float sWarpedValues[];
float *sData = &sWarpedValues[12*12*16];
// Compute the current block index
const unsigned int bid0 = (2*blockIdx.z * gridDim.y + blockIdx.y) *
gridDim.x + blockIdx.x;
const unsigned int bid1 = bid0 + gridDim.x * gridDim.y;
int currentBlockIndex[2] = {tex1Dfetch(totalBlock_texture, bid0),
tex1Dfetch(totalBlock_texture, bid1)};
currentBlockIndex[1] = (2*blockIdx.z+1)<c_BlockDim.z ? currentBlockIndex[1] : -1;
if (currentBlockIndex[0] > -1 || currentBlockIndex[1] > -1) {
const unsigned int idx = threadIdx.x;
const unsigned int idy = threadIdx.y;
const unsigned int idz = threadIdx.z;
const unsigned int tid = (idz*4+idy)*4+idx;
const unsigned int xImage = blockIdx.x * 4 + idx;
const unsigned int yImage = blockIdx.y * 4 + idy;
const unsigned int zImage = blockIdx.z * 8 + idz;
//populate shared memory with resultImageArray's values
for (int z=-1 ; z<2; z+=2) {
const int zImageIn = zImage + z * 4;
for (int y=-1; y<2; ++y) {
const int yImageIn = yImage + y * 4;
for (int x=-1; x<2; ++x) {
const int xImageIn = xImage + x * 4;
const int sharedIndex = (((z+1)*4+idz)*12+(y+1)*4+idy)*12+(x+1)*4+idx;
const unsigned int indexXYZIn = xImageIn + c_ImageSize.x *
(yImageIn + zImageIn * c_ImageSize.y);
const bool valid =
(xImageIn > -1 && xImageIn < (int)c_ImageSize.x) &&
(yImageIn > -1 && yImageIn < (int)c_ImageSize.y) &&
(zImageIn > -1 && zImageIn < (int)c_ImageSize.z);
sWarpedValues[sharedIndex] = (valid && mask[indexXYZIn] > -1) ?
tex1Dfetch(warpedImageArray_texture, indexXYZIn) : nanf("sNaN");
}
}
}
const unsigned int voxIndex = ( zImage * c_ImageSize.y + yImage ) *
c_ImageSize.x + xImage;
const bool referenceInBounds =
xImage < c_ImageSize.x &&
yImage < c_ImageSize.y &&
zImage < c_ImageSize.z;
float rReferenceValue = (referenceInBounds && mask[voxIndex] > -1) ?
tex1Dfetch(referenceImageArray_texture, voxIndex) : nanf("sNaN");
const bool finiteReference = isfinite(rReferenceValue);
rReferenceValue = finiteReference ? rReferenceValue : 0.f;
float2 tempVal = REDUCE_TEST(sData, finiteReference ? 1.0f : 0.0f, tid);
const uint2 referenceSize = make_uint2((uint)tempVal.x, (uint)tempVal.y);
float2 bestValue = make_float2(0.f, 0.f);
float bestDisp[2][3];
bestDisp[0][0] = bestDisp[1][0] = nanf("sNaN");
if (referenceSize.x > 32 || referenceSize.y > 32) {
float2 referenceMean=REDUCE_TEST(sData, rReferenceValue, tid);
referenceMean.x /= (float)referenceSize.x;
referenceMean.y /= (float)referenceSize.y;
float referenceTemp;
if(tid>63)
referenceTemp = finiteReference ? rReferenceValue - referenceMean.y : 0.f;
else referenceTemp = finiteReference ? rReferenceValue - referenceMean.x : 0.f;
float2 referenceVar = REDUCE_TEST(sData, referenceTemp*referenceTemp, tid);
// iteration over the result blocks (block matching part)
for (unsigned int z=1; z<8; ++z) {
for (unsigned int y=1; y<8; ++y) {
for (unsigned int x=1; x<8; ++x) {
const unsigned int sharedIndex = ( (z+idz) * 12 + y + idy ) * 12 + x + idx;
const float rWarpedValue = sWarpedValues[sharedIndex];
const bool overlap = isfinite(rWarpedValue) && finiteReference;
tempVal = REDUCE_TEST(sData, overlap ? 1.0f : 0.0f, tid);
const uint2 currentWarpedSize = make_uint2((uint)tempVal.x, (uint)tempVal.y);
if (currentWarpedSize.x > 32 || currentWarpedSize.y > 32) {
float newreferenceTemp = referenceTemp;
float2 newreferenceVar = referenceVar;
if (currentWarpedSize.x!=referenceSize.x || currentWarpedSize.y!=referenceSize.y){
const float newReferenceValue = overlap ? rReferenceValue : 0.0f;
float2 newReferenceMean = REDUCE_TEST(sData, newReferenceValue, tid);
newReferenceMean.x /= (float)currentWarpedSize.x;
newReferenceMean.y /= (float)currentWarpedSize.y;
if(tid>63)
referenceTemp = overlap ? newReferenceValue - newReferenceMean.y : 0.f;
else referenceTemp = overlap ? newReferenceValue - newReferenceMean.x : 0.f;
newreferenceVar = REDUCE_TEST(sData, newreferenceTemp * newreferenceTemp, tid);
}
const float rChecked = overlap ? rWarpedValue : 0.0f;
float2 warpedMean = REDUCE_TEST(sData, rChecked, tid);
warpedMean.x /= (float)currentWarpedSize.x;
warpedMean.y /= (float)currentWarpedSize.y;
float warpedTemp;
if(tid>63)
warpedTemp = overlap ? rChecked - warpedMean.y : 0.f;
else warpedTemp = overlap ? rChecked - warpedMean.x : 0.f;
const float2 warpedVar = REDUCE_TEST(sData, warpedTemp*warpedTemp, tid);
const float2 sumTargetResult = REDUCE_TEST(sData, newreferenceTemp*warpedTemp, tid);
if (tid==0 && currentWarpedSize.x > 32 ){
const float localCC = fabs(sumTargetResult.x *
rsqrtf(newreferenceVar.x * warpedVar.x));
if(localCC > bestValue.x) {
bestValue.x = localCC;
bestDisp[0][0] = x - 4.f;
bestDisp[0][1] = y - 4.f;
bestDisp[0][2] = z - 4.f;
}
}
if (tid==64 && currentWarpedSize.y > 32 ){
const float localCC = fabs(sumTargetResult.y *
rsqrtf(newreferenceVar.y * warpedVar.y));
if(localCC > bestValue.y) {
bestValue.y = localCC;
bestDisp[1][0] = x - 4.f;
bestDisp[1][1] = y - 4.f;
bestDisp[1][2] = z - 4.f;
}
}
__syncthreads();
}
}
}
}
}
if(tid==0 && currentBlockIndex[0]>-1){
const unsigned int posIdx = 3 * currentBlockIndex[0];
warpedPosition[posIdx] = NAN;
if (isfinite(bestDisp[0][0])){
const float referencePosition_temp[3] = { (float)xImage,
(float)yImage,
(float)zImage};
bestDisp[0][0] += referencePosition_temp[0];
bestDisp[0][1] += referencePosition_temp[1];
bestDisp[0][2] += referencePosition_temp[2];
reg_mat44_mul_cuda<float>(referenceMatrix_xyz,
referencePosition_temp,
&referencePosition[posIdx]);
reg_mat44_mul_cuda<float>(referenceMatrix_xyz,
bestDisp[0],
&warpedPosition[posIdx]);
atomicAdd(definedBlock, 1);
}
}
if(tid==64 && currentBlockIndex[1]>-1){
const unsigned int posIdx = 3 * currentBlockIndex[1];
warpedPosition[posIdx] = NAN;
if (isfinite(bestDisp[1][0])){
const float referencePosition_temp[3] = {(float)xImage,
(float)yImage,
(float)zImage};
bestDisp[1][0] += referencePosition_temp[0];
bestDisp[1][1] += referencePosition_temp[1];
bestDisp[1][2] += referencePosition_temp[2];
reg_mat44_mul_cuda<float>(referenceMatrix_xyz,
referencePosition_temp,
&referencePosition[posIdx]);
reg_mat44_mul_cuda<float>(referenceMatrix_xyz,
bestDisp[1],
&warpedPosition[posIdx]);
atomicAdd(definedBlock, 1);
}
}
}
}
#else
/* *************************************************************** */
/**
 * 3-D block-matching kernel (normalised cross-correlation).
 * Launch layout: one 4x4x4 thread block (64 threads) per reference image
 * block; dynamic shared memory must hold the 12x12x12 warped-neighbourhood
 * tile (see sMem computed by the host wrapper).
 * For every active block (totalBlock_texture entry > -1) the threads scan
 * all displacements in a +/-3 voxel neighbourhood of the warped image,
 * keep the displacement with the largest |NCC|, then write the block
 * origin and the matched position (both mapped through referenceMatrix_xyz)
 * into referencePosition / warpedPosition, and atomically count blocks
 * that found a finite displacement in *definedBlock.
 */
__global__ void blockMatchingKernel3D(float *warpedPosition,
                                      float *referencePosition,
                                      int *mask,
                                      float* referenceMatrix_xyz,
                                      unsigned int *definedBlock)
{
   extern __shared__ float sWarpedValues[];
   // Compute the current block index
   const unsigned int bid = (blockIdx.z * gridDim.y + blockIdx.y) * gridDim.x + blockIdx.x ;
   const int currentBlockIndex = tex1Dfetch(totalBlock_texture, bid);
   if (currentBlockIndex > -1) {
      // per-thread coordinates inside the 4x4x4 block and in the image
      const unsigned int idx = threadIdx.x;
      const unsigned int idy = threadIdx.y;
      const unsigned int idz = threadIdx.z;
      const unsigned int tid = (idz*4+idy)*4+idx;
      const unsigned int xImage = blockIdx.x * 4 + idx;
      const unsigned int yImage = blockIdx.y * 4 + idy;
      const unsigned int zImage = blockIdx.z * 4 + idz;
      //populate shared memory with resultImageArray's values
      // Each thread fetches its own voxel in all 27 neighbouring 4^3 cells,
      // filling a 12x12x12 shared tile. Out-of-bounds or masked-out voxels
      // are stored as NaN so they drop out of the correlation later.
      for (int z=-1 ; z<2; ++z) {
         const int zImageIn = zImage + z * 4;
         for (int y=-1; y<2; ++y) {
            const int yImageIn = yImage + y * 4;
            for (int x=-1; x<2; ++x) {
               const int xImageIn = xImage + x * 4;
               const int sharedIndex = (((z+1)*4+idz)*12+(y+1)*4+idy)*12+(x+1)*4+idx;
               const unsigned int indexXYZIn = xImageIn + c_ImageSize.x *
                     (yImageIn + zImageIn * c_ImageSize.y);
               const bool valid =
                     (xImageIn > -1 && xImageIn < (int)c_ImageSize.x) &&
                     (yImageIn > -1 && yImageIn < (int)c_ImageSize.y) &&
                     (zImageIn > -1 && zImageIn < (int)c_ImageSize.z);
               sWarpedValues[sharedIndex] = (valid && mask[indexXYZIn] > -1) ?
                     tex1Dfetch(warpedImageArray_texture, indexXYZIn) : nanf("sNaN"); //for some reason the mask here creates probs
            }
         }
      }
      //for most cases we need this out of th loop
      //value if the block is 4x4x4 NaN otherwise
      const unsigned int voxIndex = ( zImage * c_ImageSize.y + yImage ) *
            c_ImageSize.x + xImage;
      const bool referenceInBounds =
            xImage < c_ImageSize.x &&
            yImage < c_ImageSize.y &&
            zImage < c_ImageSize.z;
      float rReferenceValue = (referenceInBounds && mask[voxIndex] > -1) ?
            tex1Dfetch(referenceImageArray_texture, voxIndex) : nanf("sNaN");
      const bool finiteReference = isfinite(rReferenceValue);
      rReferenceValue = finiteReference ? rReferenceValue : 0.f;
      // block-wide barrier + count of usable reference voxels
      const unsigned int referenceSize = __syncthreads_count(finiteReference);
      float bestDisplacement[3] = {nanf("sNaN"), 0.0f, 0.0f };
      float bestCC = 0.0f;
      // only match blocks that are at least half populated (> 32 of 64 voxels)
      if (referenceSize > 32) {
         //the target values must remain constant throughout the block matching process
         const float referenceMean = __fdividef(blockReduceSum(rReferenceValue, tid), referenceSize);
         const float referenceTemp = finiteReference ? rReferenceValue - referenceMean : 0.f;
         const float referenceVar = blockReduceSum(referenceTemp * referenceTemp, tid);
         // iteration over the result blocks (block matching part)
         // x/y/z in [1,7] correspond to displacements of x-4 etc. in [-3,3];
         // loop bounds are uniform across the block, so the barriers inside
         // __syncthreads_count / blockReduceSum are reached by all threads.
         for (unsigned int z=1; z<8; ++z) {
            for (unsigned int y=1; y<8; ++y) {
               for (unsigned int x=1; x<8; ++x) {
                  const unsigned int sharedIndex = ( (z+idz) * 12 + y + idy ) * 12 + x + idx;
                  const float rWarpedValue = sWarpedValues[sharedIndex];
                  const bool overlap = isfinite(rWarpedValue) && finiteReference;
                  const unsigned int currentWarpedSize = __syncthreads_count(overlap);
                  if (currentWarpedSize > 32) {
                     //the target values must remain intact at each loop, so please do not touch this!
                     // recompute the reference statistics only when the overlap
                     // changed the set of participating voxels
                     float newreferenceTemp = referenceTemp;
                     float newreferenceVar = referenceVar;
                     if (currentWarpedSize != referenceSize){
                        const float newReferenceValue = overlap ? rReferenceValue : 0.0f;
                        const float newReferenceMean = __fdividef(blockReduceSum(newReferenceValue, tid), currentWarpedSize);
                        newreferenceTemp = overlap ? newReferenceValue - newReferenceMean : 0.0f;
                        newreferenceVar = blockReduceSum(newreferenceTemp * newreferenceTemp, tid);
                     }
                     const float rChecked = overlap ? rWarpedValue : 0.0f;
                     const float warpedMean = __fdividef(blockReduceSum(rChecked, tid), currentWarpedSize);
                     const float warpedTemp = overlap ? rChecked - warpedMean : 0.0f;
                     const float warpedVar = blockReduceSum(warpedTemp * warpedTemp, tid);
                     const float sumTargetResult = blockReduceSum((newreferenceTemp)* (warpedTemp), tid);
                     const float localCC = (newreferenceVar * warpedVar) > 0.0 ? fabs((sumTargetResult) / sqrt(newreferenceVar * warpedVar)) : 0.0;
                     // thread 0 keeps the running best; the 1e-7 bump avoids
                     // re-selecting a displacement with an equal score
                     if (tid == 0 && localCC > bestCC) {
                        bestCC = localCC + 1.0e-7f;
                        bestDisplacement[0] = x - 4.f;
                        bestDisplacement[1] = y - 4.f;
                        bestDisplacement[2] = z - 4.f;
                     }
                  }
               }
            }
         }
      }
      // thread 0 converts the best (voxel) displacement into mm coordinates
      if (tid==0) {
         const unsigned int posIdx = 3 * currentBlockIndex;
         const float referencePosition_temp[3] = { (float)xImage, (float)yImage, (float)zImage };
         bestDisplacement[0] += referencePosition_temp[0];
         bestDisplacement[1] += referencePosition_temp[1];
         bestDisplacement[2] += referencePosition_temp[2];
         reg_mat44_mul_cuda<float>(referenceMatrix_xyz, referencePosition_temp, &referencePosition[posIdx]);
         reg_mat44_mul_cuda<float>(referenceMatrix_xyz, bestDisplacement, &warpedPosition[posIdx]);
         // bestDisplacement[0] stays NaN when no displacement qualified
         if (isfinite(bestDisplacement[0])) {
            atomicAdd(definedBlock, 1);
         }
      }
   }
}
#endif
/* *************************************************************** */
/**
 * Host-side driver for the GPU block-matching step.
 * Uploads the image/block dimensions to constant memory, binds the
 * reference image, warped image and active-block list to textures,
 * launches the 2D or 3D block-matching kernel, and stores the number of
 * blocks with a defined displacement in params->definedActiveBlockNumber.
 * All *_d arguments are device pointers (passed by address).
 * Requires params->stepSize == 1 and params->voxelCaptureRange == 3,
 * which is what the kernels hard-code.
 */
void block_matching_method_gpu(nifti_image *targetImage,
                               _reg_blockMatchingParam *params,
                               float **targetImageArray_d,
                               float **resultImageArray_d,
                               float **referencePosition_d,
                               float **warpedPosition_d,
                               int **totalBlock_d,
                               int **mask_d,
                               float** referenceMat_d)
{
   // Fail fast before any device resource is created: the kernels only
   // support a step size of one voxel and a +/-4 voxel capture range.
   if (params->stepSize!=1 || params->voxelCaptureRange!=3){
      reg_print_msg_error("The block Matching CUDA kernel supports only a stepsize of 1");
      reg_exit();
   }
   // Copy some required parameters over to the device
   uint3 imageSize = make_uint3(targetImage->nx,
                                targetImage->ny,
                                targetImage->nz);
   uint3 blockSize = make_uint3(params->blockNumber[0],
                                params->blockNumber[1],
                                params->blockNumber[2]);
   NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ImageSize,&imageSize,sizeof(uint3)));
   NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_BlockDim,&blockSize,sizeof(uint3)));
   // Texture binding
   const unsigned int numBlocks = params->blockNumber[0] * params->blockNumber[1] * params->blockNumber[2];
   NR_CUDA_SAFE_CALL(cudaBindTexture(0, referenceImageArray_texture, *targetImageArray_d, targetImage->nvox * sizeof(float)));
   NR_CUDA_SAFE_CALL(cudaBindTexture(0, warpedImageArray_texture, *resultImageArray_d, targetImage->nvox * sizeof(float)));
   NR_CUDA_SAFE_CALL(cudaBindTexture(0, totalBlock_texture, *totalBlock_d, numBlocks * sizeof(int)));
   // Device-side counter of blocks with a defined displacement.
   // The host mirror lives on the stack: no heap allocation is needed for
   // a single unsigned int.
   unsigned int definedBlock_h = 0;
   unsigned int *definedBlock_d;
   NR_CUDA_SAFE_CALL(cudaMalloc((void** )(&definedBlock_d), sizeof(unsigned int)));
   NR_CUDA_SAFE_CALL(cudaMemcpy(definedBlock_d, &definedBlock_h, sizeof(unsigned int), cudaMemcpyHostToDevice));
#ifdef USE_TEST_KERNEL
   // The test kernel processes two z-blocks per thread block (4x4x8 threads)
   dim3 BlockDims1D(4,4,8);
   dim3 BlocksGrid3D(
            params->blockNumber[0],
            params->blockNumber[1],
            (unsigned int)reg_ceil((float)params->blockNumber[2]/2.f));
   unsigned int sMem = (128 + 4*3 * 4*3 * 4*4) * sizeof(float);
#else
   dim3 BlockDims1D(4,4,4);
   dim3 BlocksGrid3D(
            params->blockNumber[0],
            params->blockNumber[1],
            params->blockNumber[2]);
   unsigned int sMem = (64 + 4*3 * 4*3 * 4*3) * sizeof(float); // (3*4)^3
#endif
   if (targetImage->nz == 1){
      // 2D image: flatten the launch along z and shrink the shared tile
      BlockDims1D.z=1;
      BlocksGrid3D.z=1;
      sMem = (16 + 144) * sizeof(float); // (3*4)^2
      blockMatchingKernel2D <<<BlocksGrid3D, BlockDims1D, sMem>>> (*warpedPosition_d,
                                                                   *referencePosition_d,
                                                                   *mask_d,
                                                                   *referenceMat_d,
                                                                   definedBlock_d);
   }
   else {
      blockMatchingKernel3D <<<BlocksGrid3D, BlockDims1D, sMem>>> (*warpedPosition_d,
                                                                   *referencePosition_d,
                                                                   *mask_d,
                                                                   *referenceMat_d,
                                                                   definedBlock_d);
   }
#ifndef NDEBUG
   NR_CUDA_CHECK_KERNEL(BlocksGrid3D, BlockDims1D);
#else
   NR_CUDA_SAFE_CALL(cudaThreadSynchronize());
#endif
   // Retrieve the number of blocks with a defined displacement
   NR_CUDA_SAFE_CALL(cudaMemcpy(&definedBlock_h, definedBlock_d, sizeof(unsigned int), cudaMemcpyDeviceToHost));
   params->definedActiveBlockNumber = definedBlock_h;
   NR_CUDA_SAFE_CALL(cudaUnbindTexture(referenceImageArray_texture));
   NR_CUDA_SAFE_CALL(cudaUnbindTexture(warpedImageArray_texture));
   NR_CUDA_SAFE_CALL(cudaUnbindTexture(totalBlock_texture));
   NR_CUDA_SAFE_CALL(cudaFree(definedBlock_d));
}
/* *************************************************************** */
#endif //_REG_BLOCKMATCHING_GPU_CU
|
8595f89e90e10f9492735c3fd59c417899b53f05.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#include <stdio.h>
#include <MatKernel.hpp>
typedef float (*fntype)(float);
typedef float (*optype)(float,float);
// Link functions: map a mean/probability onto the linear-predictor scale.
__device__ float link_linear(float a) {return a;}
__device__ float link_logistic(float a) {return log(a/(1.0f - a));}
// Mean (inverse-link) functions: map a linear predictor back to a mean.
__device__ float mean_linear(float a) {return a;}
// Clamped logistic sigmoid; the cutoffs avoid overflow in exp().
__device__ float mean_logistic(float a) {
  if (a > 20.0f) {
    return 1.0f;
  } else if (a < -80.0f) {
    return 0.0f;
  } else {
    return 1.0f/(1.0f + exp(-a));
  }
}
// Loss derivatives w.r.t. the prediction (first arg = prediction, second = target).
__device__ float deriv_linear(float a, float b) {return b-a;}
__device__ float deriv_logistic(float a, float b) {return b-a;}
__device__ float deriv_maxp(float p, float t) {return (2.0f*t - 1.0f)*p*(1.0f-p);}
// Hinge-loss subgradient; targets t in {0,1} are remapped to {-1,+1}.
__device__ float deriv_svm(float p, float t) {
  float tt = 2 * t - 1;
  return (p * tt < 1.0f) ? tt : 0.0f;
}
#define EPS 1.0e-10f
// Per-element log-likelihood / score functions (EPS guards log(0)).
__device__ float ll_linear(float a, float t) {return (t-a)*(a-t);}
__device__ float ll_logistic(float a, float b) {return log(a * b + (1.0f - a) * (1.0f - b) + EPS);}
__device__ float ll_maxp(float a, float t) {return a * t + (1.0f - a) * (1.0f - t) - 1.0f;}
// Negative hinge loss (0 when the margin is satisfied).
__device__ float ll_svm(float p, float t) {
  float tt = 2 * t - 1;
  return min(0.0f, tt * p - 1);
}
// Dispatch tables indexed by the per-row link code stored in L:
// 0 = linear, 1 = logistic, 2 = maxp, 3 = svm.
__device__ const fntype linkfns[] = {
    link_linear,
    link_logistic,
    link_logistic,
    link_linear};
__device__ const fntype meanfns[] = {
    mean_linear,
    mean_logistic,
    mean_logistic,
    mean_linear};
__device__ const optype derivfns[] = {
    deriv_linear,
    deriv_logistic,
    deriv_maxp,
    deriv_svm};
__device__ const optype llfns[] = {
    ll_linear,
    ll_logistic,
    ll_maxp,
    ll_svm};
// Double-precision counterparts of the float link/mean/deriv/ll functions
// and their dispatch tables (same link codes: 0=linear, 1=logistic,
// 2=maxp, 3=svm).
typedef double (*dfntype)(double);
typedef double (*doptype)(double,double);
__device__ double dlink_linear(double a) {return a;}
__device__ double dlink_logistic(double a) {return log(a/(1.0 - a));}
__device__ double dmean_linear(double a) {return a;}
// Numerically stable logistic sigmoid: exp() is only ever called with a
// non-positive argument, so it cannot overflow.
__device__ double dmean_logistic(double a) {
  double tmp;
  if (a > 0) {
    tmp = exp(-a);
    return 1.0/(1.0 + tmp);
  } else {
    tmp = exp(a);
    return tmp/(1.0 + tmp);
  }
}
__device__ double dderiv_linear(double a, double b) {return b-a;}
__device__ double dderiv_logistic(double a, double b) {return b-a;}
__device__ double dderiv_maxp(double p, double t) {return (2.0*t - 1.0f)*p*(1.0-p);}
// Hinge-loss subgradient; targets t in {0,1} are remapped to {-1,+1}.
__device__ double dderiv_svm(double p, double t) {
  double tt = 2 * t - 1;
  return (p * tt < 1.0) ? tt : 0.0;
}
__device__ double dll_linear(double a, double t) {return (t-a)*(a-t);}
__device__ double dll_logistic(double a, double b) {return log(a * b + (1.0 - a) * (1.0 - b) + EPS);}
__device__ double dll_maxp(double a, double t) {return a * t + (1.0 - a) * (1.0 - t) - 1.0;}
__device__ double dll_svm(double p, double t) {
  double tt = 2 * t - 1;
  return min(0.0, tt * p - 1);
}
__device__ const dfntype dlinkfns[] = {
    dlink_linear,
    dlink_logistic,
    dlink_logistic,
    dlink_linear};
__device__ const dfntype dmeanfns[] = {
    dmean_linear,
    dmean_logistic,
    dmean_logistic,
    dmean_linear};
__device__ const doptype dderivfns[] = {
    dderiv_linear,
    dderiv_logistic,
    dderiv_maxp,
    dderiv_svm};
__device__ const doptype dllfns[] = {
    dll_linear,
    dll_logistic,
    dll_maxp,
    dll_svm};
// Choose a 1-D launch configuration providing at least N threads in total.
// Strategy: grow the block count up to 16 first, then the block size up to
// 1024 threads, then the block count again; the final block count is split
// across grid.x and grid.y so neither dimension exceeds 65536.
//   N         : minimum total thread count (kernels use grid-stride loops)
//   gridp     : out, grid dimensions (z is always 1)
//   nthreadsp : out, threads per block
void setsizes(int N, dim3 *gridp, int *nthreadsp) {
  int nblocks = 1;
  int nthreads = 1;
  // 64-bit product: the int product overflows (UB / possible infinite loop)
  // once nblocks*nthreads reaches 2^31, which is reachable for N > 2^30.
  while ((long long)nblocks * nthreads < N) {
    if (nblocks < 16) {
      nblocks = 2*nblocks;
    } else if (nthreads < 1024) {
      nthreads = 2*nthreads;
    } else {
      nblocks = 2*nblocks;
    }
  }
  gridp->y = 1 + (nblocks-1)/65536;
  gridp->x = 1 + (nblocks-1)/gridp->y;
  gridp->z = 1;
  *nthreadsp = nthreads;
}
// Grid-stride kernel: apply the per-row mean (inverse-link) function chosen
// by L[row] to every element of the column-major nrows x ncols matrix A,
// writing the result to C.
__global__ void __apply_preds(float *A, int *L, float *C, int nrows, int ncols) {
  int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
  for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
    fntype fn = meanfns[L[i % nrows]];
    C[i] = fn(A[i]);
  }
}
// Host wrapper; returns hipGetLastError() as an int (0 on success).
int apply_preds(float *A, int *L, float *C, int nrows, int ncols) {
  int nthreads;
  dim3 griddims;
  setsizes(nrows*ncols, &griddims, &nthreads);
  hipLaunchKernelGGL(( __apply_preds), dim3(griddims),dim3(nthreads), 0, 0, A, L, C, nrows, ncols);
  hipDeviceSynchronize();
  hipError_t err = hipGetLastError();
  return err;
}
// Same pattern with the link functions (mean -> linear predictor).
__global__ void __apply_links(float *A, int *L, float *C, int nrows, int ncols) {
  int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
  for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
    fntype fn = linkfns[L[i % nrows]];
    C[i] = fn(A[i]);
  }
}
int apply_links(float *A, int *L, float *C, int nrows, int ncols) {
  int nthreads;
  dim3 griddims;
  setsizes(nrows*ncols, &griddims, &nthreads);
  hipLaunchKernelGGL(( __apply_links), dim3(griddims),dim3(nthreads), 0, 0, A, L, C, nrows, ncols);
  hipDeviceSynchronize();
  hipError_t err = hipGetLastError();
  return err;
}
// Element-wise log-likelihood of predictions A against targets B.
__global__ void __apply_lls(float *A, float *B, int *L, float *C, int nrows, int ncols) {
  int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
  for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
    optype op = llfns[L[i % nrows]];
    C[i] = op(A[i],B[i]);
  }
}
int apply_lls(float *A, float *B, int *L, float *C, int nrows, int ncols) {
  int nthreads;
  dim3 griddims;
  setsizes(nrows*ncols, &griddims, &nthreads);
  hipLaunchKernelGGL(( __apply_lls), dim3(griddims),dim3(nthreads), 0, 0, A, B, L, C, nrows, ncols);
  hipDeviceSynchronize();
  hipError_t err = hipGetLastError();
  return err;
}
// Element-wise loss derivative of predictions A against targets B.
__global__ void __apply_derivs(float *A, float *B, int *L, float *C, int nrows, int ncols) {
  int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
  for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
    optype op = derivfns[L[i % nrows]];
    C[i] = op(A[i],B[i]);
  }
}
int apply_derivs(float *A, float *B, int *L, float *C, int nrows, int ncols) {
  int nthreads;
  dim3 griddims;
  setsizes(nrows*ncols, &griddims, &nthreads);
  hipLaunchKernelGGL(( __apply_derivs), dim3(griddims),dim3(nthreads), 0, 0, A, B, L, C, nrows, ncols);
  hipDeviceSynchronize();
  hipError_t err = hipGetLastError();
  return err;
}
// Double-precision versions of the apply_* kernels and wrappers above:
// per-row function chosen by L[row], grid-stride over the nrows x ncols
// column-major matrix, wrappers return hipGetLastError() as an int.
__global__ void __apply_dpreds(double *A, int *L, double *C, int nrows, int ncols) {
  int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
  for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
    dfntype fn = dmeanfns[L[i % nrows]];
    C[i] = fn(A[i]);
  }
}
int apply_dpreds(double *A, int *L, double *C, int nrows, int ncols) {
  int nthreads;
  dim3 griddims;
  setsizes(nrows*ncols, &griddims, &nthreads);
  hipLaunchKernelGGL(( __apply_dpreds), dim3(griddims),dim3(nthreads), 0, 0, A, L, C, nrows, ncols);
  hipDeviceSynchronize();
  hipError_t err = hipGetLastError();
  return err;
}
__global__ void __apply_dlinks(double *A, int *L, double *C, int nrows, int ncols) {
  int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
  for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
    dfntype fn = dlinkfns[L[i % nrows]];
    C[i] = fn(A[i]);
  }
}
int apply_dlinks(double *A, int *L, double *C, int nrows, int ncols) {
  int nthreads;
  dim3 griddims;
  setsizes(nrows*ncols, &griddims, &nthreads);
  hipLaunchKernelGGL(( __apply_dlinks), dim3(griddims),dim3(nthreads), 0, 0, A, L, C, nrows, ncols);
  hipDeviceSynchronize();
  hipError_t err = hipGetLastError();
  return err;
}
__global__ void __apply_dlls(double *A, double *B, int *L, double *C, int nrows, int ncols) {
  int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
  for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
    doptype op = dllfns[L[i % nrows]];
    C[i] = op(A[i],B[i]);
  }
}
int apply_dlls(double *A, double *B, int *L, double *C, int nrows, int ncols) {
  int nthreads;
  dim3 griddims;
  setsizes(nrows*ncols, &griddims, &nthreads);
  hipLaunchKernelGGL(( __apply_dlls), dim3(griddims),dim3(nthreads), 0, 0, A, B, L, C, nrows, ncols);
  hipDeviceSynchronize();
  hipError_t err = hipGetLastError();
  return err;
}
__global__ void __apply_dderivs(double *A, double *B, int *L, double *C, int nrows, int ncols) {
  int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
  for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
    doptype op = dderivfns[L[i % nrows]];
    C[i] = op(A[i],B[i]);
  }
}
int apply_dderivs(double *A, double *B, int *L, double *C, int nrows, int ncols) {
  int nthreads;
  dim3 griddims;
  setsizes(nrows*ncols, &griddims, &nthreads);
  hipLaunchKernelGGL(( __apply_dderivs), dim3(griddims),dim3(nthreads), 0, 0, A, B, L, C, nrows, ncols);
  hipDeviceSynchronize();
  hipError_t err = hipGetLastError();
  return err;
}
// Accumulate one gradient contribution into the ADAGRAD sum-of-squares
// buffer and (if addgrad) apply the scaled update to the model MM.
//   grad   : gradient contribution for model element ihere
//   i      : row index, used to pick per-row lrate/vexp/texp entries
//   ihere  : flat index into MM/Sumsq; jhere: column index for 1-row masks
//   lrate/vexp/texp : per-row arrays when *len > 1, scalars when *len == 1
//   istep  : step normaliser applied to Sumsq (exponent ve) and time decay
//            (exponent te)
// NOTE(review): Sumsq is updated and MM is zeroed non-atomically while the
// model update itself uses atomicAdd — presumably each (thread, ihere)
// pair is unique within a launch; verify at the call sites.
__forceinline__ __device__ void __gupdate(float grad, int i, int ihere, int jhere, float *MM, float *Sumsq, float *Mask, int maskrows, float *lrate, int lrlen,
                                          float *vexp, int vexplen, float *texp, int texplen, float istep, int addgrad, float epsilon) {
  float lr, ve, te, pve, ste, ngrad;
  // running sum of squared gradients; epsilon keeps it strictly positive
  Sumsq[ihere] += grad * grad + epsilon;
  if (addgrad) {
    lr = (lrlen > 1) ? lrate[i] : lrate[0];
    ve = (vexplen > 1) ? vexp[i] : vexp[0];
    te = (texplen > 1) ? texp[i] : texp[0];
    // ADAGRAD denominator (Sumsq*istep)^ve; ve == 0 disables normalisation
    pve = (ve == 0) ? 1.0f : pow(Sumsq[ihere] * istep, ve);
    ste = pow(istep, te);
    ngrad = grad * lr * ste / pve;
    atomicAdd(&MM[ihere], ngrad);
  }
  if (Mask != NULL) {
    // zero masked-out entries; Mask is either full-size or a single row
    if (maskrows > 1) {
      if (Mask[ihere] == 0) MM[ihere] = 0;
    } else {
      if (Mask[jhere] == 0) MM[ihere] = 0;
    }
  }
}
// Fused sparse-product ADAGRAD update for wide dense matrices A
// (nrows x ncols, column-major). B is sparse in coordinate form
// (Bdata/Bir/Bic, nnz entries, assumed sorted by column Bic): the gradient
// of element (i, Bir[j]) is A[i, Bic[j]] * Bdata[j]. Each CUDA block
// handles a contiguous slice of the nonzeros; each thread strides over
// the dense rows. When biasv > 0 an extra bias column (index nbr) also
// receives the raw activation once per encountered column.
__global__ void __multADAGrad(int nrows, int ncols, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *MM,
                              float *Sumsq, float *Mask, int maskrows, float *lrate, int lrlen, float *vexp, int vexplen,
                              float *texp, int texplen, float istep, int addgrad, float epsilon, int biasv, int nbr) {
  float aval, grad;
  int i, j, ihere, jhere;
  // this block's slice [jstart, jend) of the nonzeros (64-bit to avoid
  // overflow in the product before the division)
  int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
  int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
  if (biasv > 0) {
    for (i = threadIdx.x; i < nrows; i += blockDim.x) {
      aval = 0;
      for (j = jstart; j < jend ; j++) {
        // first nonzero of a new column (or of the slice): reload the
        // dense activation and push the bias-column update
        if (j == jstart || Bic[j-1] != Bic[j]) {
          aval = A[i + nrows * Bic[j]];
          grad = aval;
          ihere = i + nrows * nbr;
          jhere = nbr;
          __gupdate(grad, i, ihere, jhere, MM, Sumsq, Mask, maskrows, lrate, lrlen, vexp, vexplen, texp, texplen, istep, addgrad, epsilon);
        }
        grad = aval * Bdata[j];
        ihere = i + nrows * Bir[j];
        jhere = Bir[j];
        __gupdate(grad, i, ihere, jhere, MM, Sumsq, Mask, maskrows, lrate, lrlen, vexp, vexplen, texp, texplen, istep, addgrad, epsilon);
      }
    }
  } else {
    for (i = threadIdx.x; i < nrows; i += blockDim.x) {
      aval = 0;
      for (j = jstart; j < jend ; j++) {
        if (j == jstart || Bic[j-1] != Bic[j]) {
          aval = A[i + nrows * Bic[j]];
        }
        grad = aval * Bdata[j];
        ihere = i + nrows * Bir[j];
        jhere = Bir[j];
        __gupdate(grad, i, ihere, jhere, MM, Sumsq, Mask, maskrows, lrate, lrlen, vexp, vexplen, texp, texplen, istep, addgrad, epsilon);
      }
    }
  }
}
// Variant of __multADAGrad for narrow dense matrices (nrows < 128):
// threadIdx.x indexes the dense row directly (launch requires
// blockDim.x == nrows), and blockDim.y * gridDim.x teams split the
// nonzeros of B between them. Semantics otherwise match __multADAGrad.
__global__ void __multADAGradx(int nrows, int ncols, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *MM,
                               float *Sumsq, float *Mask, int maskrows, float *lrate, int lrlen, float *vexp, int vexplen,
                               float *texp, int texplen, float istep, int addgrad, float epsilon, int biasv, int nbr) {
  float aval, grad;
  int i, j, ihere, jhere;
  // team id and its slice [jstart, jend) of the nonzeros
  int bid = threadIdx.y + blockDim.y * blockIdx.x;
  int nb = blockDim.y * gridDim.x;
  int jstart = ((long long)bid) * nnz / nb;
  int jend = ((long long)(bid + 1)) * nnz / nb;
  i = threadIdx.x;
  aval = 0;
  if (biasv > 0) {
    for (j = jstart; j < jend ; j++) {
      // first nonzero of a new column: reload activation, update bias col
      if (j == jstart || Bic[j-1] != Bic[j]) {
        aval = A[i + nrows * Bic[j]];
        grad = aval;
        ihere = i + nrows * nbr;
        jhere = nbr;
        __gupdate(grad, i, ihere, jhere, MM, Sumsq, Mask, maskrows, lrate, lrlen, vexp, vexplen, texp, texplen, istep, addgrad, epsilon);
      }
      grad = aval * Bdata[j];
      ihere = i + nrows * Bir[j];
      jhere = Bir[j];
      __gupdate(grad, i, ihere, jhere, MM, Sumsq, Mask, maskrows, lrate, lrlen, vexp, vexplen, texp, texplen, istep, addgrad, epsilon);
    }
  } else {
    for (j = jstart; j < jend ; j++) {
      if (j == jstart || Bic[j-1] != Bic[j]) {
        aval = A[i + nrows * Bic[j]];
      }
      grad = aval * Bdata[j];
      ihere = i + nrows * Bir[j];
      jhere = Bir[j];
      __gupdate(grad, i, ihere, jhere, MM, Sumsq, Mask, maskrows, lrate, lrlen, vexp, vexplen, texp, texplen, istep, addgrad, epsilon);
    }
  }
}
// Host dispatcher for the fused sparse ADAGRAD update.
// Narrow dense matrices (nrows < 128) go to __multADAGradx, where each
// x-thread owns one row and several column strips share a block; wider
// matrices use the classic one-slice-per-block kernel __multADAGrad.
// Returns hipGetLastError() as an int (0 on success).
int multADAGrad(int nrows, int ncols, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *MM,
                float *Sumsq, float *Mask, int maskrows, float *lrate, int lrlen, float *vexp, int vexplen,
                float *texp, int texplen, float istep, int addgrad, float epsilon, int biasv, int nbr) {
  const bool narrow = nrows < 128;
  if (narrow) {
    const int stripsPerBlock = max(1, min(ncols/2, 256/nrows));
    const int blockCount = min(256, max(1, 1 + (ncols-1)/stripsPerBlock));
    dim3 blockShape(nrows, stripsPerBlock, 1);
    hipLaunchKernelGGL(( __multADAGradx), dim3(blockCount),dim3(blockShape), 0, 0, nrows, ncols, nnz, A, Bdata, Bir, Bic, MM, Sumsq, Mask, maskrows, lrate, lrlen,
                       vexp, vexplen, texp, texplen, istep, addgrad, epsilon, biasv, nbr);
  } else {
    const int threadCount = min(1024, 32*(1+(nrows-1)/32));
    const int blockCount = min(128, ncols);
    hipLaunchKernelGGL(( __multADAGrad), dim3(blockCount),dim3(threadCount), 0, 0, nrows, ncols, nnz, A, Bdata, Bir, Bic, MM, Sumsq, Mask, maskrows, lrate, lrlen,
                       vexp, vexplen, texp, texplen, istep, addgrad, epsilon, biasv, nbr);
  }
  hipDeviceSynchronize();
  return hipGetLastError();
}
// Seed one RNG state per thread (fixed seed 1234, subsequence = global
// thread id); the states feed the Langevin noise in the ADAGrad kernels.
__global__ void __nrandinit(hiprandState_t *rstates) {
  int id = threadIdx.x + blockDim.x * blockIdx.x;
  hiprand_init(1234, id, 0, &rstates[id]);
}
// Element-wise ADAGRAD step over the column-major nrows x ncols matrices.
//   mm   : model, updated in place when doupdate != 0
//   um   : gradient ("update") matrix
//   ssq  : running root-style accumulator of gradient magnitude, blended
//          with weight nw each call (see newss below)
//   mask : optional multiplier mask, per-column (maskr == 1) or full-size
//   ve/ts/lr : per-row arrays when n* > 1, scalars when n* == 1
//   langevin : if > 0, std-dev of Gaussian noise added to the gradient
// rstates may be NULL when langevin <= 0 (only read for the noise draw).
__global__ void __ADAGrad(int nrows, int ncols, float *mm, float *um, float *ssq, float *mask, int maskr, float nw, float *ve, int nve,
                          float *ts, int nts, float *lr, int nlr, float langevin, float eps, int doupdate, hiprandState_t *rstates) {
  int ithread = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
  int nthreads = blockDim.x * gridDim.x * gridDim.y;
  int i, irow, icol;
  float mmval, umval, sqval, newss, veval, tsval, lrval, denom, grad;
  float sqnw = sqrtf(nw);
  float sq1mnw = sqrtf(1-nw);
  hiprandState_t *prstate = &rstates[ithread];
  for (i = ithread; i < nrows*ncols; i += nthreads) {
    icol = i / nrows;
    irow = i - icol * nrows;
    umval = um[i];
    sqval = ssq[i];
    // newss = (nw * umval * umval) + (1 - nw) * sqval;
    // hypotf form: newss = sqrt(nw*um^2 + (1-nw)*ssq^2), i.e. the
    // accumulator is stored as a root; see the doubled exponent below
    newss = hypotf(sqnw * umval, sq1mnw * sqval);
    ssq[i] = newss;
    if (doupdate) {
      mmval = mm[i];
      veval = (nve > 1) ? ve[irow] : ve[0];
      tsval = (nts > 1) ? ts[irow] : ts[0];
      lrval = (nlr > 1) ? lr[irow] : lr[0];
      // newss already holds a square root, so the exponent is doubled;
      // ve == 0.5 is the common case and skips the powf
      denom = (veval == 0.5f) ? newss : powf(newss, veval*2);
      denom = denom + eps;
      grad = (umval / denom);
      if (langevin > 0) grad += hiprand_normal(prstate) * langevin;
      mmval += grad * lrval * tsval;
      if (maskr > 0) {
        if (maskr > 1) {
          mmval *= mask[i];
        } else {
          mmval *= mask[icol];
        }
      }
      mm[i] = mmval;
    }
  }
}
// ADAGRAD with standard momentum
// Same contract as __ADAGrad plus:
//   momentum : per-element momentum buffer, updated in place
//   mu       : momentum decay factor
// NOTE(review): the buffer stores mu * (current step) — i.e. the decay is
// applied when saving, not when reading; confirm this matches the CPU path.
__global__ void __ADAGradm(int nrows, int ncols, float *mm, float *um, float *ssq, float *momentum, float mu, float *mask, int maskr,
                           float nw, float *ve, int nve, float *ts, int nts, float *lr, int nlr, float langevin, float eps, int doupdate, hiprandState_t *rstates) {
  int ithread = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
  int nthreads = blockDim.x * gridDim.x * gridDim.y;
  int i, irow, icol;
  float mmval, umval, sqval, newss, veval, tsval, lrval, denom, grad;
  float sqnw = sqrtf(nw);
  float sq1mnw = sqrtf(1-nw);
  hiprandState_t *prstate = &rstates[ithread];
  for (i = ithread; i < nrows*ncols; i += nthreads) {
    icol = i / nrows;
    irow = i - icol * nrows;
    umval = um[i];
    sqval = ssq[i];
    // newss = (nw * umval * umval) + (1 - nw) * sqval;
    // root-style accumulator blend, as in __ADAGrad
    newss = hypotf(sqnw * umval, sq1mnw * sqval);
    ssq[i] = newss;
    if (doupdate) {
      mmval = mm[i];
      veval = (nve > 1) ? ve[irow] : ve[0];
      tsval = (nts > 1) ? ts[irow] : ts[0];
      lrval = (nlr > 1) ? lr[irow] : lr[0];
      denom = (veval == 0.5f) ? newss : powf(newss, veval*2);
      denom = denom + eps;
      grad = (umval / denom);
      if (langevin > 0) grad += hiprand_normal(prstate) * langevin;
      grad = grad * lrval * tsval;    // Normal gradient
      grad += momentum[i];            // With momentum
      momentum[i] = mu * grad;        // Save updated momentum
      mmval += grad;                  // Add the new gradient
      if (maskr > 0) {
        if (maskr > 1) {
          mmval *= mask[i];
        } else {
          mmval *= mask[icol];
        }
      }
      mm[i] = mmval;
    }
  }
}
// ADAGRAD with Nesterov momentum
// Same contract as __ADAGradm; the model additionally receives the
// look-ahead correction (newmom - oldmom) on top of the momentum step.
__global__ void __ADAGradn(int nrows, int ncols, float *mm, float *um, float *ssq, float *momentum, float mu, float *mask, int maskr,
                           float nw, float *ve, int nve, float *ts, int nts, float *lr, int nlr, float langevin, float eps, int doupdate, hiprandState_t *rstates) {
  int ithread = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
  int nthreads = blockDim.x * gridDim.x * gridDim.y;
  int i, irow, icol;
  float mmval, umval, sqval, newss, veval, tsval, lrval, denom, grad, oldmom, newmom;
  float sqnw = sqrtf(nw);
  float sq1mnw = sqrtf(1-nw);
  hiprandState_t *prstate = &rstates[ithread];
  for (i = ithread; i < nrows*ncols; i += nthreads) {
    icol = i / nrows;
    irow = i - icol * nrows;
    umval = um[i];
    sqval = ssq[i];
    // newss = (nw * umval * umval) + (1 - nw) * sqval;
    // root-style accumulator blend, as in __ADAGrad
    newss = hypotf(sqnw * umval, sq1mnw * sqval);
    ssq[i] = newss;
    if (doupdate) {
      mmval = mm[i];
      veval = (nve > 1) ? ve[irow] : ve[0];
      tsval = (nts > 1) ? ts[irow] : ts[0];
      lrval = (nlr > 1) ? lr[irow] : lr[0];
      denom = (veval == 0.5f) ? newss : powf(newss, veval*2);
      denom = denom + eps;
      grad = (umval / denom);
      if (langevin > 0) grad += hiprand_normal(prstate) * langevin;
      grad = grad * lrval * tsval;    // Normal gradient
      oldmom = momentum[i];           // Momentum
      grad += oldmom;                 // New gradient
      newmom = mu * grad;             // Compute new momentum
      momentum[i] = newmom;           // Save new momentum
      // Nesterov look-ahead: apply the momentum delta as well
      mmval += grad + newmom - oldmom;
      if (maskr > 0) {
        if (maskr > 1) {
          mmval *= mask[i];
        } else {
          mmval *= mask[icol];
        }
      }
      mm[i] = mmval;
    }
  }
}
// Allocate and seed one RNG state per launched thread. Used by the three
// ADAGrad* wrappers when Langevin noise is requested (this setup code was
// previously triplicated verbatim). On failure the error is reported to
// stderr and returned, and *rstates is left as allocated by hipMalloc.
static hipError_t allocAndSeedRngStates(dim3 griddims, int nthreads, hiprandState_t **rstates) {
  int ntt = nthreads * griddims.x * griddims.y;
  hipError_t err = hipMalloc((void **)rstates, ntt * sizeof(hiprandState_t));
  if (err > 0) {
    fprintf(stderr, "Error in hipMalloc %d", err);
    return err;
  }
  hipDeviceSynchronize();
  hipLaunchKernelGGL(( __nrandinit), dim3(griddims),dim3(nthreads), 0, 0, *rstates);
  hipDeviceSynchronize();
  return hipSuccess;
}
// Host wrapper for __ADAGrad (plain ADAGRAD, no momentum).
// Returns a hipError_t cast to int (0 on success).
int ADAGrad(int nrows, int ncols, float *mm, float *um, float *ssq, float *mask, int maskr, float nw, float *ve, int nve, float *ts, int nts,
            float *lrate, int nlrate, float langevin, float eps, int doupdate) {
  int nthreads;
  dim3 griddims;
  // With Langevin noise every thread runs an RNG, so launch fewer threads.
  int basesize = (langevin > 0) ? max(32, nrows * ncols / 32) : max(32, nrows * ncols);
  setsizes(basesize, &griddims, &nthreads);
  hiprandState_t *rstates = NULL;
  if (langevin > 0) {
    hipError_t rngerr = allocAndSeedRngStates(griddims, nthreads, &rstates);
    if (rngerr > 0) return rngerr;
  }
  hipLaunchKernelGGL(( __ADAGrad), dim3(griddims),dim3(nthreads), 0, 0, nrows, ncols, mm, um, ssq, mask, maskr, nw, ve, nve, ts, nts, lrate, nlrate, langevin, eps, doupdate, rstates);
  hipDeviceSynchronize();
  if (langevin > 0) hipFree(rstates);
  hipError_t err = hipGetLastError();
  return err;
}
// Host wrapper for __ADAGradm (ADAGRAD with standard momentum).
int ADAGradm(int nrows, int ncols, float *mm, float *um, float *ssq, float *momentum, float mu, float *mask, int maskr, float nw, float *ve, int nve, float *ts, int nts,
             float *lrate, int nlrate, float langevin, float eps, int doupdate) {
  int nthreads;
  dim3 griddims;
  int basesize = (langevin > 0) ? max(32, nrows * ncols / 32) : max(32, nrows * ncols);
  setsizes(basesize, &griddims, &nthreads);
  hiprandState_t *rstates = NULL;
  if (langevin > 0) {
    hipError_t rngerr = allocAndSeedRngStates(griddims, nthreads, &rstates);
    if (rngerr > 0) return rngerr;
  }
  hipLaunchKernelGGL(( __ADAGradm), dim3(griddims),dim3(nthreads), 0, 0, nrows, ncols, mm, um, ssq, momentum, mu, mask, maskr, nw, ve, nve, ts, nts, lrate, nlrate, langevin, eps, doupdate, rstates);
  hipDeviceSynchronize();
  if (langevin > 0) hipFree(rstates);
  hipError_t err = hipGetLastError();
  return err;
}
// Host wrapper for __ADAGradn (ADAGRAD with Nesterov momentum).
int ADAGradn(int nrows, int ncols, float *mm, float *um, float *ssq, float *momentum, float mu, float *mask, int maskr, float nw, float *ve, int nve, float *ts, int nts,
             float *lrate, int nlrate, float langevin, float eps, int doupdate) {
  int nthreads;
  dim3 griddims;
  int basesize = (langevin > 0) ? max(32, nrows * ncols / 32) : max(32, nrows * ncols);
  setsizes(basesize, &griddims, &nthreads);
  hiprandState_t *rstates = NULL;
  if (langevin > 0) {
    hipError_t rngerr = allocAndSeedRngStates(griddims, nthreads, &rstates);
    if (rngerr > 0) return rngerr;
  }
  hipLaunchKernelGGL(( __ADAGradn), dim3(griddims),dim3(nthreads), 0, 0, nrows, ncols, mm, um, ssq, momentum, mu, mask, maskr, nw, ve, nve, ts, nts, lrate, nlrate, langevin, eps, doupdate, rstates);
  hipDeviceSynchronize();
  if (langevin > 0) hipFree(rstates);
  hipError_t err = hipGetLastError();
  return err;
}
| 8595f89e90e10f9492735c3fd59c417899b53f05.cu | #include <cuda_runtime.h>
#include <curand_kernel.h>
#include <stdio.h>
#include <MatKernel.hpp>
// GLM per-row function machinery (single precision).  A model row carries an
// integer link code; these device tables map that code to functions for the
// link, the mean (inverse link), the SGD derivative, and the log-likelihood.
// Table order: 0 = linear, 1 = logistic, 2 = logistic mean / maxp deriv,
// 3 = linear link / SVM deriv.
typedef float (*fntype)(float);
typedef float (*optype)(float,float);
// Link functions: map a mean back to the linear-predictor scale.
__device__ float link_linear(float a) {return a;}
__device__ float link_logistic(float a) {return log(a/(1.0f - a));}
// Mean (inverse link) functions; the logistic one saturates for large
// |a| to avoid overflow in exp().
__device__ float mean_linear(float a) {return a;}
__device__ float mean_logistic(float a) {
  if (a > 20.0f) {
    return 1.0f;
  } else if (a < -80.0f) {
    return 0.0f;
  } else {
    return 1.0f/(1.0f + exp(-a));
  }
}
// Derivatives d(ll)/d(pred) given prediction and target (targets in {0,1};
// maxp/svm internally rescale the target to {-1,1}).
__device__ float deriv_linear(float a, float b) {return b-a;}
__device__ float deriv_logistic(float a, float b) {return b-a;}
__device__ float deriv_maxp(float p, float t) {return (2.0f*t - 1.0f)*p*(1.0f-p);}
__device__ float deriv_svm(float p, float t) {
  float tt = 2 * t - 1;
  return (p * tt < 1.0f) ? tt : 0.0f;
}
#define EPS 1.0e-10f
// Log-likelihood (or surrogate loss) per element; EPS guards log(0).
__device__ float ll_linear(float a, float t) {return (t-a)*(a-t);}
__device__ float ll_logistic(float a, float b) {return log(a * b + (1.0f - a) * (1.0f - b) + EPS);}
__device__ float ll_maxp(float a, float t) {return a * t + (1.0f - a) * (1.0f - t) - 1.0f;}
__device__ float ll_svm(float p, float t) {
  float tt = 2 * t - 1;
  return min(0.0f, tt * p - 1);
}
// Dispatch tables indexed by the per-row link code.
__device__ const fntype linkfns[] = {
    link_linear,
    link_logistic,
    link_logistic,
    link_linear};
__device__ const fntype meanfns[] = {
    mean_linear,
    mean_logistic,
    mean_logistic,
    mean_linear};
__device__ const optype derivfns[] = {
    deriv_linear,
    deriv_logistic,
    deriv_maxp,
    deriv_svm};
__device__ const optype llfns[] = {
    ll_linear,
    ll_logistic,
    ll_maxp,
    ll_svm};
// Double-precision versions of the GLM link/mean/deriv/ll functions and
// dispatch tables above; same table order (0 = linear, 1 = logistic,
// 2 = logistic/maxp, 3 = linear/SVM).
typedef double (*dfntype)(double);
typedef double (*doptype)(double,double);
__device__ double dlink_linear(double a) {return a;}
__device__ double dlink_logistic(double a) {return log(a/(1.0 - a));}
__device__ double dmean_linear(double a) {return a;}
// Numerically stable logistic: always exponentiate a non-positive value.
__device__ double dmean_logistic(double a) {
  double tmp;
  if (a > 0) {
    tmp = exp(-a);
    return 1.0/(1.0 + tmp);
  } else {
    tmp = exp(a);
    return tmp/(1.0 + tmp);
  }
}
__device__ double dderiv_linear(double a, double b) {return b-a;}
__device__ double dderiv_logistic(double a, double b) {return b-a;}
// NOTE(review): the 1.0f float literal in double code is harmless (promoted),
// but inconsistent with the rest of this family.
__device__ double dderiv_maxp(double p, double t) {return (2.0*t - 1.0f)*p*(1.0-p);}
__device__ double dderiv_svm(double p, double t) {
  double tt = 2 * t - 1;
  return (p * tt < 1.0) ? tt : 0.0;
}
__device__ double dll_linear(double a, double t) {return (t-a)*(a-t);}
__device__ double dll_logistic(double a, double b) {return log(a * b + (1.0 - a) * (1.0 - b) + EPS);}
__device__ double dll_maxp(double a, double t) {return a * t + (1.0 - a) * (1.0 - t) - 1.0;}
__device__ double dll_svm(double p, double t) {
  double tt = 2 * t - 1;
  return min(0.0, tt * p - 1);
}
__device__ const dfntype dlinkfns[] = {
    dlink_linear,
    dlink_logistic,
    dlink_logistic,
    dlink_linear};
__device__ const dfntype dmeanfns[] = {
    dmean_linear,
    dmean_logistic,
    dmean_logistic,
    dmean_linear};
__device__ const doptype dderivfns[] = {
    dderiv_linear,
    dderiv_logistic,
    dderiv_maxp,
    dderiv_svm};
__device__ const doptype dllfns[] = {
    dll_linear,
    dll_logistic,
    dll_maxp,
    dll_svm};
// Choose a launch configuration providing at least N total threads.
// Capacity grows by doubling: blocks first (up to 16), then threads per
// block (up to 1024), then blocks again without bound.  The resulting block
// count is folded into a 2-D grid so gridDim.y stays within 65536.
void setsizes(int N, dim3 *gridp, int *nthreadsp) {
  int nblocks = 1;
  int nthreads = 1;
  while (nblocks * nthreads < N) {
    if (nblocks < 16) {
      nblocks <<= 1;
    } else if (nthreads < 1024) {
      nthreads <<= 1;
    } else {
      nblocks <<= 1;
    }
  }
  int ny = 1 + (nblocks - 1) / 65536;
  gridp->y = ny;
  gridp->x = 1 + (nblocks - 1) / ny;
  gridp->z = 1;
  *nthreadsp = nthreads;
}
// Kernel: C[i] = mean(A[i]) where the mean (inverse-link) function is chosen
// per row by the code in L (see meanfns: 0,3 = linear; 1,2 = logistic).
// Grid-stride loop over the nrows*ncols column-major elements.
__global__ void __apply_preds(float *A, int *L, float *C, int nrows, int ncols) {
  int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
  for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
    fntype fn = meanfns[L[i % nrows]];
    C[i] = fn(A[i]);
  }
}

// Host wrapper: one thread per element; returns the CUDA status after sync.
int apply_preds(float *A, int *L, float *C, int nrows, int ncols) {
  int nthreads;
  dim3 griddims;
  setsizes(nrows*ncols, &griddims, &nthreads);
  __apply_preds<<<griddims,nthreads>>>(A, L, C, nrows, ncols);
  cudaDeviceSynchronize();
  cudaError_t err = cudaGetLastError();
  return err;
}
// Kernel: C[i] = link(A[i]) where the link function is chosen per row by
// the code in L (see linkfns: 0,3 = identity; 1,2 = logit).
__global__ void __apply_links(float *A, int *L, float *C, int nrows, int ncols) {
  int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
  for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
    fntype fn = linkfns[L[i % nrows]];
    C[i] = fn(A[i]);
  }
}

// Host wrapper: one thread per element; returns the CUDA status after sync.
int apply_links(float *A, int *L, float *C, int nrows, int ncols) {
  int nthreads;
  dim3 griddims;
  setsizes(nrows*ncols, &griddims, &nthreads);
  __apply_links<<<griddims,nthreads>>>(A, L, C, nrows, ncols);
  cudaDeviceSynchronize();
  cudaError_t err = cudaGetLastError();
  return err;
}
// Kernel: C[i] = ll(A[i], B[i]) — per-element log-likelihood of prediction A
// against target B, with the loss chosen per row by the code in L (llfns).
__global__ void __apply_lls(float *A, float *B, int *L, float *C, int nrows, int ncols) {
  int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
  for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
    optype op = llfns[L[i % nrows]];
    C[i] = op(A[i],B[i]);
  }
}

// Host wrapper for __apply_lls; returns the CUDA status after sync.
int apply_lls(float *A, float *B, int *L, float *C, int nrows, int ncols) {
  int nthreads;
  dim3 griddims;
  setsizes(nrows*ncols, &griddims, &nthreads);
  __apply_lls<<<griddims,nthreads>>>(A, B, L, C, nrows, ncols);
  cudaDeviceSynchronize();
  cudaError_t err = cudaGetLastError();
  return err;
}

// Kernel: C[i] = deriv(A[i], B[i]) — per-element loss derivative, with the
// derivative chosen per row by the code in L (derivfns).
__global__ void __apply_derivs(float *A, float *B, int *L, float *C, int nrows, int ncols) {
  int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
  for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
    optype op = derivfns[L[i % nrows]];
    C[i] = op(A[i],B[i]);
  }
}

// Host wrapper for __apply_derivs; returns the CUDA status after sync.
int apply_derivs(float *A, float *B, int *L, float *C, int nrows, int ncols) {
  int nthreads;
  dim3 griddims;
  setsizes(nrows*ncols, &griddims, &nthreads);
  __apply_derivs<<<griddims,nthreads>>>(A, B, L, C, nrows, ncols);
  cudaDeviceSynchronize();
  cudaError_t err = cudaGetLastError();
  return err;
}
// Double-precision versions of the apply_* kernels and host wrappers above;
// identical structure, dispatching through the d* tables.
__global__ void __apply_dpreds(double *A, int *L, double *C, int nrows, int ncols) {
  int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
  for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
    dfntype fn = dmeanfns[L[i % nrows]];
    C[i] = fn(A[i]);
  }
}

// Host wrapper: per-row mean (inverse link), double precision.
int apply_dpreds(double *A, int *L, double *C, int nrows, int ncols) {
  int nthreads;
  dim3 griddims;
  setsizes(nrows*ncols, &griddims, &nthreads);
  __apply_dpreds<<<griddims,nthreads>>>(A, L, C, nrows, ncols);
  cudaDeviceSynchronize();
  cudaError_t err = cudaGetLastError();
  return err;
}

__global__ void __apply_dlinks(double *A, int *L, double *C, int nrows, int ncols) {
  int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
  for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
    dfntype fn = dlinkfns[L[i % nrows]];
    C[i] = fn(A[i]);
  }
}

// Host wrapper: per-row link function, double precision.
int apply_dlinks(double *A, int *L, double *C, int nrows, int ncols) {
  int nthreads;
  dim3 griddims;
  setsizes(nrows*ncols, &griddims, &nthreads);
  __apply_dlinks<<<griddims,nthreads>>>(A, L, C, nrows, ncols);
  cudaDeviceSynchronize();
  cudaError_t err = cudaGetLastError();
  return err;
}

__global__ void __apply_dlls(double *A, double *B, int *L, double *C, int nrows, int ncols) {
  int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
  for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
    doptype op = dllfns[L[i % nrows]];
    C[i] = op(A[i],B[i]);
  }
}

// Host wrapper: per-row log-likelihood, double precision.
int apply_dlls(double *A, double *B, int *L, double *C, int nrows, int ncols) {
  int nthreads;
  dim3 griddims;
  setsizes(nrows*ncols, &griddims, &nthreads);
  __apply_dlls<<<griddims,nthreads>>>(A, B, L, C, nrows, ncols);
  cudaDeviceSynchronize();
  cudaError_t err = cudaGetLastError();
  return err;
}

__global__ void __apply_dderivs(double *A, double *B, int *L, double *C, int nrows, int ncols) {
  int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
  for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
    doptype op = dderivfns[L[i % nrows]];
    C[i] = op(A[i],B[i]);
  }
}

// Host wrapper: per-row loss derivative, double precision.
int apply_dderivs(double *A, double *B, int *L, double *C, int nrows, int ncols) {
  int nthreads;
  dim3 griddims;
  setsizes(nrows*ncols, &griddims, &nthreads);
  __apply_dderivs<<<griddims,nthreads>>>(A, B, L, C, nrows, ncols);
  cudaDeviceSynchronize();
  cudaError_t err = cudaGetLastError();
  return err;
}
// Per-element ADAGRAD accumulate-and-update helper for the sparse product
// kernels.  Adds grad^2 (+epsilon) to the running sum of squares at ihere;
// if addgrad, scales grad by lrate * istep^texp / (Sumsq*istep)^vexp
// (row-indexed values when the corresponding length argument > 1, else
// scalars at [0]) and atomically accumulates it into MM.  A non-NULL Mask
// then zeroes masked coefficients: element-wise when maskrows > 1, else by
// output column jhere.
// NOTE(review): the mask test re-reads MM[ihere] after the atomicAdd, so
// concurrent updates to the same element may interleave with masking.
__forceinline__ __device__ void __gupdate(float grad, int i, int ihere, int jhere, float *MM, float *Sumsq, float *Mask, int maskrows, float *lrate, int lrlen,
                                          float *vexp, int vexplen, float *texp, int texplen, float istep, int addgrad, float epsilon) {
  float lr, ve, te, pve, ste, ngrad;
  Sumsq[ihere] += grad * grad + epsilon;
  if (addgrad) {
    lr = (lrlen > 1) ? lrate[i] : lrate[0];
    ve = (vexplen > 1) ? vexp[i] : vexp[0];
    te = (texplen > 1) ? texp[i] : texp[0];
    pve = (ve == 0) ? 1.0f : pow(Sumsq[ihere] * istep, ve);
    ste = pow(istep, te);
    ngrad = grad * lr * ste / pve;
    atomicAdd(&MM[ihere], ngrad);
  }
  if (Mask != NULL) {
    if (maskrows > 1) {
      if (Mask[ihere] == 0) MM[ihere] = 0;
    } else {
      if (Mask[jhere] == 0) MM[ihere] = 0;
    }
  }
}
// Fused sparse-product ADAGRAD kernel: accumulates the gradient A * B^T
// (B sparse, given as parallel arrays Bdata/Bir/Bic over nnz entries sorted
// by column) directly into the model MM via __gupdate.  Each block takes a
// contiguous slice of the nonzeros; within a block, threads stride over the
// nrows rows of A.  When biasv > 0, a bias gradient (the bare A value) is
// additionally folded into column nbr of MM once per input column.
__global__ void __multADAGrad(int nrows, int ncols, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *MM,
                              float *Sumsq, float *Mask, int maskrows, float *lrate, int lrlen, float *vexp, int vexplen,
                              float *texp, int texplen, float istep, int addgrad, float epsilon, int biasv, int nbr) {
  float aval, grad;
  int i, j, ihere, jhere;
  // 64-bit arithmetic avoids overflow when slicing large nnz counts.
  int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
  int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
  if (biasv > 0) {
    for (i = threadIdx.x; i < nrows; i += blockDim.x) {
      aval = 0;
      for (j = jstart; j < jend ; j++) {
        // Entering a new column: refresh the cached A value and push the
        // bias gradient for this (row, column) pair.
        if (j == jstart || Bic[j-1] != Bic[j]) {
          aval = A[i + nrows * Bic[j]];
          grad = aval;
          ihere = i + nrows * nbr;
          jhere = nbr;
          __gupdate(grad, i, ihere, jhere, MM, Sumsq, Mask, maskrows, lrate, lrlen, vexp, vexplen, texp, texplen, istep, addgrad, epsilon);
        }
        grad = aval * Bdata[j];
        ihere = i + nrows * Bir[j];
        jhere = Bir[j];
        __gupdate(grad, i, ihere, jhere, MM, Sumsq, Mask, maskrows, lrate, lrlen, vexp, vexplen, texp, texplen, istep, addgrad, epsilon);
      }
    }
  } else {
    for (i = threadIdx.x; i < nrows; i += blockDim.x) {
      aval = 0;
      for (j = jstart; j < jend ; j++) {
        if (j == jstart || Bic[j-1] != Bic[j]) {
          aval = A[i + nrows * Bic[j]];
        }
        grad = aval * Bdata[j];
        ihere = i + nrows * Bir[j];
        jhere = Bir[j];
        __gupdate(grad, i, ihere, jhere, MM, Sumsq, Mask, maskrows, lrate, lrlen, vexp, vexplen, texp, texplen, istep, addgrad, epsilon);
      }
    }
  }
}
// Variant of __multADAGrad for small nrows (< 128): threadIdx.x is the row
// index directly (blockDim.x must equal nrows — see the launcher), while the
// y-dimension of the block and the grid together slice the nnz nonzeros.
// Same bias handling as __multADAGrad when biasv > 0.
__global__ void __multADAGradx(int nrows, int ncols, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *MM,
                               float *Sumsq, float *Mask, int maskrows, float *lrate, int lrlen, float *vexp, int vexplen,
                               float *texp, int texplen, float istep, int addgrad, float epsilon, int biasv, int nbr) {
  float aval, grad;
  int i, j, ihere, jhere;
  int bid = threadIdx.y + blockDim.y * blockIdx.x;  // slice id over nnz
  int nb = blockDim.y * gridDim.x;                  // total slices
  int jstart = ((long long)bid) * nnz / nb;
  int jend = ((long long)(bid + 1)) * nnz / nb;
  i = threadIdx.x;
  aval = 0;
  if (biasv > 0) {
    for (j = jstart; j < jend ; j++) {
      // New column: refresh cached A value and push the bias gradient.
      if (j == jstart || Bic[j-1] != Bic[j]) {
        aval = A[i + nrows * Bic[j]];
        grad = aval;
        ihere = i + nrows * nbr;
        jhere = nbr;
        __gupdate(grad, i, ihere, jhere, MM, Sumsq, Mask, maskrows, lrate, lrlen, vexp, vexplen, texp, texplen, istep, addgrad, epsilon);
      }
      grad = aval * Bdata[j];
      ihere = i + nrows * Bir[j];
      jhere = Bir[j];
      __gupdate(grad, i, ihere, jhere, MM, Sumsq, Mask, maskrows, lrate, lrlen, vexp, vexplen, texp, texplen, istep, addgrad, epsilon);
    }
  } else {
    for (j = jstart; j < jend ; j++) {
      if (j == jstart || Bic[j-1] != Bic[j]) {
        aval = A[i + nrows * Bic[j]];
      }
      grad = aval * Bdata[j];
      ihere = i + nrows * Bir[j];
      jhere = Bir[j];
      __gupdate(grad, i, ihere, jhere, MM, Sumsq, Mask, maskrows, lrate, lrlen, vexp, vexplen, texp, texplen, istep, addgrad, epsilon);
    }
  }
}
// Host launcher for the fused sparse-product ADAGRAD update.  For narrow
// models (nrows < 128) uses __multADAGradx with blockDim.x == nrows so each
// thread owns one row; otherwise uses __multADAGrad with row-striding
// threads.  Returns the CUDA status after synchronization.
int multADAGrad(int nrows, int ncols, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *MM,
                float *Sumsq, float *Mask, int maskrows, float *lrate, int lrlen, float *vexp, int vexplen,
                float *texp, int texplen, float istep, int addgrad, float epsilon, int biasv, int nbr) {
  if (nrows < 128) {
    int nt = max(1, min(ncols/2, 256/nrows));  // rows-per-block slices in y
    dim3 threadDim(nrows, nt, 1);
    int nblocks = min(256, max(1, 1 + (ncols-1)/nt));
    __multADAGradx<<<nblocks,threadDim>>>(nrows, ncols, nnz, A, Bdata, Bir, Bic, MM, Sumsq, Mask, maskrows, lrate, lrlen,
                                          vexp, vexplen, texp, texplen, istep, addgrad, epsilon, biasv, nbr);
  } else {
    int nthreads = min(1024, 32*(1+(nrows-1)/32));  // round rows up to a warp multiple
    int nblocks = min(128, ncols);
    __multADAGrad<<<nblocks,nthreads>>>(nrows, ncols, nnz, A, Bdata, Bir, Bic, MM, Sumsq, Mask, maskrows, lrate, lrlen,
                                        vexp, vexplen, texp, texplen, istep, addgrad, epsilon, biasv, nbr);
  }
  cudaDeviceSynchronize();
  cudaError_t err = cudaGetLastError();
  return err;
}
// Seed one curand state per thread.  The seed is fixed (1234) with the
// thread id as the sequence number, so noise streams are deterministic
// across runs.
__global__ void __nrandinit(curandState *rstates) {
  int id = threadIdx.x + blockDim.x * blockIdx.x;
  curand_init(1234, id, 0, &rstates[id]);
}
// ADAGRAD model update (no momentum).  For each element i of the nrows x
// ncols model: fold the new gradient um[i] into the running RMS newss =
// sqrt(nw*um^2 + (1-nw)*ssq^2) (via hypotf for accuracy), then if doupdate,
// step the model by lr * ts * um / (newss^(2*ve) + eps), optionally adding
// Gaussian Langevin noise, and optionally masking by column (maskr == 1) or
// element-wise (maskr > 1).  ve/ts/lr are row-indexed when their length
// argument > 1, else scalars.
// NOTE(review): rstates may be NULL when langevin <= 0; prstate is only
// dereferenced inside the langevin > 0 branch.
__global__ void __ADAGrad(int nrows, int ncols, float *mm, float *um, float *ssq, float *mask, int maskr, float nw, float *ve, int nve,
                          float *ts, int nts, float *lr, int nlr, float langevin, float eps, int doupdate, curandState *rstates) {
  int ithread = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
  int nthreads = blockDim.x * gridDim.x * gridDim.y;
  int i, irow, icol;
  float mmval, umval, sqval, newss, veval, tsval, lrval, denom, grad;
  float sqnw = sqrtf(nw);
  float sq1mnw = sqrtf(1-nw);
  curandState *prstate = &rstates[ithread];
  for (i = ithread; i < nrows*ncols; i += nthreads) {
    icol = i / nrows;
    irow = i - icol * nrows;
    umval = um[i];
    sqval = ssq[i];
    // newss = (nw * umval * umval) + (1 - nw) * sqval;
    newss = hypotf(sqnw * umval, sq1mnw * sqval);
    ssq[i] = newss;
    if (doupdate) {
      mmval = mm[i];
      veval = (nve > 1) ? ve[irow] : ve[0];
      tsval = (nts > 1) ? ts[irow] : ts[0];
      lrval = (nlr > 1) ? lr[irow] : lr[0];
      // Fast path for the common exponent ve == 0.5 (plain RMS).
      denom = (veval == 0.5f) ? newss : powf(newss, veval*2);
      denom = denom + eps;
      grad = (umval / denom);
      if (langevin > 0) grad += curand_normal(prstate) * langevin;
      mmval += grad * lrval * tsval;
      if (maskr > 0) {
        if (maskr > 1) {
          mmval *= mask[i];
        } else {
          mmval *= mask[icol];
        }
      }
      mm[i] = mmval;
    }
  }
}
// ADAGRAD with standard momentum.  Same RMS accumulation and scaled-gradient
// computation as __ADAGrad, but the previous momentum is added to the scaled
// gradient before applying it, and momentum[i] is refreshed to mu * grad.
__global__ void __ADAGradm(int nrows, int ncols, float *mm, float *um, float *ssq, float *momentum, float mu, float *mask, int maskr,
                           float nw, float *ve, int nve, float *ts, int nts, float *lr, int nlr, float langevin, float eps, int doupdate, curandState *rstates) {
  int ithread = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
  int nthreads = blockDim.x * gridDim.x * gridDim.y;
  int i, irow, icol;
  float mmval, umval, sqval, newss, veval, tsval, lrval, denom, grad;
  float sqnw = sqrtf(nw);
  float sq1mnw = sqrtf(1-nw);
  curandState *prstate = &rstates[ithread];
  for (i = ithread; i < nrows*ncols; i += nthreads) {
    icol = i / nrows;
    irow = i - icol * nrows;
    umval = um[i];
    sqval = ssq[i];
    // newss = (nw * umval * umval) + (1 - nw) * sqval;
    newss = hypotf(sqnw * umval, sq1mnw * sqval);
    ssq[i] = newss;
    if (doupdate) {
      mmval = mm[i];
      veval = (nve > 1) ? ve[irow] : ve[0];
      tsval = (nts > 1) ? ts[irow] : ts[0];
      lrval = (nlr > 1) ? lr[irow] : lr[0];
      denom = (veval == 0.5f) ? newss : powf(newss, veval*2);
      denom = denom + eps;
      grad = (umval / denom);
      if (langevin > 0) grad += curand_normal(prstate) * langevin;
      grad = grad * lrval * tsval;           // Normal gradient
      grad += momentum[i];                   // With momentum
      momentum[i] = mu * grad;               // Save updated momentum
      mmval += grad;                         // Add the new gradient
      if (maskr > 0) {
        if (maskr > 1) {
          mmval *= mask[i];
        } else {
          mmval *= mask[icol];
        }
      }
      mm[i] = mmval;
    }
  }
}
// ADAGRAD with Nesterov momentum.  Same RMS and scaled-gradient computation
// as __ADAGrad; the model step adds grad + newmom - oldmom, i.e. the
// look-ahead correction of the Nesterov scheme, and momentum[i] is refreshed
// to mu * (grad + oldmom).
__global__ void __ADAGradn(int nrows, int ncols, float *mm, float *um, float *ssq, float *momentum, float mu, float *mask, int maskr,
                           float nw, float *ve, int nve, float *ts, int nts, float *lr, int nlr, float langevin, float eps, int doupdate, curandState *rstates) {
  int ithread = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
  int nthreads = blockDim.x * gridDim.x * gridDim.y;
  int i, irow, icol;
  float mmval, umval, sqval, newss, veval, tsval, lrval, denom, grad, oldmom, newmom;
  float sqnw = sqrtf(nw);
  float sq1mnw = sqrtf(1-nw);
  curandState *prstate = &rstates[ithread];
  for (i = ithread; i < nrows*ncols; i += nthreads) {
    icol = i / nrows;
    irow = i - icol * nrows;
    umval = um[i];
    sqval = ssq[i];
    // newss = (nw * umval * umval) + (1 - nw) * sqval;
    newss = hypotf(sqnw * umval, sq1mnw * sqval);
    ssq[i] = newss;
    if (doupdate) {
      mmval = mm[i];
      veval = (nve > 1) ? ve[irow] : ve[0];
      tsval = (nts > 1) ? ts[irow] : ts[0];
      lrval = (nlr > 1) ? lr[irow] : lr[0];
      denom = (veval == 0.5f) ? newss : powf(newss, veval*2);
      denom = denom + eps;
      grad = (umval / denom);
      if (langevin > 0) grad += curand_normal(prstate) * langevin;
      grad = grad * lrval * tsval;           // Normal gradient
      oldmom = momentum[i];                  // Momentum
      grad += oldmom;                        // New gradient
      newmom = mu * grad;                    // Compute new momentum
      momentum[i] = newmom;                  // Save new momentum
      mmval += grad + newmom - oldmom;
      if (maskr > 0) {
        if (maskr > 1) {
          mmval *= mask[i];
        } else {
          mmval *= mask[icol];
        }
      }
      mm[i] = mmval;
    }
  }
}
// ADAGRAD update (no momentum), CUDA host wrapper.  Sizes the launch,
// optionally allocates and seeds one RNG state per thread for Langevin
// noise, runs the __ADAGrad kernel over the nrows x ncols model, and
// returns the last CUDA error code (cudaSuccess == 0 on success).
int ADAGrad(int nrows, int ncols, float *mm, float *um, float *ssq, float *mask, int maskr, float nw, float *ve, int nve, float *ts, int nts,
            float *lrate, int nlrate, float langevin, float eps, int doupdate) {
  int nthreads;
  dim3 griddims;
  int basesize;
  if (langevin > 0) {
    // Each thread owns a curandState, so launch ~1/32 as many threads to
    // bound the size of the RNG state allocation.
    basesize = max(32, nrows * ncols / 32);
  } else {
    basesize = max(32, nrows * ncols);
  }
  setsizes(basesize, &griddims, &nthreads);
  int ntt = nthreads * griddims.x * griddims.y;  // total threads launched
  curandState *rstates = NULL;
  if (langevin > 0) {
    cudaError_t err = cudaMalloc((void **)&rstates, ntt * sizeof(curandState));
    // Compare against cudaSuccess explicitly (was 'err > 0').
    if (err != cudaSuccess) {
      fprintf(stderr, "Error in cudaMalloc %d\n", err);
      return err;
    }
    cudaDeviceSynchronize();
    __nrandinit<<<griddims,nthreads>>>(rstates);
    cudaDeviceSynchronize();
  }
  __ADAGrad<<<griddims,nthreads>>>(nrows, ncols, mm, um, ssq, mask, maskr, nw, ve, nve, ts, nts, lrate, nlrate, langevin, eps, doupdate, rstates);
  cudaDeviceSynchronize();
  if (langevin > 0) cudaFree(rstates);
  cudaError_t err = cudaGetLastError();
  return err;
}
// ADAGRAD update with standard momentum, CUDA host wrapper.  Sizes the
// launch, optionally allocates and seeds one RNG state per thread for
// Langevin noise, runs the __ADAGradm kernel, and returns the last CUDA
// error code (cudaSuccess == 0 on success).
int ADAGradm(int nrows, int ncols, float *mm, float *um, float *ssq, float *momentum, float mu, float *mask, int maskr, float nw, float *ve, int nve, float *ts, int nts,
             float *lrate, int nlrate, float langevin, float eps, int doupdate) {
  int nthreads;
  dim3 griddims;
  int basesize;
  if (langevin > 0) {
    // Each thread owns a curandState, so launch ~1/32 as many threads to
    // bound the size of the RNG state allocation.
    basesize = max(32, nrows * ncols / 32);
  } else {
    basesize = max(32, nrows * ncols);
  }
  setsizes(basesize, &griddims, &nthreads);
  int ntt = nthreads * griddims.x * griddims.y;  // total threads launched
  curandState *rstates = NULL;
  if (langevin > 0) {
    cudaError_t err = cudaMalloc((void **)&rstates, ntt * sizeof(curandState));
    // Compare against cudaSuccess explicitly (was 'err > 0').
    if (err != cudaSuccess) {
      fprintf(stderr, "Error in cudaMalloc %d\n", err);
      return err;
    }
    cudaDeviceSynchronize();
    __nrandinit<<<griddims,nthreads>>>(rstates);
    cudaDeviceSynchronize();
  }
  __ADAGradm<<<griddims,nthreads>>>(nrows, ncols, mm, um, ssq, momentum, mu, mask, maskr, nw, ve, nve, ts, nts, lrate, nlrate, langevin, eps, doupdate, rstates);
  cudaDeviceSynchronize();
  if (langevin > 0) cudaFree(rstates);
  cudaError_t err = cudaGetLastError();
  return err;
}
// ADAGRAD update with Nesterov momentum, CUDA host wrapper.  Sizes the
// launch, optionally allocates and seeds one RNG state per thread for
// Langevin noise, runs the __ADAGradn kernel, and returns the last CUDA
// error code (cudaSuccess == 0 on success).
int ADAGradn(int nrows, int ncols, float *mm, float *um, float *ssq, float *momentum, float mu, float *mask, int maskr, float nw, float *ve, int nve, float *ts, int nts,
             float *lrate, int nlrate, float langevin, float eps, int doupdate) {
  int nthreads;
  dim3 griddims;
  int basesize;
  if (langevin > 0) {
    // Each thread owns a curandState, so launch ~1/32 as many threads to
    // bound the size of the RNG state allocation.
    basesize = max(32, nrows * ncols / 32);
  } else {
    basesize = max(32, nrows * ncols);
  }
  setsizes(basesize, &griddims, &nthreads);
  int ntt = nthreads * griddims.x * griddims.y;  // total threads launched
  curandState *rstates = NULL;
  if (langevin > 0) {
    cudaError_t err = cudaMalloc((void **)&rstates, ntt * sizeof(curandState));
    // Compare against cudaSuccess explicitly (was 'err > 0').
    if (err != cudaSuccess) {
      fprintf(stderr, "Error in cudaMalloc %d\n", err);
      return err;
    }
    cudaDeviceSynchronize();
    __nrandinit<<<griddims,nthreads>>>(rstates);
    cudaDeviceSynchronize();
  }
  __ADAGradn<<<griddims,nthreads>>>(nrows, ncols, mm, um, ssq, momentum, mu, mask, maskr, nw, ve, nve, ts, nts, lrate, nlrate, langevin, eps, doupdate, rstates);
  cudaDeviceSynchronize();
  if (langevin > 0) cudaFree(rstates);
  cudaError_t err = cudaGetLastError();
  return err;
}
|
036e5d23235a4fbd0cfd124236fad8e25855ef2f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include "caffe/layers/domain_transform_forward_only_layer.hpp"
#include "caffe/layer.hpp"
#include "caffe/common.cuh"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// Left-to-right pass of the recursive domain-transform filter:
//   out[w] += weight[w] * (out[w-1] - out[w])   for w = 1..input_width-1.
// The recurrence is sequential in w, so parallelism is one thread per
// (channel, row) pair.  height/width are the padded buffer dimensions;
// input_height/input_width bound the valid region.
template <typename Dtype>
__global__ void kernel_horizontal_filter_left_to_right_forward(
    const int channels, const int height, const int width,
    const int input_height, const int input_width,
    const Dtype* weight, Dtype* output) {
  // One thread per row.
  CUDA_KERNEL_LOOP(ind, channels * input_height) {
    int h = ind % input_height;
    int c = ind / input_height;
    for (int w = 1; w < input_width; ++w) {
      int ind_out = (c * height + h) * width + w;
      int ind_wei = h * width + w;
      //intermediate_res[ind_out] = output[ind_out - 1] - output[ind_out];
      output[ind_out] += weight[ind_wei] * (output[ind_out - 1] - output[ind_out]);
    }
  }
}
/*
template <typename Dtype>
__global__ void kernel_horizontal_filter_left_to_right_backward(
const int channels, const int height, const int width,
const int input_height, const int input_width,
const Dtype* weight, const Dtype* intermediate_res,
Dtype* output, Dtype* weight_diff) {
// One thread per row.
CUDA_KERNEL_LOOP(ind, channels * input_height) {
int h = ind % input_height;
int c = ind / input_height;
for (int w = input_width - 1; w >= 1; --w) {
int ind_out = (c * height + h) * width + w;
int ind_wei = h * width + w;
atomicAdd(&weight_diff[ind_wei],
output[ind_out] * intermediate_res[ind_out]);
output[ind_out - 1] += weight[ind_wei] * output[ind_out];
output[ind_out] *= 1 - weight[ind_wei];
}
}
}
*/
// Right-to-left pass of the recursive domain-transform filter:
//   out[w] += weight[w+1] * (out[w+1] - out[w])  for w = input_width-2..0.
// Sequential in w; one thread per (channel, row) pair.
template <typename Dtype>
__global__ void kernel_horizontal_filter_right_to_left_forward(
    const int channels, const int height, const int width,
    const int input_height, const int input_width,
    const Dtype* weight, Dtype* output) {
  // One thread per row.
  CUDA_KERNEL_LOOP(ind, channels * input_height) {
    int h = ind % input_height;
    int c = ind / input_height;
    for (int w = input_width - 2; w >= 0; --w) {
      int ind_out = (c * height + h) * width + w;
      int ind_wei = h * width + w;
      //intermediate_res[ind_out] = output[ind_out + 1] - output[ind_out];
      output[ind_out] += weight[ind_wei + 1] * (output[ind_out + 1] - output[ind_out]);
    }
  }
}
/*
template <typename Dtype>
__global__ void kernel_horizontal_filter_right_to_left_backward(
const int channels, const int height, const int width,
const int input_height, const int input_width,
const Dtype* weight, const Dtype* intermediate_res,
Dtype* output, Dtype* weight_diff) {
// One thread per row.
CUDA_KERNEL_LOOP(ind, channels * input_height) {
int h = ind % input_height;
int c = ind / input_height;
for (int w = 0; w < input_width - 1; ++w) {
int ind_out = (c * height + h) * width + w;
int ind_wei = h * width + w;
atomicAdd(&weight_diff[ind_wei + 1],
output[ind_out] * intermediate_res[ind_out]);
output[ind_out + 1] += weight[ind_wei + 1] * output[ind_out];
output[ind_out] *= 1 - weight[ind_wei + 1];
}
}
}
*/
// Top-to-bottom pass of the recursive domain-transform filter:
//   out[h] += weight[h] * (out[h-1] - out[h])   for h = 1..input_height-1.
// Sequential in h; one thread per (channel, column) pair.
template <typename Dtype>
__global__ void kernel_vertical_filter_top_to_bottom_forward(
    const int channels, const int height, const int width,
    const int input_height, const int input_width,
    const Dtype* weight, Dtype* output) {
  // One thread per column.
  CUDA_KERNEL_LOOP(ind, channels * input_width) {
    int w = ind % input_width;
    int c = ind / input_width;
    for (int h = 1; h < input_height; ++h) {
      int ind_out = (c * height + h) * width + w;
      int ind_wei = h * width + w;
      //intermediate_res[ind_out] = output[ind_out - width] - output[ind_out];
      output[ind_out] += weight[ind_wei] * (output[ind_out - width] - output[ind_out]);
    }
  }
}
/*
template <typename Dtype>
__global__ void kernel_vertical_filter_top_to_bottom_backward(
const int channels, const int height, const int width,
const int input_height, const int input_width,
const Dtype* weight, const Dtype* intermediate_res,
Dtype* output, Dtype* weight_diff) {
// One thread per column.
CUDA_KERNEL_LOOP(ind, channels * input_width) {
int w = ind % input_width;
int c = ind / input_width;
for (int h = input_height - 1; h >= 1; --h) {
int ind_out = (c * height + h) * width + w;
int ind_wei = h * width + w;
atomicAdd(&weight_diff[ind_wei],
output[ind_out] * intermediate_res[ind_out]);
output[ind_out - width] += weight[ind_wei] * output[ind_out];
output[ind_out] = (1 - weight[ind_wei]) * output[ind_out];
}
}
}
*/
// Bottom-to-top pass of the recursive domain-transform filter:
//   out[h] += weight[h+1] * (out[h+1] - out[h])  for h = input_height-2..0.
// Sequential in h; one thread per (channel, column) pair.
template <typename Dtype>
__global__ void kernel_vertical_filter_bottom_to_top_forward(
    const int channels, const int height, const int width,
    const int input_height, const int input_width,
    const Dtype* weight, Dtype* output) {
  // One thread per column.
  CUDA_KERNEL_LOOP(ind, channels * input_width) {
    int w = ind % input_width;
    int c = ind / input_width;
    for (int h = input_height - 2; h >= 0; --h) {
      int ind_out = (c * height + h) * width + w;
      int ind_wei = h * width + w;
      //intermediate_res[ind_out] = output[ind_out + width] - output[ind_out];
      output[ind_out] += weight[ind_wei + width] * (output[ind_out + width] - output[ind_out]);
    }
  }
}
/*
template <typename Dtype>
__global__ void kernel_vertical_filter_bottom_to_top_backward(
const int channels, const int height, const int width,
const int input_height, const int input_width,
const Dtype* weight, const Dtype* intermediate_res,
Dtype* output, Dtype* weight_diff) {
// One thread per column.
CUDA_KERNEL_LOOP(ind, channels * input_width) {
int w = ind % input_width;
int c = ind / input_width;
for (int h = 0; h < input_height - 1; ++h) {
int ind_out = (c * height + h) * width + w;
int ind_wei = h * width + w;
atomicAdd(&weight_diff[ind_wei + width],
output[ind_out] * intermediate_res[ind_out]);
output[ind_out + width] += weight[ind_wei + width] * output[ind_out];
output[ind_out] *= 1 - weight[ind_wei + width];
}
}
}
*/
// Build the edge-aware weight image for one domain-transform iteration:
//   weight = exp(-sqrt(2)/sigma_i * (1 + ref_grad * spatial_sigma/range_sigma))
// evaluated over the input_width-wide valid region of the width-padded
// buffer.
// NOTE(review): the min_weight argument is accepted but never used here;
// the "[min_weight_, 1]" range in the comment below is not enforced by this
// kernel — confirm whether clamping was intended.
template <typename Dtype>
__global__ void kernel_setup_weight_image(
    const int count, const int input_width, const int width,
    const Dtype sigma_i, const Dtype spatial_sigma, const Dtype range_sigma,
    const Dtype min_weight, const Dtype* data, Dtype* weight) {
  // Division by zero has been checked in LayerSetUp.
  Dtype mult1 = -sqrt(2.) / sigma_i;
  Dtype mult2 = spatial_sigma / range_sigma;
  CUDA_KERNEL_LOOP(index, count) {
    int h = index / input_width;
    int w = index % input_width;
    int pos = h * width + w;
    // weight must be [min_weight_, 1]
    weight[pos] = exp(mult1 * (1 + data[pos] * mult2));
  }
}
/*
template <typename Dtype>
__global__ void kernel_compute_ref_grad_diff(
const int count, const int input_width, const int width,
const Dtype sigma_i, const Dtype spatial_sigma, const Dtype range_sigma,
const Dtype* weight, const Dtype* weight_diff, Dtype* ref_grad_diff) {
// Division by zero has been checked in LayerSetUp.
Dtype mult1 = -sqrt(2.) / sigma_i;
Dtype mult2 = spatial_sigma / range_sigma;
CUDA_KERNEL_LOOP(index, count) {
int h = index / input_width;
int w = index % input_width;
int pos = h * width + w;
ref_grad_diff[pos] += (mult1 * mult2 * weight_diff[pos] * weight[pos]);
}
}
*/
// Forward pass of the domain-transform filter (HIP/hipified launches).
// bottom[0]: features to be filtered; bottom[1]: reference gradient image
// used to build the edge-aware weights; bottom[2]: per-sample valid
// (height, width) of the possibly padded buffers.  The features are copied
// into top[0] and filtered in place: for each of num_iter_ iterations, a
// fresh weight image is built for that iteration's sigma, then four
// sequential recurrence passes (L->R, R->L, T->B, B->T) are applied.
template <typename Dtype>
void DomainTransformForwardOnlyLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const int spatial_dim = height_ * width_;
  const int sample_dim = channels_ * spatial_dim;
  Dtype* weight = weight_image_.mutable_gpu_data();
  for (int n = 0; n < num_; ++n) {
    const Dtype* feat_data = bottom[0]->gpu_data_at(n);
    Dtype* top_data = top[0]->mutable_gpu_data_at(n);
    // Filtering happens in place on a copy of the input features.
    caffe_copy<Dtype>(sample_dim, feat_data, top_data);
    const Dtype* ref_grad_data = bottom[1]->gpu_data_at(n);
    const int input_height = static_cast<int>(bottom[2]->cpu_data_at(n)[0]);
    const int input_width = static_cast<int>(bottom[2]->cpu_data_at(n)[1]);
    const int input_spatial_dim = input_height * input_width;
    CHECK_LE(input_height, height_) <<
        "input_height should be less than or equal to height.";
    CHECK_LE(input_width, width_) <<
        "input_width should be less than or equal to width.";
    for (int iter = 0; iter < num_iter_; ++iter) {
      // Per-iteration sigma (see ComputeSigma) controls the weight falloff.
      Dtype sigma_i = ComputeSigma(iter);
      hipLaunchKernelGGL(( kernel_setup_weight_image<Dtype>), CAFFE_GET_BLOCKS(
          input_spatial_dim), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
          input_spatial_dim, input_width, width_,
          sigma_i, spatial_sigma_, range_sigma_, min_weight_,
          ref_grad_data, weight);
      /* TODO(gpapan): This CUDA implementation is inefficient, because there
       * are dependencies within each row or col, so you can only use height
       * or width threads. You can improve this by doing all channels in
       * parallel and also being more careful with your <<< . >>> arguments.
       * You can further significantly improve speed by using BLAS *axpby()
       * routines. Right now caffe_gpu_axpby is not sufficient because it
       * assumes strides = 1, but you need to use the full BLAS interface
       * that allows strides > 1.
       * Overload caffe_gpu_axpby(), also supplying a version that accepts
       * a stride parameter. Use this to significantly improve speed. Also
       * adding this functionality to caffe_cpu_axpby() would further allow
       * you to have almost identical cpu / gpu implementations.
       */
      // Filter the input four times in the following (forward) orders:
      // (0) left->right (1) right->left (2) top->bottom (3) bottom->top.
      for (int pass = 0; pass < num_passes_; ++pass) {
        switch (pass) {
          case 0:
            hipLaunchKernelGGL(( kernel_horizontal_filter_left_to_right_forward<Dtype>),
                dim3(CAFFE_GET_BLOCKS(channels_ * input_height)),
                dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
                channels_, height_, width_,
                input_height, input_width,
                weight, top_data);
            break;
          case 1:
            hipLaunchKernelGGL(( kernel_horizontal_filter_right_to_left_forward<Dtype>),
                dim3(CAFFE_GET_BLOCKS(channels_ * input_height)),
                dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
                channels_, height_, width_,
                input_height, input_width,
                weight, top_data);
            break;
          case 2:
            hipLaunchKernelGGL(( kernel_vertical_filter_top_to_bottom_forward<Dtype>),
                dim3(CAFFE_GET_BLOCKS(channels_ * input_width)),
                dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
                channels_, height_, width_,
                input_height, input_width,
                weight, top_data);
            break;
          case 3:
            hipLaunchKernelGGL(( kernel_vertical_filter_bottom_to_top_forward<Dtype>),
                dim3(CAFFE_GET_BLOCKS(channels_ * input_width)),
                dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
                channels_, height_, width_,
                input_height, input_width,
                weight, top_data);
            break;
        }
      }
    }
  }
}
// NOTE: This layer is forward-only by design -- Backward_gpu is a no-op.
// The full backward implementation (gradients w.r.t. the filtered input
// bottom[0] and the reference-gradient image bottom[1]) is preserved below
// inside a comment as a reference; bottom[2] (per-sample image dimensions)
// can never be back-propagated to.
template <typename Dtype>
void DomainTransformForwardOnlyLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
/*
if (propagate_down[2]) {
LOG(FATAL) << this->type()
<< " Layer cannot back-propagate to image dimension.";
}
if (propagate_down[0] || propagate_down[1]) {
const int spatial_dim = height_ * width_;
const int sample_dim = channels_ * spatial_dim;
// weight_diff is a temporary buffer shared for all samples.
Dtype* weight_diff = blob_weight_diff_.mutable_gpu_diff();
Dtype* weight = weight_image_.mutable_gpu_data();
for (int n = 0; n < num_; ++n) {
const Dtype* top_diff = top[0]->gpu_diff_at(n);
Dtype* bottom_input_diff = bottom[0]->mutable_gpu_diff_at(n);
Dtype* bottom_ref_grad_diff = bottom[1]->mutable_gpu_diff_at(n);
caffe_copy<Dtype>(sample_dim, top_diff, bottom_input_diff);
caffe_gpu_set<Dtype>(spatial_dim, Dtype(0), bottom_ref_grad_diff);
const Dtype* ref_grad_data = bottom[1]->gpu_data_at(n);
const int input_height = static_cast<int>(bottom[2]->cpu_data_at(n)[0]);
const int input_width = static_cast<int>(bottom[2]->cpu_data_at(n)[1]);
CHECK_LE(input_height, height_) <<
"input_height should be less than or equal to height.";
CHECK_LE(input_width, width_) <<
"input_width should be less than or equal to width.";
const int input_spatial_dim = input_height * input_width;
for (int iter = num_iter_ - 1; iter >= 0; --iter) {
Dtype sigma_i = ComputeSigma(iter);
kernel_setup_weight_image<Dtype><<<CAFFE_GET_BLOCKS(
input_spatial_dim), CAFFE_CUDA_NUM_THREADS>>>(
input_spatial_dim, input_width, width_,
sigma_i, spatial_sigma_, range_sigma_, min_weight_,
ref_grad_data, weight);
caffe_gpu_set<Dtype>(spatial_dim, Dtype(0), weight_diff);
// Filter the input four times in the following (backward) orders:
// (3) bottom->top (2) top->bottom (1) right->left (0) left->right.
for (int pass = num_passes_ - 1; pass >= 0; --pass) {
int ind = iter * num_passes_ + pass;
Dtype* intermediate_res =
intermediate_results_[ind]->mutable_gpu_data_at(n);
switch (pass) {
case 0:
kernel_horizontal_filter_left_to_right_backward<Dtype><<<
CAFFE_GET_BLOCKS(channels_ * input_height),
CAFFE_CUDA_NUM_THREADS>>>(
channels_, height_, width_,
input_height, input_width,
weight, intermediate_res, bottom_input_diff, weight_diff);
break;
case 1:
kernel_horizontal_filter_right_to_left_backward<Dtype><<<
CAFFE_GET_BLOCKS(channels_ * input_height),
CAFFE_CUDA_NUM_THREADS>>>(
channels_, height_, width_,
input_height, input_width,
weight, intermediate_res, bottom_input_diff, weight_diff);
break;
case 2:
kernel_vertical_filter_top_to_bottom_backward<Dtype><<<
CAFFE_GET_BLOCKS(channels_ * input_width),
CAFFE_CUDA_NUM_THREADS>>>(
channels_, height_, width_,
input_height, input_width,
weight, intermediate_res, bottom_input_diff, weight_diff);
break;
case 3:
kernel_vertical_filter_bottom_to_top_backward<Dtype><<<
CAFFE_GET_BLOCKS(channels_ * input_width),
CAFFE_CUDA_NUM_THREADS>>>(
channels_, height_, width_,
input_height, input_width,
weight, intermediate_res, bottom_input_diff, weight_diff);
break;
}
}
kernel_compute_ref_grad_diff<Dtype><<<
CAFFE_GET_BLOCKS(input_spatial_dim), CAFFE_CUDA_NUM_THREADS>>>(
input_spatial_dim, input_width, width_,
sigma_i, spatial_sigma_, range_sigma_,
weight, weight_diff, bottom_ref_grad_diff);
}
}
}
*/
}
INSTANTIATE_LAYER_GPU_FUNCS(DomainTransformForwardOnlyLayer);
} // namespace caffe
| 036e5d23235a4fbd0cfd124236fad8e25855ef2f.cu | #include <algorithm>
#include <vector>
#include "caffe/layers/domain_transform_forward_only_layer.hpp"
#include "caffe/layer.hpp"
#include "caffe/common.cuh"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void kernel_horizontal_filter_left_to_right_forward(
    const int channels, const int height, const int width,
    const int input_height, const int input_width,
    const Dtype* weight, Dtype* output) {
  // Recursive in-place filtering along each row, scanning from the leftmost
  // column to the rightmost valid one.  The per-row recurrence is inherently
  // sequential, so each thread owns one (channel, row) pair.
  CUDA_KERNEL_LOOP(ind, channels * input_height) {
    const int row = ind % input_height;
    const int chan = ind / input_height;
    // Base pointers of this row inside the (possibly padded) planes.
    Dtype* out_row = output + (chan * height + row) * width;
    const Dtype* wei_row = weight + row * width;
    for (int col = 1; col < input_width; ++col) {
      // out[col] <- out[col] + w[col] * (out[col - 1] - out[col])
      out_row[col] += wei_row[col] * (out_row[col - 1] - out_row[col]);
    }
  }
}
/*
template <typename Dtype>
__global__ void kernel_horizontal_filter_left_to_right_backward(
const int channels, const int height, const int width,
const int input_height, const int input_width,
const Dtype* weight, const Dtype* intermediate_res,
Dtype* output, Dtype* weight_diff) {
// One thread per row.
CUDA_KERNEL_LOOP(ind, channels * input_height) {
int h = ind % input_height;
int c = ind / input_height;
for (int w = input_width - 1; w >= 1; --w) {
int ind_out = (c * height + h) * width + w;
int ind_wei = h * width + w;
atomicAdd(&weight_diff[ind_wei],
output[ind_out] * intermediate_res[ind_out]);
output[ind_out - 1] += weight[ind_wei] * output[ind_out];
output[ind_out] *= 1 - weight[ind_wei];
}
}
}
*/
template <typename Dtype>
__global__ void kernel_horizontal_filter_right_to_left_forward(
    const int channels, const int height, const int width,
    const int input_height, const int input_width,
    const Dtype* weight, Dtype* output) {
  // Recursive in-place filtering along each row, scanning right to left.
  // The weight for the edge between columns (col, col + 1) lives at index
  // col + 1, mirroring the left-to-right pass.  One thread per (channel, row).
  CUDA_KERNEL_LOOP(ind, channels * input_height) {
    const int row = ind % input_height;
    const int chan = ind / input_height;
    Dtype* out_row = output + (chan * height + row) * width;
    const Dtype* wei_row = weight + row * width;
    for (int col = input_width - 2; col >= 0; --col) {
      // out[col] <- out[col] + w[col + 1] * (out[col + 1] - out[col])
      out_row[col] += wei_row[col + 1] * (out_row[col + 1] - out_row[col]);
    }
  }
}
/*
template <typename Dtype>
__global__ void kernel_horizontal_filter_right_to_left_backward(
const int channels, const int height, const int width,
const int input_height, const int input_width,
const Dtype* weight, const Dtype* intermediate_res,
Dtype* output, Dtype* weight_diff) {
// One thread per row.
CUDA_KERNEL_LOOP(ind, channels * input_height) {
int h = ind % input_height;
int c = ind / input_height;
for (int w = 0; w < input_width - 1; ++w) {
int ind_out = (c * height + h) * width + w;
int ind_wei = h * width + w;
atomicAdd(&weight_diff[ind_wei + 1],
output[ind_out] * intermediate_res[ind_out]);
output[ind_out + 1] += weight[ind_wei + 1] * output[ind_out];
output[ind_out] *= 1 - weight[ind_wei + 1];
}
}
}
*/
template <typename Dtype>
__global__ void kernel_vertical_filter_top_to_bottom_forward(
    const int channels, const int height, const int width,
    const int input_height, const int input_width,
    const Dtype* weight, Dtype* output) {
  // Recursive in-place filtering along each column, top to bottom.
  // Consecutive rows are `width` elements apart in memory.  One thread per
  // (channel, column) pair, since the column recurrence is sequential.
  CUDA_KERNEL_LOOP(ind, channels * input_width) {
    const int col = ind % input_width;
    const int chan = ind / input_width;
    Dtype* out_col = output + chan * height * width + col;
    const Dtype* wei_col = weight + col;
    for (int row = 1; row < input_height; ++row) {
      const int o = row * width;
      // out[row] <- out[row] + w[row] * (out[row - 1] - out[row])
      out_col[o] += wei_col[o] * (out_col[o - width] - out_col[o]);
    }
  }
}
/*
template <typename Dtype>
__global__ void kernel_vertical_filter_top_to_bottom_backward(
const int channels, const int height, const int width,
const int input_height, const int input_width,
const Dtype* weight, const Dtype* intermediate_res,
Dtype* output, Dtype* weight_diff) {
// One thread per column.
CUDA_KERNEL_LOOP(ind, channels * input_width) {
int w = ind % input_width;
int c = ind / input_width;
for (int h = input_height - 1; h >= 1; --h) {
int ind_out = (c * height + h) * width + w;
int ind_wei = h * width + w;
atomicAdd(&weight_diff[ind_wei],
output[ind_out] * intermediate_res[ind_out]);
output[ind_out - width] += weight[ind_wei] * output[ind_out];
output[ind_out] = (1 - weight[ind_wei]) * output[ind_out];
}
}
}
*/
template <typename Dtype>
__global__ void kernel_vertical_filter_bottom_to_top_forward(
    const int channels, const int height, const int width,
    const int input_height, const int input_width,
    const Dtype* weight, Dtype* output) {
  // Recursive in-place filtering along each column, bottom to top.  The
  // weight for the edge between rows (row, row + 1) lives one full row
  // (`width` elements) ahead.  One thread per (channel, column) pair.
  CUDA_KERNEL_LOOP(ind, channels * input_width) {
    const int col = ind % input_width;
    const int chan = ind / input_width;
    Dtype* out_col = output + chan * height * width + col;
    const Dtype* wei_col = weight + col;
    for (int row = input_height - 2; row >= 0; --row) {
      const int o = row * width;
      // out[row] <- out[row] + w[row + 1] * (out[row + 1] - out[row])
      out_col[o] += wei_col[o + width] * (out_col[o + width] - out_col[o]);
    }
  }
}
/*
template <typename Dtype>
__global__ void kernel_vertical_filter_bottom_to_top_backward(
const int channels, const int height, const int width,
const int input_height, const int input_width,
const Dtype* weight, const Dtype* intermediate_res,
Dtype* output, Dtype* weight_diff) {
// One thread per column.
CUDA_KERNEL_LOOP(ind, channels * input_width) {
int w = ind % input_width;
int c = ind / input_width;
for (int h = 0; h < input_height - 1; ++h) {
int ind_out = (c * height + h) * width + w;
int ind_wei = h * width + w;
atomicAdd(&weight_diff[ind_wei + width],
output[ind_out] * intermediate_res[ind_out]);
output[ind_out + width] += weight[ind_wei + width] * output[ind_out];
output[ind_out] *= 1 - weight[ind_wei + width];
}
}
}
*/
// Computes the per-pixel recursive-filter weight from the reference-gradient
// image `data`:
//   w = exp(-sqrt(2)/sigma_i * (1 + data * spatial_sigma/range_sigma))
// and writes it into the padded height x width `weight` plane (only the
// input_height x input_width region is touched; `count` is that region's
// size).  `width` is the padded row stride of both `data` and `weight`.
template <typename Dtype>
__global__ void kernel_setup_weight_image(
    const int count, const int input_width, const int width,
    const Dtype sigma_i, const Dtype spatial_sigma, const Dtype range_sigma,
    const Dtype min_weight, const Dtype* data, Dtype* weight) {
  // Division by zero has been checked in LayerSetUp.
  Dtype mult1 = -sqrt(2.) / sigma_i;
  Dtype mult2 = spatial_sigma / range_sigma;
  CUDA_KERNEL_LOOP(index, count) {
    int h = index / input_width;
    int w = index % input_width;
    int pos = h * width + w;
    // weight must be [min_weight_, 1].  Fix: `min_weight` was previously
    // accepted but never applied, so the documented lower bound did not hold
    // for large gradients; clamp the exponential from below.
    Dtype value = exp(mult1 * (1 + data[pos] * mult2));
    weight[pos] = (value < min_weight) ? min_weight : value;
  }
}
/*
template <typename Dtype>
__global__ void kernel_compute_ref_grad_diff(
const int count, const int input_width, const int width,
const Dtype sigma_i, const Dtype spatial_sigma, const Dtype range_sigma,
const Dtype* weight, const Dtype* weight_diff, Dtype* ref_grad_diff) {
// Division by zero has been checked in LayerSetUp.
Dtype mult1 = -sqrt(2.) / sigma_i;
Dtype mult2 = spatial_sigma / range_sigma;
CUDA_KERNEL_LOOP(index, count) {
int h = index / input_width;
int w = index % input_width;
int pos = h * width + w;
ref_grad_diff[pos] += (mult1 * mult2 * weight_diff[pos] * weight[pos]);
}
}
*/
// Forward pass of the domain-transform filter.
//   bottom[0]: features to filter (copied into top[0], filtered in place)
//   bottom[1]: reference-gradient image used to build the per-pixel weights
//   bottom[2]: per-sample valid (height, width) -- data may be zero-padded
//              up to (height_, width_), and only the valid region is filtered
// Each of the num_iter_ iterations rebuilds the weight image for the
// iteration-dependent sigma and then applies four sequential 1-D passes:
// left->right, right->left, top->bottom, bottom->top.
template <typename Dtype>
void DomainTransformForwardOnlyLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const int spatial_dim = height_ * width_;
const int sample_dim = channels_ * spatial_dim;
// Scratch buffer holding the per-pixel filter weights for one iteration.
Dtype* weight = weight_image_.mutable_gpu_data();
for (int n = 0; n < num_; ++n) {
const Dtype* feat_data = bottom[0]->gpu_data_at(n);
Dtype* top_data = top[0]->mutable_gpu_data_at(n);
// The filter kernels work in place on top_data, so seed it with the input.
caffe_copy<Dtype>(sample_dim, feat_data, top_data);
const Dtype* ref_grad_data = bottom[1]->gpu_data_at(n);
// Per-sample valid extent; index 0 holds height, index 1 holds width.
const int input_height = static_cast<int>(bottom[2]->cpu_data_at(n)[0]);
const int input_width = static_cast<int>(bottom[2]->cpu_data_at(n)[1]);
const int input_spatial_dim = input_height * input_width;
CHECK_LE(input_height, height_) <<
"input_height should be less than or equal to height.";
CHECK_LE(input_width, width_) <<
"input_width should be less than or equal to width.";
for (int iter = 0; iter < num_iter_; ++iter) {
// Sigma for this iteration (varies with iter, see ComputeSigma).
Dtype sigma_i = ComputeSigma(iter);
// Rebuild the weight image from the reference gradients for this sigma.
kernel_setup_weight_image<Dtype><<<CAFFE_GET_BLOCKS(
input_spatial_dim), CAFFE_CUDA_NUM_THREADS>>>(
input_spatial_dim, input_width, width_,
sigma_i, spatial_sigma_, range_sigma_, min_weight_,
ref_grad_data, weight);
/* TODO(gpapan): This CUDA implementation is inefficient, because there
* are dependencies within each row or col, so you can only use height
* or width threads. You can improve this by doing all channels in
* parallel and also being more careful with your <<< . >>> arguments.
* You can further significantly improve speed by using BLAS *axpby()
* routines. Right now caffe_gpu_axpby is not sufficient because it
* assumes strides = 1, but you need to use the full BLAS interface
* that allows strides > 1.
* Overload caffe_gpu_axpby(), also supplying a version that accepts
* a stride parameter. Use this to significantly improve speed. Also
* adding this functionality to caffe_cpu_axpby() would further allow
* you to have almost identical cpu / gpu implementations.
*/
// Filter the input four times in the following (forward) orders:
// (0) left->right (1) right->left (2) top->bottom (3) bottom->top.
// Horizontal passes launch one thread per (channel, row); vertical
// passes one thread per (channel, column).  Pass order matters: each
// pass reads the in-place result of the previous one.
for (int pass = 0; pass < num_passes_; ++pass) {
switch (pass) {
case 0:
kernel_horizontal_filter_left_to_right_forward<Dtype><<<
CAFFE_GET_BLOCKS(channels_ * input_height),
CAFFE_CUDA_NUM_THREADS>>>(
channels_, height_, width_,
input_height, input_width,
weight, top_data);
break;
case 1:
kernel_horizontal_filter_right_to_left_forward<Dtype><<<
CAFFE_GET_BLOCKS(channels_ * input_height),
CAFFE_CUDA_NUM_THREADS>>>(
channels_, height_, width_,
input_height, input_width,
weight, top_data);
break;
case 2:
kernel_vertical_filter_top_to_bottom_forward<Dtype><<<
CAFFE_GET_BLOCKS(channels_ * input_width),
CAFFE_CUDA_NUM_THREADS>>>(
channels_, height_, width_,
input_height, input_width,
weight, top_data);
break;
case 3:
kernel_vertical_filter_bottom_to_top_forward<Dtype><<<
CAFFE_GET_BLOCKS(channels_ * input_width),
CAFFE_CUDA_NUM_THREADS>>>(
channels_, height_, width_,
input_height, input_width,
weight, top_data);
break;
}
}
}
}
}
// NOTE: This layer is forward-only by design -- Backward_gpu is a no-op.
// The full backward implementation (gradients w.r.t. the filtered input
// bottom[0] and the reference-gradient image bottom[1]) is preserved below
// inside a comment as a reference; bottom[2] (per-sample image dimensions)
// can never be back-propagated to.
template <typename Dtype>
void DomainTransformForwardOnlyLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
/*
if (propagate_down[2]) {
LOG(FATAL) << this->type()
<< " Layer cannot back-propagate to image dimension.";
}
if (propagate_down[0] || propagate_down[1]) {
const int spatial_dim = height_ * width_;
const int sample_dim = channels_ * spatial_dim;
// weight_diff is a temporary buffer shared for all samples.
Dtype* weight_diff = blob_weight_diff_.mutable_gpu_diff();
Dtype* weight = weight_image_.mutable_gpu_data();
for (int n = 0; n < num_; ++n) {
const Dtype* top_diff = top[0]->gpu_diff_at(n);
Dtype* bottom_input_diff = bottom[0]->mutable_gpu_diff_at(n);
Dtype* bottom_ref_grad_diff = bottom[1]->mutable_gpu_diff_at(n);
caffe_copy<Dtype>(sample_dim, top_diff, bottom_input_diff);
caffe_gpu_set<Dtype>(spatial_dim, Dtype(0), bottom_ref_grad_diff);
const Dtype* ref_grad_data = bottom[1]->gpu_data_at(n);
const int input_height = static_cast<int>(bottom[2]->cpu_data_at(n)[0]);
const int input_width = static_cast<int>(bottom[2]->cpu_data_at(n)[1]);
CHECK_LE(input_height, height_) <<
"input_height should be less than or equal to height.";
CHECK_LE(input_width, width_) <<
"input_width should be less than or equal to width.";
const int input_spatial_dim = input_height * input_width;
for (int iter = num_iter_ - 1; iter >= 0; --iter) {
Dtype sigma_i = ComputeSigma(iter);
kernel_setup_weight_image<Dtype><<<CAFFE_GET_BLOCKS(
input_spatial_dim), CAFFE_CUDA_NUM_THREADS>>>(
input_spatial_dim, input_width, width_,
sigma_i, spatial_sigma_, range_sigma_, min_weight_,
ref_grad_data, weight);
caffe_gpu_set<Dtype>(spatial_dim, Dtype(0), weight_diff);
// Filter the input four times in the following (backward) orders:
// (3) bottom->top (2) top->bottom (1) right->left (0) left->right.
for (int pass = num_passes_ - 1; pass >= 0; --pass) {
int ind = iter * num_passes_ + pass;
Dtype* intermediate_res =
intermediate_results_[ind]->mutable_gpu_data_at(n);
switch (pass) {
case 0:
kernel_horizontal_filter_left_to_right_backward<Dtype><<<
CAFFE_GET_BLOCKS(channels_ * input_height),
CAFFE_CUDA_NUM_THREADS>>>(
channels_, height_, width_,
input_height, input_width,
weight, intermediate_res, bottom_input_diff, weight_diff);
break;
case 1:
kernel_horizontal_filter_right_to_left_backward<Dtype><<<
CAFFE_GET_BLOCKS(channels_ * input_height),
CAFFE_CUDA_NUM_THREADS>>>(
channels_, height_, width_,
input_height, input_width,
weight, intermediate_res, bottom_input_diff, weight_diff);
break;
case 2:
kernel_vertical_filter_top_to_bottom_backward<Dtype><<<
CAFFE_GET_BLOCKS(channels_ * input_width),
CAFFE_CUDA_NUM_THREADS>>>(
channels_, height_, width_,
input_height, input_width,
weight, intermediate_res, bottom_input_diff, weight_diff);
break;
case 3:
kernel_vertical_filter_bottom_to_top_backward<Dtype><<<
CAFFE_GET_BLOCKS(channels_ * input_width),
CAFFE_CUDA_NUM_THREADS>>>(
channels_, height_, width_,
input_height, input_width,
weight, intermediate_res, bottom_input_diff, weight_diff);
break;
}
}
kernel_compute_ref_grad_diff<Dtype><<<
CAFFE_GET_BLOCKS(input_spatial_dim), CAFFE_CUDA_NUM_THREADS>>>(
input_spatial_dim, input_width, width_,
sigma_i, spatial_sigma_, range_sigma_,
weight, weight_diff, bottom_ref_grad_diff);
}
}
}
*/
}
INSTANTIATE_LAYER_GPU_FUNCS(DomainTransformForwardOnlyLayer);
} // namespace caffe
|
997b7eb7b4da74fa4cb633bea53205696a955569.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <utilities/high_res_clock.h>
#include <utilities/base_fixture.hpp>
#include <utilities/device_comm_wrapper.hpp>
#include <utilities/test_graphs.hpp>
#include <utilities/test_utilities.hpp>
#include <utilities/thrust_wrapper.hpp>
#include <cugraph/algorithms.hpp>
#include <cugraph/partition_manager.hpp>
#include <cuco/detail/hash_functions.cuh>
#include <cugraph/experimental/graph_view.hpp>
#include <cugraph/prims/reduce_v.cuh>
#include <thrust/count.h>
#include <raft/comms/comms.hpp>
#include <raft/comms/mpi_comms.hpp>
#include <raft/handle.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
#include <gtest/gtest.h>
#include <random>
// do the perf measurements
// enabled by command line parameter s'--perf'
//
static int PERF = 0;
// Deterministically maps a vertex id to a tuple of property values: hashes
// the id with MurmurHash3, reduces it modulo `mod`, and casts the result to
// each requested property type T.  Used to fabricate reproducible per-vertex
// test properties.
template <typename vertex_t, typename... T>
struct property_transform : public thrust::unary_function<vertex_t, thrust::tuple<T...>> {
int mod{};  // number of hash bins; every produced value lies in [0, mod)
property_transform(int mod_count) : mod(mod_count) {}
__device__ auto operator()(const vertex_t& val)
{
cuco::detail::MurmurHash3_32<vertex_t> hash_func{};
auto value = hash_func(val) % mod;
return thrust::make_tuple(static_cast<T>(value)...);
}
};
// Specialization that unwraps a tuple-like property type (e.g. Tuple<int,
// float>) into the variadic form above.
template <typename vertex_t, template <typename...> typename Tuple, typename... T>
struct property_transform<vertex_t, Tuple<T...>> : public property_transform<vertex_t, T...> {
};
// Builds a thrust::tuple of begin() iterators from a std::tuple of
// containers, expanding over the index sequence I...
template <typename Tuple, std::size_t... I>
auto make_iterator_tuple(Tuple& data, std::index_sequence<I...>)
{
return thrust::make_tuple((std::get<I>(data).begin())...);
}
// Zips the begin() iterators of every container in `data` into a single
// thrust zip iterator.
template <typename... T>
auto get_zip_iterator(std::tuple<T...>& data)
{
return thrust::make_zip_iterator(make_iterator_tuple(
data, std::make_index_sequence<std::tuple_size<std::tuple<T...>>::value>()));
}
// Single-property case: return the plain iterator, no zipping needed.
template <typename T>
auto get_property_iterator(std::tuple<T>& data)
{
return (std::get<0>(data)).begin();
}
// Multi-property case: return a zip iterator over all property containers.
template <typename T0, typename... T>
auto get_property_iterator(std::tuple<T0, T...>& data)
{
return get_zip_iterator(data);
}
// Fabricates deterministic per-vertex property data for the test: either a
// constant initial tuple value, or hashed property vectors derived from a
// label array / counting range via property_transform.
template <typename... T>
struct generate_impl {
// Returns a tuple where every component is `init` cast to its type T.
static thrust::tuple<T...> initial_value(int init)
{
return thrust::make_tuple(static_cast<T>(init)...);
}
// Produces one device vector per property type, filled by hashing each
// label into [0, hash_bin_count).
template <typename label_t>
static std::tuple<rmm::device_uvector<T>...> property(rmm::device_uvector<label_t>& labels,
int hash_bin_count,
raft::handle_t const& handle)
{
auto data = std::make_tuple(rmm::device_uvector<T>(labels.size(), handle.get_stream())...);
auto zip = get_zip_iterator(data);
thrust::transform(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
labels.begin(),
labels.end(),
zip,
property_transform<label_t, T...>(hash_bin_count));
return data;
}
// Same as above, but driven by a counting-iterator range [begin, end)
// instead of an explicit label array.
template <typename label_t>
static std::tuple<rmm::device_uvector<T>...> property(thrust::counting_iterator<label_t> begin,
thrust::counting_iterator<label_t> end,
int hash_bin_count,
raft::handle_t const& handle)
{
auto length = thrust::distance(begin, end);
auto data = std::make_tuple(rmm::device_uvector<T>(length, handle.get_stream())...);
auto zip = get_zip_iterator(data);
thrust::transform(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
begin,
end,
zip,
property_transform<label_t, T...>(hash_bin_count));
return data;
}
};
template <typename T>
struct result_compare {
constexpr auto operator()(const T& t1, const T& t2) { return (t1 == t2); }
};
// Tuple specialization: compares component-wise, using a relative tolerance
// for floating-point components and exact equality otherwise.
template <typename... Args>
struct result_compare<thrust::tuple<Args...>> {
  static constexpr double threshold_ratio{1e-3};

 private:
  template <typename T>
  bool equal(T t1, T t2)
  {
    if constexpr (std::is_floating_point_v<T>) {
      // Relative tolerance scaled by the larger value.  Fix: the hipified
      // code called the unqualified global `::max`, which is a CUDA/HIP
      // device-header artifact and is not guaranteed to resolve in host
      // code; the original CUDA source used std::max.  An explicit
      // comparison avoids the dependency entirely.
      const T larger = (t1 < t2) ? t2 : t1;
      return std::abs(t1 - t2) < (larger * threshold_ratio);
    }
    return t1 == t2;
  }
  // Folds component-wise equality over all tuple positions.
  template <typename T, std::size_t... I>
  constexpr auto equality_impl(T& t1, T& t2, std::index_sequence<I...>)
  {
    return (... && (equal(thrust::get<I>(t1), thrust::get<I>(t2))));
  }

 public:
  using Type = thrust::tuple<Args...>;
  constexpr auto operator()(const Type& t1, const Type& t2)
  {
    return equality_impl(t1, t2, std::make_index_sequence<thrust::tuple_size<Type>::value>());
  }
};
template <typename T>
struct generate : public generate_impl<T> {
static T initial_value(int init) { return static_cast<T>(init); }
};
template <typename... T>
struct generate<std::tuple<T...>> : public generate_impl<T...> {
};
struct Prims_Usecase {
bool check_correctness{true};
};
template <typename input_usecase_t>
class Tests_MG_ReduceIfV
: public ::testing::TestWithParam<std::tuple<Prims_Usecase, input_usecase_t>> {
public:
Tests_MG_ReduceIfV() {}
static void SetupTestCase() {}
static void TearDownTestCase() {}
virtual void SetUp() {}
virtual void TearDown() {}
// Compare the results of reduce_if_v primitive and thrust reduce on a single GPU
template <typename vertex_t,
typename edge_t,
typename weight_t,
typename result_t,
bool store_transposed>
void run_current_test(Prims_Usecase const& prims_usecase, input_usecase_t const& input_usecase)
{
// 1. initialize handle
raft::handle_t handle{};
HighResClock hr_clock{};
raft::comms::initialize_mpi_comms(&handle, MPI_COMM_WORLD);
auto& comm = handle.get_comms();
auto const comm_size = comm.get_size();
auto const comm_rank = comm.get_rank();
auto row_comm_size = static_cast<int>(sqrt(static_cast<double>(comm_size)));
while (comm_size % row_comm_size != 0) {
--row_comm_size;
}
cugraph::partition_2d::subcomm_factory_t<cugraph::partition_2d::key_naming_t, vertex_t>
subcomm_factory(handle, row_comm_size);
// 2. create MG graph
if (PERF) {
CUDA_TRY(hipDeviceSynchronize()); // for consistent performance measurement
handle.get_comms().barrier();
hr_clock.start();
}
auto [mg_graph, d_mg_renumber_map_labels] =
input_usecase.template construct_graph<vertex_t, edge_t, weight_t, store_transposed, true>(
handle, true, true);
if (PERF) {
CUDA_TRY(hipDeviceSynchronize()); // for consistent performance measurement
handle.get_comms().barrier();
double elapsed_time{0.0};
hr_clock.stop(&elapsed_time);
std::cout << "MG construct_graph took " << elapsed_time * 1e-6 << " s.\n";
}
auto mg_graph_view = mg_graph.view();
// 3. run MG count if
const int hash_bin_count = 5;
const int initial_value = 10;
auto property_initial_value = generate<result_t>::initial_value(initial_value);
auto property_data =
generate<result_t>::property((*d_mg_renumber_map_labels), hash_bin_count, handle);
auto property_iter = get_property_iterator(property_data);
if (PERF) {
CUDA_TRY(hipDeviceSynchronize()); // for consistent performance measurement
handle.get_comms().barrier();
hr_clock.start();
}
auto result = reduce_v(handle,
mg_graph_view,
property_iter,
property_iter + (*d_mg_renumber_map_labels).size(),
property_initial_value);
if (PERF) {
CUDA_TRY(hipDeviceSynchronize()); // for consistent performance measurement
handle.get_comms().barrier();
double elapsed_time{0.0};
hr_clock.stop(&elapsed_time);
std::cout << "MG count if took " << elapsed_time * 1e-6 << " s.\n";
}
//// 4. compare SG & MG results
if (prims_usecase.check_correctness) {
cugraph::experimental::graph_t<vertex_t, edge_t, weight_t, store_transposed, false> sg_graph(
handle);
std::tie(sg_graph, std::ignore) =
input_usecase.template construct_graph<vertex_t, edge_t, weight_t, store_transposed, false>(
handle, true, false);
auto sg_graph_view = sg_graph.view();
auto sg_property_data = generate<result_t>::property(
thrust::make_counting_iterator(sg_graph_view.get_local_vertex_first()),
thrust::make_counting_iterator(sg_graph_view.get_local_vertex_last()),
hash_bin_count,
handle);
auto sg_property_iter = get_property_iterator(sg_property_data);
using property_t = decltype(property_initial_value);
auto expected_result =
thrust::reduce(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
sg_property_iter,
sg_property_iter + sg_graph_view.get_number_of_local_vertices(),
property_initial_value,
cugraph::experimental::property_add<property_t>());
result_compare<property_t> compare;
ASSERT_TRUE(compare(expected_result, result));
}
}
};
using Tests_MG_ReduceIfV_File = Tests_MG_ReduceIfV<cugraph::test::File_Usecase>;
using Tests_MG_ReduceIfV_Rmat = Tests_MG_ReduceIfV<cugraph::test::Rmat_Usecase>;
TEST_P(Tests_MG_ReduceIfV_File, CheckInt32Int32FloatTupleIntFloatTransposeFalse)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, std::tuple<int, float>, false>(std::get<0>(param),
std::get<1>(param));
}
TEST_P(Tests_MG_ReduceIfV_Rmat, CheckInt32Int32FloatTupleIntFloatTransposeFalse)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, std::tuple<int, float>, false>(std::get<0>(param),
std::get<1>(param));
}
TEST_P(Tests_MG_ReduceIfV_File, CheckInt32Int32FloatTupleIntFloatTransposeTrue)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, std::tuple<int, float>, true>(std::get<0>(param),
std::get<1>(param));
}
TEST_P(Tests_MG_ReduceIfV_Rmat, CheckInt32Int32FloatTupleIntFloatTransposeTrue)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, std::tuple<int, float>, true>(std::get<0>(param),
std::get<1>(param));
}
TEST_P(Tests_MG_ReduceIfV_File, CheckInt32Int32FloatTransposeFalse)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, int, false>(std::get<0>(param), std::get<1>(param));
}
TEST_P(Tests_MG_ReduceIfV_Rmat, CheckInt32Int32FloatTransposeFalse)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, int, false>(std::get<0>(param), std::get<1>(param));
}
TEST_P(Tests_MG_ReduceIfV_File, CheckInt32Int32FloatTransposeTrue)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, int, true>(std::get<0>(param), std::get<1>(param));
}
TEST_P(Tests_MG_ReduceIfV_Rmat, CheckInt32Int32FloatTransposeTrue)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, int, true>(std::get<0>(param), std::get<1>(param));
}
INSTANTIATE_TEST_SUITE_P(
file_test,
Tests_MG_ReduceIfV_File,
::testing::Combine(
::testing::Values(Prims_Usecase{true}),
::testing::Values(cugraph::test::File_Usecase("test/datasets/karate.mtx"),
cugraph::test::File_Usecase("test/datasets/web-Google.mtx"),
cugraph::test::File_Usecase("test/datasets/ljournal-2008.mtx"),
cugraph::test::File_Usecase("test/datasets/webbase-1M.mtx"))));
INSTANTIATE_TEST_SUITE_P(
rmat_small_test,
Tests_MG_ReduceIfV_Rmat,
::testing::Combine(::testing::Values(Prims_Usecase{true}),
::testing::Values(cugraph::test::Rmat_Usecase(
10, 16, 0.57, 0.19, 0.19, 0, false, false, 0, true))));
INSTANTIATE_TEST_SUITE_P(
rmat_large_test,
Tests_MG_ReduceIfV_Rmat,
::testing::Combine(::testing::Values(Prims_Usecase{false}),
::testing::Values(cugraph::test::Rmat_Usecase(
20, 32, 0.57, 0.19, 0.19, 0, false, false, 0, true))));
CUGRAPH_MG_TEST_PROGRAM_MAIN()
| 997b7eb7b4da74fa4cb633bea53205696a955569.cu | /*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <utilities/high_res_clock.h>
#include <utilities/base_fixture.hpp>
#include <utilities/device_comm_wrapper.hpp>
#include <utilities/test_graphs.hpp>
#include <utilities/test_utilities.hpp>
#include <utilities/thrust_wrapper.hpp>
#include <cugraph/algorithms.hpp>
#include <cugraph/partition_manager.hpp>
#include <cuco/detail/hash_functions.cuh>
#include <cugraph/experimental/graph_view.hpp>
#include <cugraph/prims/reduce_v.cuh>
#include <thrust/count.h>
#include <raft/comms/comms.hpp>
#include <raft/comms/mpi_comms.hpp>
#include <raft/handle.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
#include <gtest/gtest.h>
#include <random>
// do the perf measurements
// enabled by command line parameter s'--perf'
//
static int PERF = 0;
// Deterministically maps a vertex id to a tuple of property values: hashes
// the id with MurmurHash3, reduces it modulo `mod`, and casts the result to
// each requested property type T.  Used to fabricate reproducible per-vertex
// test properties.
template <typename vertex_t, typename... T>
struct property_transform : public thrust::unary_function<vertex_t, thrust::tuple<T...>> {
int mod{};  // number of hash bins; every produced value lies in [0, mod)
property_transform(int mod_count) : mod(mod_count) {}
__device__ auto operator()(const vertex_t& val)
{
cuco::detail::MurmurHash3_32<vertex_t> hash_func{};
auto value = hash_func(val) % mod;
return thrust::make_tuple(static_cast<T>(value)...);
}
};
// Specialization that unwraps a tuple-like property type (e.g. Tuple<int,
// float>) into the variadic form above.
template <typename vertex_t, template <typename...> typename Tuple, typename... T>
struct property_transform<vertex_t, Tuple<T...>> : public property_transform<vertex_t, T...> {
};
template <typename Tuple, std::size_t... I>
auto make_iterator_tuple(Tuple& data, std::index_sequence<I...>)
{
return thrust::make_tuple((std::get<I>(data).begin())...);
}
template <typename... T>
auto get_zip_iterator(std::tuple<T...>& data)
{
return thrust::make_zip_iterator(make_iterator_tuple(
data, std::make_index_sequence<std::tuple_size<std::tuple<T...>>::value>()));
}
template <typename T>
auto get_property_iterator(std::tuple<T>& data)
{
return (std::get<0>(data)).begin();
}
template <typename T0, typename... T>
auto get_property_iterator(std::tuple<T0, T...>& data)
{
return get_zip_iterator(data);
}
template <typename... T>
struct generate_impl {
static thrust::tuple<T...> initial_value(int init)
{
return thrust::make_tuple(static_cast<T>(init)...);
}
template <typename label_t>
static std::tuple<rmm::device_uvector<T>...> property(rmm::device_uvector<label_t>& labels,
int hash_bin_count,
raft::handle_t const& handle)
{
auto data = std::make_tuple(rmm::device_uvector<T>(labels.size(), handle.get_stream())...);
auto zip = get_zip_iterator(data);
thrust::transform(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
labels.begin(),
labels.end(),
zip,
property_transform<label_t, T...>(hash_bin_count));
return data;
}
template <typename label_t>
static std::tuple<rmm::device_uvector<T>...> property(thrust::counting_iterator<label_t> begin,
thrust::counting_iterator<label_t> end,
int hash_bin_count,
raft::handle_t const& handle)
{
auto length = thrust::distance(begin, end);
auto data = std::make_tuple(rmm::device_uvector<T>(length, handle.get_stream())...);
auto zip = get_zip_iterator(data);
thrust::transform(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
begin,
end,
zip,
property_transform<label_t, T...>(hash_bin_count));
return data;
}
};
template <typename T>
struct result_compare {
constexpr auto operator()(const T& t1, const T& t2) { return (t1 == t2); }
};
// Tuple specialization: compares component-wise, using a relative tolerance
// (threshold_ratio times the larger value) for floating-point components and
// exact equality for everything else.
template <typename... Args>
struct result_compare<thrust::tuple<Args...>> {
static constexpr double threshold_ratio{1e-3};
private:
template <typename T>
bool equal(T t1, T t2)
{
if constexpr (std::is_floating_point_v<T>) {
// Relative comparison scaled by the larger magnitude.
return std::abs(t1 - t2) < (std::max(t1, t2) * threshold_ratio);
}
return t1 == t2;
}
// Folds component-wise equality over all tuple positions.
template <typename T, std::size_t... I>
constexpr auto equality_impl(T& t1, T& t2, std::index_sequence<I...>)
{
return (... && (equal(thrust::get<I>(t1), thrust::get<I>(t2))));
}
public:
using Type = thrust::tuple<Args...>;
constexpr auto operator()(const Type& t1, const Type& t2)
{
return equality_impl(t1, t2, std::make_index_sequence<thrust::tuple_size<Type>::value>());
}
};
template <typename T>
struct generate : public generate_impl<T> {
static T initial_value(int init) { return static_cast<T>(init); }
};
template <typename... T>
struct generate<std::tuple<T...>> : public generate_impl<T...> {
};
struct Prims_Usecase {
bool check_correctness{true};
};
// Parameterized multi-GPU test fixture: runs the reduce_v primitive on an MG
// graph and checks it against a single-GPU thrust::reduce over the same
// per-vertex property values.
template <typename input_usecase_t>
class Tests_MG_ReduceIfV
  : public ::testing::TestWithParam<std::tuple<Prims_Usecase, input_usecase_t>> {
 public:
  Tests_MG_ReduceIfV() {}
  static void SetupTestCase() {}
  static void TearDownTestCase() {}
  virtual void SetUp() {}
  virtual void TearDown() {}

  // Compare the results of reduce_v primitive and thrust reduce on a single GPU
  template <typename vertex_t,
            typename edge_t,
            typename weight_t,
            typename result_t,
            bool store_transposed>
  void run_current_test(Prims_Usecase const& prims_usecase, input_usecase_t const& input_usecase)
  {
    // 1. initialize handle
    raft::handle_t handle{};
    HighResClock hr_clock{};
    raft::comms::initialize_mpi_comms(&handle, MPI_COMM_WORLD);
    auto& comm           = handle.get_comms();
    auto const comm_size = comm.get_size();
    auto const comm_rank = comm.get_rank();
    // Choose the largest divisor of comm_size that is <= sqrt(comm_size) as
    // the row-communicator size for the 2D partitioning.
    auto row_comm_size = static_cast<int>(sqrt(static_cast<double>(comm_size)));
    while (comm_size % row_comm_size != 0) {
      --row_comm_size;
    }
    cugraph::partition_2d::subcomm_factory_t<cugraph::partition_2d::key_naming_t, vertex_t>
      subcomm_factory(handle, row_comm_size);
    // 2. create MG graph
    if (PERF) {
      CUDA_TRY(cudaDeviceSynchronize());  // for consistent performance measurement
      handle.get_comms().barrier();
      hr_clock.start();
    }
    auto [mg_graph, d_mg_renumber_map_labels] =
      input_usecase.template construct_graph<vertex_t, edge_t, weight_t, store_transposed, true>(
        handle, true, true);
    if (PERF) {
      CUDA_TRY(cudaDeviceSynchronize());  // for consistent performance measurement
      handle.get_comms().barrier();
      double elapsed_time{0.0};
      hr_clock.stop(&elapsed_time);
      std::cout << "MG construct_graph took " << elapsed_time * 1e-6 << " s.\n";
    }
    auto mg_graph_view = mg_graph.view();
    // 3. run MG reduce_v
    const int hash_bin_count = 5;
    const int initial_value  = 10;
    auto property_initial_value = generate<result_t>::initial_value(initial_value);
    auto property_data =
      generate<result_t>::property((*d_mg_renumber_map_labels), hash_bin_count, handle);
    auto property_iter = get_property_iterator(property_data);
    if (PERF) {
      CUDA_TRY(cudaDeviceSynchronize());  // for consistent performance measurement
      handle.get_comms().barrier();
      hr_clock.start();
    }
    auto result = reduce_v(handle,
                           mg_graph_view,
                           property_iter,
                           property_iter + (*d_mg_renumber_map_labels).size(),
                           property_initial_value);
    if (PERF) {
      CUDA_TRY(cudaDeviceSynchronize());  // for consistent performance measurement
      handle.get_comms().barrier();
      double elapsed_time{0.0};
      hr_clock.stop(&elapsed_time);
      // NOTE: message previously said "MG count if" -- a copy-paste from the
      // count_if test; this test runs reduce_v.
      std::cout << "MG reduce_v took " << elapsed_time * 1e-6 << " s.\n";
    }
    //// 4. compare SG & MG results
    if (prims_usecase.check_correctness) {
      // Rebuild the same graph on a single GPU and reduce its per-vertex
      // properties with plain thrust::reduce as the reference.
      cugraph::experimental::graph_t<vertex_t, edge_t, weight_t, store_transposed, false> sg_graph(
        handle);
      std::tie(sg_graph, std::ignore) =
        input_usecase.template construct_graph<vertex_t, edge_t, weight_t, store_transposed, false>(
          handle, true, false);
      auto sg_graph_view = sg_graph.view();
      auto sg_property_data = generate<result_t>::property(
        thrust::make_counting_iterator(sg_graph_view.get_local_vertex_first()),
        thrust::make_counting_iterator(sg_graph_view.get_local_vertex_last()),
        hash_bin_count,
        handle);
      auto sg_property_iter = get_property_iterator(sg_property_data);
      using property_t = decltype(property_initial_value);
      auto expected_result =
        thrust::reduce(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
                       sg_property_iter,
                       sg_property_iter + sg_graph_view.get_number_of_local_vertices(),
                       property_initial_value,
                       cugraph::experimental::property_add<property_t>());
      result_compare<property_t> compare;
      ASSERT_TRUE(compare(expected_result, result));
    }
  }
};
// Concrete fixture instantiations for file-based and R-mat-generated inputs.
using Tests_MG_ReduceIfV_File = Tests_MG_ReduceIfV<cugraph::test::File_Usecase>;
using Tests_MG_ReduceIfV_Rmat = Tests_MG_ReduceIfV<cugraph::test::Rmat_Usecase>;

// Tuple-valued (int, float) per-vertex properties, both transpose layouts.
TEST_P(Tests_MG_ReduceIfV_File, CheckInt32Int32FloatTupleIntFloatTransposeFalse)
{
  auto param = GetParam();
  run_current_test<int32_t, int32_t, float, std::tuple<int, float>, false>(std::get<0>(param),
                                                                           std::get<1>(param));
}
TEST_P(Tests_MG_ReduceIfV_Rmat, CheckInt32Int32FloatTupleIntFloatTransposeFalse)
{
  auto param = GetParam();
  run_current_test<int32_t, int32_t, float, std::tuple<int, float>, false>(std::get<0>(param),
                                                                           std::get<1>(param));
}
TEST_P(Tests_MG_ReduceIfV_File, CheckInt32Int32FloatTupleIntFloatTransposeTrue)
{
  auto param = GetParam();
  run_current_test<int32_t, int32_t, float, std::tuple<int, float>, true>(std::get<0>(param),
                                                                          std::get<1>(param));
}
TEST_P(Tests_MG_ReduceIfV_Rmat, CheckInt32Int32FloatTupleIntFloatTransposeTrue)
{
  auto param = GetParam();
  run_current_test<int32_t, int32_t, float, std::tuple<int, float>, true>(std::get<0>(param),
                                                                          std::get<1>(param));
}

// Scalar int per-vertex properties, both transpose layouts.
TEST_P(Tests_MG_ReduceIfV_File, CheckInt32Int32FloatTransposeFalse)
{
  auto param = GetParam();
  run_current_test<int32_t, int32_t, float, int, false>(std::get<0>(param), std::get<1>(param));
}
TEST_P(Tests_MG_ReduceIfV_Rmat, CheckInt32Int32FloatTransposeFalse)
{
  auto param = GetParam();
  run_current_test<int32_t, int32_t, float, int, false>(std::get<0>(param), std::get<1>(param));
}
TEST_P(Tests_MG_ReduceIfV_File, CheckInt32Int32FloatTransposeTrue)
{
  auto param = GetParam();
  run_current_test<int32_t, int32_t, float, int, true>(std::get<0>(param), std::get<1>(param));
}
TEST_P(Tests_MG_ReduceIfV_Rmat, CheckInt32Int32FloatTransposeTrue)
{
  auto param = GetParam();
  run_current_test<int32_t, int32_t, float, int, true>(std::get<0>(param), std::get<1>(param));
}

// Small datasets / small R-mat graphs verify correctness; the large R-mat
// instantiation disables the correctness check (perf-only).
INSTANTIATE_TEST_SUITE_P(
  file_test,
  Tests_MG_ReduceIfV_File,
  ::testing::Combine(
    ::testing::Values(Prims_Usecase{true}),
    ::testing::Values(cugraph::test::File_Usecase("test/datasets/karate.mtx"),
                      cugraph::test::File_Usecase("test/datasets/web-Google.mtx"),
                      cugraph::test::File_Usecase("test/datasets/ljournal-2008.mtx"),
                      cugraph::test::File_Usecase("test/datasets/webbase-1M.mtx"))));
INSTANTIATE_TEST_SUITE_P(
  rmat_small_test,
  Tests_MG_ReduceIfV_Rmat,
  ::testing::Combine(::testing::Values(Prims_Usecase{true}),
                     ::testing::Values(cugraph::test::Rmat_Usecase(
                       10, 16, 0.57, 0.19, 0.19, 0, false, false, 0, true))));
INSTANTIATE_TEST_SUITE_P(
  rmat_large_test,
  Tests_MG_ReduceIfV_Rmat,
  ::testing::Combine(::testing::Values(Prims_Usecase{false}),
                     ::testing::Values(cugraph::test::Rmat_Usecase(
                       20, 32, 0.57, 0.19, 0.19, 0, false, false, 0, true))));

CUGRAPH_MG_TEST_PROGRAM_MAIN()
|
d5ba64bbeea9245a06c8bf19b563b78db1b8c978.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <type_traits>
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/NestedTensorImpl.h>
#include <ATen/TensorAccessor.h>
#include <c10/util/Logging.h>
#include <c10/util/bit_cast.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/detail/KernelUtils.h>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/native/NonSymbolicBC.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/MemoryAccess.cuh>
#include <ATen/native/hip/PersistentSoftmax.cuh>
#include <ATen/native/hip/block_reduce.cuh>
#include <c10/hip/HIPMathCompat.h>
#include <ATen/native/transformers/attention.h>
#include <ATen/native/nested/NestedTensorUtils.h>
#include <ATen/native/nested/NestedTensorTransformerFunctions.h>
#include <ATen/native/nested/NestedTensorUtils.h>
#include <ATen/native/transformers/hip/sdp_utils.h>
#include <ATen/native/transformers/sdp_utils_cpp.h>
#ifdef USE_FLASH_ATTENTION
// FlashAttention Specific Imports
#include <ATen/native/transformers/hip/flash_attn/fmha_api.h>
// MemoryEfficient Attention Specific Imports
#include <ATen/native/transformers/hip/mem_eff_attention/kernel_forward.h>
#include <ATen/native/transformers/hip/mem_eff_attention/kernels/cutlassF.h>
#include <ATen/native/transformers/hip/mem_eff_attention/pytorch_utils.h>
#endif
namespace at {
namespace native {
namespace {
// Vector width (elements per thread) used by the aligned fast paths of the
// transform_bias_rescale kernels below.
static constexpr int TRANSFORM_BIAS_RESCALE_VEC = 4;
// Fused kernel: adds qkv_bias to the packed [B, T, 3*D] projection, scales
// the q slice by inv_sqrt_dim_per_head, and scatters the result into q_k_v
// with layout [3, B, NH, T, DH] (D == NH * DH).
// Launch shape (see host dispatch below): gridDim.x == B * T, one block per
// (batch, token) pair; threads stride over the feature dimension D.
// assume_aligned enables vectorized loads/stores and requires
// D % TRANSFORM_BIAS_RESCALE_VEC == 0 with suitably aligned pointers.
template <typename scalar_t, typename accscalar_t, bool assume_aligned>
__global__ void transform_bias_rescale_qkv_kernel(
    // [B, T, 3 * D]
    const PackedTensorAccessor64<scalar_t, 3, RestrictPtrTraits> qkv,
    // [3 * D]
    const PackedTensorAccessor64<scalar_t, 1, RestrictPtrTraits> qkv_bias,
    // [3, B, NH, T, DH]
    PackedTensorAccessor64<scalar_t, 5, RestrictPtrTraits> q_k_v,
    const scalar_t inv_sqrt_dim_per_head) {
  auto NH = q_k_v.size(2);
  auto T = q_k_v.size(3);
  auto DH = q_k_v.size(4);
  // Decode the (batch, token) pair from the flat 1-D block index.
  auto t = blockIdx.x % T;
  auto b = blockIdx.x / T;
  auto D = NH * DH;
  if (assume_aligned) {
    // Vectorized path: each thread handles VEC consecutive features.
    constexpr int VEC = TRANSFORM_BIAS_RESCALE_VEC;
    using LoadT = memory::aligned_vector<scalar_t, VEC>;
    for (int32_t d_v = threadIdx.x; d_v < D / VEC; d_v += blockDim.x) {
      auto d = d_v * VEC;
      auto nh = d / DH;
      auto dh = d % DH;
      scalar_t qkv_bias_q[VEC];
      scalar_t qkv_bias_k[VEC];
      scalar_t qkv_bias_v[VEC];
      scalar_t qkv_q[VEC];
      scalar_t qkv_k[VEC];
      scalar_t qkv_v[VEC];
      // Here we require D % VEC == 0 for these vectorized loads.
      *reinterpret_cast<LoadT*>(&qkv_bias_q) =
          *reinterpret_cast<const LoadT*>(&qkv_bias[d + 0 * D]);
      *reinterpret_cast<LoadT*>(&qkv_bias_k) =
          *reinterpret_cast<const LoadT*>(&qkv_bias[d + 1 * D]);
      *reinterpret_cast<LoadT*>(&qkv_bias_v) =
          *reinterpret_cast<const LoadT*>(&qkv_bias[d + 2 * D]);
      *reinterpret_cast<LoadT*>(&qkv_q) =
          *reinterpret_cast<const LoadT*>(&qkv[b][t][d + 0 * D]);
      *reinterpret_cast<LoadT*>(&qkv_k) =
          *reinterpret_cast<const LoadT*>(&qkv[b][t][d + 1 * D]);
      *reinterpret_cast<LoadT*>(&qkv_v) =
          *reinterpret_cast<const LoadT*>(&qkv[b][t][d + 2 * D]);
#pragma unroll
      // TODO: specialize for float2half2/half2float2?
      // Arithmetic is carried out in accscalar_t, then narrowed back.
      for (auto ii = 0; ii < VEC; ++ii) {
        qkv_q[ii] = static_cast<scalar_t>(
            (static_cast<accscalar_t>(qkv_q[ii]) +
             static_cast<accscalar_t>(qkv_bias_q[ii])) *
            static_cast<accscalar_t>(inv_sqrt_dim_per_head));
        qkv_k[ii] = static_cast<scalar_t>(
            (static_cast<accscalar_t>(qkv_k[ii]) +
             static_cast<accscalar_t>(qkv_bias_k[ii])));
        qkv_v[ii] = static_cast<scalar_t>(
            (static_cast<accscalar_t>(qkv_v[ii]) +
             static_cast<accscalar_t>(qkv_bias_v[ii])));
      }
      // Here we require DH % VEC == 0 for these vectorized stores.
      *reinterpret_cast<LoadT*>(&q_k_v[0][b][nh][t][dh]) =
          *reinterpret_cast<const LoadT*>(&qkv_q);
      *reinterpret_cast<LoadT*>(&q_k_v[1][b][nh][t][dh]) =
          *reinterpret_cast<const LoadT*>(&qkv_k);
      *reinterpret_cast<LoadT*>(&q_k_v[2][b][nh][t][dh]) =
          *reinterpret_cast<const LoadT*>(&qkv_v);
    }
  } else {
    // Same as above, but we can't vectorize memory access.
    for (int32_t d = threadIdx.x; d < D; d += blockDim.x) {
      auto nh = d / DH;
      auto dh = d % DH;
      scalar_t qkv_bias_q = qkv_bias[d + 0 * D];
      scalar_t qkv_bias_k = qkv_bias[d + 1 * D];
      scalar_t qkv_bias_v = qkv_bias[d + 2 * D];
      scalar_t qkv_q = qkv[b][t][d + 0 * D];
      scalar_t qkv_k = qkv[b][t][d + 1 * D];
      scalar_t qkv_v = qkv[b][t][d + 2 * D];
      qkv_q = static_cast<scalar_t>(
          (static_cast<accscalar_t>(qkv_q) +
           static_cast<accscalar_t>(qkv_bias_q)) *
          static_cast<accscalar_t>(inv_sqrt_dim_per_head));
      qkv_k = static_cast<scalar_t>(
          (static_cast<accscalar_t>(qkv_k) +
           static_cast<accscalar_t>(qkv_bias_k)));
      qkv_v = static_cast<scalar_t>(
          (static_cast<accscalar_t>(qkv_v) +
           static_cast<accscalar_t>(qkv_bias_v)));
      q_k_v[0][b][nh][t][dh] = qkv_q;
      q_k_v[1][b][nh][t][dh] = qkv_k;
      q_k_v[2][b][nh][t][dh] = qkv_v;
    }
  }
}
// NestedTensor variant of transform_bias_rescale_qkv_kernel: qkv is a packed
// 1-D buffer of variable-length rows, offsets[b] gives each batch item's
// start within the buffer, and input_sizes holds the per-batch element count
// (dims 1 and 2 collapsed by the caller, so sizes_i[0] == T_b * 3 * D).
// Output slots past a batch item's real length are zero-padded.
// Launch shape matches the dense kernel: gridDim.x == B * T (padded T).
template <typename scalar_t, typename accscalar_t, bool assume_aligned = false>
__global__ void transform_bias_rescale_qkv_add_padding_kernel(
    // [B, T, 3 * D], but it's a NestedTensor buffer
    const PackedTensorAccessor64<scalar_t, 1, RestrictPtrTraits> qkv,
    // [3 * D]
    const PackedTensorAccessor64<scalar_t, 1, RestrictPtrTraits> qkv_bias,
    const int* offsets,
    const int* input_sizes,
    // [3, B, NH, T, DH]
    PackedTensorAccessor64<scalar_t, 5, RestrictPtrTraits> q_k_v,
    const scalar_t inv_sqrt_dim_per_head) {
  const auto NH = q_k_v.size(2);
  const auto T = q_k_v.size(3);
  const auto DH = q_k_v.size(4);
  // Decode the (batch, token) pair from the flat 1-D block index.
  const auto t = blockIdx.x % T;
  const auto b = blockIdx.x / T;
  const auto D = NH * DH;
  const auto _3D = 3 * D;
  const auto offset_for_batch = offsets[b];
  const auto input_dim = 1;
  const auto* sizes_i = input_sizes + b * input_dim;
  if (assume_aligned) {
    constexpr int VEC = TRANSFORM_BIAS_RESCALE_VEC;
    using LoadT = memory::aligned_vector<scalar_t, VEC>;
    for (int32_t d_v = threadIdx.x; d_v < D / VEC; d_v += blockDim.x) {
      auto d = d_v * VEC;
      auto nh = d / DH;
      auto dh = d % DH;
      scalar_t qkv_bias_q[VEC];
      scalar_t qkv_bias_k[VEC];
      scalar_t qkv_bias_v[VEC];
      scalar_t qkv_q[VEC];
      scalar_t qkv_k[VEC];
      scalar_t qkv_v[VEC];
      // Within-batch offsets of this vector's first and last element; bounds
      // are always checked against the per-batch size sizes_i[0].
      const auto first_item_offset = t * _3D + d;
      const auto last_item_offset = first_item_offset + VEC - 1;
      const bool first_item_in_bounds = first_item_offset < sizes_i[0];
      const bool entire_vec_in_bounds = last_item_offset < sizes_i[0];
      // Here we require D % VEC == 0 for these vectorized loads.
      *reinterpret_cast<LoadT*>(&qkv_bias_q) =
          *reinterpret_cast<const LoadT*>(&qkv_bias[d + 0 * D]);
      *reinterpret_cast<LoadT*>(&qkv_bias_k) =
          *reinterpret_cast<const LoadT*>(&qkv_bias[d + 1 * D]);
      *reinterpret_cast<LoadT*>(&qkv_bias_v) =
          *reinterpret_cast<const LoadT*>(&qkv_bias[d + 2 * D]);
      if (entire_vec_in_bounds) {
        const auto offset = offset_for_batch + first_item_offset;
        *reinterpret_cast<LoadT*>(&qkv_q) =
            *reinterpret_cast<const LoadT*>(&qkv[offset + 0 * D]);
        *reinterpret_cast<LoadT*>(&qkv_k) =
            *reinterpret_cast<const LoadT*>(&qkv[offset + 1 * D]);
        *reinterpret_cast<LoadT*>(&qkv_v) =
            *reinterpret_cast<const LoadT*>(&qkv[offset + 2 * D]);
#pragma unroll
        // TODO: specialize for float2half2/half2float2?
        for (auto ii = 0; ii < VEC; ++ii) {
          qkv_q[ii] = static_cast<scalar_t>(
              (static_cast<accscalar_t>(qkv_q[ii]) +
               static_cast<accscalar_t>(qkv_bias_q[ii])) *
              static_cast<accscalar_t>(inv_sqrt_dim_per_head));
          qkv_k[ii] = static_cast<scalar_t>(
              (static_cast<accscalar_t>(qkv_k[ii]) +
               static_cast<accscalar_t>(qkv_bias_k[ii])));
          qkv_v[ii] = static_cast<scalar_t>(
              (static_cast<accscalar_t>(qkv_v[ii]) +
               static_cast<accscalar_t>(qkv_bias_v[ii])));
        }
      } else if (first_item_in_bounds) {
        // The vector straddles the end of this batch item: handle element 0,
        // then the remaining lanes individually with per-element checks.
        const auto offset = offset_for_batch + first_item_offset;
        qkv_q[0] = qkv[offset + 0 * D];
        qkv_k[0] = qkv[offset + 1 * D];
        qkv_v[0] = qkv[offset + 2 * D];
        qkv_q[0] = static_cast<scalar_t>(
            (static_cast<accscalar_t>(qkv_q[0]) +
             static_cast<accscalar_t>(qkv_bias_q[0])) *
            static_cast<accscalar_t>(inv_sqrt_dim_per_head));
        qkv_k[0] = static_cast<scalar_t>(
            (static_cast<accscalar_t>(qkv_k[0]) +
             static_cast<accscalar_t>(qkv_bias_k[0])));
        qkv_v[0] = static_cast<scalar_t>(
            (static_cast<accscalar_t>(qkv_v[0]) +
             static_cast<accscalar_t>(qkv_bias_v[0])));
#pragma unroll
        for (auto ii = 1; ii < VEC; ++ii) {
          // BUGFIX: bounds must be checked against the *within-batch* offset
          // (first_item_offset + ii) like every other check in this kernel,
          // not the global buffer offset (offset + ii): the latter includes
          // offset_for_batch and would spuriously zero-pad valid elements
          // for every batch item with a nonzero start offset.
          if (first_item_offset + ii < sizes_i[0]) {
            const auto loop_offset = offset + ii;
            qkv_q[ii] = qkv[loop_offset + 0 * D];
            qkv_k[ii] = qkv[loop_offset + 1 * D];
            qkv_v[ii] = qkv[loop_offset + 2 * D];
            qkv_q[ii] = static_cast<scalar_t>(
                (static_cast<accscalar_t>(qkv_q[ii]) +
                 static_cast<accscalar_t>(qkv_bias_q[ii])) *
                static_cast<accscalar_t>(inv_sqrt_dim_per_head));
            qkv_k[ii] = static_cast<scalar_t>(
                (static_cast<accscalar_t>(qkv_k[ii]) +
                 static_cast<accscalar_t>(qkv_bias_k[ii])));
            qkv_v[ii] = static_cast<scalar_t>(
                (static_cast<accscalar_t>(qkv_v[ii]) +
                 static_cast<accscalar_t>(qkv_bias_v[ii])));
          } else {
            qkv_q[ii] = 0;
            qkv_k[ii] = 0;
            qkv_v[ii] = 0;
          }
        }
      } else {
        // Entire vector lies past the end of this batch item: pure padding.
#pragma unroll
        for (auto ii = 0; ii < VEC; ++ii) {
          qkv_q[ii] = 0;
          qkv_k[ii] = 0;
          qkv_v[ii] = 0;
        }
      }
      // Here we require DH % VEC == 0 for these vectorized stores.
      *reinterpret_cast<LoadT*>(&q_k_v[0][b][nh][t][dh]) =
          *reinterpret_cast<const LoadT*>(&qkv_q);
      *reinterpret_cast<LoadT*>(&q_k_v[1][b][nh][t][dh]) =
          *reinterpret_cast<const LoadT*>(&qkv_k);
      *reinterpret_cast<LoadT*>(&q_k_v[2][b][nh][t][dh]) =
          *reinterpret_cast<const LoadT*>(&qkv_v);
    }
  } else {
    // Scalar fallback: one element per loop iteration, per-element bounds
    // check against the per-batch size.
    for (int32_t d = threadIdx.x; d < D; d += blockDim.x) {
      auto nh = d / DH;
      auto dh = d % DH;
      scalar_t qkv_bias_q = qkv_bias[d + 0 * D];
      scalar_t qkv_bias_k = qkv_bias[d + 1 * D];
      scalar_t qkv_bias_v = qkv_bias[d + 2 * D];
      const auto item_offset = t * _3D + d;
      const bool in_bounds = item_offset < sizes_i[0];
      scalar_t qkv_q, qkv_k, qkv_v;
      if (in_bounds) {
        const auto qkv_offset = offset_for_batch + item_offset;
        qkv_q = qkv[qkv_offset + 0 * D];
        qkv_k = qkv[qkv_offset + 1 * D];
        qkv_v = qkv[qkv_offset + 2 * D];
        qkv_q = static_cast<scalar_t>(
            (static_cast<accscalar_t>(qkv_q) +
             static_cast<accscalar_t>(qkv_bias_q)) *
            static_cast<accscalar_t>(inv_sqrt_dim_per_head));
        qkv_k = static_cast<scalar_t>(
            (static_cast<accscalar_t>(qkv_k) +
             static_cast<accscalar_t>(qkv_bias_k)));
        qkv_v = static_cast<scalar_t>(
            (static_cast<accscalar_t>(qkv_v) +
             static_cast<accscalar_t>(qkv_bias_v)));
      } else {
        qkv_q = 0;
        qkv_k = 0;
        qkv_v = 0;
      }
      q_k_v[0][b][nh][t][dh] = qkv_q;
      q_k_v[1][b][nh][t][dh] = qkv_k;
      q_k_v[2][b][nh][t][dh] = qkv_v;
    }
  }
}
// Multiply column 0 and column 1 of the [*, 2] nested-size tensor together,
// producing one collapsed size per row, materialized contiguously.
Tensor collapse_dims_1_and_2(const Tensor& sizes) {
  return (at::native::narrow_symint(sizes, 1, 0, 1) *
          at::native::narrow_symint(sizes, 1, 1, 1))
      .contiguous();
}
} // namespace
// compute q = (q + q_bias) / sqrt(dim_per_head), k = k + k_bias, v = v + v_bias
// Splits a packed QKV projection [B, T, 3*D] into separate q/k/v tensors of
// shape [B, num_head, T, dim_per_head], fusing the bias add and the
// 1/sqrt(dim_per_head) scaling of q into a single kernel. Supports both
// dense and nested (variable sequence length) inputs; nested rows are
// zero-padded and T is rounded up to a multiple of 8.
__host__ std::tuple<Tensor, Tensor, Tensor> transform_bias_rescale_qkv_cuda(
    const Tensor& qkv,
    const Tensor& qkv_bias,
    const int64_t num_head) {
  auto B = qkv.is_nested()
      ? get_nested_tensor_impl(qkv)->get_nested_sizes().size(0)
      : qkv.size(0);
  // TODO: calculate this without the std::vector -- NestedTensor_to_mask wants
  // this too
  auto T = qkv.is_nested()
      ? NestedTensor_get_max_size(*get_nested_tensor_impl(qkv))[0]
      : qkv.size(1);
  if (qkv.is_nested()) {
    // Don't mess with non-nested case for now since it's not set up to fiddle
    // with mask size.
    // Round T up to next multiple of 8 so as to be able to utilize Tensor
    // cores. Otherwise, sometimes with padding, *no* row will have the maximum
    // sequence length and so we'll have a non-divisible-by-8 dimension even if
    // the model author chose a multiple of 8.
    T = T + (8 - (T % 8)) % 8;
  }
  auto _3D = qkv_bias.size(0);
  auto D = _3D / 3;
  TORCH_CHECK(D % num_head == 0);
  const auto dim_per_head = D / num_head;
  // Single [3, B, NH, T, DH] output; split into q/k/v views at the end.
  auto q_k_v = at::empty({3, B, num_head, T, dim_per_head}, qkv_bias.options());
// Launch helper for the dense input kernel.
#define CALL_KERNEL(assume_aligned)                                          \
  hipLaunchKernelGGL(( transform_bias_rescale_qkv_kernel<scalar_t, accscalar_t, assume_aligned>) \
      , dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),  \
          qkv.packed_accessor64<scalar_t, 3, RestrictPtrTraits>(),           \
          qkv_bias.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(),      \
          q_k_v.packed_accessor64<scalar_t, 5, RestrictPtrTraits>(),         \
          1.0 / std::sqrt(static_cast<scalar_t>(dim_per_head)))
// Launch helper for the nested-tensor (padding) kernel.
#define CALL_ADD_PADDING_KERNEL(assume_aligned)                              \
  hipLaunchKernelGGL(( transform_bias_rescale_qkv_add_padding_kernel<        \
      scalar_t,                                                              \
      accscalar_t,                                                           \
      assume_aligned>)                                                       \
      , dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),  \
          nt_qkv_buffer                                                      \
              .packed_accessor64<scalar_t, 1, RestrictPtrTraits>(),          \
          qkv_bias.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(),      \
          offsets_ptr,                                                       \
          sizes_ptr,                                                         \
          q_k_v.packed_accessor64<scalar_t, 5, RestrictPtrTraits>(),         \
          1.0 / std::sqrt(static_cast<scalar_t>(dim_per_head)))
  AT_DISPATCH_FLOATING_TYPES_AND2(
      ScalarType::Half,
      ScalarType::BFloat16,
      qkv.scalar_type(),
      "transform_bias_rescale_qkv",
      [&] {
        using accscalar_t = acc_type<scalar_t, true>;
        // One block per (batch, token); threads cover the feature dim.
        auto threads = ::max(
            std::min<int32_t>(1024, D / TRANSFORM_BIAS_RESCALE_VEC), 1);
        auto blocks = B * T;
        // The vectorized kernel path needs both a VEC-divisible head dim and
        // a VEC-aligned bias pointer.
        const bool aligned =
            ((dim_per_head % TRANSFORM_BIAS_RESCALE_VEC) == 0) &&
            ((reinterpret_cast<intptr_t>(qkv_bias.data_ptr()) %
              TRANSFORM_BIAS_RESCALE_VEC) == 0);
        if (aligned) {
          TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
              D % TRANSFORM_BIAS_RESCALE_VEC == 0,
              "D = num_heads * dim_per_head, so we should have dim_per_head % "
              "TRANSFORM_BIAS_RESCALE_VEC == 0 => "
              "D % TRANSFORM_BIAS_RESCALE_VEC == 0");
        }
        if (qkv.is_nested()) {
          // Build [offsets..., sizes...] metadata on device: batch start
          // offsets into the nested buffer followed by per-batch sizes.
          auto* nt_qkv = get_nested_tensor_impl(qkv);
          const at::Tensor& nt_qkv_buffer = nt_qkv->get_buffer();
          auto sizes = collapse_dims_1_and_2(nt_qkv->get_nested_sizes());
          auto offsets =
              NestedTensor_batch_offsets_from_size_tensor(sizes, sizes.numel());
          at::native::narrow_symint(offsets, 0, sizes.numel() + 1, sizes.numel())
              .copy_(sizes.reshape({-1}));
          auto metadata = offsets.to(at::Device(kCUDA), at::kInt, true, true);
          const auto offsets_ptr = metadata.data_ptr<int>();
          const auto sizes_ptr = offsets_ptr + sizes.numel() + 1;
          const auto input_dim = sizes.sizes()[1];
          TORCH_INTERNAL_ASSERT_DEBUG_ONLY(input_dim == 1);
          // The nested-buffer path also needs the data pointer itself to be
          // VEC-aligned before taking the vectorized kernel.
          if (aligned &&
              ((reinterpret_cast<intptr_t>(qkv.data_ptr()) %
                TRANSFORM_BIAS_RESCALE_VEC) == 0)) {
            CALL_ADD_PADDING_KERNEL(true);
          } else {
            CALL_ADD_PADDING_KERNEL(false);
          }
        } else if (aligned) {
          CALL_KERNEL(true);
        } else {
          CALL_KERNEL(false);
        }
        C10_HIP_KERNEL_LAUNCH_CHECK();
      });
#undef CALL_ADD_PADDING_KERNEL
#undef CALL_KERNEL
  auto q_k_v_s =
      at::native::split(q_k_v.view({3 * B, num_head, T, dim_per_head}), B, 0);
  return std::make_tuple(q_k_v_s[0], q_k_v_s[1], q_k_v_s[2]);
}
// Full multi-head attention forward on CUDA/HIP: packed QKV projection,
// bias-add/rescale/split, Q@K^T, (masked) softmax, attention-weighted V,
// and output projection. When conditions allow (self-attention, head dim
// divisible by 8, no weights requested) it short-circuits into the fused
// flash / memory-efficient SDP kernels instead.
// Returns (output [B, T, D], attention weights or empty Tensor).
std::tuple<Tensor, Tensor> native_multi_head_attention_cuda(
    const Tensor& query,
    const Tensor& key,
    const Tensor& value,
    const int64_t embed_dim,
    const int64_t num_head,
    const Tensor& qkv_weight,
    const Tensor& qkv_bias,
    const Tensor& proj_weight,
    const Tensor& proj_bias,
    const c10::optional<Tensor>& mask,
    bool need_weights,
    bool average_attn_weights,
    const c10::optional<int64_t> mask_type) {
  // query shape: [B, T, D]
  // qkv_weight shape: [3 * D, D]
  TORCH_CHECK(
      !mask || !query.is_nested(),
      "NestedTensor with mask is not supported yet");
  const auto D = embed_dim;
  TORCH_CHECK(
      query.dim() == 3,
      "expected 3-D `query`, got ",
      query.dim(),
      "-D tensor");
  TORCH_CHECK(
      query.is_nested() || query.sizes()[2] == embed_dim,
      "passed-in embed_dim ",
      embed_dim,
      " didn't match last dim of query ",
      query.sizes()[2]);
  TORCH_CHECK(
      key.dim() == 3,
      "expected 3-D `key`, got ",
      key.dim(),
      "-D tensor");
  TORCH_CHECK(
      value.dim() == 3,
      "expected 3-D `value`, got ",
      value.dim(),
      "-D tensor");
  TORCH_CHECK(
      query.is_nested() || key.is_nested() || value.is_nested() ||
          (query.sizes() == key.sizes() && key.sizes() == value.sizes()),
      "expected `query`/`key`/`value` shapes to match");
  TORCH_CHECK(
      qkv_weight.dim() == 2,
      "expected 2-D `qkv_weight`, got ",
      qkv_weight.dim(),
      "-D tensor");
  TORCH_CHECK(
      D * 3 == qkv_weight.sizes()[0],
      "expected `qkv_weight` first dim to be 3x embed_dim");
  TORCH_CHECK(
      D == qkv_weight.sizes()[1],
      "expected `qkv_weight` second dim to be embed_Dim");
  TORCH_CHECK(
      qkv_bias.dim() == 1,
      "expected 1-D `qkv_bias`, got ",
      qkv_bias.dim(),
      "-D tensor");
  TORCH_CHECK(
      qkv_bias.sizes()[0] == 3 * D,
      "expected `qkv_bias` first dim and first dim of query to be equal");
  TORCH_CHECK(D % num_head == 0, "`embed_dim` must divide evenly by `num_heads`");
#ifndef NDEBUG
  const auto B = query.is_nested()
      ? get_nested_tensor_impl(query)->get_nested_sizes().size(0)
      : query.sizes()[0];
  auto T = query.is_nested() ? 0 : query.sizes()[1];
#endif
  const auto dim_per_head = D / num_head;
  // Fast path: self-attention with no weights requested may dispatch to the
  // fused flash / memory-efficient SDP kernels.
  if ((query.is_same(key) && key.is_same(value)) && dim_per_head % 8 == 0 && !need_weights) {
    // We have not done linear projection yet but the input for SDP
    // Is expected to be 4 dimensional. We "cheaply" create view tensors
    // That will then be used for checking hot path conditions with select_sd_backend
    auto q = query.view({query.size(0), -1, num_head, dim_per_head}).transpose(1, 2);
    auto k = key.view({key.size(0), -1, num_head, dim_per_head}).transpose(1, 2);
    auto v = value.view({value.size(0), -1, num_head, dim_per_head}).transpose(1, 2);
    sdp::sdp_params kernel_params{q, k, v, mask.has_value(), 0.0, false};
    auto backend = select_sdp_backend(kernel_params);
    // strides from packed projection for nested tensors when seq_len is 1 will be
    // and will trigger a contiguous call in the kernel, so we prevent this
    bool no_seq_len_1_nested = query.is_nested() ? check_for_seq_len_1_nested_tensor(kernel_params, false) : true;
    if (no_seq_len_1_nested &&
        (backend == sdp::SDPBackend::flash_attention || backend == sdp::SDPBackend::efficient_attention)) {
      // Project once, chunk into q/k/v, reshape to [B, NH, T, DH], and hand
      // off to the fused scaled_dot_product_attention kernel.
      auto x = at::linear(query, qkv_weight, qkv_bias);
      auto chunks = x.chunk(3, -1);
      auto x_size_0 = x.size(0);
      chunks[0] = (chunks[0].view({x_size_0, -1, num_head, dim_per_head}))
                      .transpose(1, 2);
      chunks[1] = (chunks[1].view({x_size_0, -1, num_head, dim_per_head}))
                      .transpose(1, 2);
      chunks[2] = (chunks[2].view({x_size_0, -1, num_head, dim_per_head}))
                      .transpose(1, 2);
      auto y = at::scaled_dot_product_attention(
          chunks[0], chunks[1], chunks[2], mask, 0.0, false, c10::nullopt);
      auto past_sdp = y.transpose(1, 2).reshape({x_size_0, -1, embed_dim});
      return std::make_tuple(
          at::linear(past_sdp, proj_weight, proj_bias), Tensor());
    }
    // Returned math or error lets not use it
  }
  // shape: [B, T, 3 x D]
  auto qkv = qkv_projection(query, key, value, embed_dim, qkv_weight);
  if (!qkv.is_nested() && qkv.numel() == 0) {
    if (query.is_nested()) {
      return std::make_tuple(Tensor(), Tensor());
    }
    return std::make_tuple(at::empty_like(query), Tensor());
  }
#ifndef NDEBUG
  if (!query.is_nested() || !qkv.is_nested()) {
    if (query.is_nested()) {
      T = qkv.size(1);
    }
    debug_assert_shape(__LINE__, qkv, {B, T, 3 * D});
  }
#endif
#ifdef DEBUG_PRINT_EACH_STEP
  if (!qkv.is_nested()) {
    std::cerr << "qkv: " << qkv << std::endl;
  }
#endif
  // shape: 3 x [B, num_head, T, dim_per_head]
  auto q_k_v = _transform_bias_rescale_qkv(qkv, qkv_bias, num_head);
  qkv = Tensor(); // Not used any more, allow free
  auto& q = std::get<0>(q_k_v);
  const auto& k = std::get<1>(q_k_v);
  const auto& v = std::get<2>(q_k_v);
#ifndef NDEBUG
  debug_assert_shape(__LINE__, q, {B, num_head, T, dim_per_head});
  debug_assert_shape(__LINE__, k, {B, num_head, T, dim_per_head});
  debug_assert_shape(__LINE__, v, {B, num_head, T, dim_per_head});
#endif
#ifdef DEBUG_PRINT_EACH_STEP
  std::cerr << "q: " << q << std::endl;
  std::cerr << "k: " << k << std::endl;
  std::cerr << "v: " << v << std::endl;
#endif
  // shape: [B, num_head, T, T]
  auto qkt = bmm_nt(q, k);
  // q & k are dead but cannot be freed because they were packed with v
#ifndef NDEBUG
  debug_assert_shape(__LINE__, qkt, {B, num_head, T, T});
#endif
#ifdef DEBUG_PRINT_EACH_STEP
  std::cerr << "qkt: " << qkt << std::endl;
#endif
  // shape: [B, num_head, T, T]
  // TODO: long-term, have a kernel that works with
  // NestedTensor directly if there is no mask passed
  qkt = masked_softmax(qkt, mask, query, mask_type);
#ifdef DEBUG_PRINT_EACH_STEP
  std::cerr << "qkt after softmax: " << qkt << std::endl;
#endif
  // shape: [B, num_head, T, dim_per_head]
  // reuse storage for q; we're done with it
  auto attn_ctx = bmm_nn(q, qkt, v);
  // qkv is not dead; we just reused storage for q!
  if (!need_weights) {
    qkt = Tensor();
  }
#ifndef NDEBUG
  debug_assert_shape(__LINE__, attn_ctx, {B, num_head, T, dim_per_head});
#endif
#ifdef DEBUG_PRINT_EACH_STEP
  std::cerr << "attn_ctx: " << attn_ctx << std::endl;
#endif
  // shape: [B, T, D]
  // Fuse transform_0213 inside
  auto proj = transform0213_gemm_nt_bias(
      attn_ctx, proj_weight, proj_bias, query);
#ifndef NDEBUG
  debug_assert_shape(__LINE__, proj, {B, T, D});
#endif
  if (need_weights && average_attn_weights) {
    // weights are not needed for full transformer, so don't worry too
    // much about performance -- we implement this just to make use
    // cases that don't disable need_weights still get some speedup.
    qkt = qkt.sum(1);
    qkt /= num_head;
  }
  return std::make_tuple(std::move(proj), std::move(qkt));
}
// FlashAttention dispatch for dense (non-nested) inputs: flattens
// [B, NH, S, DH] queries/keys/values into the packed [Nnz, NH, DH] layout
// expected by _flash_attention_forward, synthesizing uniform cumulative
// sequence-length tensors, and reshapes the output back to [B, NH, S, DH].
std::tuple<Tensor, Tensor, Tensor, Tensor, int64_t, int64_t, Tensor, Tensor, Tensor> _scaled_dot_product_flash_attention_cuda(
    const Tensor& query,
    const Tensor& key,
    const Tensor& value,
    double dropout_p,
    bool is_causal,
    bool return_debug_mask,
    c10::optional<double> scale) {
  // Used for tracking usage statistics
  C10_LOG_API_USAGE_ONCE("torch.sdpa.flash_attention");
  // Query (Batch x Num_heads x Q_seq_len  x Dim_per_head)
  // Key   (Batch x Num_heads x KV_seq_len x Dim_per_head)
  // Value (Batch x Num_heads x KV_seq_len x Dim_per_head)
  const int64_t batch_size = query.size(0);
  const int64_t num_heads = query.size(1);
  const int64_t max_seqlen_batch_q = query.size(2);
  const int64_t head_dim = query.size(3);
  const int64_t max_seqlen_batch_k = key.size(2);
  const int64_t max_seqlen_batch_v = value.size(2);
  TORCH_CHECK(
      max_seqlen_batch_k == max_seqlen_batch_v,
      "Key and Value must have the same sequence length");
  // Query -> Query(Batch x Q_seq_len  x Num_heads x Dim_per_head)
  // Key   -> Key  (Batch x KV_seq_len x Num_heads x Dim_per_head)
  // Value -> Value(Batch x KV_seq_len x Num_heads x Dim_per_head)
  Tensor q_t = query.transpose(1, 2);
  Tensor k_t = key.transpose(1, 2);
  Tensor v_t = value.transpose(1, 2);
  // Every batch row has the same length here, so the cumulative sequence
  // lengths are simply multiples of the max sequence length.
  Tensor cumulative_sequence_length_q = at::arange(
      0,
      (batch_size + 1) * max_seqlen_batch_q,
      max_seqlen_batch_q,
      TensorOptions().device(at::kCUDA).dtype(at::kInt));
  Tensor cumulative_sequence_length_k = at::arange(
      0,
      (batch_size + 1) * max_seqlen_batch_k,
      max_seqlen_batch_k,
      TensorOptions().device(at::kCUDA).dtype(at::kInt));
  int64_t Nnz_q{batch_size * max_seqlen_batch_q};
  int64_t Nnz_kv{batch_size * max_seqlen_batch_k};
  // For the standard MHA these will actually be views
  Tensor query_reshaped = q_t.reshape({Nnz_q, num_heads, head_dim});
  Tensor key_reshaped = k_t.reshape({Nnz_kv, num_heads, head_dim});
  Tensor value_reshaped = v_t.reshape({Nnz_kv, num_heads, head_dim});
  Tensor attention, log_sumexp, debug_attn_mask, philox_seed, philox_offset;
  std::tie(attention, log_sumexp, philox_seed, philox_offset, debug_attn_mask) =
      at::_flash_attention_forward(
          query_reshaped,
          key_reshaped,
          value_reshaped,
          cumulative_sequence_length_q,
          cumulative_sequence_length_k,
          max_seqlen_batch_q,
          max_seqlen_batch_k,
          dropout_p,
          is_causal,
          return_debug_mask,
          scale);
  // Reshape output to convert nnz to batch_size and seq_len
  attention =
      attention.view({batch_size, max_seqlen_batch_q, num_heads, head_dim}).transpose(1,2);
  return std::make_tuple(attention, log_sumexp, cumulative_sequence_length_q, cumulative_sequence_length_k, max_seqlen_batch_q, max_seqlen_batch_k, philox_seed, philox_offset, debug_attn_mask);
}
// Memory-efficient attention dispatch: moves the head dimension inward
// ([B, NH, S, DH] -> [B, S, NH, DH]) for _efficient_attention_forward,
// runs with dropout disabled, and transposes the result back.
std::tuple<Tensor, Tensor> _scaled_dot_product_efficient_attention_cuda(
    const Tensor& query,
    const Tensor& key,
    const Tensor& value,
    bool compute_log_sumexp,
    bool is_causal,
    c10::optional<double> scale) {
  // Used for tracking usage statistics
  C10_LOG_API_USAGE_ONCE("torch.sdpa.mem_efficient_attention");
  // Query -> Query(Batch x Q_seq_len  x Num_heads x Dim_per_head)
  // Key   -> Key  (Batch x KV_seq_len x Num_heads x Dim_per_head)
  // Value -> Value(Batch x KV_seq_len x Num_heads x Dim_per_head)
  Tensor q_t = query.transpose(1, 2);
  Tensor k_t = key.transpose(1, 2);
  Tensor v_t = value.transpose(1, 2);
  // Causality is expressed as a built-in mask type rather than a mask tensor.
  sdp::CustomMaskType custom_mask_type = is_causal
      ? sdp::CustomMaskType::CausalFromTopLeft
      : sdp::CustomMaskType::NoCustomMask;
  Tensor attention, log_sumexp;
  std::tie(attention, log_sumexp) = at::_efficient_attention_forward(
      q_t,
      k_t,
      v_t,
      c10::nullopt,
      c10::nullopt,
      c10::nullopt,
      c10::nullopt,
      0.0 /*dropout_p*/,
      static_cast<int64_t>(custom_mask_type),
      compute_log_sumexp,
      scale);
  attention = attention.transpose(1, 2);
  return std::make_tuple(std::move(attention), std::move(log_sumexp));
}
// Selects which fused SDP backend (flash / efficient / math) will serve this
// call, returned as an int64 for the dispatcher; errors out if every backend
// has been disabled.
int64_t _fused_sdp_choice_cuda(const Tensor& query_, const Tensor& key, const Tensor& value,
        const c10::optional<Tensor>& attn_mask_, double dropout_p, bool is_causal, c10::optional<double> scale){
  sdp::sdp_params kernel_params{query_, key, value, attn_mask_.has_value(), dropout_p, is_causal};
  const auto chosen_backend = select_sdp_backend(kernel_params);
  TORCH_CHECK(
      chosen_backend != sdp::SDPBackend::error,
      "No viable backend for scaled_dot_product_attention was found. ",
      "This is likely due to turning off both the math kernel and the fused kernels.");
  return static_cast<int64_t>(chosen_backend);
}
// Thin wrapper around the FlashAttention forward kernel (fmha::mha_fwd).
// Inputs are packed as [total_tokens, num_heads, head_dim] with cumulative
// sequence-length tensors delimiting each batch row. Returns
// (output, logsumexp, philox_seed, philox_offset, debug_attn_mask); the
// debug mask is an empty tensor unless return_debug_mask is set.
// Fails with TORCH_CHECK when the build has no USE_FLASH_ATTENTION support.
std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor> _flash_attention_forward(
    const Tensor& query,
    const Tensor& key,
    const Tensor& value,
    const Tensor& cumulative_sequence_length_q,
    const Tensor& cumulative_sequence_length_k,
    const int64_t max_seqlen_batch_q,
    const int64_t max_seqlen_batch_k,
    double dropout_p,
    bool is_causal,
    bool return_debug_mask,
    c10::optional<double> scale) {
#if defined(USE_FLASH_ATTENTION)
  /*
  num_splits determines how much to parallelize over the seqlen_q dimension
  num_splits=0 means
  it will be set by an internal heuristic. We're exposing num_splits mostly for
  benchmarking. We will hard code it to 0 for now
  */
  constexpr int num_splits{0};
  const auto softmax_scale = sdp::calculate_scale(query, scale).as_float_unchecked();
  at::Tensor output = at::empty_like(query);
  Tensor logsumexp, debug_attn_mask, philox_seed, philox_offset;
  std::tie(logsumexp, philox_seed, philox_offset, debug_attn_mask) = fmha::mha_fwd(
      query,
      key,
      value,
      output,
      cumulative_sequence_length_q,
      cumulative_sequence_length_k,
      max_seqlen_batch_q,
      max_seqlen_batch_k,
      dropout_p,
      softmax_scale,
      false, /*zero_tensors = false for all calls here*/
      is_causal,
      return_debug_mask, /*return_softmax (this is used for testing)*/
      num_splits);
  debug_attn_mask = return_debug_mask ? debug_attn_mask : at::empty({0}, query.options());
  return std::make_tuple(output, logsumexp, philox_seed, philox_offset, debug_attn_mask);
#endif
  // Only reached when the kernel above was compiled out.
  TORCH_CHECK(false, "USE_FLASH_ATTENTION was not enabled for build.")
  return std::make_tuple(Tensor(), Tensor(), Tensor(), Tensor(), Tensor());
}
// Memory-efficient attention forward: dispatches to one of the CUTLASS-based
// kernels selected by dtype / compute capability, launching the first
// candidate that is compatible with the inputs.
// Two input modes: dense [b, seqlen, num_heads, K] tensors, or "Mode 1MHK"
// where b == 1 and per-example boundaries arrive via seqstart_q / seqstart_k.
// Returns (output, logsumexp); logsumexp has a zero-sized last dim unless
// compute_logsumexp is set.
std::tuple<at::Tensor, at::Tensor> _efficient_attention_forward(
    const at::Tensor& query, // [b, seqlen, num_heads, K]
    const at::Tensor& key, // [b, seqlen, num_heads, K]
    const at::Tensor& value, // [b, seqlen, num_heads, Kv]
    const c10::optional<at::Tensor>& bias, // [b, num_heads, seqlen, seqlen]
    // (Mode 1MHK only) [b+1]: cu_seqlens_q[b] contains the
    // position of the first query token for batch $b
    const c10::optional<at::Tensor>& seqstart_q,
    // (Mode 1MHK only) [b+1]: cu_seqlen_k[b] contains the
    // position of the first key token for batch $b
    const c10::optional<at::Tensor>& seqstart_k,
    // (Mode 1MHK only) Maximum sequence length across batches
    const c10::optional<int64_t> max_seqlen_q_,
    double dropout_p, // attention matrix dropout probability
    int64_t custom_mask_type,
    bool compute_logsumexp,
    c10::optional<double> scale,
    const c10::optional<at::Tensor>& causal_diagonal,
    const c10::optional<at::Tensor>& seqlen_k) {
#if defined(USE_FLASH_ATTENTION)
// TODO In theory it is possible to compile with _CUDA_ARCH < 5.0 and run on a
// machine that is >= 5.0. In practice, this is not a problem but since
// this would avoid runtime architecture checks, we should look into it
  // Shape validation: all inputs are rank-4 and must agree on batch,
  // key/value sequence length, head count, and q/k head dim.
  TORCH_CHECK(query.dim() == 4);
  TORCH_CHECK(key.dim() == 4);
  TORCH_CHECK(value.dim() == 4);
  // Batch sizes
  TORCH_CHECK(query.size(0) == key.size(0));
  TORCH_CHECK(query.size(0) == value.size(0));
  // Sequence length
  TORCH_CHECK(key.size(1) == value.size(1));
  // Num heads
  TORCH_CHECK(query.size(2) == key.size(2));
  TORCH_CHECK(query.size(2) == value.size(2));
  // Embedding per head
  TORCH_CHECK(query.size(3) == key.size(3));
  // TODO_DRISS we should return max_seqlen_k;
  int64_t max_seqlen_q, max_seqlen_k;
  // seqstart_q / seqstart_k must be provided (or omitted) together.
  TORCH_CHECK(seqstart_q.has_value() == seqstart_k.has_value());
  if (seqstart_q.has_value()) {
    // Mode 1MHK: int32 1-D contiguous prefix-offset tensors, batch dim == 1.
    TORCH_CHECK(seqstart_q->scalar_type() == at::ScalarType::Int);
    TORCH_CHECK(seqstart_k->scalar_type() == at::ScalarType::Int);
    TORCH_CHECK(seqstart_q->dim() == 1 && seqstart_k->dim() == 1);
    CHECK_NOSPARSE_CONTIGUOUS_CUDA((*seqstart_q));
    CHECK_NOSPARSE_CONTIGUOUS_CUDA((*seqstart_k));
    TORCH_CHECK(seqstart_q->size(0) == seqstart_k->size(0));
    TORCH_CHECK(query.size(0) == 1, "cu_seqlen only supports batch_size=1");
    TORCH_CHECK(max_seqlen_q_.has_value());
    max_seqlen_q = *max_seqlen_q_;
    max_seqlen_k = 0; // Will be set inside the kernel
  } else {
    max_seqlen_q = query.size(1);
    max_seqlen_k = key.size(1);
  }
  CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(query);
  CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(key);
  CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(value);
  at::hip::HIPGuardMasqueradingAsCUDA device_guard(query.device());
  hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
  int64_t B = query.size(0);
  int64_t M = query.size(1);
  int64_t N = key.size(1);
  int64_t num_heads = query.size(-2);
  int64_t K = query.size(-1);
  int64_t Kv = value.size(-1);
  at::Tensor res;
  at::Tensor logsumexp;
  const bool use_dropout = std::fpclassify(dropout_p) != FP_ZERO;
  // Philox RNG state for dropout; capturing it under the generator mutex
  // before launch so concurrent callers get disjoint offsets.
  at::PhiloxCudaState rng_engine_inputs;
  if (use_dropout) {
    at::CUDAGeneratorImpl* gen =
        at::get_generator_or_default<at::CUDAGeneratorImpl>(
            c10::nullopt, at::cuda::detail::getDefaultCUDAGenerator());
    std::lock_guard<std::mutex> lock(gen->mutex_);
    // if using dropout, we produce 1 random number for each element of the
    // attention tensor
    rng_engine_inputs = gen->philox_cuda_state(B * num_heads * M * N);
  }
  hipDeviceProp_t* p = at::cuda::getDeviceProperties(query.device().index());
  const int computeCapability = p->major * 10 + p->minor;
  bool kernel_launched = false;
  const auto maxShmem = p->sharedMemPerBlockOptin;
  // Invoked by dispatch_cutlassF once per candidate kernel; the first
  // candidate that passes all compatibility checks wins and is launched.
  auto launchKernel = [&](auto _k, auto kernel_fn) {
    using Kernel = decltype(_k);
    using scalar_t = typename Kernel::scalar_t;
    (void)_k;
    if (kernel_launched) {
      return;
    }
    // Check if this kernel is compatible
    if (!Kernel::kSupportsDropout && use_dropout) {
      return;
    }
    if (!Kernel::kSupportsBias && bias.has_value()) {
      return;
    }
    if (value.size(3) > Kernel::kMaxK || key.size(3) > Kernel::kMaxK) {
      return;
    }
    // Alignment
    if ((query.stride(2) % Kernel::kAlignmentQ) ||
        (key.stride(2) % Kernel::kAlignmentK) ||
        (value.stride(2) % Kernel::kAlignmentV)) {
      return;
    }
    // Uses too much shmem
    size_t smem_bytes = sizeof(typename Kernel::SharedStorage);
    if (smem_bytes > maxShmem) {
      return;
    }
    kernel_launched = true;
    res = at::empty(
        {B, M, num_heads, Kv},
        query.options().dtype(
            CutlassToAtenDtype<typename Kernel::output_t>::atScalarType()));
    // NOTE: Should be aligned (by padding) in case M is
    // not a good number for loading during backward
    constexpr decltype(M) kAlignLSE = Kernel::kAlignLSE;
    logsumexp = at::empty(
        {seqstart_q.has_value() ? seqstart_q->size(0) - 1 : B,
         num_heads,
         compute_logsumexp ? ceil_div(max_seqlen_q, kAlignLSE) * kAlignLSE : 0},
        query.options().dtype(at::ScalarType::Float));
    // Populate the kernel parameter struct (pointers, shapes, strides).
    typename Kernel::Params p;
    p.query_ptr = (scalar_t*)query.data_ptr();
    p.key_ptr = (scalar_t*)key.data_ptr();
    p.value_ptr = (scalar_t*)value.data_ptr();
    p.logsumexp_ptr = compute_logsumexp
        ? (typename Kernel::lse_scalar_t*)logsumexp.data_ptr()
        : nullptr;
    at::Tensor output_accum;
    if (Kernel::kNeedsOutputAccumulatorBuffer) {
      // Scratch accumulator (kept alive until the launch below completes
      // enqueueing; freed when this lambda's locals go out of scope).
      output_accum = at::empty(
          {B, M, num_heads, Kv},
          query.options().dtype(
              CutlassToAtenDtype<
                  typename Kernel::output_accum_t>::atScalarType()));
      p.output_accum_ptr =
          (typename Kernel::output_accum_t*)output_accum.data_ptr();
    } else {
      p.output_accum_ptr = nullptr;
    }
    p.output_ptr = (typename Kernel::output_t*)res.data_ptr();
    if (seqstart_q.has_value()) {
      p.seqstart_q_ptr = (int32_t*)seqstart_q->data_ptr();
      p.seqstart_k_ptr = (int32_t*)seqstart_k->data_ptr();
    }
    p.num_heads = num_heads;
    p.head_dim = query.size(3);
    p.head_dim_value = value.size(3);
    p.num_queries = max_seqlen_q;
    p.num_keys = max_seqlen_k;
    p.num_batches = seqstart_q.has_value() ? seqstart_q->size(0) - 1 : B;
    p.custom_mask_type = custom_mask_type;
    p.causal_diagonal_ptr = nullptr;
    if (causal_diagonal.has_value()) {
      CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(causal_diagonal.value());
      TORCH_CHECK(causal_diagonal->scalar_type() == at::ScalarType::Int);
      p.causal_diagonal_ptr = (int32_t*)causal_diagonal->data_ptr();
    }
    p.seqlen_k_ptr = nullptr;
    if (seqlen_k.has_value()) {
      CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(seqlen_k.value());
      TORCH_CHECK(seqlen_k->scalar_type() == at::ScalarType::Int);
      p.seqlen_k_ptr = (int32_t*)seqlen_k->data_ptr();
    }
    p.scale = sdp::calculate_scale(query, scale).as_float_unchecked();
    ASSIGN_CHECK_OVERFLOW(p.q_strideB, query.stride(0));
    ASSIGN_CHECK_OVERFLOW(p.k_strideB, key.stride(0));
    ASSIGN_CHECK_OVERFLOW(p.v_strideB, value.stride(0));
    ASSIGN_CHECK_OVERFLOW(p.q_strideM, query.stride(1));
    ASSIGN_CHECK_OVERFLOW(p.k_strideM, key.stride(1));
    ASSIGN_CHECK_OVERFLOW(p.v_strideM, value.stride(1));
    ASSIGN_CHECK_OVERFLOW(p.q_strideH, query.stride(2));
    ASSIGN_CHECK_OVERFLOW(p.k_strideH, key.stride(2));
    ASSIGN_CHECK_OVERFLOW(p.v_strideH, value.stride(2));
    ASSIGN_CHECK_OVERFLOW(p.o_strideM, res.stride(1));
    if (bias.has_value()) {
      CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA((*bias));
      TORCH_CHECK(
          bias->scalar_type() == CutlassToAtenDtype<scalar_t>::atScalarType(),
          "invalid dtype for bias - should match query's dtype");
      p.attn_bias_ptr = (scalar_t*)bias->data_ptr();
      // assign strides for bias, viewed as
      // (batch_sz, n_heads, n_queries, n_keys)
      const at::Tensor bias_4d_view =
          get_bias_4d_view(*bias, B, num_heads, M, N);
      ASSIGN_CHECK_OVERFLOW(p.bias_strideB, bias_4d_view.stride(0));
      ASSIGN_CHECK_OVERFLOW(p.bias_strideH, bias_4d_view.stride(1));
      ASSIGN_CHECK_OVERFLOW(p.bias_strideM, bias_4d_view.stride(2));
    }
    p.use_dropout = use_dropout;
    if (p.use_dropout) {
      p.rng_engine_inputs = rng_engine_inputs;
      p.dropout_prob = dropout_p;
    }
    // Kernels needing more than 48KB (0xc000) of dynamic shared memory must
    // opt in via the max-dynamic-shared-memory attribute.
    if (smem_bytes > 0xc000) {
      auto err = hipFuncSetAttribute(
          kernel_fn, hipFuncAttributeMaxDynamicSharedMemorySize, smem_bytes);
      TORCH_CHECK(
          err != hipErrorInvalidValue,
          "This GPU does not have enough shared-memory (kernel requires ",
          smem_bytes / 1024,
          " kb)");
      AT_CUDA_CHECK(err);
    }
    Kernel::check_supported(p);
    hipLaunchKernelGGL(( kernel_fn), dim3(p.getBlocksGrid()), dim3(p.getThreadsGrid()), smem_bytes, stream, p);
  };
  // Dispatch to the right kernel
  DISPATCH_TYPES(query, ([&]() {
                   dispatch_cutlassF<scalar_t>(launchKernel, computeCapability);
                 }));
  TORCH_CHECK(kernel_launched, "cutlassF: no kernel found to launch!");
  AT_CUDA_CHECK(hipGetLastError());
  // !!TODO_DRISS: We are throwing this away for now and need to change how its done
  // uint64_t -> int64_t bitwise casting as PyTorch don't support uint64_t
  // so just fake it as a int64_t
  int64_t seed, offset;
  if (use_dropout) {
    std::memcpy(&seed, &rng_engine_inputs.seed_, sizeof(seed));
    std::memcpy(&offset, &rng_engine_inputs.offset_.val, sizeof(offset));
  }
  return std::make_tuple(res, logsumexp);
#endif
  TORCH_CHECK(false, "USE_FLASH_ATTENTION was not enabled for build.")
  return std::make_tuple(Tensor{}, Tensor{});
}
// Stub entry point: the real implementation is installed from Python (as a
// Triton override) before use. Reaching this C++ body means the override was
// never registered, so we hard-error.
Tensor triton_scaled_dot_attention(const Tensor& q, const Tensor& k, const Tensor& v, double dropout_p){
  TORCH_CHECK(false, "This operator should be overridden in python before use");
  return Tensor{};
}
REGISTER_CUDA_DISPATCH(_fused_sdp_choice_stub, &_fused_sdp_choice_cuda);
// !!This function is deprecated. See FunctionsManual.cpp for the implementation!!
bool _chunk_grad_outputs_efficient_attention(
const Tensor& query,
const Tensor& key,
const Tensor& value,
bool is_causal) {
int64_t M = query.size(2);
int64_t N = key.size(2);
bool grad_kv_needs_init = is_causal && N > M;
bool is_aliased = query.storage().is_alias_of(key.storage()) && query.storage().is_alias_of(value.storage());
bool equal_seq_len = query.size(2) == key.size(2);
bool q_v_same_head_dim = query.size(3) == value.size(3);
bool chunk_grad_outputs = (!grad_kv_needs_init && equal_seq_len && q_v_same_head_dim && is_aliased);
return chunk_grad_outputs;
}
} // namespace native
} // namespace at
#include <type_traits>
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/NestedTensorImpl.h>
#include <ATen/TensorAccessor.h>
#include <c10/util/Logging.h>
#include <c10/util/bit_cast.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/detail/KernelUtils.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/native/NonSymbolicBC.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/MemoryAccess.cuh>
#include <ATen/native/cuda/PersistentSoftmax.cuh>
#include <ATen/native/cuda/block_reduce.cuh>
#include <c10/cuda/CUDAMathCompat.h>
#include <ATen/native/transformers/attention.h>
#include <ATen/native/nested/NestedTensorUtils.h>
#include <ATen/native/nested/NestedTensorTransformerFunctions.h>
#include <ATen/native/nested/NestedTensorUtils.h>
#include <ATen/native/transformers/cuda/sdp_utils.h>
#include <ATen/native/transformers/sdp_utils_cpp.h>
#ifdef USE_FLASH_ATTENTION
// FlashAttention Specific Imports
#include <ATen/native/transformers/cuda/flash_attn/fmha_api.h>
// MemoryEfficient Attention Specific Imports
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_forward.h>
#include <ATen/native/transformers/cuda/mem_eff_attention/kernels/cutlassF.h>
#include <ATen/native/transformers/cuda/mem_eff_attention/pytorch_utils.h>
#endif
namespace at {
namespace native {
namespace {
static constexpr int TRANSFORM_BIAS_RESCALE_VEC = 4;
// Fused bias-add + rescale for a packed QKV projection.
// Input `qkv` is the [B, T, 3*D] projection output, `qkv_bias` the packed
// [3*D] bias. Writes a [3, B, NH, T, DH] tensor where
//   q = (q_in + q_bias) * inv_sqrt_dim_per_head
//   k =  k_in + k_bias
//   v =  v_in + v_bias
// Launch: gridDim.x == B * T (one block per (b, t)); threads stride over D.
// When assume_aligned is true, D % VEC == 0 is required so loads/stores can
// go through memory::aligned_vector.
template <typename scalar_t, typename accscalar_t, bool assume_aligned>
__global__ void transform_bias_rescale_qkv_kernel(
    // [B, T, 3 * D]
    const PackedTensorAccessor64<scalar_t, 3, RestrictPtrTraits> qkv,
    // [3 * D]
    const PackedTensorAccessor64<scalar_t, 1, RestrictPtrTraits> qkv_bias,
    // [3, B, NH, T, DH]
    PackedTensorAccessor64<scalar_t, 5, RestrictPtrTraits> q_k_v,
    const scalar_t inv_sqrt_dim_per_head) {
  // warp per DH.
  // so launch B * NH * T warps.
  auto NH = q_k_v.size(2);
  auto T = q_k_v.size(3);
  auto DH = q_k_v.size(4);
  // Recover (b, t) from the flat 1-D grid index.
  auto t = blockIdx.x % T;
  auto b = blockIdx.x / T;
  auto D = NH * DH;
  if (assume_aligned) {
    constexpr int VEC = TRANSFORM_BIAS_RESCALE_VEC;
    using LoadT = memory::aligned_vector<scalar_t, VEC>;
    for (int32_t d_v = threadIdx.x; d_v < D / VEC; d_v += blockDim.x) {
      auto d = d_v * VEC;
      // Split the feature index into (head, within-head) coordinates.
      auto nh = d / DH;
      auto dh = d % DH;
      scalar_t qkv_bias_q[VEC];
      scalar_t qkv_bias_k[VEC];
      scalar_t qkv_bias_v[VEC];
      scalar_t qkv_q[VEC];
      scalar_t qkv_k[VEC];
      scalar_t qkv_v[VEC];
      // Here we require D % VEC == 0 for these vectorized loads.
      *reinterpret_cast<LoadT*>(&qkv_bias_q) =
          *reinterpret_cast<const LoadT*>(&qkv_bias[d + 0 * D]);
      *reinterpret_cast<LoadT*>(&qkv_bias_k) =
          *reinterpret_cast<const LoadT*>(&qkv_bias[d + 1 * D]);
      *reinterpret_cast<LoadT*>(&qkv_bias_v) =
          *reinterpret_cast<const LoadT*>(&qkv_bias[d + 2 * D]);
      *reinterpret_cast<LoadT*>(&qkv_q) =
          *reinterpret_cast<const LoadT*>(&qkv[b][t][d + 0 * D]);
      *reinterpret_cast<LoadT*>(&qkv_k) =
          *reinterpret_cast<const LoadT*>(&qkv[b][t][d + 1 * D]);
      *reinterpret_cast<LoadT*>(&qkv_v) =
          *reinterpret_cast<const LoadT*>(&qkv[b][t][d + 2 * D]);
#pragma unroll
      // TODO: specialize for float2half2/half2float2?
      // Accumulate in accscalar_t to limit rounding error for low-precision
      // scalar_t.
      for (auto ii = 0; ii < VEC; ++ii) {
        qkv_q[ii] = static_cast<scalar_t>(
            (static_cast<accscalar_t>(qkv_q[ii]) +
             static_cast<accscalar_t>(qkv_bias_q[ii])) *
            static_cast<accscalar_t>(inv_sqrt_dim_per_head));
        qkv_k[ii] = static_cast<scalar_t>(
            (static_cast<accscalar_t>(qkv_k[ii]) +
             static_cast<accscalar_t>(qkv_bias_k[ii])));
        qkv_v[ii] = static_cast<scalar_t>(
            (static_cast<accscalar_t>(qkv_v[ii]) +
             static_cast<accscalar_t>(qkv_bias_v[ii])));
      }
      // Here we require DH % VEC == 0 for these vectorized stores.
      *reinterpret_cast<LoadT*>(&q_k_v[0][b][nh][t][dh]) =
          *reinterpret_cast<const LoadT*>(&qkv_q);
      *reinterpret_cast<LoadT*>(&q_k_v[1][b][nh][t][dh]) =
          *reinterpret_cast<const LoadT*>(&qkv_k);
      *reinterpret_cast<LoadT*>(&q_k_v[2][b][nh][t][dh]) =
          *reinterpret_cast<const LoadT*>(&qkv_v);
    }
  } else {
    // Same as above, but we can't vectorize memory access.
    for (int32_t d = threadIdx.x; d < D; d += blockDim.x) {
      auto nh = d / DH;
      auto dh = d % DH;
      scalar_t qkv_bias_q = qkv_bias[d + 0 * D];
      scalar_t qkv_bias_k = qkv_bias[d + 1 * D];
      scalar_t qkv_bias_v = qkv_bias[d + 2 * D];
      scalar_t qkv_q = qkv[b][t][d + 0 * D];
      scalar_t qkv_k = qkv[b][t][d + 1 * D];
      scalar_t qkv_v = qkv[b][t][d + 2 * D];
      qkv_q = static_cast<scalar_t>(
          (static_cast<accscalar_t>(qkv_q) +
           static_cast<accscalar_t>(qkv_bias_q)) *
          static_cast<accscalar_t>(inv_sqrt_dim_per_head));
      qkv_k = static_cast<scalar_t>(
          (static_cast<accscalar_t>(qkv_k) +
           static_cast<accscalar_t>(qkv_bias_k)));
      qkv_v = static_cast<scalar_t>(
          (static_cast<accscalar_t>(qkv_v) +
           static_cast<accscalar_t>(qkv_bias_v)));
      q_k_v[0][b][nh][t][dh] = qkv_q;
      q_k_v[1][b][nh][t][dh] = qkv_k;
      q_k_v[2][b][nh][t][dh] = qkv_v;
    }
  }
}
// NestedTensor variant of transform_bias_rescale_qkv_kernel: `qkv` is the
// flat nested buffer, `offsets[b]` is the start of example b inside that
// buffer, and `input_sizes` gives each example's valid element count
// (seq_len_b * 3 * D). Output positions past an example's valid length are
// zero-padded.
// Launch: gridDim.x == B * T (one block per (b, t)); threads stride over D.
// When assume_aligned is true, D % VEC == 0 is required.
template <typename scalar_t, typename accscalar_t, bool assume_aligned = false>
__global__ void transform_bias_rescale_qkv_add_padding_kernel(
    // [B, T, 3 * D], but it's a NestedTensor buffer
    const PackedTensorAccessor64<scalar_t, 1, RestrictPtrTraits> qkv,
    // [3 * D]
    const PackedTensorAccessor64<scalar_t, 1, RestrictPtrTraits> qkv_bias,
    const int* offsets,
    const int* input_sizes,
    // [3, B, NH, T, DH]
    PackedTensorAccessor64<scalar_t, 5, RestrictPtrTraits> q_k_v,
    const scalar_t inv_sqrt_dim_per_head) {
  // warp per DH.
  // so launch B * NH * T warps.
  const auto NH = q_k_v.size(2);
  const auto T = q_k_v.size(3);
  const auto DH = q_k_v.size(4);
  const auto t = blockIdx.x % T;
  const auto b = blockIdx.x / T;
  const auto D = NH * DH;
  const auto _3D = 3 * D;
  const auto offset_for_batch = offsets[b];
  const auto input_dim = 1;
  // sizes_i[0] is the *within-example* element count for batch b.
  const auto* sizes_i = input_sizes + b * input_dim;
  if (assume_aligned) {
    constexpr int VEC = TRANSFORM_BIAS_RESCALE_VEC;
    using LoadT = memory::aligned_vector<scalar_t, VEC>;
    for (int32_t d_v = threadIdx.x; d_v < D / VEC; d_v += blockDim.x) {
      auto d = d_v * VEC;
      auto nh = d / DH;
      auto dh = d % DH;
      scalar_t qkv_bias_q[VEC];
      scalar_t qkv_bias_k[VEC];
      scalar_t qkv_bias_v[VEC];
      scalar_t qkv_q[VEC];
      scalar_t qkv_k[VEC];
      scalar_t qkv_v[VEC];
      // Offsets *within example b*; these are what must be compared against
      // sizes_i[0] (also a within-example count).
      const auto first_item_offset = t * _3D + d;
      const auto last_item_offset = first_item_offset + VEC - 1;
      const bool first_item_in_bounds = first_item_offset < sizes_i[0];
      const bool entire_vec_in_bounds = last_item_offset < sizes_i[0];
      // Here we require D % VEC == 0 for these vectorized loads.
      *reinterpret_cast<LoadT*>(&qkv_bias_q) =
          *reinterpret_cast<const LoadT*>(&qkv_bias[d + 0 * D]);
      *reinterpret_cast<LoadT*>(&qkv_bias_k) =
          *reinterpret_cast<const LoadT*>(&qkv_bias[d + 1 * D]);
      *reinterpret_cast<LoadT*>(&qkv_bias_v) =
          *reinterpret_cast<const LoadT*>(&qkv_bias[d + 2 * D]);
      if (entire_vec_in_bounds) {
        const auto offset = offset_for_batch + first_item_offset;
        *reinterpret_cast<LoadT*>(&qkv_q) =
            *reinterpret_cast<const LoadT*>(&qkv[offset + 0 * D]);
        *reinterpret_cast<LoadT*>(&qkv_k) =
            *reinterpret_cast<const LoadT*>(&qkv[offset + 1 * D]);
        *reinterpret_cast<LoadT*>(&qkv_v) =
            *reinterpret_cast<const LoadT*>(&qkv[offset + 2 * D]);
#pragma unroll
        // TODO: specialize for float2half2/half2float2?
        for (auto ii = 0; ii < VEC; ++ii) {
          qkv_q[ii] = static_cast<scalar_t>(
              (static_cast<accscalar_t>(qkv_q[ii]) +
               static_cast<accscalar_t>(qkv_bias_q[ii])) *
              static_cast<accscalar_t>(inv_sqrt_dim_per_head));
          qkv_k[ii] = static_cast<scalar_t>(
              (static_cast<accscalar_t>(qkv_k[ii]) +
               static_cast<accscalar_t>(qkv_bias_k[ii])));
          qkv_v[ii] = static_cast<scalar_t>(
              (static_cast<accscalar_t>(qkv_v[ii]) +
               static_cast<accscalar_t>(qkv_bias_v[ii])));
        }
      } else if (first_item_in_bounds) {
        // The vector straddles the end of this example's valid data: process
        // element-wise and zero-fill the out-of-bounds tail.
        const auto offset = offset_for_batch + first_item_offset;
        qkv_q[0] = qkv[offset + 0 * D];
        qkv_k[0] = qkv[offset + 1 * D];
        qkv_v[0] = qkv[offset + 2 * D];
        qkv_q[0] = static_cast<scalar_t>(
            (static_cast<accscalar_t>(qkv_q[0]) +
             static_cast<accscalar_t>(qkv_bias_q[0])) *
            static_cast<accscalar_t>(inv_sqrt_dim_per_head));
        qkv_k[0] = static_cast<scalar_t>(
            (static_cast<accscalar_t>(qkv_k[0]) +
             static_cast<accscalar_t>(qkv_bias_k[0])));
        qkv_v[0] = static_cast<scalar_t>(
            (static_cast<accscalar_t>(qkv_v[0]) +
             static_cast<accscalar_t>(qkv_bias_v[0])));
#pragma unroll
        for (auto ii = 1; ii < VEC; ++ii) {
          // BUGFIX: bounds must be checked with the *within-example* offset.
          // The previous code compared the buffer-global offset
          // (offset_for_batch + first_item_offset + ii) against sizes_i[0],
          // which spuriously zeroed valid elements for every example after
          // the first.
          if (first_item_offset + ii < sizes_i[0]) {
            const auto loop_offset = offset + ii;
            qkv_q[ii] = qkv[loop_offset + 0 * D];
            qkv_k[ii] = qkv[loop_offset + 1 * D];
            qkv_v[ii] = qkv[loop_offset + 2 * D];
            qkv_q[ii] = static_cast<scalar_t>(
                (static_cast<accscalar_t>(qkv_q[ii]) +
                 static_cast<accscalar_t>(qkv_bias_q[ii])) *
                static_cast<accscalar_t>(inv_sqrt_dim_per_head));
            qkv_k[ii] = static_cast<scalar_t>(
                (static_cast<accscalar_t>(qkv_k[ii]) +
                 static_cast<accscalar_t>(qkv_bias_k[ii])));
            qkv_v[ii] = static_cast<scalar_t>(
                (static_cast<accscalar_t>(qkv_v[ii]) +
                 static_cast<accscalar_t>(qkv_bias_v[ii])));
          } else {
            qkv_q[ii] = 0;
            qkv_k[ii] = 0;
            qkv_v[ii] = 0;
          }
        }
      } else {
        // Entire vector lies in the padding region.
#pragma unroll
        for (auto ii = 0; ii < VEC; ++ii) {
          qkv_q[ii] = 0;
          qkv_k[ii] = 0;
          qkv_v[ii] = 0;
        }
      }
      // Here we require DH % VEC == 0 for these vectorized stores.
      *reinterpret_cast<LoadT*>(&q_k_v[0][b][nh][t][dh]) =
          *reinterpret_cast<const LoadT*>(&qkv_q);
      *reinterpret_cast<LoadT*>(&q_k_v[1][b][nh][t][dh]) =
          *reinterpret_cast<const LoadT*>(&qkv_k);
      *reinterpret_cast<LoadT*>(&q_k_v[2][b][nh][t][dh]) =
          *reinterpret_cast<const LoadT*>(&qkv_v);
    }
  } else {
    // Scalar fallback: same logic without vectorized memory access.
    for (int32_t d = threadIdx.x; d < D; d += blockDim.x) {
      auto nh = d / DH;
      auto dh = d % DH;
      scalar_t qkv_bias_q = qkv_bias[d + 0 * D];
      scalar_t qkv_bias_k = qkv_bias[d + 1 * D];
      scalar_t qkv_bias_v = qkv_bias[d + 2 * D];
      const auto item_offset = t * _3D + d;
      const bool in_bounds = item_offset < sizes_i[0];
      scalar_t qkv_q, qkv_k, qkv_v;
      if (in_bounds) {
        const auto qkv_offset = offset_for_batch + item_offset;
        qkv_q = qkv[qkv_offset + 0 * D];
        qkv_k = qkv[qkv_offset + 1 * D];
        qkv_v = qkv[qkv_offset + 2 * D];
        qkv_q = static_cast<scalar_t>(
            (static_cast<accscalar_t>(qkv_q) +
             static_cast<accscalar_t>(qkv_bias_q)) *
            static_cast<accscalar_t>(inv_sqrt_dim_per_head));
        qkv_k = static_cast<scalar_t>(
            (static_cast<accscalar_t>(qkv_k) +
             static_cast<accscalar_t>(qkv_bias_k)));
        qkv_v = static_cast<scalar_t>(
            (static_cast<accscalar_t>(qkv_v) +
             static_cast<accscalar_t>(qkv_bias_v)));
      } else {
        qkv_q = 0;
        qkv_k = 0;
        qkv_v = 0;
      }
      q_k_v[0][b][nh][t][dh] = qkv_q;
      q_k_v[1][b][nh][t][dh] = qkv_k;
      q_k_v[2][b][nh][t][dh] = qkv_v;
    }
  }
}
// Given a nested-size tensor whose dim-1 holds two per-example size columns,
// collapse them into one column by element-wise multiplication.
Tensor collapse_dims_1_and_2(const Tensor& sizes) {
  const auto first_col = at::native::narrow_symint(sizes, 1, 0, 1);
  const auto second_col = at::native::narrow_symint(sizes, 1, 1, 1);
  return first_col.mul(second_col).contiguous();
}
} // namespace
// compute q = (q + q_bias) / sqrt(dim_per_head), k = k + k_bias, v = v + v_bias
// Host launcher for the fused QKV bias-add/rescale kernels above.
// Takes the packed [B, T, 3*D] projection (dense or NestedTensor) plus the
// packed [3*D] bias and returns the (q, k, v) triple, each shaped
// [B, num_head, T, dim_per_head], with q pre-scaled by 1/sqrt(dim_per_head).
__host__ std::tuple<Tensor, Tensor, Tensor> transform_bias_rescale_qkv_cuda(
    const Tensor& qkv,
    const Tensor& qkv_bias,
    const int64_t num_head) {
  auto B = qkv.is_nested()
      ? get_nested_tensor_impl(qkv)->get_nested_sizes().size(0)
      : qkv.size(0);
  // TODO: calculate this without the std::vector -- NestedTensor_to_mask wants
  // this too
  auto T = qkv.is_nested()
      ? NestedTensor_get_max_size(*get_nested_tensor_impl(qkv))[0]
      : qkv.size(1);
  if (qkv.is_nested()) {
    // Don't mess with non-nested case for now since it's not set up to fiddle
    // with mask size.
    // Round T up to next multiple of 8 so as to be able to utilize Tensor
    // cores. Otherwise, sometimes with padding, *no* row will have the maximum
    // sequence length and so we'll have a non-divisible-by-8 dimension even if
    // the model author chose a multiple of 8.
    T = T + (8 - (T % 8)) % 8;
  }
  auto _3D = qkv_bias.size(0);
  auto D = _3D / 3;
  TORCH_CHECK(D % num_head == 0);
  const auto dim_per_head = D / num_head;
  // Single output buffer; split into q/k/v views at the end.
  auto q_k_v = at::empty({3, B, num_head, T, dim_per_head}, qkv_bias.options());
  // NOTE: both macros capture locals (blocks, threads, qkv, q_k_v, ...) from
  // the dispatch lambda below by name.
#define CALL_KERNEL(assume_aligned)                                        \
  transform_bias_rescale_qkv_kernel<scalar_t, accscalar_t, assume_aligned> \
      <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(          \
          qkv.packed_accessor64<scalar_t, 3, RestrictPtrTraits>(),         \
          qkv_bias.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(),    \
          q_k_v.packed_accessor64<scalar_t, 5, RestrictPtrTraits>(),       \
          1.0 / std::sqrt(static_cast<scalar_t>(dim_per_head)))
#define CALL_ADD_PADDING_KERNEL(assume_aligned)                         \
  transform_bias_rescale_qkv_add_padding_kernel<                        \
      scalar_t,                                                         \
      accscalar_t,                                                      \
      assume_aligned>                                                   \
      <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(       \
          nt_qkv_buffer                                                 \
              .packed_accessor64<scalar_t, 1, RestrictPtrTraits>(),     \
          qkv_bias.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(), \
          offsets_ptr,                                                  \
          sizes_ptr,                                                    \
          q_k_v.packed_accessor64<scalar_t, 5, RestrictPtrTraits>(),    \
          1.0 / std::sqrt(static_cast<scalar_t>(dim_per_head)))
  AT_DISPATCH_FLOATING_TYPES_AND2(
      ScalarType::Half,
      ScalarType::BFloat16,
      qkv.scalar_type(),
      "transform_bias_rescale_qkv",
      [&] {
        using accscalar_t = acc_type<scalar_t, true>;
        // One block per (b, t); threads sized for the vectorized path.
        auto threads = std::max(
            std::min<int32_t>(1024, D / TRANSFORM_BIAS_RESCALE_VEC), 1);
        auto blocks = B * T;
        const bool aligned =
            ((dim_per_head % TRANSFORM_BIAS_RESCALE_VEC) == 0) &&
            ((reinterpret_cast<intptr_t>(qkv_bias.data_ptr()) %
              TRANSFORM_BIAS_RESCALE_VEC) == 0);
        if (aligned) {
          TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
              D % TRANSFORM_BIAS_RESCALE_VEC == 0,
              "D = num_heads * dim_per_head, so we should have dim_per_head % "
              "TRANSFORM_BIAS_RESCALE_VEC == 0 => "
              "D % TRANSFORM_BIAS_RESCALE_VEC == 0");
        }
        if (qkv.is_nested()) {
          auto* nt_qkv = get_nested_tensor_impl(qkv);
          const at::Tensor& nt_qkv_buffer = nt_qkv->get_buffer();
          auto sizes = collapse_dims_1_and_2(nt_qkv->get_nested_sizes());
          auto offsets =
              NestedTensor_batch_offsets_from_size_tensor(sizes, sizes.numel());
          // Append the per-example sizes behind the offsets so a single
          // host-to-device copy moves both.
          at::native::narrow_symint(offsets, 0, sizes.numel() + 1, sizes.numel())
              .copy_(sizes.reshape({-1}));
          auto metadata = offsets.to(at::Device(kCUDA), at::kInt, true, true);
          const auto offsets_ptr = metadata.data_ptr<int>();
          const auto sizes_ptr = offsets_ptr + sizes.numel() + 1;
          const auto input_dim = sizes.sizes()[1];
          TORCH_INTERNAL_ASSERT_DEBUG_ONLY(input_dim == 1);
          if (aligned &&
              ((reinterpret_cast<intptr_t>(qkv.data_ptr()) %
                TRANSFORM_BIAS_RESCALE_VEC) == 0)) {
            CALL_ADD_PADDING_KERNEL(true);
          } else {
            CALL_ADD_PADDING_KERNEL(false);
          }
        } else if (aligned) {
          CALL_KERNEL(true);
        } else {
          CALL_KERNEL(false);
        }
        C10_CUDA_KERNEL_LAUNCH_CHECK();
      });
#undef CALL_ADD_PADDING_KERNEL
#undef CALL_KERNEL
  auto q_k_v_s =
      at::native::split(q_k_v.view({3 * B, num_head, T, dim_per_head}), B, 0);
  return std::make_tuple(q_k_v_s[0], q_k_v_s[1], q_k_v_s[2]);
}
// CUDA fast path for multi-head attention: packed QKV projection, optional
// dispatch to the fused SDP kernels, otherwise explicit softmax attention,
// then the output projection. Returns (output, attn_weights); attn_weights
// is an undefined Tensor unless need_weights is set.
std::tuple<Tensor, Tensor> native_multi_head_attention_cuda(
    const Tensor& query,
    const Tensor& key,
    const Tensor& value,
    const int64_t embed_dim,
    const int64_t num_head,
    const Tensor& qkv_weight,
    const Tensor& qkv_bias,
    const Tensor& proj_weight,
    const Tensor& proj_bias,
    const c10::optional<Tensor>& mask,
    bool need_weights,
    bool average_attn_weights,
    const c10::optional<int64_t> mask_type) {
  // query shape: [B, T, D]
  // qkv_weight shape: [3 * D, D]
  TORCH_CHECK(
      !mask || !query.is_nested(),
      "NestedTensor with mask is not supported yet");
  const auto D = embed_dim;
  TORCH_CHECK(
      query.dim() == 3,
      "expected 3-D `query`, got ",
      query.dim(),
      "-D tensor");
  TORCH_CHECK(
      query.is_nested() || query.sizes()[2] == embed_dim,
      "passed-in embed_dim ",
      embed_dim,
      " didn't match last dim of query ",
      query.sizes()[2]);
  TORCH_CHECK(
      key.dim() == 3,
      "expected 3-D `key`, got ",
      key.dim(),
      "-D tensor");
  TORCH_CHECK(
      value.dim() == 3,
      "expected 3-D `value`, got ",
      value.dim(),
      "-D tensor");
  TORCH_CHECK(
      query.is_nested() || key.is_nested() || value.is_nested() ||
          (query.sizes() == key.sizes() && key.sizes() == value.sizes()),
      "expected `query`/`key`/`value` shapes to match");
  TORCH_CHECK(
      qkv_weight.dim() == 2,
      "expected 2-D `qkv_weight`, got ",
      qkv_weight.dim(),
      "-D tensor");
  TORCH_CHECK(
      D * 3 == qkv_weight.sizes()[0],
      "expected `qkv_weight` first dim to be 3x embed_dim");
  TORCH_CHECK(
      D == qkv_weight.sizes()[1],
      "expected `qkv_weight` second dim to be embed_Dim");
  TORCH_CHECK(
      qkv_bias.dim() == 1,
      "expected 1-D `qkv_bias`, got ",
      qkv_bias.dim(),
      "-D tensor");
  TORCH_CHECK(
      qkv_bias.sizes()[0] == 3 * D,
      "expected `qkv_bias` first dim and first dim of query to be equal");
  TORCH_CHECK(D % num_head == 0, "`embed_dim` must divide evenly by `num_heads`");
#ifndef NDEBUG
  const auto B = query.is_nested()
      ? get_nested_tensor_impl(query)->get_nested_sizes().size(0)
      : query.sizes()[0];
  auto T = query.is_nested() ? 0 : query.sizes()[1];
#endif
  const auto dim_per_head = D / num_head;
  // Self-attention fast path: q/k/v are the same tensor, head dim suits the
  // fused kernels, and attention weights are not requested.
  if ((query.is_same(key) && key.is_same(value)) && dim_per_head % 8 == 0 && !need_weights) {
    // We have not done linear projection yet but the input for SDP
    // Is expected to be 4 dimensional. We "cheaply" create view tensors
    // That will then be used for checking hot path conditions with select_sd_backend
    auto q = query.view({query.size(0), -1, num_head, dim_per_head}).transpose(1, 2);
    auto k = key.view({key.size(0), -1, num_head, dim_per_head}).transpose(1, 2);
    auto v = value.view({value.size(0), -1, num_head, dim_per_head}).transpose(1, 2);
    sdp::sdp_params kernel_params{q, k, v, mask.has_value(), 0.0, false};
    auto backend = select_sdp_backend(kernel_params);
    // strides from packed projection for nested tensors when seq_len is 1 will be
    // and will trigger a contiguous call in the kernel, so we prevent this
    bool no_seq_len_1_nested = query.is_nested() ? check_for_seq_len_1_nested_tensor(kernel_params, false) : true;
    if (no_seq_len_1_nested &&
        (backend == sdp::SDPBackend::flash_attention || backend == sdp::SDPBackend::efficient_attention)) {
      auto x = at::linear(query, qkv_weight, qkv_bias);
      // Split the packed projection into per-head q/k/v views.
      auto chunks = x.chunk(3, -1);
      auto x_size_0 = x.size(0);
      chunks[0] = (chunks[0].view({x_size_0, -1, num_head, dim_per_head}))
                      .transpose(1, 2);
      chunks[1] = (chunks[1].view({x_size_0, -1, num_head, dim_per_head}))
                      .transpose(1, 2);
      chunks[2] = (chunks[2].view({x_size_0, -1, num_head, dim_per_head}))
                      .transpose(1, 2);
      auto y = at::scaled_dot_product_attention(
          chunks[0], chunks[1], chunks[2], mask, 0.0, false, c10::nullopt);
      auto past_sdp = y.transpose(1, 2).reshape({x_size_0, -1, embed_dim});
      return std::make_tuple(
          at::linear(past_sdp, proj_weight, proj_bias), Tensor());
    }
    // Returned math or error lets not use it
  }
  // Fallback path: explicit projection + softmax attention.
  // shape: [B, T, 3 x D]
  auto qkv = qkv_projection(query, key, value, embed_dim, qkv_weight);
  if (!qkv.is_nested() && qkv.numel() == 0) {
    if (query.is_nested()) {
      return std::make_tuple(Tensor(), Tensor());
    }
    return std::make_tuple(at::empty_like(query), Tensor());
  }
#ifndef NDEBUG
  if (!query.is_nested() || !qkv.is_nested()) {
    if (query.is_nested()) {
      T = qkv.size(1);
    }
    debug_assert_shape(__LINE__, qkv, {B, T, 3 * D});
  }
#endif
#ifdef DEBUG_PRINT_EACH_STEP
  if (!qkv.is_nested()) {
    std::cerr << "qkv: " << qkv << std::endl;
  }
#endif
  // shape: 3 x [B, num_head, T, dim_per_head]
  auto q_k_v = _transform_bias_rescale_qkv(qkv, qkv_bias, num_head);
  qkv = Tensor(); // Not used any more, allow free
  auto& q = std::get<0>(q_k_v);
  const auto& k = std::get<1>(q_k_v);
  const auto& v = std::get<2>(q_k_v);
#ifndef NDEBUG
  debug_assert_shape(__LINE__, q, {B, num_head, T, dim_per_head});
  debug_assert_shape(__LINE__, k, {B, num_head, T, dim_per_head});
  debug_assert_shape(__LINE__, v, {B, num_head, T, dim_per_head});
#endif
#ifdef DEBUG_PRINT_EACH_STEP
  std::cerr << "q: " << q << std::endl;
  std::cerr << "k: " << k << std::endl;
  std::cerr << "v: " << v << std::endl;
#endif
  // shape: [B, num_head, T, T]
  auto qkt = bmm_nt(q, k);
  // q & k are dead but cannot be freed because they were packed with v
#ifndef NDEBUG
  debug_assert_shape(__LINE__, qkt, {B, num_head, T, T});
#endif
#ifdef DEBUG_PRINT_EACH_STEP
  std::cerr << "qkt: " << qkt << std::endl;
#endif
  // shape: [B, num_head, T, T]
  // TODO: long-term, have a kernel that works with
  // NestedTensor directly if there is no mask passed
  qkt = masked_softmax(qkt, mask, query, mask_type);
#ifdef DEBUG_PRINT_EACH_STEP
  std::cerr << "qkt after softmax: " << qkt << std::endl;
#endif
  // shape: [B, num_head, T, dim_per_head]
  // reuse storage for q; we're done with it
  auto attn_ctx = bmm_nn(q, qkt, v);
  // qkv is not dead; we just reused storage for q!
  if (!need_weights) {
    qkt = Tensor();
  }
#ifndef NDEBUG
  debug_assert_shape(__LINE__, attn_ctx, {B, num_head, T, dim_per_head});
#endif
#ifdef DEBUG_PRINT_EACH_STEP
  std::cerr << "attn_ctx: " << attn_ctx << std::endl;
#endif
  // shape: [B, T, D]
  // Fuse transform_0213 inside
  auto proj = transform0213_gemm_nt_bias(
      attn_ctx, proj_weight, proj_bias, query);
#ifndef NDEBUG
  debug_assert_shape(__LINE__, proj, {B, T, D});
#endif
  if (need_weights && average_attn_weights) {
    // weights are not needed for full transformer, so don't worry too
    // much about performance -- we implement this just to make use
    // cases that don't disable need_weights still get some speedup.
    qkt = qkt.sum(1);
    qkt /= num_head;
  }
  return std::make_tuple(std::move(proj), std::move(qkt));
}
// Dense-input entry point for FlashAttention: reshapes (B, H, S, E) inputs
// into the varlen "nnz" layout expected by _flash_attention_forward, builds
// the cumulative sequence-length tensors, and restores the layout afterward.
std::tuple<Tensor, Tensor, Tensor, Tensor, int64_t, int64_t, Tensor, Tensor, Tensor> _scaled_dot_product_flash_attention_cuda(
    const Tensor& query,
    const Tensor& key,
    const Tensor& value,
    double dropout_p,
    bool is_causal,
    bool return_debug_mask,
    c10::optional<double> scale) {
  // Used for tracking usage statistics
  C10_LOG_API_USAGE_ONCE("torch.sdpa.flash_attention");
  // Inputs arrive as (Batch x Num_heads x Seq_len x Dim_per_head).
  const int64_t batch_size = query.size(0);
  const int64_t num_heads = query.size(1);
  const int64_t max_seqlen_batch_q = query.size(2);
  const int64_t head_dim = query.size(3);
  const int64_t max_seqlen_batch_k = key.size(2);
  const int64_t max_seqlen_batch_v = value.size(2);
  TORCH_CHECK(
      max_seqlen_batch_k == max_seqlen_batch_v,
      "Key and Value must have the same sequence length");
  // The kernel wants (Batch x Seq_len x Num_heads x Dim_per_head).
  Tensor q_t = query.transpose(1, 2);
  Tensor k_t = key.transpose(1, 2);
  Tensor v_t = value.transpose(1, 2);
  // Dense inputs: every example has the full sequence length, so cumulative
  // lengths are just multiples of the per-batch max.
  const auto int_cuda_opts = TensorOptions().device(at::kCUDA).dtype(at::kInt);
  Tensor cumulative_sequence_length_q = at::arange(
      0,
      (batch_size + 1) * max_seqlen_batch_q,
      max_seqlen_batch_q,
      int_cuda_opts);
  Tensor cumulative_sequence_length_k = at::arange(
      0,
      (batch_size + 1) * max_seqlen_batch_k,
      max_seqlen_batch_k,
      int_cuda_opts);
  const int64_t Nnz_q = batch_size * max_seqlen_batch_q;
  const int64_t Nnz_kv = batch_size * max_seqlen_batch_k;
  // Collapse batch and sequence into the "nnz" dimension; for the standard
  // MHA case these reshapes are views.
  Tensor query_reshaped = q_t.reshape({Nnz_q, num_heads, head_dim});
  Tensor key_reshaped = k_t.reshape({Nnz_kv, num_heads, head_dim});
  Tensor value_reshaped = v_t.reshape({Nnz_kv, num_heads, head_dim});
  auto [attention, log_sumexp, philox_seed, philox_offset, debug_attn_mask] =
      at::_flash_attention_forward(
          query_reshaped,
          key_reshaped,
          value_reshaped,
          cumulative_sequence_length_q,
          cumulative_sequence_length_k,
          max_seqlen_batch_q,
          max_seqlen_batch_k,
          dropout_p,
          is_causal,
          return_debug_mask,
          scale);
  // Un-collapse nnz back to (batch, seq) and restore the head-major layout.
  attention =
      attention.view({batch_size, max_seqlen_batch_q, num_heads, head_dim})
          .transpose(1, 2);
  return std::make_tuple(
      attention,
      log_sumexp,
      cumulative_sequence_length_q,
      cumulative_sequence_length_k,
      max_seqlen_batch_q,
      max_seqlen_batch_k,
      philox_seed,
      philox_offset,
      debug_attn_mask);
}
// Memory-efficient attention implementation of scaled_dot_product_attention.
// Transposes the [B, H, S, D] inputs into the [B, S, H, D] layout consumed by
// _efficient_attention_forward, forwards the causal-mask choice, and
// transposes the attention result back for the caller.
std::tuple<Tensor, Tensor> _scaled_dot_product_efficient_attention_cuda(
    const Tensor& query,
    const Tensor& key,
    const Tensor& value,
    bool compute_log_sumexp,
    bool is_causal,
    c10::optional<double> scale) {
  // Used for tracking usage statistics
  C10_LOG_API_USAGE_ONCE("torch.sdpa.mem_efficient_attention");
  // Query -> Query(Batch x Q_seq_len x Num_heads x Dim_per_head)
  // Key -> Key(Batch x KV_seq_len x Num_heads x Dim_per_head)
  // Value -> Value(Batch x KV_seq_len x Num_heads x Dim_per_head)
  Tensor query_t = query.transpose(1, 2);
  Tensor key_t = key.transpose(1, 2);
  Tensor value_t = value.transpose(1, 2);
  // Causal masking is expressed as a custom mask type for the kernel.
  sdp::CustomMaskType mask_type = sdp::CustomMaskType::NoCustomMask;
  if (is_causal) {
    mask_type = sdp::CustomMaskType::CausalFromTopLeft;
  }
  Tensor attention;
  Tensor log_sumexp;
  std::tie(attention, log_sumexp) = at::_efficient_attention_forward(
      query_t,
      key_t,
      value_t,
      c10::nullopt,
      c10::nullopt,
      c10::nullopt,
      c10::nullopt,
      0.0 /*dropout_p*/,
      static_cast<int64_t>(mask_type),
      compute_log_sumexp,
      scale);
  // Back to [Batch, Num_heads, Q_seq_len, Dim_per_head].
  attention = attention.transpose(1, 2);
  return std::make_tuple(std::move(attention), std::move(log_sumexp));
}
// Choose which fused scaled-dot-product-attention backend to use for the
// given inputs. Returns the selected sdp::SDPBackend cast to int64_t; throws
// when every backend has been ruled out by the selector.
int64_t _fused_sdp_choice_cuda(const Tensor& query_, const Tensor& key, const Tensor& value,
    const c10::optional<Tensor>& attn_mask_, double dropout_p, bool is_causal, c10::optional<double> scale){
  // Bundle the inputs into the parameter struct the selector inspects.
  sdp::sdp_params kernel_params{query_, key, value, attn_mask_.has_value(), dropout_p, is_causal};
  const auto chosen_backend = select_sdp_backend(kernel_params);
  if (chosen_backend == sdp::SDPBackend::error) {
    TORCH_CHECK(
        false,
        "No viable backend for scaled_dot_product_attention was found. ",
        "This is likely due to turning off both the math kernel and the fused kernels.");
  }
  return static_cast<int64_t>(chosen_backend);
}
// Thin wrapper over the fmha::mha_fwd flash-attention kernel.
// Inputs are in "varlen" layout: (total_tokens, num_heads, head_dim) with
// cumulative_sequence_length_{q,k} giving per-batch token offsets.
// Returns (output, logsumexp, philox_seed, philox_offset, debug_attn_mask);
// debug_attn_mask is an empty tensor unless return_debug_mask is set.
std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor> _flash_attention_forward(
const Tensor& query,
const Tensor& key,
const Tensor& value,
const Tensor& cumulative_sequence_length_q,
const Tensor& cumulative_sequence_length_k,
const int64_t max_seqlen_batch_q,
const int64_t max_seqlen_batch_k,
double dropout_p,
bool is_causal,
bool return_debug_mask,
c10::optional<double> scale) {
#if defined(USE_FLASH_ATTENTION)
/*
num_splits determines how much to parallelize over the seqlen_q dimension
num_splits=0 means
it will be set by an internal heuristic. We're exposing num_splits mostly for
benchmarking. We will hard code it to 0 for now
*/
constexpr int num_splits{0};
const auto softmax_scale = sdp::calculate_scale(query, scale).as_float_unchecked();
// The kernel writes its result into a caller-allocated buffer shaped like query.
at::Tensor output = at::empty_like(query);
Tensor logsumexp, debug_attn_mask, philox_seed, philox_offset;
std::tie(logsumexp, philox_seed, philox_offset, debug_attn_mask) = fmha::mha_fwd(
query,
key,
value,
output,
cumulative_sequence_length_q,
cumulative_sequence_length_k,
max_seqlen_batch_q,
max_seqlen_batch_k,
dropout_p,
softmax_scale,
false, /*zero_tensors = false for all calls here*/
is_causal,
return_debug_mask, /*return_softmax (this is used for testing)*/
num_splits);
debug_attn_mask = return_debug_mask ? debug_attn_mask : at::empty({0}, query.options());
return std::make_tuple(output, logsumexp, philox_seed, philox_offset, debug_attn_mask);
#endif
// Reached only when flash attention support was compiled out.
TORCH_CHECK(false, "USE_FLASH_ATTENTION was not enabled for build.")
return std::make_tuple(Tensor(), Tensor(), Tensor(), Tensor(), Tensor());
}
// Memory-efficient attention forward pass (CUTLASS-based kernels).
// Supports dense [B, S, H, K] inputs, or "1MHK"/varlen mode where batch==1
// and seqstart_q/seqstart_k give per-batch token offsets. Dispatches over
// dtype and compute capability, trying candidate kernels until one whose
// dropout/bias/alignment/shared-memory constraints are all satisfied.
// Returns (attention result, logsumexp); logsumexp has 0 inner extent when
// compute_logsumexp is false.
std::tuple<at::Tensor, at::Tensor> _efficient_attention_forward(
const at::Tensor& query, // [b, seqlen, num_heads, K]
const at::Tensor& key, // [b, seqlen, num_heads, K]
const at::Tensor& value, // [b, seqlen, num_heads, Kv]
const c10::optional<at::Tensor>& bias, // [b, num_heads, seqlen, seqlen]
// (Mode 1MHK only) [b+1]: cu_seqlens_q[b] contains the
// position of the first query token for batch $b
const c10::optional<at::Tensor>& seqstart_q,
// (Mode 1MHK only) [b+1]: cu_seqlen_k[b] contains the
// position of the first key token for batch $b
const c10::optional<at::Tensor>& seqstart_k,
// (Mode 1MHK only) Maximum sequence length across batches
const c10::optional<int64_t> max_seqlen_q_,
double dropout_p, // attention matrix dropout probability
int64_t custom_mask_type,
bool compute_logsumexp,
c10::optional<double> scale,
const c10::optional<at::Tensor>& causal_diagonal,
const c10::optional<at::Tensor>& seqlen_k) {
#if defined(USE_FLASH_ATTENTION)
// TODO In theory it is possible to compile with _CUDA_ARCH < 5.0 and run on a
// machine that is >= 5.0. In practice, this is not a problem but since
// this would avoid runtime architecture checks, we should look into it
TORCH_CHECK(query.dim() == 4);
TORCH_CHECK(key.dim() == 4);
TORCH_CHECK(value.dim() == 4);
// Batch sizes
TORCH_CHECK(query.size(0) == key.size(0));
TORCH_CHECK(query.size(0) == value.size(0));
// Sequence length
TORCH_CHECK(key.size(1) == value.size(1));
// Num heads
TORCH_CHECK(query.size(2) == key.size(2));
TORCH_CHECK(query.size(2) == value.size(2));
// Embedding per head
TORCH_CHECK(query.size(3) == key.size(3));
// TODO_DRISS we should return max_seqlen_k;
int64_t max_seqlen_q, max_seqlen_k;
TORCH_CHECK(seqstart_q.has_value() == seqstart_k.has_value());
if (seqstart_q.has_value()) {
// Varlen (1MHK) mode: per-batch offsets must be int32, 1-D, contiguous.
TORCH_CHECK(seqstart_q->scalar_type() == at::ScalarType::Int);
TORCH_CHECK(seqstart_k->scalar_type() == at::ScalarType::Int);
TORCH_CHECK(seqstart_q->dim() == 1 && seqstart_k->dim() == 1);
CHECK_NOSPARSE_CONTIGUOUS_CUDA((*seqstart_q));
CHECK_NOSPARSE_CONTIGUOUS_CUDA((*seqstart_k));
TORCH_CHECK(seqstart_q->size(0) == seqstart_k->size(0));
TORCH_CHECK(query.size(0) == 1, "cu_seqlen only supports batch_size=1");
TORCH_CHECK(max_seqlen_q_.has_value());
max_seqlen_q = *max_seqlen_q_;
max_seqlen_k = 0; // Will be set inside the kernel
} else {
// Dense mode: sequence lengths come straight from the tensor shapes.
max_seqlen_q = query.size(1);
max_seqlen_k = key.size(1);
}
CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(query);
CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(key);
CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(value);
at::cuda::CUDAGuard device_guard(query.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
int64_t B = query.size(0);
int64_t M = query.size(1);
int64_t N = key.size(1);
int64_t num_heads = query.size(-2);
int64_t K = query.size(-1);
int64_t Kv = value.size(-1);
at::Tensor res;
at::Tensor logsumexp;
// Any non-zero dropout probability counts as "dropout enabled".
const bool use_dropout = std::fpclassify(dropout_p) != FP_ZERO;
at::PhiloxCudaState rng_engine_inputs;
if (use_dropout) {
at::CUDAGeneratorImpl* gen =
at::get_generator_or_default<at::CUDAGeneratorImpl>(
c10::nullopt, at::cuda::detail::getDefaultCUDAGenerator());
std::lock_guard<std::mutex> lock(gen->mutex_);
// if using dropout, we produce 1 random number for each element of the
// attention tensor
rng_engine_inputs = gen->philox_cuda_state(B * num_heads * M * N);
}
cudaDeviceProp* p = at::cuda::getDeviceProperties(query.device().index());
const int computeCapability = p->major * 10 + p->minor;
bool kernel_launched = false;
const auto maxShmem = p->sharedMemPerBlockOptin;
// Called once per candidate kernel template during dispatch below; the first
// candidate passing every compatibility check launches, and subsequent
// invocations become no-ops via the kernel_launched flag.
auto launchKernel = [&](auto _k, auto kernel_fn) {
using Kernel = decltype(_k);
using scalar_t = typename Kernel::scalar_t;
(void)_k;
if (kernel_launched) {
return;
}
// Check if this kernel is compatible
if (!Kernel::kSupportsDropout && use_dropout) {
return;
}
if (!Kernel::kSupportsBias && bias.has_value()) {
return;
}
if (value.size(3) > Kernel::kMaxK || key.size(3) > Kernel::kMaxK) {
return;
}
// Alignment
if ((query.stride(2) % Kernel::kAlignmentQ) ||
(key.stride(2) % Kernel::kAlignmentK) ||
(value.stride(2) % Kernel::kAlignmentV)) {
return;
}
// Uses too much shmem
size_t smem_bytes = sizeof(typename Kernel::SharedStorage);
if (smem_bytes > maxShmem) {
return;
}
kernel_launched = true;
res = at::empty(
{B, M, num_heads, Kv},
query.options().dtype(
CutlassToAtenDtype<typename Kernel::output_t>::atScalarType()));
// NOTE: Should be aligned (by padding) in case M is
// not a good number for loading during backward
constexpr decltype(M) kAlignLSE = Kernel::kAlignLSE;
logsumexp = at::empty(
{seqstart_q.has_value() ? seqstart_q->size(0) - 1 : B,
num_heads,
compute_logsumexp ? ceil_div(max_seqlen_q, kAlignLSE) * kAlignLSE : 0},
query.options().dtype(at::ScalarType::Float));
typename Kernel::Params p;
p.query_ptr = (scalar_t*)query.data_ptr();
p.key_ptr = (scalar_t*)key.data_ptr();
p.value_ptr = (scalar_t*)value.data_ptr();
p.logsumexp_ptr = compute_logsumexp
? (typename Kernel::lse_scalar_t*)logsumexp.data_ptr()
: nullptr;
at::Tensor output_accum;
if (Kernel::kNeedsOutputAccumulatorBuffer) {
output_accum = at::empty(
{B, M, num_heads, Kv},
query.options().dtype(
CutlassToAtenDtype<
typename Kernel::output_accum_t>::atScalarType()));
p.output_accum_ptr =
(typename Kernel::output_accum_t*)output_accum.data_ptr();
} else {
p.output_accum_ptr = nullptr;
}
p.output_ptr = (typename Kernel::output_t*)res.data_ptr();
if (seqstart_q.has_value()) {
p.seqstart_q_ptr = (int32_t*)seqstart_q->data_ptr();
p.seqstart_k_ptr = (int32_t*)seqstart_k->data_ptr();
}
p.num_heads = num_heads;
p.head_dim = query.size(3);
p.head_dim_value = value.size(3);
p.num_queries = max_seqlen_q;
p.num_keys = max_seqlen_k;
p.num_batches = seqstart_q.has_value() ? seqstart_q->size(0) - 1 : B;
p.custom_mask_type = custom_mask_type;
p.causal_diagonal_ptr = nullptr;
if (causal_diagonal.has_value()) {
CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(causal_diagonal.value());
TORCH_CHECK(causal_diagonal->scalar_type() == at::ScalarType::Int);
p.causal_diagonal_ptr = (int32_t*)causal_diagonal->data_ptr();
}
p.seqlen_k_ptr = nullptr;
if (seqlen_k.has_value()) {
CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(seqlen_k.value());
TORCH_CHECK(seqlen_k->scalar_type() == at::ScalarType::Int);
p.seqlen_k_ptr = (int32_t*)seqlen_k->data_ptr();
}
p.scale = sdp::calculate_scale(query, scale).as_float_unchecked();
ASSIGN_CHECK_OVERFLOW(p.q_strideB, query.stride(0));
ASSIGN_CHECK_OVERFLOW(p.k_strideB, key.stride(0));
ASSIGN_CHECK_OVERFLOW(p.v_strideB, value.stride(0));
ASSIGN_CHECK_OVERFLOW(p.q_strideM, query.stride(1));
ASSIGN_CHECK_OVERFLOW(p.k_strideM, key.stride(1));
ASSIGN_CHECK_OVERFLOW(p.v_strideM, value.stride(1));
ASSIGN_CHECK_OVERFLOW(p.q_strideH, query.stride(2));
ASSIGN_CHECK_OVERFLOW(p.k_strideH, key.stride(2));
ASSIGN_CHECK_OVERFLOW(p.v_strideH, value.stride(2));
ASSIGN_CHECK_OVERFLOW(p.o_strideM, res.stride(1));
if (bias.has_value()) {
CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA((*bias));
TORCH_CHECK(
bias->scalar_type() == CutlassToAtenDtype<scalar_t>::atScalarType(),
"invalid dtype for bias - should match query's dtype");
p.attn_bias_ptr = (scalar_t*)bias->data_ptr();
// assign strides for bias, viewed as
// (batch_sz, n_heads, n_queries, n_keys)
const at::Tensor bias_4d_view =
get_bias_4d_view(*bias, B, num_heads, M, N);
ASSIGN_CHECK_OVERFLOW(p.bias_strideB, bias_4d_view.stride(0));
ASSIGN_CHECK_OVERFLOW(p.bias_strideH, bias_4d_view.stride(1));
ASSIGN_CHECK_OVERFLOW(p.bias_strideM, bias_4d_view.stride(2));
}
p.use_dropout = use_dropout;
if (p.use_dropout) {
p.rng_engine_inputs = rng_engine_inputs;
p.dropout_prob = dropout_p;
}
// Dynamic shared memory above 48 KB (0xc000) requires an explicit opt-in.
if (smem_bytes > 0xc000) {
auto err = cudaFuncSetAttribute(
kernel_fn, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_bytes);
TORCH_CHECK(
err != cudaErrorInvalidValue,
"This GPU does not have enough shared-memory (kernel requires ",
smem_bytes / 1024,
" kb)");
AT_CUDA_CHECK(err);
}
Kernel::check_supported(p);
kernel_fn<<<p.getBlocksGrid(), p.getThreadsGrid(), smem_bytes, stream>>>(p);
};
// Dispatch to the right kernel
DISPATCH_TYPES(query, ([&]() {
dispatch_cutlassF<scalar_t>(launchKernel, computeCapability);
}));
TORCH_CHECK(kernel_launched, "cutlassF: no kernel found to launch!");
AT_CUDA_CHECK(cudaGetLastError());
// !!TODO_DRISS: We are throwing this away for now and need to change how its done
// uint64_t -> int64_t bitwise casting as PyTorch don't support uint64_t
// so just fake it as a int64_t
int64_t seed, offset;
// NOTE(review): seed/offset are extracted here but never returned or used —
// dead until the TODO above is resolved.
if (use_dropout) {
std::memcpy(&seed, &rng_engine_inputs.seed_, sizeof(seed));
std::memcpy(&offset, &rng_engine_inputs.offset_.val, sizeof(offset));
}
return std::make_tuple(res, logsumexp);
#endif
TORCH_CHECK(false, "USE_FLASH_ATTENTION was not enabled for build.")
return std::make_tuple(Tensor{}, Tensor{});
}
// Placeholder for the Triton-based SDPA kernel; the Python frontend is
// expected to override this op, so reaching this C++ body is an error.
Tensor triton_scaled_dot_attention(const Tensor& q, const Tensor& k, const Tensor& v, double dropout_p){
TORCH_CHECK(false, "This operator should be overridden in python before use");
return at::Tensor();
}
// Route the device-generic _fused_sdp_choice stub to the CUDA implementation.
REGISTER_CUDA_DISPATCH(_fused_sdp_choice_stub, &_fused_sdp_choice_cuda);
// !!This function is deprecated. See FunctionsManual.cpp for the implementation!!
bool _chunk_grad_outputs_efficient_attention(
    const Tensor& query,
    const Tensor& key,
    const Tensor& value,
    bool is_causal) {
  // Sequence lengths (dim 2 of the [B, H, S, D] inputs).
  const int64_t q_len = query.size(2);
  const int64_t kv_len = key.size(2);
  // Causal masking with more keys than queries requires grad_k/grad_v
  // initialization, which disqualifies the chunked fast path.
  if (is_causal && kv_len > q_len) {
    return false;
  }
  // Chunking also needs matching q/k sequence lengths and matching
  // query/value head dims ...
  if (q_len != kv_len) {
    return false;
  }
  if (query.size(3) != value.size(3)) {
    return false;
  }
  // ... and q/k/v must all alias one packed storage.
  return query.storage().is_alias_of(key.storage()) &&
      query.storage().is_alias_of(value.storage());
}
} // namespace native
} // namespace at
|
072975683abf31c5a02f10281c21fb1f7ad57eab.hip | // !!! This is a file automatically generated by hipify!!!
#define GLM_FORCE_CUDA
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "matrix_math.h"
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
/**
* Check for CUDA errors; print and exit if there was a problem.
*/
/**
 * Check for a pending HIP/CUDA error; if one exists, print it to stderr
 * (prefixed with the offending source line when line >= 0) and terminate.
 */
void checkCUDAError(const char *msg, int line = -1) {
    hipError_t err = hipGetLastError();
    if (hipSuccess == err) {
        return;
    }
    if (line >= 0) {
        fprintf(stderr, "Line %d: ", line);
    }
    fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err));
    exit(EXIT_FAILURE);
}
// Global device buffers shared by the MatrixCalc operations: the two input
// matrices and the result, (re)allocated by initMats() and freed by freeMats().
float *dev_matA;
float *dev_matB;
float *dev_matC;
/**
 * Allocate the three device buffers for a matWidth x matWidth float matrix
 * computation and upload the two host inputs.
 * hst_matA / hst_matB: host inputs, matWidth*matWidth floats each.
 * Pairs with freeMats(); each call re-allocates the global device pointers.
 */
void MatrixCalc::initMats(float *hst_matA,float *hst_matB,int matWidth)
{
    int size = matWidth*matWidth*sizeof(float);
    hipMalloc((void**)&dev_matA, size);
    checkCUDAErrorWithLine("hipMalloc dev_matA failed!");
    hipMalloc((void**)&dev_matB, size);
    checkCUDAErrorWithLine("hipMalloc dev_matB failed!");
    hipMemcpy(dev_matA,hst_matA,size,hipMemcpyHostToDevice);
    checkCUDAErrorWithLine("hipMemcpy hst_matA to dev_matA failed!");
    hipMemcpy(dev_matB, hst_matB, size, hipMemcpyHostToDevice);
    checkCUDAErrorWithLine("hipMemcpy hst_matB to dev_matB failed!");
    hipMalloc((void**)&dev_matC, size);
    // Bug fix: this message previously said "dev_matB" (copy-paste error).
    checkCUDAErrorWithLine("hipMalloc dev_matC failed!");
}
// Element-wise sum: matC = matA + matB for a width x width float matrix.
// Expects a single block launched with blockDim = (width, width).
__global__ void kernMatAdd(float *matA,float *matB,float *matC,int width)
{
    int row = threadIdx.x;
    int col = threadIdx.y;
    int flat = row * width + col;
    matC[flat] = matA[flat] + matB[flat];
}
void MatrixCalc::mat_add(float*A, float*B, float*C,int width)
{
initMats(A,B,width);//todo later: 5
dim3 threadsPerBlock(width, width);
hipLaunchKernelGGL(( kernMatAdd), dim3(1),dim3(threadsPerBlock), 0, 0, dev_matA,dev_matB,dev_matC,width);
hipMemcpy(C,dev_matC,width*width*sizeof(float),hipMemcpyDeviceToHost);
freeMats();
}
// Element-wise difference: matC = matA - matB for a width x width matrix.
// Expects a single block launched with blockDim = (width, width).
__global__ void kernMatSub(float *matA, float *matB, float *matC, int width)
{
    int row = threadIdx.x;
    int col = threadIdx.y;
    int flat = row * width + col;
    matC[flat] = matA[flat] - matB[flat];
}
void MatrixCalc::mat_sub(float*A, float*B, float*C, int width)
{
initMats(A, B, width);//todo later: 5
dim3 threadsPerBlock(width, width);
hipLaunchKernelGGL(( kernMatSub) , dim3(1), dim3(threadsPerBlock) , 0, 0, dev_matA, dev_matB, dev_matC, width);
hipMemcpy(C, dev_matC, width*width*sizeof(float), hipMemcpyDeviceToHost);
freeMats();
}
// Naive dense multiply: matC = matA x matB (row-major, width x width).
// One thread computes one output element; expects a single block with
// blockDim = (width, width).
__global__ void kernMatMul(float *matA, float *matB, float *matC, int width)
{
    int row = threadIdx.x;
    int col = threadIdx.y;
    float acc = 0;
    for (int k = 0; k < width; k++)
    {
        acc += matA[row * width + k] * matB[k * width + col];
    }
    matC[row * width + col] = acc;
}
void MatrixCalc::mat_mul(float*A, float*B, float*C, int width)
{
initMats(A, B, width);
dim3 threadsPerBlock(width, width);
hipLaunchKernelGGL(( kernMatMul) , dim3(1), dim3(threadsPerBlock) , 0, 0, dev_matA, dev_matB, dev_matC, width);
//kernMatMul << <5, 5 >> >(dev_matA, dev_matB, dev_matC, width);
hipMemcpy(C, dev_matC, width*width*sizeof(float), hipMemcpyDeviceToHost);
freeMats();
}
// Release the three device-side matrix buffers allocated by initMats().
void MatrixCalc::freeMats()
{
hipFree(dev_matA);
hipFree(dev_matB);
hipFree(dev_matC);
} | 072975683abf31c5a02f10281c21fb1f7ad57eab.cu | #define GLM_FORCE_CUDA
#include <stdio.h>
#include <cuda.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "matrix_math.h"
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
/**
* Check for CUDA errors; print and exit if there was a problem.
*/
/**
 * Check for a pending CUDA error; if one exists, print it to stderr
 * (prefixed with the offending source line when line >= 0) and terminate.
 */
void checkCUDAError(const char *msg, int line = -1) {
    cudaError_t err = cudaGetLastError();
    if (cudaSuccess == err) {
        return;
    }
    if (line >= 0) {
        fprintf(stderr, "Line %d: ", line);
    }
    fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
    exit(EXIT_FAILURE);
}
// Global device buffers shared by the MatrixCalc operations: the two input
// matrices and the result, (re)allocated by initMats() and freed by freeMats().
float *dev_matA;
float *dev_matB;
float *dev_matC;
/**
 * Allocate the three device buffers for a matWidth x matWidth float matrix
 * computation and upload the two host inputs.
 * hst_matA / hst_matB: host inputs, matWidth*matWidth floats each.
 * Pairs with freeMats(); each call re-allocates the global device pointers.
 */
void MatrixCalc::initMats(float *hst_matA,float *hst_matB,int matWidth)
{
    int size = matWidth*matWidth*sizeof(float);
    cudaMalloc((void**)&dev_matA, size);
    checkCUDAErrorWithLine("cudaMalloc dev_matA failed!");
    cudaMalloc((void**)&dev_matB, size);
    checkCUDAErrorWithLine("cudaMalloc dev_matB failed!");
    cudaMemcpy(dev_matA,hst_matA,size,cudaMemcpyHostToDevice);
    checkCUDAErrorWithLine("cudaMemcpy hst_matA to dev_matA failed!");
    cudaMemcpy(dev_matB, hst_matB, size, cudaMemcpyHostToDevice);
    checkCUDAErrorWithLine("cudaMemcpy hst_matB to dev_matB failed!");
    cudaMalloc((void**)&dev_matC, size);
    // Bug fix: this message previously said "dev_matB" (copy-paste error).
    checkCUDAErrorWithLine("cudaMalloc dev_matC failed!");
}
// Element-wise sum: matC = matA + matB for a width x width float matrix.
// Expects a single block launched with blockDim = (width, width).
__global__ void kernMatAdd(float *matA,float *matB,float *matC,int width)
{
    int row = threadIdx.x;
    int col = threadIdx.y;
    int flat = row * width + col;
    matC[flat] = matA[flat] + matB[flat];
}
/**
 * C = A + B for width x width float matrices (host-side entry point).
 * Launches a single block of width x width threads, so width*width must stay
 * within the per-block thread limit (1024 on current hardware, i.e. width <= 32).
 * Added error checks after the launch and copy-back, matching initMats().
 */
void MatrixCalc::mat_add(float*A, float*B, float*C,int width)
{
    initMats(A,B,width);//todo later: 5
    dim3 threadsPerBlock(width, width);
    kernMatAdd<<<1,threadsPerBlock>>>(dev_matA,dev_matB,dev_matC,width);
    checkCUDAErrorWithLine("kernMatAdd launch failed!");
    cudaMemcpy(C,dev_matC,width*width*sizeof(float),cudaMemcpyDeviceToHost);
    checkCUDAErrorWithLine("cudaMemcpy dev_matC to C failed!");
    freeMats();
}
// Element-wise difference: matC = matA - matB for a width x width matrix.
// Expects a single block launched with blockDim = (width, width).
__global__ void kernMatSub(float *matA, float *matB, float *matC, int width)
{
    int row = threadIdx.x;
    int col = threadIdx.y;
    int flat = row * width + col;
    matC[flat] = matA[flat] - matB[flat];
}
/**
 * C = A - B for width x width float matrices (host-side entry point).
 * Launches a single block of width x width threads, so width*width must stay
 * within the per-block thread limit (1024 on current hardware, i.e. width <= 32).
 * Added error checks after the launch and copy-back, matching initMats().
 */
void MatrixCalc::mat_sub(float*A, float*B, float*C, int width)
{
    initMats(A, B, width);//todo later: 5
    dim3 threadsPerBlock(width, width);
    kernMatSub <<<1, threadsPerBlock >>>(dev_matA, dev_matB, dev_matC, width);
    checkCUDAErrorWithLine("kernMatSub launch failed!");
    cudaMemcpy(C, dev_matC, width*width*sizeof(float), cudaMemcpyDeviceToHost);
    checkCUDAErrorWithLine("cudaMemcpy dev_matC to C failed!");
    freeMats();
}
// Naive dense multiply: matC = matA x matB (row-major, width x width).
// One thread computes one output element; expects a single block with
// blockDim = (width, width).
__global__ void kernMatMul(float *matA, float *matB, float *matC, int width)
{
    int row = threadIdx.x;
    int col = threadIdx.y;
    float acc = 0;
    for (int k = 0; k < width; k++)
    {
        acc += matA[row * width + k] * matB[k * width + col];
    }
    matC[row * width + col] = acc;
}
/**
 * C = A x B for width x width float matrices (host-side entry point).
 * Launches a single block of width x width threads, so width*width must stay
 * within the per-block thread limit (1024 on current hardware, i.e. width <= 32).
 * Added error checks after the launch and copy-back, matching initMats();
 * dropped a stale commented-out debug launch.
 */
void MatrixCalc::mat_mul(float*A, float*B, float*C, int width)
{
    initMats(A, B, width);
    dim3 threadsPerBlock(width, width);
    kernMatMul <<<1, threadsPerBlock >>>(dev_matA, dev_matB, dev_matC, width);
    checkCUDAErrorWithLine("kernMatMul launch failed!");
    cudaMemcpy(C, dev_matC, width*width*sizeof(float), cudaMemcpyDeviceToHost);
    checkCUDAErrorWithLine("cudaMemcpy dev_matC to C failed!");
    freeMats();
}
// Release the three device-side matrix buffers allocated by initMats().
void MatrixCalc::freeMats()
{
cudaFree(dev_matA);
cudaFree(dev_matB);
cudaFree(dev_matC);
}
943f852e5654126ff27e2fd76efb689993f60006.hip | // !!! This is a file automatically generated by hipify!!!
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author raver119@gmail.com
// @author Yurii Shyrma (iuriish@yahoo.com)
//
#include <exceptions/cuda_exception.h>
#include <rocblas.h>
#include <specials_cuda.h>
#include <op_boilerplate.h>
#include <types/float16.h>
#include <ops/declarable/helpers/batched_gemm.h>
#include <PointersManager.h>
namespace nd4j {
namespace ops {
namespace helpers {
//////////////////////////////////////////////////////////////////////////////
// bsxMXK x bSxKxN = bSxMxN
// Batched GEMM on device: for each batch index i computes
// vC[i] = alphas[0] * op(vA[i]) * op(vB[i]) + betas[0] * vC[i]
// via the hipBLAS batched-gemm entry points. Only element 0 of alphas/betas
// is read. transA/transB use CBLAS transpose codes (112 == transpose).
void bgemm(const std::vector<NDArray*>& vA, const std::vector<NDArray*>& vB, std::vector<NDArray*>& vC, const NDArray* alphas, const NDArray* betas, int transA, int transB, int M, int N, int K, const int lda, const int ldb, const int ldc) {
const auto bS = vA.size(); // batch size
std::vector<NDArray*> pA(bS), pB(bS), pC(bS);
std::vector<NDArray*> toDelete;
// Ensure each per-batch matrix is dense (ews == 1); copies made here are
// tracked in toDelete and released at the end.
for(int i = 0; i < bS; ++i) {
if(vA[i]->ews() != 1) {
pA[i] = new NDArray(vA[i]->dup('f'));
toDelete.emplace_back(pA[i]);
}
else
pA[i] = vA[i];
if(vB[i]->ews() != 1) {
pB[i] = new NDArray(vB[i]->dup('f'));
toDelete.emplace_back(pB[i]);
}
else
pB[i] = vB[i];
if(vC[i]->ews() != 1) {
pC[i] = new NDArray(vC[i]->dup('f'));
toDelete.emplace_back(pC[i]);
}
else
pC[i] = vC[i];
// The gemm below produces a column-major ('f') result; for a non-'f' C
// compute the transposed product C^T = B^T * A^T instead, by swapping
// A/B, permuting every operand, and updating the effective M, N, K.
if(pC[i]->ordering() != 'f') {
auto temp = pA[i];
pA[i] = new NDArray(pB[i]->permute({1,0}));
pB[i] = new NDArray(temp ->permute({1,0}));
pC[i] = new NDArray(pC[i]->permute({1,0}));
toDelete.push_back(pA[i]);
toDelete.push_back(pB[i]);
toDelete.push_back(pC[i]);
M = pA[i]->sizeAt(0);
K = pA[i]->sizeAt(1);
N = pB[i]->sizeAt(1);
}
NDArray::prepareSpecialUse ({pC[i]}, {pA[i], pB[i]});
NDArray::registerSpecialUse({pC[i]}, {pA[i], pB[i]});
}
NDArray::prepareSpecialUse ({}, {alphas, betas});
NDArray::registerSpecialUse({}, {alphas, betas});
// Gather the raw device buffers and replicate the host-side pointer arrays
// onto the device for the batched BLAS call.
std::vector<void*> pAbuffs(bS), pBbuffs(bS), pCbuffs(bS);
for(int i = 0; i < bS; ++i) {
pAbuffs[i] = pA[i]->getSpecialBuffer();
pBbuffs[i] = pB[i]->getSpecialBuffer();
pCbuffs[i] = pC[i]->getSpecialBuffer();
}
nd4j::LaunchContext* context = vA[0]->getContext();
PointersManager manager(context, "helpers::bgemm cuda");
const void** aBuffers = reinterpret_cast<const void**>(manager.replicatePointer(pAbuffs.data(), bS * sizeof(void*)));
const void** bBuffers = reinterpret_cast<const void**>(manager.replicatePointer(pBbuffs.data(), bS * sizeof(void*)));
void** cBuffers = reinterpret_cast<void**>(manager.replicatePointer(pCbuffs.data(), bS * sizeof(void*)));
// const auto aOrder = pA->ordering();
// const auto bOrder = pB->ordering();
// const bool transA = aOrder != 'f';
// const bool transB = bOrder != 'f';
const hipblasOperation_t transAblas = transA == 112 ? HIPBLAS_OP_T : HIPBLAS_OP_N;
const hipblasOperation_t transBblas = transB == 112 ? HIPBLAS_OP_T : HIPBLAS_OP_N;
// const int lda = aOrder == 'f' ? M : K;
// const int ldb = bOrder == 'f' ? K : N;
// const int ldc = M; // cOrder == 'f' ? M : N;
const auto aType = pA[0]->dataType();
const auto bType = pB[0]->dataType();
const auto cType = pC[0]->dataType();
auto handle = reinterpret_cast<hipblasHandle_t*>(context->getCublasHandle());
auto stream = context->getCudaStream();
auto status = hipblasSetStream(*handle, *stream);
if (status != HIPBLAS_STATUS_SUCCESS)
throw cuda_exception::build("MmulHelper::mmulMxM cuda failed !", status);
const bool AB(aType == bType), AC(aType == cType), ABC(AB && AC);
// choose appropriate cuda gemm api depending on data types
if(ABC && aType == DataType::DOUBLE) {
double alpha = alphas->e<double>(0);
double beta = betas->e<double>(0);
status = hipblasDgemmBatched(*handle, transAblas, transBblas, M, N, K, &alpha, (const double**)aBuffers, lda, (const double**)bBuffers, ldb, &beta, (double**)cBuffers, ldc, bS);
}
else if(ABC && aType == DataType::FLOAT32) {
float alpha = alphas->e<float>(0);
float beta = betas->e<float>(0);
status = hipblasSgemmBatched(*handle, transAblas, transBblas, M, N, K, &alpha, (const float**)aBuffers, lda, (const float**)bBuffers, ldb, &beta, (float**)cBuffers, ldc, bS);
}
else if(ABC && aType == DataType::HALF) {
// alpha/beta are read as float then converted to __half.
__half alpha = alphas->e<float>(0);
__half beta = betas->e<float>(0);
status = hipblasHgemmBatched(*handle, transAblas, transBblas, M, N, K, &alpha, (const __half**)aBuffers, lda, (const __half**)bBuffers, ldb, &beta, (__half**)cBuffers, ldc, bS);
}
else if(AB && aType == DataType::INT8 && cType == DataType::FLOAT32) {
float alpha = alphas->e<float>(0);
float beta = betas->e<float>(0);
status = hipblasGemmBatchedEx(*handle, transAblas, transBblas, M, N, K, &alpha, aBuffers, HIP_R_8I, lda, bBuffers, HIP_R_8I, ldb, &beta, cBuffers, HIP_R_32F, ldc, bS, HIP_R_32F, HIPBLAS_GEMM_DEFAULT);
}
else if(AB && aType == DataType::HALF && cType == DataType::FLOAT32) {
float alpha = alphas->e<float>(0);
float beta = betas->e<float>(0);
status = hipblasGemmBatchedEx(*handle, transAblas, transBblas, M, N, K, &alpha, aBuffers, HIP_R_16F, lda, bBuffers, HIP_R_16F, ldb, &beta, cBuffers, HIP_R_32F, ldc, bS, HIP_R_32F, HIPBLAS_GEMM_DEFAULT);
}
else
throw std::runtime_error("batched gemm cuda: this mode is not implemented yet !");
if (status != HIPBLAS_STATUS_SUCCESS)
throw cuda_exception::build("MmulHelper::mmulMxM cuda failed !", status);
auto cudaResult = hipStreamSynchronize(*stream);
if (cudaResult != 0)
throw cuda_exception::build("MmulHelper::mmulMxM cuda failed !", cudaResult);
// If C had to be densified, write the result back into the caller's array.
for(int i = 0; i < bS; ++i)
if(vC[i]->ews() != 1)
vC[i]->assign(pC[i]);
// Release temporaries in reverse creation order.
for(int i = toDelete.size() - 1; i >= 0; --i)
delete toDelete[i];
}
}
}
}
| 943f852e5654126ff27e2fd76efb689993f60006.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author raver119@gmail.com
// @author Yurii Shyrma (iuriish@yahoo.com)
//
#include <exceptions/cuda_exception.h>
#include <cublas_v2.h>
#include <specials_cuda.h>
#include <op_boilerplate.h>
#include <types/float16.h>
#include <ops/declarable/helpers/batched_gemm.h>
#include <PointersManager.h>
namespace nd4j {
namespace ops {
namespace helpers {
//////////////////////////////////////////////////////////////////////////////
// bsxMXK x bSxKxN = bSxMxN
// Batched GEMM on device: for each batch index i computes
// vC[i] = alphas[0] * op(vA[i]) * op(vB[i]) + betas[0] * vC[i]
// via the cuBLAS batched-gemm entry points. Only element 0 of alphas/betas
// is read. transA/transB use CBLAS transpose codes (112 == transpose).
void bgemm(const std::vector<NDArray*>& vA, const std::vector<NDArray*>& vB, std::vector<NDArray*>& vC, const NDArray* alphas, const NDArray* betas, int transA, int transB, int M, int N, int K, const int lda, const int ldb, const int ldc) {
const auto bS = vA.size(); // batch size
std::vector<NDArray*> pA(bS), pB(bS), pC(bS);
std::vector<NDArray*> toDelete;
// Ensure each per-batch matrix is dense (ews == 1); copies made here are
// tracked in toDelete and released at the end.
for(int i = 0; i < bS; ++i) {
if(vA[i]->ews() != 1) {
pA[i] = new NDArray(vA[i]->dup('f'));
toDelete.emplace_back(pA[i]);
}
else
pA[i] = vA[i];
if(vB[i]->ews() != 1) {
pB[i] = new NDArray(vB[i]->dup('f'));
toDelete.emplace_back(pB[i]);
}
else
pB[i] = vB[i];
if(vC[i]->ews() != 1) {
pC[i] = new NDArray(vC[i]->dup('f'));
toDelete.emplace_back(pC[i]);
}
else
pC[i] = vC[i];
// The gemm below produces a column-major ('f') result; for a non-'f' C
// compute the transposed product C^T = B^T * A^T instead, by swapping
// A/B, permuting every operand, and updating the effective M, N, K.
if(pC[i]->ordering() != 'f') {
auto temp = pA[i];
pA[i] = new NDArray(pB[i]->permute({1,0}));
pB[i] = new NDArray(temp ->permute({1,0}));
pC[i] = new NDArray(pC[i]->permute({1,0}));
toDelete.push_back(pA[i]);
toDelete.push_back(pB[i]);
toDelete.push_back(pC[i]);
M = pA[i]->sizeAt(0);
K = pA[i]->sizeAt(1);
N = pB[i]->sizeAt(1);
}
NDArray::prepareSpecialUse ({pC[i]}, {pA[i], pB[i]});
NDArray::registerSpecialUse({pC[i]}, {pA[i], pB[i]});
}
NDArray::prepareSpecialUse ({}, {alphas, betas});
NDArray::registerSpecialUse({}, {alphas, betas});
// Gather the raw device buffers and replicate the host-side pointer arrays
// onto the device for the batched BLAS call.
std::vector<void*> pAbuffs(bS), pBbuffs(bS), pCbuffs(bS);
for(int i = 0; i < bS; ++i) {
pAbuffs[i] = pA[i]->getSpecialBuffer();
pBbuffs[i] = pB[i]->getSpecialBuffer();
pCbuffs[i] = pC[i]->getSpecialBuffer();
}
nd4j::LaunchContext* context = vA[0]->getContext();
PointersManager manager(context, "helpers::bgemm cuda");
const void** aBuffers = reinterpret_cast<const void**>(manager.replicatePointer(pAbuffs.data(), bS * sizeof(void*)));
const void** bBuffers = reinterpret_cast<const void**>(manager.replicatePointer(pBbuffs.data(), bS * sizeof(void*)));
void** cBuffers = reinterpret_cast<void**>(manager.replicatePointer(pCbuffs.data(), bS * sizeof(void*)));
// const auto aOrder = pA->ordering();
// const auto bOrder = pB->ordering();
// const bool transA = aOrder != 'f';
// const bool transB = bOrder != 'f';
const cublasOperation_t transAblas = transA == 112 ? CUBLAS_OP_T : CUBLAS_OP_N;
const cublasOperation_t transBblas = transB == 112 ? CUBLAS_OP_T : CUBLAS_OP_N;
// const int lda = aOrder == 'f' ? M : K;
// const int ldb = bOrder == 'f' ? K : N;
// const int ldc = M; // cOrder == 'f' ? M : N;
const auto aType = pA[0]->dataType();
const auto bType = pB[0]->dataType();
const auto cType = pC[0]->dataType();
auto handle = reinterpret_cast<cublasHandle_t*>(context->getCublasHandle());
auto stream = context->getCudaStream();
auto status = cublasSetStream_v2(*handle, *stream);
if (status != CUBLAS_STATUS_SUCCESS)
throw cuda_exception::build("MmulHelper::mmulMxM cuda failed !", status);
const bool AB(aType == bType), AC(aType == cType), ABC(AB && AC);
// choose appropriate cuda gemm api depending on data types
if(ABC && aType == DataType::DOUBLE) {
double alpha = alphas->e<double>(0);
double beta = betas->e<double>(0);
status = cublasDgemmBatched(*handle, transAblas, transBblas, M, N, K, &alpha, (const double**)aBuffers, lda, (const double**)bBuffers, ldb, &beta, (double**)cBuffers, ldc, bS);
}
else if(ABC && aType == DataType::FLOAT32) {
float alpha = alphas->e<float>(0);
float beta = betas->e<float>(0);
status = cublasSgemmBatched(*handle, transAblas, transBblas, M, N, K, &alpha, (const float**)aBuffers, lda, (const float**)bBuffers, ldb, &beta, (float**)cBuffers, ldc, bS);
}
else if(ABC && aType == DataType::HALF) {
// alpha/beta are read as float then converted to __half.
__half alpha = alphas->e<float>(0);
__half beta = betas->e<float>(0);
status = cublasHgemmBatched(*handle, transAblas, transBblas, M, N, K, &alpha, (const __half**)aBuffers, lda, (const __half**)bBuffers, ldb, &beta, (__half**)cBuffers, ldc, bS);
}
else if(AB && aType == DataType::INT8 && cType == DataType::FLOAT32) {
float alpha = alphas->e<float>(0);
float beta = betas->e<float>(0);
status = cublasGemmBatchedEx(*handle, transAblas, transBblas, M, N, K, &alpha, aBuffers, CUDA_R_8I, lda, bBuffers, CUDA_R_8I, ldb, &beta, cBuffers, CUDA_R_32F, ldc, bS, CUDA_R_32F, CUBLAS_GEMM_DEFAULT);
}
else if(AB && aType == DataType::HALF && cType == DataType::FLOAT32) {
float alpha = alphas->e<float>(0);
float beta = betas->e<float>(0);
status = cublasGemmBatchedEx(*handle, transAblas, transBblas, M, N, K, &alpha, aBuffers, CUDA_R_16F, lda, bBuffers, CUDA_R_16F, ldb, &beta, cBuffers, CUDA_R_32F, ldc, bS, CUDA_R_32F, CUBLAS_GEMM_DEFAULT);
}
else
throw std::runtime_error("batched gemm cuda: this mode is not implemented yet !");
if (status != CUBLAS_STATUS_SUCCESS)
throw cuda_exception::build("MmulHelper::mmulMxM cuda failed !", status);
auto cudaResult = cudaStreamSynchronize(*stream);
if (cudaResult != 0)
throw cuda_exception::build("MmulHelper::mmulMxM cuda failed !", cudaResult);
// If C had to be densified, write the result back into the caller's array.
for(int i = 0; i < bS; ++i)
if(vC[i]->ews() != 1)
vC[i]->assign(pC[i]);
// Release temporaries in reverse creation order.
for(int i = toDelete.size() - 1; i >= 0; --i)
delete toDelete[i];
}
}
}
}
|
37a27e7ab0fa63f0cd083c65a94e17d72368ca33.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// SGD-with-momentum update. Grid layout: blockIdx.x selects the gradient slot,
// blockIdx.y * blockDim.x threads each update `numberIterations` consecutive
// entries of the mapped parameter.  history (velocity) and parameters are
// updated in place.  (Kernel's closing brace follows this block.)
__global__ void momentumKernel (
    int numberIterations,    // consecutive entries handled per thread
    float learningRate,
    float momentum,
    float* history,          // per-entry velocity accumulator, updated in place
    int* parameterIndices,   // gradient slot -> parameter slot; -1 = inactive
    int* counts,             // per-gradient accumulation count used to average
    int parameterSize,       // number of entries per parameter
    float* parameters,       // updated in place
    float* gradient) {

    // First entry owned by this thread.
    int startEntry = (blockIdx.y * blockDim.x * numberIterations) + threadIdx.x * numberIterations;

    if(startEntry < parameterSize) {
        int gradientIndex = blockIdx.x;
        int parameterIndex = parameterIndices[gradientIndex];

        if(parameterIndex != -1) {
            int startParameter = parameterIndex * parameterSize + startEntry;
            int startGradient = gradientIndex * parameterSize + startEntry;

            // 1.0f (not 1.0) avoids promoting the division to double precision.
            float scalingFactor = 1.0f / (float)counts[gradientIndex];

            // Clamp the run length so the last thread cannot step past
            // parameterSize (the guard above only checks the first entry).
            int iterations = numberIterations;
            if (startEntry + iterations > parameterSize) {
                iterations = parameterSize - startEntry;
            }

            for(int indexParameter = startParameter, indexGradient = startGradient; indexParameter < startParameter + iterations; indexParameter++, indexGradient++) {
                // Classic momentum: v = m*v - lr*avg_grad;  p += v
                float update = momentum * history[indexParameter] - scalingFactor * learningRate * gradient[indexGradient];
                history[indexParameter] = update;
                parameters[indexParameter] += update;
            }
        }
    }
} | 37a27e7ab0fa63f0cd083c65a94e17d72368ca33.cu | __global__ void momentumKernel (
int numberIterations,
float learningRate,
float momentum,
float* history,
int* parameterIndices,
int* counts,
int parameterSize,
float* parameters,
float* gradient) {
int startEntry = (blockIdx.y * blockDim.x * numberIterations) + threadIdx.x * numberIterations;
if(startEntry < parameterSize) {
int gradientIndex = blockIdx.x;
int parameterIndex = parameterIndices[gradientIndex];
if(parameterIndex != -1) {
int startParameter = parameterIndex * parameterSize + startEntry;
int startGradient = gradientIndex * parameterSize + startEntry;
float scalingFactor = 1.0 / (float)counts[gradientIndex];
for(int indexParameter = startParameter, indexGradient = startGradient; indexParameter < startParameter + numberIterations; indexParameter++, indexGradient++) {
float update = momentum * history[indexParameter] - scalingFactor * learningRate * gradient[indexGradient];
history[indexParameter] = update;
parameters[indexParameter] += update;
}
}
}
} |
a55213789f0c15887f3e022f871d4afbf1c2f794.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CUDABoard.h"
namespace{
__device__
inline void updateStatus(BoardPoint *boardDevice,
int index,
GoColor color,
int *globalLiberty,
int *globalMoveValue,
hiprandState_t *state);
// Initializes the board: marks the outer ring as border, every interior point
// as empty, seeds one RNG stream per point, then computes the initial move
// values via updateStatus.  Must be launched as one block of
// boardSize x boardSize threads (updateStatus contains block-wide barriers).
__global__
void initBoard(BoardPoint *boardDevice, hiprandState_t *state, long randSeed){
    int index = threadIdx.y * boardSize + threadIdx.x;
    hiprand_init(randSeed, index, 0, &state[index]);
    if (threadIdx.x == 0 || threadIdx.x == boardSize-1 || threadIdx.y == 0 || threadIdx.y == boardSize-1){
        boardDevice[index].color = 3;   // border sentinel (presumably GO_BORDER — TODO confirm)
        boardDevice[index].moveValue = GO_ILLEGAL_MOVE;
        boardDevice[index].groupID = index;
    } else {
        boardDevice[index].color = 0;   // GO_EMPTY
        boardDevice[index].moveValue = GO_VALUE_NOT_SET;
        // hipMalloc memory is NOT zero-initialized, so groupID must be set
        // explicitly: updateStatus indexes the shared globalLiberty array with
        // it, and a garbage value reads out of bounds.
        boardDevice[index].groupID = index;
    }
    // The derived counters are only ever incremented elsewhere; start them
    // from a known state instead of uninitialized device memory.
    boardDevice[index].libertyNumber = 0;
    boardDevice[index].emptyLibertyNumber = 0;
    boardDevice[index].blackNeighbourNumber = 0;
    boardDevice[index].whiteNeighbourNumber = 0;
    boardDevice[index].blackLibertyNumber = 0;
    boardDevice[index].whiteLibertyNumber = 0;
    __syncthreads();
    __shared__ int globalLiberty[totalSize];
    __shared__ int globalMoveValue[totalSize];
    updateStatus(boardDevice, index, GO_BLACK, globalLiberty, globalMoveValue, state);
}
// Returns the opposing stone colour; anything that is not a stone maps to GO_EMPTY.
__device__
inline int invertColor(int color){
    switch (color) {
        case GO_BLACK: return GO_WHITE;
        case GO_WHITE: return GO_BLACK;
        default:       return GO_EMPTY;
    }
}
// Produces a non-negative pseudo-random move value for board point `index`.
__device__
inline int generateRandomValue(int index, hiprandState_t *state){
    return hiprand(&state[index])>>3; // hiprand returns unsigned; shifting right by 3 bits keeps the value non-negative once assigned to int.
}
// Recomputes group liberties and the per-point move value for the whole board.
// globalLiberty / globalMoveValue are block-shared scratch arrays of totalSize.
// `color` appears to be the side whose capture moves are evaluated as legal —
// TODO confirm against callers.  Must be entered by ALL threads of the block
// (contains __syncthreads barriers).
__device__
inline void updateStatus(BoardPoint *boardDevice,
        int index,
        GoColor color,
        int *globalLiberty,
        int *globalMoveValue,
        hiprandState_t *state){
    globalLiberty[index] = 0;
    boardDevice[index].libertyNumber = 0;
    boardDevice[index].blackNeighbourNumber = 0;
    boardDevice[index].whiteNeighbourNumber = 0;
    // These three are only ever incremented below; without resetting them here
    // they accumulate stale values across successive calls (original bug).
    boardDevice[index].emptyLibertyNumber = 0;
    boardDevice[index].blackLibertyNumber = 0;
    boardDevice[index].whiteLibertyNumber = 0;
    __syncthreads();

    if (boardDevice[index].color == GO_EMPTY){
        // For each of the four neighbours: count empty neighbours directly and
        // add one liberty to each DISTINCT adjacent group (the groupID
        // comparisons avoid crediting the same group twice from one point).
        if (boardDevice[index-1].color == GO_EMPTY){
            boardDevice[index].emptyLibertyNumber++;
        }else if (boardDevice[index-1].color == GO_BLACK){
            boardDevice[index].blackNeighbourNumber++;
            boardDevice[index].blackLibertyNumber += boardDevice[index-1].libertyNumber;
            atomicAdd(&globalLiberty[boardDevice[index-1].groupID], 1);
        }else if (boardDevice[index-1].color == GO_WHITE){
            boardDevice[index].whiteNeighbourNumber++;
            boardDevice[index].whiteLibertyNumber += boardDevice[index-1].libertyNumber;
            atomicAdd(&globalLiberty[boardDevice[index-1].groupID], 1);
        }

        if (boardDevice[index+boardSize].color == GO_EMPTY){
            boardDevice[index].emptyLibertyNumber++;
        }else{
            if (boardDevice[index+boardSize].groupID != boardDevice[index-1].groupID){
                if (boardDevice[index+boardSize].color == GO_BLACK){
                    boardDevice[index].blackNeighbourNumber++;
                    boardDevice[index].blackLibertyNumber += boardDevice[index+boardSize].libertyNumber;
                    atomicAdd(&globalLiberty[boardDevice[index+boardSize].groupID], 1);
                }else if (boardDevice[index+boardSize].color == GO_WHITE){
                    boardDevice[index].whiteNeighbourNumber++;
                    boardDevice[index].whiteLibertyNumber += boardDevice[index+boardSize].libertyNumber;
                    atomicAdd(&globalLiberty[boardDevice[index+boardSize].groupID], 1);
                }
            }
        }

        if (boardDevice[index+1].color == GO_EMPTY){
            boardDevice[index].emptyLibertyNumber++;
        }else{
            if (boardDevice[index+1].groupID != boardDevice[index-1].groupID &&
                    boardDevice[index+1].groupID != boardDevice[index+boardSize].groupID){
                if (boardDevice[index+1].color == GO_BLACK){
                    boardDevice[index].blackNeighbourNumber++;
                    boardDevice[index].blackLibertyNumber += boardDevice[index+1].libertyNumber;
                    atomicAdd(&globalLiberty[boardDevice[index+1].groupID], 1);
                }else if (boardDevice[index+1].color == GO_WHITE){
                    boardDevice[index].whiteNeighbourNumber++;
                    boardDevice[index].whiteLibertyNumber += boardDevice[index+1].libertyNumber;
                    atomicAdd(&globalLiberty[boardDevice[index+1].groupID], 1);
                }
            }
        }

        if (boardDevice[index-boardSize].color == GO_EMPTY){
            boardDevice[index].emptyLibertyNumber++;
        }else{
            if (boardDevice[index-boardSize].groupID != boardDevice[index-1].groupID &&
                    boardDevice[index-boardSize].groupID != boardDevice[index+1].groupID &&
                    boardDevice[index-boardSize].groupID != boardDevice[index+boardSize].groupID){
                if (boardDevice[index-boardSize].color == GO_BLACK){
                    boardDevice[index].blackNeighbourNumber++;
                    boardDevice[index].blackLibertyNumber += boardDevice[index-boardSize].libertyNumber;
                    atomicAdd(&globalLiberty[boardDevice[index-boardSize].groupID], 1);
                }else if (boardDevice[index-boardSize].color == GO_WHITE){
                    boardDevice[index].whiteNeighbourNumber++;
                    boardDevice[index].whiteLibertyNumber += boardDevice[index-boardSize].libertyNumber;
                    atomicAdd(&globalLiberty[boardDevice[index-boardSize].groupID], 1);
                }
            }
        }
    }
    __syncthreads();

    // Broadcast each group's total liberty count back to its members.
    boardDevice[index].libertyNumber = globalLiberty[boardDevice[index].groupID];
    __syncthreads();

    // Compute the move value: random for points with at least one empty
    // neighbour; for points with none, only a capturing move stays legal.
    if (boardDevice[index].color == GO_EMPTY){
        if (boardDevice[index].emptyLibertyNumber > 0){
            globalMoveValue[index] = generateRandomValue(index, state);
        }else{
            if (color == GO_WHITE){
                if (boardDevice[index].blackLibertyNumber > boardDevice[index].blackNeighbourNumber){
                    globalMoveValue[index] = generateRandomValue(index, state);
                }else{
                    globalMoveValue[index] = GO_ILLEGAL_MOVE;
                }
            }else if (color == GO_BLACK){
                if (boardDevice[index].whiteLibertyNumber > boardDevice[index].whiteNeighbourNumber){
                    globalMoveValue[index] = generateRandomValue(index, state);
                }else{
                    globalMoveValue[index] = GO_ILLEGAL_MOVE;
                }
            }
        }
    }else{
        // Occupied (or border) point: never a legal move.
        globalMoveValue[index] = GO_ILLEGAL_MOVE;
    }
    __syncthreads();
    boardDevice[index].moveValue = globalMoveValue[index];
}
// Places a stone of `color` at *selectedMove, merges adjacent friendly groups
// into one group rooted at the play point, removes enemy groups whose last
// liberty was just taken, then refreshes the board status via updateStatus.
// Must be entered by all threads of the block.
__device__
void playStone(BoardPoint *boardDevice,
        DebugFlag *debugFlagDevice,
        int *selectedMove,
        GoColor color,
        int *globalLiberty,
        int *globalMoveValue,
        hiprandState_t *state){
    int index = threadIdx.y*boardSize + threadIdx.x;
    int playPoint = *selectedMove;
    GoColor enemyColor = invertColor(color);

    __shared__ int targetGroupID[4];   // friendly neighbour groups to merge
    __shared__ int removedGroupID[4];  // enemy groups captured by this move

    // NOTE(review): `== boardSize` can never hold for a threadIdx in
    // [0, boardSize), so only the x==0 / y==0 edges return here — probably
    // boardSize-1 was intended (initBoard marks the border at boardSize-1).
    // Returning before the __syncthreads() below is also technically undefined
    // for diverged threads; kept as-is to preserve the original behaviour.
    if (threadIdx.y == 0 || threadIdx.y == boardSize || threadIdx.x == 0 || threadIdx.x == boardSize){
        globalLiberty[0] = 0;
        return;
    }

    if (index == playPoint){
        boardDevice[index].color = color;
        boardDevice[index].groupID = index;

        // Inspect the four neighbours (same order as the original: +1, -1,
        // +boardSize, -boardSize).  Both arrays are fully initialised for every
        // slot: the original left one of the two unset in each branch and
        // wrote slot 0 instead of slots 2 and 3 for the vertical neighbours,
        // letting stale shared values leak between playout iterations.
        const int offset[4] = { 1, -1, boardSize, -boardSize };
        for (int k = 0; k < 4; ++k) {
            int n = index + offset[k];
            targetGroupID[k] = -1;
            removedGroupID[k] = -1;
            if (boardDevice[n].color == color) {
                targetGroupID[k] = boardDevice[n].groupID;      // friendly: merge
            } else if (boardDevice[n].color == enemyColor &&
                       boardDevice[n].libertyNumber == 1) {
                removedGroupID[k] = boardDevice[n].groupID;     // enemy in atari: capture
            }
        }
    }
    globalLiberty[index] = 0;
    __syncthreads();

    // Merge friendly neighbouring groups into the play point's group.
    if (boardDevice[index].groupID == targetGroupID[0] ||
            boardDevice[index].groupID == targetGroupID[1] ||
            boardDevice[index].groupID == targetGroupID[2] ||
            boardDevice[index].groupID == targetGroupID[3] ){
        boardDevice[index].groupID = playPoint;
    }

    // Clear every stone belonging to a captured enemy group.
    if (boardDevice[index].groupID == removedGroupID[0] ||
            boardDevice[index].groupID == removedGroupID[1] ||
            boardDevice[index].groupID == removedGroupID[2] ||
            boardDevice[index].groupID == removedGroupID[3] ){
        boardDevice[index].groupID = 0;
        boardDevice[index].color = GO_EMPTY;
        boardDevice[index].moveValue = GO_VALUE_NOT_SET;
    }
    __syncthreads();

    updateStatus(boardDevice, index, color, globalLiberty, globalMoveValue, state);
}
// Picks the board point with the highest move value.  Only thread (0,0) scans
// the shared globalMoveValue array serially and publishes the winner through
// *selectedMove; callers must __syncthreads() before reading it.  The unused
// parameters keep the signature of the disabled parallel-reduction variant.
__device__ void selectMove(BoardPoint *boardDevice, DebugFlag *debugFlagDevice, int index, GoColor color, int *globalMoveValue, int *selectedMove, int *maxMoveValue){
    if (threadIdx.x == 0 && threadIdx.y == 0){
        int bestValue = -1;
        int bestIndex = 0;
        for (int point = 0; point < totalSize; point++){
            if (globalMoveValue[point] > bestValue){
                bestValue = globalMoveValue[point];
                bestIndex = point;
            }
        }
        // NOTE(review): bestIndex starts at 0, so *selectedMove is never
        // negative even when every move is illegal — the caller's
        // `selectedMove < 0` check presumably expects -1 then; verify.
        *selectedMove = bestIndex;
    }
}
// Plays up to 500 alternating random moves starting with `color`.
// One block of boardSize x boardSize threads.
__global__
void randomPlay(BoardPoint *boardDevice, DebugFlag *debugFlagDevice, GoColor color, hiprandState_t *state){
    int index = threadIdx.y*boardSize + threadIdx.x;
    __shared__ int globalLiberty[totalSize];
    __shared__ int globalMoveValue[totalSize];
    __shared__ int selectedMove;
    __shared__ int maxMoveValue;
    maxMoveValue = -1;

    // Seed the shared move values from the board: shared memory is
    // uninitialised at kernel entry, and the first selectMove call read
    // globalMoveValue before playStone/updateStatus ever wrote it (original bug).
    globalMoveValue[index] = boardDevice[index].moveValue;
    __syncthreads();

    int currentColor = color;
    for (int i=0; i<500; i++){
        selectMove(boardDevice, debugFlagDevice, index, currentColor, globalMoveValue, &selectedMove, &maxMoveValue);
        __syncthreads();
        if (selectedMove < 0){
            break;
        }
        playStone(boardDevice, debugFlagDevice, &selectedMove, currentColor, globalLiberty, globalMoveValue, state);
        currentColor = invertColor(currentColor);
        maxMoveValue = -1;
        __syncthreads();
    }
}
// Places one stone at (row, col) for `color`.  Launched with a single block of
// boardSize x boardSize threads; thread (0,0) publishes the play point via
// shared memory, then every thread participates in playStone.
__global__
void playBoard(BoardPoint *boardDevice, DebugFlag *debugFlagDevice, int row, int col, GoColor color, hiprandState_t *state){
    __shared__ int selectedMove;
    __shared__ int globalLiberty[totalSize]; // shared array to count the liberty of each group.
    __shared__ int globalMoveValue[totalSize];
    if (threadIdx.x == 0 && threadIdx.y ==0){
        // the corner point is special point for global operation.
        int playPoint = row*boardSize + col;
        selectedMove = playPoint;
    }
    __syncthreads();
    //__threadfence_block();
    playStone(boardDevice, debugFlagDevice, &selectedMove, color, globalLiberty, globalMoveValue, state);
}
// __global__
// void playBoard(BoardPoint *boardDevice, DebugFlag *debugFlagDevice, int row, int col, int color){
// dim3 threadShape( boardSize, boardSize );
// int numberOfBlock = 1;
// playBoardInside<<<numberOfBlock, threadShape>>>(boardDevice, debugFlagDevice, row, col, color);
//
// }
//
// __global__
// void updateLegleMove(BoardPoint *boardDevice, DebugFlag *debugFlagDevice, int color){
// int index = threadIdx.y*boardSize + threadIdx.x;
//
// if (boardDevice[index].color != GO_EMPTY){
// boardDevice[index].isBlackLegal = false;
// boardDevice[index].isWhiteLegal = false;
// }else{
// if (boardDevice[index - 1].color == GO_EMPTY ||
// boardDevice[index + 1].color == GO_EMPTY ||
// boardDevice[index - boardSize].color == GO_EMPTY ||
// boardDevice[index + boardSize].color == GO_EMPTY){
// boardDevice[index].isBlackLegal = true;
// boardDevice[index].isWhiteLegal = true;
//
// }else{
// int totalLiberty = 0;
//
// if (boardDevice[index - 1].color == color){
// totalLiberty = totalLiberty + boardDevice[index - 1].libertyNumber - 1;
// }else if(boardDevice[index - 1].color == GO_EMPTY){
// totalLiberty++;
// }
//
// if (boardDevice[index + 1].color == color){
// totalLiberty = totalLiberty + boardDevice[index + 1].libertyNumber - 1;
// }else if(boardDevice[index + 1].color == GO_EMPTY){
// totalLiberty++;
// }
//
// if (boardDevice[index - boardSize].color == color){
// totalLiberty = totalLiberty + boardDevice[index - boardSize].libertyNumber - 1;
// }else if(boardDevice[index - boardSize].color == GO_EMPTY){
// totalLiberty++;
// }
//
// if (boardDevice[index + boardSize].color == color){
// totalLiberty = totalLiberty + boardDevice[index + boardSize].libertyNumber - 1;
// }else if(boardDevice[index + boardSize].color == GO_EMPTY){
// totalLiberty++;
// }
//
// debugFlagDevice[index].libertyCount = totalLiberty;
//
// if (totalLiberty > 0){
// if (color == GO_BLACK){
// boardDevice[index].isBlackLegal = true;
// }else if (color == GO_WHITE){
// boardDevice[index].isWhiteLegal = true;
// }
// }else{
// if (color == GO_BLACK){
// boardDevice[index].isBlackLegal = false;
// }else if (color == GO_WHITE){
// boardDevice[index].isWhiteLegal = false;
// }
//
// }
//
// }
//
// }// any of the points around boardDevice[index] is GO_EMPTY?
// }// boardDevice[index].color == GO_EMPTY?
//
}//namespace
// Allocates the device-side board, debug flags and RNG states, then launches
// initBoard once (a single block covering the whole board).
CUDABoard::CUDABoard(){
    this->currentPlayer = GO_BLACK;
    this->detailDebug = false;
    hipMalloc( (void**)&(this->boardDevice), this->valueSizeDevice );
    hipMalloc( (void**)&(this->debugFlagDevice), this->debugFlagSize );
    // The RNG array needs one hiprandState_t per board point.  The original
    // sized it with valueSizeDevice (the BoardPoint array's byte size), which
    // under-allocates whenever sizeof(hiprandState_t) > sizeof(BoardPoint) and
    // lets hiprand_init write out of bounds.
    hipMalloc( (void**)&(this->stateDevice), totalSize * sizeof(hiprandState_t) );
    dim3 threadShape( boardSize, boardSize );
    int numberOfBlock = 1;
    srand((unsigned int)time(NULL));
    hipLaunchKernelGGL(( initBoard), dim3(numberOfBlock), dim3(threadShape), 0, 0, boardDevice, stateDevice, rand());
}
// Releases every device allocation made in the constructor.
CUDABoard::~CUDABoard(){
    hipFree( boardDevice );
    hipFree( debugFlagDevice );
    hipFree( stateDevice );
}
// Plays a single stone at (row, col) for `color` on the device board and
// blocks until the kernel has finished.
void CUDABoard::Play(int row, int col, GoColor color){
    // GoPoint targetPoint = GoPointUtil::Pt(col, row);
    // Play(targetPoint, color);
    //dim3 threadShape( boardSize, boardSize );
    dim3 threadShape( boardSize, boardSize );
    int numberOfBlock = 1;
    hipLaunchKernelGGL(( playBoard), dim3(numberOfBlock), dim3(threadShape), 0, 0, this->boardDevice, this->debugFlagDevice, row, col, color, this->stateDevice);
    hipDeviceSynchronize();
}
// Not implemented: GoPoint-based overloads are placeholders.
void CUDABoard::Play(GoPoint p, GoColor color){
}
void CUDABoard::Play(GoPoint p){
}
// Runs the random playout kernel (up to 500 alternating moves) starting with
// the current player, then blocks until it completes.
void CUDABoard::RandomPlay(){
    dim3 threadShape( boardSize, boardSize );
    int numberOfBlock = 1;
    hipLaunchKernelGGL(( randomPlay), dim3(numberOfBlock), dim3(threadShape), 0, 0, this->boardDevice, this->debugFlagDevice, this->currentPlayer, this->stateDevice);
    hipDeviceSynchronize();
}
// Copies the device board and debug flags back into the host-side buffers.
// Assumes boardHost / debugFlagHost are allocated elsewhere — TODO confirm.
void CUDABoard::RestoreData(){
    hipMemcpy( this->boardHost, this->boardDevice, this->valueSizeDevice, hipMemcpyDeviceToHost );
    hipMemcpy( this->debugFlagHost, this->debugFlagDevice, this->debugFlagSize, hipMemcpyDeviceToHost );
    hipDeviceSynchronize();
}
// Prints the board (top row first): '.' empty, 'o' black, 'x' white, 'H' border.
// With detailDebug set, each row is followed by the last three characters of
// every point's move value (modulo 1000), padded with '_' and separated by '|'.
ostream& operator<<(ostream& out, const CUDABoard& cudaBoard){
    out << "Whole board:" << endl;
    for (int i=boardSize-1; i>=0; i--){
        for (int j=0; j<boardSize; j++){
            int index = i*boardSize + j;
            if (cudaBoard.boardHost[index].color == 0){
                out << ".";
            }else if (cudaBoard.boardHost[index].color == GO_BLACK){
                out << "o";
            }else if (cudaBoard.boardHost[index].color == GO_WHITE){
                out << "x";
            }else if (cudaBoard.boardHost[index].color == GO_BORDER){
                out << "H";
            }
        }
        if (cudaBoard.detailDebug){
            out << " ";
            for (int j=0; j<boardSize; j++){
                int index = i*boardSize + j;
                if (cudaBoard.boardHost[index].color == GO_BORDER){
                    out << "HHHH";
                }else {
                    int value = cudaBoard.boardHost[index].moveValue%1000;
                    std::stringstream ss;
                    std::string outputString;
                    //ss<<" ";
                    ss<< "___" << value;
                    ss>>outputString;
                    // keep only the last three characters -> fixed-width cell
                    out << outputString.substr(outputString.length()-3);
                    out << "|";
                }
            }
        }
        out << "\n";
    }
    return out;
}
//int main()
//{
//
// struct timeval start_tv;
// gettimeofday(&start_tv,NULL);
//
//
//
//// for (int i=0; i<19; i++){
//// hipLaunchKernelGGL(( playBoard), dim3(numberOfBlock), dim3(threadShape), 0, 0, boardDevice, globalFlag, i, i, 2);
//// }
//
//// hipLaunchKernelGGL(( playBoard), dim3(numberOfBlock), dim3(threadShape), 0, 0, boardDevice, debugFlagDevice, 15, 12, 1);
//
// //updateLegleMove<<<numberOfBlock, threadShape>>>(boardDevice, debugFlagDevice, GO_BLACK);
// //updateLegleMove<<<numberOfBlock, threadShape>>>(boardDevice, debugFlagDevice, GO_WHITE);
//
// hipDeviceSynchronize();
//
// hipMemcpy( boardHost, boardDevice, valueSizeDevice, hipMemcpyDeviceToHost );
// hipMemcpy( debugFlagHost, debugFlagDevice, debugFlagSize, hipMemcpyDeviceToHost );
//
//
//
// hipDeviceSynchronize();
//
// struct timeval end_tv;
// gettimeofday(&end_tv,NULL);
//
// for (int i=boardSize-1; i>=0; i--){
// for (int j=0; j<boardSize; j++){
// int index = i*boardSize + j;
// if (boardHost[index].color == 0){
// printf(".");
// }else if (boardHost[index].color == GO_BLACK){
// printf("o");
// }else if (boardHost[index].color == GO_WHITE){
// printf("x");
// }else if (boardHost[index].color == GO_BORDER){
// printf("H");
// }
// }
// printf("\n");
//
// }
//
//// for (int i=boardSize-1; i>=0; i--){
//// for (int j=0; j<boardSize; j++){
//// int index = i*boardSize + j;
////// if (boardHost[index].color == GO_BLACK || boardHost[index].color == GO_WHITE){
//// printf("%d, %d | ", boardHost[index].groupID, boardHost[index].libertyNumber);
////// } else if (boardHost[index].color == GO_EMPTY) {
////// printf(" , | ");
////// }
//// }
//// printf("\n");
////
//// }
//
// for (int i=boardSize-1; i>=0; i--){
// for (int j=0; j<boardSize; j++){
// int index = i*boardSize + j;
// if (boardHost[index].color == GO_BORDER){
// printf("H");
// }else{
// if (boardHost[index].isBlackLegal){
// printf("o");
// }else {
// printf(".");
// }
// }
// }
//
// printf(" ");
//
// for (int j=0; j<boardSize; j++){
// int index = i*boardSize + j;
// if (boardHost[index].color == GO_BORDER){
// printf("H");
// }else{
// if (boardHost[index].isWhiteLegal){
// printf("x");
// }else {
// printf(".");
// }
// }
// }
//
// printf("\n");
//
// }
//
//
//
//// for (int i=boardSize-1; i>=0; i--){
//// for (int j=0; j<boardSize; j++){
//// int index = i*boardSize + j;
//// printf("%d | ", debugFlagHost[index].libertyCount);
//// }
//// printf("\n");
////
//// }
//
//
// printf("\n");
//
// if(end_tv.tv_usec >= start_tv.tv_usec){
// printf("time %lu:%lu\n",end_tv.tv_sec - start_tv.tv_sec, end_tv.tv_usec - start_tv.tv_usec);
// }else{
// printf("time %lu:%lu\n",end_tv.tv_sec - start_tv.tv_sec - 1, 1000000 - start_tv.tv_usec + end_tv.tv_usec);
// }
//
//
// return EXIT_SUCCESS;
//
//}
| a55213789f0c15887f3e022f871d4afbf1c2f794.cu | #include "CUDABoard.h"
namespace{
__device__
inline void updateStatus(BoardPoint *boardDevice,
int index,
GoColor color,
int *globalLiberty,
int *globalMoveValue,
curandState *state);
__global__
void initBoard(BoardPoint *boardDevice, curandState *state, long randSeed){
int index = threadIdx.y * boardSize + threadIdx.x;
curand_init(randSeed, index, 0, &state[index]);
if (threadIdx.x == 0 || threadIdx.x == boardSize-1 || threadIdx.y == 0 || threadIdx.y == boardSize-1){
boardDevice[index].color = 3;
boardDevice[index].moveValue = GO_ILLEGAL_MOVE;
boardDevice[index].groupID = index;
} else {
boardDevice[index].color = 0;
boardDevice[index].moveValue = GO_VALUE_NOT_SET;
//boardDevice[index].groupID = index;
}
__syncthreads();
//__threadfence_block();
__shared__ int globalLiberty[totalSize];
__shared__ int globalMoveValue[totalSize];
updateStatus(boardDevice, index, GO_BLACK, globalLiberty, globalMoveValue, state);
//all the initial group ID will be zero..
}
// Returns the opposing stone colour; anything that is not a stone maps to GO_EMPTY.
__device__
inline int invertColor(int color){
    switch (color) {
        case GO_BLACK: return GO_WHITE;
        case GO_WHITE: return GO_BLACK;
        default:       return GO_EMPTY;
    }
}
// Produces a non-negative pseudo-random move value for board point `index`.
__device__
inline int generateRandomValue(int index, curandState *state){
    return curand(&state[index])>>3; // curand returns unsigned; shifting right by 3 bits keeps the value non-negative once assigned to int.
}
__device__
inline void updateStatus(BoardPoint *boardDevice,
int index,
GoColor color,
int *globalLiberty,
int *globalMoveValue,
curandState *state){
globalLiberty[index] = 0;
boardDevice[index].libertyNumber = 0;
boardDevice[index].blackNeighbourNumber = 0;
boardDevice[index].whiteNeighbourNumber = 0;
__syncthreads();
if (boardDevice[index].color == GO_EMPTY){
// updating liberty for each point
if (boardDevice[index-1].color == GO_EMPTY){
// atomicAdd(&globalLiberty[boardDevice[index].groupID], 1);
boardDevice[index].emptyLibertyNumber++;
}else if (boardDevice[index-1].color == GO_BLACK){
boardDevice[index].blackNeighbourNumber++;
boardDevice[index].blackLibertyNumber += boardDevice[index-1].libertyNumber;
atomicAdd(&globalLiberty[boardDevice[index-1].groupID], 1);
}else if (boardDevice[index-1].color == GO_WHITE){
boardDevice[index].whiteNeighbourNumber++;
boardDevice[index].whiteLibertyNumber += boardDevice[index-1].libertyNumber;
atomicAdd(&globalLiberty[boardDevice[index-1].groupID], 1);
}
if (boardDevice[index+boardSize].color == GO_EMPTY){
// atomicAdd(&globalLiberty[boardDevice[index].groupID], 1);
boardDevice[index].emptyLibertyNumber++;
}else{
if (boardDevice[index+boardSize].groupID != boardDevice[index-1].groupID){
if (boardDevice[index+boardSize].color == GO_BLACK){
boardDevice[index].blackNeighbourNumber++;
boardDevice[index].blackLibertyNumber += boardDevice[index+boardSize].libertyNumber;
atomicAdd(&globalLiberty[boardDevice[index+boardSize].groupID], 1);
}else if (boardDevice[index+boardSize].color == GO_WHITE){
boardDevice[index].whiteNeighbourNumber++;
boardDevice[index].whiteLibertyNumber += boardDevice[index+boardSize].libertyNumber;
atomicAdd(&globalLiberty[boardDevice[index+boardSize].groupID], 1);
}
}
}
if (boardDevice[index+1].color == GO_EMPTY){
// atomicAdd(&globalLiberty[boardDevice[index].groupID], 1);
boardDevice[index].emptyLibertyNumber++;
}else{
if (boardDevice[index+1].groupID != boardDevice[index-1].groupID &&
boardDevice[index+1].groupID != boardDevice[index+boardSize].groupID){
if (boardDevice[index+1].color == GO_BLACK){
boardDevice[index].blackNeighbourNumber++;
boardDevice[index].blackLibertyNumber += boardDevice[index+1].libertyNumber;
atomicAdd(&globalLiberty[boardDevice[index+1].groupID], 1);
}else if (boardDevice[index+1].color == GO_WHITE){
boardDevice[index].whiteNeighbourNumber++;
boardDevice[index].whiteLibertyNumber += boardDevice[index+1].libertyNumber;
atomicAdd(&globalLiberty[boardDevice[index+1].groupID], 1);
}
}
}
if (boardDevice[index-boardSize].color == GO_EMPTY){
// atomicAdd(&globalLiberty[boardDevice[index].groupID], 1);
boardDevice[index].emptyLibertyNumber++;
}else{
if (boardDevice[index-boardSize].groupID != boardDevice[index-1].groupID &&
boardDevice[index-boardSize].groupID != boardDevice[index+1].groupID &&
boardDevice[index-boardSize].groupID != boardDevice[index+boardSize].groupID){
if (boardDevice[index-boardSize].color == GO_BLACK){
boardDevice[index].blackNeighbourNumber++;
boardDevice[index].blackLibertyNumber += boardDevice[index-boardSize].libertyNumber;
atomicAdd(&globalLiberty[boardDevice[index-boardSize].groupID], 1);
}else if (boardDevice[index-boardSize].color == GO_WHITE){
boardDevice[index].whiteNeighbourNumber++;
boardDevice[index].whiteLibertyNumber += boardDevice[index-boardSize].libertyNumber;
atomicAdd(&globalLiberty[boardDevice[index-boardSize].groupID], 1);
}
}
}
}
__syncthreads();
//__threadfence_block();
boardDevice[index].libertyNumber = globalLiberty[boardDevice[index].groupID];
__syncthreads();
//__threadfence_block();
// computing move value for each point
if (boardDevice[index].color == GO_EMPTY){
if (boardDevice[index].emptyLibertyNumber > 0){
globalMoveValue[index] = generateRandomValue(index, state);
}else{
if (color == GO_WHITE){
if (boardDevice[index].blackLibertyNumber > boardDevice[index].blackNeighbourNumber){
globalMoveValue[index] = generateRandomValue(index, state);
}else{
globalMoveValue[index] = GO_ILLEGAL_MOVE;
}
}else if (color == GO_BLACK){
if (boardDevice[index].whiteLibertyNumber > boardDevice[index].whiteNeighbourNumber){
globalMoveValue[index] = generateRandomValue(index, state);
}else{
globalMoveValue[index] = GO_ILLEGAL_MOVE;
}
}
}
// if (boardDevice[index].moveValue == GO_VALUE_NOT_SET || boardDevice[index].moveValue == GO_ILLEGAL_MOVE){
// if (color == GO_WHITE){
// //assuming that next move will be black, as current move is white.
// if( (boardDevice[index - 1].color == GO_WHITE && boardDevice[index-1].libertyNumber == 1) ||
// (boardDevice[index + 1].color == GO_WHITE && boardDevice[index+1].libertyNumber == 1) ||
// (boardDevice[index - boardSize].color == GO_WHITE && boardDevice[index-boardSize].libertyNumber == 1) ||
// (boardDevice[index + boardSize].color == GO_WHITE && boardDevice[index+boardSize].libertyNumber == 1)){
// globalMoveValue[index] = generateRandomValue(index, state);
// }else {
// if (boardDevice[index - 1].color == GO_EMPTY ||
// (boardDevice[index - 1].color == GO_BLACK && boardDevice[index-1].libertyNumber > 1)||
// boardDevice[index + 1].color == GO_EMPTY ||
// (boardDevice[index + 1].color == GO_BLACK && boardDevice[index+1].libertyNumber > 1)||
// boardDevice[index - boardSize].color == GO_EMPTY ||
// (boardDevice[index - boardSize].color == GO_BLACK && boardDevice[index-boardSize].libertyNumber > 1)||
// boardDevice[index + boardSize].color == GO_EMPTY ||
// (boardDevice[index + boardSize].color == GO_BLACK && boardDevice[index+boardSize].libertyNumber > 1)){
// globalMoveValue[index] = generateRandomValue(index, state);
// }else{
// globalMoveValue[index] = GO_ILLEGAL_MOVE;
// }
// }
// }else if (color == GO_BLACK){
// //assuming that next move will be white, as current move is black.
// if( (boardDevice[index - 1].color == GO_BLACK && boardDevice[index-1].libertyNumber == 1) ||
// (boardDevice[index + 1].color == GO_BLACK && boardDevice[index+1].libertyNumber == 1) ||
// (boardDevice[index - boardSize].color == GO_BLACK && boardDevice[index-boardSize].libertyNumber == 1) ||
// (boardDevice[index + boardSize].color == GO_BLACK && boardDevice[index+boardSize].libertyNumber == 1)){
// globalMoveValue[index] = generateRandomValue(index, state);
// }else {
// if (boardDevice[index - 1].color == GO_EMPTY ||
// (boardDevice[index - 1].color == GO_WHITE && boardDevice[index-1].libertyNumber > 1)||
// boardDevice[index + 1].color == GO_EMPTY ||
// (boardDevice[index + 1].color == GO_WHITE && boardDevice[index+1].libertyNumber > 1)||
// boardDevice[index - boardSize].color == GO_EMPTY ||
// (boardDevice[index - boardSize].color == GO_WHITE && boardDevice[index-boardSize].libertyNumber > 1)||
// boardDevice[index + boardSize].color == GO_EMPTY ||
// (boardDevice[index + boardSize].color == GO_WHITE && boardDevice[index+boardSize].libertyNumber > 1)){
// globalMoveValue[index] = generateRandomValue(index, state);
// }else{
// globalMoveValue[index] = GO_ILLEGAL_MOVE;
// }
// }
// }
// }// endif boardDevice[index].moveValue check.
}else{
// current point is not empty, so this is an illegal move; mark it GO_ILLEGAL_MOVE.
globalMoveValue[index] = GO_ILLEGAL_MOVE;
}
__syncthreads();
//__threadfence_block();
// this line is not necessary at this time.
boardDevice[index].moveValue = globalMoveValue[index];
}
// Classify one neighbour of a freshly played stone.
//  - If the neighbour holds a friendly stone, record its group id in
//    *targetSlot so that group can be merged with the new stone's group.
//  - If the neighbour holds an enemy group that is down to its last
//    liberty, record its group id in *removedSlot so it can be captured.
// Both slots are first reset to -1 ("nothing to merge / remove").  The
// previous inline version left slots holding stale shared-memory values in
// some branches and wrote two of the removed ids into slot 0 instead of
// slots 2 and 3 — both fixed by this helper.
__device__ void classifyNeighbour(BoardPoint *boardDevice, int neighbourIndex,
                                  GoColor color, GoColor enemyColor,
                                  int *targetSlot, int *removedSlot)
{
	*targetSlot = -1;
	*removedSlot = -1;
	if (boardDevice[neighbourIndex].color == color){
		*targetSlot = boardDevice[neighbourIndex].groupID;
	}else if (boardDevice[neighbourIndex].color == enemyColor &&
	          boardDevice[neighbourIndex].libertyNumber == 1){
		*removedSlot = boardDevice[neighbourIndex].groupID;
	}
}

// Play the stone at *selectedMove for `color`.
// One thread per board point (block is boardSize x boardSize).  The thread
// sitting on the played point places the stone and classifies the four
// neighbours; then every thread merges its point into the new group and/or
// clears it if its group was captured, and finally updateStatus recomputes
// liberties and move values for the whole board.
__device__
void playStone(BoardPoint *boardDevice,
               DebugFlag *debugFlagDevice,
               int *selectedMove,
               GoColor color,
               int *globalLiberty,
               int *globalMoveValue,
               curandState *state){
	int index = threadIdx.y*boardSize + threadIdx.x;
	int playPoint = *selectedMove;
	GoColor enemyColor = invertColor(color);
	// One slot per neighbour (+1, -1, +boardSize, -boardSize); -1 = none.
	__shared__ int targetGroupID[4];
	__shared__ int removedGroupID[4];
	// NOTE(review): with a boardSize x boardSize block, threadIdx never
	// equals boardSize, so only the row/col 0 border is caught here —
	// confirm whether the far border is meant to be row/col boardSize-1.
	if (threadIdx.y == 0 || threadIdx.y == boardSize || threadIdx.x == 0 || threadIdx.x == boardSize){
		// out of the real board: reset the liberty of group 0 and return.
		globalLiberty[0] = 0;
		return;
	}
	if (index == playPoint){
		boardDevice[index].color = color;
		boardDevice[index].groupID = index;
		classifyNeighbour(boardDevice, index + 1,         color, enemyColor, &targetGroupID[0], &removedGroupID[0]);
		classifyNeighbour(boardDevice, index - 1,         color, enemyColor, &targetGroupID[1], &removedGroupID[1]);
		classifyNeighbour(boardDevice, index + boardSize, color, enemyColor, &targetGroupID[2], &removedGroupID[2]);
		classifyNeighbour(boardDevice, index - boardSize, color, enemyColor, &targetGroupID[3], &removedGroupID[3]);
	}
	globalLiberty[index] = 0;
	__syncthreads();
	// Merge: any stone whose group touches the new stone joins its group.
	if (boardDevice[index].groupID == targetGroupID[0] ||
	    boardDevice[index].groupID == targetGroupID[1] ||
	    boardDevice[index].groupID == targetGroupID[2] ||
	    boardDevice[index].groupID == targetGroupID[3] ){
		boardDevice[index].groupID = playPoint;
	}
	// Capture: every stone of a captured enemy group is cleared.
	if (boardDevice[index].groupID == removedGroupID[0] ||
	    boardDevice[index].groupID == removedGroupID[1] ||
	    boardDevice[index].groupID == removedGroupID[2] ||
	    boardDevice[index].groupID == removedGroupID[3] ){
		boardDevice[index].groupID = 0;
		boardDevice[index].color = GO_EMPTY;
		boardDevice[index].moveValue = GO_VALUE_NOT_SET;
	}
	__syncthreads();
	// Recompute liberties and per-point move values for the next turn.
	updateStatus(boardDevice, index, color, globalLiberty, globalMoveValue, state);
}
// Choose the next move: thread (0,0) scans the whole per-point move-value
// table and stores the index of the first maximum into *selectedMove.
// The remaining parameters are kept for interface compatibility with the
// callers; only globalMoveValue and selectedMove are read or written here.
__device__ void selectMove(BoardPoint *boardDevice, DebugFlag *debugFlagDevice, int index, GoColor color, int *globalMoveValue, int *selectedMove, int *maxMoveValue){
	if (threadIdx.x == 0 && threadIdx.y == 0){
		int bestValue = -1;
		int bestIndex = 0;
		for (int pos = 0; pos < totalSize; pos++){
			int candidate = globalMoveValue[pos];
			if (candidate > bestValue){
				bestValue = candidate;
				bestIndex = pos;
			}
		}
		*selectedMove = bestIndex;
	}
}
// One-block kernel that plays up to 500 alternating moves starting with
// `color`.  Shared memory holds per-group liberty counters, per-point move
// values, and the move picked for the current turn.
__global__
void randomPlay(BoardPoint *boardDevice, DebugFlag *debugFlagDevice, GoColor color, curandState *state){
	int index = threadIdx.y*boardSize + threadIdx.x;  // flat board index of this thread
	__shared__ int globalLiberty[totalSize];          // liberty count per group id
	__shared__ int globalMoveValue[totalSize];        // candidate value per board point
	__shared__ int selectedMove;                      // move chosen by thread (0,0) in selectMove
	__shared__ int maxMoveValue;
	// NOTE(review): every thread stores -1 here without a barrier; benign
	// only because all threads write the same value and selectMove does
	// not read it — revisit if selectMove starts using maxMoveValue.
	maxMoveValue = -1;
	int currentColor = color;
	for (int i=0; i<500; i++){
		selectMove(boardDevice, debugFlagDevice, index, currentColor, globalMoveValue, &selectedMove, &maxMoveValue);
		__syncthreads();  // selectedMove must be visible to every thread before use
		if (selectedMove < 0){
			break;
		}
		playStone(boardDevice, debugFlagDevice, &selectedMove, currentColor, globalLiberty, globalMoveValue, state);
		currentColor = invertColor(currentColor);  // alternate colours each turn
		maxMoveValue = -1;
		__syncthreads();
	}
}
// One-block kernel that plays a single stone of `color` at (row, col).
// Thread (0,0) publishes the flat board index of the move in shared
// memory, and every thread then participates in playStone.
__global__
void playBoard(BoardPoint *boardDevice, DebugFlag *debugFlagDevice, int row, int col, GoColor color, curandState *state){
	__shared__ int selectedMove;               // flat index of the requested move
	__shared__ int globalLiberty[totalSize];   // per-group liberty counters
	__shared__ int globalMoveValue[totalSize]; // per-point move values
	if (threadIdx.x == 0 && threadIdx.y == 0){
		selectedMove = row*boardSize + col;
	}
	__syncthreads();  // make selectedMove visible to all threads
	playStone(boardDevice, debugFlagDevice, &selectedMove, color, globalLiberty, globalMoveValue, state);
}
// __global__
// void playBoard(BoardPoint *boardDevice, DebugFlag *debugFlagDevice, int row, int col, int color){
// dim3 threadShape( boardSize, boardSize );
// int numberOfBlock = 1;
// playBoardInside<<<numberOfBlock, threadShape>>>(boardDevice, debugFlagDevice, row, col, color);
//
// }
//
// __global__
// void updateLegleMove(BoardPoint *boardDevice, DebugFlag *debugFlagDevice, int color){
// int index = threadIdx.y*boardSize + threadIdx.x;
//
// if (boardDevice[index].color != GO_EMPTY){
// boardDevice[index].isBlackLegal = false;
// boardDevice[index].isWhiteLegal = false;
// }else{
// if (boardDevice[index - 1].color == GO_EMPTY ||
// boardDevice[index + 1].color == GO_EMPTY ||
// boardDevice[index - boardSize].color == GO_EMPTY ||
// boardDevice[index + boardSize].color == GO_EMPTY){
// boardDevice[index].isBlackLegal = true;
// boardDevice[index].isWhiteLegal = true;
//
// }else{
// int totalLiberty = 0;
//
// if (boardDevice[index - 1].color == color){
// totalLiberty = totalLiberty + boardDevice[index - 1].libertyNumber - 1;
// }else if(boardDevice[index - 1].color == GO_EMPTY){
// totalLiberty++;
// }
//
// if (boardDevice[index + 1].color == color){
// totalLiberty = totalLiberty + boardDevice[index + 1].libertyNumber - 1;
// }else if(boardDevice[index + 1].color == GO_EMPTY){
// totalLiberty++;
// }
//
// if (boardDevice[index - boardSize].color == color){
// totalLiberty = totalLiberty + boardDevice[index - boardSize].libertyNumber - 1;
// }else if(boardDevice[index - boardSize].color == GO_EMPTY){
// totalLiberty++;
// }
//
// if (boardDevice[index + boardSize].color == color){
// totalLiberty = totalLiberty + boardDevice[index + boardSize].libertyNumber - 1;
// }else if(boardDevice[index + boardSize].color == GO_EMPTY){
// totalLiberty++;
// }
//
// debugFlagDevice[index].libertyCount = totalLiberty;
//
// if (totalLiberty > 0){
// if (color == GO_BLACK){
// boardDevice[index].isBlackLegal = true;
// }else if (color == GO_WHITE){
// boardDevice[index].isWhiteLegal = true;
// }
// }else{
// if (color == GO_BLACK){
// boardDevice[index].isBlackLegal = false;
// }else if (color == GO_WHITE){
// boardDevice[index].isWhiteLegal = false;
// }
//
// }
//
// }
//
// }// any of the points around boardDevice[index] is GO_EMPTY?
// }// boardDevice[index].color == GO_EMPTY?
//
}//namespace
// Construct a board: allocate the device-side board, debug-flag and RNG
// state buffers, then launch a one-block kernel that initialises every
// point, seeding the device RNG from the host's rand().
CUDABoard::CUDABoard(){
	this->currentPlayer = GO_BLACK;  // black moves first
	this->detailDebug = false;
	cudaMalloc( (void**)&(this->boardDevice), this->valueSizeDevice );
	cudaMalloc( (void**)&(this->debugFlagDevice), this->debugFlagSize );
	// NOTE(review): stateDevice is allocated with valueSizeDevice, i.e.
	// the board buffer's byte count; if it stores curandState objects this
	// is probably the wrong size — confirm against the member declarations.
	cudaMalloc( (void**)&(this->stateDevice), valueSizeDevice );
	dim3 threadShape( boardSize, boardSize );
	int numberOfBlock = 1;
	srand((unsigned int)time(NULL));
	initBoard<<<numberOfBlock, threadShape>>>(boardDevice, stateDevice, rand());
}
// Release the device-side board, debug-flag and RNG-state buffers that
// were allocated in the constructor.
CUDABoard::~CUDABoard(){
	cudaFree( boardDevice );
	cudaFree( debugFlagDevice );
	cudaFree( stateDevice );
}
// Play one stone at (row, col) for `color` by launching the single-block
// playBoard kernel, then block until the device has finished the move.
void CUDABoard::Play(int row, int col, GoColor color){
	dim3 blockShape( boardSize, boardSize );
	playBoard<<<1, blockShape>>>(this->boardDevice, this->debugFlagDevice, row, col, color, this->stateDevice);
	cudaDeviceSynchronize();
}
// Not implemented yet.
// TODO: translate GoPoint p to (row, col) and delegate to Play(row, col, color).
void CUDABoard::Play(GoPoint p, GoColor color){
}
// Not implemented yet.
// TODO: play p for the current player (see currentPlayer member).
void CUDABoard::Play(GoPoint p){
}
// Run the self-play kernel: one block of boardSize x boardSize threads
// plays random moves starting with the current player, then wait for the
// device to finish.
void CUDABoard::RandomPlay(){
	dim3 blockShape( boardSize, boardSize );
	randomPlay<<<1, blockShape>>>(this->boardDevice, this->debugFlagDevice, this->currentPlayer, this->stateDevice);
	cudaDeviceSynchronize();
}
// Copy the board and the debug flags back from the device into the host
// mirrors so they can be inspected/printed on the CPU (see operator<<).
void CUDABoard::RestoreData(){
	cudaMemcpy( this->boardHost, this->boardDevice, this->valueSizeDevice, cudaMemcpyDeviceToHost );
	cudaMemcpy( this->debugFlagHost, this->debugFlagDevice, this->debugFlagSize, cudaMemcpyDeviceToHost );
	cudaDeviceSynchronize();
}
// Render the host-side board as ASCII: '.' empty (color 0), 'o' black,
// 'x' white, 'H' border.  Rows print top-down (highest index first).
// When detailDebug is set, each row is followed by the last three
// characters of every point's (moveValue % 1000), '|'-separated, with
// border points shown as "HHHH".  Requires RestoreData() to have run.
ostream& operator<<(ostream& out, const CUDABoard& cudaBoard){
	out << "Whole board:" << endl;
	for (int row = boardSize-1; row >= 0; row--){
		for (int col = 0; col < boardSize; col++){
			int pos = row*boardSize + col;
			int pointColor = cudaBoard.boardHost[pos].color;
			if (pointColor == 0){
				out << ".";
			}else if (pointColor == GO_BLACK){
				out << "o";
			}else if (pointColor == GO_WHITE){
				out << "x";
			}else if (pointColor == GO_BORDER){
				out << "H";
			}
		}
		if (cudaBoard.detailDebug){
			out << " ";
			for (int col = 0; col < boardSize; col++){
				int pos = row*boardSize + col;
				if (cudaBoard.boardHost[pos].color == GO_BORDER){
					out << "HHHH";
				}else{
					// Pad with '_' so exactly the last three characters
					// of the rendered value are printed.
					std::stringstream ss;
					ss << "___" << (cudaBoard.boardHost[pos].moveValue % 1000);
					std::string text = ss.str();
					out << text.substr(text.length()-3);
					out << "|";
				}
			}
		}
		out << "\n";
	}
	return out;
}
//int main()
//{
//
// struct timeval start_tv;
// gettimeofday(&start_tv,NULL);
//
//
//
//// for (int i=0; i<19; i++){
//// playBoard<<<numberOfBlock, threadShape>>>(boardDevice, globalFlag, i, i, 2);
//// }
//
//// playBoard<<<numberOfBlock, threadShape>>>(boardDevice, debugFlagDevice, 15, 12, 1);
//
// //updateLegleMove<<<numberOfBlock, threadShape>>>(boardDevice, debugFlagDevice, GO_BLACK);
// //updateLegleMove<<<numberOfBlock, threadShape>>>(boardDevice, debugFlagDevice, GO_WHITE);
//
// cudaDeviceSynchronize();
//
// cudaMemcpy( boardHost, boardDevice, valueSizeDevice, cudaMemcpyDeviceToHost );
// cudaMemcpy( debugFlagHost, debugFlagDevice, debugFlagSize, cudaMemcpyDeviceToHost );
//
//
//
// cudaDeviceSynchronize();
//
// struct timeval end_tv;
// gettimeofday(&end_tv,NULL);
//
// for (int i=boardSize-1; i>=0; i--){
// for (int j=0; j<boardSize; j++){
// int index = i*boardSize + j;
// if (boardHost[index].color == 0){
// printf(".");
// }else if (boardHost[index].color == GO_BLACK){
// printf("o");
// }else if (boardHost[index].color == GO_WHITE){
// printf("x");
// }else if (boardHost[index].color == GO_BORDER){
// printf("H");
// }
// }
// printf("\n");
//
// }
//
//// for (int i=boardSize-1; i>=0; i--){
//// for (int j=0; j<boardSize; j++){
//// int index = i*boardSize + j;
////// if (boardHost[index].color == GO_BLACK || boardHost[index].color == GO_WHITE){
//// printf("%d, %d | ", boardHost[index].groupID, boardHost[index].libertyNumber);
////// } else if (boardHost[index].color == GO_EMPTY) {
////// printf(" , | ");
////// }
//// }
//// printf("\n");
////
//// }
//
// for (int i=boardSize-1; i>=0; i--){
// for (int j=0; j<boardSize; j++){
// int index = i*boardSize + j;
// if (boardHost[index].color == GO_BORDER){
// printf("H");
// }else{
// if (boardHost[index].isBlackLegal){
// printf("o");
// }else {
// printf(".");
// }
// }
// }
//
// printf(" ");
//
// for (int j=0; j<boardSize; j++){
// int index = i*boardSize + j;
// if (boardHost[index].color == GO_BORDER){
// printf("H");
// }else{
// if (boardHost[index].isWhiteLegal){
// printf("x");
// }else {
// printf(".");
// }
// }
// }
//
// printf("\n");
//
// }
//
//
//
//// for (int i=boardSize-1; i>=0; i--){
//// for (int j=0; j<boardSize; j++){
//// int index = i*boardSize + j;
//// printf("%d | ", debugFlagHost[index].libertyCount);
//// }
//// printf("\n");
////
//// }
//
//
// printf("\n");
//
// if(end_tv.tv_usec >= start_tv.tv_usec){
// printf("time %lu:%lu\n",end_tv.tv_sec - start_tv.tv_sec, end_tv.tv_usec - start_tv.tv_usec);
// }else{
// printf("time %lu:%lu\n",end_tv.tv_sec - start_tv.tv_sec - 1, 1000000 - start_tv.tv_usec + end_tv.tv_usec);
// }
//
//
// return EXIT_SUCCESS;
//
//}
|
e2e3f5a14b312e572e9cd3c32e074209f9223c16.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//This code is taken from https://www.olcf.ornl.gov/tutorials/cuda-vector-addition/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// CUDA kernel. Each thread takes care of one element of c
// HIP kernel: element-wise vector addition, one thread per element.
// Computes c[i] = a[i] + b[i] for i in [0, n); surplus threads in the
// final partial block return without touching memory.
__global__ void vecAdd(double *a, double *b, double *c, int n)
{
    // Global thread index across the whole grid.
    int idx = blockDim.x * blockIdx.x + threadIdx.x;

    // Guard against the grid overshooting n.
    if (idx >= n)
        return;

    c[idx] = a[idx] + b[idx];
}
// Host driver for the HIP vector-add demo: allocate, initialise, copy,
// launch, and verify.  With a[i] = i and b[i] = i + 1 the device computes
// c[i] = 2i + 1, so sum(c)/n equals n.  Every HIP API call is now checked
// so a failure is reported instead of silently printing garbage.
int main( int argc, char* argv[] )
{
    // Size of vectors
    int n = 100000;

    // Host input vectors
    double *h_a;
    double *h_b;
    //Host output vector
    double *h_c;

    // Device input vectors
    double *d_a;
    double *d_b;
    //Device output vector
    double *d_c;

    // Size, in bytes, of each vector
    size_t bytes = n*sizeof(double);

    // Allocate memory for each vector on host
    h_a = (double*)malloc(bytes);
    h_b = (double*)malloc(bytes);
    h_c = (double*)malloc(bytes);
    if (h_a == NULL || h_b == NULL || h_c == NULL) {
        fprintf(stderr, "host allocation of %zu bytes failed\n", bytes);
        return 1;
    }

    // Allocate memory for each vector on GPU, checking every call
    hipError_t err;
    if ((err = hipMalloc(&d_a, bytes)) != hipSuccess ||
        (err = hipMalloc(&d_b, bytes)) != hipSuccess ||
        (err = hipMalloc(&d_c, bytes)) != hipSuccess) {
        fprintf(stderr, "hipMalloc failed: %s\n", hipGetErrorString(err));
        return 1;
    }

    // Initialize vectors on host
    int i;
    for( i = 0; i < n; i++ ) {
        h_a[i] = i;
        h_b[i] = i+1;
    }

    // Copy host vectors to device
    if ((err = hipMemcpy(d_a, h_a, bytes, hipMemcpyHostToDevice)) != hipSuccess ||
        (err = hipMemcpy(d_b, h_b, bytes, hipMemcpyHostToDevice)) != hipSuccess) {
        fprintf(stderr, "hipMemcpy to device failed: %s\n", hipGetErrorString(err));
        return 1;
    }

    // Number of threads in each thread block
    int blockSize = 1024;

    // Number of thread blocks in grid: exact integer ceiling division
    // instead of the float ceil() round-trip, which can misround for
    // large n.
    int gridSize = (n + blockSize - 1) / blockSize;

    // Execute the kernel and check for launch-configuration errors
    hipLaunchKernelGGL(( vecAdd), dim3(gridSize), dim3(blockSize), 0, 0, d_a, d_b, d_c, n);
    if ((err = hipGetLastError()) != hipSuccess) {
        fprintf(stderr, "kernel launch failed: %s\n", hipGetErrorString(err));
        return 1;
    }

    // Copy array back to host (this blocking copy also synchronizes with
    // the kernel)
    if ((err = hipMemcpy(h_c, d_c, bytes, hipMemcpyDeviceToHost)) != hipSuccess) {
        fprintf(stderr, "hipMemcpy to host failed: %s\n", hipGetErrorString(err));
        return 1;
    }

    // Sum up vector c and print result divided by n; with these inputs
    // sum = n*n, so the printed value should be ~n
    double sum = 0;
    for(i=0; i<n; i++)
        sum += h_c[i];
    printf("final result: %f\n", sum/n);
    printf("h_c[10] = %f\n", h_c[10]);

    // Release device memory
    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_c);

    // Release host memory
    free(h_a);
    free(h_b);
    free(h_c);

    return 0;
}
| e2e3f5a14b312e572e9cd3c32e074209f9223c16.cu | //This code is taken from https://www.olcf.ornl.gov/tutorials/cuda-vector-addition/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// CUDA kernel. Each thread takes care of one element of c
// CUDA kernel: element-wise vector addition c = a + b over n elements.
// Launch with at least n total threads; surplus threads in the final
// partial block are filtered out by the bounds check.
__global__ void vecAdd(double *a, double *b, double *c, int n)
{
    // Get our global thread ID (one thread per output element)
    int id = blockIdx.x*blockDim.x+threadIdx.x;

    // Make sure we do not go out of bounds (the grid may overshoot n)
    if (id < n)
        c[id] = a[id] + b[id];
}
int main( int argc, char* argv[] )
{
// Size of vectors
int n = 100000;
// Host input vectors
double *h_a;
double *h_b;
//Host output vector
double *h_c;
// Device input vectors
double *d_a;
double *d_b;
//Device output vector
double *d_c;
// Size, in bytes, of each vector
size_t bytes = n*sizeof(double);
// Allocate memory for each vector on host
h_a = (double*)malloc(bytes);
h_b = (double*)malloc(bytes);
h_c = (double*)malloc(bytes);
// Allocate memory for each vector on GPU
cudaMalloc(&d_a, bytes);
cudaMalloc(&d_b, bytes);
cudaMalloc(&d_c, bytes);
int i;
// Initialize vectors on host
for( i = 0; i < n; i++ ) {
h_a[i] = i;
h_b[i] = i+1;
}
// Copy host vectors to device
cudaMemcpy( d_a, h_a, bytes, cudaMemcpyHostToDevice);
cudaMemcpy( d_b, h_b, bytes, cudaMemcpyHostToDevice);
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 1024;
// Number of thread blocks in grid
gridSize = (int)ceil((float)n/blockSize);
// Execute the kernel
vecAdd<<<gridSize, blockSize>>>(d_a, d_b, d_c, n);
// Copy array back to host
cudaMemcpy( h_c, d_c, bytes, cudaMemcpyDeviceToHost );
// Sum up vector c and print result divided by n, this should equal 1 within error
double sum = 0;
for(i=0; i<n; i++)
sum += h_c[i];
printf("final result: %f\n", sum/n);
printf("h_c[10] = %f\n", h_c[10]);
// Release device memory
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
// Release host memory
free(h_a);
free(h_b);
free(h_c);
return 0;
}
|
53ca3187c35c8c0e028252c65703def1cd4b3c1a.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************
* Lempel, Ziv, Storer, and Szymanski Encoding and Decoding on CUDA
*
*
****************************************************************************
* CUDA LZSS
* Authors : Adnan Ozsoy, Martin Swany,Indiana University - Bloomington
* Date : April 11, 2011
****************************************************************************
Copyright 2011 Adnan Ozsoy, Martin Swany, Indiana University - Bloomington
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
****************************************************************************/
/***************************************************************************
* Code is adopted from below source
*
* LZSS: An ANSI C LZss Encoding/Decoding Routine
* Copyright (C) 2003 by Michael Dipperstein (mdipper@cs.ucsb.edu)
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
***************************************************************************/
/***************************************************************************
* INCLUDED FILES
***************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include "getopt.h"
#include <time.h>
#include "gpu_compress.h"
#include <pthread.h>
#include <unistd.h>
//#include "cuPrintf.hip"
#include <sys/time.h>
/***************************************************************************
* CUDA FILES
***************************************************************************/
#include <assert.h>
#include <hip/hip_runtime.h>
//#include "cuPrintf.hip"
/***************************************************************************
* GLOBAL VARIABLES
***************************************************************************/
//unsigned char * decompressed_buffer;
//unsigned char * init_in_d;
//unsigned char * init_out_d;
texture<unsigned char, 1, hipReadModeElementType> in_d_tex;
hipStream_t * streams;
int instreams = 16;
int nstreams = 4*instreams;
/***************************************************************************
* PROTOTYPES
***************************************************************************/
/****************************************************************************
* Function : FindMatch
* Description: This function will search through the slidingWindow
* dictionary for the longest sequence matching the MAX_CODED
* long string stored in uncodedLookahed.
* Parameters : windowHead - head of sliding window
* uncodedHead - head of uncoded lookahead buffer
* Effects : NONE
* Returned : The sliding window index where the match starts and the
* length of the match. If there is no match a length of
* zero will be returned.
****************************************************************************/
// Scan the shared circular sliding window for the longest run matching the
// lookahead buffer starting at uncodedHead.  Returns the match (offset
// into the circular window, length); both fields default to 1 because a
// zero length/offset causes problems downstream.  On the last chunk of a
// packet (lastcheck != 0) the scan is shortened per-thread because thread
// tx only has MAX_CODED - tx bytes of real data left.
__device__ encoded_string_t FindMatch(int windowHead, int uncodedHead, unsigned char* slidingWindow, unsigned char* uncodedLookahead, \
						int tx, int bx, int wfilepoint, int lastcheck, int loadcounter)
{
	encoded_string_t matchData;
	int i, j;
	int maxcheck;
	int matchingState=0;  // non-zero while the current window run still matches
	int loop=0;           // number of window positions examined so far

	matchData.length = 1; // make it 1 in the 0 case, it will be returned as 1, 0 gives problems
	matchData.offset = 1; // make it 1 in the 0 case, it will be returned as 1, 0 gives problems
	i = windowHead ;  // start at the beginning of the sliding window //
	j = 0; //counter for matchings

	// maxcheck bounds how many window positions are compared below:
	// MAX_CODED normally, MAX_CODED - tx on the final chunk (lastcheck=1).
	maxcheck = MAX_CODED - tx*lastcheck;

	int tempi=0;
	while (loop<WINDOW_SIZE)
	{
		if (slidingWindow[i] == uncodedLookahead[(uncodedHead+j)% (WINDOW_SIZE+MAX_CODED)])
		{
			// byte matches: extend the candidate run
			j++;
			matchingState=1;
		}
		else
		{
			// run ended: keep it if it is the longest seen so far
			if(matchingState && j > matchData.length)
			{
				matchData.length = j;
				tempi=i-j;
				if(tempi<0)
					tempi+=WINDOW_SIZE+MAX_CODED;  // wrap the start offset into the circular buffer

				matchData.offset = tempi;
			}

			j=0;
			matchingState=0;
		}

		i = (i + 1) % (WINDOW_SIZE+MAX_CODED);  // advance circularly through the window
		loop++;
		if (loop >= maxcheck-1)
		{
			/// we wrapped around ///
			// forcing loop to WINDOW_SIZE ends the while on its next test
			loop = WINDOW_SIZE; //break;
		}
	}

	// the scan can end while still inside a matching run; account for it
	if(j > matchData.length && matchingState )
	{
		matchData.length = j;
		tempi=i-j;
		if(tempi<0)
			tempi+=WINDOW_SIZE+MAX_CODED;

		matchData.offset = tempi;
	}

	return matchData;
}
// Abort the process with a diagnostic on stderr if any previous runtime
// API call or kernel launch failed; `msg` identifies the call site.
void checkCUDAError(const char *msg)
{
	hipError_t status = hipGetLastError();
	if (status == hipSuccess)
		return;
	fprintf(stderr, "Cuda error: %s: %s.\n", msg,
	        hipGetErrorString(status));
	exit(EXIT_FAILURE);
}
// LZSS encode kernel.  Each block compresses one PCKTSIZE packet of in_d;
// each thread tx encodes the byte at its lookahead position.  The packet
// is streamed through a double-buffered shared lookahead (2*MAX_CODED
// bytes) alongside a circular shared sliding window.  Output is written as
// (length, payload) byte pairs into out_d, which provides 2*PCKTSIZE bytes
// per packet; the host-side aftercomp pass later packs these pairs into
// the final LZSS bit stream.
__global__ void EncodeKernel(unsigned char * in_d, unsigned char * out_d, int SIZEBLOCK)
{
	/* cyclic buffer sliding window of already read characters */
	__shared__ unsigned char slidingWindow[WINDOW_SIZE+(MAX_CODED)];
	__shared__ unsigned char uncodedLookahead[MAX_CODED*2];
	__shared__ unsigned char encodedData[MAX_CODED*2];
	encoded_string_t matchData;

	int windowHead, uncodedHead; // head of sliding window and lookahead //
	int filepoint;	//file index pointer for reading
	int wfilepoint;	//file index pointer for writing
	int lastcheck;	//flag for last run of the packet
	int loadcounter=0;

	int bx = blockIdx.x;   // packet index within this stream's slice
	int tx = threadIdx.x;  // byte offset of this thread inside the chunk

	//***********************************************************************
	// * Fill the sliding window buffer with some known values. DecodeLZSS must
	// * use the same values. If common characters are used, there's an
	// * increased chance of matching to the earlier strings.
	// *********************************************************************** //
	slidingWindow[tx] = ' ';
	windowHead = tx;
	uncodedHead = tx;

	filepoint=0;
	wfilepoint=0;
	lastcheck=0;

	__syncthreads();

	//***********************************************************************
	//* Copy MAX_CODED bytes from the input file into the uncoded lookahead
	//* buffer.
	//*********************************************************************** //
	uncodedLookahead[tx] = in_d[bx * PCKTSIZE + tx];
	filepoint+=MAX_CODED;

	// seed the tail of the window with the first lookahead chunk
	slidingWindow[ (windowHead + WINDOW_SIZE ) % (WINDOW_SIZE + MAX_CODED) ] = uncodedLookahead[uncodedHead];

	__syncthreads();

	// pre-load the second half of the double-buffered lookahead
	uncodedLookahead[MAX_CODED+tx] = in_d[bx * PCKTSIZE + filepoint + tx];
	filepoint+=MAX_CODED;

	__syncthreads();

	loadcounter++;
	// Look for matching string in sliding window //
	matchData = FindMatch(windowHead, uncodedHead,slidingWindow,uncodedLookahead, tx, bx, 0, 0,loadcounter);
	__syncthreads();

	// now encoded the rest of the file until an EOF is read //
	while ((filepoint) <= PCKTSIZE && !lastcheck)
	{
		if (matchData.length >= MAX_CODED)
		{
			// garbage beyond last data happened to extend match length //
			matchData.length = MAX_CODED-1;
		}

		if (matchData.length <= MAX_UNCODED)
		{
			// not long enough match. write uncoded byte //
			matchData.length = 1;   // set to 1 for 1 byte uncoded //
			encodedData[tx*2] = 1;  // length 1 marks a literal for aftercomp
			encodedData[tx*2 + 1] = uncodedLookahead[uncodedHead];
		}
		else if(matchData.length > MAX_UNCODED)
		{
			// match length > MAX_UNCODED. Encode as offset and length. //
			encodedData[tx*2] = (unsigned char)matchData.length;
			encodedData[tx*2+1] = (unsigned char)matchData.offset;
		}

		//write out the encoded data into output
		out_d[bx * PCKTSIZE*2 + wfilepoint + tx*2] = encodedData[tx*2];
		out_d[bx * PCKTSIZE*2 + wfilepoint + tx*2 + 1] = encodedData[tx*2+1];

		//update written pointer and heads
		wfilepoint = wfilepoint + MAX_CODED*2;

		windowHead = (windowHead + MAX_CODED) % (WINDOW_SIZE+MAX_CODED);
		uncodedHead = (uncodedHead + MAX_CODED) % (MAX_CODED*2);

		__syncthreads();

		{
			if(filepoint<PCKTSIZE){
				// refill the half of the lookahead just consumed
				uncodedLookahead[(uncodedHead+ MAX_CODED)% (MAX_CODED*2)] = in_d[bx * PCKTSIZE + filepoint + tx];
				filepoint+=MAX_CODED;

				//find the location for the thread specific view of window
				slidingWindow[ (windowHead + WINDOW_SIZE ) % (WINDOW_SIZE + MAX_CODED) ] = uncodedLookahead[uncodedHead];
			}
			else{
				// packet exhausted: mark the window tail and do one final pass
				lastcheck++;
				slidingWindow[(windowHead + MAX_CODED ) % (WINDOW_SIZE+MAX_CODED)] = '^';
			}
			__syncthreads();

			loadcounter++;
			matchData = FindMatch(windowHead, uncodedHead,slidingWindow,uncodedLookahead,tx,bx, wfilepoint, lastcheck,loadcounter);
		}

	} //while

	if(lastcheck==1)
	{
		// on the final chunk thread tx only owns MAX_CODED - tx real bytes
		if(matchData.length > (MAX_CODED - tx))
			matchData.length = MAX_CODED - tx;
	}

	if (matchData.length >= MAX_CODED)
	{
		// garbage beyond last data happened to extend match length //
		matchData.length = MAX_CODED-1;
	}

	if (matchData.length <= MAX_UNCODED)
	{
		// not long enough match. write uncoded byte //
		matchData.length = 1;   // set to 1 for 1 byte uncoded //
		encodedData[tx*2] = 1;
		encodedData[tx*2 + 1] = uncodedLookahead[uncodedHead];
	}
	else if(matchData.length > MAX_UNCODED)
	{
		// match length > MAX_UNCODED. Encode as offset and length. //
		encodedData[tx*2] = (unsigned char)matchData.length;
		encodedData[tx*2+1] = (unsigned char)matchData.offset;
	}

	//write out the encoded data into output
	out_d[bx * PCKTSIZE*2 + wfilepoint + tx*2] = encodedData[tx*2];
	out_d[bx * PCKTSIZE*2 + wfilepoint + tx*2 + 1] = encodedData[tx*2+1];

	//update written pointer and heads
	wfilepoint = wfilepoint + MAX_CODED*2;

	windowHead = (windowHead + MAX_CODED) % (WINDOW_SIZE+MAX_CODED);
	uncodedHead = (uncodedHead + MAX_CODED) % (MAX_CODED*2);
}
// Allocate buf_length bytes of device (GPU) memory; aborts via
// checkCUDAError if the allocation fails.
unsigned char * initGPUmem(int buf_length)
{
	unsigned char * deviceBuffer;

	hipMalloc((void **) &deviceBuffer, sizeof(char)*buf_length);
	checkCUDAError("function, initGPUmemIN, mem alloc to gpu");

	return deviceBuffer;
}
// Allocate buf_length bytes of pinned (page-locked) host memory, which
// allows the async H2D/D2H copies in the compression pipeline; aborts via
// checkCUDAError on failure.
unsigned char * initCPUmem(int buf_length)
{
	unsigned char * pinnedBuffer;

	hipHostMalloc((void **) &pinnedBuffer, sizeof(char)*buf_length);
	checkCUDAError("function, initCPUmemIN, mem alloc to cpu");

	return pinnedBuffer;
}
// Release device (GPU) memory previously obtained from initGPUmem.
void deleteGPUmem(unsigned char * mem_d)
{
	hipFree(mem_d);
	// Mirror deleteCPUmem: surface any error from the free instead of
	// silently dropping it.
	checkCUDAError("deleteGPUmem func, hipFree");
}
// Release pinned host memory previously obtained from initCPUmem,
// reporting any error from the runtime.
void deleteCPUmem(unsigned char * mem_d)
{
	hipHostFree(mem_d);
	checkCUDAError("deleteCPUmem func,hipHostFree");
}
// Destroy every stream created by initGPU.
// The previous version passed ("literal" + i) to checkCUDAError, which is
// pointer arithmetic on the string literal — it reads past the end of the
// literal for large i instead of appending the index (and named the wrong
// function).  Build the message properly with snprintf instead.
void deleteGPUStreams()
{
	char msg[64];
	for (int i = 0; i < nstreams; ++i)
	{
		hipStreamDestroy(streams[i]);
		snprintf(msg, sizeof(msg), "deleteGPUStreams func, hipStreamDestroy %d", i);
		checkCUDAError(msg);
	}
}
// Select device 0 and create the nstreams HIP streams used by the
// compression pipeline.  The stream table malloc is now checked before it
// is dereferenced — the old code would crash on allocation failure.
void initGPU()
{
	hipSetDevice(0);
	checkCUDAError("initialize GPU");

	streams = (hipStream_t*) malloc(nstreams * sizeof(hipStream_t));
	if (streams == NULL)
	{
		fprintf(stderr, "initGPU: failed to allocate stream table\n");
		exit(EXIT_FAILURE);
	}
	for(int i = 0; i < nstreams; i++) {
		hipStreamCreate(&(streams[i]));
		checkCUDAError("streams created");
	}
}
// Tear down all device state (streams, allocations, context) on the
// current device; only call when completely done with the GPU.
void resetGPU()
{
	hipDeviceReset();
}
// Capability probe: this build always pipelines work through HIP streams,
// so report true unconditionally.
int streams_in_GPU(){
	return true;
}
// Block until the last stream of chunk `index` has drained.
// The old implementation busy-waited on hipStreamQuery(); besides pegging
// a CPU core, it never terminates if the stream enters an error state
// (every failure code also compares != hipSuccess).  A blocking
// hipStreamSynchronize() plus an explicit error check does the same job
// safely.
int onestream_finish_GPU(int index)
{
	int check = (index+1)*instreams-1;
	// NOTE(review): check is always ≡ -1 (mod instreams), so this clamp
	// looks unreachable — kept as-is; confirm the intended boundary.
	if (check == instreams * nstreams)
		check = check - 1;
	hipStreamSynchronize(streams[check]);
	checkCUDAError("cuda stream sync");
	return true;
}
// Launch the LZSS encode over buf_length bytes, split across the
// `instreams` HIP streams that belong to chunk `index`: per-stream async
// H2D copy, kernel launch, and async D2H copy of the encoded output (the
// output slice is addressed at twice the input slice size).  Returns 1;
// completion must be awaited separately (see onestream_finish_GPU).
// compression_type, wsize and noop are currently unused here.
int compression_kernel_wrapper(unsigned char *buffer, int buf_length, unsigned char * bufferout, int compression_type,int wsize,\
							int numthre, int noop,int index,unsigned char * in_d,unsigned char * out_d)
{
	int numThreads = numthre;
	// one block per PCKTSIZE packet of each stream's slice (ceil division)
	int numblocks = (buf_length / (PCKTSIZE*instreams)) + (((buf_length % (PCKTSIZE*instreams))>0)?1:0);
	int i=0;

	hipFuncSetCacheConfig(EncodeKernel, hipFuncCachePreferL1);//hipFuncCachePreferShared);

	// stage input slices to the device, one async copy per stream
	for(i = 0; i < instreams; i++)
	{
		//copy memory to cuda device
		hipMemcpyAsync(in_d+ i * (buf_length / instreams), buffer+ i * (buf_length / instreams), \
			sizeof(char)*(buf_length / instreams),hipMemcpyHostToDevice, streams[index*instreams + i]);
		checkCUDAError("mem copy to gpu");
	}

	// launch one encode kernel per stream on its slice
	for(i = 0; i < instreams; i++)
	{
		hipLaunchKernelGGL(( EncodeKernel), dim3(numblocks), dim3(numThreads), 0, streams[index*instreams + i], in_d + i * (buf_length / instreams),\
			out_d + 2 * i * (buf_length / instreams),numThreads);
		checkCUDAError("kernel invocation"); // Check for any CUDA errors
	}

	//copy memory back
	for(i = 0; i < instreams; i++)
	{
		hipMemcpyAsync(bufferout + 2 * i * (buf_length / instreams), out_d + 2 * i * (buf_length / instreams),\
			sizeof(char)*(buf_length / instreams)*2, hipMemcpyDeviceToHost, streams[index*instreams + i]);
		checkCUDAError("mem copy back");
	}

	return 1;
}
// pthread worker: packs this thread's slice of the GPU's raw
// (length, payload) pairs (data->bufferout) into the final LZSS stream in
// data->buffer.  Flags for 8 consecutive codes are accumulated into one
// flag byte (bit set = uncoded literal), followed by the held payload
// bytes.  Per-packet compressed sizes are recorded in data->header, and
// data->comptookmore is set if the "compressed" output outgrows the input.
void *aftercomp (void *q)
{
	aftercompdata_t * data=(aftercompdata_t *)q;

	int i=0, j=0, k=0, m=0, temptot=0, tempj=0;
	int finish=0;
	unsigned char flags =0;
	unsigned char flagPos = 0x01;  // bit cursor within the current flag byte

	unsigned char holdbuf[16];     // payload bytes pending behind the flag byte
	int holdbufcount=0;

	int morecounter=0;

	//reset the flags again
	flagPos = 0x01;
	flags =0;
	temptot=0;
	holdbufcount=0;

	unsigned char * bufferout = data->bufferout;
	unsigned char * buffer = data->buffer;
	int * header = data->header;
	int buf_length = data->buf_length;

	// i: read index into the 2x raw pair stream; j: write index into the
	// packed output; k: packet index for the header table.
	i = (data->tid)*((buf_length*2)/(data->numts));
	j = (data->tid)*((buf_length)/(data->numts));
	k = (data->tid)*(buf_length/(PCKTSIZE*data->numts));
	finish = (data->tid + 1)*((buf_length)/(data->numts));

	while(i<(finish*2))
	{
		if (j>finish) {
			printf("compression took more, size is %d!!! \n",j);
			data->comptookmore = 1;
			break;
		}
		temptot = bufferout[i];
		if(temptot == 1) //if no matching
		{
			flags |= flagPos;       // mark with uncoded byte flag //
			holdbuf[holdbufcount]=bufferout[i+1];
			holdbufcount++;
			i=i+2;
		}
		else //if there is matching
		{
			// matched run: emit (length, offset) and skip the covered pairs
			holdbuf[holdbufcount]=temptot;
			holdbufcount++;
			holdbuf[holdbufcount]=bufferout[i+1];
			holdbufcount++;
			i=i+(temptot*2);
		}

		if (flagPos == 0x80) //if we have looked at 8 characters that fills the flag holder
		{
			// flush: flag byte first, then its pending payload bytes
			buffer[j] = flags;
			j++;

			for(m=0;m<holdbufcount;m++){
				buffer[j] = holdbuf[m];
				j++;
			}

			// reset encoded data buffer //
			flags = 0;
			flagPos = 0x01;
			holdbufcount=0;
		}
		else
		{
			// we don't have 8 code flags yet, use next bit for next flag //
			flagPos <<= 1;
		}

		// for each packet with the size of 4096 bytes
		if(i%8192 == 0 && i>0){ //PCKTSIZE*2
			// packet boundary: flush any partial flag group
			if(holdbufcount>0){
				buffer[j] = flags;
				j++;

				for(m=0;m<holdbufcount;m++){
					buffer[j] = holdbuf[m];
					j++;
				}
				holdbufcount=0;
			}

			flags = 0;
			flagPos = 0x01;
			if((j-tempj) >= PCKTSIZE){
				morecounter++;
				//compression took more, so just write the file without compression info
			}

			// record this packet's compressed size
			header[k]=j-tempj;
			tempj=j;
			k++;
		}
	}
	// bytes actually produced by this worker's slice
	data->newlen = j - (data->tid)*((buf_length)/(data->numts)) ;

	return 0;
}
// Host-side post-processing stage: fans the GPU output in `bufferout` out to
// NWORKERS aftercomp() threads, compacts their per-slice results contiguously
// into `buffer`, then appends the per-packet size header, the 4-byte original
// length and a 2-byte padding field. On success stores the final size in
// *comp_length and returns 1; returns 0 if any slice expanded past its input
// size (compression not worthwhile).
int aftercompression_wrapper(unsigned char * buffer, int buf_length, unsigned char * bufferout, int * comp_length)
{
int comptookmore = 0;
//struct timeval t1_start,t1_end;
//double alltime;
//gettimeofday(&t1_start,0);
// allocate memory to contain the header of the file:
int * header;
header = (int *)malloc (sizeof(int)*(buf_length/PCKTSIZE));
if (header == NULL) {printf ("Memory error, header"); exit (2);}
pthread_t afcomp[NWORKERS];
aftercompdata_t data[NWORKERS];
int l=0;
for(l=0;l<NWORKERS;l++)
{
data[l].tid=l;
data[l].header=header; /* offset to start of longest match */
data[l].buffer=buffer;
data[l].buf_length=buf_length;
data[l].bufferout=bufferout;
data[l].numts = NWORKERS;
data[l].comptookmore=0;
data[l].newlen=0;
pthread_create (&afcomp[l], NULL, &aftercomp, &data[l]);
}
int i=0, j=0, k=0;//, m=0, temptot=0, tempj=0;
void *status;
for(l=0;l<NWORKERS;l++){
pthread_join( afcomp[l], &status);
comptookmore += data[l].comptookmore;
// slide slice l's output down so the packed stream is contiguous;
// joins happen in order, so buffer[0..j) is already final
if(l!=0)
{
for(i=0;i<data[l].newlen;i++)
{
buffer[j+i]=buffer[(l*(buf_length/NWORKERS))+i];
}
}
j+=data[l].newlen;
}
k=(buf_length/PCKTSIZE);
if(!comptookmore){
//Add header to buffer
// per-packet compressed sizes, big-endian 16-bit each
unsigned char cc;
for(i=0;i<k;i++)
{
cc = (unsigned char)(header[i]>>8);
buffer[j]=cc;
j++;
cc=(unsigned char)header[i];
buffer[j]=cc;
j++;
}
//Add total size
// original (uncompressed) length, big-endian 32-bit
cc = (unsigned char)(buf_length>>24);
buffer[j]=cc;
j++;
cc = (unsigned char)(buf_length>>16);
buffer[j]=cc;
j++;
cc = (unsigned char)(buf_length>>8);
buffer[j]=cc;
j++;
cc=(unsigned char)buf_length;
buffer[j]=cc;
j++;
//Add pad size
int paddingsize = 0;
cc = (unsigned char)(paddingsize>>8);
buffer[j]=cc;
j++;
cc=(unsigned char)paddingsize;
buffer[j]=cc;
j++;
}
if(comptookmore!=0)
return 0;
if(j>buf_length)
printf("compression TOOK more!!! %d\n",j);
*comp_length = j;
free(header);
return 1;
} | 53ca3187c35c8c0e028252c65703def1cd4b3c1a.cu | /***************************************************************************
* Lempel, Ziv, Storer, and Szymanski Encoding and Decoding on CUDA
*
*
****************************************************************************
* CUDA LZSS
* Authors : Adnan Ozsoy, Martin Swany,Indiana University - Bloomington
* Date : April 11, 2011
****************************************************************************
Copyright 2011 Adnan Ozsoy, Martin Swany, Indiana University - Bloomington
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
****************************************************************************/
/***************************************************************************
* Code is adopted from below source
*
* LZSS: An ANSI C LZss Encoding/Decoding Routine
* Copyright (C) 2003 by Michael Dipperstein (mdipper@cs.ucsb.edu)
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
***************************************************************************/
/***************************************************************************
* INCLUDED FILES
***************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include "getopt.h"
#include <time.h>
#include "gpu_compress.h"
#include <pthread.h>
#include <unistd.h>
//#include "cuPrintf.cu"
#include <sys/time.h>
/***************************************************************************
* CUDA FILES
***************************************************************************/
#include <assert.h>
#include <cuda.h>
//#include "cuPrintf.cu"
/***************************************************************************
* GLOBAL VARIABLES
***************************************************************************/
//unsigned char * decompressed_buffer;
//unsigned char * init_in_d;
//unsigned char * init_out_d;
texture<unsigned char, 1, cudaReadModeElementType> in_d_tex;
cudaStream_t * streams;
int instreams = 16;
int nstreams = 4*instreams;
/***************************************************************************
* PROTOTYPES
***************************************************************************/
/****************************************************************************
* Function : FindMatch
* Description: This function will search through the slidingWindow
* dictionary for the longest sequence matching the MAX_CODED
* long string stored in uncodedLookahed.
* Parameters : windowHead - head of sliding window
* uncodedHead - head of uncoded lookahead buffer
* Effects : NONE
* Returned : The sliding window index where the match starts and the
* length of the match. If there is no match a length of
* zero will be returned.
****************************************************************************/
// Device helper: scan the circular sliding-window dictionary for the longest
// string matching the lookahead starting at uncodedHead. Returns the match
// (offset into the window, length); both fields default to 1 so the caller
// never sees a zero-length/zero-offset code. When `lastcheck` is set this is
// the final partial block and the search is truncated so thread tx never
// matches past the end of valid data.
__device__ encoded_string_t FindMatch(int windowHead, int uncodedHead, unsigned char* slidingWindow, unsigned char* uncodedLookahead, \
int tx, int bx, int wfilepoint, int lastcheck, int loadcounter)
{
encoded_string_t matchData;
int i, j;
int maxcheck;
int matchingState=0;
int loop=0;
matchData.length = 1; // make it 1 in the 0 case, it will be returned as 1, 0 gives problems
matchData.offset = 1; // make it 1 in the 0 case, it will be returned as 1, 0 gives problems
i = windowHead ; // start at the beginning of the sliding window //
j = 0; //counter for matchings
//if(lastcheck)
// on the last block only MAX_CODED - tx characters remain valid for this thread
maxcheck = MAX_CODED - tx*lastcheck;
//else
// maxcheck = MAX_CODED;
int tempi=0;
while (loop<WINDOW_SIZE)
{
if (slidingWindow[i] == uncodedLookahead[(uncodedHead+j)% (WINDOW_SIZE+MAX_CODED)])
{
// extend the current candidate match
j++;
matchingState=1;
}
else
{
// candidate ended; keep it if it is the best so far
if(matchingState && j > matchData.length)
{
matchData.length = j;
tempi=i-j;
if(tempi<0)
tempi+=WINDOW_SIZE+MAX_CODED;
matchData.offset = tempi;
}
j=0;
matchingState=0;
}
i = (i + 1) % (WINDOW_SIZE+MAX_CODED);
loop++;
if (loop >= maxcheck-1)
{
/// we wrapped around ///
loop = WINDOW_SIZE; //break;
}
}
// account for a match still in progress when the scan ends
if(j > matchData.length && matchingState )
{
matchData.length = j;
tempi=i-j;
if(tempi<0)
tempi+=WINDOW_SIZE+MAX_CODED;
matchData.offset = tempi;
}
return matchData;
}
// Abort the process with a diagnostic if the most recent CUDA runtime call or
// kernel launch left an error behind. `msg` identifies the call site.
void checkCUDAError(const char *msg)
{
    const cudaError_t status = cudaGetLastError();
    if (status == cudaSuccess)
        return;
    fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(status));
    exit(EXIT_FAILURE);
}
// LZSS encoder kernel: each block compresses one PCKTSIZE-byte packet of
// in_d, each thread owning one lookahead position. The sliding window and a
// double-buffered lookahead live in shared memory; MAX_CODED bytes are
// consumed per iteration and every input byte produces a 2-byte
// (length, offset-or-literal) record in out_d, so the output region is
// 2*PCKTSIZE per block. Assumes blockDim.x == MAX_CODED (one thread per
// lookahead slot) -- TODO confirm against launch sites.
__global__ void EncodeKernel(unsigned char * in_d, unsigned char * out_d, int SIZEBLOCK)
{
/* cyclic buffer sliding window of already read characters */
__shared__ unsigned char slidingWindow[WINDOW_SIZE+(MAX_CODED)];
__shared__ unsigned char uncodedLookahead[MAX_CODED*2];
__shared__ unsigned char encodedData[MAX_CODED*2];
encoded_string_t matchData;
int windowHead, uncodedHead; // head of sliding window and lookahead //
int filepoint; //file index pointer for reading
int wfilepoint; //file index pointer for writing
int lastcheck; //flag for last run of the packet
int loadcounter=0;
int bx = blockIdx.x;
int tx = threadIdx.x;
//***********************************************************************
// * Fill the sliding window buffer with some known values. DecodeLZSS must
// * use the same values. If common characters are used, there's an
// * increased chance of matching to the earlier strings.
// *********************************************************************** //
slidingWindow[tx] = ' ';
windowHead = tx;
uncodedHead = tx;
filepoint=0;
wfilepoint=0;
lastcheck=0;
__syncthreads();
//***********************************************************************
//* Copy MAX_CODED bytes from the input file into the uncoded lookahead
//* buffer.
//*********************************************************************** //
//uncodedLookahead[tx] = tex1Dfetch(in_d_tex, bx * PCKTSIZE + tx); //in_d[bx * PCKTSIZE + tx];
uncodedLookahead[tx] = in_d[bx * PCKTSIZE + tx];
filepoint+=MAX_CODED;
// seed the tail of the window with the first lookahead chunk
slidingWindow[ (windowHead + WINDOW_SIZE ) % (WINDOW_SIZE + MAX_CODED) ] = uncodedLookahead[uncodedHead];
//tex1Dfetch(in_d_tex, bx * PCKTSIZE + tx);//uncodedLookahead[uncodedHead];
__syncthreads();
// prefetch the second half of the double-buffered lookahead
//uncodedLookahead[MAX_CODED+tx] = tex1Dfetch(in_d_tex, bx * PCKTSIZE + filepoint + tx); //in_d[bx * PCKTSIZE + filepoint + tx];
uncodedLookahead[MAX_CODED+tx] = in_d[bx * PCKTSIZE + filepoint + tx];
filepoint+=MAX_CODED;
__syncthreads();
loadcounter++;
// Look for matching string in sliding window //
matchData = FindMatch(windowHead, uncodedHead,slidingWindow,uncodedLookahead, tx, bx, 0, 0,loadcounter);
__syncthreads();
// now encoded the rest of the file until an EOF is read //
while ((filepoint) <= PCKTSIZE && !lastcheck)
{
if (matchData.length >= MAX_CODED)
{
// garbage beyond last data happened to extend match length //
matchData.length = MAX_CODED-1;
}
if (matchData.length <= MAX_UNCODED)
{
// not long enough match. write uncoded byte //
matchData.length = 1; // set to 1 for 1 byte uncoded //
encodedData[tx*2] = 1;
encodedData[tx*2 + 1] = uncodedLookahead[uncodedHead];
}
else if(matchData.length > MAX_UNCODED)
{
// match length > MAX_UNCODED. Encode as offset and length. //
encodedData[tx*2] = (unsigned char)matchData.length;
encodedData[tx*2+1] = (unsigned char)matchData.offset;
}
//write out the encoded data into output
out_d[bx * PCKTSIZE*2 + wfilepoint + tx*2] = encodedData[tx*2];
out_d[bx * PCKTSIZE*2 + wfilepoint + tx*2 + 1] = encodedData[tx*2+1];
//update written pointer and heads
wfilepoint = wfilepoint + MAX_CODED*2;
windowHead = (windowHead + MAX_CODED) % (WINDOW_SIZE+MAX_CODED);
uncodedHead = (uncodedHead + MAX_CODED) % (MAX_CODED*2);
__syncthreads();
//if(lastcheck==1)
//{
// break;
//}
//if(!lastcheck)
{
if(filepoint<PCKTSIZE){
// refill the half of the lookahead we just consumed and slide
// the consumed bytes into the window
//uncodedLookahead[(uncodedHead+ MAX_CODED)% (MAX_CODED*2)] = tex1Dfetch(in_d_tex, bx * PCKTSIZE + filepoint + tx);
uncodedLookahead[(uncodedHead+ MAX_CODED)% (MAX_CODED*2)] = in_d[bx * PCKTSIZE + filepoint + tx];
filepoint+=MAX_CODED;
//find the location for the thread specific view of window
slidingWindow[ (windowHead + WINDOW_SIZE ) % (WINDOW_SIZE + MAX_CODED) ] = uncodedLookahead[uncodedHead];
//__syncthreads();
}
else{
// packet exhausted: mark the final iteration and poison the
// window tail so stale data cannot match
lastcheck++;
slidingWindow[(windowHead + MAX_CODED ) % (WINDOW_SIZE+MAX_CODED)] = '^';
}
__syncthreads();
loadcounter++;
matchData = FindMatch(windowHead, uncodedHead,slidingWindow,uncodedLookahead,tx,bx, wfilepoint, lastcheck,loadcounter);
}
} //while
// epilogue: emit codes for the final (possibly truncated) lookahead
if(lastcheck==1)
{
if(matchData.length > (MAX_CODED - tx))
matchData.length = MAX_CODED - tx;
}
if (matchData.length >= MAX_CODED)
{
// garbage beyond last data happened to extend match length //
matchData.length = MAX_CODED-1;
}
if (matchData.length <= MAX_UNCODED)
{
// not long enough match. write uncoded byte //
matchData.length = 1; // set to 1 for 1 byte uncoded //
encodedData[tx*2] = 1;
encodedData[tx*2 + 1] = uncodedLookahead[uncodedHead];
}
else if(matchData.length > MAX_UNCODED)
{
// match length > MAX_UNCODED. Encode as offset and length. //
encodedData[tx*2] = (unsigned char)matchData.length;
encodedData[tx*2+1] = (unsigned char)matchData.offset;
}
//write out the encoded data into output
out_d[bx * PCKTSIZE*2 + wfilepoint + tx*2] = encodedData[tx*2];
out_d[bx * PCKTSIZE*2 + wfilepoint + tx*2 + 1] = encodedData[tx*2+1];
//update written pointer and heads
wfilepoint = wfilepoint + MAX_CODED*2;
windowHead = (windowHead + MAX_CODED) % (WINDOW_SIZE+MAX_CODED);
uncodedHead = (uncodedHead + MAX_CODED) % (MAX_CODED*2);
}
// Allocate buf_length bytes of device (GPU) memory.
// Aborts via checkCUDAError() on allocation failure.
unsigned char * initGPUmem(int buf_length)
{
    unsigned char *dev_ptr = NULL;
    cudaMalloc((void **)&dev_ptr, sizeof(char) * buf_length);
    checkCUDAError("function, initGPUmemIN, mem alloc to gpu");
    return dev_ptr;
}
// Allocate buf_length bytes of page-locked (pinned) host memory, required
// for the async copies in compression_kernel_wrapper(). Aborts on failure.
unsigned char * initCPUmem(int buf_length)
{
    unsigned char *host_ptr = NULL;
    cudaMallocHost((void **)&host_ptr, sizeof(char) * buf_length);
    checkCUDAError("function, initCPUmemIN, mem alloc to cpu");
    return host_ptr;
}
// Release device memory allocated by initGPUmem().
// Now verifies the free succeeded, matching deleteCPUmem()'s error handling
// (the original silently ignored cudaFree failures).
void deleteGPUmem(unsigned char * mem_d)
{
    cudaFree(mem_d);
    checkCUDAError("deleteGPUmem func,cudaFree");
}
// Release pinned host memory allocated by initCPUmem(); aborts on failure.
void deleteCPUmem(unsigned char * host_ptr)
{
    cudaFreeHost(host_ptr);
    checkCUDAError("deleteCPUmem func,cudaFreeHost");
}
// Destroy every CUDA stream created by initGPU().
// Fix: the original passed ("...cudaStreamDestroy" + i) to checkCUDAError();
// in C that is pointer arithmetic on the string literal, not concatenation --
// it truncates the message and indexes past the end of the literal once i
// exceeds its length (undefined behavior, since nstreams can be large).
// Also corrects the copy-pasted "deleteCPUmem" in the message text.
void deleteGPUStreams()
{
    for (int i = 0; i < nstreams; ++i)
    {
        cudaStreamDestroy(streams[i]);
        checkCUDAError("deleteGPUStreams func, cudaStreamDestroy");
    }
}
// Select device 0 and create the global pool of nstreams CUDA streams used
// by the compression pipeline. Must be called before any wrapper that
// references `streams`. Aborts on any CUDA error.
// Fix: the original never checked the malloc() result, so a failed
// allocation would crash inside cudaStreamCreate instead of reporting.
void initGPU()
{
    //cudaDeviceReset();
    cudaSetDevice(0);
    //cudaSetDeviceFlags(cudaDeviceScheduleAuto);
    checkCUDAError("initialize GPU");
    streams = (cudaStream_t*) malloc(nstreams * sizeof(cudaStream_t));
    if (streams == NULL) {printf ("Memory error, streams"); exit (2);}
    for(int i = 0; i < nstreams; i++) {
        cudaStreamCreate(&(streams[i]));
        checkCUDAError("streams created");
    }
}
// Tear down the current device context, releasing every device allocation
// and stream. Nothing created by initGPU()/initGPUmem() is valid afterwards.
void resetGPU()
{
cudaDeviceReset();
}
// Capability probe for callers: this build keeps its streams on the GPU.
// Always returns true (nonzero).
int streams_in_GPU(){
return true;
}
// Block the host until the stream group selected by `index` has finished.
// Streams are grouped `instreams` per index; polling the last stream of the
// group suffices because work is issued to the group's streams in order.
// Returns true on completion.
int onestream_finish_GPU(int index)
{
//cudaStreamSynchronize(streams[(index+1)*instreams -1]);
// index of the last stream belonging to this group
int check = (index+1)*instreams-1;
// NOTE(review): (index+1)*instreams-1 is always one less than a multiple of
// instreams, so it can never equal instreams*nstreams -- this clamp looks
// like dead code; confirm the intended upper bound.
if (check == instreams * nstreams)
check = check -1;
// busy-wait poll until the stream reports completion (burns a CPU core)
while(cudaStreamQuery(streams[check])!=cudaSuccess);
checkCUDAError("cuda stream sync");
return true;
}
// Asynchronously compress `buffer` (buf_length bytes) on the GPU using stream
// group `index`: split the input into `instreams` equal slices, and for each
// slice issue H2D copy -> EncodeKernel -> D2H copy on its own stream so the
// stages of different slices overlap. Encoded output (2 bytes per input code)
// lands in `bufferout`; in_d/out_d are preallocated device buffers.
// compression_type, wsize and noop are accepted for interface compatibility
// but unused here. Returns 1.
int compression_kernel_wrapper(unsigned char *buffer, int buf_length, unsigned char * bufferout, int compression_type,int wsize,\
int numthre, int noop,int index,unsigned char * in_d,unsigned char * out_d)
{
int numThreads = numthre;
// ceil(buf_length / (PCKTSIZE*instreams)) packets per stream slice
int numblocks = (buf_length / (PCKTSIZE*instreams)) + (((buf_length % (PCKTSIZE*instreams))>0)?1:0);
int i=0;
// EncodeKernel is register/L1 bound rather than shared-memory bound
cudaFuncSetCacheConfig(EncodeKernel, cudaFuncCachePreferL1);//cudaFuncCachePreferShared);
for(i = 0; i < instreams; i++)
{
//copy memory to cuda device
cudaMemcpyAsync(in_d+ i * (buf_length / instreams), buffer+ i * (buf_length / instreams), \
sizeof(char)*(buf_length / instreams),cudaMemcpyHostToDevice, streams[index*instreams + i]);
checkCUDAError("mem copy to gpu");
}
for(i = 0; i < instreams; i++)
{
// output slice is 2x the input slice: worst case every byte is emitted
// as a (flag, byte) pair
EncodeKernel<<< numblocks, numThreads, 0, streams[index*instreams + i]>>>(in_d + i * (buf_length / instreams),\
out_d + 2 * i * (buf_length / instreams),numThreads);
checkCUDAError("kernel invocation"); // Check for any CUDA errors
}
//copy memory back
for(i = 0; i < instreams; i++)
{
cudaMemcpyAsync(bufferout + 2 * i * (buf_length / instreams), out_d + 2 * i * (buf_length / instreams),\
sizeof(char)*(buf_length / instreams)*2, cudaMemcpyDeviceToHost, streams[index*instreams + i]);
checkCUDAError("mem copy back");
}
// NOTE(review): all work is still in flight here; callers must sync the
// stream group (onestream_finish_GPU) before reading bufferout.
return 1;
}
// pthread worker: post-process one slice of the GPU encoder's raw output.
// The GPU emits, per code, a 2-byte record (count, byte): count==1 means an
// uncoded literal, count>1 means a back-reference of that length. This pass
// packs the records into the classic LZSS bit-flag format -- one flag byte
// per 8 codes followed by their payload -- writing the packed stream back
// into `buffer` (in place) and recording each 4096-byte packet's compressed
// size in `header`. Each thread handles 1/numts of the buffer.
void *aftercomp (void *q)
{
aftercompdata_t * data=(aftercompdata_t *)q;
int i=0, j=0, k=0, m=0, temptot=0, tempj=0;
int finish=0;
unsigned char flags =0;
unsigned char flagPos = 0x01;
// staging area for up to 8 codes (worst case 2 bytes each) until the
// owning flag byte is complete
unsigned char holdbuf[16];
int holdbufcount=0;
int morecounter=0;
//reset the flags again
flagPos = 0x01;
flags =0;
temptot=0;
holdbufcount=0;
unsigned char * bufferout = data->bufferout;
unsigned char * buffer = data->buffer;
int * header = data->header;
int buf_length = data->buf_length;
// i: read cursor into the 2x-sized GPU output; j: write cursor into the
// packed stream; k: packet index into header[]
i = (data->tid)*((buf_length*2)/(data->numts));
j = (data->tid)*((buf_length)/(data->numts));
k = (data->tid)*(buf_length/(PCKTSIZE*data->numts));
finish = (data->tid + 1)*((buf_length)/(data->numts));
while(i<(finish*2))
{
if (j>finish) {
printf("compression took more, size is %d!!! \n",j);
data->comptookmore = 1;
break;
}
temptot = bufferout[i];
if(temptot == 1) //if no matching
{
flags |= flagPos; // mark with uncoded byte flag //
holdbuf[holdbufcount]=bufferout[i+1];
holdbufcount++;
i=i+2;
}
else //if there is mathcing
{
// matched run: store (length, offset) and skip the redundant
// records the GPU wrote for the covered positions
holdbuf[holdbufcount]=temptot;
holdbufcount++;
holdbuf[holdbufcount]=bufferout[i+1];
holdbufcount++;
i=i+(temptot*2);
}
if (flagPos == 0x80) //if we have looked at 8 characters that fills the flag holder
{
buffer[j] = flags;
j++;
for(m=0;m<holdbufcount;m++){
buffer[j] = holdbuf[m];
j++;
}
// reset encoded data buffer //
flags = 0;
flagPos = 0x01;
holdbufcount=0;
}
else
{
// we don't have 8 code flags yet, use next bit for next flag //
flagPos <<= 1;
}
// for each packet with the size of 4096 bytes
if(i%8192 == 0 && i>0){ //PCKTSIZE*2
// flush the partial flag group so a packet never straddles one
if(holdbufcount>0){
buffer[j] = flags;
j++;
for(m=0;m<holdbufcount;m++){
buffer[j] = holdbuf[m];
j++;
}
holdbufcount=0;
}
flags = 0;
flagPos = 0x01;
if((j-tempj) >= PCKTSIZE){
morecounter++;
//compression took more, so just write the file without compression info
}
header[k]=j-tempj;
tempj=j;
k++;
}
}
// bytes actually produced by this thread's slice
data->newlen = j - (data->tid)*((buf_length)/(data->numts)) ;
return 0;
}
// Host-side post-processing stage: fans the GPU output in `bufferout` out to
// NWORKERS aftercomp() threads, compacts their per-slice results contiguously
// into `buffer`, then appends the per-packet size header, the 4-byte original
// length and a 2-byte padding field. On success stores the final size in
// *comp_length and returns 1; returns 0 if any slice expanded past its input
// size (compression not worthwhile).
int aftercompression_wrapper(unsigned char * buffer, int buf_length, unsigned char * bufferout, int * comp_length)
{
int comptookmore = 0;
//struct timeval t1_start,t1_end;
//double alltime;
//gettimeofday(&t1_start,0);
// allocate memory to contain the header of the file:
int * header;
header = (int *)malloc (sizeof(int)*(buf_length/PCKTSIZE));
if (header == NULL) {printf ("Memory error, header"); exit (2);}
pthread_t afcomp[NWORKERS];
aftercompdata_t data[NWORKERS];
int l=0;
for(l=0;l<NWORKERS;l++)
{
data[l].tid=l;
data[l].header=header; /* offset to start of longest match */
data[l].buffer=buffer;
data[l].buf_length=buf_length;
data[l].bufferout=bufferout;
data[l].numts = NWORKERS;
data[l].comptookmore=0;
data[l].newlen=0;
pthread_create (&afcomp[l], NULL, &aftercomp, &data[l]);
}
int i=0, j=0, k=0;//, m=0, temptot=0, tempj=0;
void *status;
for(l=0;l<NWORKERS;l++){
pthread_join( afcomp[l], &status);
comptookmore += data[l].comptookmore;
// slide slice l's output down so the packed stream is contiguous;
// joins happen in order, so buffer[0..j) is already final
if(l!=0)
{
for(i=0;i<data[l].newlen;i++)
{
buffer[j+i]=buffer[(l*(buf_length/NWORKERS))+i];
}
}
j+=data[l].newlen;
}
k=(buf_length/PCKTSIZE);
if(!comptookmore){
//Add header to buffer
// per-packet compressed sizes, big-endian 16-bit each
unsigned char cc;
for(i=0;i<k;i++)
{
cc = (unsigned char)(header[i]>>8);
buffer[j]=cc;
j++;
cc=(unsigned char)header[i];
buffer[j]=cc;
j++;
}
//Add total size
// original (uncompressed) length, big-endian 32-bit
cc = (unsigned char)(buf_length>>24);
buffer[j]=cc;
j++;
cc = (unsigned char)(buf_length>>16);
buffer[j]=cc;
j++;
cc = (unsigned char)(buf_length>>8);
buffer[j]=cc;
j++;
cc=(unsigned char)buf_length;
buffer[j]=cc;
j++;
//Add pad size
int paddingsize = 0;
cc = (unsigned char)(paddingsize>>8);
buffer[j]=cc;
j++;
cc=(unsigned char)paddingsize;
buffer[j]=cc;
j++;
}
if(comptookmore!=0)
return 0;
if(j>buf_length)
printf("compression TOOK more!!! %d\n",j);
*comp_length = j;
free(header);
return 1;
} |
e6a24c00b29d5d947e0b0d9f8c2a2741da4d8025.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel.h"
//initialization
// Initialize the conserved-variable field d_U (rho, rho*u, rho*v, E stored as
// four col*row planes) with the two uniform states of an oblique shock:
// state 1 below/left of the shock line, state 2 above/right. One thread per
// grid cell; the shock passes through (x=0, y=1) at angle alpha.
__global__ void init_kernel(double* d_U, double dx, double dy, double gam, int col, int row)
{
unsigned long long tid = (blockIdx.x * blockDim.x + threadIdx.x);
const double rou1 = 1.0, u1 = 2.9, v1 = 0.0, p1 = 0.71429; //uniform inlet condition at left boundary
const double rou2 = 1.69997, u2 = 2.61934, v2 = -0.50632, p2 = 1.52819; //uniform inlet condition at up boundary
const double pi = 3.141592654, alpha = 29 * pi / 180; //incidence radian of oblique shock wave
if (tid >= col * row)
{
return;
}
int size = col * row;
int xx = tid % col;
int yy = tid / col;
// x-coordinate of the shock front at this cell's height
double x = (1 - yy * dy) / tan(alpha);
if (xx * dx <= x)
{
// pre-shock state; total energy E = p/(gam-1) + rho*|V|^2/2
d_U[tid] = rou1;
d_U[tid + size] = rou1 * u1;
d_U[tid + size * 2] = rou1 * v1;
d_U[tid + size * 3] = p1 / (gam - 1) + rou1 * (u1 * u1 + v1 * v1) / 2;
}
else
{
// post-shock state
d_U[tid] = rou2;
d_U[tid + size] = rou2 * u2;
d_U[tid + size * 2] = rou2 * v2;
d_U[tid + size * 3] = p2 / (gam - 1) + rou2 * (u2 * u2 + v2 * v2) / 2;
}
}
// Host wrapper: launch init_kernel over all col*row cells with TPB threads
// per block, then block until initialization has completed.
void call_init(double* d_U, double dx, double dy, double gam, int col, int row, int TPB)
{
    const int numBlocks = (col * row + TPB - 1) / TPB;   // ceil(col*row / TPB)
    init_kernel << <numBlocks, TPB >> > (d_U, dx, dy, gam, col, row);
    hipDeviceSynchronize();
}
//calculation of dt based on the algorithm stability parameter cfl
// Compute the CFL-limited time step: dt = sf * min(dx,dy) / max wave speed,
// where the wave speed is sound speed + flow speed at each interior cell.
// NOTE(review): `maxvel` is a per-thread local, so there is no reduction --
// every interior thread writes its OWN dt[0] and the surviving value is
// whichever write lands last, not the global maximum. The host-side call_cfl
// appears to be the path actually used; confirm before relying on this kernel.
__global__ void cfl_kernel(double* d_U, double dx, double dy, double* dt, double gam, double sf, int col, int row)
{
unsigned long long tid = (blockIdx.x * blockDim.x + threadIdx.x);
double maxvel = 0;
int xx = tid % col;
int yy = tid / col;
int size = col * row;
if (xx < 1 || xx >= col || yy < 1 || yy >= row)
{
return;
}
// recover primitive variables from the conserved vector
double u = d_U[tid + size] / d_U[tid];
double v = d_U[tid + size * 2] / d_U[tid];
double p = (gam - 1) * (d_U[tid + size * 3] - 0.5 * d_U[tid] * (u * u + v * v));
double velocity = sqrt(gam * p / d_U[tid]) + sqrt(u * u + v * v); //feature speed
if (velocity > maxvel)
{
maxvel = velocity;
}
dt[0] = sf * min(dx, dy) / maxvel;
}
// Host-side CFL computation: scan every interior cell for the largest
// characteristic speed (sound speed + flow speed) and set
// *dt = sf * min(dx,dy) / maxvel.
// NOTE(review): this loop dereferences `d_U` on the host; despite the d_
// prefix it must therefore point to host-accessible memory (e.g. managed
// or host allocation) -- confirm against the allocation site.
void call_cfl(double* d_U, double dx, double dy, double* dt, double sf, double gam, int col, int row)
{
double maxvel = 0;
long int size = col * row;
for (int i = 1; i <= gx; i++)
{
for (int j = 1; j <= gy; j++)
{
double u0 = d_U[i + j * col];
double u1 = d_U[i + j * col + size];
double u2 = d_U[i + j * col + size * 2];
double u3 = d_U[i + j * col + size * 3];
// primitive variables from the conserved vector
double u = u1 / u0;
double v = u2 / u0;
double p = (gam - 1) * (u3 - 0.5 * u0 * (u * u + v * v));
double velocity = sqrt(gam * p / u0) + sqrt(u * u + v * v); //feature speed
if (velocity > maxvel)
{
maxvel = velocity;
}
}
}
*dt = sf * min(dx, dy) / maxvel;
}
//deal with the boundary
// Apply boundary conditions to the ghost cells of d_U:
//   left   (xx==0)    : fixed pre-shock inflow state 1
//   right  (xx==gx+1) : zero-gradient outflow (copy from xx==gx)
//   top    (yy==gy+1) : fixed post-shock inflow state 2
//   bottom (yy==0)    : reflecting wall (copy row 1, zero normal momentum)
// One thread per cell; non-boundary threads return immediately.
__global__ void bound_kernel(double* d_U, double gam, int col, int row)
{
const double rou1 = 1.0, u1 = 2.9, v1 = 0.0, p1 = 0.71429; //uniform inlet condition at left boundary
const double rou2 = 1.69997, u2 = 2.61934, v2 = -0.50632, p2 = 1.52819; //uniform inlet condition at up boundary
unsigned long long tid = (blockIdx.x * blockDim.x + threadIdx.x);
int xx = tid % col;
int yy = tid / col;
int size = col * row;
if (xx >= col || yy >= row)
{
return;
}
//left
if (xx == 0 && yy <= gy + 1)
{
d_U[tid] = rou1;
d_U[tid + size] = rou1 * u1;
d_U[tid + size * 2] = rou1 * v1;
d_U[tid + size * 3] = p1 / (gam - 1) + rou1 * (u1 * u1 + v1 * v1) / 2;
}
//right
if (xx == gx + 1 && yy <= gy + 1)
{
for (int k = 0; k < 4; k++)
{
d_U[tid + k * size] = d_U[gx + yy * col + k * size];
}
}
//up
if (yy == gy + 1 && xx <= gx + 1)
{
d_U[tid] = rou2;
d_U[tid + size] = rou2 * u2;
d_U[tid + size * 2] = rou2 * v2;
d_U[tid + size * 3] = p2 / (gam - 1) + rou2 * (u2 * u2 + v2 * v2) / 2;
}
//down
if (yy == 0 && xx <= gx + 1)
{
// slip wall: mirror density/energy/x-momentum, kill y-momentum
d_U[tid] = d_U[xx + 1 * col];
d_U[tid + size] = d_U[xx + 1 * col + size];
d_U[tid + size * 2] = 0;
d_U[tid + size * 3] = d_U[xx + 1 * col + size * 3];
}
}
// Host wrapper: refresh all boundary ghost cells of d_U and wait for the
// kernel to finish before returning.
void call_bound(double* d_U, double gam, int col, int row, int TPB)
{
    const int numBlocks = (col * row + TPB - 1) / TPB;   // ceil(col*row / TPB)
    bound_kernel << <numBlocks, TPB >> > (d_U, gam, col, row);
    hipDeviceSynchronize();
}
//differential in x-direction
// x-sweep artificial viscosity: write into `temp` a smoothed copy of d_U,
// U + 0.5*eta*theta*(U[i+1] - 2U[i] + U[i-1]), where theta is a shock
// switch built from the density field (large near discontinuities, small in
// smooth regions). Operates on interior x-columns 1..gx.
__global__ void updataU_kernel(double* d_U, double* temp, double dx, double dy, double dt, double gam, int col, int row)
{
const int a = 3.0; //speed of sound not exceeding 3
// viscosity coefficient from the x-direction Courant number
double eta = (a * dt / dx) * (1 - a * dt / dx);
unsigned long long tid = (blockIdx.x * blockDim.x + threadIdx.x);
int xx = tid % col;
int yy = tid / col;
int size = col * row;
if (xx < 1 || xx > gx || yy < 0 || yy > gy + 1)
{
return;
}
//switching function
// built from the density plane only (component 0); 1e-100 avoids 0/0
double theta = fabs(fabs(d_U[xx + 1 + yy * col] - d_U[tid]) - fabs(d_U[tid] - d_U[xx - 1 + yy * col]))
/ (fabs(d_U[xx + 1 + yy * col] - d_U[tid]) + fabs(d_U[tid] - d_U[xx - 1 + yy * col]) + 1e-100);
for (int k = 0; k < 4; k++)
{
temp[tid + k * size] = d_U[tid + k * size] + 0.5 * eta * theta * (d_U[xx + 1 + yy * col + k * size]
- 2 * d_U[tid + k * size] + d_U[xx - 1 + yy * col + k * size]);
}
}
// x-sweep corrector: conservative update of the interior cells,
// U -= (dt/dx) * (F[i] - F[i-1]), using the half-step fluxes in d_F.
__global__ void updataU2_kernel(double* d_U, double* d_F, double dx, double dy, double dt, double gam, int col, int row)
{
unsigned long long tid = (blockIdx.x * blockDim.x + threadIdx.x);
int xx = tid % col;
int yy = tid / col;
int size = col * row;
if (xx < 1 || xx > gx || yy < 0 || yy > gy + 1)
{
return;
}
for (int i = 0; i < 4; i++)
{
d_U[tid + i * size] = d_U[tid + i * size] - (dt / dx) *
(d_F[tid + i * size] - d_F[xx - 1 + yy * col + i * size]);
}
}
// Copy the four conserved components from the scratch buffer `temp` back
// into d_U for every cell in the x-sweep interior range.
__global__ void temp2U_kernel(double* d_U, double* temp, double dx, double dy, double dt, double gam, int col, int row)
{
    const unsigned long long tid = blockIdx.x * blockDim.x + threadIdx.x;
    const int ix = tid % col;
    const int iy = tid / col;
    const int plane = col * row;   // stride between conserved components
    const bool interior = (ix >= 1) && (ix <= gx) && (iy >= 0) && (iy <= gy + 1);
    if (!interior)
        return;
    for (int comp = 0; comp < 4; ++comp)
        d_U[tid + comp * plane] = temp[tid + comp * plane];
}
// Evaluate the x-direction Euler flux F(U) = (rho*u, rho*u^2+p, rho*u*v,
// (E+p)*u) at every cell of d_U (including ghost columns 0 and gx+1, which
// the half-step stencil needs).
__global__ void updataF_kernel(double* d_U, double* d_F, double dx, double dy, double dt, double gam, int col, int row)
{
unsigned long long tid = (blockIdx.x * blockDim.x + threadIdx.x);
int xx = tid % col;
int yy = tid / col;
int size = col * row;
if (xx < 0 || xx > gx + 1 || yy < 0 || yy > gy + 1)
{
return;
}
// per-component views of the conserved vector
double* u0 = d_U;
double* u1 = d_U + size;
double* u2 = d_U + 2 * size;
double* u3 = d_U + 3 * size;
// primitive variables
double u = u1[tid] / u0[tid];
double v = u2[tid] / u0[tid];
double p = (gam - 1) * (u3[tid] - 0.5 * u0[tid] * (u * u + v * v));
d_F[tid] = u1[tid];
d_F[tid + size] = u0[tid] * u * u + p;
d_F[tid + size * 2] = u0[tid] * u * v;
d_F[tid + size * 3] = (u3[tid] + p) * u;
}
// Evaluate the x-direction Euler flux from the half-step state d_U (here the
// half-step array U^{n+1/2}). Identical to updataF_kernel except the x-range
// stops at gx -- half-step values only exist at interfaces 0..gx.
__global__ void updataF_kernel2(double* d_U, double* d_F, double dx, double dy, double dt, double gam, int col, int row)
{
unsigned long long tid = (blockIdx.x * blockDim.x + threadIdx.x);
int xx = tid % col;
int yy = tid / col;
int size = col * row;
if (xx < 0 || xx > gx || yy < 0 || yy > gy + 1)
{
return;
}
// per-component views of the conserved vector
double* u0 = d_U;
double* u1 = d_U + size;
double* u2 = d_U + 2 * size;
double* u3 = d_U + 3 * size;
// primitive variables
double u = u1[tid] / u0[tid];
double v = u2[tid] / u0[tid];
double p = (gam - 1) * (u3[tid] - 0.5 * u0[tid] * (u * u + v * v));
d_F[tid] = u1[tid];
d_F[tid + size] = u0[tid] * u * u + p;
d_F[tid + size * 2] = u0[tid] * u * v;
d_F[tid + size * 3] = (u3[tid] + p) * u;
}
// Lax-Wendroff x-sweep predictor: compute the half-step state at each
// x-interface, U_half[i] = (U[i+1]+U[i])/2 - (dt/2dx)*(F[i+1]-F[i]).
__global__ void updataUhalf_kernel(double* d_U, double* d_U_half, double* d_F, double dx, double dy, double dt, double gam, int col, int row)
{
unsigned long long tid = (blockIdx.x * blockDim.x + threadIdx.x);
int xx = tid % col;
int yy = tid / col;
int size = col * row;
if (xx < 0 || xx > gx || yy < 0 || yy > gy + 1)
{
return;
}
for (int i = 0; i < 4; i++)
{
d_U_half[tid + i * size] = 0.5 * (d_U[xx + 1 + yy * col + i * size]
+ d_U[tid + i * size]) - 0.5 * dt / dx * (d_F[xx + 1 + yy * col + i * size]
- d_F[tid + i * size]);
}
}
//differential in y-direction
// y-sweep artificial viscosity: write into `temp` a smoothed copy of d_U,
// U + 0.5*eta*theta*(U[j+1] - 2U[j] + U[j-1]), where theta is a shock switch
// built from the density field. Mirrors updataU_kernel for the y direction.
// Fix: eta was computed with dx (copy-paste from the x-sweep); a y-direction
// difference must use the y spacing dy. The two are only equivalent on
// square grids (dx == dy).
__global__ void updataU_kernel_y(double* d_U, double* temp, double dx, double dy, double dt, double gam, int col, int row)
{
const double a = 3.0; //speed of sound not exceeding 3
// viscosity coefficient from the y-direction Courant number
double eta = (a * dt / dy) * (1 - a * dt / dy);
unsigned long long tid = (blockIdx.x * blockDim.x + threadIdx.x);
int xx = tid % col;
int yy = tid / col;
int size = col * row;
if (xx < 0 || xx > gx + 1 || yy < 1 || yy > gy)
{
return;
}
//switching function
// built from the density plane only (component 0); 1e-100 avoids 0/0
double theta = fabs(fabs(d_U[xx + (yy + 1) * col] - d_U[tid]) - fabs(d_U[tid] - d_U[xx + (yy - 1) * col]))
/ (fabs(d_U[xx + (yy + 1) * col] - d_U[tid]) + fabs(d_U[tid] - d_U[xx + (yy - 1) * col]) + 1e-100);
for (int k = 0; k < 4; k++)
{
temp[tid + k * size] = d_U[tid + k * size] + 0.5 * eta * theta * (d_U[xx + (yy + 1) * col + k * size]
- 2 * d_U[tid + k * size] + d_U[xx + (yy - 1) * col + k * size]);
}
}
// y-sweep corrector: conservative update of the interior cells,
// U -= (dt/dy) * (G[j] - G[j-1]), using the half-step fluxes in d_G.
// Fix: the difference was divided by dx (copy-paste from the x-sweep);
// a y-direction flux difference must be divided by dy (equivalent only
// when dx == dy).
__global__ void updataU2_kernel_y(double* d_U, double* d_G, double dx, double dy, double dt, double gam, int col, int row)
{
unsigned long long tid = (blockIdx.x * blockDim.x + threadIdx.x);
int xx = tid % col;
int yy = tid / col;
int size = col * row;
if (xx < 0 || xx > gx + 1 || yy < 1 || yy > gy)
{
return;
}
for (int i = 0; i < 4; i++)
{
d_U[tid + i * size] = d_U[tid + i * size] - (dt / dy) *
(d_G[tid + i * size] - d_G[xx + (yy - 1) * col + i * size]);
}
}
// Copy the four conserved components from the scratch buffer `temp` back
// into d_U for every cell in the y-sweep interior range.
__global__ void temp2U_kernel_y(double* d_U, double* temp, double dx, double dy, double dt, double gam, int col, int row)
{
    const unsigned long long tid = blockIdx.x * blockDim.x + threadIdx.x;
    const int ix = tid % col;
    const int iy = tid / col;
    const int plane = col * row;   // stride between conserved components
    const bool interior = (ix >= 0) && (ix <= gx + 1) && (iy >= 1) && (iy <= gy);
    if (!interior)
        return;
    for (int comp = 0; comp < 4; ++comp)
        d_U[tid + comp * plane] = temp[tid + comp * plane];
}
// Evaluate the y-direction Euler flux G(U) = (rho*v, rho*u*v, rho*v^2+p,
// (E+p)*v) at every cell of d_U (including ghost rows 0 and gy+1, which the
// half-step stencil needs).
__global__ void updataG_kernel_y(double* d_U, double* d_G, double dx, double dy, double dt, double gam, int col, int row)
{
unsigned long long tid = (blockIdx.x * blockDim.x + threadIdx.x);
int xx = tid % col;
int yy = tid / col;
int size = col * row;
if (xx < 0 || xx > gx + 1 || yy < 0 || yy > gy + 1)
{
return;
}
// per-component views of the conserved vector
double* u0 = d_U;
double* u1 = d_U + size;
double* u2 = d_U + 2 * size;
double* u3 = d_U + 3 * size;
// primitive variables
double u = u1[tid] / u0[tid];
double v = u2[tid] / u0[tid];
double p = (gam - 1) * (u3[tid] - 0.5 * u0[tid] * (u * u + v * v));
d_G[tid] = u2[tid];
d_G[tid + size] = u0[tid] * u * v;
d_G[tid + size * 2] = u0[tid] * v * v + p;
d_G[tid + size * 3] = (u3[tid] + p) * v;
}
// Evaluate the y-direction Euler flux from the half-step state d_U (here the
// half-step array U^{n+1/2}). Identical to updataG_kernel_y except the
// y-range stops at gy -- half-step values only exist at interfaces 0..gy.
__global__ void updataG_kernel2_y(double* d_U, double* d_G, double dx, double dy, double dt, double gam, int col, int row)
{
unsigned long long tid = (blockIdx.x * blockDim.x + threadIdx.x);
int xx = tid % col;
int yy = tid / col;
int size = col * row;
if (xx < 0 || xx > gx + 1 || yy < 0 || yy > gy)
{
return;
}
// per-component views of the conserved vector
double* u0 = d_U;
double* u1 = d_U + size;
double* u2 = d_U + 2 * size;
double* u3 = d_U + 3 * size;
// primitive variables
double u = u1[tid] / u0[tid];
double v = u2[tid] / u0[tid];
double p = (gam - 1) * (u3[tid] - 0.5 * u0[tid] * (u * u + v * v));
d_G[tid] = u2[tid];
d_G[tid + size] = u0[tid] * u * v;
d_G[tid + size * 2] = u0[tid] * v * v + p;
d_G[tid + size * 3] = (u3[tid] + p) * v;
}
// Lax-Wendroff y-sweep predictor: compute the half-step state at each
// y-interface, U_half[j] = (U[j+1]+U[j])/2 - (dt/2dy)*(G[j+1]-G[j]).
// Fix: the flux term was divided by dx (copy-paste from the x-sweep
// predictor); the y-direction half step must use dy (equivalent only when
// dx == dy).
__global__ void updataUhalf_kernel_y(double* d_U, double* d_U_half, double* d_G, double dx, double dy, double dt, double gam, int col, int row)
{
unsigned long long tid = (blockIdx.x * blockDim.x + threadIdx.x);
int xx = tid % col;
int yy = tid / col;
int size = col * row;
if (xx < 0 || xx > gx + 1 || yy < 0 || yy > gy)
{
return;
}
for (int i = 0; i < 4; i++)
{
d_U_half[tid + i * size] = 0.5 * (d_U[xx + (yy + 1) * col + i * size]
+ d_U[tid + i * size]) - 0.5 * dt / dy * (d_G[xx + (yy + 1) * col + i * size]
- d_G[tid + i * size]);
}
}
//Lax-Wendroff 2d sovler
// One Lax-Wendroff x-sweep of length dt: artificial viscosity smoothing,
// flux evaluation, half-step predictor, half-step flux, then the
// conservative corrector. All launches share the default stream, so they
// execute in order without explicit synchronization.
void call_solve_x(double* d_U, double* d_U_half, double* d_FG, double* d_temp, double dx, double dy, double dt, double gam, int col, int row, int TPB)
{
int BSIZE = (col * row + (TPB - 1)) / TPB;
hipMemset(d_temp, 0, sizeof(double) * col * row * 4);
updataU_kernel << <BSIZE, TPB >> > (d_U, d_temp, dx, dy, dt, gam, col, row);
temp2U_kernel << <BSIZE, TPB >> > (d_U, d_temp, dx, dy, dt, gam, col, row);
updataF_kernel << <BSIZE, TPB >> > (d_U, d_FG, dx, dy, dt, gam, col, row);
updataUhalf_kernel << <BSIZE, TPB >> > (d_U, d_U_half, d_FG, dx, dy, dt, gam, col, row);
updataF_kernel2 << <BSIZE, TPB >> > (d_U_half, d_FG, dx, dy, dt, gam, col, row);
updataU2_kernel << <BSIZE, TPB >> > (d_U, d_FG, dx, dy, dt, gam, col, row);
}
// Host driver for one y-direction half-step; mirrors call_solve_x with the
// *_y kernels and the y-flux G stored in d_FG.
// NOTE(review): no error checking and no synchronization here.
void call_solve_y(double* d_U, double* d_U_half, double* d_FG, double* d_temp, double dx, double dy, double dt, double gam, int col, int row, int TPB)
{
    // One thread per grid cell, rounded up.
    int BSIZE = (col * row + (TPB - 1)) / TPB;
    hipMemset(d_temp, 0, sizeof(double) * col * row * 4);
    updataU_kernel_y << <BSIZE, TPB >> > (d_U, d_temp, dx, dy, dt, gam, col, row);
    temp2U_kernel_y << <BSIZE, TPB >> > (d_U, d_temp, dx, dy, dt, gam, col, row);
    updataG_kernel_y << <BSIZE, TPB >> > (d_U, d_FG, dx, dy, dt, gam, col, row);
    updataUhalf_kernel_y << <BSIZE, TPB >> > (d_U, d_U_half, d_FG, dx, dy, dt, gam, col, row);
    updataG_kernel2_y << <BSIZE, TPB >> > (d_U_half, d_FG, dx, dy, dt, gam, col, row);
    updataU2_kernel_y << <BSIZE, TPB >> > (d_U, d_FG, dx, dy, dt, gam, col, row);
}
// One full time step dt via symmetric dimensional splitting: four half-steps
// in the order x, y, y, x, each advancing dt/2 and followed by a boundary
// refresh. The x-y-y-x ordering cancels the leading splitting error.
void call_solver_2d(double* d_U, double* d_U_half, double* d_FG, double* d_temp, double dx, double dy, double dt, double gam, int col, int row, int TPB)
{
    call_solve_x(d_U, d_U_half, d_FG, d_temp, dx, dy, dt / 2.0, gam, col, row, TPB);
    call_bound(d_U, gam, col, row, TPB);
    call_solve_y(d_U, d_U_half, d_FG, d_temp, dx, dy, dt / 2.0, gam, col, row, TPB);
    call_bound(d_U, gam, col, row, TPB);
    call_solve_y(d_U, d_U_half, d_FG, d_temp, dx, dy, dt / 2.0, gam, col, row, TPB);
    call_bound(d_U, gam, col, row, TPB);
    call_solve_x(d_U, d_U_half, d_FG, d_temp, dx, dy, dt / 2.0, gam, col, row, TPB);
    call_bound(d_U, gam, col, row, TPB);
}
| e6a24c00b29d5d947e0b0d9f8c2a2741da4d8025.cu | #include "kernel.h"
//initialization
//initialization
// Sets the oblique-shock-reflection initial condition: state 1 (pre-shock)
// below/left of the incident shock line, state 2 (post-shock) above it.
// One thread per cell; d_U holds four size = col*row planes
// [rho, rho*u, rho*v, E] with E = p/(gam-1) + rho*(u^2+v^2)/2.
__global__ void init_kernel(double* d_U, double dx, double dy, double gam, int col, int row)
{
    unsigned long long tid = (blockIdx.x * blockDim.x + threadIdx.x);
    const double rou1 = 1.0, u1 = 2.9, v1 = 0.0, p1 = 0.71429; //uniform inlet condition at left boundary
    const double rou2 = 1.69997, u2 = 2.61934, v2 = -0.50632, p2 = 1.52819; //uniform inlet condition at up boundary
    const double pi = 3.141592654, alpha = 29 * pi / 180; //incidence radian of oblique shock wave
    if (tid >= col * row)
    {
        return;
    }
    int size = col * row;
    int xx = tid % col;
    int yy = tid / col;
    // x-coordinate where the shock line (inclined at alpha, passing through
    // y = 1 at x = 0) crosses this cell's row.
    double x = (1 - yy * dy) / tan(alpha);
    if (xx * dx <= x)
    {
        d_U[tid] = rou1;
        d_U[tid + size] = rou1 * u1;
        d_U[tid + size * 2] = rou1 * v1;
        d_U[tid + size * 3] = p1 / (gam - 1) + rou1 * (u1 * u1 + v1 * v1) / 2;
    }
    else
    {
        d_U[tid] = rou2;
        d_U[tid + size] = rou2 * u2;
        d_U[tid + size * 2] = rou2 * v2;
        d_U[tid + size * 3] = p2 / (gam - 1) + rou2 * (u2 * u2 + v2 * v2) / 2;
    }
}
// Host wrapper: launches init_kernel with one thread per grid cell and blocks
// until the initial condition has been written.
void call_init(double* d_U, double dx, double dy, double gam, int col, int row, int TPB)
{
    const int numBlocks = (col * row + TPB - 1) / TPB; // ceil-div
    init_kernel << <numBlocks, TPB >> > (d_U, dx, dy, gam, col, row);
    cudaDeviceSynchronize();
}
//calculation of dt based on the algorithm stability parameter cfl
// Per-cell CFL time-step kernel.
// NOTE(review): this is NOT a correct reduction — maxvel is thread-local
// (so the `if` compares each thread's single velocity against 0), and every
// surviving thread writes dt[0] unsynchronized; the stored value is whichever
// thread writes last (a data race). The host-side call_cfl() below computes
// the proper minimum and appears to be the version actually used.
__global__ void cfl_kernel(double* d_U, double dx, double dy, double* dt, double gam, double sf, int col, int row)
{
    unsigned long long tid = (blockIdx.x * blockDim.x + threadIdx.x);
    double maxvel = 0;
    int xx = tid % col;
    int yy = tid / col;
    int size = col * row;
    if (xx < 1 || xx >= col || yy < 1 || yy >= row)
    {
        return;
    }
    double u = d_U[tid + size] / d_U[tid];
    double v = d_U[tid + size * 2] / d_U[tid];
    // Ideal-gas pressure, then characteristic speed = sound speed + |velocity|.
    double p = (gam - 1) * (d_U[tid + size * 3] - 0.5 * d_U[tid] * (u * u + v * v));
    double velocity = sqrt(gam * p / d_U[tid]) + sqrt(u * u + v * v); //feature speed
    if (velocity > maxvel)
    {
        maxvel = velocity;
    }
    dt[0] = sf * min(dx, dy) / maxvel;
}
// Host-side CFL time-step computation: scans the interior cells for the
// largest characteristic speed and sets *dt = sf * min(dx, dy) / maxvel.
// NOTE(review): despite the d_ prefix, d_U is dereferenced on the HOST here,
// so it must point to host-accessible (e.g. managed) memory — verify at the
// call site. Loop bounds come from the gx/gy macros, not from col/row.
void call_cfl(double* d_U, double dx, double dy, double* dt, double sf, double gam, int col, int row)
{
    double maxvel = 0;
    long int size = col * row;
    for (int i = 1; i <= gx; i++)
    {
        for (int j = 1; j <= gy; j++)
        {
            double u0 = d_U[i + j * col];
            double u1 = d_U[i + j * col + size];
            double u2 = d_U[i + j * col + size * 2];
            double u3 = d_U[i + j * col + size * 3];
            double u = u1 / u0;
            double v = u2 / u0;
            // Ideal-gas pressure from total energy.
            double p = (gam - 1) * (u3 - 0.5 * u0 * (u * u + v * v));
            double velocity = sqrt(gam * p / u0) + sqrt(u * u + v * v); //feature speed
            if (velocity > maxvel)
            {
                maxvel = velocity;
            }
        }
    }
    *dt = sf * min(dx, dy) / maxvel;
}
//deal with the boundary
//deal with the boundary
// Applies the four boundary conditions in one pass:
//   left  (xx == 0)     : fixed inflow state 1
//   right (xx == gx+1)  : zero-gradient copy of column gx
//   up    (yy == gy+1)  : fixed post-shock inflow state 2
//   down  (yy == 0)     : reflecting wall (copy row 1, v = 0)
// NOTE(review): boundary cells are read and written by different threads in
// the same launch (e.g. the down boundary reads row 1, whose column-0 cell the
// left-boundary thread writes concurrently), so corner values depend on
// scheduling — confirm this is acceptable for the interior stencils used.
__global__ void bound_kernel(double* d_U, double gam, int col, int row)
{
    const double rou1 = 1.0, u1 = 2.9, v1 = 0.0, p1 = 0.71429; //uniform inlet condition at left boundary
    const double rou2 = 1.69997, u2 = 2.61934, v2 = -0.50632, p2 = 1.52819; //uniform inlet condition at up boundary
    unsigned long long tid = (blockIdx.x * blockDim.x + threadIdx.x);
    int xx = tid % col;
    int yy = tid / col;
    int size = col * row;
    if (xx >= col || yy >= row)
    {
        return;
    }
    //left
    if (xx == 0 && yy <= gy + 1)
    {
        d_U[tid] = rou1;
        d_U[tid + size] = rou1 * u1;
        d_U[tid + size * 2] = rou1 * v1;
        d_U[tid + size * 3] = p1 / (gam - 1) + rou1 * (u1 * u1 + v1 * v1) / 2;
    }
    //right
    if (xx == gx + 1 && yy <= gy + 1)
    {
        for (int k = 0; k < 4; k++)
        {
            d_U[tid + k * size] = d_U[gx + yy * col + k * size];
        }
    }
    //up
    if (yy == gy + 1 && xx <= gx + 1)
    {
        d_U[tid] = rou2;
        d_U[tid + size] = rou2 * u2;
        d_U[tid + size * 2] = rou2 * v2;
        d_U[tid + size * 3] = p2 / (gam - 1) + rou2 * (u2 * u2 + v2 * v2) / 2;
    }
    //down
    if (yy == 0 && xx <= gx + 1)
    {
        // Mirror row 1 but zero the normal momentum (slip wall).
        d_U[tid] = d_U[xx + 1 * col];
        d_U[tid + size] = d_U[xx + 1 * col + size];
        d_U[tid + size * 2] = 0;
        d_U[tid + size * 3] = d_U[xx + 1 * col + size * 3];
    }
}
// Host wrapper: applies all boundary conditions and waits for completion.
void call_bound(double* d_U, double gam, int col, int row, int TPB)
{
    const int numBlocks = (col * row + TPB - 1) / TPB; // ceil-div over all cells
    bound_kernel << <numBlocks, TPB >> > (d_U, gam, col, row);
    cudaDeviceSynchronize();
}
//differential in x-direction
//differential in x-direction
// Artificial-viscosity smoothing for the x sweep: writes the smoothed state
// into temp (copied back by temp2U_kernel). theta is a shock switch computed
// from the density component: ~1 near discontinuities, ~0 in smooth regions.
// Fix: `a` was declared `const int a = 3.0;`, silently truncating the double
// literal. Since 3.0 truncates to 3 and is promoted back in the expression,
// the value is unchanged — this only corrects the misleading declaration.
__global__ void updataU_kernel(double* d_U, double* temp, double dx, double dy, double dt, double gam, int col, int row)
{
    const double a = 3.0; //speed of sound not exceeding 3
    double eta = (a * dt / dx) * (1 - a * dt / dx);
    unsigned long long tid = (blockIdx.x * blockDim.x + threadIdx.x);
    int xx = tid % col;
    int yy = tid / col;
    int size = col * row;
    if (xx < 1 || xx > gx || yy < 0 || yy > gy + 1)
    {
        return;
    }
    //switching function (1e-100 guards against division by zero)
    double theta = fabs(fabs(d_U[xx + 1 + yy * col] - d_U[tid]) - fabs(d_U[tid] - d_U[xx - 1 + yy * col]))
        / (fabs(d_U[xx + 1 + yy * col] - d_U[tid]) + fabs(d_U[tid] - d_U[xx - 1 + yy * col]) + 1e-100);
    for (int k = 0; k < 4; k++)
    {
        temp[tid + k * size] = d_U[tid + k * size] + 0.5 * eta * theta * (d_U[xx + 1 + yy * col + k * size]
            - 2 * d_U[tid + k * size] + d_U[xx - 1 + yy * col + k * size]);
    }
}
// Lax-Wendroff corrector for the x sweep:
//   U[i] -= dt/dx * (F_half[i] - F_half[i-1])
// where d_F holds the fluxes evaluated at the half-step states.
__global__ void updataU2_kernel(double* d_U, double* d_F, double dx, double dy, double dt, double gam, int col, int row)
{
    unsigned long long tid = (blockIdx.x * blockDim.x + threadIdx.x);
    int xx = tid % col;
    int yy = tid / col;
    int size = col * row;
    // Only interior columns are updated; boundaries are handled by bound_kernel.
    if (xx < 1 || xx > gx || yy < 0 || yy > gy + 1)
    {
        return;
    }
    for (int i = 0; i < 4; i++)
    {
        d_U[tid + i * size] = d_U[tid + i * size] - (dt / dx) *
            (d_F[tid + i * size] - d_F[xx - 1 + yy * col + i * size]);
    }
}
// Copies the smoothed state produced by updataU_kernel back into d_U,
// touching exactly the same interior cells that kernel wrote.
__global__ void temp2U_kernel(double* d_U, double* temp, double dx, double dy, double dt, double gam, int col, int row)
{
    unsigned long long tid = (blockIdx.x * blockDim.x + threadIdx.x);
    const int cx = tid % col;
    const int cy = tid / col;
    const int plane = col * row;
    // Same interior guard as updataU_kernel (inverted form).
    if (cx >= 1 && cx <= gx && cy >= 0 && cy <= gy + 1)
    {
        for (int comp = 0; comp < 4; comp++)
        {
            d_U[tid + comp * plane] = temp[tid + comp * plane];
        }
    }
}
// Computes the x-direction flux F from the conserved state U over the whole
// domain including ghost cells:
//   F = [rho*u, rho*u^2 + p, rho*u*v, (E + p)*u]
__global__ void updataF_kernel(double* d_U, double* d_F, double dx, double dy, double dt, double gam, int col, int row)
{
    unsigned long long tid = (blockIdx.x * blockDim.x + threadIdx.x);
    int xx = tid % col;
    int yy = tid / col;
    int size = col * row;
    if (xx < 0 || xx > gx + 1 || yy < 0 || yy > gy + 1)
    {
        return;
    }
    double* u0 = d_U;               // rho
    double* u1 = d_U + size;        // rho*u
    double* u2 = d_U + 2 * size;    // rho*v
    double* u3 = d_U + 3 * size;    // total energy
    double u = u1[tid] / u0[tid];
    double v = u2[tid] / u0[tid];
    // Ideal-gas pressure from total energy.
    double p = (gam - 1) * (u3[tid] - 0.5 * u0[tid] * (u * u + v * v));
    d_F[tid] = u1[tid];
    d_F[tid + size] = u0[tid] * u * u + p;
    d_F[tid + size * 2] = u0[tid] * u * v;
    d_F[tid + size * 3] = (u3[tid] + p) * u;
}
// Same flux formula as updataF_kernel, but evaluated on the half-step state
// (called with d_U_half); the guard stops at xx > gx because the half-step
// interface state only exists up to column gx.
__global__ void updataF_kernel2(double* d_U, double* d_F, double dx, double dy, double dt, double gam, int col, int row)
{
    unsigned long long tid = (blockIdx.x * blockDim.x + threadIdx.x);
    int xx = tid % col;
    int yy = tid / col;
    int size = col * row;
    if (xx < 0 || xx > gx || yy < 0 || yy > gy + 1)
    {
        return;
    }
    double* u0 = d_U;               // rho
    double* u1 = d_U + size;        // rho*u
    double* u2 = d_U + 2 * size;    // rho*v
    double* u3 = d_U + 3 * size;    // total energy
    double u = u1[tid] / u0[tid];
    double v = u2[tid] / u0[tid];
    // Ideal-gas pressure from total energy.
    double p = (gam - 1) * (u3[tid] - 0.5 * u0[tid] * (u * u + v * v));
    d_F[tid] = u1[tid];
    d_F[tid + size] = u0[tid] * u * u + p;
    d_F[tid + size * 2] = u0[tid] * u * v;
    d_F[tid + size * 3] = (u3[tid] + p) * u;
}
// Lax-Wendroff predictor for the x sweep: half-step state at the interface
// between columns xx and xx+1:
//   U_half = 0.5*(U[xx+1] + U[xx]) - 0.5*dt/dx * (F[xx+1] - F[xx])
__global__ void updataUhalf_kernel(double* d_U, double* d_U_half, double* d_F, double dx, double dy, double dt, double gam, int col, int row)
{
    unsigned long long tid = (blockIdx.x * blockDim.x + threadIdx.x);
    int xx = tid % col;
    int yy = tid / col;
    int size = col * row;
    if (xx < 0 || xx > gx || yy < 0 || yy > gy + 1)
    {
        return;
    }
    for (int i = 0; i < 4; i++)
    {
        d_U_half[tid + i * size] = 0.5 * (d_U[xx + 1 + yy * col + i * size]
            + d_U[tid + i * size]) - 0.5 * dt / dx * (d_F[xx + 1 + yy * col + i * size]
            - d_F[tid + i * size]);
    }
}
//differential in y-direction
//differential in y-direction
// Artificial-viscosity smoothing for the y sweep (mirror of updataU_kernel
// with the stencil rotated to rows). theta is the density-based shock switch.
// Fixes: (1) the viscosity coefficient eta used dx; the spatial step of the
// y sweep is dy (identical result when dx == dy). (2) `a` was declared
// `const int a = 3.0;`, truncating the double literal (value unchanged).
__global__ void updataU_kernel_y(double* d_U, double* temp, double dx, double dy, double dt, double gam, int col, int row)
{
    const double a = 3.0; //speed of sound not exceeding 3
    double eta = (a * dt / dy) * (1 - a * dt / dy);
    unsigned long long tid = (blockIdx.x * blockDim.x + threadIdx.x);
    int xx = tid % col;
    int yy = tid / col;
    int size = col * row;
    if (xx < 0 || xx > gx + 1 || yy < 1 || yy > gy)
    {
        return;
    }
    //switching function (1e-100 guards against division by zero)
    double theta = fabs(fabs(d_U[xx + (yy + 1) * col] - d_U[tid]) - fabs(d_U[tid] - d_U[xx + (yy - 1) * col]))
        / (fabs(d_U[xx + (yy + 1) * col] - d_U[tid]) + fabs(d_U[tid] - d_U[xx + (yy - 1) * col]) + 1e-100);
    for (int k = 0; k < 4; k++)
    {
        temp[tid + k * size] = d_U[tid + k * size] + 0.5 * eta * theta * (d_U[xx + (yy + 1) * col + k * size]
            - 2 * d_U[tid + k * size] + d_U[xx + (yy - 1) * col + k * size]);
    }
}
// Lax-Wendroff corrector for the y sweep:
//   U[j] -= dt/dy * (G_half[j] - G_half[j-1])
// Fix: the update divided by dx; the y-direction step is dy (the x-sweep
// corrector updataU2_kernel uses dx). Identical result when dx == dy.
__global__ void updataU2_kernel_y(double* d_U, double* d_G, double dx, double dy, double dt, double gam, int col, int row)
{
    unsigned long long tid = (blockIdx.x * blockDim.x + threadIdx.x);
    int xx = tid % col;
    int yy = tid / col;
    int size = col * row;
    if (xx < 0 || xx > gx + 1 || yy < 1 || yy > gy)
    {
        return;
    }
    for (int i = 0; i < 4; i++)
    {
        d_U[tid + i * size] = d_U[tid + i * size] - (dt / dy) *
            (d_G[tid + i * size] - d_G[xx + (yy - 1) * col + i * size]);
    }
}
// Copies the smoothed state produced by updataU_kernel_y back into d_U,
// touching exactly the same interior rows that kernel wrote.
__global__ void temp2U_kernel_y(double* d_U, double* temp, double dx, double dy, double dt, double gam, int col, int row)
{
    unsigned long long tid = (blockIdx.x * blockDim.x + threadIdx.x);
    const int cx = tid % col;
    const int cy = tid / col;
    const int plane = col * row;
    // Same interior guard as updataU_kernel_y (inverted form).
    if (cx >= 0 && cx <= gx + 1 && cy >= 1 && cy <= gy)
    {
        for (int comp = 0; comp < 4; comp++)
        {
            d_U[tid + comp * plane] = temp[tid + comp * plane];
        }
    }
}
// Computes the y-direction flux G from the conserved state U over the whole
// domain including ghost cells:
//   G = [rho*v, rho*u*v, rho*v^2 + p, (E + p)*v]
__global__ void updataG_kernel_y(double* d_U, double* d_G, double dx, double dy, double dt, double gam, int col, int row)
{
    unsigned long long tid = (blockIdx.x * blockDim.x + threadIdx.x);
    int xx = tid % col;
    int yy = tid / col;
    int size = col * row;
    if (xx < 0 || xx > gx + 1 || yy < 0 || yy > gy + 1)
    {
        return;
    }
    double* u0 = d_U;               // rho
    double* u1 = d_U + size;        // rho*u
    double* u2 = d_U + 2 * size;    // rho*v
    double* u3 = d_U + 3 * size;    // total energy
    double u = u1[tid] / u0[tid];
    double v = u2[tid] / u0[tid];
    // Ideal-gas pressure from total energy.
    double p = (gam - 1) * (u3[tid] - 0.5 * u0[tid] * (u * u + v * v));
    d_G[tid] = u2[tid];
    d_G[tid + size] = u0[tid] * u * v;
    d_G[tid + size * 2] = u0[tid] * v * v + p;
    d_G[tid + size * 3] = (u3[tid] + p) * v;
}
// Same y-flux formula as updataG_kernel_y, but evaluated on the half-step
// state (called with d_U_half); the guard stops at yy > gy because the
// half-step interface state only exists up to row gy.
__global__ void updataG_kernel2_y(double* d_U, double* d_G, double dx, double dy, double dt, double gam, int col, int row)
{
    unsigned long long tid = (blockIdx.x * blockDim.x + threadIdx.x);
    int xx = tid % col;
    int yy = tid / col;
    int size = col * row;
    if (xx < 0 || xx > gx + 1 || yy < 0 || yy > gy)
    {
        return;
    }
    double* u0 = d_U;               // rho
    double* u1 = d_U + size;        // rho*u
    double* u2 = d_U + 2 * size;    // rho*v
    double* u3 = d_U + 3 * size;    // total energy
    double u = u1[tid] / u0[tid];
    double v = u2[tid] / u0[tid];
    // Ideal-gas pressure from total energy.
    double p = (gam - 1) * (u3[tid] - 0.5 * u0[tid] * (u * u + v * v));
    d_G[tid] = u2[tid];
    d_G[tid + size] = u0[tid] * u * v;
    d_G[tid + size * 2] = u0[tid] * v * v + p;
    d_G[tid + size * 3] = (u3[tid] + p) * v;
}
// Lax-Wendroff predictor for the y sweep: half-step state at the interface
// between rows yy and yy+1:
//   U_half = 0.5*(U[yy+1] + U[yy]) - 0.5*dt/dy * (G[yy+1] - G[yy])
// Fix: the y-direction flux difference was divided by dx; the spatial step of
// the y sweep is dy (the matching x-sweep kernel updataUhalf_kernel uses dx).
// Identical result when dx == dy.
__global__ void updataUhalf_kernel_y(double* d_U, double* d_U_half, double* d_G, double dx, double dy, double dt, double gam, int col, int row)
{
    unsigned long long tid = (blockIdx.x * blockDim.x + threadIdx.x);
    int xx = tid % col;
    int yy = tid / col;
    int size = col * row;
    if (xx < 0 || xx > gx + 1 || yy < 0 || yy > gy)
    {
        return;
    }
    for (int i = 0; i < 4; i++)
    {
        d_U_half[tid + i * size] = 0.5 * (d_U[xx + (yy + 1) * col + i * size]
            + d_U[tid + i * size]) - 0.5 * dt / dy * (d_G[xx + (yy + 1) * col + i * size]
            - d_G[tid + i * size]);
    }
}
//Lax-Wendroff 2d sovler
//Lax-Wendroff 2d sovler
// Host driver for one x-direction half-step: artificial-viscosity smoothing
// (updataU_kernel into d_temp, then copied back), flux evaluation, predictor
// to the half-step state, flux at the half step, and the corrector update.
// All launches go to the default stream, so they execute in order.
// NOTE(review): no error checking and no synchronization here — errors from
// these kernels will only surface at a later synchronizing call.
void call_solve_x(double* d_U, double* d_U_half, double* d_FG, double* d_temp, double dx, double dy, double dt, double gam, int col, int row, int TPB)
{
    // One thread per grid cell, rounded up.
    int BSIZE = (col * row + (TPB - 1)) / TPB;
    cudaMemset(d_temp, 0, sizeof(double) * col * row * 4);
    updataU_kernel << <BSIZE, TPB >> > (d_U, d_temp, dx, dy, dt, gam, col, row);
    temp2U_kernel << <BSIZE, TPB >> > (d_U, d_temp, dx, dy, dt, gam, col, row);
    updataF_kernel << <BSIZE, TPB >> > (d_U, d_FG, dx, dy, dt, gam, col, row);
    updataUhalf_kernel << <BSIZE, TPB >> > (d_U, d_U_half, d_FG, dx, dy, dt, gam, col, row);
    updataF_kernel2 << <BSIZE, TPB >> > (d_U_half, d_FG, dx, dy, dt, gam, col, row);
    updataU2_kernel << <BSIZE, TPB >> > (d_U, d_FG, dx, dy, dt, gam, col, row);
}
// Host driver for one y-direction half-step; mirrors call_solve_x with the
// *_y kernels and the y-flux G stored in d_FG.
// NOTE(review): no error checking and no synchronization here.
void call_solve_y(double* d_U, double* d_U_half, double* d_FG, double* d_temp, double dx, double dy, double dt, double gam, int col, int row, int TPB)
{
    // One thread per grid cell, rounded up.
    int BSIZE = (col * row + (TPB - 1)) / TPB;
    cudaMemset(d_temp, 0, sizeof(double) * col * row * 4);
    updataU_kernel_y << <BSIZE, TPB >> > (d_U, d_temp, dx, dy, dt, gam, col, row);
    temp2U_kernel_y << <BSIZE, TPB >> > (d_U, d_temp, dx, dy, dt, gam, col, row);
    updataG_kernel_y << <BSIZE, TPB >> > (d_U, d_FG, dx, dy, dt, gam, col, row);
    updataUhalf_kernel_y << <BSIZE, TPB >> > (d_U, d_U_half, d_FG, dx, dy, dt, gam, col, row);
    updataG_kernel2_y << <BSIZE, TPB >> > (d_U_half, d_FG, dx, dy, dt, gam, col, row);
    updataU2_kernel_y << <BSIZE, TPB >> > (d_U, d_FG, dx, dy, dt, gam, col, row);
}
// One full time step dt via symmetric dimensional splitting: four half-steps
// in the order x, y, y, x, each advancing dt/2 and followed by a boundary
// refresh. The x-y-y-x ordering cancels the leading splitting error.
void call_solver_2d(double* d_U, double* d_U_half, double* d_FG, double* d_temp, double dx, double dy, double dt, double gam, int col, int row, int TPB)
{
    call_solve_x(d_U, d_U_half, d_FG, d_temp, dx, dy, dt / 2.0, gam, col, row, TPB);
    call_bound(d_U, gam, col, row, TPB);
    call_solve_y(d_U, d_U_half, d_FG, d_temp, dx, dy, dt / 2.0, gam, col, row, TPB);
    call_bound(d_U, gam, col, row, TPB);
    call_solve_y(d_U, d_U_half, d_FG, d_temp, dx, dy, dt / 2.0, gam, col, row, TPB);
    call_bound(d_U, gam, col, row, TPB);
    call_solve_x(d_U, d_U_half, d_FG, d_temp, dx, dy, dt / 2.0, gam, col, row, TPB);
    call_bound(d_U, gam, col, row, TPB);
}
|
832ad9032ade97fa0a6076dd16efc40d743f0324.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Helper functions for mapping CUTLASS concepts to cuBLAS.
*/
#include <stdexcept>
#if CUTLASS_ENABLE_CUBLAS
#include "cublas_helpers.h"
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Converts a cuBLAS status to cutlass::Status
/// Converts a cuBLAS status to cutlass::Status
/// (unrecognized codes map to kErrorInternal).
Status get_cutlass_status(hipblasStatus_t cublas) {

  if (cublas == HIPBLAS_STATUS_SUCCESS) {
    return Status::kSuccess;
  }
  if (cublas == HIPBLAS_STATUS_INVALID_VALUE) {
    return Status::kErrorInvalidProblem;
  }
  if (cublas == HIPBLAS_STATUS_NOT_SUPPORTED) {
    return Status::kErrorNotSupported;
  }
  return Status::kErrorInternal;
}
/// Converts a cuBLASS status to cutlass::profiler::Disposition
/// Converts a cuBLASS status to cutlass::profiler::Disposition
/// (anything other than the two recognized failures maps to kFailed).
Disposition get_cutlass_disposition(hipblasStatus_t cublas_status) {

  switch (cublas_status) {
  case HIPBLAS_STATUS_INVALID_VALUE:
    return Disposition::kInvalidProblem;
  case HIPBLAS_STATUS_NOT_SUPPORTED:
    return Disposition::kNotSupported;
  default:
    return Disposition::kFailed;
  }
}
/// Maps a CUTLASS tensor layout to a cuBLAS transpose operation
/// Maps a CUTLASS tensor layout to a cuBLAS transpose operation.
/// Returns false when no cuBLAS operation represents the combination
/// (notably a conjugated column-major operand, which cuBLAS cannot express).
bool get_cublas_transpose_operation(
  hipblasOperation_t &operation,
  library::LayoutTypeID layout,
  library::ComplexTransform transform) {

  switch (layout) {
  case library::LayoutTypeID::kColumnMajor:
    if (transform == library::ComplexTransform::kNone) {
      operation = HIPBLAS_OP_N;
      return true;
    }
    else {
      // conjugate-no-transpose has no cuBLAS equivalent
      return false;
    }
    break;
  case library::LayoutTypeID::kRowMajor:
    if (transform == library::ComplexTransform::kNone) {
      operation = HIPBLAS_OP_T;
      return true;
    }
    else if (transform == library::ComplexTransform::kConjugate) {
      operation = HIPBLAS_OP_C;
      return true;
    }
    break;
  default: break;
  }

  return false;
}
/// Maps a CUTLASS numeric type to a cuBLAS data type enumeration
/// Maps a CUTLASS numeric type to a cuBLAS data type enumeration.
/// Returns false for types cuBLAS has no enumerant for (BF16, TF32, sub-byte
/// integers, S16/S64/U16/U64, and B1 all fall through unmapped here).
bool get_cublas_datatype(hipDataType &data_type, library::NumericTypeID element_type) {
  switch (element_type) {
  case library::NumericTypeID::kF16:
    data_type = HIP_R_16F;
    return true;

  case library::NumericTypeID::kBF16:
    break;

  case library::NumericTypeID::kTF32:
    break;

  case library::NumericTypeID::kF32:
    data_type = HIP_R_32F;
    return true;

  case library::NumericTypeID::kF64:
    data_type = HIP_R_64F;
    return true;

  case library::NumericTypeID::kS4:
    break;

  case library::NumericTypeID::kS8:
    data_type = HIP_R_8I;
    return true;

  case library::NumericTypeID::kS16:
    break;

  case library::NumericTypeID::kS32:
    data_type = HIP_R_32I;
    return true;

  case library::NumericTypeID::kS64:
    break;

  case library::NumericTypeID::kU4:
    break;

  case library::NumericTypeID::kU8:
    data_type = HIP_R_8U;
    return true;

  case library::NumericTypeID::kU16:
    break;

  case library::NumericTypeID::kU32:
    data_type = HIP_R_32U;
    return true;

  case library::NumericTypeID::kU64:
    break;

  case library::NumericTypeID::kB1:
    break;

  case library::NumericTypeID::kCF32:
    data_type = HIP_C_32F;
    return true;

  case library::NumericTypeID::kCF64:
    data_type = HIP_C_64F;
    return true;

  case library::NumericTypeID::kInvalid:

  default:
    break;
  }

  return false;
}
/// Maps a cutlass::SideMode to cuBLAS side mode
/// Maps a cutlass::SideMode to cuBLAS side mode; false if unmappable.
bool get_cublas_side_mode(hipblasSideMode_t& side, SideMode side_mode) {
  if (side_mode == SideMode::kLeft) {
    side = HIPBLAS_SIDE_LEFT;
    return true;
  }
  if (side_mode == SideMode::kRight) {
    side = HIPBLAS_SIDE_RIGHT;
    return true;
  }
  return false;
}
/// Maps a cutlass::FillMode to cuBLAS fill mode
/// Maps a cutlass::FillMode to cuBLAS fill mode; false if unmappable.
bool get_cublas_fill_mode(hipblasFillMode_t& uplo, FillMode fill_mode) {
  if (fill_mode == FillMode::kLower) {
    uplo = HIPBLAS_FILL_MODE_LOWER;
    return true;
  }
  if (fill_mode == FillMode::kUpper) {
    uplo = HIPBLAS_FILL_MODE_UPPER;
    return true;
  }
  return false;
}
/// Maps a cutlass::DiagType to cuBLAS diag type
/// Maps a cutlass::DiagType to cuBLAS diag type; false if unmappable.
bool get_cublas_diag_type(hipblasDiagType_t& diag, DiagType diag_type) {
  if (diag_type == DiagType::kNonUnit) {
    diag = HIPBLAS_DIAG_NON_UNIT;
    return true;
  }
  if (diag_type == DiagType::kUnit) {
    diag = HIPBLAS_DIAG_UNIT;
    return true;
  }
  return false;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Gets the cublas algorithm given threadblock tile dimensions and math opcode class
// Selects the default algorithm for SIMT kernels and the tensor-op default
// otherwise; the threadblock tile dimensions are currently unused.
// NOTE(review): CUBLAS_GEMM_DEFAULT_TENSOR_OP was left untranslated by the
// hipify pass — confirm it resolves under the hipBLAS headers in use.
hipblasGemmAlgo_t get_cublas_gemm_algo(int cta_m, int cta_n, int cta_k, library::OpcodeClassID opcode_class) {
  return (opcode_class == library::OpcodeClassID::kSimt ?
    HIPBLAS_GEMM_DEFAULT : CUBLAS_GEMM_DEFAULT_TENSOR_OP);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns a status if cuBLAS can satisfy a particular GEMM description
/// Returns a status if cuBLAS can satisfy a particular GEMM description.
/// Rejects integer tensor-op accumulation and S4/S8 output, which cuBLAS
/// does not support.
Status cublas_satisfies(library::GemmDescription const &desc) {
  auto const &math_instruction = desc.tile_description.math_instruction;

  if (math_instruction.element_accumulator == library::NumericTypeID::kS32 &&
    math_instruction.opcode_class == library::OpcodeClassID::kTensorOp) {

    return Status::kErrorNotSupported;
  }

  // output type S4 and S8 not supported in cuBLAS
  if (desc.C.element == library::NumericTypeID::kS4 ||
    desc.C.element == library::NumericTypeID::kS8) {

    return Status::kErrorNotSupported;
  }

  return Status::kSuccess;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
// Builds the cuBLAS GemmEx argument set (transposes, data types, compute
// type) from a CUTLASS GEMM description. Any mapping failure is recorded as
// status = kErrorNotSupported rather than thrown.
// NOTE(review): the compute-type selection is guarded by __CUDACC_VER_MAJOR__,
// which a HIP compiler may not define — verify the guard after hipification.
cublasGemmExDispatcher::cublasGemmExDispatcher(
  library::GemmDescription const &op_desc,
  library::GemmUniversalConfiguration configuration_,
  library::GemmUniversalArguments arguments_,
  hipblasGemmAlgo_t algorithm
):
  configuration(configuration_), arguments(arguments_), algo(algorithm), status(Status::kSuccess) {

  bool good = true;
  // Each helper returns false on an unmappable enum; `good` accumulates them.
  good = (good && get_cublas_transpose_operation(trans_A, op_desc.A.layout, op_desc.transform_A));
  good = (good && get_cublas_transpose_operation(trans_B, op_desc.B.layout, op_desc.transform_B));
  good = (good && get_cublas_datatype(data_type_A, op_desc.A.element));
  good = (good && get_cublas_datatype(data_type_B, op_desc.B.element));
  good = (good && get_cublas_datatype(data_type_C, op_desc.C.element));

  good = (good && get_cublas_datatype(
    compute_data_type,
    op_desc.tile_description.math_instruction.element_accumulator));

  // cuBLAS introduces a separate cublasComputeType enumerant to more precisely describe
  // internal numerical data types used in the computation.
#if (__CUDACC_VER_MAJOR__ >= 11)
  library::OpcodeClassID const & opcode_class =
    op_desc.tile_description.math_instruction.opcode_class;

  // FP32 inputs on tensor cores use the TF32 fast path.
  if (good &&
    op_desc.A.element == library::NumericTypeID::kF32 &&
    op_desc.B.element == library::NumericTypeID::kF32 &&
    opcode_class == library::OpcodeClassID::kTensorOp) {

    compute_type = CUBLAS_COMPUTE_32F_FAST_TF32;
  }
  else if (good) {
    bool const isPedantic = false;
    switch (compute_data_type) {
      case HIP_R_32F:
      case HIP_C_32F:
        compute_type = isPedantic ? CUBLAS_COMPUTE_32F_PEDANTIC : CUBLAS_COMPUTE_32F;
        break;
      case HIP_R_64F:
      case HIP_C_64F:
        compute_type = isPedantic ? CUBLAS_COMPUTE_64F_PEDANTIC : CUBLAS_COMPUTE_64F;
        break;
      case HIP_R_16F:
        compute_type = isPedantic ? CUBLAS_COMPUTE_16F_PEDANTIC : CUBLAS_COMPUTE_16F;
        break;
      case HIP_R_32I:
        compute_type = isPedantic ? CUBLAS_COMPUTE_32I_PEDANTIC : CUBLAS_COMPUTE_32I;
        break;
      default:
        good = false;
        break;
    }
  }
#endif // __CUDACC_VER_MAJOR__ >= 11

  if (!good) {
    status = Status::kErrorNotSupported;
  }
}
/// Executes GEMM using these arguments
/// Executes GEMM using these arguments: strided-batched Ex call when the
/// configuration is batched, plain GemmEx otherwise. arguments.D is passed as
/// the C/output operand; pre-CUDA-11 builds pass the raw accumulator data
/// type instead of a compute-type enumerant.
hipblasStatus_t cublasGemmExDispatcher::operator()(hipblasHandle_t handle) {

  if (configuration.mode == library::GemmUniversalMode::kBatched) {
    return hipblasGemmStridedBatchedEx(
      handle,
      trans_A,
      trans_B,
      configuration.problem_size.m(),
      configuration.problem_size.n(),
      configuration.problem_size.k(),
      arguments.alpha,
      arguments.A,
      data_type_A,
      int(configuration.lda),
      arguments.batch_stride_A,
      arguments.B,
      data_type_B,
      int(configuration.ldb),
      arguments.batch_stride_B,
      arguments.beta,
      arguments.D,
      data_type_C,
      int(configuration.ldc),
      arguments.batch_stride_C,
      configuration.batch_count,
#if (__CUDACC_VER_MAJOR__ >= 11)
      compute_type,
#else
      compute_data_type,
#endif
      algo
    );
  }
  else {
    return hipblasGemmEx(
      handle,
      trans_A,
      trans_B,
      configuration.problem_size.m(),
      configuration.problem_size.n(),
      configuration.problem_size.k(),
      arguments.alpha,
      arguments.A,
      data_type_A,
      int(configuration.lda),
      arguments.B,
      data_type_B,
      int(configuration.ldb),
      arguments.beta,
      arguments.D,
      data_type_C,
      int(configuration.ldc),
#if (__CUDACC_VER_MAJOR__ >= 11)
      compute_type,
#else
      compute_data_type,
#endif
      algo
    );
  }
}
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns a status if cuBLAS can satisfy a particular RankK description
/// Returns a status if cuBLAS can satisfy a particular RankK description.
/// Rejects integer tensor-op accumulation, S4/S8 output, and BF16/TF32 input,
/// none of which the cuBLAS rank-k routines support.
Status cublas_satisfies(library::RankKDescription const &desc) {
  auto const &math_instruction = desc.tile_description.math_instruction;

  if (math_instruction.element_accumulator == library::NumericTypeID::kS32 &&
    math_instruction.opcode_class == library::OpcodeClassID::kTensorOp) {

    return Status::kErrorNotSupported;
  }

  // output type S4 and S8 not supported in cuBLAS
  if (desc.C.element == library::NumericTypeID::kS4 ||
    desc.C.element == library::NumericTypeID::kS8) {

    return Status::kErrorNotSupported;
  }

  // input type BF16 and TF32 not supported in cuBLAS
  if (desc.A.element == library::NumericTypeID::kBF16 ||
    desc.A.element == library::NumericTypeID::kTF32) {

    return Status::kErrorNotSupported;
  }

  return Status::kSuccess;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
// Builds the cuBLAS rank-k argument set (transpose, fill mode, data types,
// compute type) from a CUTLASS RankK description; also captures blas_mode
// (symmetric vs. Hermitian) and num_ranks (SYRK/HERK vs. SYR2K/HER2K).
// Mapping failures are recorded as status = kErrorNotSupported.
// NOTE(review): as in cublasGemmExDispatcher, the __CUDACC_VER_MAJOR__ guard
// may not fire under a HIP compiler — verify after hipification.
cublasRankKDispatcher::cublasRankKDispatcher(
  library::RankKDescription const &op_desc,
  library::RankKConfiguration configuration_,
  library::RankKArguments arguments_
):
  configuration(configuration_), arguments(arguments_), status(Status::kSuccess) {

  blas_mode = op_desc.blas_mode;
  num_ranks = op_desc.num_ranks;

  bool good = true;
  // Each helper returns false on an unmappable enum; `good` accumulates them.
  good = (good && get_cublas_transpose_operation(trans_A, op_desc.A.layout, op_desc.transform_A));
  good = (good && get_cublas_fill_mode(uplo, op_desc.fill_mode));
  good = (good && get_cublas_datatype(data_type_A, op_desc.A.element));
  good = (good && get_cublas_datatype(data_type_C, op_desc.C.element));

  good = (good && get_cublas_datatype(
    compute_data_type,
    op_desc.tile_description.math_instruction.element_accumulator));

  // cuBLAS introduces a separate cublasComputeType enumerant to more precisely describe
  // internal numerical data types used in the computation.
#if (__CUDACC_VER_MAJOR__ >= 11)
  library::OpcodeClassID const & opcode_class =
    op_desc.tile_description.math_instruction.opcode_class;

  // FP32 input on tensor cores uses the TF32 fast path.
  if (good &&
    op_desc.A.element == library::NumericTypeID::kF32 &&
    opcode_class == library::OpcodeClassID::kTensorOp) {

    compute_type = CUBLAS_COMPUTE_32F_FAST_TF32;
  }
  else if (good) {
    bool const isPedantic = false;
    switch (compute_data_type) {
      case HIP_R_32F:
      case HIP_C_32F:
        compute_type = isPedantic ? CUBLAS_COMPUTE_32F_PEDANTIC : CUBLAS_COMPUTE_32F;
        break;
      case HIP_R_64F:
      case HIP_C_64F:
        compute_type = isPedantic ? CUBLAS_COMPUTE_64F_PEDANTIC : CUBLAS_COMPUTE_64F;
        break;
      case HIP_R_16F:
        compute_type = isPedantic ? CUBLAS_COMPUTE_16F_PEDANTIC : CUBLAS_COMPUTE_16F;
        break;
      case HIP_R_32I:
        compute_type = isPedantic ? CUBLAS_COMPUTE_32I_PEDANTIC : CUBLAS_COMPUTE_32I;
        break;
      default:
        good = false;
        break;
    }
  }
#endif // __CUDACC_VER_MAJOR__ >= 11

  if (!good) {
    status = Status::kErrorNotSupported;
  }
}
/// Executes RankK using these arguments
hipblasStatus_t cublasRankKDispatcher::operator()(hipblasHandle_t handle) {
// SYRK and HERK
if (num_ranks == 1) {
if (data_type_A == data_type_C && data_type_A == HIP_R_64F) {
return hipblasDsyrk(
handle,
uplo,
trans_A,
configuration.problem_size.n(),
configuration.problem_size.k(),
static_cast<const double*>(arguments.alpha),
static_cast<const double*>(arguments.A),
int(configuration.lda),
static_cast<const double*>(arguments.beta),
static_cast<double*>(arguments.D),
int(configuration.ldc)
);
} else if (data_type_A == data_type_C && data_type_A == HIP_R_32F) {
#if (__CUDACC_VER_MAJOR__ >= 11)
if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != HIPBLAS_STATUS_SUCCESS)
return HIPBLAS_STATUS_NOT_SUPPORTED;
#endif
return hipblasSsyrk(
handle,
uplo,
trans_A,
configuration.problem_size.n(),
configuration.problem_size.k(),
static_cast<const float*>(arguments.alpha),
static_cast<const float*>(arguments.A),
int(configuration.lda),
static_cast<const float*>(arguments.beta),
static_cast<float*>(arguments.D),
int(configuration.ldc)
);
} else if (data_type_A == data_type_C && data_type_A == HIP_C_64F) {
if (blas_mode == BlasMode::kHermitian) {
return hipblasZherk(
handle,
uplo,
trans_A,
configuration.problem_size.n(),
configuration.problem_size.k(),
static_cast<const double*>(arguments.alpha),
static_cast<const hipDoubleComplex*>(arguments.A),
int(configuration.lda),
static_cast<const double*>(arguments.beta),
static_cast<hipDoubleComplex*>(arguments.D),
int(configuration.ldc)
);
}
else {
return hipblasZsyrk(
handle,
uplo,
trans_A,
configuration.problem_size.n(),
configuration.problem_size.k(),
static_cast<const hipDoubleComplex*>(arguments.alpha),
static_cast<const hipDoubleComplex*>(arguments.A),
int(configuration.lda),
static_cast<const hipDoubleComplex*>(arguments.beta),
static_cast<hipDoubleComplex*>(arguments.D),
int(configuration.ldc)
);
}
} else if (data_type_A == data_type_C && data_type_A == HIP_C_32F) {
#if (__CUDACC_VER_MAJOR__ >= 11)
if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != HIPBLAS_STATUS_SUCCESS)
return HIPBLAS_STATUS_NOT_SUPPORTED;
#endif
if (blas_mode == BlasMode::kHermitian) {
return hipblasCherk(
handle,
uplo,
trans_A,
configuration.problem_size.n(),
configuration.problem_size.k(),
static_cast<const float*>(arguments.alpha),
static_cast<const hipComplex*>(arguments.A),
int(configuration.lda),
static_cast<const float*>(arguments.beta),
static_cast<hipComplex*>(arguments.D),
int(configuration.ldc)
);
}
else {
return hipblasCsyrk(
handle,
uplo,
trans_A,
configuration.problem_size.n(),
configuration.problem_size.k(),
static_cast<const hipComplex*>(arguments.alpha),
static_cast<const hipComplex*>(arguments.A),
int(configuration.lda),
static_cast<const hipComplex*>(arguments.beta),
static_cast<hipComplex*>(arguments.D),
int(configuration.ldc)
);
}
} else {
return HIPBLAS_STATUS_NOT_SUPPORTED;
}
}
// SYR2K and HER2K
else if (num_ranks == 2) {
if (data_type_A == data_type_C && data_type_A == HIP_R_64F) {
return hipblasDsyr2k(
handle,
uplo,
trans_A,
configuration.problem_size.n(),
configuration.problem_size.k(),
static_cast<const double*>(arguments.alpha),
static_cast<const double*>(arguments.A),
int(configuration.lda),
static_cast<const double*>(arguments.B),
int(configuration.ldb),
static_cast<const double*>(arguments.beta),
static_cast<double*>(arguments.D),
int(configuration.ldc)
);
} else if (data_type_A == data_type_C && data_type_A == HIP_R_32F) {
#if (__CUDACC_VER_MAJOR__ >= 11)
if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != HIPBLAS_STATUS_SUCCESS)
return HIPBLAS_STATUS_NOT_SUPPORTED;
#endif
return hipblasSsyr2k(
handle,
uplo,
trans_A,
configuration.problem_size.n(),
configuration.problem_size.k(),
static_cast<const float*>(arguments.alpha),
static_cast<const float*>(arguments.A),
int(configuration.lda),
static_cast<const float*>(arguments.B),
int(configuration.ldb),
static_cast<const float*>(arguments.beta),
static_cast<float*>(arguments.D),
int(configuration.ldc)
);
} else if (data_type_A == data_type_C && data_type_A == HIP_C_64F) {
if (blas_mode == BlasMode::kHermitian) {
return hipblasZher2k(
handle,
uplo,
trans_A,
configuration.problem_size.n(),
configuration.problem_size.k(),
static_cast<const hipDoubleComplex*>(arguments.alpha),
static_cast<const hipDoubleComplex*>(arguments.A),
int(configuration.lda),
static_cast<const hipDoubleComplex*>(arguments.B),
int(configuration.ldb),
static_cast<const double*>(arguments.beta),
static_cast<hipDoubleComplex*>(arguments.D),
int(configuration.ldc)
);
}
else {
return hipblasZsyr2k(
handle,
uplo,
trans_A,
configuration.problem_size.n(),
configuration.problem_size.k(),
static_cast<const hipDoubleComplex*>(arguments.alpha),
static_cast<const hipDoubleComplex*>(arguments.A),
int(configuration.lda),
static_cast<const hipDoubleComplex*>(arguments.B),
int(configuration.ldb),
static_cast<const hipDoubleComplex*>(arguments.beta),
static_cast<hipDoubleComplex*>(arguments.D),
int(configuration.ldc)
);
}
} else if (data_type_A == data_type_C && data_type_A == HIP_C_32F) {
#if (__CUDACC_VER_MAJOR__ >= 11)
if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != HIPBLAS_STATUS_SUCCESS)
return HIPBLAS_STATUS_NOT_SUPPORTED;
#endif
if (blas_mode == BlasMode::kHermitian) {
return hipblasCher2k(
handle,
uplo,
trans_A,
configuration.problem_size.n(),
configuration.problem_size.k(),
static_cast<const hipComplex*>(arguments.alpha),
static_cast<const hipComplex*>(arguments.A),
int(configuration.lda),
static_cast<const hipComplex*>(arguments.B),
int(configuration.ldb),
static_cast<const float*>(arguments.beta),
static_cast<hipComplex*>(arguments.D),
int(configuration.ldc)
);
}
else {
return hipblasCsyr2k(
handle,
uplo,
trans_A,
configuration.problem_size.n(),
configuration.problem_size.k(),
static_cast<const hipComplex*>(arguments.alpha),
static_cast<const hipComplex*>(arguments.A),
int(configuration.lda),
static_cast<const hipComplex*>(arguments.B),
int(configuration.ldb),
static_cast<const hipComplex*>(arguments.beta),
static_cast<hipComplex*>(arguments.D),
int(configuration.ldc)
);
}
} else {
return HIPBLAS_STATUS_NOT_SUPPORTED;
}
}
else {
return HIPBLAS_STATUS_NOT_SUPPORTED;
}
}
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns a status if cuBLAS can satisfy a particular TRMM description
/// Reports whether cuBLAS can service a particular TRMM description.
/// Returns Status::kSuccess when all element types are representable in
/// cuBLAS, and Status::kErrorNotSupported otherwise.
Status cublas_satisfies(library::TrmmDescription const &desc) {
  auto const &instr = desc.tile_description.math_instruction;
  // cuBLAS offers no tensor-op path that accumulates in signed 32-bit integers.
  bool const s32_tensor_op =
      (instr.element_accumulator == library::NumericTypeID::kS32) &&
      (instr.opcode_class == library::OpcodeClassID::kTensorOp);
  // cuBLAS cannot produce S4 or S8 outputs.
  bool const narrow_int_output =
      (desc.D.element == library::NumericTypeID::kS4) ||
      (desc.D.element == library::NumericTypeID::kS8);
  // cuBLAS does not accept BF16 or TF32 inputs here.
  bool const unsupported_input =
      (desc.A.element == library::NumericTypeID::kBF16) ||
      (desc.A.element == library::NumericTypeID::kTF32);
  if (s32_tensor_op || narrow_int_output || unsupported_input) {
    return Status::kErrorNotSupported;
  }
  return Status::kSuccess;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
/// Constructs a TRMM dispatcher: translates the CUTLASS operation description
/// into hipBLAS/cuBLAS enumerants (transpose op, side, fill, diag, data types,
/// compute type). On any untranslatable field, `status` is set to
/// Status::kErrorNotSupported and the dispatcher must not be invoked.
cublasTrmmDispatcher::cublasTrmmDispatcher(
library::TrmmDescription const &op_desc,
library::TrmmConfiguration configuration_,
library::TrmmArguments arguments_
):
configuration(configuration_), arguments(arguments_), status(Status::kSuccess) {
// Each helper returns false when the field has no BLAS equivalent; `good`
// short-circuits so later conversions are skipped after the first failure.
bool good = true;
good = (good && get_cublas_transpose_operation(trans_A, op_desc.A.layout, op_desc.transform_A));
good = (good && get_cublas_side_mode(side, op_desc.side_mode));
good = (good && get_cublas_fill_mode(uplo, op_desc.fill_mode));
good = (good && get_cublas_diag_type(diag, op_desc.diag_type));
good = (good && get_cublas_datatype(data_type_A, op_desc.A.element));
good = (good && get_cublas_datatype(data_type_B, op_desc.B.element));
good = (good && get_cublas_datatype(data_type_D, op_desc.D.element));
// if A is Transposed, then for cuBLAS that is inverted Fill Mode.
if (trans_A == HIPBLAS_OP_T || trans_A == HIPBLAS_OP_C) {
if (uplo == HIPBLAS_FILL_MODE_LOWER)
uplo = HIPBLAS_FILL_MODE_UPPER;
else
uplo = HIPBLAS_FILL_MODE_LOWER;
}
good = (good && get_cublas_datatype(
compute_data_type,
op_desc.tile_description.math_instruction.element_accumulator));
// cuBLAS introduces a separate cublasComputeType enumerant to more precisely describe
// internal numerical data types used in the computation.
// NOTE(review): the __CUDACC_VER_MAJOR__ guard and CUBLAS_COMPUTE_* /
// CUBLAS_COMPUTE_32F_FAST_TF32 enumerants are carried over verbatim from the
// CUDA original by hipify — confirm they are intended in this HIP build.
#if (__CUDACC_VER_MAJOR__ >= 11)
library::OpcodeClassID const & opcode_class =
op_desc.tile_description.math_instruction.opcode_class;
// F32 inputs on tensor cores select the TF32 fast path.
if (good &&
op_desc.A.element == library::NumericTypeID::kF32 &&
opcode_class == library::OpcodeClassID::kTensorOp) {
compute_type = CUBLAS_COMPUTE_32F_FAST_TF32;
}
else if (good) {
bool const isPedantic = false;
// Map the accumulator data type onto the corresponding compute type.
switch (compute_data_type) {
case HIP_R_32F:
case HIP_C_32F:
compute_type = isPedantic ? CUBLAS_COMPUTE_32F_PEDANTIC : CUBLAS_COMPUTE_32F;
break;
case HIP_R_64F:
case HIP_C_64F:
compute_type = isPedantic ? CUBLAS_COMPUTE_64F_PEDANTIC : CUBLAS_COMPUTE_64F;
break;
case HIP_R_16F:
compute_type = isPedantic ? CUBLAS_COMPUTE_16F_PEDANTIC : CUBLAS_COMPUTE_16F;
break;
case HIP_R_32I:
compute_type = isPedantic ? CUBLAS_COMPUTE_32I_PEDANTIC : CUBLAS_COMPUTE_32I;
break;
default:
// No matching compute type: mark the whole configuration unsupported.
good = false;
break;
}
}
#endif // __CUDACC_VER_MAJOR__ >= 11
if (!good) {
status = Status::kErrorNotSupported;
}
}
/// Executes TRMM using these arguments
/// Dispatches TRMM to the precision-specific hipBLAS entry point selected by
/// the A/D data types established in the constructor. Returns the hipBLAS
/// status, or HIPBLAS_STATUS_NOT_SUPPORTED for any unhandled type combination.
hipblasStatus_t cublasTrmmDispatcher::operator()(hipblasHandle_t handle) {
// Double-precision real TRMM.
if (data_type_A == data_type_D && data_type_A == HIP_R_64F) {
return hipblasDtrmm(
handle,
side,
uplo,
trans_A,
diag,
configuration.problem_size.m(),
configuration.problem_size.n(),
static_cast<const double*>(arguments.alpha),
static_cast<const double*>(arguments.A),
int(configuration.lda),
static_cast<const double*>(arguments.B),
int(configuration.ldb),
static_cast<double*>(arguments.D),
int(configuration.ldd)
);
// Single-precision real TRMM.
} else if (data_type_A == data_type_D && data_type_A == HIP_R_32F) {
// NOTE(review): cublasSetMathMode / CUBLAS_TF32_TENSOR_OP_MATH appear to be
// un-hipified cuBLAS calls left by the translation — confirm this compiles
// and is intended for the HIP target.
#if (__CUDACC_VER_MAJOR__ >= 11)
if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != HIPBLAS_STATUS_SUCCESS)
return HIPBLAS_STATUS_NOT_SUPPORTED;
#endif
return hipblasStrmm(
handle,
side,
uplo,
trans_A,
diag,
configuration.problem_size.m(),
configuration.problem_size.n(),
static_cast<const float*>(arguments.alpha),
static_cast<const float*>(arguments.A),
int(configuration.lda),
static_cast<const float*>(arguments.B),
int(configuration.ldb),
static_cast<float*>(arguments.D),
int(configuration.ldd)
);
// Double-precision complex TRMM.
} else if (data_type_A == data_type_D && data_type_A == HIP_C_64F) {
return hipblasZtrmm(
handle,
side,
uplo,
trans_A,
diag,
configuration.problem_size.m(),
configuration.problem_size.n(),
static_cast<const hipDoubleComplex*>(arguments.alpha),
static_cast<const hipDoubleComplex*>(arguments.A),
int(configuration.lda),
static_cast<const hipDoubleComplex*>(arguments.B),
int(configuration.ldb),
static_cast<hipDoubleComplex*>(arguments.D),
int(configuration.ldd)
);
// Single-precision complex TRMM.
} else if (data_type_A == data_type_D && data_type_A == HIP_C_32F) {
#if (__CUDACC_VER_MAJOR__ >= 11)
if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != HIPBLAS_STATUS_SUCCESS)
return HIPBLAS_STATUS_NOT_SUPPORTED;
#endif
return hipblasCtrmm(
handle,
side,
uplo,
trans_A,
diag,
configuration.problem_size.m(),
configuration.problem_size.n(),
static_cast<const hipComplex*>(arguments.alpha),
static_cast<const hipComplex*>(arguments.A),
int(configuration.lda),
static_cast<const hipComplex*>(arguments.B),
int(configuration.ldb),
static_cast<hipComplex*>(arguments.D),
int(configuration.ldd)
);
} else {
// Mixed or unrecognized precisions are not dispatched.
return HIPBLAS_STATUS_NOT_SUPPORTED;
}
}
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns a status if cuBLAS can satisfy a particular Symm description
/// Reports whether cuBLAS can service a particular SYMM/HEMM description.
/// Returns Status::kSuccess only when every operand type and layout has a
/// cuBLAS equivalent; otherwise Status::kErrorNotSupported.
Status cublas_satisfies(library::SymmDescription const &desc) {
  auto const &instr = desc.tile_description.math_instruction;
  // cuBLAS offers no tensor-op kernels accumulating in signed 32-bit integers.
  if (instr.element_accumulator == library::NumericTypeID::kS32 &&
      instr.opcode_class == library::OpcodeClassID::kTensorOp) {
    return Status::kErrorNotSupported;
  }
  // cuBLAS cannot emit S4 or S8 outputs.
  bool const narrow_int_output =
      (desc.C.element == library::NumericTypeID::kS4) ||
      (desc.C.element == library::NumericTypeID::kS8);
  // BF16 / TF32 inputs (either operand) have no cuBLAS SYMM path.
  bool const bad_A_input =
      (desc.A.element == library::NumericTypeID::kBF16) ||
      (desc.A.element == library::NumericTypeID::kTF32);
  bool const bad_B_input =
      (desc.B.element == library::NumericTypeID::kBF16) ||
      (desc.B.element == library::NumericTypeID::kTF32);
  // cuBLAS SYMM accepts only a column-major, non-transformed A operand.
  bool const bad_layout =
      (desc.A.layout != library::LayoutTypeID::kColumnMajor) ||
      (desc.transform_A != library::ComplexTransform::kNone);
  if (narrow_int_output || bad_A_input || bad_B_input || bad_layout) {
    return Status::kErrorNotSupported;
  }
  return Status::kSuccess;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
/// Constructs a SYMM/HEMM dispatcher: records the BLAS mode (symmetric vs.
/// Hermitian) and converts the CUTLASS description into hipBLAS/cuBLAS
/// enumerants. On any untranslatable field, `status` becomes
/// Status::kErrorNotSupported and the dispatcher must not be invoked.
cublasSymmDispatcher::cublasSymmDispatcher(
library::SymmDescription const &op_desc,
library::SymmConfiguration configuration_,
library::SymmArguments arguments_
):
configuration(configuration_), arguments(arguments_), status(Status::kSuccess) {
// kSymmetric selects *symm entry points; kHermitian selects *hemm.
blas_mode = op_desc.blas_mode;
// Each helper returns false when the field has no BLAS equivalent; `good`
// short-circuits so later conversions are skipped after the first failure.
bool good = true;
good = (good && get_cublas_side_mode(side, op_desc.side_mode));
good = (good && get_cublas_fill_mode(uplo, op_desc.fill_mode));
good = (good && get_cublas_datatype(data_type_A, op_desc.A.element));
good = (good && get_cublas_datatype(data_type_C, op_desc.C.element));
good = (good && get_cublas_datatype(
compute_data_type,
op_desc.tile_description.math_instruction.element_accumulator));
// cuBLAS introduces a separate cublasComputeType enumerant to more precisely describe
// internal numerical data types used in the computation.
// NOTE(review): the __CUDACC_VER_MAJOR__ guard and CUBLAS_COMPUTE_*
// enumerants are carried over verbatim from the CUDA original by hipify —
// confirm they are intended in this HIP build.
#if (__CUDACC_VER_MAJOR__ >= 11)
library::OpcodeClassID const & opcode_class =
op_desc.tile_description.math_instruction.opcode_class;
// F32 inputs on tensor cores select the TF32 fast path.
if (good &&
op_desc.A.element == library::NumericTypeID::kF32 &&
opcode_class == library::OpcodeClassID::kTensorOp) {
compute_type = CUBLAS_COMPUTE_32F_FAST_TF32;
}
else if (good) {
bool const isPedantic = false;
// Map the accumulator data type onto the corresponding compute type.
switch (compute_data_type) {
case HIP_R_32F:
case HIP_C_32F:
compute_type = isPedantic ? CUBLAS_COMPUTE_32F_PEDANTIC : CUBLAS_COMPUTE_32F;
break;
case HIP_R_64F:
case HIP_C_64F:
compute_type = isPedantic ? CUBLAS_COMPUTE_64F_PEDANTIC : CUBLAS_COMPUTE_64F;
break;
case HIP_R_16F:
compute_type = isPedantic ? CUBLAS_COMPUTE_16F_PEDANTIC : CUBLAS_COMPUTE_16F;
break;
case HIP_R_32I:
compute_type = isPedantic ? CUBLAS_COMPUTE_32I_PEDANTIC : CUBLAS_COMPUTE_32I;
break;
default:
// No matching compute type: mark the whole configuration unsupported.
good = false;
break;
}
}
#endif // __CUDACC_VER_MAJOR__ >= 11
if (!good) {
status = Status::kErrorNotSupported;
}
}
/// Executes Symm using these arguments
/// Dispatches SYMM (or HEMM when blas_mode is kHermitian) to the
/// precision-specific hipBLAS entry point selected by the A/C data types.
/// Returns the hipBLAS status, or HIPBLAS_STATUS_NOT_SUPPORTED for any
/// unhandled type combination.
hipblasStatus_t cublasSymmDispatcher::operator()(hipblasHandle_t handle) {
// SYMM and HEMM
// Double-precision real: SYMM only (HEMM is meaningful for complex types).
if (data_type_A == data_type_C && data_type_A == HIP_R_64F) {
return hipblasDsymm(
handle,
side,
uplo,
configuration.problem_size.m(),
configuration.problem_size.n(),
static_cast<const double*>(arguments.alpha),
static_cast<const double*>(arguments.A),
int(configuration.lda),
static_cast<const double*>(arguments.B),
int(configuration.ldb),
static_cast<const double*>(arguments.beta),
static_cast<double*>(arguments.D),
int(configuration.ldc)
);
// Single-precision real SYMM.
} else if (data_type_A == data_type_C && data_type_A == HIP_R_32F) {
// NOTE(review): cublasSetMathMode / CUBLAS_TF32_TENSOR_OP_MATH appear to be
// un-hipified cuBLAS calls left by the translation — confirm this compiles
// and is intended for the HIP target.
#if (__CUDACC_VER_MAJOR__ >= 11)
if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != HIPBLAS_STATUS_SUCCESS)
return HIPBLAS_STATUS_NOT_SUPPORTED;
#endif
return hipblasSsymm(
handle,
side,
uplo,
configuration.problem_size.m(),
configuration.problem_size.n(),
static_cast<const float*>(arguments.alpha),
static_cast<const float*>(arguments.A),
int(configuration.lda),
static_cast<const float*>(arguments.B),
int(configuration.ldb),
static_cast<const float*>(arguments.beta),
static_cast<float*>(arguments.D),
int(configuration.ldc)
);
// Double-precision complex: HEMM for Hermitian mode, otherwise SYMM.
} else if (data_type_A == data_type_C && data_type_A == HIP_C_64F) {
if (blas_mode == BlasMode::kHermitian) {
return hipblasZhemm(
handle,
side,
uplo,
configuration.problem_size.m(),
configuration.problem_size.n(),
static_cast<const hipDoubleComplex*>(arguments.alpha),
static_cast<const hipDoubleComplex*>(arguments.A),
int(configuration.lda),
static_cast<const hipDoubleComplex*>(arguments.B),
int(configuration.ldb),
static_cast<const hipDoubleComplex*>(arguments.beta),
static_cast<hipDoubleComplex*>(arguments.D),
int(configuration.ldc)
);
}
else {
return hipblasZsymm(
handle,
side,
uplo,
configuration.problem_size.m(),
configuration.problem_size.n(),
static_cast<const hipDoubleComplex*>(arguments.alpha),
static_cast<const hipDoubleComplex*>(arguments.A),
int(configuration.lda),
static_cast<const hipDoubleComplex*>(arguments.B),
int(configuration.ldb),
static_cast<const hipDoubleComplex*>(arguments.beta),
static_cast<hipDoubleComplex*>(arguments.D),
int(configuration.ldc)
);
}
// Single-precision complex: HEMM for Hermitian mode, otherwise SYMM.
} else if (data_type_A == data_type_C && data_type_A == HIP_C_32F) {
#if (__CUDACC_VER_MAJOR__ >= 11)
if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != HIPBLAS_STATUS_SUCCESS)
return HIPBLAS_STATUS_NOT_SUPPORTED;
#endif
if (blas_mode == BlasMode::kHermitian) {
return hipblasChemm(
handle,
side,
uplo,
configuration.problem_size.m(),
configuration.problem_size.n(),
static_cast<const hipComplex*>(arguments.alpha),
static_cast<const hipComplex*>(arguments.A),
int(configuration.lda),
static_cast<const hipComplex*>(arguments.B),
int(configuration.ldb),
static_cast<const hipComplex*>(arguments.beta),
static_cast<hipComplex*>(arguments.D),
int(configuration.ldc)
);
}
else {
return hipblasCsymm(
handle,
side,
uplo,
configuration.problem_size.m(),
configuration.problem_size.n(),
static_cast<const hipComplex*>(arguments.alpha),
static_cast<const hipComplex*>(arguments.A),
int(configuration.lda),
static_cast<const hipComplex*>(arguments.B),
int(configuration.ldb),
static_cast<const hipComplex*>(arguments.beta),
static_cast<hipComplex*>(arguments.D),
int(configuration.ldc)
);
}
} else {
// Mixed or unrecognized precisions are not dispatched.
return HIPBLAS_STATUS_NOT_SUPPORTED;
}
}
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
#endif // #if CUTLASS_ENABLE_CUBLAS
| 832ad9032ade97fa0a6076dd16efc40d743f0324.cu | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Helper functions for mapping CUTLASS concepts to cuBLAS.
*/
#include <stdexcept>
#if CUTLASS_ENABLE_CUBLAS
#include "cublas_helpers.h"
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Converts a cuBLAS status to cutlass::Status
/// Translates a cuBLAS status code into the equivalent cutlass::Status.
/// Any status other than the three explicitly handled values is reported as
/// an internal error.
Status get_cutlass_status(cublasStatus_t cublas) {
  if (cublas == CUBLAS_STATUS_SUCCESS) {
    return Status::kSuccess;
  }
  if (cublas == CUBLAS_STATUS_INVALID_VALUE) {
    return Status::kErrorInvalidProblem;
  }
  if (cublas == CUBLAS_STATUS_NOT_SUPPORTED) {
    return Status::kErrorNotSupported;
  }
  return Status::kErrorInternal;
}
/// Converts a cuBLASS status to cutlass::profiler::Disposition
/// Maps a cuBLAS status onto a profiler Disposition. Unrecognized failure
/// codes are reported as a generic failure.
Disposition get_cutlass_disposition(cublasStatus_t cublas_status) {
  switch (cublas_status) {
  case CUBLAS_STATUS_INVALID_VALUE:
    return Disposition::kInvalidProblem;
  case CUBLAS_STATUS_NOT_SUPPORTED:
    return Disposition::kNotSupported;
  default:
    return Disposition::kFailed;
  }
}
/// Maps a CUTLASS tensor layout to a cuBLAS transpose operation
/// Maps a CUTLASS tensor layout plus complex transform onto a cuBLAS
/// transpose operation. Writes the result into `operation` and returns true
/// on success; returns false (leaving `operation` untouched) when the
/// combination has no cuBLAS equivalent.
bool get_cublas_transpose_operation(
  cublasOperation_t &operation,
  library::LayoutTypeID layout,
  library::ComplexTransform transform) {

  // Column-major with no transform is cuBLAS's native operand form.
  if (layout == library::LayoutTypeID::kColumnMajor) {
    if (transform == library::ComplexTransform::kNone) {
      operation = CUBLAS_OP_N;
      return true;
    }
    // A conjugated column-major operand cannot be expressed.
    return false;
  }

  // Row-major is expressed as a transpose; conjugation selects OP_C.
  if (layout == library::LayoutTypeID::kRowMajor) {
    if (transform == library::ComplexTransform::kNone) {
      operation = CUBLAS_OP_T;
      return true;
    }
    if (transform == library::ComplexTransform::kConjugate) {
      operation = CUBLAS_OP_C;
      return true;
    }
  }

  return false;
}
/// Maps a CUTLASS numeric type to a cuBLAS data type enumeration
/// Maps a CUTLASS numeric type onto a CUDA data type enumerant. Writes the
/// result into `data_type` and returns true on success; types with no CUDA
/// enumerant (the cases that fall through to the final return) yield false
/// and leave `data_type` unmodified.
bool get_cublas_datatype(cublasDataType_t &data_type, library::NumericTypeID element_type) {
switch (element_type) {
case library::NumericTypeID::kF16:
data_type = CUDA_R_16F;
return true;
// BF16 and TF32 deliberately fall through: not representable here.
case library::NumericTypeID::kBF16:
break;
case library::NumericTypeID::kTF32:
break;
case library::NumericTypeID::kF32:
data_type = CUDA_R_32F;
return true;
case library::NumericTypeID::kF64:
data_type = CUDA_R_64F;
return true;
// Sub-byte and 16/64-bit integer types have no CUDA data-type enumerant.
case library::NumericTypeID::kS4:
break;
case library::NumericTypeID::kS8:
data_type = CUDA_R_8I;
return true;
case library::NumericTypeID::kS16:
break;
case library::NumericTypeID::kS32:
data_type = CUDA_R_32I;
return true;
case library::NumericTypeID::kS64:
break;
case library::NumericTypeID::kU4:
break;
case library::NumericTypeID::kU8:
data_type = CUDA_R_8U;
return true;
case library::NumericTypeID::kU16:
break;
case library::NumericTypeID::kU32:
data_type = CUDA_R_32U;
return true;
case library::NumericTypeID::kU64:
break;
case library::NumericTypeID::kB1:
break;
// Complex single and double precision.
case library::NumericTypeID::kCF32:
data_type = CUDA_C_32F;
return true;
case library::NumericTypeID::kCF64:
data_type = CUDA_C_64F;
return true;
case library::NumericTypeID::kInvalid:
default:
break;
}
return false;
}
/// Maps a cutlass::SideMode to cuBLAS side mode
/// Translates a cutlass::SideMode into the cuBLAS side-mode enumerant.
/// Returns true and writes `side` on success; false for any other value.
bool get_cublas_side_mode(cublasSideMode_t& side, SideMode side_mode) {
  if (side_mode == SideMode::kLeft) {
    side = CUBLAS_SIDE_LEFT;
    return true;
  }
  if (side_mode == SideMode::kRight) {
    side = CUBLAS_SIDE_RIGHT;
    return true;
  }
  return false;
}
/// Maps a cutlass::FillMode to cuBLAS fill mode
/// Translates a cutlass::FillMode into the cuBLAS fill-mode enumerant.
/// Returns true and writes `uplo` on success; false for any other value.
bool get_cublas_fill_mode(cublasFillMode_t& uplo, FillMode fill_mode) {
  if (fill_mode == FillMode::kLower) {
    uplo = CUBLAS_FILL_MODE_LOWER;
    return true;
  }
  if (fill_mode == FillMode::kUpper) {
    uplo = CUBLAS_FILL_MODE_UPPER;
    return true;
  }
  return false;
}
/// Maps a cutlass::DiagType to cuBLAS diag type
/// Translates a cutlass::DiagType into the cuBLAS diagonal-type enumerant.
/// Returns true and writes `diag` on success; false for any other value.
bool get_cublas_diag_type(cublasDiagType_t& diag, DiagType diag_type) {
  if (diag_type == DiagType::kNonUnit) {
    diag = CUBLAS_DIAG_NON_UNIT;
    return true;
  }
  if (diag_type == DiagType::kUnit) {
    diag = CUBLAS_DIAG_UNIT;
    return true;
  }
  return false;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Gets the cublas algorithm given threadblock tile dimensions and math opcode class
/// Selects the cuBLAS GEMM algorithm for the given opcode class: the SIMT
/// default for SIMT kernels, the tensor-op default otherwise. The threadblock
/// tile extents are accepted for interface compatibility but do not influence
/// the selection.
cublasGemmAlgo_t get_cublas_gemm_algo(int cta_m, int cta_n, int cta_k, library::OpcodeClassID opcode_class) {
  (void)cta_m;
  (void)cta_n;
  (void)cta_k;
  if (opcode_class == library::OpcodeClassID::kSimt) {
    return CUBLAS_GEMM_DEFAULT;
  }
  return CUBLAS_GEMM_DEFAULT_TENSOR_OP;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns a status if cuBLAS can satisfy a particular GEMM description
/// Reports whether cuBLAS can service a particular GEMM description.
/// Returns Status::kSuccess when cuBLAS has a corresponding kernel,
/// Status::kErrorNotSupported otherwise.
Status cublas_satisfies(library::GemmDescription const &desc) {
  auto const &instr = desc.tile_description.math_instruction;
  // cuBLAS offers no tensor-op kernels accumulating in signed 32-bit integers.
  bool const s32_tensor_op =
      (instr.element_accumulator == library::NumericTypeID::kS32) &&
      (instr.opcode_class == library::OpcodeClassID::kTensorOp);
  // cuBLAS cannot emit S4 or S8 outputs.
  bool const narrow_int_output =
      (desc.C.element == library::NumericTypeID::kS4) ||
      (desc.C.element == library::NumericTypeID::kS8);
  return (s32_tensor_op || narrow_int_output) ? Status::kErrorNotSupported
                                              : Status::kSuccess;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
/// Constructs a GemmEx dispatcher: converts the CUTLASS operation description
/// into cuBLAS enumerants (transpose ops, operand data types, compute type)
/// and stores the requested algorithm. On any untranslatable field, `status`
/// is set to Status::kErrorNotSupported and the dispatcher must not be invoked.
cublasGemmExDispatcher::cublasGemmExDispatcher(
library::GemmDescription const &op_desc,
library::GemmUniversalConfiguration configuration_,
library::GemmUniversalArguments arguments_,
cublasGemmAlgo_t algorithm
):
configuration(configuration_), arguments(arguments_), algo(algorithm), status(Status::kSuccess) {
// Each helper returns false when the field has no cuBLAS equivalent; `good`
// short-circuits so later conversions are skipped after the first failure.
bool good = true;
good = (good && get_cublas_transpose_operation(trans_A, op_desc.A.layout, op_desc.transform_A));
good = (good && get_cublas_transpose_operation(trans_B, op_desc.B.layout, op_desc.transform_B));
good = (good && get_cublas_datatype(data_type_A, op_desc.A.element));
good = (good && get_cublas_datatype(data_type_B, op_desc.B.element));
good = (good && get_cublas_datatype(data_type_C, op_desc.C.element));
good = (good && get_cublas_datatype(
compute_data_type,
op_desc.tile_description.math_instruction.element_accumulator));
// cuBLAS introduces a separate cublasComputeType enumerant to more precisely describe
// internal numerical data types used in the computation.
#if (__CUDACC_VER_MAJOR__ >= 11)
library::OpcodeClassID const & opcode_class =
op_desc.tile_description.math_instruction.opcode_class;
// F32 x F32 on tensor cores selects the TF32 fast path.
if (good &&
op_desc.A.element == library::NumericTypeID::kF32 &&
op_desc.B.element == library::NumericTypeID::kF32 &&
opcode_class == library::OpcodeClassID::kTensorOp) {
compute_type = CUBLAS_COMPUTE_32F_FAST_TF32;
}
else if (good) {
bool const isPedantic = false;
// Map the accumulator data type onto the corresponding compute type.
switch (compute_data_type) {
case CUDA_R_32F:
case CUDA_C_32F:
compute_type = isPedantic ? CUBLAS_COMPUTE_32F_PEDANTIC : CUBLAS_COMPUTE_32F;
break;
case CUDA_R_64F:
case CUDA_C_64F:
compute_type = isPedantic ? CUBLAS_COMPUTE_64F_PEDANTIC : CUBLAS_COMPUTE_64F;
break;
case CUDA_R_16F:
compute_type = isPedantic ? CUBLAS_COMPUTE_16F_PEDANTIC : CUBLAS_COMPUTE_16F;
break;
case CUDA_R_32I:
compute_type = isPedantic ? CUBLAS_COMPUTE_32I_PEDANTIC : CUBLAS_COMPUTE_32I;
break;
default:
// No matching compute type: mark the whole configuration unsupported.
good = false;
break;
}
}
#endif // __CUDACC_VER_MAJOR__ >= 11
if (!good) {
status = Status::kErrorNotSupported;
}
}
/// Executes GEMM using these arguments
/// Executes the GEMM through cuBLAS: batched problems go through
/// cublasGemmStridedBatchedEx, everything else through cublasGemmEx.
/// Returns the cuBLAS status of the call.
cublasStatus_t cublasGemmExDispatcher::operator()(cublasHandle_t handle) {
if (configuration.mode == library::GemmUniversalMode::kBatched) {
return cublasGemmStridedBatchedEx(
handle,
trans_A,
trans_B,
configuration.problem_size.m(),
configuration.problem_size.n(),
configuration.problem_size.k(),
arguments.alpha,
arguments.A,
data_type_A,
int(configuration.lda),
arguments.batch_stride_A,
arguments.B,
data_type_B,
int(configuration.ldb),
arguments.batch_stride_B,
arguments.beta,
arguments.D,
data_type_C,
int(configuration.ldc),
arguments.batch_stride_C,
configuration.batch_count,
// CUDA 11+ passes the dedicated compute-type enumerant; older toolkits
// pass the accumulator data type directly.
#if (__CUDACC_VER_MAJOR__ >= 11)
compute_type,
#else
compute_data_type,
#endif
algo
);
}
else {
return cublasGemmEx(
handle,
trans_A,
trans_B,
configuration.problem_size.m(),
configuration.problem_size.n(),
configuration.problem_size.k(),
arguments.alpha,
arguments.A,
data_type_A,
int(configuration.lda),
arguments.B,
data_type_B,
int(configuration.ldb),
arguments.beta,
arguments.D,
data_type_C,
int(configuration.ldc),
#if (__CUDACC_VER_MAJOR__ >= 11)
compute_type,
#else
compute_data_type,
#endif
algo
);
}
}
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns a status if cuBLAS can satisfy a particular RankK description
/// Reports whether cuBLAS can service a particular rank-k update description.
/// Returns Status::kSuccess when all element types are representable in
/// cuBLAS, and Status::kErrorNotSupported otherwise.
Status cublas_satisfies(library::RankKDescription const &desc) {
  auto const &instr = desc.tile_description.math_instruction;
  // cuBLAS offers no tensor-op path that accumulates in signed 32-bit integers.
  bool const s32_tensor_op =
      (instr.element_accumulator == library::NumericTypeID::kS32) &&
      (instr.opcode_class == library::OpcodeClassID::kTensorOp);
  // cuBLAS cannot emit S4 or S8 outputs.
  bool const narrow_int_output =
      (desc.C.element == library::NumericTypeID::kS4) ||
      (desc.C.element == library::NumericTypeID::kS8);
  // cuBLAS does not accept BF16 or TF32 inputs here.
  bool const unsupported_input =
      (desc.A.element == library::NumericTypeID::kBF16) ||
      (desc.A.element == library::NumericTypeID::kTF32);
  if (s32_tensor_op || narrow_int_output || unsupported_input) {
    return Status::kErrorNotSupported;
  }
  return Status::kSuccess;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
/// Constructs a rank-k update dispatcher: records the BLAS mode
/// (symmetric/Hermitian) and rank count (1 => SYRK/HERK, 2 => SYR2K/HER2K),
/// and converts the CUTLASS description into cuBLAS enumerants. On any
/// untranslatable field, `status` becomes Status::kErrorNotSupported and the
/// dispatcher must not be invoked.
cublasRankKDispatcher::cublasRankKDispatcher(
library::RankKDescription const &op_desc,
library::RankKConfiguration configuration_,
library::RankKArguments arguments_
):
configuration(configuration_), arguments(arguments_), status(Status::kSuccess) {
blas_mode = op_desc.blas_mode;
num_ranks = op_desc.num_ranks;
// Each helper returns false when the field has no cuBLAS equivalent; `good`
// short-circuits so later conversions are skipped after the first failure.
bool good = true;
good = (good && get_cublas_transpose_operation(trans_A, op_desc.A.layout, op_desc.transform_A));
good = (good && get_cublas_fill_mode(uplo, op_desc.fill_mode));
good = (good && get_cublas_datatype(data_type_A, op_desc.A.element));
good = (good && get_cublas_datatype(data_type_C, op_desc.C.element));
good = (good && get_cublas_datatype(
compute_data_type,
op_desc.tile_description.math_instruction.element_accumulator));
// cuBLAS introduces a separate cublasComputeType enumerant to more precisely describe
// internal numerical data types used in the computation.
#if (__CUDACC_VER_MAJOR__ >= 11)
library::OpcodeClassID const & opcode_class =
op_desc.tile_description.math_instruction.opcode_class;
// F32 inputs on tensor cores select the TF32 fast path.
if (good &&
op_desc.A.element == library::NumericTypeID::kF32 &&
opcode_class == library::OpcodeClassID::kTensorOp) {
compute_type = CUBLAS_COMPUTE_32F_FAST_TF32;
}
else if (good) {
bool const isPedantic = false;
// Map the accumulator data type onto the corresponding compute type.
switch (compute_data_type) {
case CUDA_R_32F:
case CUDA_C_32F:
compute_type = isPedantic ? CUBLAS_COMPUTE_32F_PEDANTIC : CUBLAS_COMPUTE_32F;
break;
case CUDA_R_64F:
case CUDA_C_64F:
compute_type = isPedantic ? CUBLAS_COMPUTE_64F_PEDANTIC : CUBLAS_COMPUTE_64F;
break;
case CUDA_R_16F:
compute_type = isPedantic ? CUBLAS_COMPUTE_16F_PEDANTIC : CUBLAS_COMPUTE_16F;
break;
case CUDA_R_32I:
compute_type = isPedantic ? CUBLAS_COMPUTE_32I_PEDANTIC : CUBLAS_COMPUTE_32I;
break;
default:
// No matching compute type: mark the whole configuration unsupported.
good = false;
break;
}
}
#endif // __CUDACC_VER_MAJOR__ >= 11
if (!good) {
status = Status::kErrorNotSupported;
}
}
/// Executes RankK using these arguments
cublasStatus_t cublasRankKDispatcher::operator()(cublasHandle_t handle) {
// SYRK and HERK
if (num_ranks == 1) {
if (data_type_A == data_type_C && data_type_A == CUDA_R_64F) {
return cublasDsyrk(
handle,
uplo,
trans_A,
configuration.problem_size.n(),
configuration.problem_size.k(),
static_cast<const double*>(arguments.alpha),
static_cast<const double*>(arguments.A),
int(configuration.lda),
static_cast<const double*>(arguments.beta),
static_cast<double*>(arguments.D),
int(configuration.ldc)
);
} else if (data_type_A == data_type_C && data_type_A == CUDA_R_32F) {
#if (__CUDACC_VER_MAJOR__ >= 11)
if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != CUBLAS_STATUS_SUCCESS)
return CUBLAS_STATUS_NOT_SUPPORTED;
#endif
return cublasSsyrk(
handle,
uplo,
trans_A,
configuration.problem_size.n(),
configuration.problem_size.k(),
static_cast<const float*>(arguments.alpha),
static_cast<const float*>(arguments.A),
int(configuration.lda),
static_cast<const float*>(arguments.beta),
static_cast<float*>(arguments.D),
int(configuration.ldc)
);
} else if (data_type_A == data_type_C && data_type_A == CUDA_C_64F) {
if (blas_mode == BlasMode::kHermitian) {
return cublasZherk(
handle,
uplo,
trans_A,
configuration.problem_size.n(),
configuration.problem_size.k(),
static_cast<const double*>(arguments.alpha),
static_cast<const cuDoubleComplex*>(arguments.A),
int(configuration.lda),
static_cast<const double*>(arguments.beta),
static_cast<cuDoubleComplex*>(arguments.D),
int(configuration.ldc)
);
}
else {
return cublasZsyrk(
handle,
uplo,
trans_A,
configuration.problem_size.n(),
configuration.problem_size.k(),
static_cast<const cuDoubleComplex*>(arguments.alpha),
static_cast<const cuDoubleComplex*>(arguments.A),
int(configuration.lda),
static_cast<const cuDoubleComplex*>(arguments.beta),
static_cast<cuDoubleComplex*>(arguments.D),
int(configuration.ldc)
);
}
} else if (data_type_A == data_type_C && data_type_A == CUDA_C_32F) {
#if (__CUDACC_VER_MAJOR__ >= 11)
if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != CUBLAS_STATUS_SUCCESS)
return CUBLAS_STATUS_NOT_SUPPORTED;
#endif
if (blas_mode == BlasMode::kHermitian) {
return cublasCherk(
handle,
uplo,
trans_A,
configuration.problem_size.n(),
configuration.problem_size.k(),
static_cast<const float*>(arguments.alpha),
static_cast<const cuComplex*>(arguments.A),
int(configuration.lda),
static_cast<const float*>(arguments.beta),
static_cast<cuComplex*>(arguments.D),
int(configuration.ldc)
);
}
else {
return cublasCsyrk(
handle,
uplo,
trans_A,
configuration.problem_size.n(),
configuration.problem_size.k(),
static_cast<const cuComplex*>(arguments.alpha),
static_cast<const cuComplex*>(arguments.A),
int(configuration.lda),
static_cast<const cuComplex*>(arguments.beta),
static_cast<cuComplex*>(arguments.D),
int(configuration.ldc)
);
}
} else {
return CUBLAS_STATUS_NOT_SUPPORTED;
}
}
// SYR2K and HER2K
else if (num_ranks == 2) {
if (data_type_A == data_type_C && data_type_A == CUDA_R_64F) {
return cublasDsyr2k(
handle,
uplo,
trans_A,
configuration.problem_size.n(),
configuration.problem_size.k(),
static_cast<const double*>(arguments.alpha),
static_cast<const double*>(arguments.A),
int(configuration.lda),
static_cast<const double*>(arguments.B),
int(configuration.ldb),
static_cast<const double*>(arguments.beta),
static_cast<double*>(arguments.D),
int(configuration.ldc)
);
} else if (data_type_A == data_type_C && data_type_A == CUDA_R_32F) {
#if (__CUDACC_VER_MAJOR__ >= 11)
if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != CUBLAS_STATUS_SUCCESS)
return CUBLAS_STATUS_NOT_SUPPORTED;
#endif
return cublasSsyr2k(
handle,
uplo,
trans_A,
configuration.problem_size.n(),
configuration.problem_size.k(),
static_cast<const float*>(arguments.alpha),
static_cast<const float*>(arguments.A),
int(configuration.lda),
static_cast<const float*>(arguments.B),
int(configuration.ldb),
static_cast<const float*>(arguments.beta),
static_cast<float*>(arguments.D),
int(configuration.ldc)
);
} else if (data_type_A == data_type_C && data_type_A == CUDA_C_64F) {
if (blas_mode == BlasMode::kHermitian) {
return cublasZher2k(
handle,
uplo,
trans_A,
configuration.problem_size.n(),
configuration.problem_size.k(),
static_cast<const cuDoubleComplex*>(arguments.alpha),
static_cast<const cuDoubleComplex*>(arguments.A),
int(configuration.lda),
static_cast<const cuDoubleComplex*>(arguments.B),
int(configuration.ldb),
static_cast<const double*>(arguments.beta),
static_cast<cuDoubleComplex*>(arguments.D),
int(configuration.ldc)
);
}
else {
return cublasZsyr2k(
handle,
uplo,
trans_A,
configuration.problem_size.n(),
configuration.problem_size.k(),
static_cast<const cuDoubleComplex*>(arguments.alpha),
static_cast<const cuDoubleComplex*>(arguments.A),
int(configuration.lda),
static_cast<const cuDoubleComplex*>(arguments.B),
int(configuration.ldb),
static_cast<const cuDoubleComplex*>(arguments.beta),
static_cast<cuDoubleComplex*>(arguments.D),
int(configuration.ldc)
);
}
} else if (data_type_A == data_type_C && data_type_A == CUDA_C_32F) {
#if (__CUDACC_VER_MAJOR__ >= 11)
if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != CUBLAS_STATUS_SUCCESS)
return CUBLAS_STATUS_NOT_SUPPORTED;
#endif
if (blas_mode == BlasMode::kHermitian) {
return cublasCher2k(
handle,
uplo,
trans_A,
configuration.problem_size.n(),
configuration.problem_size.k(),
static_cast<const cuComplex*>(arguments.alpha),
static_cast<const cuComplex*>(arguments.A),
int(configuration.lda),
static_cast<const cuComplex*>(arguments.B),
int(configuration.ldb),
static_cast<const float*>(arguments.beta),
static_cast<cuComplex*>(arguments.D),
int(configuration.ldc)
);
}
else {
return cublasCsyr2k(
handle,
uplo,
trans_A,
configuration.problem_size.n(),
configuration.problem_size.k(),
static_cast<const cuComplex*>(arguments.alpha),
static_cast<const cuComplex*>(arguments.A),
int(configuration.lda),
static_cast<const cuComplex*>(arguments.B),
int(configuration.ldb),
static_cast<const cuComplex*>(arguments.beta),
static_cast<cuComplex*>(arguments.D),
int(configuration.ldc)
);
}
} else {
return CUBLAS_STATUS_NOT_SUPPORTED;
}
}
else {
return CUBLAS_STATUS_NOT_SUPPORTED;
}
}
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns a status if cuBLAS can satisfy a particular TRMM description
/// Returns a status indicating whether cuBLAS can execute a TRMM matching
/// this description. kErrorNotSupported is returned for integer tensor-op
/// accumulation, S4/S8 output, or BF16/TF32 input — none of which have a
/// cuBLAS TRMM counterpart.
Status cublas_satisfies(library::TrmmDescription const &desc) {
  auto const &math_instruction = desc.tile_description.math_instruction;

  // Integer accumulation on tensor cores is not exposed by cuBLAS TRMM.
  bool const int_tensor_op =
    (math_instruction.element_accumulator == library::NumericTypeID::kS32) &&
    (math_instruction.opcode_class == library::OpcodeClassID::kTensorOp);

  // output type S4 and S8 not supported in cuBLAS
  bool const narrow_int_output =
    (desc.D.element == library::NumericTypeID::kS4) ||
    (desc.D.element == library::NumericTypeID::kS8);

  // input type BF16 and TF32 not supported in cuBLAS
  bool const unsupported_input =
    (desc.A.element == library::NumericTypeID::kBF16) ||
    (desc.A.element == library::NumericTypeID::kTF32);

  if (int_tensor_op || narrow_int_output || unsupported_input) {
    return Status::kErrorNotSupported;
  }

  return Status::kSuccess;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
/// Translates a CUTLASS TRMM description into the cuBLAS enums and data
/// types this dispatcher later passes to cublas*trmm(). Each translation
/// step feeds the 'good' accumulator; if any step fails, 'status' is set
/// to kErrorNotSupported and operator() should not be invoked.
cublasTrmmDispatcher::cublasTrmmDispatcher(
library::TrmmDescription const &op_desc,
library::TrmmConfiguration configuration_,
library::TrmmArguments arguments_
):
configuration(configuration_), arguments(arguments_), status(Status::kSuccess) {
bool good = true;
// Map layout/transform/side/fill/diag descriptors onto cuBLAS enums.
good = (good && get_cublas_transpose_operation(trans_A, op_desc.A.layout, op_desc.transform_A));
good = (good && get_cublas_side_mode(side, op_desc.side_mode));
good = (good && get_cublas_fill_mode(uplo, op_desc.fill_mode));
good = (good && get_cublas_diag_type(diag, op_desc.diag_type));
good = (good && get_cublas_datatype(data_type_A, op_desc.A.element));
good = (good && get_cublas_datatype(data_type_B, op_desc.B.element));
good = (good && get_cublas_datatype(data_type_D, op_desc.D.element));
// if A is Transposed, then for cuBLAS that is inverted Fill Mode.
if (trans_A == CUBLAS_OP_T || trans_A == CUBLAS_OP_C) {
if (uplo == CUBLAS_FILL_MODE_LOWER)
uplo = CUBLAS_FILL_MODE_UPPER;
else
uplo = CUBLAS_FILL_MODE_LOWER;
}
good = (good && get_cublas_datatype(
compute_data_type,
op_desc.tile_description.math_instruction.element_accumulator));
// cuBLAS introduces a separate cublasComputeType enumerant to more precisely describe
// internal numerical data types used in the computation.
#if (__CUDACC_VER_MAJOR__ >= 11)
library::OpcodeClassID const & opcode_class =
op_desc.tile_description.math_instruction.opcode_class;
// F32 operands on tensor cores map to the TF32 fast path.
if (good &&
op_desc.A.element == library::NumericTypeID::kF32 &&
opcode_class == library::OpcodeClassID::kTensorOp) {
compute_type = CUBLAS_COMPUTE_32F_FAST_TF32;
}
else if (good) {
// Otherwise derive the compute type directly from the accumulator type.
bool const isPedantic = false;
switch (compute_data_type) {
case CUDA_R_32F:
case CUDA_C_32F:
compute_type = isPedantic ? CUBLAS_COMPUTE_32F_PEDANTIC : CUBLAS_COMPUTE_32F;
break;
case CUDA_R_64F:
case CUDA_C_64F:
compute_type = isPedantic ? CUBLAS_COMPUTE_64F_PEDANTIC : CUBLAS_COMPUTE_64F;
break;
case CUDA_R_16F:
compute_type = isPedantic ? CUBLAS_COMPUTE_16F_PEDANTIC : CUBLAS_COMPUTE_16F;
break;
case CUDA_R_32I:
compute_type = isPedantic ? CUBLAS_COMPUTE_32I_PEDANTIC : CUBLAS_COMPUTE_32I;
break;
default:
// No cublasComputeType equivalent for this accumulator.
good = false;
break;
}
}
#endif // __CUDACC_VER_MAJOR__ >= 11
if (!good) {
status = Status::kErrorNotSupported;
}
}
/// Executes TRMM using these arguments. Dispatches on the (A, D) element
/// type pair resolved by the constructor; any other combination returns
/// CUBLAS_STATUS_NOT_SUPPORTED. cuBLAS writes the product out-of-place
/// into arguments.D with leading dimension configuration.ldd.
cublasStatus_t cublasTrmmDispatcher::operator()(cublasHandle_t handle) {
if (data_type_A == data_type_D && data_type_A == CUDA_R_64F) {
// double-precision real
return cublasDtrmm(
handle,
side,
uplo,
trans_A,
diag,
configuration.problem_size.m(),
configuration.problem_size.n(),
static_cast<const double*>(arguments.alpha),
static_cast<const double*>(arguments.A),
int(configuration.lda),
static_cast<const double*>(arguments.B),
int(configuration.ldb),
static_cast<double*>(arguments.D),
int(configuration.ldd)
);
} else if (data_type_A == data_type_D && data_type_A == CUDA_R_32F) {
// single-precision real: opt into TF32 tensor-op math on CUDA 11+
#if (__CUDACC_VER_MAJOR__ >= 11)
if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != CUBLAS_STATUS_SUCCESS)
return CUBLAS_STATUS_NOT_SUPPORTED;
#endif
return cublasStrmm(
handle,
side,
uplo,
trans_A,
diag,
configuration.problem_size.m(),
configuration.problem_size.n(),
static_cast<const float*>(arguments.alpha),
static_cast<const float*>(arguments.A),
int(configuration.lda),
static_cast<const float*>(arguments.B),
int(configuration.ldb),
static_cast<float*>(arguments.D),
int(configuration.ldd)
);
} else if (data_type_A == data_type_D && data_type_A == CUDA_C_64F) {
// double-precision complex
return cublasZtrmm(
handle,
side,
uplo,
trans_A,
diag,
configuration.problem_size.m(),
configuration.problem_size.n(),
static_cast<const cuDoubleComplex*>(arguments.alpha),
static_cast<const cuDoubleComplex*>(arguments.A),
int(configuration.lda),
static_cast<const cuDoubleComplex*>(arguments.B),
int(configuration.ldb),
static_cast<cuDoubleComplex*>(arguments.D),
int(configuration.ldd)
);
} else if (data_type_A == data_type_D && data_type_A == CUDA_C_32F) {
// single-precision complex
#if (__CUDACC_VER_MAJOR__ >= 11)
if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != CUBLAS_STATUS_SUCCESS)
return CUBLAS_STATUS_NOT_SUPPORTED;
#endif
return cublasCtrmm(
handle,
side,
uplo,
trans_A,
diag,
configuration.problem_size.m(),
configuration.problem_size.n(),
static_cast<const cuComplex*>(arguments.alpha),
static_cast<const cuComplex*>(arguments.A),
int(configuration.lda),
static_cast<const cuComplex*>(arguments.B),
int(configuration.ldb),
static_cast<cuComplex*>(arguments.D),
int(configuration.ldd)
);
} else {
return CUBLAS_STATUS_NOT_SUPPORTED;
}
}
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns a status if cuBLAS can satisfy a particular Symm description.
/// Rejects combinations that have no cuBLAS SYMM/HEMM counterpart.
Status cublas_satisfies(library::SymmDescription const &desc) {
auto const &math_instruction = desc.tile_description.math_instruction;
// integer tensor-op accumulation not supported in cuBLAS
if (math_instruction.element_accumulator == library::NumericTypeID::kS32 &&
math_instruction.opcode_class == library::OpcodeClassID::kTensorOp) {
return Status::kErrorNotSupported;
}
// output type S4 and S8 not supported in cuBLAS
if (desc.C.element == library::NumericTypeID::kS4 ||
desc.C.element == library::NumericTypeID::kS8) {
return Status::kErrorNotSupported;
}
// input type (A) BF16 and TF32 not supported in cuBLAS
if (desc.A.element == library::NumericTypeID::kBF16 ||
desc.A.element == library::NumericTypeID::kTF32) {
return Status::kErrorNotSupported;
}
// input type (B) BF16 and TF32 not supported in cuBLAS
if (desc.B.element == library::NumericTypeID::kBF16 ||
desc.B.element == library::NumericTypeID::kTF32) {
return Status::kErrorNotSupported;
}
// only column major layout (with no complex transform on A) is supported in cuBLAS
if (desc.A.layout != library::LayoutTypeID::kColumnMajor ||
desc.transform_A != library::ComplexTransform::kNone) {
return Status::kErrorNotSupported;
}
return Status::kSuccess;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
/// Translates a CUTLASS SYMM/HEMM description into the cuBLAS enums and
/// data types used by operator(). blas_mode distinguishes symmetric from
/// Hermitian variants. On any translation failure 'status' is set to
/// kErrorNotSupported.
cublasSymmDispatcher::cublasSymmDispatcher(
library::SymmDescription const &op_desc,
library::SymmConfiguration configuration_,
library::SymmArguments arguments_
):
configuration(configuration_), arguments(arguments_), status(Status::kSuccess) {
blas_mode = op_desc.blas_mode;
bool good = true;
// Map side/fill descriptors and element types onto cuBLAS enums.
good = (good && get_cublas_side_mode(side, op_desc.side_mode));
good = (good && get_cublas_fill_mode(uplo, op_desc.fill_mode));
good = (good && get_cublas_datatype(data_type_A, op_desc.A.element));
good = (good && get_cublas_datatype(data_type_C, op_desc.C.element));
good = (good && get_cublas_datatype(
compute_data_type,
op_desc.tile_description.math_instruction.element_accumulator));
// cuBLAS introduces a separate cublasComputeType enumerant to more precisely describe
// internal numerical data types used in the computation.
#if (__CUDACC_VER_MAJOR__ >= 11)
library::OpcodeClassID const & opcode_class =
op_desc.tile_description.math_instruction.opcode_class;
// F32 operands on tensor cores map to the TF32 fast path.
if (good &&
op_desc.A.element == library::NumericTypeID::kF32 &&
opcode_class == library::OpcodeClassID::kTensorOp) {
compute_type = CUBLAS_COMPUTE_32F_FAST_TF32;
}
else if (good) {
// Otherwise derive the compute type directly from the accumulator type.
bool const isPedantic = false;
switch (compute_data_type) {
case CUDA_R_32F:
case CUDA_C_32F:
compute_type = isPedantic ? CUBLAS_COMPUTE_32F_PEDANTIC : CUBLAS_COMPUTE_32F;
break;
case CUDA_R_64F:
case CUDA_C_64F:
compute_type = isPedantic ? CUBLAS_COMPUTE_64F_PEDANTIC : CUBLAS_COMPUTE_64F;
break;
case CUDA_R_16F:
compute_type = isPedantic ? CUBLAS_COMPUTE_16F_PEDANTIC : CUBLAS_COMPUTE_16F;
break;
case CUDA_R_32I:
compute_type = isPedantic ? CUBLAS_COMPUTE_32I_PEDANTIC : CUBLAS_COMPUTE_32I;
break;
default:
// No cublasComputeType equivalent for this accumulator.
good = false;
break;
}
}
#endif // __CUDACC_VER_MAJOR__ >= 11
if (!good) {
status = Status::kErrorNotSupported;
}
}
/// Executes Symm using these arguments. Dispatches on the (A, C) element
/// type pair; for complex types, blas_mode selects HEMM (Hermitian) vs
/// SYMM (symmetric). Note that unlike HERK/HER2K, HEMM takes complex
/// alpha AND beta. Unsupported combinations return
/// CUBLAS_STATUS_NOT_SUPPORTED.
cublasStatus_t cublasSymmDispatcher::operator()(cublasHandle_t handle) {
// SYMM and HEMM
if (data_type_A == data_type_C && data_type_A == CUDA_R_64F) {
// double-precision real
return cublasDsymm(
handle,
side,
uplo,
configuration.problem_size.m(),
configuration.problem_size.n(),
static_cast<const double*>(arguments.alpha),
static_cast<const double*>(arguments.A),
int(configuration.lda),
static_cast<const double*>(arguments.B),
int(configuration.ldb),
static_cast<const double*>(arguments.beta),
static_cast<double*>(arguments.D),
int(configuration.ldc)
);
} else if (data_type_A == data_type_C && data_type_A == CUDA_R_32F) {
// single-precision real: opt into TF32 tensor-op math on CUDA 11+
#if (__CUDACC_VER_MAJOR__ >= 11)
if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != CUBLAS_STATUS_SUCCESS)
return CUBLAS_STATUS_NOT_SUPPORTED;
#endif
return cublasSsymm(
handle,
side,
uplo,
configuration.problem_size.m(),
configuration.problem_size.n(),
static_cast<const float*>(arguments.alpha),
static_cast<const float*>(arguments.A),
int(configuration.lda),
static_cast<const float*>(arguments.B),
int(configuration.ldb),
static_cast<const float*>(arguments.beta),
static_cast<float*>(arguments.D),
int(configuration.ldc)
);
} else if (data_type_A == data_type_C && data_type_A == CUDA_C_64F) {
// double-precision complex: Hermitian or symmetric variant
if (blas_mode == BlasMode::kHermitian) {
return cublasZhemm(
handle,
side,
uplo,
configuration.problem_size.m(),
configuration.problem_size.n(),
static_cast<const cuDoubleComplex*>(arguments.alpha),
static_cast<const cuDoubleComplex*>(arguments.A),
int(configuration.lda),
static_cast<const cuDoubleComplex*>(arguments.B),
int(configuration.ldb),
static_cast<const cuDoubleComplex*>(arguments.beta),
static_cast<cuDoubleComplex*>(arguments.D),
int(configuration.ldc)
);
}
else {
return cublasZsymm(
handle,
side,
uplo,
configuration.problem_size.m(),
configuration.problem_size.n(),
static_cast<const cuDoubleComplex*>(arguments.alpha),
static_cast<const cuDoubleComplex*>(arguments.A),
int(configuration.lda),
static_cast<const cuDoubleComplex*>(arguments.B),
int(configuration.ldb),
static_cast<const cuDoubleComplex*>(arguments.beta),
static_cast<cuDoubleComplex*>(arguments.D),
int(configuration.ldc)
);
}
} else if (data_type_A == data_type_C && data_type_A == CUDA_C_32F) {
// single-precision complex: Hermitian or symmetric variant
#if (__CUDACC_VER_MAJOR__ >= 11)
if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != CUBLAS_STATUS_SUCCESS)
return CUBLAS_STATUS_NOT_SUPPORTED;
#endif
if (blas_mode == BlasMode::kHermitian) {
return cublasChemm(
handle,
side,
uplo,
configuration.problem_size.m(),
configuration.problem_size.n(),
static_cast<const cuComplex*>(arguments.alpha),
static_cast<const cuComplex*>(arguments.A),
int(configuration.lda),
static_cast<const cuComplex*>(arguments.B),
int(configuration.ldb),
static_cast<const cuComplex*>(arguments.beta),
static_cast<cuComplex*>(arguments.D),
int(configuration.ldc)
);
}
else {
return cublasCsymm(
handle,
side,
uplo,
configuration.problem_size.m(),
configuration.problem_size.n(),
static_cast<const cuComplex*>(arguments.alpha),
static_cast<const cuComplex*>(arguments.A),
int(configuration.lda),
static_cast<const cuComplex*>(arguments.B),
int(configuration.ldb),
static_cast<const cuComplex*>(arguments.beta),
static_cast<cuComplex*>(arguments.D),
int(configuration.ldc)
);
}
} else {
return CUBLAS_STATUS_NOT_SUPPORTED;
}
}
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
#endif // #if CUTLASS_ENABLE_CUBLAS
|
56de108824de62c1bb5bfaadc0ae85362c1a9d78.hip | // !!! This is a file automatically generated by hipify!!!
#include "CudaComputing.cuh"
#include "hip/hip_runtime.h"
#include "hip/device_functions.h"
#include "device_launch_parameters.h"
#include "math.h"
// Scene feature toggles. Each flag lives in device global memory; a host
// assignment cannot reach a __device__ variable, so each flag gets a
// trivial <<<1,1>>> setter kernel plus a host-callable wrapper.
__device__ bool HasTheBall ;
__global__ void setDev_ball(bool dev_ball){
HasTheBall = dev_ball;
}
// Host wrapper: enable/disable rendering of the sphere.
void setTheBall(bool Ball){
setDev_ball << <1, 1 >> >(Ball);
}
__device__ bool HasTheCube ;
__global__ void setDev_cube(bool dev_cube){
HasTheCube = dev_cube;
}
// Host wrapper: enable/disable rendering of the cube.
void setTheCube(bool cube){
setDev_cube << <1, 1 >> >(cube);
}
__device__ bool HasTheCy ;
__global__ void setDev_cy(bool dev_cy){
HasTheCy = dev_cy;
}
// Host wrapper: enable/disable rendering of the cylinder.
void setTheCylinder(bool Cy){
setDev_cy << <1, 1 >> >(Cy);
}
__device__ bool HasTheMirror ;
__global__ void setDev_mirror(bool dev_mirror){
HasTheMirror = dev_mirror;
}
// Host wrapper: enable/disable the flat mirror on the back wall.
void setTheMirror(bool mi){
setDev_mirror << <1, 1 >> >(mi);
}
__device__ bool HasTheCurve;
__global__ void setDev_curve(bool dev_curv){
HasTheCurve = dev_curv;
}
// Host wrapper: make the mirror curved instead of flat (used by HitTheWall).
void setTheCurve(bool cur){
setDev_curve << <1, 1 >> >(cur);
}
__device__ bool HasTheShadow ;
__global__ void setDev_shadow(bool dev_sha){
HasTheShadow = dev_sha;
}
// Host wrapper: enable/disable shadow-ray occlusion testing.
void setTheShadow(bool sha){
setDev_shadow << <1, 1 >> >(sha);
}
__device__ bool HasTheBallFlection;
__global__ void setDev_BF(bool dev_sha){
HasTheBallFlection = dev_sha;
}
// Host wrapper: enable/disable reflection rays off the sphere.
void setTheBF(bool sha){
setDev_BF<< <1, 1 >> >(sha);
}
// Scene geometry parameters (device globals): cylinder height and the
// base positions of the cube and cylinder. The room used throughout this
// file spans x in [0,1200], y in [0,600], z in [-600,0] (see HitTheWall).
__device__ float CyHeight = 250;
__device__ float CubeX = 600;
__device__ float CubeY = 0;
__device__ float CubeZ = -400;
__device__ float CyX = 800;
__device__ float CyY = 0;
__device__ float CyZ = -300;
// True when the quadratic a*x^2 + b*x + c = 0 has a real solution,
// i.e. its discriminant is non-negative. (Name typo preserved: callers
// throughout this file use "chekcSolution".)
__device__ bool chekcSolution(float a, float b, float c){
    return (b * b - 4 * a * c) >= 0;
}
// Returns the '+' root of a*x^2 + b*x + c = 0: (-b + sqrt(disc)) / (2a).
// Caller must first verify chekcSolution(a, b, c).
// Fixed: uses sqrtf so the computation stays in single precision — the
// original sqrt call promoted the operand to double on the device.
__device__ float getSolution1(float a, float b, float c){
    float rst = -b + sqrtf(b * b - 4 * a * c);
    rst = rst / (2 * a);
    return rst;
}
// Returns the '-' root of a*x^2 + b*x + c = 0: (-b - sqrt(disc)) / (2a).
// Caller must first verify chekcSolution(a, b, c).
// Fixed: uses sqrtf so the computation stays in single precision — the
// original sqrt call promoted the operand to double on the device.
__device__ float getSolution2(float a, float b, float c){
    float rst = -b - sqrtf(b * b - 4 * a * c);
    rst = rst / (2 * a);
    return rst;
}
// Standard 3-component dot product.
__device__ float dot(float3 a, float3 b){
    return a.x * b.x + a.y * b.y + a.z * b.z;
}
// Returns n scaled to unit length.
// Fixed: uses sqrtf (the original sqrt call promoted to double on the
// device) and leaves a zero-length vector unchanged instead of dividing
// by zero and propagating NaNs.
__device__ float3 normalize(float3 n){
    float length_sq = n.x * n.x + n.y * n.y + n.z * n.z;
    if (length_sq <= 0.0f) {
        return n;  // degenerate input: nothing sensible to divide by
    }
    float length = sqrtf(length_sq);
    n.x = n.x / length;
    n.y = n.y / length;
    n.z = n.z / length;
    return n;
}
// Maximum of two floats.
__device__ float bigger(float a, float b){
    return (a > b) ? a : b;
}
// True when point s lies inside the axis-aligned cube whose base-center
// is 'center' and whose edge length is e. The cube extends upward from
// its base: y spans [center.y, center.y + e], x and z span +/- e/2.
__device__ bool IsHitTheCube(float3 s, float3 center, float e){
    float half_e = e / 2;
    bool in_x = (s.x >= center.x - half_e) && (s.x <= center.x + half_e);
    bool in_y = (s.y >= center.y) && (s.y <= center.y + e);
    bool in_z = (s.z >= center.z - half_e) && (s.z <= center.z + half_e);
    return in_x && in_y && in_z;
}
//
// Classifies which cube face a marching ray crossed. t is the first
// sample found inside the cube, d is the unit ray direction with step
// size 5, so the previous sample t - 5*d was outside. Returns the step
// midpoint in .xyz and a face code in .w chosen so that getNormal yields
// the outward normal of the struck face (1:+x, 2:-x, 3:+y, 4:-y, 5:+z,
// 7:-z). Returns all zeros when no face test matches.
__device__ float4 HitTheCube(float3 t,float3 d,float3 center,float e){
float up=center.y+e;
float down=center.y;
float left=center.x-e/2;
float right=center.x+e/2;
float front=center.z+e/2;
float back=center.z-e/2;
// entered through the +x face
if (t.x - d.x * 5 > right&&t.x <= right){
return(make_float4(t.x - d.x * 2.5, t.y - d.y * 2.5, t.z - d.z * 2.5, 1.0));
}
// entered through the -x face
if (t.x - d.x * 5 < left&&t.x >= left){
return(make_float4(t.x - d.x * 2.5, t.y - d.y * 2.5, t.z - d.z * 2.5, 2.0));
}
// entered through the top face
if (t.y - d.y * 5 > up&&t.y <= up){
return(make_float4(t.x - d.x * 2.5, t.y - d.y * 2.5, t.z - d.z * 2.5, 3.0));
}
// entered through the bottom face
if (t.y - d.y * 5 < down&&t.y >= down){
return(make_float4(t.x - d.x * 2.5, t.y - d.y * 2.5, t.z - d.z * 2.5, 4.0));
}
// entered through the +z face
if (t.z - d.z * 5 > front&&t.z <= front){
return(make_float4(t.x - d.x * 2.5, t.y - d.y * 2.5, t.z - d.z * 2.5, 5.0));
}
// entered through the -z face
if (t.z - d.z * 5 < back&&t.z >= back){
return(make_float4(t.x - d.x * 2.5, t.y - d.y * 2.5, t.z - d.z * 2.5, 7.0));
}
// no face matched (should not occur for a genuine crossing)
return make_float4(0.0, 0.0, 0.0, 0.0);
}
// True when point s lies inside the vertical cylinder of radius r
// centered at (c.x, c.z), spanning y in [0, h].
__device__ bool IsHitTheCylinder(float3 s,float3 c,float r,float h){
    float dx = s.x - c.x;
    float dz = s.z - c.z;
    return (dx * dx + dz * dz <= r * r) && (s.y >= 0) && (s.y <= h);
}
//
// Classifies how a marching ray entered the cylinder. t is the first
// sample inside (the previous sample was t - 5*d). Returns the hit point
// in .xyz and a material code in .w: 3.0 for the top cap, 9.0 for the
// curved side wall.
// Fixed: the original fell off the end of this non-void function
// (undefined behavior) when neither test matched — e.g. entry through
// the open bottom. It now falls back to the side-wall classification.
__device__ float4 HitTheCylinder(float3 t,float3 d,float3 c,float r,float h){
    // crossed the top cap from above
    if (t.y <= h && t.y - d.y * 5 > h){
        return make_float4(t.x, t.y, t.z, 3.0);
    }
    // crossed the curved side wall (previous sample outside the radius)
    if ((t.x - c.x)*(t.x - c.x) + (t.z - c.z)*(t.z - c.z) <= r*r &&
        (t.x - d.x * 5 - c.x)*(t.x - d.x * 5 - c.x) + (t.z - d.z * 5 - c.z)*(t.z - d.z * 5 - c.z) > r*r){
        return make_float4(t.x, t.y, t.z, 9.0);
    }
    // fallback: treat any other entry as a side hit rather than returning garbage
    return make_float4(t.x, t.y, t.z, 9.0);
}
// Marches a reflected ray starting on the sphere surface at s in
// direction dir (step size 5, up to 100 steps) and reports what it hits:
// the cube, the cylinder, or one of the six room planes. Returns the
// hit point in .xyz and a material code in .w (defaults to 7.0, the
// z=0 front plane, if nothing is reached within the step budget).
// Fixed: removed the unused locals k, x, y, z and R; logic is unchanged.
__device__ float4 rayFromShpere(float3 s, float3 dir){
    float4 rst;
    rst.x = 0.0;
    rst.y = 0.0;
    rst.z = 0.0;
    rst.w = 7.0;
    float3 d = normalize(dir);
    float3 t = s;
    for (int i = 0; i < 100; i++){
        t.x += d.x * 5;
        t.y += d.y * 5;
        t.z += d.z * 5;
        // occluders first: cube, then cylinder
        if (HasTheCube&&IsHitTheCube(t, make_float3(CubeX,CubeY,CubeZ), 200)){
            return HitTheCube(t, d, make_float3(CubeX, CubeY, CubeZ), 200);
        }
        if (HasTheCy&&IsHitTheCylinder(t, make_float3(CyX, CyY, CyZ), 100, CyHeight)){
            return HitTheCylinder(t, d, make_float3(CyX, CyY, CyZ), 100, CyHeight);
        }
        // z = 0 plane, code 7.0 (report the step midpoint)
        if (t.z >= 0 && t.z - 5 * d.z < 0){
            rst.x = t.x - d.x*2.5;
            rst.y = t.y - d.y*2.5;
            rst.z = t.z - d.z*2.5;
            rst.w = 7.0;
            return rst;
        }
        // x = 0 plane, code 1.0
        if (t.x <= 0 && t.x - 5 * d.x > 0){
            rst.x = t.x - d.x*2.5;
            rst.y = t.y - d.y*2.5;
            rst.z = t.z - d.z*2.5;
            rst.w = 1.0;
            return rst;
        }
        // x = 1200 plane, code 2.0
        if (t.x >= 1200 && t.x - 5 * d.x < 1200){
            rst.x = t.x - d.x*2.5;
            rst.y = t.y - d.y*2.5;
            rst.z = t.z - d.z*2.5;
            rst.w = 2.0;
            return rst;
        }
        // y = 0 plane (floor), code 3.0
        if (t.y <= 0 && t.y - 5 * d.y > 0){
            rst.x = t.x - d.x*2.5;
            rst.y = t.y - d.y*2.5;
            rst.z = t.z - d.z*2.5;
            rst.w = 3.0;
            return rst;
        }
        // y = 600 plane (ceiling), code 4.0
        if (t.y >= 600 && t.y - 5 * d.y < 600){
            rst.x = t.x - d.x*2.5;
            rst.y = t.y - d.y*2.5;
            rst.z = t.z - d.z*2.5;
            rst.w = 4.0;
            return rst;
        }
    }
    return rst;
}
// True when the line through e and p intersects the sphere (cen, R).
// Substitutes the parametric ray e + k*(p - e) into the sphere equation
// and tests the discriminant of the resulting quadratic in k.
__device__ bool IsHitTheBall(float3 e, float3 p, float3 cen, float R){
    float dx = p.x - e.x, dy = p.y - e.y, dz = p.z - e.z;
    float ox = e.x - cen.x, oy = e.y - cen.y, oz = e.z - cen.z;
    float a = dx * dx + dy * dy + dz * dz;
    float b = 2 * (dx * ox + dy * oy + dz * oz);
    float c = ox * ox + oy * oy + oz * oz - R * R;
    return (b * b - 4 * a * c) >= 0;
}
// Intersects the ray through e and p with the sphere (cen, R), returning
// the intersection point with material code 6.0. If ball reflection is
// enabled, instead reflects the incoming direction about the surface
// normal and traces the reflected ray via rayFromShpere.
// NOTE(review): getSolution1 is the '+' root, i.e. the LARGER ray
// parameter k — this looks like the far intersection of the sphere
// rather than the near one; confirm whether that is intentional.
__device__ float4 HitTheBall(float3 e, float3 p,float3 cen,float R){
float4 rst;
rst.x = 0.0;
rst.y = 0.0;
rst.z = 0.0;
rst.w = 0.0;
float k;
// quadratic in k from substituting e + k*(p-e) into the sphere equation
float a = (p.x - e.x)*(p.x - e.x) + (p.y - e.y)*(p.y - e.y) + (p.z - e.z)*(p.z - e.z);
float b = 2 * ((p.x - e.x)*(e.x - cen.x) + (p.y - e.y)*(e.y - cen.y) + (p.z - e.z)*(e.z - cen.z));
float c = (e.x - cen.x)*(e.x - cen.x) + (e.y - cen.y)*(e.y - cen.y) + (e.z - cen.z)*(e.z - cen.z) - R*R;
//hit the ball
k = getSolution1(a, b, c);
rst.x = (p.x - e.x)*k + e.x;
rst.y = (p.y - e.y)*k + e.y;
rst.z = (p.z - e.z)*k + e.z;
rst.w = 6.0;
// reflect the view direction L1 about the surface normal N
float3 L1 = make_float3((p.x - rst.x), (p.y - rst.y), (p.z - rst.z));
L1 = normalize(L1);
float3 N = make_float3((rst.x - cen.x), (rst.y - cen.y), (rst.z - cen.z));
N = normalize(N);
float3 L2 = make_float3(-2 * dot(L1, N)*N.x + L1.x, -2 * dot(L1, N)*N.y + L1.y, -2 * dot(L1, N)*N.z + L1.z);
// trace the reflection off the sphere when enabled
if (HasTheBallFlection)return rayFromShpere(make_float3(rst.x, rst.y, rst.z), L2);
return rst;
}
// Traces the mirror-reflected ray through e and p and returns what it
// sees: the sphere (analytic test), the cube/cylinder (marched, step 5,
// 200 steps), or one of the room planes (analytic plane intersections).
// Returns hit point in .xyz and the material code in .w.
__device__ float4 HitTheMirror(float3 e, float3 p, float3 cen){
float4 rst;
rst.x = 0.0;
rst.y = 0.0;
rst.z = 0.0;
rst.w = 7.0;
float k;
float x;
float y;
float z;
float R = 140;
// sphere first (analytic)
if (HasTheBall&&IsHitTheBall(e, p, cen, R) == true){
return HitTheBall(e, p, cen, R);
}
// march for the cube and cylinder occluders
float3 d = normalize(make_float3(p.x - e.x, p.y - e.y, p.z - e.z));
float3 t = p;
for (int i = 0; i < 200; i++){
t = make_float3(t.x + d.x * 5, t.y + d.y * 5, t.z + d.z * 5);
if (HasTheCube&&IsHitTheCube(t, make_float3(CubeX, CubeY, CubeZ), 200)){
return HitTheCube(t, d, make_float3(CubeX, CubeY, CubeZ), 200);
}
if (HasTheCy&&IsHitTheCylinder(t, make_float3(CyX, CyY, CyZ), 100, CyHeight)){
return HitTheCylinder(t, d, make_float3(CyX, CyY, CyZ), 100, CyHeight);
}
}
// z = 0 plane (code 7.0)
z = 0;
k = (z - e.z) / (p.z - e.z);
x = (p.x - e.x)*k + e.x;
y = (p.y - e.y)*k + e.y;
if (x >= 0 && x <= 1200 && y >= 0 && y <= 600){
rst.x = x;
rst.y = y;
rst.z = z;
rst.w = 7.0;
return rst;
}
// x = 0 plane (code 1.0)
x = 0;
k = (x - e.x) / (p.x - e.x);
y = (p.y - e.y)*k + e.y;
z = (p.z - e.z)*k + e.z;
if (y >= 0 && y <= 600 && z >= -600 && z <= 0){
rst.x = x;
rst.y = y;
rst.z = z;
rst.w = 1.0;
return rst;
}
// x = 1200 plane (code 2.0)
x = 1200;
k = (x - e.x) / (p.x - e.x);
y = (p.y - e.y)*k + e.y;
z = (p.z - e.z)*k + e.z;
if (y >= 0 && y <= 600 && z >= -600 && z <= 0){
rst.x = x;
rst.y = y;
rst.z = z;
rst.w = 2.0;
return rst;
}
// y = 0 floor (code 3.0)
y = 0;
k = (y - e.y) / (p.y - e.y);
x = (p.x - e.x)*k + e.x;
z = (p.z - e.z)*k + e.z;
if (x >= 0 && x <= 1200 && z >= -600 && z <= 0){
rst.x = x;
rst.y = y;
rst.z = z;
rst.w = 3.0;
return rst;
}
// y = 600 ceiling (code 4.0)
y = 600;
k = (y - e.y) / (p.y - e.y);
x = (p.x - e.x)*k + e.x;
z = (p.z - e.z)*k + e.z;
if (x >= 0 && x <= 1200 && z >= -600 && z <= 0){
rst.x = x;
rst.y = y;
rst.z = z;
rst.w = 4.0;
return rst;
}
return rst;
}
// Traces a ray against the curved mirror: marches from s along d until
// the sample crosses the cylinder of radius 625 centered at (600, -225)
// in the x/z plane, reflects the direction about the local (horizontal)
// surface normal, then delegates the reflected ray to HitTheMirror.
// Rays that climb above y=500 hit the ceiling (code 4.0); rays that
// exhaust the 200-step budget return the default front-wall code 7.0.
// Fixed: removed the unreachable 'break' after the return statement.
__device__ float4 HitCurveMirror(float3 s, float3 d,float3 ball){
    float4 rst;
    rst.x = 0.0;
    rst.y = 0.0;
    rst.z = 0.0;
    rst.w = 7.0;
    float3 L1;
    float3 N;
    float3 L2;
    float3 t = s;
    d = normalize(d);
    // march to find the crossing point of the curved surface
    for (int i = 0; i < 200; i++){
        t.x += d.x * 5;
        t.y += d.y * 5;
        t.z += d.z * 5;
        if (t.y>500)return(make_float4(t.x, t.y, t.z, 4.0));
        if ((t.x - 600)*(t.x - 600) + (t.z + 225)*(t.z + 225) >= 625 * 625 && (t.x - d.x * 5 - 600)*(t.x - d.x * 5 - 600) + (t.z - d.z * 5 + 225)*(t.z - d.z * 5 + 225) < 625 * 625){
            // reflect the (reversed) incoming direction about the inward radial normal
            L1 = make_float3(-d.x, -d.y, -d.z);
            L1 = normalize(L1);
            N = make_float3(600 - t.x, 0,-225 - t.z);
            N = normalize(N);
            L2 = make_float3(2 * dot(L1, N)*N.x - L1.x, 2 * dot(L1, N)*N.y - L1.y, 2 * dot(L1, N)*N.z - L1.z);
            return HitTheMirror(t, make_float3(t.x + L2.x, t.y + L2.y, t.z + L2.z), ball);
        }
    }
    return rst;
}
// Intersects the primary ray through e and p with the six room planes
// and returns the first valid hit (point in .xyz, material code in .w).
// The back wall (z = -600) carries the mirror region x in [100,1100],
// y in [100,550]; depending on flags that region recurses into the flat
// or curved mirror tracer. The ceiling contains a disk light (code 8.0).
__device__ float4 HitTheWall(float3 e,float3 p,float3 cen){
float4 rst;
rst.x = 0.0;
rst.y = 0.0;
rst.z = 0.0;
rst.w = 0.0;
float x;
float y;
float z;
float k;
// back wall z = -600 (code 5.0), possibly mirrored
z = -600;
k = (z - e.z) / (p.z - e.z);
x = (p.x - e.x)*k + e.x;
y = (p.y - e.y)*k + e.y;
if (x >= 0 && x <= 1200 && y >= 0 && y <= 600){
if (x >= 100 && x <= 1100 && y >= 100 && y <= 550){
if (HasTheMirror){
if (HasTheCurve){
return HitCurveMirror(make_float3(p.x, p.y, p.z), make_float3(p.x - e.x, p.y - e.y, p.z - e.z), make_float3(cen.x, cen.y, cen.z));
}
// flat mirror: reflect the eye through the z = -600 plane (z -> -1200 - z)
return HitTheMirror(make_float3(e.x,e.y,-1200-e.z), make_float3(x,y,z), cen);
}
if (!HasTheMirror){
rst.x = x;
rst.y = y;
rst.z = z;
rst.w = 5.0;
return rst;
}
}
else{
rst.x = x;
rst.y = y;
rst.z = z;
rst.w = 5.0;
return rst;
}
}
// x = 0 left wall (code 1.0)
x = 0;
k = (x - e.x) / (p.x - e.x);
y = (p.y - e.y)*k + e.y;
z = (p.z - e.z)*k + e.z;
if (y >= 0 && y <= 600 && z >= -600 && z <= 0){
rst.x = x;
rst.y = y;
rst.z = z;
rst.w = 1.0;
return rst;
}
// x = 1200 right wall (code 2.0)
x = 1200;
k = (x - e.x) / (p.x - e.x);
y = (p.y - e.y)*k + e.y;
z = (p.z - e.z)*k + e.z;
if (y >= 0 && y <= 600 && z >= -600 && z <= 0){
rst.x = x;
rst.y = y;
rst.z = z;
rst.w = 2.0;
return rst;
}
// y = 0 floor (code 3.0)
y = 0;
k = (y - e.y) / (p.y - e.y);
x = (p.x - e.x)*k + e.x;
z = (p.z - e.z)*k + e.z;
if (x >= 0 && x <= 1200 && z >= -600 && z <= 0){
rst.x = x;
rst.y = y;
rst.z = z;
rst.w = 3.0;
return rst;
}
// y = 600 ceiling (code 4.0; disk of radius 100 at (600,-300) is the light, code 8.0)
y = 600;
k = (y - e.y) / (p.y - e.y);
x = (p.x - e.x)*k + e.x;
z = (p.z - e.z)*k + e.z;
if (x >= 0 && x <= 1200 && z >= -600 && z <= 0){
rst.x = x;
rst.y = y;
rst.z = z;
rst.w = 4.0;
if ((x - 600)*(x - 600) + (z + 300)*(z + 300)<100 * 100)rst.w = 8.0;
return rst;
}
return rst;
}
// Primary-ray intersection: given eye e, pixel point p and sphere center
// cen, returns the first surface hit (point in .xyz, material code in
// .w). Tests the sphere analytically, then marches for the cube and
// cylinder (step 5, 100 steps), and finally intersects the room walls.
__device__ float4 getHitPoint(float3 e, float3 p, float3 cen){
//hit the ball
float R = 140;
if (IsHitTheBall(e, p, cen, R) == true && HasTheBall==true){
return HitTheBall(e, p, cen, R);
}
//hit the cube and the cylinder
float3 d = normalize(make_float3(p.x-e.x,p.y-e.y,p.z-e.z));
float3 t = p;
for (int i = 0; i < 100; i++){
t = make_float3(t.x + d.x * 5, t.y + d.y * 5, t.z + d.z * 5);
if (HasTheCube&&IsHitTheCube(t, make_float3(CubeX, CubeY, CubeZ), 200)){
return HitTheCube(t, d, make_float3(CubeX, CubeY, CubeZ), 200);
}
if (HasTheCy&&IsHitTheCylinder(t, make_float3(CyX, CyY, CyZ), 100, CyHeight)){
return HitTheCylinder(t, d, make_float3(CyX, CyY, CyZ), 100, CyHeight);
}
}
//hit the wall
return HitTheWall(e, p, cen);
}
// Maps a hit record (position, material code in .w) to its unit surface
// normal. Codes: 6 = sphere (radial from center (cx,cy,cz)), 9 =
// cylinder side (radial from the cylinder axis), 1..5 and 7 = the
// axis-aligned walls/faces.
// Fixed: the original left N uninitialized — and then normalized it —
// for codes with no matching branch (0.0 "no hit" and 8.0 "light
// disk"), which is undefined behavior. N now defaults to (0,0,1); for
// code 8.0 getColor overrides the color entirely, so defined inputs
// render identically.
__device__ float3 getNormal(float4 p,float cx,float cy,float cz){
    float3 N = make_float3(0, 0, 1);  // safe default for unmatched codes
    if (p.w == 6.0){
        N = make_float3(p.x - cx, p.y - cy, p.z - cz);  // sphere: radial
    }
    if (p.w == 5.0){
        N = make_float3(0, 0, 1);   // back wall
    }
    if (p.w == 1.0){
        N = make_float3(1, 0, 0);   // left wall / +x face
    }
    if (p.w == 2.0){
        N = make_float3(-1, 0, 0);  // right wall / -x face
    }
    if (p.w == 3.0){
        N = make_float3(0, 1, 0);   // floor / top face
    }
    if (p.w == 4.0){
        N = make_float3(0, -1, 0);  // ceiling / bottom face
    }
    if (p.w == 7.0){
        N = make_float3(0, 0, -1);  // front wall / -z face
    }
    if (p.w == 9.0){
        N = make_float3(p.x-800,0,p.z+300);  // cylinder side: radial from axis (800,-300)
    }
    N = normalize(N);
    return N;
}
// Blinn-Phong shading for a hit record p with unit normal n, viewed from
// eye (ex,ey,ez). The material coefficients (kd/ks/ka) are selected by
// the material code p.w; the light sits at (600, 600, -300) and the
// diffuse term is attenuated by squared distance from the eye (scaled by
// 1.2e6, clamped to >= 1). Code 8.0 (the ceiling light disk) is emitted
// as constant yellow, bypassing the shading model.
__device__ float4 getColor(float4 p,float3 n,float ex,float ey,float ez){
// squared eye-to-hit distance, used to attenuate the diffuse term
float dist = (p.x - ex)*(p.x - ex) + (p.y - ey)*(p.y - ey) + (p.z - ez)*(p.z - ez);
dist /= 1200000;
if (dist < 1)dist = 1;
// default material coefficients and light terms (dyD/dyS/dyA)
float4 kd = make_float4(0.5, 0.5, 0.5, 1.0);
float4 ks = make_float4(0.0, 0.0, 0.1, 1.0);
float4 ka = make_float4(0.1, 0.1, 0.1, 1.0);
float4 dyDiffuse = make_float4(1.0, 1.0, 1.0, 1.0);
float4 dySpecular = make_float4(0.5, 0.5, 0.5, 1.0);
float4 dyAmbient = make_float4(0.2, 0.2, 0.2, 1.0);
if (p.w == 6.0){//the ball
kd = make_float4(0.5, 0.5, 0.9, 1.0);
ks = make_float4(0.0, 0.0, 0.0, 1.0);
ka = make_float4(0.5, 0.5, 0.5, 1.0);
}
if (p.w == 5.0){//back wall
kd = make_float4(0.0, 0.6, 0.0, 1.0);
ks = make_float4(0.9, 0.0, 0.0, 1.0);
ka = make_float4(0.05, 0.0, 0.0, 1.0);
}
if (p.w == 1.0){//left wall
kd = make_float4(0.5, 0.0, 0.0, 1.0);
ks = make_float4(0.1, 0.0, 0.0, 1.0);
ka = make_float4(0.9, 0.9, 0.1, 1.0);
}
if (p.w == 2.0){//right wall
kd = make_float4(0.0, 0.0, 0.5, 1.0);
ks = make_float4(0.1, 0.0, 0.0, 1.0);
ka = make_float4(0.9, 0.9, 0.1, 1.0);
}
if (p.w == 3.0){//floor
kd = make_float4(0.0, 0.5, 0.5, 1.0);
ks = make_float4(1.0, 1.0, 1.0, 1.0);
ka = make_float4(0.9, 0.9, 0.1, 1.0);
}
if (p.w == 4.0){//ceil
kd = make_float4(0.0, 0.5, 0.5, 1.0);
ks = make_float4(1.0, 1.0, 1.0, 1.0);
ka = make_float4(0.9, 0.9, 0.1, 1.0);
}
if (p.w == 7.0){//front wall
kd = make_float4(0.5, 0.0, 0.7, 1.0);
ks = make_float4(0.4, 0.4, 0.4, 1.0);
ka = make_float4(0.4, 0.4, 0.4, 1.0);
}
if (p.w == 9.0){//cylinder side
kd = make_float4(0.0, 1.0, 1.0, 1.0);
ks = make_float4(0.4, 0.4, 0.4, 1.0);
ka = make_float4(0.4, 0.4, 0.4, 1.0);
}
// view, light and half-way vectors for Blinn-Phong
float3 V = normalize(make_float3(ex - p.x, ey - p.y, ez - p.z));
float3 L = normalize(make_float3(600 - p.x, 600 - p.y, -300 - p.z));
float3 H = normalize(make_float3(V.x + L.x, V.y + L.y, V.z + L.z));
float4 ambient1 = make_float4(ka.x*dyAmbient.x , ka.y*dyAmbient.y , ka.z*dyAmbient.z , ka.w*dyAmbient.w );
float max1 = bigger(dot(n, L), 0.0f);
float4 diffuse1 = make_float4(kd.x*max1*dyDiffuse.x / dist, kd.y*max1*dyDiffuse.y / dist, kd.z*max1*dyDiffuse.z / dist, kd.w*max1*dyDiffuse.w / dist);
// specular with shininess exponent 10; suppressed on back-facing light
float max2 = powf(bigger(dot(n, H), 0.0f),10.0f);
float4 specular1 = make_float4(ks.x*max2*dySpecular.x,ks.y*max2*dySpecular.y, ks.z*max2*dySpecular.z, ks.w*max2*dySpecular.w);
if(dot(n,L)<0) specular1 =make_float4(0.0,0.0,0.0,0.0);
float4 color1 = make_float4(ambient1.x + diffuse1.x+specular1.x,
ambient1.y + diffuse1.y + specular1.y,
ambient1.z + diffuse1.z + specular1.z,
ambient1.w + diffuse1.w + specular1.w);
if (p.w == 8.0){
// the ceiling light disk is drawn as constant yellow
color1 = make_float4(1.0, 1.0, 0.0, 1.0);
}
return color1;
}
// Returns true when the segment from surface point s to light position e
// is blocked by the ball (radius R, center 'center'), the cube, or the
// cylinder. The segment is sampled in 100 equal steps; the ball test
// fires on the step where the sample first moves inside the sphere.
// Fixed: removed the unreachable 'break' statements that followed each
// 'return true'. Logic is otherwise unchanged.
__device__ bool shadowRay(float3 s, float3 e, float3 center, float R){
    int divide = 100;
    float divX = (e.x - s.x) / divide;
    float divY = (e.y - s.y) / divide;
    float divZ = (e.z - s.z) / divide;
    float3 t = s;
    for (int i = 0; i < divide; i++){
        t.x += divX;
        t.y += divY;
        t.z += divZ;
        // previous sample outside the sphere, current sample inside -> occluded
        if (HasTheBall&&((t.x - divX - center.x)*(t.x - divX - center.x) + (t.y - divY - center.y)*(t.y - divY - center.y) + (t.z - divZ - center.z)*(t.z - divZ - center.z) > R*R) && ((t.x - center.x)*(t.x - center.x) + (t.y - center.y)*(t.y - center.y) + (t.z - center.z)*(t.z - center.z) <= R*R)){
            return true;
        }
        if (HasTheCube&&IsHitTheCube(t, make_float3(CubeX, CubeY, CubeZ), 200) == true){
            return true;
        }
        if (HasTheCy&&IsHitTheCylinder(t, make_float3(CyX, CyY, CyZ), 100, CyHeight)){
            return true;
        }
    }
    return false;
}
//global
// One thread per pixel (i = column, j = row). Casts the primary ray from
// the fixed eye (600, 300, 800) through the pixel's point on the z = 0
// plane, shades the hit, darkens it if a shadow ray to the light at
// (600, 600, -300) is blocked, and writes RGB bytes into tex.
// NOTE(review): the row stride is hard-coded to 1200 pixels, so the
// launch width must be exactly 1200; there is also no i/j bounds check,
// so width and height must be exact multiples of the block dimensions.
__global__ void computeSingleRay(char* tex){
//vec4 temp = getHitPoint(Ex, Ey, Ez, vPosition.x, vPosition.y, vPosition.z);
// global pixel coordinates from the 2D launch
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int j = blockIdx.y*blockDim.y + threadIdx.y;
// eye position E and sphere center C
float Ex = 600;
float Ey = 300;
float Ez = 800;
float Cx = 200;
float Cy = 300;
float Cz = -350;
float3 E = make_float3(Ex, Ey, Ez);
float3 P = make_float3(i, j, 0);
float3 C = make_float3(Cx,Cy, Cz);
// primary hit (position + material code in .w)
float4 position = getHitPoint(E,P,C);
// surface normal at the hit
float3 normal = getNormal(position,Cx,Cy,Cz);
// Blinn-Phong shade
float4 color = getColor(position, normal,Ex,Ey,Ez);
// shadow test toward the light; the sphere itself is never self-shadowed
float3 p = make_float3(position.x, position.y, position.z);
float3 e = make_float3(600, 600, -300);
float3 c = make_float3(Cx, Cy, Cz);
if (HasTheShadow&&shadowRay(p, e, c, 140) && position.w != 6.0)color = make_float4(color.x*0.2, color.y*0.2, color.z*0.2, 1);
tex[j * 1200 * 3 + i * 3] = color.x*255;
tex[j * 1200 * 3 + i * 3 + 1] = color.y*255;
tex[j * 1200 * 3 + i * 3 + 2] = color.z*255;
}
//1200*600 size, Ex Ey Ez
// Renders a width x height RGB image into tex (3 bytes per pixel) by
// launching one thread per pixel in 8x8 blocks. width and height must be
// multiples of 8, and — because computeSingleRay hard-codes a 1200-pixel
// row stride — width must be 1200 for correct addressing.
// Fixed: device API return codes are now checked; on allocation failure
// the function returns without launching (the original ignored every
// error code, so a failed hipMalloc led to a kernel fault and a copy
// from an invalid pointer).
void computeRays(int width,int height,char *tex){
    char * dev_Tex = NULL;
    size_t bytes = 3 * (size_t)width * (size_t)height * sizeof(char);
    if (hipMalloc((char**)&dev_Tex, bytes) != hipSuccess) {
        return;  // out of device memory: leave tex untouched
    }
    dim3 block(8, 8, 1);
    dim3 grid(width/ block.x, height / block.y, 1);
    computeSingleRay << <grid, block >> >(dev_Tex);
    // hipMemcpy synchronizes with the kernel before copying back
    hipError_t copy_err = hipMemcpy(tex, dev_Tex, bytes, hipMemcpyDeviceToHost);
    hipFree(dev_Tex);
    (void)copy_err;  // NOTE(review): propagate/log once the caller gains an error channel
}
| 56de108824de62c1bb5bfaadc0ae85362c1a9d78.cu | #include "CudaComputing.cuh"
#include "cuda_runtime.h"
#include "device_functions.h"
#include "device_launch_parameters.h"
#include "math.h"
__device__ bool HasTheBall ;
__global__ void setDev_ball(bool dev_ball){
HasTheBall = dev_ball;
}
void setTheBall(bool Ball){
setDev_ball << <1, 1 >> >(Ball);
}
__device__ bool HasTheCube ;
__global__ void setDev_cube(bool dev_cube){
HasTheCube = dev_cube;
}
void setTheCube(bool cube){
setDev_cube << <1, 1 >> >(cube);
}
__device__ bool HasTheCy ;
__global__ void setDev_cy(bool dev_cy){
HasTheCy = dev_cy;
}
void setTheCylinder(bool Cy){
setDev_cy << <1, 1 >> >(Cy);
}
__device__ bool HasTheMirror ;
__global__ void setDev_mirror(bool dev_mirror){
HasTheMirror = dev_mirror;
}
void setTheMirror(bool mi){
setDev_mirror << <1, 1 >> >(mi);
}
__device__ bool HasTheCurve;
__global__ void setDev_curve(bool dev_curv){
HasTheCurve = dev_curv;
}
void setTheCurve(bool cur){
setDev_curve << <1, 1 >> >(cur);
}
__device__ bool HasTheShadow ;
__global__ void setDev_shadow(bool dev_sha){
HasTheShadow = dev_sha;
}
void setTheShadow(bool sha){
setDev_shadow << <1, 1 >> >(sha);
}
__device__ bool HasTheBallFlection;
__global__ void setDev_BF(bool dev_sha){
HasTheBallFlection = dev_sha;
}
void setTheBF(bool sha){
setDev_BF<< <1, 1 >> >(sha);
}
__device__ float CyHeight = 250;
__device__ float CubeX = 600;
__device__ float CubeY = 0;
__device__ float CubeZ = -400;
__device__ float CyX = 800;
__device__ float CyY = 0;
__device__ float CyZ = -300;
__device__ bool chekcSolution(float a, float b, float c){
if ((b*b - 4 * a*c)<0)return false;
return true;
}
__device__ float getSolution1(float a, float b, float c){
float rst = -b + sqrt(b*b - 4 * a*c);
rst = rst / (2 * a);
return rst;
}
__device__ float getSolution2(float a, float b, float c){
float rst = -b - sqrt(b*b - 4 * a*c);
rst = rst / (2 * a);
return rst;
}
__device__ float dot(float3 a, float3 b){
float c;
c = a.x*b.x + a.y*b.y + a.z*b.z;
return c;
}
__device__ float3 normalize(float3 n){
float length1 = n.x*n.x + n.y*n.y + n.z*n.z;
float length = sqrt(length1);
n.x = n.x / length;
n.y = n.y / length;
n.z = n.z / length;
return n;
}
__device__ float bigger(float a, float b){
if (a > b)return a;
return b;
}
__device__ bool IsHitTheCube(float3 s, float3 center, float e){
float up = center.y + e;
float down = center.y;
float left = center.x - e / 2;
float right = center.x + e / 2;
float front = center.z + e / 2;
float back = center.z - e / 2;
if (s.y <= up&&s.y >= down&&s.x >= left&&s.x <= right&&s.z <= front&&s.z >= back){
return true;
}
return false;
}
//底中心,边长
__device__ float4 HitTheCube(float3 t,float3 d,float3 center,float e){
float up=center.y+e;
float down=center.y;
float left=center.x-e/2;
float right=center.x+e/2;
float front=center.z+e/2;
float back=center.z-e/2;
if (t.x - d.x * 5 > right&&t.x <= right){
return(make_float4(t.x - d.x * 2.5, t.y - d.y * 2.5, t.z - d.z * 2.5, 1.0));
}
if (t.x - d.x * 5 < left&&t.x >= left){
return(make_float4(t.x - d.x * 2.5, t.y - d.y * 2.5, t.z - d.z * 2.5, 2.0));
}
if (t.y - d.y * 5 > up&&t.y <= up){
return(make_float4(t.x - d.x * 2.5, t.y - d.y * 2.5, t.z - d.z * 2.5, 3.0));
}
if (t.y - d.y * 5 < down&&t.y >= down){
return(make_float4(t.x - d.x * 2.5, t.y - d.y * 2.5, t.z - d.z * 2.5, 4.0));
}
if (t.z - d.z * 5 > front&&t.z <= front){
return(make_float4(t.x - d.x * 2.5, t.y - d.y * 2.5, t.z - d.z * 2.5, 5.0));
}
if (t.z - d.z * 5 < back&&t.z >= back){
return(make_float4(t.x - d.x * 2.5, t.y - d.y * 2.5, t.z - d.z * 2.5, 7.0));
}
return make_float4(0.0, 0.0, 0.0, 0.0);
}
__device__ bool IsHitTheCylinder(float3 s,float3 c,float r,float h){
if ((s.x - c.x)*(s.x - c.x) + (s.z - c.z)*(s.z - c.z) <= r*r&&s.y <= h&&s.y>=0){
return true;
}
return false;
}
//底中心,半径,高度
__device__ float4 HitTheCylinder(float3 t,float3 d,float3 c,float r,float h){
if(t.y <= h&&t.y - d.y * 5>h){
return make_float4(t.x, t.y, t.z, 3.0);
}
if ((t.x - c.x)*(t.x - c.x) + (t.z - c.z)*(t.z - c.z) <= r*r &&
(t.x - d.x * 5 - c.x)*(t.x - d.x * 5 - c.x) + (t.z - d.z * 5 - c.z)*(t.z - d.z * 5 - c.z) > r*r){
return make_float4(t.x, t.y, t.z, 9.0);
}
}
__device__ float4 rayFromShpere(float3 s, float3 dir){
float4 rst;
rst.x = 0.0;
rst.y = 0.0;
rst.z = 0.0;
rst.w = 7.0;
float k;
float x;
float y;
float z;
float3 d = normalize(dir);
float R = 140;
float3 t = s;
for (int i = 0; i < 100; i++){
t.x += d.x * 5;
t.y += d.y * 5;
t.z += d.z * 5;
if (HasTheCube&&IsHitTheCube(t, make_float3(CubeX,CubeY,CubeZ), 200)){
return HitTheCube(t, d, make_float3(CubeX, CubeY, CubeZ), 200);
}
if (HasTheCy&&IsHitTheCylinder(t, make_float3(CyX, CyY, CyZ), 100, CyHeight)){
return HitTheCylinder(t, d, make_float3(CyX, CyY, CyZ), 100, CyHeight);
}
//z = 0; 7.0
if (t.z >= 0 && t.z - 5 * d.z < 0){
rst.x = t.x - d.x*2.5;
rst.y = t.y - d.y*2.5;
rst.z = t.z - d.z*2.5;
rst.w = 7.0;
return rst;
}
//z=-600 5.0
//x = 0; 1.0
if (t.x <= 0 && t.x - 5 * d.x > 0){
rst.x = t.x - d.x*2.5;
rst.y = t.y - d.y*2.5;
rst.z = t.z - d.z*2.5;
rst.w = 1.0;
return rst;
}
//x = 1200; 2.0
if (t.x >= 1200 && t.x - 5 * d.x < 1200){
rst.x = t.x - d.x*2.5;
rst.y = t.y - d.y*2.5;
rst.z = t.z - d.z*2.5;
rst.w = 2.0;
return rst;
}
//y = 0; 3.0
if (t.y <= 0 && t.y - 5 * d.y > 0){
rst.x = t.x - d.x*2.5;
rst.y = t.y - d.y*2.5;
rst.z = t.z - d.z*2.5;
rst.w = 3.0;
return rst;
}
//y = 600; 4.0
if (t.y >= 600 && t.y - 5 * d.y < 600){
rst.x = t.x - d.x*2.5;
rst.y = t.y - d.y*2.5;
rst.z = t.z - d.z*2.5;
rst.w = 4.0;
return rst;
}
}
return rst;
}
__device__ bool IsHitTheBall(float3 e, float3 p, float3 cen, float R){
float a = (p.x - e.x)*(p.x - e.x) + (p.y - e.y)*(p.y - e.y) + (p.z - e.z)*(p.z - e.z);
float b = 2 * ((p.x - e.x)*(e.x - cen.x) + (p.y - e.y)*(e.y - cen.y) + (p.z - e.z)*(e.z - cen.z));
float c = (e.x - cen.x)*(e.x - cen.x) + (e.y - cen.y)*(e.y - cen.y) + (e.z - cen.z)*(e.z - cen.z) - R*R;
if (chekcSolution(a, b, c) == true){
return true;
}
return false;
}
__device__ float4 HitTheBall(float3 e, float3 p,float3 cen,float R){
float4 rst;
rst.x = 0.0;
rst.y = 0.0;
rst.z = 0.0;
rst.w = 0.0;
float k;
float a = (p.x - e.x)*(p.x - e.x) + (p.y - e.y)*(p.y - e.y) + (p.z - e.z)*(p.z - e.z);
float b = 2 * ((p.x - e.x)*(e.x - cen.x) + (p.y - e.y)*(e.y - cen.y) + (p.z - e.z)*(e.z - cen.z));
float c = (e.x - cen.x)*(e.x - cen.x) + (e.y - cen.y)*(e.y - cen.y) + (e.z - cen.z)*(e.z - cen.z) - R*R;
//hit the ball
k = getSolution1(a, b, c);
rst.x = (p.x - e.x)*k + e.x;
rst.y = (p.y - e.y)*k + e.y;
rst.z = (p.z - e.z)*k + e.z;
rst.w = 6.0;
float3 L1 = make_float3((p.x - rst.x), (p.y - rst.y), (p.z - rst.z));
L1 = normalize(L1);
float3 N = make_float3((rst.x - cen.x), (rst.y - cen.y), (rst.z - cen.z));
N = normalize(N);
float3 L2 = make_float3(-2 * dot(L1, N)*N.x + L1.x, -2 * dot(L1, N)*N.y + L1.y, -2 * dot(L1, N)*N.z + L1.z);
//有所选择
if (HasTheBallFlection)return rayFromShpere(make_float3(rst.x, rst.y, rst.z), L2);
return rst;
}
__device__ float4 HitTheMirror(float3 e, float3 p, float3 cen){
float4 rst;
rst.x = 0.0;
rst.y = 0.0;
rst.z = 0.0;
rst.w = 7.0;
float k;
float x;
float y;
float z;
float R = 140;
if (HasTheBall&&IsHitTheBall(e, p, cen, R) == true){
return HitTheBall(e, p, cen, R);
}
float3 d = normalize(make_float3(p.x - e.x, p.y - e.y, p.z - e.z));
float3 t = p;
for (int i = 0; i < 200; i++){
t = make_float3(t.x + d.x * 5, t.y + d.y * 5, t.z + d.z * 5);
if (HasTheCube&&IsHitTheCube(t, make_float3(CubeX, CubeY, CubeZ), 200)){
return HitTheCube(t, d, make_float3(CubeX, CubeY, CubeZ), 200);
}
if (HasTheCy&&IsHitTheCylinder(t, make_float3(CyX, CyY, CyZ), 100, CyHeight)){
return HitTheCylinder(t, d, make_float3(CyX, CyY, CyZ), 100, CyHeight);
}
}
z = 0;
k = (z - e.z) / (p.z - e.z);
x = (p.x - e.x)*k + e.x;
y = (p.y - e.y)*k + e.y;
if (x >= 0 && x <= 1200 && y >= 0 && y <= 600){
rst.x = x;
rst.y = y;
rst.z = z;
rst.w = 7.0;
return rst;
}
x = 0;
k = (x - e.x) / (p.x - e.x);
y = (p.y - e.y)*k + e.y;
z = (p.z - e.z)*k + e.z;
if (y >= 0 && y <= 600 && z >= -600 && z <= 0){
rst.x = x;
rst.y = y;
rst.z = z;
rst.w = 1.0;
return rst;
}
x = 1200;
k = (x - e.x) / (p.x - e.x);
y = (p.y - e.y)*k + e.y;
z = (p.z - e.z)*k + e.z;
if (y >= 0 && y <= 600 && z >= -600 && z <= 0){
rst.x = x;
rst.y = y;
rst.z = z;
rst.w = 2.0;
return rst;
}
y = 0;
k = (y - e.y) / (p.y - e.y);
x = (p.x - e.x)*k + e.x;
z = (p.z - e.z)*k + e.z;
if (x >= 0 && x <= 1200 && z >= -600 && z <= 0){
rst.x = x;
rst.y = y;
rst.z = z;
rst.w = 3.0;
return rst;
}
y = 600;
k = (y - e.y) / (p.y - e.y);
x = (p.x - e.x)*k + e.x;
z = (p.z - e.z)*k + e.z;
if (x >= 0 && x <= 1200 && z >= -600 && z <= 0){
rst.x = x;
rst.y = y;
rst.z = z;
rst.w = 4.0;
return rst;
}
return rst;
}
__device__ float4 HitCurveMirror(float3 s, float3 d,float3 ball){
float4 rst;
rst.x = 0.0;
rst.y = 0.0;
rst.z = 0.0;
rst.w = 7.0;
float3 L1;
float3 N;
float3 L2;
float3 t = s;
d = normalize(d);
//hit poin
for (int i = 0; i < 200; i++){
t.x += d.x * 5;
t.y += d.y * 5;
t.z += d.z * 5;
if (t.y>500)return(make_float4(t.x, t.y, t.z, 4.0));
if ((t.x - 600)*(t.x - 600) + (t.z + 225)*(t.z + 225) >= 625 * 625 && (t.x - d.x * 5 - 600)*(t.x - d.x * 5 - 600) + (t.z - d.z * 5 + 225)*(t.z - d.z * 5 + 225) < 625 * 625){
L1 = make_float3(-d.x, -d.y, -d.z);
L1 = normalize(L1);
N = make_float3(600 - t.x, 0,-225 - t.z);
N = normalize(N);
L2 = make_float3(2 * dot(L1, N)*N.x - L1.x, 2 * dot(L1, N)*N.y - L1.y, 2 * dot(L1, N)*N.z - L1.z);
return HitTheMirror(t, make_float3(t.x + L2.x, t.y + L2.y, t.z + L2.z), ball);
break;
}
}
return rst;
}
__device__ float4 HitTheWall(float3 e,float3 p,float3 cen){
float4 rst;
rst.x = 0.0;
rst.y = 0.0;
rst.z = 0.0;
rst.w = 0.0;
float x;
float y;
float z;
float k;
z = -600;
k = (z - e.z) / (p.z - e.z);
x = (p.x - e.x)*k + e.x;
y = (p.y - e.y)*k + e.y;
if (x >= 0 && x <= 1200 && y >= 0 && y <= 600){
if (x >= 100 && x <= 1100 && y >= 100 && y <= 550){
if (HasTheMirror){
if (HasTheCurve){
return HitCurveMirror(make_float3(p.x, p.y, p.z), make_float3(p.x - e.x, p.y - e.y, p.z - e.z), make_float3(cen.x, cen.y, cen.z));
}
return HitTheMirror(make_float3(e.x,e.y,-1200-e.z), make_float3(x,y,z), cen);
}
if (!HasTheMirror){
rst.x = x;
rst.y = y;
rst.z = z;
rst.w = 5.0;
return rst;
}
}
else{
rst.x = x;
rst.y = y;
rst.z = z;
rst.w = 5.0;
return rst;
}
}
x = 0;
k = (x - e.x) / (p.x - e.x);
y = (p.y - e.y)*k + e.y;
z = (p.z - e.z)*k + e.z;
if (y >= 0 && y <= 600 && z >= -600 && z <= 0){
rst.x = x;
rst.y = y;
rst.z = z;
rst.w = 1.0;
return rst;
}
x = 1200;
k = (x - e.x) / (p.x - e.x);
y = (p.y - e.y)*k + e.y;
z = (p.z - e.z)*k + e.z;
if (y >= 0 && y <= 600 && z >= -600 && z <= 0){
rst.x = x;
rst.y = y;
rst.z = z;
rst.w = 2.0;
return rst;
}
y = 0;
k = (y - e.y) / (p.y - e.y);
x = (p.x - e.x)*k + e.x;
z = (p.z - e.z)*k + e.z;
if (x >= 0 && x <= 1200 && z >= -600 && z <= 0){
rst.x = x;
rst.y = y;
rst.z = z;
rst.w = 3.0;
return rst;
}
y = 600;
k = (y - e.y) / (p.y - e.y);
x = (p.x - e.x)*k + e.x;
z = (p.z - e.z)*k + e.z;
if (x >= 0 && x <= 1200 && z >= -600 && z <= 0){
rst.x = x;
rst.y = y;
rst.z = z;
rst.w = 4.0;
if ((x - 600)*(x - 600) + (z + 300)*(z + 300)<100 * 100)rst.w = 8.0;
return rst;
}
return rst;
}
__device__ float4 getHitPoint(float3 e, float3 p, float3 cen){
//hit the ball
float R = 140;
if (IsHitTheBall(e, p, cen, R) == true && HasTheBall==true){
return HitTheBall(e, p, cen, R);
}
//hit the cube and the cylinder
float3 d = normalize(make_float3(p.x-e.x,p.y-e.y,p.z-e.z));
float3 t = p;
for (int i = 0; i < 100; i++){
t = make_float3(t.x + d.x * 5, t.y + d.y * 5, t.z + d.z * 5);
if (HasTheCube&&IsHitTheCube(t, make_float3(CubeX, CubeY, CubeZ), 200)){
return HitTheCube(t, d, make_float3(CubeX, CubeY, CubeZ), 200);
}
if (HasTheCy&&IsHitTheCylinder(t, make_float3(CyX, CyY, CyZ), 100, CyHeight)){
return HitTheCylinder(t, d, make_float3(CyX, CyY, CyZ), 100, CyHeight);
}
}
//hit the wall
return HitTheWall(e, p, cen);
}
__device__ float3 getNormal(float4 p,float cx,float cy,float cz){
float3 N;
if (p.w != 0.0){
if (p.w == 6.0){
N = make_float3(p.x - cx, p.y - cy, p.z - cz);
}
if (p.w == 5.0){
N = make_float3(0, 0, 1);
}
if (p.w == 1.0){
N = make_float3(1, 0, 0);
}
if (p.w == 2.0){
N = make_float3(-1, 0, 0);
}
if (p.w == 3.0){
N = make_float3(0, 1, 0);
}
if (p.w == 4.0){
N = make_float3(0, -1, 0);
}
if (p.w == 7.0){
N = make_float3(0, 0, -1);
}
if (p.w == 9.0){
N = make_float3(p.x-800,0,p.z+300);
}
}
N = normalize(N);
return N;
}
__device__ float4 getColor(float4 p,float3 n,float ex,float ey,float ez){
float dist = (p.x - ex)*(p.x - ex) + (p.y - ey)*(p.y - ey) + (p.z - ez)*(p.z - ez);
dist /= 1200000;
if (dist < 1)dist = 1;
//翻译成cuda dyD,dyS,dyA 整成参数
float4 kd = make_float4(0.5, 0.5, 0.5, 1.0);
float4 ks = make_float4(0.0, 0.0, 0.1, 1.0);
float4 ka = make_float4(0.1, 0.1, 0.1, 1.0);
float4 dyDiffuse = make_float4(1.0, 1.0, 1.0, 1.0);
float4 dySpecular = make_float4(0.5, 0.5, 0.5, 1.0);
float4 dyAmbient = make_float4(0.2, 0.2, 0.2, 1.0);
if (p.w == 6.0){//the ball
kd = make_float4(0.5, 0.5, 0.9, 1.0);
ks = make_float4(0.0, 0.0, 0.0, 1.0);
ka = make_float4(0.5, 0.5, 0.5, 1.0);
}
if (p.w == 5.0){//back wall
kd = make_float4(0.0, 0.6, 0.0, 1.0);
ks = make_float4(0.9, 0.0, 0.0, 1.0);
ka = make_float4(0.05, 0.0, 0.0, 1.0);
}
if (p.w == 1.0){//left wall
kd = make_float4(0.5, 0.0, 0.0, 1.0);
ks = make_float4(0.1, 0.0, 0.0, 1.0);
ka = make_float4(0.9, 0.9, 0.1, 1.0);
}
if (p.w == 2.0){//right wall
kd = make_float4(0.0, 0.0, 0.5, 1.0);
ks = make_float4(0.1, 0.0, 0.0, 1.0);
ka = make_float4(0.9, 0.9, 0.1, 1.0);
}
if (p.w == 3.0){//floor
kd = make_float4(0.0, 0.5, 0.5, 1.0);
ks = make_float4(1.0, 1.0, 1.0, 1.0);
ka = make_float4(0.9, 0.9, 0.1, 1.0);
}
if (p.w == 4.0){//ceil
kd = make_float4(0.0, 0.5, 0.5, 1.0);
ks = make_float4(1.0, 1.0, 1.0, 1.0);
ka = make_float4(0.9, 0.9, 0.1, 1.0);
}
if (p.w == 7.0){//front wall
kd = make_float4(0.5, 0.0, 0.7, 1.0);
ks = make_float4(0.4, 0.4, 0.4, 1.0);
ka = make_float4(0.4, 0.4, 0.4, 1.0);
}
if (p.w == 9.0){
kd = make_float4(0.0, 1.0, 1.0, 1.0);
ks = make_float4(0.4, 0.4, 0.4, 1.0);
ka = make_float4(0.4, 0.4, 0.4, 1.0);
}
float3 V = normalize(make_float3(ex - p.x, ey - p.y, ez - p.z));
float3 L = normalize(make_float3(600 - p.x, 600 - p.y, -300 - p.z));
float3 H = normalize(make_float3(V.x + L.x, V.y + L.y, V.z + L.z));
float4 ambient1 = make_float4(ka.x*dyAmbient.x , ka.y*dyAmbient.y , ka.z*dyAmbient.z , ka.w*dyAmbient.w );
float max1 = bigger(dot(n, L), 0.0f);
float4 diffuse1 = make_float4(kd.x*max1*dyDiffuse.x / dist, kd.y*max1*dyDiffuse.y / dist, kd.z*max1*dyDiffuse.z / dist, kd.w*max1*dyDiffuse.w / dist);
float max2 = powf(bigger(dot(n, H), 0.0f),10.0f);
float4 specular1 = make_float4(ks.x*max2*dySpecular.x,ks.y*max2*dySpecular.y, ks.z*max2*dySpecular.z, ks.w*max2*dySpecular.w);
if(dot(n,L)<0) specular1 =make_float4(0.0,0.0,0.0,0.0);
float4 color1 = make_float4(ambient1.x + diffuse1.x+specular1.x,
ambient1.y + diffuse1.y + specular1.y,
ambient1.z + diffuse1.z + specular1.z,
ambient1.w + diffuse1.w + specular1.w);
if (p.w == 8.0){
color1 = make_float4(1.0, 1.0, 0.0, 1.0);
}
return color1;
}
__device__ bool shadowRay(float3 s, float3 e, float3 center, float R){
int divide = 100;
float divX = (e.x - s.x) / divide;
float divY = (e.y - s.y) / divide;
float divZ = (e.z - s.z) / divide;
float3 t = s;
for (int i = 0; i < divide; i++){
t.x += divX;
t.y += divY;
t.z += divZ;
if (HasTheBall&&((t.x - divX - center.x)*(t.x - divX - center.x) + (t.y - divY - center.y)*(t.y - divY - center.y) + (t.z - divZ - center.z)*(t.z - divZ - center.z) > R*R) && ((t.x - center.x)*(t.x - center.x) + (t.y - center.y)*(t.y - center.y) + (t.z - center.z)*(t.z - center.z) <= R*R)){
return true;
break;
}
if (HasTheCube&&IsHitTheCube(t, make_float3(CubeX, CubeY, CubeZ), 200) == true){
return true;
break;
}
if (HasTheCy&&IsHitTheCylinder(t, make_float3(CyX, CyY, CyZ), 100, CyHeight)){
return true;
break;
}
}
return false;
}
//global
__global__ void computeSingleRay(char* tex){
//vec4 temp = getHitPoint(Ex, Ey, Ez, vPosition.x, vPosition.y, vPosition.z);
//width height 应该是参数
//position=thread.x
//线程代表着位置
//int j = threadIdx.x;
//int i = blockIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int j = blockIdx.y*blockDim.y + threadIdx.y;
float Ex = 600;
float Ey = 300;
float Ez = 800;
float Cx = 200;
float Cy = 300;
float Cz = -350;
float3 E = make_float3(Ex, Ey, Ez);
float3 P = make_float3(i, j, 0);
float3 C = make_float3(Cx,Cy, Cz);
//float Cx = 200;
//计算出hit 的位置 float4
float4 position = getHitPoint(E,P,C);
//根据位置算出normal
float3 normal = getNormal(position,Cx,Cy,Cz);
//由normal算出颜色 vec4
float4 color = getColor(position, normal,Ex,Ey,Ez);
float3 p = make_float3(position.x, position.y, position.z);
float3 e = make_float3(600, 600, -300);
float3 c = make_float3(Cx, Cy, Cz);
if (HasTheShadow&&shadowRay(p, e, c, 140) && position.w != 6.0)color = make_float4(color.x*0.2, color.y*0.2, color.z*0.2, 1);
tex[j * 1200 * 3 + i * 3] = color.x*255;
tex[j * 1200 * 3 + i * 3 + 1] = color.y*255;
tex[j * 1200 * 3 + i * 3 + 2] = color.z*255;
}
//1200*600 size, Ex Ey Ez
void computeRays(int width,int height,char *tex){
char * dev_Tex;
cudaMalloc((char**)&dev_Tex, 3 * width * height * sizeof(char));
dim3 block(8, 8, 1);
dim3 grid(width/ block.x, height / block.y, 1);
computeSingleRay << <grid, block >> >(dev_Tex);
cudaMemcpy(tex, dev_Tex, 3 * width * height * sizeof(char), cudaMemcpyDeviceToHost);
cudaFree(dev_Tex);
}
|
73594aac3aa02b4beb74a54a7a9187929e158750.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/CUDAGenerator.h>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/hip/detail/TensorInfo.cuh>
#include <c10/macros/Macros.h>
#include <hiprand/hiprand_kernel.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/MemoryAccess.cuh>
#include <THH/THHGeneral.h>
namespace at{
namespace native{
namespace {
// philox generates 128 bits of randomness at a time. Kernel uses this explicitly by putting suitably transformed result into float4
// for all members of float4 to be consumed UNROLL has to be 4. Don't change!
// Note: VEC <= 4 (and in most real-world cases will be 4), so same logic applies.
const int UNROLL = 4;
template <
typename scalar_t,
typename accscalar_t,
typename IndexType,
int ADims,
int VEC>
#if __CUDA_ARCH__ >= 350
C10_LAUNCH_BOUNDS_2(256, 8)
#elif defined (__HIP_PLATFORM_HCC__)
C10_LAUNCH_BOUNDS_2(256, 4)
#endif
__global__ void
fused_dropout_kernel_vec(at::cuda::detail::TensorInfo<scalar_t, IndexType> a,
at::cuda::detail::TensorInfo<scalar_t, IndexType> b,
at::cuda::detail::TensorInfo<uint8_t, IndexType> c,
IndexType totalElements, accscalar_t p, std::pair<uint64_t, uint64_t> seeds
) {
// make sure we don't break assumption that we can't have > 4 elements / thread
static_assert(VEC <= 4, "Value of VEC must be in [2, 4]");
using LoadT = memory::aligned_vector<scalar_t, VEC>;
using MaskLoadT = memory::aligned_vector<uint8_t, VEC>;
accscalar_t pinv = accscalar_t(1)/p;
IndexType idx = blockIdx.x * blockDim.x + threadIdx.x;
hiprandStatePhilox4_32_10_t state;
hiprand_init(
seeds.first,
idx,
seeds.second,
&state);
// Note: Vectorized loads means we'll stride each thread by an additional VEC factor, as we'll load VEC elements at a time
for (IndexType linearIndex = idx * VEC;
linearIndex < totalElements;
linearIndex += gridDim.x * blockDim.x * VEC) {
// local storage
scalar_t src[VEC];
// We'll use this to actually cause vectorized loads later
LoadT *value = reinterpret_cast<LoadT*>(&src);
//hiprand_uniform_double was pure evil anyway, not doing what it promises, and there's nothing for halfs, so generate float for everything
// Note: need a new set of random values per 4 elements -- we'll handle VEC elements in this thread, so need ceil(VEC / 4)
// sets of rand.
float4 rand = hiprand_uniform4(&state);
rand.x = rand.x < p;
rand.y = rand.y < p;
rand.z = rand.z < p;
rand.w = rand.w < p;
// Note: We explicitly check for is_contiguous() before launching the vectorized kernel
// and replace IndexToOffset call with linearIndex to allow vectorization of NHWC (or other)
// ordering.
// Single vectorized load
*value = *reinterpret_cast<LoadT*>(&a.data[linearIndex]);
scalar_t r[VEC];
uint8_t mask[VEC];
// Perform the actual computation
#pragma unroll
for (int ii = 0; ii < VEC; ii++) {
r[ii] = src[ii]*(&rand.x)[ii]*pinv;
mask[ii] = (uint8_t)(&rand.x)[ii];
}
// Vectorized writes for both mask & result
*(reinterpret_cast<LoadT*>(&b.data[linearIndex])) = *reinterpret_cast<LoadT*>(&r[0]);
*(reinterpret_cast<MaskLoadT*>(&c.data[linearIndex])) = *reinterpret_cast<MaskLoadT*>(&mask[0]);
__syncthreads();
}
}
template <
typename scalar_t,
typename accscalar_t,
typename IndexType,
int ADims>
#if __CUDA_ARCH__ >= 350
C10_LAUNCH_BOUNDS_2(256, 8)
#elif defined (__HIP_PLATFORM_HCC__)
C10_LAUNCH_BOUNDS_2(256, 4)
#endif
__global__ void
fused_dropout_kernel(cuda::detail::TensorInfo<scalar_t, IndexType> a,
cuda::detail::TensorInfo<scalar_t, IndexType> b,
cuda::detail::TensorInfo<uint8_t, IndexType> c,
IndexType totalElements, accscalar_t p, std::pair<uint64_t, uint64_t> seeds
) {
accscalar_t pinv = accscalar_t(1)/p;
IndexType idx = blockIdx.x * blockDim.x + threadIdx.x;
hiprandStatePhilox4_32_10_t state;
hiprand_init(
seeds.first,
idx,
seeds.second,
&state);
IndexType rounded_size = ((totalElements - 1)/(blockDim.x * gridDim.x * UNROLL)+1) *
blockDim.x * gridDim.x * UNROLL;
for (IndexType linearIndex = idx;
linearIndex < rounded_size;
linearIndex += gridDim.x * blockDim.x*UNROLL) {
//hiprand_uniform_double was pure evil anyway, not doing what it promises, and there's nothing for halfs, so generate float for everything
float4 rand = hiprand_uniform4(&state);
scalar_t src[UNROLL];
rand.x = rand.x < p;
rand.y = rand.y < p;
rand.z = rand.z < p;
rand.w = rand.w < p;
for (int ii = 0; ii < UNROLL; ii++) {
IndexType li = linearIndex + blockDim.x * gridDim.x * ii;
if (li < totalElements) {
// Convert `linearIndex` into an offset of `a`
const IndexType aOffset =
cuda::detail::IndexToOffset<scalar_t, IndexType, ADims>::get(li, a);
src[ii] = a.data[aOffset];
}
}
for (int ii = 0; ii < UNROLL; ii++) {
IndexType li = linearIndex + blockDim.x * gridDim.x * ii;
if (li < totalElements) {
// Convert `linearIndex` into an offset of `b`
const IndexType bOffset =
cuda::detail::IndexToOffset<scalar_t, IndexType, 1>::get(li, b);
b.data[bOffset] = src[ii]*(&rand.x)[ii]*pinv;
c.data[bOffset] = (uint8_t)(&rand.x)[ii];
}
}
__syncthreads();
}
}
template<typename scalar_t, typename accscalar_t>
void masked_scale_kernel(at::Tensor& ret, const at::Tensor src, const at::Tensor mask, accscalar_t scale){
auto iter = at::TensorIterator();
iter.add_output(ret);
iter.add_input(src);
iter.add_input(mask);
iter.dont_compute_common_dtype();
iter.build();
at::native::gpu_kernel(
iter,
[=]GPU_LAMBDA(const scalar_t src_val, const uint8_t mask_val) -> scalar_t {
return (float)mask_val * src_val * scale;
});
}
template <typename scalar_t>
int get_vector_size(at::Tensor self, at::Tensor ret, at::Tensor mask) {
int vec_size = 4;
// get the vector size
auto memory_format = self.suggest_memory_format();
if (!self.is_contiguous(memory_format) || !ret.is_contiguous(memory_format) || !mask.is_contiguous(memory_format)) {
vec_size = 1;
} else {
vec_size = memory::can_vectorize_up_to<scalar_t>((char*)self.data_ptr());
}
// check that we'd have no remainders - prefer a smaller vector size with no remainders over a larger vector and remainder.
bool can_vectorize = true;
do {
can_vectorize = self.numel() % vec_size == 0 && ret.numel() % vec_size == 0 && mask.numel() % vec_size == 0;
if (!can_vectorize) vec_size /= 2;
} while (vec_size > 1 && !can_vectorize);
return can_vectorize ? vec_size : 1;
}
} //anonymous namespace
std::tuple<Tensor,Tensor>
fused_dropout_cuda(const Tensor& self, double p, Generator * gen_){
auto gen = get_generator_or_default<CUDAGenerator>(gen_, cuda::detail::getDefaultCUDAGenerator());
Tensor ret = at::empty_like(self, self.suggest_memory_format());
Tensor mask = at::empty(self.sizes(), self.options().dtype(kByte), self.suggest_memory_format());
const int64_t nelem = self.numel();
//empty tensors should not get here, but just in case, avoid FPE
if (nelem==0) return std::tuple<Tensor,Tensor>(self, mask);
const int64_t block_size = 256;
unsigned int blocks_per_sm = at::cuda::getCurrentDeviceProperties()->maxThreadsPerMultiProcessor/block_size;
dim3 dim_block(block_size);
dim3 grid((nelem + block_size -1)/block_size);
grid.x = ::min((unsigned int)at::cuda::getCurrentDeviceProperties()->multiProcessorCount * blocks_per_sm, grid.x);
//number of times random will be generated per thread, to offset philox counter in thc random state
int64_t counter_offset = ((nelem - 1)/(block_size*grid.x*UNROLL)+1)*UNROLL;
std::pair<uint64_t, uint64_t> rng_engine_inputs;
{
// See Note [Acquire lock when using random generators]
std::lock_guard<std::mutex> lock(gen->mutex_);
rng_engine_inputs = gen->philox_engine_inputs(counter_offset);
}
if (cuda::detail::canUse32BitIndexMath(self)){
AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.scalar_type(), "fused_dropout", [&] {
using accscalar_t = acc_type<scalar_t, true>;
accscalar_t pa = (accscalar_t)(p);
auto self_info = cuda::detail::getTensorInfo<scalar_t, unsigned int>(self);
auto ret_info = cuda::detail::getTensorInfo<scalar_t, unsigned int>(ret);
auto mask_info = cuda::detail::getTensorInfo<uint8_t, unsigned int>(mask);
self_info.collapseDims();
ret_info.collapseDims();
mask_info.collapseDims(); //ret and mask are collapsed to 1d contiguous tensor
int vec_size = get_vector_size<scalar_t>(self, ret, mask);
if (vec_size > 1) {
switch (vec_size) {
case 4:
hipLaunchKernelGGL(( fused_dropout_kernel_vec<scalar_t, accscalar_t, unsigned int, 1, 4>), dim3(grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), self_info, ret_info, mask_info, nelem, pa, rng_engine_inputs);
break;
case 2:
hipLaunchKernelGGL(( fused_dropout_kernel_vec<scalar_t, accscalar_t, unsigned int, 1, 2>), dim3(grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), self_info, ret_info, mask_info, nelem, pa, rng_engine_inputs);
break;
}
} else {
switch (self_info.dims) {
case 1:
hipLaunchKernelGGL(( fused_dropout_kernel<scalar_t, accscalar_t, unsigned int, 1>), dim3(grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), self_info, ret_info, mask_info, nelem, pa, rng_engine_inputs);
break;
default:
hipLaunchKernelGGL(( fused_dropout_kernel<scalar_t, accscalar_t, unsigned int, -1>), dim3(grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), self_info, ret_info, mask_info, nelem, pa, rng_engine_inputs);
}
}
});
} else {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.scalar_type(), "fused_dropout", [&] {
using accscalar_t = acc_type<scalar_t, true>;
accscalar_t pa = (accscalar_t)(p);
auto self_info = cuda::detail::getTensorInfo<scalar_t, uint64_t>(self);
auto ret_info = cuda::detail::getTensorInfo<scalar_t, uint64_t>(ret);
auto mask_info = cuda::detail::getTensorInfo<uint8_t, uint64_t>(mask);
self_info.collapseDims();
ret_info.collapseDims();
mask_info.collapseDims(); //ret and mask are collapsed to 1d contiguous tensor
int vec_size = get_vector_size<scalar_t>(self, ret, mask);
if (vec_size > 1) {
switch (vec_size) {
case 4:
hipLaunchKernelGGL(( fused_dropout_kernel_vec<scalar_t, accscalar_t, uint64_t, 1, 4>), dim3(grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), self_info, ret_info, mask_info, nelem, pa, rng_engine_inputs);
break;
case 2:
hipLaunchKernelGGL(( fused_dropout_kernel_vec<scalar_t, accscalar_t, uint64_t, 1, 2>), dim3(grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), self_info, ret_info, mask_info, nelem, pa, rng_engine_inputs);
break;
}
} else {
switch (self_info.dims) {
case 1:
hipLaunchKernelGGL(( fused_dropout_kernel<scalar_t, accscalar_t, uint64_t, 1>), dim3(grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), self_info, ret_info, mask_info, nelem, pa, rng_engine_inputs);
break;
default:
hipLaunchKernelGGL(( fused_dropout_kernel<scalar_t, accscalar_t, uint64_t, -1>), dim3(grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), self_info, ret_info, mask_info, nelem, pa, rng_engine_inputs);
}
}
});
}
THCudaCheck(hipGetLastError());
return std::tuple<Tensor,Tensor>(ret, mask);
}
Tensor masked_scale_cuda(const Tensor& self, const Tensor& mask, double scale){
Tensor ret = at::empty_like(self, self.suggest_memory_format());
TORCH_CHECK(mask.scalar_type() == at::ScalarType::Byte, "mask should be torch.uint8 dtype");
AT_DISPATCH_FLOATING_TYPES_AND_HALF(ret.scalar_type(), "masked_scale", [&] {
using accscalar_t = acc_type<scalar_t, true>;
accscalar_t pa = (accscalar_t)(scale);
masked_scale_kernel<scalar_t>(ret, self, mask, pa);
});
return ret;
}
}
}
| 73594aac3aa02b4beb74a54a7a9187929e158750.cu | #include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/CUDAGenerator.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/cuda/detail/TensorInfo.cuh>
#include <c10/macros/Macros.h>
#include <curand_kernel.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/MemoryAccess.cuh>
#include <THC/THCGeneral.h>
namespace at{
namespace native{
namespace {
// philox generates 128 bits of randomness at a time. Kernel uses this explicitly by putting suitably transformed result into float4
// for all members of float4 to be consumed UNROLL has to be 4. Don't change!
// Note: VEC <= 4 (and in most real-world cases will be 4), so same logic applies.
const int UNROLL = 4;
template <
typename scalar_t,
typename accscalar_t,
typename IndexType,
int ADims,
int VEC>
#if __CUDA_ARCH__ >= 350
C10_LAUNCH_BOUNDS_2(256, 8)
#elif defined (__HIP_PLATFORM_HCC__)
C10_LAUNCH_BOUNDS_2(256, 4)
#endif
// Vectorized fused dropout kernel: each thread loads VEC contiguous elements of
// `a` with one aligned vector load, draws uniform randoms from a per-thread
// Philox generator, and writes the rescaled surviving values to `b` and the
// 0/1 keep-mask bytes to `c` with vector stores.
// NOTE(review): the template header (scalar_t, accscalar_t, IndexType, VEC)
// precedes this chunk.  `p` is the keep probability; `seeds` is the Philox
// (seed, offset) pair produced by the host-side generator.
__global__ void
fused_dropout_kernel_vec(at::cuda::detail::TensorInfo<scalar_t, IndexType> a,
                         at::cuda::detail::TensorInfo<scalar_t, IndexType> b,
                         at::cuda::detail::TensorInfo<uint8_t, IndexType> c,
                         IndexType totalElements, accscalar_t p, std::pair<uint64_t, uint64_t> seeds
                         ) {
  // make sure we don't break assumption that we can't have > 4 elements / thread
  static_assert(VEC <= 4, "Value of VEC must be in [2, 4]");
  // Aligned-vector aliases so the compiler emits single wide loads/stores.
  using LoadT = memory::aligned_vector<scalar_t, VEC>;
  using MaskLoadT = memory::aligned_vector<uint8_t, VEC>;
  // 1/p rescale factor: kept values are scaled up so E[output] == input
  // (inverted dropout).
  accscalar_t pinv = accscalar_t(1)/p;
  IndexType idx = blockIdx.x * blockDim.x + threadIdx.x;
  // Per-thread Philox state: shared seed, subsequence = global thread id,
  // offset supplied by the host so successive calls don't reuse randoms.
  curandStatePhilox4_32_10_t state;
  curand_init(
      seeds.first,
      idx,
      seeds.second,
      &state);
  // Note: Vectorized loads means we'll stride each thread by an additional VEC factor, as we'll load VEC elements at a time
  for (IndexType linearIndex = idx * VEC;
       linearIndex < totalElements;
       linearIndex += gridDim.x * blockDim.x * VEC) {
    // local storage
    scalar_t src[VEC];
    // We'll use this to actually cause vectorized loads later
    LoadT *value = reinterpret_cast<LoadT*>(&src);
    //curand_uniform_double was pure evil anyway, not doing what it promises, and there's nothing for halfs, so generate float for everything
    // Note: need a new set of random values per 4 elements -- we'll handle VEC elements in this thread, so need ceil(VEC / 4)
    // sets of rand.
    float4 rand = curand_uniform4(&state);
    // Turn each uniform into a 0.0/1.0 keep flag (kept when rand < p).
    rand.x = rand.x < p;
    rand.y = rand.y < p;
    rand.z = rand.z < p;
    rand.w = rand.w < p;
    // Note: We explicitly check for is_contiguous() before launching the vectorized kernel
    // and replace IndexToOffset call with linearIndex to allow vectorization of NHWC (or other)
    // ordering.
    // Single vectorized load
    *value = *reinterpret_cast<LoadT*>(&a.data[linearIndex]);
    scalar_t r[VEC];
    uint8_t mask[VEC];
    // Perform the actual computation
    #pragma unroll
    for (int ii = 0; ii < VEC; ii++) {
      // (&rand.x)[ii] indexes the float4 fields x,y,z,w as an array.
      r[ii] = src[ii]*(&rand.x)[ii]*pinv;
      mask[ii] = (uint8_t)(&rand.x)[ii];
    }
    // Vectorized writes for both mask & result
    *(reinterpret_cast<LoadT*>(&b.data[linearIndex])) = *reinterpret_cast<LoadT*>(&r[0]);
    *(reinterpret_cast<MaskLoadT*>(&c.data[linearIndex])) = *reinterpret_cast<MaskLoadT*>(&mask[0]);
    // NOTE(review): this barrier sits inside a loop whose trip count can differ
    // between threads of a block; it is kept as-is from upstream — confirm
    // intent before relying on it.
    __syncthreads();
  }
}
// Scalar (non-vectorized) fused dropout kernel.  Handles arbitrary (possibly
// non-contiguous) input `a` via IndexToOffset; outputs `b` (scaled values) and
// `c` (0/1 keep mask) are contiguous, collapsed to 1-D by the caller.
// `p` is the keep probability; `seeds` is the Philox (seed, offset) pair.
template <
    typename scalar_t,
    typename accscalar_t,
    typename IndexType,
    int ADims>
#if __CUDA_ARCH__ >= 350
C10_LAUNCH_BOUNDS_2(256, 8)
#elif defined (__HIP_PLATFORM_HCC__)
C10_LAUNCH_BOUNDS_2(256, 4)
#endif
__global__ void
fused_dropout_kernel(cuda::detail::TensorInfo<scalar_t, IndexType> a,
                     cuda::detail::TensorInfo<scalar_t, IndexType> b,
                     cuda::detail::TensorInfo<uint8_t, IndexType> c,
                     IndexType totalElements, accscalar_t p, std::pair<uint64_t, uint64_t> seeds
                     ) {
  // 1/p rescale factor for kept values (inverted dropout).
  accscalar_t pinv = accscalar_t(1)/p;
  IndexType idx = blockIdx.x * blockDim.x + threadIdx.x;
  // Per-thread Philox state: shared seed, subsequence = thread id.
  curandStatePhilox4_32_10_t state;
  curand_init(
      seeds.first,
      idx,
      seeds.second,
      &state);
  // Round the iteration domain up to a whole number of grid-wide UNROLL
  // strides so every thread makes the same number of curand_uniform4 calls;
  // this keeps the host-side Philox counter_offset accounting exact.
  IndexType rounded_size = ((totalElements - 1)/(blockDim.x * gridDim.x * UNROLL)+1) *
      blockDim.x * gridDim.x * UNROLL;
  for (IndexType linearIndex = idx;
       linearIndex < rounded_size;
       linearIndex += gridDim.x * blockDim.x*UNROLL) {
    //curand_uniform_double was pure evil anyway, not doing what it promises, and there's nothing for halfs, so generate float for everything
    float4 rand = curand_uniform4(&state);
    scalar_t src[UNROLL];
    // Turn each uniform into a 0.0/1.0 keep flag (kept when rand < p).
    rand.x = rand.x < p;
    rand.y = rand.y < p;
    rand.z = rand.z < p;
    rand.w = rand.w < p;
    // First pass: gather up to UNROLL inputs, strided by the full grid size.
    for (int ii = 0; ii < UNROLL; ii++) {
      IndexType li = linearIndex + blockDim.x * gridDim.x * ii;
      if (li < totalElements) {
        // Convert `linearIndex` into an offset of `a`
        const IndexType aOffset =
            cuda::detail::IndexToOffset<scalar_t, IndexType, ADims>::get(li, a);
        src[ii] = a.data[aOffset];
      }
    }
    // Second pass: write scaled outputs and the mask.  b and c share the same
    // collapsed 1-D layout, so bOffset addresses both.
    for (int ii = 0; ii < UNROLL; ii++) {
      IndexType li = linearIndex + blockDim.x * gridDim.x * ii;
      if (li < totalElements) {
        // Convert `linearIndex` into an offset of `b`
        const IndexType bOffset =
            cuda::detail::IndexToOffset<scalar_t, IndexType, 1>::get(li, b);
        b.data[bOffset] = src[ii]*(&rand.x)[ii]*pinv;
        c.data[bOffset] = (uint8_t)(&rand.x)[ii];
      }
    }
    // NOTE(review): barrier inside the grid-stride loop, kept from upstream —
    // all threads reach the same iteration count because the loop bound is
    // rounded_size (uniform), so this is reached uniformly.
    __syncthreads();
  }
}
// Elementwise ret = src * mask * scale, where `mask` holds uint8 0/1 keep
// flags (dropout backward helper).  Common-dtype computation is disabled so
// src and mask keep their own dtypes inside the lambda.
template<typename scalar_t, typename accscalar_t>
void masked_scale_kernel(at::Tensor& ret, const at::Tensor src, const at::Tensor mask, accscalar_t scale){
  auto config = at::TensorIterator();
  config.add_output(ret);
  config.add_input(src);
  config.add_input(mask);
  config.dont_compute_common_dtype();
  config.build();
  // Promote through float so half inputs multiply without precision surprises.
  auto apply_mask = [=]GPU_LAMBDA(const scalar_t src_val, const uint8_t mask_val) -> scalar_t {
    return (float)mask_val * src_val * scale;
  };
  at::native::gpu_kernel(config, apply_mask);
}
// Choose the widest vector width (4, 2, or 1) usable by the vectorized
// dropout kernel.  Requirements: all three tensors share a contiguous layout,
// the input pointer is sufficiently aligned, and every tensor's element count
// divides evenly by the width — a smaller width with no remainder is preferred
// over a larger width that would leave a tail.
template <typename scalar_t>
int get_vector_size(at::Tensor self, at::Tensor ret, at::Tensor mask) {
  auto memory_format = self.suggest_memory_format();
  bool layout_ok = self.is_contiguous(memory_format) && ret.is_contiguous(memory_format) && mask.is_contiguous(memory_format);
  // Alignment of the data pointer bounds the width; non-contiguous layouts
  // cannot be vectorized at all.
  int candidate = layout_ok ? memory::can_vectorize_up_to<scalar_t>((char*)self.data_ptr()) : 1;
  // Halve until every element count is an exact multiple of the width.
  while (candidate > 1) {
    if (self.numel() % candidate == 0 && ret.numel() % candidate == 0 && mask.numel() % candidate == 0)
      return candidate;
    candidate /= 2;
  }
  return 1;
}
} //anonymous namespace
// Host entry point for fused dropout.  Returns (output, mask): output holds
// self * bernoulli(p) / p, mask holds the uint8 0/1 keep flags.  Dispatches to
// the vectorized kernel when layout/alignment allow, otherwise to the scalar
// kernel, with 32-bit indexing when the tensor is small enough.
std::tuple<Tensor,Tensor>
fused_dropout_cuda(const Tensor& self, double p, Generator * gen_){
  auto gen = get_generator_or_default<CUDAGenerator>(gen_, cuda::detail::getDefaultCUDAGenerator());
  Tensor ret = at::empty_like(self, self.suggest_memory_format());
  Tensor mask = at::empty(self.sizes(), self.options().dtype(kByte), self.suggest_memory_format());
  const int64_t nelem = self.numel();
  //empty tensors should not get here, but just in case, avoid FPE
  // (note: the empty-input path returns `self`, not `ret`, as the output)
  if (nelem==0) return std::tuple<Tensor,Tensor>(self, mask);
  const int64_t block_size = 256;
  unsigned int blocks_per_sm = at::cuda::getCurrentDeviceProperties()->maxThreadsPerMultiProcessor/block_size;
  dim3 dim_block(block_size);
  dim3 grid((nelem + block_size -1)/block_size);
  // Cap the grid at full-device residency; the kernels use grid-stride loops,
  // so a bounded grid still covers all elements.
  grid.x = std::min((unsigned int)at::cuda::getCurrentDeviceProperties()->multiProcessorCount * blocks_per_sm, grid.x);
  //number of times random will be generated per thread, to offset philox counter in thc random state
  int64_t counter_offset = ((nelem - 1)/(block_size*grid.x*UNROLL)+1)*UNROLL;
  std::pair<uint64_t, uint64_t> rng_engine_inputs;
  {
    // See Note [Acquire lock when using random generators]
    std::lock_guard<std::mutex> lock(gen->mutex_);
    rng_engine_inputs = gen->philox_engine_inputs(counter_offset);
  }
  if (cuda::detail::canUse32BitIndexMath(self)){
    // 32-bit indexing path: cheaper index arithmetic on device.
    AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.scalar_type(), "fused_dropout", [&] {
      using accscalar_t = acc_type<scalar_t, true>;
      accscalar_t pa = (accscalar_t)(p);
      auto self_info = cuda::detail::getTensorInfo<scalar_t, unsigned int>(self);
      auto ret_info = cuda::detail::getTensorInfo<scalar_t, unsigned int>(ret);
      auto mask_info = cuda::detail::getTensorInfo<uint8_t, unsigned int>(mask);
      self_info.collapseDims();
      ret_info.collapseDims();
      mask_info.collapseDims(); //ret and mask are collapsed to 1d contiguous tensor
      int vec_size = get_vector_size<scalar_t>(self, ret, mask);
      if (vec_size > 1) {
        // Vectorized path: contiguous + aligned, element counts divisible.
        switch (vec_size) {
          case 4:
            fused_dropout_kernel_vec<scalar_t, accscalar_t, unsigned int, 1, 4><<<grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(self_info, ret_info, mask_info, nelem, pa, rng_engine_inputs);
            break;
          case 2:
            fused_dropout_kernel_vec<scalar_t, accscalar_t, unsigned int, 1, 2><<<grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(self_info, ret_info, mask_info, nelem, pa, rng_engine_inputs);
            break;
        }
      } else {
        // Scalar path; specialize on collapsed dimensionality of the input.
        switch (self_info.dims) {
          case 1:
            fused_dropout_kernel<scalar_t, accscalar_t, unsigned int, 1><<<grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(self_info, ret_info, mask_info, nelem, pa, rng_engine_inputs);
            break;
          default:
            fused_dropout_kernel<scalar_t, accscalar_t, unsigned int, -1><<<grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(self_info, ret_info, mask_info, nelem, pa, rng_engine_inputs);
        }
      }
    });
  } else {
    // 64-bit indexing path: same dispatch structure with uint64_t indices.
    AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.scalar_type(), "fused_dropout", [&] {
      using accscalar_t = acc_type<scalar_t, true>;
      accscalar_t pa = (accscalar_t)(p);
      auto self_info = cuda::detail::getTensorInfo<scalar_t, uint64_t>(self);
      auto ret_info = cuda::detail::getTensorInfo<scalar_t, uint64_t>(ret);
      auto mask_info = cuda::detail::getTensorInfo<uint8_t, uint64_t>(mask);
      self_info.collapseDims();
      ret_info.collapseDims();
      mask_info.collapseDims(); //ret and mask are collapsed to 1d contiguous tensor
      int vec_size = get_vector_size<scalar_t>(self, ret, mask);
      if (vec_size > 1) {
        switch (vec_size) {
          case 4:
            fused_dropout_kernel_vec<scalar_t, accscalar_t, uint64_t, 1, 4><<<grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(self_info, ret_info, mask_info, nelem, pa, rng_engine_inputs);
            break;
          case 2:
            fused_dropout_kernel_vec<scalar_t, accscalar_t, uint64_t, 1, 2><<<grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(self_info, ret_info, mask_info, nelem, pa, rng_engine_inputs);
            break;
        }
      } else {
        switch (self_info.dims) {
          case 1:
            fused_dropout_kernel<scalar_t, accscalar_t, uint64_t, 1><<<grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(self_info, ret_info, mask_info, nelem, pa, rng_engine_inputs);
            break;
          default:
            fused_dropout_kernel<scalar_t, accscalar_t, uint64_t, -1><<<grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(self_info, ret_info, mask_info, nelem, pa, rng_engine_inputs);
        }
      }
    });
  }
  // Surface any launch-configuration error from the kernel launches above.
  THCudaCheck(cudaGetLastError());
  return std::tuple<Tensor,Tensor>(ret, mask);
}
// Elementwise self * mask * scale where `mask` is a uint8 0/1 tensor (dropout
// backward).  Returns a new tensor in self's suggested memory format.
Tensor masked_scale_cuda(const Tensor& self, const Tensor& mask, double scale){
  TORCH_CHECK(mask.scalar_type() == at::ScalarType::Byte, "mask should be torch.uint8 dtype");
  Tensor ret = at::empty_like(self, self.suggest_memory_format());
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(ret.scalar_type(), "masked_scale", [&] {
    using accscalar_t = acc_type<scalar_t, true>;
    // Widen the scale to the accumulation type before handing it to the kernel.
    accscalar_t scale_acc = static_cast<accscalar_t>(scale);
    masked_scale_kernel<scalar_t>(ret, self, mask, scale_acc);
  });
  return ret;
}
}
}
|
e4da0a50d8cdf5d5749a1184af6d7607b93a947b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define GROUP_SIZE 256
#define BUFFER_SIZE 256
/**
* Find a bounding box for the atoms in each block.
*/
// For each block of TILE_SIZE consecutive atoms, compute: the half-extents of
// its axis-aligned bounding box (blockBoundingBox), the box center with the
// radius of the tightest enclosing sphere stored in .w (blockCenter), and a
// size-based sort key (sum of the three half-extents) used to order blocks.
// Also resets the rebuild flag.  Grid-stride over atom blocks.
extern "C" __global__ void findBlockBounds(int numAtoms, real4 periodicBoxSize, real4 invPeriodicBoxSize, real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ,
        const real4* __restrict__ posq, real4* __restrict__ blockCenter, real4* __restrict__ blockBoundingBox, int* __restrict__ rebuildNeighborList,
        real2* __restrict__ sortedBlocks) {
    int index = blockIdx.x*blockDim.x+threadIdx.x;
    int base = index*TILE_SIZE;
    while (base < numAtoms) {
        real4 pos = posq[base];
#ifdef USE_PERIODIC
        APPLY_PERIODIC_TO_POS(pos)
#endif
        real4 minPos = pos;
        real4 maxPos = pos;
        int last = min(base+TILE_SIZE, numAtoms);
        for (int i = base+1; i < last; i++) {
            pos = posq[i];
#ifdef USE_PERIODIC
            // Wrap each atom to the periodic image nearest the running box
            // center so the bounding box never straddles a box boundary.
            real4 center = 0.5f*(maxPos+minPos);
            APPLY_PERIODIC_TO_POS_WITH_CENTER(pos, center)
#endif
            minPos = make_real4(min(minPos.x,pos.x), min(minPos.y,pos.y), min(minPos.z,pos.z), 0);
            maxPos = make_real4(max(maxPos.x,pos.x), max(maxPos.y,pos.y), max(maxPos.z,pos.z), 0);
        }
        real4 blockSize = 0.5f*(maxPos-minPos);
        real4 center = 0.5f*(maxPos+minPos);
        center.w = 0;
        // Second pass: radius of the tightest sphere around the center.
        // (A redundant, unused reload of posq[i] into `pos` was removed here.)
        for (int i = base; i < last; i++) {
            real4 delta = posq[i]-center;
#ifdef USE_PERIODIC
            APPLY_PERIODIC_TO_DELTA(delta)
#endif
            center.w = max(center.w, delta.x*delta.x+delta.y*delta.y+delta.z*delta.z);
        }
        center.w = sqrt(center.w);
        blockBoundingBox[index] = blockSize;
        blockCenter[index] = center;
        sortedBlocks[index] = make_real2(blockSize.x+blockSize.y+blockSize.z, index);
        index += blockDim.x*gridDim.x;
        base = index*TILE_SIZE;
    }
    // One thread clears the rebuild flag; sortBoxData may set it again.
    if (blockIdx.x == 0 && threadIdx.x == 0)
        rebuildNeighborList[0] = 0;
}
/**
* Sort the data about bounding boxes so it can be accessed more efficiently in the next kernel.
*/
// Gather per-block center/bounding-box data into sorted order so later kernels
// read it with unit-stride, coalesced accesses; also decide whether the
// neighbor list must be rebuilt (forced, or any atom drifted more than half
// the padding distance since the list was last built).
extern "C" __global__ void sortBoxData(const real2* __restrict__ sortedBlock, const real4* __restrict__ blockCenter,
        const real4* __restrict__ blockBoundingBox, real4* __restrict__ sortedBlockCenter,
        real4* __restrict__ sortedBlockBoundingBox, const real4* __restrict__ posq, const real4* __restrict__ oldPositions,
        unsigned int* __restrict__ interactionCount, int* __restrict__ rebuildNeighborList, bool forceRebuild) {
    const int stride = blockDim.x*gridDim.x;
    for (int block = threadIdx.x+blockIdx.x*blockDim.x; block < NUM_BLOCKS; block += stride) {
        int unsortedIndex = (int) sortedBlock[block].y;
        sortedBlockCenter[block] = blockCenter[unsortedIndex];
        sortedBlockBoundingBox[block] = blockBoundingBox[unsortedIndex];
    }
    // Check atom drift against (PADDING/2)^2.
    bool rebuild = forceRebuild;
    for (int atom = threadIdx.x+blockIdx.x*blockDim.x; atom < NUM_ATOMS; atom += stride) {
        real4 delta = oldPositions[atom]-posq[atom];
        if (delta.x*delta.x + delta.y*delta.y + delta.z*delta.z > 0.25f*PADDING*PADDING)
            rebuild = true;
    }
    if (rebuild) {
        // Flag the rebuild and zero both counters (tiles and single pairs).
        rebuildNeighborList[0] = 1;
        interactionCount[0] = 0;
        interactionCount[1] = 0;
    }
}
// Warp-cooperative helper: entries of (atoms[], flags[]) whose interaction
// bitmask has at most MAX_BITS_FOR_PAIRS set bits are emitted as individual
// (atom, atom) pairs into singlePairs; the remaining entries are compacted to
// the front of atoms[]/flags[].  Returns the new length after compaction.
// Must be called by a full, converged warp.
__device__ int saveSinglePairs(int x, int* atoms, int* flags, int length, unsigned int maxSinglePairs, unsigned int* singlePairCount, int2* singlePairs, int* sumBuffer, volatile int& pairStartIndex) {
    // Record interactions that should be computed as single pairs rather than in blocks.
    const int indexInWarp = threadIdx.x%32;
    // Per-lane count of pair slots this lane will emit.
    int sum = 0;
    for (int i = indexInWarp; i < length; i += 32) {
        int count = __popc(flags[i]);
        sum += (count <= MAX_BITS_FOR_PAIRS ? count : 0);
    }
    // Inclusive warp prefix sum of `sum` via shuffle-up.
    for (int i = 1; i < 32; i *= 2) {
        int n = __shfl_up_sync(0xffffffff, sum, i);
        if (indexInWarp >= i)
            sum += n;
    }
    // Lane 31 holds the warp total; reserve that many global pair slots.
    if (indexInWarp == 31)
        pairStartIndex = atomicAdd(singlePairCount,(unsigned int) sum);
    __syncwarp();
    // Convert the inclusive scan to an exclusive per-lane starting offset.
    int prevSum = __shfl_up_sync(0xffffffff, sum, 1);
    int pairIndex = pairStartIndex + (indexInWarp > 0 ? prevSum : 0);
    // Emit one int2 per set bit for the small-count entries; bit j of flags[i]
    // corresponds to atom x*TILE_SIZE+j of block x.
    for (int i = indexInWarp; i < length; i += 32) {
        int count = __popc(flags[i]);
        if (count <= MAX_BITS_FOR_PAIRS && pairIndex+count < maxSinglePairs) {
            int f = flags[i];
            while (f != 0) {
                singlePairs[pairIndex] = make_int2(atoms[i], x*TILE_SIZE+__ffs(f)-1);
                f &= f-1;   // clear lowest set bit
                pairIndex++;
            }
        }
    }
    // Compact the remaining interactions.
    const int warpMask = (1<<indexInWarp)-1;
    int numCompacted = 0;
    for (int start = 0; start < length; start += 32) {
        int i = start+indexInWarp;
        int atom = atoms[i];
        int flag = flags[i];
        bool include = (i < length && __popc(flags[i]) > MAX_BITS_FOR_PAIRS);
        int includeFlags = BALLOT(include);
        if (include) {
            // Stable compaction: position = kept entries so far + kept entries
            // in lower lanes of this iteration.
            int index = numCompacted+__popc(includeFlags&warpMask);
            atoms[index] = atom;
            flags[index] = flag;
        }
        numCompacted += __popc(includeFlags);
    }
    return numCompacted;
}
/**
* Compare the bounding boxes for each pair of atom blocks (comprised of 32 atoms each), forming a tile. If the two
* atom blocks are sufficiently far apart, mark them as non-interacting. There are two stages in the algorithm.
*
* STAGE 1:
*
* A coarse grained atom block against interacting atom block neighbour list is constructed.
*
* Each warp first loads in some block X of interest. Each thread within the warp then loads
* in a different atom block Y. If Y has exclusions with X, then Y is not processed. If the bounding boxes
* of the two atom blocks are within the cutoff distance, then the two atom blocks are considered to be
* interacting and Y is added to the buffer for X.
*
* STAGE 2:
*
* A fine grained atom block against interacting atoms neighbour list is constructed.
*
* The warp loops over atom blocks Y that were found to (possibly) interact with atom block X. Each thread
* in the warp loops over the 32 atoms in X and compares their positions to one particular atom from block Y.
* If it finds one closer than the cutoff distance, the atom is added to the list of atoms interacting with block X.
* This continues until the buffer fills up, at which point the results are written to global memory.
*
* [in] periodicBoxSize - size of the rectangular periodic box
* [in] invPeriodicBoxSize - inverse of the periodic box
* [in] blockCenter - the center of each bounding box
* [in] blockBoundingBox - bounding box of each atom block
* [out] interactionCount - total number of tiles that have interactions
* [out] interactingTiles - set of blocks that have interactions
* [out] interactingAtoms - a list of atoms that interact with each atom block
* [in] posq - x,y,z coordinates of each atom and charge q
* [in] maxTiles - maximum number of tiles to process, used for multi-GPUs
* [in] startBlockIndex - first block to process, used for multi-GPUs,
* [in] numBlocks - total number of atom blocks
* [in] sortedBlocks - a sorted list of atom blocks based on volume
* [in] sortedBlockCenter - sorted centers, duplicated for fast access to avoid indexing
* [in] sortedBlockBoundingBox - sorted bounding boxes, duplicated for fast access
* [in] exclusionIndices - maps into exclusionRowIndices with the starting position for a given atom
 * [in] exclusionRowIndices      - stores a continuous list of exclusions
* eg: block 0 is excluded from atom 3,5,6
* block 1 is excluded from atom 3,4
* block 2 is excluded from atom 1,3,5,6
* exclusionIndices[0][3][5][8]
* exclusionRowIndices[3][5][6][3][4][1][3][5][6]
* index 0 1 2 3 4 5 6 7 8
* [out] oldPos - stores the positions of the atoms in which this neighbourlist was built on
* - this is used to decide when to rebuild a neighbourlist
* [in] rebuildNeighbourList - whether or not to execute this kernel
*
*/
extern "C" __global__ __launch_bounds__(GROUP_SIZE,1) void findBlocksWithInteractions(real4 periodicBoxSize, real4 invPeriodicBoxSize, real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ,
        unsigned int* __restrict__ interactionCount, int* __restrict__ interactingTiles, unsigned int* __restrict__ interactingAtoms,
        int2* __restrict__ singlePairs, const real4* __restrict__ posq, unsigned int maxTiles, unsigned int maxSinglePairs,
        unsigned int startBlockIndex, unsigned int numBlocks, real2* __restrict__ sortedBlocks, const real4* __restrict__ sortedBlockCenter,
        const real4* __restrict__ sortedBlockBoundingBox, const unsigned int* __restrict__ exclusionIndices, const unsigned int* __restrict__ exclusionRowIndices,
        real4* __restrict__ oldPositions, const int* __restrict__ rebuildNeighborList) {
    if (rebuildNeighborList[0] == 0)
        return; // The neighbor list doesn't need to be rebuilt.
    // Each warp owns one block X at a time; per-warp working state lives in
    // shared memory, partitioned by warpStart/32.
    const int indexInWarp = threadIdx.x%32;
    const int warpStart = threadIdx.x-indexInWarp;
    const int totalWarps = blockDim.x*gridDim.x/32;
    const int warpIndex = (blockIdx.x*blockDim.x+threadIdx.x)/32;
    const int warpMask = (1<<indexInWarp)-1;   // bits for lanes below this one
    __shared__ int workgroupBuffer[BUFFER_SIZE*(GROUP_SIZE/32)];
    __shared__ int workgroupFlagsBuffer[BUFFER_SIZE*(GROUP_SIZE/32)];
    __shared__ int warpExclusions[MAX_EXCLUSIONS*(GROUP_SIZE/32)];
    __shared__ real3 posBuffer[GROUP_SIZE];
    __shared__ volatile int workgroupTileIndex[GROUP_SIZE/32];
    __shared__ int worksgroupPairStartIndex[GROUP_SIZE/32];
    int* sumBuffer = (int*) posBuffer; // Reuse the same buffer to save memory
    int* buffer = workgroupBuffer+BUFFER_SIZE*(warpStart/32);
    int* flagsBuffer = workgroupFlagsBuffer+BUFFER_SIZE*(warpStart/32);
    int* exclusionsForX = warpExclusions+MAX_EXCLUSIONS*(warpStart/32);
    volatile int& tileStartIndex = workgroupTileIndex[warpStart/32];
    volatile int& pairStartIndex = worksgroupPairStartIndex[warpStart/32];
    // Loop over blocks.
    for (int block1 = startBlockIndex+warpIndex; block1 < startBlockIndex+numBlocks; block1 += totalWarps) {
        // Load data for this block. Note that all threads in a warp are processing the same block.
        real2 sortedKey = sortedBlocks[block1];
        int x = (int) sortedKey.y;
        real4 blockCenterX = sortedBlockCenter[block1];
        real4 blockSizeX = sortedBlockBoundingBox[block1];
        int neighborsInBuffer = 0;
        // Each lane caches one atom of block X in shared memory.
        real3 pos1 = trimTo3(posq[x*TILE_SIZE+indexInWarp]);
#ifdef USE_PERIODIC
        const bool singlePeriodicCopy = (0.5f*periodicBoxSize.x-blockSizeX.x >= PADDED_CUTOFF &&
                                         0.5f*periodicBoxSize.y-blockSizeX.y >= PADDED_CUTOFF &&
                                         0.5f*periodicBoxSize.z-blockSizeX.z >= PADDED_CUTOFF);
        if (singlePeriodicCopy) {
            // The box is small enough that we can just translate all the atoms into a single periodic
            // box, then skip having to apply periodic boundary conditions later.
            APPLY_PERIODIC_TO_POS_WITH_CENTER(pos1, blockCenterX)
        }
#endif
        posBuffer[threadIdx.x] = pos1;
        // Load exclusion data for block x.
        const int exclusionStart = exclusionRowIndices[x];
        const int exclusionEnd = exclusionRowIndices[x+1];
        const int numExclusions = exclusionEnd-exclusionStart;
        for (int j = indexInWarp; j < numExclusions; j += 32)
            exclusionsForX[j] = exclusionIndices[exclusionStart+j];
        if (MAX_EXCLUSIONS > 32)
            __syncthreads();
        // Loop over atom blocks to search for neighbors. The threads in a warp compare block1 against 32
        // other blocks in parallel.
        for (int block2Base = block1+1; block2Base < NUM_BLOCKS; block2Base += 32) {
            int block2 = block2Base+indexInWarp;
            bool includeBlock2 = (block2 < NUM_BLOCKS);
            bool forceInclude = false;
            if (includeBlock2) {
                real4 blockCenterY = sortedBlockCenter[block2];
                real4 blockSizeY = sortedBlockBoundingBox[block2];
                real4 blockDelta = blockCenterX-blockCenterY;
#ifdef USE_PERIODIC
                APPLY_PERIODIC_TO_DELTA(blockDelta)
#endif
                // Coarse test 1: bounding spheres (center.w holds the radius).
                includeBlock2 &= (blockDelta.x*blockDelta.x+blockDelta.y*blockDelta.y+blockDelta.z*blockDelta.z < (PADDED_CUTOFF+blockCenterX.w+blockCenterY.w)*(PADDED_CUTOFF+blockCenterX.w+blockCenterY.w));
                // Coarse test 2: axis-aligned bounding boxes.
                blockDelta.x = max(0.0f, fabs(blockDelta.x)-blockSizeX.x-blockSizeY.x);
                blockDelta.y = max(0.0f, fabs(blockDelta.y)-blockSizeX.y-blockSizeY.y);
                blockDelta.z = max(0.0f, fabs(blockDelta.z)-blockSizeX.z-blockSizeY.z);
                includeBlock2 &= (blockDelta.x*blockDelta.x+blockDelta.y*blockDelta.y+blockDelta.z*blockDelta.z < PADDED_CUTOFF_SQUARED);
#ifdef TRICLINIC
                // The calculation to find the nearest periodic copy is only guaranteed to work if the nearest copy is less than half a box width away.
                // If there's any possibility we might have missed it, do a detailed check.
                if (periodicBoxSize.z/2-blockSizeX.z-blockSizeY.z < PADDED_CUTOFF || periodicBoxSize.y/2-blockSizeX.y-blockSizeY.y < PADDED_CUTOFF)
                    includeBlock2 = forceInclude = true;
#endif
                // Skip blocks that appear in X's exclusion list; those tiles
                // are handled separately.
                if (includeBlock2) {
                    int y = (int) sortedBlocks[block2].y;
                    for (int k = 0; k < numExclusions; k++)
                        includeBlock2 &= (exclusionsForX[k] != y);
                }
            }
            // Loop over any blocks we identified as potentially containing neighbors.
            int includeBlockFlags = BALLOT(includeBlock2);
            int forceIncludeFlags = BALLOT(forceInclude);
            while (includeBlockFlags != 0) {
                // Process candidate blocks one at a time, lowest set bit first.
                int i = __ffs(includeBlockFlags)-1;
                includeBlockFlags &= includeBlockFlags-1;
                forceInclude = (forceIncludeFlags>>i) & 1;
                int y = (int) sortedBlocks[block2Base+i].y;
                // Check each atom in block Y for interactions.
                int atom2 = y*TILE_SIZE+indexInWarp;
                real3 pos2 = trimTo3(posq[atom2]);
#ifdef USE_PERIODIC
                if (singlePeriodicCopy) {
                    APPLY_PERIODIC_TO_POS_WITH_CENTER(pos2, blockCenterX)
                }
#endif
                // Prefilter: which atoms of X are close enough to Y's bounding
                // sphere to possibly interact with any atom of Y?
                real4 blockCenterY = sortedBlockCenter[block2Base+i];
                real3 atomDelta = posBuffer[warpStart+indexInWarp]-trimTo3(blockCenterY);
#ifdef USE_PERIODIC
                APPLY_PERIODIC_TO_DELTA(atomDelta)
#endif
                int atomFlags = BALLOT(forceInclude || atomDelta.x*atomDelta.x+atomDelta.y*atomDelta.y+atomDelta.z*atomDelta.z < (PADDED_CUTOFF+blockCenterY.w)*(PADDED_CUTOFF+blockCenterY.w));
                int interacts = 0;
                if (atom2 < NUM_ATOMS && atomFlags != 0) {
                    // Only scan the span of X atoms that passed the prefilter.
                    int first = __ffs(atomFlags)-1;
                    int last = 32-__clz(atomFlags);
#ifdef USE_PERIODIC
                    if (!singlePeriodicCopy) {
                        for (int j = first; j < last; j++) {
                            real3 delta = pos2-posBuffer[warpStart+j];
                            APPLY_PERIODIC_TO_DELTA(delta)
                            interacts |= (delta.x*delta.x+delta.y*delta.y+delta.z*delta.z < PADDED_CUTOFF_SQUARED ? 1<<j : 0);
                        }
                    }
                    else {
#endif
                        for (int j = first; j < last; j++) {
                            real3 delta = pos2-posBuffer[warpStart+j];
                            interacts |= (delta.x*delta.x+delta.y*delta.y+delta.z*delta.z < PADDED_CUTOFF_SQUARED ? 1<<j : 0);
                        }
#ifdef USE_PERIODIC
                    }
#endif
                }
                // Add any interacting atoms to the buffer.
                int includeAtomFlags = BALLOT(interacts);
                if (interacts) {
                    // Compacted insert: position = prior count + interacting
                    // lanes below this one.
                    int index = neighborsInBuffer+__popc(includeAtomFlags&warpMask);
                    buffer[index] = atom2;
                    flagsBuffer[index] = interacts;
                }
                neighborsInBuffer += __popc(includeAtomFlags);
                if (neighborsInBuffer > BUFFER_SIZE-TILE_SIZE) {
                    // Store the new tiles to memory.
#if MAX_BITS_FOR_PAIRS > 0
                    neighborsInBuffer = saveSinglePairs(x, buffer, flagsBuffer, neighborsInBuffer, maxSinglePairs, &interactionCount[1], singlePairs, sumBuffer+warpStart, pairStartIndex);
#endif
                    int tilesToStore = neighborsInBuffer/TILE_SIZE;
                    if (tilesToStore > 0) {
                        if (indexInWarp == 0)
                            tileStartIndex = atomicAdd(&interactionCount[0], tilesToStore);
                        int newTileStartIndex = tileStartIndex;
                        // Silently drop tiles past maxTiles; the host detects
                        // overflow from interactionCount and re-runs.
                        if (newTileStartIndex+tilesToStore <= maxTiles) {
                            if (indexInWarp < tilesToStore)
                                interactingTiles[newTileStartIndex+indexInWarp] = x;
                            for (int j = 0; j < tilesToStore; j++)
                                interactingAtoms[(newTileStartIndex+j)*TILE_SIZE+indexInWarp] = buffer[indexInWarp+j*TILE_SIZE];
                        }
                        // Shift the leftover (< TILE_SIZE) entries to the front.
                        if (indexInWarp+TILE_SIZE*tilesToStore < BUFFER_SIZE)
                            buffer[indexInWarp] = buffer[indexInWarp+TILE_SIZE*tilesToStore];
                        neighborsInBuffer -= TILE_SIZE*tilesToStore;
                    }
                }
            }
        }
        // If we have a partially filled buffer, store it to memory.
#if MAX_BITS_FOR_PAIRS > 0
        if (neighborsInBuffer > 32)
            neighborsInBuffer = saveSinglePairs(x, buffer, flagsBuffer, neighborsInBuffer, maxSinglePairs, &interactionCount[1], singlePairs, sumBuffer+warpStart, pairStartIndex);
#endif
        if (neighborsInBuffer > 0) {
            int tilesToStore = (neighborsInBuffer+TILE_SIZE-1)/TILE_SIZE;
            if (indexInWarp == 0)
                tileStartIndex = atomicAdd(&interactionCount[0], tilesToStore);
            int newTileStartIndex = tileStartIndex;
            if (newTileStartIndex+tilesToStore <= maxTiles) {
                if (indexInWarp < tilesToStore)
                    interactingTiles[newTileStartIndex+indexInWarp] = x;
                // Pad the final partial tile with NUM_ATOMS sentinels.
                for (int j = 0; j < tilesToStore; j++)
                    interactingAtoms[(newTileStartIndex+j)*TILE_SIZE+indexInWarp] = (indexInWarp+j*TILE_SIZE < neighborsInBuffer ? buffer[indexInWarp+j*TILE_SIZE] : NUM_ATOMS);
            }
        }
    }
    // Record the positions the neighbor list is based on.
    for (int i = threadIdx.x+blockIdx.x*blockDim.x; i < NUM_ATOMS; i += blockDim.x*gridDim.x)
        oldPositions[i] = posq[i];
}
| e4da0a50d8cdf5d5749a1184af6d7607b93a947b.cu | #define GROUP_SIZE 256
#define BUFFER_SIZE 256
/**
* Find a bounding box for the atoms in each block.
*/
// For each block of TILE_SIZE consecutive atoms, compute: the half-extents of
// its axis-aligned bounding box (blockBoundingBox), the box center with the
// radius of the tightest enclosing sphere stored in .w (blockCenter), and a
// size-based sort key (sum of the three half-extents) used to order blocks.
// Also resets the rebuild flag.  Grid-stride over atom blocks.
extern "C" __global__ void findBlockBounds(int numAtoms, real4 periodicBoxSize, real4 invPeriodicBoxSize, real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ,
        const real4* __restrict__ posq, real4* __restrict__ blockCenter, real4* __restrict__ blockBoundingBox, int* __restrict__ rebuildNeighborList,
        real2* __restrict__ sortedBlocks) {
    int index = blockIdx.x*blockDim.x+threadIdx.x;
    int base = index*TILE_SIZE;
    while (base < numAtoms) {
        real4 pos = posq[base];
#ifdef USE_PERIODIC
        APPLY_PERIODIC_TO_POS(pos)
#endif
        real4 minPos = pos;
        real4 maxPos = pos;
        int last = min(base+TILE_SIZE, numAtoms);
        for (int i = base+1; i < last; i++) {
            pos = posq[i];
#ifdef USE_PERIODIC
            // Wrap each atom to the periodic image nearest the running box
            // center so the bounding box never straddles a box boundary.
            real4 center = 0.5f*(maxPos+minPos);
            APPLY_PERIODIC_TO_POS_WITH_CENTER(pos, center)
#endif
            minPos = make_real4(min(minPos.x,pos.x), min(minPos.y,pos.y), min(minPos.z,pos.z), 0);
            maxPos = make_real4(max(maxPos.x,pos.x), max(maxPos.y,pos.y), max(maxPos.z,pos.z), 0);
        }
        real4 blockSize = 0.5f*(maxPos-minPos);
        real4 center = 0.5f*(maxPos+minPos);
        center.w = 0;
        // Second pass: radius of the tightest sphere around the center.
        // (A redundant, unused reload of posq[i] into `pos` was removed here.)
        for (int i = base; i < last; i++) {
            real4 delta = posq[i]-center;
#ifdef USE_PERIODIC
            APPLY_PERIODIC_TO_DELTA(delta)
#endif
            center.w = max(center.w, delta.x*delta.x+delta.y*delta.y+delta.z*delta.z);
        }
        center.w = sqrt(center.w);
        blockBoundingBox[index] = blockSize;
        blockCenter[index] = center;
        sortedBlocks[index] = make_real2(blockSize.x+blockSize.y+blockSize.z, index);
        index += blockDim.x*gridDim.x;
        base = index*TILE_SIZE;
    }
    // One thread clears the rebuild flag; sortBoxData may set it again.
    if (blockIdx.x == 0 && threadIdx.x == 0)
        rebuildNeighborList[0] = 0;
}
/**
* Sort the data about bounding boxes so it can be accessed more efficiently in the next kernel.
*/
// Gather per-block center/bounding-box data into sorted order so later kernels
// read it with unit-stride, coalesced accesses; also decide whether the
// neighbor list must be rebuilt (forced, or any atom drifted more than half
// the padding distance since the list was last built).
extern "C" __global__ void sortBoxData(const real2* __restrict__ sortedBlock, const real4* __restrict__ blockCenter,
        const real4* __restrict__ blockBoundingBox, real4* __restrict__ sortedBlockCenter,
        real4* __restrict__ sortedBlockBoundingBox, const real4* __restrict__ posq, const real4* __restrict__ oldPositions,
        unsigned int* __restrict__ interactionCount, int* __restrict__ rebuildNeighborList, bool forceRebuild) {
    const int stride = blockDim.x*gridDim.x;
    for (int block = threadIdx.x+blockIdx.x*blockDim.x; block < NUM_BLOCKS; block += stride) {
        int unsortedIndex = (int) sortedBlock[block].y;
        sortedBlockCenter[block] = blockCenter[unsortedIndex];
        sortedBlockBoundingBox[block] = blockBoundingBox[unsortedIndex];
    }
    // Check atom drift against (PADDING/2)^2.
    bool rebuild = forceRebuild;
    for (int atom = threadIdx.x+blockIdx.x*blockDim.x; atom < NUM_ATOMS; atom += stride) {
        real4 delta = oldPositions[atom]-posq[atom];
        if (delta.x*delta.x + delta.y*delta.y + delta.z*delta.z > 0.25f*PADDING*PADDING)
            rebuild = true;
    }
    if (rebuild) {
        // Flag the rebuild and zero both counters (tiles and single pairs).
        rebuildNeighborList[0] = 1;
        interactionCount[0] = 0;
        interactionCount[1] = 0;
    }
}
// Warp-cooperative helper: entries of (atoms[], flags[]) whose interaction
// bitmask has at most MAX_BITS_FOR_PAIRS set bits are emitted as individual
// (atom, atom) pairs into singlePairs; the remaining entries are compacted to
// the front of atoms[]/flags[].  Returns the new length after compaction.
// Must be called by a full, converged warp.
__device__ int saveSinglePairs(int x, int* atoms, int* flags, int length, unsigned int maxSinglePairs, unsigned int* singlePairCount, int2* singlePairs, int* sumBuffer, volatile int& pairStartIndex) {
    // Record interactions that should be computed as single pairs rather than in blocks.
    const int indexInWarp = threadIdx.x%32;
    // Per-lane count of pair slots this lane will emit.
    int sum = 0;
    for (int i = indexInWarp; i < length; i += 32) {
        int count = __popc(flags[i]);
        sum += (count <= MAX_BITS_FOR_PAIRS ? count : 0);
    }
    // Inclusive warp prefix sum of `sum` via shuffle-up.
    for (int i = 1; i < 32; i *= 2) {
        int n = __shfl_up_sync(0xffffffff, sum, i);
        if (indexInWarp >= i)
            sum += n;
    }
    // Lane 31 holds the warp total; reserve that many global pair slots.
    if (indexInWarp == 31)
        pairStartIndex = atomicAdd(singlePairCount,(unsigned int) sum);
    __syncwarp();
    // Convert the inclusive scan to an exclusive per-lane starting offset.
    int prevSum = __shfl_up_sync(0xffffffff, sum, 1);
    int pairIndex = pairStartIndex + (indexInWarp > 0 ? prevSum : 0);
    // Emit one int2 per set bit for the small-count entries; bit j of flags[i]
    // corresponds to atom x*TILE_SIZE+j of block x.
    for (int i = indexInWarp; i < length; i += 32) {
        int count = __popc(flags[i]);
        if (count <= MAX_BITS_FOR_PAIRS && pairIndex+count < maxSinglePairs) {
            int f = flags[i];
            while (f != 0) {
                singlePairs[pairIndex] = make_int2(atoms[i], x*TILE_SIZE+__ffs(f)-1);
                f &= f-1;   // clear lowest set bit
                pairIndex++;
            }
        }
    }
    // Compact the remaining interactions.
    const int warpMask = (1<<indexInWarp)-1;
    int numCompacted = 0;
    for (int start = 0; start < length; start += 32) {
        int i = start+indexInWarp;
        int atom = atoms[i];
        int flag = flags[i];
        bool include = (i < length && __popc(flags[i]) > MAX_BITS_FOR_PAIRS);
        int includeFlags = BALLOT(include);
        if (include) {
            // Stable compaction: position = kept entries so far + kept entries
            // in lower lanes of this iteration.
            int index = numCompacted+__popc(includeFlags&warpMask);
            atoms[index] = atom;
            flags[index] = flag;
        }
        numCompacted += __popc(includeFlags);
    }
    return numCompacted;
}
/**
* Compare the bounding boxes for each pair of atom blocks (comprised of 32 atoms each), forming a tile. If the two
* atom blocks are sufficiently far apart, mark them as non-interacting. There are two stages in the algorithm.
*
* STAGE 1:
*
* A coarse grained atom block against interacting atom block neighbour list is constructed.
*
* Each warp first loads in some block X of interest. Each thread within the warp then loads
* in a different atom block Y. If Y has exclusions with X, then Y is not processed. If the bounding boxes
* of the two atom blocks are within the cutoff distance, then the two atom blocks are considered to be
* interacting and Y is added to the buffer for X.
*
* STAGE 2:
*
* A fine grained atom block against interacting atoms neighbour list is constructed.
*
* The warp loops over atom blocks Y that were found to (possibly) interact with atom block X. Each thread
* in the warp loops over the 32 atoms in X and compares their positions to one particular atom from block Y.
* If it finds one closer than the cutoff distance, the atom is added to the list of atoms interacting with block X.
* This continues until the buffer fills up, at which point the results are written to global memory.
*
* [in] periodicBoxSize - size of the rectangular periodic box
* [in] invPeriodicBoxSize - inverse of the periodic box
* [in] blockCenter - the center of each bounding box
* [in] blockBoundingBox - bounding box of each atom block
* [out] interactionCount - total number of tiles that have interactions
* [out] interactingTiles - set of blocks that have interactions
* [out] interactingAtoms - a list of atoms that interact with each atom block
* [in] posq - x,y,z coordinates of each atom and charge q
* [in] maxTiles - maximum number of tiles to process, used for multi-GPUs
* [in] startBlockIndex - first block to process, used for multi-GPUs,
* [in] numBlocks - total number of atom blocks
* [in] sortedBlocks - a sorted list of atom blocks based on volume
* [in] sortedBlockCenter - sorted centers, duplicated for fast access to avoid indexing
* [in] sortedBlockBoundingBox - sorted bounding boxes, duplicated for fast access
* [in] exclusionIndices - maps into exclusionRowIndices with the starting position for a given atom
 * [in] exclusionRowIndices      - stores a continuous list of exclusions
* eg: block 0 is excluded from atom 3,5,6
* block 1 is excluded from atom 3,4
* block 2 is excluded from atom 1,3,5,6
* exclusionIndices[0][3][5][8]
* exclusionRowIndices[3][5][6][3][4][1][3][5][6]
* index 0 1 2 3 4 5 6 7 8
* [out] oldPos - stores the positions of the atoms in which this neighbourlist was built on
* - this is used to decide when to rebuild a neighbourlist
* [in] rebuildNeighbourList - whether or not to execute this kernel
*
*/
extern "C" __global__ __launch_bounds__(GROUP_SIZE,1) void findBlocksWithInteractions(real4 periodicBoxSize, real4 invPeriodicBoxSize, real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ,
        unsigned int* __restrict__ interactionCount, int* __restrict__ interactingTiles, unsigned int* __restrict__ interactingAtoms,
        int2* __restrict__ singlePairs, const real4* __restrict__ posq, unsigned int maxTiles, unsigned int maxSinglePairs,
        unsigned int startBlockIndex, unsigned int numBlocks, real2* __restrict__ sortedBlocks, const real4* __restrict__ sortedBlockCenter,
        const real4* __restrict__ sortedBlockBoundingBox, const unsigned int* __restrict__ exclusionIndices, const unsigned int* __restrict__ exclusionRowIndices,
        real4* __restrict__ oldPositions, const int* __restrict__ rebuildNeighborList) {
    if (rebuildNeighborList[0] == 0)
        return; // The neighbor list doesn't need to be rebuilt.
    // Warp bookkeeping: one warp of 32 threads processes a single block1 at a
    // time, comparing it against 32 candidate block2's in parallel.
    const int indexInWarp = threadIdx.x%32;
    const int warpStart = threadIdx.x-indexInWarp;
    const int totalWarps = blockDim.x*gridDim.x/32;
    const int warpIndex = (blockIdx.x*blockDim.x+threadIdx.x)/32;
    // Mask of all lanes below this one; combined with __popc() below to compute
    // a warp-level exclusive prefix sum of accepted neighbors.
    const int warpMask = (1<<indexInWarp)-1;
    // Per-warp scratch: a slice of the neighbor-atom buffer, a parallel slice of
    // per-neighbor interaction bit masks (consumed by saveSinglePairs when
    // MAX_BITS_FOR_PAIRS > 0), and a slice of the exclusion list for block1.
    __shared__ int workgroupBuffer[BUFFER_SIZE*(GROUP_SIZE/32)];
    __shared__ int workgroupFlagsBuffer[BUFFER_SIZE*(GROUP_SIZE/32)];
    __shared__ int warpExclusions[MAX_EXCLUSIONS*(GROUP_SIZE/32)];
    __shared__ real3 posBuffer[GROUP_SIZE];
    __shared__ volatile int workgroupTileIndex[GROUP_SIZE/32];
    __shared__ int worksgroupPairStartIndex[GROUP_SIZE/32];
    int* sumBuffer = (int*) posBuffer; // Reuse the same buffer to save memory
    int* buffer = workgroupBuffer+BUFFER_SIZE*(warpStart/32);
    int* flagsBuffer = workgroupFlagsBuffer+BUFFER_SIZE*(warpStart/32);
    int* exclusionsForX = warpExclusions+MAX_EXCLUSIONS*(warpStart/32);
    volatile int& tileStartIndex = workgroupTileIndex[warpStart/32];
    volatile int& pairStartIndex = worksgroupPairStartIndex[warpStart/32];
    // Loop over blocks.
    for (int block1 = startBlockIndex+warpIndex; block1 < startBlockIndex+numBlocks; block1 += totalWarps) {
        // Load data for this block. Note that all threads in a warp are processing the same block.
        real2 sortedKey = sortedBlocks[block1];
        int x = (int) sortedKey.y;
        real4 blockCenterX = sortedBlockCenter[block1];
        real4 blockSizeX = sortedBlockBoundingBox[block1];
        int neighborsInBuffer = 0;
        // Each lane caches the position of one atom of block x in shared memory.
        real3 pos1 = trimTo3(posq[x*TILE_SIZE+indexInWarp]);
#ifdef USE_PERIODIC
        const bool singlePeriodicCopy = (0.5f*periodicBoxSize.x-blockSizeX.x >= PADDED_CUTOFF &&
                                         0.5f*periodicBoxSize.y-blockSizeX.y >= PADDED_CUTOFF &&
                                         0.5f*periodicBoxSize.z-blockSizeX.z >= PADDED_CUTOFF);
        if (singlePeriodicCopy) {
            // The box is small enough that we can just translate all the atoms into a single periodic
            // box, then skip having to apply periodic boundary conditions later.
            APPLY_PERIODIC_TO_POS_WITH_CENTER(pos1, blockCenterX)
        }
#endif
        posBuffer[threadIdx.x] = pos1;
        // Load exclusion data for block x.  exclusionRowIndices[x]..[x+1] bound
        // this block's span inside the continuous exclusionIndices list.
        const int exclusionStart = exclusionRowIndices[x];
        const int exclusionEnd = exclusionRowIndices[x+1];
        const int numExclusions = exclusionEnd-exclusionStart;
        for (int j = indexInWarp; j < numExclusions; j += 32)
            exclusionsForX[j] = exclusionIndices[exclusionStart+j];
        // MAX_EXCLUSIONS is a compile-time constant, so this branch is uniform.
        // NOTE(review): the barrier sits inside the block1 loop, so every warp of
        // the thread block must execute the same number of iterations for it to
        // be safe -- presumably guaranteed by the launch configuration; confirm.
        if (MAX_EXCLUSIONS > 32)
            __syncthreads();
        // Loop over atom blocks to search for neighbors. The threads in a warp compare block1 against 32
        // other blocks in parallel.
        for (int block2Base = block1+1; block2Base < NUM_BLOCKS; block2Base += 32) {
            int block2 = block2Base+indexInWarp;
            bool includeBlock2 = (block2 < NUM_BLOCKS);
            bool forceInclude = false;
            if (includeBlock2) {
                // Cheap block-vs-block tests: bounding-sphere distance, then
                // bounding-box separation against the padded cutoff.
                real4 blockCenterY = sortedBlockCenter[block2];
                real4 blockSizeY = sortedBlockBoundingBox[block2];
                real4 blockDelta = blockCenterX-blockCenterY;
#ifdef USE_PERIODIC
                APPLY_PERIODIC_TO_DELTA(blockDelta)
#endif
                includeBlock2 &= (blockDelta.x*blockDelta.x+blockDelta.y*blockDelta.y+blockDelta.z*blockDelta.z < (PADDED_CUTOFF+blockCenterX.w+blockCenterY.w)*(PADDED_CUTOFF+blockCenterX.w+blockCenterY.w));
                blockDelta.x = max(0.0f, fabs(blockDelta.x)-blockSizeX.x-blockSizeY.x);
                blockDelta.y = max(0.0f, fabs(blockDelta.y)-blockSizeX.y-blockSizeY.y);
                blockDelta.z = max(0.0f, fabs(blockDelta.z)-blockSizeX.z-blockSizeY.z);
                includeBlock2 &= (blockDelta.x*blockDelta.x+blockDelta.y*blockDelta.y+blockDelta.z*blockDelta.z < PADDED_CUTOFF_SQUARED);
#ifdef TRICLINIC
                // The calculation to find the nearest periodic copy is only guaranteed to work if the nearest copy is less than half a box width away.
                // If there's any possibility we might have missed it, do a detailed check.
                if (periodicBoxSize.z/2-blockSizeX.z-blockSizeY.z < PADDED_CUTOFF || periodicBoxSize.y/2-blockSizeX.y-blockSizeY.y < PADDED_CUTOFF)
                    includeBlock2 = forceInclude = true;
#endif
                if (includeBlock2) {
                    // Drop candidate blocks that appear in x's exclusion list.
                    int y = (int) sortedBlocks[block2].y;
                    for (int k = 0; k < numExclusions; k++)
                        includeBlock2 &= (exclusionsForX[k] != y);
                }
            }
            // Loop over any blocks we identified as potentially containing neighbors.
            int includeBlockFlags = BALLOT(includeBlock2);
            int forceIncludeFlags = BALLOT(forceInclude);
            while (includeBlockFlags != 0) {
                // Pop the lowest set bit: lane i's candidate is processed by the
                // whole warp cooperatively.
                int i = __ffs(includeBlockFlags)-1;
                includeBlockFlags &= includeBlockFlags-1;
                forceInclude = (forceIncludeFlags>>i) & 1;
                int y = (int) sortedBlocks[block2Base+i].y;
                // Check each atom in block Y for interactions.
                int atom2 = y*TILE_SIZE+indexInWarp;
                real3 pos2 = trimTo3(posq[atom2]);
#ifdef USE_PERIODIC
                if (singlePeriodicCopy) {
                    APPLY_PERIODIC_TO_POS_WITH_CENTER(pos2, blockCenterX)
                }
#endif
                real4 blockCenterY = sortedBlockCenter[block2Base+i];
                real3 atomDelta = posBuffer[warpStart+indexInWarp]-trimTo3(blockCenterY);
#ifdef USE_PERIODIC
                APPLY_PERIODIC_TO_DELTA(atomDelta)
#endif
                // Bit j of atomFlags: atom j of block x is close enough to block
                // y's bounding sphere that a per-atom check is worthwhile.
                int atomFlags = BALLOT(forceInclude || atomDelta.x*atomDelta.x+atomDelta.y*atomDelta.y+atomDelta.z*atomDelta.z < (PADDED_CUTOFF+blockCenterY.w)*(PADDED_CUTOFF+blockCenterY.w));
                int interacts = 0;
                if (atom2 < NUM_ATOMS && atomFlags != 0) {
                    int first = __ffs(atomFlags)-1;
                    int last = 32-__clz(atomFlags);
#ifdef USE_PERIODIC
                    if (!singlePeriodicCopy) {
                        for (int j = first; j < last; j++) {
                            real3 delta = pos2-posBuffer[warpStart+j];
                            APPLY_PERIODIC_TO_DELTA(delta)
                            interacts |= (delta.x*delta.x+delta.y*delta.y+delta.z*delta.z < PADDED_CUTOFF_SQUARED ? 1<<j : 0);
                        }
                    }
                    else {
#endif
                    for (int j = first; j < last; j++) {
                        real3 delta = pos2-posBuffer[warpStart+j];
                        interacts |= (delta.x*delta.x+delta.y*delta.y+delta.z*delta.z < PADDED_CUTOFF_SQUARED ? 1<<j : 0);
                    }
#ifdef USE_PERIODIC
                    }
#endif
                }
                // Add any interacting atoms to the buffer.  __popc of the lower
                // lanes' ballot bits gives each lane its slot (prefix sum).
                int includeAtomFlags = BALLOT(interacts);
                if (interacts) {
                    int index = neighborsInBuffer+__popc(includeAtomFlags&warpMask);
                    buffer[index] = atom2;
                    flagsBuffer[index] = interacts;
                }
                neighborsInBuffer += __popc(includeAtomFlags);
                // Flush when the buffer might not be able to hold another tile.
                if (neighborsInBuffer > BUFFER_SIZE-TILE_SIZE) {
                    // Store the new tiles to memory.
#if MAX_BITS_FOR_PAIRS > 0
                    neighborsInBuffer = saveSinglePairs(x, buffer, flagsBuffer, neighborsInBuffer, maxSinglePairs, &interactionCount[1], singlePairs, sumBuffer+warpStart, pairStartIndex);
#endif
                    int tilesToStore = neighborsInBuffer/TILE_SIZE;
                    if (tilesToStore > 0) {
                        // Lane 0 reserves the tile range; all lanes read it back
                        // from the volatile shared slot.  NOTE(review): relies on
                        // warp-synchronous execution -- under independent thread
                        // scheduling a __syncwarp() would be needed here; confirm
                        // the target architectures.
                        if (indexInWarp == 0)
                            tileStartIndex = atomicAdd(&interactionCount[0], tilesToStore);
                        int newTileStartIndex = tileStartIndex;
                        if (newTileStartIndex+tilesToStore <= maxTiles) {
                            if (indexInWarp < tilesToStore)
                                interactingTiles[newTileStartIndex+indexInWarp] = x;
                            for (int j = 0; j < tilesToStore; j++)
                                interactingAtoms[(newTileStartIndex+j)*TILE_SIZE+indexInWarp] = buffer[indexInWarp+j*TILE_SIZE];
                        }
                        // Shift the unstored remainder to the front of the buffer.
                        if (indexInWarp+TILE_SIZE*tilesToStore < BUFFER_SIZE)
                            buffer[indexInWarp] = buffer[indexInWarp+TILE_SIZE*tilesToStore];
                        neighborsInBuffer -= TILE_SIZE*tilesToStore;
                    }
                }
            }
        }
        // If we have a partially filled buffer, store it to memory.
#if MAX_BITS_FOR_PAIRS > 0
        if (neighborsInBuffer > 32)
            neighborsInBuffer = saveSinglePairs(x, buffer, flagsBuffer, neighborsInBuffer, maxSinglePairs, &interactionCount[1], singlePairs, sumBuffer+warpStart, pairStartIndex);
#endif
        if (neighborsInBuffer > 0) {
            int tilesToStore = (neighborsInBuffer+TILE_SIZE-1)/TILE_SIZE;
            if (indexInWarp == 0)
                tileStartIndex = atomicAdd(&interactionCount[0], tilesToStore);
            int newTileStartIndex = tileStartIndex;
            if (newTileStartIndex+tilesToStore <= maxTiles) {
                if (indexInWarp < tilesToStore)
                    interactingTiles[newTileStartIndex+indexInWarp] = x;
                for (int j = 0; j < tilesToStore; j++)
                    // Pad the final (partial) tile with NUM_ATOMS sentinels.
                    interactingAtoms[(newTileStartIndex+j)*TILE_SIZE+indexInWarp] = (indexInWarp+j*TILE_SIZE < neighborsInBuffer ? buffer[indexInWarp+j*TILE_SIZE] : NUM_ATOMS);
            }
        }
    }
    // Record the positions the neighbor list is based on.
    for (int i = threadIdx.x+blockIdx.x*blockDim.x; i < NUM_ATOMS; i += blockDim.x*gridDim.x)
        oldPositions[i] = posq[i];
}
|
05d7076f973fea1a7627ee38c1ab8042cbf12a2d.hip | // !!! This is a file automatically generated by hipify!!!
#include "gpu/kernel/primitives.h"

#include <algorithm>
#include <cstddef>
#include <cstdlib>
#include <set>
#include <thread>
#include <utility>
#include <vector>

#include "glog/logging.h"
#include "gtest/gtest.h"

#include "comm/pattern/ring_comm_pattern.h"
#include "gpu/common.h"
#include "test_cuda_utils.h"
#include "test_utils.h"
#include "utils.h"
namespace gccl {
namespace {
// Fixture for the 128-bit copy-primitive tests; carries no shared state and
// only provides the googletest plumbing.
class TestPrimitives : public testing::Test {};
// Exercises the 128-bit ring copy primitive (Copy128bGlobal) on a 3-peer ring
// with small hand-built send/recv id lists.  Each peer runs in its own host
// thread and launches the kernel on its own stream; the kernel instances
// synchronize with one another through the substage_ready/substage_done flags
// wired up below.  Id convention: ids >= 0 index the input buffer, negative
// ids address dev_extra_mem (presumably via ENCODE() -- see the large test).
TEST_F(TestPrimitives, Copy128b) {
  int record_size = 32;                    // bytes per logical record
  int buff_size = 1024;                    // staging buffer size in bytes
  int n_peers = 3;
  int n_int = record_size / sizeof(int);   // ints per record
  std::vector<CommPatternInfo> infos(3);
  std::vector<RingCommPatternInfo *> ring_infos;
  for (auto &info : infos) {
    ring_infos.push_back(info.GetRingCommPatternInfo());
  }
  std::vector<std::pair<int, int>> send_recv_size = {{3, 2}, {2, 3}, {2, 2}};
  hipSetDevice(0);
  GCCLMallocAndCopy(&ring_infos[0]->recv_ids, {1, -2});
  GCCLMallocAndCopy(&ring_infos[1]->recv_ids, {0, -1, 1});
  GCCLMallocAndCopy(&ring_infos[2]->recv_ids, {2, -2});
  GCCLMallocAndCopy(&ring_infos[0]->send_ids, {0, 2, -1});
  GCCLMallocAndCopy(&ring_infos[1]->send_ids, {2, -2});
  GCCLMallocAndCopy(&ring_infos[2]->send_ids, {0, -1});
  std::vector<int *> inputs(n_peers);
  GCCLMallocAndCopy(&inputs[0], Repeat(std::vector<int>({5, -1, 9}), n_int));
  GCCLMallocAndCopy(&inputs[1], Repeat(std::vector<int>({-1, -1, 8}), n_int));
  GCCLMallocAndCopy(&inputs[2], Repeat(std::vector<int>({7, 1, -1}), n_int));
  GCCLMallocAndCopy((int **)&ring_infos[0]->dev_extra_mem,
                    Repeat(std::vector<int>({3, -1}), n_int));
  GCCLMallocAndCopy((int **)&ring_infos[1]->dev_extra_mem,
                    Repeat(std::vector<int>({-1, 4}), n_int));
  GCCLMallocAndCopy((int **)&ring_infos[2]->dev_extra_mem,
                    Repeat(std::vector<int>({0, -1}), n_int));
  int recv_dev_mem_size = offsetof(RecvDevMem, buff) + buff_size;
  for (int i = 0; i < n_peers; ++i) {
    GCCLCudaMalloc((char **)&ring_infos[i]->forward_conn.recv_dev_mem,
                   recv_dev_mem_size);
    GCCLCudaMalloc(&ring_infos[i]->forward_conn.send_dev_mem, 1);
  }
  // Wire each peer's connection flags to its own, next, and previous device
  // memory so the kernels can hand-shake around the ring.
  for (int i = 0; i < n_peers; ++i) {
    auto &next_info = ring_infos[(i + 1) % n_peers];
    auto &prev_info = ring_infos[(i + n_peers - 1) % n_peers];
    auto &my_info = ring_infos[i];
    my_info->forward_conn.conn_info.my_stage_ready =
        &my_info->forward_conn.recv_dev_mem->stage_ready;
    my_info->forward_conn.conn_info.my_substage_ready =
        &my_info->forward_conn.recv_dev_mem->substage_ready;
    my_info->forward_conn.conn_info.my_substage_done =
        &my_info->forward_conn.send_dev_mem->substage_done;
    my_info->forward_conn.conn_info.next_recv_buff =
        &next_info->forward_conn.recv_dev_mem->buff;
    my_info->forward_conn.conn_info.next_substage_ready =
        &next_info->forward_conn.recv_dev_mem->substage_ready;
    my_info->forward_conn.conn_info.prev_substage_done =
        &prev_info->forward_conn.send_dev_mem->substage_done;
  }
  std::vector<std::vector<int>> expected_inputs = {
      Repeat(std::vector<int>({5, 7, 9}), n_int),
      Repeat(std::vector<int>({5, 3, 8}), n_int),
      Repeat(std::vector<int>({7, 1, 8}), n_int)};
  std::vector<std::vector<int>> expected_extra_buff = {
      Repeat(std::vector<int>({3, 0}), n_int),
      Repeat(std::vector<int>({9, 4}), n_int),
      Repeat(std::vector<int>({0, 4}), n_int)};
  std::vector<std::thread> ths;
  for (int i = 0; i < n_peers; ++i) {
    ths.emplace_back([&ring_infos, i, record_size, buff_size, &send_recv_size,
                      &inputs, &expected_inputs, &expected_extra_buff]() {
      CopyArgs args(-1, -1,
                    ring_infos[i]->forward_conn.conn_info.my_substage_ready,
                    ring_infos[i]->forward_conn.conn_info.my_substage_done,
                    ring_infos[i]->forward_conn.conn_info.next_substage_ready,
                    ring_infos[i]->forward_conn.conn_info.prev_substage_done);
      args.n_128b = record_size / PACK_SIZE;
      args.buff_n_128b = buff_size / PACK_SIZE;
      args.input = (Pack128 *)inputs[i];
      args.recv_buff =
          (Pack128 *)&ring_infos[i]->forward_conn.recv_dev_mem->buff;
      args.extra_buff = (Pack128 *)ring_infos[i]->dev_extra_mem;
      args.next_recv_buff =
          (Pack128 *)ring_infos[i]->forward_conn.conn_info.next_recv_buff;
      args.send_ids = ring_infos[i]->send_ids;
      args.send_size = send_recv_size[i].first;
      args.recv_ids = ring_infos[i]->recv_ids;
      args.recv_size = send_recv_size[i].second;
      args.max_comm_size = 3;
      args.extra_buff_size = 2;
      int n_threads = 32;
      void *kernel_args[] = {&args};
      hipStream_t stream;
      hipStreamCreate(&stream);
      // hipify leftover fix: this is a HIP translation unit, so the kernel
      // must be launched through the HIP runtime, not cudaLaunchKernel().
      hipLaunchKernel((void *)Copy128bGlobal, dim3(1), dim3(n_threads),
                      kernel_args, 0, stream);
      hipStreamSynchronize(stream);
      EXPECT_GPU_CPU_VEC_EQ((int *)args.input, expected_inputs[i]);
      EXPECT_GPU_CPU_VEC_EQ((int *)args.extra_buff, expected_extra_buff[i]);
      hipStreamDestroy(stream);  // was leaked before
    });
  }
  for (auto &t : ths) {
    t.join();
  }
}
// Builds a heap-allocated array of `size` ints, each drawn from [0, range)
// with rand().  The caller owns the returned buffer (delete[]).
int *CreateRandomArray(int size, int range) {
  int *values = new int[size];
  std::generate(values, values + size, [range] { return rand() % range; });
  return values;
}
// Builds a sorted, heap-allocated array of `size` distinct ints drawn from
// [0, range).  Values present in `st` (when given) are never produced.  The
// caller owns the returned buffer (delete[]).  Note: loops forever if fewer
// than `size` admissible values exist, same as the rejection-sampling scheme
// it implements.
int *CreateUniquedArray(int size, int range, std::set<int> *st = nullptr) {
  int *out = new int[size];
  std::set<int> taken;
  for (int i = 0; i < size; ++i) {
    int candidate;
    do {
      candidate = rand() % range;
    } while ((st != nullptr && st->count(candidate) > 0) ||
             taken.count(candidate) > 0);
    taken.insert(candidate);
    out[i] = candidate;
  }
  std::sort(out, out + size);
  return out;
}
// Builds randomized CPU-side comm-pattern fixtures and mirrors them onto the
// GPU.  For each peer: random input and extra buffers, unique sorted send ids,
// and recv ids disjoint from the send ids.  Ids >= input_size are rewritten
// with ENCODE() so they address the extra buffer; every recv destination is
// poisoned with -1 so the test can tell whether the copy actually wrote it.
void BuildLargeCommPatternInfo(
    std::vector<CommPatternInfo> *infos,
    std::vector<CommPatternInfo> *cpu_infos, std::vector<int *> *inputs,
    std::vector<int *> *cpu_inputs, int n_peers,
    const std::vector<std::pair<int, int>> &send_recv_size, int input_size,
    int extra_buff_size, int feat_size, int buff_size) {
  std::vector<RingCommPatternInfo *> ring_infos;
  for (auto &info : *cpu_infos) {
    ring_infos.push_back(info.GetRingCommPatternInfo());
  }
  for (int i = 0; i < n_peers; ++i) {
    cpu_inputs->at(i) = CreateRandomArray(input_size * feat_size, 100);
    ring_infos[i]->dev_extra_mem =
        CreateRandomArray(extra_buff_size * feat_size, 100);
    int send_size = send_recv_size[i].first;
    int recv_size = send_recv_size[i].second;
    ring_infos[i]->send_ids =
        CreateUniquedArray(send_size, input_size + extra_buff_size);
    std::set<int> id_set;
    for (int j = 0; j < send_size; ++j) {
      auto &id = ring_infos[i]->send_ids[j];
      id_set.insert(id);
      if (id >= input_size) {
        // Ids past the input range address the extra buffer, encoded.
        id = ENCODE(id - input_size);
      }
    }
    // Recv ids must not collide with this peer's send ids.
    ring_infos[i]->recv_ids =
        CreateUniquedArray(recv_size, input_size + extra_buff_size, &id_set);
    for (int j = 0; j < recv_size; ++j) {
      auto &id = ring_infos[i]->recv_ids[j];
      if (id >= input_size) {
        id = ENCODE(id - input_size);
        int t = ENCODE(id);  // decode back to a raw extra-buffer index
        for (int k = 0; k < feat_size; ++k) {
          ((int *)ring_infos[i]->dev_extra_mem)[t * feat_size + k] = -1;
        }
      } else {
        for (int k = 0; k < feat_size; ++k) {
          cpu_inputs->at(i)[id * feat_size + k] = -1;
        }
      }
    }
    GCCLMallocAndCopy(&inputs->at(i), cpu_inputs->at(i),
                      input_size * feat_size);
    // Fix: the CPU extra buffer holds extra_buff_size * feat_size ints, so
    // copy exactly that many (previously input_size * feat_size, which reads
    // out of bounds whenever input_size > extra_buff_size).
    GCCLMallocAndCopy(
        (int **)&infos->at(i).GetRingCommPatternInfo()->dev_extra_mem,
        (int *)ring_infos[i]->dev_extra_mem, extra_buff_size * feat_size);
    GCCLMallocAndCopy(&infos->at(i).GetRingCommPatternInfo()->send_ids,
                      ring_infos[i]->send_ids, send_size);
    GCCLMallocAndCopy(&infos->at(i).GetRingCommPatternInfo()->recv_ids,
                      ring_infos[i]->recv_ids, recv_size);
    int dev_mem_size = offsetof(RecvDevMem, buff) + buff_size;
    GCCLCudaMalloc((char **)&infos->at(i)
                       .GetRingCommPatternInfo()
                       ->forward_conn.recv_dev_mem,
                   dev_mem_size);
    GCCLCudaMalloc(
        &infos->at(i).GetRingCommPatternInfo()->forward_conn.send_dev_mem, 1);
  }
}
void ConnectOnRing(std::vector<CommPatternInfo> *infos, int n_peers) {
for (int i = 0; i < n_peers; ++i) {
int next = (i + 1) % n_peers;
int prev = (i + n_peers - 1) % n_peers;
SendDevMem *prev_send_mem, *my_send_mem;
RecvDevMem *next_recv_mem, *my_recv_mem;
next_recv_mem =
infos->at(next).GetRingCommPatternInfo()->forward_conn.recv_dev_mem;
my_send_mem =
infos->at(i).GetRingCommPatternInfo()->forward_conn.send_dev_mem;
my_recv_mem =
infos->at(i).GetRingCommPatternInfo()->forward_conn.recv_dev_mem;
prev_send_mem =
infos->at(prev).GetRingCommPatternInfo()->forward_conn.send_dev_mem;
infos->at(i)
.GetRingCommPatternInfo()
->forward_conn.conn_info.my_substage_ready =
&my_recv_mem->substage_ready;
infos->at(i)
.GetRingCommPatternInfo()
->forward_conn.conn_info.my_substage_done = &my_send_mem->substage_done;
infos->at(i)
.GetRingCommPatternInfo()
->forward_conn.conn_info.next_substage_ready =
&next_recv_mem->substage_ready;
infos->at(i)
.GetRingCommPatternInfo()
->forward_conn.conn_info.prev_substage_done =
&prev_send_mem->substage_done;
infos->at(i)
.GetRingCommPatternInfo()
->forward_conn.conn_info.next_recv_buff = &next_recv_mem->buff;
}
}
// Computes, on the CPU, the buffers each peer should hold after one ring copy
// step: peer i receives record j from the previous peer's send_ids[j] into its
// own recv_ids[j].  Negative ids are ENCODE()d extra-buffer indices (ENCODE is
// presumably its own inverse, since it is applied again to decode -- confirm);
// non-negative ids index the input buffer.
// [out] exp_inputs     - expected input buffer per peer (appended to)
// [out] exp_extra_buff - expected extra buffer per peer (appended to)
// [in]  cpu_infos      - CPU-side comm patterns holding send/recv ids
// [in]  cpu_inputs     - CPU copies of each peer's input buffer
// [in]  send_recv_size - per-peer (send_size, recv_size); a peer's recv size
//                        must equal its predecessor's send size
void GetExpectedResult(std::vector<std::vector<int>> *exp_inputs,
                       std::vector<std::vector<int>> *exp_extra_buff,
                       const std::vector<CommPatternInfo> &cpu_infos,
                       const std::vector<int *> &cpu_inputs, int n_peers,
                       const std::vector<std::pair<int, int>> &send_recv_size,
                       int input_size, int extra_buff_size, int feat_size) {
  for (int i = 0; i < n_peers; ++i) {
    int prev = (i + n_peers - 1) % n_peers;
    // Start from the current contents, then overwrite the received records.
    exp_inputs->push_back(std::vector<int>(
        cpu_inputs[i], cpu_inputs[i] + input_size * feat_size));
    exp_extra_buff->push_back(std::vector<int>(
        (int *)cpu_infos[i].GetRingCommPatternInfo()->dev_extra_mem,
        (int *)cpu_infos[i].GetRingCommPatternInfo()->dev_extra_mem +
            extra_buff_size * feat_size));
    int prev_send_size = send_recv_size[prev].first;
    int recv_size = send_recv_size[i].second;
    CHECK(prev_send_size == recv_size);
    for (int j = 0; j < recv_size; ++j) {
      int send_id = cpu_infos[prev].GetRingCommPatternInfo()->send_ids[j];
      int recv_id = cpu_infos[i].GetRingCommPatternInfo()->recv_ids[j];
      int *send_ptr, *val_ptr;
      // Resolve the source: negative ids decode into the previous peer's
      // extra buffer, non-negative ids into its input buffer.
      if (send_id < 0) {
        send_id = ENCODE(send_id);
        send_ptr =
            (int *)cpu_infos[prev].GetRingCommPatternInfo()->dev_extra_mem +
            send_id * feat_size;
      } else {
        send_ptr = cpu_inputs[prev] + send_id * feat_size;
      }
      // Resolve the destination the same way, but inside the expected copies.
      if (recv_id < 0) {
        recv_id = ENCODE(recv_id);
        val_ptr = exp_extra_buff->at(i).data() + recv_id * feat_size;
      } else {
        val_ptr = exp_inputs->at(i).data() + recv_id * feat_size;
      }
      // Copy one feat_size-wide record.
      for (int k = 0; k < feat_size; ++k) {
        *(val_ptr + k) = *(send_ptr + k);
      }
    }
  }
}
// Same ring-copy exercise as Copy128b, but with large randomized id lists and
// unbalanced send/recv sizes so the staging buffer wraps many times
// (buff_size of 128 bytes vs up to 1280 records).  Expected results are
// computed on the CPU by GetExpectedResult.
TEST_F(TestPrimitives, Copy128bLarge) {
  int buff_size = 128;  // bytes
  int n_peers = 3;
  int n_threads = 4;
  int feat_size = 4;
  int record_size = feat_size * sizeof(int);
  int input_size = 1024, extra_buff_size = 1024;  // n elements
  std::vector<CommPatternInfo> infos(3), cpu_infos(3);
  std::vector<int *> inputs(n_peers), cpu_inputs(n_peers);
  std::vector<std::pair<int, int>> send_recv_size = {
      {1280, 256}, {16, 1280}, {256, 16}};
  // std::vector<std::pair<int, int>> send_recv_size = {
  //     {4, 2}, {2, 4}, {2, 2}};
  hipSetDevice(0);
  // Element type is int
  BuildLargeCommPatternInfo(&infos, &cpu_infos, &inputs, &cpu_inputs, n_peers,
                            send_recv_size, input_size, extra_buff_size,
                            feat_size, buff_size);
  ConnectOnRing(&infos, n_peers);
  std::vector<std::vector<int>> exp_inputs, exp_extra_buff;
  GetExpectedResult(&exp_inputs, &exp_extra_buff, cpu_infos, cpu_inputs,
                    n_peers, send_recv_size, input_size, extra_buff_size,
                    feat_size);
  std::vector<RingCommPatternInfo *> ring_infos;
  for (auto &info : infos) {
    ring_infos.push_back(info.GetRingCommPatternInfo());
  }
  // One host thread per peer; the kernels rendezvous with each other on the
  // device, so all three must be in flight concurrently.
  std::vector<std::thread> ths;
  for (int i = 0; i < n_peers; ++i) {
    ths.emplace_back([&ring_infos, i, record_size, buff_size, &send_recv_size,
                      &inputs, &exp_inputs, n_threads, &exp_extra_buff]() {
      CopyArgs args(-1, -1,
                    ring_infos[i]->forward_conn.conn_info.my_substage_ready,
                    ring_infos[i]->forward_conn.conn_info.my_substage_done,
                    ring_infos[i]->forward_conn.conn_info.next_substage_ready,
                    ring_infos[i]->forward_conn.conn_info.prev_substage_done);
      hipSetDevice(0);  // each std::thread needs its own device binding
      args.n_128b = record_size / PACK_SIZE;
      args.buff_n_128b = buff_size / PACK_SIZE;
      args.input = (Pack128 *)inputs[i];
      args.recv_buff =
          (Pack128 *)&ring_infos[i]->forward_conn.recv_dev_mem->buff;
      args.extra_buff = (Pack128 *)ring_infos[i]->dev_extra_mem;
      args.next_recv_buff =
          (Pack128 *)ring_infos[i]->forward_conn.conn_info.next_recv_buff;
      args.send_ids = ring_infos[i]->send_ids;
      args.send_size = send_recv_size[i].first;
      args.recv_ids = ring_infos[i]->recv_ids;
      args.recv_size = send_recv_size[i].second;
      args.extra_buff_size = exp_extra_buff[i].size();
      args.max_comm_size = 1280;
      void *kernel_args[] = {&args};
      hipStream_t stream;
      hipStreamCreate(&stream);
      // hipify leftover fix: this is a HIP translation unit, so the kernel
      // must be launched through the HIP runtime, not cudaLaunchKernel().
      hipLaunchKernel((void *)Copy128bGlobal, dim3(1), dim3(n_threads),
                      kernel_args, 0, stream);
      hipStreamSynchronize(stream);
      EXPECT_GPU_CPU_VEC_EQ((int *)args.input, exp_inputs[i]);
      EXPECT_GPU_CPU_VEC_EQ((int *)args.extra_buff, exp_extra_buff[i]);
      hipStreamDestroy(stream);  // was leaked before
    });
  }
  for (auto &t : ths) {
    t.join();
  }
}
} // namespace
} // namespace gccl
| 05d7076f973fea1a7627ee38c1ab8042cbf12a2d.cu | #include "gpu/kernel/primitives.h"
#include <algorithm>
#include <cstddef>
#include <cstdlib>
#include <set>
#include <thread>
#include <utility>
#include <vector>

#include "glog/logging.h"
#include "gtest/gtest.h"

#include "comm/pattern/ring_comm_pattern.h"
#include "gpu/common.h"
#include "test_cuda_utils.h"
#include "test_utils.h"
#include "utils.h"
namespace gccl {
namespace {
// Fixture for the 128-bit copy-primitive tests; carries no shared state and
// only provides the googletest plumbing.
class TestPrimitives : public testing::Test {};
// Exercises the 128-bit ring copy primitive (Copy128bGlobal) on a 3-peer ring
// with small hand-built send/recv id lists.  Each peer runs in its own host
// thread and launches the kernel on its own stream; the kernel instances
// synchronize with one another through the substage_ready/substage_done flags
// wired up below.  Id convention: ids >= 0 index the input buffer, negative
// ids address dev_extra_mem (presumably via ENCODE() -- see the large test).
TEST_F(TestPrimitives, Copy128b) {
  int record_size = 32;                    // bytes per logical record
  int buff_size = 1024;                    // staging buffer size in bytes
  int n_peers = 3;
  int n_int = record_size / sizeof(int);   // ints per record
  std::vector<CommPatternInfo> infos(3);
  std::vector<RingCommPatternInfo *> ring_infos;
  for (auto &info : infos) {
    ring_infos.push_back(info.GetRingCommPatternInfo());
  }
  std::vector<std::pair<int, int>> send_recv_size = {{3, 2}, {2, 3}, {2, 2}};
  cudaSetDevice(0);
  GCCLMallocAndCopy(&ring_infos[0]->recv_ids, {1, -2});
  GCCLMallocAndCopy(&ring_infos[1]->recv_ids, {0, -1, 1});
  GCCLMallocAndCopy(&ring_infos[2]->recv_ids, {2, -2});
  GCCLMallocAndCopy(&ring_infos[0]->send_ids, {0, 2, -1});
  GCCLMallocAndCopy(&ring_infos[1]->send_ids, {2, -2});
  GCCLMallocAndCopy(&ring_infos[2]->send_ids, {0, -1});
  std::vector<int *> inputs(n_peers);
  GCCLMallocAndCopy(&inputs[0], Repeat(std::vector<int>({5, -1, 9}), n_int));
  GCCLMallocAndCopy(&inputs[1], Repeat(std::vector<int>({-1, -1, 8}), n_int));
  GCCLMallocAndCopy(&inputs[2], Repeat(std::vector<int>({7, 1, -1}), n_int));
  GCCLMallocAndCopy((int **)&ring_infos[0]->dev_extra_mem,
                    Repeat(std::vector<int>({3, -1}), n_int));
  GCCLMallocAndCopy((int **)&ring_infos[1]->dev_extra_mem,
                    Repeat(std::vector<int>({-1, 4}), n_int));
  GCCLMallocAndCopy((int **)&ring_infos[2]->dev_extra_mem,
                    Repeat(std::vector<int>({0, -1}), n_int));
  int recv_dev_mem_size = offsetof(RecvDevMem, buff) + buff_size;
  for (int i = 0; i < n_peers; ++i) {
    GCCLCudaMalloc((char **)&ring_infos[i]->forward_conn.recv_dev_mem,
                   recv_dev_mem_size);
    GCCLCudaMalloc(&ring_infos[i]->forward_conn.send_dev_mem, 1);
  }
  // Wire each peer's connection flags to its own, next, and previous device
  // memory so the kernels can hand-shake around the ring.
  for (int i = 0; i < n_peers; ++i) {
    auto &next_info = ring_infos[(i + 1) % n_peers];
    auto &prev_info = ring_infos[(i + n_peers - 1) % n_peers];
    auto &my_info = ring_infos[i];
    my_info->forward_conn.conn_info.my_stage_ready =
        &my_info->forward_conn.recv_dev_mem->stage_ready;
    my_info->forward_conn.conn_info.my_substage_ready =
        &my_info->forward_conn.recv_dev_mem->substage_ready;
    my_info->forward_conn.conn_info.my_substage_done =
        &my_info->forward_conn.send_dev_mem->substage_done;
    my_info->forward_conn.conn_info.next_recv_buff =
        &next_info->forward_conn.recv_dev_mem->buff;
    my_info->forward_conn.conn_info.next_substage_ready =
        &next_info->forward_conn.recv_dev_mem->substage_ready;
    my_info->forward_conn.conn_info.prev_substage_done =
        &prev_info->forward_conn.send_dev_mem->substage_done;
  }
  std::vector<std::vector<int>> expected_inputs = {
      Repeat(std::vector<int>({5, 7, 9}), n_int),
      Repeat(std::vector<int>({5, 3, 8}), n_int),
      Repeat(std::vector<int>({7, 1, 8}), n_int)};
  std::vector<std::vector<int>> expected_extra_buff = {
      Repeat(std::vector<int>({3, 0}), n_int),
      Repeat(std::vector<int>({9, 4}), n_int),
      Repeat(std::vector<int>({0, 4}), n_int)};
  std::vector<std::thread> ths;
  for (int i = 0; i < n_peers; ++i) {
    ths.emplace_back([&ring_infos, i, record_size, buff_size, &send_recv_size,
                      &inputs, &expected_inputs, &expected_extra_buff]() {
      CopyArgs args(-1, -1,
                    ring_infos[i]->forward_conn.conn_info.my_substage_ready,
                    ring_infos[i]->forward_conn.conn_info.my_substage_done,
                    ring_infos[i]->forward_conn.conn_info.next_substage_ready,
                    ring_infos[i]->forward_conn.conn_info.prev_substage_done);
      args.n_128b = record_size / PACK_SIZE;
      args.buff_n_128b = buff_size / PACK_SIZE;
      args.input = (Pack128 *)inputs[i];
      args.recv_buff =
          (Pack128 *)&ring_infos[i]->forward_conn.recv_dev_mem->buff;
      args.extra_buff = (Pack128 *)ring_infos[i]->dev_extra_mem;
      args.next_recv_buff =
          (Pack128 *)ring_infos[i]->forward_conn.conn_info.next_recv_buff;
      args.send_ids = ring_infos[i]->send_ids;
      args.send_size = send_recv_size[i].first;
      args.recv_ids = ring_infos[i]->recv_ids;
      args.recv_size = send_recv_size[i].second;
      args.max_comm_size = 3;
      args.extra_buff_size = 2;
      int n_threads = 32;
      void *kernel_args[] = {&args};
      cudaStream_t stream;
      cudaStreamCreate(&stream);
      cudaLaunchKernel((void *)Copy128bGlobal, dim3(1), dim3(n_threads),
                       kernel_args, 0, stream);
      cudaStreamSynchronize(stream);
      EXPECT_GPU_CPU_VEC_EQ((int *)args.input, expected_inputs[i]);
      EXPECT_GPU_CPU_VEC_EQ((int *)args.extra_buff, expected_extra_buff[i]);
      cudaStreamDestroy(stream);  // was leaked before
    });
  }
  for (auto &t : ths) {
    t.join();
  }
}
// Returns a new[]-allocated array of `size` ints, each sampled from
// [0, range) using rand().  Ownership passes to the caller (delete[]).
int *CreateRandomArray(int size, int range) {
  int *out = new int[size];
  for (int *p = out; p != out + size; ++p) {
    *p = rand() % range;
  }
  return out;
}
// Returns a sorted new[]-allocated array of `size` distinct ints from
// [0, range), skipping any value contained in `st` (when provided).  The
// caller owns the result (delete[]).  Rejection sampling: spins forever if
// fewer than `size` admissible values remain, like the original scheme.
int *CreateUniquedArray(int size, int range, std::set<int> *st = nullptr) {
  int *out = new int[size];
  std::set<int> seen;
  for (int i = 0; i < size; ++i) {
    int v;
    do {
      v = rand() % range;
    } while ((st != nullptr && st->count(v) > 0) || seen.count(v) > 0);
    seen.insert(v);
    out[i] = v;
  }
  std::sort(out, out + size);
  return out;
}
// Builds randomized CPU-side comm-pattern fixtures and mirrors them onto the
// GPU.  For each peer: random input and extra buffers, unique sorted send ids,
// and recv ids disjoint from the send ids.  Ids >= input_size are rewritten
// with ENCODE() so they address the extra buffer; every recv destination is
// poisoned with -1 so the test can tell whether the copy actually wrote it.
void BuildLargeCommPatternInfo(
    std::vector<CommPatternInfo> *infos,
    std::vector<CommPatternInfo> *cpu_infos, std::vector<int *> *inputs,
    std::vector<int *> *cpu_inputs, int n_peers,
    const std::vector<std::pair<int, int>> &send_recv_size, int input_size,
    int extra_buff_size, int feat_size, int buff_size) {
  std::vector<RingCommPatternInfo *> ring_infos;
  for (auto &info : *cpu_infos) {
    ring_infos.push_back(info.GetRingCommPatternInfo());
  }
  for (int i = 0; i < n_peers; ++i) {
    cpu_inputs->at(i) = CreateRandomArray(input_size * feat_size, 100);
    ring_infos[i]->dev_extra_mem =
        CreateRandomArray(extra_buff_size * feat_size, 100);
    int send_size = send_recv_size[i].first;
    int recv_size = send_recv_size[i].second;
    ring_infos[i]->send_ids =
        CreateUniquedArray(send_size, input_size + extra_buff_size);
    std::set<int> id_set;
    for (int j = 0; j < send_size; ++j) {
      auto &id = ring_infos[i]->send_ids[j];
      id_set.insert(id);
      if (id >= input_size) {
        // Ids past the input range address the extra buffer, encoded.
        id = ENCODE(id - input_size);
      }
    }
    // Recv ids must not collide with this peer's send ids.
    ring_infos[i]->recv_ids =
        CreateUniquedArray(recv_size, input_size + extra_buff_size, &id_set);
    for (int j = 0; j < recv_size; ++j) {
      auto &id = ring_infos[i]->recv_ids[j];
      if (id >= input_size) {
        id = ENCODE(id - input_size);
        int t = ENCODE(id);  // decode back to a raw extra-buffer index
        for (int k = 0; k < feat_size; ++k) {
          ((int *)ring_infos[i]->dev_extra_mem)[t * feat_size + k] = -1;
        }
      } else {
        for (int k = 0; k < feat_size; ++k) {
          cpu_inputs->at(i)[id * feat_size + k] = -1;
        }
      }
    }
    GCCLMallocAndCopy(&inputs->at(i), cpu_inputs->at(i),
                      input_size * feat_size);
    // Fix: the CPU extra buffer holds extra_buff_size * feat_size ints, so
    // copy exactly that many (previously input_size * feat_size, which reads
    // out of bounds whenever input_size > extra_buff_size).
    GCCLMallocAndCopy(
        (int **)&infos->at(i).GetRingCommPatternInfo()->dev_extra_mem,
        (int *)ring_infos[i]->dev_extra_mem, extra_buff_size * feat_size);
    GCCLMallocAndCopy(&infos->at(i).GetRingCommPatternInfo()->send_ids,
                      ring_infos[i]->send_ids, send_size);
    GCCLMallocAndCopy(&infos->at(i).GetRingCommPatternInfo()->recv_ids,
                      ring_infos[i]->recv_ids, recv_size);
    int dev_mem_size = offsetof(RecvDevMem, buff) + buff_size;
    GCCLCudaMalloc((char **)&infos->at(i)
                       .GetRingCommPatternInfo()
                       ->forward_conn.recv_dev_mem,
                   dev_mem_size);
    GCCLCudaMalloc(
        &infos->at(i).GetRingCommPatternInfo()->forward_conn.send_dev_mem, 1);
  }
}
void ConnectOnRing(std::vector<CommPatternInfo> *infos, int n_peers) {
for (int i = 0; i < n_peers; ++i) {
int next = (i + 1) % n_peers;
int prev = (i + n_peers - 1) % n_peers;
SendDevMem *prev_send_mem, *my_send_mem;
RecvDevMem *next_recv_mem, *my_recv_mem;
next_recv_mem =
infos->at(next).GetRingCommPatternInfo()->forward_conn.recv_dev_mem;
my_send_mem =
infos->at(i).GetRingCommPatternInfo()->forward_conn.send_dev_mem;
my_recv_mem =
infos->at(i).GetRingCommPatternInfo()->forward_conn.recv_dev_mem;
prev_send_mem =
infos->at(prev).GetRingCommPatternInfo()->forward_conn.send_dev_mem;
infos->at(i)
.GetRingCommPatternInfo()
->forward_conn.conn_info.my_substage_ready =
&my_recv_mem->substage_ready;
infos->at(i)
.GetRingCommPatternInfo()
->forward_conn.conn_info.my_substage_done = &my_send_mem->substage_done;
infos->at(i)
.GetRingCommPatternInfo()
->forward_conn.conn_info.next_substage_ready =
&next_recv_mem->substage_ready;
infos->at(i)
.GetRingCommPatternInfo()
->forward_conn.conn_info.prev_substage_done =
&prev_send_mem->substage_done;
infos->at(i)
.GetRingCommPatternInfo()
->forward_conn.conn_info.next_recv_buff = &next_recv_mem->buff;
}
}
// Computes, on the CPU, the buffers each peer should hold after one ring copy
// step: peer i receives record j from the previous peer's send_ids[j] into its
// own recv_ids[j].  Negative ids are ENCODE()d extra-buffer indices (ENCODE is
// presumably its own inverse, since it is applied again to decode -- confirm);
// non-negative ids index the input buffer.
// [out] exp_inputs     - expected input buffer per peer (appended to)
// [out] exp_extra_buff - expected extra buffer per peer (appended to)
// [in]  cpu_infos      - CPU-side comm patterns holding send/recv ids
// [in]  cpu_inputs     - CPU copies of each peer's input buffer
// [in]  send_recv_size - per-peer (send_size, recv_size); a peer's recv size
//                        must equal its predecessor's send size
void GetExpectedResult(std::vector<std::vector<int>> *exp_inputs,
                       std::vector<std::vector<int>> *exp_extra_buff,
                       const std::vector<CommPatternInfo> &cpu_infos,
                       const std::vector<int *> &cpu_inputs, int n_peers,
                       const std::vector<std::pair<int, int>> &send_recv_size,
                       int input_size, int extra_buff_size, int feat_size) {
  for (int i = 0; i < n_peers; ++i) {
    int prev = (i + n_peers - 1) % n_peers;
    // Start from the current contents, then overwrite the received records.
    exp_inputs->push_back(std::vector<int>(
        cpu_inputs[i], cpu_inputs[i] + input_size * feat_size));
    exp_extra_buff->push_back(std::vector<int>(
        (int *)cpu_infos[i].GetRingCommPatternInfo()->dev_extra_mem,
        (int *)cpu_infos[i].GetRingCommPatternInfo()->dev_extra_mem +
            extra_buff_size * feat_size));
    int prev_send_size = send_recv_size[prev].first;
    int recv_size = send_recv_size[i].second;
    CHECK(prev_send_size == recv_size);
    for (int j = 0; j < recv_size; ++j) {
      int send_id = cpu_infos[prev].GetRingCommPatternInfo()->send_ids[j];
      int recv_id = cpu_infos[i].GetRingCommPatternInfo()->recv_ids[j];
      int *send_ptr, *val_ptr;
      // Resolve the source: negative ids decode into the previous peer's
      // extra buffer, non-negative ids into its input buffer.
      if (send_id < 0) {
        send_id = ENCODE(send_id);
        send_ptr =
            (int *)cpu_infos[prev].GetRingCommPatternInfo()->dev_extra_mem +
            send_id * feat_size;
      } else {
        send_ptr = cpu_inputs[prev] + send_id * feat_size;
      }
      // Resolve the destination the same way, but inside the expected copies.
      if (recv_id < 0) {
        recv_id = ENCODE(recv_id);
        val_ptr = exp_extra_buff->at(i).data() + recv_id * feat_size;
      } else {
        val_ptr = exp_inputs->at(i).data() + recv_id * feat_size;
      }
      // Copy one feat_size-wide record.
      for (int k = 0; k < feat_size; ++k) {
        *(val_ptr + k) = *(send_ptr + k);
      }
    }
  }
}
// Stress test of the 128-bit ring copy primitive with three peers and large,
// asymmetric send/recv sets.  The expected result is computed on the CPU
// (GetExpectedResult); each peer then launches Copy128bGlobal on device 0
// from its own host thread and the resulting device buffers are compared
// element-wise against the CPU expectation.
TEST_F(TestPrimitives, Copy128bLarge) {
  int buff_size = 128;  // bytes
  int n_peers = 3;
  int n_threads = 4;    // threads per kernel launch
  int feat_size = 4;    // ints per feature record
  int record_size = feat_size * sizeof(int);
  int input_size = 1024, extra_buff_size = 1024;  // n elements
  std::vector<CommPatternInfo> infos(3), cpu_infos(3);
  std::vector<int *> inputs(n_peers), cpu_inputs(n_peers);
  // Per-peer (send, recv) element counts; the recv count of peer i matches
  // the send count of peer i-1 on the ring.
  std::vector<std::pair<int, int>> send_recv_size = {
      {1280, 256}, {16, 1280}, {256, 16}};
  // std::vector<std::pair<int, int>> send_recv_size = {
  //    {4, 2}, {2, 4}, {2, 2}};
  cudaSetDevice(0);
  // Element type is int
  BuildLargeCommPatternInfo(&infos, &cpu_infos, &inputs, &cpu_inputs, n_peers,
                            send_recv_size, input_size, extra_buff_size,
                            feat_size, buff_size);
  ConnectOnRing(&infos, n_peers);
  std::vector<std::vector<int>> exp_inputs, exp_extra_buff;
  GetExpectedResult(&exp_inputs, &exp_extra_buff, cpu_infos, cpu_inputs,
                    n_peers, send_recv_size, input_size, extra_buff_size,
                    feat_size);
  // for(int i = 0; i < n_peers; ++i) {
  //  printf("Send id for %d: %s\n", i,
  //  VecToString(std::vector<int>(cpu_infos[i].send_ids, cpu_infos[i].send_ids +
  //  send_recv_size[i].first)).c_str());
  //  printf("Recv id for %d: %s\n", i,
  //  VecToString(std::vector<int>(cpu_infos[i].recv_ids, cpu_infos[i].recv_ids +
  //  send_recv_size[i].second)).c_str());
  //  printf("Expected input for %d: %s\n", i,
  //  VecToString(exp_inputs[i]).c_str());
  //  printf("Expected extra buff for %d: %s\n", i,
  //  VecToString(exp_extra_buff[i]).c_str());
  //}
  std::vector<RingCommPatternInfo *> ring_infos;
  for (auto &info : infos) {
    ring_infos.push_back(info.GetRingCommPatternInfo());
  }
  // One host thread per peer; each sets up its CopyArgs and launches the
  // copy kernel on a private stream.
  std::vector<std::thread> ths;
  for (int i = 0; i < n_peers; ++i) {
    ths.emplace_back([&ring_infos, i, record_size, buff_size, &send_recv_size,
                      &inputs, &exp_inputs, n_threads, &exp_extra_buff]() {
      // Rank/peer ids are unused here (-1); only the substage flags matter.
      CopyArgs args(-1, -1,
                    ring_infos[i]->forward_conn.conn_info.my_substage_ready,
                    ring_infos[i]->forward_conn.conn_info.my_substage_done,
                    ring_infos[i]->forward_conn.conn_info.next_substage_ready,
                    ring_infos[i]->forward_conn.conn_info.prev_substage_done);
      cudaSetDevice(0);
      args.n_128b = record_size / PACK_SIZE;     // 128-bit packs per record
      args.buff_n_128b = buff_size / PACK_SIZE;  // 128-bit packs per buffer
      args.input = (Pack128 *)inputs[i];
      args.recv_buff =
          (Pack128 *)&ring_infos[i]->forward_conn.recv_dev_mem->buff;
      args.extra_buff = (Pack128 *)ring_infos[i]->dev_extra_mem;
      args.next_recv_buff =
          (Pack128 *)ring_infos[i]->forward_conn.conn_info.next_recv_buff;
      args.send_ids = ring_infos[i]->send_ids;
      args.send_size = send_recv_size[i].first;
      args.recv_ids = ring_infos[i]->recv_ids;
      args.recv_size = send_recv_size[i].second;
      args.extra_buff_size = exp_extra_buff[i].size();
      args.max_comm_size = 1280;  // largest transfer size in send_recv_size
      void *kernel_args[] = {&args};
      cudaStream_t stream;
      cudaStreamCreate(&stream);
      cudaLaunchKernel((void *)Copy128bGlobal, dim3(1), dim3(n_threads),
                       kernel_args, 0, stream);
      cudaStreamSynchronize(stream);
      // LOG(INFO) << "Result input for " << i << " is:" <<
      // CudaVecToString((int*)args.input, exp_inputs[i].size());
      // LOG(INFO) << "Result buff for " << i << " is:" <<
      // CudaVecToString((int*)args.extra_buff, exp_extra_buff[i].size());
      EXPECT_GPU_CPU_VEC_EQ((int *)args.input, exp_inputs[i]);
      EXPECT_GPU_CPU_VEC_EQ((int *)args.extra_buff, exp_extra_buff[i]);
    });
  }
  for (auto &t : ths) {
    t.join();
  }
}
} // namespace
} // namespace gccl
|
6af79906905acfae0ebf59537ffaaf2dab7e775c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "spaceFilterUpdate.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const double *Params = NULL;
hipMalloc(&Params, XSIZE*YSIZE);
const float *data = NULL;
hipMalloc(&data, XSIZE*YSIZE);
const float *U = NULL;
hipMalloc(&U, XSIZE*YSIZE);
const bool *UtU = NULL;
hipMalloc(&UtU, XSIZE*YSIZE);
const int *iC = NULL;
hipMalloc(&iC, XSIZE*YSIZE);
const int *iW = NULL;
hipMalloc(&iW, XSIZE*YSIZE);
float *dprod = NULL;
hipMalloc(&dprod, XSIZE*YSIZE);
const int *st = NULL;
hipMalloc(&st, XSIZE*YSIZE);
const int *id = NULL;
hipMalloc(&id, XSIZE*YSIZE);
const int *counter = NULL;
hipMalloc(&counter, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
spaceFilterUpdate), dim3(gridBlock),dim3(threadBlock), 0, 0, Params,data,U,UtU,iC,iW,dprod,st,id,counter);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
spaceFilterUpdate), dim3(gridBlock),dim3(threadBlock), 0, 0, Params,data,U,UtU,iC,iW,dprod,st,id,counter);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
spaceFilterUpdate), dim3(gridBlock),dim3(threadBlock), 0, 0, Params,data,U,UtU,iC,iW,dprod,st,id,counter);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 6af79906905acfae0ebf59537ffaaf2dab7e775c.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "spaceFilterUpdate.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark driver for the spaceFilterUpdate kernel.
// Usage: <prog> <matrix_len>.  For each of the first <matrix_len> matrix
// sizes and all 20 block shapes it rounds the grid up to cover the matrix,
// does one launch plus 10 warm-up launches, then times 1000 launches and
// prints [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].
int main(int argc, char **argv) {
  cudaSetDevice(0);
  char* p;int matrix_len=strtol(argv[1], &p, 10);
  for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
    for(int block_looper=0;block_looper<20;block_looper++){
      int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
      // NOTE(review): allocations below are XSIZE*YSIZE *bytes* (not elements
      // * sizeof(T)), and no buffer is freed between configurations -- both
      // are artefacts of the benchmark generator; the kernel only needs valid
      // pointers for timing.
      const double *Params = NULL;
      cudaMalloc(&Params, XSIZE*YSIZE);
      const float *data = NULL;
      cudaMalloc(&data, XSIZE*YSIZE);
      const float *U = NULL;
      cudaMalloc(&U, XSIZE*YSIZE);
      const bool *UtU = NULL;
      cudaMalloc(&UtU, XSIZE*YSIZE);
      const int *iC = NULL;
      cudaMalloc(&iC, XSIZE*YSIZE);
      const int *iW = NULL;
      cudaMalloc(&iW, XSIZE*YSIZE);
      float *dprod = NULL;
      cudaMalloc(&dprod, XSIZE*YSIZE);
      const int *st = NULL;
      cudaMalloc(&st, XSIZE*YSIZE);
      const int *id = NULL;
      cudaMalloc(&id, XSIZE*YSIZE);
      const int *counter = NULL;
      cudaMalloc(&counter, XSIZE*YSIZE);
      // Round the problem size up to a multiple of the block shape.
      int iXSIZE= XSIZE;
      int iYSIZE= YSIZE;
      while(iXSIZE%BLOCKX!=0)
      {
        iXSIZE++;
      }
      while(iYSIZE%BLOCKY!=0)
      {
        iYSIZE++;
      }
      dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
      dim3 threadBlock(BLOCKX, BLOCKY);
      cudaFree(0);  // force context creation before timing
      spaceFilterUpdate<<<gridBlock,threadBlock>>>(Params,data,U,UtU,iC,iW,dprod,st,id,counter);
      cudaDeviceSynchronize();
      // Warm-up launches (untimed).
      for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
        spaceFilterUpdate<<<gridBlock,threadBlock>>>(Params,data,U,UtU,iC,iW,dprod,st,id,counter);
      }
      // Timed region: 1000 asynchronous launches; note there is no final
      // synchronize, so this measures launch/queue time, not kernel time.
      auto start = steady_clock::now();
      for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
        spaceFilterUpdate<<<gridBlock,threadBlock>>>(Params,data,U,UtU,iC,iW,dprod,st,id,counter);
      }
      auto end = steady_clock::now();
      auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
      cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
    }
  }}
2532b975ee303ee65ec9b4c2c19c21f8c815445d.hip | // !!! This is a file automatically generated by hipify!!!
#include "lab3.h"
#include "hip/hip_runtime.h"
#include <cstdio>
// Integer ceiling division: CeilDiv(a, b) == ceil(a / b) for a >= 1, b >= 1.
// (Note: for a == 0 and b > 1 this yields 1, not 0.)
__device__ __host__ int CeilDiv(int a, int b) { return (a - 1)/b + 1; }
// Rounds a up to the next multiple of b (same a == 0 caveat as CeilDiv).
__device__ __host__ int CeilAlign(int a, int b) { return CeilDiv(a, b) * b; }
// Composites masked target pixels directly onto the background image held in
// `output`: for each target pixel whose mask value exceeds 127, the
// corresponding background pixel (shifted by oy/ox) is overwritten with the
// target colour.  One thread per target pixel; interleaved RGB, row-major.
// (`background` itself is not read here; `output` already holds its copy.)
__global__ void SimpleClone(
	const float *background,
	const float *target,
	const float *mask,
	float *output,
	const int wb, const int hb, const int wt, const int ht,
	const int oy, const int ox
)
{
	const int yt = blockIdx.y * blockDim.y + threadIdx.y;
	const int xt = blockIdx.x * blockDim.x + threadIdx.x;
	// Guard: thread must fall inside the target image.
	if (yt < 0 || xt < 0 || yt >= ht || xt >= wt) return;
	const int curt = wt * yt + xt;
	// Guard: pixel must be part of the cloned (masked) region.
	if (!(mask[curt] > 127.0f)) return;
	// Destination coordinates in the background image.
	const int yb = oy + yt, xb = ox + xt;
	if (yb < 0 || yb >= hb || xb < 0 || xb >= wb) return;
	const int curb = wb * yb + xb;
	for (int c = 0; c < 3; ++c) {
		output[curb * 3 + c] = target[curt * 3 + c];
	}
}
// Precomputes the constant ("fixed") term of the Poisson blending system for
// each target pixel:
//   - outside the mask: the background colour itself (Dirichlet value used
//     by JacobiIteration through `fixed`),
//   - inside the mask: the target-image gradient term (4*C - N - S - W - E,
//     clipped at image edges) plus the background colour of every neighbour
//     that lies on/outside the mask boundary.
// One thread per target pixel; channels are interleaved RGB, row-major.
__global__ void init(
	const float *background,
	const float *target,
	const float *mask,
	float *fixed,
	const int bWidth, const int bHeight,
	const int tWidth, const int tHeight,
	const int Oy, const int Ox
)
{
	const int yt = blockIdx.y * blockDim.y + threadIdx.y;
	const int xt = blockIdx.x * blockDim.x + threadIdx.x;
	const int Ct = tWidth*yt + xt;  // centre index in the target image
	if (yt >= 0 && xt >= 0 && yt < tHeight && xt < tWidth) {
		const int yb = Oy + yt, xb = Ox + xt;  // matching background pixel
		const int Cb = bWidth*yb + xb;
		if (0 <= yb && yb < bHeight && 0 <= xb && xb < bWidth) {
			if (mask[Ct] < 127.0f) {
				// set fixed to background
				for (int c = 0; c < 3; c++) {
					fixed[3 * Ct + c] = background[3 * Cb + c];
				}
			}
			else {
				// North/South/West/East neighbour indices, target (t) and
				// background (b) coordinates.
				// NOTE(review): when the cloned region touches the background
				// border (e.g. yt == 0 with yb == 0), Nb/Wb etc. go out of
				// bounds before being read below -- presumably callers keep a
				// one-pixel margin; verify.
				int Nt = tWidth*(yt - 1) + xt;
				int St = tWidth*(yt + 1) + xt;
				int Wt = tWidth*yt + (xt - 1);
				int Et = tWidth*yt + (xt + 1);
				int Nb = bWidth*(yb - 1) + xb;
				int Sb = bWidth*(yb + 1) + xb;
				int Wb = bWidth*yb + (xb - 1);
				int Eb = bWidth*yb + (xb + 1);
				//Calculation
				for (int c = 0; c < 3; c++) {
					// Gradient (Laplacian) of the target, dropping
					// out-of-image neighbours.
					float CbPrime = 0.0f;
					if (yt > 0)
						CbPrime += target[3 * Ct + c] - target[3 * Nt + c];
					if (yt < tHeight - 1)
						CbPrime += target[3 * Ct + c] - target[3 * St + c];
					if (xt > 0)
						CbPrime += target[3 * Ct + c] - target[3 * Wt + c];
					if (xt < tWidth - 1)
						CbPrime += target[3 * Ct + c] - target[3 * Et + c];
					//solve boundary problems: neighbours on/past the mask
					//boundary contribute the background colour directly.
					float boundary = 0.0f;
					if (yt == 0 || mask[Nt] < 127.0f)
						boundary += background[3 * Nb + c];
					if (yt == tHeight - 1 || mask[St] < 127.0f)
						boundary += background[3 * Sb + c];
					if (xt == 0 || mask[Wt] < 127.0f)
						boundary += background[3 * Wb + c];
					if (xt == tWidth - 1 || mask[Et] < 127.0f)
						boundary += background[3 * Eb + c];
					fixed[3 * Ct + c] = CbPrime + boundary;
				}
			}
		}
	}
}
// One Jacobi relaxation sweep over the masked (interior) pixels:
//   buf2[C] = (sum of buf1 over in-mask 4-neighbours + fixed[C]) / 4
// Out-of-image or out-of-mask neighbours contribute nothing here -- their
// influence was already folded into `fixed` by init().  Reads buf1, writes
// buf2; one thread per target pixel, interleaved RGB.
__global__ void JacobiIteration(
	float *fixed,
	const float *mask,
	float *buf1, float *buf2,
	int tWidth, int ht
)
{
	const int yt = (blockIdx.y * blockDim.y + threadIdx.y);
	const int xt = (blockIdx.x * blockDim.x + threadIdx.x);
	const int Ct = tWidth*yt + xt;  // centre index
	if (0 <= yt && 0 <= xt && yt < ht && xt < tWidth) {
		if (mask[Ct] > 127.0f) {
			// North/South/West/East neighbour indices.
			int Nt = tWidth*(yt - 1) + xt;
			int St = tWidth*(yt + 1) + xt;
			int Wt = tWidth*yt + (xt - 1);
			int Et = tWidth*yt + (xt + 1);
			//Calculation
			for (int c = 0; c < 3; c++) {
				float sum = 0.0f;
				if (yt > 0 && mask[Nt] > 127.0f)
					sum += buf1[3 * Nt + c];
				if (yt < ht - 1 && mask[St] > 127.0f)
					sum += buf1[3 * St + c];
				if (xt > 0 && mask[Wt] > 127.0f)
					sum += buf1[3 * Wt + c];
				if (xt < tWidth - 1 && mask[Et] > 127.0f)
					sum += buf1[3 * Et + c];
				float Cb_next = (sum + fixed[Ct * 3 + c]) / 4.0f;
				buf2[3 * Ct + c] = Cb_next;
			}
		}
	}
}
// Host entry point for Poisson image blending (HIP build).  All pointers are
// device memory; images are interleaved RGB.
//  1. Copy the background into `output` and build the per-pixel constant
//     term `fixed` with init().
//  2. Run 10000 loop iterations, each doing two Jacobi sweeps that ping-pong
//     between buf1 and buf2 (so no explicit buffer swap is needed); buf1
//     starts as a copy of `target` and ends up holding the solved interior.
//  3. SimpleClone() composites the solved interior over the background copy.
// NOTE(review): HIP API return codes are not checked anywhere in this path.
void PoissonImageCloning(
	const float *background,
	const float *target,
	const float *mask,
	float *output,
	const int wb, const int hb, const int wt, const int ht,
	const int oy, const int ox
)
{
	float *fixed;
	hipMalloc(&fixed, 3*wt*ht*sizeof(float));
	float *buf1;
	hipMalloc(&buf1, 3*wt*ht*sizeof(float));
	float *buf2;
	hipMalloc(&buf2, 3*wt*ht*sizeof(float));
	hipMemcpy(output, background, wb*hb*sizeof(float) * 3, hipMemcpyDeviceToDevice);
	hipLaunchKernelGGL(( init), dim3(dim3(CeilDiv(wt, 32), CeilDiv(ht, 16))), dim3(dim3(32, 16)), 0, 0, output, target, mask, fixed, wb, hb, wt, ht, oy, ox);
	hipMemcpy(buf1, target, sizeof(float)*3*wt*ht, hipMemcpyDeviceToDevice);
	int iterCount = 10000;
	for(int i = 0; i < iterCount; i++) {
		//Calculate Jacobi iteration and save target from buffer 1 to buffer 2
		hipLaunchKernelGGL(( JacobiIteration), dim3(dim3(CeilDiv(wt, 32), CeilDiv(ht, 16))), dim3(dim3(32, 16)) , 0, 0, fixed, mask, buf1, buf2, wt, ht);
		//Do second time in reverse direction (buffer 2 to buffer 1) in one iteration so we can save the time for swapping
		hipLaunchKernelGGL(( JacobiIteration), dim3(dim3(CeilDiv(wt, 32), CeilDiv(ht, 16))), dim3(dim3(32, 16)) , 0, 0, fixed, mask, buf2, buf1, wt, ht);
	}
	hipLaunchKernelGGL(( SimpleClone), dim3(dim3(CeilDiv(wt, 32), CeilDiv(ht, 16))), dim3(dim3(32, 16)) , 0, 0, background, buf1, mask, output, wb, hb, wt, ht, oy, ox);
	hipFree(fixed);
	hipFree(buf1);
	hipFree(buf2);
}
#include "cuda_runtime.h"
#include <cstdio>
// Integer ceiling division: CeilDiv(a, b) == ceil(a / b) for a >= 1, b >= 1.
// (Note: for a == 0 and b > 1 this yields 1, not 0.)
__device__ __host__ int CeilDiv(int a, int b) { return (a - 1)/b + 1; }
// Rounds a up to the next multiple of b (same a == 0 caveat as CeilDiv).
__device__ __host__ int CeilAlign(int a, int b) { return CeilDiv(a, b) * b; }
// Composites masked target pixels onto the background copy held in `output`:
// target pixels with mask > 127 overwrite the background pixel shifted by
// (oy, ox).  One thread per target pixel; interleaved RGB, row-major.
// (`background` itself is not read here.)
__global__ void SimpleClone(
	const float *background,
	const float *target,
	const float *mask,
	float *output,
	const int wb, const int hb, const int wt, const int ht,
	const int oy, const int ox
)
{
	const int yt = blockIdx.y * blockDim.y + threadIdx.y;
	const int xt = blockIdx.x * blockDim.x + threadIdx.x;
	const int curt = wt*yt+xt;
	if (0 <= yt && 0 <= xt && yt < ht && xt < wt && mask[curt] > 127.0f) {
		const int yb = oy+yt, xb = ox+xt;
		const int curb = wb*yb+xb;
		// Skip pixels whose destination falls outside the background.
		if (0 <= yb && yb < hb && 0 <= xb && xb < wb) {
			output[curb*3+0] = target[curt*3+0];
			output[curb*3+1] = target[curt*3+1];
			output[curb*3+2] = target[curt*3+2];
		}
	}
}
// Precomputes the constant ("fixed") term of the Poisson blending system for
// each target pixel: outside the mask it is the background colour itself;
// inside the mask it is the target-image gradient term plus the background
// colour of every neighbour on/outside the mask boundary.  One thread per
// target pixel; interleaved RGB, row-major.
__global__ void init(
	const float *background,
	const float *target,
	const float *mask,
	float *fixed,
	const int bWidth, const int bHeight,
	const int tWidth, const int tHeight,
	const int Oy, const int Ox
)
{
	const int yt = blockIdx.y * blockDim.y + threadIdx.y;
	const int xt = blockIdx.x * blockDim.x + threadIdx.x;
	const int Ct = tWidth*yt + xt;  // centre index in the target image
	if (yt >= 0 && xt >= 0 && yt < tHeight && xt < tWidth) {
		const int yb = Oy + yt, xb = Ox + xt;  // matching background pixel
		const int Cb = bWidth*yb + xb;
		if (0 <= yb && yb < bHeight && 0 <= xb && xb < bWidth) {
			if (mask[Ct] < 127.0f) {
				// set fixed to background
				for (int c = 0; c < 3; c++) {
					fixed[3 * Ct + c] = background[3 * Cb + c];
				}
			}
			else {
				// North/South/West/East neighbour indices, target (t) and
				// background (b) coordinates.  NOTE(review): Nb/Wb etc. can go
				// out of bounds when the cloned region touches the background
				// border -- presumably callers keep a one-pixel margin; verify.
				int Nt = tWidth*(yt - 1) + xt;
				int St = tWidth*(yt + 1) + xt;
				int Wt = tWidth*yt + (xt - 1);
				int Et = tWidth*yt + (xt + 1);
				int Nb = bWidth*(yb - 1) + xb;
				int Sb = bWidth*(yb + 1) + xb;
				int Wb = bWidth*yb + (xb - 1);
				int Eb = bWidth*yb + (xb + 1);
				//Calculation
				for (int c = 0; c < 3; c++) {
					// Gradient (Laplacian) of the target, dropping
					// out-of-image neighbours.
					float CbPrime = 0.0f;
					if (yt > 0)
						CbPrime += target[3 * Ct + c] - target[3 * Nt + c];
					if (yt < tHeight - 1)
						CbPrime += target[3 * Ct + c] - target[3 * St + c];
					if (xt > 0)
						CbPrime += target[3 * Ct + c] - target[3 * Wt + c];
					if (xt < tWidth - 1)
						CbPrime += target[3 * Ct + c] - target[3 * Et + c];
					//solve boundary problems: boundary neighbours contribute
					//the background colour directly.
					float boundary = 0.0f;
					if (yt == 0 || mask[Nt] < 127.0f)
						boundary += background[3 * Nb + c];
					if (yt == tHeight - 1 || mask[St] < 127.0f)
						boundary += background[3 * Sb + c];
					if (xt == 0 || mask[Wt] < 127.0f)
						boundary += background[3 * Wb + c];
					if (xt == tWidth - 1 || mask[Et] < 127.0f)
						boundary += background[3 * Eb + c];
					fixed[3 * Ct + c] = CbPrime + boundary;
				}
			}
		}
	}
}
// One Jacobi relaxation sweep over the masked pixels:
//   buf2[C] = (sum of buf1 over in-mask 4-neighbours + fixed[C]) / 4
// Out-of-image/out-of-mask neighbours were already folded into `fixed` by
// init().  Reads buf1, writes buf2; one thread per pixel, interleaved RGB.
__global__ void JacobiIteration(
	float *fixed,
	const float *mask,
	float *buf1, float *buf2,
	int tWidth, int ht
)
{
	const int yt = (blockIdx.y * blockDim.y + threadIdx.y);
	const int xt = (blockIdx.x * blockDim.x + threadIdx.x);
	const int Ct = tWidth*yt + xt;  // centre index
	if (0 <= yt && 0 <= xt && yt < ht && xt < tWidth) {
		if (mask[Ct] > 127.0f) {
			// North/South/West/East neighbour indices.
			int Nt = tWidth*(yt - 1) + xt;
			int St = tWidth*(yt + 1) + xt;
			int Wt = tWidth*yt + (xt - 1);
			int Et = tWidth*yt + (xt + 1);
			//Calculation
			for (int c = 0; c < 3; c++) {
				float sum = 0.0f;
				if (yt > 0 && mask[Nt] > 127.0f)
					sum += buf1[3 * Nt + c];
				if (yt < ht - 1 && mask[St] > 127.0f)
					sum += buf1[3 * St + c];
				if (xt > 0 && mask[Wt] > 127.0f)
					sum += buf1[3 * Wt + c];
				if (xt < tWidth - 1 && mask[Et] > 127.0f)
					sum += buf1[3 * Et + c];
				float Cb_next = (sum + fixed[Ct * 3 + c]) / 4.0f;
				buf2[3 * Ct + c] = Cb_next;
			}
		}
	}
}
// Host entry point for Poisson image blending (CUDA build).  All pointers
// are device memory; images are interleaved RGB.  Builds `fixed` with
// init(), runs 10000 double Jacobi sweeps ping-ponging buf1/buf2 (buf1 ends
// with the solved interior), then composites the result with SimpleClone().
// NOTE(review): CUDA API return codes are not checked anywhere in this path.
void PoissonImageCloning(
	const float *background,
	const float *target,
	const float *mask,
	float *output,
	const int wb, const int hb, const int wt, const int ht,
	const int oy, const int ox
)
{
	float *fixed;
	cudaMalloc(&fixed, 3*wt*ht*sizeof(float));
	float *buf1;
	cudaMalloc(&buf1, 3*wt*ht*sizeof(float));
	float *buf2;
	cudaMalloc(&buf2, 3*wt*ht*sizeof(float));
	cudaMemcpy(output, background, wb*hb*sizeof(float) * 3, cudaMemcpyDeviceToDevice);
	init<<<dim3(CeilDiv(wt, 32), CeilDiv(ht, 16)), dim3(32, 16)>>>(output, target, mask, fixed, wb, hb, wt, ht, oy, ox);
	cudaMemcpy(buf1, target, sizeof(float)*3*wt*ht, cudaMemcpyDeviceToDevice);
	int iterCount = 10000;
	for(int i = 0; i < iterCount; i++) {
		//Calculate Jacobi iteration and save target from buffer 1 to buffer 2
		JacobiIteration<<<dim3(CeilDiv(wt, 32), CeilDiv(ht, 16)), dim3(32, 16) >>>(fixed, mask, buf1, buf2, wt, ht);
		//Do second time in reverse direction (buffer 2 to buffer 1) in one iteration so we can save the time for swapping
		JacobiIteration<<<dim3(CeilDiv(wt, 32), CeilDiv(ht, 16)), dim3(32, 16) >>>(fixed, mask, buf2, buf1, wt, ht);
	}
	SimpleClone<<< dim3(CeilDiv(wt, 32), CeilDiv(ht, 16)), dim3(32, 16) >>>(background, buf1, mask, output, wb, hb, wt, ht, oy, ox);
	cudaFree(fixed);
	cudaFree(buf1);
	cudaFree(buf2);
}
56991f6356a4bfb566605ae7b7cced44304e5ea7.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/Context.h>
#include <ATen/Dispatch.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/Pow.h>
namespace at { namespace native {
namespace {
// SFINAE doesn't work well with NVCC under Windows for math functions like pow and sqrt.
// So we need to define the functions with the explicit function signatures.
// As for pow, the following signatures are defined as the device function:
// pow(float, int)
// pow(double, int)
// pow(float, float)
// pow(double, double)
// As for sqrt, the following signatures are defined as the device function:
// sqrt(float)
// sqrt(double)
// As for inverse sqrt, we must define it explicitly in MSVC, otherwise the static cast will be
// applied to the result of the inline function, and thus the result is incorrect.
// e.g. if we use 1.0 / sqrt(2) for 2 ^ (-0.5) in MSVC, we get
// int(2 ^ (-0.5)) = int(1.0 / sqrt(2)) = int(1.0 / int(1.414)) = int(1.0 / 1) = 1
// However, the correct result is
// int(2 ^ (-0.5)) = int(1.0 / 1.414) = 0
#ifdef _MSC_VER
// Functions for pow
// pow for at::Half
static inline __host__ __device__ at::Half pow_(at::Half base, at::Half exp) {
return static_cast<at::Half>(::pow(static_cast<float>(base), static_cast<float>(exp)));
}
// pow for at::BFloat16
static inline __host__ __device__ at::BFloat16 pow_(at::BFloat16 base, at::BFloat16 exp) {
return static_cast<at::BFloat16>(::pow(static_cast<float>(base), static_cast<float>(exp)));
}
// pow (floating, floating/int)
template <typename Base_type, typename Exp_type>
static inline __host__ __device__ typename std::enable_if<std::is_floating_point<Base_type>::value && (std::is_same<Base_type, Exp_type>::value || std::is_same<Exp_type, int>::value), Base_type>::type
pow_(Base_type base, Exp_type exp) {
return ::pow(base, exp);
}
// pow (integral, integral)
template <typename Base_type, typename Exp_type>
static inline __host__ __device__ typename std::enable_if<std::is_integral<Base_type>::value && std::is_same<Base_type, Exp_type>::value, Base_type>::type
pow_(Base_type base, Exp_type exp) {
return native::powi(base, exp);
}
// pow (Otherwise)
template <typename Base_type, typename Exp_type>
static inline __host__ __device__ typename std::enable_if<!std::is_same<Base_type, Exp_type>::value && !std::is_same<Exp_type, int>::value, Base_type>::type
pow_(Base_type base, Exp_type exp) {
return static_cast<Base_type>(::pow(static_cast<double>(base), static_cast<double>(exp)));
}
// pow (Complex)
template<typename B, typename E>
static inline __host__ __device__ B complex_pow_(B base, E exp) {
return ::pow(base, exp);
}
// Functions for sqrt
// sqrt (floating)
template <typename T>
static inline __host__ __device__ typename std::enable_if<std::is_floating_point<T>::value, T>::type sqrt_(T x) {
return std::sqrt(x);
}
// sqrt (integral)
template <typename T>
static inline __host__ __device__ typename std::enable_if<!std::is_floating_point<T>::value, T>::type sqrt_(T x) {
return static_cast<T>(std::sqrt(static_cast<double>(x)));
}
// Function for inverse sqrt
// invsqrt (floating)
template <typename T>
static inline __host__ __device__ typename std::enable_if<std::is_floating_point<T>::value, T>::type invsqrt_(T x) {
return 1.0 / std::sqrt(x);
}
// invsqrt (integral)
template <typename T>
static inline __host__ __device__ typename std::enable_if<!std::is_floating_point<T>::value, T>::type invsqrt_(T x) {
return static_cast<T>(1.0 / std::sqrt(static_cast<double>(x)));
}
#else
template <typename Base_type, typename Exp_type>
static inline __host__ __device__ Base_type pow_(Base_type base, Exp_type exp) {
return ::pow(base, exp);
}
template <typename T>
static inline __host__ __device__ T sqrt_(T x) {
return ::sqrt(x);
}
template <typename T>
static inline __host__ __device__ T invsqrt_(T x) {
return 1.0 / ::sqrt(x);
}
// pow (Otherwise)
template<typename B, typename E>
static inline __host__ __device__ B complex_pow_(B base, E exp) {
return ::pow(base, exp);
}
#endif
// Elementwise tensor ^ tensor power, dispatched by the iterator's dtype:
// complex -> complex_pow_, floating (incl. Half/BFloat16) -> pow_, and
// integral -> native::powi (exact integer exponentiation).
void pow_tensor_tensor_kernel(TensorIterator& iter) {
  if (isComplexType(iter.dtype())) {
    AT_DISPATCH_COMPLEX_TYPES(iter.dtype(), "pow_cuda", [&]() {
      gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t base, scalar_t exp) -> scalar_t {
        return complex_pow_(base, exp);
      });
    });
  } else if (isFloatingType(iter.dtype())) {
    AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "pow_cuda", [&]() {
      gpu_kernel(iter, []GPU_LAMBDA(scalar_t base, scalar_t exp) -> scalar_t {
        return pow_(base, exp);
      });
    });
  } else {
    AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "pow_cuda", [&]() {
      gpu_kernel(iter, []GPU_LAMBDA(scalar_t base, scalar_t exp) -> scalar_t {
        return native::powi(base, exp);
      });
    });
  }
}
// Elementwise tensor ^ scalar power with fast paths for common exponents.
// The exponent is compared as a double so e.g. float 2.0f and int 2 both hit
// the squared path; exponents 0.5, 2, 3, -0.5, -1 and -2 avoid the generic
// (and slower) pow_ call.
template<typename Base_type, typename Exp_type>
void pow_tensor_scalar_kernel_impl(TensorIterator& iter,
                                   Exp_type exp) {
  const auto d_exp = static_cast<double>(exp);
  if (d_exp == 0.5) {
    gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type {
      return sqrt_(base);
    });
  } else if (d_exp == 2) {
    gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type {
      return base * base;
    });
  } else if (d_exp == 3) {
    gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type {
      return base * base * base;
    });
  } else if (d_exp == -0.5) {
    gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type {
      return invsqrt_(base);
    });
  } else if (d_exp == -1) {
    gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type {
      return 1.0 / base;
    });
  } else if (d_exp == -2) {
    gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type {
      return 1.0 / (base * base);
    });
  } else {
    // Generic fallback for all other exponents.
    gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type {
      return pow_(base, exp);
    });
  }
}
// Dispatcher for tensor ^ scalar power.  Complex base or complex exponent
// goes through complex_pow_; a floating base (or any base with an integral
// exponent) keeps the exponent in the base's scalar type; an integral base
// with a non-integral exponent converts the exponent to float before calling
// the impl.
void pow_tensor_scalar_kernel(TensorIterator& iter, Scalar exp_scalar) {
  if (isComplexType(iter.dtype()) || exp_scalar.isComplex()) {
    AT_DISPATCH_COMPLEX_TYPES(iter.dtype(), "pow_cuda", [&]() {
      const auto exp = exp_scalar.to<scalar_t>();
      gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t base) -> scalar_t {
        return complex_pow_(base, exp);
      });
    });
  } else if (isFloatingType(iter.dtype()) || exp_scalar.isIntegral(false)) {
    AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "pow_cuda", [&]() {
      const auto exp = exp_scalar.to<scalar_t>();
      pow_tensor_scalar_kernel_impl<scalar_t>(iter, exp);
    });
  } else {
    const auto exp = exp_scalar.to<float>();
    AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "pow_cuda", [&]() {
      pow_tensor_scalar_kernel_impl<scalar_t>(iter, exp);
    });
  }
}
} // anonymous namespace
REGISTER_DISPATCH(pow_tensor_tensor_stub, &pow_tensor_tensor_kernel);
REGISTER_DISPATCH(pow_tensor_scalar_stub, &pow_tensor_scalar_kernel);
}} // namespace at::native
| 56991f6356a4bfb566605ae7b7cced44304e5ea7.cu | #include <ATen/Context.h>
#include <ATen/Dispatch.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/Pow.h>
namespace at { namespace native {
namespace {
// SFINAE doesn't work well with NVCC under Windows for math functions like pow and sqrt.
// So we need to define the functions with the explicit function signatures.
// As for pow, the following signatures are defined as the device function:
// pow(float, int)
// pow(double, int)
// pow(float, float)
// pow(double, double)
// As for sqrt, the following signatures are defined as the device function:
// sqrt(float)
// sqrt(double)
// As for inverse sqrt, we must define it explicitly in MSVC, otherwise the static cast will be
// applied to the result of the inline function, and thus the result is incorrect.
// e.g. if we use 1.0 / sqrt(2) for 2 ^ (-0.5) in MSVC, we get
// int(2 ^ (-0.5)) = int(1.0 / sqrt(2)) = int(1.0 / int(1.414)) = int(1.0 / 1) = 1
// However, the correct result is
// int(2 ^ (-0.5)) = int(1.0 / 1.414) = 0
#ifdef _MSC_VER
// Functions for pow
// pow for at::Half
static inline __host__ __device__ at::Half pow_(at::Half base, at::Half exp) {
return static_cast<at::Half>(std::pow(static_cast<float>(base), static_cast<float>(exp)));
}
// pow for at::BFloat16
static inline __host__ __device__ at::BFloat16 pow_(at::BFloat16 base, at::BFloat16 exp) {
return static_cast<at::BFloat16>(std::pow(static_cast<float>(base), static_cast<float>(exp)));
}
// pow (floating, floating/int)
template <typename Base_type, typename Exp_type>
static inline __host__ __device__ typename std::enable_if<std::is_floating_point<Base_type>::value && (std::is_same<Base_type, Exp_type>::value || std::is_same<Exp_type, int>::value), Base_type>::type
pow_(Base_type base, Exp_type exp) {
return std::pow(base, exp);
}
// pow (integral, integral)
template <typename Base_type, typename Exp_type>
static inline __host__ __device__ typename std::enable_if<std::is_integral<Base_type>::value && std::is_same<Base_type, Exp_type>::value, Base_type>::type
pow_(Base_type base, Exp_type exp) {
return native::powi(base, exp);
}
// pow (Otherwise)
template <typename Base_type, typename Exp_type>
static inline __host__ __device__ typename std::enable_if<!std::is_same<Base_type, Exp_type>::value && !std::is_same<Exp_type, int>::value, Base_type>::type
pow_(Base_type base, Exp_type exp) {
return static_cast<Base_type>(std::pow(static_cast<double>(base), static_cast<double>(exp)));
}
// pow (Complex)
template<typename B, typename E>
static inline __host__ __device__ B complex_pow_(B base, E exp) {
return std::pow(base, exp);
}
// Functions for sqrt
// sqrt (floating)
template <typename T>
static inline __host__ __device__ typename std::enable_if<std::is_floating_point<T>::value, T>::type sqrt_(T x) {
return std::sqrt(x);
}
// sqrt (integral)
template <typename T>
static inline __host__ __device__ typename std::enable_if<!std::is_floating_point<T>::value, T>::type sqrt_(T x) {
return static_cast<T>(std::sqrt(static_cast<double>(x)));
}
// Function for inverse sqrt
// invsqrt (floating)
template <typename T>
static inline __host__ __device__ typename std::enable_if<std::is_floating_point<T>::value, T>::type invsqrt_(T x) {
return 1.0 / std::sqrt(x);
}
// invsqrt (integral)
template <typename T>
static inline __host__ __device__ typename std::enable_if<!std::is_floating_point<T>::value, T>::type invsqrt_(T x) {
return static_cast<T>(1.0 / std::sqrt(static_cast<double>(x)));
}
#else
template <typename Base_type, typename Exp_type>
static inline __host__ __device__ Base_type pow_(Base_type base, Exp_type exp) {
return ::pow(base, exp);
}
template <typename T>
static inline __host__ __device__ T sqrt_(T x) {
return ::sqrt(x);
}
template <typename T>
static inline __host__ __device__ T invsqrt_(T x) {
return 1.0 / ::sqrt(x);
}
// pow (Otherwise)
template<typename B, typename E>
static inline __host__ __device__ B complex_pow_(B base, E exp) {
return std::pow(base, exp);
}
#endif
// Elementwise tensor ^ tensor power, dispatched by the iterator's dtype:
// complex -> complex_pow_, floating (incl. Half/BFloat16) -> pow_, and
// integral -> native::powi (exact integer exponentiation).
void pow_tensor_tensor_kernel(TensorIterator& iter) {
  if (isComplexType(iter.dtype())) {
    AT_DISPATCH_COMPLEX_TYPES(iter.dtype(), "pow_cuda", [&]() {
      gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t base, scalar_t exp) -> scalar_t {
        return complex_pow_(base, exp);
      });
    });
  } else if (isFloatingType(iter.dtype())) {
    AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "pow_cuda", [&]() {
      gpu_kernel(iter, []GPU_LAMBDA(scalar_t base, scalar_t exp) -> scalar_t {
        return pow_(base, exp);
      });
    });
  } else {
    AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "pow_cuda", [&]() {
      gpu_kernel(iter, []GPU_LAMBDA(scalar_t base, scalar_t exp) -> scalar_t {
        return native::powi(base, exp);
      });
    });
  }
}
// Elementwise tensor ^ scalar power with fast paths for common exponents
// (0.5, 2, 3, -0.5, -1, -2); the exponent is compared as a double so both
// float and int spellings of e.g. 2 take the squared path.  Everything else
// falls back to the generic pow_.
template<typename Base_type, typename Exp_type>
void pow_tensor_scalar_kernel_impl(TensorIterator& iter,
                                   Exp_type exp) {
  const auto d_exp = static_cast<double>(exp);
  if (d_exp == 0.5) {
    gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type {
      return sqrt_(base);
    });
  } else if (d_exp == 2) {
    gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type {
      return base * base;
    });
  } else if (d_exp == 3) {
    gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type {
      return base * base * base;
    });
  } else if (d_exp == -0.5) {
    gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type {
      return invsqrt_(base);
    });
  } else if (d_exp == -1) {
    gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type {
      return 1.0 / base;
    });
  } else if (d_exp == -2) {
    gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type {
      return 1.0 / (base * base);
    });
  } else {
    gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type {
      return pow_(base, exp);
    });
  }
}
// Dispatcher for tensor ^ scalar power: complex base or exponent goes
// through complex_pow_; floating base (or integral exponent) keeps the
// exponent in the base's scalar type; integral base with non-integral
// exponent converts the exponent to float before calling the impl.
void pow_tensor_scalar_kernel(TensorIterator& iter, Scalar exp_scalar) {
  if (isComplexType(iter.dtype()) || exp_scalar.isComplex()) {
    AT_DISPATCH_COMPLEX_TYPES(iter.dtype(), "pow_cuda", [&]() {
      const auto exp = exp_scalar.to<scalar_t>();
      gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t base) -> scalar_t {
        return complex_pow_(base, exp);
      });
    });
  } else if (isFloatingType(iter.dtype()) || exp_scalar.isIntegral(false)) {
    AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "pow_cuda", [&]() {
      const auto exp = exp_scalar.to<scalar_t>();
      pow_tensor_scalar_kernel_impl<scalar_t>(iter, exp);
    });
  } else {
    const auto exp = exp_scalar.to<float>();
    AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "pow_cuda", [&]() {
      pow_tensor_scalar_kernel_impl<scalar_t>(iter, exp);
    });
  }
}
} // anonymous namespace
REGISTER_DISPATCH(pow_tensor_tensor_stub, &pow_tensor_tensor_kernel);
REGISTER_DISPATCH(pow_tensor_scalar_stub, &pow_tensor_scalar_kernel);
}} // namespace at::native
|
db674a54d7d9e5ea73e0710126ba3681006cf124.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "winlibs/stdafx.h"
#include "parallel_for.h"
#include "time_code.h"
#include "nbodyfft.h"
#include <hipfft.h>
#include "include/util/cuda_utils.h"
#include "include/util/matrix_broadcast_utils.h"
#define cufftSafeCall(err) __cufftSafeCall(err, __FILE__, __LINE__)
clock_t _nbody_fft_timer;
float _nbody_times[10];
#define _ntime(x) _nbody_times[x] += ( (float) clock() - _nbody_fft_timer ) / CLOCKS_PER_SEC; _nbody_fft_timer = clock();
// Maps a hipfftResult status code to its enumerator name for diagnostics;
// codes not listed here fall through to "<unknown>".
static const char *_cudaGetErrorEnum(hipfftResult error)
{
    switch (error)
    {
        case HIPFFT_SUCCESS:
            return "HIPFFT_SUCCESS";
        case HIPFFT_INVALID_PLAN:
            return "HIPFFT_INVALID_PLAN";
        case HIPFFT_ALLOC_FAILED:
            return "HIPFFT_ALLOC_FAILED";
        case HIPFFT_INVALID_TYPE:
            return "HIPFFT_INVALID_TYPE";
        case HIPFFT_INVALID_VALUE:
            return "HIPFFT_INVALID_VALUE";
        case HIPFFT_INTERNAL_ERROR:
            return "HIPFFT_INTERNAL_ERROR";
        case HIPFFT_EXEC_FAILED:
            return "HIPFFT_EXEC_FAILED";
        case HIPFFT_SETUP_FAILED:
            return "HIPFFT_SETUP_FAILED";
        case HIPFFT_INVALID_SIZE:
            return "HIPFFT_INVALID_SIZE";
        case HIPFFT_UNALIGNED_DATA:
            return "HIPFFT_UNALIGNED_DATA";
    }
    return "<unknown>";
}
// Aborts (after printing a diagnostic and resetting the device) when a cuFFT
// call does not return HIPFFT_SUCCESS.  Invoked through the cufftSafeCall()
// macro so that `file`/`line` identify the *call site*, not this helper.
//
// Fixes vs. the previous version:
//  - the fprintf format had five conversions but only four arguments, and
//    passed the enum `err` where a %s string was expected (undefined
//    behaviour); format and argument list now match.
//  - __FILE__/__LINE__ of this translation unit were printed instead of the
//    `file`/`line` parameters supplied by the macro, so every report pointed
//    at the helper rather than the failing call.
inline void __cufftSafeCall(hipfftResult err, const char *file, const int line)
{
    if (HIPFFT_SUCCESS != err) {
        fprintf(stderr,
                "CUFFT error in file '%s', line %d\nerror %d: %s\nterminating!\n",
                file, line, static_cast<int>(err), _cudaGetErrorEnum(err));
        hipDeviceReset();
        assert(0);
    }
}
// Scatters the n_terms x (half x half) grid of coefficients -- stored with
// the term index fastest (w_coefficients[term + loc * n_terms]) -- into the
// top-left quadrant of each term's n_fft_coeffs x n_fft_coeffs FFT input
// plane.  One thread per (term, cell); the other three quadrants are
// presumably zeroed elsewhere for the zero-padded FFT -- TODO confirm.
__global__ void copy_to_fft_input(volatile float * __restrict__ fft_input,
                                  const float * w_coefficients_device,
                                  const int n_fft_coeffs,
                                  const int n_fft_coeffs_half,
                                  const int n_terms)
{
    register int i, j;
    register int TID = threadIdx.x + blockIdx.x * blockDim.x;
    if (TID >= n_terms * n_fft_coeffs_half * n_fft_coeffs_half)
        return;

    // Decompose the flat thread id into (term, row i, col j).
    register int current_term = TID / (n_fft_coeffs_half * n_fft_coeffs_half);
    register int current_loc = TID % (n_fft_coeffs_half * n_fft_coeffs_half);
    i = current_loc / n_fft_coeffs_half;
    j = current_loc % n_fft_coeffs_half;
    fft_input[current_term * (n_fft_coeffs * n_fft_coeffs) + i * n_fft_coeffs + j] = w_coefficients_device[current_term + current_loc * n_terms];
}
// Gather the inverse-FFT result back into the term-interleaved y_tilde layout.
// Reads the lower-right quadrant of each n_fft_coeffs x n_fft_coeffs plane
// (offset by n_fft_coeffs_half in both dimensions) and divides by the total
// plane size to undo the unnormalized inverse transform.
__global__ void copy_from_fft_output(volatile float * __restrict__ y_tilde_values,
const float * fft_output,
const int n_fft_coeffs,
const int n_fft_coeffs_half,
const int n_terms)
{
register int i, j;
register int TID = threadIdx.x + blockIdx.x * blockDim.x;
// One thread per (term, cell); excess threads exit.
if (TID >= n_terms * n_fft_coeffs_half * n_fft_coeffs_half)
return;
register int current_term = TID / (n_fft_coeffs_half * n_fft_coeffs_half);
register int current_loc = TID % (n_fft_coeffs_half * n_fft_coeffs_half);
// Shift into the second half of the padded plane in both dimensions.
i = current_loc / n_fft_coeffs_half + n_fft_coeffs_half;
j = current_loc % n_fft_coeffs_half + n_fft_coeffs_half;
// Normalize by n_fft_coeffs^2 (inverse C2R transforms are unscaled).
y_tilde_values[current_term + n_terms * current_loc] = fft_output[current_term * (n_fft_coeffs * n_fft_coeffs) + i * n_fft_coeffs + j] / (float) (n_fft_coeffs * n_fft_coeffs);
}
// For every point, find which spatial box it falls in and its fractional
// position inside that box. Grid coordinates are clamped so out-of-range
// points are assigned to edge boxes.
// box_lower_bounds stores x lower bounds first, then y lower bounds offset by
// n_total_boxes.
__global__ void compute_point_box_idx(volatile int * __restrict__ point_box_idx,
volatile float * __restrict__ x_in_box,
volatile float * __restrict__ y_in_box,
const float * const xs,
const float * const ys,
const float * const box_lower_bounds,
const float coord_min,
const float box_width,
const int n_boxes,
const int n_total_boxes,
const int N)
{
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid >= N)
        return;
    // Raw grid coordinates, clamped into [0, n_boxes - 1].
    int col = (int) ((xs[tid] - coord_min) / box_width);
    int row = (int) ((ys[tid] - coord_min) / box_width);
    col = min(n_boxes - 1, max(0, col));
    row = min(n_boxes - 1, max(0, row));
    const int box = row * n_boxes + col;
    point_box_idx[tid] = box;
    // Relative offset of the point inside its box.
    x_in_box[tid] = (xs[tid] - box_lower_bounds[box]) / box_width;
    y_in_box[tid] = (ys[tid] - box_lower_bounds[n_total_boxes + box]) / box_width;
}
// Evaluate Lagrange basis polynomial j at each point's in-box coordinate.
// Output layout is interpolated_values[j * N + i]; denominator[j] holds the
// precomputed product of node differences for polynomial j.
__global__ void interpolate_device(
volatile float * __restrict__ interpolated_values,
const float * const y_in_box,
const float * const y_tilde_spacings,
const float * const denominator,
const int n_interpolation_points,
const int N)
{
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid >= N * n_interpolation_points)
        return;
    const int point = tid % N;
    const int poly = tid / N;
    const float coord = y_in_box[point];
    // Numerator: product over every node except the polynomial's own node.
    float numerator = 1;
    for (int node = 0; node < n_interpolation_points; node++) {
        if (poly != node) {
            numerator *= coord - y_tilde_spacings[node];
        }
    }
    interpolated_values[poly * N + point] = numerator / denominator[poly];
}
// Accumulate each point's contribution to the w coefficients at its box's
// interpolation nodes. One thread per (x-node, y-node, point, term)
// combination; contributions are summed with atomicAdd because many points
// share the same lattice node.
__global__ void compute_interpolated_indices(
float * __restrict__ w_coefficients_device,
const int * const point_box_indices,
const float * const chargesQij,
const float * const x_interpolated_values,
const float * const y_interpolated_values,
const int N,
const int n_interpolation_points,
const int n_boxes,
const int n_terms)
{
register int TID, current_term, i, interp_i, interp_j, box_idx, box_i, box_j, idx;
TID = threadIdx.x + blockIdx.x * blockDim.x;
if (TID >= n_terms * n_interpolation_points * n_interpolation_points * N)
return;
// Decode the flat thread id: term varies fastest, then point, then the
// y interpolation node, then the x interpolation node.
current_term = TID % n_terms;
i = (TID / n_terms) % N;
interp_j = ((TID / n_terms) / N) % n_interpolation_points;
interp_i = ((TID / n_terms) / N) / n_interpolation_points;
// Box containing point i, split into its two grid coordinates.
box_idx = point_box_indices[i];
box_i = box_idx % n_boxes;
box_j = box_idx / n_boxes;
// interpolated_values[TID] = x_interpolated_values[i + interp_i * N] * y_interpolated_values[i + interp_j * N] * chargesQij[i * n_terms + current_term];
// Flat node index on the (n_boxes * n_interpolation_points)^2 lattice.
idx = (box_i * n_interpolation_points + interp_i) * (n_boxes * n_interpolation_points) +
(box_j * n_interpolation_points) + interp_j;
// interpolated_indices[TID] = idx * n_terms + current_term;
// Weight = product of the two 1-D Lagrange values times the point's charge.
atomicAdd(
w_coefficients_device + idx * n_terms + current_term,
x_interpolated_values[i + interp_i * N] * y_interpolated_values[i + interp_j * N] * chargesQij[i * n_terms + current_term]);
}
// Inverse of compute_interpolated_indices: interpolate the node potentials
// (y_tilde_values) back onto each point and accumulate into potentialsQij.
// One thread per (x-node, y-node, point, term) combination.
__global__ void compute_potential_indices(
float * __restrict__ potentialsQij,
const int * const point_box_indices,
const float * const y_tilde_values,
const float * const x_interpolated_values,
const float * const y_interpolated_values,
const int N,
const int n_interpolation_points,
const int n_boxes,
const int n_terms)
{
register int TID, current_term, i, interp_i, interp_j, box_idx, box_i, box_j, idx;
TID = threadIdx.x + blockIdx.x * blockDim.x;
if (TID >= n_terms * n_interpolation_points * n_interpolation_points * N)
return;
// Decode the flat thread id (same ordering as compute_interpolated_indices).
current_term = TID % n_terms;
i = (TID / n_terms) % N;
interp_j = ((TID / n_terms) / N) % n_interpolation_points;
interp_i = ((TID / n_terms) / N) / n_interpolation_points;
box_idx = point_box_indices[i];
box_i = box_idx % n_boxes;
box_j = box_idx / n_boxes;
// Flat node index on the global interpolation lattice for this point's box.
idx = (box_i * n_interpolation_points + interp_i) * (n_boxes * n_interpolation_points) +
(box_j * n_interpolation_points) + interp_j;
// interpolated_values[TID] = x_interpolated_values[i + interp_i * N] * y_interpolated_values[i + interp_j * N] * y_tilde_values[idx * n_terms + current_term];
// interpolated_indices[TID] = i * n_terms + current_term;
// Many threads target the same (point, term) slot, hence the atomicAdd.
atomicAdd(
potentialsQij + i * n_terms + current_term,
x_interpolated_values[i + interp_i * N] * y_interpolated_values[i + interp_j * N] * y_tilde_values[idx * n_terms + current_term]);
}
// Squared 2-D Cauchy kernel: (1 + ||(x1,x2) - (y1,y2)||^2)^-2.
// Fixed: the original called double-precision pow() with double literals,
// forcing a float->double->float round trip in device code; this version
// stays in single precision and avoids pow() entirely.
__host__ __device__ float squared_cauchy_2d(float x1, float x2, float y1, float y2) {
    float dx = x1 - y1;
    float dy = x2 - y2;
    float t = 1.0f + dx * dx + dy * dy;
    return 1.0f / (t * t);
}
// Tabulate the squared-Cauchy kernel on the equispaced node lattice and embed
// it symmetrically into the 2x-padded circulant-kernel plane used by the FFT.
__global__ void compute_kernel_tilde(
volatile float * __restrict__ kernel_tilde,
const float x_min,
const float y_min,
const float h,
const int n_interpolation_points_1d,
const int n_fft_coeffs)
{
register int TID, i, j;
register float tmp;
TID = threadIdx.x + blockIdx.x * blockDim.x;
if (TID >= n_interpolation_points_1d * n_interpolation_points_1d)
return;
i = TID / n_interpolation_points_1d;
j = TID % n_interpolation_points_1d;
// Kernel value for lattice offset (i, j) from the reference node.
tmp = squared_cauchy_2d(y_min + h / 2, x_min + h / 2, y_min + h / 2 + i * h, x_min + h / 2 + j * h);
// Mirror into all four quadrants around the plane center to build the
// circulant embedding. When i == 0 or j == 0 some of these writes alias the
// same cell, but they store the same value, so the duplication is benign.
kernel_tilde[(n_interpolation_points_1d + i) * n_fft_coeffs + (n_interpolation_points_1d + j)] = tmp;
kernel_tilde[(n_interpolation_points_1d - i) * n_fft_coeffs + (n_interpolation_points_1d + j)] = tmp;
kernel_tilde[(n_interpolation_points_1d + i) * n_fft_coeffs + (n_interpolation_points_1d - j)] = tmp;
kernel_tilde[(n_interpolation_points_1d - i) * n_fft_coeffs + (n_interpolation_points_1d - j)] = tmp;
}
// Fill per-box lower/upper coordinate bounds for an n_boxes x n_boxes grid.
// x bounds occupy the first n_total_boxes entries of each array, y bounds the
// next n_total_boxes entries.
__global__ void compute_upper_and_lower_bounds(
volatile float * __restrict__ box_upper_bounds,
volatile float * __restrict__ box_lower_bounds,
const float box_width,
const float x_min,
const float y_min,
const int n_boxes,
const int n_total_boxes)
{
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid >= n_boxes * n_boxes)
        return;
    const int row = tid / n_boxes;
    const int col = tid % n_boxes;
    const int cell = row * n_boxes + col;
    // x extent depends on the column, y extent on the row.
    box_lower_bounds[cell] = col * box_width + x_min;
    box_upper_bounds[cell] = (col + 1) * box_width + x_min;
    box_lower_bounds[n_total_boxes + cell] = row * box_width + y_min;
    box_upper_bounds[n_total_boxes + cell] = (row + 1) * box_width + y_min;
}
// Scatter sparse (index, value) pairs into the dense coefficient array.
__global__ void copy_to_w_coefficients(
volatile float * __restrict__ w_coefficients_device,
const int * const output_indices,
const float * const output_values,
const int num_elements)
{
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid < num_elements) {
        w_coefficients_device[output_indices[tid]] = output_values[tid];
    }
}
// One-time setup for the FFT-accelerated N-body evaluation:
//   1. compute per-box coordinate bounds,
//   2. tabulate the embedded circulant kernel on the padded node lattice,
//   3. forward-FFT the kernel, caching the result in fft_kernel_tilde_device.
// All device vectors must be pre-sized by the caller.
// NOTE(review): y_max is accepted but never read -- box_width is derived from
// the x extent only, which presumably assumes a square domain; confirm.
// NOTE(review): the hipfftExecR2C return status is not checked here.
void precompute_2d(hipfftHandle &plan_kernel_tilde, float x_max, float x_min, float y_max, float y_min, int n_boxes, int n_interpolation_points,
thrust::device_vector<float> &box_lower_bounds_device, thrust::device_vector<float> &box_upper_bounds_device,
thrust::device_vector<float> &kernel_tilde_device, thrust::device_vector<thrust::complex<float>> &fft_kernel_tilde_device) {
const int num_threads = 32;
int num_blocks = (n_boxes * n_boxes + num_threads - 1) / num_threads;
/*
* Set up the boxes
*/
int n_total_boxes = n_boxes * n_boxes;
float box_width = (x_max - x_min) / (float) n_boxes;
// Left and right bounds of each box, first the lower bounds in the x direction, then in the y direction
hipLaunchKernelGGL(( compute_upper_and_lower_bounds), dim3(num_blocks), dim3(num_threads), 0, 0,
thrust::raw_pointer_cast(box_upper_bounds_device.data()),
thrust::raw_pointer_cast(box_lower_bounds_device.data()),
box_width, x_min, y_min, n_boxes, n_total_boxes);
// Coordinates of all the equispaced interpolation points
int n_interpolation_points_1d = n_interpolation_points * n_boxes;
int n_fft_coeffs = 2 * n_interpolation_points_1d;
float h = box_width / (float) n_interpolation_points;
/*
* Evaluate the kernel at the interpolation nodes and form the embedded generating kernel vector for a circulant
* matrix
*/
// thrust::device_vector<float> kernel_tilde_device(n_fft_coeffs * n_fft_coeffs);
num_blocks = (n_interpolation_points_1d * n_interpolation_points_1d + num_threads - 1) / num_threads;
hipLaunchKernelGGL(( compute_kernel_tilde), dim3(num_blocks), dim3(num_threads), 0, 0,
thrust::raw_pointer_cast(kernel_tilde_device.data()),
x_min, y_min, h, n_interpolation_points_1d, n_fft_coeffs);
GpuErrorCheck(hipDeviceSynchronize());
// Precompute the FFT of the kernel generating matrix
hipfftExecR2C(plan_kernel_tilde,
reinterpret_cast<hipfftReal *>(thrust::raw_pointer_cast(kernel_tilde_device.data())),
reinterpret_cast<hipfftComplex *>(thrust::raw_pointer_cast(fft_kernel_tilde_device.data())));
}
// One FFT-accelerated 2-D N-body (FIt-SNE style) evaluation:
//   Step 0: locate each point's box and in-box coordinates.
//   Step 1: evaluate 1-D Lagrange weights in x and y, then scatter the
//           charges onto the box node lattice (w coefficients).
//   Step 2: convolve with the precomputed kernel via forward FFT, a
//           broadcasted complex multiply, and inverse FFT.
//   Step 3: gather the node potentials back onto the points
//           (accumulated into potentialsQij_device).
// All device vectors must be pre-sized by the caller; plans must match the
// n_fft_coeffs geometry used in precompute_2d.
// NOTE(review): total_interpolation_points, all_interpolated_values_device,
// output_values, all_interpolated_indices and output_indices are accepted but
// never used in this function body; presumably kept for interface stability.
// NOTE(review): hipfftExecR2C/hipfftExecC2R return statuses are not checked.
void n_body_fft_2d(
hipfftHandle &plan_dft,
hipfftHandle &plan_idft,
int N,
int n_terms,
int n_boxes,
int n_interpolation_points,
thrust::device_vector<thrust::complex<float>> &fft_kernel_tilde_device,
int n_total_boxes,
int total_interpolation_points,
float coord_min,
float box_width,
int n_fft_coeffs_half,
int n_fft_coeffs,
int num_nodes,
thrust::device_vector<float> &fft_input,
thrust::device_vector<thrust::complex<float>> &fft_w_coefficients,
thrust::device_vector<float> &fft_output,
thrust::device_vector<int> &point_box_idx_device,
thrust::device_vector<float> &x_in_box_device,
thrust::device_vector<float> &y_in_box_device,
thrust::device_vector<float> &points_device,
thrust::device_vector<float> &box_lower_bounds_device,
thrust::device_vector<float> &y_tilde_spacings_device,
thrust::device_vector<float> &denominator_device,
thrust::device_vector<float> &y_tilde_values,
thrust::device_vector<float> &all_interpolated_values_device,
thrust::device_vector<float> &output_values,
thrust::device_vector<int> &all_interpolated_indices,
thrust::device_vector<int> &output_indices,
thrust::device_vector<float> &w_coefficients_device,
thrust::device_vector<float> &chargesQij_device,
thrust::device_vector<float> &x_interpolated_values_device,
thrust::device_vector<float> &y_interpolated_values_device,
thrust::device_vector<float> &potentialsQij_device) {
// std::cout << "start" << std::endl;
const int num_threads = 128;
int num_blocks = (N + num_threads - 1) / num_threads;
// Compute box indices and the relative position of each point in its box in the interval [0, 1]
// x coordinates live in points_device[0 .. num_nodes]; y coordinates start at
// offset num_nodes + 1.
hipLaunchKernelGGL(( compute_point_box_idx), dim3(num_blocks), dim3(num_threads), 0, 0,
thrust::raw_pointer_cast(point_box_idx_device.data()),
thrust::raw_pointer_cast(x_in_box_device.data()),
thrust::raw_pointer_cast(y_in_box_device.data()),
thrust::raw_pointer_cast(points_device.data()),
thrust::raw_pointer_cast(points_device.data() + num_nodes + 1),
thrust::raw_pointer_cast(box_lower_bounds_device.data()),
coord_min,
box_width,
n_boxes,
n_total_boxes,
N
);
GpuErrorCheck(hipDeviceSynchronize());
/*
* Step 1: Interpolate kernel using Lagrange polynomials and compute the w coefficients
*/
// Compute the interpolated values at each real point with each Lagrange polynomial in the `x` direction
num_blocks = (N * n_interpolation_points + num_threads - 1) / num_threads;
hipLaunchKernelGGL(( interpolate_device), dim3(num_blocks), dim3(num_threads), 0, 0,
thrust::raw_pointer_cast(x_interpolated_values_device.data()),
thrust::raw_pointer_cast(x_in_box_device.data()),
thrust::raw_pointer_cast(y_tilde_spacings_device.data()),
thrust::raw_pointer_cast(denominator_device.data()),
n_interpolation_points,
N
);
GpuErrorCheck(hipDeviceSynchronize());
// Compute the interpolated values at each real point with each Lagrange polynomial in the `y` direction
hipLaunchKernelGGL(( interpolate_device), dim3(num_blocks), dim3(num_threads), 0, 0,
thrust::raw_pointer_cast(y_interpolated_values_device.data()),
thrust::raw_pointer_cast(y_in_box_device.data()),
thrust::raw_pointer_cast(y_tilde_spacings_device.data()),
thrust::raw_pointer_cast(denominator_device.data()),
n_interpolation_points,
N
);
GpuErrorCheck(hipDeviceSynchronize());
// Scatter each point's charge onto the node lattice (atomic accumulation).
num_blocks = (n_terms * n_interpolation_points * n_interpolation_points * N + num_threads - 1) / num_threads;
hipLaunchKernelGGL(( compute_interpolated_indices), dim3(num_blocks), dim3(num_threads), 0, 0,
thrust::raw_pointer_cast(w_coefficients_device.data()),
thrust::raw_pointer_cast(point_box_idx_device.data()),
thrust::raw_pointer_cast(chargesQij_device.data()),
thrust::raw_pointer_cast(x_interpolated_values_device.data()),
thrust::raw_pointer_cast(y_interpolated_values_device.data()),
N,
n_interpolation_points,
n_boxes,
n_terms
);
GpuErrorCheck(hipDeviceSynchronize());
/*
* Step 2: Compute the values v_{m, n} at the equispaced nodes, multiply the kernel matrix with the coefficients w
*/
num_blocks = ((n_terms * n_fft_coeffs_half * n_fft_coeffs_half) + num_threads - 1) / num_threads;
hipLaunchKernelGGL(( copy_to_fft_input), dim3(num_blocks), dim3(num_threads), 0, 0,
thrust::raw_pointer_cast(fft_input.data()),
thrust::raw_pointer_cast(w_coefficients_device.data()),
n_fft_coeffs,
n_fft_coeffs_half,
n_terms
);
GpuErrorCheck(hipDeviceSynchronize());
// Compute fft values at interpolated nodes
hipfftExecR2C(plan_dft,
reinterpret_cast<hipfftReal *>(thrust::raw_pointer_cast(fft_input.data())),
reinterpret_cast<hipfftComplex *>(thrust::raw_pointer_cast(fft_w_coefficients.data())));
GpuErrorCheck(hipDeviceSynchronize());
// Take the broadcasted Hadamard product of a complex matrix and a complex vector
tsnecuda::util::BroadcastMatrixVector(
fft_w_coefficients, fft_kernel_tilde_device, n_fft_coeffs * (n_fft_coeffs / 2 + 1), n_terms,
thrust::multiplies<thrust::complex<float>>(), 0, thrust::complex<float>(1.0));
// Invert the computed values at the interpolated nodes
hipfftExecC2R(plan_idft,
reinterpret_cast<hipfftComplex *>(thrust::raw_pointer_cast(fft_w_coefficients.data())),
reinterpret_cast<hipfftReal *>(thrust::raw_pointer_cast(fft_output.data())));
GpuErrorCheck(hipDeviceSynchronize());
// num_blocks from the copy_to_fft_input launch is reused here:
// copy_from_fft_output covers the same (term, cell) thread range.
hipLaunchKernelGGL(( copy_from_fft_output), dim3(num_blocks), dim3(num_threads), 0, 0,
thrust::raw_pointer_cast(y_tilde_values.data()),
thrust::raw_pointer_cast(fft_output.data()),
n_fft_coeffs,
n_fft_coeffs_half,
n_terms
);
GpuErrorCheck(hipDeviceSynchronize());
/*
* Step 3: Compute the potentials \tilde{\phi}
*/
num_blocks = (n_terms * n_interpolation_points * n_interpolation_points * N + num_threads - 1) / num_threads;
hipLaunchKernelGGL(( compute_potential_indices), dim3(num_blocks), dim3(num_threads), 0, 0,
thrust::raw_pointer_cast(potentialsQij_device.data()),
thrust::raw_pointer_cast(point_box_idx_device.data()),
thrust::raw_pointer_cast(y_tilde_values.data()),
thrust::raw_pointer_cast(x_interpolated_values_device.data()),
thrust::raw_pointer_cast(y_interpolated_values_device.data()),
N,
n_interpolation_points,
n_boxes,
n_terms
);
GpuErrorCheck(hipDeviceSynchronize());
}
// Expose the accumulated per-phase timing buffer filled by the _ntime macro.
// The returned pointer refers to the static 10-entry _nbody_times array.
float* get_ntime() {
return _nbody_times;
}
// CPU reference implementation of the Lagrange interpolation step.
// Writes interpolated_values[j * N + i] = L_j(y_in_box[i]), where L_j is the
// j-th Lagrange basis polynomial over the nodes in y_tilde_spacings and
// denominator[j] is the caller-precomputed product of node differences
// (the same across all interpolants, so it is computed once outside).
void interpolate(int n_interpolation_points, int N, const float *y_in_box, const float *y_tilde_spacings,
float *interpolated_values, const float *denominator) {
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < n_interpolation_points; j++) {
            // Numerator: product of (coordinate - node_k) over all k != j.
            interpolated_values[j * N + i] = 1;
            for (int k = 0; k < n_interpolation_points; k++) {
                if (j != k) {
                    interpolated_values[j * N + i] *= y_in_box[i] - y_tilde_spacings[k];
                }
            }
            interpolated_values[j * N + i] /= denominator[j];
        }
    }
}
| db674a54d7d9e5ea73e0710126ba3681006cf124.cu | #include "winlibs/stdafx.h"
#include "parallel_for.h"
#include "time_code.h"
#include "nbodyfft.h"
#include <cufft.h>
#include "include/util/cuda_utils.h"
#include "include/util/matrix_broadcast_utils.h"
#define cufftSafeCall(err) __cufftSafeCall(err, __FILE__, __LINE__)
clock_t _nbody_fft_timer;
float _nbody_times[10];
#define _ntime(x) _nbody_times[x] += ( (float) clock() - _nbody_fft_timer ) / CLOCKS_PER_SEC; _nbody_fft_timer = clock();
// Translate a cuFFT status code into its symbolic name for diagnostics.
// Unrecognized codes map to "<unknown>".
static const char *_cudaGetErrorEnum(cufftResult error)
{
    if (error == CUFFT_SUCCESS)        return "CUFFT_SUCCESS";
    if (error == CUFFT_INVALID_PLAN)   return "CUFFT_INVALID_PLAN";
    if (error == CUFFT_ALLOC_FAILED)   return "CUFFT_ALLOC_FAILED";
    if (error == CUFFT_INVALID_TYPE)   return "CUFFT_INVALID_TYPE";
    if (error == CUFFT_INVALID_VALUE)  return "CUFFT_INVALID_VALUE";
    if (error == CUFFT_INTERNAL_ERROR) return "CUFFT_INTERNAL_ERROR";
    if (error == CUFFT_EXEC_FAILED)    return "CUFFT_EXEC_FAILED";
    if (error == CUFFT_SETUP_FAILED)   return "CUFFT_SETUP_FAILED";
    if (error == CUFFT_INVALID_SIZE)   return "CUFFT_INVALID_SIZE";
    if (error == CUFFT_UNALIGNED_DATA) return "CUFFT_UNALIGNED_DATA";
    return "<unknown>";
}
// Abort the process with a diagnostic if a cuFFT call failed.
// `file` and `line` identify the call site (captured by the cufftSafeCall macro).
inline void __cufftSafeCall(cufftResult err, const char *file, const int line)
{
    if( CUFFT_SUCCESS != err) {
        // Bug fix: the original format string had five conversion specifiers
        // but only four arguments, with the enum value passed where "%s" was
        // expected (undefined behavior), and it reported this helper's own
        // __FILE__/__LINE__ instead of the caller's file/line parameters.
        fprintf(stderr, "CUFFT error in file '%s', line %d\nerror %d: %s\nterminating!\n",
                file, line, (int) err, _cudaGetErrorEnum(err));
        cudaDeviceReset();
        assert(0);
    }
}
// Scatter the term-interleaved w coefficients into the zero-padded FFT input.
// One thread per (term, cell) pair; each term owns one padded
// n_fft_coeffs x n_fft_coeffs plane, and only the top-left
// n_fft_coeffs_half x n_fft_coeffs_half quadrant is written.
__global__ void copy_to_fft_input(volatile float * __restrict__ fft_input,
const float * w_coefficients_device,
const int n_fft_coeffs,
const int n_fft_coeffs_half,
const int n_terms)
{
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    const int cells_per_term = n_fft_coeffs_half * n_fft_coeffs_half;
    if (tid >= n_terms * cells_per_term)
        return;
    const int term = tid / cells_per_term;
    const int cell = tid % cells_per_term;
    const int row = cell / n_fft_coeffs_half;
    const int col = cell % n_fft_coeffs_half;
    // Source layout interleaves terms fastest; destination is planar.
    fft_input[term * (n_fft_coeffs * n_fft_coeffs) + row * n_fft_coeffs + col] =
        w_coefficients_device[term + cell * n_terms];
}
// Gather the inverse-FFT result back into the term-interleaved y_tilde layout.
// Reads the lower-right quadrant of each n_fft_coeffs x n_fft_coeffs plane
// (offset by n_fft_coeffs_half in both dimensions) and divides by the total
// plane size to undo the unnormalized inverse transform.
__global__ void copy_from_fft_output(volatile float * __restrict__ y_tilde_values,
const float * fft_output,
const int n_fft_coeffs,
const int n_fft_coeffs_half,
const int n_terms)
{
register int i, j;
register int TID = threadIdx.x + blockIdx.x * blockDim.x;
// One thread per (term, cell); excess threads exit.
if (TID >= n_terms * n_fft_coeffs_half * n_fft_coeffs_half)
return;
register int current_term = TID / (n_fft_coeffs_half * n_fft_coeffs_half);
register int current_loc = TID % (n_fft_coeffs_half * n_fft_coeffs_half);
// Shift into the second half of the padded plane in both dimensions.
i = current_loc / n_fft_coeffs_half + n_fft_coeffs_half;
j = current_loc % n_fft_coeffs_half + n_fft_coeffs_half;
// Normalize by n_fft_coeffs^2 (inverse C2R transforms are unscaled).
y_tilde_values[current_term + n_terms * current_loc] = fft_output[current_term * (n_fft_coeffs * n_fft_coeffs) + i * n_fft_coeffs + j] / (float) (n_fft_coeffs * n_fft_coeffs);
}
// For every point, find which spatial box it falls in and its fractional
// position inside that box. Grid coordinates are clamped so out-of-range
// points are assigned to edge boxes.
// box_lower_bounds stores x lower bounds first, then y lower bounds offset by
// n_total_boxes.
__global__ void compute_point_box_idx(volatile int * __restrict__ point_box_idx,
volatile float * __restrict__ x_in_box,
volatile float * __restrict__ y_in_box,
const float * const xs,
const float * const ys,
const float * const box_lower_bounds,
const float coord_min,
const float box_width,
const int n_boxes,
const int n_total_boxes,
const int N)
{
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid >= N)
        return;
    // Raw grid coordinates, clamped into [0, n_boxes - 1].
    int col = (int) ((xs[tid] - coord_min) / box_width);
    int row = (int) ((ys[tid] - coord_min) / box_width);
    col = min(n_boxes - 1, max(0, col));
    row = min(n_boxes - 1, max(0, row));
    const int box = row * n_boxes + col;
    point_box_idx[tid] = box;
    // Relative offset of the point inside its box.
    x_in_box[tid] = (xs[tid] - box_lower_bounds[box]) / box_width;
    y_in_box[tid] = (ys[tid] - box_lower_bounds[n_total_boxes + box]) / box_width;
}
// Evaluate Lagrange basis polynomial j at each point's in-box coordinate.
// Output layout is interpolated_values[j * N + i]; denominator[j] holds the
// precomputed product of node differences for polynomial j.
__global__ void interpolate_device(
volatile float * __restrict__ interpolated_values,
const float * const y_in_box,
const float * const y_tilde_spacings,
const float * const denominator,
const int n_interpolation_points,
const int N)
{
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid >= N * n_interpolation_points)
        return;
    const int point = tid % N;
    const int poly = tid / N;
    const float coord = y_in_box[point];
    // Numerator: product over every node except the polynomial's own node.
    float numerator = 1;
    for (int node = 0; node < n_interpolation_points; node++) {
        if (poly != node) {
            numerator *= coord - y_tilde_spacings[node];
        }
    }
    interpolated_values[poly * N + point] = numerator / denominator[poly];
}
// Accumulate each point's contribution to the w coefficients at its box's
// interpolation nodes. One thread per (x-node, y-node, point, term)
// combination; contributions are summed with atomicAdd because many points
// share the same lattice node.
__global__ void compute_interpolated_indices(
float * __restrict__ w_coefficients_device,
const int * const point_box_indices,
const float * const chargesQij,
const float * const x_interpolated_values,
const float * const y_interpolated_values,
const int N,
const int n_interpolation_points,
const int n_boxes,
const int n_terms)
{
register int TID, current_term, i, interp_i, interp_j, box_idx, box_i, box_j, idx;
TID = threadIdx.x + blockIdx.x * blockDim.x;
if (TID >= n_terms * n_interpolation_points * n_interpolation_points * N)
return;
// Decode the flat thread id: term varies fastest, then point, then the
// y interpolation node, then the x interpolation node.
current_term = TID % n_terms;
i = (TID / n_terms) % N;
interp_j = ((TID / n_terms) / N) % n_interpolation_points;
interp_i = ((TID / n_terms) / N) / n_interpolation_points;
// Box containing point i, split into its two grid coordinates.
box_idx = point_box_indices[i];
box_i = box_idx % n_boxes;
box_j = box_idx / n_boxes;
// interpolated_values[TID] = x_interpolated_values[i + interp_i * N] * y_interpolated_values[i + interp_j * N] * chargesQij[i * n_terms + current_term];
// Flat node index on the (n_boxes * n_interpolation_points)^2 lattice.
idx = (box_i * n_interpolation_points + interp_i) * (n_boxes * n_interpolation_points) +
(box_j * n_interpolation_points) + interp_j;
// interpolated_indices[TID] = idx * n_terms + current_term;
// Weight = product of the two 1-D Lagrange values times the point's charge.
atomicAdd(
w_coefficients_device + idx * n_terms + current_term,
x_interpolated_values[i + interp_i * N] * y_interpolated_values[i + interp_j * N] * chargesQij[i * n_terms + current_term]);
}
// Inverse of compute_interpolated_indices: interpolate the node potentials
// (y_tilde_values) back onto each point and accumulate into potentialsQij.
// One thread per (x-node, y-node, point, term) combination.
__global__ void compute_potential_indices(
float * __restrict__ potentialsQij,
const int * const point_box_indices,
const float * const y_tilde_values,
const float * const x_interpolated_values,
const float * const y_interpolated_values,
const int N,
const int n_interpolation_points,
const int n_boxes,
const int n_terms)
{
register int TID, current_term, i, interp_i, interp_j, box_idx, box_i, box_j, idx;
TID = threadIdx.x + blockIdx.x * blockDim.x;
if (TID >= n_terms * n_interpolation_points * n_interpolation_points * N)
return;
// Decode the flat thread id (same ordering as compute_interpolated_indices).
current_term = TID % n_terms;
i = (TID / n_terms) % N;
interp_j = ((TID / n_terms) / N) % n_interpolation_points;
interp_i = ((TID / n_terms) / N) / n_interpolation_points;
box_idx = point_box_indices[i];
box_i = box_idx % n_boxes;
box_j = box_idx / n_boxes;
// Flat node index on the global interpolation lattice for this point's box.
idx = (box_i * n_interpolation_points + interp_i) * (n_boxes * n_interpolation_points) +
(box_j * n_interpolation_points) + interp_j;
// interpolated_values[TID] = x_interpolated_values[i + interp_i * N] * y_interpolated_values[i + interp_j * N] * y_tilde_values[idx * n_terms + current_term];
// interpolated_indices[TID] = i * n_terms + current_term;
// Many threads target the same (point, term) slot, hence the atomicAdd.
atomicAdd(
potentialsQij + i * n_terms + current_term,
x_interpolated_values[i + interp_i * N] * y_interpolated_values[i + interp_j * N] * y_tilde_values[idx * n_terms + current_term]);
}
// Squared 2-D Cauchy kernel: (1 + ||(x1,x2) - (y1,y2)||^2)^-2.
// Fixed: the original called double-precision pow() with double literals,
// forcing a float->double->float round trip in device code; this version
// stays in single precision and avoids pow() entirely.
__host__ __device__ float squared_cauchy_2d(float x1, float x2, float y1, float y2) {
    float dx = x1 - y1;
    float dy = x2 - y2;
    float t = 1.0f + dx * dx + dy * dy;
    return 1.0f / (t * t);
}
// Tabulate the squared-Cauchy kernel on the equispaced node lattice and embed
// it symmetrically into the 2x-padded circulant-kernel plane used by the FFT.
__global__ void compute_kernel_tilde(
volatile float * __restrict__ kernel_tilde,
const float x_min,
const float y_min,
const float h,
const int n_interpolation_points_1d,
const int n_fft_coeffs)
{
register int TID, i, j;
register float tmp;
TID = threadIdx.x + blockIdx.x * blockDim.x;
if (TID >= n_interpolation_points_1d * n_interpolation_points_1d)
return;
i = TID / n_interpolation_points_1d;
j = TID % n_interpolation_points_1d;
// Kernel value for lattice offset (i, j) from the reference node.
tmp = squared_cauchy_2d(y_min + h / 2, x_min + h / 2, y_min + h / 2 + i * h, x_min + h / 2 + j * h);
// Mirror into all four quadrants around the plane center to build the
// circulant embedding. When i == 0 or j == 0 some of these writes alias the
// same cell, but they store the same value, so the duplication is benign.
kernel_tilde[(n_interpolation_points_1d + i) * n_fft_coeffs + (n_interpolation_points_1d + j)] = tmp;
kernel_tilde[(n_interpolation_points_1d - i) * n_fft_coeffs + (n_interpolation_points_1d + j)] = tmp;
kernel_tilde[(n_interpolation_points_1d + i) * n_fft_coeffs + (n_interpolation_points_1d - j)] = tmp;
kernel_tilde[(n_interpolation_points_1d - i) * n_fft_coeffs + (n_interpolation_points_1d - j)] = tmp;
}
// Fill per-box lower/upper coordinate bounds for an n_boxes x n_boxes grid.
// x bounds occupy the first n_total_boxes entries of each array, y bounds the
// next n_total_boxes entries.
__global__ void compute_upper_and_lower_bounds(
volatile float * __restrict__ box_upper_bounds,
volatile float * __restrict__ box_lower_bounds,
const float box_width,
const float x_min,
const float y_min,
const int n_boxes,
const int n_total_boxes)
{
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid >= n_boxes * n_boxes)
        return;
    const int row = tid / n_boxes;
    const int col = tid % n_boxes;
    const int cell = row * n_boxes + col;
    // x extent depends on the column, y extent on the row.
    box_lower_bounds[cell] = col * box_width + x_min;
    box_upper_bounds[cell] = (col + 1) * box_width + x_min;
    box_lower_bounds[n_total_boxes + cell] = row * box_width + y_min;
    box_upper_bounds[n_total_boxes + cell] = (row + 1) * box_width + y_min;
}
// Scatter sparse (index, value) pairs into the dense coefficient array.
__global__ void copy_to_w_coefficients(
volatile float * __restrict__ w_coefficients_device,
const int * const output_indices,
const float * const output_values,
const int num_elements)
{
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid < num_elements) {
        w_coefficients_device[output_indices[tid]] = output_values[tid];
    }
}
// One-time setup for the FFT-accelerated N-body evaluation:
//   1. compute per-box coordinate bounds,
//   2. tabulate the embedded circulant kernel on the padded node lattice,
//   3. forward-FFT the kernel, caching the result in fft_kernel_tilde_device.
// All device vectors must be pre-sized by the caller.
// NOTE(review): y_max is accepted but never read -- box_width is derived from
// the x extent only, which presumably assumes a square domain; confirm.
// NOTE(review): the cufftExecR2C return status is not checked here.
void precompute_2d(cufftHandle &plan_kernel_tilde, float x_max, float x_min, float y_max, float y_min, int n_boxes, int n_interpolation_points,
thrust::device_vector<float> &box_lower_bounds_device, thrust::device_vector<float> &box_upper_bounds_device,
thrust::device_vector<float> &kernel_tilde_device, thrust::device_vector<thrust::complex<float>> &fft_kernel_tilde_device) {
const int num_threads = 32;
int num_blocks = (n_boxes * n_boxes + num_threads - 1) / num_threads;
/*
* Set up the boxes
*/
int n_total_boxes = n_boxes * n_boxes;
float box_width = (x_max - x_min) / (float) n_boxes;
// Left and right bounds of each box, first the lower bounds in the x direction, then in the y direction
compute_upper_and_lower_bounds<<<num_blocks, num_threads>>>(
thrust::raw_pointer_cast(box_upper_bounds_device.data()),
thrust::raw_pointer_cast(box_lower_bounds_device.data()),
box_width, x_min, y_min, n_boxes, n_total_boxes);
// Coordinates of all the equispaced interpolation points
int n_interpolation_points_1d = n_interpolation_points * n_boxes;
int n_fft_coeffs = 2 * n_interpolation_points_1d;
float h = box_width / (float) n_interpolation_points;
/*
* Evaluate the kernel at the interpolation nodes and form the embedded generating kernel vector for a circulant
* matrix
*/
// thrust::device_vector<float> kernel_tilde_device(n_fft_coeffs * n_fft_coeffs);
num_blocks = (n_interpolation_points_1d * n_interpolation_points_1d + num_threads - 1) / num_threads;
compute_kernel_tilde<<<num_blocks, num_threads>>>(
thrust::raw_pointer_cast(kernel_tilde_device.data()),
x_min, y_min, h, n_interpolation_points_1d, n_fft_coeffs);
GpuErrorCheck(cudaDeviceSynchronize());
// Precompute the FFT of the kernel generating matrix
cufftExecR2C(plan_kernel_tilde,
reinterpret_cast<cufftReal *>(thrust::raw_pointer_cast(kernel_tilde_device.data())),
reinterpret_cast<cufftComplex *>(thrust::raw_pointer_cast(fft_kernel_tilde_device.data())));
}
// One FFT-accelerated 2-D N-body (FIt-SNE style) evaluation:
//   Step 0: locate each point's box and in-box coordinates.
//   Step 1: evaluate 1-D Lagrange weights in x and y, then scatter the
//           charges onto the box node lattice (w coefficients).
//   Step 2: convolve with the precomputed kernel via forward FFT, a
//           broadcasted complex multiply, and inverse FFT.
//   Step 3: gather the node potentials back onto the points
//           (accumulated into potentialsQij_device).
// All device vectors must be pre-sized by the caller; plans must match the
// n_fft_coeffs geometry used in precompute_2d.
// NOTE(review): total_interpolation_points, all_interpolated_values_device,
// output_values, all_interpolated_indices and output_indices are accepted but
// never used in this function body; presumably kept for interface stability.
// NOTE(review): cufftExecR2C/cufftExecC2R return statuses are not checked.
void n_body_fft_2d(
cufftHandle &plan_dft,
cufftHandle &plan_idft,
int N,
int n_terms,
int n_boxes,
int n_interpolation_points,
thrust::device_vector<thrust::complex<float>> &fft_kernel_tilde_device,
int n_total_boxes,
int total_interpolation_points,
float coord_min,
float box_width,
int n_fft_coeffs_half,
int n_fft_coeffs,
int num_nodes,
thrust::device_vector<float> &fft_input,
thrust::device_vector<thrust::complex<float>> &fft_w_coefficients,
thrust::device_vector<float> &fft_output,
thrust::device_vector<int> &point_box_idx_device,
thrust::device_vector<float> &x_in_box_device,
thrust::device_vector<float> &y_in_box_device,
thrust::device_vector<float> &points_device,
thrust::device_vector<float> &box_lower_bounds_device,
thrust::device_vector<float> &y_tilde_spacings_device,
thrust::device_vector<float> &denominator_device,
thrust::device_vector<float> &y_tilde_values,
thrust::device_vector<float> &all_interpolated_values_device,
thrust::device_vector<float> &output_values,
thrust::device_vector<int> &all_interpolated_indices,
thrust::device_vector<int> &output_indices,
thrust::device_vector<float> &w_coefficients_device,
thrust::device_vector<float> &chargesQij_device,
thrust::device_vector<float> &x_interpolated_values_device,
thrust::device_vector<float> &y_interpolated_values_device,
thrust::device_vector<float> &potentialsQij_device) {
// std::cout << "start" << std::endl;
const int num_threads = 128;
int num_blocks = (N + num_threads - 1) / num_threads;
// Compute box indices and the relative position of each point in its box in the interval [0, 1]
// x coordinates live in points_device[0 .. num_nodes]; y coordinates start at
// offset num_nodes + 1.
compute_point_box_idx<<<num_blocks, num_threads>>>(
thrust::raw_pointer_cast(point_box_idx_device.data()),
thrust::raw_pointer_cast(x_in_box_device.data()),
thrust::raw_pointer_cast(y_in_box_device.data()),
thrust::raw_pointer_cast(points_device.data()),
thrust::raw_pointer_cast(points_device.data() + num_nodes + 1),
thrust::raw_pointer_cast(box_lower_bounds_device.data()),
coord_min,
box_width,
n_boxes,
n_total_boxes,
N
);
GpuErrorCheck(cudaDeviceSynchronize());
/*
* Step 1: Interpolate kernel using Lagrange polynomials and compute the w coefficients
*/
// Compute the interpolated values at each real point with each Lagrange polynomial in the `x` direction
num_blocks = (N * n_interpolation_points + num_threads - 1) / num_threads;
interpolate_device<<<num_blocks, num_threads>>>(
thrust::raw_pointer_cast(x_interpolated_values_device.data()),
thrust::raw_pointer_cast(x_in_box_device.data()),
thrust::raw_pointer_cast(y_tilde_spacings_device.data()),
thrust::raw_pointer_cast(denominator_device.data()),
n_interpolation_points,
N
);
GpuErrorCheck(cudaDeviceSynchronize());
// Compute the interpolated values at each real point with each Lagrange polynomial in the `y` direction
interpolate_device<<<num_blocks, num_threads>>>(
thrust::raw_pointer_cast(y_interpolated_values_device.data()),
thrust::raw_pointer_cast(y_in_box_device.data()),
thrust::raw_pointer_cast(y_tilde_spacings_device.data()),
thrust::raw_pointer_cast(denominator_device.data()),
n_interpolation_points,
N
);
GpuErrorCheck(cudaDeviceSynchronize());
// Scatter each point's charge onto the node lattice (atomic accumulation).
num_blocks = (n_terms * n_interpolation_points * n_interpolation_points * N + num_threads - 1) / num_threads;
compute_interpolated_indices<<<num_blocks, num_threads>>>(
thrust::raw_pointer_cast(w_coefficients_device.data()),
thrust::raw_pointer_cast(point_box_idx_device.data()),
thrust::raw_pointer_cast(chargesQij_device.data()),
thrust::raw_pointer_cast(x_interpolated_values_device.data()),
thrust::raw_pointer_cast(y_interpolated_values_device.data()),
N,
n_interpolation_points,
n_boxes,
n_terms
);
GpuErrorCheck(cudaDeviceSynchronize());
/*
* Step 2: Compute the values v_{m, n} at the equispaced nodes, multiply the kernel matrix with the coefficients w
*/
num_blocks = ((n_terms * n_fft_coeffs_half * n_fft_coeffs_half) + num_threads - 1) / num_threads;
copy_to_fft_input<<<num_blocks, num_threads>>>(
thrust::raw_pointer_cast(fft_input.data()),
thrust::raw_pointer_cast(w_coefficients_device.data()),
n_fft_coeffs,
n_fft_coeffs_half,
n_terms
);
GpuErrorCheck(cudaDeviceSynchronize());
// Compute fft values at interpolated nodes
cufftExecR2C(plan_dft,
reinterpret_cast<cufftReal *>(thrust::raw_pointer_cast(fft_input.data())),
reinterpret_cast<cufftComplex *>(thrust::raw_pointer_cast(fft_w_coefficients.data())));
GpuErrorCheck(cudaDeviceSynchronize());
// Take the broadcasted Hadamard product of a complex matrix and a complex vector
tsnecuda::util::BroadcastMatrixVector(
fft_w_coefficients, fft_kernel_tilde_device, n_fft_coeffs * (n_fft_coeffs / 2 + 1), n_terms,
thrust::multiplies<thrust::complex<float>>(), 0, thrust::complex<float>(1.0));
// Invert the computed values at the interpolated nodes
cufftExecC2R(plan_idft,
reinterpret_cast<cufftComplex *>(thrust::raw_pointer_cast(fft_w_coefficients.data())),
reinterpret_cast<cufftReal *>(thrust::raw_pointer_cast(fft_output.data())));
GpuErrorCheck(cudaDeviceSynchronize());
// num_blocks from the copy_to_fft_input launch is reused here:
// copy_from_fft_output covers the same (term, cell) thread range.
copy_from_fft_output<<<num_blocks, num_threads>>>(
thrust::raw_pointer_cast(y_tilde_values.data()),
thrust::raw_pointer_cast(fft_output.data()),
n_fft_coeffs,
n_fft_coeffs_half,
n_terms
);
GpuErrorCheck(cudaDeviceSynchronize());
/*
* Step 3: Compute the potentials \tilde{\phi}
*/
num_blocks = (n_terms * n_interpolation_points * n_interpolation_points * N + num_threads - 1) / num_threads;
compute_potential_indices<<<num_blocks, num_threads>>>(
thrust::raw_pointer_cast(potentialsQij_device.data()),
thrust::raw_pointer_cast(point_box_idx_device.data()),
thrust::raw_pointer_cast(y_tilde_values.data()),
thrust::raw_pointer_cast(x_interpolated_values_device.data()),
thrust::raw_pointer_cast(y_interpolated_values_device.data()),
N,
n_interpolation_points,
n_boxes,
n_terms
);
GpuErrorCheck(cudaDeviceSynchronize());
}
// Accessor for the recorded n-body timing buffer.
float* get_ntime() {
    float* timings = _nbody_times;
    return timings;
}
/*
 * Evaluates the numerator of every Lagrange basis polynomial (one per
 * interpolation node) at each of the N input points and divides by the
 * caller-supplied precomputed denominators (the denominators are identical
 * across interpolants, so they are computed once by the caller).
 *
 * Output layout: interpolated_values[j * N + i] holds the value of basis
 * polynomial j evaluated at point i.
 */
void interpolate(int n_interpolation_points, int N, const float *y_in_box, const float *y_tilde_spacings,
                 float *interpolated_values, const float *denominator) {
    for (int i = 0; i < N; i++) {
        const float y = y_in_box[i];
        for (int j = 0; j < n_interpolation_points; j++) {
            // Product over all nodes except node j itself.
            float numerator = 1;
            for (int k = 0; k < n_interpolation_points; k++) {
                if (j != k) {
                    numerator *= y - y_tilde_spacings[k];
                }
            }
            interpolated_values[j * N + i] = numerator / denominator[j];
        }
    }
}
|
5677c25cfa28b2d3b4f11875710a911fed1648b3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// This example introduces __device__ functions, which are special functions
// which may be called from code executing on the device.
#include <stdlib.h>
#include <stdio.h>
// __device__ functions may only be called from __global__ functions or other
// __device__ functions. Unlike __global__ functions, __device__ functions are
// not configured, and have no restriction on return type.
__device__ int get_constant(void)
{
  // Constant producer used by kernel1: always yields 7.
  return 7;
}
__device__ int get_block_index(void)
{
  // Index of the calling thread's block within the grid.
  return blockIdx.x;
}
__device__ int get_thread_index(void)
{
  // Index of the calling thread within its block.
  return threadIdx.x;
}
__device__ int get_global_index(void)
{
  // Grid-wide linear index of the calling thread (1D launch assumed).
  int block_offset = blockIdx.x * blockDim.x;
  return block_offset + threadIdx.x;
}
// kernel1: every element receives the fixed value from get_constant().
__global__ void kernel1(int *array)
{
  array[get_global_index()] = get_constant();
}
// kernel2: every element receives the block index of the thread that wrote it.
__global__ void kernel2(int *array)
{
  array[get_global_index()] = get_block_index();
}
// kernel3: every element receives the writer's index within its block.
__global__ void kernel3(int *array)
{
  array[get_global_index()] = get_thread_index();
}
// kernel4: every element receives its own global index.
__global__ void kernel4(int *array)
{
  int index = get_global_index();
  array[index] = index;
}
// Copy the device results back to the host and print them under `label`.
// Factors out the copy/print sequence that was duplicated for each kernel.
static void print_results(const char *label, int *host_array,
                          const int *device_array, int num_bytes,
                          int num_elements)
{
  hipMemcpy(host_array, device_array, num_bytes, hipMemcpyDeviceToHost);
  printf("%s results:\n", label);
  for(int i = 0; i < num_elements; ++i)
  {
    printf("%d ", host_array[i]);
  }
  printf("\n\n");
}
int main(void)
{
  int num_elements = 256;
  int num_bytes = num_elements * sizeof(int);
  int *device_array = 0;
  int *host_array = 0;
  // malloc a host array
  host_array = (int*)malloc(num_bytes);
  // hipMalloc a device array
  hipMalloc((void**)&device_array, num_bytes);
  // if either memory allocation failed, report an error message and release
  // whichever allocation (if any) did succeed -- the original leaked it
  if(host_array == 0 || device_array == 0)
  {
    printf("couldn't allocate memory\n");
    free(host_array);      // free(NULL) is a no-op
    hipFree(device_array); // hipFree(NULL) is a no-op
    return 1;
  }
  // choose a launch configuration (256 elements / 128 threads -> 2 blocks)
  int block_size = 128;
  int grid_size = num_elements / block_size;
  // launch each kernel and print out the results
  hipLaunchKernelGGL(( kernel1), dim3(grid_size),dim3(block_size), 0, 0, device_array);
  print_results("kernel1", host_array, device_array, num_bytes, num_elements);
  hipLaunchKernelGGL(( kernel2), dim3(grid_size),dim3(block_size), 0, 0, device_array);
  print_results("kernel2", host_array, device_array, num_bytes, num_elements);
  hipLaunchKernelGGL(( kernel3), dim3(grid_size),dim3(block_size), 0, 0, device_array);
  print_results("kernel3", host_array, device_array, num_bytes, num_elements);
  hipLaunchKernelGGL(( kernel4), dim3(grid_size),dim3(block_size), 0, 0, device_array);
  print_results("kernel4", host_array, device_array, num_bytes, num_elements);
  // deallocate memory
  free(host_array);
  hipFree(device_array);
  return 0;
} | 5677c25cfa28b2d3b4f11875710a911fed1648b3.cu | // This example introduces __device__ functions, which are special functions
// which may be called from code executing on the device.
#include <stdlib.h>
#include <stdio.h>
// __device__ functions may only be called from __global__ functions or other
// __device__ functions. Unlike __global__ functions, __device__ functions are
// not configured, and have no restriction on return type.
__device__ int get_constant(void)
{
  // Constant producer used by kernel1: always yields 7.
  return 7;
}
__device__ int get_block_index(void)
{
  // Index of the calling thread's block within the grid.
  return blockIdx.x;
}
__device__ int get_thread_index(void)
{
  // Index of the calling thread within its block.
  return threadIdx.x;
}
__device__ int get_global_index(void)
{
  // Grid-wide linear index of the calling thread (1D launch assumed).
  int block_offset = blockIdx.x * blockDim.x;
  return block_offset + threadIdx.x;
}
// kernel1: every element receives the fixed value from get_constant().
__global__ void kernel1(int *array)
{
  array[get_global_index()] = get_constant();
}
// kernel2: every element receives the block index of the thread that wrote it.
__global__ void kernel2(int *array)
{
  array[get_global_index()] = get_block_index();
}
// kernel3: every element receives the writer's index within its block.
__global__ void kernel3(int *array)
{
  array[get_global_index()] = get_thread_index();
}
// kernel4: every element receives its own global index.
__global__ void kernel4(int *array)
{
  int index = get_global_index();
  array[index] = index;
}
// Copy the device results back to the host and print them under `label`.
// Factors out the copy/print sequence that was duplicated for each kernel.
static void print_results(const char *label, int *host_array,
                          const int *device_array, int num_bytes,
                          int num_elements)
{
  cudaMemcpy(host_array, device_array, num_bytes, cudaMemcpyDeviceToHost);
  printf("%s results:\n", label);
  for(int i = 0; i < num_elements; ++i)
  {
    printf("%d ", host_array[i]);
  }
  printf("\n\n");
}
int main(void)
{
  int num_elements = 256;
  int num_bytes = num_elements * sizeof(int);
  int *device_array = 0;
  int *host_array = 0;
  // malloc a host array
  host_array = (int*)malloc(num_bytes);
  // cudaMalloc a device array
  cudaMalloc((void**)&device_array, num_bytes);
  // if either memory allocation failed, report an error message and release
  // whichever allocation (if any) did succeed -- the original leaked it
  if(host_array == 0 || device_array == 0)
  {
    printf("couldn't allocate memory\n");
    free(host_array);       // free(NULL) is a no-op
    cudaFree(device_array); // cudaFree(NULL) is a no-op
    return 1;
  }
  // choose a launch configuration (256 elements / 128 threads -> 2 blocks)
  int block_size = 128;
  int grid_size = num_elements / block_size;
  // launch each kernel and print out the results
  kernel1<<<grid_size,block_size>>>(device_array);
  print_results("kernel1", host_array, device_array, num_bytes, num_elements);
  kernel2<<<grid_size,block_size>>>(device_array);
  print_results("kernel2", host_array, device_array, num_bytes, num_elements);
  kernel3<<<grid_size,block_size>>>(device_array);
  print_results("kernel3", host_array, device_array, num_bytes, num_elements);
  kernel4<<<grid_size,block_size>>>(device_array);
  print_results("kernel4", host_array, device_array, num_bytes, num_elements);
  // deallocate memory
  free(host_array);
  cudaFree(device_array);
  return 0;
} |
d61e44ee0947328c25b093d9351221a3c07e1331.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <despot/GPUrandom_streams.h>
#include <vector>
using namespace std;
namespace despot {
static double* tmp_streams=NULL;
static double* Hst_streams_list=NULL;
static double* Dvc_streams_list=NULL;
#define DIM 128
// Device-side constructor: deep-copies `stream` (num_streams rows of
// `length` doubles) into freshly device-heap-allocated storage owned by
// this object (the destructor frees it).
// NOTE(review): device-side malloc results are not checked; allocation can
// fail when the device heap is exhausted -- TODO confirm the heap size is
// configured large enough by the caller.
DEVICE Dvc_RandomStreams::Dvc_RandomStreams(int num_streams, int length, double** stream) :
position_(0) {
//need to pass a set of random streams manually from the CPU side
num_streams_=num_streams;
length_=length;
streams_=(double**)malloc(sizeof(double*)*num_streams);
for (int i = 0; i < num_streams; i++) {
streams_[i]=(double*)malloc(sizeof(double)*length);
memcpy(streams_[i],stream[i], sizeof(double)*length);
}
// this object owns the buffers, so the destructor must release them
externel_streams=false;
}
// Device-side constructor that wraps an externally owned stream table.
// The cursor starts at `pos` and the object does NOT take ownership of
// `stream`, so the destructor leaves the buffers alone.
DEVICE Dvc_RandomStreams::Dvc_RandomStreams(int num_streams, int length, double** stream, int pos)
{
  // Adopt the caller's table instead of deep-copying it.
  streams_ = stream;
  externel_streams = true;
  num_streams_ = num_streams;
  length_ = length;
  position_ = pos;
}
// Device-side destructor: releases the per-stream rows and the row table,
// but only when this object deep-copied them (externel_streams == false);
// externally supplied tables are left untouched.
DEVICE Dvc_RandomStreams::~Dvc_RandomStreams()
{
if(!externel_streams)
{
for (int i = 0; i < num_streams_; i++) {
free(streams_[i]);
}
free(streams_);
}
}
// Number of independent random streams held by this object.
DEVICE int Dvc_RandomStreams::NumStreams() const {
return num_streams_;
}
// Length of each stream (0 when the object holds no streams).
DEVICE int Dvc_RandomStreams::Length() const {
return num_streams_ > 0 ? length_ : 0;
}
// Move the shared cursor one entry forward.
// NOTE(review): position_ is modified inside const methods, so it is
// presumably declared mutable in the class header -- TODO confirm.
DEVICE void Dvc_RandomStreams::Advance() const {
position_++;
}
// Move the shared cursor one entry back.
DEVICE void Dvc_RandomStreams::Back() const {
position_--;
}
// Set the shared cursor to an absolute position.
DEVICE void Dvc_RandomStreams::position(int value) const {
position_ = value;
}
// Current cursor position.
DEVICE int Dvc_RandomStreams::position() const {
return position_;
}
// True once the cursor has walked past the last entry of the streams.
DEVICE bool Dvc_RandomStreams::Exhausted() const {
return position_ > Length() - 1;
}
// Entry of `stream` at the current cursor position.
DEVICE double Dvc_RandomStreams::Entry(int stream) const {
return streams_[stream][position_];
}
// Entry of `stream` at an explicit position.
DEVICE double Dvc_RandomStreams::Entry(int stream, int position) const {
return streams_[stream][position];
}
// Single-thread kernel: copies the scalar members (count, length, cursor)
// of one device-resident Dvc_RandomStreams into another; the stream table
// itself is not touched.  Launch with a 1x1 configuration.
__global__ void CopyMembers(Dvc_RandomStreams* des,Dvc_RandomStreams* src)
{
  des->num_streams_ = src->num_streams_;
  des->length_ = src->length_;
  des->position_ = src->position_;
}
// Single-thread kernel: allocates the row-pointer table of the stream
// matrix on the device heap.
// NOTE(review): the device-side malloc result is not checked here.
__global__ void InitStreams(Dvc_RandomStreams* Dvc, int num_streams)
{
Dvc->streams_=(double**)malloc(sizeof(double*)*num_streams/*100*//*Dvc->NumStreams()*/);
}
// One thread per stream: allocates each stream's row on the device heap,
// retrying in a loop until the allocation succeeds.
__global__ void InitStreams_STEP2(Dvc_RandomStreams* Dvc,int start, int num_streams)
{
int SID=start+blockIdx.x*blockDim.x+threadIdx.x;
if(SID<num_streams)
{
Dvc->streams_[SID]=NULL;
while(Dvc->streams_[SID]==NULL)
Dvc->streams_[SID]=(double*)malloc(sizeof(double)*Dvc->Length());
}
}
// Host-side initialisation of a device-resident Dvc_RandomStreams:
// publishes the stream count/length to the device object, allocates the
// device + pinned-host staging buffers used by CopyToGPU, and optionally
// allocates the per-stream device-heap rows in batches.
HOST void Dvc_RandomStreams::Init(Dvc_RandomStreams* Dvc, int num_streams,int length, bool do_step2)
{
  // Stage the scalar members through managed memory, then copy them into
  // the device object with a single-thread kernel.
  Dvc_RandomStreams* tmp;
  HANDLE_ERROR(hipMallocManaged((void**)&tmp, sizeof(Dvc_RandomStreams)));
  tmp->num_streams_ = num_streams;
  tmp->length_ = length;
  hipLaunchKernelGGL(( CopyMembers), dim3(1), dim3(1), 0, 0, Dvc, tmp);
  HANDLE_ERROR(hipDeviceSynchronize()); // was unchecked; check it like the other calls
  // Staging buffers reused by every CopyToGPU call (device + pinned host).
  HANDLE_ERROR(hipMalloc((void**)&(Dvc_streams_list), sizeof(double)*num_streams*length));
  HANDLE_ERROR(hipHostMalloc((void**)&(Hst_streams_list), sizeof(double)*num_streams*length, 0));
  HANDLE_ERROR(hipFree(tmp));
  // Allocate the row-pointer table on the device heap.
  // (An unused grid/threads pair that was never passed to a launch has been removed.)
  hipLaunchKernelGGL(( InitStreams), dim3(1), dim3(1), 0, 0, Dvc, num_streams);
  HANDLE_ERROR(hipDeviceSynchronize());
  if(do_step2)
  {
    // Allocate the rows in batches of 500 threads to bound device-heap pressure.
    int batch = 500;
    for(int bid = 0; bid < (num_streams + batch - 1) / batch; bid++)
    {
      dim3 grid1(1, 1);
      dim3 threads1(batch, 1);
      int start = bid * batch;
      hipLaunchKernelGGL(( InitStreams_STEP2), dim3(grid1), dim3(threads1), 0, 0, Dvc, start, num_streams);
      HANDLE_ERROR(hipDeviceSynchronize());
    }
  }
}
// Single-thread kernel that frees the device-heap stream rows and the row
// table.  All call sites are commented out in Clear(); the printf calls
// look like debugging leftovers.
__global__ void FreeStreams(int num_streams, Dvc_RandomStreams* streams)
{
for (int i =0; i < num_streams; i++) {
if (streams->streams_[i]){
printf("%d \n", i);
printf("%0x address\n", streams->streams_[i]);
free(streams->streams_[i]);
}
}
if(streams->streams_)
free(streams->streams_);
}
// Host-side teardown of the staging buffers allocated in Init().
// NOTE(review): tmp_streams is never assigned in this file, so the first
// hipFree presumably operates on NULL (a no-op) -- TODO confirm intent.
HOST void Dvc_RandomStreams::Clear(Dvc_RandomStreams* Dvc)
{
HANDLE_ERROR(hipFree(tmp_streams));
if(Dvc_streams_list)
{
HANDLE_ERROR(hipFree(Dvc_streams_list));Dvc_streams_list=NULL;
}
if(Hst_streams_list)
{
HANDLE_ERROR(hipHostFree(Hst_streams_list));Hst_streams_list=NULL;
}
// The kernels that would free the device-heap rows are kept below,
// commented out, for reference.
/*cout << "Launch FreeStreams\n";
FreeStreams<<<1, 1, 1>>>(Dvc->num_streams_, Dvc);
HANDLE_ERROR(hipDeviceSynchronize());*/
/*int batch=1;
for(int bid=0;bid<(Dvc->num_streams_+batch-1)/batch;bid++)
{
dim3 grid1(1,1);dim3 threads1(batch,1);
int start=bid*batch;
FreeStreams<<<grid1, threads1>>>(Dvc->num_streams_,start, Dvc);
HANDLE_ERROR(hipDeviceSynchronize());
}*/
}
// One thread per (stream, position) entry: scatters the flat staging buffer
// `src` (NumStreams()*Length() doubles) into the per-stream rows.
// Launched with one block per stream and Length() threads per block.
__global__ void CopyStreams(Dvc_RandomStreams* Dvc, double* src)
{
int SID=blockIdx.y*gridDim.x+blockIdx.x;
int pos=threadIdx.x;
if(SID<Dvc->NumStreams() && pos<Dvc->Length())
{
Dvc->streams_[SID][pos]=src[pos+SID*Dvc->Length()];
}
// Every thread writes the same value here (benign write race); resets the
// shared cursor after an upload.
Dvc->position_=0;
}
// Variant copying a single stream row SID from `src` (Length() doubles).
// Unlike the variant above it does not reset the cursor.
__global__ void CopyStreams(Dvc_RandomStreams* Dvc, double* src, int SID)
{
int pos=threadIdx.x;
if(SID<Dvc->NumStreams() && pos<Dvc->Length())
{
Dvc->streams_[SID][pos]=src[pos];
}
}
// Host-side upload of a CPU RandomStreams object into the device object:
// packs the per-stream vectors into the pinned staging buffer, copies it to
// the device staging buffer, then scatters it into the device-heap rows.
// NOTE(review): the cudaStream parameter is unused -- the copy and the
// kernel run on the default stream.
HOST void Dvc_RandomStreams::CopyToGPU(Dvc_RandomStreams* Dvc, const RandomStreams* Hst, void* cudaStream)
{
int num_streams=Hst->NumStreams();
int length=Hst->Length();
for (int i = 0; i < num_streams; i++)
{
memcpy((void*)(Hst_streams_list+i*length), (const void*)Hst->streams_[i].data(),sizeof(double)*length);
}
HANDLE_ERROR(hipMemcpy((void*)(Dvc_streams_list), (const void*)Hst_streams_list,sizeof(double)*num_streams*length, hipMemcpyHostToDevice));
dim3 grid1(num_streams,1);dim3 threads1(length,1);
hipLaunchKernelGGL(( CopyStreams), dim3(grid1), dim3(threads1), 0, 0, Dvc,Dvc_streams_list);
}
} // namespace despot
| d61e44ee0947328c25b093d9351221a3c07e1331.cu | #include <despot/GPUrandom_streams.h>
#include <vector>
using namespace std;
namespace despot {
static double* tmp_streams=NULL;
static double* Hst_streams_list=NULL;
static double* Dvc_streams_list=NULL;
#define DIM 128
// Device-side constructor: deep-copies `stream` (num_streams rows of
// `length` doubles) into freshly device-heap-allocated storage owned by
// this object (the destructor frees it).
// NOTE(review): device-side malloc results are not checked; allocation can
// fail when the device heap is exhausted -- TODO confirm the heap size is
// configured large enough by the caller.
DEVICE Dvc_RandomStreams::Dvc_RandomStreams(int num_streams, int length, double** stream) :
position_(0) {
//need to pass a set of random streams manually from the CPU side
num_streams_=num_streams;
length_=length;
streams_=(double**)malloc(sizeof(double*)*num_streams);
for (int i = 0; i < num_streams; i++) {
streams_[i]=(double*)malloc(sizeof(double)*length);
memcpy(streams_[i],stream[i], sizeof(double)*length);
}
// this object owns the buffers, so the destructor must release them
externel_streams=false;
}
// Device-side constructor that wraps an externally owned stream table.
// The cursor starts at `pos` and the object does NOT take ownership of
// `stream`, so the destructor leaves the buffers alone.
DEVICE Dvc_RandomStreams::Dvc_RandomStreams(int num_streams, int length, double** stream, int pos)
{
  // Adopt the caller's table instead of deep-copying it.
  streams_ = stream;
  externel_streams = true;
  num_streams_ = num_streams;
  length_ = length;
  position_ = pos;
}
// Device-side destructor: releases the per-stream rows and the row table,
// but only when this object deep-copied them (externel_streams == false);
// externally supplied tables are left untouched.
DEVICE Dvc_RandomStreams::~Dvc_RandomStreams()
{
if(!externel_streams)
{
for (int i = 0; i < num_streams_; i++) {
free(streams_[i]);
}
free(streams_);
}
}
// Number of independent random streams held by this object.
DEVICE int Dvc_RandomStreams::NumStreams() const {
return num_streams_;
}
// Length of each stream (0 when the object holds no streams).
DEVICE int Dvc_RandomStreams::Length() const {
return num_streams_ > 0 ? length_ : 0;
}
// Move the shared cursor one entry forward.
// NOTE(review): position_ is modified inside const methods, so it is
// presumably declared mutable in the class header -- TODO confirm.
DEVICE void Dvc_RandomStreams::Advance() const {
position_++;
}
// Move the shared cursor one entry back.
DEVICE void Dvc_RandomStreams::Back() const {
position_--;
}
// Set the shared cursor to an absolute position.
DEVICE void Dvc_RandomStreams::position(int value) const {
position_ = value;
}
// Current cursor position.
DEVICE int Dvc_RandomStreams::position() const {
return position_;
}
// True once the cursor has walked past the last entry of the streams.
DEVICE bool Dvc_RandomStreams::Exhausted() const {
return position_ > Length() - 1;
}
// Entry of `stream` at the current cursor position.
DEVICE double Dvc_RandomStreams::Entry(int stream) const {
return streams_[stream][position_];
}
// Entry of `stream` at an explicit position.
DEVICE double Dvc_RandomStreams::Entry(int stream, int position) const {
return streams_[stream][position];
}
// Single-thread kernel: copies the scalar members (count, length, cursor)
// of one device-resident Dvc_RandomStreams into another; the stream table
// itself is not touched.  Launch with a 1x1 configuration.
__global__ void CopyMembers(Dvc_RandomStreams* des,Dvc_RandomStreams* src)
{
  des->num_streams_ = src->num_streams_;
  des->length_ = src->length_;
  des->position_ = src->position_;
}
// Single-thread kernel: allocates the row-pointer table of the stream
// matrix on the device heap.
// NOTE(review): the device-side malloc result is not checked here.
__global__ void InitStreams(Dvc_RandomStreams* Dvc, int num_streams)
{
Dvc->streams_=(double**)malloc(sizeof(double*)*num_streams/*100*//*Dvc->NumStreams()*/);
}
// One thread per stream: allocates each stream's row on the device heap,
// retrying in a loop until the allocation succeeds.
__global__ void InitStreams_STEP2(Dvc_RandomStreams* Dvc,int start, int num_streams)
{
int SID=start+blockIdx.x*blockDim.x+threadIdx.x;
if(SID<num_streams)
{
Dvc->streams_[SID]=NULL;
while(Dvc->streams_[SID]==NULL)
Dvc->streams_[SID]=(double*)malloc(sizeof(double)*Dvc->Length());
}
}
// Host-side initialisation of a device-resident Dvc_RandomStreams:
// publishes the stream count/length to the device object, allocates the
// device + pinned-host staging buffers used by CopyToGPU, and optionally
// allocates the per-stream device-heap rows in batches.
HOST void Dvc_RandomStreams::Init(Dvc_RandomStreams* Dvc, int num_streams,int length, bool do_step2)
{
  // Stage the scalar members through managed memory, then copy them into
  // the device object with a single-thread kernel.
  Dvc_RandomStreams* tmp;
  HANDLE_ERROR(cudaMallocManaged((void**)&tmp, sizeof(Dvc_RandomStreams)));
  tmp->num_streams_ = num_streams;
  tmp->length_ = length;
  CopyMembers<<<1, 1>>>(Dvc, tmp);
  HANDLE_ERROR(cudaDeviceSynchronize()); // was unchecked; check it like the other calls
  // Staging buffers reused by every CopyToGPU call (device + pinned host).
  HANDLE_ERROR(cudaMalloc((void**)&(Dvc_streams_list), sizeof(double)*num_streams*length));
  HANDLE_ERROR(cudaHostAlloc((void**)&(Hst_streams_list), sizeof(double)*num_streams*length, 0));
  HANDLE_ERROR(cudaFree(tmp));
  // Allocate the row-pointer table on the device heap.
  // (An unused grid/threads pair that was never passed to a launch has been removed.)
  InitStreams<<<1, 1>>>(Dvc, num_streams);
  HANDLE_ERROR(cudaDeviceSynchronize());
  if(do_step2)
  {
    // Allocate the rows in batches of 500 threads to bound device-heap pressure.
    int batch = 500;
    for(int bid = 0; bid < (num_streams + batch - 1) / batch; bid++)
    {
      dim3 grid1(1, 1);
      dim3 threads1(batch, 1);
      int start = bid * batch;
      InitStreams_STEP2<<<grid1, threads1>>>(Dvc, start, num_streams);
      HANDLE_ERROR(cudaDeviceSynchronize());
    }
  }
}
// Single-thread kernel that frees the device-heap stream rows and the row
// table.  All call sites are commented out in Clear(); the printf calls
// look like debugging leftovers.
__global__ void FreeStreams(int num_streams, Dvc_RandomStreams* streams)
{
for (int i =0; i < num_streams; i++) {
if (streams->streams_[i]){
printf("%d \n", i);
printf("%0x address\n", streams->streams_[i]);
free(streams->streams_[i]);
}
}
if(streams->streams_)
free(streams->streams_);
}
// Host-side teardown of the staging buffers allocated in Init().
// NOTE(review): tmp_streams is never assigned in this file, so the first
// cudaFree presumably operates on NULL (a no-op) -- TODO confirm intent.
HOST void Dvc_RandomStreams::Clear(Dvc_RandomStreams* Dvc)
{
HANDLE_ERROR(cudaFree(tmp_streams));
if(Dvc_streams_list)
{
HANDLE_ERROR(cudaFree(Dvc_streams_list));Dvc_streams_list=NULL;
}
if(Hst_streams_list)
{
HANDLE_ERROR(cudaFreeHost(Hst_streams_list));Hst_streams_list=NULL;
}
// The kernels that would free the device-heap rows are kept below,
// commented out, for reference.
/*cout << "Launch FreeStreams\n";
FreeStreams<<<1, 1, 1>>>(Dvc->num_streams_, Dvc);
HANDLE_ERROR(cudaDeviceSynchronize());*/
/*int batch=1;
for(int bid=0;bid<(Dvc->num_streams_+batch-1)/batch;bid++)
{
dim3 grid1(1,1);dim3 threads1(batch,1);
int start=bid*batch;
FreeStreams<<<grid1, threads1>>>(Dvc->num_streams_,start, Dvc);
HANDLE_ERROR(cudaDeviceSynchronize());
}*/
}
// One thread per (stream, position) entry: scatters the flat staging buffer
// `src` (NumStreams()*Length() doubles) into the per-stream rows.
// Launched with one block per stream and Length() threads per block.
__global__ void CopyStreams(Dvc_RandomStreams* Dvc, double* src)
{
int SID=blockIdx.y*gridDim.x+blockIdx.x;
int pos=threadIdx.x;
if(SID<Dvc->NumStreams() && pos<Dvc->Length())
{
Dvc->streams_[SID][pos]=src[pos+SID*Dvc->Length()];
}
// Every thread writes the same value here (benign write race); resets the
// shared cursor after an upload.
Dvc->position_=0;
}
// Variant copying a single stream row SID from `src` (Length() doubles).
// Unlike the variant above it does not reset the cursor.
__global__ void CopyStreams(Dvc_RandomStreams* Dvc, double* src, int SID)
{
int pos=threadIdx.x;
if(SID<Dvc->NumStreams() && pos<Dvc->Length())
{
Dvc->streams_[SID][pos]=src[pos];
}
}
// Host-side upload of a CPU RandomStreams object into the device object:
// packs the per-stream vectors into the pinned staging buffer, copies it to
// the device staging buffer, then scatters it into the device-heap rows.
// NOTE(review): the cudaStream parameter is unused -- the copy and the
// kernel run on the default stream.
HOST void Dvc_RandomStreams::CopyToGPU(Dvc_RandomStreams* Dvc, const RandomStreams* Hst, void* cudaStream)
{
int num_streams=Hst->NumStreams();
int length=Hst->Length();
for (int i = 0; i < num_streams; i++)
{
memcpy((void*)(Hst_streams_list+i*length), (const void*)Hst->streams_[i].data(),sizeof(double)*length);
}
HANDLE_ERROR(cudaMemcpy((void*)(Dvc_streams_list), (const void*)Hst_streams_list,sizeof(double)*num_streams*length, cudaMemcpyHostToDevice));
dim3 grid1(num_streams,1);dim3 threads1(length,1);
CopyStreams<<<grid1, threads1>>>(Dvc,Dvc_streams_list);
}
|
f0f7f648c58b61ab09b4f7ad47f87319529a1bbf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Open source copyright declaration based on BSD open source template:
* http://www.opensource.org/licenses/bsd-license.php
*
* This file is part of the scalar-tridiagonal solver distribution.
*
* Copyright (c) 2015, Endre László and others. Please see the AUTHORS file in
* the main source directory for a full list of copyright holders.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * The name of Endre László may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY Endre László ''AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL Endre László BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Written by Endre Laszlo, University of Oxford, endre.laszlo@oerc.ox.ac.uk,
// 2013-2014
// With contributions from:
// Gabor Daniel Balogh, Pazmany Peter Catholic University,
// balogh.gabor.daniel@itk.ppke.hu, 2020
// Toby Flynn, University of Warwick, T.Flynn@warwick.ac.uk, 2020
// #include "trid_mpi_cuda.hpp"
#include "tridsolver.h"
#include "trid_mpi_solver_params.hpp"
#include "trid_mpi_common.hpp"
#include "trid_linear_mpi.hpp"
#include "trid_linear_mpi_reg.hpp"
#include "trid_strided_multidim_mpi.hpp"
#include "trid_cuda_mpi_pcr.hpp"
#include "trid_iterative_mpi.hpp"
#include "cutil_inline.h"
#include "cuda_timing.h"
#include <cassert>
#include <cmath>
#include <functional>
#include <initializer_list>
#include <numeric>
#include <type_traits>
#include <iostream>
namespace {
// Where a mem_buffer's storage lives: pinned host memory or device memory.
enum class memory_env { HOST, DEVICE };
// Grow-only scratch allocation: get_bytes_as() reallocates only when the
// requested size exceeds the current capacity, so repeated solves reuse the
// same buffer.  Non-copyable/non-movable; freed on destruction.
template <memory_env mem_env> struct mem_buffer {
size_t size = 0; /*<< size of the buffer in bytes */
char *buffer = nullptr; /*<< pointer to memory in mem_env */
// Release the allocation (device via hipFree, host via hipHostFree).
// NOTE(review): the free calls' return codes are ignored here.
void free() {
if (buffer) {
if (mem_env == memory_env::DEVICE) {
hipFree(buffer);
} else {
hipHostFree(buffer);
}
buffer = nullptr;
size = 0;
}
}
// Return at least `bytes` of storage reinterpreted as REAL*, growing the
// buffer if needed.  Growing discards the previous contents.
template <typename REAL> REAL *get_bytes_as(size_t bytes) {
if (size < bytes) {
free();
if (mem_env == memory_env::DEVICE) {
cudaSafeCall(hipMalloc(&buffer, bytes));
} else {
cudaSafeCall(hipHostMalloc(&buffer, bytes));
}
size = bytes;
}
return reinterpret_cast<REAL *>(buffer);
}
~mem_buffer() { free(); }
mem_buffer() noexcept = default;
mem_buffer(const mem_buffer &) = delete;
mem_buffer &operator=(const mem_buffer &) = delete;
mem_buffer(const mem_buffer &&) = delete;
mem_buffer &operator=(mem_buffer &&) = delete;
};
// File-scope scratch buffers shared across solver invocations.
mem_buffer<memory_env::DEVICE> aa_buf, cc_buf, boundaries_buf, mpi_buffer;
#if !(defined(TRID_CUDA_AWARE_MPI) || defined(TRID_NCCL))
mem_buffer<memory_env::HOST> send_buffer;
#endif
mem_buffer<memory_env::HOST> receive_buffer;
} // namespace
// Modified-Thomas forward sweep for a batch of `bsize` systems starting at
// `start_sys`.  solvedim == 0 dispatches to the register-blocked x-dim
// kernel; other dimensions use the strided multidim kernel.  The per-system
// boundary rows (a, c, d -- 3 values at each of the 2 ends) are written to
// `boundaries`; without CUDA-aware MPI / NCCL they are additionally staged
// into the pinned host buffer `send_buf_h` for the MPI allgather.
// NOTE(review): b_pads/c_pads/d_pads are accepted but the kernels below are
// called with a_pads for every array -- presumably all pads are equal here;
// TODO confirm.
template <typename REAL>
inline void forward_batched(dim3 dimGrid_x, dim3 dimBlock_x, const REAL *a,
const int *a_pads, const REAL *b, const int *b_pads,
const REAL *c, const int *c_pads, REAL *d,
const int *d_pads, REAL *aa, REAL *cc,
REAL *boundaries, REAL *send_buf_h, const int *dims,
int ndim, int solvedim, int start_sys, int bsize,
hipStream_t stream = nullptr) {
if (solvedim == 0) {
const int batch_offset = start_sys * a_pads[solvedim]; // TODO pads
int y_size = 1, y_pads = 1;
if (ndim > 1) {
y_size = dims[1];
y_pads = a_pads[1];
}
trid_linear_forward_reg(
dimGrid_x, dimBlock_x, a + batch_offset, b + batch_offset,
c + batch_offset, d + batch_offset, aa + batch_offset,
cc + batch_offset, boundaries + start_sys * 3 * 2, dims[solvedim],
a_pads[solvedim], bsize, start_sys, y_size, y_pads, stream);
} else {
DIM_V k_pads, k_dims; // TODO
for (int i = 0; i < ndim; ++i) {
k_pads.v[i] = a_pads[i];
k_dims.v[i] = dims[i];
}
hipLaunchKernelGGL(( trid_strided_multidim_forward<REAL>), dim3(dimGrid_x), dim3(dimBlock_x), 0, stream,
a, k_pads, b, k_pads, c, k_pads, d, k_pads, aa, cc, boundaries, ndim,
solvedim, bsize, k_dims, start_sys);
}
#if !(defined(TRID_CUDA_AWARE_MPI) || defined(TRID_NCCL))
// Stage the 3*2 boundary values per system to the pinned host buffer so a
// host-memory MPI allgather can be used.
size_t comm_buf_size = 3 * 2 * bsize;
size_t comm_buf_offset = 3 * 2 * start_sys;
hipMemcpyAsync(send_buf_h + comm_buf_offset, boundaries + comm_buf_offset,
sizeof(REAL) * comm_buf_size, hipMemcpyDeviceToHost, stream);
#endif
}
// Modified-Thomas backward substitution for a batch of `bsize` systems
// starting at `start_sys`, using the forward-pass coefficients `aa`/`cc` and
// the solved reduced-system values in `boundaries` (2 per system).
// INC selects increment vs overwrite semantics in the underlying kernels
// (result accumulated into `u` vs written to `d`).
// NOTE(review): as in forward_batched, a_pads is used for every array.
template <typename REAL, int INC>
inline void backward_batched(dim3 dimGrid_x, dim3 dimBlock_x, const REAL *aa,
const int *a_pads, const REAL *cc,
const int *c_pads, const int *d_pads,
const REAL *boundaries, REAL *d, REAL *u,
const int *u_pads, const int *dims, int ndim,
int solvedim, int start_sys, int bsize,
hipStream_t stream = nullptr) {
if (solvedim == 0) {
const int batch_offset = start_sys * a_pads[solvedim];
int y_size = 1, y_pads = 1;
if (ndim > 1) {
y_size = dims[1];
y_pads = a_pads[1];
}
trid_linear_backward_reg<REAL, INC>(
dimGrid_x, dimBlock_x, aa + batch_offset, cc + batch_offset,
d + batch_offset, u + batch_offset, boundaries + start_sys * 2,
dims[solvedim], a_pads[solvedim], bsize, start_sys, y_size, y_pads,
stream);
} else {
DIM_V k_pads, k_dims; // TODO
for (int i = 0; i < ndim; ++i) {
k_pads.v[i] = a_pads[i];
k_dims.v[i] = dims[i];
}
hipLaunchKernelGGL(( trid_strided_multidim_backward<REAL, INC>)
, dim3(dimGrid_x), dim3(dimBlock_x), 0, stream,
aa, k_pads, cc, k_pads, d, k_pads, u, k_pads, boundaries, ndim,
solvedim, bsize, k_dims, start_sys);
}
}
// Completes batch `bidx`: (1) without CUDA-aware MPI / NCCL, uploads the
// gathered reduced-system data from the pinned host buffer to the device;
// (2) solves the reduced systems with batched PCR; (3) runs the Thomas
// backward substitution for the batch.  All work is enqueued on `stream`.
template <typename REAL, int INC>
void reduced_and_backward(dim3 dimGrid_x, dim3 dimBlock_x, const REAL *aa,
const int *a_pads, const REAL *cc, const int *c_pads,
const int *d_pads, REAL *boundaries, REAL *d, REAL *u,
const int *u_pads, const REAL *recv_buf_h,
REAL *recv_buf, const int *dims, int ndim,
int solvedim, int mpi_coord, int bidx, int batch_size,
int num_batches, int reduced_len_g, int sys_n,
hipStream_t stream) {
// The last batch picks up the remainder of the systems.
int batch_start = bidx * batch_size;
int bsize = bidx == num_batches - 1 ? sys_n - batch_start : batch_size;
#if !(defined(TRID_CUDA_AWARE_MPI) || defined(TRID_NCCL))
size_t recv_comm_buf_offset = 3 * reduced_len_g * batch_start;
// copy the results of the reduced systems to the boundaries array
hipMemcpyAsync(
recv_buf + recv_comm_buf_offset, recv_buf_h + recv_comm_buf_offset,
reduced_len_g * 3 * bsize * sizeof(REAL), hipMemcpyHostToDevice, stream);
#endif
// Finish the solve for batch
BEGIN_PROFILING_CUDA2("reduced", stream);
int buf_offset = 3 * reduced_len_g * batch_start;
int bound_buf_offset = 2 * batch_start;
pcr_on_reduced_batched<REAL>(recv_buf + buf_offset,
boundaries + bound_buf_offset, bsize, mpi_coord,
reduced_len_g, stream);
END_PROFILING_CUDA2("reduced", stream);
// Perform the backward run of the modified thomas algorithm
BEGIN_PROFILING_CUDA2("thomas_backward", stream);
backward_batched<REAL, INC>(dimGrid_x, dimBlock_x, aa, a_pads, cc, c_pads,
d_pads, boundaries, d, u, u_pads, dims, ndim,
solvedim, batch_start, bsize, stream);
END_PROFILING_CUDA2("thomas_backward", stream);
}
// Distributed tridiagonal batch solve, "interleaved" strategy: the sys_n
// systems are split into batches so that the allgather of batch b's reduced
// systems overlaps with the forward sweep of batch b+1 (each batch runs on
// its own stream).  Once a batch's reduced data has arrived, its PCR solve
// and Thomas backward run are issued via reduced_and_backward().  INC
// selects increment vs overwrite semantics in the backward pass.
// Communication backend is chosen at compile time: CUDA-aware MPI, NCCL, or
// plain MPI via the pinned host buffers send_buf_h/recv_buf_h.
template <typename REAL, int INC>
inline void tridMultiDimBatchSolveMPI_interleaved(
const MpiSolverParams *params, const REAL *a, const int *a_pads,
const REAL *b, const int *b_pads, const REAL *c, const int *c_pads, REAL *d,
const int *d_pads, REAL *u, const int *u_pads, int ndim, int solvedim,
const int *dims, REAL *aa, REAL *cc, REAL *boundaries, REAL *recv_buf,
int sys_n, REAL *send_buf_h = nullptr, REAL *recv_buf_h = nullptr) {
BEGIN_PROFILING2("host-overhead");
// length of reduced system
const int reduced_len_l = 2;
const int reduced_len_g = reduced_len_l * params->num_mpi_procs[solvedim];
const int batch_size = ::min(params->mpi_batch_size, sys_n);
const int num_batches = 1 + (sys_n - 1) / batch_size;
// Calculate required number of CUDA threads and blocksS
int blockdimx = 128;
int blockdimy = 1;
int dimgrid = 1 + (batch_size - 1) / blockdimx; // can go up to 65535
int dimgridx = dimgrid % 65536; // can go up to max 65535 on Fermi
int dimgridy = 1 + dimgrid / 65536;
dim3 dimGrid_x(dimgridx, dimgridy);
dim3 dimBlock_x(blockdimx, blockdimy);
std::vector<MPI_Request> requests(num_batches);
std::vector<hipStream_t> streams(num_batches);
#ifdef TRID_NCCL
std::vector<hipEvent_t> events(num_batches);
for (int bidx = 0; bidx < num_batches; ++bidx)
cudaSafeCall(
hipEventCreateWithFlags(&events[bidx], hipEventDisableTiming));
#endif
for (int bidx = 0; bidx < num_batches; ++bidx)
hipStreamCreate(&streams[bidx]);
END_PROFILING2("host-overhead");
for (int bidx = 0; bidx < num_batches; ++bidx) {
// The last batch picks up the remainder of the systems.
int batch_start = bidx * batch_size;
int bsize = bidx == num_batches - 1 ? sys_n - batch_start : batch_size;
size_t comm_buf_size = 3 * reduced_len_l * bsize;
size_t comm_buf_offset = 3 * reduced_len_l * batch_start;
// Do modified thomas forward pass
// For the bidx-th batch
BEGIN_PROFILING_CUDA2("thomas_forward", streams[bidx]);
#ifdef TRID_NCCL
// TODO: this actually hurts in a system where p2p is enabled between all
// GPUs
// but does it help when we need to go through the network?
if (bidx != 0) // for interleaved, forward should wait for completion of
// previous forward
cudaSafeCall(hipStreamWaitEvent(streams[bidx], events[bidx - 1], 0));
#endif
forward_batched(dimGrid_x, dimBlock_x, a, a_pads, b, b_pads, c, c_pads, d,
d_pads, aa, cc, boundaries, send_buf_h, dims, ndim,
solvedim, batch_start, bsize, streams[bidx]);
END_PROFILING_CUDA2("thomas_forward", streams[bidx]);
// wait for the previous MPI transaction to finish
if (bidx != 0) {
BEGIN_PROFILING2("mpi_wait");
#ifndef TRID_NCCL
MPI_Status status;
MPI_Wait(&requests[bidx - 1], &status);
#endif
END_PROFILING2("mpi_wait");
// Finish the previous batch
reduced_and_backward<REAL, INC>(
dimGrid_x, dimBlock_x, aa, a_pads, cc, c_pads, d_pads, boundaries, d,
u, u_pads, recv_buf_h, recv_buf, dims, ndim, solvedim,
params->mpi_coords[solvedim], bidx - 1, batch_size, num_batches,
reduced_len_g, sys_n, streams[bidx - 1]);
}
#ifdef TRID_NCCL
cudaSafeCall(hipEventRecord(events[bidx], streams[bidx]));
#else
cudaSafeCall(hipStreamSynchronize(streams[bidx]));
#endif
BEGIN_PROFILING2("MPI_Iallgather");
// Send boundaries of the current batch
size_t recv_comm_buf_offset = 3 * reduced_len_g * batch_start;
#ifdef TRID_CUDA_AWARE_MPI
// Gather the reduced system to all nodes (using CUDA aware MPI)
MPI_Iallgather(boundaries + comm_buf_offset, comm_buf_size,
MPI_DATATYPE(REAL), recv_buf + recv_comm_buf_offset,
comm_buf_size, MPI_DATATYPE(REAL),
params->communicators[solvedim], &requests[bidx]);
#elif defined(TRID_NCCL)
NCCLCHECK(ncclAllGather(boundaries + comm_buf_offset,
recv_buf + recv_comm_buf_offset,
comm_buf_size * sizeof(REAL), ncclChar,
params->ncclComms[solvedim], streams[bidx]));
#else
// Communicate boundary results
MPI_Iallgather(send_buf_h + comm_buf_offset, comm_buf_size,
MPI_DATATYPE(REAL), recv_buf_h + recv_comm_buf_offset,
comm_buf_size, MPI_DATATYPE(REAL),
params->communicators[solvedim], &requests[bidx]);
#endif
END_PROFILING2("MPI_Iallgather");
} // batches
BEGIN_PROFILING2("mpi_wait");
// Need to finish last batch: receive message, do reduced and backward
// wait for the last MPI transaction to finish
#ifndef TRID_NCCL
MPI_Status status;
MPI_Wait(&requests[num_batches - 1], &status);
#endif
END_PROFILING2("mpi_wait");
reduced_and_backward<REAL, INC>(
dimGrid_x, dimBlock_x, aa, a_pads, cc, c_pads, d_pads, boundaries, d, u,
u_pads, recv_buf_h, recv_buf, dims, ndim, solvedim,
params->mpi_coords[solvedim], num_batches - 1, batch_size, num_batches,
reduced_len_g, sys_n, streams[num_batches - 1]);
BEGIN_PROFILING2("host-overhead");
#ifdef TRID_NCCL
for (int bidx = 0; bidx < num_batches; ++bidx)
cudaSafeCall(hipEventDestroy(events[bidx]));
#endif
for (int bidx = 0; bidx < num_batches; ++bidx)
hipStreamDestroy(streams[bidx]);
END_PROFILING2("host-overhead");
}
template <typename REAL, int INC>
inline void tridMultiDimBatchSolveMPI_simple(
const MpiSolverParams *params, const REAL *a, const int *a_pads,
const REAL *b, const int *b_pads, const REAL *c, const int *c_pads, REAL *d,
const int *d_pads, REAL *u, const int *u_pads, int ndim, int solvedim,
const int *dims, REAL *aa, REAL *cc, REAL *boundaries, REAL *recv_buf,
int sys_n, REAL *send_buf_h = nullptr, REAL *recv_buf_h = nullptr) {
BEGIN_PROFILING2("host-overhead");
// length of reduced system
const int reduced_len_l = 2;
const int reduced_len_g = reduced_len_l * params->num_mpi_procs[solvedim];
const int batch_size = ::min(params->mpi_batch_size, sys_n);
const int num_batches = 1 + (sys_n - 1) / batch_size;
// Calculate required number of CUDA threads and blocksS
int blockdimx = 128;
int blockdimy = 1;
int dimgrid = 1 + (batch_size - 1) / blockdimx; // can go up to 65535
int dimgridx = dimgrid % 65536; // can go up to max 65535 on Fermi
int dimgridy = 1 + dimgrid / 65536;
dim3 dimGrid_x(dimgridx, dimgridy);
dim3 dimBlock_x(blockdimx, blockdimy);
std::vector<MPI_Request> requests(num_batches);
std::vector<hipStream_t> streams(num_batches);
for (int bidx = 0; bidx < num_batches; ++bidx)
hipStreamCreate(&streams[bidx]);
END_PROFILING2("host-overhead");
for (int bidx = 0; bidx < num_batches; ++bidx) {
int batch_start = bidx * batch_size;
int bsize = bidx == num_batches - 1 ? sys_n - batch_start : batch_size;
// Do modified thomas forward pass
// For the bidx-th batch
BEGIN_PROFILING_CUDA2("thomas_forward", streams[bidx]);
forward_batched(dimGrid_x, dimBlock_x, a, a_pads, b, b_pads, c, c_pads, d,
d_pads, aa, cc, boundaries, send_buf_h, dims, ndim,
solvedim, batch_start, bsize, streams[bidx]);
END_PROFILING_CUDA2("thomas_forward", streams[bidx]);
} // batches
int ready_batches = 0;
for (int bidx = 0; bidx < num_batches; ++bidx) {
int batch_start = bidx * batch_size;
int bsize = bidx == num_batches - 1 ? sys_n - batch_start : batch_size;
#ifndef TRID_NCCL
while (hipStreamQuery(streams[bidx]) != hipSuccess &&
ready_batches != bidx) {
int finished, found_finished;
MPI_Status status;
// up until bidx all streams communicating
MPI_Testany(bidx, requests.data(), &finished, &found_finished, &status);
if (found_finished && finished != MPI_UNDEFINED) {
ready_batches++;
reduced_and_backward<REAL, INC>(
dimGrid_x, dimBlock_x, aa, a_pads, cc, c_pads, d_pads, boundaries,
d, u, u_pads, recv_buf_h, recv_buf, dims, ndim, solvedim,
params->mpi_coords[solvedim], finished, batch_size, num_batches,
reduced_len_g, sys_n, streams[finished]);
}
}
if (ready_batches == bidx) {
hipStreamSynchronize(streams[bidx]);
}
#endif
BEGIN_PROFILING2("MPI_Iallgather");
// Send boundaries of the current batch
size_t comm_buf_size = 3 * reduced_len_l * bsize;
size_t comm_buf_offset = 3 * reduced_len_l * batch_start;
size_t recv_comm_buf_offset = 3 * reduced_len_g * batch_start;
#ifdef TRID_CUDA_AWARE_MPI
// Gather the reduced system to all nodes (using CUDA aware MPI)
MPI_Iallgather(boundaries + comm_buf_offset, comm_buf_size,
MPI_DATATYPE(REAL), recv_buf + recv_comm_buf_offset,
comm_buf_size, MPI_DATATYPE(REAL),
params->communicators[solvedim], &requests[bidx]);
#elif defined(TRID_NCCL)
NCCLCHECK(ncclAllGather(boundaries + comm_buf_offset,
recv_buf + recv_comm_buf_offset,
comm_buf_size * sizeof(REAL), ncclChar,
params->ncclComms[solvedim], streams[bidx]));
#else
// Communicate boundary results
MPI_Iallgather(send_buf_h + comm_buf_offset, comm_buf_size,
MPI_DATATYPE(REAL), recv_buf_h + recv_comm_buf_offset,
comm_buf_size, MPI_DATATYPE(REAL),
params->communicators[solvedim], &requests[bidx]);
#endif
END_PROFILING2("MPI_Iallgather");
} // batches
#ifndef TRID_NCCL
MPI_Status status;
#endif
for (/*ready_batches*/; ready_batches < num_batches; ++ready_batches) {
// wait for a MPI transaction to finish
BEGIN_PROFILING2("mpi_wait");
int bidx;
#ifdef TRID_NCCL
bidx = ready_batches;
#else
int rc = MPI_Waitany(requests.size(), requests.data(), &bidx, &status);
assert(rc == MPI_SUCCESS && "error MPI communication failed");
#endif
END_PROFILING2("mpi_wait");
reduced_and_backward<REAL, INC>(
dimGrid_x, dimBlock_x, aa, a_pads, cc, c_pads, d_pads, boundaries, d, u,
u_pads, recv_buf_h, recv_buf, dims, ndim, solvedim,
params->mpi_coords[solvedim], bidx, batch_size, num_batches,
reduced_len_g, sys_n, streams[bidx]);
}
BEGIN_PROFILING2("host-overhead");
for (int bidx = 0; bidx < num_batches; ++bidx)
hipStreamDestroy(streams[bidx]);
END_PROFILING2("host-overhead");
}
template <typename REAL, int INC>
void tridMultiDimBatchSolveMPI_allgather(
const MpiSolverParams *params, const REAL *a, const int *a_pads,
const REAL *b, const int *b_pads, const REAL *c, const int *c_pads, REAL *d,
const int *d_pads, REAL *u, const int *u_pads, int ndim, int solvedim,
const int *dims, REAL *aa, REAL *cc, REAL *boundaries, REAL *recv_buf,
int sys_n, REAL *send_buf_h = nullptr, REAL *recv_buf_h = nullptr) {
BEGIN_PROFILING2("host-overhead");
// length of reduced system
const int reduced_len_l = 2;
const int reduced_len_g = reduced_len_l * params->num_mpi_procs[solvedim];
// Calculate required number of CUDA threads and blocksS
int blockdimx = 128;
int blockdimy = 1;
int dimgrid = 1 + (sys_n - 1) / blockdimx; // can go up to 65535
int dimgridx = dimgrid % 65536; // can go up to max 65535 on Fermi
int dimgridy = 1 + dimgrid / 65536;
dim3 dimGrid_x(dimgridx, dimgridy);
dim3 dimBlock_x(blockdimx, blockdimy);
const size_t comm_buf_size = 2 * 3 * sys_n;
END_PROFILING2("host-overhead");
// Do modified thomas forward pass
BEGIN_PROFILING_CUDA2("thomas_forward", 0);
forward_batched(dimGrid_x, dimBlock_x, a, a_pads, b, b_pads, c, c_pads, d,
d_pads, aa, cc, boundaries, send_buf_h, dims, ndim, solvedim,
0, sys_n);
cudaSafeCall(hipDeviceSynchronize());
END_PROFILING_CUDA2("thomas_forward", 0);
BEGIN_PROFILING2("mpi_communication");
#ifdef TRID_CUDA_AWARE_MPI
// Gather the reduced system to all nodes (using CUDA aware MPI)
MPI_Allgather(boundaries, comm_buf_size, MPI_DATATYPE(REAL), recv_buf,
comm_buf_size, MPI_DATATYPE(REAL),
params->communicators[solvedim]);
#elif defined(TRID_NCCL)
NCCLCHECK(ncclAllGather(boundaries, recv_buf, comm_buf_size * sizeof(REAL),
ncclChar, params->ncclComms[solvedim], 0));
#else
// Communicate boundary results
MPI_Allgather(send_buf_h, comm_buf_size, MPI_DATATYPE(REAL), recv_buf_h,
comm_buf_size, MPI_DATATYPE(REAL),
params->communicators[solvedim]);
// copy the results of the reduced systems to the beginning of the boundaries
// array
hipMemcpyAsync(recv_buf, recv_buf_h,
reduced_len_g * 3 * sys_n * sizeof(REAL),
hipMemcpyHostToDevice);
#endif
END_PROFILING2("mpi_communication");
// Solve the reduced system
BEGIN_PROFILING_CUDA2("reduced", 0);
pcr_on_reduced_batched<REAL>(recv_buf, boundaries, sys_n,
params->mpi_coords[solvedim], reduced_len_g);
END_PROFILING_CUDA2("reduced", 0);
// Do the backward pass to solve for remaining unknowns
BEGIN_PROFILING_CUDA2("thomas_backward", 0);
backward_batched<REAL, INC>(dimGrid_x, dimBlock_x, aa, a_pads, cc, c_pads,
d_pads, boundaries, d, u, u_pads, dims, ndim,
solvedim, 0, sys_n);
END_PROFILING_CUDA2("thomas_backward", 0);
}
template <typename REAL, int INC>
void tridMultiDimBatchSolveMPI_pcr(
const MpiSolverParams *params, const REAL *a, const int *a_pads,
const REAL *b, const int *b_pads, const REAL *c, const int *c_pads, REAL *d,
const int *d_pads, REAL *u, const int *u_pads, int ndim, int solvedim,
const int *dims, REAL *aa, REAL *cc, REAL *boundaries, REAL *recv_buf,
int sys_n, REAL *send_buf_h = nullptr,
REAL *recv_buf_h = nullptr) {
BEGIN_PROFILING2("host-overhead");
// Calculate required number of CUDA threads and blocksS
int blockdimx = 128;
int blockdimy = 1;
int dimgrid = 1 + (sys_n - 1) / blockdimx; // can go up to 65535
int dimgridx = dimgrid % 65536; // can go up to max 65535 on Fermi
int dimgridy = 1 + dimgrid / 65536;
dim3 dimGrid_x(dimgridx, dimgridy);
dim3 dimBlock_x(blockdimx, blockdimy);
END_PROFILING2("host-overhead");
// Do modified thomas forward pass
BEGIN_PROFILING_CUDA2("forward", 0);
forward_batched_pass<REAL, true>(
dimGrid_x, dimBlock_x, params, a, a_pads, b, b_pads, c, c_pads, d, d_pads,
aa, cc, boundaries, dims, ndim, solvedim, 0, sys_n);
cudaSafeCall(hipDeviceSynchronize());
END_PROFILING_CUDA2("forward", 0);
// Solve the reduced system
BEGIN_PROFILING2("reduced");
iterative_pcr_on_reduced(dimGrid_x, dimBlock_x, params, boundaries, sys_n,
solvedim, recv_buf, recv_buf_h, send_buf_h);
END_PROFILING2("reduced");
// Do the backward pass to solve for remaining unknowns
BEGIN_PROFILING_CUDA2("backward", 0);
backward_batched_pass<REAL, INC, true>(
dimGrid_x, dimBlock_x, params, aa, a_pads, cc, c_pads, boundaries, d,
d_pads, u, u_pads, dims, ndim, solvedim, 0, sys_n);
END_PROFILING_CUDA2("backward", 0);
}
template <typename REAL, int INC>
void tridMultiDimBatchSolveMPI_jacobi(
const MpiSolverParams *params, const REAL *a, const int *a_pads,
const REAL *b, const int *b_pads, const REAL *c, const int *c_pads, REAL *d,
const int *d_pads, REAL *u, const int *u_pads, int ndim, int solvedim,
const int *dims, REAL *aa, REAL *cc, REAL *boundaries, REAL *recv_buf,
int sys_n, REAL *send_buf_h = nullptr,
REAL *recv_buf_h = nullptr) {
BEGIN_PROFILING2("host-overhead");
// Calculate required number of CUDA threads and blocksS
int blockdimx = 128;
int blockdimy = 1;
int dimgrid = 1 + (sys_n - 1) / blockdimx; // can go up to 65535
int dimgridx = dimgrid % 65536; // can go up to max 65535 on Fermi
int dimgridy = 1 + dimgrid / 65536;
dim3 dimGrid_x(dimgridx, dimgridy);
dim3 dimBlock_x(blockdimx, blockdimy);
END_PROFILING2("host-overhead");
// Do modified thomas forward pass
BEGIN_PROFILING_CUDA2("forward", 0);
forward_batched_pass<REAL, true, false>(
dimGrid_x, dimBlock_x, params, a, a_pads, b, b_pads, c, c_pads, d, d_pads,
aa, cc, boundaries, dims, ndim, solvedim, 0, sys_n);
cudaSafeCall(hipDeviceSynchronize());
END_PROFILING_CUDA2("forward", 0);
// Solve the reduced system
BEGIN_PROFILING2("reduced");
iterative_jacobi_on_reduced(dimGrid_x, dimBlock_x, params, boundaries, sys_n,
solvedim, recv_buf, recv_buf_h, send_buf_h);
END_PROFILING2("reduced");
// Do the backward pass to solve for remaining unknowns
BEGIN_PROFILING_CUDA2("backward", 0);
backward_batched_pass<REAL, INC, true, false>(
dimGrid_x, dimBlock_x, params, aa, a_pads, cc, c_pads, boundaries, d,
d_pads, u, u_pads, dims, ndim, solvedim, 0, sys_n);
END_PROFILING_CUDA2("backward", 0);
}
template <typename REAL, int INC>
void tridMultiDimBatchSolveMPI(const MpiSolverParams *params, const REAL *a,
const int *a_pads, const REAL *b,
const int *b_pads, const REAL *c,
const int *c_pads, REAL *d, const int *d_pads,
REAL *u, const int *u_pads, int ndim,
int solvedim, const int *dims) {
assert(solvedim < ndim);
static_assert(
(std::is_same<REAL, float>::value || std::is_same<REAL, double>::value),
"trid_solve_mpi: only double or float values are supported");
// The size of the equations / our domain
assert(dims[solvedim] >= 2 &&
"One of the processes has fewer than 2 equations, this is not "
"supported\n");
const int eq_stride =
std::accumulate(dims, dims + solvedim, 1, std::multiplies<int>());
// The product of the sizes along the dimensions higher than solve_dim; needed
// for the iteration later
const int outer_size = std::accumulate(dims + solvedim + 1, dims + ndim, 1,
std::multiplies<int>());
// The number of systems to solve
// const int sys_n = eq_stride * outer_size;
int sys_n = 1;
if (solvedim == 0) {
if (ndim == 2) {
sys_n = dims[1];
} else if (ndim > 2) {
sys_n = dims[ndim - 1] * std::accumulate(a_pads + solvedim + 1,
a_pads + ndim - 1, 1,
std::multiplies<int>());
}
} else {
sys_n = eq_stride * outer_size;
}
// The local length of reduced systems
const int loc_red_len = 2;
// Allocate memory used during the solve
// const int local_helper_size = outer_size * eq_stride * local_eq_size;
const int local_helper_size =
std::accumulate(a_pads, a_pads + ndim, 1, std::multiplies<int>());
REAL *aa = aa_buf.get_bytes_as<REAL>(local_helper_size * sizeof(REAL)),
*cc = cc_buf.get_bytes_as<REAL>(local_helper_size * sizeof(REAL)),
*boundaries = boundaries_buf.get_bytes_as<REAL>(sys_n * 3 * loc_red_len *
sizeof(REAL));
// Allocate receive buffer for MPI communication of reduced system
const size_t reduced_len_g = 2 * params->num_mpi_procs[solvedim];
REAL *mpi_buf = nullptr;
REAL *send_buf = nullptr, *receive_buf = nullptr;
const size_t comm_buf_size = loc_red_len * 3 * sys_n;
switch (params->strategy) {
case MpiSolverParams::LATENCY_HIDING_INTERLEAVED:
case MpiSolverParams::LATENCY_HIDING_TWO_STEP:
case MpiSolverParams::GATHER_SCATTER:
case MpiSolverParams::ALLGATHER:
mpi_buf =
mpi_buffer.get_bytes_as<REAL>(reduced_len_g * 3 * sys_n * sizeof(REAL));
#if !(defined(TRID_CUDA_AWARE_MPI) || defined(TRID_NCCL))
// MPI buffers on host
send_buf = send_buffer.get_bytes_as<REAL>(comm_buf_size * sizeof(REAL));
receive_buf = receive_buffer.get_bytes_as<REAL>(
comm_buf_size * params->num_mpi_procs[solvedim] * sizeof(REAL));
#endif
break;
case MpiSolverParams::JACOBI:
mpi_buf = mpi_buffer.get_bytes_as<REAL>(3 * sys_n * sizeof(REAL));
#if !(defined(TRID_CUDA_AWARE_MPI) || defined(TRID_NCCL))
// MPI buffers on host
send_buf = send_buffer.get_bytes_as<REAL>(3 * sys_n * sizeof(REAL));
#endif
receive_buf = receive_buffer.get_bytes_as<REAL>(3 * sys_n * sizeof(REAL));
break;
case MpiSolverParams::PCR:
mpi_buf = mpi_buffer.get_bytes_as<REAL>(3 * sys_n * sizeof(REAL));
#if !(defined(TRID_CUDA_AWARE_MPI) || defined(TRID_NCCL))
// MPI buffers on host
send_buf = send_buffer.get_bytes_as<REAL>(3 * sys_n * sizeof(REAL));
receive_buf =
receive_buffer.get_bytes_as<REAL>(2 * 3 * sys_n * sizeof(REAL));
#endif
break;
default: assert(false && "Unknown communication strategy");
}
#ifdef TRID_NCCL
// Dry-run, first call of this is quite expensive
int rank;
MPI_Comm_rank(params->communicators[solvedim], &rank);
NCCLCHECK(ncclAllGather(mpi_buf + 1 * rank, mpi_buf, sizeof(REAL), ncclChar,
params->ncclComms[solvedim], 0));
cudaSafeCall(hipDeviceSynchronize());
#endif
#if PROFILING
MPI_Barrier(MPI_COMM_WORLD);
BEGIN_PROFILING("tridMultiDimBatchSolveMPI");
#endif
const size_t offset = ((size_t)d / sizeof(REAL)) % align<REAL>;
switch (params->strategy) {
case MpiSolverParams::GATHER_SCATTER:
assert(false && "GATHER_SCATTER is not implemented for CUDA");
// break; Release mode falls back to ALLGATHER
case MpiSolverParams::ALLGATHER:
tridMultiDimBatchSolveMPI_allgather<REAL, INC>(
params, a, a_pads, b, b_pads, c, c_pads, d, d_pads, u, u_pads, ndim,
solvedim, dims, aa + offset, cc + offset, boundaries, mpi_buf, sys_n, send_buf,
receive_buf);
break;
case MpiSolverParams::JACOBI:
tridMultiDimBatchSolveMPI_jacobi<REAL, INC>(
params, a, a_pads, b, b_pads, c, c_pads, d, d_pads, u, u_pads, ndim,
solvedim, dims, aa + offset, cc + offset, boundaries, mpi_buf, sys_n, send_buf,
receive_buf);
break;
case MpiSolverParams::PCR:
tridMultiDimBatchSolveMPI_pcr<REAL, INC>(
params, a, a_pads, b, b_pads, c, c_pads, d, d_pads, u, u_pads, ndim,
solvedim, dims, aa + offset, cc + offset, boundaries, mpi_buf, sys_n, send_buf,
receive_buf);
break;
case MpiSolverParams::LATENCY_HIDING_INTERLEAVED:
tridMultiDimBatchSolveMPI_interleaved<REAL, INC>(
params, a, a_pads, b, b_pads, c, c_pads, d, d_pads, u, u_pads, ndim,
solvedim, dims, aa + offset, cc + offset, boundaries, mpi_buf, sys_n, send_buf,
receive_buf);
break;
case MpiSolverParams::LATENCY_HIDING_TWO_STEP:
tridMultiDimBatchSolveMPI_simple<REAL, INC>(
params, a, a_pads, b, b_pads, c, c_pads, d, d_pads, u, u_pads, ndim,
solvedim, dims, aa + offset, cc + offset, boundaries, mpi_buf, sys_n, send_buf,
receive_buf);
break;
default: assert(false && "Unknown communication strategy");
}
cudaSafeCall(hipDeviceSynchronize());
#if PROFILING
BEGIN_PROFILING2("barrier");
cudaSafeCall(hipPeekAtLastError());
cudaSafeCall(hipDeviceSynchronize());
MPI_Barrier(params->communicators[solvedim]);
END_PROFILING2("barrier");
END_PROFILING("tridMultiDimBatchSolveMPI");
#endif
}
template <typename REAL, int INC>
void tridMultiDimBatchSolveMPI(const MpiSolverParams *params, const REAL *a,
const REAL *b, const REAL *c, REAL *d, REAL *u,
int ndim, int solvedim, const int *dims,
const int *pads) {
tridMultiDimBatchSolveMPI<REAL, INC>(params, a, pads, b, pads, c, pads, d,
pads, u, pads, ndim, solvedim, dims);
}
// Solve a batch of tridiagonal systems along a specified axis ('solvedim').
// 'a', 'b', 'c', 'd' are the parameters of the tridiagonal systems which must
// be stored in arrays of size 'dims' with 'ndim' dimensions. The 'pads' array
// specifies any padding used in the arrays (the total length of each dimension
// including padding).
//
// The result is written to 'd'.
tridStatus_t tridDmtsvStridedBatch(const TridParams *ctx, const double *a,
const double *b, const double *c, double *d,
int ndim, int solvedim, const int *dims,
const int *pads) {
tridMultiDimBatchSolveMPI<double, 0>((MpiSolverParams *)ctx->mpi_params, a, b,
c, d, nullptr, ndim, solvedim, dims,
pads);
return TRID_STATUS_SUCCESS;
}
tridStatus_t tridSmtsvStridedBatch(const TridParams *ctx, const float *a,
const float *b, const float *c, float *d,
int ndim, int solvedim, const int *dims,
const int *pads) {
tridMultiDimBatchSolveMPI<float, 0>((MpiSolverParams *)ctx->mpi_params, a, b,
c, d, nullptr, ndim, solvedim, dims,
pads);
return TRID_STATUS_SUCCESS;
}
// Solve a batch of tridiagonal systems along a specified axis ('solvedim').
// 'a', 'b', 'c', 'd' are the parameters of the tridiagonal systems which must
// be stored in arrays of size 'dims' with 'ndim' dimensions. The 'pads' array
// specifies any padding used in the arrays (the total length of each dimension
// including padding).
//
// 'u' is incremented with the results.
tridStatus_t tridDmtsvStridedBatchInc(const TridParams *ctx, const double *a,
const double *b, const double *c,
double *d, double *u, int ndim,
int solvedim, const int *dims,
const int *pads) {
tridMultiDimBatchSolveMPI<double, 1>((MpiSolverParams *)ctx->mpi_params, a, b,
c, d, u, ndim, solvedim, dims, pads);
return TRID_STATUS_SUCCESS;
}
tridStatus_t tridSmtsvStridedBatchInc(const TridParams *ctx, const float *a,
const float *b, const float *c, float *d,
float *u, int ndim, int solvedim,
const int *dims, const int *pads) {
tridMultiDimBatchSolveMPI<float, 1>((MpiSolverParams *)ctx->mpi_params, a, b,
c, d, u, ndim, solvedim, dims, pads);
return TRID_STATUS_SUCCESS;
}
| f0f7f648c58b61ab09b4f7ad47f87319529a1bbf.cu | /*
* Open source copyright declaration based on BSD open source template:
* http://www.opensource.org/licenses/bsd-license.php
*
* This file is part of the scalar-tridiagonal solver distribution.
*
* Copyright (c) 2015, Endre László and others. Please see the AUTHORS file in
* the main source directory for a full list of copyright holders.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * The name of Endre László may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY Endre László ''AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL Endre László BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Written by Endre Laszlo, University of Oxford, endre.laszlo@oerc.ox.ac.uk,
// 2013-2014
// With contributions from:
// Gabor Daniel Balogh, Pazmany Peter Catholic University,
// balogh.gabor.daniel@itk.ppke.hu, 2020
// Toby Flynn, University of Warwick, T.Flynn@warwick.ac.uk, 2020
// #include "trid_mpi_cuda.hpp"
#include "tridsolver.h"
#include "trid_mpi_solver_params.hpp"
#include "trid_mpi_common.hpp"
#include "trid_linear_mpi.hpp"
#include "trid_linear_mpi_reg.hpp"
#include "trid_strided_multidim_mpi.hpp"
#include "trid_cuda_mpi_pcr.hpp"
#include "trid_iterative_mpi.hpp"
#include "cutil_inline.h"
#include "cuda_timing.h"
#include <cassert>
#include <cmath>
#include <functional>
#include <initializer_list>
#include <numeric>
#include <type_traits>
#include <iostream>
namespace {
enum class memory_env { HOST, DEVICE };
template <memory_env mem_env> struct mem_buffer {
size_t size = 0; /*<< size of the buffer in bytes */
char *buffer = nullptr; /*<< pointer to memory in mem_env */
void free() {
if (buffer) {
if (mem_env == memory_env::DEVICE) {
cudaFree(buffer);
} else {
cudaFreeHost(buffer);
}
buffer = nullptr;
size = 0;
}
}
template <typename REAL> REAL *get_bytes_as(size_t bytes) {
if (size < bytes) {
free();
if (mem_env == memory_env::DEVICE) {
cudaSafeCall(cudaMalloc(&buffer, bytes));
} else {
cudaSafeCall(cudaMallocHost(&buffer, bytes));
}
size = bytes;
}
return reinterpret_cast<REAL *>(buffer);
}
~mem_buffer() { free(); }
mem_buffer() noexcept = default;
mem_buffer(const mem_buffer &) = delete;
mem_buffer &operator=(const mem_buffer &) = delete;
mem_buffer(const mem_buffer &&) = delete;
mem_buffer &operator=(mem_buffer &&) = delete;
};
mem_buffer<memory_env::DEVICE> aa_buf, cc_buf, boundaries_buf, mpi_buffer;
#if !(defined(TRID_CUDA_AWARE_MPI) || defined(TRID_NCCL))
mem_buffer<memory_env::HOST> send_buffer;
#endif
mem_buffer<memory_env::HOST> receive_buffer;
} // namespace
template <typename REAL>
inline void forward_batched(dim3 dimGrid_x, dim3 dimBlock_x, const REAL *a,
const int *a_pads, const REAL *b, const int *b_pads,
const REAL *c, const int *c_pads, REAL *d,
const int *d_pads, REAL *aa, REAL *cc,
REAL *boundaries, REAL *send_buf_h, const int *dims,
int ndim, int solvedim, int start_sys, int bsize,
cudaStream_t stream = nullptr) {
if (solvedim == 0) {
const int batch_offset = start_sys * a_pads[solvedim]; // TODO pads
int y_size = 1, y_pads = 1;
if (ndim > 1) {
y_size = dims[1];
y_pads = a_pads[1];
}
trid_linear_forward_reg(
dimGrid_x, dimBlock_x, a + batch_offset, b + batch_offset,
c + batch_offset, d + batch_offset, aa + batch_offset,
cc + batch_offset, boundaries + start_sys * 3 * 2, dims[solvedim],
a_pads[solvedim], bsize, start_sys, y_size, y_pads, stream);
} else {
DIM_V k_pads, k_dims; // TODO
for (int i = 0; i < ndim; ++i) {
k_pads.v[i] = a_pads[i];
k_dims.v[i] = dims[i];
}
trid_strided_multidim_forward<REAL><<<dimGrid_x, dimBlock_x, 0, stream>>>(
a, k_pads, b, k_pads, c, k_pads, d, k_pads, aa, cc, boundaries, ndim,
solvedim, bsize, k_dims, start_sys);
}
#if !(defined(TRID_CUDA_AWARE_MPI) || defined(TRID_NCCL))
size_t comm_buf_size = 3 * 2 * bsize;
size_t comm_buf_offset = 3 * 2 * start_sys;
cudaMemcpyAsync(send_buf_h + comm_buf_offset, boundaries + comm_buf_offset,
sizeof(REAL) * comm_buf_size, cudaMemcpyDeviceToHost, stream);
#endif
}
template <typename REAL, int INC>
inline void backward_batched(dim3 dimGrid_x, dim3 dimBlock_x, const REAL *aa,
const int *a_pads, const REAL *cc,
const int *c_pads, const int *d_pads,
const REAL *boundaries, REAL *d, REAL *u,
const int *u_pads, const int *dims, int ndim,
int solvedim, int start_sys, int bsize,
cudaStream_t stream = nullptr) {
if (solvedim == 0) {
const int batch_offset = start_sys * a_pads[solvedim];
int y_size = 1, y_pads = 1;
if (ndim > 1) {
y_size = dims[1];
y_pads = a_pads[1];
}
trid_linear_backward_reg<REAL, INC>(
dimGrid_x, dimBlock_x, aa + batch_offset, cc + batch_offset,
d + batch_offset, u + batch_offset, boundaries + start_sys * 2,
dims[solvedim], a_pads[solvedim], bsize, start_sys, y_size, y_pads,
stream);
} else {
DIM_V k_pads, k_dims; // TODO
for (int i = 0; i < ndim; ++i) {
k_pads.v[i] = a_pads[i];
k_dims.v[i] = dims[i];
}
trid_strided_multidim_backward<REAL, INC>
<<<dimGrid_x, dimBlock_x, 0, stream>>>(
aa, k_pads, cc, k_pads, d, k_pads, u, k_pads, boundaries, ndim,
solvedim, bsize, k_dims, start_sys);
}
}
template <typename REAL, int INC>
void reduced_and_backward(dim3 dimGrid_x, dim3 dimBlock_x, const REAL *aa,
const int *a_pads, const REAL *cc, const int *c_pads,
const int *d_pads, REAL *boundaries, REAL *d, REAL *u,
const int *u_pads, const REAL *recv_buf_h,
REAL *recv_buf, const int *dims, int ndim,
int solvedim, int mpi_coord, int bidx, int batch_size,
int num_batches, int reduced_len_g, int sys_n,
cudaStream_t stream) {
int batch_start = bidx * batch_size;
int bsize = bidx == num_batches - 1 ? sys_n - batch_start : batch_size;
#if !(defined(TRID_CUDA_AWARE_MPI) || defined(TRID_NCCL))
size_t recv_comm_buf_offset = 3 * reduced_len_g * batch_start;
// copy the results of the reduced systems to the boundaries array
cudaMemcpyAsync(
recv_buf + recv_comm_buf_offset, recv_buf_h + recv_comm_buf_offset,
reduced_len_g * 3 * bsize * sizeof(REAL), cudaMemcpyHostToDevice, stream);
#endif
// Finish the solve for batch
BEGIN_PROFILING_CUDA2("reduced", stream);
int buf_offset = 3 * reduced_len_g * batch_start;
int bound_buf_offset = 2 * batch_start;
pcr_on_reduced_batched<REAL>(recv_buf + buf_offset,
boundaries + bound_buf_offset, bsize, mpi_coord,
reduced_len_g, stream);
END_PROFILING_CUDA2("reduced", stream);
// Perform the backward run of the modified thomas algorithm
BEGIN_PROFILING_CUDA2("thomas_backward", stream);
backward_batched<REAL, INC>(dimGrid_x, dimBlock_x, aa, a_pads, cc, c_pads,
d_pads, boundaries, d, u, u_pads, dims, ndim,
solvedim, batch_start, bsize, stream);
END_PROFILING_CUDA2("thomas_backward", stream);
}
template <typename REAL, int INC>
inline void tridMultiDimBatchSolveMPI_interleaved(
const MpiSolverParams *params, const REAL *a, const int *a_pads,
const REAL *b, const int *b_pads, const REAL *c, const int *c_pads, REAL *d,
const int *d_pads, REAL *u, const int *u_pads, int ndim, int solvedim,
const int *dims, REAL *aa, REAL *cc, REAL *boundaries, REAL *recv_buf,
int sys_n, REAL *send_buf_h = nullptr, REAL *recv_buf_h = nullptr) {
BEGIN_PROFILING2("host-overhead");
// length of reduced system
const int reduced_len_l = 2;
const int reduced_len_g = reduced_len_l * params->num_mpi_procs[solvedim];
const int batch_size = std::min(params->mpi_batch_size, sys_n);
const int num_batches = 1 + (sys_n - 1) / batch_size;
// Calculate required number of CUDA threads and blocksS
int blockdimx = 128;
int blockdimy = 1;
int dimgrid = 1 + (batch_size - 1) / blockdimx; // can go up to 65535
int dimgridx = dimgrid % 65536; // can go up to max 65535 on Fermi
int dimgridy = 1 + dimgrid / 65536;
dim3 dimGrid_x(dimgridx, dimgridy);
dim3 dimBlock_x(blockdimx, blockdimy);
std::vector<MPI_Request> requests(num_batches);
std::vector<cudaStream_t> streams(num_batches);
#ifdef TRID_NCCL
std::vector<cudaEvent_t> events(num_batches);
for (int bidx = 0; bidx < num_batches; ++bidx)
cudaSafeCall(
cudaEventCreateWithFlags(&events[bidx], cudaEventDisableTiming));
#endif
for (int bidx = 0; bidx < num_batches; ++bidx)
cudaStreamCreate(&streams[bidx]);
END_PROFILING2("host-overhead");
for (int bidx = 0; bidx < num_batches; ++bidx) {
int batch_start = bidx * batch_size;
int bsize = bidx == num_batches - 1 ? sys_n - batch_start : batch_size;
size_t comm_buf_size = 3 * reduced_len_l * bsize;
size_t comm_buf_offset = 3 * reduced_len_l * batch_start;
// Do modified thomas forward pass
// For the bidx-th batch
BEGIN_PROFILING_CUDA2("thomas_forward", streams[bidx]);
#ifdef TRID_NCCL
// TODO: this actually hurts in a system where p2p is enabled between all
// GPUs
// but does it help when we need to go through the network?
if (bidx != 0) // for interleaved, forward should wait for completion of
// previous forward
cudaSafeCall(cudaStreamWaitEvent(streams[bidx], events[bidx - 1], 0));
#endif
forward_batched(dimGrid_x, dimBlock_x, a, a_pads, b, b_pads, c, c_pads, d,
d_pads, aa, cc, boundaries, send_buf_h, dims, ndim,
solvedim, batch_start, bsize, streams[bidx]);
END_PROFILING_CUDA2("thomas_forward", streams[bidx]);
// wait for the previous MPI transaction to finish
if (bidx != 0) {
BEGIN_PROFILING2("mpi_wait");
#ifndef TRID_NCCL
MPI_Status status;
MPI_Wait(&requests[bidx - 1], &status);
#endif
END_PROFILING2("mpi_wait");
// Finish the previous batch
reduced_and_backward<REAL, INC>(
dimGrid_x, dimBlock_x, aa, a_pads, cc, c_pads, d_pads, boundaries, d,
u, u_pads, recv_buf_h, recv_buf, dims, ndim, solvedim,
params->mpi_coords[solvedim], bidx - 1, batch_size, num_batches,
reduced_len_g, sys_n, streams[bidx - 1]);
}
#ifdef TRID_NCCL
cudaSafeCall(cudaEventRecord(events[bidx], streams[bidx]));
#else
cudaSafeCall(cudaStreamSynchronize(streams[bidx]));
#endif
BEGIN_PROFILING2("MPI_Iallgather");
// Send boundaries of the current batch
size_t recv_comm_buf_offset = 3 * reduced_len_g * batch_start;
#ifdef TRID_CUDA_AWARE_MPI
// Gather the reduced system to all nodes (using CUDA aware MPI)
MPI_Iallgather(boundaries + comm_buf_offset, comm_buf_size,
MPI_DATATYPE(REAL), recv_buf + recv_comm_buf_offset,
comm_buf_size, MPI_DATATYPE(REAL),
params->communicators[solvedim], &requests[bidx]);
#elif defined(TRID_NCCL)
NCCLCHECK(ncclAllGather(boundaries + comm_buf_offset,
recv_buf + recv_comm_buf_offset,
comm_buf_size * sizeof(REAL), ncclChar,
params->ncclComms[solvedim], streams[bidx]));
#else
// Communicate boundary results
MPI_Iallgather(send_buf_h + comm_buf_offset, comm_buf_size,
MPI_DATATYPE(REAL), recv_buf_h + recv_comm_buf_offset,
comm_buf_size, MPI_DATATYPE(REAL),
params->communicators[solvedim], &requests[bidx]);
#endif
END_PROFILING2("MPI_Iallgather");
} // batches
BEGIN_PROFILING2("mpi_wait");
// Need to finish last batch: receive message, do reduced and backward
// wait for the last MPI transaction to finish
#ifndef TRID_NCCL
MPI_Status status;
MPI_Wait(&requests[num_batches - 1], &status);
#endif
END_PROFILING2("mpi_wait");
reduced_and_backward<REAL, INC>(
dimGrid_x, dimBlock_x, aa, a_pads, cc, c_pads, d_pads, boundaries, d, u,
u_pads, recv_buf_h, recv_buf, dims, ndim, solvedim,
params->mpi_coords[solvedim], num_batches - 1, batch_size, num_batches,
reduced_len_g, sys_n, streams[num_batches - 1]);
BEGIN_PROFILING2("host-overhead");
#ifdef TRID_NCCL
for (int bidx = 0; bidx < num_batches; ++bidx)
cudaSafeCall(cudaEventDestroy(events[bidx]));
#endif
for (int bidx = 0; bidx < num_batches; ++bidx)
cudaStreamDestroy(streams[bidx]);
END_PROFILING2("host-overhead");
}
// LATENCY_HIDING_TWO_STEP strategy: launch the modified Thomas forward pass
// for every batch first (each on its own stream), then start a non-blocking
// allgather of each batch's boundary rows as soon as its forward pass
// finishes. While batches are still communicating, any batch whose exchange
// has already completed gets its reduced solve and backward pass issued, so
// communication latency is hidden behind compute.
//
// aa, cc, boundaries are forward-pass workspace arrays; recv_buf receives the
// gathered reduced systems. send_buf_h / recv_buf_h are host staging buffers,
// only used when neither CUDA-aware MPI nor NCCL is enabled.
template <typename REAL, int INC>
inline void tridMultiDimBatchSolveMPI_simple(
    const MpiSolverParams *params, const REAL *a, const int *a_pads,
    const REAL *b, const int *b_pads, const REAL *c, const int *c_pads, REAL *d,
    const int *d_pads, REAL *u, const int *u_pads, int ndim, int solvedim,
    const int *dims, REAL *aa, REAL *cc, REAL *boundaries, REAL *recv_buf,
    int sys_n, REAL *send_buf_h = nullptr, REAL *recv_buf_h = nullptr) {
  BEGIN_PROFILING2("host-overhead");
  // Length of the local reduced system: 2 boundary rows per process.
  const int reduced_len_l = 2;
  const int reduced_len_g = reduced_len_l * params->num_mpi_procs[solvedim];
  const int batch_size = std::min(params->mpi_batch_size, sys_n);
  const int num_batches = 1 + (sys_n - 1) / batch_size;
  // Calculate required number of CUDA threads and blocks
  int blockdimx = 128;
  int blockdimy = 1;
  int dimgrid = 1 + (batch_size - 1) / blockdimx; // can go up to 65535
  int dimgridx = dimgrid % 65536; // can go up to max 65535 on Fermi
  int dimgridy = 1 + dimgrid / 65536;
  dim3 dimGrid_x(dimgridx, dimgridy);
  dim3 dimBlock_x(blockdimx, blockdimy);
  std::vector<MPI_Request> requests(num_batches);
  std::vector<cudaStream_t> streams(num_batches);
  for (int bidx = 0; bidx < num_batches; ++bidx)
    // Check stream creation, consistently with the other checked CUDA calls
    // in this file.
    cudaSafeCall(cudaStreamCreate(&streams[bidx]));
  END_PROFILING2("host-overhead");
  for (int bidx = 0; bidx < num_batches; ++bidx) {
    int batch_start = bidx * batch_size;
    int bsize = bidx == num_batches - 1 ? sys_n - batch_start : batch_size;
    // Do modified thomas forward pass for the bidx-th batch
    BEGIN_PROFILING_CUDA2("thomas_forward", streams[bidx]);
    forward_batched(dimGrid_x, dimBlock_x, a, a_pads, b, b_pads, c, c_pads, d,
                    d_pads, aa, cc, boundaries, send_buf_h, dims, ndim,
                    solvedim, batch_start, bsize, streams[bidx]);
    END_PROFILING_CUDA2("thomas_forward", streams[bidx]);
  } // batches
  int ready_batches = 0;
  for (int bidx = 0; bidx < num_batches; ++bidx) {
    int batch_start = bidx * batch_size;
    int bsize = bidx == num_batches - 1 ? sys_n - batch_start : batch_size;
#ifndef TRID_NCCL
    // While waiting for this batch's forward pass, service any earlier batch
    // whose communication has already completed.
    while (cudaStreamQuery(streams[bidx]) != cudaSuccess &&
           ready_batches != bidx) {
      int finished, found_finished;
      MPI_Status status;
      // up until bidx all streams communicating
      MPI_Testany(bidx, requests.data(), &finished, &found_finished, &status);
      if (found_finished && finished != MPI_UNDEFINED) {
        ready_batches++;
        reduced_and_backward<REAL, INC>(
            dimGrid_x, dimBlock_x, aa, a_pads, cc, c_pads, d_pads, boundaries,
            d, u, u_pads, recv_buf_h, recv_buf, dims, ndim, solvedim,
            params->mpi_coords[solvedim], finished, batch_size, num_batches,
            reduced_len_g, sys_n, streams[finished]);
      }
    }
    if (ready_batches == bidx) {
      // All earlier batches already serviced: just wait for the forward pass.
      cudaSafeCall(cudaStreamSynchronize(streams[bidx]));
    }
#endif
    BEGIN_PROFILING2("MPI_Iallgather");
    // Send boundaries of the current batch (3 coefficients per reduced row).
    size_t comm_buf_size = 3 * reduced_len_l * bsize;
    size_t comm_buf_offset = 3 * reduced_len_l * batch_start;
    size_t recv_comm_buf_offset = 3 * reduced_len_g * batch_start;
#ifdef TRID_CUDA_AWARE_MPI
    // Gather the reduced system to all nodes (using CUDA aware MPI)
    MPI_Iallgather(boundaries + comm_buf_offset, comm_buf_size,
                   MPI_DATATYPE(REAL), recv_buf + recv_comm_buf_offset,
                   comm_buf_size, MPI_DATATYPE(REAL),
                   params->communicators[solvedim], &requests[bidx]);
#elif defined(TRID_NCCL)
    // NCCL allgather counts bytes, hence ncclChar and the sizeof scaling.
    NCCLCHECK(ncclAllGather(boundaries + comm_buf_offset,
                            recv_buf + recv_comm_buf_offset,
                            comm_buf_size * sizeof(REAL), ncclChar,
                            params->ncclComms[solvedim], streams[bidx]));
#else
    // Communicate boundary results via host staging buffers
    MPI_Iallgather(send_buf_h + comm_buf_offset, comm_buf_size,
                   MPI_DATATYPE(REAL), recv_buf_h + recv_comm_buf_offset,
                   comm_buf_size, MPI_DATATYPE(REAL),
                   params->communicators[solvedim], &requests[bidx]);
#endif
    END_PROFILING2("MPI_Iallgather");
  } // batches
#ifndef TRID_NCCL
  MPI_Status status;
#endif
  // Finish the remaining batches in whichever order their communication
  // completes (NCCL work is stream-ordered, so there we go in batch order).
  for (/*ready_batches*/; ready_batches < num_batches; ++ready_batches) {
    // wait for a MPI transaction to finish
    BEGIN_PROFILING2("mpi_wait");
    int bidx;
#ifdef TRID_NCCL
    bidx = ready_batches;
#else
    int rc = MPI_Waitany(static_cast<int>(requests.size()), requests.data(),
                         &bidx, &status);
    assert(rc == MPI_SUCCESS && "error MPI communication failed");
    (void)rc; // avoid unused-variable warning when asserts are compiled out
#endif
    END_PROFILING2("mpi_wait");
    reduced_and_backward<REAL, INC>(
        dimGrid_x, dimBlock_x, aa, a_pads, cc, c_pads, d_pads, boundaries, d, u,
        u_pads, recv_buf_h, recv_buf, dims, ndim, solvedim,
        params->mpi_coords[solvedim], bidx, batch_size, num_batches,
        reduced_len_g, sys_n, streams[bidx]);
  }
  BEGIN_PROFILING2("host-overhead");
  for (int bidx = 0; bidx < num_batches; ++bidx)
    cudaSafeCall(cudaStreamDestroy(streams[bidx]));
  END_PROFILING2("host-overhead");
}
// ALLGATHER strategy: one forward pass over all systems, a single blocking
// allgather of the boundary rows, a PCR solve of the gathered reduced system
// on every rank, then the backward pass. Simplest (non-overlapping) scheme.
template <typename REAL, int INC>
void tridMultiDimBatchSolveMPI_allgather(
    const MpiSolverParams *params, const REAL *a, const int *a_pads,
    const REAL *b, const int *b_pads, const REAL *c, const int *c_pads, REAL *d,
    const int *d_pads, REAL *u, const int *u_pads, int ndim, int solvedim,
    const int *dims, REAL *aa, REAL *cc, REAL *boundaries, REAL *recv_buf,
    int sys_n, REAL *send_buf_h = nullptr, REAL *recv_buf_h = nullptr) {
  BEGIN_PROFILING2("host-overhead");
  // length of reduced system (2 boundary rows per process)
  const int reduced_len_l = 2;
  const int reduced_len_g = reduced_len_l * params->num_mpi_procs[solvedim];
  // Calculate required number of CUDA threads and blocks
  int blockdimx = 128;
  int blockdimy = 1;
  int dimgrid = 1 + (sys_n - 1) / blockdimx; // can go up to 65535
  int dimgridx = dimgrid % 65536; // can go up to max 65535 on Fermi
  int dimgridy = 1 + dimgrid / 65536;
  dim3 dimGrid_x(dimgridx, dimgridy);
  dim3 dimBlock_x(blockdimx, blockdimy);
  // 2 boundary rows of 3 coefficients per system are exchanged.
  const size_t comm_buf_size = 2 * 3 * sys_n;
  END_PROFILING2("host-overhead");
  // Do modified thomas forward pass
  BEGIN_PROFILING_CUDA2("thomas_forward", 0);
  forward_batched(dimGrid_x, dimBlock_x, a, a_pads, b, b_pads, c, c_pads, d,
                  d_pads, aa, cc, boundaries, send_buf_h, dims, ndim, solvedim,
                  0, sys_n);
  cudaSafeCall(cudaDeviceSynchronize());
  END_PROFILING_CUDA2("thomas_forward", 0);
  BEGIN_PROFILING2("mpi_communication");
#ifdef TRID_CUDA_AWARE_MPI
  // Gather the reduced system to all nodes (using CUDA aware MPI)
  MPI_Allgather(boundaries, comm_buf_size, MPI_DATATYPE(REAL), recv_buf,
                comm_buf_size, MPI_DATATYPE(REAL),
                params->communicators[solvedim]);
#elif defined(TRID_NCCL)
  // NCCL counts bytes, hence ncclChar and the sizeof scaling.
  NCCLCHECK(ncclAllGather(boundaries, recv_buf, comm_buf_size * sizeof(REAL),
                          ncclChar, params->ncclComms[solvedim], 0));
#else
  // Communicate boundary results
  MPI_Allgather(send_buf_h, comm_buf_size, MPI_DATATYPE(REAL), recv_buf_h,
                comm_buf_size, MPI_DATATYPE(REAL),
                params->communicators[solvedim]);
  // copy the results of the reduced systems to the beginning of the boundaries
  // array; check the copy for errors, consistently with the rest of this file
  cudaSafeCall(cudaMemcpyAsync(recv_buf, recv_buf_h,
                               reduced_len_g * 3 * sys_n * sizeof(REAL),
                               cudaMemcpyHostToDevice));
#endif
  END_PROFILING2("mpi_communication");
  // Solve the reduced system (stream-ordered after the async copy above)
  BEGIN_PROFILING_CUDA2("reduced", 0);
  pcr_on_reduced_batched<REAL>(recv_buf, boundaries, sys_n,
                               params->mpi_coords[solvedim], reduced_len_g);
  END_PROFILING_CUDA2("reduced", 0);
  // Do the backward pass to solve for remaining unknowns
  BEGIN_PROFILING_CUDA2("thomas_backward", 0);
  backward_batched<REAL, INC>(dimGrid_x, dimBlock_x, aa, a_pads, cc, c_pads,
                              d_pads, boundaries, d, u, u_pads, dims, ndim,
                              solvedim, 0, sys_n);
  END_PROFILING_CUDA2("thomas_backward", 0);
}
// PCR strategy: modified Thomas forward pass, iterative parallel cyclic
// reduction on the distributed reduced system, then batched backward
// substitution. Mirrors the other strategy entry points; only the
// reduced-system solver differs.
template <typename REAL, int INC>
void tridMultiDimBatchSolveMPI_pcr(
    const MpiSolverParams *params, const REAL *a, const int *a_pads,
    const REAL *b, const int *b_pads, const REAL *c, const int *c_pads, REAL *d,
    const int *d_pads, REAL *u, const int *u_pads, int ndim, int solvedim,
    const int *dims, REAL *aa, REAL *cc, REAL *boundaries, REAL *recv_buf,
    int sys_n, REAL *send_buf_h = nullptr,
    REAL *recv_buf_h = nullptr) {
  BEGIN_PROFILING2("host-overhead");
  // One CUDA thread per system; the 1D block count is folded into a 2D grid
  // because a single grid dimension is capped at 65535 on older hardware.
  const int threads_per_block = 128;
  const int total_blocks = 1 + (sys_n - 1) / threads_per_block;
  dim3 dimGrid_x(total_blocks % 65536, 1 + total_blocks / 65536);
  dim3 dimBlock_x(threads_per_block, 1);
  END_PROFILING2("host-overhead");
  // Forward sweep of the modified Thomas algorithm; boundary rows end up in
  // 'boundaries' for the reduced solve.
  BEGIN_PROFILING_CUDA2("forward", 0);
  forward_batched_pass<REAL, true>(
      dimGrid_x, dimBlock_x, params, a, a_pads, b, b_pads, c, c_pads, d, d_pads,
      aa, cc, boundaries, dims, ndim, solvedim, 0, sys_n);
  cudaSafeCall(cudaDeviceSynchronize());
  END_PROFILING_CUDA2("forward", 0);
  // Solve the distributed reduced system with iterative PCR.
  BEGIN_PROFILING2("reduced");
  iterative_pcr_on_reduced(dimGrid_x, dimBlock_x, params, boundaries, sys_n,
                           solvedim, recv_buf, recv_buf_h, send_buf_h);
  END_PROFILING2("reduced");
  // Back substitution for the remaining unknowns.
  BEGIN_PROFILING_CUDA2("backward", 0);
  backward_batched_pass<REAL, INC, true>(
      dimGrid_x, dimBlock_x, params, aa, a_pads, cc, c_pads, boundaries, d,
      d_pads, u, u_pads, dims, ndim, solvedim, 0, sys_n);
  END_PROFILING_CUDA2("backward", 0);
}
// JACOBI strategy: identical pipeline to the PCR variant, except the
// distributed reduced system is solved with an iterative Jacobi method.
template <typename REAL, int INC>
void tridMultiDimBatchSolveMPI_jacobi(
    const MpiSolverParams *params, const REAL *a, const int *a_pads,
    const REAL *b, const int *b_pads, const REAL *c, const int *c_pads, REAL *d,
    const int *d_pads, REAL *u, const int *u_pads, int ndim, int solvedim,
    const int *dims, REAL *aa, REAL *cc, REAL *boundaries, REAL *recv_buf,
    int sys_n, REAL *send_buf_h = nullptr,
    REAL *recv_buf_h = nullptr) {
  BEGIN_PROFILING2("host-overhead");
  // One CUDA thread per system; fold the 1D block count into a 2D grid since
  // a single grid dimension is limited to 65535 on older architectures.
  const int threads_per_block = 128;
  const int total_blocks = 1 + (sys_n - 1) / threads_per_block;
  dim3 dimGrid_x(total_blocks % 65536, 1 + total_blocks / 65536);
  dim3 dimBlock_x(threads_per_block, 1);
  END_PROFILING2("host-overhead");
  // Forward sweep of the modified Thomas algorithm.
  BEGIN_PROFILING_CUDA2("forward", 0);
  forward_batched_pass<REAL, true, false>(
      dimGrid_x, dimBlock_x, params, a, a_pads, b, b_pads, c, c_pads, d, d_pads,
      aa, cc, boundaries, dims, ndim, solvedim, 0, sys_n);
  cudaSafeCall(cudaDeviceSynchronize());
  END_PROFILING_CUDA2("forward", 0);
  // Solve the distributed reduced system iteratively (Jacobi).
  BEGIN_PROFILING2("reduced");
  iterative_jacobi_on_reduced(dimGrid_x, dimBlock_x, params, boundaries, sys_n,
                              solvedim, recv_buf, recv_buf_h, send_buf_h);
  END_PROFILING2("reduced");
  // Back substitution for the remaining unknowns.
  BEGIN_PROFILING_CUDA2("backward", 0);
  backward_batched_pass<REAL, INC, true, false>(
      dimGrid_x, dimBlock_x, params, aa, a_pads, cc, c_pads, boundaries, d,
      d_pads, u, u_pads, dims, ndim, solvedim, 0, sys_n);
  END_PROFILING_CUDA2("backward", 0);
}
// Strategy dispatcher for the distributed multi-dimensional batch tridiagonal
// solver. Computes the number of systems, sizes the scratch and
// communication buffers according to the selected communication strategy,
// then forwards to the matching *_allgather / *_jacobi / *_pcr /
// *_interleaved / *_simple implementation. INC selects solve-into-d (0) or
// increment-u (1) behaviour in the callees.
template <typename REAL, int INC>
void tridMultiDimBatchSolveMPI(const MpiSolverParams *params, const REAL *a,
                               const int *a_pads, const REAL *b,
                               const int *b_pads, const REAL *c,
                               const int *c_pads, REAL *d, const int *d_pads,
                               REAL *u, const int *u_pads, int ndim,
                               int solvedim, const int *dims) {
  assert(solvedim < ndim);
  static_assert(
      (std::is_same<REAL, float>::value || std::is_same<REAL, double>::value),
      "trid_solve_mpi: only double or float values are supported");
  // The size of the equations / our domain
  assert(dims[solvedim] >= 2 &&
         "One of the processes has fewer than 2 equations, this is not "
         "supported\n");
  // Product of the dimension sizes below solvedim (stride between elements of
  // one equation).
  const int eq_stride =
      std::accumulate(dims, dims + solvedim, 1, std::multiplies<int>());
  // The product of the sizes along the dimensions higher than solve_dim; needed
  // for the iteration later
  const int outer_size = std::accumulate(dims + solvedim + 1, dims + ndim, 1,
                                         std::multiplies<int>());
  // The number of systems to solve
  // const int sys_n = eq_stride * outer_size;
  // For solvedim == 0 the system count is derived from the padded sizes of
  // the middle dimensions — NOTE(review): presumably because the x-solve
  // kernels iterate over padded rows; confirm against the kernel indexing.
  int sys_n = 1;
  if (solvedim == 0) {
    if (ndim == 2) {
      sys_n = dims[1];
    } else if (ndim > 2) {
      sys_n = dims[ndim - 1] * std::accumulate(a_pads + solvedim + 1,
                                               a_pads + ndim - 1, 1,
                                               std::multiplies<int>());
    }
  } else {
    sys_n = eq_stride * outer_size;
  }
  // The local length of reduced systems (2 boundary rows per process)
  const int loc_red_len = 2;
  // Allocate memory used during the solve
  // const int local_helper_size = outer_size * eq_stride * local_eq_size;
  const int local_helper_size =
      std::accumulate(a_pads, a_pads + ndim, 1, std::multiplies<int>());
  // Workspace arrays reused across calls via the *_buf pools.
  REAL *aa = aa_buf.get_bytes_as<REAL>(local_helper_size * sizeof(REAL)),
       *cc = cc_buf.get_bytes_as<REAL>(local_helper_size * sizeof(REAL)),
       *boundaries = boundaries_buf.get_bytes_as<REAL>(sys_n * 3 * loc_red_len *
                                                       sizeof(REAL));
  // Allocate receive buffer for MPI communication of reduced system
  const size_t reduced_len_g = 2 * params->num_mpi_procs[solvedim];
  REAL *mpi_buf = nullptr;
  REAL *send_buf = nullptr, *receive_buf = nullptr;
  const size_t comm_buf_size = loc_red_len * 3 * sys_n;
  // Buffer requirements differ per strategy: the allgather-family needs room
  // for every rank's boundary rows, the iterative solvers (Jacobi/PCR) only
  // exchange neighbour data.
  switch (params->strategy) {
  case MpiSolverParams::LATENCY_HIDING_INTERLEAVED:
  case MpiSolverParams::LATENCY_HIDING_TWO_STEP:
  case MpiSolverParams::GATHER_SCATTER:
  case MpiSolverParams::ALLGATHER:
    mpi_buf =
        mpi_buffer.get_bytes_as<REAL>(reduced_len_g * 3 * sys_n * sizeof(REAL));
#if !(defined(TRID_CUDA_AWARE_MPI) || defined(TRID_NCCL))
    // MPI buffers on host
    send_buf = send_buffer.get_bytes_as<REAL>(comm_buf_size * sizeof(REAL));
    receive_buf = receive_buffer.get_bytes_as<REAL>(
        comm_buf_size * params->num_mpi_procs[solvedim] * sizeof(REAL));
#endif
    break;
  case MpiSolverParams::JACOBI:
    mpi_buf = mpi_buffer.get_bytes_as<REAL>(3 * sys_n * sizeof(REAL));
#if !(defined(TRID_CUDA_AWARE_MPI) || defined(TRID_NCCL))
    // MPI buffers on host
    send_buf = send_buffer.get_bytes_as<REAL>(3 * sys_n * sizeof(REAL));
#endif
    receive_buf = receive_buffer.get_bytes_as<REAL>(3 * sys_n * sizeof(REAL));
    break;
  case MpiSolverParams::PCR:
    mpi_buf = mpi_buffer.get_bytes_as<REAL>(3 * sys_n * sizeof(REAL));
#if !(defined(TRID_CUDA_AWARE_MPI) || defined(TRID_NCCL))
    // MPI buffers on host
    send_buf = send_buffer.get_bytes_as<REAL>(3 * sys_n * sizeof(REAL));
    receive_buf =
        receive_buffer.get_bytes_as<REAL>(2 * 3 * sys_n * sizeof(REAL));
#endif
    break;
  default: assert(false && "Unknown communication strategy");
  }
#ifdef TRID_NCCL
  // Dry-run, first call of this is quite expensive
  int rank;
  MPI_Comm_rank(params->communicators[solvedim], &rank);
  NCCLCHECK(ncclAllGather(mpi_buf + 1 * rank, mpi_buf, sizeof(REAL), ncclChar,
                          params->ncclComms[solvedim], 0));
  cudaSafeCall(cudaDeviceSynchronize());
#endif
#if PROFILING
  MPI_Barrier(MPI_COMM_WORLD);
  BEGIN_PROFILING("tridMultiDimBatchSolveMPI");
#endif
  // Misalignment (in elements) of 'd' relative to align<REAL>, applied to aa
  // and cc — presumably so the kernels see the same alignment class for all
  // three arrays. NOTE(review): confirm the aa/cc pool allocations leave
  // headroom for this offset.
  const size_t offset = ((size_t)d / sizeof(REAL)) % align<REAL>;
  switch (params->strategy) {
  case MpiSolverParams::GATHER_SCATTER:
    assert(false && "GATHER_SCATTER is not implemented for CUDA");
    // break; Release mode falls back to ALLGATHER
  case MpiSolverParams::ALLGATHER:
    tridMultiDimBatchSolveMPI_allgather<REAL, INC>(
        params, a, a_pads, b, b_pads, c, c_pads, d, d_pads, u, u_pads, ndim,
        solvedim, dims, aa + offset, cc + offset, boundaries, mpi_buf, sys_n, send_buf,
        receive_buf);
    break;
  case MpiSolverParams::JACOBI:
    tridMultiDimBatchSolveMPI_jacobi<REAL, INC>(
        params, a, a_pads, b, b_pads, c, c_pads, d, d_pads, u, u_pads, ndim,
        solvedim, dims, aa + offset, cc + offset, boundaries, mpi_buf, sys_n, send_buf,
        receive_buf);
    break;
  case MpiSolverParams::PCR:
    tridMultiDimBatchSolveMPI_pcr<REAL, INC>(
        params, a, a_pads, b, b_pads, c, c_pads, d, d_pads, u, u_pads, ndim,
        solvedim, dims, aa + offset, cc + offset, boundaries, mpi_buf, sys_n, send_buf,
        receive_buf);
    break;
  case MpiSolverParams::LATENCY_HIDING_INTERLEAVED:
    tridMultiDimBatchSolveMPI_interleaved<REAL, INC>(
        params, a, a_pads, b, b_pads, c, c_pads, d, d_pads, u, u_pads, ndim,
        solvedim, dims, aa + offset, cc + offset, boundaries, mpi_buf, sys_n, send_buf,
        receive_buf);
    break;
  case MpiSolverParams::LATENCY_HIDING_TWO_STEP:
    tridMultiDimBatchSolveMPI_simple<REAL, INC>(
        params, a, a_pads, b, b_pads, c, c_pads, d, d_pads, u, u_pads, ndim,
        solvedim, dims, aa + offset, cc + offset, boundaries, mpi_buf, sys_n, send_buf,
        receive_buf);
    break;
  default: assert(false && "Unknown communication strategy");
  }
  cudaSafeCall(cudaDeviceSynchronize());
#if PROFILING
  BEGIN_PROFILING2("barrier");
  cudaSafeCall(cudaPeekAtLastError());
  cudaSafeCall(cudaDeviceSynchronize());
  MPI_Barrier(params->communicators[solvedim]);
  END_PROFILING2("barrier");
  END_PROFILING("tridMultiDimBatchSolveMPI");
#endif
}
// Convenience overload: applies the same padding array 'pads' to every
// coefficient array (a, b, c, d and u).
template <typename REAL, int INC>
void tridMultiDimBatchSolveMPI(const MpiSolverParams *params, const REAL *a,
                               const REAL *b, const REAL *c, REAL *d, REAL *u,
                               int ndim, int solvedim, const int *dims,
                               const int *pads) {
  tridMultiDimBatchSolveMPI<REAL, INC>(params, a, pads, b, pads, c, pads, d,
                                       pads, u, pads, ndim, solvedim, dims);
}
// Solve a batch of tridiagonal systems along a specified axis ('solvedim').
// 'a', 'b', 'c', 'd' are the parameters of the tridiagonal systems which must
// be stored in arrays of size 'dims' with 'ndim' dimensions. The 'pads' array
// specifies any padding used in the arrays (the total length of each dimension
// including padding).
//
// The result is written to 'd'.
tridStatus_t tridDmtsvStridedBatch(const TridParams *ctx, const double *a,
                                   const double *b, const double *c, double *d,
                                   int ndim, int solvedim, const int *dims,
                                   const int *pads) {
  // Double-precision solve; INC == 0 writes the result into 'd' (no 'u'
  // increment, hence the nullptr).
  MpiSolverParams *mpi_params = (MpiSolverParams *)ctx->mpi_params;
  tridMultiDimBatchSolveMPI<double, 0>(mpi_params, a, b, c, d, nullptr, ndim,
                                       solvedim, dims, pads);
  return TRID_STATUS_SUCCESS;
}
tridStatus_t tridSmtsvStridedBatch(const TridParams *ctx, const float *a,
                                   const float *b, const float *c, float *d,
                                   int ndim, int solvedim, const int *dims,
                                   const int *pads) {
  // Single-precision solve; INC == 0 writes the result into 'd' (no 'u'
  // increment, hence the nullptr).
  MpiSolverParams *mpi_params = (MpiSolverParams *)ctx->mpi_params;
  tridMultiDimBatchSolveMPI<float, 0>(mpi_params, a, b, c, d, nullptr, ndim,
                                      solvedim, dims, pads);
  return TRID_STATUS_SUCCESS;
}
// Solve a batch of tridiagonal systems along a specified axis ('solvedim').
// 'a', 'b', 'c', 'd' are the parameters of the tridiagonal systems which must
// be stored in arrays of size 'dims' with 'ndim' dimensions. The 'pads' array
// specifies any padding used in the arrays (the total length of each dimension
// including padding).
//
// 'u' is incremented with the results.
tridStatus_t tridDmtsvStridedBatchInc(const TridParams *ctx, const double *a,
                                      const double *b, const double *c,
                                      double *d, double *u, int ndim,
                                      int solvedim, const int *dims,
                                      const int *pads) {
  // Double-precision solve; INC == 1 increments 'u' with the solution.
  MpiSolverParams *mpi_params = (MpiSolverParams *)ctx->mpi_params;
  tridMultiDimBatchSolveMPI<double, 1>(mpi_params, a, b, c, d, u, ndim,
                                       solvedim, dims, pads);
  return TRID_STATUS_SUCCESS;
}
tridStatus_t tridSmtsvStridedBatchInc(const TridParams *ctx, const float *a,
                                      const float *b, const float *c, float *d,
                                      float *u, int ndim, int solvedim,
                                      const int *dims, const int *pads) {
  // Single-precision solve; INC == 1 increments 'u' with the solution.
  MpiSolverParams *mpi_params = (MpiSolverParams *)ctx->mpi_params;
  tridMultiDimBatchSolveMPI<float, 1>(mpi_params, a, b, c, d, u, ndim,
                                      solvedim, dims, pads);
  return TRID_STATUS_SUCCESS;
}
|
1630affe59925ec5a26276c124f937fdea800036.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2011-2013 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "max_subsampling_2d_layer_hessian_cuda.h"
#include <hip/hip_runtime.h>
#include "util_cuda.h"
#include "neural_network_cuda_exception.h"
#include "../max_subsampling_layer.h"
#include "../nn_types.h"
// Packs a (window_x, x) task pair into one 32-bit word: the window offset in
// the high 16 bits, the output x coordinate in the low 16 bits.
struct __align__(4) window_x_x_config
{
	window_x_x_config(int window_x, int x)
		: window_x_x_pair((static_cast<unsigned int>(window_x) << 16) | static_cast<unsigned int>(x))
	{
	}
	unsigned int window_x_x_pair;
};
// Packs a (y, feature_map_id) task pair into one 32-bit word: the output y
// coordinate in the high 16 bits, the feature map id in the low 16 bits.
struct __align__(4) y_feature_map_config
{
	y_feature_map_config(int y, int feature_map_id)
		: y_feature_map_id_pair((static_cast<unsigned int>(y) << 16) | static_cast<unsigned int>(feature_map_id))
	{
	}
	unsigned int y_feature_map_id_pair;
};
// Packs an (x, y) coordinate pair into one 32-bit word: x in the high
// 16 bits, y in the low 16 bits.
struct __align__(4) x_y_config
{
	x_y_config(int x, int y)
		: x_y_pair((static_cast<unsigned int>(x) << 16) | static_cast<unsigned int>(y))
	{
	}
	unsigned int x_y_pair;
};
extern __shared__ float arr_sh[];
// Forward (test) pass of 2D max-subsampling, one thread per
// (window_x, output_x) x (output_y, feature_map) x entry combination.
// Phase 1: each thread scans its window column (all window rows in y) and
// stores the column maximum plus its y offset in shared memory.
// Phase 2: after the barrier, the thread with window_x == 0 reduces the
// subsampling_width partial maxima of the neighbouring threads and writes the
// pooled value plus the argmax (x, y) position (needed by backprop).
// Assumes the launch configuration keeps all window_x entries of one output
// column inside a single thread block — the host passes subsampling_sizes[0]
// as an alignment hint and the task list interleaves window_x innermost.
// NOTE(review): verify this holds for unusual window sizes.
// Dynamic shared memory: threadblock_size * (sizeof(float) + sizeof(int)).
__global__ void max_subsampling_2d_tex_hess_kernel(
	float * __restrict output,
	x_y_config * __restrict max_positions,
	const float * __restrict input,
	const window_x_x_config * __restrict window_x_x_config_list,
	const y_feature_map_config * __restrict y_feature_map_config_list,
	int subsampling_width,
	int subsampling_height,
	int input_width,
	int input_height,
	int output_width,
	int output_height,
	int feature_map_count,
	int entry_count,
	int window_x_x_config_count,
	int y_feature_map_config_count)
{
	int window_x_x_config_id = blockIdx.x * blockDim.x + threadIdx.x;
	int feature_map_config_id = blockIdx.y * blockDim.y + threadIdx.y;
	int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
	// Flat index of this thread within its block, used to address the two
	// shared-memory scratch arrays below.
	int local_thread_id = (threadIdx.z * blockDim.y + threadIdx.y) * blockDim.x + threadIdx.x;
	int threadblock_size = blockDim.z * blockDim.y * blockDim.x;
	// Dynamic shared memory split: first the float maxima, then the int
	// argmax-y values.
	float * vals = arr_sh;
	int * max_pos_y_list = (int *)(vals + threadblock_size);
	bool in_bounds = (entry_id < entry_count) && (window_x_x_config_id < window_x_x_config_count) && (feature_map_config_id < y_feature_map_config_count);
	// Sentinel below any realistic activation value.
	float res = -1.0e37F;
	int max_pos_y;
	int window_x;
	int output_x;
	int output_y;
	int feature_map_id;
	if (in_bounds)
	{
		// Decode the packed task descriptors (16-bit halves).
		window_x_x_config wxx = window_x_x_config_list[window_x_x_config_id];
		output_x = wxx.window_x_x_pair & 0xFFFF;
		window_x = wxx.window_x_x_pair >> 16;
		y_feature_map_config yfm = y_feature_map_config_list[feature_map_config_id];
		feature_map_id = yfm.y_feature_map_id_pair & 0xFFFF;
		output_y = yfm.y_feature_map_id_pair >> 16;
		int input_x = output_x * subsampling_width + window_x;
		int input_y = output_y * subsampling_height;
		int current_input_elem_id = ((entry_id * feature_map_count + feature_map_id) * input_height + input_y) * input_width + input_x;
		// Phase 1: column-wise max over the window rows.
		res = input[current_input_elem_id];
		max_pos_y = 0;
		for(int j = 1; j < subsampling_height; ++j)
		{
			current_input_elem_id += input_width;
			float new_val = input[current_input_elem_id];
			if (new_val > res)
			{
				res = new_val;
				max_pos_y = j;
			}
		}
		vals[local_thread_id] = res;
		max_pos_y_list[local_thread_id] = max_pos_y;
	}
	// Make every thread's column maximum visible before the cross-thread
	// reduction; reached by all threads (including out-of-bounds ones).
	__syncthreads();
	if (in_bounds && (window_x == 0))
	{
		// Phase 2: combine the partial maxima of the subsampling_width
		// consecutive threads covering this output column.
		int max_pos_x = 0;
		for(int j = 1; j < subsampling_width; ++j)
		{
			local_thread_id++;
			float new_val = vals[local_thread_id];
			int new_max_pos_y = max_pos_y_list[local_thread_id];
			if (new_val > res)
			{
				res = new_val;
				max_pos_x = j;
				max_pos_y = new_max_pos_y;
			}
		}
		int offset = ((entry_id * feature_map_count + feature_map_id) * output_height + output_y) * output_width + output_x;
		output[offset] = res;
		// Pack the argmax as (x << 16) | y for the backward pass.
		max_positions[offset].x_y_pair = (max_pos_x << 16) | max_pos_y;
	}
}
// Backward pass of 2D max-subsampling: one thread per
// (output position, feature map, entry). Each output error is routed back to
// the single input element recorded as the maximum during the forward pass
// (input_errors is zero-filled by the host before this launch).
__global__ void max_subsampling_2d_square_deriviative_hess_kernel(
	float * __restrict input_errors,
	const x_y_config * __restrict max_positions,
	const float * __restrict output_errors,
	const x_y_config * __restrict x_y_config_list,
	int subsampling_width,
	int subsampling_height,
	int input_width,
	int input_height,
	int output_width,
	int output_height,
	int feature_map_count,
	int entry_count,
	int x_y_config_count)
{
	const int cfg_id = blockIdx.x * blockDim.x + threadIdx.x;
	const int fm_id = blockIdx.y * blockDim.y + threadIdx.y;
	const int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
	if ((entry_id >= entry_count) || (cfg_id >= x_y_config_count) || (fm_id >= feature_map_count))
		return;
	// Decode the packed output coordinate (x in high 16 bits, y in low).
	const x_y_config cfg = x_y_config_list[cfg_id];
	const int output_x = (int)(cfg.x_y_pair >> 16);
	const int output_y = (int)(cfg.x_y_pair & 0xFFFF);
	const int offset = ((entry_id * feature_map_count + fm_id) * output_height + output_y) * output_width + output_x;
	const float output_error = output_errors[offset];
	// Recover the argmax position stored by the forward kernel.
	const x_y_config max_pos_xy = max_positions[offset];
	const int input_x = output_x * subsampling_width + (int)(max_pos_xy.x_y_pair >> 16);
	const int input_y = output_y * subsampling_height + (int)(max_pos_xy.x_y_pair & 0xFFFF);
	const int input_offset = ((entry_id * feature_map_count + fm_id) * input_height + input_y) * input_width + input_x;
	input_errors[input_offset] = output_error;
}
namespace nnforge
{
	namespace cuda
	{
		// Hessian-mode CUDA implementation of 2D max-subsampling.
		// additional_buffers layout: [0] per-entry argmax positions,
		// [1] (window_x, x) task list, [2] (y, feature map) task list,
		// [3] (x, y) task list for backprop.
		max_subsampling_2d_layer_hessian_cuda::max_subsampling_2d_layer_hessian_cuda()
		{
		}
		max_subsampling_2d_layer_hessian_cuda::~max_subsampling_2d_layer_hessian_cuda()
		{
		}
		// Forward pass: launches the pooling kernel, which also records the
		// argmax positions into additional_buffers[0] for the backward pass.
		void max_subsampling_2d_layer_hessian_cuda::enqueue_test(
			hipStream_t stream_id,
			const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
			const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data,
			const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
			cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
			const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
			unsigned int entry_count)
		{
			const float * input = *input_neurons_buffer;
			float * output = *output_neurons_buffer;
			x_y_config * max_positions = (x_y_config *)((void *)(*additional_buffers[0]));
			int window_x_x_config_count = subsampling_sizes[0] * output_configuration_specific.dimension_sizes[0];
			const window_x_x_config * window_x_x_config_list = static_cast<const window_x_x_config *>((const void *)*additional_buffers[1]);
			int y_feature_map_config_count = output_configuration_specific.dimension_sizes[1] * output_configuration_specific.feature_map_count;
			const y_feature_map_config * y_feature_map_config_list = static_cast<const y_feature_map_config *>((const void *)*additional_buffers[2]);
			// The last argument (subsampling_sizes[0]) aligns blockDim.x so all
			// window_x entries of one output column land in the same block,
			// which the kernel's shared-memory reduction relies on.
			std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
				*cuda_config,
				window_x_x_config_count,
				y_feature_map_config_count,
				entry_count,
				subsampling_sizes[0]);
			int threadblock_size = kernel_dims.second.x * kernel_dims.second.y * kernel_dims.second.z;
			// One float (partial max) and one int (argmax y) per thread.
			int smem_size = threadblock_size * (sizeof(float) + sizeof(int));
			hipLaunchKernelGGL(( max_subsampling_2d_tex_hess_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), smem_size, stream_id,
				output,
				max_positions,
				input,
				window_x_x_config_list,
				y_feature_map_config_list,
				subsampling_sizes[0],
				subsampling_sizes[1],
				input_configuration_specific.dimension_sizes[0],
				input_configuration_specific.dimension_sizes[1],
				output_configuration_specific.dimension_sizes[0],
				output_configuration_specific.dimension_sizes[1],
				output_configuration_specific.feature_map_count,
				entry_count,
				window_x_x_config_count,
				y_feature_map_config_count);
		}
		// Backward pass: zero-fill the input error buffer, then scatter each
		// output error to the input element recorded as the maximum.
		void max_subsampling_2d_layer_hessian_cuda::enqueue_backprop(
			hipStream_t stream_id,
			const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
			const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data,
			const_cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
			cuda_linear_buffer_device_smart_ptr output_errors_buffer,
			cuda_linear_buffer_device_smart_ptr input_errors_buffer,
			const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
			unsigned int entry_count)
		{
			// Non-max input positions receive zero gradient.
			cuda_util::set_with_value(
				*cuda_config,
				*input_errors_buffer,
				0.0F,
				input_elem_count_per_entry * entry_count,
				stream_id);
			const float * output_errors = *output_errors_buffer;
			const x_y_config * max_positions = (const x_y_config *)((void *)(*additional_buffers[0]));
			float * input_errors = *input_errors_buffer;
			int x_y_config_count = output_configuration_specific.dimension_sizes[0] * output_configuration_specific.dimension_sizes[1];
			const x_y_config * x_y_config_list = static_cast<const x_y_config *>((const void *)*additional_buffers[3]);
			std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
				*cuda_config,
				x_y_config_count,
				output_configuration_specific.feature_map_count,
				entry_count);
			hipLaunchKernelGGL(( max_subsampling_2d_square_deriviative_hess_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id,
				input_errors,
				max_positions,
				output_errors,
				x_y_config_list,
				subsampling_sizes[0],
				subsampling_sizes[1],
				input_configuration_specific.dimension_sizes[0],
				input_configuration_specific.dimension_sizes[1],
				output_configuration_specific.dimension_sizes[0],
				output_configuration_specific.dimension_sizes[1],
				output_configuration_specific.feature_map_count,
				entry_count,
				x_y_config_count);
		}
		// Cache the subsampling window sizes from the layer schema.
		void max_subsampling_2d_layer_hessian_cuda::hessian_configured()
		{
			nnforge_shared_ptr<const max_subsampling_layer> layer_derived = nnforge_dynamic_pointer_cast<const max_subsampling_layer>(layer_schema);
			subsampling_sizes = layer_derived->subsampling_sizes;
		}
		// Backprop writes a separate input-error buffer (not in-place).
		bool max_subsampling_2d_layer_hessian_cuda::is_in_place_backprop() const
		{
			return false;
		}
		// One argmax record per output element, per entry (buffer [0]).
		std::vector<size_t> max_subsampling_2d_layer_hessian_cuda::get_sizes_of_additional_buffers_per_entry() const
		{
			std::vector<size_t> res;
			res.push_back(output_elem_count_per_entry * sizeof(x_y_config));
			return res;
		}
		// Fixed-size task lists shared by all entries (buffers [1], [2], [3]).
		std::vector<size_t> max_subsampling_2d_layer_hessian_cuda::get_sizes_of_additional_buffers_fixed() const
		{
			std::vector<size_t> res;
			res.push_back(sizeof(window_x_x_config) * subsampling_sizes[0] * output_configuration_specific.dimension_sizes[0]);
			res.push_back(sizeof(y_feature_map_config) * output_configuration_specific.dimension_sizes[1] * output_configuration_specific.feature_map_count);
			res.push_back(sizeof(x_y_config) * output_configuration_specific.dimension_sizes[1] * output_configuration_specific.dimension_sizes[0]);
			return res;
		}
		// Precompute the packed task lists on the host and upload them once.
		// Note: window_x is the inner loop, so the entries for one output x
		// are consecutive — the forward kernel's phase-2 reduction over
		// neighbouring threads depends on this ordering.
		void max_subsampling_2d_layer_hessian_cuda::fill_additional_buffers(const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers) const
		{
			{
				std::vector<window_x_x_config> task_list;
				for(int x = 0; x < output_configuration_specific.dimension_sizes[0]; ++x)
					for(int window_x = 0; window_x < subsampling_sizes[0]; ++window_x)
						task_list.push_back(window_x_x_config(window_x, x));
				cuda_safe_call(hipMemcpy(*additional_buffers[1], &(*task_list.begin()), sizeof(window_x_x_config) * task_list.size(), hipMemcpyHostToDevice));
			}
			{
				std::vector<y_feature_map_config> task_list;
				for(int feature_map_id = 0; feature_map_id < output_configuration_specific.feature_map_count; ++feature_map_id)
					for(int y = 0; y < output_configuration_specific.dimension_sizes[1]; ++y)
						task_list.push_back(y_feature_map_config(y, feature_map_id));
				cuda_safe_call(hipMemcpy(*additional_buffers[2], &(*task_list.begin()), sizeof(y_feature_map_config) * task_list.size(), hipMemcpyHostToDevice));
			}
			{
				std::vector<x_y_config> task_list;
				for(int y = 0; y < output_configuration_specific.dimension_sizes[1]; ++y)
					for(int x = 0; x < output_configuration_specific.dimension_sizes[0]; ++x)
						task_list.push_back(x_y_config(x, y));
				cuda_safe_call(hipMemcpy(*additional_buffers[3], &(*task_list.begin()), sizeof(x_y_config) * task_list.size(), hipMemcpyHostToDevice));
			}
		}
	}
}
| 1630affe59925ec5a26276c124f937fdea800036.cu | /*
* Copyright 2011-2013 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "max_subsampling_2d_layer_hessian_cuda.h"
#include <cuda_runtime.h>
#include "util_cuda.h"
#include "neural_network_cuda_exception.h"
#include "../max_subsampling_layer.h"
#include "../nn_types.h"
// Packs a (window_x, x) task pair into one 32-bit word: the window offset in
// the high 16 bits, the output x coordinate in the low 16 bits.
struct __align__(4) window_x_x_config
{
	window_x_x_config(int window_x, int x)
		: window_x_x_pair((static_cast<unsigned int>(window_x) << 16) | static_cast<unsigned int>(x))
	{
	}
	unsigned int window_x_x_pair;
};
// Packs a (y, feature_map_id) task pair into one 32-bit word: the output y
// coordinate in the high 16 bits, the feature map id in the low 16 bits.
struct __align__(4) y_feature_map_config
{
	y_feature_map_config(int y, int feature_map_id)
		: y_feature_map_id_pair((static_cast<unsigned int>(y) << 16) | static_cast<unsigned int>(feature_map_id))
	{
	}
	unsigned int y_feature_map_id_pair;
};
// Packs an (x, y) coordinate pair into one 32-bit word: x in the high
// 16 bits, y in the low 16 bits.
struct __align__(4) x_y_config
{
	x_y_config(int x, int y)
		: x_y_pair((static_cast<unsigned int>(x) << 16) | static_cast<unsigned int>(y))
	{
	}
	unsigned int x_y_pair;
};
extern __shared__ float arr_sh[];
// Forward (test) pass of 2D max-subsampling, one thread per
// (window_x, output_x) x (output_y, feature_map) x entry combination.
// Phase 1: each thread scans its window column (all window rows in y) and
// stores the column maximum plus its y offset in shared memory.
// Phase 2: after the barrier, the thread with window_x == 0 reduces the
// subsampling_width partial maxima of the neighbouring threads and writes the
// pooled value plus the argmax (x, y) position (needed by backprop).
// Assumes the launch configuration keeps all window_x entries of one output
// column inside a single thread block — the host passes subsampling_sizes[0]
// as an alignment hint and the task list interleaves window_x innermost.
// NOTE(review): verify this holds for unusual window sizes.
// Dynamic shared memory: threadblock_size * (sizeof(float) + sizeof(int)).
__global__ void max_subsampling_2d_tex_hess_kernel(
	float * __restrict output,
	x_y_config * __restrict max_positions,
	const float * __restrict input,
	const window_x_x_config * __restrict window_x_x_config_list,
	const y_feature_map_config * __restrict y_feature_map_config_list,
	int subsampling_width,
	int subsampling_height,
	int input_width,
	int input_height,
	int output_width,
	int output_height,
	int feature_map_count,
	int entry_count,
	int window_x_x_config_count,
	int y_feature_map_config_count)
{
	int window_x_x_config_id = blockIdx.x * blockDim.x + threadIdx.x;
	int feature_map_config_id = blockIdx.y * blockDim.y + threadIdx.y;
	int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
	// Flat index of this thread within its block, used to address the two
	// shared-memory scratch arrays below.
	int local_thread_id = (threadIdx.z * blockDim.y + threadIdx.y) * blockDim.x + threadIdx.x;
	int threadblock_size = blockDim.z * blockDim.y * blockDim.x;
	// Dynamic shared memory split: first the float maxima, then the int
	// argmax-y values.
	float * vals = arr_sh;
	int * max_pos_y_list = (int *)(vals + threadblock_size);
	bool in_bounds = (entry_id < entry_count) && (window_x_x_config_id < window_x_x_config_count) && (feature_map_config_id < y_feature_map_config_count);
	// Sentinel below any realistic activation value.
	float res = -1.0e37F;
	int max_pos_y;
	int window_x;
	int output_x;
	int output_y;
	int feature_map_id;
	if (in_bounds)
	{
		// Decode the packed task descriptors (16-bit halves).
		window_x_x_config wxx = window_x_x_config_list[window_x_x_config_id];
		output_x = wxx.window_x_x_pair & 0xFFFF;
		window_x = wxx.window_x_x_pair >> 16;
		y_feature_map_config yfm = y_feature_map_config_list[feature_map_config_id];
		feature_map_id = yfm.y_feature_map_id_pair & 0xFFFF;
		output_y = yfm.y_feature_map_id_pair >> 16;
		int input_x = output_x * subsampling_width + window_x;
		int input_y = output_y * subsampling_height;
		int current_input_elem_id = ((entry_id * feature_map_count + feature_map_id) * input_height + input_y) * input_width + input_x;
		// Phase 1: column-wise max over the window rows.
		res = input[current_input_elem_id];
		max_pos_y = 0;
		for(int j = 1; j < subsampling_height; ++j)
		{
			current_input_elem_id += input_width;
			float new_val = input[current_input_elem_id];
			if (new_val > res)
			{
				res = new_val;
				max_pos_y = j;
			}
		}
		vals[local_thread_id] = res;
		max_pos_y_list[local_thread_id] = max_pos_y;
	}
	// Make every thread's column maximum visible before the cross-thread
	// reduction; reached by all threads (including out-of-bounds ones).
	__syncthreads();
	if (in_bounds && (window_x == 0))
	{
		// Phase 2: combine the partial maxima of the subsampling_width
		// consecutive threads covering this output column.
		int max_pos_x = 0;
		for(int j = 1; j < subsampling_width; ++j)
		{
			local_thread_id++;
			float new_val = vals[local_thread_id];
			int new_max_pos_y = max_pos_y_list[local_thread_id];
			if (new_val > res)
			{
				res = new_val;
				max_pos_x = j;
				max_pos_y = new_max_pos_y;
			}
		}
		int offset = ((entry_id * feature_map_count + feature_map_id) * output_height + output_y) * output_width + output_x;
		output[offset] = res;
		// Pack the argmax as (x << 16) | y for the backward pass.
		max_positions[offset].x_y_pair = (max_pos_x << 16) | max_pos_y;
	}
}
// Backward 2-D max subsampling: one thread per (output position, feature map,
// entry) routes the output error to the single input element that won the
// forward max (argmax recorded in max_positions, packed as (x << 16) | y).
// Caller is expected to have zeroed input_errors beforehand.
__global__ void max_subsampling_2d_square_deriviative_hess_kernel(
	float * __restrict input_errors,
	const x_y_config * __restrict max_positions,
	const float * __restrict output_errors,
	const x_y_config * __restrict x_y_config_list,
	int subsampling_width,
	int subsampling_height,
	int input_width,
	int input_height,
	int output_width,
	int output_height,
	int feature_map_count,
	int entry_count,
	int x_y_config_count)
{
	const int cfg_id = blockIdx.x * blockDim.x + threadIdx.x;
	const int fm_id = blockIdx.y * blockDim.y + threadIdx.y;
	const int ent_id = blockIdx.z * blockDim.z + threadIdx.z;

	// Guard: the grid may overshoot each of the three task dimensions.
	if ((ent_id >= entry_count) || (cfg_id >= x_y_config_count) || (fm_id >= feature_map_count))
		return;

	// Unpack the output coordinate this thread handles (x high, y low).
	const unsigned int xy = x_y_config_list[cfg_id].x_y_pair;
	const int out_x = (int)(xy >> 16);
	const int out_y = (int)(xy & 0xFFFF);

	const int out_offset = ((ent_id * feature_map_count + fm_id) * output_height + out_y) * output_width + out_x;
	const float err = output_errors[out_offset];

	// Argmax recorded by the forward kernel for this output element.
	const unsigned int pos = max_positions[out_offset].x_y_pair;
	const int in_x = out_x * subsampling_width + (int)(pos >> 16);
	const int in_y = out_y * subsampling_height + (int)(pos & 0xFFFF);

	const int in_offset = ((ent_id * feature_map_count + fm_id) * input_height + in_y) * input_width + in_x;
	input_errors[in_offset] = err;
}
namespace nnforge
{
	namespace cuda
	{
		max_subsampling_2d_layer_hessian_cuda::max_subsampling_2d_layer_hessian_cuda()
		{
		}

		max_subsampling_2d_layer_hessian_cuda::~max_subsampling_2d_layer_hessian_cuda()
		{
		}

		// Forward pass: launches the max-subsampling kernel, writing both the
		// pooled output and the per-element argmax positions (additional
		// buffer 0), which enqueue_backprop consumes.
		void max_subsampling_2d_layer_hessian_cuda::enqueue_test(
			cudaStream_t stream_id,
			const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
			const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data,
			const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
			cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
			const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
			unsigned int entry_count)
		{
			const float * input = *input_neurons_buffer;
			float * output = *output_neurons_buffer;
			// Buffer 0: per-entry argmax positions, filled by the kernel.
			x_y_config * max_positions = (x_y_config *)((void *)(*additional_buffers[0]));
			// Buffer 1: precomputed (window_x, output_x) task list — see
			// fill_additional_buffers for its ordering (window_x fastest).
			int window_x_x_config_count = subsampling_sizes[0] * output_configuration_specific.dimension_sizes[0];
			const window_x_x_config * window_x_x_config_list = static_cast<const window_x_x_config *>((const void *)*additional_buffers[1]);
			// Buffer 2: precomputed (output_y, feature_map) task list.
			int y_feature_map_config_count = output_configuration_specific.dimension_sizes[1] * output_configuration_specific.feature_map_count;
			const y_feature_map_config * y_feature_map_config_list = static_cast<const y_feature_map_config *>((const void *)*additional_buffers[2]);
			// The trailing subsampling_sizes[0] argument presumably makes the
			// block x-dimension keep a whole pooling-window row of threads
			// together, which the kernel's phase-2 reduction relies on —
			// TODO confirm against cuda_util.
			std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
				*cuda_config,
				window_x_x_config_count,
				y_feature_map_config_count,
				entry_count,
				subsampling_sizes[0]);
			int threadblock_size = kernel_dims.second.x * kernel_dims.second.y * kernel_dims.second.z;
			// Dynamic shared memory: one float (running max) plus one int
			// (argmax y) per thread in the block.
			int smem_size = threadblock_size * (sizeof(float) + sizeof(int));
			max_subsampling_2d_tex_hess_kernel<<<kernel_dims.first, kernel_dims.second, smem_size, stream_id>>>(
				output,
				max_positions,
				input,
				window_x_x_config_list,
				y_feature_map_config_list,
				subsampling_sizes[0],
				subsampling_sizes[1],
				input_configuration_specific.dimension_sizes[0],
				input_configuration_specific.dimension_sizes[1],
				output_configuration_specific.dimension_sizes[0],
				output_configuration_specific.dimension_sizes[1],
				output_configuration_specific.feature_map_count,
				entry_count,
				window_x_x_config_count,
				y_feature_map_config_count);
		}

		// Backward pass: zero-fills the input-error buffer, then scatters each
		// output error to the argmax position recorded during enqueue_test.
		void max_subsampling_2d_layer_hessian_cuda::enqueue_backprop(
			cudaStream_t stream_id,
			const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
			const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data,
			const_cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
			cuda_linear_buffer_device_smart_ptr output_errors_buffer,
			cuda_linear_buffer_device_smart_ptr input_errors_buffer,
			const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
			unsigned int entry_count)
		{
			// Non-max input positions receive zero gradient, so clear first.
			cuda_util::set_with_value(
				*cuda_config,
				*input_errors_buffer,
				0.0F,
				input_elem_count_per_entry * entry_count,
				stream_id);

			const float * output_errors = *output_errors_buffer;
			// Buffer 0: argmax positions written by the forward kernel.
			const x_y_config * max_positions = (const x_y_config *)((void *)(*additional_buffers[0]));
			float * input_errors = *input_errors_buffer;
			int x_y_config_count = output_configuration_specific.dimension_sizes[0] * output_configuration_specific.dimension_sizes[1];
			// Buffer 3: precomputed (x, y) output-coordinate task list.
			const x_y_config * x_y_config_list = static_cast<const x_y_config *>((const void *)*additional_buffers[3]);

			std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
				*cuda_config,
				x_y_config_count,
				output_configuration_specific.feature_map_count,
				entry_count);

			max_subsampling_2d_square_deriviative_hess_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(
				input_errors,
				max_positions,
				output_errors,
				x_y_config_list,
				subsampling_sizes[0],
				subsampling_sizes[1],
				input_configuration_specific.dimension_sizes[0],
				input_configuration_specific.dimension_sizes[1],
				output_configuration_specific.dimension_sizes[0],
				output_configuration_specific.dimension_sizes[1],
				output_configuration_specific.feature_map_count,
				entry_count,
				x_y_config_count);
		}

		// Caches the per-dimension subsampling window sizes from the schema.
		void max_subsampling_2d_layer_hessian_cuda::hessian_configured()
		{
			nnforge_shared_ptr<const max_subsampling_layer> layer_derived = nnforge_dynamic_pointer_cast<const max_subsampling_layer>(layer_schema);

			subsampling_sizes = layer_derived->subsampling_sizes;
		}

		// Backprop reads the forward argmax buffer, so it cannot reuse the
		// output-error buffer as its input-error buffer in place.
		bool max_subsampling_2d_layer_hessian_cuda::is_in_place_backprop() const
		{
			return false;
		}

		// Per-entry scratch: one packed argmax position per output element.
		std::vector<size_t> max_subsampling_2d_layer_hessian_cuda::get_sizes_of_additional_buffers_per_entry() const
		{
			std::vector<size_t> res;

			res.push_back(output_elem_count_per_entry * sizeof(x_y_config));

			return res;
		}

		// Fixed scratch: the three task lists used by the two kernels
		// (buffers 1, 2 and 3; buffer 0 is the per-entry buffer above).
		std::vector<size_t> max_subsampling_2d_layer_hessian_cuda::get_sizes_of_additional_buffers_fixed() const
		{
			std::vector<size_t> res;

			res.push_back(sizeof(window_x_x_config) * subsampling_sizes[0] * output_configuration_specific.dimension_sizes[0]);

			res.push_back(sizeof(y_feature_map_config) * output_configuration_specific.dimension_sizes[1] * output_configuration_specific.feature_map_count);

			res.push_back(sizeof(x_y_config) * output_configuration_specific.dimension_sizes[1] * output_configuration_specific.dimension_sizes[0]);

			return res;
		}

		// Builds the host-side task lists and uploads them once; ordering here
		// must match what the kernels unpack (window_x fastest in buffer 1).
		void max_subsampling_2d_layer_hessian_cuda::fill_additional_buffers(const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers) const
		{
			{
				std::vector<window_x_x_config> task_list;
				for(int x = 0; x < output_configuration_specific.dimension_sizes[0]; ++x)
					for(int window_x = 0; window_x < subsampling_sizes[0]; ++window_x)
						task_list.push_back(window_x_x_config(window_x, x));

				cuda_safe_call(cudaMemcpy(*additional_buffers[1], &(*task_list.begin()), sizeof(window_x_x_config) * task_list.size(), cudaMemcpyHostToDevice));
			}

			{
				std::vector<y_feature_map_config> task_list;
				for(int feature_map_id = 0; feature_map_id < output_configuration_specific.feature_map_count; ++feature_map_id)
					for(int y = 0; y < output_configuration_specific.dimension_sizes[1]; ++y)
						task_list.push_back(y_feature_map_config(y, feature_map_id));

				cuda_safe_call(cudaMemcpy(*additional_buffers[2], &(*task_list.begin()), sizeof(y_feature_map_config) * task_list.size(), cudaMemcpyHostToDevice));
			}

			{
				std::vector<x_y_config> task_list;
				for(int y = 0; y < output_configuration_specific.dimension_sizes[1]; ++y)
					for(int x = 0; x < output_configuration_specific.dimension_sizes[0]; ++x)
						task_list.push_back(x_y_config(x, y));

				cuda_safe_call(cudaMemcpy(*additional_buffers[3], &(*task_list.begin()), sizeof(x_y_config) * task_list.size(), cudaMemcpyHostToDevice));
			}
		}
	}
}
|
6b2045ac89a32457a6025b715bd29ca16f7952d7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHUNN.h"
#include "common.h"
#include <thrust/transform.h>
#include <thrust/reduce.h>
#include <thrust/transform_reduce.h>
#include <thrust/functional.h>
/*
* Description:
*/
// Maps a flat index into the upscaled output (dims x * d1 * d2 * d3) to the
// flat index of the nearest-neighbour source element in the smaller input,
// whose two innermost (spatial) dims are d2/scale_factor and d3/scale_factor.
__device__ int translate_idx(int ii, int d1, int d2, int d3, int scale_factor)
{
  // Peel the coordinates off the flat index, innermost first.
  const int w = ii % d3; ii /= d3;
  const int z = ii % d2; ii /= d2;
  const int y = ii % d1; ii /= d1;
  const int x = ii;

  // Nearest neighbour: integer-divide the spatial coordinates and dims.
  const int src_w = w / scale_factor;
  const int src_z = z / scale_factor;
  const int src_d2 = d2 / scale_factor;
  const int src_d3 = d3 / scale_factor;

  return (((x * d1 + y) * src_d2) + src_z) * src_d3 + src_w;
}
// Inverse mapping: takes a flat index into the small gradInput tensor and the
// intra-window offsets (off_x, off_y), and returns the flat index of the
// corresponding element in the scale_factor-times-larger gradOutput tensor.
__device__ int translate_idx_inv(int ii, int d1, int d2, int d3, int scale_factor, int off_x, int off_y)
{
  // Peel the coordinates off the flat index, innermost first.
  const int w = ii % d3; ii /= d3;
  const int z = ii % d2; ii /= d2;
  const int y = ii % d1; ii /= d1;
  const int x = ii;

  // Scale the spatial coordinates up and add the window offset.
  const int dst_w = w * scale_factor + off_x;
  const int dst_z = z * scale_factor + off_y;
  const int dst_d2 = d2 * scale_factor;
  const int dst_d3 = d3 * scale_factor;

  return (((x * d1 + y) * dst_d2) + dst_z) * dst_d3 + dst_w;
}
// Nearest-neighbour upsampling: one thread per output element. A 2-D grid of
// 1-D blocks is used so element counts beyond one grid dimension are covered.
__global__ void upscale(float *input, float *output, long no_elements,
                        int scale_factor, int d1, int d2, int d3)
{
  // Flat output index across the 2-D grid.
  long out_idx = threadIdx.x + blockDim.x * blockIdx.x;
  out_idx += threadIdx.y + blockDim.y * (blockDim.x * gridDim.x) * blockIdx.y;

  // Tail guard: the grid may overshoot the element count.
  if (out_idx < no_elements) {
    output[out_idx] = input[translate_idx(out_idx, d1, d2, d3, scale_factor)];
  }
}
// Forward nearest-neighbour spatial upsampling (HIP build).
// Zeroes the output, makes the input contiguous, sizes a 2-D grid of 256-thread
// blocks over all output elements, and launches the upscale kernel on the
// current THC stream. Supports 3-D (C,H,W) and 4-D (N,C,H,W) tensors.
void THNN_CudaSpatialUpSamplingNearest_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *output, int scale_factor)
{
  THCudaTensor_zero(state, output);

  THCUNN_assertSameGPU(state, 2, input, output);

  // The kernel assumes dense row-major storage.
  input = THCudaTensor_newContiguous(state, input);
  // This is for allocating output Tensor
  long no_elements = 1;
  for(int i = 0; i < input->nDimension; i++){
    no_elements *= input->size[i];
  }
  // Output has scale_factor^2 times as many elements as the input.
  no_elements *= scale_factor * scale_factor;

  int d1;
  int d2;
  int d3;

  // d1..d3 are the last three output dims (feature/height/width); the batch
  // dim, when present, is folded into the flat index by the kernel.
  if (input->nDimension == 3) {
    d1 = output->size[0];
    d2 = output->size[1];
    d3 = output->size[2];
  } else {
    d1 = output->size[1];
    d2 = output->size[2];
    d3 = output->size[3];
  }

  float *input_data = THCudaTensor_data(state, input);
  float *output_data = THCudaTensor_data(state, output);

  // cuda blocks & threads:
  long nthreads = 256;
  // Max number of blocks: http://en.wikipedia.org/wiki/CUDA
  // 65535 for SM 2.x, 2^32 -1 for >= 3.0
  // TODO: When we move to SM 3.5 we should update this
  long n_xblocks = min(max((int)ceil((float)no_elements / nthreads), 1), 65535);
  // Spill the remainder into the grid's y dimension.
  long n_yblocks = (long)ceil((float)no_elements / (float)(n_xblocks * nthreads));
  if (n_yblocks > 65535) {
    THError("Input size is too large!  aborting");
  }
  dim3 blocks(n_xblocks, n_yblocks);
  dim3 threads(nthreads);

  // kernel:
  hipLaunchKernelGGL(( upscale), dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), input_data, output_data, no_elements, scale_factor, d1, d2, d3);

  // check for errors
  hipError_t err = hipGetLastError();
  if (err != hipSuccess) {
    printf("error in SpatialUpSamplingNearest.updateOutput: %s\n", hipGetErrorString(err));
    THError("aborting");
  }

  // final cut: release the contiguous copy (or the extra reference).
  THCudaTensor_free(state, input);
}
/*
* Description:
*/
// Backward nearest-neighbour upsampling (HIP build): one thread per gradInput
// element accumulates the scale_factor^2 gradOutput values that were copied
// from it in the forward pass. Each thread writes only its own gradInput[ii],
// so no atomics are needed. Caller must zero gradInput beforehand.
__global__ void downscale(float *gradInput_data, float *gradOutput_data, long no_elements,
                          int scale_factor, int d1, int d2, int d3)
{
  // Flat gradInput index across a 2-D grid of 1-D blocks.
  long ii = threadIdx.x + blockDim.x * blockIdx.x;
  ii += threadIdx.y + blockDim.y * (blockDim.x * gridDim.x) * blockIdx.y;
  if (ii >= no_elements) return;
  // Sum over the scale_factor x scale_factor window of output gradients.
  for (int i=0; i < scale_factor; i++){
    for(int j=0; j < scale_factor; j++){
      int ipidx = translate_idx_inv(ii, d1, d2, d3, scale_factor, i, j);
      gradInput_data[ii] += gradOutput_data[ipidx];
    }
  }
}
// Backward nearest-neighbour spatial upsampling (HIP build).
// Zeroes gradInput, then launches the downscale kernel so each gradInput
// element accumulates its scale_factor^2 gradOutput contributions.
// Supports 3-D (C,H,W) and 4-D (N,C,H,W) tensors; `input` is unused here
// (kept for signature parity with updateOutput).
void THNN_CudaSpatialUpSamplingNearest_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradInput, int scale_factor)
{
  THCUNN_assertSameGPU(state, 2, gradOutput, gradInput);
  // The kernel accumulates with +=, so start from zero.
  THCudaTensor_zero(state, gradInput);

  float *gradInput_data = THCudaTensor_data(state, gradInput);
  float *gradOutput_data = THCudaTensor_data(state, gradOutput);

  long no_elements = 1;
  for(int i = 0; i < gradInput->nDimension; i++){
    no_elements *= gradInput->size[i];
  }

  int d1;
  int d2;
  int d3;

  // d1..d3 are the last three gradInput dims; a leading batch dim, when
  // present, is folded into the flat index by the kernel.
  if (gradInput->nDimension == 3) {
    d1 = gradInput->size[0];
    d2 = gradInput->size[1];
    d3 = gradInput->size[2];
  } else {
    d1 = gradInput->size[1];
    d2 = gradInput->size[2];
    d3 = gradInput->size[3];
  }

  // cuda blocks & threads:
  long nthreads = 256;
  // Max number of blocks: http://en.wikipedia.org/wiki/CUDA
  // 65535 for SM 2.x, 2^32 -1 for >= 3.0
  // TODO: When we move to SM 3.5 we should update this
  long n_xblocks = min(max((int)ceil((float)no_elements / nthreads), 1), 65535);
  // Spill the remainder into the grid's y dimension.
  long n_yblocks = (long)ceil((float)no_elements / (float)(n_xblocks * nthreads));
  if (n_yblocks > 65535) {
    THError("Input size is too large!  aborting");
  }
  dim3 blocks(n_xblocks, n_yblocks);
  dim3 threads(nthreads);

  // kernel:
  hipLaunchKernelGGL(( downscale), dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), gradInput_data, gradOutput_data, no_elements,
                                                            scale_factor, d1, d2, d3);

  // check for errors
  hipError_t err = hipGetLastError();
  if (err != hipSuccess) {
    // Bugfix: previous message wrongly named updateOutput, which misattributed
    // failures during backprop to the forward pass.
    printf("error in SpatialUpSamplingNearest.updateGradInput: %s\n", hipGetErrorString(err));
    THError("aborting");
  }
}
| 6b2045ac89a32457a6025b715bd29ca16f7952d7.cu | #include "THCUNN.h"
#include "common.h"
#include <thrust/transform.h>
#include <thrust/reduce.h>
#include <thrust/transform_reduce.h>
#include <thrust/functional.h>
/*
* Description:
*/
/*
 * Maps a flat index into the upscaled output tensor (last three dims
 * d1 x d2 x d3, any leading dim folded into x) to the flat index of the
 * nearest-neighbour source element in the smaller input tensor, whose two
 * innermost (spatial) dims are d2/scale_factor and d3/scale_factor.
 */
__device__ int translate_idx(int ii, int d1, int d2, int d3, int scale_factor)
{
  int x, y, z, w;
  // Peel the coordinates off the flat index, innermost (w) first.
  w = ii % d3;
  ii = ii/d3;
  z = ii % d2;
  ii = ii/d2;
  y = ii % d1;
  ii = ii/d1;
  x = ii;
  // Nearest neighbour: integer-divide the spatial coords and dims.
  w = w/scale_factor;
  z = z/scale_factor;
  d2 /= scale_factor;
  d3 /= scale_factor;
  // Re-flatten using the reduced spatial dims.
  return (((x*d1+y)*d2)+z)*d3+w;
}
/*
 * Inverse of translate_idx: maps a flat index into the small gradInput tensor
 * plus intra-window offsets (off_x, off_y) to the flat index of the matching
 * element in the scale_factor-times-larger gradOutput tensor.
 */
__device__ int translate_idx_inv(int ii, int d1, int d2, int d3, int scale_factor, int off_x, int off_y)
{
  int x, y, z, w;
  // Peel the coordinates off the flat index, innermost (w) first.
  w = ii % d3;
  ii = ii/d3;
  z = ii % d2;
  ii = ii/d2;
  y = ii % d1;
  ii = ii/d1;
  x = ii;
  // Scale the spatial coordinates up and add the window offset.
  w = w*scale_factor+off_x;
  z = z*scale_factor+off_y;
  d2 *= scale_factor;
  d3 *= scale_factor;
  // Re-flatten using the enlarged spatial dims.
  return (((x*d1+y)*d2)+z)*d3+w;
}
/*
 * Nearest-neighbour upsampling: one thread per output element, laid out on a
 * 2-D grid of 1-D blocks so counts beyond one grid dimension are covered.
 */
__global__ void upscale(float *input, float *output, long no_elements,
                        int scale_factor, int d1, int d2, int d3)
{
  // output offset:
  long ii = threadIdx.x + blockDim.x * blockIdx.x;
  ii += threadIdx.y + blockDim.y * (blockDim.x * gridDim.x) * blockIdx.y;
  // Tail guard: the grid may overshoot the element count.
  if (ii >= no_elements) return;
  int ipidx = translate_idx(ii, d1, d2, d3, scale_factor);
  output[ii]=input[ipidx];
}
// Forward nearest-neighbour spatial upsampling.
// Zeroes the output, makes the input contiguous, sizes a 2-D grid of
// 256-thread blocks over all output elements, and launches the upscale kernel
// on the current THC stream. Supports 3-D (C,H,W) and 4-D (N,C,H,W) tensors.
void THNN_CudaSpatialUpSamplingNearest_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *output, int scale_factor)
{
  THCudaTensor_zero(state, output);

  THCUNN_assertSameGPU(state, 2, input, output);

  // The kernel assumes dense row-major storage.
  input = THCudaTensor_newContiguous(state, input);
  // This is for allocating output Tensor
  long no_elements = 1;
  for(int i = 0; i < input->nDimension; i++){
    no_elements *= input->size[i];
  }
  // Output has scale_factor^2 times as many elements as the input.
  no_elements *= scale_factor * scale_factor;

  int d1;
  int d2;
  int d3;

  // d1..d3 are the last three output dims; a leading batch dim, when present,
  // is folded into the flat index by the kernel.
  if (input->nDimension == 3) {
    d1 = output->size[0];
    d2 = output->size[1];
    d3 = output->size[2];
  } else {
    d1 = output->size[1];
    d2 = output->size[2];
    d3 = output->size[3];
  }

  float *input_data = THCudaTensor_data(state, input);
  float *output_data = THCudaTensor_data(state, output);

  // cuda blocks & threads:
  long nthreads = 256;
  // Max number of blocks: http://en.wikipedia.org/wiki/CUDA
  // 65535 for SM 2.x, 2^32 -1 for >= 3.0
  // TODO: When we move to SM 3.5 we should update this
  long n_xblocks = min(max((int)ceil((float)no_elements / nthreads), 1), 65535);
  // Spill the remainder into the grid's y dimension.
  long n_yblocks = (long)ceil((float)no_elements / (float)(n_xblocks * nthreads));
  if (n_yblocks > 65535) {
    THError("Input size is too large!  aborting");
  }
  dim3 blocks(n_xblocks, n_yblocks);
  dim3 threads(nthreads);

  // kernel:
  upscale<<<blocks, threads, 0, THCState_getCurrentStream(state)>>> (input_data, output_data, no_elements, scale_factor, d1, d2, d3);

  // check for errors
  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) {
    printf("error in SpatialUpSamplingNearest.updateOutput: %s\n", cudaGetErrorString(err));
    THError("aborting");
  }

  // final cut: release the contiguous copy (or the extra reference).
  THCudaTensor_free(state, input);
}
/*
* Description:
*/
/*
 * Backward nearest-neighbour upsampling: one thread per gradInput element
 * accumulates the scale_factor^2 gradOutput values copied from it in the
 * forward pass. Each thread writes only its own gradInput[ii], so no atomics
 * are needed. Caller must zero gradInput beforehand.
 */
__global__ void downscale(float *gradInput_data, float *gradOutput_data, long no_elements,
                          int scale_factor, int d1, int d2, int d3)
{
  // output offset:
  long ii = threadIdx.x + blockDim.x * blockIdx.x;
  ii += threadIdx.y + blockDim.y * (blockDim.x * gridDim.x) * blockIdx.y;
  if (ii >= no_elements) return;
  // Sum over the scale_factor x scale_factor window of output gradients.
  for (int i=0; i < scale_factor; i++){
    for(int j=0; j < scale_factor; j++){
      int ipidx = translate_idx_inv(ii, d1, d2, d3, scale_factor, i, j);
      gradInput_data[ii] += gradOutput_data[ipidx];
    }
  }
}
// Backward nearest-neighbour spatial upsampling.
// Zeroes gradInput, then launches the downscale kernel so each gradInput
// element accumulates its scale_factor^2 gradOutput contributions.
// Supports 3-D (C,H,W) and 4-D (N,C,H,W) tensors; `input` is unused here
// (kept for signature parity with updateOutput).
void THNN_CudaSpatialUpSamplingNearest_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradInput, int scale_factor)
{
  THCUNN_assertSameGPU(state, 2, gradOutput, gradInput);
  // The kernel accumulates with +=, so start from zero.
  THCudaTensor_zero(state, gradInput);

  float *gradInput_data = THCudaTensor_data(state, gradInput);
  float *gradOutput_data = THCudaTensor_data(state, gradOutput);

  long no_elements = 1;
  for(int i = 0; i < gradInput->nDimension; i++){
    no_elements *= gradInput->size[i];
  }

  int d1;
  int d2;
  int d3;

  // d1..d3 are the last three gradInput dims; a leading batch dim, when
  // present, is folded into the flat index by the kernel.
  if (gradInput->nDimension == 3) {
    d1 = gradInput->size[0];
    d2 = gradInput->size[1];
    d3 = gradInput->size[2];
  } else {
    d1 = gradInput->size[1];
    d2 = gradInput->size[2];
    d3 = gradInput->size[3];
  }

  // cuda blocks & threads:
  long nthreads = 256;
  // Max number of blocks: http://en.wikipedia.org/wiki/CUDA
  // 65535 for SM 2.x, 2^32 -1 for >= 3.0
  // TODO: When we move to SM 3.5 we should update this
  long n_xblocks = min(max((int)ceil((float)no_elements / nthreads), 1), 65535);
  // Spill the remainder into the grid's y dimension.
  long n_yblocks = (long)ceil((float)no_elements / (float)(n_xblocks * nthreads));
  if (n_yblocks > 65535) {
    THError("Input size is too large!  aborting");
  }
  dim3 blocks(n_xblocks, n_yblocks);
  dim3 threads(nthreads);

  // kernel:
  downscale<<<blocks, threads, 0, THCState_getCurrentStream(state)>>> (gradInput_data, gradOutput_data, no_elements,
                                                                       scale_factor, d1, d2, d3);

  // check for errors
  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) {
    // Bugfix: previous message wrongly named updateOutput, which misattributed
    // failures during backprop to the forward pass.
    printf("error in SpatialUpSamplingNearest.updateGradInput: %s\n", cudaGetErrorString(err));
    THError("aborting");
  }
}
|
ada216fc7454bfc712106b49281d0036856fa681.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
// Problem description for one int8 SIMT convolution specialization:
// NC/4HW4 src/dst layouts, C/4RSK4 filter layout (4-element interleave).
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
// Tile shapes: threadblock and warp both 16x64x8; scalar (1x1x4) dot-product
// instruction, i.e. SIMT cores rather than tensor cores.
using ThreadBlockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
// Epilogue: int32 accumulate + bias, float scale, clamp to int8, 4-wide vectors.
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp<
    int8_t, 4, int32_t, int32_t, float>;
// Fprop convolution for SM61 (Pascal) with saturating int8 multiply-add.
using Convolution = cutlass::conv::device::Convolution<
    int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
    LayoutDst, int32_t, LayoutDst, int32_t,
    cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
    ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
    cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
    2, 4, 4, true,
    cutlass::arch::OpMultiplyAddSaturate>;
// Explicit instantiation so this specialization is compiled exactly once;
// the declaration lives in the wrapper .cuinl included above.
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
    const typename Convolution::ElementSrc* d_src,
    const typename Convolution::ElementFilter* d_filter,
    const typename Convolution::ElementBias* d_bias,
    const typename Convolution::ElementDst* d_z,
    typename Convolution::ElementDst* d_dst,
    int* workspace,
    typename Convolution::ConvolutionParameter const& conv_param,
    typename Convolution::EpilogueOutputOp::Params const& epilogue,
    hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| ada216fc7454bfc712106b49281d0036856fa681.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
// Problem description for one int8 SIMT convolution specialization:
// NC/4HW4 src/dst layouts, C/4RSK4 filter layout (4-element interleave).
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
// Tile shapes: threadblock and warp both 16x64x8; scalar (1x1x4) dot-product
// instruction, i.e. SIMT cores rather than tensor cores.
using ThreadBlockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
// Epilogue: int32 accumulate + bias, float scale, clamp to int8, 4-wide vectors.
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp<
    int8_t, 4, int32_t, int32_t, float>;
// Fprop convolution for SM61 (Pascal) with saturating int8 multiply-add.
using Convolution = cutlass::conv::device::Convolution<
    int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
    LayoutDst, int32_t, LayoutDst, int32_t,
    cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
    ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
    cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
    2, 4, 4, true,
    cutlass::arch::OpMultiplyAddSaturate>;
// Explicit instantiation so this specialization is compiled exactly once;
// the declaration lives in the wrapper .cuinl included above.
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
    const typename Convolution::ElementSrc* d_src,
    const typename Convolution::ElementFilter* d_filter,
    const typename Convolution::ElementBias* d_bias,
    const typename Convolution::ElementDst* d_z,
    typename Convolution::ElementDst* d_dst,
    int* workspace,
    typename Convolution::ConvolutionParameter const& conv_param,
    typename Convolution::EpilogueOutputOp::Params const& epilogue,
    cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
1ace17a9b2e32faf8cebaebc8a4a3b121ee57228.hip | // !!! This is a file automatically generated by hipify!!!
#ifdef USE_CUDNN
#include <vector>
#include "caffe/layers/cudnn_tanh_layer.hpp"
namespace caffe {

// Forward TanH via cuDNN's activation API (HIP build). activ_desc_ is
// configured elsewhere in the layer; alpha=1, beta=0 means the result fully
// overwrites the top blob. The stream sync makes the asynchronous cuDNN call
// observable before returning.
template <typename Ftype, typename Btype>
void CuDNNTanHLayer<Ftype, Btype>::Forward_gpu(const vector<Blob*>& bottom,
    const vector<Blob*>& top) {
  const Ftype* bottom_data = bottom[0]->gpu_data<Ftype>();
  Ftype* top_data = top[0]->mutable_gpu_data<Ftype>();
  CUDNN_CHECK(cudnnActivationForward(Caffe::cudnn_handle(),
        activ_desc_,
        cudnn::dataType<Ftype>::one,
        fwd_bottom_desc_, bottom_data,
        cudnn::dataType<Ftype>::zero,
        fwd_top_desc_, top_data));
  CUDA_CHECK(hipStreamSynchronize(Caffe::thread_stream()));
}

// Backward TanH via cuDNN: computes bottom_diff from top data/diff and bottom
// data (cuDNN's tanh backward takes all three). No-op when the bottom blob
// does not need gradients.
template <typename Ftype, typename Btype>
void CuDNNTanHLayer<Ftype, Btype>::Backward_gpu(const vector<Blob*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob*>& bottom) {
  if (!propagate_down[0]) {
    return;
  }

  const Btype* top_data = top[0]->gpu_data<Btype>();
  const Btype* top_diff = top[0]->gpu_diff<Btype>();
  const Btype* bottom_data = bottom[0]->gpu_data<Btype>();
  Btype* bottom_diff = bottom[0]->mutable_gpu_diff<Btype>();
  CUDNN_CHECK(cudnnActivationBackward(Caffe::cudnn_handle(),
        activ_desc_,
        cudnn::dataType<Btype>::one,
        bwd_top_desc_, top_data, bwd_top_desc_, top_diff,
        bwd_bottom_desc_, bottom_data,
        cudnn::dataType<Btype>::zero,
        bwd_bottom_desc_, bottom_diff));
  CUDA_CHECK(hipStreamSynchronize(Caffe::thread_stream()));
}

// Instantiate Forward_gpu/Backward_gpu for the supported type combinations.
INSTANTIATE_LAYER_GPU_FUNCS_FB(CuDNNTanHLayer);

}  // namespace caffe
#endif
| 1ace17a9b2e32faf8cebaebc8a4a3b121ee57228.cu | #ifdef USE_CUDNN
#include <vector>
#include "caffe/layers/cudnn_tanh_layer.hpp"
namespace caffe {

// Forward TanH via cuDNN's activation API. activ_desc_ is configured
// elsewhere in the layer; alpha=1, beta=0 means the result fully overwrites
// the top blob. The stream sync makes the asynchronous cuDNN call observable
// before returning.
template <typename Ftype, typename Btype>
void CuDNNTanHLayer<Ftype, Btype>::Forward_gpu(const vector<Blob*>& bottom,
    const vector<Blob*>& top) {
  const Ftype* bottom_data = bottom[0]->gpu_data<Ftype>();
  Ftype* top_data = top[0]->mutable_gpu_data<Ftype>();
  CUDNN_CHECK(cudnnActivationForward(Caffe::cudnn_handle(),
        activ_desc_,
        cudnn::dataType<Ftype>::one,
        fwd_bottom_desc_, bottom_data,
        cudnn::dataType<Ftype>::zero,
        fwd_top_desc_, top_data));
  CUDA_CHECK(cudaStreamSynchronize(Caffe::thread_stream()));
}

// Backward TanH via cuDNN: computes bottom_diff from top data/diff and bottom
// data (cuDNN's tanh backward takes all three). No-op when the bottom blob
// does not need gradients.
template <typename Ftype, typename Btype>
void CuDNNTanHLayer<Ftype, Btype>::Backward_gpu(const vector<Blob*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob*>& bottom) {
  if (!propagate_down[0]) {
    return;
  }

  const Btype* top_data = top[0]->gpu_data<Btype>();
  const Btype* top_diff = top[0]->gpu_diff<Btype>();
  const Btype* bottom_data = bottom[0]->gpu_data<Btype>();
  Btype* bottom_diff = bottom[0]->mutable_gpu_diff<Btype>();
  CUDNN_CHECK(cudnnActivationBackward(Caffe::cudnn_handle(),
        activ_desc_,
        cudnn::dataType<Btype>::one,
        bwd_top_desc_, top_data, bwd_top_desc_, top_diff,
        bwd_bottom_desc_, bottom_data,
        cudnn::dataType<Btype>::zero,
        bwd_bottom_desc_, bottom_diff));
  CUDA_CHECK(cudaStreamSynchronize(Caffe::thread_stream()));
}

// Instantiate Forward_gpu/Backward_gpu for the supported type combinations.
INSTANTIATE_LAYER_GPU_FUNCS_FB(CuDNNTanHLayer);

}  // namespace caffe
#endif
|
39c22777363ff5333b1c3b1015e254e003bf684c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <THH/THH.h>
#include <THH/THHTensorMath.h>
#include <THH/THHGeneral.h>
#include <THH/THHBlas.h>
#include <THH/THHTensorCopy.h>
#include <TH/THHalf.h>
#include <THH/THHApply.cuh>
#include <THH/THHReduce.cuh>
#include <THH/THHDeviceUtils.cuh>
#include <THH/THHNumerics.cuh>
#include <THH/THHAtomics.cuh>
#include <THH/THHThrustAllocator.cuh>
#include <THH/THHTensorSort.cuh>
#include <THH/THHTensor.hpp>
#include <THH/THHStorage.hpp>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <algorithm> // for std::min
#include <c10/macros/Macros.h>
#include <ATen/WrapDimUtils.h>
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexCopyLargeIndex kernel is a better choice to increase
// parallelism.
// Copies src slice i (along srcCopyDim) into dst slice indices[i] (along
// dstCopyDim). Outer host-style loop over the (few) indices; 1-D grid-stride
// loop over the innerSize elements of each slice.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexCopySmallIndex(TensorInfo<T, IndexType> dst,
                                    TensorInfo<T, IndexType> src,
                                    TensorInfo<int64_t, IndexType> indices,
                                    int dstCopyDim,
                                    int srcCopyDim,
                                    IndexType innerSize,
                                    int64_t dstCopyDimSize) {
  // In order to avoid reloading the index that we are copying, load
  // it once to handle all of the points that are being selected, so
  // it can be reused as much as possible. This kernel is chosen when
  // this is a good choice (small number of chosen indices), since
  // re-accessing indices in addition to src elements can be slow.
  for (IndexType srcIndex = 0; srcIndex < indices.sizes[0]; ++srcIndex) {
    // Destination slice selected for this source slice (0-based; the old
    // "Lua indices begin at 1" comment was stale — no offset is applied).
    IndexType dstIndex =
      indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(srcIndex, indices)];
    CUDA_KERNEL_ASSERT(dstIndex < dstCopyDimSize);

    // We stride over the output ignoring the indexed dimension
    // (innerSize), whose offset calculation is handled differently
    for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
         linearIndex < innerSize;
         linearIndex += gridDim.x * blockDim.x) {
      IndexType dstOffset =
        IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
      dstOffset += dstIndex * dst.strides[dstCopyDim];

      IndexType srcOffset =
        IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src);
      srcOffset += srcIndex * src.strides[srcCopyDim];

      dst.data[dstOffset] = src.data[srcOffset];
    }
  }
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexCopySmallIndex kernel is a better choice to reduce memory
// accesses.
// Same copy as indexCopySmallIndex, but a single 1-D grid-stride loop covers
// all (index, element) pairs (totalSize = numIndices * innerSize), so
// parallelism scales with the number of indices. IndexIsMajor selects which
// factor varies slowest in the flattened index.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim,
          bool IndexIsMajor>
__global__ void indexCopyLargeIndex(TensorInfo<T, IndexType> dst,
                                    TensorInfo<T, IndexType> src,
                                    TensorInfo<int64_t, IndexType> indices,
                                    int dstCopyDim,
                                    int srcCopyDim,
                                    IndexType totalSize,
                                    IndexType innerSize,
                                    int64_t dstCopyDimSize) {
  // We stride over the output including the indexed dimension
  // (totalSize), and calculate the destination index point based on that
  for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
       linearIndex < totalSize;
       linearIndex += gridDim.x * blockDim.x) {
    // Split the flat index into (which index, which element within slice).
    IndexType srcIndex, elementInSlice;
    if (IndexIsMajor) {
      srcIndex = linearIndex / innerSize;
      elementInSlice = linearIndex % innerSize;
    }
    else {
      elementInSlice = linearIndex / innerSize;
      srcIndex = linearIndex % innerSize;
    }

    // Destination slice selected for this source slice (0-based; the old
    // "Lua indices begin at 1" comment was stale — no offset is applied).
    IndexType dstIndex =
      indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(srcIndex, indices)];
    CUDA_KERNEL_ASSERT(dstIndex < dstCopyDimSize);

    IndexType dstOffset =
      IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
    dstOffset += dstIndex * dst.strides[dstCopyDim];

    IndexType srcOffset =
      IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src);
    srcOffset += srcIndex * src.strides[srcCopyDim];

    dst.data[dstOffset] = src.data[srcOffset];
  }
}
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexAddLargeIndex kernel is a better choice to increase
// parallelism.
// Like indexCopySmallIndex, but accumulates (dst += src) instead of copying.
// gpuAtomicAdd is required because several src slices may map to the same
// dst slice when indices contain duplicates.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexAddSmallIndex(TensorInfo<T, IndexType> dst,
                                   TensorInfo<T, IndexType> src,
                                   TensorInfo<int64_t, IndexType> indices,
                                   int dstAddDim,
                                   int srcAddDim,
                                   IndexType innerSize,
                                   int64_t dstAddDimSize) {
  // In order to avoid reloading the index that we are copying, load
  // it once to handle all of the points that are being selected, so
  // it can be reused as much as possible. This kernel is chosen when
  // this is a good choice (small number of chosen indices), since
  // re-accessing indices in addition to src elements can be slow.
  for (IndexType srcIndex = 0; srcIndex < indices.sizes[0]; ++srcIndex) {
    // Destination slice selected for this source slice (0-based; the old
    // "Lua indices begin at 1" comment was stale — no offset is applied).
    IndexType dstIndex =
      indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(srcIndex, indices)];
    CUDA_KERNEL_ASSERT(dstIndex < dstAddDimSize);

    // We stride over the output ignoring the indexed dimension
    // (innerSize), whose offset calculation is handled differently
    for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
         linearIndex < innerSize;
         linearIndex += gridDim.x * blockDim.x) {
      IndexType dstOffset =
        IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
      dstOffset += dstIndex * dst.strides[dstAddDim];

      IndexType srcOffset =
        IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src);
      srcOffset += srcIndex * src.strides[srcAddDim];

      gpuAtomicAdd(&dst.data[dstOffset], src.data[srcOffset]);
    }
  }
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexAddSmallIndex kernel is a better choice to reduce memory
// accesses.
// indexAddLargeIndex: same scatter-add as indexAddSmallIndex, but
// parallelized over (index, element-in-slice) pairs with one grid-stride
// loop over totalSize. IndexIsMajor selects how linearIndex decomposes into
// the two coordinates (index-major vs. slice-major).
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim,
          bool IndexIsMajor>
__global__ void indexAddLargeIndex(TensorInfo<T, IndexType> dst,
                                   TensorInfo<T, IndexType> src,
                                   TensorInfo<int64_t, IndexType> indices,
                                   int dstAddDim,
                                   int srcAddDim,
                                   IndexType totalSize,
                                   IndexType innerSize,
                                   int64_t dstAddDimSize) {
  // We stride over the output including the indexed dimension
  // (totalSize), and calculate the destination index point based on that
  for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
       linearIndex < totalSize;
       linearIndex += gridDim.x * blockDim.x) {
    IndexType srcIndex, elementInSlice;
    if (IndexIsMajor) {
      srcIndex = linearIndex / innerSize;
      elementInSlice = linearIndex % innerSize;
    }
    else {
      // NOTE(review): in this branch the divisor/modulus roles swap, which
      // presumes the caller passes innerSize consistent with a slice-major
      // layout — confirm against the host-side launch code.
      elementInSlice = linearIndex / innerSize;
      srcIndex = linearIndex % innerSize;
    }
    // Index values are used as-is (no 1-based shift; old "Lua" comment was
    // stale). Assert guards the upper bound only.
    IndexType dstIndex =
      indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(srcIndex, indices)];
    CUDA_KERNEL_ASSERT(dstIndex < dstAddDimSize);
    IndexType dstOffset =
      IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
    dstOffset += dstIndex * dst.strides[dstAddDim];
    IndexType srcOffset =
      IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src);
    srcOffset += srcIndex * src.strides[srcAddDim];
    // Atomic because several indices may target the same destination slice.
    gpuAtomicAdd(&dst.data[dstOffset], src.data[srcOffset]);
  }
}
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexFillLargeIndex kernel is a better choice to increase
// parallelism.
// indexFillSmallIndex: dst.select(dstFillDim, indices[j]) = val for every j.
// Small-index variant: every thread loops over all indices so each index is
// loaded once and reused for a whole inner slice (grid-stride over innerSize).
template <typename T, typename IndexType, int DstDim, int IdxDim>
__global__ void indexFillSmallIndex(TensorInfo<T, IndexType> dst,
                                    TensorInfo<int64_t, IndexType> indices,
                                    int dstFillDim,
                                    IndexType innerSize,
                                    int64_t dstFillDimSize,
                                    T val) {
  // In order to avoid reloading the index that we are copying, load
  // it once to handle all of the points that are being selected, so
  // it can be reused as much as possible. This kernel is chosen when
  // this is a good choice (small number of chosen indices), since
  // re-accessing indices in addition to src elements can be slow.
  for (IndexType dstIndex = 0; dstIndex < indices.sizes[0]; ++dstIndex) {
    // Index values are used as-is (no 1-based shift; old "Lua" comment was
    // stale). Assert guards the upper bound only.
    IndexType dstIndex_ =
      indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(dstIndex, indices)];
    CUDA_KERNEL_ASSERT(dstIndex_ < dstFillDimSize);
    // We stride over the output ignoring the indexed dimension
    // (innerSize), whose offset calculation is handled differently
    for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
         linearIndex < innerSize;
         linearIndex += gridDim.x * blockDim.x) {
      IndexType dstOffset =
        IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
      dstOffset += dstIndex_ * dst.strides[dstFillDim];
      dst.data[dstOffset] = val;
    }
  }
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexFillSmallIndex kernel is a better choice to reduce memory
// accesses.
// indexFillLargeIndex: same fill as indexFillSmallIndex, but parallelized
// over (index, element-in-slice) pairs via one grid-stride loop over
// totalSize; IndexIsMajor selects the decomposition of linearIndex.
template <typename T, typename IndexType, int DstDim, int IdxDim,
          bool IndexIsMajor>
__global__ void indexFillLargeIndex(TensorInfo<T, IndexType> dst,
                                    TensorInfo<int64_t, IndexType> indices,
                                    int dstFillDim,
                                    IndexType totalSize,
                                    IndexType innerSize,
                                    int64_t dstFillDimSize,
                                    T val) {
  // We stride over the output including the indexed dimension
  // (totalSize), and calculate the destination index point based on that
  for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
       linearIndex < totalSize;
       linearIndex += gridDim.x * blockDim.x) {
    IndexType dstIndex, elementInSlice;
    if (IndexIsMajor) {
      dstIndex = linearIndex / innerSize;
      elementInSlice = linearIndex % innerSize;
    }
    else {
      elementInSlice = linearIndex / innerSize;
      dstIndex = linearIndex % innerSize;
    }
    // Index values are used as-is (no 1-based shift; old "Lua" comment was
    // stale). Assert guards the upper bound only.
    IndexType dstIndex_ =
      indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(dstIndex, indices)];
    CUDA_KERNEL_ASSERT(dstIndex_ < dstFillDimSize);
    IndexType dstOffset =
      IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
    dstOffset += dstIndex_ * dst.strides[dstFillDim];
    dst.data[dstOffset] = val;
  }
}
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexSelectLargeIndex kernel is a better choice to increase
// parallelism.
// indexSelectSmallIndex: gather — dst.select(dstSelectDim, j) =
// src.select(srcSelectDim, indices[j]) for every j. Small-index variant:
// every thread loops over all indices so each index value is loaded once and
// reused for a whole inner slice (grid-stride over innerSize).
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexSelectSmallIndex(TensorInfo<T, IndexType> dst,
                                      TensorInfo<T, IndexType> src,
                                      TensorInfo<int64_t, IndexType> indices,
                                      int dstSelectDim,
                                      int srcSelectDim,
                                      IndexType innerSize,
                                      int64_t srcSelectDimSize) {
  // In order to avoid reloading the index that we are copying, load
  // it once to handle all of the points that are being selected, so
  // it can be reused as much as possible. This kernel is chosen when
  // this is a good choice (small number of chosen indices), since
  // re-accessing indices in addition to src elements can be slow.
  for (IndexType dstIndex = 0; dstIndex < indices.sizes[0]; ++dstIndex) {
    // Index values are used as-is (no 1-based shift; old "Lua" comment was
    // stale). Assert guards the upper bound only.
    IndexType srcIndex =
      indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(dstIndex, indices)];
    CUDA_KERNEL_ASSERT(srcIndex < srcSelectDimSize);
    // We stride over the output ignoring the indexed dimension
    // (innerSize), whose offset calculation is handled differently
    for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
         linearIndex < innerSize;
         linearIndex += gridDim.x * blockDim.x) {
      IndexType dstOffset =
        IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
      dstOffset += dstIndex * dst.strides[dstSelectDim];
      IndexType srcOffset =
        IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src);
      srcOffset += srcIndex * src.strides[srcSelectDim];
      dst.data[dstOffset] = src.data[srcOffset];
    }
  }
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexSelectSmallIndex kernel is a better choice to reduce memory
// accesses.
// indexSelectLargeIndex: same gather as indexSelectSmallIndex, but
// parallelized over (index, element-in-slice) pairs via one grid-stride loop
// over totalSize; IndexIsMajor selects the decomposition of linearIndex.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim,
          bool IndexIsMajor>
__global__ void indexSelectLargeIndex(TensorInfo<T, IndexType> dst,
                                      TensorInfo<T, IndexType> src,
                                      TensorInfo<int64_t, IndexType> indices,
                                      int dstSelectDim,
                                      int srcSelectDim,
                                      IndexType totalSize,
                                      IndexType innerSize,
                                      int64_t srcSelectDimSize) {
  // We stride over the output including the indexed dimension
  // (totalSize), and calculate the destination index point based on that
  for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
       linearIndex < totalSize;
       linearIndex += gridDim.x * blockDim.x) {
    IndexType dstIndex, elementInSlice;
    if (IndexIsMajor) {
      dstIndex = linearIndex / innerSize;
      elementInSlice = linearIndex % innerSize;
    }
    else {
      elementInSlice = linearIndex / innerSize;
      dstIndex = linearIndex % innerSize;
    }
    // Index values are used as-is (no 1-based shift; old "Lua" comment was
    // stale). Assert guards the upper bound only.
    IndexType srcIndex =
      indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(dstIndex, indices)];
    CUDA_KERNEL_ASSERT(srcIndex < srcSelectDimSize);
    IndexType dstOffset =
      IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
    dstOffset += dstIndex * dst.strides[dstSelectDim];
    IndexType srcOffset =
      IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src);
    srcOffset += srcIndex * src.strides[srcSelectDim];
    dst.data[dstOffset] = src.data[srcOffset];
  }
}
// Converts a (possibly negative) flat element index into a storage offset
// into `info`. Negative indices wrap by +size (Python-style); the assert
// requires index in [-size, size). Dims selects the IndexToOffset
// specialization (e.g. contiguous fast path vs. general strided).
template <int Dims, typename T, typename IndexType>
__device__ __forceinline__ IndexType indexToOffset(
    const TensorInfo<T, IndexType>& info,
    int64_t index,
    IndexType size)
{
  IndexType linearIndex = static_cast<IndexType>(index);
  CUDA_KERNEL_ASSERT(linearIndex < size && linearIndex >= -size);
  if (linearIndex < 0) {
    linearIndex += size;
  }
  return IndexToOffset<T, IndexType, Dims>::get(linearIndex, info);
}
// Pointwise functor: validates that *in lies in [-size, size) and writes the
// equivalent non-negative index to *out (negative values wrap by +size).
struct WrapIndexOp {
  WrapIndexOp(int64_t size) : size(size) {}
  __device__ __forceinline__ void operator()(int64_t* out, int64_t* in) {
    auto idx = *in;
    CUDA_KERNEL_ASSERT(idx < size && idx >= -size);
    *out = idx < 0 ? idx + size : idx;
  }
  int64_t size;  // dimension length used for wrapping and bounds checking
};
// Functor for `take`: *out = info.data[wrap(*index)], gathering from `info`
// viewed as a flat array of `numel` elements. The two unused int64_t* ctor
// parameters exist only so all take/put ops share one construction signature
// in dispatchTakePutImpl.
template <typename T, typename IndexType, int Dims>
struct TensorTakeOp {
  TensorTakeOp(TensorInfo<T, IndexType> info, IndexType numel, int64_t*, int64_t*)
    : info(info), numel(numel) {}
  __device__ __forceinline__ void operator()(T* out, int64_t* index) {
    auto offset = indexToOffset<Dims>(info, *index, numel);
    *out = info.data[offset];
  }
  const TensorInfo<T, IndexType> info;
  IndexType numel;
};
// Functor for `put_` (accumulate=false): info.data[wrap(*index)] = *value.
// Writes are unordered, so duplicate indices leave an unspecified winner.
// Unused int64_t* ctor params keep the signature uniform with the other ops.
template <typename T, typename IndexType, int Dims>
struct TensorPutOp {
  TensorPutOp(TensorInfo<T, IndexType> info, IndexType numel, int64_t*, int64_t*)
    : info(info), numel(numel) {}
  __device__ __forceinline__ void operator()(T* value, int64_t* index) {
    auto offset = indexToOffset<Dims>(info, *index, numel);
    info.data[offset] = *value;
  }
  const TensorInfo<T, IndexType> info;
  IndexType numel;
};
// Functor for `put_` with accumulate=true. Presumes the index buffer
// [start, end) is contiguous and sorted so equal indices form runs: only the
// thread holding the FIRST element of a run performs the writes, summing the
// entire run serially — this avoids atomics on duplicate indices.
// NOTE(review): correctness depends on the caller sorting (value, index)
// pairs before dispatch — confirm at the call site.
template <typename T, typename IndexType, int Dims>
struct TensorPutAccumulateOp {
  TensorPutAccumulateOp(TensorInfo<T, IndexType> info, IndexType numel, int64_t* start, int64_t* end)
    : info(info), numel(numel), start(start), end(end) {}
  __device__ __forceinline__ void operator()(T* value, int64_t* index) {
    // Act only if this is the first occurrence of this index value.
    if (index == start || *index != *(index - 1)) {
      int64_t linear_index = *index;
      auto offset = indexToOffset<Dims>(info, linear_index, numel);
      do {
        info.data[offset] = THCNumerics<T>::add(info.data[offset], *value);
        index++;
        value++;
      } while (index != end && *index == linear_index);
    }
  }
  const TensorInfo<T, IndexType> info;
  IndexType numel;
  int64_t* start;  // first element of the contiguous index buffer
  int64_t* end;    // one past the last element of the index buffer
};
// Host dispatcher: collapses dims of `a`, then instantiates Op with the
// contiguous fast-path specialization (Dims = -2) or the general strided one
// (Dims = -1) and applies it pointwise over (b, index).
template<typename IndexType, typename T, template<class, class, int> class Op, typename TensorType>
void dispatchTakePutImpl(THCState *state, TensorType *a, TensorType *b, THCudaLongTensor *index) {
  // These are only valid if index is contiguous
  auto start = THCudaLongTensor_data(state, index);
  auto end = start + THCudaLongTensor_numel(state, index);
  auto aInfo = getTensorInfo<T, TensorType, IndexType>(state, a);
  aInfo.collapseDims();
  auto numel = THCTensor_nElement(state, a);
  if (aInfo.isContiguous()) {
    auto op = Op<T, IndexType, -2>(aInfo, numel, start, end);
    THC_pointwiseApply2<T, int64_t>(state, b, index, op);
  } else {
    auto op = Op<T, IndexType, -1>(aInfo, numel, start, end);
    THC_pointwiseApply2<T, int64_t>(state, b, index, op);
  }
}
// Host dispatcher: chooses 32-bit offset arithmetic when every offset into
// `a` fits in int32 (cheaper on device), otherwise falls back to 64-bit.
template<typename T, template<class, class, int> class Op, typename TensorType>
void dispatchTakePut(THCState *state, TensorType *a, TensorType *b, THCudaLongTensor *index) {
  if (THCTensor_canUse32BitIndexMath(state, a, INT_MAX)) {
    dispatchTakePutImpl<int32_t, T, Op>(state, a, b, index);
  } else {
    dispatchTakePutImpl<int64_t, T, Op>(state, a, b, index);
  }
}
#include <THH/generic/THHTensorIndex.hip>
#include <THH/THHGenerateAllTypes.h>
#include <THH/generic/THHTensorIndex.hip>
#include <THH/THHGenerateBoolType.h>
| 39c22777363ff5333b1c3b1015e254e003bf684c.cu | #include <THC/THC.h>
#include <THC/THCTensorMath.h>
#include <THC/THCGeneral.h>
#include <THC/THCBlas.h>
#include <THC/THCTensorCopy.h>
#include <TH/THHalf.h>
#include <THC/THCApply.cuh>
#include <THC/THCReduce.cuh>
#include <THC/THCDeviceUtils.cuh>
#include <THC/THCNumerics.cuh>
#include <THC/THCAtomics.cuh>
#include <THC/THCThrustAllocator.cuh>
#include <THC/THCTensorSort.cuh>
#include <THC/THCTensor.hpp>
#include <THC/THCStorage.hpp>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <algorithm> // for std::min
#include <c10/macros/Macros.h>
#include <ATen/WrapDimUtils.h>
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexCopyLargeIndex kernel is a better choice to increase
// parallelism.
// indexCopySmallIndex: dst.select(dstCopyDim, indices[j]) =
// src.select(srcCopyDim, j) for every j. Small-index variant: every thread
// loops over all indices so each index value is loaded once and reused for a
// whole inner slice (grid-stride loop over innerSize).
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexCopySmallIndex(TensorInfo<T, IndexType> dst,
                                    TensorInfo<T, IndexType> src,
                                    TensorInfo<int64_t, IndexType> indices,
                                    int dstCopyDim,
                                    int srcCopyDim,
                                    IndexType innerSize,
                                    int64_t dstCopyDimSize) {
  // In order to avoid reloading the index that we are copying, load
  // it once to handle all of the points that are being selected, so
  // it can be reused as much as possible. This kernel is chosen when
  // this is a good choice (small number of chosen indices), since
  // re-accessing indices in addition to src elements can be slow.
  for (IndexType srcIndex = 0; srcIndex < indices.sizes[0]; ++srcIndex) {
    // Index values are used as-is (no 1-based shift; old "Lua" comment was
    // stale). Assert guards the upper bound only.
    IndexType dstIndex =
      indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(srcIndex, indices)];
    CUDA_KERNEL_ASSERT(dstIndex < dstCopyDimSize);
    // We stride over the output ignoring the indexed dimension
    // (innerSize), whose offset calculation is handled differently
    for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
         linearIndex < innerSize;
         linearIndex += gridDim.x * blockDim.x) {
      IndexType dstOffset =
        IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
      dstOffset += dstIndex * dst.strides[dstCopyDim];
      IndexType srcOffset =
        IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src);
      srcOffset += srcIndex * src.strides[srcCopyDim];
      dst.data[dstOffset] = src.data[srcOffset];
    }
  }
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexCopySmallIndex kernel is a better choice to reduce memory
// accesses.
// indexCopyLargeIndex: same copy as indexCopySmallIndex, but parallelized
// over (index, element-in-slice) pairs via one grid-stride loop over
// totalSize; IndexIsMajor selects the decomposition of linearIndex.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim,
          bool IndexIsMajor>
__global__ void indexCopyLargeIndex(TensorInfo<T, IndexType> dst,
                                    TensorInfo<T, IndexType> src,
                                    TensorInfo<int64_t, IndexType> indices,
                                    int dstCopyDim,
                                    int srcCopyDim,
                                    IndexType totalSize,
                                    IndexType innerSize,
                                    int64_t dstCopyDimSize) {
  // We stride over the output including the indexed dimension
  // (totalSize), and calculate the destination index point based on that
  for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
       linearIndex < totalSize;
       linearIndex += gridDim.x * blockDim.x) {
    IndexType srcIndex, elementInSlice;
    if (IndexIsMajor) {
      srcIndex = linearIndex / innerSize;
      elementInSlice = linearIndex % innerSize;
    }
    else {
      elementInSlice = linearIndex / innerSize;
      srcIndex = linearIndex % innerSize;
    }
    // Index values are used as-is (no 1-based shift; old "Lua" comment was
    // stale). Assert guards the upper bound only.
    IndexType dstIndex =
      indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(srcIndex, indices)];
    CUDA_KERNEL_ASSERT(dstIndex < dstCopyDimSize);
    IndexType dstOffset =
      IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
    dstOffset += dstIndex * dst.strides[dstCopyDim];
    IndexType srcOffset =
      IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src);
    srcOffset += srcIndex * src.strides[srcCopyDim];
    dst.data[dstOffset] = src.data[srcOffset];
  }
}
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexAddLargeIndex kernel is a better choice to increase
// parallelism.
// indexAddSmallIndex: dst.select(dstAddDim, indices[j]) += src.select(srcAddDim, j)
// for every j. Small-index variant: every thread loops over all indices so
// each index is loaded once and reused for an entire inner slice
// (grid-stride loop over innerSize elements per slice).
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexAddSmallIndex(TensorInfo<T, IndexType> dst,
                                   TensorInfo<T, IndexType> src,
                                   TensorInfo<int64_t, IndexType> indices,
                                   int dstAddDim,
                                   int srcAddDim,
                                   IndexType innerSize,
                                   int64_t dstAddDimSize) {
  // In order to avoid reloading the index that we are copying, load
  // it once to handle all of the points that are being selected, so
  // it can be reused as much as possible. This kernel is chosen when
  // this is a good choice (small number of chosen indices), since
  // re-accessing indices in addition to src elements can be slow.
  for (IndexType srcIndex = 0; srcIndex < indices.sizes[0]; ++srcIndex) {
    // Index values are used as-is (no 1-based shift; old "Lua" comment was
    // stale). Assert guards the upper bound only.
    IndexType dstIndex =
      indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(srcIndex, indices)];
    CUDA_KERNEL_ASSERT(dstIndex < dstAddDimSize);
    // We stride over the output ignoring the indexed dimension
    // (innerSize), whose offset calculation is handled differently
    for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
         linearIndex < innerSize;
         linearIndex += gridDim.x * blockDim.x) {
      IndexType dstOffset =
        IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
      dstOffset += dstIndex * dst.strides[dstAddDim];
      IndexType srcOffset =
        IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src);
      srcOffset += srcIndex * src.strides[srcAddDim];
      // Atomic because several indices may target the same destination slice.
      gpuAtomicAdd(&dst.data[dstOffset], src.data[srcOffset]);
    }
  }
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexAddSmallIndex kernel is a better choice to reduce memory
// accesses.
// indexAddLargeIndex: same scatter-add as indexAddSmallIndex, parallelized
// over (index, element-in-slice) pairs with one grid-stride loop over
// totalSize; IndexIsMajor selects the decomposition of linearIndex.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim,
          bool IndexIsMajor>
__global__ void indexAddLargeIndex(TensorInfo<T, IndexType> dst,
                                   TensorInfo<T, IndexType> src,
                                   TensorInfo<int64_t, IndexType> indices,
                                   int dstAddDim,
                                   int srcAddDim,
                                   IndexType totalSize,
                                   IndexType innerSize,
                                   int64_t dstAddDimSize) {
  // We stride over the output including the indexed dimension
  // (totalSize), and calculate the destination index point based on that
  for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
       linearIndex < totalSize;
       linearIndex += gridDim.x * blockDim.x) {
    IndexType srcIndex, elementInSlice;
    if (IndexIsMajor) {
      srcIndex = linearIndex / innerSize;
      elementInSlice = linearIndex % innerSize;
    }
    else {
      elementInSlice = linearIndex / innerSize;
      srcIndex = linearIndex % innerSize;
    }
    // Index values are used as-is (no 1-based shift; old "Lua" comment was
    // stale). Assert guards the upper bound only.
    IndexType dstIndex =
      indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(srcIndex, indices)];
    CUDA_KERNEL_ASSERT(dstIndex < dstAddDimSize);
    IndexType dstOffset =
      IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
    dstOffset += dstIndex * dst.strides[dstAddDim];
    IndexType srcOffset =
      IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src);
    srcOffset += srcIndex * src.strides[srcAddDim];
    // Atomic because several indices may target the same destination slice.
    gpuAtomicAdd(&dst.data[dstOffset], src.data[srcOffset]);
  }
}
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexFillLargeIndex kernel is a better choice to increase
// parallelism.
// indexFillSmallIndex: dst.select(dstFillDim, indices[j]) = val for every j.
// Small-index variant: every thread loops over all indices so each index is
// loaded once and reused for a whole inner slice (grid-stride over innerSize).
template <typename T, typename IndexType, int DstDim, int IdxDim>
__global__ void indexFillSmallIndex(TensorInfo<T, IndexType> dst,
                                    TensorInfo<int64_t, IndexType> indices,
                                    int dstFillDim,
                                    IndexType innerSize,
                                    int64_t dstFillDimSize,
                                    T val) {
  // In order to avoid reloading the index that we are copying, load
  // it once to handle all of the points that are being selected, so
  // it can be reused as much as possible. This kernel is chosen when
  // this is a good choice (small number of chosen indices), since
  // re-accessing indices in addition to src elements can be slow.
  for (IndexType dstIndex = 0; dstIndex < indices.sizes[0]; ++dstIndex) {
    // Index values are used as-is (no 1-based shift; old "Lua" comment was
    // stale). Assert guards the upper bound only.
    IndexType dstIndex_ =
      indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(dstIndex, indices)];
    CUDA_KERNEL_ASSERT(dstIndex_ < dstFillDimSize);
    // We stride over the output ignoring the indexed dimension
    // (innerSize), whose offset calculation is handled differently
    for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
         linearIndex < innerSize;
         linearIndex += gridDim.x * blockDim.x) {
      IndexType dstOffset =
        IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
      dstOffset += dstIndex_ * dst.strides[dstFillDim];
      dst.data[dstOffset] = val;
    }
  }
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexFillSmallIndex kernel is a better choice to reduce memory
// accesses.
// indexFillLargeIndex: same fill as indexFillSmallIndex, parallelized over
// (index, element-in-slice) pairs via one grid-stride loop over totalSize;
// IndexIsMajor selects the decomposition of linearIndex.
template <typename T, typename IndexType, int DstDim, int IdxDim,
          bool IndexIsMajor>
__global__ void indexFillLargeIndex(TensorInfo<T, IndexType> dst,
                                    TensorInfo<int64_t, IndexType> indices,
                                    int dstFillDim,
                                    IndexType totalSize,
                                    IndexType innerSize,
                                    int64_t dstFillDimSize,
                                    T val) {
  // We stride over the output including the indexed dimension
  // (totalSize), and calculate the destination index point based on that
  for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
       linearIndex < totalSize;
       linearIndex += gridDim.x * blockDim.x) {
    IndexType dstIndex, elementInSlice;
    if (IndexIsMajor) {
      dstIndex = linearIndex / innerSize;
      elementInSlice = linearIndex % innerSize;
    }
    else {
      elementInSlice = linearIndex / innerSize;
      dstIndex = linearIndex % innerSize;
    }
    // Index values are used as-is (no 1-based shift; old "Lua" comment was
    // stale). Assert guards the upper bound only.
    IndexType dstIndex_ =
      indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(dstIndex, indices)];
    CUDA_KERNEL_ASSERT(dstIndex_ < dstFillDimSize);
    IndexType dstOffset =
      IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
    dstOffset += dstIndex_ * dst.strides[dstFillDim];
    dst.data[dstOffset] = val;
  }
}
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexSelectLargeIndex kernel is a better choice to increase
// parallelism.
// indexSelectSmallIndex: gather — dst.select(dstSelectDim, j) =
// src.select(srcSelectDim, indices[j]) for every j. Small-index variant:
// every thread loops over all indices so each index value is loaded once and
// reused for a whole inner slice (grid-stride over innerSize).
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexSelectSmallIndex(TensorInfo<T, IndexType> dst,
                                      TensorInfo<T, IndexType> src,
                                      TensorInfo<int64_t, IndexType> indices,
                                      int dstSelectDim,
                                      int srcSelectDim,
                                      IndexType innerSize,
                                      int64_t srcSelectDimSize) {
  // In order to avoid reloading the index that we are copying, load
  // it once to handle all of the points that are being selected, so
  // it can be reused as much as possible. This kernel is chosen when
  // this is a good choice (small number of chosen indices), since
  // re-accessing indices in addition to src elements can be slow.
  for (IndexType dstIndex = 0; dstIndex < indices.sizes[0]; ++dstIndex) {
    // Index values are used as-is (no 1-based shift; old "Lua" comment was
    // stale). Assert guards the upper bound only.
    IndexType srcIndex =
      indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(dstIndex, indices)];
    CUDA_KERNEL_ASSERT(srcIndex < srcSelectDimSize);
    // We stride over the output ignoring the indexed dimension
    // (innerSize), whose offset calculation is handled differently
    for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
         linearIndex < innerSize;
         linearIndex += gridDim.x * blockDim.x) {
      IndexType dstOffset =
        IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
      dstOffset += dstIndex * dst.strides[dstSelectDim];
      IndexType srcOffset =
        IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src);
      srcOffset += srcIndex * src.strides[srcSelectDim];
      dst.data[dstOffset] = src.data[srcOffset];
    }
  }
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexSelectSmallIndex kernel is a better choice to reduce memory
// accesses.
// indexSelectLargeIndex: same gather as indexSelectSmallIndex, parallelized
// over (index, element-in-slice) pairs via one grid-stride loop over
// totalSize; IndexIsMajor selects the decomposition of linearIndex.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim,
          bool IndexIsMajor>
__global__ void indexSelectLargeIndex(TensorInfo<T, IndexType> dst,
                                      TensorInfo<T, IndexType> src,
                                      TensorInfo<int64_t, IndexType> indices,
                                      int dstSelectDim,
                                      int srcSelectDim,
                                      IndexType totalSize,
                                      IndexType innerSize,
                                      int64_t srcSelectDimSize) {
  // We stride over the output including the indexed dimension
  // (totalSize), and calculate the destination index point based on that
  for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
       linearIndex < totalSize;
       linearIndex += gridDim.x * blockDim.x) {
    IndexType dstIndex, elementInSlice;
    if (IndexIsMajor) {
      dstIndex = linearIndex / innerSize;
      elementInSlice = linearIndex % innerSize;
    }
    else {
      elementInSlice = linearIndex / innerSize;
      dstIndex = linearIndex % innerSize;
    }
    // Index values are used as-is (no 1-based shift; old "Lua" comment was
    // stale). Assert guards the upper bound only.
    IndexType srcIndex =
      indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(dstIndex, indices)];
    CUDA_KERNEL_ASSERT(srcIndex < srcSelectDimSize);
    IndexType dstOffset =
      IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
    dstOffset += dstIndex * dst.strides[dstSelectDim];
    IndexType srcOffset =
      IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src);
    srcOffset += srcIndex * src.strides[srcSelectDim];
    dst.data[dstOffset] = src.data[srcOffset];
  }
}
// Converts a (possibly negative) flat element index into a storage offset
// into `info`. Negative indices wrap by +size (Python-style); the assert
// requires index in [-size, size). Dims selects the IndexToOffset
// specialization (e.g. contiguous fast path vs. general strided).
template <int Dims, typename T, typename IndexType>
__device__ __forceinline__ IndexType indexToOffset(
    const TensorInfo<T, IndexType>& info,
    int64_t index,
    IndexType size)
{
  IndexType linearIndex = static_cast<IndexType>(index);
  CUDA_KERNEL_ASSERT(linearIndex < size && linearIndex >= -size);
  if (linearIndex < 0) {
    linearIndex += size;
  }
  return IndexToOffset<T, IndexType, Dims>::get(linearIndex, info);
}
// Pointwise functor: validates that *in lies in [-size, size) and writes the
// equivalent non-negative index to *out (negative values wrap by +size).
struct WrapIndexOp {
  WrapIndexOp(int64_t size) : size(size) {}
  __device__ __forceinline__ void operator()(int64_t* out, int64_t* in) {
    auto idx = *in;
    CUDA_KERNEL_ASSERT(idx < size && idx >= -size);
    *out = idx < 0 ? idx + size : idx;
  }
  int64_t size;  // dimension length used for wrapping and bounds checking
};
// Functor for `take`: *out = info.data[wrap(*index)], gathering from `info`
// viewed as a flat array of `numel` elements. The two unused int64_t* ctor
// parameters keep the construction signature uniform with the put ops.
template <typename T, typename IndexType, int Dims>
struct TensorTakeOp {
  TensorTakeOp(TensorInfo<T, IndexType> info, IndexType numel, int64_t*, int64_t*)
    : info(info), numel(numel) {}
  __device__ __forceinline__ void operator()(T* out, int64_t* index) {
    auto offset = indexToOffset<Dims>(info, *index, numel);
    *out = info.data[offset];
  }
  const TensorInfo<T, IndexType> info;
  IndexType numel;
};
// Functor for `put_` (accumulate=false): info.data[wrap(*index)] = *value.
// Writes are unordered, so duplicate indices leave an unspecified winner.
// Unused int64_t* ctor params keep the signature uniform with the other ops.
template <typename T, typename IndexType, int Dims>
struct TensorPutOp {
  TensorPutOp(TensorInfo<T, IndexType> info, IndexType numel, int64_t*, int64_t*)
    : info(info), numel(numel) {}
  __device__ __forceinline__ void operator()(T* value, int64_t* index) {
    auto offset = indexToOffset<Dims>(info, *index, numel);
    info.data[offset] = *value;
  }
  const TensorInfo<T, IndexType> info;
  IndexType numel;
};
// Functor for `put_` with accumulate=true. Presumes the index buffer
// [start, end) is contiguous and sorted so equal indices form runs: only the
// thread holding the FIRST element of a run performs the writes, summing the
// entire run serially — this avoids atomics on duplicate indices.
// NOTE(review): correctness depends on the caller sorting (value, index)
// pairs before dispatch — confirm at the call site.
template <typename T, typename IndexType, int Dims>
struct TensorPutAccumulateOp {
  TensorPutAccumulateOp(TensorInfo<T, IndexType> info, IndexType numel, int64_t* start, int64_t* end)
    : info(info), numel(numel), start(start), end(end) {}
  __device__ __forceinline__ void operator()(T* value, int64_t* index) {
    // Act only if this is the first occurrence of this index value.
    if (index == start || *index != *(index - 1)) {
      int64_t linear_index = *index;
      auto offset = indexToOffset<Dims>(info, linear_index, numel);
      do {
        info.data[offset] = THCNumerics<T>::add(info.data[offset], *value);
        index++;
        value++;
      } while (index != end && *index == linear_index);
    }
  }
  const TensorInfo<T, IndexType> info;
  IndexType numel;
  int64_t* start;  // first element of the contiguous index buffer
  int64_t* end;    // one past the last element of the index buffer
};
// Host dispatcher: collapses dims of `a`, then instantiates Op with the
// contiguous fast-path specialization (Dims = -2) or the general strided one
// (Dims = -1) and applies it pointwise over (b, index).
template<typename IndexType, typename T, template<class, class, int> class Op, typename TensorType>
void dispatchTakePutImpl(THCState *state, TensorType *a, TensorType *b, THCudaLongTensor *index) {
  // These are only valid if index is contiguous
  auto start = THCudaLongTensor_data(state, index);
  auto end = start + THCudaLongTensor_numel(state, index);
  auto aInfo = getTensorInfo<T, TensorType, IndexType>(state, a);
  aInfo.collapseDims();
  auto numel = THCTensor_nElement(state, a);
  if (aInfo.isContiguous()) {
    auto op = Op<T, IndexType, -2>(aInfo, numel, start, end);
    THC_pointwiseApply2<T, int64_t>(state, b, index, op);
  } else {
    auto op = Op<T, IndexType, -1>(aInfo, numel, start, end);
    THC_pointwiseApply2<T, int64_t>(state, b, index, op);
  }
}
// Host dispatcher: chooses 32-bit offset arithmetic when every offset into
// `a` fits in int32 (cheaper on device), otherwise falls back to 64-bit.
template<typename T, template<class, class, int> class Op, typename TensorType>
void dispatchTakePut(THCState *state, TensorType *a, TensorType *b, THCudaLongTensor *index) {
  if (THCTensor_canUse32BitIndexMath(state, a, INT_MAX)) {
    dispatchTakePutImpl<int32_t, T, Op>(state, a, b, index);
  } else {
    dispatchTakePutImpl<int64_t, T, Op>(state, a, b, index);
  }
}
#include <THC/generic/THCTensorIndex.cu>
#include <THC/THCGenerateAllTypes.h>
#include <THC/generic/THCTensorIndex.cu>
#include <THC/THCGenerateBoolType.h>
|
230af5db932399e4d597f47142314026696af918.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Auto-generated floating-point stress kernel: runs var_1*var_2 iterations of
// arbitrary arithmetic on `comp` and prints the final value. Do not
// hand-reorder the math — the printed result depends on exact operation
// order and rounding. Both loop nests reuse the name `i` (inner shadows
// outer); that is intentional generator output.
__global__
void compute(float comp, int var_1,int var_2,int var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26) {
for (int i=0; i < var_1; ++i) {
  for (int i=0; i < var_2; ++i) {
    comp += (var_4 - (-1.6958E-35f + var_5 + var_6 * +1.3840E36f));
    float tmp_1 = coshf((-1.1638E21f * (-1.3328E-36f - +1.3574E-25f * +1.5763E35f * var_7 + -1.6336E0f)));
    float tmp_2 = -0.0f;
    comp += tmp_2 + tmp_1 + +1.2270E-41f + +1.1817E34f;
    // Exact float equality against a computed expression: almost never true,
    // but the generator emits it deliberately to exercise the branch.
    if (comp == (-0.0f * var_8 - cosf(log10f(+0.0f / (var_9 / var_10 / +1.1333E36f / var_11 - var_12))))) {
      comp += expf((var_13 / (var_14 + +0.0f + var_15 * -1.2341E-42f)));
      float tmp_3 = var_16 * (var_17 + -1.4451E-37f * (var_18 / coshf((var_19 * var_20 - +1.9706E5f / var_21))));
      comp = tmp_3 * tanhf(var_22 * -1.5717E6f);
    }
    for (int i=0; i < var_3; ++i) {
      comp += (var_23 + -1.5369E-42f * (var_24 - var_25 / var_26));
    }
  }
}
   printf("%.17g\n", comp);
}
/* Heap-allocates a 10-element float array with every slot set to v.
   The caller owns the returned buffer and must free() it. */
float* initPointer(float v) {
  enum { kCount = 10 };
  float *buf = (float*) malloc(kCount * sizeof(float));
  int idx = 0;
  while (idx < kCount) {
    buf[idx] = v;
    ++idx;
  }
  return buf;
}
/* Entry point for the generated HIP test. Parses 27 numeric program
   arguments and launches the `compute` kernel on a single thread.
   Fixes: argv[1..27] was previously read without checking argc (undefined
   behavior when fewer arguments are supplied), and neither the kernel
   launch nor the device synchronization was checked for errors. */
int main(int argc, char** argv) {
  if (argc < 28) {
    fprintf(stderr, "usage: %s <27 numeric arguments>\n", argv[0]);
    return 1;
  }
  /* Program variables */
  float tmp_1 = atof(argv[1]);
  int tmp_2 = atoi(argv[2]);
  int tmp_3 = atoi(argv[3]);
  int tmp_4 = atoi(argv[4]);
  float tmp_5 = atof(argv[5]);
  float tmp_6 = atof(argv[6]);
  float tmp_7 = atof(argv[7]);
  float tmp_8 = atof(argv[8]);
  float tmp_9 = atof(argv[9]);
  float tmp_10 = atof(argv[10]);
  float tmp_11 = atof(argv[11]);
  float tmp_12 = atof(argv[12]);
  float tmp_13 = atof(argv[13]);
  float tmp_14 = atof(argv[14]);
  float tmp_15 = atof(argv[15]);
  float tmp_16 = atof(argv[16]);
  float tmp_17 = atof(argv[17]);
  float tmp_18 = atof(argv[18]);
  float tmp_19 = atof(argv[19]);
  float tmp_20 = atof(argv[20]);
  float tmp_21 = atof(argv[21]);
  float tmp_22 = atof(argv[22]);
  float tmp_23 = atof(argv[23]);
  float tmp_24 = atof(argv[24]);
  float tmp_25 = atof(argv[25]);
  float tmp_26 = atof(argv[26]);
  float tmp_27 = atof(argv[27]);
  hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27);
  /* Kernel launches do not report errors directly; check explicitly. */
  hipError_t err = hipGetLastError();
  if (err != hipSuccess) {
    fprintf(stderr, "kernel launch failed: %s\n", hipGetErrorString(err));
    return 1;
  }
  err = hipDeviceSynchronize();
  if (err != hipSuccess) {
    fprintf(stderr, "kernel execution failed: %s\n", hipGetErrorString(err));
    return 1;
  }
  return 0;
}
| 230af5db932399e4d597f47142314026696af918.cu |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Auto-generated floating-point stress kernel: runs var_1*var_2 iterations of
// arbitrary arithmetic on `comp` and prints the final value. Do not
// hand-reorder the math — the printed result depends on exact operation
// order and rounding. Both loop nests reuse the name `i` (inner shadows
// outer); that is intentional generator output.
__global__
void compute(float comp, int var_1,int var_2,int var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26) {
for (int i=0; i < var_1; ++i) {
  for (int i=0; i < var_2; ++i) {
    comp += (var_4 - (-1.6958E-35f + var_5 + var_6 * +1.3840E36f));
    float tmp_1 = coshf((-1.1638E21f * (-1.3328E-36f - +1.3574E-25f * +1.5763E35f * var_7 + -1.6336E0f)));
    float tmp_2 = -0.0f;
    comp += tmp_2 + tmp_1 + +1.2270E-41f + +1.1817E34f;
    // Exact float equality against a computed expression: almost never true,
    // but the generator emits it deliberately to exercise the branch.
    if (comp == (-0.0f * var_8 - cosf(log10f(+0.0f / (var_9 / var_10 / +1.1333E36f / var_11 - var_12))))) {
      comp += expf((var_13 / (var_14 + +0.0f + var_15 * -1.2341E-42f)));
      float tmp_3 = var_16 * (var_17 + -1.4451E-37f * (var_18 / coshf((var_19 * var_20 - +1.9706E5f / var_21))));
      comp = tmp_3 * tanhf(var_22 * -1.5717E6f);
    }
    for (int i=0; i < var_3; ++i) {
      comp += (var_23 + -1.5369E-42f * (var_24 - var_25 / var_26));
    }
  }
}
   printf("%.17g\n", comp);
}
/* Allocate a 10-element host float array with every slot set to `v`.
 * Returns NULL when the allocation fails (previously the code wrote through
 * an unchecked malloc result). The caller owns the returned memory. */
float* initPointer(float v) {
  float *ret = (float*) malloc(sizeof(float)*10);
  if (ret == NULL) {
    return NULL;  // out of memory: let the caller decide, don't crash here
  }
  for (int i=0; i < 10; ++i)
    ret[i] = v;
  return ret;
}
/* Entry point for the generated CUDA stress test.
 * Parses exactly 27 numeric command-line arguments, forwards them to the
 * serial `compute` kernel, and waits for its printf output to flush. */
int main(int argc, char** argv) {
  /* Program variables */
  // The kernel consumes argv[1]..argv[27]; reading past argc is undefined
  // behavior, so validate the argument count up front.
  if (argc < 28) {
    fprintf(stderr, "usage: %s <27 numeric arguments>\n", argv[0]);
    return 1;
  }
  float tmp_1 = atof(argv[1]);
  int tmp_2 = atoi(argv[2]);
  int tmp_3 = atoi(argv[3]);
  int tmp_4 = atoi(argv[4]);
  float tmp_5 = atof(argv[5]);
  float tmp_6 = atof(argv[6]);
  float tmp_7 = atof(argv[7]);
  float tmp_8 = atof(argv[8]);
  float tmp_9 = atof(argv[9]);
  float tmp_10 = atof(argv[10]);
  float tmp_11 = atof(argv[11]);
  float tmp_12 = atof(argv[12]);
  float tmp_13 = atof(argv[13]);
  float tmp_14 = atof(argv[14]);
  float tmp_15 = atof(argv[15]);
  float tmp_16 = atof(argv[16]);
  float tmp_17 = atof(argv[17]);
  float tmp_18 = atof(argv[18]);
  float tmp_19 = atof(argv[19]);
  float tmp_20 = atof(argv[20]);
  float tmp_21 = atof(argv[21]);
  float tmp_22 = atof(argv[22]);
  float tmp_23 = atof(argv[23]);
  float tmp_24 = atof(argv[24]);
  float tmp_25 = atof(argv[25]);
  float tmp_26 = atof(argv[26]);
  float tmp_27 = atof(argv[27]);

  compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27);
  // Block until the kernel finishes so its device-side printf is flushed
  // before the process exits.
  cudaDeviceSynchronize();
  return 0;
}
|
6887358ad767001c24c8fb42b4675380af06ddea.hip | // !!! This is a file automatically generated by hipify!!!
/*************************************************************************
* Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
*
* See LICENSE.txt for license information
************************************************************************/
#include "common.h"
#include <pthread.h>
#include <cstdio>
#include <getopt.h>
#include <libgen.h>
#include "hip/hip_runtime.h"
int test_ncclVersion = 0; // init'd with ncclGetVersion()
#if NCCL_MAJOR >= 2
ncclDataType_t test_types[ncclNumTypes] = {ncclInt8, ncclUint8, ncclInt32, ncclUint32, ncclInt64, ncclUint64, ncclHalf, ncclFloat, ncclDouble,
#if defined(__CUDA_BF16_TYPES_EXIST__) && NCCL_VERSION_CODE >= NCCL_VERSION(2,10,0)
ncclBfloat16
#endif
};
const char *test_typenames[ncclNumTypes] = {"int8", "uint8", "int32", "uint32", "int64", "uint64", "half", "float", "double",
#if defined(__CUDA_BF16_TYPES_EXIST__) && NCCL_VERSION_CODE >= NCCL_VERSION(2,10,0)
"bfloat16"
#endif
};
#if defined(__CUDA_BF16_TYPES_EXIST__) && NCCL_VERSION_CODE >= NCCL_VERSION(2,10,0)
int test_typenum = 10;
#else
int test_typenum = 9;
#endif
#else
ncclDataType_t test_types[ncclNumTypes] = {ncclChar, ncclInt, ncclHalf, ncclFloat, ncclDouble, ncclInt64, ncclUint64};
const char *test_typenames[ncclNumTypes] = {"char", "int", "half", "float", "double", "int64", "uint64"};
int test_typenum = 7;
#endif
#if NCCL_VERSION_CODE >= NCCL_VERSION(2,10,0)
ncclRedOp_t test_ops[ncclNumOps] = {ncclSum, ncclProd, ncclMax, ncclMin, ncclAvg};
const char *test_opnames[ncclNumOps] = {"sum", "prod", "max", "min", "avg"};
int test_opnum = 5;
#else
ncclRedOp_t test_ops[ncclNumOps] = {ncclSum, ncclProd, ncclMax, ncclMin};
const char *test_opnames[ncclNumOps] = {"sum", "prod", "max", "min"};
int test_opnum = 4;
#endif
// Set to 1 only on the thread (proc 0 / thread 0) that is allowed to print.
thread_local int is_main_thread = 0;

// Command line parameter defaults
static int nThreads = 1;
static int nGpus = 1;
static size_t minBytes = 32*1024*1024;
static size_t maxBytes = 32*1024*1024;
static size_t stepBytes = 1*1024*1024;
static size_t stepFactor = 1;
static int datacheck = 1;
static int warmup_iters = 5;
static int iters = 20;
static int agg_iters = 1;
static int ncclop = ncclSum;
static int nccltype = ncclFloat;
static int ncclroot = 0;
static int parallel_init = 0;
static int blocking_coll = 0;
static int cudaGraphLaunches = 0;
// Report average iteration time: (0=RANK0,1=AVG,2=MIN,3=MAX)
static int average = 1;
// Grid size used by the deltaKern validation kernel (and the host-side fold
// of its per-block results in CheckDelta).
#define NUM_BLOCKS 32
// Parse a size string such as "128", "4K", "32m" or "1G" into a byte count.
// Returns -1.0 on malformed input (unknown suffix or no leading number).
static double parsesize(const char *value) {
  long long int units;
  double size;
  // The "%1s" conversion writes the matched character PLUS a terminating
  // NUL, so the destination must hold at least 2 bytes. The previous code
  // used a single `char`, which was a stack-buffer overflow (UB).
  char size_lit[2];

  int count = sscanf(value, "%lf %1s", &size, size_lit);

  switch (count) {
  case 2:
    switch (size_lit[0]) {
    case 'G':
    case 'g':
      units = 1024*1024*1024;
      break;
    case 'M':
    case 'm':
      units = 1024*1024;
      break;
    case 'K':
    case 'k':
      units = 1024;
      break;
    default:
      return -1.0;
    };
    break;
  case 1:
    // No suffix: the value is already in bytes.
    units = 1;
    break;
  default:
    return -1.0;
  }

  return size * units;
}
// Largest element-wise deviation tolerated when validating reduction results
// for the given datatype: looser for low-precision floats, effectively zero
// (1e-200) for all integer types, which must match exactly.
double DeltaMaxValue(ncclDataType_t type) {
  if (type == ncclHalf) return 1e-2;
#if defined(__CUDA_BF16_TYPES_EXIST__)
  if (type == ncclBfloat16) return 1e-2;
#endif
  if (type == ncclFloat) return 1e-5;
  if (type == ncclDouble) return 1e-12;
  // Every remaining (integer) type, and anything unrecognized, falls through
  // to the exact-match tolerance, just as the original switch did.
  return 1e-200;
}
// Absolute difference |b - a|, widened to double for accumulation.
// NOTE(review): the subtraction happens in T first and only the result is
// cast to double — for unsigned integer T this wraps before widening, which
// appears intentional (exact match is required for integers); confirm before
// reordering the cast.
template<typename T> __device__
double absDiff(T a, T b) {
return fabs((double)(b - a));
}
// half has no native operator-; convert both operands to float first.
template<> __device__
double absDiff<half>(half a, half b) {
float x = __half2float(a);
float y = __half2float(b);
return fabs((double)(y-x));
}
// Widen an element to float for debug printing (see deltaKern's DEBUG_PRINT).
template<typename T> __device__
float toFloat(T a) {
return (float)a;
}
// half cannot be cast directly; go through the intrinsic.
template<> __device__
float toFloat(half a) {
return __half2float(a);
}
#if defined(__CUDA_BF16_TYPES_EXIST__)
// bfloat16 likewise needs its dedicated conversion intrinsic.
template<> __device__
float toFloat(__nv_bfloat16 a) {
return __bfloat162float(a);
}
#endif
// Per-block maximum of |A[i]-B[i]| over `count` elements. Each thread scans
// a grid-stride slice keeping a running local maximum, then the block folds
// the per-thread maxima in shared memory; block b's result lands in max[b].
// Assumes the launch uses exactly blockDim.x == BSIZE threads per block.
template<typename T, int BSIZE> __global__
void deltaKern(void* A_, void* B_, size_t count, double* max) {
const T* A = (const T*)A_;
const T* B = (const T*)B_;
__shared__ double temp[BSIZE];
int tid = blockIdx.x*blockDim.x + threadIdx.x;
double locmax = 0.0;
for(size_t i=tid; i<count; i+=blockDim.x*gridDim.x) {

double delta = absDiff(A[i], B[i]);
if( delta > locmax ) {
locmax = delta;
#ifdef DEBUG_PRINT
if (delta > .1) printf("Error at %ld/%ld(%p) : %f != %f\n", i, count, B+i, toFloat(A[i]), toFloat(B[i]));
#endif
}
}

// Tree reduction over shared memory. The loop deliberately stops at
// stride 2; the final temp[0]/temp[1] pair is folded below by thread 0.
tid = threadIdx.x;
temp[tid] = locmax;
for(int stride = BSIZE/2; stride > 1; stride>>=1) {
__syncthreads();
if( tid < stride )
temp[tid] = temp[tid] > temp[tid+stride] ? temp[tid] : temp[tid+stride];
}
__syncthreads();
if( threadIdx.x == 0)
max[blockIdx.x] = temp[0] > temp[1] ? temp[0] : temp[1];
}
// Host wrapper around deltaKern: dispatch on the datatype, synchronize, then
// fold the NUM_BLOCKS per-block maxima into devmax[0] on the host.
// `devmax` must hold at least NUM_BLOCKS doubles and be host-accessible.
testResult_t CheckDelta(void* results, void* expected, size_t count, ncclDataType_t type, double* devmax) {
switch (type) {
#if defined(__CUDA_BF16_TYPES_EXIST__)
case ncclBfloat16:
hipLaunchKernelGGL(( deltaKern<__nv_bfloat16, 512>), dim3(NUM_BLOCKS), dim3(512), 0, 0, results, expected, count, devmax); break;
#endif
case ncclHalf:
hipLaunchKernelGGL(( deltaKern<half, 512>), dim3(NUM_BLOCKS), dim3(512), 0, 0, results, expected, count, devmax); break;
case ncclFloat:
hipLaunchKernelGGL(( deltaKern<float, 512>), dim3(NUM_BLOCKS), dim3(512), 0, 0, results, expected, count, devmax); break;
case ncclDouble:
hipLaunchKernelGGL(( deltaKern<double, 512>), dim3(NUM_BLOCKS), dim3(512), 0, 0, results, expected, count, devmax); break;

// Integer types compare by bit pattern; signedness does not matter for
// equality, so the unsigned kernel instantiation covers both.
case ncclChar:
#if NCCL_MAJOR >= 2
case ncclUint8:
#endif
hipLaunchKernelGGL(( deltaKern<uint8_t, 512>), dim3(NUM_BLOCKS), dim3(512), 0, 0, results, expected, count, devmax); break;
case ncclInt:
#if NCCL_MAJOR >= 2
case ncclUint32:
#endif
hipLaunchKernelGGL(( deltaKern<uint32_t, 512>), dim3(NUM_BLOCKS), dim3(512), 0, 0, results, expected, count, devmax); break;
case ncclInt64:
case ncclUint64:
hipLaunchKernelGGL(( deltaKern<uint64_t, 512>), dim3(NUM_BLOCKS), dim3(512), 0, 0, results, expected, count, devmax); break;
}
CUDACHECK(hipDeviceSynchronize());
for (int i=1; i<NUM_BLOCKS; i++) devmax[0] = ::max(devmax[0], devmax[i]);
return testSuccess;
}
// Deterministic per-element test pattern, parameterized by element offset,
// repetition number and rank so every rank produces distinct, reproducible
// data.
// For integer values, we use values between 0 and 255
template<typename T>
__device__ T testValue(const size_t offset, const int rep, const int rank) {
uint8_t v = (rep+rank+offset) % 256;
return (T)v;
}

// For floating point datatype, we use values between 0 and 1 otherwise the
// Product operation will produce NaNs.
// NOTE(review): the float specialization computes with bare 1.0 (double)
// literals and narrows at return; validation depends on this exact rounding,
// so do not "fix" the literals to 1.0f.
template<>
__device__ double testValue<double>(const size_t offset, const int rep, const int rank) {
return 1.0/(1.0+(double)testValue<int>(offset, rep, rank));
}
template<>
__device__ float testValue<float>(const size_t offset, const int rep, const int rank) {
return 1.0/(1.0+(float)testValue<int>(offset, rep, rank));
}
template<>
__device__ half testValue<half>(const size_t offset, const int rep, const int rank) {
return __float2half(testValue<float>(offset, rep, rank));
}
#if defined(__CUDA_BF16_TYPES_EXIST__)
template<>
__device__ __nv_bfloat16 testValue<__nv_bfloat16>(const size_t offset, const int rep, const int rank) {
return __float2bfloat16(testValue<float>(offset, rep, rank));
}
#endif
// Operations
// Device-side mirrors of the NCCL reduction ops, used to compute expected
// results on the GPU (see InitDataReduceKernel).
template<typename T>
__device__ T ncclOpSum(T a, T b) { return a+b; }
template<typename T>
__device__ T ncclOpProd(T a, T b) { return a*b; }
template<typename T>
__device__ T ncclOpMax(T a, T b) { return a>b ? a : b; }
template<typename T>
__device__ T ncclOpMin(T a, T b) { return a<b ? a : b; }

// Definitions for half
// half lacks native arithmetic/comparison operators, so widen to float.
template<>
__device__ half ncclOpSum(half a, half b) { return __float2half(__half2float(a)+__half2float(b)); }
template<>
__device__ half ncclOpProd(half a, half b) { return __float2half(__half2float(a)*__half2float(b)); }
template<>
__device__ half ncclOpMax(half a, half b) { return __half2float(a)>__half2float(b) ? a : b; }
template<>
__device__ half ncclOpMin(half a, half b) { return __half2float(a)<__half2float(b) ? a : b; }

// Post-reduction step: identity for sum/prod/max/min, divide-by-nranks to
// turn a sum into ncclAvg.
template<typename T>
__device__ T ncclPostOpIdent(T x, int n) { return x; }
template<typename T>
__device__ T ncclPostOpDiv(T x, int n) { return x/n; }
template<>
__device__ half ncclPostOpDiv<half>(half x, int n) { return __float2half(__half2float(x)/n); }
#if defined(__CUDA_BF16_TYPES_EXIST__)
template<>
__device__ __nv_bfloat16 ncclPostOpDiv<__nv_bfloat16>(__nv_bfloat16 x, int n) { return __float2bfloat16(__bfloat162float(x)/n); }
#endif
// Compute, on the device, the expected result of reducing every rank's test
// pattern with Op and then applying PostOp (identity, or /nranks for avg).
// Grid-stride loop: valid for any launch configuration.
template<typename T, T (*Op)(T, T), T(*PostOp)(T,int)>
__global__ void InitDataReduceKernel(T* data, const size_t N, const size_t offset, const int rep, const int nranks) {
for (size_t o=blockIdx.x*blockDim.x+threadIdx.x; o<N; o+=gridDim.x*blockDim.x) {
T val = testValue<T>(o+offset, rep, 0);
for (int i=1; i<nranks; i++) {
val = Op(val, testValue<T>(o+offset, rep, i));
}
data[o] = PostOp(val, nranks);
}
}
#define KERN(type, op, postop) (void*)InitDataReduceKernel<type, op<type>, postop<type> >
#if NCCL_VERSION_CODE >= NCCL_VERSION(2,10,0)
#define OPS(type) \
KERN(type, ncclOpSum, ncclPostOpIdent), \
KERN(type, ncclOpProd, ncclPostOpIdent), \
KERN(type, ncclOpMax, ncclPostOpIdent), \
KERN(type, ncclOpMin, ncclPostOpIdent), \
KERN(type, ncclOpSum/*Avg*/, ncclPostOpDiv)
#else
#define OPS(type) \
KERN(type, ncclOpSum, ncclPostOpIdent), \
KERN(type, ncclOpProd, ncclPostOpIdent), \
KERN(type, ncclOpMax, ncclPostOpIdent), \
KERN(type, ncclOpMin, ncclPostOpIdent)
#endif
static void* const redInitDataKerns[ncclNumOps*ncclNumTypes] = {
OPS(int8_t), OPS(uint8_t), OPS(int32_t), OPS(uint32_t), OPS(int64_t), OPS(uint64_t), OPS(half), OPS(float), OPS(double),
#if defined(__CUDA_BF16_TYPES_EXIST__) && NCCL_VERSION_CODE >= NCCL_VERSION(2,10,0)
OPS(__nv_bfloat16)
#endif
};
// Fill `data` with the expected post-reduction values for (type, op) by
// launching the matching pre-instantiated kernel from redInitDataKerns.
// Launched on the default stream; callers synchronize via completeColl.
testResult_t InitDataReduce(void* data, const size_t count, const size_t offset, ncclDataType_t type, ncclRedOp_t op, const int rep, const int nranks) {
dim3 grid = { 32, 1, 1 };
dim3 block = { 256, 1, 1 };
void* args[5] = { (void*)&data, (void*)&count, (void*)&offset, (void*)&rep, (void*)&nranks };
CUDACHECK(cudaLaunchKernel(redInitDataKerns[type*ncclNumOps+op], grid, block, args, 0, hipStreamDefault));
return testSuccess;
}
// Fill `data` with this rank's deterministic test pattern (grid-stride loop,
// valid for any launch configuration).
template<typename T>
__global__ void InitDataKernel(T* data, const size_t N, const int rep, const int rank) {
for (size_t o=blockIdx.x*blockDim.x+threadIdx.x; o<N; o+=gridDim.x*blockDim.x)
data[o] = testValue<T>(o, rep, rank);
}
static void* const initDataKerns[ncclNumTypes] = {
(void*)InitDataKernel< int8_t>,
(void*)InitDataKernel< uint8_t>,
(void*)InitDataKernel< int32_t>,
(void*)InitDataKernel<uint32_t>,
(void*)InitDataKernel< int64_t>,
(void*)InitDataKernel<uint64_t>,
(void*)InitDataKernel< half>,
(void*)InitDataKernel< float>,
(void*)InitDataKernel< double>,
#if defined(__CUDA_BF16_TYPES_EXIST__) && NCCL_VERSION_CODE >= NCCL_VERSION(2,10,0)
(void*)InitDataKernel<__nv_bfloat16>,
#endif
};
// Typed convenience wrapper around InitDataKernel with a fixed 16x512 launch.
// NOTE(review): InitData below launches through the initDataKerns table
// instead; this template looks unused in the visible code — confirm before
// removing.
template<typename T>
testResult_t InitDataType(void* dest, const size_t N, const int rep, const int rank) {
T* ptr = (T*)dest;
hipLaunchKernelGGL(( InitDataKernel), dim3(16), dim3(512), 0, 0, ptr, N, rep, rank);
return testSuccess;
}
// Fill `data` with rank's test pattern for the given runtime datatype by
// launching the matching entry of the initDataKerns dispatch table.
testResult_t InitData(void* data, const size_t count, ncclDataType_t type, const int rep, const int rank) {
dim3 grid = { 32, 1, 1 };
dim3 block = { 256, 1, 1 };
void* args[4] = { (void*)&data, (void*)&count, (void*)&rep, (void*)&rank };
CUDACHECK(cudaLaunchKernel(initDataKerns[type], grid, block, args, 0, hipStreamDefault));
return testSuccess;
}
// Inter-thread barrier (plus MPI barrier across processes). Threads enter in
// id order: each spins until the shared slot equals its own id, writes id+1,
// and the last thread (after the MPI barrier) resets the slot to release the
// others. Two slots, alternated via barrier_idx, prevent consecutive
// barriers from racing each other.
void Barrier(struct threadArgs* args) {
while (args->barrier[args->barrier_idx] != args->thread) pthread_yield();
args->barrier[args->barrier_idx] = args->thread + 1;
if (args->thread+1 == args->nThreads) {
#ifdef MPI_SUPPORT
MPI_Barrier(MPI_COMM_WORLD);
#endif
args->barrier[args->barrier_idx] = 0;
} else {
while (args->barrier[args->barrier_idx]) pthread_yield();
}
args->barrier_idx=!args->barrier_idx;
}
// Inter-thread/process barrier+allreduce
// Combine `value` across all threads (and, with MPI, processes) according to
// `average`: 0 = keep rank 0's value, 1 = arithmetic mean, 2 = min, 3 = max.
// Uses the same two-slot spin protocol as Barrier(); on return every caller
// holds the combined value.
void Allreduce(struct threadArgs* args, double* value, int average) {
while (args->barrier[args->barrier_idx] != args->thread) pthread_yield();
double val = *value;
if (args->thread > 0) {
double val2 = args->reduce[args->barrier_idx];
if (average == 1) val += val2;
if (average == 2) val = ::min(val, val2);
if (average == 3) val = ::max(val, val2);
}
// average==0: only thread 0 publishes, so rank 0's value wins.
if (average || args->thread == 0) args->reduce[args->barrier_idx] = val;
args->barrier[args->barrier_idx] = args->thread + 1;
if (args->thread+1 == args->nThreads) {
#ifdef MPI_SUPPORT
if (average != 0) {
MPI_Op op = average == 1 ? MPI_SUM : average == 2 ? MPI_MIN : MPI_MAX;
MPI_Allreduce(MPI_IN_PLACE, (void*)&args->reduce[args->barrier_idx], 1, MPI_DOUBLE, op, MPI_COMM_WORLD);
}
#endif
// Sum -> mean across all participants, then reset the other slot and
// release the waiting threads.
if (average == 1) args->reduce[args->barrier_idx] /= args->nProcs*args->nThreads;
args->reduce[1-args->barrier_idx] = 0;
args->barrier[args->barrier_idx] = 0;
} else {
while (args->barrier[args->barrier_idx]) pthread_yield();
}
*value = args->reduce[args->barrier_idx];
args->barrier_idx=!args->barrier_idx;
}
// Validate the received data of every local GPU against the precomputed
// `expected` buffers, recording the worst per-element deviation in *delta
// and bumping args->errors[0] when it exceeds the per-type tolerance scaled
// by (nranks - 1) accumulation steps.
testResult_t CheckData(struct threadArgs* args, ncclDataType_t type, ncclRedOp_t op, int root, int in_place, double *delta) {
size_t count = args->expectedBytes/wordSize(type);
double maxDelta = 0.0;
for (int i=0; i<args->nGpus; i++) {
int device;
int rank = ((args->proc*args->nThreads + args->thread)*args->nGpus + i);
NCCLCHECK(ncclCommCuDevice(args->comms[i], &device));
CUDACHECK(hipSetDevice(device));
// In-place collectives read/write at a rank-dependent offset inside the
// receive buffer.
void *data = in_place ? ((void *)((uintptr_t)args->recvbuffs[i] + args->recvInplaceOffset*rank)) : args->recvbuffs[i];
TESTCHECK(CheckDelta(data , args->expected[i], count, type, args->deltaHost));
maxDelta = ::max(*(args->deltaHost), maxDelta);
#ifdef DEBUG_PRINT
if (rank == 0) {
int *expectedHost = (int *)malloc(args->expectedBytes);
int *dataHost = (int *)malloc(args->expectedBytes);

hipMemcpy(expectedHost, args->expected[0], args->expectedBytes, hipMemcpyDeviceToHost);
printf("\n Expected: ");
for(int j=0; j<args->expectedBytes/sizeof(int); j++) {
printf("%d:%d ", j, expectedHost[j]);
}
printf("\n");

hipMemcpy(dataHost, data, args->expectedBytes, hipMemcpyDeviceToHost);
printf("\n Actual: ");
for (int j=0; j<args->expectedBytes/sizeof(int); j++) {
printf("%d:%d ", j, dataHost[j]);
}
printf("\n");
free(expectedHost);
free(dataHost);
}
#endif
}
double nranks = args->nProcs*args->nThreads*args->nGpus;
if (args->reportErrors && maxDelta > DeltaMaxValue(type)*(nranks - 1)) args->errors[0]++;
*delta = maxDelta;
return testSuccess;
}
// Poll all streams until every one has drained, yielding the CPU while idle
// so NCCL's own threads can run. When `comms` is provided (NCCL >= 2.4),
// also watch for asynchronous NCCL errors and abort every communicator on
// the first one.
// NOTE(review): if a CUDACHECK/NCCLCHECK macro returns early on error, the
// heap-allocated `done` array leaks — tolerable in a test harness, but worth
// confirming the macros' behavior.
testResult_t testStreamSynchronize(int ngpus, hipStream_t* streams, ncclComm_t* comms) {
hipError_t cudaErr;
int remaining = ngpus;
int* done = (int*)malloc(sizeof(int)*ngpus);
memset(done, 0, sizeof(int)*ngpus);
while (remaining) {
int idle = 1;
for (int i=0; i<ngpus; i++) {
if (done[i]) continue;

cudaErr = hipStreamQuery(streams[i]);
if (cudaErr == hipSuccess) {
done[i] = 1;
remaining--;
idle = 0;
continue;
}

if (cudaErr != hipErrorNotReady) CUDACHECK(cudaErr);

#if NCCL_VERSION_CODE >= NCCL_VERSION(2,4,0)
if (test_ncclVersion >= NCCL_VERSION(2,4,0) && comms) {
ncclResult_t ncclAsyncErr;
NCCLCHECK(ncclCommGetAsyncError(comms[i], &ncclAsyncErr));
if (ncclAsyncErr != ncclSuccess) {
// An asynchronous error happened. Stop the operation and destroy
// the communicator
for (int i=0; i<ngpus; i++)
NCCLCHECK(ncclCommAbort(comms[i]));
// Abort the perf test
NCCLCHECK(ncclAsyncErr);
}
}
#endif
}

// We might want to let other threads (including NCCL threads) use the CPU.
if (idle) pthread_yield();
}
free(done);
return testSuccess;
}
// Launch one iteration of the collective on every local GPU (grouped when
// more than one GPU per thread). The buffer offset is rotated across
// iterations to defeat caching and to shake out ptrExchange races. In
// blocking mode the call also waits for completion and barriers.
testResult_t startColl(struct threadArgs* args, ncclDataType_t type, ncclRedOp_t op, int root, int in_place, int iter) {
size_t count = args->nbytes / wordSize(type);

// Try to change offset for each iteration so that we avoid cache effects and catch race conditions in ptrExchange
size_t totalnbytes = max(args->sendBytes, args->expectedBytes);
size_t steps = totalnbytes ? args->maxbytes / totalnbytes : 1;
size_t shift = totalnbytes * (iter % steps);

if (args->nGpus > 1) NCCLCHECK(ncclGroupStart());
for (int i = 0; i < args->nGpus; i++) {
#ifndef NCCL_MAJOR
int cudaDev;
NCCLCHECK(ncclCommCuDevice(args->comms[i], &cudaDev));
CUDACHECK(hipSetDevice(cudaDev));
#endif
int rank = ((args->proc*args->nThreads + args->thread)*args->nGpus + i);
char* recvBuff = ((char*)args->recvbuffs[i]) + shift;
char* sendBuff = ((char*)args->sendbuffs[i]) + shift;
// In-place operation uses the receive buffer (rank-offset) for both ends.
TESTCHECK(args->collTest->runColl(
(void*)(in_place ? recvBuff + args->sendInplaceOffset*rank : sendBuff),
(void*)(in_place ? recvBuff + args->recvInplaceOffset*rank : recvBuff),
count, type, op, root, args->comms[i], args->streams[i]));
}
if (args->nGpus > 1) NCCLCHECK(ncclGroupEnd());

if (blocking_coll) {
// Complete op before returning
TESTCHECK(testStreamSynchronize(args->nGpus, args->streams, args->comms));
}
if (blocking_coll) Barrier(args);
return testSuccess;
}
// Wait for all outstanding collectives on this thread's streams to drain.
// In blocking mode startColl already synchronized, so there is nothing left
// to wait for here.
testResult_t completeColl(struct threadArgs* args) {
  if (!blocking_coll) {
    TESTCHECK(testStreamSynchronize(args->nGpus, args->streams, args->comms));
  }
  return testSuccess;
}
// Benchmark one (size, type, op, root, in_place) combination: warm up, time
// `iters` x `agg_iters` launches (optionally captured/replayed as CUDA
// graphs), aggregate the per-thread timings via Allreduce, then — when data
// checking is enabled — run one extra validated iteration and report the
// worst deviation. Prints one result column set and accumulates bus
// bandwidth into args->bw.
testResult_t BenchTime(struct threadArgs* args, ncclDataType_t type, ncclRedOp_t op, int root, int in_place) {
size_t count = args->nbytes / wordSize(type);
if (datacheck) {
// Initialize sendbuffs, recvbuffs and expected
TESTCHECK(args->collTest->initData(args, type, op, root, 99, in_place));
}

// Sync
TESTCHECK(startColl(args, type, op, root, in_place, 0));
TESTCHECK(completeColl(args));

Barrier(args);

#if CUDART_VERSION >= 11030
hipGraph_t graphs[args->nGpus];
hipGraphExec_t graphExec[args->nGpus];
if (cudaGraphLaunches >= 1) {
// Begin cuda graph capture
for (int i=0; i<args->nGpus; i++) {
// Thread local mdoe is needed for:
// - Multi-thread mode: where graph capture and instantiation can happen concurrently across threads
CUDACHECK(hipStreamBeginCapture(args->streams[i], args->nThreads > 1 ? cudaStreamCaptureModeThreadLocal : hipStreamCaptureModeGlobal));
}
}
#endif

// Performance Benchmark
auto start = std::chrono::high_resolution_clock::now();
for (int iter = 0; iter < iters; iter++) {
if (agg_iters>1) NCCLCHECK(ncclGroupStart());
for (int aiter = 0; aiter < agg_iters; aiter++) {
TESTCHECK(startColl(args, type, op, root, in_place, iter*agg_iters+aiter));
}
if (agg_iters>1) NCCLCHECK(ncclGroupEnd());
}

#if CUDART_VERSION >= 11030
if (cudaGraphLaunches >= 1) {
// End cuda graph capture
for (int i=0; i<args->nGpus; i++) {
CUDACHECK(hipStreamEndCapture(args->streams[i], graphs+i));
}
// Instantiate cuda graph
for (int i=0; i<args->nGpus; i++) {
CUDACHECK(hipGraphInstantiate(graphExec+i, graphs[i], NULL, NULL, 0));
}
// Resync CPU, restart timing, launch cuda graph
Barrier(args);
start = std::chrono::high_resolution_clock::now();
for (int l=0; l<cudaGraphLaunches; l++) {
for (int i=0; i<args->nGpus; i++) {
CUDACHECK(hipGraphLaunch(graphExec[i], args->streams[i]));
}
}
}
#endif

TESTCHECK(completeColl(args));

auto delta = std::chrono::high_resolution_clock::now() - start;
double deltaSec = std::chrono::duration_cast<std::chrono::duration<double>>(delta).count();
deltaSec = deltaSec/(iters*agg_iters);
if (cudaGraphLaunches >= 1) deltaSec = deltaSec/cudaGraphLaunches;
// Combine per-thread timings according to the -a/--average policy.
Allreduce(args, &deltaSec, average);

#if CUDART_VERSION >= 11030
if (cudaGraphLaunches >= 1) {
//destroy cuda graph
for (int i=0; i<args->nGpus; i++) {
CUDACHECK(hipGraphExecDestroy(graphExec[i]));
CUDACHECK(hipGraphDestroy(graphs[i]));
}
}
#endif

double algBw, busBw;
args->collTest->getBw(count, wordSize(type), deltaSec, &algBw, &busBw, args->nProcs*args->nThreads*args->nGpus);

Barrier(args);

double maxDelta = 0;
static __thread int rep = 0;
rep++;
if (datacheck) {
// Initialize sendbuffs, recvbuffs and expected
TESTCHECK(args->collTest->initData(args, type, op, root, rep, in_place));

#if CUDART_VERSION >= 11030
if (cudaGraphLaunches >= 1) {
// Begin cuda graph capture for data check
for (int i=0; i<args->nGpus; i++) {
CUDACHECK(hipStreamBeginCapture(args->streams[i], cudaStreamCaptureModeThreadLocal));
}
}
#endif

//test validation in single itertion, should ideally be included into the multi-iteration run
TESTCHECK(startColl(args, type, op, root, in_place, 0));

#if CUDART_VERSION >= 11030
if (cudaGraphLaunches >= 1) {
// End cuda graph capture
for (int i=0; i<args->nGpus; i++) {
CUDACHECK(hipStreamEndCapture(args->streams[i], graphs+i));
}
// Instantiate cuda graph
for (int i=0; i<args->nGpus; i++) {
CUDACHECK(hipGraphInstantiate(graphExec+i, graphs[i], NULL, NULL, 0));
}
// Launch cuda graph
for (int i=0; i<args->nGpus; i++) {
CUDACHECK(hipGraphLaunch(graphExec[i], args->streams[i]));
}
}
#endif

TESTCHECK(completeColl(args));

#if CUDART_VERSION >= 11030
if (cudaGraphLaunches >= 1) {
//destroy cuda graph
for (int i=0; i<args->nGpus; i++) {
CUDACHECK(hipGraphExecDestroy(graphExec[i]));
CUDACHECK(hipGraphDestroy(graphs[i]));
}
}
#endif

TESTCHECK(CheckData(args, type, op, root, in_place, &maxDelta));

//aggregate delta from all threads and procs
Allreduce(args, &maxDelta, 3);
}

double timeUsec = deltaSec*1.0E6;
char timeStr[100];
if (timeUsec >= 10000.0) {
sprintf(timeStr, "%7.0f", timeUsec);
} else if (timeUsec >= 100.0) {
sprintf(timeStr, "%7.1f", timeUsec);
} else {
sprintf(timeStr, "%7.2f", timeUsec);
}
if (datacheck) {
PRINT("  %7s  %6.2f  %6.2f  %5.0le", timeStr, algBw, busBw, maxDelta);
} else {
PRINT("  %7s  %6.2f  %6.2f  %5s", timeStr, algBw, busBw, "N/A");
}

args->bw[0] += busBw;
args->bw_count[0]++;
return testSuccess;
}
// Translate a raw byte size into the per-collective send/recv/param element
// counts (and in-place offsets) via the collective's getCollByteCount hook,
// storing the byte versions back into `args`.
void setupArgs(size_t size, ncclDataType_t type, struct threadArgs* args) {
int nranks = args->nProcs*args->nGpus*args->nThreads;
size_t count, sendCount, recvCount, paramCount, sendInplaceOffset, recvInplaceOffset;

count = size / wordSize(type);
args->collTest->getCollByteCount(&sendCount, &recvCount, &paramCount, &sendInplaceOffset, &recvInplaceOffset, (size_t)count, (size_t)nranks);

args->nbytes = paramCount * wordSize(type);
args->sendBytes = sendCount * wordSize(type);
args->expectedBytes = recvCount * wordSize(type);
args->sendInplaceOffset = sendInplaceOffset * wordSize(type);
args->recvInplaceOffset = recvInplaceOffset * wordSize(type);
}
// Run the full benchmark sweep for one (type, op, root) combination: warm up
// at the largest and smallest sizes, then benchmark every size in the
// configured range, out-of-place and in-place.
testResult_t TimeTest(struct threadArgs* args, ncclDataType_t type, const char* typeName, ncclRedOp_t op, const char* opName, int root) {
// Warm-up for large size
setupArgs(args->maxbytes, type, args);
for (int iter = 0; iter < warmup_iters; iter++) {
TESTCHECK(startColl(args, type, op, root, 0, iter));
}
TESTCHECK(completeColl(args));

// Warm-up for small size
setupArgs(args->minbytes, type, args);
for (int iter = 0; iter < warmup_iters; iter++) {
TESTCHECK(startColl(args, type, op, root, 0, iter));
}
TESTCHECK(completeColl(args));

// Benchmark
// Size progression is multiplicative when a step factor is set, additive
// otherwise.
for (size_t size = args->minbytes; size<=args->maxbytes; size = ((args->stepfactor > 1) ? size*args->stepfactor : size+args->stepbytes)) {
setupArgs(size, type, args);
print_line_header(max(args->sendBytes, args->expectedBytes), args->nbytes / wordSize(type), typeName, opName, root);
TESTCHECK(BenchTime(args, type, op, root, 0));
TESTCHECK(BenchTime(args, type, op, root, 1));
PRINT("\n");
}
return testSuccess;
}
// Per-thread test driver: pin the thread to its first GPU, then hand off to
// the collective-specific test engine.
testResult_t threadRunTests(struct threadArgs* args) {
// Set device to the first of our GPUs. If we don't do that, some operations
// will be done on the current GPU (by default : 0) and if the GPUs are in
// exclusive mode those operations will fail.
int gpuid = args->localRank*args->nThreads*args->nGpus + args->thread*args->nGpus;
CUDACHECK(hipSetDevice(gpuid));
TESTCHECK(ncclTestEngine.runTest(args, ncclroot, (ncclDataType_t)nccltype, test_typenames[nccltype], (ncclRedOp_t)ncclop, test_opnames[ncclop]));
return testSuccess;
}
// Parallel-init path: each thread creates its own NCCL communicators (one
// per local GPU) inside a group, runs the tests, then destroys them.
testResult_t threadInit(struct threadArgs* args) {
char hostname[1024];
getHostName(hostname, 1024);
int nranks =  args->nProcs*args->nThreads*args->nGpus;

//set main thread again
is_main_thread = (args->proc == 0 && args->thread == 0) ? 1 : 0;

NCCLCHECK(ncclGroupStart());
for (int i=0; i<args->nGpus; i++) {
// Global NCCL rank vs. local device index for this (proc, thread, gpu).
int rank = args->proc*args->nThreads*args->nGpus + args->thread*args->nGpus + i;
int gpuid = args->localRank*args->nThreads*args->nGpus + args->thread*args->nGpus + i;
CUDACHECK(hipSetDevice(gpuid));
NCCLCHECK(ncclCommInitRank(args->comms+i, nranks, args->ncclId, rank));
}
NCCLCHECK(ncclGroupEnd());

TESTCHECK(threadRunTests(args));

for (int i=0; i<args->nGpus; i++) {
NCCLCHECK(ncclCommDestroy(args->comms[i]));
}
return testSuccess;
}
// pthread trampoline: unpack the testThread handle, run its worker function
// against its argument block, and stash the result for the joiner.
void* threadLauncher(void* opaque) {
  struct testThread* self = (struct testThread*)opaque;
  self->ret = self->func(&self->args);
  return NULL;
}
// Spawn a worker thread that runs threadLauncher over `thread`.
// NOTE(review): the pthread_create return value is ignored — on failure the
// caller would later join an invalid handle; consider checking it.
testResult_t threadLaunch(struct testThread* thread) {
pthread_create(&thread->thread, NULL, threadLauncher, thread);
return testSuccess;
}
// Allocate device send/recv buffers of `nbytes` (the maximum test size, so
// startColl can rotate offsets between iterations) plus, when validation is
// enabled, an `expected` buffer of recvBytes.
// NOTE(review): sendBytes and nranks are currently unused here — presumably
// kept for interface symmetry with other test variants; confirm.
testResult_t AllocateBuffs(void **sendbuff, size_t sendBytes, void **recvbuff, size_t recvBytes, void **expected, size_t nbytes, int nranks) {
CUDACHECK(hipMalloc(sendbuff, nbytes));
CUDACHECK(hipMalloc(recvbuff, nbytes));
if (datacheck) CUDACHECK(hipMalloc(expected, recvBytes));
return testSuccess;
}
testResult_t run(); // Main function
// Test-harness entry point: detect the NCCL version, parse the command line
// into the static configuration globals above, validate the size range,
// initialize MPI when built with it, and hand off to run().
int main(int argc, char* argv[]) {
// Make sure everyline is flushed so that we see the progress of the test
setlinebuf(stdout);

#if NCCL_VERSION_CODE >= NCCL_VERSION(2,4,0)
ncclGetVersion(&test_ncclVersion);
#else
test_ncclVersion = NCCL_VERSION_CODE;
#endif
//printf("# NCCL_VERSION_CODE=%d ncclGetVersion=%d\n", NCCL_VERSION_CODE, test_ncclVersion);
// Built against a newer NCCL than we run with: drop the ops/types the
// runtime library does not know about.
if (NCCL_VERSION_CODE >= NCCL_VERSION(2,10,0) && test_ncclVersion < NCCL_VERSION(2,10,0)) {
test_opnum -= 1; // exclude ncclAvg
test_typenum -= 1; // exclude bfloat16
}

// Parse args
double parsed;
int longindex;
static struct option longopts[] = {
{"nthreads", required_argument, 0, 't'},
{"ngpus", required_argument, 0, 'g'},
{"minbytes", required_argument, 0, 'b'},
{"maxbytes", required_argument, 0, 'e'},
{"stepbytes", required_argument, 0, 'i'},
{"stepfactor", required_argument, 0, 'f'},
{"iters", required_argument, 0, 'n'},
{"agg_iters", required_argument, 0, 'm'},
{"warmup_iters", required_argument, 0, 'w'},
{"parallel_init", required_argument, 0, 'p'},
{"check", required_argument, 0, 'c'},
{"op", required_argument, 0, 'o'},
{"datatype", required_argument, 0, 'd'},
{"root", required_argument, 0, 'r'},
{"blocking", required_argument, 0, 'z'},
{"cudagraph", required_argument, 0, 'G'},
{"average", required_argument, 0, 'a'},
{"help", no_argument, 0, 'h'}
};

// Each option simply overrides one of the static defaults declared above.
while(1) {
int c;
c = getopt_long(argc, argv, "t:g:b:e:i:f:n:m:w:p:c:o:d:r:z:hG:a:", longopts, &longindex);

if (c == -1)
break;

switch(c) {
case 't':
nThreads = strtol(optarg, NULL, 0);
break;
case 'g':
nGpus = strtol(optarg, NULL, 0);
break;
case 'b':
parsed = parsesize(optarg);
if (parsed < 0) {
fprintf(stderr, "invalid size specified for 'minbytes'\n");
return -1;
}
minBytes = (size_t)parsed;
break;
case 'e':
parsed = parsesize(optarg);
if (parsed < 0) {
fprintf(stderr, "invalid size specified for 'maxbytes'\n");
return -1;
}
maxBytes = (size_t)parsed;
break;
case 'i':
stepBytes = strtol(optarg, NULL, 0);
break;
case 'f':
stepFactor = strtol(optarg, NULL, 0);
break;
case 'n':
iters = (int)strtol(optarg, NULL, 0);
break;
case 'm':
#if NCCL_MAJOR > 2 || (NCCL_MAJOR >= 2 && NCCL_MINOR >= 2)
agg_iters = (int)strtol(optarg, NULL, 0);
#else
fprintf(stderr, "Option -m not supported before NCCL 2.2. Ignoring\n");
#endif
break;
case 'w':
warmup_iters = (int)strtol(optarg, NULL, 0);
break;
case 'c':
datacheck = (int)strtol(optarg, NULL, 0);
break;
case 'p':
parallel_init = (int)strtol(optarg, NULL, 0);
break;
case 'o':
ncclop = ncclstringtoop(optarg);
break;
case 'd':
nccltype = ncclstringtotype(optarg);
break;
case 'r':
ncclroot = strtol(optarg, NULL, 0);
break;
case 'z':
blocking_coll = strtol(optarg, NULL, 0);
break;
case 'G':
#if (NCCL_MAJOR > 2 || (NCCL_MAJOR >= 2 && NCCL_MINOR >= 9)) && CUDART_VERSION >= 11030
cudaGraphLaunches = strtol(optarg, NULL, 0);
#else
printf("Option -G (CUDA graph) not supported before NCCL 2.9 + CUDA 11.3. Ignoring\n");
#endif
break;
case 'a':
average = (int)strtol(optarg, NULL, 0);
break;
case 'h':
default:
if (c != 'h') printf("invalid option '%c'\n", c);
printf("USAGE: %s \n\t"
"[-t,--nthreads <num threads>] \n\t"
"[-g,--ngpus <gpus per thread>] \n\t"
"[-b,--minbytes <min size in bytes>] \n\t"
"[-e,--maxbytes <max size in bytes>] \n\t"
"[-i,--stepbytes <increment size>] \n\t"
"[-f,--stepfactor <increment factor>] \n\t"
"[-n,--iters <iteration count>] \n\t"
"[-m,--agg_iters <aggregated iteration count>] \n\t"
"[-w,--warmup_iters <warmup iteration count>] \n\t"
"[-p,--parallel_init <0/1>] \n\t"
"[-c,--check <0/1>] \n\t"
#if NCCL_VERSION_CODE >= NCCL_VERSION(2,10,0)
"[-o,--op <sum/prod/min/max/avg/all>] \n\t"
#else
"[-o,--op <sum/prod/min/max/all>] \n\t"
#endif
"[-d,--datatype <nccltype/all>] \n\t"
"[-r,--root <root>] \n\t"
"[-z,--blocking <0/1>] \n\t"
"[-G,--cudagraph <num graph launches>] \n\t"
"[-a,--average <0/1/2/3> report average iteration time <0=RANK0/1=AVG/2=MIN/3=MAX>] \n\t"
"[-h,--help]\n",
basename(argv[0]));
return 0;
}
}
// Sanity-check the requested size range before doing any GPU work.
if (minBytes > maxBytes) {
fprintf(stderr, "invalid sizes for 'minbytes' and 'maxbytes': %llu > %llu\n",
(unsigned long long)minBytes,
(unsigned long long)maxBytes);
return -1;
}
#ifdef MPI_SUPPORT
MPI_Init(&argc, &argv);
#endif
TESTCHECK(run());
return 0;
}
// Process-level driver (HIP build): discovers ranks and local GPUs, allocates
// device buffers and streams, creates NCCL communicators, spawns one worker
// thread per -t, then aggregates bandwidth/error stats and prints the summary.
// Never returns normally on the success path: it calls exit() at the bottom.
testResult_t run() {
  int nProcs = 1, proc = 0;
  int localRank = 0;  // rank within this host; selects which GPUs we own
  char hostname[1024];
  getHostName(hostname, 1024);
#ifdef MPI_SUPPORT
  MPI_Comm_size(MPI_COMM_WORLD, &nProcs);
  MPI_Comm_rank(MPI_COMM_WORLD, &proc);
  // Hash every rank's hostname, then count earlier ranks with the same hash
  // to derive this process's local rank. (Runtime-sized array: GNU VLA.)
  uint64_t hostHashs[nProcs];
  hostHashs[proc] = getHostHash(hostname);
  MPI_Allgather(MPI_IN_PLACE, 0, MPI_DATATYPE_NULL, hostHashs, sizeof(uint64_t), MPI_BYTE, MPI_COMM_WORLD);
  for (int p=0; p<nProcs; p++) {
    if (p == proc) break;
    if (hostHashs[p] == hostHashs[proc]) localRank++;
  }
#endif
  // Only rank 0 prints (PRINT is gated on is_main_thread).
  is_main_thread = (proc == 0) ? 1 : 0;
  PRINT("# nThread %d nGpus %d minBytes %ld maxBytes %ld step: %ld(%s) warmup iters: %d iters: %d validation: %d \n", nThreads, nGpus, minBytes, maxBytes,
      (stepFactor > 1)?stepFactor:stepBytes, (stepFactor > 1)?"factor":"bytes", warmup_iters, iters, datacheck);
  if (blocking_coll) PRINT("# Blocking Enabled: wait for completion and barrier after each collective \n");
  if (parallel_init) PRINT("# Parallel Init Enabled: threads call into NcclInitRank concurrently \n");
  PRINT("#\n");
  PRINT("# Using devices\n");
#define MAX_LINE 2048
  char line[MAX_LINE];
  int len = 0;
  size_t maxMem = ~0;  // start at SIZE_MAX; reduced to the smallest GPU memory
  for (int i=0; i<nThreads*nGpus; i++) {
    int cudaDev = localRank*nThreads*nGpus+i;
    int rank = proc*nThreads*nGpus+i;
    hipDeviceProp_t prop;
    CUDACHECK(hipGetDeviceProperties(&prop, cudaDev));
    len += snprintf(line+len, MAX_LINE-len, "#   Rank %2d Pid %6d on %10s device %2d [0x%02x] %s\n",
                    rank, getpid(), hostname, cudaDev, prop.pciBusID, prop.name);
    // NOTE(review): `::min` presumably resolves through a using-declaration in
    // common.h (hipify artifact of std::min) — confirm it compiles as intended.
    maxMem = ::min(maxMem, prop.totalGlobalMem);
  }
// NOTE(review): `#if MPI_SUPPORT` here vs `#ifdef MPI_SUPPORT` elsewhere; this
// only works because the build defines MPI_SUPPORT with a value (e.g. -DMPI_SUPPORT=1).
#if MPI_SUPPORT
  char *lines = (proc == 0) ? (char *)malloc(nProcs*MAX_LINE) : NULL;
  // Gather all output in rank order to root (0)
  MPI_Gather(line, MAX_LINE, MPI_BYTE, lines, MAX_LINE, MPI_BYTE, 0, MPI_COMM_WORLD);
  if (proc == 0) {
    for (int p = 0; p < nProcs; p++)
      PRINT("%s", lines+MAX_LINE*p);
    free(lines);
  }
  // Global minimum of device memory across all ranks.
  // NOTE(review): maxMem is size_t but reduced as MPI_LONG — matches on LP64
  // Linux, would mismatch on LLP64 platforms.
  MPI_Allreduce(MPI_IN_PLACE, &maxMem, 1, MPI_LONG, MPI_MIN, MPI_COMM_WORLD);
#else
  PRINT("%s", line);
#endif
  // We need sendbuff, recvbuff, expected (when datacheck enabled), plus 1G for the rest.
  size_t memMaxBytes = (maxMem - (1<<30)) / (datacheck ? 3 : 2);
  if (maxBytes > memMaxBytes) {
    maxBytes = memMaxBytes;
    if (proc == 0) printf("#\n# Reducing maxBytes to %ld due to memory limitation\n", maxBytes);
  }
  // Rank 0 generates the NCCL unique id; broadcast to everyone under MPI.
  ncclUniqueId ncclId;
  if (proc == 0) {
    NCCLCHECK(ncclGetUniqueId(&ncclId));
  }
#ifdef MPI_SUPPORT
  MPI_Bcast(&ncclId, sizeof(ncclId), MPI_BYTE, 0, MPI_COMM_WORLD);
#endif
  // Per-GPU resources for this process (GNU VLAs sized by CLI options).
  hipStream_t streams[nGpus*nThreads];
  void* sendbuffs[nGpus*nThreads];
  void* recvbuffs[nGpus*nThreads];
  void* expected[nGpus*nThreads];
  size_t sendBytes, recvBytes;
  ncclTestEngine.getBuffSize(&sendBytes, &recvBytes, (size_t)maxBytes, (size_t)nProcs*nGpus*nThreads);
  for (int i=0; i<nGpus*nThreads; i++) {
    CUDACHECK(hipSetDevice(localRank*nThreads*nGpus+i));
    TESTCHECK(AllocateBuffs(sendbuffs+i, sendBytes, recvbuffs+i, recvBytes, expected+i, (size_t)maxBytes, nProcs*nThreads*nGpus));
    CUDACHECK(hipStreamCreateWithFlags(streams+i, hipStreamNonBlocking));
  }
  //if parallel init is not selected, use main thread to initialize NCCL
  ncclComm_t* comms = (ncclComm_t*)malloc(sizeof(ncclComm_t)*nThreads*nGpus);
  if (!parallel_init) {
    if (nProcs == 1) {
      int gpuArray[nGpus*nThreads];
      for (int i=0; i<nGpus*nThreads; i++) gpuArray[i] = i;
      NCCLCHECK(ncclCommInitAll(comms, nGpus*nThreads, gpuArray));
    } else {
      // Multi-process: group the per-GPU InitRank calls to avoid deadlock.
      NCCLCHECK(ncclGroupStart());
      for (int i=0; i<nGpus*nThreads; i++) {
        CUDACHECK(hipSetDevice(localRank*nThreads*nGpus+i));
        NCCLCHECK(ncclCommInitRank(comms+i, nProcs*nThreads*nGpus, ncclId, proc*nThreads*nGpus+i));
      }
      NCCLCHECK(ncclGroupEnd());
    }
  }
  // Per-thread accumulators; thread 0's slot later absorbs the others.
  int errors[nThreads];
  double bw[nThreads];
  double* delta;
  // Mapped + portable pinned host memory: device kernels write per-block
  // deltas here and the host reads them back without an explicit copy.
  CUDACHECK(hipHostMalloc(&delta, sizeof(double)*nThreads*NUM_BLOCKS, hipHostMallocPortable | hipHostMallocMapped));
  int bw_count[nThreads];
  for (int t=0; t<nThreads; t++) {
    bw[t] = 0.0;
    errors[t] = bw_count[t] = 0;
  }
  PRINT("#\n");
  print_header();
  // Two-slot shared state backing the double-buffered Barrier()/Allreduce().
  int* sync = (int*)calloc(2, sizeof(int));
  int* barrier = (int*)calloc(2, sizeof(int));
  double* reduce = (double*)calloc(2, sizeof(double));
  struct testThread threads[nThreads];
  memset(threads, 0, sizeof(struct testThread)*nThreads);
  // Launch threads nThreads-1..1; thread 0 runs inline in this thread last.
  for (int t=nThreads-1; t>=0; t--) {
    threads[t].args.minbytes=minBytes;
    threads[t].args.maxbytes=maxBytes;
    threads[t].args.stepbytes=stepBytes;
    threads[t].args.stepfactor=stepFactor;
    threads[t].args.localRank = localRank;
    threads[t].args.nProcs=nProcs;
    threads[t].args.proc=proc;
    threads[t].args.nThreads=nThreads;
    threads[t].args.thread=t;
    threads[t].args.nGpus=nGpus;
    threads[t].args.sendbuffs = sendbuffs+t*nGpus;
    threads[t].args.recvbuffs = recvbuffs+t*nGpus;
    threads[t].args.expected = expected+t*nGpus;
    threads[t].args.ncclId = ncclId;
    threads[t].args.comms=comms+t*nGpus;
    threads[t].args.streams=streams+t*nGpus;
    threads[t].args.barrier = (volatile int*)barrier;
    threads[t].args.barrier_idx = 0;
    threads[t].args.reduce = (volatile double*)reduce;
    threads[t].args.sync = (volatile int*)sync;
    threads[t].args.sync_idx = 0;
    threads[t].args.deltaHost = (delta + t*NUM_BLOCKS);
    threads[t].args.errors=errors+t;
    threads[t].args.bw=bw+t;
    threads[t].args.bw_count=bw_count+t;
    threads[t].args.reportErrors = 1;
    threads[t].func = parallel_init ? threadInit : threadRunTests;
    if (t)
      TESTCHECK(threadLaunch(threads+t));
    else
      TESTCHECK(threads[t].func(&threads[t].args));
  }
  // Wait for other threads and accumulate stats and errors
  for (int t=nThreads-1; t>=0; t--) {
    if (t) pthread_join(threads[t].thread, NULL);
    TESTCHECK(threads[t].ret);
    if (t) {
      errors[0] += errors[t];
      bw[0] += bw[t];
      bw_count[0] += bw_count[t];
    }
  }
#ifdef MPI_SUPPORT
  MPI_Allreduce(MPI_IN_PLACE, &errors[0], 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
#endif
  if (!parallel_init) {
    for(int i=0; i<nGpus*nThreads; ++i)
      NCCLCHECK(ncclCommDestroy(comms[i]));
    free(comms);
  }
  // Free off CUDA allocated memory
  for (int i=0; i<nGpus*nThreads; i++) {
    if (sendbuffs[i]) CUDACHECK(hipFree((char*)sendbuffs[i]));
    if (recvbuffs[i]) CUDACHECK(hipFree((char*)recvbuffs[i]));
    if (datacheck) CUDACHECK(hipFree(expected[i]));
  }
  CUDACHECK(hipHostFree(delta));
  // Optional pass/fail threshold from the environment.
  char* str = getenv("NCCL_TESTS_MIN_BW");
  double check_avg_bw = str ? atof(str) : -1;
  // NOTE(review): divides by bw_count[0] without a zero guard — relies on at
  // least one measured iteration having run.
  bw[0] /= bw_count[0];
  PRINT("# Out of bounds values : %d %s\n", errors[0], errors[0] ? "FAILED" : "OK");
  PRINT("# Avg bus bandwidth    : %g %s\n", bw[0], check_avg_bw == -1 ? "" : (bw[0] < check_avg_bw*(0.9) ? "FAILED" : "OK"));
  PRINT("#\n");
#ifdef MPI_SUPPORT
  MPI_Finalize();
#endif
  // 'cuda-memcheck --leak-check full' requires this
  hipDeviceReset();
  // Exit directly so the process status reflects the bandwidth/error check.
  if (errors[0] || bw[0] < check_avg_bw*(0.9))
    exit(EXIT_FAILURE);
  else
    exit(EXIT_SUCCESS);
}
| 6887358ad767001c24c8fb42b4675380af06ddea.cu | /*************************************************************************
* Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
*
* See LICENSE.txt for license information
************************************************************************/
#include "common.h"
#include <pthread.h>
#include <cstdio>
#include <getopt.h>
#include <libgen.h>
#include "cuda.h"
// Runtime-detected NCCL version; filled in main() before option parsing.
int test_ncclVersion = 0; // init'd with ncclGetVersion()

// Tables mapping every supported datatype/reduction to its NCCL enum and a
// printable name. The bfloat16 and avg entries only exist when both the
// headers (compile time) and the loaded library (runtime) are new enough;
// main() trims test_typenum/test_opnum when the runtime is older.
#if NCCL_MAJOR >= 2
ncclDataType_t test_types[ncclNumTypes] = {ncclInt8, ncclUint8, ncclInt32, ncclUint32, ncclInt64, ncclUint64, ncclHalf, ncclFloat, ncclDouble,
#if defined(__CUDA_BF16_TYPES_EXIST__) && NCCL_VERSION_CODE >= NCCL_VERSION(2,10,0)
  ncclBfloat16
#endif
};
const char *test_typenames[ncclNumTypes] = {"int8", "uint8", "int32", "uint32", "int64", "uint64", "half", "float", "double",
#if defined(__CUDA_BF16_TYPES_EXIST__) && NCCL_VERSION_CODE >= NCCL_VERSION(2,10,0)
  "bfloat16"
#endif
};
#if defined(__CUDA_BF16_TYPES_EXIST__) && NCCL_VERSION_CODE >= NCCL_VERSION(2,10,0)
int test_typenum = 10;
#else
int test_typenum = 9;
#endif
#else
// Pre-2.x NCCL used a smaller datatype enum.
ncclDataType_t test_types[ncclNumTypes] = {ncclChar, ncclInt, ncclHalf, ncclFloat, ncclDouble, ncclInt64, ncclUint64};
const char *test_typenames[ncclNumTypes] = {"char", "int", "half", "float", "double", "int64", "uint64"};
int test_typenum = 7;
#endif

#if NCCL_VERSION_CODE >= NCCL_VERSION(2,10,0)
ncclRedOp_t test_ops[ncclNumOps] = {ncclSum, ncclProd, ncclMax, ncclMin, ncclAvg};
const char *test_opnames[ncclNumOps] = {"sum", "prod", "max", "min", "avg"};
int test_opnum = 5;
#else
ncclRedOp_t test_ops[ncclNumOps] = {ncclSum, ncclProd, ncclMax, ncclMin};
const char *test_opnames[ncclNumOps] = {"sum", "prod", "max", "min"};
int test_opnum = 4;
#endif

// Set per worker thread; gates PRINT so only rank 0 / thread 0 emits output.
thread_local int is_main_thread = 0;

// Command line parameter defaults
static int nThreads = 1;
static int nGpus = 1;
static size_t minBytes = 32*1024*1024;
static size_t maxBytes = 32*1024*1024;
static size_t stepBytes = 1*1024*1024;
static size_t stepFactor = 1;
static int datacheck = 1;
static int warmup_iters = 5;
static int iters = 20;
static int agg_iters = 1;
static int ncclop = ncclSum;
static int nccltype = ncclFloat;
static int ncclroot = 0;
static int parallel_init = 0;
static int blocking_coll = 0;
static int cudaGraphLaunches = 0;
// Report average iteration time: (0=RANK0,1=AVG,2=MIN,3=MAX)
static int average = 1;

// Grid size used by the data-validation kernels (deltaKern); also sizes the
// per-thread deltaHost scratch area.
#define NUM_BLOCKS 32
/* Parse a size argument like "64", "32K", "1M" or "8g" into a byte count.
 *
 * Accepts an optional single-letter suffix (case-insensitive):
 *   K/k = 2^10, M/m = 2^20, G/g = 2^30.
 * Returns the size in bytes as a double, or -1.0 on malformed input
 * (unknown suffix, or no leading number). */
static double parsesize(const char *value) {
  long long int units;
  double size;
  // Two chars, not one: sscanf's "%1s" stores the matched character PLUS a
  // terminating '\0'. The original single-char buffer was overrun by one byte.
  char size_lit[2];

  int count = sscanf(value, "%lf %1s", &size, size_lit);

  switch (count) {
  case 2:
    // Number followed by a unit suffix.
    switch (size_lit[0]) {
    case 'G':
    case 'g':
      units = 1024*1024*1024;
      break;
    case 'M':
    case 'm':
      units = 1024*1024;
      break;
    case 'K':
    case 'k':
      units = 1024;
      break;
    default:
      return -1.0;
    };
    break;
  case 1:
    // Bare number: already in bytes.
    units = 1;
    break;
  default:
    return -1.0;
  }

  return size * units;
}
// Per-element tolerance used by the validation pass: reduced-precision float
// types get loose bounds, full-precision floats tight ones, and integer types
// an effectively-zero bound (any difference counts as an error).
double DeltaMaxValue(ncclDataType_t type) {
  switch(type) {
    case ncclHalf: return 1e-2;
#if defined(__CUDA_BF16_TYPES_EXIST__)
    case ncclBfloat16: return 1e-2;
#endif
    case ncclFloat: return 1e-5;
    case ncclDouble: return 1e-12;

    case ncclInt:
#if NCCL_MAJOR >= 2
    case ncclUint8:
    //case ncclInt32:
    case ncclUint32:
#endif
    case ncclInt64:
    case ncclUint64: return 1e-200;
  }
  // Unreached for valid enum values; matches the integer-type bound.
  return 1e-200;
}

// Absolute difference between two elements, promoted to double.
template<typename T> __device__
double absDiff(T a, T b) {
  return fabs((double)(b - a));
}

// half has no device operator- in older toolkits; go through float.
template<> __device__
double absDiff<half>(half a, half b) {
  float x = __half2float(a);
  float y = __half2float(b);
  return fabs((double)(y-x));
}

// Convert an element to float (for debug printing in deltaKern).
template<typename T> __device__
float toFloat(T a) {
  return (float)a;
}
template<> __device__
float toFloat(half a) {
  return __half2float(a);
}
#if defined(__CUDA_BF16_TYPES_EXIST__)
template<> __device__
float toFloat(__nv_bfloat16 a) {
  return __bfloat162float(a);
}
#endif
// Computes, per thread block, the maximum absolute element-wise difference
// between arrays A_ and B_ of `count` elements, writing one double per block
// into max[blockIdx.x]. Launched with blockDim.x == BSIZE; grid-stride loop
// covers arbitrary counts.
template<typename T, int BSIZE> __global__
void deltaKern(void* A_, void* B_, size_t count, double* max) {
  const T* A = (const T*)A_;
  const T* B = (const T*)B_;
  __shared__ double temp[BSIZE];
  int tid = blockIdx.x*blockDim.x + threadIdx.x;
  double locmax = 0.0;
  // Each thread scans its strided slice and keeps a private maximum.
  for(size_t i=tid; i<count; i+=blockDim.x*gridDim.x) {

    double delta = absDiff(A[i], B[i]);
    if( delta > locmax ) {
      locmax = delta;
#ifdef DEBUG_PRINT
      if (delta > .1) printf("Error at %ld/%ld(%p) : %f != %f\n", i, count, B+i, toFloat(A[i]), toFloat(B[i]));
#endif
    }
  }

  // Tree reduction in shared memory. The loop deliberately stops at
  // stride==2; the final line merges the remaining temp[0]/temp[1] pair
  // (requires BSIZE >= 2).
  tid = threadIdx.x;
  temp[tid] = locmax;
  for(int stride = BSIZE/2; stride > 1; stride>>=1) {
    __syncthreads();
    if( tid < stride )
      temp[tid] = temp[tid] > temp[tid+stride] ? temp[tid] : temp[tid+stride];
  }
  __syncthreads();
  if( threadIdx.x == 0)
    max[blockIdx.x] = temp[0] > temp[1] ? temp[0] : temp[1];
}

// Host wrapper: dispatches deltaKern on the right element type, synchronizes,
// then folds the NUM_BLOCKS per-block maxima down into devmax[0].
// devmax points at mapped pinned host memory (allocated in run()), so the
// host-side loop can read the kernel's output directly after the sync.
testResult_t CheckDelta(void* results, void* expected, size_t count, ncclDataType_t type, double* devmax) {
  switch (type) {
#if defined(__CUDA_BF16_TYPES_EXIST__)
    case ncclBfloat16:
      deltaKern<__nv_bfloat16, 512><<<NUM_BLOCKS, 512>>>(results, expected, count, devmax); break;
#endif
    case ncclHalf:
      deltaKern<half, 512><<<NUM_BLOCKS, 512>>>(results, expected, count, devmax); break;
    case ncclFloat:
      deltaKern<float, 512><<<NUM_BLOCKS, 512>>>(results, expected, count, devmax); break;
    case ncclDouble:
      deltaKern<double, 512><<<NUM_BLOCKS, 512>>>(results, expected, count, devmax); break;

    case ncclChar:
#if NCCL_MAJOR >= 2
    case ncclUint8:
#endif
      deltaKern<uint8_t, 512><<<NUM_BLOCKS, 512>>>(results, expected, count, devmax); break;
    case ncclInt:
#if NCCL_MAJOR >= 2
    case ncclUint32:
#endif
      deltaKern<uint32_t, 512><<<NUM_BLOCKS, 512>>>(results, expected, count, devmax); break;
    case ncclInt64:
    case ncclUint64:
      deltaKern<uint64_t, 512><<<NUM_BLOCKS, 512>>>(results, expected, count, devmax); break;
  }
  CUDACHECK(cudaDeviceSynchronize());
  for (int i=1; i<NUM_BLOCKS; i++) devmax[0] = std::max(devmax[0], devmax[i]);
  return testSuccess;
}
// Deterministic pseudo-data generator: every (offset, repetition, rank)
// triple maps to a reproducible value, so expected results can be recomputed
// independently on any rank.
// For integer values, we use values between 0 and 255
template<typename T>
__device__ T testValue(const size_t offset, const int rep, const int rank) {
  uint8_t v = (rep+rank+offset) % 256;
  return (T)v;
}

// For floating point datatype, we use values between 0 and 1 otherwise the
// Product operation will produce NaNs.
template<>
__device__ double testValue<double>(const size_t offset, const int rep, const int rank) {
  return 1.0/(1.0+(double)testValue<int>(offset, rep, rank));
}
template<>
__device__ float testValue<float>(const size_t offset, const int rep, const int rank) {
  return 1.0/(1.0+(float)testValue<int>(offset, rep, rank));
}
template<>
__device__ half testValue<half>(const size_t offset, const int rep, const int rank) {
  return __float2half(testValue<float>(offset, rep, rank));
}
#if defined(__CUDA_BF16_TYPES_EXIST__)
template<>
__device__ __nv_bfloat16 testValue<__nv_bfloat16>(const size_t offset, const int rep, const int rank) {
  return __float2bfloat16(testValue<float>(offset, rep, rank));
}
#endif

// Operations
// Device-side mirrors of the NCCL reduction ops, used to compute expected
// results. half lacks native device arithmetic on older archs, so its
// overloads round-trip through float.
template<typename T>
__device__ T ncclOpSum(T a, T b) { return a+b; }
template<typename T>
__device__ T ncclOpProd(T a, T b) { return a*b; }
template<typename T>
__device__ T ncclOpMax(T a, T b) { return a>b ? a : b; }
template<typename T>
__device__ T ncclOpMin(T a, T b) { return a<b ? a : b; }

// Definitions for half
template<>
__device__ half ncclOpSum(half a, half b) { return __float2half(__half2float(a)+__half2float(b)); }
template<>
__device__ half ncclOpProd(half a, half b) { return __float2half(__half2float(a)*__half2float(b)); }
template<>
__device__ half ncclOpMax(half a, half b) { return __half2float(a)>__half2float(b) ? a : b; }
template<>
__device__ half ncclOpMin(half a, half b) { return __half2float(a)<__half2float(b) ? a : b; }

// Post-reduction step: identity for sum/prod/min/max, divide-by-nranks for
// ncclAvg (sum followed by division).
template<typename T>
__device__ T ncclPostOpIdent(T x, int n) { return x; }
template<typename T>
__device__ T ncclPostOpDiv(T x, int n) { return x/n; }
template<>
__device__ half ncclPostOpDiv<half>(half x, int n) { return __float2half(__half2float(x)/n); }
#if defined(__CUDA_BF16_TYPES_EXIST__)
template<>
__device__ __nv_bfloat16 ncclPostOpDiv<__nv_bfloat16>(__nv_bfloat16 x, int n) { return __float2bfloat16(__bfloat162float(x)/n); }
#endif
// Fills `data` with the EXPECTED result of reducing testValue contributions
// from all nranks with Op, then applying PostOp (identity, or /nranks for avg).
// Grid-stride loop over N elements starting at `offset` within the logical
// buffer; `rep` varies the generated pattern between iterations.
template<typename T, T (*Op)(T, T), T(*PostOp)(T,int)>
__global__ void InitDataReduceKernel(T* data, const size_t N, const size_t offset, const int rep, const int nranks) {
  for (size_t o=blockIdx.x*blockDim.x+threadIdx.x; o<N; o+=gridDim.x*blockDim.x) {
    T val = testValue<T>(o+offset, rep, 0);
    for (int i=1; i<nranks; i++) {
      val = Op(val, testValue<T>(o+offset, rep, i));
    }
    data[o] = PostOp(val, nranks);
  }
}

// Builds a flat [type][op] table of kernel pointers so InitDataReduce can
// dispatch on runtime (type, op) values via cudaLaunchKernel.
#define KERN(type, op, postop) (void*)InitDataReduceKernel<type, op<type>, postop<type> >
#if NCCL_VERSION_CODE >= NCCL_VERSION(2,10,0)
// Avg is implemented as Sum followed by a divide post-op.
#define OPS(type) \
  KERN(type, ncclOpSum, ncclPostOpIdent), \
  KERN(type, ncclOpProd, ncclPostOpIdent), \
  KERN(type, ncclOpMax, ncclPostOpIdent), \
  KERN(type, ncclOpMin, ncclPostOpIdent), \
  KERN(type, ncclOpSum/*Avg*/, ncclPostOpDiv)
#else
#define OPS(type) \
  KERN(type, ncclOpSum, ncclPostOpIdent), \
  KERN(type, ncclOpProd, ncclPostOpIdent), \
  KERN(type, ncclOpMax, ncclPostOpIdent), \
  KERN(type, ncclOpMin, ncclPostOpIdent)
#endif

// Order must match the test_types / ncclDataType_t enum ordering.
static void* const redInitDataKerns[ncclNumOps*ncclNumTypes] = {
  OPS(int8_t), OPS(uint8_t), OPS(int32_t), OPS(uint32_t), OPS(int64_t), OPS(uint64_t), OPS(half), OPS(float), OPS(double),
#if defined(__CUDA_BF16_TYPES_EXIST__) && NCCL_VERSION_CODE >= NCCL_VERSION(2,10,0)
  OPS(__nv_bfloat16)
#endif
};

// Host wrapper: launches the (type, op)-specific expected-result kernel on
// the default stream of the currently selected device.
testResult_t InitDataReduce(void* data, const size_t count, const size_t offset, ncclDataType_t type, ncclRedOp_t op, const int rep, const int nranks) {
  dim3 grid = { 32, 1, 1 };
  dim3 block = { 256, 1, 1 };
  void* args[5] = { (void*)&data, (void*)&count, (void*)&offset, (void*)&rep, (void*)&nranks };
  CUDACHECK(cudaLaunchKernel(redInitDataKerns[type*ncclNumOps+op], grid, block, args, 0, cudaStreamDefault));
  return testSuccess;
}
// Fills `data` with this rank's deterministic testValue pattern (no
// reduction) — used to (re)initialize send buffers. Grid-stride loop.
template<typename T>
__global__ void InitDataKernel(T* data, const size_t N, const int rep, const int rank) {
  for (size_t o=blockIdx.x*blockDim.x+threadIdx.x; o<N; o+=gridDim.x*blockDim.x)
    data[o] = testValue<T>(o, rep, rank);
}

// Kernel-pointer table indexed by ncclDataType_t for runtime dispatch.
// Order must match the datatype enum ordering.
static void* const initDataKerns[ncclNumTypes] = {
  (void*)InitDataKernel<  int8_t>,
  (void*)InitDataKernel< uint8_t>,
  (void*)InitDataKernel< int32_t>,
  (void*)InitDataKernel<uint32_t>,
  (void*)InitDataKernel< int64_t>,
  (void*)InitDataKernel<uint64_t>,
  (void*)InitDataKernel<    half>,
  (void*)InitDataKernel<   float>,
  (void*)InitDataKernel<  double>,
#if defined(__CUDA_BF16_TYPES_EXIST__) && NCCL_VERSION_CODE >= NCCL_VERSION(2,10,0)
  (void*)InitDataKernel<__nv_bfloat16>,
#endif
};

// Typed convenience wrapper around InitDataKernel with a fixed <<<16,512>>>
// launch. NOTE(review): appears unused here — InitData below goes through the
// dispatch table instead; confirm before removing.
template<typename T>
testResult_t InitDataType(void* dest, const size_t N, const int rep, const int rank) {
  T* ptr = (T*)dest;
  InitDataKernel<<<16, 512>>>(ptr, N, rep, rank);
  return testSuccess;
}

// Host wrapper: launches the type-specific init kernel on the default stream
// of the currently selected device.
testResult_t InitData(void* data, const size_t count, ncclDataType_t type, const int rep, const int rank) {
  dim3 grid = { 32, 1, 1 };
  dim3 block = { 256, 1, 1 };
  void* args[4] = { (void*)&data, (void*)&count, (void*)&rep, (void*)&rank };
  CUDACHECK(cudaLaunchKernel(initDataKerns[type], grid, block, args, 0, cudaStreamDefault));
  return testSuccess;
}
// Barrier across the worker threads of this process (and, with MPI, across
// all processes). Threads arrive in order by spinning until the shared slot
// equals their own id; the last arrival performs the MPI barrier and resets
// the slot to release everyone. barrier_idx alternates between two slots so
// back-to-back barriers cannot race.
void Barrier(struct threadArgs* args) {
  while (args->barrier[args->barrier_idx] != args->thread) pthread_yield();

  args->barrier[args->barrier_idx] = args->thread + 1;

  if (args->thread+1 == args->nThreads) {
#ifdef MPI_SUPPORT
    MPI_Barrier(MPI_COMM_WORLD);
#endif
    // Releases all waiting threads.
    args->barrier[args->barrier_idx] = 0;
  } else {
    while (args->barrier[args->barrier_idx]) pthread_yield();
  }

  args->barrier_idx=!args->barrier_idx;
}

// Inter-thread/process barrier+allreduce
// Combines `value` across threads (and MPI ranks) per `average`:
// 0 = keep thread/rank 0's value, 1 = mean, 2 = min, 3 = max.
// Same ticket + double-buffer scheme as Barrier(); the partial result lives
// in the shared args->reduce slot while threads arrive in order.
void Allreduce(struct threadArgs* args, double* value, int average) {
  while (args->barrier[args->barrier_idx] != args->thread) pthread_yield();

  double val = *value;
  if (args->thread > 0) {
    // Fold our value into the running partial left by earlier threads.
    double val2 = args->reduce[args->barrier_idx];
    if (average == 1) val += val2;
    if (average == 2) val = std::min(val, val2);
    if (average == 3) val = std::max(val, val2);
  }

  if (average || args->thread == 0) args->reduce[args->barrier_idx] = val;

  args->barrier[args->barrier_idx] = args->thread + 1;

  if (args->thread+1 == args->nThreads) {
#ifdef MPI_SUPPORT
    if (average != 0) {
      MPI_Op op = average == 1 ? MPI_SUM : average == 2 ? MPI_MIN : MPI_MAX;
      MPI_Allreduce(MPI_IN_PLACE, (void*)&args->reduce[args->barrier_idx], 1, MPI_DOUBLE, op, MPI_COMM_WORLD);
    }
#endif
    // For the mean, divide the global sum by the total participant count.
    if (average == 1) args->reduce[args->barrier_idx] /= args->nProcs*args->nThreads;

    // Clear the *other* slot for the next Allreduce, then release waiters.
    args->reduce[1-args->barrier_idx] = 0;
    args->barrier[args->barrier_idx] = 0;
  } else {
    while (args->barrier[args->barrier_idx]) pthread_yield();
  }

  *value = args->reduce[args->barrier_idx];
  args->barrier_idx=!args->barrier_idx;
}
// Validates the received data on every GPU this thread owns against the
// precomputed `expected` buffers. Writes the largest per-element deviation to
// *delta and bumps args->errors[0] when it exceeds the type's tolerance
// (scaled by nranks-1 to allow for accumulated rounding).
testResult_t CheckData(struct threadArgs* args, ncclDataType_t type, ncclRedOp_t op, int root, int in_place, double *delta) {
  size_t count = args->expectedBytes/wordSize(type);
  double maxDelta = 0.0;
  for (int i=0; i<args->nGpus; i++) {
    int device;
    int rank = ((args->proc*args->nThreads + args->thread)*args->nGpus + i);
    NCCLCHECK(ncclCommCuDevice(args->comms[i], &device));
    CUDACHECK(cudaSetDevice(device));
    // In-place collectives read/write at a rank-dependent offset inside the
    // receive buffer.
    void *data = in_place ? ((void *)((uintptr_t)args->recvbuffs[i] + args->recvInplaceOffset*rank)) : args->recvbuffs[i];
    TESTCHECK(CheckDelta(data , args->expected[i], count, type, args->deltaHost));
    maxDelta = std::max(*(args->deltaHost), maxDelta);

#ifdef DEBUG_PRINT
    if (rank == 0) {
       int *expectedHost = (int *)malloc(args->expectedBytes);
       int *dataHost = (int *)malloc(args->expectedBytes);

       cudaMemcpy(expectedHost, args->expected[0], args->expectedBytes, cudaMemcpyDeviceToHost);
       printf("\n Expected: ");
       for(int j=0; j<args->expectedBytes/sizeof(int); j++) {
         printf("%d:%d ", j, expectedHost[j]);
       }
       printf("\n");

       cudaMemcpy(dataHost, data, args->expectedBytes, cudaMemcpyDeviceToHost);
       printf("\n Actual: ");
       for (int j=0; j<args->expectedBytes/sizeof(int); j++) {
         printf("%d:%d ", j, dataHost[j]);
       }
       printf("\n");
       free(expectedHost);
       free(dataHost);
    }
#endif
  }
  double nranks = args->nProcs*args->nThreads*args->nGpus;
  if (args->reportErrors && maxDelta > DeltaMaxValue(type)*(nranks - 1)) args->errors[0]++;
  *delta = maxDelta;
  return testSuccess;
}
// Waits for all `ngpus` streams to drain by polling cudaStreamQuery instead
// of blocking in cudaStreamSynchronize, so that NCCL asynchronous errors can
// be detected mid-wait and the communicators aborted (avoids hanging forever
// on a failed collective). Yields the CPU while idle.
testResult_t testStreamSynchronize(int ngpus, cudaStream_t* streams, ncclComm_t* comms) {
  cudaError_t cudaErr;
  int remaining = ngpus;
  int* done = (int*)malloc(sizeof(int)*ngpus);
  memset(done, 0, sizeof(int)*ngpus);

  while (remaining) {
   int idle = 1;
   for (int i=0; i<ngpus; i++) {
     if (done[i]) continue;

     cudaErr = cudaStreamQuery(streams[i]);
     if (cudaErr == cudaSuccess) {
       done[i] = 1;
       remaining--;
       idle = 0;
       continue;
     }

     // Anything other than "still running" is a real CUDA error.
     if (cudaErr != cudaErrorNotReady) CUDACHECK(cudaErr);

#if NCCL_VERSION_CODE >= NCCL_VERSION(2,4,0)
     // Runtime check: the header may be >=2.4 while the loaded library is older.
     if (test_ncclVersion >= NCCL_VERSION(2,4,0) && comms) {
       ncclResult_t ncclAsyncErr;
       NCCLCHECK(ncclCommGetAsyncError(comms[i], &ncclAsyncErr));
       if (ncclAsyncErr != ncclSuccess) {
         // An asynchronous error happened. Stop the operation and destroy
         // the communicator
         for (int i=0; i<ngpus; i++)
           NCCLCHECK(ncclCommAbort(comms[i]));
         // Abort the perf test
         NCCLCHECK(ncclAsyncErr);
       }
     }
#endif
   }

   // We might want to let other threads (including NCCL threads) use the CPU.
   if (idle) pthread_yield();
  }

  free(done);
  return testSuccess;
}
// Enqueues one collective per GPU owned by this thread (grouped so NCCL
// treats them as a single operation). `iter` rotates the buffer offset so
// successive iterations touch different memory, defeating caching and
// exposing pointer-exchange races. In blocking mode it also waits for
// completion and barriers before returning.
testResult_t startColl(struct threadArgs* args, ncclDataType_t type, ncclRedOp_t op, int root, int in_place, int iter) {
  size_t count = args->nbytes / wordSize(type);

  // Try to change offset for each iteration so that we avoid cache effects and catch race conditions in ptrExchange
  size_t totalnbytes = max(args->sendBytes, args->expectedBytes);
  size_t steps = totalnbytes ? args->maxbytes / totalnbytes : 1;
  size_t shift = totalnbytes * (iter % steps);

  if (args->nGpus > 1) NCCLCHECK(ncclGroupStart());
  for (int i = 0; i < args->nGpus; i++) {
#ifndef NCCL_MAJOR
    // NCCL 1.x required the device to be current for each call.
    int cudaDev;
    NCCLCHECK(ncclCommCuDevice(args->comms[i], &cudaDev));
    CUDACHECK(cudaSetDevice(cudaDev));
#endif
    int rank = ((args->proc*args->nThreads + args->thread)*args->nGpus + i);
    char* recvBuff = ((char*)args->recvbuffs[i]) + shift;
    char* sendBuff = ((char*)args->sendbuffs[i]) + shift;
    // In-place mode overlaps send/recv inside the receive buffer at a
    // rank-dependent offset; collective-specific runColl does the launch.
    TESTCHECK(args->collTest->runColl(
          (void*)(in_place ? recvBuff + args->sendInplaceOffset*rank : sendBuff),
          (void*)(in_place ? recvBuff + args->recvInplaceOffset*rank : recvBuff),
        count, type, op, root, args->comms[i], args->streams[i]));
  }
  if (args->nGpus > 1) NCCLCHECK(ncclGroupEnd());

  if (blocking_coll) {
    // Complete op before returning
    TESTCHECK(testStreamSynchronize(args->nGpus, args->streams, args->comms));
  }
  if (blocking_coll) Barrier(args);

  return testSuccess;
}
// Drain the outstanding collectives on every stream owned by this thread.
// In blocking mode startColl() has already synchronized, so nothing remains
// to wait for here.
testResult_t completeColl(struct threadArgs* args) {
  if (!blocking_coll) {
    TESTCHECK(testStreamSynchronize(args->nGpus, args->streams, args->comms));
  }
  return testSuccess;
}
// Times one (type, op, root, in_place) configuration at the current size:
// optional data init, a sync launch, the timed iteration loop (optionally
// captured/replayed as a CUDA graph), bandwidth computation, an optional
// single validation pass, and result printing. Accumulates bus bandwidth into
// args->bw[0] / args->bw_count[0].
testResult_t BenchTime(struct threadArgs* args, ncclDataType_t type, ncclRedOp_t op, int root, int in_place) {
  size_t count = args->nbytes / wordSize(type);
  if (datacheck) {
    // Initialize sendbuffs, recvbuffs and expected
    TESTCHECK(args->collTest->initData(args, type, op, root, 99, in_place));
  }

  // Sync
  TESTCHECK(startColl(args, type, op, root, in_place, 0));
  TESTCHECK(completeColl(args));

  Barrier(args);

#if CUDART_VERSION >= 11030
  cudaGraph_t graphs[args->nGpus];
  cudaGraphExec_t graphExec[args->nGpus];
  if (cudaGraphLaunches >= 1) {
    // Begin cuda graph capture — the timed loop below records into the graph
    // instead of executing.
    for (int i=0; i<args->nGpus; i++) {
      // Thread local mdoe is needed for:
      // - Multi-thread mode: where graph capture and instantiation can happen concurrently across threads
      // - P2P pre-connect: when there is no warm-up, P2P pre-connect is done during graph capture.
      //   Since pre-connect calls cudaMalloc, we cannot use global capture mode
      CUDACHECK(cudaStreamBeginCapture(args->streams[i], args->nThreads > 1 ? cudaStreamCaptureModeThreadLocal : cudaStreamCaptureModeGlobal));
    }
  }
#endif

  // Performance Benchmark
  auto start = std::chrono::high_resolution_clock::now();
  for (int iter = 0; iter < iters; iter++) {
    if (agg_iters>1) NCCLCHECK(ncclGroupStart());
    for (int aiter = 0; aiter < agg_iters; aiter++) {
      TESTCHECK(startColl(args, type, op, root, in_place, iter*agg_iters+aiter));
    }
    if (agg_iters>1) NCCLCHECK(ncclGroupEnd());
  }

#if CUDART_VERSION >= 11030
  if (cudaGraphLaunches >= 1) {
    // End cuda graph capture
    for (int i=0; i<args->nGpus; i++) {
      CUDACHECK(cudaStreamEndCapture(args->streams[i], graphs+i));
    }
    // Instantiate cuda graph
    for (int i=0; i<args->nGpus; i++) {
      CUDACHECK(cudaGraphInstantiate(graphExec+i, graphs[i], NULL, NULL, 0));
    }
    // Resync CPU, restart timing, launch cuda graph
    Barrier(args);
    start = std::chrono::high_resolution_clock::now();
    for (int l=0; l<cudaGraphLaunches; l++) {
      for (int i=0; i<args->nGpus; i++) {
        CUDACHECK(cudaGraphLaunch(graphExec[i], args->streams[i]));
      }
    }
  }
#endif

  TESTCHECK(completeColl(args));

  auto delta = std::chrono::high_resolution_clock::now() - start;
  double deltaSec = std::chrono::duration_cast<std::chrono::duration<double>>(delta).count();
  // Normalize to a single collective launch.
  deltaSec = deltaSec/(iters*agg_iters);
  if (cudaGraphLaunches >= 1) deltaSec = deltaSec/cudaGraphLaunches;
  // Combine timings across threads/ranks per the -a policy.
  Allreduce(args, &deltaSec, average);

#if CUDART_VERSION >= 11030
  if (cudaGraphLaunches >= 1) {
    //destroy cuda graph
    for (int i=0; i<args->nGpus; i++) {
      CUDACHECK(cudaGraphExecDestroy(graphExec[i]));
      CUDACHECK(cudaGraphDestroy(graphs[i]));
    }
  }
#endif

  double algBw, busBw;
  args->collTest->getBw(count, wordSize(type), deltaSec, &algBw, &busBw, args->nProcs*args->nThreads*args->nGpus);

  Barrier(args);

  double maxDelta = 0;
  static __thread int rep = 0;  // per-thread repetition counter varies the data pattern
  rep++;
  if (datacheck) {
      // Initialize sendbuffs, recvbuffs and expected
      TESTCHECK(args->collTest->initData(args, type, op, root, rep, in_place));

#if CUDART_VERSION >= 11030
      if (cudaGraphLaunches >= 1) {
        // Begin cuda graph capture for data check
        for (int i=0; i<args->nGpus; i++) {
          CUDACHECK(cudaStreamBeginCapture(args->streams[i], cudaStreamCaptureModeThreadLocal));
        }
      }
#endif

      //test validation in single itertion, should ideally be included into the multi-iteration run
      TESTCHECK(startColl(args, type, op, root, in_place, 0));

#if CUDART_VERSION >= 11030
      if (cudaGraphLaunches >= 1) {
        // End cuda graph capture
        for (int i=0; i<args->nGpus; i++) {
          CUDACHECK(cudaStreamEndCapture(args->streams[i], graphs+i));
        }
        // Instantiate cuda graph
        for (int i=0; i<args->nGpus; i++) {
          CUDACHECK(cudaGraphInstantiate(graphExec+i, graphs[i], NULL, NULL, 0));
        }
        // Launch cuda graph
        for (int i=0; i<args->nGpus; i++) {
          CUDACHECK(cudaGraphLaunch(graphExec[i], args->streams[i]));
        }
      }
#endif

      TESTCHECK(completeColl(args));

#if CUDART_VERSION >= 11030
      if (cudaGraphLaunches >= 1) {
        //destroy cuda graph
        for (int i=0; i<args->nGpus; i++) {
          CUDACHECK(cudaGraphExecDestroy(graphExec[i]));
          CUDACHECK(cudaGraphDestroy(graphs[i]));
        }
      }
#endif

      TESTCHECK(CheckData(args, type, op, root, in_place, &maxDelta));

      //aggregate delta from all threads and procs
      Allreduce(args, &maxDelta, 3);
  }

  double timeUsec = deltaSec*1.0E6;
  char timeStr[100];
  // Pick a precision that keeps the column width constant.
  if (timeUsec >= 10000.0) {
    sprintf(timeStr, "%7.0f", timeUsec);
  } else if (timeUsec >= 100.0) {
    sprintf(timeStr, "%7.1f", timeUsec);
  } else {
    sprintf(timeStr, "%7.2f", timeUsec);
  }
  if (datacheck) {
     PRINT("  %7s  %6.2f  %6.2f  %5.0le", timeStr, algBw, busBw, maxDelta);
  } else {
     PRINT("  %7s  %6.2f  %6.2f  %5s", timeStr, algBw, busBw, "N/A");
  }

  args->bw[0] += busBw;
  args->bw_count[0]++;
  return testSuccess;
}
// Derives the per-collective byte counts (send/recv/param sizes and in-place
// offsets) for a given message size and stores them into args. The mapping
// from `count` to the individual counts is collective-specific
// (getCollByteCount).
void setupArgs(size_t size, ncclDataType_t type, struct threadArgs* args) {
  int nranks = args->nProcs*args->nGpus*args->nThreads;
  size_t count, sendCount, recvCount, paramCount, sendInplaceOffset, recvInplaceOffset;

  count = size / wordSize(type);
  args->collTest->getCollByteCount(&sendCount, &recvCount, &paramCount, &sendInplaceOffset, &recvInplaceOffset, (size_t)count, (size_t)nranks);

  args->nbytes = paramCount * wordSize(type);
  args->sendBytes = sendCount * wordSize(type);
  args->expectedBytes = recvCount * wordSize(type);
  args->sendInplaceOffset = sendInplaceOffset * wordSize(type);
  args->recvInplaceOffset = recvInplaceOffset * wordSize(type);
}

// Runs the full size sweep for one (type, op, root) combination: warm-up at
// the largest and smallest sizes, then for each size one out-of-place and one
// in-place timed benchmark line.
testResult_t TimeTest(struct threadArgs* args, ncclDataType_t type, const char* typeName, ncclRedOp_t op, const char* opName, int root) {
  // Warm-up for large size
  setupArgs(args->maxbytes, type, args);
  for (int iter = 0; iter < warmup_iters; iter++) {
    TESTCHECK(startColl(args, type, op, root, 0, iter));
  }
  TESTCHECK(completeColl(args));

  // Warm-up for small size
  setupArgs(args->minbytes, type, args);
  for (int iter = 0; iter < warmup_iters; iter++) {
    TESTCHECK(startColl(args, type, op, root, 0, iter));
  }
  TESTCHECK(completeColl(args));

  // Benchmark
  for (size_t size = args->minbytes; size<=args->maxbytes; size = ((args->stepfactor > 1) ? size*args->stepfactor : size+args->stepbytes)) {
      setupArgs(size, type, args);
      print_line_header(max(args->sendBytes, args->expectedBytes), args->nbytes / wordSize(type), typeName, opName, root);
      TESTCHECK(BenchTime(args, type, op, root, 0));   // out-of-place
      TESTCHECK(BenchTime(args, type, op, root, 1));   // in-place
      PRINT("\n");
  }
  return testSuccess;
}
// Worker-thread body when NCCL was initialized by the main thread: selects
// this thread's first GPU and runs the collective-specific test sweep.
testResult_t threadRunTests(struct threadArgs* args) {
  // Set device to the first of our GPUs. If we don't do that, some operations
  // will be done on the current GPU (by default : 0) and if the GPUs are in
  // exclusive mode those operations will fail.
  int gpuid = args->localRank*args->nThreads*args->nGpus + args->thread*args->nGpus;
  CUDACHECK(cudaSetDevice(gpuid));
  TESTCHECK(ncclTestEngine.runTest(args, ncclroot, (ncclDataType_t)nccltype, test_typenames[nccltype], (ncclRedOp_t)ncclop, test_opnames[ncclop]));
  return testSuccess;
}

// Worker-thread body under -p/--parallel_init: each thread creates its own
// communicators via ncclCommInitRank (grouped), runs the tests, and destroys
// its communicators afterwards.
testResult_t threadInit(struct threadArgs* args) {
  char hostname[1024];
  getHostName(hostname, 1024);
  int nranks =  args->nProcs*args->nThreads*args->nGpus;

  //set main thread again
  is_main_thread = (args->proc == 0 && args->thread == 0) ? 1 : 0;

  NCCLCHECK(ncclGroupStart());
  for (int i=0; i<args->nGpus; i++) {
    int rank = args->proc*args->nThreads*args->nGpus + args->thread*args->nGpus + i;
    int gpuid = args->localRank*args->nThreads*args->nGpus + args->thread*args->nGpus + i;
    CUDACHECK(cudaSetDevice(gpuid));
    NCCLCHECK(ncclCommInitRank(args->comms+i, nranks, args->ncclId, rank));
  }
  NCCLCHECK(ncclGroupEnd());

  TESTCHECK(threadRunTests(args));

  for (int i=0; i<args->nGpus; i++) {
    NCCLCHECK(ncclCommDestroy(args->comms[i]));
  }
  return testSuccess;
}
// pthread entry point: unpack the testThread handle, run its work function,
// and stash the result where the joining thread can check it.
void* threadLauncher(void* opaque) {
  struct testThread* self = (struct testThread*)opaque;
  self->ret = self->func(&self->args);
  return NULL;
}
// Spawns a worker thread running threadLauncher over this testThread.
// NOTE(review): pthread_create's return value is ignored — a failed spawn
// would leave thread->thread undefined and the later pthread_join UB; worth
// checking and returning an error result.
testResult_t threadLaunch(struct testThread* thread) {
  pthread_create(&thread->thread, NULL, threadLauncher, thread);
  return testSuccess;
}

// Allocates the device-side send/recv buffers (and `expected` when datacheck
// is on). Send/recv are deliberately sized `nbytes` (= maxBytes) rather than
// sendBytes/recvBytes, because startColl() shifts the working offset inside
// the buffer between iterations.
testResult_t AllocateBuffs(void **sendbuff, size_t sendBytes, void **recvbuff, size_t recvBytes, void **expected, size_t nbytes, int nranks) {
    CUDACHECK(cudaMalloc(sendbuff, nbytes));
    CUDACHECK(cudaMalloc(recvbuff, nbytes));
    if (datacheck) CUDACHECK(cudaMalloc(expected, recvBytes));
    return testSuccess;
}
testResult_t run(); // Main function
// Entry point: detects the NCCL runtime version, parses command-line options
// into the file-scope config globals, validates them, initializes MPI when
// built with it, and hands off to run().
int main(int argc, char* argv[]) {
  // Make sure everyline is flushed so that we see the progress of the test
  setlinebuf(stdout);

#if NCCL_VERSION_CODE >= NCCL_VERSION(2,4,0)
  ncclGetVersion(&test_ncclVersion);
#else
  test_ncclVersion = NCCL_VERSION_CODE;
#endif
  //printf("# NCCL_VERSION_CODE=%d ncclGetVersion=%d\n", NCCL_VERSION_CODE, test_ncclVersion);
  // Headers newer than the loaded library: drop the ops/types the runtime
  // cannot handle (avg and bfloat16 arrived in 2.10).
#if NCCL_VERSION_CODE >= NCCL_VERSION(2,10,0)
  if (test_ncclVersion < NCCL_VERSION(2,10,0)) {
    test_opnum -= 1;   // exclude ncclAvg
    test_typenum -= 1; // exclude bfloat16
  }
#endif

  // Parse args
  double parsed;
  int longindex;
  static struct option longopts[] = {
    {"nthreads", required_argument, 0, 't'},
    {"ngpus", required_argument, 0, 'g'},
    {"minbytes", required_argument, 0, 'b'},
    {"maxbytes", required_argument, 0, 'e'},
    {"stepbytes", required_argument, 0, 'i'},
    {"stepfactor", required_argument, 0, 'f'},
    {"iters", required_argument, 0, 'n'},
    {"agg_iters", required_argument, 0, 'm'},
    {"warmup_iters", required_argument, 0, 'w'},
    {"parallel_init", required_argument, 0, 'p'},
    {"check", required_argument, 0, 'c'},
    {"op", required_argument, 0, 'o'},
    {"datatype", required_argument, 0, 'd'},
    {"root", required_argument, 0, 'r'},
    {"blocking", required_argument, 0, 'z'},
    {"cudagraph", required_argument, 0, 'G'},
    {"average", required_argument, 0, 'a'},
    {"help", no_argument, 0, 'h'}
  };

  while(1) {
    int c;
    c = getopt_long(argc, argv, "t:g:b:e:i:f:n:m:w:p:c:o:d:r:z:hG:a:", longopts, &longindex);

    if (c == -1)
      break;

    switch(c) {
      case 't':
        nThreads = strtol(optarg, NULL, 0);
        break;
      case 'g':
        nGpus = strtol(optarg, NULL, 0);
        break;
      case 'b':
        parsed = parsesize(optarg);
        if (parsed < 0) {
          fprintf(stderr, "invalid size specified for 'minbytes'\n");
          return -1;
        }
        minBytes = (size_t)parsed;
        break;
      case 'e':
        parsed = parsesize(optarg);
        if (parsed < 0) {
          fprintf(stderr, "invalid size specified for 'maxbytes'\n");
          return -1;
        }
        maxBytes = (size_t)parsed;
        break;
      case 'i':
        stepBytes = strtol(optarg, NULL, 0);
        break;
      case 'f':
        stepFactor = strtol(optarg, NULL, 0);
        break;
      case 'n':
        iters = (int)strtol(optarg, NULL, 0);
        break;
      case 'm':
#if NCCL_MAJOR > 2 || (NCCL_MAJOR >= 2 && NCCL_MINOR >= 2)
        // Aggregated iterations need NCCL group semantics from 2.2.
        agg_iters = (int)strtol(optarg, NULL, 0);
#else
        fprintf(stderr, "Option -m not supported before NCCL 2.2. Ignoring\n");
#endif
        break;
      case 'w':
        warmup_iters = (int)strtol(optarg, NULL, 0);
        break;
      case 'c':
        datacheck = (int)strtol(optarg, NULL, 0);
        break;
      case 'p':
        parallel_init = (int)strtol(optarg, NULL, 0);
        break;
      case 'o':
        ncclop = ncclstringtoop(optarg);
        break;
      case 'd':
        nccltype = ncclstringtotype(optarg);
        break;
      case 'r':
        ncclroot = strtol(optarg, NULL, 0);
        break;
      case 'z':
        blocking_coll = strtol(optarg, NULL, 0);
        break;
      case 'G':
#if (NCCL_MAJOR > 2 || (NCCL_MAJOR >= 2 && NCCL_MINOR >= 9)) && CUDART_VERSION >= 11030
        cudaGraphLaunches = strtol(optarg, NULL, 0);
#else
        printf("Option -G (CUDA graph) not supported before NCCL 2.9 + CUDA 11.3. Ignoring\n");
#endif
        break;
      case 'a':
        average = (int)strtol(optarg, NULL, 0);
        break;
      case 'h':
      default:
        if (c != 'h') printf("invalid option '%c'\n", c);
        printf("USAGE: %s \n\t"
            "[-t,--nthreads <num threads>] \n\t"
            "[-g,--ngpus <gpus per thread>] \n\t"
            "[-b,--minbytes <min size in bytes>] \n\t"
            "[-e,--maxbytes <max size in bytes>] \n\t"
            "[-i,--stepbytes <increment size>] \n\t"
            "[-f,--stepfactor <increment factor>] \n\t"
            "[-n,--iters <iteration count>] \n\t"
            "[-m,--agg_iters <aggregated iteration count>] \n\t"
            "[-w,--warmup_iters <warmup iteration count>] \n\t"
            "[-p,--parallel_init <0/1>] \n\t"
            "[-c,--check <0/1>] \n\t"
#if NCCL_VERSION_CODE >= NCCL_VERSION(2,10,0)
            "[-o,--op <sum/prod/min/max/avg/all>] \n\t"
#else
            "[-o,--op <sum/prod/min/max/all>] \n\t"
#endif
            "[-d,--datatype <nccltype/all>] \n\t"
            "[-r,--root <root>] \n\t"
            "[-z,--blocking <0/1>] \n\t"
            "[-G,--cudagraph <num graph launches>] \n\t"
            "[-a,--average <0/1/2/3> report average iteration time <0=RANK0/1=AVG/2=MIN/3=MAX>] \n\t"
            "[-h,--help]\n",
          basename(argv[0]));
        return 0;
    }
  }
  if (minBytes > maxBytes) {
    fprintf(stderr, "invalid sizes for 'minbytes' and 'maxbytes': %llu > %llu\n",
           (unsigned long long)minBytes,
           (unsigned long long)maxBytes);
    return -1;
  }
#ifdef MPI_SUPPORT
  MPI_Init(&argc, &argv);
#endif
  TESTCHECK(run());
  return 0;
}
testResult_t run() {
int nProcs = 1, proc = 0;
int localRank = 0;
char hostname[1024];
getHostName(hostname, 1024);
#ifdef MPI_SUPPORT
MPI_Comm_size(MPI_COMM_WORLD, &nProcs);
MPI_Comm_rank(MPI_COMM_WORLD, &proc);
uint64_t hostHashs[nProcs];
hostHashs[proc] = getHostHash(hostname);
MPI_Allgather(MPI_IN_PLACE, 0, MPI_DATATYPE_NULL, hostHashs, sizeof(uint64_t), MPI_BYTE, MPI_COMM_WORLD);
for (int p=0; p<nProcs; p++) {
if (p == proc) break;
if (hostHashs[p] == hostHashs[proc]) localRank++;
}
#endif
is_main_thread = (proc == 0) ? 1 : 0;
PRINT("# nThread %d nGpus %d minBytes %ld maxBytes %ld step: %ld(%s) warmup iters: %d iters: %d validation: %d \n", nThreads, nGpus, minBytes, maxBytes,
(stepFactor > 1)?stepFactor:stepBytes, (stepFactor > 1)?"factor":"bytes", warmup_iters, iters, datacheck);
if (blocking_coll) PRINT("# Blocking Enabled: wait for completion and barrier after each collective \n");
if (parallel_init) PRINT("# Parallel Init Enabled: threads call into NcclInitRank concurrently \n");
PRINT("#\n");
PRINT("# Using devices\n");
#define MAX_LINE 2048
char line[MAX_LINE];
int len = 0;
size_t maxMem = ~0;
for (int i=0; i<nThreads*nGpus; i++) {
int cudaDev = localRank*nThreads*nGpus+i;
int rank = proc*nThreads*nGpus+i;
cudaDeviceProp prop;
CUDACHECK(cudaGetDeviceProperties(&prop, cudaDev));
len += snprintf(line+len, MAX_LINE-len, "# Rank %2d Pid %6d on %10s device %2d [0x%02x] %s\n",
rank, getpid(), hostname, cudaDev, prop.pciBusID, prop.name);
maxMem = std::min(maxMem, prop.totalGlobalMem);
}
#if MPI_SUPPORT
char *lines = (proc == 0) ? (char *)malloc(nProcs*MAX_LINE) : NULL;
// Gather all output in rank order to root (0)
MPI_Gather(line, MAX_LINE, MPI_BYTE, lines, MAX_LINE, MPI_BYTE, 0, MPI_COMM_WORLD);
if (proc == 0) {
for (int p = 0; p < nProcs; p++)
PRINT("%s", lines+MAX_LINE*p);
free(lines);
}
MPI_Allreduce(MPI_IN_PLACE, &maxMem, 1, MPI_LONG, MPI_MIN, MPI_COMM_WORLD);
#else
PRINT("%s", line);
#endif
// We need sendbuff, recvbuff, expected (when datacheck enabled), plus 1G for the rest.
size_t memMaxBytes = (maxMem - (1<<30)) / (datacheck ? 3 : 2);
if (maxBytes > memMaxBytes) {
maxBytes = memMaxBytes;
if (proc == 0) printf("#\n# Reducing maxBytes to %ld due to memory limitation\n", maxBytes);
}
ncclUniqueId ncclId;
if (proc == 0) {
NCCLCHECK(ncclGetUniqueId(&ncclId));
}
#ifdef MPI_SUPPORT
MPI_Bcast(&ncclId, sizeof(ncclId), MPI_BYTE, 0, MPI_COMM_WORLD);
#endif
cudaStream_t streams[nGpus*nThreads];
void* sendbuffs[nGpus*nThreads];
void* recvbuffs[nGpus*nThreads];
void* expected[nGpus*nThreads];
size_t sendBytes, recvBytes;
ncclTestEngine.getBuffSize(&sendBytes, &recvBytes, (size_t)maxBytes, (size_t)nProcs*nGpus*nThreads);
for (int i=0; i<nGpus*nThreads; i++) {
CUDACHECK(cudaSetDevice(localRank*nThreads*nGpus+i));
TESTCHECK(AllocateBuffs(sendbuffs+i, sendBytes, recvbuffs+i, recvBytes, expected+i, (size_t)maxBytes, nProcs*nThreads*nGpus));
CUDACHECK(cudaStreamCreateWithFlags(streams+i, cudaStreamNonBlocking));
}
//if parallel init is not selected, use main thread to initialize NCCL
ncclComm_t* comms = (ncclComm_t*)malloc(sizeof(ncclComm_t)*nThreads*nGpus);
if (!parallel_init) {
if (nProcs == 1) {
int gpuArray[nGpus*nThreads];
for (int i=0; i<nGpus*nThreads; i++) gpuArray[i] = i;
NCCLCHECK(ncclCommInitAll(comms, nGpus*nThreads, gpuArray));
} else {
NCCLCHECK(ncclGroupStart());
for (int i=0; i<nGpus*nThreads; i++) {
CUDACHECK(cudaSetDevice(localRank*nThreads*nGpus+i));
NCCLCHECK(ncclCommInitRank(comms+i, nProcs*nThreads*nGpus, ncclId, proc*nThreads*nGpus+i));
}
NCCLCHECK(ncclGroupEnd());
}
}
int errors[nThreads];
double bw[nThreads];
double* delta;
CUDACHECK(cudaHostAlloc(&delta, sizeof(double)*nThreads*NUM_BLOCKS, cudaHostAllocPortable | cudaHostAllocMapped));
int bw_count[nThreads];
for (int t=0; t<nThreads; t++) {
bw[t] = 0.0;
errors[t] = bw_count[t] = 0;
}
PRINT("#\n");
print_header();
int* sync = (int*)calloc(2, sizeof(int));
int* barrier = (int*)calloc(2, sizeof(int));
double* reduce = (double*)calloc(2, sizeof(double));
struct testThread threads[nThreads];
memset(threads, 0, sizeof(struct testThread)*nThreads);
for (int t=nThreads-1; t>=0; t--) {
threads[t].args.minbytes=minBytes;
threads[t].args.maxbytes=maxBytes;
threads[t].args.stepbytes=stepBytes;
threads[t].args.stepfactor=stepFactor;
threads[t].args.localRank = localRank;
threads[t].args.nProcs=nProcs;
threads[t].args.proc=proc;
threads[t].args.nThreads=nThreads;
threads[t].args.thread=t;
threads[t].args.nGpus=nGpus;
threads[t].args.sendbuffs = sendbuffs+t*nGpus;
threads[t].args.recvbuffs = recvbuffs+t*nGpus;
threads[t].args.expected = expected+t*nGpus;
threads[t].args.ncclId = ncclId;
threads[t].args.comms=comms+t*nGpus;
threads[t].args.streams=streams+t*nGpus;
threads[t].args.barrier = (volatile int*)barrier;
threads[t].args.barrier_idx = 0;
threads[t].args.reduce = (volatile double*)reduce;
threads[t].args.sync = (volatile int*)sync;
threads[t].args.sync_idx = 0;
threads[t].args.deltaHost = (delta + t*NUM_BLOCKS);
threads[t].args.errors=errors+t;
threads[t].args.bw=bw+t;
threads[t].args.bw_count=bw_count+t;
threads[t].args.reportErrors = 1;
threads[t].func = parallel_init ? threadInit : threadRunTests;
if (t)
TESTCHECK(threadLaunch(threads+t));
else
TESTCHECK(threads[t].func(&threads[t].args));
}
// Wait for other threads and accumulate stats and errors
for (int t=nThreads-1; t>=0; t--) {
if (t) pthread_join(threads[t].thread, NULL);
TESTCHECK(threads[t].ret);
if (t) {
errors[0] += errors[t];
bw[0] += bw[t];
bw_count[0] += bw_count[t];
}
}
#ifdef MPI_SUPPORT
MPI_Allreduce(MPI_IN_PLACE, &errors[0], 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
#endif
if (!parallel_init) {
for(int i=0; i<nGpus*nThreads; ++i)
NCCLCHECK(ncclCommDestroy(comms[i]));
free(comms);
}
// Free off CUDA allocated memory
for (int i=0; i<nGpus*nThreads; i++) {
if (sendbuffs[i]) CUDACHECK(cudaFree((char*)sendbuffs[i]));
if (recvbuffs[i]) CUDACHECK(cudaFree((char*)recvbuffs[i]));
if (datacheck) CUDACHECK(cudaFree(expected[i]));
}
CUDACHECK(cudaFreeHost(delta));
char* str = getenv("NCCL_TESTS_MIN_BW");
double check_avg_bw = str ? atof(str) : -1;
bw[0] /= bw_count[0];
PRINT("# Out of bounds values : %d %s\n", errors[0], errors[0] ? "FAILED" : "OK");
PRINT("# Avg bus bandwidth : %g %s\n", bw[0], check_avg_bw == -1 ? "" : (bw[0] < check_avg_bw*(0.9) ? "FAILED" : "OK"));
PRINT("#\n");
#ifdef MPI_SUPPORT
MPI_Finalize();
#endif
// 'cuda-memcheck --leak-check full' requires this
cudaDeviceReset();
if (errors[0] || bw[0] < check_avg_bw*(0.9))
exit(EXIT_FAILURE);
else
exit(EXIT_SUCCESS);
}
|
fa715ae36369573eb1b6c269202335ade9efcebb.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright 2012 by Erik Opavsky
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
void printDeviceSequences (char * d_sequences, int numSequences, int sequenceLength) {
char * temp = (char *) malloc (sizeof (char) * numSequences * sequenceLength);
hipMemcpy (temp, d_sequences, sizeof (char) * numSequences * sequenceLength, hipMemcpyDeviceToHost);
// for (int i = 0; i < numSequences * sequenceLength; i += sequenceLength)
// printf ("d_sequences[%d] = %s\n", i / sequenceLength, temp + i);
for (int i = 0; i < numSequences; i++) {
printf ("d_sequences[%d] = ", i);
for (int j = 0; j < sequenceLength; j++)
printf ("%c", *(temp + i * sequenceLength + j));
printf ("\n");
}
free (temp);
}
void printFirstLastBuckets (char * d_bucketSequence, int numBuckets, int matchLength, int sequenceLength) {
char * temp = (char *) malloc (sizeof (char) * sequenceLength);
hipMemcpy (temp, d_bucketSequence, sizeof (char) * sequenceLength, hipMemcpyDeviceToHost);
// hipMemcpy (temp2, (d_bucketSequence + numBuckets * sizeof (char)), sizeof (char) * matchLength, hipMemcpyDeviceToHost);
printf ("first bucket = ");
for (int i = 0; i < matchLength; i++)
printf("%c", *(temp + i));
printf("\nlast bucket = ");
for (int i = 0; i < matchLength; i++)
printf("%c", *(temp + numBuckets - 1 + i));
printf("\n");
// printf("numbuckets = %d\n", numBuckets);
free (temp);
// free (temp2);
}
void printDeviceFirstLast (char * d_sequences, int numSequences, int sequenceLength) {
char * temp = (char *) malloc (sizeof (char) * 2 * sequenceLength);
hipMemcpy (temp, d_sequences, sizeof (char) * sequenceLength, hipMemcpyDeviceToHost);
hipMemcpy (temp + sequenceLength, d_sequences + sequenceLength * (numSequences - 1), sizeof (char) * sequenceLength, hipMemcpyDeviceToHost);
int i;
printf ("d_sequences[0] = ");
for (i = 0; i < sequenceLength; i++)
printf ("%c", *(temp + i));
printf ("\n");
printf ("d_sequences[%d] = ", numSequences - 1);
for (; i < sequenceLength * 2; i++)
printf ("%c", *(temp + i));
printf ("\n");
free (temp);
}
void printFirstLast (char ** sequences, int numSequences, int sequenceLength) {
printf("sequences[0] = %s\n", sequences[0]);
printf("sequences[%d] = %s\n", numSequences - 1, sequences[numSequences - 1]);
}
void printSequences (char ** sequences, int numSequences, int sequenceLength) {
for (int i = 0; i < numSequences; i++) {
printf ("sequences[%d] = ", i);
for (int j = 0; j < sequenceLength; j++)
printf ("%c", sequences[i][j]);
printf ("\n");
}
}
/*
void printFlatSequences (char * sequences, int numSequences, int sequenceLength) {
for (int i = 0; i < numSequences; i++)
printf ("flat_sequences[%d] = %s\n", i , sequences + i * sequenceLength);
}
*/
| fa715ae36369573eb1b6c269202335ade9efcebb.cu | /* Copyright 2012 by Erik Opavsky
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
void printDeviceSequences (char * d_sequences, int numSequences, int sequenceLength) {
char * temp = (char *) malloc (sizeof (char) * numSequences * sequenceLength);
cudaMemcpy (temp, d_sequences, sizeof (char) * numSequences * sequenceLength, cudaMemcpyDeviceToHost);
// for (int i = 0; i < numSequences * sequenceLength; i += sequenceLength)
// printf ("d_sequences[%d] = %s\n", i / sequenceLength, temp + i);
for (int i = 0; i < numSequences; i++) {
printf ("d_sequences[%d] = ", i);
for (int j = 0; j < sequenceLength; j++)
printf ("%c", *(temp + i * sequenceLength + j));
printf ("\n");
}
free (temp);
}
void printFirstLastBuckets (char * d_bucketSequence, int numBuckets, int matchLength, int sequenceLength) {
char * temp = (char *) malloc (sizeof (char) * sequenceLength);
cudaMemcpy (temp, d_bucketSequence, sizeof (char) * sequenceLength, cudaMemcpyDeviceToHost);
// cudaMemcpy (temp2, (d_bucketSequence + numBuckets * sizeof (char)), sizeof (char) * matchLength, cudaMemcpyDeviceToHost);
printf ("first bucket = ");
for (int i = 0; i < matchLength; i++)
printf("%c", *(temp + i));
printf("\nlast bucket = ");
for (int i = 0; i < matchLength; i++)
printf("%c", *(temp + numBuckets - 1 + i));
printf("\n");
// printf("numbuckets = %d\n", numBuckets);
free (temp);
// free (temp2);
}
void printDeviceFirstLast (char * d_sequences, int numSequences, int sequenceLength) {
char * temp = (char *) malloc (sizeof (char) * 2 * sequenceLength);
cudaMemcpy (temp, d_sequences, sizeof (char) * sequenceLength, cudaMemcpyDeviceToHost);
cudaMemcpy (temp + sequenceLength, d_sequences + sequenceLength * (numSequences - 1), sizeof (char) * sequenceLength, cudaMemcpyDeviceToHost);
int i;
printf ("d_sequences[0] = ");
for (i = 0; i < sequenceLength; i++)
printf ("%c", *(temp + i));
printf ("\n");
printf ("d_sequences[%d] = ", numSequences - 1);
for (; i < sequenceLength * 2; i++)
printf ("%c", *(temp + i));
printf ("\n");
free (temp);
}
void printFirstLast (char ** sequences, int numSequences, int sequenceLength) {
printf("sequences[0] = %s\n", sequences[0]);
printf("sequences[%d] = %s\n", numSequences - 1, sequences[numSequences - 1]);
}
void printSequences (char ** sequences, int numSequences, int sequenceLength) {
for (int i = 0; i < numSequences; i++) {
printf ("sequences[%d] = ", i);
for (int j = 0; j < sequenceLength; j++)
printf ("%c", sequences[i][j]);
printf ("\n");
}
}
/*
void printFlatSequences (char * sequences, int numSequences, int sequenceLength) {
for (int i = 0; i < numSequences; i++)
printf ("flat_sequences[%d] = %s\n", i , sequences + i * sequenceLength);
}
*/
|
51dcdb322c064dcd422bfe31a8210c5b06da6699.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Imported from https://rosettacode.org/wiki/Example:Hough_transform/C
// It will be used as a baseline to observe transformation
// Modified and Parallelized with CUDA by Vipin Bakshi and Andre Lo.
// DETAILS: based on houghGPUv1.cu but tried to eliminate thread divergence
// in kernel. But this is slower than v1, time with Car.png = 18688360931 ns
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <math.h>
#include <pthread.h>
#include "cairo.h"
#include "apptime.h"
#ifndef M_PI
#define M_PI 3.1415927
#endif
// These are macros to access the R, G and B values
// of the input (d) data/ output data (ht) image buffers
#define GR(X,Y) (d[(stride)*(Y)+bpp*(X)+((2)%bpp)])
#define GG(X,Y) (d[(stride)*(Y)+bpp*(X)+((1)%bpp)])
#define GB(X,Y) (d[(stride)*(Y)+bpp*(X)+((0)%bpp)])
#define SR(X,Y) (ht[4*tw*((Y)%th)+4*((X)%tw)+2])
#define SG(X,Y) (ht[4*tw*((Y)%th)+4*((X)%tw)+1])
#define SB(X,Y) (ht[4*tw*((Y)%th)+4*((X)%tw)+0])
#define RAD(A) (M_PI*((double)(A))/180.0)
#define tw 360
// Kernel
// todo: experiment with 3D instead of 1D grid?
// computationalkernel1 is for( theta < 45 || (theta > 135 && theta < 225) || theta > 315)
__global__ void computationalkernel1(uint8_t *d, uint8_t *ht, int W, int H, int stride, int bpp, int th, int theta_offset)
{
int rho, y, x;
int theta = (threadIdx.x + blockIdx.x * blockDim.x) + theta_offset; // theta is based on grid/ block id plus offset
for(rho = 0; rho < th; rho++)
{
double C = cos(RAD(theta)); // todo: call sincos instead?
double S = sin(RAD(theta));
uint32_t totalred = 0;
uint32_t totalgreen = 0;
uint32_t totalblue = 0;
uint32_t totalpix = 0;
for(y = 0; y < H; y++) {
double dx = W/2.0 + (rho - (H/2.0-y)*S)/C;
if ( dx < 0 || dx >= W ) continue;
x = floor(dx+.5);
if (x == W) continue;
totalpix++;
totalred += GR(x, y);
totalgreen += GG(x, y);
totalblue += GB(x, y);
}
if ( totalpix > 0 ) {
double dp = totalpix;
SR(theta, rho) = (int)(totalred/dp) &0xff;
SG(theta, rho) = (int)(totalgreen/dp) &0xff;
SB(theta, rho) = (int)(totalblue/dp) &0xff;
}
}
}
// computationalkernel2 is for !( theta < 45 || (theta > 135 && theta < 225) || theta > 315)
__global__ void computationalkernel2(uint8_t *d, uint8_t *ht, int W, int H, int stride, int bpp, int th, int theta_offset)
{
int rho, y, x;
int theta = (threadIdx.x + blockIdx.x * blockDim.x) + theta_offset; // theta is based on grid/ block id plus offset
for(rho = 0; rho < th; rho++)
{
double C = cos(RAD(theta)); // todo: call sincos instead?
double S = sin(RAD(theta));
uint32_t totalred = 0;
uint32_t totalgreen = 0;
uint32_t totalblue = 0;
uint32_t totalpix = 0;
for(x = 0; x < W; x++) {
double dy = H/2.0 - (rho - (x - W/2.0)*C)/S;
if ( dy < 0 || dy >= H ) continue;
y = floor(dy+.5);
if (y == H) continue;
totalpix++;
totalred += GR(x, y);
totalgreen += GG(x, y);
totalblue += GB(x, y);
}
if ( totalpix > 0 ) {
double dp = totalpix;
SR(theta, rho) = (int)(totalred/dp) &0xff;
SG(theta, rho) = (int)(totalgreen/dp) &0xff;
SB(theta, rho) = (int)(totalblue/dp) &0xff;
}
}
}
// d is pointer to input data
// w, h, s is input data's width, height, and stridge
// bpp is bits per pixel of input data
uint8_t *houghtransform(uint8_t *h_in, int *w, int *h, int *s, int bpp)
{
// Error code to check return values for CUDA calls
hipError_t err = hipSuccess;
int W = *w, H = *h;
int th = sqrt(W*W + H*H)/2.0;
int outputBytes= th*tw*4;
// alloc space for output buffer CPU side
uint8_t *h_ht = (uint8_t *)malloc(outputBytes);
// alloc space for output buffer device side
uint8_t *d_out;
err = hipMalloc((void **)&d_out, outputBytes);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate %d bytes for d_out (error code %s)!\n", outputBytes, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemset((void *)d_out, 0, outputBytes); // black bg
if (err != hipSuccess)
{
fprintf(stderr, "Failed to hipMemset d_out (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("allocated output buffers\n");
// alloc space and init input buffer device side
uint8_t *d_in;
err = hipMalloc((void **)&d_in, (*s * *h)); // bytes = stride * height
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device d_in (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_in, h_in, (*s * *h), hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy d_in from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("allocated input buffers\n");
// todo: play with grid, block dimensions
// right now this spawns 360 total kernels, for 360 values of theta
hipLaunchKernelGGL(( computationalkernel2) , dim3(30), dim3(3), 0, 0, d_in, d_out, W, H, *s, bpp, th, 45); // theta of 45 -> 134
hipLaunchKernelGGL(( computationalkernel2) , dim3(30), dim3(3), 0, 0, d_in, d_out, W, H, *s, bpp, th, 225); // theta of 225 -> 314
hipLaunchKernelGGL(( computationalkernel1) , dim3(30), dim3(3), 0, 0, d_in, d_out, W, H, *s, bpp, th, 135); // theta of 135 -> 224
hipLaunchKernelGGL(( computationalkernel1) , dim3(30), dim3(3), 0, 0, d_in, d_out, W, H, *s, bpp, th, 315); // theta of 315 -> 44 (wrap around since 360 deg = 0 deg)
hipDeviceSynchronize(); // wait for all GPU threads to complete
printf("hipDeviceSynchronize done\n");
// Copy resulting output from device
hipMemcpy(h_ht, d_out, outputBytes, hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy d_out from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("copy result back to host done\n");
// Clean up
err = hipFree(d_in);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free d_in (error code %s)!\n", hipGetErrorString(err));
}
err = hipFree(d_out);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free d_out (error code %s)!\n", hipGetErrorString(err));
}
// h, w, and s are returned as the height, width, stride of the output image
// ht is the buffer containing the transformed output image
*h = th; // sqrt(W*W+H*H)/2
*w = tw; // 360
*s = 4*tw; // 4 because 4 bytes per pixel output format
return h_ht;
}
int main(int argc, char **argv)
{
cairo_surface_t *inputimg = NULL;
cairo_surface_t *houghimg = NULL;
uint8_t *houghdata = NULL, *inputdata = NULL;
int w, h, s, bpp, format;
uint64_t measurement_time = 0;
#if (CAIRO_HAS_PNG_FUNCTIONS==1)
printf("cairo supports PNG\n");
#else
printf("cairo does not support PNG\n");
#endif
if ( argc < 3 ) return EXIT_FAILURE;
printf("input file: %s\n", argv[1]);
printf("output file: %s\n", argv[2]);
apptime_print_res();
// Lets measure initialization time.
apptime_start_session(&measurement_time);
printf("Initialization...\n");
inputimg = cairo_image_surface_create_from_png(argv[1]);
printf("After create from png: %s\n",
cairo_status_to_string(cairo_surface_status(inputimg)));
w = cairo_image_surface_get_width(inputimg);
h = cairo_image_surface_get_height(inputimg);
s = cairo_image_surface_get_stride(inputimg);
format = cairo_image_surface_get_format(inputimg);
switch(format)
{
case CAIRO_FORMAT_ARGB32: bpp = 4; break;
case CAIRO_FORMAT_RGB24: bpp = 3; break;
case CAIRO_FORMAT_A8: bpp = 1; break;
default:
fprintf(stderr, "unsupported %i\n", format);
goto destroy;
}
inputdata = cairo_image_surface_get_data(inputimg);
measurement_time = apptime_stop_session(&measurement_time);
printf("Initialization Completed. Time: %lld ns\n", measurement_time);
printf("input buffer width %d, height %d, stride %d, bpp %d\n",
w, h, s, bpp);
// Now lets measure the Hough Time.
printf("Hough Transform using CUDA started...\n");
apptime_start_session(&measurement_time);
houghdata = houghtransform(inputdata, &w, &h, &s, bpp);
measurement_time = apptime_stop_session(&measurement_time);
printf("Hought transform completed. Time: %llu ns\n", measurement_time);
printf("w=%d, h=%d\n", w, h);
houghimg = cairo_image_surface_create_for_data(houghdata,
CAIRO_FORMAT_RGB24,
w, h, s);
cairo_surface_write_to_png(houghimg, argv[2]);
destroy:
if (inputimg != NULL) cairo_surface_destroy(inputimg);
if (houghimg != NULL) cairo_surface_destroy(houghimg);
return EXIT_SUCCESS;
}
| 51dcdb322c064dcd422bfe31a8210c5b06da6699.cu | // Imported from https://rosettacode.org/wiki/Example:Hough_transform/C
// It will be used as a baseline to observe transformation
// Modified and Parallelized with CUDA by Vipin Bakshi and Andre Lo.
// DETAILS: based on houghGPUv1.cu but tried to eliminate thread divergence
// in kernel. But this is slower than v1, time with Car.png = 18688360931 ns
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <math.h>
#include <pthread.h>
#include "cairo.h"
#include "apptime.h"
#ifndef M_PI
#define M_PI 3.1415927
#endif
// These are macros to access the R, G and B values
// of the input (d) data/ output data (ht) image buffers
#define GR(X,Y) (d[(stride)*(Y)+bpp*(X)+((2)%bpp)])
#define GG(X,Y) (d[(stride)*(Y)+bpp*(X)+((1)%bpp)])
#define GB(X,Y) (d[(stride)*(Y)+bpp*(X)+((0)%bpp)])
#define SR(X,Y) (ht[4*tw*((Y)%th)+4*((X)%tw)+2])
#define SG(X,Y) (ht[4*tw*((Y)%th)+4*((X)%tw)+1])
#define SB(X,Y) (ht[4*tw*((Y)%th)+4*((X)%tw)+0])
#define RAD(A) (M_PI*((double)(A))/180.0)
#define tw 360
// Kernel
// todo: experiment with 3D instead of 1D grid?
// computationalkernel1 is for( theta < 45 || (theta > 135 && theta < 225) || theta > 315)
__global__ void computationalkernel1(uint8_t *d, uint8_t *ht, int W, int H, int stride, int bpp, int th, int theta_offset)
{
int rho, y, x;
int theta = (threadIdx.x + blockIdx.x * blockDim.x) + theta_offset; // theta is based on grid/ block id plus offset
for(rho = 0; rho < th; rho++)
{
double C = cos(RAD(theta)); // todo: call sincos instead?
double S = sin(RAD(theta));
uint32_t totalred = 0;
uint32_t totalgreen = 0;
uint32_t totalblue = 0;
uint32_t totalpix = 0;
for(y = 0; y < H; y++) {
double dx = W/2.0 + (rho - (H/2.0-y)*S)/C;
if ( dx < 0 || dx >= W ) continue;
x = floor(dx+.5);
if (x == W) continue;
totalpix++;
totalred += GR(x, y);
totalgreen += GG(x, y);
totalblue += GB(x, y);
}
if ( totalpix > 0 ) {
double dp = totalpix;
SR(theta, rho) = (int)(totalred/dp) &0xff;
SG(theta, rho) = (int)(totalgreen/dp) &0xff;
SB(theta, rho) = (int)(totalblue/dp) &0xff;
}
}
}
// computationalkernel2 is for !( theta < 45 || (theta > 135 && theta < 225) || theta > 315)
__global__ void computationalkernel2(uint8_t *d, uint8_t *ht, int W, int H, int stride, int bpp, int th, int theta_offset)
{
int rho, y, x;
int theta = (threadIdx.x + blockIdx.x * blockDim.x) + theta_offset; // theta is based on grid/ block id plus offset
for(rho = 0; rho < th; rho++)
{
double C = cos(RAD(theta)); // todo: call sincos instead?
double S = sin(RAD(theta));
uint32_t totalred = 0;
uint32_t totalgreen = 0;
uint32_t totalblue = 0;
uint32_t totalpix = 0;
for(x = 0; x < W; x++) {
double dy = H/2.0 - (rho - (x - W/2.0)*C)/S;
if ( dy < 0 || dy >= H ) continue;
y = floor(dy+.5);
if (y == H) continue;
totalpix++;
totalred += GR(x, y);
totalgreen += GG(x, y);
totalblue += GB(x, y);
}
if ( totalpix > 0 ) {
double dp = totalpix;
SR(theta, rho) = (int)(totalred/dp) &0xff;
SG(theta, rho) = (int)(totalgreen/dp) &0xff;
SB(theta, rho) = (int)(totalblue/dp) &0xff;
}
}
}
// d is pointer to input data
// w, h, s is input data's width, height, and stridge
// bpp is bits per pixel of input data
uint8_t *houghtransform(uint8_t *h_in, int *w, int *h, int *s, int bpp)
{
// Error code to check return values for CUDA calls
cudaError_t err = cudaSuccess;
int W = *w, H = *h;
int th = sqrt(W*W + H*H)/2.0;
int outputBytes= th*tw*4;
// alloc space for output buffer CPU side
uint8_t *h_ht = (uint8_t *)malloc(outputBytes);
// alloc space for output buffer device side
uint8_t *d_out;
err = cudaMalloc((void **)&d_out, outputBytes);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate %d bytes for d_out (error code %s)!\n", outputBytes, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemset((void *)d_out, 0, outputBytes); // black bg
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to cudaMemset d_out (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("allocated output buffers\n");
// alloc space and init input buffer device side
uint8_t *d_in;
err = cudaMalloc((void **)&d_in, (*s * *h)); // bytes = stride * height
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device d_in (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_in, h_in, (*s * *h), cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy d_in from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("allocated input buffers\n");
// todo: play with grid, block dimensions
// right now this spawns 360 total kernels, for 360 values of theta
computationalkernel2 <<<30, 3>>> (d_in, d_out, W, H, *s, bpp, th, 45); // theta of 45 -> 134
computationalkernel2 <<<30, 3>>> (d_in, d_out, W, H, *s, bpp, th, 225); // theta of 225 -> 314
computationalkernel1 <<<30, 3>>> (d_in, d_out, W, H, *s, bpp, th, 135); // theta of 135 -> 224
computationalkernel1 <<<30, 3>>> (d_in, d_out, W, H, *s, bpp, th, 315); // theta of 315 -> 44 (wrap around since 360 deg = 0 deg)
cudaThreadSynchronize(); // wait for all GPU threads to complete
printf("cudaThreadSynchronize done\n");
// Copy resulting output from device
cudaMemcpy(h_ht, d_out, outputBytes, cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy d_out from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("copy result back to host done\n");
// Clean up
err = cudaFree(d_in);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free d_in (error code %s)!\n", cudaGetErrorString(err));
}
err = cudaFree(d_out);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free d_out (error code %s)!\n", cudaGetErrorString(err));
}
// h, w, and s are returned as the height, width, stride of the output image
// ht is the buffer containing the transformed output image
*h = th; // sqrt(W*W+H*H)/2
*w = tw; // 360
*s = 4*tw; // 4 because 4 bytes per pixel output format
return h_ht;
}
int main(int argc, char **argv)
{
cairo_surface_t *inputimg = NULL;
cairo_surface_t *houghimg = NULL;
uint8_t *houghdata = NULL, *inputdata = NULL;
int w, h, s, bpp, format;
uint64_t measurement_time = 0;
#if (CAIRO_HAS_PNG_FUNCTIONS==1)
printf("cairo supports PNG\n");
#else
printf("cairo does not support PNG\n");
#endif
if ( argc < 3 ) return EXIT_FAILURE;
printf("input file: %s\n", argv[1]);
printf("output file: %s\n", argv[2]);
apptime_print_res();
// Lets measure initialization time.
apptime_start_session(&measurement_time);
printf("Initialization...\n");
inputimg = cairo_image_surface_create_from_png(argv[1]);
printf("After create from png: %s\n",
cairo_status_to_string(cairo_surface_status(inputimg)));
w = cairo_image_surface_get_width(inputimg);
h = cairo_image_surface_get_height(inputimg);
s = cairo_image_surface_get_stride(inputimg);
format = cairo_image_surface_get_format(inputimg);
switch(format)
{
case CAIRO_FORMAT_ARGB32: bpp = 4; break;
case CAIRO_FORMAT_RGB24: bpp = 3; break;
case CAIRO_FORMAT_A8: bpp = 1; break;
default:
fprintf(stderr, "unsupported %i\n", format);
goto destroy;
}
inputdata = cairo_image_surface_get_data(inputimg);
measurement_time = apptime_stop_session(&measurement_time);
printf("Initialization Completed. Time: %lld ns\n", measurement_time);
printf("input buffer width %d, height %d, stride %d, bpp %d\n",
w, h, s, bpp);
// Now lets measure the Hough Time.
printf("Hough Transform using CUDA started...\n");
apptime_start_session(&measurement_time);
houghdata = houghtransform(inputdata, &w, &h, &s, bpp);
measurement_time = apptime_stop_session(&measurement_time);
printf("Hought transform completed. Time: %llu ns\n", measurement_time);
printf("w=%d, h=%d\n", w, h);
houghimg = cairo_image_surface_create_for_data(houghdata,
CAIRO_FORMAT_RGB24,
w, h, s);
cairo_surface_write_to_png(houghimg, argv[2]);
destroy:
if (inputimg != NULL) cairo_surface_destroy(inputimg);
if (houghimg != NULL) cairo_surface_destroy(houghimg);
return EXIT_SUCCESS;
}
|
e1f1576c5e68442ff8bdb1e160deaa7ab5f23eaf.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "common.h"
#include "naive.h"
#define blockSize 128
int *devIdata;
int *devOdata;
namespace StreamCompaction {
namespace Naive {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
// TODO: __global__
__global__ void calculatePartialSum(int n, int d, int *odata, int *idata) {
int index = threadIdx.x + (blockDim.x * blockIdx.x);
if (index >= n) {
return;
}
odata[index] = ((index >= (1 << (d - 1))) ? (idata[index - (1 << (d - 1))]) : 0) + idata[index];
}
/**
 * Performs prefix-sum (aka scan) on idata, storing the result into odata.
 *
 * Naive GPU scan: pass d adds in the element 2^(d-1) positions to the
 * left, ping-ponging between two device buffers.  The inclusive result is
 * shifted right by one on read-back (with odata[0] = 0) to produce an
 * exclusive scan.
 *
 * @param n     number of elements in idata / odata
 * @param odata host output buffer receiving the exclusive scan
 * @param idata host input buffer
 *
 * NOTE: as in the original, the GPU timer also covers the allocations and
 * host<->device copies.
 */
void scan(int n, int *odata, const int *idata) {
if (n <= 0) {
return; // nothing to scan; also avoids the (n - 1) read-back underflowing
}
timer().startGpuTimer();
hipMalloc((void**)&devIdata, n * sizeof(int));
checkCUDAError("hipMalloc devIdata failed");
hipMemcpy(devIdata, idata, n * sizeof(int), hipMemcpyHostToDevice);
checkCUDAError("hipMemcpy to devIdata failed");
hipMalloc((void**)&devOdata, n * sizeof(int));
checkCUDAError("hipMalloc devOdata failed");
const int passes = ilog2ceil(n);
const dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize);
for (int d = 1; d <= passes; d++) {
calculatePartialSum << <fullBlocksPerGrid, blockSize>> > (n, d, devOdata, devIdata);
checkCUDAError("calculatePartialSum kernel failed");
// Ping-pong the buffers instead of a device-to-device copy each pass:
// the kernel writes every element, so stale contents never leak through.
int *tmp = devIdata;
devIdata = devOdata;
devOdata = tmp;
}
// After the final swap the inclusive scan lives in devIdata; shift it
// right by one element on read-back to make the result exclusive.
hipMemcpy(odata + 1, devIdata, (n - 1) * sizeof(int), hipMemcpyDeviceToHost);
checkCUDAError("hipMemcpy to odata failed");
odata[0] = 0;
hipFree(devIdata);
hipFree(devOdata);
timer().endGpuTimer();
}
}
}
| e1f1576c5e68442ff8bdb1e160deaa7ab5f23eaf.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
#include "naive.h"
#define blockSize 128
int *devIdata;
int *devOdata;
namespace StreamCompaction {
namespace Naive {
using StreamCompaction::Common::PerformanceTimer;
// Lazily-initialised timer shared by this namespace (Meyers singleton);
// every call returns the same PerformanceTimer instance.
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
// TODO: __global__
// One pass of the naive scan: for pass d, each element with
// index >= 2^(d-1) adds in the element 2^(d-1) positions to its left;
// elements below that offset are copied through unchanged.
// Reads idata and writes odata (distinct buffers; the host ping-pongs them).
__global__ void calculatePartialSum(int n, int d, int *odata, int *idata) {
int index = threadIdx.x + (blockDim.x * blockIdx.x);
if (index >= n) {
return;
}
odata[index] = ((index >= (1 << (d - 1))) ? (idata[index - (1 << (d - 1))]) : 0) + idata[index];
}
/**
 * Performs prefix-sum (aka scan) on idata, storing the result into odata.
 *
 * Naive GPU scan: pass d adds in the element 2^(d-1) positions to the
 * left, ping-ponging between two device buffers.  The inclusive result is
 * shifted right by one on read-back (with odata[0] = 0) to produce an
 * exclusive scan.
 *
 * @param n     number of elements in idata / odata
 * @param odata host output buffer receiving the exclusive scan
 * @param idata host input buffer
 *
 * NOTE: as in the original, the GPU timer also covers the allocations and
 * host<->device copies.
 */
void scan(int n, int *odata, const int *idata) {
if (n <= 0) {
return; // nothing to scan; also avoids the (n - 1) read-back underflowing
}
timer().startGpuTimer();
cudaMalloc((void**)&devIdata, n * sizeof(int));
checkCUDAError("cudaMalloc devIdata failed");
cudaMemcpy(devIdata, idata, n * sizeof(int), cudaMemcpyHostToDevice);
checkCUDAError("cudaMemcpy to devIdata failed");
cudaMalloc((void**)&devOdata, n * sizeof(int));
checkCUDAError("cudaMalloc devOdata failed");
const int passes = ilog2ceil(n);
const dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize);
for (int d = 1; d <= passes; d++) {
calculatePartialSum << <fullBlocksPerGrid, blockSize>> > (n, d, devOdata, devIdata);
checkCUDAError("calculatePartialSum kernel failed");
// Ping-pong the buffers instead of a device-to-device copy each pass:
// the kernel writes every element, so stale contents never leak through.
int *tmp = devIdata;
devIdata = devOdata;
devOdata = tmp;
}
// After the final swap the inclusive scan lives in devIdata; shift it
// right by one element on read-back to make the result exclusive.
cudaMemcpy(odata + 1, devIdata, (n - 1) * sizeof(int), cudaMemcpyDeviceToHost);
checkCUDAError("cudaMemcpy to odata failed");
odata[0] = 0;
cudaFree(devIdata);
cudaFree(devOdata);
timer().endGpuTimer();
}
}
}
|
be5d00c8d2fe2d7d382a713c509a75fb0fe0428b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <cmath>
#define NUM_ELEMENTS 8388608
#define PI 3.141592654
#define r 1048576
//__global__ void divideAndConquer()
//{
// int x;
// int y;
//
// double d = (2 * PI) * (NUM_ELEMENTS - 1);
//
// if (threadIdx.x == 0 && blockIdx.x == 0)
// x = 0 + r * cos(0);
//
// x = 0 + r * cos((threadIdx.x + (blockDim.x * blockIdx.x)) * d);
//
// if (threadIdx.x == 0 && blockIdx.x == 0)
// y = 0 + r * sin(0);
//
// y = 0 + r * sin((threadIdx.x + (blockDim.x * blockIdx.x)) * d);
//
// __syncthreads();
//
// printf("%d %d", x, y);
//}
// Prints the lattice points of a circle of radius r centred at
// (1048576, 1048576), sampled at NUM_ELEMENTS angles over [0, 2*pi].
// Consecutive duplicate points (produced by integer truncation of the
// trig results) are suppressed.  Output: a fixed two-line header followed
// by one "x y" pair per line.
int main()
{
//divideAndConquer<<<4096, 1024>>>();
const double step = (2 * PI) / (NUM_ELEMENTS - 1);
int lastX = 0;
int lastY = 0;
printf("2\n6741438\n");
for (int i = 0; i < NUM_ELEMENTS; i++)
{
const int px = 1048576 + r * cos(i * step);
const int py = 1048576 + r * sin(i * step);
if (px == lastX && py == lastY)
{
continue; // truncation collapsed this sample onto the previous point
}
lastX = px;
lastY = py;
printf("%d %d\n", lastX, lastY);
}
return 0;
}
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <cmath>
#define NUM_ELEMENTS 8388608
#define PI 3.141592654
#define r 1048576
//__global__ void divideAndConquer()
//{
// int x;
// int y;
//
// double d = (2 * PI) * (NUM_ELEMENTS - 1);
//
// if (threadIdx.x == 0 && blockIdx.x == 0)
// x = 0 + r * cos(0);
//
// x = 0 + r * cos((threadIdx.x + (blockDim.x * blockIdx.x)) * d);
//
// if (threadIdx.x == 0 && blockIdx.x == 0)
// y = 0 + r * sin(0);
//
// y = 0 + r * sin((threadIdx.x + (blockDim.x * blockIdx.x)) * d);
//
// __syncthreads();
//
// printf("%d %d", x, y);
//}
// Prints the lattice points of a circle of radius r centred at
// (1048576, 1048576), sampled at NUM_ELEMENTS angles over [0, 2*pi].
// Consecutive duplicate points (produced by integer truncation of the
// trig results) are suppressed.  Output: a fixed two-line header followed
// by one "x y" pair per line.
int main()
{
//divideAndConquer<<<4096, 1024>>>();
const double step = (2 * PI) / (NUM_ELEMENTS - 1);
int lastX = 0;
int lastY = 0;
printf("2\n6741438\n");
for (int i = 0; i < NUM_ELEMENTS; i++)
{
const int px = 1048576 + r * cos(i * step);
const int py = 1048576 + r * sin(i * step);
if (px == lastX && py == lastY)
{
continue; // truncation collapsed this sample onto the previous point
}
lastX = px;
lastY = py;
printf("%d %d\n", lastX, lastY);
}
return 0;
}
3effb56c0cb47989fd847ca549077cd9d09162a1.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* NOTE: this file is generated by dispatch_00_generate.py
*
* Make changes there and run in this directory:
*
* > python dispatch_00_generate.py
*
*/
#include <raft/core/operators.hpp> // raft::identity_op
#include <raft/distance/detail/distance_ops/all_ops.cuh> // ops::*
#include <raft/distance/detail/pairwise_matrix/dispatch-inl.cuh> // dispatch
#include <raft/distance/detail/pairwise_matrix/dispatch_sm60.cuh>
#define instantiate_raft_distance_detail_pairwise_matrix_dispatch( \
OpT, DataT, AccT, OutT, FinOpT, IdxT) \
template void raft::distance::detail:: \
pairwise_matrix_dispatch<OpT<DataT, AccT, IdxT>, DataT, AccT, OutT, FinOpT, IdxT>( \
OpT<DataT, AccT, IdxT> distance_op, \
IdxT m, \
IdxT n, \
IdxT k, \
const DataT* x, \
const DataT* y, \
const DataT* x_norm, \
const DataT* y_norm, \
OutT* out, \
FinOpT fin_op, \
hipStream_t stream, \
bool is_row_major)
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
raft::distance::detail::ops::kl_divergence_op, float, float, float, raft::identity_op, int);
#undef instantiate_raft_distance_detail_pairwise_matrix_dispatch
| 3effb56c0cb47989fd847ca549077cd9d09162a1.cu | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* NOTE: this file is generated by dispatch_00_generate.py
*
* Make changes there and run in this directory:
*
* > python dispatch_00_generate.py
*
*/
#include <raft/core/operators.hpp> // raft::identity_op
#include <raft/distance/detail/distance_ops/all_ops.cuh> // ops::*
#include <raft/distance/detail/pairwise_matrix/dispatch-inl.cuh> // dispatch
#include <raft/distance/detail/pairwise_matrix/dispatch_sm60.cuh>
#define instantiate_raft_distance_detail_pairwise_matrix_dispatch( \
OpT, DataT, AccT, OutT, FinOpT, IdxT) \
template void raft::distance::detail:: \
pairwise_matrix_dispatch<OpT<DataT, AccT, IdxT>, DataT, AccT, OutT, FinOpT, IdxT>( \
OpT<DataT, AccT, IdxT> distance_op, \
IdxT m, \
IdxT n, \
IdxT k, \
const DataT* x, \
const DataT* y, \
const DataT* x_norm, \
const DataT* y_norm, \
OutT* out, \
FinOpT fin_op, \
cudaStream_t stream, \
bool is_row_major)
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
raft::distance::detail::ops::kl_divergence_op, float, float, float, raft::identity_op, int);
#undef instantiate_raft_distance_detail_pairwise_matrix_dispatch
|
00be9d65fcdc3fb33734416ed762390378921e7b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This is a simple test showing huge access speed gap
* between aligned and misaligned structures
* (those having/missing __align__ keyword).
* It measures per-element copy throughput for
* aligned and misaligned structures on
* big chunks of data.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <helper_functions.h> // helper utility functions
#include <helper_cuda.h> // helper functions for CUDA error checking and initialization
////////////////////////////////////////////////////////////////////////////////
// Misaligned types
////////////////////////////////////////////////////////////////////////////////
typedef unsigned char uint8;
typedef unsigned short int uint16;
// The *_misaligned structs deliberately omit __align__, so the compiler
// cannot assume a natively supported access width for them -- this is the
// "slow" side of the benchmark.
typedef struct
{
unsigned char r, g, b, a;
} RGBA8_misaligned;
typedef struct
{
unsigned int l, a;
} LA32_misaligned;
typedef struct
{
unsigned int r, g, b;
} RGB32_misaligned;
typedef struct
{
unsigned int r, g, b, a;
} RGBA32_misaligned;
////////////////////////////////////////////////////////////////////////////////
// Aligned types
////////////////////////////////////////////////////////////////////////////////
// __align__(N) matches each element to one of the natively supported
// 4/8/16-byte global-memory access sizes (see the note below).
typedef struct __align__(4)
{
unsigned char r, g, b, a;
} RGBA8;
typedef unsigned int I32;
typedef struct __align__(8)
{
unsigned int l, a;
} LA32;
// 12 bytes of payload; __align__(16) pads the element out to 16 bytes.
typedef struct __align__(16)
{
unsigned int r, g, b;
} RGB32;
typedef struct __align__(16)
{
unsigned int r, g, b, a;
} RGBA32;
////////////////////////////////////////////////////////////////////////////////
// Because G80 class hardware natively supports global memory operations
// only with data elements of 4, 8 and 16 bytes, if structure size
// exceeds 16 bytes, it can't be efficiently read or written,
// since more than one global memory non-coalescable load/store instructions
// will be generated, even if __align__ option is supplied.
// "Structure of arrays" storage strategy offers best performance
// in general case. See section 5.1.2 of the Programming Guide.
////////////////////////////////////////////////////////////////////////////////
// 32-byte element: exercises the >16-byte case described above.
typedef struct __align__(16)
{
RGBA32 c1, c2;
} RGBA32_2;
////////////////////////////////////////////////////////////////////////////////
// Common host and device functions
////////////////////////////////////////////////////////////////////////////////
// Ceiling division: smallest integer q with q * b >= a (for a, b > 0).
int iDivUp(int a, int b)
{
    const int q = a / b;
    return (a % b != 0) ? q + 1 : q;
}
// Floor division for non-negative operands (plain truncating division).
int iDivDown(int a, int b)
{
    const int quotient = a / b;
    return quotient;
}
// Round a up to the nearest multiple of b (a returned unchanged when
// already aligned).
int iAlignUp(int a, int b)
{
    const int rem = a % b;
    return rem ? (a - rem + b) : a;
}
// Round a down to the nearest multiple of b.
int iAlignDown(int a, int b)
{
    const int remainder = a % b;
    return a - remainder;
}
////////////////////////////////////////////////////////////////////////////////
// Simple CUDA kernel.
// Copy is carried out on per-element basis,
// so it's not per-byte in case of padded structures.
////////////////////////////////////////////////////////////////////////////////
// Grid-stride copy: each thread copies elements tid, tid + numThreads,
// tid + 2*numThreads, ..., so any launch configuration covers all
// numElements.  The copy is a per-element TData assignment (not per-byte),
// so padded structures move as whole elements.
template<class TData> __global__ void testKernel(
TData *d_odata,
TData *d_idata,
int numElements
)
{
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
const int numThreads = blockDim.x * gridDim.x;
for (int pos = tid; pos < numElements; pos += numThreads)
{
d_odata[pos] = d_idata[pos];
}
}
////////////////////////////////////////////////////////////////////////////////
// Validation routine for simple copy kernel.
// We must know "packed" size of TData (number_of_fields * sizeof(simple_type))
// and compare only these "packed" parts of the structure,
// containig actual user data. The compiler behavior with padding bytes
// is undefined, since padding is merely a placeholder
// and doesn't contain any user data.
////////////////////////////////////////////////////////////////////////////////
// Host-side validation for the copy kernel: element pos of h_odata must
// match element pos of h_idata in its first packedElementSize bytes.
// Bytes beyond the packed payload (compiler padding) carry no user data
// and are ignored.  Returns 1 when all elements match, 0 otherwise.
template<class TData> int testCPU(
TData *h_odata,
TData *h_idata,
int numElements,
int packedElementSize
)
{
for (int pos = 0; pos < numElements; pos++)
{
const char *expected = (const char *)&h_idata[pos];
const char *actual = (const char *)&h_odata[pos];
if (memcmp(actual, expected, packedElementSize) != 0)
{
return 0;
}
}
return 1;
}
////////////////////////////////////////////////////////////////////////////////
// Data configuration
////////////////////////////////////////////////////////////////////////////////
//Memory chunk size in bytes. Reused for test
const int MEM_SIZE = 50000000;
const int NUM_ITERATIONS = 32;
//GPU input and output data
unsigned char *d_idata, *d_odata;
//CPU input data and instance of GPU output data
unsigned char *h_idataCPU, *h_odataGPU;
StopWatchInterface *hTimer = NULL;
// Runs one copy-throughput measurement for element type TData: clears
// d_odata, launches testKernel NUM_ITERATIONS times over the whole buffer,
// reports average time and bandwidth, then validates the copied data on
// the host.  Uses the file-scope buffers d_idata / d_odata / h_idataCPU /
// h_odataGPU and the hTimer stopwatch.  Returns 0 on success, 1 on
// validation failure.
template<class TData> int runTest(int packedElementSize, int memory_size)
{
// Only whole TData elements participate; the buffer tail is ignored.
const int totalMemSizeAligned = iAlignDown(memory_size, sizeof(TData));
const int numElements = iDivDown(memory_size, sizeof(TData));
//Clean output buffer before current test
checkCudaErrors(hipMemset(d_odata, 0, memory_size));
//Run test
checkCudaErrors(hipDeviceSynchronize());
sdkResetTimer(&hTimer);
sdkStartTimer(&hTimer);
for (int i = 0; i < NUM_ITERATIONS; i++)
{
hipLaunchKernelGGL(( testKernel<TData>), dim3(64), dim3(256), 0, 0,
(TData *)d_odata,
(TData *)d_idata,
numElements
);
getLastCudaError("testKernel() execution failed\n");
}
checkCudaErrors(hipDeviceSynchronize());
sdkStopTimer(&hTimer);
// Average per-iteration time (ms); throughput counts the aligned bytes only.
double gpuTime = sdkGetTimerValue(&hTimer) / NUM_ITERATIONS;
printf(
"Avg. time: %f ms / Copy throughput: %f GB/s.\n", gpuTime,
(double)totalMemSizeAligned / (gpuTime * 0.001 * 1073741824.0)
);
//Read back GPU results and run validation
checkCudaErrors(hipMemcpy(h_odataGPU, d_odata, memory_size, hipMemcpyDeviceToHost));
int flag = testCPU(
(TData *)h_odataGPU,
(TData *)h_idataCPU,
numElements,
packedElementSize
);
printf(flag ? "\tTEST OK\n" : "\tTEST FAILURE\n");
return !flag;
}
// Benchmark driver: sizes the working set to the GPU, fills the input
// with a deterministic pattern, then measures copy throughput for each
// misaligned and aligned element type.  Exits with failure status when
// any type's validation fails.
int main(int argc, char **argv)
{
int i, nTotalFailures = 0;
int devID;
hipDeviceProp_t deviceProp;
printf("[%s] - Starting...\n", argv[0]);
// find first CUDA device
devID = findCudaDevice(argc, (const char **)argv);
// get number of SMs on this GPU
checkCudaErrors(hipGetDeviceProperties(&deviceProp, devID));
printf("[%s] has %d MP(s) x %d (Cores/MP) = %d (Cores)\n",
deviceProp.name, deviceProp.multiProcessorCount,
_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor),
_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount);
// Anything that is less than 192 Cores will have a scaled down workload
float scale_factor = max((192.0f / (_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * (float)deviceProp.multiProcessorCount)), 1.0f);
int MemorySize = (int)(MEM_SIZE/scale_factor) & 0xffffff00; // force multiple of 256 bytes
printf("> Compute scaling value = %4.2f\n", scale_factor);
printf("> Memory Size = %d\n", MemorySize);
sdkCreateTimer(&hTimer);
printf("Allocating memory...\n");
h_idataCPU = (unsigned char *)malloc(MemorySize);
h_odataGPU = (unsigned char *)malloc(MemorySize);
checkCudaErrors(hipMalloc((void **)&d_idata, MemorySize));
checkCudaErrors(hipMalloc((void **)&d_odata, MemorySize));
printf("Generating host input data array...\n");
// Repeating 1..256 byte pattern: never zero, so a dropped copy shows up
// against the zeroed output buffer.
for (i = 0; i < MemorySize; i++)
{
h_idataCPU[i] = (i & 0xFF) + 1;
}
printf("Uploading input data to GPU memory...\n");
checkCudaErrors(hipMemcpy(d_idata, h_idataCPU, MemorySize, hipMemcpyHostToDevice));
// Each runTest call passes the type's packed (user-data) size in bytes.
printf("Testing misaligned types...\n");
printf("uint8...\n");
nTotalFailures += runTest<uint8>(1, MemorySize);
printf("uint16...\n");
nTotalFailures += runTest<uint16>(2, MemorySize);
printf("RGBA8_misaligned...\n");
nTotalFailures += runTest<RGBA8_misaligned>(4, MemorySize);
printf("LA32_misaligned...\n");
nTotalFailures += runTest<LA32_misaligned>(8, MemorySize);
printf("RGB32_misaligned...\n");
nTotalFailures += runTest<RGB32_misaligned>(12, MemorySize);
printf("RGBA32_misaligned...\n");
nTotalFailures += runTest<RGBA32_misaligned>(16, MemorySize);
printf("Testing aligned types...\n");
printf("RGBA8...\n");
nTotalFailures += runTest<RGBA8>(4, MemorySize);
printf("I32...\n");
nTotalFailures += runTest<I32>(4, MemorySize);
printf("LA32...\n");
nTotalFailures += runTest<LA32>(8, MemorySize);
printf("RGB32...\n");
nTotalFailures += runTest<RGB32>(12, MemorySize);
printf("RGBA32...\n");
nTotalFailures += runTest<RGBA32>(16, MemorySize);
printf("RGBA32_2...\n");
nTotalFailures += runTest<RGBA32_2>(32, MemorySize);
printf("\n[alignedTypes] -> Test Results: %d Failures\n", nTotalFailures);
printf("Shutting down...\n");
checkCudaErrors(hipFree(d_idata));
checkCudaErrors(hipFree(d_odata));
free(h_odataGPU);
free(h_idataCPU);
sdkDeleteTimer(&hTimer);
hipDeviceReset();
if (nTotalFailures != 0)
{
printf("Test failed!\n");
exit(EXIT_FAILURE);
}
printf("Test passed\n");
exit(EXIT_SUCCESS);
}
| 00be9d65fcdc3fb33734416ed762390378921e7b.cu | /*
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This is a simple test showing huge access speed gap
* between aligned and misaligned structures
* (those having/missing __align__ keyword).
* It measures per-element copy throughput for
* aligned and misaligned structures on
* big chunks of data.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <helper_functions.h> // helper utility functions
#include <helper_cuda.h> // helper functions for CUDA error checking and initialization
////////////////////////////////////////////////////////////////////////////////
// Misaligned types
////////////////////////////////////////////////////////////////////////////////
typedef unsigned char uint8;
typedef unsigned short int uint16;
// The *_misaligned structs deliberately omit __align__, so the compiler
// cannot assume a natively supported access width for them -- this is the
// "slow" side of the benchmark.
typedef struct
{
unsigned char r, g, b, a;
} RGBA8_misaligned;
typedef struct
{
unsigned int l, a;
} LA32_misaligned;
typedef struct
{
unsigned int r, g, b;
} RGB32_misaligned;
typedef struct
{
unsigned int r, g, b, a;
} RGBA32_misaligned;
////////////////////////////////////////////////////////////////////////////////
// Aligned types
////////////////////////////////////////////////////////////////////////////////
// __align__(N) matches each element to one of the natively supported
// 4/8/16-byte global-memory access sizes (see the note below).
typedef struct __align__(4)
{
unsigned char r, g, b, a;
} RGBA8;
typedef unsigned int I32;
typedef struct __align__(8)
{
unsigned int l, a;
} LA32;
// 12 bytes of payload; __align__(16) pads the element out to 16 bytes.
typedef struct __align__(16)
{
unsigned int r, g, b;
} RGB32;
typedef struct __align__(16)
{
unsigned int r, g, b, a;
} RGBA32;
////////////////////////////////////////////////////////////////////////////////
// Because G80 class hardware natively supports global memory operations
// only with data elements of 4, 8 and 16 bytes, if structure size
// exceeds 16 bytes, it can't be efficiently read or written,
// since more than one global memory non-coalescable load/store instructions
// will be generated, even if __align__ option is supplied.
// "Structure of arrays" storage strategy offers best performance
// in general case. See section 5.1.2 of the Programming Guide.
////////////////////////////////////////////////////////////////////////////////
// 32-byte element: exercises the >16-byte case described above.
typedef struct __align__(16)
{
RGBA32 c1, c2;
} RGBA32_2;
////////////////////////////////////////////////////////////////////////////////
// Common host and device functions
////////////////////////////////////////////////////////////////////////////////
// Ceiling division: smallest integer q with q * b >= a (for a, b > 0).
int iDivUp(int a, int b)
{
    const int q = a / b;
    return (a % b != 0) ? q + 1 : q;
}
// Floor division for non-negative operands (plain truncating division).
int iDivDown(int a, int b)
{
    const int quotient = a / b;
    return quotient;
}
// Round a up to the nearest multiple of b (a returned unchanged when
// already aligned).
int iAlignUp(int a, int b)
{
    const int rem = a % b;
    return rem ? (a - rem + b) : a;
}
// Round a down to the nearest multiple of b.
int iAlignDown(int a, int b)
{
    const int remainder = a % b;
    return a - remainder;
}
////////////////////////////////////////////////////////////////////////////////
// Simple CUDA kernel.
// Copy is carried out on per-element basis,
// so it's not per-byte in case of padded structures.
////////////////////////////////////////////////////////////////////////////////
// Grid-stride copy: each thread copies elements tid, tid + numThreads,
// tid + 2*numThreads, ..., so any launch configuration covers all
// numElements.  The copy is a per-element TData assignment (not per-byte),
// so padded structures move as whole elements.
template<class TData> __global__ void testKernel(
TData *d_odata,
TData *d_idata,
int numElements
)
{
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
const int numThreads = blockDim.x * gridDim.x;
for (int pos = tid; pos < numElements; pos += numThreads)
{
d_odata[pos] = d_idata[pos];
}
}
////////////////////////////////////////////////////////////////////////////////
// Validation routine for simple copy kernel.
// We must know "packed" size of TData (number_of_fields * sizeof(simple_type))
// and compare only these "packed" parts of the structure,
// containig actual user data. The compiler behavior with padding bytes
// is undefined, since padding is merely a placeholder
// and doesn't contain any user data.
////////////////////////////////////////////////////////////////////////////////
// Host-side validation for the copy kernel: element pos of h_odata must
// match element pos of h_idata in its first packedElementSize bytes.
// Bytes beyond the packed payload (compiler padding) carry no user data
// and are ignored.  Returns 1 when all elements match, 0 otherwise.
template<class TData> int testCPU(
TData *h_odata,
TData *h_idata,
int numElements,
int packedElementSize
)
{
for (int pos = 0; pos < numElements; pos++)
{
const char *expected = (const char *)&h_idata[pos];
const char *actual = (const char *)&h_odata[pos];
if (memcmp(actual, expected, packedElementSize) != 0)
{
return 0;
}
}
return 1;
}
////////////////////////////////////////////////////////////////////////////////
// Data configuration
////////////////////////////////////////////////////////////////////////////////
//Memory chunk size in bytes. Reused for test
const int MEM_SIZE = 50000000;
const int NUM_ITERATIONS = 32;
//GPU input and output data
unsigned char *d_idata, *d_odata;
//CPU input data and instance of GPU output data
unsigned char *h_idataCPU, *h_odataGPU;
StopWatchInterface *hTimer = NULL;
// Runs one copy-throughput measurement for element type TData: clears
// d_odata, launches testKernel NUM_ITERATIONS times over the whole buffer,
// reports average time and bandwidth, then validates the copied data on
// the host.  Uses the file-scope buffers d_idata / d_odata / h_idataCPU /
// h_odataGPU and the hTimer stopwatch.  Returns 0 on success, 1 on
// validation failure.
template<class TData> int runTest(int packedElementSize, int memory_size)
{
// Only whole TData elements participate; the buffer tail is ignored.
const int totalMemSizeAligned = iAlignDown(memory_size, sizeof(TData));
const int numElements = iDivDown(memory_size, sizeof(TData));
//Clean output buffer before current test
checkCudaErrors(cudaMemset(d_odata, 0, memory_size));
//Run test
checkCudaErrors(cudaDeviceSynchronize());
sdkResetTimer(&hTimer);
sdkStartTimer(&hTimer);
for (int i = 0; i < NUM_ITERATIONS; i++)
{
testKernel<TData><<<64, 256>>>(
(TData *)d_odata,
(TData *)d_idata,
numElements
);
getLastCudaError("testKernel() execution failed\n");
}
checkCudaErrors(cudaDeviceSynchronize());
sdkStopTimer(&hTimer);
// Average per-iteration time (ms); throughput counts the aligned bytes only.
double gpuTime = sdkGetTimerValue(&hTimer) / NUM_ITERATIONS;
printf(
"Avg. time: %f ms / Copy throughput: %f GB/s.\n", gpuTime,
(double)totalMemSizeAligned / (gpuTime * 0.001 * 1073741824.0)
);
//Read back GPU results and run validation
checkCudaErrors(cudaMemcpy(h_odataGPU, d_odata, memory_size, cudaMemcpyDeviceToHost));
int flag = testCPU(
(TData *)h_odataGPU,
(TData *)h_idataCPU,
numElements,
packedElementSize
);
printf(flag ? "\tTEST OK\n" : "\tTEST FAILURE\n");
return !flag;
}
// Benchmark driver: sizes the working set to the GPU, fills the input
// with a deterministic pattern, then measures copy throughput for each
// misaligned and aligned element type.  Exits with failure status when
// any type's validation fails.
int main(int argc, char **argv)
{
int i, nTotalFailures = 0;
int devID;
cudaDeviceProp deviceProp;
printf("[%s] - Starting...\n", argv[0]);
// find first CUDA device
devID = findCudaDevice(argc, (const char **)argv);
// get number of SMs on this GPU
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, devID));
printf("[%s] has %d MP(s) x %d (Cores/MP) = %d (Cores)\n",
deviceProp.name, deviceProp.multiProcessorCount,
_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor),
_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount);
// Anything that is less than 192 Cores will have a scaled down workload
float scale_factor = max((192.0f / (_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * (float)deviceProp.multiProcessorCount)), 1.0f);
int MemorySize = (int)(MEM_SIZE/scale_factor) & 0xffffff00; // force multiple of 256 bytes
printf("> Compute scaling value = %4.2f\n", scale_factor);
printf("> Memory Size = %d\n", MemorySize);
sdkCreateTimer(&hTimer);
printf("Allocating memory...\n");
h_idataCPU = (unsigned char *)malloc(MemorySize);
h_odataGPU = (unsigned char *)malloc(MemorySize);
checkCudaErrors(cudaMalloc((void **)&d_idata, MemorySize));
checkCudaErrors(cudaMalloc((void **)&d_odata, MemorySize));
printf("Generating host input data array...\n");
// Repeating 1..256 byte pattern: never zero, so a dropped copy shows up
// against the zeroed output buffer.
for (i = 0; i < MemorySize; i++)
{
h_idataCPU[i] = (i & 0xFF) + 1;
}
printf("Uploading input data to GPU memory...\n");
checkCudaErrors(cudaMemcpy(d_idata, h_idataCPU, MemorySize, cudaMemcpyHostToDevice));
// Each runTest call passes the type's packed (user-data) size in bytes.
printf("Testing misaligned types...\n");
printf("uint8...\n");
nTotalFailures += runTest<uint8>(1, MemorySize);
printf("uint16...\n");
nTotalFailures += runTest<uint16>(2, MemorySize);
printf("RGBA8_misaligned...\n");
nTotalFailures += runTest<RGBA8_misaligned>(4, MemorySize);
printf("LA32_misaligned...\n");
nTotalFailures += runTest<LA32_misaligned>(8, MemorySize);
printf("RGB32_misaligned...\n");
nTotalFailures += runTest<RGB32_misaligned>(12, MemorySize);
printf("RGBA32_misaligned...\n");
nTotalFailures += runTest<RGBA32_misaligned>(16, MemorySize);
printf("Testing aligned types...\n");
printf("RGBA8...\n");
nTotalFailures += runTest<RGBA8>(4, MemorySize);
printf("I32...\n");
nTotalFailures += runTest<I32>(4, MemorySize);
printf("LA32...\n");
nTotalFailures += runTest<LA32>(8, MemorySize);
printf("RGB32...\n");
nTotalFailures += runTest<RGB32>(12, MemorySize);
printf("RGBA32...\n");
nTotalFailures += runTest<RGBA32>(16, MemorySize);
printf("RGBA32_2...\n");
nTotalFailures += runTest<RGBA32_2>(32, MemorySize);
printf("\n[alignedTypes] -> Test Results: %d Failures\n", nTotalFailures);
printf("Shutting down...\n");
checkCudaErrors(cudaFree(d_idata));
checkCudaErrors(cudaFree(d_odata));
free(h_odataGPU);
free(h_idataCPU);
sdkDeleteTimer(&hTimer);
cudaDeviceReset();
if (nTotalFailures != 0)
{
printf("Test failed!\n");
exit(EXIT_FAILURE);
}
printf("Test passed\n");
exit(EXIT_SUCCESS);
}
}
|
bdf38167b9651467cdba563352a6d9b8d4f8c0c0.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "tools.hpp"
#include "../cpu_anim.h"
// Shared state handed to the animation callbacks: the device-side pixel
// buffer and the host-side animated bitmap it is copied back into.
struct WaveDataBlock {
unsigned char * dev_bitmap;   // device RGBA buffer, bitmap->image_size() bytes
CPUAnimBitmap * bitmap;       // host bitmap owned by DrawWave's stack frame
};
// Animation-exit callback: releases the device bitmap buffer.
void CleanUp(WaveDataBlock * d) {
CUDA_CHECK_ERROR(hipFree(d->dev_bitmap));
}
// One thread per pixel of a DIM x DIM, 4-bytes-per-pixel bitmap.  No bounds
// check: the launch in GenerateFrame covers the image exactly.
__global__ void WaveKernel(unsigned char * ptr, int ticks) {
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;   // linear pixel index
// Distance of this pixel from the image centre.
float fx = x - DIM / 2;
float fy = y - DIM / 2;
float d = sqrtf(fx * fx + fy * fy);
// Ripple travelling outward with time (ticks), amplitude damped with
// distance.  NOTE(review): cos() here takes the double-precision path on
// float arguments; cosf() would match the float math used elsewhere, but
// switching could shift the rounded grey byte -- confirm before changing.
unsigned char grey = (unsigned char)(128.0f + 127.0f *
cos(d/100.0f - ticks/7.0f) /
(d/10.0f + 1.0f));
// Per-channel tint of the grey value.  "- grey / 7" is a negative int
// converted to unsigned char, i.e. it wraps modulo 256 -- presumably an
// intentional colour effect; verify before "fixing".
ptr[offset * 4 + 0] = grey;
ptr[offset * 4 + 1] = grey / 2;
ptr[offset * 4 + 2] = - grey / 7;
ptr[offset * 4 + 3] = 128;
}
// Per-tick animation callback: renders one frame into the device bitmap
// and copies it back into the host CPUAnimBitmap for display.
void GenerateFrame(WaveDataBlock * d, int ticks) {
// 16x16 thread blocks tiling the DIM x DIM image exactly.
dim3 blocks(DIM/16, DIM/16);
dim3 threads(16, 16);
hipLaunchKernelGGL(( WaveKernel), dim3(blocks), dim3(threads), 0, 0, d->dev_bitmap, ticks);
// Blocking device-to-host copy; also serves as the frame's synchronisation
// point with the kernel above.
CUDA_CHECK_ERROR(hipMemcpy(d->bitmap->get_ptr(),
d->dev_bitmap,
d->bitmap->image_size(),
hipMemcpyDeviceToHost));
}
// Entry point: allocates the device frame buffer and hands control to the
// CPUAnimBitmap event loop, which calls GenerateFrame every tick and
// CleanUp on exit.
void DrawWave() {
WaveDataBlock data;
CPUAnimBitmap bitmap(DIM, DIM, &data);
data.bitmap = &bitmap;
CUDA_CHECK_ERROR(hipMalloc((void**)&data.dev_bitmap, bitmap.image_size()));
bitmap.anim_and_exit((void(*)(void*, int))GenerateFrame,
(void(*)(void*))CleanUp);
}
| bdf38167b9651467cdba563352a6d9b8d4f8c0c0.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include "tools.hpp"
#include "../cpu_anim.h"
// Shared state handed to the animation callbacks: the device-side pixel
// buffer and the host-side animated bitmap it is copied back into.
struct WaveDataBlock {
unsigned char * dev_bitmap;   // device RGBA buffer, bitmap->image_size() bytes
CPUAnimBitmap * bitmap;       // host bitmap owned by DrawWave's stack frame
};
// Animation-exit callback: releases the device bitmap buffer.
void CleanUp(WaveDataBlock * d) {
CUDA_CHECK_ERROR(cudaFree(d->dev_bitmap));
}
// One thread per pixel of a DIM x DIM, 4-bytes-per-pixel bitmap.  No bounds
// check: the launch in GenerateFrame covers the image exactly.
__global__ void WaveKernel(unsigned char * ptr, int ticks) {
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;   // linear pixel index
// Distance of this pixel from the image centre.
float fx = x - DIM / 2;
float fy = y - DIM / 2;
float d = sqrtf(fx * fx + fy * fy);
// Ripple travelling outward with time (ticks), amplitude damped with
// distance.  NOTE(review): cos() here takes the double-precision path on
// float arguments; cosf() would match the float math used elsewhere, but
// switching could shift the rounded grey byte -- confirm before changing.
unsigned char grey = (unsigned char)(128.0f + 127.0f *
cos(d/100.0f - ticks/7.0f) /
(d/10.0f + 1.0f));
// Per-channel tint of the grey value.  "- grey / 7" is a negative int
// converted to unsigned char, i.e. it wraps modulo 256 -- presumably an
// intentional colour effect; verify before "fixing".
ptr[offset * 4 + 0] = grey;
ptr[offset * 4 + 1] = grey / 2;
ptr[offset * 4 + 2] = - grey / 7;
ptr[offset * 4 + 3] = 128;
}
// Per-tick animation callback: renders one frame into the device bitmap
// and copies it back into the host CPUAnimBitmap for display.
void GenerateFrame(WaveDataBlock * d, int ticks) {
// 16x16 thread blocks tiling the DIM x DIM image exactly.
dim3 blocks(DIM/16, DIM/16);
dim3 threads(16, 16);
WaveKernel<<<blocks, threads>>>(d->dev_bitmap, ticks);
// Blocking device-to-host copy; also serves as the frame's synchronisation
// point with the kernel above.
CUDA_CHECK_ERROR(cudaMemcpy(d->bitmap->get_ptr(),
d->dev_bitmap,
d->bitmap->image_size(),
cudaMemcpyDeviceToHost));
}
WaveDataBlock data;
CPUAnimBitmap bitmap(DIM, DIM, &data);
data.bitmap = &bitmap;
CUDA_CHECK_ERROR(cudaMalloc((void**)&data.dev_bitmap, bitmap.image_size()));
bitmap.anim_and_exit((void(*)(void*, int))GenerateFrame,
(void(*)(void*))CleanUp);
}
|
4bb9454c07d82237ae6402bb1961ae940a538a47.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// See this header for all of the recursive handling of tuples of vectors
#include "test_parameters.cuh"
#include "groupby_test_helpers.cuh"
#include <tests/utilities/cudf_test_fixtures.h>
#include <tests/utilities/tuple_vectors.h>
#include <utilities/cudf_utils.h>
#include <cudf.h>
#include <gtest/gtest.h>
#include <gmock/gmock.h>
#include <iostream>
#include <vector>
#include <utility>
#include <type_traits>
#include <typeinfo>
#include <memory>
#include <cstdlib>
// A new instance of this class will be created for each *TEST(GroupTest, ...)
// Put all repeated setup and validation stuff here
template <class test_parameters>
struct GroupTest : public GdfTest {
// The aggregation type is passed via a member of the template argument class
const agg_op op = test_parameters::op;
gdf_context ctxt = {0, test_parameters::group_type, 0};
// multi_column_t is a tuple of vectors. The number of vectors in the tuple
// determines the number of columns to be grouped, and the value_type of each
// vector determiens the data type of the column
using multi_column_t = typename test_parameters::multi_column_t;
//output_t is the output type of the aggregation column
using output_t = typename test_parameters::output_type;
//map_t is used for reference solution
using map_t = typename test_parameters::ref_map_type;
//tuple_t is tuple of datatypes associated with each column to be grouped
using tuple_t = typename test_parameters::tuple_t;
//contains input generated for gdf calculation and reference solution
multi_column_t input_key;
//contains the input aggregation column
std::vector<output_t> input_value;
//contains grouped by column output of the gdf groupby call
multi_column_t output_key;
//contains the aggregated output column
std::vector<output_t> output_value;
// Type for a unique_ptr to a gdf_column with a custom deleter
// Custom deleter is defined at construction
using gdf_col_pointer = typename std::unique_ptr<gdf_column, std::function<void(gdf_column*)>>;
// Containers for unique_ptrs to gdf_columns that will be used in the gdf_group_by functions
// unique_ptrs are used to automate freeing device memory
std::vector<gdf_col_pointer> gdf_input_key_columns;
gdf_col_pointer gdf_input_value_column;
std::vector<gdf_col_pointer> gdf_output_key_columns;
gdf_col_pointer gdf_output_value_column;
// Containers for the raw pointers to the gdf_columns that will be used as input
// to the gdf_group_by functions
std::vector<gdf_column*> gdf_raw_input_key_columns;
gdf_column* gdf_raw_input_val_column;
std::vector<gdf_column*> gdf_raw_output_key_columns;
gdf_column* gdf_raw_output_val_column;
GroupTest()
{
// Use constant seed so the psuedo-random order is the same each time
// Each time the class is constructed a new constant seed is used
static size_t number_of_instantiations{0};
std::srand(number_of_instantiations++);
}
~GroupTest()
{
}
template <typename col_type>
gdf_col_pointer create_gdf_column(std::vector<col_type> const & host_vector,
const gdf_size_type n_count = 0)
{
// Deduce the type and set the gdf_dtype accordingly
gdf_dtype gdf_col_type = N_GDF_TYPES;
if (std::is_same<col_type,int8_t>::value) gdf_col_type = GDF_INT8;
else if(std::is_same<col_type,uint8_t>::value) gdf_col_type = GDF_INT8;
else if(std::is_same<col_type,int16_t>::value) gdf_col_type = GDF_INT16;
else if(std::is_same<col_type,uint16_t>::value) gdf_col_type = GDF_INT16;
else if(std::is_same<col_type,int32_t>::value) gdf_col_type = GDF_INT32;
else if(std::is_same<col_type,uint32_t>::value) gdf_col_type = GDF_INT32;
else if(std::is_same<col_type,int64_t>::value) gdf_col_type = GDF_INT64;
else if(std::is_same<col_type,uint64_t>::value) gdf_col_type = GDF_INT64;
else if(std::is_same<col_type,float>::value) gdf_col_type = GDF_FLOAT32;
else if(std::is_same<col_type,double>::value) gdf_col_type = GDF_FLOAT64;
// Create a new instance of a gdf_column with a custom deleter that will free
// the associated device memory when it eventually goes out of scope
auto deleter = [](gdf_column* col){col->size = 0; RMM_FREE(col->data, 0); RMM_FREE(col->valid, 0); };
gdf_col_pointer the_column{new gdf_column, deleter};
// Allocate device storage for gdf_column and copy contents from host_vector
EXPECT_EQ(RMM_ALLOC(&(the_column->data), host_vector.size() * sizeof(col_type), 0), RMM_SUCCESS);
EXPECT_EQ(hipMemcpy(the_column->data, host_vector.data(), host_vector.size() * sizeof(col_type), hipMemcpyHostToDevice), hipSuccess);
int valid_size = gdf_valid_allocation_size(host_vector.size());
EXPECT_EQ(RMM_ALLOC((void**)&(the_column->valid), valid_size, 0), RMM_SUCCESS);
EXPECT_EQ(hipMemset(the_column->valid, 0xff, valid_size), hipSuccess);
// Fill the gdf_column members
the_column->null_count = n_count;
the_column->size = host_vector.size();
the_column->dtype = gdf_col_type;
gdf_dtype_extra_info extra_info;
extra_info.time_unit = TIME_UNIT_NONE;
the_column->dtype_info = extra_info;
return the_column;
}
// Converts a tuple of host vectors into a vector of gdf_columns
std::vector<gdf_col_pointer>
initialize_gdf_columns(multi_column_t host_columns)
{
std::vector<gdf_col_pointer> gdf_columns;
convert_tuple_to_gdf_columns(gdf_columns, host_columns);
return gdf_columns;
}
/* --------------------------------------------------------------------------*/
/**
* @brief Initializes key columns and aggregation column for gdf group by call
*
* @param key_count The number of unique keys
* @param value_per_key The number of times a random aggregation value is generated for a key
* @param max_key The maximum value of the key columns
* @param max_val The maximum value of aggregation column
* @param print Optionally print the keys and aggregation columns for debugging
*/
/* ----------------------------------------------------------------------------*/
void create_input(const size_t key_count, const size_t value_per_key,
const size_t max_key, const size_t max_val,
bool print = false, const gdf_size_type n_count = 0) {
size_t shuffle_seed = rand();
initialize_keys(input_key, key_count, value_per_key, max_key, shuffle_seed);
initialize_values(input_value, key_count, value_per_key, max_val, shuffle_seed);
gdf_input_key_columns = initialize_gdf_columns(input_key);
gdf_input_value_column = create_gdf_column(input_value, n_count);
// Fill vector of raw pointers to gdf_columns
for(auto const& c : gdf_input_key_columns){
gdf_raw_input_key_columns.push_back(c.get());
}
gdf_raw_input_val_column = gdf_input_value_column.get();
if(print)
{
std::cout << "Key column(s) created. Size: " << std::get<0>(input_key).size() << std::endl;
print_tuple(input_key);
std::cout << "Value column(s) created. Size: " << input_value.size() << std::endl;
print_vector(input_value);
}
}
/* --------------------------------------------------------------------------*/
/**
* @brief Creates a unique_ptr that wraps a gdf_column structure intialized with a host vector
*
* @param host_vector The host vector whose data is used to initialize the gdf_column
*
* @returns A unique_ptr wrapping the new gdf_column
*/
/* ----------------------------------------------------------------------------*/
// Compile time recursion to convert each vector in a tuple of vectors into
// a gdf_column and append it to a vector of gdf_columns
template<std::size_t I = 0, typename... Tp>
inline typename std::enable_if<I == sizeof...(Tp), void>::type
convert_tuple_to_gdf_columns(std::vector<gdf_col_pointer> &gdf_columns,std::tuple<std::vector<Tp>...>& t)
{
//bottom of compile-time recursion
//purposely empty...
}
template<std::size_t I = 0, typename... Tp>
inline typename std::enable_if<I < sizeof...(Tp), void>::type
convert_tuple_to_gdf_columns(std::vector<gdf_col_pointer> &gdf_columns,std::tuple<std::vector<Tp>...>& t)
{
// Creates a gdf_column for the current vector and pushes it onto
// the vector of gdf_columns
gdf_columns.push_back(create_gdf_column(std::get<I>(t)));
//recurse to next vector in tuple
convert_tuple_to_gdf_columns<I + 1, Tp...>(gdf_columns, t);
}
void create_gdf_output_buffers(const size_t key_count, const size_t value_per_key) {
initialize_keys(output_key, key_count, value_per_key, 0, 0, false);
initialize_values(output_value, key_count, value_per_key, 0, 0);
gdf_output_key_columns = initialize_gdf_columns(output_key);
gdf_output_value_column = create_gdf_column(output_value);
for(auto const& c : gdf_output_key_columns){
gdf_raw_output_key_columns.push_back(c.get());
}
gdf_raw_output_val_column = gdf_output_value_column.get();
}
map_t
compute_reference_solution(void) {
map_t key_val_map;
if (test_parameters::op != agg_op::AVG) {
AggOp<test_parameters::op> agg;
for (size_t i = 0; i < input_value.size(); ++i) {
auto l_key = extractKey(input_key, i);
auto sch = key_val_map.find(l_key);
if (sch != key_val_map.end()) {
key_val_map[l_key] = agg(sch->second, input_value[i]);
} else {
key_val_map[l_key] = agg(input_value[i]);
}
}
} else {
std::map<tuple_t, size_t> counters;
AggOp<agg_op::SUM> agg;
for (size_t i = 0; i < input_value.size(); ++i) {
auto l_key = extractKey(input_key, i);
counters[l_key]++;
auto sch = key_val_map.find(l_key);
if (sch != key_val_map.end()) {
key_val_map[l_key] = agg(sch->second, input_value[i]);
} else {
key_val_map[l_key] = agg(input_value[i]);
}
}
for (auto& e : key_val_map) {
e.second = e.second/counters[e.first];
}
}
return key_val_map;
}
/* --------------------------------------------------------------------------*/
/**
* @brief Computes the gdf result of grouping the input_keys and input_value
*/
/* ----------------------------------------------------------------------------*/
void compute_gdf_result(const gdf_error expected_error = GDF_SUCCESS)
{
const int num_columns = std::tuple_size<multi_column_t>::value;
gdf_error error{GDF_SUCCESS};
gdf_column **group_by_input_key = gdf_raw_input_key_columns.data();
gdf_column *group_by_input_value = gdf_raw_input_val_column;
gdf_column **group_by_output_key = gdf_raw_output_key_columns.data();
gdf_column *group_by_output_value = gdf_raw_output_val_column;
switch(op)
{
case agg_op::MIN:
{
error = gdf_group_by_min(num_columns,
group_by_input_key,
group_by_input_value,
nullptr,
group_by_output_key,
group_by_output_value,
&ctxt);
break;
}
case agg_op::MAX:
{
error = gdf_group_by_max(num_columns,
group_by_input_key,
group_by_input_value,
nullptr,
group_by_output_key,
group_by_output_value,
&ctxt);
break;
}
case agg_op::SUM:
{
error = gdf_group_by_sum(num_columns,
group_by_input_key,
group_by_input_value,
nullptr,
group_by_output_key,
group_by_output_value,
&ctxt);
break;
}
case agg_op::CNT:
{
error = gdf_group_by_count(num_columns,
group_by_input_key,
group_by_input_value,
nullptr,
group_by_output_key,
group_by_output_value,
&ctxt);
break;
}
case agg_op::AVG:
{
error = gdf_group_by_avg(num_columns,
group_by_input_key,
group_by_input_value,
nullptr,
group_by_output_key,
group_by_output_value,
&ctxt);
break;
}
default:
error = GDF_INVALID_AGGREGATOR;
}
EXPECT_EQ(expected_error, error) << "The gdf group by function did not complete successfully";
if (GDF_SUCCESS == expected_error) {
copy_output(
group_by_output_key, output_key,
group_by_output_value, output_value);
}
}
void compare_gdf_result(map_t& reference_map) {
ASSERT_EQ(output_value.size(), reference_map.size()) <<
"Size of gdf result does not match reference result\n";
ASSERT_EQ(std::get<0>(output_key).size(), output_value.size()) <<
"Mismatch between aggregation and group by column size.";
for (size_t i = 0; i < output_value.size(); ++i) {
auto sch = reference_map.find(extractKey(output_key, i));
bool found = (sch != reference_map.end());
EXPECT_EQ(found, true);
if (!found) { continue; }
if (std::is_integral<output_t>::value) {
EXPECT_EQ(sch->second, output_value[i]);
} else {
EXPECT_NEAR(sch->second, output_value[i], sch->second/100.0);
}
//ensure no duplicates in gdf output
reference_map.erase(sch);
}
}
};
TYPED_TEST_CASE(GroupTest, Implementations);
TYPED_TEST(GroupTest, GroupbyExampleTest)
{
const size_t num_keys = 1;
const size_t num_values_per_key = 8;
const size_t max_key = num_keys*2;
const size_t max_val = 1000;
this->create_input(num_keys, num_values_per_key, max_key, max_val);
auto reference_map = this->compute_reference_solution();
this->create_gdf_output_buffers(num_keys, num_values_per_key);
this->compute_gdf_result();
this->compare_gdf_result(reference_map);
}
TYPED_TEST(GroupTest, AllKeysSame)
{
const size_t num_keys = 1;
const size_t num_values_per_key = 1<<14;
const size_t max_key = num_keys*2;
const size_t max_val = 1000;
this->create_input(num_keys, num_values_per_key, max_key, max_val);
auto reference_map = this->compute_reference_solution();
this->create_gdf_output_buffers(num_keys, num_values_per_key);
this->compute_gdf_result();
this->compare_gdf_result(reference_map);
}
TYPED_TEST(GroupTest, AllKeysDifferent)
{
const size_t num_keys = 1<<14;
const size_t num_values_per_key = 1;
const size_t max_key = num_keys*2;
const size_t max_val = 1000;
this->create_input(num_keys, num_values_per_key, max_key, max_val);
auto reference_map = this->compute_reference_solution();
this->create_gdf_output_buffers(num_keys, num_values_per_key);
this->compute_gdf_result();
this->compare_gdf_result(reference_map);
}
TYPED_TEST(GroupTest, WarpKeysSame)
{
const size_t num_keys = 1<<10;
const size_t num_values_per_key = 32;
const size_t max_key = num_keys*2;
const size_t max_val = 1000;
this->create_input(num_keys, num_values_per_key, max_key, max_val);
auto reference_map = this->compute_reference_solution();
this->create_gdf_output_buffers(num_keys, num_values_per_key);
this->compute_gdf_result();
this->compare_gdf_result(reference_map);
}
TYPED_TEST(GroupTest, BlockKeysSame)
{
const size_t num_keys = 1<<10;
const size_t num_values_per_key = 256;
const size_t max_key = num_keys*2;
const size_t max_val = 1000;
this->create_input(num_keys, num_values_per_key, max_key, max_val);
auto reference_map = this->compute_reference_solution();
this->create_gdf_output_buffers(num_keys, num_values_per_key);
this->compute_gdf_result();
this->compare_gdf_result(reference_map);
}
TYPED_TEST(GroupTest, EmptyInput)
{
const size_t num_keys = 0;
const size_t num_values_per_key = 0;
const size_t max_key = 0;
const size_t max_val = 0;
this->create_input(num_keys, num_values_per_key, max_key, max_val);
auto reference_map = this->compute_reference_solution();
this->create_gdf_output_buffers(num_keys, num_values_per_key);
this->compute_gdf_result();
this->compare_gdf_result(reference_map);
}
// Create a new derived class from JoinTest so we can do a new Typed Test set of tests
template <class test_parameters>
struct GroupValidTest : public GroupTest<test_parameters>
{ };
TYPED_TEST_CASE(GroupValidTest, ValidTestImplementations);
TYPED_TEST(GroupValidTest, ReportValidMaskError)
{
const size_t num_keys = 1;
const size_t num_values_per_key = 8;
const size_t max_key = num_keys*2;
const size_t max_val = 1000;
this->create_input(num_keys, num_values_per_key, max_key, max_val, false, 1);
this->create_gdf_output_buffers(num_keys, num_values_per_key);
this->compute_gdf_result(GDF_VALIDITY_UNSUPPORTED);
}
| 4bb9454c07d82237ae6402bb1961ae940a538a47.cu | /*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// See this header for all of the recursive handling of tuples of vectors
#include "test_parameters.cuh"
#include "groupby_test_helpers.cuh"
#include <tests/utilities/cudf_test_fixtures.h>
#include <tests/utilities/tuple_vectors.h>
#include <utilities/cudf_utils.h>
#include <cudf.h>
#include <gtest/gtest.h>
#include <gmock/gmock.h>
#include <iostream>
#include <vector>
#include <utility>
#include <type_traits>
#include <typeinfo>
#include <memory>
#include <cstdlib>
// A new instance of this class will be created for each *TEST(GroupTest, ...)
// Put all repeated setup and validation stuff here
template <class test_parameters>
struct GroupTest : public GdfTest {
// The aggregation type is passed via a member of the template argument class
const agg_op op = test_parameters::op;
gdf_context ctxt = {0, test_parameters::group_type, 0};
// multi_column_t is a tuple of vectors. The number of vectors in the tuple
// determines the number of columns to be grouped, and the value_type of each
// vector determiens the data type of the column
using multi_column_t = typename test_parameters::multi_column_t;
//output_t is the output type of the aggregation column
using output_t = typename test_parameters::output_type;
//map_t is used for reference solution
using map_t = typename test_parameters::ref_map_type;
//tuple_t is tuple of datatypes associated with each column to be grouped
using tuple_t = typename test_parameters::tuple_t;
//contains input generated for gdf calculation and reference solution
multi_column_t input_key;
//contains the input aggregation column
std::vector<output_t> input_value;
//contains grouped by column output of the gdf groupby call
multi_column_t output_key;
//contains the aggregated output column
std::vector<output_t> output_value;
// Type for a unique_ptr to a gdf_column with a custom deleter
// Custom deleter is defined at construction
using gdf_col_pointer = typename std::unique_ptr<gdf_column, std::function<void(gdf_column*)>>;
// Containers for unique_ptrs to gdf_columns that will be used in the gdf_group_by functions
// unique_ptrs are used to automate freeing device memory
std::vector<gdf_col_pointer> gdf_input_key_columns;
gdf_col_pointer gdf_input_value_column;
std::vector<gdf_col_pointer> gdf_output_key_columns;
gdf_col_pointer gdf_output_value_column;
// Containers for the raw pointers to the gdf_columns that will be used as input
// to the gdf_group_by functions
std::vector<gdf_column*> gdf_raw_input_key_columns;
gdf_column* gdf_raw_input_val_column;
std::vector<gdf_column*> gdf_raw_output_key_columns;
gdf_column* gdf_raw_output_val_column;
GroupTest()
{
// Use constant seed so the psuedo-random order is the same each time
// Each time the class is constructed a new constant seed is used
static size_t number_of_instantiations{0};
std::srand(number_of_instantiations++);
}
~GroupTest()
{
}
template <typename col_type>
gdf_col_pointer create_gdf_column(std::vector<col_type> const & host_vector,
const gdf_size_type n_count = 0)
{
// Deduce the type and set the gdf_dtype accordingly
gdf_dtype gdf_col_type = N_GDF_TYPES;
if (std::is_same<col_type,int8_t>::value) gdf_col_type = GDF_INT8;
else if(std::is_same<col_type,uint8_t>::value) gdf_col_type = GDF_INT8;
else if(std::is_same<col_type,int16_t>::value) gdf_col_type = GDF_INT16;
else if(std::is_same<col_type,uint16_t>::value) gdf_col_type = GDF_INT16;
else if(std::is_same<col_type,int32_t>::value) gdf_col_type = GDF_INT32;
else if(std::is_same<col_type,uint32_t>::value) gdf_col_type = GDF_INT32;
else if(std::is_same<col_type,int64_t>::value) gdf_col_type = GDF_INT64;
else if(std::is_same<col_type,uint64_t>::value) gdf_col_type = GDF_INT64;
else if(std::is_same<col_type,float>::value) gdf_col_type = GDF_FLOAT32;
else if(std::is_same<col_type,double>::value) gdf_col_type = GDF_FLOAT64;
// Create a new instance of a gdf_column with a custom deleter that will free
// the associated device memory when it eventually goes out of scope
auto deleter = [](gdf_column* col){col->size = 0; RMM_FREE(col->data, 0); RMM_FREE(col->valid, 0); };
gdf_col_pointer the_column{new gdf_column, deleter};
// Allocate device storage for gdf_column and copy contents from host_vector
EXPECT_EQ(RMM_ALLOC(&(the_column->data), host_vector.size() * sizeof(col_type), 0), RMM_SUCCESS);
EXPECT_EQ(cudaMemcpy(the_column->data, host_vector.data(), host_vector.size() * sizeof(col_type), cudaMemcpyHostToDevice), cudaSuccess);
int valid_size = gdf_valid_allocation_size(host_vector.size());
EXPECT_EQ(RMM_ALLOC((void**)&(the_column->valid), valid_size, 0), RMM_SUCCESS);
EXPECT_EQ(cudaMemset(the_column->valid, 0xff, valid_size), cudaSuccess);
// Fill the gdf_column members
the_column->null_count = n_count;
the_column->size = host_vector.size();
the_column->dtype = gdf_col_type;
gdf_dtype_extra_info extra_info;
extra_info.time_unit = TIME_UNIT_NONE;
the_column->dtype_info = extra_info;
return the_column;
}
// Converts a tuple of host vectors into a vector of gdf_columns
std::vector<gdf_col_pointer>
initialize_gdf_columns(multi_column_t host_columns)
{
std::vector<gdf_col_pointer> gdf_columns;
convert_tuple_to_gdf_columns(gdf_columns, host_columns);
return gdf_columns;
}
/* --------------------------------------------------------------------------*/
/**
* @brief Initializes key columns and aggregation column for gdf group by call
*
* @param key_count The number of unique keys
* @param value_per_key The number of times a random aggregation value is generated for a key
* @param max_key The maximum value of the key columns
* @param max_val The maximum value of aggregation column
* @param print Optionally print the keys and aggregation columns for debugging
*/
/* ----------------------------------------------------------------------------*/
void create_input(const size_t key_count, const size_t value_per_key,
const size_t max_key, const size_t max_val,
bool print = false, const gdf_size_type n_count = 0) {
size_t shuffle_seed = rand();
initialize_keys(input_key, key_count, value_per_key, max_key, shuffle_seed);
initialize_values(input_value, key_count, value_per_key, max_val, shuffle_seed);
gdf_input_key_columns = initialize_gdf_columns(input_key);
gdf_input_value_column = create_gdf_column(input_value, n_count);
// Fill vector of raw pointers to gdf_columns
for(auto const& c : gdf_input_key_columns){
gdf_raw_input_key_columns.push_back(c.get());
}
gdf_raw_input_val_column = gdf_input_value_column.get();
if(print)
{
std::cout << "Key column(s) created. Size: " << std::get<0>(input_key).size() << std::endl;
print_tuple(input_key);
std::cout << "Value column(s) created. Size: " << input_value.size() << std::endl;
print_vector(input_value);
}
}
/* --------------------------------------------------------------------------*/
/**
* @brief Creates a unique_ptr that wraps a gdf_column structure intialized with a host vector
*
* @param host_vector The host vector whose data is used to initialize the gdf_column
*
* @returns A unique_ptr wrapping the new gdf_column
*/
/* ----------------------------------------------------------------------------*/
// Compile time recursion to convert each vector in a tuple of vectors into
// a gdf_column and append it to a vector of gdf_columns
template<std::size_t I = 0, typename... Tp>
inline typename std::enable_if<I == sizeof...(Tp), void>::type
convert_tuple_to_gdf_columns(std::vector<gdf_col_pointer> &gdf_columns,std::tuple<std::vector<Tp>...>& t)
{
//bottom of compile-time recursion
//purposely empty...
}
template<std::size_t I = 0, typename... Tp>
inline typename std::enable_if<I < sizeof...(Tp), void>::type
convert_tuple_to_gdf_columns(std::vector<gdf_col_pointer> &gdf_columns,std::tuple<std::vector<Tp>...>& t)
{
// Creates a gdf_column for the current vector and pushes it onto
// the vector of gdf_columns
gdf_columns.push_back(create_gdf_column(std::get<I>(t)));
//recurse to next vector in tuple
convert_tuple_to_gdf_columns<I + 1, Tp...>(gdf_columns, t);
}
void create_gdf_output_buffers(const size_t key_count, const size_t value_per_key) {
initialize_keys(output_key, key_count, value_per_key, 0, 0, false);
initialize_values(output_value, key_count, value_per_key, 0, 0);
gdf_output_key_columns = initialize_gdf_columns(output_key);
gdf_output_value_column = create_gdf_column(output_value);
for(auto const& c : gdf_output_key_columns){
gdf_raw_output_key_columns.push_back(c.get());
}
gdf_raw_output_val_column = gdf_output_value_column.get();
}
map_t
compute_reference_solution(void) {
map_t key_val_map;
if (test_parameters::op != agg_op::AVG) {
AggOp<test_parameters::op> agg;
for (size_t i = 0; i < input_value.size(); ++i) {
auto l_key = extractKey(input_key, i);
auto sch = key_val_map.find(l_key);
if (sch != key_val_map.end()) {
key_val_map[l_key] = agg(sch->second, input_value[i]);
} else {
key_val_map[l_key] = agg(input_value[i]);
}
}
} else {
std::map<tuple_t, size_t> counters;
AggOp<agg_op::SUM> agg;
for (size_t i = 0; i < input_value.size(); ++i) {
auto l_key = extractKey(input_key, i);
counters[l_key]++;
auto sch = key_val_map.find(l_key);
if (sch != key_val_map.end()) {
key_val_map[l_key] = agg(sch->second, input_value[i]);
} else {
key_val_map[l_key] = agg(input_value[i]);
}
}
for (auto& e : key_val_map) {
e.second = e.second/counters[e.first];
}
}
return key_val_map;
}
/* --------------------------------------------------------------------------*/
/**
* @brief Computes the gdf result of grouping the input_keys and input_value
*/
/* ----------------------------------------------------------------------------*/
void compute_gdf_result(const gdf_error expected_error = GDF_SUCCESS)
{
const int num_columns = std::tuple_size<multi_column_t>::value;
gdf_error error{GDF_SUCCESS};
gdf_column **group_by_input_key = gdf_raw_input_key_columns.data();
gdf_column *group_by_input_value = gdf_raw_input_val_column;
gdf_column **group_by_output_key = gdf_raw_output_key_columns.data();
gdf_column *group_by_output_value = gdf_raw_output_val_column;
switch(op)
{
case agg_op::MIN:
{
error = gdf_group_by_min(num_columns,
group_by_input_key,
group_by_input_value,
nullptr,
group_by_output_key,
group_by_output_value,
&ctxt);
break;
}
case agg_op::MAX:
{
error = gdf_group_by_max(num_columns,
group_by_input_key,
group_by_input_value,
nullptr,
group_by_output_key,
group_by_output_value,
&ctxt);
break;
}
case agg_op::SUM:
{
error = gdf_group_by_sum(num_columns,
group_by_input_key,
group_by_input_value,
nullptr,
group_by_output_key,
group_by_output_value,
&ctxt);
break;
}
case agg_op::CNT:
{
error = gdf_group_by_count(num_columns,
group_by_input_key,
group_by_input_value,
nullptr,
group_by_output_key,
group_by_output_value,
&ctxt);
break;
}
case agg_op::AVG:
{
error = gdf_group_by_avg(num_columns,
group_by_input_key,
group_by_input_value,
nullptr,
group_by_output_key,
group_by_output_value,
&ctxt);
break;
}
default:
error = GDF_INVALID_AGGREGATOR;
}
EXPECT_EQ(expected_error, error) << "The gdf group by function did not complete successfully";
if (GDF_SUCCESS == expected_error) {
copy_output(
group_by_output_key, output_key,
group_by_output_value, output_value);
}
}
void compare_gdf_result(map_t& reference_map) {
ASSERT_EQ(output_value.size(), reference_map.size()) <<
"Size of gdf result does not match reference result\n";
ASSERT_EQ(std::get<0>(output_key).size(), output_value.size()) <<
"Mismatch between aggregation and group by column size.";
for (size_t i = 0; i < output_value.size(); ++i) {
auto sch = reference_map.find(extractKey(output_key, i));
bool found = (sch != reference_map.end());
EXPECT_EQ(found, true);
if (!found) { continue; }
if (std::is_integral<output_t>::value) {
EXPECT_EQ(sch->second, output_value[i]);
} else {
EXPECT_NEAR(sch->second, output_value[i], sch->second/100.0);
}
//ensure no duplicates in gdf output
reference_map.erase(sch);
}
}
};
TYPED_TEST_CASE(GroupTest, Implementations);
TYPED_TEST(GroupTest, GroupbyExampleTest)
{
const size_t num_keys = 1;
const size_t num_values_per_key = 8;
const size_t max_key = num_keys*2;
const size_t max_val = 1000;
this->create_input(num_keys, num_values_per_key, max_key, max_val);
auto reference_map = this->compute_reference_solution();
this->create_gdf_output_buffers(num_keys, num_values_per_key);
this->compute_gdf_result();
this->compare_gdf_result(reference_map);
}
TYPED_TEST(GroupTest, AllKeysSame)
{
const size_t num_keys = 1;
const size_t num_values_per_key = 1<<14;
const size_t max_key = num_keys*2;
const size_t max_val = 1000;
this->create_input(num_keys, num_values_per_key, max_key, max_val);
auto reference_map = this->compute_reference_solution();
this->create_gdf_output_buffers(num_keys, num_values_per_key);
this->compute_gdf_result();
this->compare_gdf_result(reference_map);
}
TYPED_TEST(GroupTest, AllKeysDifferent)
{
const size_t num_keys = 1<<14;
const size_t num_values_per_key = 1;
const size_t max_key = num_keys*2;
const size_t max_val = 1000;
this->create_input(num_keys, num_values_per_key, max_key, max_val);
auto reference_map = this->compute_reference_solution();
this->create_gdf_output_buffers(num_keys, num_values_per_key);
this->compute_gdf_result();
this->compare_gdf_result(reference_map);
}
TYPED_TEST(GroupTest, WarpKeysSame)
{
const size_t num_keys = 1<<10;
const size_t num_values_per_key = 32;
const size_t max_key = num_keys*2;
const size_t max_val = 1000;
this->create_input(num_keys, num_values_per_key, max_key, max_val);
auto reference_map = this->compute_reference_solution();
this->create_gdf_output_buffers(num_keys, num_values_per_key);
this->compute_gdf_result();
this->compare_gdf_result(reference_map);
}
TYPED_TEST(GroupTest, BlockKeysSame)
{
const size_t num_keys = 1<<10;
const size_t num_values_per_key = 256;
const size_t max_key = num_keys*2;
const size_t max_val = 1000;
this->create_input(num_keys, num_values_per_key, max_key, max_val);
auto reference_map = this->compute_reference_solution();
this->create_gdf_output_buffers(num_keys, num_values_per_key);
this->compute_gdf_result();
this->compare_gdf_result(reference_map);
}
TYPED_TEST(GroupTest, EmptyInput)
{
const size_t num_keys = 0;
const size_t num_values_per_key = 0;
const size_t max_key = 0;
const size_t max_val = 0;
this->create_input(num_keys, num_values_per_key, max_key, max_val);
auto reference_map = this->compute_reference_solution();
this->create_gdf_output_buffers(num_keys, num_values_per_key);
this->compute_gdf_result();
this->compare_gdf_result(reference_map);
}
// Create a new derived class from JoinTest so we can do a new Typed Test set of tests
template <class test_parameters>
struct GroupValidTest : public GroupTest<test_parameters>
{ };
TYPED_TEST_CASE(GroupValidTest, ValidTestImplementations);
TYPED_TEST(GroupValidTest, ReportValidMaskError)
{
const size_t num_keys = 1;
const size_t num_values_per_key = 8;
const size_t max_key = num_keys*2;
const size_t max_val = 1000;
this->create_input(num_keys, num_values_per_key, max_key, max_val, false, 1);
this->create_gdf_output_buffers(num_keys, num_values_per_key);
this->compute_gdf_result(GDF_VALIDITY_UNSUPPORTED);
}
|
df1029da4474dcc20f11fa6f66850dbe5cc4e0a7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0-beta3) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date July 2014
@generated from zmergebicgstab2.cu normal z -> s, Fri Jul 18 17:34:28 2014
@author Hartwig Anzt
*/
#include "common_magma.h"
#include "../include/magmasparse.h"
#define BLOCK_SIZE 512
#define PRECISION_s
// These routines merge multiple kernels from smergebicgstab into one
// This is the code used for the ASHES2014 paper
// "Accelerating Krylov Subspace Solvers on Graphics Processing Units".
// notice that only CSR format is supported so far.
// accelerated reduction for one vector
__global__ void
magma_sreduce_kernel_spmv1( int Gs,
int n,
float *vtmp,
float *vtmp2 ){
extern __shared__ float temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
temp[Idx] = MAGMA_S_MAKE( 0.0, 0.0);
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
while (i < Gs ) {
temp[ Idx ] += vtmp[ i ];
temp[ Idx ] += ( i + blockSize < Gs ) ? vtmp[ i + blockSize ]
: MAGMA_S_MAKE( 0.0, 0.0);
i += gridSize;
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 16 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 8 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 4 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 2 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 1 ];__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp2[ blockIdx.x ] = temp[ 0 ];
}
}
// Fused kernel: computes v = A*p (CSR SpMV, one thread per row) and, in the
// same launch, the per-block partial sums of the dot product <v, r>.
// Partial sums land in vtmp[blockIdx.x] and are combined afterwards by
// magma_sreduce_kernel_spmv1.
// Dynamic shared memory: blockDim.x floats; the reduction assumes
// blockDim.x == 256 (see the host wrapper magma_sbicgmerge_spmv1).
__global__ void
magma_sbicgmerge_spmv1_kernel(
    int n,
    float *d_val,
    magma_index_t *d_rowptr,
    magma_index_t *d_colind,
    float *p,
    float *r,
    float *v,
    float *vtmp
    ){
    extern __shared__ float temp[];
    int Idx = threadIdx.x;
    int i = blockIdx.x * blockDim.x + Idx;
    int j;
    // CSR SpMV: each in-range thread accumulates one row of A times p.
    if( i<n ){
        float dot = MAGMA_S_ZERO;
        int start = d_rowptr[ i ];
        int end = d_rowptr[ i+1 ];
        for( j=start; j<end; j++)
            dot += d_val[ j ] * p[ d_colind[j] ];
        v[ i ] = dot;
    }
    __syncthreads();
    // Stage the per-thread product v[i]*r[i]; out-of-range threads load zero
    // so the tree reduction below is safe for partial tail blocks.
    temp[ Idx ] = ( i < n ) ? v[ i ] * r[ i ] : MAGMA_S_MAKE( 0.0, 0.0);
    __syncthreads();
    // Shared-memory tree reduction over the 256 staged values.
    if ( Idx < 128 ){
        temp[ Idx ] += temp[ Idx + 128 ];
    }
    __syncthreads();
    if ( Idx < 64 ){
        temp[ Idx ] += temp[ Idx + 64 ];
    }
    __syncthreads();
    // Final 32-lane reduction, one variant per precision; the real-precision
    // branches use a volatile pointer to force re-reads of shared memory
    // (legacy warp-synchronous idiom).
    #if defined(PRECISION_z) || defined(PRECISION_c)
    if( Idx < 32 ){
        temp[ Idx ] += temp[ Idx + 32 ];__syncthreads();
        temp[ Idx ] += temp[ Idx + 16 ];__syncthreads();
        temp[ Idx ] += temp[ Idx + 8 ];__syncthreads();
        temp[ Idx ] += temp[ Idx + 4 ];__syncthreads();
        temp[ Idx ] += temp[ Idx + 2 ];__syncthreads();
        temp[ Idx ] += temp[ Idx + 1 ];__syncthreads();
    }
    #endif
    #if defined(PRECISION_d)
    if( Idx < 32 ){
        volatile float *temp2 = temp;
        temp2[ Idx ] += temp2[ Idx + 32 ];
        temp2[ Idx ] += temp2[ Idx + 16 ];
        temp2[ Idx ] += temp2[ Idx + 8 ];
        temp2[ Idx ] += temp2[ Idx + 4 ];
        temp2[ Idx ] += temp2[ Idx + 2 ];
        temp2[ Idx ] += temp2[ Idx + 1 ];
    }
    #endif
    #if defined(PRECISION_s)
    if( Idx < 32 ){
        volatile float *temp2 = temp;
        temp2[ Idx ] += temp2[ Idx + 32 ];
        temp2[ Idx ] += temp2[ Idx + 16 ];
        temp2[ Idx ] += temp2[ Idx + 8 ];
        temp2[ Idx ] += temp2[ Idx + 4 ];
        temp2[ Idx ] += temp2[ Idx + 2 ];
        temp2[ Idx ] += temp2[ Idx + 1 ];
    }
    #endif
    // Thread 0 publishes this block's partial dot product.
    if ( Idx == 0 ){
        vtmp[ blockIdx.x ] = temp[ 0 ];
    }
}
// Single-thread device update: skp[0] <- skp[4] / skp[0].
// Launched with a tiny grid; only global thread 0 does any work.
__global__ void
magma_sbicgstab_alphakernel(
    float *skp ){
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if ( tid != 0 )
        return;
    const float denom = skp[0];
    skp[0] = skp[4] / denom;
}
/**
Purpose
-------
Merges the first SpmV using CSR with the dot product
and the computation of alpha
Arguments
---------
@param
A magma_s_sparse_matrix
system matrix
@param
d1 float*
temporary vector
@param
d2 float*
temporary vector
@param
d_p float*
input vector p
@param
d_r float*
input vector r
@param
d_v float*
output vector v
@param
skp float*
array for parameters ( skp[0]=alpha )
@ingroup magmasparse_sgegpuk
********************************************************************/
// Host driver: launches the fused SpMV+dot kernel (v = A*p, partials of
// <v,r>), then folds the per-block partials down to one value by repeated
// launches of magma_sreduce_kernel_spmv1, ping-ponging between the scratch
// buffers d1/d2, and finally computes skp[0] = alpha on the device.
// Only CSR storage is supported; other formats print an error and the
// reduction then operates on whatever is in d1 (pre-existing behavior).
extern "C" magma_int_t
magma_sbicgmerge_spmv1( magma_s_sparse_matrix A,
    float *d1,
    float *d2,
    float *d_p,
    float *d_r,
    float *d_v,
    float *skp ){
    int n = A.num_rows;
    int local_block_size=256;
    dim3 Bs( local_block_size );
    dim3 Gs( (n+local_block_size-1)/local_block_size );
    dim3 Gs_next;
    // Dynamic shared memory: one float per thread.
    int Ms = local_block_size * sizeof( float );
    // aux1/aux2 ping-pong: reduction reads aux1, writes aux2, then swaps.
    float *aux1 = d1, *aux2 = d2;
    int b = 1;
    if( A.storage_type == Magma_CSR)
        hipLaunchKernelGGL(( magma_sbicgmerge_spmv1_kernel), dim3(Gs), dim3(Bs), Ms, 0,
            n, A.val, A.row, A.col, d_p, d_r, d_v, d1 );
    else
        printf("error: only CSR format supported.\n");
    // Repeated block reduction until one partial value remains; Gs_next is
    // forced to >= 2 so the half-sized launch below always has >= 1 block.
    while( Gs.x > 1 ){
        Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x ;
        if( Gs_next.x == 1 ) Gs_next.x = 2;
        hipLaunchKernelGGL(( magma_sreduce_kernel_spmv1), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2 , 0,
            Gs.x, n, aux1, aux2 );
        Gs_next.x = Gs_next.x /2;
        Gs.x = Gs_next.x;
        b = 1 - b;
        if( b ){ aux1 = d1; aux2 = d2; }
        else { aux2 = d1; aux1 = d2; }
    }
    // Note: after the swap, aux1 points at the buffer holding the result.
    magma_scopyvector( 1, aux1, 1, skp, 1 );
    dim3 Bs2( 2 );
    dim3 Gs2( 1 );
    // alpha = skp[4] / skp[0], computed on the device.
    hipLaunchKernelGGL(( magma_sbicgstab_alphakernel), dim3(Gs2), dim3(Bs2), 0, 0, skp );
    return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
// accelerated block reduction for multiple vectors
// Block reduction over TWO partial-sum vectors at once.  vtmp holds two
// length-Gs partial vectors stored with stride n (segment j starts at j*n);
// each block folds its slice of both and writes the two results to
// vtmp2[blockIdx.x + j*n].  Shared memory: 2 * 128 floats; the launch uses
// 128 threads per block (Bs.x/2 in the host drivers).
__global__ void
magma_sreduce_kernel_spmv2( int Gs,
    int n,
    float *vtmp,
    float *vtmp2 ){
    extern __shared__ float temp[];
    int Idx = threadIdx.x;
    int blockSize = 128;
    int gridSize = blockSize * 2 * gridDim.x;
    int j;
    // Grid-stride accumulation: each thread sums a strided slice of each of
    // the two input segments into its shared-memory slot.
    for( j=0; j<2; j++){
        int i = blockIdx.x * ( blockSize * 2 ) + Idx;
        temp[Idx+j*(blockSize)] = MAGMA_S_MAKE( 0.0, 0.0);
        while (i < Gs ) {
            temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ];
            temp[ Idx+j*(blockSize) ] +=
                ( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ]
                : MAGMA_S_MAKE( 0.0, 0.0);
            i += gridSize;
        }
    }
    __syncthreads();
    // Shared-memory tree reduction, applied to both segments.
    if ( Idx < 64 ){
        for( j=0; j<2; j++){
            temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ];
        }
    }
    __syncthreads();
    // Final 32-lane reduction, one variant per precision (volatile forces
    // shared-memory re-reads in the real-precision branches).
    #if defined(PRECISION_z) || defined(PRECISION_c)
    if( Idx < 32 ){
        for( j=0; j<2; j++)
            temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ];
        __syncthreads();
        for( j=0; j<2; j++)
            temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ];
        __syncthreads();
        for( j=0; j<2; j++)
            temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ];
        __syncthreads();
        for( j=0; j<2; j++)
            temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ];
        __syncthreads();
        for( j=0; j<2; j++)
            temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ];
        __syncthreads();
        for( j=0; j<2; j++)
            temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ];
        __syncthreads();
    }
    #endif
    #if defined(PRECISION_d)
    if( Idx < 32 ){
        volatile float *temp2 = temp;
        for( j=0; j<2; j++){
            temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
            temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
            temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
            temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
            temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
            temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
        }
    }
    #endif
    #if defined(PRECISION_s)
    if( Idx < 32 ){
        volatile float *temp2 = temp;
        for( j=0; j<2; j++){
            temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
            temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
            temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
            temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
            temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
            temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
        }
    }
    #endif
    // Thread 0 publishes both partial results for this block.
    if ( Idx == 0 ){
        for( j=0; j<2; j++){
            vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ];
        }
    }
}
// Fused kernel: computes t = A*s (CSR SpMV, one thread per row) and the
// per-block partials of TWO dot products, <s,t> and <t,t>, staged in the two
// halves of shared memory and written to vtmp[blockIdx.x] and
// vtmp[blockIdx.x + n] respectively.
// Dynamic shared memory: 2 * blockDim.x floats; the reduction assumes
// blockDim.x == 256 (see the host wrapper magma_sbicgmerge_spmv2).
__global__ void
magma_sbicgmerge_spmv2_kernel(
    int n,
    float *d_val,
    magma_index_t *d_rowptr,
    magma_index_t *d_colind,
    float *s,
    float *t,
    float *vtmp
    ){
    extern __shared__ float temp[];
    int Idx = threadIdx.x;
    int i = blockIdx.x * blockDim.x + Idx;
    int j;
    // CSR SpMV: each in-range thread accumulates one row of A times s.
    if( i<n ){
        float dot = MAGMA_S_ZERO;
        int start = d_rowptr[ i ];
        int end = d_rowptr[ i+1 ];
        for( j=start; j<end; j++)
            dot += d_val[ j ] * s[ d_colind[j] ];
        t[ i ] = dot;
    }
    __syncthreads();
    // 2 vectors
    // Stage per-thread products: segment 0 holds s[i]*t[i], segment 1 holds
    // t[i]*t[i]; out-of-range threads zero both slots.
    if (i<n){
        float tmp2 = t[i];
        temp[Idx] = s[i] * tmp2;
        temp[Idx+blockDim.x] = tmp2 * tmp2;
    }
    else{
        for( j=0; j<2; j++)
            temp[Idx+j*blockDim.x] =MAGMA_S_MAKE( 0.0, 0.0);
    }
    __syncthreads();
    // Shared-memory tree reduction, applied to both segments.
    if ( Idx < 128 ){
        for( j=0; j<2; j++){
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
        }
    }
    __syncthreads();
    if ( Idx < 64 ){
        for( j=0; j<2; j++){
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
        }
    }
    __syncthreads();
    // Final 32-lane reduction, one variant per precision.
    #if defined(PRECISION_z) || defined(PRECISION_c)
    if( Idx < 32 ){
        for( j=0; j<2; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
        __syncthreads();
        for( j=0; j<2; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
        __syncthreads();
        for( j=0; j<2; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
        __syncthreads();
        for( j=0; j<2; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
        __syncthreads();
        for( j=0; j<2; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
        __syncthreads();
        for( j=0; j<2; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
        __syncthreads();
    }
    #endif
    #if defined(PRECISION_d)
    if( Idx < 32 ){
        volatile float *temp2 = temp;
        for( j=0; j<2; j++){
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
        }
    }
    #endif
    #if defined(PRECISION_s)
    if( Idx < 32 ){
        volatile float *temp2 = temp;
        for( j=0; j<2; j++){
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
        }
    }
    #endif
    // Thread 0 publishes both partial dot products, stride n apart.
    if ( Idx == 0 ){
        for( j=0; j<2; j++){
            vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
        }
    }
}
// Single-thread device update: skp[2] <- skp[6] / skp[7], and skp[3] is
// overwritten with a copy of skp[4].
__global__ void
magma_sbicgstab_omegakernel(
    float *skp ){
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if ( tid != 0 )
        return;
    skp[2] = skp[6] / skp[7];
    skp[3] = skp[4];
}
/**
Purpose
-------
Merges the second SpmV using CSR with the dot product
and the computation of omega
Arguments
---------
@param
A magma_s_sparse_matrix
input matrix
@param
d1 float*
temporary vector
@param
d2 float*
temporary vector
@param
d_s float*
input vector s
@param
d_t float*
output vector t
@param
skp float*
array for parameters
@ingroup magmasparse_sgegpuk
********************************************************************/
// Host driver: launches the fused SpMV+dot kernel (t = A*s, partials of
// <s,t> and <t,t>), folds the per-block partials with repeated launches of
// magma_sreduce_kernel_spmv2 (ping-ponging d1/d2), copies the two scalars
// into skp[6]/skp[7], and computes skp[2] = omega on the device.
// Only CSR storage is supported.
extern "C" magma_int_t
magma_sbicgmerge_spmv2(
    magma_s_sparse_matrix A,
    float *d1,
    float *d2,
    float *d_s,
    float *d_t,
    float *skp ){
    int n = A.num_rows;
    int local_block_size=256;
    dim3 Bs( local_block_size );
    dim3 Gs( (n+local_block_size-1)/local_block_size );
    dim3 Gs_next;
    // Dynamic shared memory: two floats per thread (two dot products).
    int Ms = 2*local_block_size * sizeof( float );
    // aux1/aux2 ping-pong: reduction reads aux1, writes aux2, then swaps.
    float *aux1 = d1, *aux2 = d2;
    int b = 1;
    if( A.storage_type == Magma_CSR)
        hipLaunchKernelGGL(( magma_sbicgmerge_spmv2_kernel), dim3(Gs), dim3(Bs), Ms, 0,
            n, A.val, A.row, A.col, d_s, d_t, d1 );
    else
        printf("error: only CSR format supported.\n");
    // Fold the partials down to one value per dot product.
    while( Gs.x > 1 ){
        Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x ;
        if( Gs_next.x == 1 ) Gs_next.x = 2;
        hipLaunchKernelGGL(( magma_sreduce_kernel_spmv2), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2 , 0,
            Gs.x, n, aux1, aux2 );
        Gs_next.x = Gs_next.x /2;
        Gs.x = Gs_next.x;
        b = 1 - b;
        if( b ){ aux1 = d1; aux2 = d2; }
        else { aux2 = d1; aux1 = d2; }
    }
    // The two reduced scalars live stride n apart in the winning buffer.
    magma_scopyvector( 1, aux1, 1, skp+6, 1 );
    magma_scopyvector( 1, aux1+n, 1, skp+7, 1 );
    dim3 Bs2( 2 );
    dim3 Gs2( 1 );
    // omega = skp[6]/skp[7]; skp[3] = skp[4], computed on the device.
    hipLaunchKernelGGL(( magma_sbicgstab_omegakernel), dim3(Gs2), dim3(Bs2), 0, 0, skp );
    return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
// Fused kernel: updates x and r elementwise,
//     x[i] += alpha*p[i] + omega*s[i],   r[i] = s[i] - omega*t[i],
// reading alpha = skp[0] and omega = skp[2] from device memory, and computes
// the per-block partials of the dot products <rr,r> and <r,r>, written to
// vtmp[blockIdx.x] and vtmp[blockIdx.x + n].
// Dynamic shared memory: 2 * blockDim.x floats; the reduction assumes
// blockDim.x == 256 (see the host wrapper magma_sbicgmerge_xrbeta).
__global__ void
magma_sbicgmerge_xrbeta_kernel(
    int n,
    float *rr,
    float *r,
    float *p,
    float *s,
    float *t,
    float *x,
    float *skp,
    float *vtmp
    ){
    extern __shared__ float temp[];
    int Idx = threadIdx.x;
    int i = blockIdx.x * blockDim.x + Idx;
    int j;
    // Scalars computed by the preceding merged kernels, read from device.
    float alpha=skp[0];
    float omega=skp[2];
    if( i<n ){
        float sl;
        sl = s[i];
        x[i] = x[i] + alpha * p[i] + omega * sl;
        r[i] = sl - omega * t[i];
    }
    __syncthreads();
    // 2 vectors
    // Stage per-thread products: segment 0 holds rr[i]*r[i], segment 1
    // holds r[i]*r[i]; out-of-range threads zero both slots.
    if (i<n){
        float tmp2 = r[i];
        temp[Idx] = rr[i] * tmp2;
        temp[Idx+blockDim.x] = tmp2 * tmp2;
    }
    else{
        for( j=0; j<2; j++)
            temp[Idx+j*blockDim.x] =MAGMA_S_MAKE( 0.0, 0.0);
    }
    __syncthreads();
    // Shared-memory tree reduction, applied to both segments.
    if ( Idx < 128 ){
        for( j=0; j<2; j++){
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
        }
    }
    __syncthreads();
    if ( Idx < 64 ){
        for( j=0; j<2; j++){
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
        }
    }
    __syncthreads();
    // Final 32-lane reduction, one variant per precision.
    #if defined(PRECISION_z) || defined(PRECISION_c)
    if( Idx < 32 ){
        for( j=0; j<2; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
        __syncthreads();
        for( j=0; j<2; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
        __syncthreads();
        for( j=0; j<2; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
        __syncthreads();
        for( j=0; j<2; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
        __syncthreads();
        for( j=0; j<2; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
        __syncthreads();
        for( j=0; j<2; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
        __syncthreads();
    }
    #endif
    #if defined(PRECISION_d)
    if( Idx < 32 ){
        volatile float *temp2 = temp;
        for( j=0; j<2; j++){
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
        }
    }
    #endif
    #if defined(PRECISION_s)
    if( Idx < 32 ){
        volatile float *temp2 = temp;
        for( j=0; j<2; j++){
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
        }
    }
    #endif
    // Thread 0 publishes both partial dot products, stride n apart.
    if ( Idx == 0 ){
        for( j=0; j<2; j++){
            vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
        }
    }
}
// Single-thread device update:
//   skp[1] <- (skp[4]/skp[3]) * (skp[0]/skp[2])
__global__ void
magma_sbicgstab_betakernel(
    float *skp ){
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if ( tid == 0 ){
        const float ratio_a = skp[4] / skp[3];
        const float ratio_b = skp[0] / skp[2];
        skp[1] = ratio_a * ratio_b;
    }
}
/**
Purpose
-------
Merges the update of the iteration vectors x and r with the dot products
<rr,r> and <r,r> and the computation of beta
Arguments
---------
@param
n int
dimension n
@param
d1 float*
temporary vector
@param
d2 float*
temporary vector
@param
rr float*
input vector rr
@param
r float*
input/output vector r
@param
p float*
input vector p
@param
s float*
input vector s
@param
t float*
input vector t
@param
x float*
output vector x
@param
skp float*
array for parameters
@ingroup magmasparse_sgegpuk
********************************************************************/
// Host driver: launches the fused x/r update + dot-product kernel, folds the
// per-block partials of <rr,r> and <r,r> with repeated launches of
// magma_sreduce_kernel_spmv2 (ping-ponging d1/d2), copies the two scalars
// into skp[4]/skp[5], and computes skp[1] = beta on the device.
extern "C" magma_int_t
magma_sbicgmerge_xrbeta(
    int n,
    float *d1,
    float *d2,
    float *rr,
    float *r,
    float *p,
    float *s,
    float *t,
    float *x,
    float *skp ){
    int local_block_size=256;
    dim3 Bs( local_block_size );
    dim3 Gs( (n+local_block_size-1)/local_block_size );
    dim3 Gs_next;
    // Dynamic shared memory: two floats per thread (two dot products).
    int Ms = 2*local_block_size * sizeof( float );
    // aux1/aux2 ping-pong: reduction reads aux1, writes aux2, then swaps.
    float *aux1 = d1, *aux2 = d2;
    int b = 1;
    hipLaunchKernelGGL(( magma_sbicgmerge_xrbeta_kernel), dim3(Gs), dim3(Bs), Ms, 0,
        n, rr, r, p, s, t, x, skp, d1);
    // Fold the partials down to one value per dot product.
    while( Gs.x > 1 ){
        Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x ;
        if( Gs_next.x == 1 ) Gs_next.x = 2;
        hipLaunchKernelGGL(( magma_sreduce_kernel_spmv2), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2 , 0,
            Gs.x, n, aux1, aux2 );
        Gs_next.x = Gs_next.x /2;
        Gs.x = Gs_next.x;
        b = 1 - b;
        if( b ){ aux1 = d1; aux2 = d2; }
        else { aux2 = d1; aux1 = d2; }
    }
    // The two reduced scalars live stride n apart in the winning buffer.
    magma_scopyvector( 1, aux1, 1, skp+4, 1 );
    magma_scopyvector( 1, aux1+n, 1, skp+5, 1 );
    dim3 Bs2( 2 );
    dim3 Gs2( 1 );
    // beta = (skp[4]/skp[3]) * (skp[0]/skp[2]), computed on the device.
    hipLaunchKernelGGL(( magma_sbicgstab_betakernel), dim3(Gs2), dim3(Bs2), 0, 0, skp );
    return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
| df1029da4474dcc20f11fa6f66850dbe5cc4e0a7.cu | /*
-- MAGMA (version 1.5.0-beta3) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date July 2014
@generated from zmergebicgstab2.cu normal z -> s, Fri Jul 18 17:34:28 2014
@author Hartwig Anzt
*/
#include "common_magma.h"
#include "../include/magmasparse.h"
#define BLOCK_SIZE 512
#define PRECISION_s
// These routines merge multiple kernels from smergebicgstab into one
// This is the code used for the ASHES2014 paper
// "Accelerating Krylov Subspace Solvers on Graphics Processing Units".
// notice that only CSR format is supported so far.
// accelerated reduction for one vector
// Block reduction over a single length-Gs vector of partial sums: each block
// folds its grid-strided slice of vtmp into shared memory and writes one
// value to vtmp2[blockIdx.x].  Shared memory: 128 floats; launched with 128
// threads per block (Bs.x/2 in the host driver).  The n parameter is unused
// here (kept for signature symmetry with the two-vector variant).
__global__ void
magma_sreduce_kernel_spmv1( int Gs,
    int n,
    float *vtmp,
    float *vtmp2 ){
    extern __shared__ float temp[];
    int Idx = threadIdx.x;
    int blockSize = 128;
    int gridSize = blockSize * 2 * gridDim.x;
    temp[Idx] = MAGMA_S_MAKE( 0.0, 0.0);
    // Grid-stride accumulation: two elements per thread per pass, with a
    // bounds check on the second element.
    int i = blockIdx.x * ( blockSize * 2 ) + Idx;
    while (i < Gs ) {
        temp[ Idx ] += vtmp[ i ];
        temp[ Idx ] += ( i + blockSize < Gs ) ? vtmp[ i + blockSize ]
            : MAGMA_S_MAKE( 0.0, 0.0);
        i += gridSize;
    }
    __syncthreads();
    // Shared-memory tree reduction over the 128 staged values.
    if ( Idx < 64 ){
        temp[ Idx ] += temp[ Idx + 64 ];
    }
    __syncthreads();
    // Final 32-lane reduction, one variant per precision; volatile forces
    // shared-memory re-reads in the real-precision branches.
    #if defined(PRECISION_z) || defined(PRECISION_c)
    if( Idx < 32 ){
        temp[ Idx ] += temp[ Idx + 32 ];__syncthreads();
        temp[ Idx ] += temp[ Idx + 16 ];__syncthreads();
        temp[ Idx ] += temp[ Idx + 8 ];__syncthreads();
        temp[ Idx ] += temp[ Idx + 4 ];__syncthreads();
        temp[ Idx ] += temp[ Idx + 2 ];__syncthreads();
        temp[ Idx ] += temp[ Idx + 1 ];__syncthreads();
    }
    #endif
    #if defined(PRECISION_d)
    if( Idx < 32 ){
        volatile float *temp2 = temp;
        temp2[ Idx ] += temp2[ Idx + 32 ];
        temp2[ Idx ] += temp2[ Idx + 16 ];
        temp2[ Idx ] += temp2[ Idx + 8 ];
        temp2[ Idx ] += temp2[ Idx + 4 ];
        temp2[ Idx ] += temp2[ Idx + 2 ];
        temp2[ Idx ] += temp2[ Idx + 1 ];
    }
    #endif
    #if defined(PRECISION_s)
    if( Idx < 32 ){
        volatile float *temp2 = temp;
        temp2[ Idx ] += temp2[ Idx + 32 ];
        temp2[ Idx ] += temp2[ Idx + 16 ];
        temp2[ Idx ] += temp2[ Idx + 8 ];
        temp2[ Idx ] += temp2[ Idx + 4 ];
        temp2[ Idx ] += temp2[ Idx + 2 ];
        temp2[ Idx ] += temp2[ Idx + 1 ];
    }
    #endif
    // Thread 0 publishes this block's partial sum.
    if ( Idx == 0 ){
        vtmp2[ blockIdx.x ] = temp[ 0 ];
    }
}
__global__ void
magma_sbicgmerge_spmv1_kernel(
int n,
float *d_val,
magma_index_t *d_rowptr,
magma_index_t *d_colind,
float *p,
float *r,
float *v,
float *vtmp
){
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
if( i<n ){
float dot = MAGMA_S_ZERO;
int start = d_rowptr[ i ];
int end = d_rowptr[ i+1 ];
for( j=start; j<end; j++)
dot += d_val[ j ] * p[ d_colind[j] ];
v[ i ] = dot;
}
__syncthreads();
temp[ Idx ] = ( i < n ) ? v[ i ] * r[ i ] : MAGMA_S_MAKE( 0.0, 0.0);
__syncthreads();
if ( Idx < 128 ){
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 16 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 8 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 4 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 2 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 1 ];__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// Single-thread device update: skp[0] <- skp[4] / skp[0].
// Launched with a tiny grid; only global thread 0 does any work.
__global__ void
magma_sbicgstab_alphakernel(
    float *skp ){
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if ( tid != 0 )
        return;
    const float denom = skp[0];
    skp[0] = skp[4] / denom;
}
/**
Purpose
-------
Merges the first SpmV using CSR with the dot product
and the computation of alpha
Arguments
---------
@param
A magma_s_sparse_matrix
system matrix
@param
d1 float*
temporary vector
@param
d2 float*
temporary vector
@param
d_p float*
input vector p
@param
d_r float*
input vector r
@param
d_v float*
output vector v
@param
skp float*
array for parameters ( skp[0]=alpha )
@ingroup magmasparse_sgegpuk
********************************************************************/
extern "C" magma_int_t
magma_sbicgmerge_spmv1( magma_s_sparse_matrix A,
float *d1,
float *d2,
float *d_p,
float *d_r,
float *d_v,
float *skp ){
int n = A.num_rows;
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( (n+local_block_size-1)/local_block_size );
dim3 Gs_next;
int Ms = local_block_size * sizeof( float );
float *aux1 = d1, *aux2 = d2;
int b = 1;
if( A.storage_type == Magma_CSR)
magma_sbicgmerge_spmv1_kernel<<<Gs, Bs, Ms>>>
( n, A.val, A.row, A.col, d_p, d_r, d_v, d1 );
else
printf("error: only CSR format supported.\n");
while( Gs.x > 1 ){
Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x ;
if( Gs_next.x == 1 ) Gs_next.x = 2;
magma_sreduce_kernel_spmv1<<< Gs_next.x/2, Bs.x/2, Ms/2 >>>
( Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if( b ){ aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_scopyvector( 1, aux1, 1, skp, 1 );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
magma_sbicgstab_alphakernel<<<Gs2, Bs2, 0>>>( skp );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
// accelerated block reduction for multiple vectors
__global__ void
magma_sreduce_kernel_spmv2( int Gs,
int n,
float *vtmp,
float *vtmp2 ){
extern __shared__ float temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
int j;
for( j=0; j<2; j++){
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
temp[Idx+j*(blockSize)] = MAGMA_S_MAKE( 0.0, 0.0);
while (i < Gs ) {
temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ];
temp[ Idx+j*(blockSize) ] +=
( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ]
: MAGMA_S_MAKE( 0.0, 0.0);
i += gridSize;
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ];
}
}
}
__global__ void
magma_sbicgmerge_spmv2_kernel(
int n,
float *d_val,
magma_index_t *d_rowptr,
magma_index_t *d_colind,
float *s,
float *t,
float *vtmp
){
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
if( i<n ){
float dot = MAGMA_S_ZERO;
int start = d_rowptr[ i ];
int end = d_rowptr[ i+1 ];
for( j=start; j<end; j++)
dot += d_val[ j ] * s[ d_colind[j] ];
t[ i ] = dot;
}
__syncthreads();
// 2 vectors
if (i<n){
float tmp2 = t[i];
temp[Idx] = s[i] * tmp2;
temp[Idx+blockDim.x] = tmp2 * tmp2;
}
else{
for( j=0; j<2; j++)
temp[Idx+j*blockDim.x] =MAGMA_S_MAKE( 0.0, 0.0);
}
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
// Single-thread device update: skp[2] <- skp[6] / skp[7], and skp[3] is
// overwritten with a copy of skp[4].
__global__ void
magma_sbicgstab_omegakernel(
    float *skp ){
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if ( tid != 0 )
        return;
    skp[2] = skp[6] / skp[7];
    skp[3] = skp[4];
}
/**
Purpose
-------
Merges the second SpmV using CSR with the dot product
and the computation of omega
Arguments
---------
@param
A magma_s_sparse_matrix
input matrix
@param
d1 float*
temporary vector
@param
d2 float*
temporary vector
@param
d_s float*
input vector s
@param
d_t float*
output vector t
@param
skp float*
array for parameters
@ingroup magmasparse_sgegpuk
********************************************************************/
extern "C" magma_int_t
magma_sbicgmerge_spmv2(
magma_s_sparse_matrix A,
float *d1,
float *d2,
float *d_s,
float *d_t,
float *skp ){
int n = A.num_rows;
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( (n+local_block_size-1)/local_block_size );
dim3 Gs_next;
int Ms = 2*local_block_size * sizeof( float );
float *aux1 = d1, *aux2 = d2;
int b = 1;
if( A.storage_type == Magma_CSR)
magma_sbicgmerge_spmv2_kernel<<<Gs, Bs, Ms>>>
( n, A.val, A.row, A.col, d_s, d_t, d1 );
else
printf("error: only CSR format supported.\n");
while( Gs.x > 1 ){
Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x ;
if( Gs_next.x == 1 ) Gs_next.x = 2;
magma_sreduce_kernel_spmv2<<< Gs_next.x/2, Bs.x/2, Ms/2 >>>
( Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if( b ){ aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_scopyvector( 1, aux1, 1, skp+6, 1 );
magma_scopyvector( 1, aux1+n, 1, skp+7, 1 );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
magma_sbicgstab_omegakernel<<<Gs2, Bs2, 0>>>( skp );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
__global__ void
magma_sbicgmerge_xrbeta_kernel(
int n,
float *rr,
float *r,
float *p,
float *s,
float *t,
float *x,
float *skp,
float *vtmp
){
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
float alpha=skp[0];
float omega=skp[2];
if( i<n ){
float sl;
sl = s[i];
x[i] = x[i] + alpha * p[i] + omega * sl;
r[i] = sl - omega * t[i];
}
__syncthreads();
// 2 vectors
if (i<n){
float tmp2 = r[i];
temp[Idx] = rr[i] * tmp2;
temp[Idx+blockDim.x] = tmp2 * tmp2;
}
else{
for( j=0; j<2; j++)
temp[Idx+j*blockDim.x] =MAGMA_S_MAKE( 0.0, 0.0);
}
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
// Single-thread device update:
//   skp[1] <- (skp[4]/skp[3]) * (skp[0]/skp[2])
__global__ void
magma_sbicgstab_betakernel(
    float *skp ){
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if ( tid == 0 ){
        const float ratio_a = skp[4] / skp[3];
        const float ratio_b = skp[0] / skp[2];
        skp[1] = ratio_a * ratio_b;
    }
}
/**
Purpose
-------
Merges the update of the iteration vectors x and r with the dot products
<rr,r> and <r,r> and the computation of beta
Arguments
---------
@param
n int
dimension n
@param
d1 float*
temporary vector
@param
d2 float*
temporary vector
@param
rr float*
input vector rr
@param
r float*
input/output vector r
@param
p float*
input vector p
@param
s float*
input vector s
@param
t float*
input vector t
@param
x float*
output vector x
@param
skp float*
array for parameters
@ingroup magmasparse_sgegpuk
********************************************************************/
extern "C" magma_int_t
magma_sbicgmerge_xrbeta(
int n,
float *d1,
float *d2,
float *rr,
float *r,
float *p,
float *s,
float *t,
float *x,
float *skp ){
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( (n+local_block_size-1)/local_block_size );
dim3 Gs_next;
int Ms = 2*local_block_size * sizeof( float );
float *aux1 = d1, *aux2 = d2;
int b = 1;
magma_sbicgmerge_xrbeta_kernel<<<Gs, Bs, Ms>>>
( n, rr, r, p, s, t, x, skp, d1);
while( Gs.x > 1 ){
Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x ;
if( Gs_next.x == 1 ) Gs_next.x = 2;
magma_sreduce_kernel_spmv2<<< Gs_next.x/2, Bs.x/2, Ms/2 >>>
( Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if( b ){ aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_scopyvector( 1, aux1, 1, skp+4, 1 );
magma_scopyvector( 1, aux1+n, 1, skp+5, 1 );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
magma_sbicgstab_betakernel<<<Gs2, Bs2, 0>>>( skp );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
|
a2caad2c6551e23b2fe217d6c882e6a48067bcbb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "core/self_add_bias.cuh"
#include <assert.h>
#include "core/common.cuh"
// add_QKV_bias kernel code modified from Nvidia's DeepLearningExamples
// https://github.com/NVIDIA/DeepLearningExamples/blob/master/FasterTransformer/v3.1/fastertransformer/cuda/open_attention.cu#L1342-L1395
// Adds per-channel biases to the Q, K, V projection outputs and transposes
// each from [batch, seq, head, size] layout into [batch, head, seq, size]
// buffers (q_buf_/k_buf_/v_buf_).
// Grid layout (see the host wrapper): blockIdx.x in [0, 3*batch*seq) selects
// the QKV tensor (blockIdx.x / (batch*seq)) and the row; blockIdx.y folds the
// hidden dimension when it exceeds the max block size, so a thread's channel
// is threadIdx.x + blockDim.x * blockIdx.y.
template<typename T>
__global__
void add_QKV_bias_opt(T* Q, const T* bias_Q, T* K, const T* bias_K, T* V, const T* bias_V, T* q_buf_, T* k_buf_, T* v_buf_,
    const int batch_size, const int seq_len, const int head_num, const int size_per_head)
{
    T* data_ptr;
    T* buf_ptr;
    const T* bias_ptr;
    int m = batch_size * seq_len;
    int n = head_num * size_per_head;
    // Select which of the three tensors this block works on.
    int qkv_id = blockIdx.x / m ;
    int row_offset = (blockIdx.x % m) * n;
    if(qkv_id == 0)
    {
        data_ptr = Q + row_offset;
        buf_ptr = q_buf_;
        bias_ptr = bias_Q;
    }
    else if(qkv_id == 1)
    {
        data_ptr = K + row_offset;
        buf_ptr = k_buf_;
        bias_ptr = bias_K;
    }
    else
    {
        data_ptr = V + row_offset;
        buf_ptr = v_buf_;
        bias_ptr = bias_V;
    }
    int batch_id = (blockIdx.x % m) / seq_len;
    int head_id = (threadIdx.x + blockIdx.y * blockDim.x) / size_per_head;
    // NOTE(review): id_in_head uses only threadIdx.x, while head_id includes
    // the blockIdx.y fold; this matches only when blockDim.x is a multiple of
    // size_per_head — presumably guaranteed by the launch config. Verify.
    int id_in_head = threadIdx.x % size_per_head;
    int word_start_id = (blockIdx.x ) % seq_len;
    // __ldg: read-only cache load of this channel's bias.
    T bias = __ldg(&bias_ptr[threadIdx.x + blockDim.x * blockIdx.y]);
    // Single-iteration loop (i == word_start_id only): add bias, then
    // scatter into the [batch, head, seq, size] destination layout.
    for(int i = word_start_id; i < word_start_id + 1; ++i)
    {
        T tmp = data_ptr[threadIdx.x + blockDim.x * blockIdx.y] + bias;
        int target_id = batch_id * (seq_len * head_num * size_per_head) + head_id * seq_len * size_per_head +
            i * size_per_head + id_in_head;
        buf_ptr[target_id] = tmp;
        data_ptr += n;
    }
}
// Half-precision specialization: processes two halves at a time by
// reinterpreting Q/K/V, the biases, and the output buffers as half2 arrays
// (so the effective per-thread channel count is halved — presumably the host
// wrapper's block.x = k / (2*fold_coeff) accounts for this; verify).
// Unlike the generic template, all three tensors are handled by every thread
// here, so the grid has no QKV dimension.
template<>
__global__
void add_QKV_bias_opt<half>( half* Q, const half* bias_Q, half* K, const half* bias_K, half* V, const half* bias_V,
    half* q_buf_, half* k_buf_, half* v_buf_,
    const int batch_size, const int seq_len, const int head_num, const int size_per_head)
{
    int tid = blockIdx.x * (size_per_head * head_num) + threadIdx.x + blockDim.x * blockIdx.y;
    int batch_id = tid / (head_num * seq_len * size_per_head);
    int seq_id = (tid % (head_num * seq_len * size_per_head)) / (head_num * size_per_head);
    int head_id = (tid % (head_num * size_per_head)) / size_per_head;
    int id = tid % size_per_head;
    // target_index (common.cuh) maps to the transposed destination layout.
    int target_id = target_index(batch_id, seq_id, head_id, id, batch_size, seq_len, head_num, size_per_head);
    int bias_id = threadIdx.x + blockDim.x * blockIdx.y;
    // Q: vectorized load, half2 bias add (__hadd2), transposed store.
    half2* src_ptr = (half2*)Q;
    half2* dst_ptr = (half2*)q_buf_;
    const half2* bias_ptr = (const half2*)bias_Q;
    dst_ptr[target_id] = __hadd2(src_ptr[tid], __ldg(&bias_ptr[bias_id]));
    // K: same indices, different tensors.
    src_ptr = (half2*)K;
    dst_ptr = (half2*)k_buf_;
    bias_ptr = (const half2*)bias_K;
    dst_ptr[target_id] = __hadd2(src_ptr[tid], __ldg(&bias_ptr[bias_id]));
    // V: same indices, different tensors.
    src_ptr = (half2*)V;
    dst_ptr = (half2*)v_buf_;
    bias_ptr = (const half2*)bias_V;
    dst_ptr[target_id] = __hadd2(src_ptr[tid], __ldg(&bias_ptr[bias_id]));
}
// Host-side launcher for add_QKV_bias_opt (HIP stream variant).
// Picks fold_coeff so that block.x = k / fold_coeff stays at or below 1024
// threads, then dispatches either the generic float kernel (grid.x = 3 * m,
// one block row per Q/K/V row) or the half2-vectorized kernel (grid.x = m,
// size_per_head halved because elements are processed in pairs).
// NOTE(review): the assert condition checks m * qkv_types, but the message
// only mentions batch_size * seq_len -- slightly misleading.
template<typename T>
void add_QKV_bias_opt_kernel( void* Q, const void* bias_Q, void* K, const void* bias_K, void* V, const void* bias_V, void* q_buf_, void* k_buf_, void* v_buf_,
    const int& batch_size, const int& seq_len, const int& head_num, const int& size_per_head, const hipStream_t stream){
    // printf("inner add_qkv_bias\n");
    int qkv_types = 3;
    int m = batch_size * seq_len;
    int k = head_num * size_per_head;
    assert(m * qkv_types <= 65536 && "batch_size * seq_len must <= 65536");
    int fold_coeff = 1;
    dim3 grid;
    dim3 block;
    //TODO - int8
    if (sizeof(T) == sizeof(float)){
        // Smallest fold such that k / fold_coeff <= 1024.
        if (k <= 1024){
            fold_coeff = 1;
        }else if( k <= 2048){
            fold_coeff = 2;
        }else if(k <= 4096){
            fold_coeff = 4;
        }else if(k <= 8192){
            fold_coeff = 8;
        }else if(k <= 16384){
            fold_coeff = 16;
        }
        grid.x = m * qkv_types;  // Q rows, then K rows, then V rows
        grid.y = fold_coeff;     // folds one wide row over several blocks
        block.x = k / fold_coeff;
        hipLaunchKernelGGL(( add_QKV_bias_opt), dim3(grid), dim3(block), 0, stream, (float*)Q, (float*)bias_Q, (float*)K, (float*)bias_K, (float*)V, (float*)bias_V, (float*)q_buf_, (float*)k_buf_,
            (float*)v_buf_, batch_size, seq_len, head_num, size_per_head);
    }else{
        // half path: the kernel works in half2 units, i.e. k / 2 elements/row.
        if (k <= 2048){
            fold_coeff = 2;
        }else if( k <= 4096){
            fold_coeff = 2;
        }else if(k <= 8192){
            fold_coeff = 4;
        }else if(k <= 16384){
            fold_coeff = 8;
        }else if(k <= 16384 * 2){
            fold_coeff = 16;
        }
        grid.x = m;              // half kernel covers Q, K and V in one pass
        grid.y = fold_coeff;
        block.x = k / (2 * fold_coeff);
        hipLaunchKernelGGL(( add_QKV_bias_opt), dim3(grid), dim3(block), 0, stream, (half*)Q, (half*)bias_Q, (half*)K, (half*)bias_K, (half*)V, (half*)bias_V, (half*)q_buf_, (half*)k_buf_,
            (half*)v_buf_, batch_size, seq_len, head_num, size_per_head / 2);
    }
}
template void add_QKV_bias_opt_kernel<float>( void* Q, const void* bias_Q, void* K, const void* bias_K, void* V, const void* bias_V, void* q_buf_, void* k_buf_, void* v_buf_,
const int& batch_size, const int& seq_len, const int& head_num, const int& size_per_head, const hipStream_t stream);
template void add_QKV_bias_opt_kernel<half>(void* Q, const void* bias_Q, void* K, const void* bias_K, void* V, const void* bias_V, void* q_buf_, void* k_buf_, void* v_buf_,
const int& batch_size, const int& seq_len, const int& head_num, const int& size_per_head, const hipStream_t stream);
| a2caad2c6551e23b2fe217d6c882e6a48067bcbb.cu | #include "core/self_add_bias.cuh"
#include <assert.h>
#include "core/common.cuh"
// add_QKV_bias kernel code modified from Nvidia's DeepLearningExamples
// https://github.com/NVIDIA/DeepLearningExamples/blob/master/FasterTransformer/v3.1/fastertransformer/cuda/open_attention.cu#L1342-L1395
// Fused bias-add + transpose for the self-attention Q/K/V projections.
//
// Launch layout (set up by add_QKV_bias_opt_kernel):
//   grid.x  = 3 * batch_size * seq_len   -- one block row per (Q|K|V, token)
//   grid.y  = fold_coeff                 -- splits a row wider than one block
//   block.x = head_num * size_per_head / fold_coeff
//
// Inputs Q/K/V are row-major [batch_size * seq_len, head_num * size_per_head];
// the outputs q_buf_/k_buf_/v_buf_ receive the bias-added values scattered to
// a [batch, head, seq, size_per_head] layout (see target_id below).
template<typename T>
__global__
void add_QKV_bias_opt(T* Q, const T* bias_Q, T* K, const T* bias_K, T* V, const T* bias_V, T* q_buf_, T* k_buf_, T* v_buf_,
    const int batch_size, const int seq_len, const int head_num, const int size_per_head)
{
    T* data_ptr;
    T* buf_ptr;
    const T* bias_ptr;
    int m = batch_size * seq_len;      // rows per tensor
    int n = head_num * size_per_head;  // row width (hidden size)
    // blockIdx.x enumerates all rows of Q first, then K, then V.
    int qkv_id = blockIdx.x / m ;
    int row_offset = (blockIdx.x % m) * n;
    if(qkv_id == 0)
    {
        data_ptr = Q + row_offset;
        buf_ptr = q_buf_;
        bias_ptr = bias_Q;
    }
    else if(qkv_id == 1)
    {
        data_ptr = K + row_offset;
        buf_ptr = k_buf_;
        bias_ptr = bias_K;
    }
    else
    {
        data_ptr = V + row_offset;
        buf_ptr = v_buf_;
        bias_ptr = bias_V;
    }
    int batch_id = (blockIdx.x % m) / seq_len;
    // threadIdx.x + blockIdx.y * blockDim.x together span one full row of n elements.
    int head_id = (threadIdx.x + blockIdx.y * blockDim.x) / size_per_head;
    // NOTE(review): uses threadIdx.x only; correct when grid.y > 1 only if
    // blockDim.x is a multiple of size_per_head -- confirm against the launcher.
    int id_in_head = threadIdx.x % size_per_head;
    // m is a multiple of seq_len, so this equals (blockIdx.x % m) % seq_len.
    int word_start_id = (blockIdx.x ) % seq_len;
    // Bias is indexed by column within the row; __ldg uses the read-only cache.
    T bias = __ldg(&bias_ptr[threadIdx.x + blockDim.x * blockIdx.y]);
    // Single-iteration loop (kept from a multi-word-per-block variant).
    for(int i = word_start_id; i < word_start_id + 1; ++i)
    {
        T tmp = data_ptr[threadIdx.x + blockDim.x * blockIdx.y] + bias;
        // Scatter into the [batch, head, seq, size_per_head] output layout.
        int target_id = batch_id * (seq_len * head_num * size_per_head) + head_id * seq_len * size_per_head +
            i * size_per_head + id_in_head;
        buf_ptr[target_id] = tmp;
        data_ptr += n;
    }
}
// half specialization: handles Q, K and V in a single pass using half2
// vectorized loads/stores. The launcher therefore uses grid.x = batch_size *
// seq_len (not 3x) and passes size_per_head already divided by 2, so all
// indices below are in half2 units.
template<>
__global__
void add_QKV_bias_opt<half>( half* Q, const half* bias_Q, half* K, const half* bias_K, half* V, const half* bias_V,
    half* q_buf_, half* k_buf_, half* v_buf_,
    const int batch_size, const int seq_len, const int head_num, const int size_per_head)
{
    // Flat element index into the [batch*seq, head*size] input; blockIdx.y
    // folds rows wider than one block.
    int tid = blockIdx.x * (size_per_head * head_num) + threadIdx.x + blockDim.x * blockIdx.y;
    int batch_id = tid / (head_num * seq_len * size_per_head);
    int seq_id = (tid % (head_num * seq_len * size_per_head)) / (head_num * size_per_head);
    int head_id = (tid % (head_num * size_per_head)) / size_per_head;
    int id = tid % size_per_head;
    // target_index() (defined elsewhere) maps to the transposed output layout.
    int target_id = target_index(batch_id, seq_id, head_id, id, batch_size, seq_len, head_num, size_per_head);
    int bias_id = threadIdx.x + blockDim.x * blockIdx.y; // column within the row
    half2* src_ptr = (half2*)Q;
    half2* dst_ptr = (half2*)q_buf_;
    const half2* bias_ptr = (const half2*)bias_Q;
    dst_ptr[target_id] = __hadd2(src_ptr[tid], __ldg(&bias_ptr[bias_id]));
    // Same element position, K tensor.
    src_ptr = (half2*)K;
    dst_ptr = (half2*)k_buf_;
    bias_ptr = (const half2*)bias_K;
    dst_ptr[target_id] = __hadd2(src_ptr[tid], __ldg(&bias_ptr[bias_id]));
    // Same element position, V tensor.
    src_ptr = (half2*)V;
    dst_ptr = (half2*)v_buf_;
    bias_ptr = (const half2*)bias_V;
    dst_ptr[target_id] = __hadd2(src_ptr[tid], __ldg(&bias_ptr[bias_id]));
}
// Host-side launcher for add_QKV_bias_opt (CUDA stream variant).
// Picks fold_coeff so that block.x = k / fold_coeff stays at or below 1024
// threads, then dispatches either the generic float kernel (grid.x = 3 * m,
// one block row per Q/K/V row) or the half2-vectorized kernel (grid.x = m,
// size_per_head halved because elements are processed in pairs).
// NOTE(review): the assert condition checks m * qkv_types, but the message
// only mentions batch_size * seq_len -- slightly misleading.
template<typename T>
void add_QKV_bias_opt_kernel( void* Q, const void* bias_Q, void* K, const void* bias_K, void* V, const void* bias_V, void* q_buf_, void* k_buf_, void* v_buf_,
    const int& batch_size, const int& seq_len, const int& head_num, const int& size_per_head, const cudaStream_t stream){
    // printf("inner add_qkv_bias\n");
    int qkv_types = 3;
    int m = batch_size * seq_len;
    int k = head_num * size_per_head;
    assert(m * qkv_types <= 65536 && "batch_size * seq_len must <= 65536");
    int fold_coeff = 1;
    dim3 grid;
    dim3 block;
    //TODO - int8
    if (sizeof(T) == sizeof(float)){
        // Smallest fold such that k / fold_coeff <= 1024.
        if (k <= 1024){
            fold_coeff = 1;
        }else if( k <= 2048){
            fold_coeff = 2;
        }else if(k <= 4096){
            fold_coeff = 4;
        }else if(k <= 8192){
            fold_coeff = 8;
        }else if(k <= 16384){
            fold_coeff = 16;
        }
        grid.x = m * qkv_types;  // Q rows, then K rows, then V rows
        grid.y = fold_coeff;     // folds one wide row over several blocks
        block.x = k / fold_coeff;
        add_QKV_bias_opt<<<grid, block, 0, stream>>>((float*)Q, (float*)bias_Q, (float*)K, (float*)bias_K, (float*)V, (float*)bias_V, (float*)q_buf_, (float*)k_buf_,
            (float*)v_buf_, batch_size, seq_len, head_num, size_per_head);
    }else{
        // half path: the kernel works in half2 units, i.e. k / 2 elements/row.
        if (k <= 2048){
            fold_coeff = 2;
        }else if( k <= 4096){
            fold_coeff = 2;
        }else if(k <= 8192){
            fold_coeff = 4;
        }else if(k <= 16384){
            fold_coeff = 8;
        }else if(k <= 16384 * 2){
            fold_coeff = 16;
        }
        grid.x = m;              // half kernel covers Q, K and V in one pass
        grid.y = fold_coeff;
        block.x = k / (2 * fold_coeff);
        add_QKV_bias_opt<<<grid, block, 0, stream>>>((half*)Q, (half*)bias_Q, (half*)K, (half*)bias_K, (half*)V, (half*)bias_V, (half*)q_buf_, (half*)k_buf_,
            (half*)v_buf_, batch_size, seq_len, head_num, size_per_head / 2);
    }
}
template void add_QKV_bias_opt_kernel<float>( void* Q, const void* bias_Q, void* K, const void* bias_K, void* V, const void* bias_V, void* q_buf_, void* k_buf_, void* v_buf_,
const int& batch_size, const int& seq_len, const int& head_num, const int& size_per_head, const cudaStream_t stream);
template void add_QKV_bias_opt_kernel<half>(void* Q, const void* bias_Q, void* K, const void* bias_K, void* V, const void* bias_V, void* q_buf_, void* k_buf_, void* v_buf_,
const int& batch_size, const int& seq_len, const int& head_num, const int& size_per_head, const cudaStream_t stream);
|
74445947f221de4e95191d5b079c493f8c8d34ea.hip | // !!! This is a file automatically generated by hipify!!!
/**
* ___ _ _ ___ _ _ ___ ___ ___ ___
* / __| | | | \ /_\ | | ___| _ ) __/ __/ __|
* | (__| |_| | |) / _ \ | |_|___| _ \ _| (_ \__ \
* \___|\___/|___/_/ \_\ |____| |___/_| \___|___/
* 2012
* by Jens Wetzl (jens.wetzl@fau.de)
* and Oliver Taubmann (oliver.taubmann@fau.de)
*
* This work is licensed under a Creative Commons
* Attribution 3.0 Unported License. (CC-BY)
* http://creativecommons.org/licenses/by/3.0/
*
* File lbfgs.cu: Implementation of class lbfgs (except cpu_lbfgs).
*
**/
#include "lbfgs.h"
#include "timer.h"
#include <iostream>
#include <algorithm>
#include <limits>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <device_launch_parameters.h>
#include <hip/device_functions.h>
#include <fstream>
#include <sstream>
using namespace std;
namespace gpu_lbfgs {
// Variables
__device__ float fkm1;
__device__ float fk;
__device__ float tmp;
__device__ float alpha[HISTORY_SIZE];
__device__ float rho [HISTORY_SIZE];
__device__ float H0;
__device__ float step;
__device__ float tmp2;
__device__ int status;
// Small helper kernels for scalar operations in device memory needed during updates.
// What they're used for is documented by comments in the places they are executed.
// *** Use with a single thread only! ***
__global__ void update1 (float *alpha_out, const float *sDotZ, const float *rho, float *minusAlpha_out); // first update loop
__global__ void update2 (float *alphaMinusBeta_out, const float *rho, const float *yDotZ, const float *alpha); // second update loop
__global__ void update3 (float *rho_out, float *H0_out, const float *yDotS, const float *yDotY); // after line search
}
// linesearch_gpu.h is no real header, it contains
// part of the implementation and must be included
// after the variables above have been declared.
#include "linesearch_gpu.h"
// Construct an L-BFGS minimizer for the given cost function with default
// settings (at most 10000 iterations, unlimited evaluations, gradient
// tolerance 1e-4) and create the BLAS handle used by the vector operations.
lbfgs::lbfgs(cost_function& cf)
    : m_costFunction(cf)
    , m_maxIter(10000)
    , m_maxEvals(std::numeric_limits<size_t>::max())
    , m_gradientEps(1e-4f)
{
    CublasSafeCall( hipblasCreate(&m_cublasHandle) );
}
// Release the BLAS handle acquired in the constructor.
lbfgs::~lbfgs()
{
    CublasSafeCall( hipblasDestroy(m_cublasHandle) );
}
// Translate a termination status code into a human-readable description.
std::string lbfgs::statusToString(lbfgs::status stat)
{
    if (stat == LBFGS_BELOW_GRADIENT_EPS)
        return "Below gradient epsilon";
    if (stat == LBFGS_REACHED_MAX_ITER)
        return "Reached maximum number of iterations";
    if (stat == LBFGS_REACHED_MAX_EVALS)
        return "Reached maximum number of function/gradient evaluations";
    if (stat == LBFGS_LINE_SEARCH_FAILED)
        return "Line search failed";
    return "Unknown status";
}
// Minimize starting from d_x (a device pointer; overwritten with the final
// iterate). Currently always dispatches to the GPU implementation.
lbfgs::status lbfgs::minimize(float *d_x)
{
    return gpu_lbfgs(d_x);
}
// Convenience wrapper for callers holding the unknowns in host memory:
// copies h_x to a temporary device buffer, minimizes, and copies the result
// back into h_x before freeing the buffer.
lbfgs::status lbfgs::minimize_with_host_x(float *h_x)
{
    const size_t NX = m_costFunction.getNumberOfUnknowns();
    float *d_x;
    CudaSafeCall( hipMalloc((void**)&d_x, NX * sizeof(float)) );
    CudaSafeCall( hipMemcpy(d_x, h_x, NX * sizeof(float), hipMemcpyHostToDevice) );
    status ret = minimize(d_x);
    CudaSafeCall( hipMemcpy(h_x, d_x, NX * sizeof(float), hipMemcpyDeviceToHost) );
    CudaSafeCall( hipFree(d_x) );
    return ret;
}
// Core GPU L-BFGS driver.
//
// The iterate d_x, gradients, search direction and the s/y history ring
// buffers live in device memory; per-iteration scalars (f, step, H0, alpha,
// rho, ...) live in the __device__ globals of namespace gpu_lbfgs, so the
// two-loop recursion and the line search avoid round-tripping scalars to the
// host. Only the convergence test reads two dot products back per iteration.
//
// d_x: device pointer to the unknowns; on return it holds the final iterate.
// Returns the reason for termination.
lbfgs::status lbfgs::gpu_lbfgs(float *d_x)
{
#ifdef LBFGS_TIMING
    timer timer_total ("GPU_LBFGS_total" );
    timer timer_evals ("GPU_LBFGS_evals" );
    timer timer_updates ("GPU_LBFGS_updates" );
    timer timer_linesearch("GPU_LBFGS_linesearch");
    timer_total.start();
#endif
    using namespace gpu_lbfgs;
    const size_t NX = m_costFunction.getNumberOfUnknowns();
    float *d_fkm1, *d_fk; // f_{k-1}, f_k, function values at x_{k-1} and x_k
    float *d_gkm1, *d_gk; // g_{k-1}, g_k, gradients at x_{k-1} and x_k
    float *d_z; // z, search direction
    float *d_H0; // H_0, initial inverse Hessian (diagonal, same value for all elements)
    float *d_step; // step current step length
    float *d_tmp, *d_tmp2; // tmp, tmp2 temporary storage for intermediate results
    int *d_status; // status return code for communication device -> host
    // Ring buffers for history
    float *d_s; // s, history of solution updates
    float *d_y; // y, history of gradient updates
    float *d_alpha; // alpha, history of alphas (needed for z updates)
    float *d_rho; // rho, history of rhos (needed for z updates)
    // Allocations (vector-sized data only; scalars are device globals below)
    CudaSafeCall( hipMalloc(&d_gk, NX * sizeof(float)) );
    CudaSafeCall( hipMalloc(&d_gkm1, NX * sizeof(float)) );
    CudaSafeCall( hipMalloc(&d_z, NX * sizeof(float)) );
    CudaSafeCall( hipMalloc(&d_s, HISTORY_SIZE * NX * sizeof(float)) );
    CudaSafeCall( hipMalloc(&d_y, HISTORY_SIZE * NX * sizeof(float)) );
    // Addresses of global symbols
    CudaSafeCall( hipGetSymbolAddress((void**)&d_fkm1, gpu_lbfgs::fkm1 ) );
    CudaSafeCall( hipGetSymbolAddress((void**)&d_fk, gpu_lbfgs::fk ) );
    CudaSafeCall( hipGetSymbolAddress((void**)&d_tmp, gpu_lbfgs::tmp ) );
    CudaSafeCall( hipGetSymbolAddress((void**)&d_tmp2, gpu_lbfgs::tmp2 ) );
    CudaSafeCall( hipGetSymbolAddress((void**)&d_H0, gpu_lbfgs::H0 ) );
    CudaSafeCall( hipGetSymbolAddress((void**)&d_alpha, gpu_lbfgs::alpha ) );
    CudaSafeCall( hipGetSymbolAddress((void**)&d_rho, gpu_lbfgs::rho ) );
    CudaSafeCall( hipGetSymbolAddress((void**)&d_step, gpu_lbfgs::step ) );
    CudaSafeCall( hipGetSymbolAddress((void**)&d_status, gpu_lbfgs::status) );
    // Initialize: one evaluation to seed f_k and g_k
#ifdef LBFGS_TIMING
    timer_evals.start();
#endif
    m_costFunction.f_gradf(d_x, d_fk, d_gk);
    CudaCheckError();
    hipDeviceSynchronize(); // NOTE(review): return value not checked here
#ifdef LBFGS_TIMING
    timer_evals.stop();
#endif
    size_t evals = 1;
    status stat = LBFGS_REACHED_MAX_ITER; // default if the loop runs to the end
#ifdef LBFGS_VERBOSE
    std::cout << "lbfgs::gpu_lbfgs()" << std::endl;
#endif
    // H0 = 1.0f;
    const float one = 1.0f;
    CudaSafeCall( hipMemcpy(d_H0, &one, sizeof(float), hipMemcpyHostToDevice) );
    size_t it;
    for (it = 0; it < m_maxIter; ++it)
    {
#ifdef LBFGS_VERBOSE
        float h_y;
        CudaSafeCall( hipMemcpy(&h_y, d_fk, sizeof(float), hipMemcpyDeviceToHost) );
        float gknorm2;
        dispatch_dot(NX, &gknorm2, d_gk, d_gk, false);
        printf("f(x) = % 12e, ||grad||_2 = % 12e\n", h_y, std::sqrt(gknorm2));
#endif
        // Check for convergence
        // ---------------------
        // Relative criterion: ||g||^2 < eps^2 * max(||x||^2, 1)
        float gkNormSquared;
        float xkNormSquared;
        dispatch_dot(NX, &xkNormSquared, d_x, d_x, false);
        dispatch_dot(NX, &gkNormSquared, d_gk, d_gk, false);
        // NOTE(review): '::max' resolves via the file's 'using namespace std'.
        if (gkNormSquared < (m_gradientEps * m_gradientEps) * ::max(xkNormSquared, 1.0f))
        {
            stat = LBFGS_BELOW_GRADIENT_EPS;
            break;
        }
        // Find search direction (two-loop recursion over the s/y history)
        // ---------------------
#ifdef LBFGS_TIMING
        timer_updates.start();
#endif
        const float minusOne = -1.0f;
        dispatch_scale(NX, d_z, d_gk, &minusOne, false); // z = -gk
        const size_t MAX_IDX = std::min<size_t>(it, HISTORY_SIZE);
        for (size_t i = 1; i <= MAX_IDX; ++i)
        {
            size_t idx = index(it - i);
            dispatch_dot(NX, d_tmp, d_s + idx * NX, d_z); // tmp = sDotZ
            // alpha = tmp * rho
            // tmp = -alpha
            hipLaunchKernelGGL(( update1), dim3(1), dim3(1), 0, 0, d_alpha + idx, d_tmp, d_rho + idx, d_tmp);
            CudaCheckError();
            hipDeviceSynchronize();
            // z += tmp * y
            dispatch_axpy(NX, d_z, d_z, d_y + idx * NX, d_tmp);
        }
        dispatch_scale(NX, d_z, d_z, d_H0); // z = H0 * z
        for (size_t i = MAX_IDX; i > 0; --i)
        {
            size_t idx = index(it - i);
            dispatch_dot(NX, d_tmp, d_y + idx * NX, d_z); // tmp = yDotZ
            // beta = rho * tmp
            // tmp = alpha - beta
            hipLaunchKernelGGL(( update2), dim3(1), dim3(1), 0, 0, d_tmp, d_rho + idx, d_tmp, d_alpha + idx);
            CudaCheckError();
            hipDeviceSynchronize();
            // z += tmp * s
            dispatch_axpy(NX, d_z, d_z, d_s + idx * NX, d_tmp);
        }
#ifdef LBFGS_TIMING
        timer_updates.stop();
        timer_linesearch.start();
#endif
        CudaSafeCall( hipMemcpy(d_fkm1, d_fk, 1 * sizeof(float), hipMemcpyDeviceToDevice) ); // fkm1 = fk;
        CudaSafeCall( hipMemcpy(d_gkm1, d_gk, NX * sizeof(float), hipMemcpyDeviceToDevice) ); // gkm1 = gk;
        timer *t_evals = NULL, *t_linesearch = NULL;
#ifdef LBFGS_TIMING
        t_evals = &timer_evals;
        t_linesearch = &timer_linesearch;
#endif
        // (line search defined in linesearch_gpu.h)
        if (!gpu_linesearch(d_x, d_z, d_fk, d_gk, evals, d_gkm1, d_fkm1, stat, d_step,
                            m_maxEvals, t_evals, t_linesearch, d_tmp, d_status))
        {
            // presumably gpu_linesearch updated 'stat' -- defined in linesearch_gpu.h
            break;
        }
#ifdef LBFGS_TIMING
        timer_linesearch.stop();
        timer_updates.start();
#endif
        // Update s, y, rho and H_0
        // ------------------------
        // s = x_k - x_{k-1} = step * z
        // y = g_k - g_{k-1}
        // rho = 1 / (y^T s)
        // H_0 = (y^T s) / (y^T y)
        float *d_curS = d_s + index(it) * NX;
        float *d_curY = d_y + index(it) * NX;
        dispatch_scale(NX, d_curS, d_z, d_step); // s = step * z
        dispatch_axpy (NX, d_curY, d_gk, d_gkm1, &minusOne, false); // y = gk - gkm1
        dispatch_dot(NX, d_tmp, d_curY, d_curS); // tmp = yDotS
        dispatch_dot(NX, d_tmp2, d_curY, d_curY); // tmp2 = yDotY
        // rho = 1 / tmp
        // if (tmp2 > 1e-5)
        // H0 = tmp / tmp2
        hipLaunchKernelGGL(( update3), dim3(1), dim3(1), 0, 0, d_rho + index(it), d_H0, d_tmp, d_tmp2);
        CudaCheckError();
        hipDeviceSynchronize();
#ifdef LBFGS_TIMING
        timer_updates.stop();
#endif
    }
    // Deallocations (device-global scalars need no free)
    CudaSafeCall( hipFree(d_gk) );
    CudaSafeCall( hipFree(d_gkm1) );
    CudaSafeCall( hipFree(d_z) );
    CudaSafeCall( hipFree(d_s) );
    CudaSafeCall( hipFree(d_y) );
#ifdef LBFGS_TIMING
    timer_total.stop();
    timer_total.saveMeasurement();
    timer_evals.saveMeasurement();
    timer_updates.saveMeasurement();
    timer_linesearch.saveMeasurement();
#endif
#ifdef LBFGS_VERBOSE
    std::cout << "Number of iterations: " << it << std::endl;
    std::cout << "Number of function/gradient evaluations: " << evals << std::endl;
    std::cout << "Reason for termination: " << statusToString(stat) << std::endl;
#endif
    return stat;
}
// Vector operations
// -----------------
// dst = y + a * x (element-wise, n floats, all vectors in device memory).
// 'a' points to device memory when aDevicePointer is true, host otherwise.
// dst may alias y; if it does not, y is first copied into dst.
void lbfgs::dispatch_axpy(const size_t n, float *d_dst, const float *d_y, const float *d_x, const float *a, bool aDevicePointer) const
{
    const hipblasPointerMode_t mode = aDevicePointer ? HIPBLAS_POINTER_MODE_DEVICE
                                                     : HIPBLAS_POINTER_MODE_HOST;
    CublasSafeCall( hipblasSetPointerMode(m_cublasHandle, mode) );
    if (d_dst != d_y)
        CudaSafeCall( hipMemcpy(d_dst, d_y, n * sizeof(float), hipMemcpyDeviceToDevice) );
    CublasSafeCall( hipblasSaxpy(m_cublasHandle, int(n), a, d_x, 1, d_dst, 1) );
}
// dst = a * x (element-wise, n floats, vectors in device memory).
// 'a' points to device memory when aDevicePointer is true, host otherwise.
// dst may alias x; if it does not, x is first copied into dst.
void lbfgs::dispatch_scale(const size_t n, float *d_dst, const float *d_x, const float *a, bool aDevicePointer) const
{
    const hipblasPointerMode_t mode = aDevicePointer ? HIPBLAS_POINTER_MODE_DEVICE
                                                     : HIPBLAS_POINTER_MODE_HOST;
    CublasSafeCall( hipblasSetPointerMode(m_cublasHandle, mode) );
    if (d_dst != d_x)
        CudaSafeCall( hipMemcpy(d_dst, d_x, n * sizeof(float), hipMemcpyDeviceToDevice) );
    CublasSafeCall( hipblasSscal(m_cublasHandle, int(n), a, d_dst, 1) );
}
// *dst = x . y (dot product of n floats in device memory). The result is
// written to device memory when dstDevicePointer is true, host otherwise.
void lbfgs::dispatch_dot(const size_t n, float *dst, const float *d_x, const float *d_y, bool dstDevicePointer) const
{
    const hipblasPointerMode_t mode = dstDevicePointer ? HIPBLAS_POINTER_MODE_DEVICE
                                                       : HIPBLAS_POINTER_MODE_HOST;
    CublasSafeCall( hipblasSetPointerMode(m_cublasHandle, mode) );
    CublasSafeCall( hipblasSdot(m_cublasHandle, int(n), d_x, 1, d_y, 1, dst) );
}
// -----------------
// Device / kernel functions
// -------------------------
namespace gpu_lbfgs
{
    // alpha = (s . z) * rho; also emits -alpha for the following axpy.
    // All inputs are read before any write so that minusAlpha_out may alias
    // an input (the caller passes d_tmp as both sDotZ and minusAlpha_out).
    __global__ void update1(float *alpha_out, const float *sDotZ, const float *rho, float *minusAlpha_out)
    {
        const float a = (*sDotZ) * (*rho);
        *alpha_out = a;
        *minusAlpha_out = -a;
    }

    // beta = rho * (y . z); emits alpha - beta for the following axpy.
    // Reads happen first because the caller aliases yDotZ with the output.
    __global__ void update2(float *alphaMinusBeta_out, const float *rho, const float *yDotZ, const float *alpha)
    {
        const float b = (*rho) * (*yDotZ);
        const float a = *alpha;
        *alphaMinusBeta_out = a - b;
    }

    // rho = 1 / (y . s); H0 = (y . s) / (y . y), skipped for tiny y . y.
    __global__ void update3(float *rho_out, float *H0_out, const float *yDotS, const float *yDotY)
    {
        const float ys = *yDotS;
        const float yy = *yDotY;
        *rho_out = 1.0f / ys;
        if (yy > 1e-5)
            *H0_out = ys / yy;
    }
}
// ------------------
| 74445947f221de4e95191d5b079c493f8c8d34ea.cu | /**
* ___ _ _ ___ _ _ ___ ___ ___ ___
* / __| | | | \ /_\ | | ___| _ ) __/ __/ __|
* | (__| |_| | |) / _ \ | |_|___| _ \ _| (_ \__ \
* \___|\___/|___/_/ \_\ |____| |___/_| \___|___/
* 2012
* by Jens Wetzl (jens.wetzl@fau.de)
* and Oliver Taubmann (oliver.taubmann@fau.de)
*
* This work is licensed under a Creative Commons
* Attribution 3.0 Unported License. (CC-BY)
* http://creativecommons.org/licenses/by/3.0/
*
* File lbfgs.cu: Implementation of class lbfgs (except cpu_lbfgs).
*
**/
#include "lbfgs.h"
#include "timer.h"
#include <iostream>
#include <algorithm>
#include <limits>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#include <device_functions.h>
#include <fstream>
#include <sstream>
using namespace std;
namespace gpu_lbfgs {
// Variables
__device__ float fkm1;
__device__ float fk;
__device__ float tmp;
__device__ float alpha[HISTORY_SIZE];
__device__ float rho [HISTORY_SIZE];
__device__ float H0;
__device__ float step;
__device__ float tmp2;
__device__ int status;
// Small helper kernels for scalar operations in device memory needed during updates.
// What they're used for is documented by comments in the places they are executed.
// *** Use with a single thread only! ***
__global__ void update1 (float *alpha_out, const float *sDotZ, const float *rho, float *minusAlpha_out); // first update loop
__global__ void update2 (float *alphaMinusBeta_out, const float *rho, const float *yDotZ, const float *alpha); // second update loop
__global__ void update3 (float *rho_out, float *H0_out, const float *yDotS, const float *yDotY); // after line search
}
// linesearch_gpu.h is no real header, it contains
// part of the implementation and must be included
// after the variables above have been declared.
#include "linesearch_gpu.h"
// Construct an L-BFGS minimizer for the given cost function with default
// settings (at most 10000 iterations, unlimited evaluations, gradient
// tolerance 1e-4) and create the cuBLAS handle used by the vector operations.
lbfgs::lbfgs(cost_function& cf)
    : m_costFunction(cf)
    , m_maxIter(10000)
    , m_maxEvals(std::numeric_limits<size_t>::max())
    , m_gradientEps(1e-4f)
{
    CublasSafeCall( cublasCreate(&m_cublasHandle) );
}
// Release the cuBLAS handle acquired in the constructor.
lbfgs::~lbfgs()
{
    CublasSafeCall( cublasDestroy(m_cublasHandle) );
}
// Translate a termination status code into a human-readable description.
std::string lbfgs::statusToString(lbfgs::status stat)
{
    if (stat == LBFGS_BELOW_GRADIENT_EPS)
        return "Below gradient epsilon";
    if (stat == LBFGS_REACHED_MAX_ITER)
        return "Reached maximum number of iterations";
    if (stat == LBFGS_REACHED_MAX_EVALS)
        return "Reached maximum number of function/gradient evaluations";
    if (stat == LBFGS_LINE_SEARCH_FAILED)
        return "Line search failed";
    return "Unknown status";
}
// Minimize starting from d_x (a device pointer; overwritten with the final
// iterate). Currently always dispatches to the GPU implementation.
lbfgs::status lbfgs::minimize(float *d_x)
{
    return gpu_lbfgs(d_x);
}
// Convenience wrapper for callers holding the unknowns in host memory:
// copies h_x to a temporary device buffer, minimizes, and copies the result
// back into h_x before freeing the buffer.
lbfgs::status lbfgs::minimize_with_host_x(float *h_x)
{
    const size_t NX = m_costFunction.getNumberOfUnknowns();
    float *d_x;
    CudaSafeCall( cudaMalloc((void**)&d_x, NX * sizeof(float)) );
    CudaSafeCall( cudaMemcpy(d_x, h_x, NX * sizeof(float), cudaMemcpyHostToDevice) );
    status ret = minimize(d_x);
    CudaSafeCall( cudaMemcpy(h_x, d_x, NX * sizeof(float), cudaMemcpyDeviceToHost) );
    CudaSafeCall( cudaFree(d_x) );
    return ret;
}
// Core GPU L-BFGS driver.
//
// The iterate d_x, gradients, search direction and the s/y history ring
// buffers live in device memory; per-iteration scalars (f, step, H0, alpha,
// rho, ...) live in the __device__ globals of namespace gpu_lbfgs, so the
// two-loop recursion and the line search avoid round-tripping scalars to the
// host. Only the convergence test reads two dot products back per iteration.
//
// d_x: device pointer to the unknowns; on return it holds the final iterate.
// Returns the reason for termination.
lbfgs::status lbfgs::gpu_lbfgs(float *d_x)
{
#ifdef LBFGS_TIMING
    timer timer_total ("GPU_LBFGS_total" );
    timer timer_evals ("GPU_LBFGS_evals" );
    timer timer_updates ("GPU_LBFGS_updates" );
    timer timer_linesearch("GPU_LBFGS_linesearch");
    timer_total.start();
#endif
    using namespace gpu_lbfgs;
    const size_t NX = m_costFunction.getNumberOfUnknowns();
    float *d_fkm1, *d_fk; // f_{k-1}, f_k, function values at x_{k-1} and x_k
    float *d_gkm1, *d_gk; // g_{k-1}, g_k, gradients at x_{k-1} and x_k
    float *d_z; // z, search direction
    float *d_H0; // H_0, initial inverse Hessian (diagonal, same value for all elements)
    float *d_step; // step current step length
    float *d_tmp, *d_tmp2; // tmp, tmp2 temporary storage for intermediate results
    int *d_status; // status return code for communication device -> host
    // Ring buffers for history
    float *d_s; // s, history of solution updates
    float *d_y; // y, history of gradient updates
    float *d_alpha; // alpha, history of alphas (needed for z updates)
    float *d_rho; // rho, history of rhos (needed for z updates)
    // Allocations (vector-sized data only; scalars are device globals below)
    CudaSafeCall( cudaMalloc(&d_gk, NX * sizeof(float)) );
    CudaSafeCall( cudaMalloc(&d_gkm1, NX * sizeof(float)) );
    CudaSafeCall( cudaMalloc(&d_z, NX * sizeof(float)) );
    CudaSafeCall( cudaMalloc(&d_s, HISTORY_SIZE * NX * sizeof(float)) );
    CudaSafeCall( cudaMalloc(&d_y, HISTORY_SIZE * NX * sizeof(float)) );
    // Addresses of global symbols
    CudaSafeCall( cudaGetSymbolAddress((void**)&d_fkm1, gpu_lbfgs::fkm1 ) );
    CudaSafeCall( cudaGetSymbolAddress((void**)&d_fk, gpu_lbfgs::fk ) );
    CudaSafeCall( cudaGetSymbolAddress((void**)&d_tmp, gpu_lbfgs::tmp ) );
    CudaSafeCall( cudaGetSymbolAddress((void**)&d_tmp2, gpu_lbfgs::tmp2 ) );
    CudaSafeCall( cudaGetSymbolAddress((void**)&d_H0, gpu_lbfgs::H0 ) );
    CudaSafeCall( cudaGetSymbolAddress((void**)&d_alpha, gpu_lbfgs::alpha ) );
    CudaSafeCall( cudaGetSymbolAddress((void**)&d_rho, gpu_lbfgs::rho ) );
    CudaSafeCall( cudaGetSymbolAddress((void**)&d_step, gpu_lbfgs::step ) );
    CudaSafeCall( cudaGetSymbolAddress((void**)&d_status, gpu_lbfgs::status) );
    // Initialize: one evaluation to seed f_k and g_k
#ifdef LBFGS_TIMING
    timer_evals.start();
#endif
    m_costFunction.f_gradf(d_x, d_fk, d_gk);
    CudaCheckError();
    cudaDeviceSynchronize(); // NOTE(review): return value not checked here
#ifdef LBFGS_TIMING
    timer_evals.stop();
#endif
    size_t evals = 1;
    status stat = LBFGS_REACHED_MAX_ITER; // default if the loop runs to the end
#ifdef LBFGS_VERBOSE
    std::cout << "lbfgs::gpu_lbfgs()" << std::endl;
#endif
    // H0 = 1.0f;
    const float one = 1.0f;
    CudaSafeCall( cudaMemcpy(d_H0, &one, sizeof(float), cudaMemcpyHostToDevice) );
    size_t it;
    for (it = 0; it < m_maxIter; ++it)
    {
#ifdef LBFGS_VERBOSE
        float h_y;
        CudaSafeCall( cudaMemcpy(&h_y, d_fk, sizeof(float), cudaMemcpyDeviceToHost) );
        float gknorm2;
        dispatch_dot(NX, &gknorm2, d_gk, d_gk, false);
        printf("f(x) = % 12e, ||grad||_2 = % 12e\n", h_y, std::sqrt(gknorm2));
#endif
        // Check for convergence
        // ---------------------
        // Relative criterion: ||g||^2 < eps^2 * max(||x||^2, 1)
        float gkNormSquared;
        float xkNormSquared;
        dispatch_dot(NX, &xkNormSquared, d_x, d_x, false);
        dispatch_dot(NX, &gkNormSquared, d_gk, d_gk, false);
        if (gkNormSquared < (m_gradientEps * m_gradientEps) * std::max(xkNormSquared, 1.0f))
        {
            stat = LBFGS_BELOW_GRADIENT_EPS;
            break;
        }
        // Find search direction (two-loop recursion over the s/y history)
        // ---------------------
#ifdef LBFGS_TIMING
        timer_updates.start();
#endif
        const float minusOne = -1.0f;
        dispatch_scale(NX, d_z, d_gk, &minusOne, false); // z = -gk
        const size_t MAX_IDX = std::min<size_t>(it, HISTORY_SIZE);
        for (size_t i = 1; i <= MAX_IDX; ++i)
        {
            size_t idx = index(it - i);
            dispatch_dot(NX, d_tmp, d_s + idx * NX, d_z); // tmp = sDotZ
            // alpha = tmp * rho
            // tmp = -alpha
            update1<<<1, 1>>>(d_alpha + idx, d_tmp, d_rho + idx, d_tmp);
            CudaCheckError();
            cudaDeviceSynchronize();
            // z += tmp * y
            dispatch_axpy(NX, d_z, d_z, d_y + idx * NX, d_tmp);
        }
        dispatch_scale(NX, d_z, d_z, d_H0); // z = H0 * z
        for (size_t i = MAX_IDX; i > 0; --i)
        {
            size_t idx = index(it - i);
            dispatch_dot(NX, d_tmp, d_y + idx * NX, d_z); // tmp = yDotZ
            // beta = rho * tmp
            // tmp = alpha - beta
            update2<<<1, 1>>>(d_tmp, d_rho + idx, d_tmp, d_alpha + idx);
            CudaCheckError();
            cudaDeviceSynchronize();
            // z += tmp * s
            dispatch_axpy(NX, d_z, d_z, d_s + idx * NX, d_tmp);
        }
#ifdef LBFGS_TIMING
        timer_updates.stop();
        timer_linesearch.start();
#endif
        CudaSafeCall( cudaMemcpy(d_fkm1, d_fk, 1 * sizeof(float), cudaMemcpyDeviceToDevice) ); // fkm1 = fk;
        CudaSafeCall( cudaMemcpy(d_gkm1, d_gk, NX * sizeof(float), cudaMemcpyDeviceToDevice) ); // gkm1 = gk;
        timer *t_evals = NULL, *t_linesearch = NULL;
#ifdef LBFGS_TIMING
        t_evals = &timer_evals;
        t_linesearch = &timer_linesearch;
#endif
        // (line search defined in linesearch_gpu.h)
        if (!gpu_linesearch(d_x, d_z, d_fk, d_gk, evals, d_gkm1, d_fkm1, stat, d_step,
                            m_maxEvals, t_evals, t_linesearch, d_tmp, d_status))
        {
            // presumably gpu_linesearch updated 'stat' -- defined in linesearch_gpu.h
            break;
        }
#ifdef LBFGS_TIMING
        timer_linesearch.stop();
        timer_updates.start();
#endif
        // Update s, y, rho and H_0
        // ------------------------
        // s = x_k - x_{k-1} = step * z
        // y = g_k - g_{k-1}
        // rho = 1 / (y^T s)
        // H_0 = (y^T s) / (y^T y)
        float *d_curS = d_s + index(it) * NX;
        float *d_curY = d_y + index(it) * NX;
        dispatch_scale(NX, d_curS, d_z, d_step); // s = step * z
        dispatch_axpy (NX, d_curY, d_gk, d_gkm1, &minusOne, false); // y = gk - gkm1
        dispatch_dot(NX, d_tmp, d_curY, d_curS); // tmp = yDotS
        dispatch_dot(NX, d_tmp2, d_curY, d_curY); // tmp2 = yDotY
        // rho = 1 / tmp
        // if (tmp2 > 1e-5)
        // H0 = tmp / tmp2
        update3<<<1, 1>>>(d_rho + index(it), d_H0, d_tmp, d_tmp2);
        CudaCheckError();
        cudaDeviceSynchronize();
#ifdef LBFGS_TIMING
        timer_updates.stop();
#endif
    }
    // Deallocations (device-global scalars need no free)
    CudaSafeCall( cudaFree(d_gk) );
    CudaSafeCall( cudaFree(d_gkm1) );
    CudaSafeCall( cudaFree(d_z) );
    CudaSafeCall( cudaFree(d_s) );
    CudaSafeCall( cudaFree(d_y) );
#ifdef LBFGS_TIMING
    timer_total.stop();
    timer_total.saveMeasurement();
    timer_evals.saveMeasurement();
    timer_updates.saveMeasurement();
    timer_linesearch.saveMeasurement();
#endif
#ifdef LBFGS_VERBOSE
    std::cout << "Number of iterations: " << it << std::endl;
    std::cout << "Number of function/gradient evaluations: " << evals << std::endl;
    std::cout << "Reason for termination: " << statusToString(stat) << std::endl;
#endif
    return stat;
}
// Vector operations
// -----------------
// dst = y + a * x (element-wise, n floats, all vectors in device memory).
// 'a' points to device memory when aDevicePointer is true, host otherwise.
// dst may alias y; if it does not, y is first copied into dst.
void lbfgs::dispatch_axpy(const size_t n, float *d_dst, const float *d_y, const float *d_x, const float *a, bool aDevicePointer) const
{
    const cublasPointerMode_t mode = aDevicePointer ? CUBLAS_POINTER_MODE_DEVICE
                                                    : CUBLAS_POINTER_MODE_HOST;
    CublasSafeCall( cublasSetPointerMode(m_cublasHandle, mode) );
    if (d_dst != d_y)
        CudaSafeCall( cudaMemcpy(d_dst, d_y, n * sizeof(float), cudaMemcpyDeviceToDevice) );
    CublasSafeCall( cublasSaxpy(m_cublasHandle, int(n), a, d_x, 1, d_dst, 1) );
}
// dst = a * x (element-wise, n floats, vectors in device memory).
// 'a' points to device memory when aDevicePointer is true, host otherwise.
// dst may alias x; if it does not, x is first copied into dst.
void lbfgs::dispatch_scale(const size_t n, float *d_dst, const float *d_x, const float *a, bool aDevicePointer) const
{
    const cublasPointerMode_t mode = aDevicePointer ? CUBLAS_POINTER_MODE_DEVICE
                                                    : CUBLAS_POINTER_MODE_HOST;
    CublasSafeCall( cublasSetPointerMode(m_cublasHandle, mode) );
    if (d_dst != d_x)
        CudaSafeCall( cudaMemcpy(d_dst, d_x, n * sizeof(float), cudaMemcpyDeviceToDevice) );
    CublasSafeCall( cublasSscal(m_cublasHandle, int(n), a, d_dst, 1) );
}
// *dst = x . y (dot product of n floats in device memory). The result is
// written to device memory when dstDevicePointer is true, host otherwise.
void lbfgs::dispatch_dot(const size_t n, float *dst, const float *d_x, const float *d_y, bool dstDevicePointer) const
{
    const cublasPointerMode_t mode = dstDevicePointer ? CUBLAS_POINTER_MODE_DEVICE
                                                      : CUBLAS_POINTER_MODE_HOST;
    CublasSafeCall( cublasSetPointerMode(m_cublasHandle, mode) );
    CublasSafeCall( cublasSdot(m_cublasHandle, int(n), d_x, 1, d_y, 1, dst) );
}
// -----------------
// Device / kernel functions
// -------------------------
namespace gpu_lbfgs
{
    // alpha = (s . z) * rho; also emits -alpha for the following axpy.
    // All inputs are read before any write so that minusAlpha_out may alias
    // an input (the caller passes d_tmp as both sDotZ and minusAlpha_out).
    __global__ void update1(float *alpha_out, const float *sDotZ, const float *rho, float *minusAlpha_out)
    {
        const float a = (*sDotZ) * (*rho);
        *alpha_out = a;
        *minusAlpha_out = -a;
    }

    // beta = rho * (y . z); emits alpha - beta for the following axpy.
    // Reads happen first because the caller aliases yDotZ with the output.
    __global__ void update2(float *alphaMinusBeta_out, const float *rho, const float *yDotZ, const float *alpha)
    {
        const float b = (*rho) * (*yDotZ);
        const float a = *alpha;
        *alphaMinusBeta_out = a - b;
    }

    // rho = 1 / (y . s); H0 = (y . s) / (y . y), skipped for tiny y . y.
    __global__ void update3(float *rho_out, float *H0_out, const float *yDotS, const float *yDotY)
    {
        const float ys = *yDotS;
        const float yy = *yDotY;
        *rho_out = 1.0f / ys;
        if (yy > 1e-5)
            *H0_out = ys / yy;
    }
}
// ------------------
|
6844058c881ff1cce010683aa4436509ad7965d2.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <iostream>
void genData(float * ptr, unsigned int size);//random data generation
void matrixMul(float * A, float * B, float * C, int HEIGHT,int WIDTH, int COMMON); //loading, transfer, execution(host code)
void printMatrix(float * matrix, int HEIGHT, int WIDTH);//print matrix
// Entry point: parse <HEIGHT> <COMMON> <WIDTH> from the command line, fill
// two matrices with deterministic pseudo-data, multiply them serially on the
// CPU, dump the result to "output(cpu).txt" and report the elapsed time.
int main(int argc, char * argv[]){
	float* pA = NULL;
	float* pB = NULL;
	float* pC = NULL;
	int HEIGHT,COMMON, WIDTH;
	if(argc<4){
		printf("Not inserted Properly, Try again\n");
		printf("Ex: ./matmul_shared_gpu_ext <HEIGHT> <COMMON> <WIDTH>\n");
		exit(-1);
	}
	HEIGHT=atoi(argv[1]);
	COMMON=atoi(argv[2]);
	WIDTH=atoi(argv[3]);
	// malloc memories on the host-side
	pA = (float*)malloc(HEIGHT * COMMON * sizeof(float));
	pB = (float*)malloc(COMMON * WIDTH * sizeof(float));
	pC = (float*)malloc(HEIGHT * WIDTH * sizeof(float));
	// BUG FIX: malloc results were previously used unchecked.
	if(pA == NULL || pB == NULL || pC == NULL){
		printf("Host allocation failed\n");
		free(pA); free(pB); free(pC);
		exit(-1);
	}
	genData(pA, HEIGHT * COMMON);
	genData(pB, COMMON * WIDTH);
	clock_t start=clock();
	matrixMul(pA,pB,pC,HEIGHT,WIDTH,COMMON);
	printMatrix(pC,HEIGHT,WIDTH);
	// BUG FIX: clock() returns processor ticks, not milliseconds; the raw
	// difference was printed with an "ms" label. Convert via CLOCKS_PER_SEC.
	printf("Matrix Multiplication execution time(Serial): %fms\n",
	       (double)(clock()-start) * 1000.0 / CLOCKS_PER_SEC);
	//free 2D array
	free(pA);
	free(pB);
	free(pC);
	return 0;
}
// Fill ptr[0 .. size-1] with a deterministic descending ramp:
// ptr[i] = (size - i) / 1000. A no-op when size is 0.
void genData(float* ptr, unsigned int size) {
	for (unsigned int remaining = size; remaining > 0; --remaining) {
		*ptr++ = (float)remaining / (float)1000;
	}
}
// Naive serial matrix product: C (HEIGHT x WIDTH) = A (HEIGHT x COMMON) *
// B (COMMON x WIDTH). All matrices are stored row-major in flat arrays.
void matrixMul(float* A, float* B, float* C,int HEIGHT,int WIDTH,int COMMON)
{
	for (int row = 0; row < HEIGHT; ++row) {
		for (int col = 0; col < WIDTH; ++col) {
			float acc = 0.0F;
			// Accumulate the dot product of row 'row' of A and column 'col' of B.
			for (int idx = 0; idx < COMMON; ++idx) {
				acc += A[row * COMMON + idx] * B[idx * WIDTH + col];
			}
			C[row * WIDTH + col] = acc;
		}
	}
}
// Write the HEIGHT x WIDTH row-major matrix to "output(cpu).txt",
// one "c[i][j] = value" line per element.
void printMatrix(float * matrix, int HEIGHT, int WIDTH){
	FILE *fp=fopen("output(cpu).txt","wt");
	// BUG FIX: fopen result was never checked before use.
	if(fp == NULL){
		printf("Cannot open output(cpu).txt\n");
		return;
	}
	for(int i=0;i<HEIGHT;i++)
		for(int j=0;j<WIDTH;j++)
			fprintf(fp,"c[%4d][%4d] = %f\n", i, j, matrix[i * WIDTH + j]);
	// BUG FIX: the stream was leaked and never flushed/closed.
	fclose(fp);
}
| 6844058c881ff1cce010683aa4436509ad7965d2.cu | #include <cuda.h>
#include <iostream>
void genData(float * ptr, unsigned int size);//random data generation
void matrixMul(float * A, float * B, float * C, int HEIGHT,int WIDTH, int COMMON); //loading, transfer, execution(host code)
void printMatrix(float * matrix, int HEIGHT, int WIDTH);//print matrix
// Entry point: parses HEIGHT/COMMON/WIDTH, multiplies A (HEIGHT x COMMON) by
// B (COMMON x WIDTH) on the CPU, dumps C to a file and reports the elapsed time.
int main(int argc, char * argv[]){
    float* pA = NULL;
    float* pB = NULL;
    float* pC = NULL;
    int HEIGHT,COMMON, WIDTH;
    if(argc<4){
        printf("Not inserted Properly, Try again\n");
        printf("Ex: ./matmul_shared_gpu_ext <HEIGHT> <COMMON> <WIDTH>\n");
        exit(-1);
    }
    HEIGHT=atoi(argv[1]);
    COMMON=atoi(argv[2]);
    WIDTH=atoi(argv[3]);
    // Host-side buffers; bail out on allocation failure instead of crashing later.
    pA = (float*)malloc(HEIGHT * COMMON * sizeof(float));
    pB = (float*)malloc(COMMON * WIDTH * sizeof(float));
    pC = (float*)malloc(HEIGHT * WIDTH * sizeof(float));
    if(pA == NULL || pB == NULL || pC == NULL){
        printf("malloc failed\n");
        exit(-1);
    }
    genData(pA, HEIGHT * COMMON);
    genData(pB, COMMON * WIDTH);
    clock_t start=clock();
    matrixMul(pA,pB,pC,HEIGHT,WIDTH,COMMON);
    printMatrix(pC,HEIGHT,WIDTH);
    // BUG FIX: clock() returns ticks, not milliseconds — convert via CLOCKS_PER_SEC.
    // (As before, the timed region includes printMatrix's file I/O.)
    printf("Matrix Multiplication execution time(Serial): %fms\n",
           (double)(clock()-start) * 1000.0 / CLOCKS_PER_SEC);
    free(pA);
    free(pB);
    free(pC);
    return 0;
}
// Fills ptr[0..size-1] with the descending sequence size/1000, (size-1)/1000, ..., 1/1000.
void genData(float* ptr, unsigned int size) {
    for (unsigned int remaining = size; remaining > 0; --remaining) {
        *ptr++ = (float)remaining / (float)1000;
    }
}
// Naive CPU matrix product: C (HEIGHT x WIDTH) = A (HEIGHT x COMMON) * B (COMMON x WIDTH),
// all matrices row-major.
void matrixMul(float* A, float* B, float* C,int HEIGHT,int WIDTH,int COMMON)
{
    for (int row = 0; row < HEIGHT; ++row) {
        for (int col = 0; col < WIDTH; ++col) {
            float acc = 0.0f;
            for (int k = 0; k < COMMON; ++k) {
                acc += A[row * COMMON + k] * B[k * WIDTH + col];
            }
            C[row * WIDTH + col] = acc;
        }
    }
}
// Writes matrix (HEIGHT x WIDTH, row-major) to "output(cpu).txt", one labelled
// element per line. BUG FIX: the original never checked fopen's result and
// leaked the FILE handle (no fclose), so output could be lost in the stdio buffer.
void printMatrix(float * matrix, int HEIGHT, int WIDTH){
    FILE *fp = fopen("output(cpu).txt", "wt");
    if (fp == NULL) {
        fprintf(stderr, "printMatrix: cannot open output(cpu).txt\n");
        return;
    }
    for (int i = 0; i < HEIGHT; i++)
        for (int j = 0; j < WIDTH; j++)
            fprintf(fp, "c[%4d][%4d] = %f\n", i, j, matrix[i * WIDTH + j]);
    fclose(fp);
}
|
195365cb43bc3e5d53bcbbe2a70d7d9ea2cab308.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef LAYERS_H
#define LAYERS_H
#include <iostream>
#include <string>
#include <stdio.h>
#include <stdexcept>
#include "Matrix.hip"
#include "Activation.hip"
/* ----------------------------
Kernel
---------------------------- */
// template<int BLOCK_SIZE> __global__ void
// XdotW(float* A, float* B, float* C, int wA, int wB, int hA, int hB);
template<int BLOCK_SIZE> __global__ void
XdotWplusBias(float* A, float* B, float* C, int wA, int wB, int hA, int hB, float *bias);
template<int BLOCK_SIZE> __global__ void
XdotW(float* A, float* B, float* C, int wA, int wB, int hA, int hB);
template<int BLOCK_SIZE> __global__ void
transpose(float *odata, float *idata, int width, int height);
/* ----------------------------
Layer class
---------------------------- */
// Abstract base for network layers. Concrete layers own their parameters and
// per-batch buffers and expose them by reference so the network driver can
// chain forward()/backward() calls and apply SGD via updateW().
class Layer{
private:
    std::string name; // layer type tag, e.g. "Dense" / "Input"
public:
    Layer(std::string name_); // stores the type tag only
    virtual ~Layer();
    std::string getName();
    // virtual void call(Matrix &in, Matrix &out) = 0;
    // virtual void gradient(Matrix &in, Matrix &out) = 0;
    virtual void printWeights() = 0;
    virtual int getWidth() = 0;
    virtual int getHeight() = 0;
    virtual std::string getActivation() = 0;
    virtual void forward(Matrix &X) = 0;
    virtual void backward(Matrix &X, Matrix &dX, Matrix &dX_T) = 0;
    virtual Matrix& getW() = 0;
    virtual Matrix& getdW() = 0;
    virtual Matrix& getOutput() = 0;
    virtual Matrix& getGradOutput() = 0;
    virtual Matrix& getOutput_T() = 0;
    virtual void updateW(float lr) = 0;
    // virtual void updateBias(float lr) = 0;
};
// Base-class plumbing: store and expose the layer's type name.
Layer::Layer(std::string name_) : name(name_) {}
Layer::~Layer(){}
std::string Layer::getName(){return name;}
/* ----------------------------
Dense Layer
---------------------------- */
__global__ void updateWKernel(float *W, float *dW, float lr, int size);
// Fully connected layer: Y = activation(X * W + b).
// W is (input_shape x output_shape), b a (1 x output_shape) bias row.
class Dense : public Layer{
private:
    Matrix W;
    Matrix W_T;  // transpose scratch for W, rebuilt in backward()
    Matrix dW;   // dL/dW, consumed by updateW()
    Matrix b;
    Matrix Y;    // layer output: activation(input*W + b)
    Matrix dY;   // dL/dY — presumably written by the next layer via getGradOutput(); confirm
    Matrix Y_T;  // transpose scratch for Y
    // Matrix Output;
    Activation *activation; // owned; released in the destructor
public:
    Dense(int input_shape, int output_shape, std::string act, std::string dist = "uniform", float w = 0.1);
    ~Dense();
    // void call(Matrix &in, Matrix &out);
    // void gradient(Matrix &in, Matrix &out);
    void printWeights();
    int getWidth();
    int getHeight();
    std::string getActivation();
    void forward(Matrix &X);
    void backward(Matrix &X, Matrix &dX, Matrix &dX_T);
    Matrix& getW();
    Matrix& getdW();
    Matrix& getOutput();
    Matrix& getGradOutput();
    Matrix& getOutput_T();
    void updateW(float lr);
};
// Builds a fully connected layer: W is (input_shape x output_shape), b a row of
// output_shape biases; W_T and dW start zeroed as backward-pass scratch.
// Throws std::invalid_argument for an unknown activation name.
Dense::Dense(int input_shape, int output_shape, std::string act, std::string dist, float w)
    : Layer("Dense"),
      W(input_shape, output_shape, dist, w),
      W_T(output_shape, input_shape, "zeros", w),
      dW(input_shape, output_shape, "zeros", w),
      b(1, output_shape, dist, w) {
    activation = nullptr;
    if (act == "linear")         activation = new Linear;
    else if (act == "relu")      activation = new Relu;
    else if (act == "sigmoid")   activation = new Sigmoid;
    else if (act == "tanh")      activation = new Tanh;
    else if (act == "leakyRelu") activation = new LeakyRelu();
    if (activation == nullptr)
        throw std::invalid_argument("Invalid activation");
}
Dense::~Dense(){
    // Dense owns its Activation (allocated in the constructor).
    delete activation;
}
// Dumps W (one tab-separated row per input feature) followed by the bias row.
// BUG FIX: the original printed ptr_b[i] inside the row loop, indexing the
// (1 x W.width) bias vector by row i up to W.height — an out-of-bounds read
// whenever input_shape > output_shape, and the wrong element otherwise
// (biases belong to output columns, not input rows).
void Dense::printWeights(){
    float *ptr_W = W.getHostData();
    float *ptr_b = b.getHostData();
    for(int i=0; i < W.height; ++i){
        for(int j=0; j < W.width; ++j)
            std::cout << ptr_W[i*W.width + j] << "\t";
        std::cout << std::endl;
    }
    // Bias row: one value per output column.
    for(int j=0; j < b.width; ++j)
        std::cout << ptr_b[j] << "\t";
    std::cout << std::endl;
}
// W is (input_features x output_features): width = outputs, height = inputs.
int Dense::getWidth(){return W.width;}
int Dense::getHeight(){return W.height;}
std::string Dense::getActivation(){
    return activation->getName();
}
// Forward pass: Y = activation(X * W + b), computed entirely on the device.
void Dense::forward(Matrix &X){
    const int block_size = 32;
    dim3 threads(block_size, block_size);
    // One thread per element of Y: grid covers X.height rows x W.width columns.
    dim3 grid((W.width -1) / threads.x + 1, (X.height - 1) / threads.y + 1);
    hipLaunchKernelGGL(( XdotWplusBias<block_size>) , dim3(grid), dim3(threads) , 0, 0, 
        X.getDeviceData(),
        W.getDeviceData(),
        Y.getDeviceData(),
        X.getWidth(),
        W.getWidth(),
        X.getHeight(),
        W.getHeight(),
        b.getDeviceData()
    );
    hipDeviceSynchronize();
    // Apply the activation in place on Y; Y then feeds the next layer.
    activation->call(Y, Y);
    hipDeviceSynchronize();
    return;
}
// Backward pass. On entry dY is assumed to hold dL/dY (written by the layer
// above via getGradOutput()). Produces dW = X^T * dY and dX = dY * W^T.
//
// BUG FIX: both transpose() launches below had their odata/idata arguments
// reversed — they overwrote the layer INPUT X (resp. the WEIGHTS W) with the
// transpose of the still-empty X_T (resp. zero-initialised W_T) — and sized
// the grid's x dimension from the transposed matrix. Arguments now follow the
// kernel contract transpose(odata, idata, width, height), where idata is
// height x width, with the grid covering ceil(width/BS) x ceil(height/BS).
void Dense::backward(Matrix &X, Matrix &dX, Matrix &X_T){
    // X_T = transpose(X)
    const int block_size = 16;
    dim3 threads(block_size, block_size, 1);
    dim3 grid((X.width - 1) / threads.x + 1, (X.height - 1) / threads.y + 1);
    hipLaunchKernelGGL(( transpose<block_size>) , dim3(grid), dim3(threads) , 0, 0, 
        X_T.getDeviceData(),
        X.getDeviceData(),
        X.getWidth(),
        X.getHeight()
    );
    hipDeviceSynchronize();
    // Activation gradient, applied in place on dY.
    activation->gradient(dY,dY);
    // dW = X_T * dY
    const int block_size2 = 32;
    dim3 threads2(block_size2, block_size2);
    dim3 grid2((dY.width -1) / threads2.x + 1, (X_T.height - 1) / threads2.y + 1);
    hipLaunchKernelGGL(( XdotW<block_size2>) , dim3(grid2), dim3(threads2) , 0, 0, 
        X_T.getDeviceData(),
        dY.getDeviceData(),
        dW.getDeviceData(),
        X_T.getWidth(),
        dY.getWidth(),
        X_T.getHeight(),
        dY.getHeight()
    );
    hipDeviceSynchronize();
    // W_T = transpose(W)   (see BUG FIX note above).
    const int block_size3 = 16;
    dim3 threads3(block_size3, block_size3, 1);
    dim3 grid3((W.width - 1) / threads3.x + 1, (W.height - 1) / threads3.y + 1);
    hipLaunchKernelGGL(( transpose<block_size3>) , dim3(grid3), dim3(threads3) , 0, 0, 
        W_T.getDeviceData(),
        W.getDeviceData(),
        W.getWidth(),
        W.getHeight()
    );
    hipDeviceSynchronize();
    // dX = dY * W_T — the gradient handed to the previous layer.
    const int block_size4 = 32;
    dim3 threads4(block_size4, block_size4);
    dim3 grid4((W_T.width -1) / threads4.x + 1, (dY.height - 1) / threads4.y + 1);
    hipLaunchKernelGGL(( XdotW<block_size4>) , dim3(grid4), dim3(threads4) , 0, 0, 
        dY.getDeviceData(),
        W_T.getDeviceData(),
        dX.getDeviceData(),
        dY.getWidth(),
        W_T.getWidth(),
        dY.getHeight(),
        W_T.getHeight()
    );
    hipDeviceSynchronize();
}
// Accessors used by the network driver and the adjacent layers.
Matrix& Dense::getW(){return W;}
Matrix& Dense::getdW(){return dW;};
Matrix& Dense::getOutput(){return Y;}
Matrix& Dense::getGradOutput(){return dY;}
Matrix& Dense::getOutput_T(){return Y_T;}
// SGD step: W -= lr * dW, one grid-stride kernel over all W.size elements.
void Dense::updateW(float lr){
    int dev;
    hipGetDevice(&dev);
    hipDeviceProp_t deviceProp;
    hipGetDeviceProperties(&deviceProp, dev);
    // Largest block the device allows; the grid is clamped to the device limit
    // and the kernel's stride loop absorbs any shortfall.
    dim3 nThreads(deviceProp.maxThreadsDim[0]);
    dim3 nBlocks((W.size + nThreads.x - 1) / nThreads.x);
    if(nBlocks.x > deviceProp.maxGridSize[0]){
        nBlocks.x = deviceProp.maxGridSize[0];
    }
    hipLaunchKernelGGL(( updateWKernel), dim3(nBlocks), dim3(nThreads) , 0, 0,  W.getDeviceData(), dW.getDeviceData(), lr, W.size);
    hipDeviceSynchronize();
}
// Element-wise SGD update: W[i] -= lr * dW[i] for every i < size.
// Grid-stride loop, so any 1-D launch configuration covers the whole array.
__global__ void updateWKernel(float *W, float *dW, float lr, int size){
    const int stride = blockDim.x * gridDim.x;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < size; idx += stride) {
        W[idx] = W[idx] - lr * dW[idx];
    }
}
/* ----------------------------
Input Layer
---------------------------- */
// Pass-through layer that caches the network input. It owns no real weights:
// W/dW/W_T are 1x1 placeholders kept only to satisfy the Layer interface.
class Input : public Layer{
private:
    int width, height; // feature count / batch size (batch unknown until forward)
    // Matrix Datos;
    Matrix W; // Not needed
    Matrix dW; // Not needed
    Matrix W_T; // Not needed
    Matrix Y; // cached copy of the input batch
    Matrix dY; // Not needed
    Matrix Y_T; // Not needed
public:
    Input(int width, int height = -1);
    ~Input();
    // void call(Matrix &in, Matrix &out);
    // void gradient(Matrix &in, Matrix &out);
    void printWeights();
    int getWidth();
    int getHeight();
    std::string getActivation();
    void forward(Matrix &X);
    void backward(Matrix &X, Matrix &dX, Matrix &dX_T);
    Matrix& getW();
    Matrix& getdW();
    Matrix& getOutput();
    Matrix& getGradOutput();
    Matrix& getOutput_T();
    void updateW(float lr);
};
// NOTE(review): the height *member* is deliberately forced to -1 (batch size
// unknown at build time) while Y is still sized with the height argument; also,
// members initialise in declaration order (W, dW, W_T, Y, dY, Y_T), not in the
// order listed here — confirm nothing relies on the list order.
Input::Input(int width, int height)
    : Layer("Input"), width(width), height(-1), Y(height, width), Y_T(width,height), W(1,1), dW(1,1), W_T(1,1){}
Input::~Input(){}
// Dumps the cached input matrix Y (host copy) to stdout, tab separated.
void Input::printWeights(){
    std::cout << "Input Layer - Serian los datos" << std::endl;
    float *data = Y.getHostData();
    for (int row = 0; row < Y.height; ++row) {
        for (int col = 0; col < Y.width; ++col) {
            std::cout << data[row * Y.width + col] << "\t";
        }
        std::cout << std::endl;
    }
}
// width = feature count; height stays -1 until data is seen (see constructor).
int Input::getWidth(){return width;}
int Input::getHeight(){return height;}
std::string Input::getActivation(){return "None";}
// Caches the batch: resizes Y on a shape change, then device-copies X into Y.
// (The "Unimplemted" trace message is the original author's; the copy does run.)
void Input::forward(Matrix &X){
    std::cout << "Unimplemted - Input Layer" << std::endl;
    if((X.getHeight() != Y.getHeight()) || (X.getWidth() != Y.getWidth())){
        Y.initialize(X.getHeight(), X.getWidth());
        // Matrix::initialize(int height_, int width_, std::string dist, float w)
    }
    Y.copyDeviceDataFromAnother(X);
}
// The input layer has no parameters, so backward is a traced no-op.
void Input::backward(Matrix &X, Matrix &dX, Matrix &dX_T){
    std::cout << "Unimplemted - Backward - Input Layer" << std::endl;
}
// Accessors; W/dW/W_T are 1x1 placeholders (an Input layer has no weights).
Matrix& Input::getW(){return W;}
Matrix& Input::getdW(){return dW;};
Matrix& Input::getOutput(){return Y;}
Matrix& Input::getGradOutput(){return dY;}
Matrix& Input::getOutput_T(){return Y_T;}
void Input::updateW(float lr){return;};
/* ----------------------------
Kernels
---------------------------- */
// Kernel modified from https://www.programmersought.com/article/13436584263/
// Tiled GEMM with fused bias: C = A*B + bias, where A is hA x wA, B is hB x wB
// (row-major, wA == hB assumed but not checked) and bias is a row vector of wB
// entries added to every row of C. Launch with BLOCK_SIZE x BLOCK_SIZE threads
// and a grid of ceil(wB/BLOCK_SIZE) x ceil(hA/BLOCK_SIZE) blocks; ragged edge
// tiles are handled by clamping the sub-tile sizes below.
// Kernel modified from https://www.programmersought.com/article/13436584263/
template<int BLOCK_SIZE> __global__ void
XdotWplusBias(float* A, float* B, float* C, int wA, int wB, int hA, int hB, float *bias){
    //Block index
    int bx = blockIdx.x;
    int by = blockIdx.y;
    //Thread index
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    /* Divide the matrix into sub-matrices, apply the parallel calculation of the thread in the block
    to the multiplication of the sub-matrices, and finally add their values to obtain an element value of C */
    int aBegin = by * BLOCK_SIZE * wA; //The row coordinates of the sub-matrix of A
    int aStep = BLOCK_SIZE; //The movement step size of A's sub-matrix column coordinates
    int aEnd = aBegin + wA - 1; //Limit an end point
    int bBegin = bx * BLOCK_SIZE;
    int bStep = BLOCK_SIZE * wB;
    float Csub = 0; //Accumulator for this thread's element of C
    int subAw = BLOCK_SIZE;
    int subAh = BLOCK_SIZE;
    int subBh = BLOCK_SIZE;
    int subBw = BLOCK_SIZE;
    for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep){
        //The number of columns in the last column of the A matrix is less than BLOCK_SIZE
        if (a + aStep - 1 > aEnd){
            subAw = aEnd - a + 1;
        }else{
            subAw = BLOCK_SIZE;
        }
        subBh = subAw;
        //The number of rows in the last row of the A matrix is less than BLOCK_SIZE
        if ((by + 1) * BLOCK_SIZE > hA){
            subAh = hA - by * BLOCK_SIZE;
        }else{
            subAh = BLOCK_SIZE;
        }
        //The number of columns in the last column of the B matrix is less than BLOCK_SIZE
        if ((bx + 1) * BLOCK_SIZE > wB){
            subBw = wB - bx * BLOCK_SIZE;
        }else{
            subBw = BLOCK_SIZE;
        }
        /* Shared-memory tiles for the current A and B sub-matrices */
        __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
        __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
        /* Stage the in-range elements of each sub-matrix into shared memory */
        if (ty < subAh && tx < subAw){
            As[ty][tx] = A[a + ty * wA + tx];
        }
        if (ty < subBh && tx < subBw){
            Bs[ty][tx] = B[b + ty * wB + tx];
        }
        __syncthreads();
        //Unroll the loop to compile to speed up
        #pragma unroll
        //Inner product over the shared tiles, accumulated into Csub
        for (int k = 0; k < subAw; k++){
            if (ty < subAh && tx < subBw){
                Csub += As[ty][k] * Bs[k][tx];
            }
        }
        __syncthreads();
    }
    //Write the in-range result, adding the per-column bias
    if (ty < subAh && tx < subBw) {
        // C[by * BLOCK_SIZE * wB + bx * BLOCK_SIZE + ty * wB + tx] = Csub;
        // C[by * BLOCK_SIZE * wB + bx * BLOCK_SIZE + ty * wB + tx] = Csub + bias[by*BLOCK_SIZE+ty]; //row
        C[by * BLOCK_SIZE * wB + bx * BLOCK_SIZE + ty * wB + tx] = Csub + bias[bx*BLOCK_SIZE+tx]; //col
    }
}
// Tiled GEMM, identical to XdotWplusBias but without the bias term: C = A*B.
// Same launch contract: BLOCK_SIZE x BLOCK_SIZE threads, grid covering C.
// Kernel modified from https://www.programmersought.com/article/13436584263/
template<int BLOCK_SIZE> __global__ void
XdotW(float* A, float* B, float* C, int wA, int wB, int hA, int hB){
    //Block index
    int bx = blockIdx.x;
    int by = blockIdx.y;
    //Thread index
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    /* Divide the matrix into sub-matrices, apply the parallel calculation of the thread in the block
    to the multiplication of the sub-matrices, and finally add their values to obtain an element value of C */
    int aBegin = by * BLOCK_SIZE * wA; //The row coordinates of the sub-matrix of A
    int aStep = BLOCK_SIZE; //The movement step size of A's sub-matrix column coordinates
    int aEnd = aBegin + wA - 1; //Limit an end point
    int bBegin = bx * BLOCK_SIZE;
    int bStep = BLOCK_SIZE * wB;
    float Csub = 0; //Accumulator for this thread's element of C
    int subAw = BLOCK_SIZE;
    int subAh = BLOCK_SIZE;
    int subBh = BLOCK_SIZE;
    int subBw = BLOCK_SIZE;
    for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep){
        //The number of columns in the last column of the A matrix is less than BLOCK_SIZE
        if (a + aStep - 1 > aEnd){
            subAw = aEnd - a + 1;
        }else{
            subAw = BLOCK_SIZE;
        }
        subBh = subAw;
        //The number of rows in the last row of the A matrix is less than BLOCK_SIZE
        if ((by + 1) * BLOCK_SIZE > hA){
            subAh = hA - by * BLOCK_SIZE;
        }else{
            subAh = BLOCK_SIZE;
        }
        //The number of columns in the last column of the B matrix is less than BLOCK_SIZE
        if ((bx + 1) * BLOCK_SIZE > wB){
            subBw = wB - bx * BLOCK_SIZE;
        }else{
            subBw = BLOCK_SIZE;
        }
        /* Shared-memory tiles for the current A and B sub-matrices */
        __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
        __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
        /* Stage the in-range elements of each sub-matrix into shared memory */
        if (ty < subAh && tx < subAw){
            As[ty][tx] = A[a + ty * wA + tx];
        }
        if (ty < subBh && tx < subBw){
            Bs[ty][tx] = B[b + ty * wB + tx];
        }
        __syncthreads();
        //Unroll the loop to compile to speed up
        #pragma unroll
        //Inner product over the shared tiles, accumulated into Csub
        for (int k = 0; k < subAw; k++){
            if (ty < subAh && tx < subBw){
                Csub += As[ty][k] * Bs[k][tx];
            }
        }
        __syncthreads();
    }
    //Write the in-range result
    if (ty < subAh && tx < subBw) {
        C[by * BLOCK_SIZE * wB + bx * BLOCK_SIZE + ty * wB + tx] = Csub;
        // C[by * BLOCK_SIZE * wB + bx * BLOCK_SIZE + ty * wB + tx] = Csub + bias[by*BLOCK_SIZE+ty]; //row
        // C[by * BLOCK_SIZE * wB + bx * BLOCK_SIZE + ty * wB + tx] = Csub + bias[bx*BLOCK_SIZE+tx]; //col
    }
}
// template<int BLOCK_SIZE> __global__ void
// XTdotdY(float* A, float* B, float* C, int wA, int wB, int hA, int hB){
// // XdotW(float* A, float* B, float* C, int wA, int wB, int hA, int hB){
// //Block index
// int bx = blockIdx.x;
// int by = blockIdx.y;
// //Thread index
// int tx = threadIdx.x;
// int ty = threadIdx.y;
// /* Divide the matrix into sub-matrices, apply the parallel calculation of the thread in the block
// to the multiplication of the sub-matrices, and finally add their values to obtain an element value of C */
// int aBegin = by * BLOCK_SIZE * wA; //The row coordinates of the sub-matrix of A
// int aStep = BLOCK_SIZE; //The movement step size of A's sub-matrix column coordinates
// int aEnd = aBegin + wA - 1; //Limit an end point
// int bBegin = bx * BLOCK_SIZE;
// int bStep = BLOCK_SIZE * wB;
// float Csub = 0; //Define the element value of C at the corresponding position in the block (x,. y) (ty, tx)
// int subAw = BLOCK_SIZE;
// int subAh = BLOCK_SIZE;
// int subBh = BLOCK_SIZE;
// int subBw = BLOCK_SIZE;
// for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep){
// //The number of columns in the last column of the A matrix is less than BLOCK_SIZE
// if (a + aStep - 1 > aEnd){
// subAw = aEnd - a + 1;
// }else{
// subAw = BLOCK_SIZE;
// }
// subBh = subAw;
// //The number of rows in the last row of the A matrix is less than BLOCK_SIZE
// if ((by + 1) * BLOCK_SIZE > hA){
// subAh = hA - by * BLOCK_SIZE;
// }else{
// subAh = BLOCK_SIZE;
// }
// //The number of columns in the last column of the B matrix is less than BLOCK_SIZE
// if ((bx + 1) * BLOCK_SIZE > wB){
// subBw = wB - bx * BLOCK_SIZE;
// }else{
// subBw = BLOCK_SIZE;
// }
// /* Develop shared memory in the block */
// __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// /* Assign values to the corresponding elements of the sub-matrix in the range of rows and columns */
// if (ty < subAh && tx < subAw){
// As[ty][tx] = A[a + ty * wA + tx];
// }
// if (ty < subBh && tx < subBw){
// Bs[ty][tx] = B[b + ty * wB + tx];
// }
// __syncthreads();
// //Unroll the loop to compile to speed up
// #pragma unroll
// //The inner loop calculates the vector product of the corresponding row and column in each sub-matrix and adds it to the previously obtained value
// for (int k = 0; k < subAw; k++){
// //Satisfy the elements within the row and column constraints to calculate the product and sum
// if (ty < subAh && tx < subBw){
// Csub += As[ty][k] * Bs[k][tx];
// }
// }
// __syncthreads();
// }
// //Satisfy the elements within the row and column constraints to calculate the product and sum
// if (ty < subAh && tx < subBw) {
// C[by * BLOCK_SIZE * wB + bx * BLOCK_SIZE + ty * wB + tx] = Csub;
// // C[by * BLOCK_SIZE * wB + bx * BLOCK_SIZE + ty * wB + tx] = Csub + bias[by*BLOCK_SIZE+ty]; //row
// // C[by * BLOCK_SIZE * wB + bx * BLOCK_SIZE + ty * wB + tx] = Csub + bias[bx*BLOCK_SIZE+tx]; //col
// }
// }
// Shared-memory matrix transpose (classic CUDA SDK pattern): idata is
// height x width (row-major) and odata receives its width x height transpose.
// Launch with BLOCK_SIZE x BLOCK_SIZE threads and a grid of
// ceil(width/BLOCK_SIZE) x ceil(height/BLOCK_SIZE) blocks; the +1 padding on
// the tile's inner dimension avoids shared-memory bank conflicts.
template<int BLOCK_SIZE> __global__ void
transpose(float *odata, float *idata, int width, int height){
    __shared__ float block[BLOCK_SIZE][BLOCK_SIZE+1];
    // read the matrix tile into shared memory
    // load one element per thread from device memory (idata) and store it
    // in transposed order in block[][]
    unsigned int xIndex = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    unsigned int yIndex = blockIdx.y * BLOCK_SIZE + threadIdx.y;
    if((xIndex < width) && (yIndex < height)) {
        unsigned int index_in = yIndex * width + xIndex;
        block[threadIdx.y][threadIdx.x] = idata[index_in];
    }
    // synchronise to ensure all writes to block[][] have completed
    __syncthreads();
    // write the transposed matrix tile to global memory (odata) in linear order
    xIndex = blockIdx.y * BLOCK_SIZE + threadIdx.x;
    yIndex = blockIdx.x * BLOCK_SIZE + threadIdx.y;
    if((xIndex < height) && (yIndex < width)) {
        unsigned int index_out = yIndex * height + xIndex;
        odata[index_out] = block[threadIdx.x][threadIdx.y];
    }
}
#endif
| 195365cb43bc3e5d53bcbbe2a70d7d9ea2cab308.cu | #ifndef LAYERS_H
#define LAYERS_H
#include <iostream>
#include <string>
#include <stdio.h>
#include <stdexcept>
#include "Matrix.cu"
#include "Activation.cu"
/* ----------------------------
Kernel
---------------------------- */
// template<int BLOCK_SIZE> __global__ void
// XdotW(float* A, float* B, float* C, int wA, int wB, int hA, int hB);
template<int BLOCK_SIZE> __global__ void
XdotWplusBias(float* A, float* B, float* C, int wA, int wB, int hA, int hB, float *bias);
template<int BLOCK_SIZE> __global__ void
XdotW(float* A, float* B, float* C, int wA, int wB, int hA, int hB);
template<int BLOCK_SIZE> __global__ void
transpose(float *odata, float *idata, int width, int height);
/* ----------------------------
Layer class
---------------------------- */
// Abstract base for network layers. Concrete layers own their parameters and
// per-batch buffers and expose them by reference so the network driver can
// chain forward()/backward() calls and apply SGD via updateW().
class Layer{
private:
    std::string name; // layer type tag, e.g. "Dense" / "Input"
public:
    Layer(std::string name_); // stores the type tag only
    virtual ~Layer();
    std::string getName();
    // virtual void call(Matrix &in, Matrix &out) = 0;
    // virtual void gradient(Matrix &in, Matrix &out) = 0;
    virtual void printWeights() = 0;
    virtual int getWidth() = 0;
    virtual int getHeight() = 0;
    virtual std::string getActivation() = 0;
    virtual void forward(Matrix &X) = 0;
    virtual void backward(Matrix &X, Matrix &dX, Matrix &dX_T) = 0;
    virtual Matrix& getW() = 0;
    virtual Matrix& getdW() = 0;
    virtual Matrix& getOutput() = 0;
    virtual Matrix& getGradOutput() = 0;
    virtual Matrix& getOutput_T() = 0;
    virtual void updateW(float lr) = 0;
    // virtual void updateBias(float lr) = 0;
};
// Base-class plumbing: store and expose the layer's type name.
Layer::Layer(std::string name_) : name(name_) {}
Layer::~Layer(){}
std::string Layer::getName(){return name;}
/* ----------------------------
Dense Layer
---------------------------- */
__global__ void updateWKernel(float *W, float *dW, float lr, int size);
// Fully connected layer: Y = activation(X * W + b).
// W is (input_shape x output_shape), b a (1 x output_shape) bias row.
class Dense : public Layer{
private:
    Matrix W;
    Matrix W_T;  // transpose scratch for W, rebuilt in backward()
    Matrix dW;   // dL/dW, consumed by updateW()
    Matrix b;
    Matrix Y;    // layer output: activation(input*W + b)
    Matrix dY;   // dL/dY — presumably written by the next layer via getGradOutput(); confirm
    Matrix Y_T;  // transpose scratch for Y
    // Matrix Output;
    Activation *activation; // owned; released in the destructor
public:
    Dense(int input_shape, int output_shape, std::string act, std::string dist = "uniform", float w = 0.1);
    ~Dense();
    // void call(Matrix &in, Matrix &out);
    // void gradient(Matrix &in, Matrix &out);
    void printWeights();
    int getWidth();
    int getHeight();
    std::string getActivation();
    void forward(Matrix &X);
    void backward(Matrix &X, Matrix &dX, Matrix &dX_T);
    Matrix& getW();
    Matrix& getdW();
    Matrix& getOutput();
    Matrix& getGradOutput();
    Matrix& getOutput_T();
    void updateW(float lr);
};
// Builds a fully connected layer: W is (input_shape x output_shape), b a row of
// output_shape biases; W_T and dW start zeroed as backward-pass scratch.
// Throws std::invalid_argument for an unknown activation name.
Dense::Dense(int input_shape, int output_shape, std::string act, std::string dist, float w)
    : Layer("Dense"),
      W(input_shape, output_shape, dist, w),
      W_T(output_shape, input_shape, "zeros", w),
      dW(input_shape, output_shape, "zeros", w),
      b(1, output_shape, dist, w) {
    activation = nullptr;
    if (act == "linear")         activation = new Linear;
    else if (act == "relu")      activation = new Relu;
    else if (act == "sigmoid")   activation = new Sigmoid;
    else if (act == "tanh")      activation = new Tanh;
    else if (act == "leakyRelu") activation = new LeakyRelu();
    if (activation == nullptr)
        throw std::invalid_argument("Invalid activation");
}
Dense::~Dense(){
    // Dense owns its Activation (allocated in the constructor).
    delete activation;
}
// Dumps W (one tab-separated row per input feature) followed by the bias row.
// BUG FIX: the original printed ptr_b[i] inside the row loop, indexing the
// (1 x W.width) bias vector by row i up to W.height — an out-of-bounds read
// whenever input_shape > output_shape, and the wrong element otherwise
// (biases belong to output columns, not input rows).
void Dense::printWeights(){
    float *ptr_W = W.getHostData();
    float *ptr_b = b.getHostData();
    for(int i=0; i < W.height; ++i){
        for(int j=0; j < W.width; ++j)
            std::cout << ptr_W[i*W.width + j] << "\t";
        std::cout << std::endl;
    }
    // Bias row: one value per output column.
    for(int j=0; j < b.width; ++j)
        std::cout << ptr_b[j] << "\t";
    std::cout << std::endl;
}
// W is (input_features x output_features): width = outputs, height = inputs.
int Dense::getWidth(){return W.width;}
int Dense::getHeight(){return W.height;}
std::string Dense::getActivation(){
    return activation->getName();
}
// Forward pass: Y = activation(X * W + b), computed entirely on the device.
void Dense::forward(Matrix &X){
    const int block_size = 32;
    dim3 threads(block_size, block_size);
    // One thread per element of Y: grid covers X.height rows x W.width columns.
    dim3 grid((W.width -1) / threads.x + 1, (X.height - 1) / threads.y + 1);
    XdotWplusBias<block_size> <<<grid, threads >>> (
        X.getDeviceData(),
        W.getDeviceData(),
        Y.getDeviceData(),
        X.getWidth(),
        W.getWidth(),
        X.getHeight(),
        W.getHeight(),
        b.getDeviceData()
    );
    cudaDeviceSynchronize();
    // Apply the activation in place on Y; Y then feeds the next layer.
    activation->call(Y, Y);
    cudaDeviceSynchronize();
    return;
}
// Backward pass. On entry dY is assumed to hold dL/dY (written by the layer
// above via getGradOutput()). Produces dW = X^T * dY and dX = dY * W^T.
//
// BUG FIX: both transpose() launches below had their odata/idata arguments
// reversed — they overwrote the layer INPUT X (resp. the WEIGHTS W) with the
// transpose of the still-empty X_T (resp. zero-initialised W_T) — and sized
// the grid's x dimension from the transposed matrix. Arguments now follow the
// kernel contract transpose(odata, idata, width, height), where idata is
// height x width, with the grid covering ceil(width/BS) x ceil(height/BS).
void Dense::backward(Matrix &X, Matrix &dX, Matrix &X_T){
    // X_T = transpose(X)
    const int block_size = 16;
    dim3 threads(block_size, block_size, 1);
    dim3 grid((X.width - 1) / threads.x + 1, (X.height - 1) / threads.y + 1);
    transpose<block_size> <<<grid, threads >>> (
        X_T.getDeviceData(),
        X.getDeviceData(),
        X.getWidth(),
        X.getHeight()
    );
    cudaDeviceSynchronize();
    // Activation gradient, applied in place on dY.
    activation->gradient(dY,dY);
    // dW = X_T * dY
    const int block_size2 = 32;
    dim3 threads2(block_size2, block_size2);
    dim3 grid2((dY.width -1) / threads2.x + 1, (X_T.height - 1) / threads2.y + 1);
    XdotW<block_size2> <<<grid2, threads2 >>> (
        X_T.getDeviceData(),
        dY.getDeviceData(),
        dW.getDeviceData(),
        X_T.getWidth(),
        dY.getWidth(),
        X_T.getHeight(),
        dY.getHeight()
    );
    cudaDeviceSynchronize();
    // W_T = transpose(W)   (see BUG FIX note above).
    const int block_size3 = 16;
    dim3 threads3(block_size3, block_size3, 1);
    dim3 grid3((W.width - 1) / threads3.x + 1, (W.height - 1) / threads3.y + 1);
    transpose<block_size3> <<<grid3, threads3 >>> (
        W_T.getDeviceData(),
        W.getDeviceData(),
        W.getWidth(),
        W.getHeight()
    );
    cudaDeviceSynchronize();
    // dX = dY * W_T — the gradient handed to the previous layer.
    const int block_size4 = 32;
    dim3 threads4(block_size4, block_size4);
    dim3 grid4((W_T.width -1) / threads4.x + 1, (dY.height - 1) / threads4.y + 1);
    XdotW<block_size4> <<<grid4, threads4 >>> (
        dY.getDeviceData(),
        W_T.getDeviceData(),
        dX.getDeviceData(),
        dY.getWidth(),
        W_T.getWidth(),
        dY.getHeight(),
        W_T.getHeight()
    );
    cudaDeviceSynchronize();
}
// Accessors used by the network driver and the adjacent layers.
Matrix& Dense::getW(){return W;}
Matrix& Dense::getdW(){return dW;};
Matrix& Dense::getOutput(){return Y;}
Matrix& Dense::getGradOutput(){return dY;}
Matrix& Dense::getOutput_T(){return Y_T;}
// SGD step: W -= lr * dW, one grid-stride kernel over all W.size elements.
void Dense::updateW(float lr){
    int dev;
    cudaGetDevice(&dev);
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp, dev);
    // Largest block the device allows; the grid is clamped to the device limit
    // and the kernel's stride loop absorbs any shortfall.
    dim3 nThreads(deviceProp.maxThreadsDim[0]);
    dim3 nBlocks((W.size + nThreads.x - 1) / nThreads.x);
    if(nBlocks.x > deviceProp.maxGridSize[0]){
        nBlocks.x = deviceProp.maxGridSize[0];
    }
    updateWKernel<<< nBlocks, nThreads >>>(W.getDeviceData(), dW.getDeviceData(), lr, W.size);
    cudaDeviceSynchronize();
}
// Element-wise SGD update: W[i] -= lr * dW[i] for every i < size.
// Grid-stride loop, so any 1-D launch configuration covers the whole array.
__global__ void updateWKernel(float *W, float *dW, float lr, int size){
    const int stride = blockDim.x * gridDim.x;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < size; idx += stride) {
        W[idx] = W[idx] - lr * dW[idx];
    }
}
/* ----------------------------
Input Layer
---------------------------- */
// Pass-through layer that caches the network input. It owns no real weights:
// W/dW/W_T are 1x1 placeholders kept only to satisfy the Layer interface.
class Input : public Layer{
private:
    int width, height; // feature count / batch size (batch unknown until forward)
    // Matrix Datos;
    Matrix W; // Not needed
    Matrix dW; // Not needed
    Matrix W_T; // Not needed
    Matrix Y; // cached copy of the input batch
    Matrix dY; // Not needed
    Matrix Y_T; // Not needed
public:
    Input(int width, int height = -1);
    ~Input();
    // void call(Matrix &in, Matrix &out);
    // void gradient(Matrix &in, Matrix &out);
    void printWeights();
    int getWidth();
    int getHeight();
    std::string getActivation();
    void forward(Matrix &X);
    void backward(Matrix &X, Matrix &dX, Matrix &dX_T);
    Matrix& getW();
    Matrix& getdW();
    Matrix& getOutput();
    Matrix& getGradOutput();
    Matrix& getOutput_T();
    void updateW(float lr);
};
// NOTE(review): the height *member* is deliberately forced to -1 (batch size
// unknown at build time) while Y is still sized with the height argument; also,
// members initialise in declaration order (W, dW, W_T, Y, dY, Y_T), not in the
// order listed here — confirm nothing relies on the list order.
Input::Input(int width, int height)
    : Layer("Input"), width(width), height(-1), Y(height, width), Y_T(width,height), W(1,1), dW(1,1), W_T(1,1){}
Input::~Input(){}
// Dumps the cached input matrix Y (host copy) to stdout, tab separated.
void Input::printWeights(){
    std::cout << "Input Layer - Serian los datos" << std::endl;
    float *data = Y.getHostData();
    for (int row = 0; row < Y.height; ++row) {
        for (int col = 0; col < Y.width; ++col) {
            std::cout << data[row * Y.width + col] << "\t";
        }
        std::cout << std::endl;
    }
}
// width = feature count; height stays -1 until data is seen (see constructor).
int Input::getWidth(){return width;}
int Input::getHeight(){return height;}
std::string Input::getActivation(){return "None";}
// Caches the batch: resizes Y on a shape change, then device-copies X into Y.
// (The "Unimplemted" trace message is the original author's; the copy does run.)
void Input::forward(Matrix &X){
    std::cout << "Unimplemted - Input Layer" << std::endl;
    if((X.getHeight() != Y.getHeight()) || (X.getWidth() != Y.getWidth())){
        Y.initialize(X.getHeight(), X.getWidth());
        // Matrix::initialize(int height_, int width_, std::string dist, float w)
    }
    Y.copyDeviceDataFromAnother(X);
}
// The input layer has no parameters, so backward is a traced no-op.
void Input::backward(Matrix &X, Matrix &dX, Matrix &dX_T){
    std::cout << "Unimplemted - Backward - Input Layer" << std::endl;
}
// Accessors; W/dW/W_T are 1x1 placeholders (an Input layer has no weights).
Matrix& Input::getW(){return W;}
Matrix& Input::getdW(){return dW;};
Matrix& Input::getOutput(){return Y;}
Matrix& Input::getGradOutput(){return dY;}
Matrix& Input::getOutput_T(){return Y_T;}
void Input::updateW(float lr){return;};
/* ----------------------------
Kernels
---------------------------- */
// Kernel modified from https://www.programmersought.com/article/13436584263/
// Tiled GEMM with fused bias: C = A*B + bias, where A is hA x wA, B is hB x wB
// (row-major, wA == hB assumed but not checked) and bias is a row vector of wB
// entries added to every row of C. Launch with BLOCK_SIZE x BLOCK_SIZE threads
// and a grid of ceil(wB/BLOCK_SIZE) x ceil(hA/BLOCK_SIZE) blocks; ragged edge
// tiles are handled by clamping the sub-tile sizes below.
template<int BLOCK_SIZE> __global__ void
XdotWplusBias(float* A, float* B, float* C, int wA, int wB, int hA, int hB, float *bias){
    //Block index
    int bx = blockIdx.x;
    int by = blockIdx.y;
    //Thread index
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    /* Divide the matrix into sub-matrices, apply the parallel calculation of the thread in the block
    to the multiplication of the sub-matrices, and finally add their values to obtain an element value of C */
    int aBegin = by * BLOCK_SIZE * wA; //The row coordinates of the sub-matrix of A
    int aStep = BLOCK_SIZE; //The movement step size of A's sub-matrix column coordinates
    int aEnd = aBegin + wA - 1; //Limit an end point
    int bBegin = bx * BLOCK_SIZE;
    int bStep = BLOCK_SIZE * wB;
    float Csub = 0; //Accumulator for this thread's element of C
    int subAw = BLOCK_SIZE;
    int subAh = BLOCK_SIZE;
    int subBh = BLOCK_SIZE;
    int subBw = BLOCK_SIZE;
    for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep){
        //The number of columns in the last column of the A matrix is less than BLOCK_SIZE
        if (a + aStep - 1 > aEnd){
            subAw = aEnd - a + 1;
        }else{
            subAw = BLOCK_SIZE;
        }
        subBh = subAw;
        //The number of rows in the last row of the A matrix is less than BLOCK_SIZE
        if ((by + 1) * BLOCK_SIZE > hA){
            subAh = hA - by * BLOCK_SIZE;
        }else{
            subAh = BLOCK_SIZE;
        }
        //The number of columns in the last column of the B matrix is less than BLOCK_SIZE
        if ((bx + 1) * BLOCK_SIZE > wB){
            subBw = wB - bx * BLOCK_SIZE;
        }else{
            subBw = BLOCK_SIZE;
        }
        /* Shared-memory tiles for the current A and B sub-matrices */
        __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
        __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
        /* Stage the in-range elements of each sub-matrix into shared memory */
        if (ty < subAh && tx < subAw){
            As[ty][tx] = A[a + ty * wA + tx];
        }
        if (ty < subBh && tx < subBw){
            Bs[ty][tx] = B[b + ty * wB + tx];
        }
        __syncthreads();
        //Unroll the loop to compile to speed up
        #pragma unroll
        //Inner product over the shared tiles, accumulated into Csub
        for (int k = 0; k < subAw; k++){
            if (ty < subAh && tx < subBw){
                Csub += As[ty][k] * Bs[k][tx];
            }
        }
        __syncthreads();
    }
    //Write the in-range result, adding the per-column bias
    if (ty < subAh && tx < subBw) {
        // C[by * BLOCK_SIZE * wB + bx * BLOCK_SIZE + ty * wB + tx] = Csub;
        // C[by * BLOCK_SIZE * wB + bx * BLOCK_SIZE + ty * wB + tx] = Csub + bias[by*BLOCK_SIZE+ty]; //row
        C[by * BLOCK_SIZE * wB + bx * BLOCK_SIZE + ty * wB + tx] = Csub + bias[bx*BLOCK_SIZE+tx]; //col
    }
}
// Kernel modified from https://www.programmersought.com/article/13436584263/
// Tiled shared-memory GEMM: C = A * B, A is hA x wA, B is hB x wB, both
// row-major. Identical to XdotWplusBias above except no bias is added.
// Launch with BLOCK_SIZE x BLOCK_SIZE thread blocks and a grid of
// ceil(wB/BLOCK_SIZE) x ceil(hA/BLOCK_SIZE) blocks.
// NOTE(review): hB is unused; the kernel implicitly assumes hB == wA — confirm.
template<int BLOCK_SIZE> __global__ void
XdotW(float* A, float* B, float* C, int wA, int wB, int hA, int hB){
	// XdotW(float* A, float* B, float* C, int wA, int wB, int hA, int hB){
	//Block index
	int bx = blockIdx.x;
	int by = blockIdx.y;
	//Thread index
	int tx = threadIdx.x;
	int ty = threadIdx.y;
	/* Divide the matrix into sub-matrices, apply the parallel calculation of the thread in the block
	to the multiplication of the sub-matrices, and finally add their values to obtain an element value of C */
	int aBegin = by * BLOCK_SIZE * wA; //The row coordinates of the sub-matrix of A
	int aStep = BLOCK_SIZE; //The movement step size of A's sub-matrix column coordinates
	int aEnd = aBegin + wA - 1; //Limit an end point
	int bBegin = bx * BLOCK_SIZE;
	int bStep = BLOCK_SIZE * wB;
	float Csub = 0; //Define the element value of C at the corresponding position in the block (x,. y) (ty, tx)
	// Effective tile extents; trimmed below for the ragged edge tiles.
	int subAw = BLOCK_SIZE;
	int subAh = BLOCK_SIZE;
	int subBh = BLOCK_SIZE;
	int subBw = BLOCK_SIZE;
	for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep){
		//The number of columns in the last column of the A matrix is less than BLOCK_SIZE
		if (a + aStep - 1 > aEnd){
			subAw = aEnd - a + 1;
		}else{
			subAw = BLOCK_SIZE;
		}
		subBh = subAw; // rows of the B tile must match columns of the A tile
		//The number of rows in the last row of the A matrix is less than BLOCK_SIZE
		if ((by + 1) * BLOCK_SIZE > hA){
			subAh = hA - by * BLOCK_SIZE;
		}else{
			subAh = BLOCK_SIZE;
		}
		//The number of columns in the last column of the B matrix is less than BLOCK_SIZE
		if ((bx + 1) * BLOCK_SIZE > wB){
			subBw = wB - bx * BLOCK_SIZE;
		}else{
			subBw = BLOCK_SIZE;
		}
		/* Develop shared memory in the block */
		__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
		__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
		/* Assign values to the corresponding elements of the sub-matrix in the range of rows and columns */
		if (ty < subAh && tx < subAw){
			As[ty][tx] = A[a + ty * wA + tx];
		}
		if (ty < subBh && tx < subBw){
			Bs[ty][tx] = B[b + ty * wB + tx];
		}
		__syncthreads(); // all tile loads must finish before any thread reads As/Bs
		//Unroll the loop to compile to speed up
		#pragma unroll
		//The inner loop calculates the vector product of the corresponding row and column in each sub-matrix and adds it to the previously obtained value
		for (int k = 0; k < subAw; k++){
			//Satisfy the elements within the row and column constraints to calculate the product and sum
			if (ty < subAh && tx < subBw){
				Csub += As[ty][k] * Bs[k][tx];
			}
		}
		__syncthreads(); // keep slow threads from overwriting tiles still in use
	}
	//Satisfy the elements within the row and column constraints to calculate the product and sum
	if (ty < subAh && tx < subBw) {
		C[by * BLOCK_SIZE * wB + bx * BLOCK_SIZE + ty * wB + tx] = Csub;
		// C[by * BLOCK_SIZE * wB + bx * BLOCK_SIZE + ty * wB + tx] = Csub + bias[by*BLOCK_SIZE+ty]; //row
		// C[by * BLOCK_SIZE * wB + bx * BLOCK_SIZE + ty * wB + tx] = Csub + bias[bx*BLOCK_SIZE+tx]; //col
	}
}
// template<int BLOCK_SIZE> __global__ void
// XTdotdY(float* A, float* B, float* C, int wA, int wB, int hA, int hB){
// // XdotW(float* A, float* B, float* C, int wA, int wB, int hA, int hB){
// //Block index
// int bx = blockIdx.x;
// int by = blockIdx.y;
// //Thread index
// int tx = threadIdx.x;
// int ty = threadIdx.y;
// /* Divide the matrix into sub-matrices, apply the parallel calculation of the thread in the block
// to the multiplication of the sub-matrices, and finally add their values to obtain an element value of C */
// int aBegin = by * BLOCK_SIZE * wA; //The row coordinates of the sub-matrix of A
// int aStep = BLOCK_SIZE; //The movement step size of A's sub-matrix column coordinates
// int aEnd = aBegin + wA - 1; //Limit an end point
// int bBegin = bx * BLOCK_SIZE;
// int bStep = BLOCK_SIZE * wB;
// float Csub = 0; //Define the element value of C at the corresponding position in the block (x,. y) (ty, tx)
// int subAw = BLOCK_SIZE;
// int subAh = BLOCK_SIZE;
// int subBh = BLOCK_SIZE;
// int subBw = BLOCK_SIZE;
// for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep){
// //The number of columns in the last column of the A matrix is less than BLOCK_SIZE
// if (a + aStep - 1 > aEnd){
// subAw = aEnd - a + 1;
// }else{
// subAw = BLOCK_SIZE;
// }
// subBh = subAw;
// //The number of rows in the last row of the A matrix is less than BLOCK_SIZE
// if ((by + 1) * BLOCK_SIZE > hA){
// subAh = hA - by * BLOCK_SIZE;
// }else{
// subAh = BLOCK_SIZE;
// }
// //The number of columns in the last column of the B matrix is less than BLOCK_SIZE
// if ((bx + 1) * BLOCK_SIZE > wB){
// subBw = wB - bx * BLOCK_SIZE;
// }else{
// subBw = BLOCK_SIZE;
// }
// /* Develop shared memory in the block */
// __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// /* Assign values to the corresponding elements of the sub-matrix in the range of rows and columns */
// if (ty < subAh && tx < subAw){
// As[ty][tx] = A[a + ty * wA + tx];
// }
// if (ty < subBh && tx < subBw){
// Bs[ty][tx] = B[b + ty * wB + tx];
// }
// __syncthreads();
// //Unroll the loop to compile to speed up
// #pragma unroll
// //The inner loop calculates the vector product of the corresponding row and column in each sub-matrix and adds it to the previously obtained value
// for (int k = 0; k < subAw; k++){
// //Satisfy the elements within the row and column constraints to calculate the product and sum
// if (ty < subAh && tx < subBw){
// Csub += As[ty][k] * Bs[k][tx];
// }
// }
// __syncthreads();
// }
// //Satisfy the elements within the row and column constraints to calculate the product and sum
// if (ty < subAh && tx < subBw) {
// C[by * BLOCK_SIZE * wB + bx * BLOCK_SIZE + ty * wB + tx] = Csub;
// // C[by * BLOCK_SIZE * wB + bx * BLOCK_SIZE + ty * wB + tx] = Csub + bias[by*BLOCK_SIZE+ty]; //row
// // C[by * BLOCK_SIZE * wB + bx * BLOCK_SIZE + ty * wB + tx] = Csub + bias[bx*BLOCK_SIZE+tx]; //col
// }
// }
// Tiled out-of-place transpose: odata (width x height) = transpose of idata
// (height x width), both row-major. Launch with BLOCK_SIZE x BLOCK_SIZE
// thread blocks covering the idata extent. The +1 column padding on the
// shared tile staggers bank assignment so the transposed reads below are
// free of shared-memory bank conflicts.
template<int BLOCK_SIZE> __global__ void
transpose(float *odata, float *idata, int width, int height){
	__shared__ float block[BLOCK_SIZE][BLOCK_SIZE+1];
	// read the matrix tile into shared memory
	// load one element per thread from device memory (idata) and store it
	// in transposed order in block[][]
	unsigned int xIndex = blockIdx.x * BLOCK_SIZE + threadIdx.x;
	unsigned int yIndex = blockIdx.y * BLOCK_SIZE + threadIdx.y;
	if((xIndex < width) && (yIndex < height)) {
		unsigned int index_in = yIndex * width + xIndex;
		block[threadIdx.y][threadIdx.x] = idata[index_in];
	}
	// synchronise to ensure all writes to block[][] have completed
	__syncthreads();
	// write the transposed matrix tile to global memory (odata) in linear order
	// (block indices are swapped so both global reads and writes stay coalesced)
	xIndex = blockIdx.y * BLOCK_SIZE + threadIdx.x;
	yIndex = blockIdx.x * BLOCK_SIZE + threadIdx.y;
	if((xIndex < height) && (yIndex < width)) {
		unsigned int index_out = yIndex * height + xIndex;
		odata[index_out] = block[threadIdx.x][threadIdx.y];
	}
}
#endif
|
017a84f231d28a1f6b355b65323bf5638b1411e4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "knet.h"
// Elementwise forward op: y[i] = a * x[i]^p + b, over n elements, using a
// grid-stride loop so any launch configuration covers the whole array.
// The identity tests (p != 1, a != 1, b != 0) skip pow/mul/add when they
// would be no-ops.
template<typename dType>
__global__ void _axpbforw(int n, dType a, dType *x, dType p, dType b, dType *y) {
  int i = threadIdx.x + blockIdx.x * blockDim.x;
  while (i < n) {
    dType yi = x[i];
    if (p != 1) yi = pow(yi,p);
    if (a != 1) yi *= a;
    if (b != 0) yi += b;
    y[i] = yi;
    i += blockDim.x * gridDim.x;
  }
}
// Backward pass for y = a*x^p + b: by the chain rule,
// dx[i] = dy[i] * a * p * x[i]^(p-1). When a == 1 and p == 1 the gradient
// passes straight through (dx = dy) and both multiplies are skipped.
template<typename dType>
__global__ void _axpbback(int n, dType a, dType *x, dType p, dType *dy, dType *dx) {
  int i = threadIdx.x + blockIdx.x * blockDim.x;
  dType ap = a*p;
  while (i < n) {
    dType dxi = dy[i];
    if (a!=1 || p!=1) {
      if (ap != 1) dxi *= ap;
      if (p != 1) dxi *= pow(x[i],p-1);
    }
    dx[i] = dxi;
    i += blockDim.x * gridDim.x;
  }
}
// C-linkage launchers for the templated kernels; KCALL (presumably defined in
// knet.h — confirm) wraps the kernel launch. 32 = float, 64 = double.
extern "C" {
  void axpbforw32(int n, float a, float *x, float p, float b, float *y) KCALL(_axpbforw,n,a,x,p,b,y);
  void axpbforw64(int n, double a, double *x, double p, double b, double *y) KCALL(_axpbforw,n,a,x,p,b,y);
  void axpbback32(int n, float a, float *x, float p, float *dy, float *dx) KCALL(_axpbback,n,a,x,p,dy,dx);
  void axpbback64(int n, double a, double *x, double p, double *dy, double *dx) KCALL(_axpbback,n,a,x,p,dy,dx);
}
| 017a84f231d28a1f6b355b65323bf5638b1411e4.cu | #include "knet.h"
// Elementwise forward op: y[i] = a * x[i]^p + b, over n elements, using a
// grid-stride loop so any launch configuration covers the whole array.
// The identity tests (p != 1, a != 1, b != 0) skip pow/mul/add when they
// would be no-ops.
template<typename dType>
__global__ void _axpbforw(int n, dType a, dType *x, dType p, dType b, dType *y) {
  int i = threadIdx.x + blockIdx.x * blockDim.x;
  while (i < n) {
    dType yi = x[i];
    if (p != 1) yi = pow(yi,p);
    if (a != 1) yi *= a;
    if (b != 0) yi += b;
    y[i] = yi;
    i += blockDim.x * gridDim.x;
  }
}
// Backward pass for y = a*x^p + b: by the chain rule,
// dx[i] = dy[i] * a * p * x[i]^(p-1). When a == 1 and p == 1 the gradient
// passes straight through (dx = dy) and both multiplies are skipped.
template<typename dType>
__global__ void _axpbback(int n, dType a, dType *x, dType p, dType *dy, dType *dx) {
  int i = threadIdx.x + blockIdx.x * blockDim.x;
  dType ap = a*p;
  while (i < n) {
    dType dxi = dy[i];
    if (a!=1 || p!=1) {
      if (ap != 1) dxi *= ap;
      if (p != 1) dxi *= pow(x[i],p-1);
    }
    dx[i] = dxi;
    i += blockDim.x * gridDim.x;
  }
}
// C-linkage launchers for the templated kernels; KCALL (presumably defined in
// knet.h — confirm) wraps the kernel launch. 32 = float, 64 = double.
extern "C" {
  void axpbforw32(int n, float a, float *x, float p, float b, float *y) KCALL(_axpbforw,n,a,x,p,b,y);
  void axpbforw64(int n, double a, double *x, double p, double b, double *y) KCALL(_axpbforw,n,a,x,p,b,y);
  void axpbback32(int n, float a, float *x, float p, float *dy, float *dx) KCALL(_axpbback,n,a,x,p,dy,dx);
  void axpbback64(int n, double a, double *x, double p, double *dy, double *dx) KCALL(_axpbback,n,a,x,p,dy,dx);
}
|
eb127b7efb7d025dfe73c6ca6e576db628bbadc5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "../common/book.h"
// Single-thread demo kernel: store a + b through the device pointer c.
__global__ void add(int a, int b, int* c) {
	*c = a + b;
}
/*
 * Demo driver (CUDA-by-Example chapter 3 style): launch a one-thread kernel
 * that adds 2 + 7 on the device, copy the result back, then query the number
 * of devices and the properties of device 0. HANDLE_ERROR comes from
 * common/book.h and aborts on any non-success status.
 */
int main(void) {
	int c;
	int* dev_c;
	/*
	 * ** DO NOT ** dereference the pointer returned by hipMalloc()
	 * from code that executes on the host !!
	 * RESTRICTIONS:
	 * <1>. Pass pointers allocated with hipMalloc() to functions that
	 * execute on the device is allowed.
	 * <2>. It is allowed to read or write the pointers allocated with
	 * hipMalloc() as long as they are run on the device.
	 * <3>. Pointers allocated with hipMalloc() can be passed to
	 * functions execute on the host.
	 * <4>. As the ** DO NOT ** says at line 12
	 */
	HANDLE_ERROR(hipMalloc((void**)&dev_c, sizeof(int)));
	hipLaunchKernelGGL(( add), dim3(1), dim3(1), 0, 0, 2, 7, dev_c);
	/*
	 * hipMemcpyDeviceToHost |
	 * hipMemcpyHostToDevice As their names tell
	 * hipMemcpyDeviceToDevice |
	 */
	HANDLE_ERROR(hipMemcpy(&c, dev_c, sizeof(int), hipMemcpyDeviceToHost));
	printf("2 + 7 = %d\n", c);
	/*
	 * We must use hipFree to free the memory allocated by hipMalloc,
	 * but not C free.
	 */
	hipFree(dev_c);
	int count;
	HANDLE_ERROR(hipGetDeviceCount(&count));
	printf("Device count: %d\n", count);
	/*
	 * hipDeviceProp_t is a structure contains information abount our device
	 * char name[256], size_t totalGlobalMem and many more. [Page 28]
	 */
	hipDeviceProp_t prop;
	HANDLE_ERROR(hipGetDeviceProperties(&prop, 0));
	printf("Device name: %s\n", prop.name);
	return 0;
}
| eb127b7efb7d025dfe73c6ca6e576db628bbadc5.cu | #include <stdio.h>
#include "../common/book.h"
// Single-thread demo kernel: store a + b through the device pointer c.
__global__ void add(int a, int b, int* c) {
	*c = a + b;
}
/*
 * Demo driver (CUDA-by-Example chapter 3 style): launch a one-thread kernel
 * that adds 2 + 7 on the device, copy the result back, then query the number
 * of devices and the properties of device 0. HANDLE_ERROR comes from
 * common/book.h and aborts on any non-success status.
 */
int main(void) {
	int c;
	int* dev_c;
	/*
	 * ** DO NOT ** dereference the pointer returned by cudaMalloc()
	 * from code that executes on the host !!
	 * RESTRICTIONS:
	 * <1>. Pass pointers allocated with cudaMalloc() to functions that
	 * execute on the device is allowed.
	 * <2>. It is allowed to read or write the pointers allocated with
	 * cudaMalloc() as long as they are run on the device.
	 * <3>. Pointers allocated with cudaMalloc() can be passed to
	 * functions execute on the host.
	 * <4>. As the ** DO NOT ** says at line 12
	 */
	HANDLE_ERROR(cudaMalloc((void**)&dev_c, sizeof(int)));
	add<<<1, 1>>>(2, 7, dev_c);
	/*
	 * cudaMemcpyDeviceToHost |
	 * cudaMemcpyHostToDevice | As their names tell
	 * cudaMemcpyDeviceToDevice |
	 */
	HANDLE_ERROR(cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost));
	printf("2 + 7 = %d\n", c);
	/*
	 * We must use cudaFree to free the memory allocated by cudaMalloc,
	 * but not C free.
	 */
	cudaFree(dev_c);
	int count;
	HANDLE_ERROR(cudaGetDeviceCount(&count));
	printf("Device count: %d\n", count);
	/*
	 * cudaDeviceProp is a structure contains information abount our device
	 * char name[256], size_t totalGlobalMem and many more. [Page 28]
	 */
	cudaDeviceProp prop;
	HANDLE_ERROR(cudaGetDeviceProperties(&prop, 0));
	printf("Device name: %s\n", prop.name);
	return 0;
}
|
4d2dea23d58db82c01ec4905c2fd0f8642c064b6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#define N (3*10)
//void saxpy_cpu(int n, float a, float *x, float *y){
// for(int i=0; i<n; ++i)
// y[i]=a*x[i]+y[i];
//}
// SAXPY: y[i] = a*x[i] + y[i] for i < n. One thread per element; the bounds
// guard handles the grid tail when n is not a multiple of the block size.
__global__ void saxpy_gpu(int n, float a, float *x, float *y){
	int i = blockIdx.x*blockDim.x + threadIdx.x;
	if(i<n)
		y[i] = a*x[i] + y[i];
}
/*
 * SAXPY driver: allocate host/device buffers of N floats, fill the host
 * buffers, run y = 2*x + y on the device with a 3-block x 10-thread launch
 * (matching N = 3*10), and copy the result back.
 * NOTE(review): random_floats() is not defined in this file — it must be
 * provided elsewhere in the build; confirm.
 */
int main(void){
	float *x, *y; //host copies
	float *d_x, *d_y; //device copies
	int size = N*sizeof(float);
	//Allocate space for device copies
	hipMalloc((void **)&d_x, size);
	hipMalloc((void **)&d_y, size);
	//Allocate space for host copies of x and y and setup input values
	x = (float *)malloc(size);
	random_floats(x, N);
	y = (float *)malloc(size);
	random_floats(y, N);
	// Copy input to device.
	// BUG FIX: the source must be the host buffer itself (x / y), not the
	// address of the pointer variable (&x / &y); the original copied `size`
	// bytes starting at the pointer's own storage, sending garbage to the GPU.
	hipMemcpy(d_x, x, size, hipMemcpyHostToDevice);
	hipMemcpy(d_y, y, size, hipMemcpyHostToDevice);
	hipLaunchKernelGGL(( saxpy_gpu), dim3(3), dim3(10), 0, 0, N, 2.0f, d_x, d_y);
	hipDeviceSynchronize();
	// Copy result back to host.
	// BUG FIX: destination and source were reversed (and took &y); a
	// DeviceToHost copy must read the device buffer into the host buffer.
	hipMemcpy(y, d_y, size, hipMemcpyDeviceToHost);
	//Cleanup
	hipFree(d_x); hipFree(d_y);
	free(x); free(y);
	return 0;
}
| 4d2dea23d58db82c01ec4905c2fd0f8642c064b6.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#define N (3*10)
//void saxpy_cpu(int n, float a, float *x, float *y){
// for(int i=0; i<n; ++i)
// y[i]=a*x[i]+y[i];
//}
// SAXPY: y[i] = a*x[i] + y[i] for i < n. One thread per element; the bounds
// guard handles the grid tail when n is not a multiple of the block size.
__global__ void saxpy_gpu(int n, float a, float *x, float *y){
	int i = blockIdx.x*blockDim.x + threadIdx.x;
	if(i<n)
		y[i] = a*x[i] + y[i];
}
/*
 * SAXPY driver: allocate host/device buffers of N floats, fill the host
 * buffers, run y = 2*x + y on the device with a 3-block x 10-thread launch
 * (matching N = 3*10), and copy the result back.
 * NOTE(review): random_floats() is not defined in this file — it must be
 * provided elsewhere in the build; confirm.
 */
int main(void){
	float *x, *y; //host copies
	float *d_x, *d_y; //device copies
	int size = N*sizeof(float);
	//Allocate space for device copies
	cudaMalloc((void **)&d_x, size);
	cudaMalloc((void **)&d_y, size);
	//Allocate space for host copies of x and y and setup input values
	x = (float *)malloc(size);
	random_floats(x, N);
	y = (float *)malloc(size);
	random_floats(y, N);
	// Copy input to device.
	// BUG FIX: the source must be the host buffer itself (x / y), not the
	// address of the pointer variable (&x / &y); the original copied `size`
	// bytes starting at the pointer's own storage, sending garbage to the GPU.
	cudaMemcpy(d_x, x, size, cudaMemcpyHostToDevice);
	cudaMemcpy(d_y, y, size, cudaMemcpyHostToDevice);
	saxpy_gpu<<<3, 10>>>(N, 2.0f, d_x, d_y);
	cudaDeviceSynchronize();
	// Copy result back to host.
	// BUG FIX: destination and source were reversed (and took &y); a
	// DeviceToHost copy must read the device buffer into the host buffer.
	cudaMemcpy(y, d_y, size, cudaMemcpyDeviceToHost);
	//Cleanup
	cudaFree(d_x); cudaFree(d_y);
	free(x); free(y);
	return 0;
}
|
4bbfef76cafe89c17c70a892fa7c323045bf6673.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2016 by Contributors
* \file multibox_prior.cu
* \brief generate multibox prior boxes cuda kernels
* \author Joshua Zhang
*/
#include "./multibox_prior-inl.h"
#include <mshadow/cuda/tensor_gpu-inl.cuh>
#define MULTIBOXPRIOR_CUDA_CHECK(condition) \
/* Code block avoids redefinition of hipError_t error */ \
do { \
hipError_t error = condition; \
CHECK_EQ(error, hipSuccess) << " " << hipGetErrorString(error); \
} while (0)
namespace mshadow {
namespace cuda {
// One thread per feature-map cell (r, c). For a single (size, sqrt_ratio)
// combination — selected by `offset` — writes one prior box at that cell as
// corner coordinates (xmin, ymin, xmax, ymax) around the cell's anchor
// center. `stride` is the number of floats per cell (4 * sizes * ratios).
// The in_height/in_width factor in `w` compensates for non-square feature
// maps so a ratio-1 box stays square.
template<typename DType>
__global__ void AssignPriors(DType *out, const float size,
                             const float sqrt_ratio, const int in_width,
                             const int in_height, const float step_x,
                             const float step_y, const float center_offy,
                             const float center_offx, const int stride,
                             const int offset) {
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index >= in_width * in_height) return;  // grid tail guard
  int r = index / in_width;
  int c = index % in_width;
  float center_x = (c + center_offx) * step_x;
  float center_y = (r + center_offy) * step_y;
  float w = size * in_height / in_width * sqrt_ratio / 2;  // half width
  float h = size / sqrt_ratio / 2;  // half height
  DType *ptr = out + index * stride + 4 * offset;
  *(ptr++) = center_x - w;  // xmin
  *(ptr++) = center_y - h;  // ymin
  *(ptr++) = center_x + w;  // xmax
  *(ptr++) = center_y + h;  // ymax
}
} // namespace cuda
// Host launcher: fills `out` with prior (anchor) boxes for every
// (size, ratio) pair at every cell of an in_height x in_width feature map.
// One kernel launch is issued per pair; `offset` selects that pair's 4-float
// slot inside each cell's stride-sized record. Runs on the tensor's stream.
template<typename DType>
inline void MultiBoxPriorForward(const Tensor<gpu, 2, DType> &out,
                                 const std::vector<float> &sizes,
                                 const std::vector<float> &ratios,
                                 const int in_width, const int in_height,
                                 const std::vector<float> &steps,
                                 const std::vector<float> &offsets) {
  CHECK_EQ(out.CheckContiguous(), true);
  hipStream_t stream = Stream<gpu>::GetStream(out.stream_);
  DType *out_ptr = out.dptr_;
  const float step_x = steps[1];
  const float step_y = steps[0];
  const float offset_x = offsets[1];
  const float offset_y = offsets[0];
  const int num_sizes = static_cast<int>(sizes.size());
  const int num_ratios = static_cast<int>(ratios.size());
  const int num_thread = cuda::kMaxThreadsPerBlock;
  dim3 dimBlock(num_thread);
  // one thread per feature-map cell
  dim3 dimGrid((in_width * in_height - 1) / num_thread + 1);
  cuda::CheckLaunchParam(dimGrid, dimBlock, "MultiBoxPrior Forward");
  const int stride = 4 * (num_sizes * num_ratios);
  int offset = 0;
  // ratio = 1, various sizes
  for (int i = 0; i < num_sizes; ++i) {
    for (int j = 0; j < num_ratios; ++j) {
      cuda::AssignPriors<DType> << <dimGrid, dimBlock, 0, stream >> > (out_ptr,
        sizes[i], sqrtf(ratios[j]), in_width, in_height, step_x, step_y, offset_y, offset_x, stride, offset);
      ++offset;
    }
  }
  MULTIBOXPRIOR_CUDA_CHECK(hipPeekAtLastError());
  // size = sizes[0], various ratios
  // for (int j = 1; j < num_ratios; ++j) {
  //   cuda::AssignPriors<DType><<<dimGrid, dimBlock, 0, stream>>>(out_ptr,
  //     sizes[0], sqrtf(ratios[j]), in_width, in_height, step_x, step_y,
  //     offset_y, offset_x, stride, offset);
  //   ++offset;
  // }
  // MULTIBOXPRIOR_CUDA_CHECK(hipPeekAtLastError());
}
} // namespace mshadow
namespace mxnet {
namespace op {
// GPU factory specialization: instantiate MultiBoxPriorOp for the dtype
// selected at runtime via the MSHADOW type switch.
template<>
Operator* CreateOp<gpu>(MultiBoxPriorParam param, int dtype) {
  Operator *op = NULL;
  MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
    op = new MultiBoxPriorOp<gpu, DType>(param);
  });
  return op;
}
} // namespace op
} // namespace mxnet
| 4bbfef76cafe89c17c70a892fa7c323045bf6673.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2016 by Contributors
* \file multibox_prior.cu
* \brief generate multibox prior boxes cuda kernels
* \author Joshua Zhang
*/
#include "./multibox_prior-inl.h"
#include <mshadow/cuda/tensor_gpu-inl.cuh>
#define MULTIBOXPRIOR_CUDA_CHECK(condition) \
/* Code block avoids redefinition of cudaError_t error */ \
do { \
cudaError_t error = condition; \
CHECK_EQ(error, cudaSuccess) << " " << cudaGetErrorString(error); \
} while (0)
namespace mshadow {
namespace cuda {
// One thread per feature-map cell (r, c). For a single (size, sqrt_ratio)
// combination — selected by `offset` — writes one prior box at that cell as
// corner coordinates (xmin, ymin, xmax, ymax) around the cell's anchor
// center. `stride` is the number of floats per cell (4 * sizes * ratios).
// The in_height/in_width factor in `w` compensates for non-square feature
// maps so a ratio-1 box stays square.
template<typename DType>
__global__ void AssignPriors(DType *out, const float size,
                             const float sqrt_ratio, const int in_width,
                             const int in_height, const float step_x,
                             const float step_y, const float center_offy,
                             const float center_offx, const int stride,
                             const int offset) {
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index >= in_width * in_height) return;  // grid tail guard
  int r = index / in_width;
  int c = index % in_width;
  float center_x = (c + center_offx) * step_x;
  float center_y = (r + center_offy) * step_y;
  float w = size * in_height / in_width * sqrt_ratio / 2;  // half width
  float h = size / sqrt_ratio / 2;  // half height
  DType *ptr = out + index * stride + 4 * offset;
  *(ptr++) = center_x - w;  // xmin
  *(ptr++) = center_y - h;  // ymin
  *(ptr++) = center_x + w;  // xmax
  *(ptr++) = center_y + h;  // ymax
}
} // namespace cuda
// Host launcher: fills `out` with prior (anchor) boxes for every
// (size, ratio) pair at every cell of an in_height x in_width feature map.
// One kernel launch is issued per pair; `offset` selects that pair's 4-float
// slot inside each cell's stride-sized record. Runs on the tensor's stream.
template<typename DType>
inline void MultiBoxPriorForward(const Tensor<gpu, 2, DType> &out,
                                 const std::vector<float> &sizes,
                                 const std::vector<float> &ratios,
                                 const int in_width, const int in_height,
                                 const std::vector<float> &steps,
                                 const std::vector<float> &offsets) {
  CHECK_EQ(out.CheckContiguous(), true);
  cudaStream_t stream = Stream<gpu>::GetStream(out.stream_);
  DType *out_ptr = out.dptr_;
  const float step_x = steps[1];
  const float step_y = steps[0];
  const float offset_x = offsets[1];
  const float offset_y = offsets[0];
  const int num_sizes = static_cast<int>(sizes.size());
  const int num_ratios = static_cast<int>(ratios.size());
  const int num_thread = cuda::kMaxThreadsPerBlock;
  dim3 dimBlock(num_thread);
  // one thread per feature-map cell
  dim3 dimGrid((in_width * in_height - 1) / num_thread + 1);
  cuda::CheckLaunchParam(dimGrid, dimBlock, "MultiBoxPrior Forward");
  const int stride = 4 * (num_sizes * num_ratios);
  int offset = 0;
  // ratio = 1, various sizes
  for (int i = 0; i < num_sizes; ++i) {
    for (int j = 0; j < num_ratios; ++j) {
      cuda::AssignPriors<DType> << <dimGrid, dimBlock, 0, stream >> > (out_ptr,
        sizes[i], sqrtf(ratios[j]), in_width, in_height, step_x, step_y, offset_y, offset_x, stride, offset);
      ++offset;
    }
  }
  MULTIBOXPRIOR_CUDA_CHECK(cudaPeekAtLastError());
  // size = sizes[0], various ratios
  // for (int j = 1; j < num_ratios; ++j) {
  //   cuda::AssignPriors<DType><<<dimGrid, dimBlock, 0, stream>>>(out_ptr,
  //     sizes[0], sqrtf(ratios[j]), in_width, in_height, step_x, step_y,
  //     offset_y, offset_x, stride, offset);
  //   ++offset;
  // }
  // MULTIBOXPRIOR_CUDA_CHECK(cudaPeekAtLastError());
}
} // namespace mshadow
namespace mxnet {
namespace op {
// GPU factory specialization: instantiate MultiBoxPriorOp for the dtype
// selected at runtime via the MSHADOW type switch.
template<>
Operator* CreateOp<gpu>(MultiBoxPriorParam param, int dtype) {
  Operator *op = NULL;
  MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
    op = new MultiBoxPriorOp<gpu, DType>(param);
  });
  return op;
}
} // namespace op
} // namespace mxnet
|
a6e76b671c8c3fa0f7b26658b01f494f084cd5b8.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
#include <math.h>
// Includes
#include <stdio.h>
#include "../include/ContAcq-IntClk.h"
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <hip/hip_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 15
#define F 24
#define ITERATIONS2 1
#define ITERATIONS (unsigned)( 1200 )
#define max_tid THREADS_PER_BLOCK*NUM_OF_BLOCKS
#define LINE_SIZE 128
#define SETS 64
#define ASSOC 6
#define SIMD_WIDTH 32
// Variables
int* h_A;
int* h_B;
int* h_C;
int* d_A;
int* d_B;
int* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(int*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
// Print file/line and the runtime error string, then exit, when `err` is not
// hipSuccess. Intended to be used through the checkCudaErrors() macro above,
// which supplies the caller's __FILE__/__LINE__.
inline void __checkCudaErrors(hipError_t err, const char *file, const int line ){
  if(hipSuccess != err){
	fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
	exit(-1);
  }
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
// Check the sticky last-error state (e.g. after a kernel launch) and exit
// with a diagnostic if it is set. Used through the getLastCudaError() macro,
// which supplies the caller's __FILE__/__LINE__.
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){
  hipError_t err = hipGetLastError();
  if (hipSuccess != err){
	fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
	exit(-1);
  }
}
// end of CUDA Helper Functions
// Device code
// Power-measurement kernel: each thread performs ITERATIONS global loads
// from A at a stride of max_tid*F elements (so successive reads land far
// apart), then stores the last value (plus the outer loop counter) to C so
// the loads cannot be optimized away. The result itself is meaningless;
// only the memory traffic matters.
__global__ void PowerKernal(int* A, int* C, int N){
	int tid = blockDim.x * blockIdx.x + threadIdx.x;
	//Do Some Computation
	//int size = (LINE_SIZE*ASSOC*SETS)/sizeof(int);
	//unsigned j=0, k=0;
	int m_sum=N;
	// m_sum = A[tid*F];
	for(unsigned j=0; j<ITERATIONS2; j++){
		for(unsigned k=0; k<ITERATIONS; ++k){
			m_sum=A[((unsigned)(tid*F)+(unsigned)(k*max_tid*F))];
		}
		m_sum+=j;
	}
	C[tid]=m_sum;
	__syncthreads();
}
// Host code
// Drive the power microbenchmark: allocate and initialize host/device
// buffers sized to cover every strided access of PowerKernal, start the
// external DAQ power capture, time the kernel with a CUT timer, then stop
// the capture and copy the (unused) result back.
int main(){
	printf("Power Microbenchmarks\n");
	//int N = LINE_SIZE*SETS*ASSOC;
	unsigned N =((unsigned)(max_tid*F)+(unsigned)(ITERATIONS*max_tid*F));
	size_t size = N * sizeof(int);
	// Allocate input vectors h_A and h_B in host memory
	h_A = (int*)malloc(size);
	if (h_A == 0) CleanupResources();
	//h_B = (float*)malloc(size);
	//if (h_B == 0) CleanupResources();
	h_C = (int*)malloc(size);
	if (h_C == 0) CleanupResources();
	// Initialize input vectors
	RandomInit(h_A, N);
	//RandomInit(h_B, N);
	// Allocate vectors in device memory
	checkCudaErrors( hipMalloc((void**)&d_A, size) );
	//checkCudaErrors( hipMalloc((void**)&d_B, size) );
	checkCudaErrors( hipMalloc((void**)&d_C, size) );
	// Copy vectors from host memory to device memory
	checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) );
	//checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) );
	//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
	dim3 dimGrid(NUM_OF_BLOCKS,1);
	dim3 dimBlock(THREADS_PER_BLOCK,1);
	CUT_SAFE_CALL(cutCreateTimer(&my_timer));
	TaskHandle taskhandle = LaunchDAQ();
	CUT_SAFE_CALL(cutStartTimer(my_timer));
	hipLaunchKernelGGL(( PowerKernal), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_C, N);
	CUDA_SAFE_CALL( hipDeviceSynchronize() );
	printf("execution time = %f\n", cutGetTimerValue(my_timer));
	TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
	CUT_SAFE_CALL(cutStopTimer(my_timer));
	CUT_SAFE_CALL(cutDeleteTimer(my_timer));
	getLastCudaError("kernel launch failure");
	#ifdef _DEBUG
	checkCudaErrors( hipDeviceSynchronize() );
	#endif
	// Copy result from device memory to host memory
	// h_C contains the result in host memory
	checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) );
	CleanupResources();
	return 0;
}
// Release whatever device and host buffers were successfully allocated.
// Safe to call on partial failure: each pointer is checked before freeing.
void CleanupResources(void){
	// Free device memory
	if (d_A)
		hipFree(d_A);
	//if (d_B)
	//	hipFree(d_B);
	if (d_C)
		hipFree(d_C);
	// Free host memory
	if (h_A)
		free(h_A);
	// if (h_B)
	//	free(h_B);
	if (h_C)
		free(h_C);
}
// Fill data[0..n) with pseudo-random non-negative integers.
// BUG FIX: the original computed rand() / RAND_MAX with *integer* division,
// which is 0 for every rand() value except RAND_MAX itself, so the array was
// effectively all zeros rather than random.
void RandomInit(int* data, int n){
	for (int i = 0; i < n; ++i)
		data[i] = rand();
}
| a6e76b671c8c3fa0f7b26658b01f494f084cd5b8.cu | #include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
#include <math.h>
// Includes
#include <stdio.h>
#include "../include/ContAcq-IntClk.h"
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <cuda_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 15
#define F 24
#define ITERATIONS2 1
#define ITERATIONS (unsigned)( 1200 )
#define max_tid THREADS_PER_BLOCK*NUM_OF_BLOCKS
#define LINE_SIZE 128
#define SETS 64
#define ASSOC 6
#define SIMD_WIDTH 32
// Variables
int* h_A;
int* h_B;
int* h_C;
int* d_A;
int* d_B;
int* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(int*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
// Print file/line and the runtime error string, then exit, when `err` is not
// cudaSuccess. Intended to be used through the checkCudaErrors() macro above,
// which supplies the caller's __FILE__/__LINE__.
inline void __checkCudaErrors(cudaError err, const char *file, const int line ){
  if(cudaSuccess != err){
	fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
	exit(-1);
  }
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
// Check the sticky last-error state (e.g. after a kernel launch) and exit
// with a diagnostic if it is set. Used through the getLastCudaError() macro,
// which supplies the caller's __FILE__/__LINE__.
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){
  cudaError_t err = cudaGetLastError();
  if (cudaSuccess != err){
	fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
	exit(-1);
  }
}
// end of CUDA Helper Functions
// Device code
// Power-measurement kernel: each thread performs ITERATIONS global loads
// from A at a stride of max_tid*F elements (so successive reads land far
// apart), then stores the last value (plus the outer loop counter) to C so
// the loads cannot be optimized away. The result itself is meaningless;
// only the memory traffic matters.
__global__ void PowerKernal(int* A, int* C, int N){
	int tid = blockDim.x * blockIdx.x + threadIdx.x;
	//Do Some Computation
	//int size = (LINE_SIZE*ASSOC*SETS)/sizeof(int);
	//unsigned j=0, k=0;
	int m_sum=N;
	// m_sum = A[tid*F];
	for(unsigned j=0; j<ITERATIONS2; j++){
		for(unsigned k=0; k<ITERATIONS; ++k){
			m_sum=A[((unsigned)(tid*F)+(unsigned)(k*max_tid*F))];
		}
		m_sum+=j;
	}
	C[tid]=m_sum;
	__syncthreads();
}
// Host code
// Drive the power microbenchmark: allocate and initialize host/device
// buffers sized to cover every strided access of PowerKernal, start the
// external DAQ power capture, time the kernel with a CUT timer, then stop
// the capture and copy the (unused) result back.
int main(){
	printf("Power Microbenchmarks\n");
	//int N = LINE_SIZE*SETS*ASSOC;
	unsigned N =((unsigned)(max_tid*F)+(unsigned)(ITERATIONS*max_tid*F));
	size_t size = N * sizeof(int);
	// Allocate input vectors h_A and h_B in host memory
	h_A = (int*)malloc(size);
	if (h_A == 0) CleanupResources();
	//h_B = (float*)malloc(size);
	//if (h_B == 0) CleanupResources();
	h_C = (int*)malloc(size);
	if (h_C == 0) CleanupResources();
	// Initialize input vectors
	RandomInit(h_A, N);
	//RandomInit(h_B, N);
	// Allocate vectors in device memory
	checkCudaErrors( cudaMalloc((void**)&d_A, size) );
	//checkCudaErrors( cudaMalloc((void**)&d_B, size) );
	checkCudaErrors( cudaMalloc((void**)&d_C, size) );
	// Copy vectors from host memory to device memory
	checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
	//checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) );
	//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
	dim3 dimGrid(NUM_OF_BLOCKS,1);
	dim3 dimBlock(THREADS_PER_BLOCK,1);
	CUT_SAFE_CALL(cutCreateTimer(&my_timer));
	TaskHandle taskhandle = LaunchDAQ();
	CUT_SAFE_CALL(cutStartTimer(my_timer));
	PowerKernal<<<dimGrid,dimBlock>>>(d_A, d_C, N);
	CUDA_SAFE_CALL( cudaThreadSynchronize() );
	printf("execution time = %f\n", cutGetTimerValue(my_timer));
	TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
	CUT_SAFE_CALL(cutStopTimer(my_timer));
	CUT_SAFE_CALL(cutDeleteTimer(my_timer));
	getLastCudaError("kernel launch failure");
	#ifdef _DEBUG
	checkCudaErrors( cudaDeviceSynchronize() );
	#endif
	// Copy result from device memory to host memory
	// h_C contains the result in host memory
	checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
	CleanupResources();
	return 0;
}
void CleanupResources(void){
// Free device memory
if (d_A)
cudaFree(d_A);
//if (d_B)
// cudaFree(d_B);
if (d_C)
cudaFree(d_C);
// Free host memory
if (h_A)
free(h_A);
// if (h_B)
// free(h_B);
if (h_C)
free(h_C);
}
// Fills data[0..n-1] with pseudo-random non-negative integers.
// Fix: the original wrote (int)(rand() / RAND_MAX) — integer division,
// which is 0 for every draw except rand() == RAND_MAX, so the buffer was
// effectively all zeros. (The old comment also said "float entries"
// although the array is int.)
void RandomInit(int* data, int n){
for (int i = 0; i < n; ++i)
data[i] = rand();
}
|
e616cfc818d9d25c647c33fc0404ba9f0a69282d.hip | // !!! This is a file automatically generated by hipify!!!
/*
============================================================================
Name : sorting_segments.cu
Author : Rafael Schmid
Version :
Copyright : Your copyright notice
Description : Compute sum of reciprocals using STL on CPU and Thrust on GPU
============================================================================
COMPILAR USANDO O SEGUINTE COMANDO:
nvcc segmented_sort.cu -o segmented_sort -std=c++11 --expt-extended-lambda -I"/home/schmid/Dropbox/Unicamp/workspace/sorting_segments/moderngpu-master/src"
*/
#include <moderngpu/kernel_segsort.hxx>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <hip/hip_runtime.h>
#ifndef ELAPSED_TIME
#define ELAPSED_TIME 0
#endif
#ifndef BLOCK_SIZE
#define BLOCK_SIZE 32
#endif
// Record flowing through the parallel min-reductions: a candidate
// (task, machine) assignment together with its completion-time value.
struct Reduce {
	uint t;       // task index
	uint m;       // machine index
	float value;  // candidate completion time

	Reduce(uint index_t, uint index_m, float value)
			: t(index_t), m(index_m), value(value) {
	}

	Reduce()
			: t(0), m(0), value(0.0f) {
	}
} myobj;
// Debug dump: prints a t x m table of Reduce records to stdout,
// one row per task, fields labelled and columns separated by "\t||".
void print(Reduce* vec, uint t, uint m) {
	std::cout << "\n";
	for (uint row = 0; row < t; row++) {
		for (uint col = 0; col < m; col++) {
			const Reduce& r = vec[row * m + col];
			std::cout << "t=" << r.t << " m=" << r.m
					<< " value=" << r.value << "\t||";
		}
		std::cout << "\n";
	}
}
// Aborts the process with a diagnostic when a runtime call reports failure.
// NOTE(review): __LINE__ expands here, inside the helper, so the printed
// line number never points at the failing call site — a macro wrapper
// would be needed to report the caller's line.
void cudaTest(hipError_t error) {
	if (error == hipSuccess)
		return;
	printf("cuda returned error %s (code %d), line(%d)\n",
			hipGetErrorString(error), error, __LINE__);
	exit (EXIT_FAILURE);
}
// Prints a t x m matrix row by row, values separated by single spaces.
template<typename T>
void print(T* vec, uint t, uint m) {
	std::cout << "\n";
	for (uint row = 0; row < t; row++) {
		for (uint col = 0; col < m; col++)
			std::cout << vec[row * m + col] << " ";
		std::cout << "\n";
	}
}
// Prints t values on a single line, separated by single spaces.
template<typename T>
void print(T* vec, uint t) {
	std::cout << "\n";
	for (uint idx = 0; idx < t; idx++)
		std::cout << vec[idx] << " ";
	std::cout << "\n";
}
// Per-iteration min-search kernel of the scheduler.
//
// Launch: gridDim = (ceil(t/B), ceil(m/B)), blockDim = (B, B), dynamic
// shared memory = blockDim.x * blockDim.y * sizeof(Reduce).
// Each thread evaluates one (machine=row, task=col) cell: the machine's
// current completion time plus that task's cost on it. Deleted tasks (and,
// fixed here, out-of-range cells in edge blocks) contribute a MAX_FLOAT
// sentinel so they can never win. The block then reduces to the record
// with the smallest value (ties broken by the smaller task index) and
// writes one Reduce per block to completion_aux[blockIdx.y*gridDim.x+blockIdx.x].
__global__ void calc_completion_times(float* machines, float* completion_times, bool *task_deleted,
Reduce* completion_aux, int m, int t, float MAX_FLOAT) {
	extern __shared__ Reduce s_comp_times[];
	int row = blockIdx.y * blockDim.y + threadIdx.y;
	int col = blockIdx.x * blockDim.x + threadIdx.x;
	int tIdrow = threadIdx.y;
	int tIdcol = threadIdx.x;
	int iglobal = row * t + col;
	int ilocal = tIdrow * blockDim.x + tIdcol;

	// Bounds guard: when t or m is not a multiple of the block edge, edge
	// blocks contain threads outside the matrix. The original indexed
	// task_deleted[col] / machines[iglobal] out of bounds for them; those
	// threads now load the sentinel instead.
	if (row < m && col < t && !task_deleted[col]) {
		s_comp_times[ilocal].t = col;
		s_comp_times[ilocal].m = row;
		s_comp_times[ilocal].value = completion_times[row] + machines[iglobal];
	}
	else {
		s_comp_times[ilocal].t = col;
		s_comp_times[ilocal].m = row;
		s_comp_times[ilocal].value = MAX_FLOAT;
	}
	__syncthreads();

	// Phase 1: reduce along x within each row of the block.
	for(int e = blockDim.x/2; e > 0; e/=2)
	{
		if (tIdcol < e) {
			if ((s_comp_times[ilocal + e].value < s_comp_times[ilocal].value)
					|| (s_comp_times[ilocal + e].value == s_comp_times[ilocal].value
							&& s_comp_times[ilocal + e].t < s_comp_times[ilocal].t)) {
				s_comp_times[ilocal].t = s_comp_times[ilocal + e].t;
				s_comp_times[ilocal].m = s_comp_times[ilocal + e].m;
				s_comp_times[ilocal].value = s_comp_times[ilocal + e].value;
			}
		}
		__syncthreads();
	}

	// Phase 2: reduce along y over the column-0 partials. __syncthreads()
	// must be reached by every thread in the block, so the original's
	// divergent `if (tIdcol == 0) { ... __syncthreads(); }` wrapper
	// (undefined behavior) is folded into the per-iteration predicate.
	for(int e = blockDim.y/2; e > 0; e/=2)
	{
		if (tIdcol == 0 && tIdrow < e) {
			if ((s_comp_times[ilocal + e * blockDim.x].value < s_comp_times[ilocal].value)
					|| (s_comp_times[ilocal + e * blockDim.x].value == s_comp_times[ilocal].value
							&& s_comp_times[ilocal + e * blockDim.x].t < s_comp_times[ilocal].t)) {
				s_comp_times[ilocal].t = s_comp_times[ilocal + e * blockDim.x].t;
				s_comp_times[ilocal].m = s_comp_times[ilocal + e * blockDim.x].m;
				s_comp_times[ilocal].value = s_comp_times[ilocal + e * blockDim.x].value;
			}
		}
		__syncthreads();
	}

	// Thread (0,0) publishes this block's winner.
	if(tIdrow == 0 && tIdcol == 0) {
		iglobal = blockIdx.y * gridDim.x + blockIdx.x;
		completion_aux[iglobal].t = s_comp_times[0].t;
		completion_aux[iglobal].m = s_comp_times[0].m;
		completion_aux[iglobal].value = s_comp_times[0].value;
	}
}
// Folds the per-block partial minima produced by calc_completion_times
// further down: each block min-reduces a 2D tile of Reduce records and
// overwrites completion_aux[blockIdx.y * gridDim.x + blockIdx.x] with its
// winner (smallest value; ties go to the smaller task index).
// NOTE(review): the y-phase strides by BLOCK_SIZE, so callers must launch
// with blockDim.x == BLOCK_SIZE (the commented-out call sites do).
__global__ void reduction_two_dimensional(Reduce* completion_aux, int t) {
	extern __shared__ Reduce s_comp_times[];
	int row = blockIdx.y * blockDim.y + threadIdx.y;
	int col = blockIdx.x * blockDim.x + threadIdx.x;
	int tIdrow = threadIdx.y;
	int tIdcol = threadIdx.x;
	int iglobal = row * t + col;
	int ilocal = tIdrow * blockDim.x + tIdcol;

	// Stage this thread's record in shared memory.
	s_comp_times[ilocal].t = completion_aux[iglobal].t;
	s_comp_times[ilocal].m = completion_aux[iglobal].m;
	s_comp_times[ilocal].value = completion_aux[iglobal].value;
	__syncthreads();

	// Phase 1: reduce along x.
	for(int e = blockDim.x/2; e > 0; e/=2)
	{
		if (tIdcol < e) {
			if ((s_comp_times[ilocal + e].value < s_comp_times[ilocal].value)
					|| (s_comp_times[ilocal + e].value == s_comp_times[ilocal].value
							&& s_comp_times[ilocal + e].t < s_comp_times[ilocal].t)) {
				s_comp_times[ilocal].t = s_comp_times[ilocal + e].t;
				s_comp_times[ilocal].m = s_comp_times[ilocal + e].m;
				s_comp_times[ilocal].value = s_comp_times[ilocal + e].value;
			}
		}
		__syncthreads();
	}

	// Phase 2: reduce along y. The barrier is hoisted out of the original's
	// divergent `if (tIdcol == 0)` wrapper: __syncthreads() inside a branch
	// not taken by the whole block is undefined behavior.
	for(int e = blockDim.y/2; e > 0; e/=2)
	{
		if (tIdcol == 0 && tIdrow < e) {
			if ((s_comp_times[ilocal + e * BLOCK_SIZE].value < s_comp_times[ilocal].value)
					|| (s_comp_times[ilocal + e * BLOCK_SIZE].value == s_comp_times[ilocal].value
							&& s_comp_times[ilocal + e * BLOCK_SIZE].t < s_comp_times[ilocal].t)) {
				s_comp_times[ilocal].t = s_comp_times[ilocal + e * BLOCK_SIZE].t;
				s_comp_times[ilocal].m = s_comp_times[ilocal + e * BLOCK_SIZE].m;
				s_comp_times[ilocal].value = s_comp_times[ilocal + e * BLOCK_SIZE].value;
			}
		}
		__syncthreads();
	}

	// First thread writes the block result back in place.
	if(tIdrow == 0 && tIdcol == 0) {
		iglobal = blockIdx.y * gridDim.x + blockIdx.x;
		completion_aux[iglobal].t = s_comp_times[0].t;
		completion_aux[iglobal].m = s_comp_times[0].m;
		completion_aux[iglobal].value = s_comp_times[0].value;
	}
}
// One-dimensional min-reduction: each block collapses blockDim.x
// consecutive Reduce records into the single best one (smallest value;
// ties go to the smaller task index) and stores it at
// d_completion_aux[blockIdx.x].
// Requires blockDim.x * sizeof(Reduce) bytes of dynamic shared memory.
__global__ void reduction(Reduce* d_completion_aux) {
	extern __shared__ Reduce s_records[];

	const int lane = threadIdx.x;
	const int gid = blockIdx.x * blockDim.x + lane;

	// Stage one record per thread in shared memory.
	s_records[lane] = d_completion_aux[gid];
	__syncthreads();

	// Classic tree reduction: halve the active span each pass.
	for (int span = blockDim.x / 2; span > 0; span /= 2) {
		if (lane < span) {
			Reduce other = s_records[lane + span];
			if (other.value < s_records[lane].value
					|| (other.value == s_records[lane].value
							&& other.t < s_records[lane].t)) {
				s_records[lane] = other;
			}
		}
		__syncthreads();
	}

	// Lane 0 publishes the block winner over this block's slot.
	if (lane == 0)
		d_completion_aux[blockIdx.x] = s_records[0];
}
// Final reduction stage: collapses the remaining candidates into a single
// winner and commits the assignment — marks the winning task deleted,
// flags task_map[machine * t + task], and records the machine's new
// completion time. Intended to run as a single block covering all
// remaining candidates, with blockDim.x * sizeof(Reduce) shared bytes.
__global__ void block_reduction(float* completion_times, bool* task_map, bool* task_deleted,
Reduce* d_completion_aux, int t) {
	extern __shared__ Reduce s_records[];

	const int lane = threadIdx.x;
	const int gid = blockIdx.x * blockDim.x + lane;

	// Stage one record per thread.
	s_records[lane] = d_completion_aux[gid];
	__syncthreads();

	// Tree reduction keeping the smaller value (smaller task on ties).
	for (int span = blockDim.x / 2; span > 0; span /= 2) {
		if (lane < span) {
			Reduce other = s_records[lane + span];
			if (other.value < s_records[lane].value
					|| (other.value == s_records[lane].value
							&& other.t < s_records[lane].t)) {
				s_records[lane] = other;
			}
		}
		__syncthreads();
	}

	// Commit the winning (task, machine) pair.
	if (lane == 0) {
		Reduce best = s_records[0];
		task_deleted[best.t] = true;
		task_map[best.m * t + best.t] = true;
		completion_times[best.m] = best.value;
	}
}
// Greedy scheduling driver (Min-Min style skeleton), HIP build.
//
// stdin: t (tasks), m (machines), then t*m costs; the cost of task i on
// machine j is stored transposed as machines[j * t + i].
// Each of the t host iterations launches calc_completion_times, which
// leaves one per-block minimum in d_completion_aux. The follow-up
// reduction / block_reduction stages are commented out below, so
// task_deleted, task_map and the completion times are never updated
// between iterations in this version.
// Output: kernel-loop time when ELAPSED_TIME == 1, else completion times.
int main(int argc, char** argv) {
int t, m;
/*if (argc < 3) {
printf("Parameters missing: <number of tasks> <number of machines>\n\n");
return 0;
}
t = atoi(argv[1]);
m = atoi(argv[2]);
*/
// NOTE(review): scanf results are stored but never validated; malformed
// input leaves t/m and the cost matrix indeterminate.
int a = scanf("%d", &t);
a = scanf("%d", &m);
// Buffer sizes in bytes.
uint mem_size_machines = sizeof(float) * (m * t);
uint mem_size_completion_times = sizeof(float) * (m);
uint mem_size_task_deleted = sizeof(bool) * (t);
uint mem_size_task_map = sizeof(bool) * (m * t);
// One Reduce partial per BLOCK_SIZE x BLOCK_SIZE grid block (ceil-div).
int dimCol = (t-1)/BLOCK_SIZE + 1;
int dimRow = (m-1)/BLOCK_SIZE + 1;
uint mem_size_completion_aux = sizeof(Reduce) * (dimCol * dimRow);
// Host-side state (malloc results are not checked).
float *machines = (float *) malloc(mem_size_machines);
float *completion_times = (float *) malloc(mem_size_completion_times);
bool *task_deleted = (bool *) malloc(mem_size_task_deleted);
bool *task_map = (bool *) malloc(mem_size_task_map);
float aux;
// Read the cost matrix (task-major on stdin, machine-major in memory)
// and zero-initialize the bookkeeping arrays.
for (int i = 0; i < t; i++) {
for (int j = 0; j < m; j++) {
int a = scanf("%f", &aux);
machines[j * t + i] = aux;
task_map[j * t + i] = false;
completion_times[j] = 0;
}
task_deleted[i] = false;
}
// Events bracket the kernel loop for the optional timing output.
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float *d_machines, *d_completion_times;
bool *d_task_deleted, *d_task_map;
Reduce *d_completion_aux;
// Sentinel the kernels use to mark deleted tasks as "never the minimum".
float MAX_FLOAT = std::numeric_limits<float>::max();
cudaTest(hipMalloc((void **) &d_machines, mem_size_machines));
cudaTest(hipMalloc((void **) &d_completion_times, mem_size_completion_times));
cudaTest(hipMalloc((void **) &d_task_deleted, mem_size_task_deleted));
cudaTest(hipMalloc((void **) &d_task_map, mem_size_task_map));
cudaTest(hipMalloc((void **) &d_completion_aux, mem_size_completion_aux));
// copy host memory to device
cudaTest(hipMemcpy(d_machines, machines, mem_size_machines, hipMemcpyHostToDevice));
cudaTest(hipMemcpy(d_completion_times, completion_times, mem_size_completion_times, hipMemcpyHostToDevice));
cudaTest(hipMemcpy(d_task_deleted, task_deleted, mem_size_task_deleted, hipMemcpyHostToDevice));
cudaTest(hipMemcpy(d_task_map, task_map, mem_size_task_map, hipMemcpyHostToDevice));
hipEventRecord(start);
// One scheduling step per task; only the first (per-block) reduction
// stage is active — the rest of the pipeline is commented out.
for(int k = 0; k < t; k++) {
dimCol = (t-1)/BLOCK_SIZE + 1;
dimRow = (m-1)/BLOCK_SIZE + 1;
dim3 dimB1(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimG1(dimCol, dimRow);
hipLaunchKernelGGL(( calc_completion_times), dim3(dimG1), dim3(dimB1), BLOCK_SIZE * BLOCK_SIZE * sizeof(Reduce) , 0,
d_machines, d_completion_times, d_task_deleted, d_completion_aux, m, t, MAX_FLOAT);
/* Reduce *completion_aux = (Reduce *) malloc(mem_size_completion_aux);
cudaTest(hipMemcpy(completion_aux, d_completion_aux, mem_size_completion_aux, hipMemcpyDeviceToHost));
print(completion_aux, dimRow, dimCol);*/
/* dimCol = (dimCol-1)/BLOCK_SIZE + 1;
dimRow = (dimRow-1)/BLOCK_SIZE + 1;
for( ; dimRow > BLOCK_SIZE; dimRow/=BLOCK_SIZE) {
dim3 dimG2(dimCol, dimRow);
reduction_two_dimensional<<<dimG2, dimB1, BLOCK_SIZE * BLOCK_SIZE * sizeof(Reduce) >>> (d_completion_aux, t);
dimCol /= BLOCK_SIZE;
}
dim3 dimB3(BLOCK_SIZE, dimRow);
dim3 dimG3(dimCol);
reduction_two_dimensional<<<dimG3, dimB3, dimRow * BLOCK_SIZE * sizeof(Reduce) >>> (d_completion_aux, t);
cudaTest(hipMemcpy(completion_aux, d_completion_aux, mem_size_completion_aux, hipMemcpyDeviceToHost));
print(completion_aux, dimRow, dimCol);
for( ; dimCol > BLOCK_SIZE; dimCol/=BLOCK_SIZE) {
dim3 dimB4(BLOCK_SIZE);
dim3 dimG4(dimCol);
reduction<<<dimG4, dimB4, BLOCK_SIZE * sizeof(Reduce) >>> (d_completion_aux);
}
dim3 dimB5(dimCol);
dim3 dimG5(1);
block_reduction<<<dimG5, dimB5, dimCol * sizeof(Reduce) >>> (d_completion_times, d_task_map, d_task_deleted,
d_completion_aux, t);*/
}
hipEventRecord(stop);
// Surface both launch-time and asynchronous kernel errors.
hipError_t errSync = hipGetLastError();
hipError_t errAsync = hipDeviceSynchronize();
if (errSync != hipSuccess)
printf("Sync kernel error: %s\n", hipGetErrorString(errSync));
if (errAsync != hipSuccess)
printf("Async kernel error: %s\n", hipGetErrorString(errAsync));
if (ELAPSED_TIME == 1) {
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
std::cout << milliseconds << "\n";
}
hipDeviceSynchronize();
// Results back to the host (only completion_times is printed below).
cudaTest(hipMemcpy(completion_times, d_completion_times, mem_size_completion_times, hipMemcpyDeviceToHost));
cudaTest(hipMemcpy(task_map, d_task_map, mem_size_task_map, hipMemcpyDeviceToHost));
hipFree(d_machines);
hipFree(d_completion_times);
hipFree(d_task_map);
hipFree(d_task_deleted);
hipFree(d_completion_aux);
if (ELAPSED_TIME != 1) {
//print(machines, m, t);
//print(task_index, m, t);
print(completion_times, m);
}
free(task_deleted);
free(task_map);
free(machines);
free(completion_times);
return 0;
}
/*
*
*
__global__ void block_reduction_two_dimensional(float* completion_times,
bool* task_map, bool* task_deleted, Reduce* completion_aux, int t) {
extern __shared__ Reduce s_comp_times[];
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int tIdrow = threadIdx.y;
int tIdcol = threadIdx.x;
int iglobal = row * t + col;
int ilocal = tIdrow * blockDim.x + tIdcol;
s_comp_times[ilocal].t = completion_aux[iglobal].t;
s_comp_times[ilocal].m = completion_aux[iglobal].m;
s_comp_times[ilocal].value = completion_aux[iglobal].value;
__syncthreads();
for(int e = blockDim.x/2; e > 0; e/=2)
{
if (tIdcol < e) {
if ((s_comp_times[ilocal + e].value < s_comp_times[ilocal].value)
|| (s_comp_times[ilocal + e].value == s_comp_times[ilocal].value
&& s_comp_times[ilocal + e].t < s_comp_times[ilocal].t)) {
s_comp_times[ilocal].t = s_comp_times[ilocal + e].t;
s_comp_times[ilocal].m = s_comp_times[ilocal + e].m;
s_comp_times[ilocal].value = s_comp_times[ilocal + e].value;
}
}
__syncthreads();
}
if(tIdcol == 0) {
for(int e = blockDim.y/2; e > 0; e/=2)
{
if (tIdrow < e) {
if ((s_comp_times[ilocal + e * BLOCK_SIZE].value < s_comp_times[ilocal].value)
|| (s_comp_times[ilocal + e * BLOCK_SIZE].value == s_comp_times[ilocal].value
&& s_comp_times[ilocal + e * BLOCK_SIZE].t < s_comp_times[ilocal].t)) {
s_comp_times[ilocal].t = s_comp_times[ilocal + e * BLOCK_SIZE].t;
s_comp_times[ilocal].m = s_comp_times[ilocal + e * BLOCK_SIZE].m;
s_comp_times[ilocal].value = s_comp_times[ilocal + e * BLOCK_SIZE].value;
}
}
__syncthreads();
}
}
if(tIdrow == 0 && tIdcol == 0) {
task_deleted[ s_comp_times[0].t ] = true;
task_map[ s_comp_times[0].m * t + s_comp_times[0].t ] = true;
completion_times[ s_comp_times[0].m ] = s_comp_times[0].value;
}
}
__global__ void calc_completion_times(float* machines, float* completion_times, bool *task_deleted,
Reduce* completion_aux, int m, int t, float MAX_FLOAT) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(!task_deleted[i]) {
for (int j = 0; j < m; j++) {
completion_aux[j * t + i].t = i;
completion_aux[j * t + i].m = j;
completion_aux[j * t + i].value = completion_times[j] + machines[j * t + i];
}
}
else {
for (int j = 0; j < m; j++) {
completion_aux[j * t + i].t = i;
completion_aux[j * t + i].m = j;
completion_aux[j * t + i].value = MAX_FLOAT;
}
}
}
__global__ void reduction(Reduce* d_completion_aux) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int tId = threadIdx.x;
extern __shared__ Reduce s_comp_times[];
s_comp_times[tId].t = d_completion_aux[i].t;
s_comp_times[tId].m = d_completion_aux[i].m;
s_comp_times[tId].value = d_completion_aux[i].value;
__syncthreads();
for(int e = blockDim.x/2; e > 0; e/=2)
{
if (tId < e) {
if ((s_comp_times[tId + e].value < s_comp_times[tId].value)
|| (s_comp_times[tId + e].value == s_comp_times[tId].value
&& s_comp_times[tId + e].t < s_comp_times[tId].t)) {
s_comp_times[tId].t = s_comp_times[tId + e].t;
s_comp_times[tId].m = s_comp_times[tId + e].m;
s_comp_times[tId].value = s_comp_times[tId + e].value;
}
}
__syncthreads();
}
if(tId == 0) {
d_completion_aux [blockIdx.x].t = s_comp_times[0].t;
d_completion_aux[blockIdx.x].m = s_comp_times[0].m;
d_completion_aux[blockIdx.x].value = s_comp_times[0].value;
}
}
__global__ void block_reduction(float* completion_times, bool* task_map, bool* task_deleted,
Reduce* d_completion_aux, int t) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int tId = threadIdx.x;
extern __shared__ Reduce s_comp_times[];
s_comp_times[tId].t = d_completion_aux[i].t;
s_comp_times[tId].m = d_completion_aux[i].m;
s_comp_times[tId].value = d_completion_aux[i].value;
__syncthreads();
for(int e = blockDim.x/2; e > 0; e/=2)
{
if (tId < e) {
if ((s_comp_times[tId + e].value < s_comp_times[tId].value)
|| (s_comp_times[tId + e].value == s_comp_times[tId].value
&& s_comp_times[tId + e].t < s_comp_times[tId].t)) {
s_comp_times[tId].t = s_comp_times[tId + e].t;
s_comp_times[tId].m = s_comp_times[tId + e].m;
s_comp_times[tId].value = s_comp_times[tId + e].value;
}
}
__syncthreads();
}
if(tId == 0) {
task_deleted[ s_comp_times[0].t ] = true;
task_map[ s_comp_times[0].m * t + s_comp_times[0].t ] = true;
completion_times[ s_comp_times[0].m ] = s_comp_times[0].value;
}
}
int main(int argc, char** argv) {
int t, m;
if (argc < 3) {
printf("Parameters missing: <number of tasks> <number of machines>\n\n");
return 0;
}
t = atoi(argv[1]);
m = atoi(argv[2]);
uint mem_size_machines = sizeof(float) * (m * t);
uint mem_size_completion_times = sizeof(float) * (m);
uint mem_size_task_deleted = sizeof(bool) * (t);
uint mem_size_task_map = sizeof(bool) * (m * t);
uint mem_size_completion_aux = sizeof(Reduce) * (m * t);
float *machines = (float *) malloc(mem_size_machines);
float *completion_times = (float *) malloc(mem_size_completion_times);
bool *task_deleted = (bool *) malloc(mem_size_task_deleted);
bool *task_map = (bool *) malloc(mem_size_task_map);
float aux;
for (int i = 0; i < t; i++) {
for (int j = 0; j < m; j++) {
int a = scanf("%f", &aux);
machines[j * t + i] = aux;
task_map[j * t + i] = false;
completion_times[j] = 0;
}
task_deleted[i] = false;
}
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float *d_machines, *d_completion_times;
bool *d_task_deleted, *d_task_map;
Reduce *d_completion_aux;
float MAX_FLOAT = std::numeric_limits<float>::max();
cudaTest(hipMalloc((void **) &d_machines, mem_size_machines));
cudaTest(hipMalloc((void **) &d_completion_times, mem_size_completion_times));
cudaTest(hipMalloc((void **) &d_task_deleted, mem_size_task_deleted));
cudaTest(hipMalloc((void **) &d_task_map, mem_size_task_map));
cudaTest(hipMalloc((void **) &d_completion_aux, mem_size_completion_aux));
// copy host memory to device
cudaTest(hipMemcpy(d_machines, machines, mem_size_machines, hipMemcpyHostToDevice));
cudaTest(hipMemcpy(d_completion_times, completion_times, mem_size_completion_times, hipMemcpyHostToDevice));
cudaTest(hipMemcpy(d_task_deleted, task_deleted, mem_size_task_deleted, hipMemcpyHostToDevice));
cudaTest(hipMemcpy(d_task_map, task_map, mem_size_task_map, hipMemcpyHostToDevice));
hipEventRecord(start);
for(int k = 0; k < t; k++) {
int dimG = t * m;
int dim = t/BLOCK_SIZE;
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(dim);
calc_completion_times<<<dimGrid, dimBlock>>>
(d_machines, d_completion_times, d_task_deleted, d_completion_aux, m, t, MAX_FLOAT);
for( ; dimG > BLOCK_SIZE; dimG/=BLOCK_SIZE) {
dim3 block(BLOCK_SIZE);
dim3 grid_b(dimG/BLOCK_SIZE);
reduction<<<grid_b, block, BLOCK_SIZE * sizeof(Reduce) >>>
(d_completion_aux);
}
dim3 block(dimG);
dim3 grid_b(1);
block_reduction<<<grid_b, block, dimG * sizeof(Reduce) >>> (d_completion_times, d_task_map, d_task_deleted,
d_completion_aux, t);
}
hipEventRecord(stop);
hipError_t errSync = hipGetLastError();
hipError_t errAsync = hipDeviceSynchronize();
if (errSync != hipSuccess)
printf("Sync kernel error: %s\n", hipGetErrorString(errSync));
if (errAsync != hipSuccess)
printf("Async kernel error: %s\n", hipGetErrorString(errAsync));
if (ELAPSED_TIME == 1) {
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
std::cout << milliseconds << "\n";
}
hipDeviceSynchronize();
cudaTest(hipMemcpy(completion_times, d_completion_times, mem_size_completion_times, hipMemcpyDeviceToHost));
cudaTest(hipMemcpy(task_map, d_task_map, mem_size_task_map, hipMemcpyDeviceToHost));
hipFree(d_machines);
hipFree(d_completion_times);
hipFree(d_task_map);
hipFree(d_task_deleted);
hipFree(d_completion_aux);
if (ELAPSED_TIME != 1) {
//print(machines, m, t);
//print(task_index, m, t);
print(completion_times, m);
}
free(task_deleted);
free(task_map);
free(machines);
free(completion_times);
return 0;
}
*
*/
| e616cfc818d9d25c647c33fc0404ba9f0a69282d.cu | /*
============================================================================
Name : sorting_segments.cu
Author : Rafael Schmid
Version :
Copyright : Your copyright notice
Description : Compute sum of reciprocals using STL on CPU and Thrust on GPU
============================================================================
COMPILAR USANDO O SEGUINTE COMANDO:
nvcc segmented_sort.cu -o segmented_sort -std=c++11 --expt-extended-lambda -I"/home/schmid/Dropbox/Unicamp/workspace/sorting_segments/moderngpu-master/src"
*/
#include <moderngpu/kernel_segsort.hxx>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <cuda.h>
#ifndef ELAPSED_TIME
#define ELAPSED_TIME 0
#endif
#ifndef BLOCK_SIZE
#define BLOCK_SIZE 32
#endif
// Record flowing through the parallel min-reductions: a candidate
// (task, machine) assignment together with its completion-time value.
struct Reduce {
	uint t;       // task index
	uint m;       // machine index
	float value;  // candidate completion time

	Reduce(uint index_t, uint index_m, float value)
			: t(index_t), m(index_m), value(value) {
	}

	Reduce()
			: t(0), m(0), value(0.0f) {
	}
} myobj;
// Debug dump: prints a t x m table of Reduce records to stdout,
// one row per task, fields labelled and columns separated by "\t||".
void print(Reduce* vec, uint t, uint m) {
	std::cout << "\n";
	for (uint row = 0; row < t; row++) {
		for (uint col = 0; col < m; col++) {
			const Reduce& r = vec[row * m + col];
			std::cout << "t=" << r.t << " m=" << r.m
					<< " value=" << r.value << "\t||";
		}
		std::cout << "\n";
	}
}
// Aborts the process with a diagnostic when a CUDA runtime call fails.
// NOTE(review): __LINE__ expands here, inside the helper, so the printed
// line number never points at the failing call site — a macro wrapper
// would be needed to report the caller's line.
void cudaTest(cudaError_t error) {
	if (error == cudaSuccess)
		return;
	printf("cuda returned error %s (code %d), line(%d)\n",
			cudaGetErrorString(error), error, __LINE__);
	exit (EXIT_FAILURE);
}
// Prints a t x m matrix row by row, values separated by single spaces.
template<typename T>
void print(T* vec, uint t, uint m) {
	std::cout << "\n";
	for (uint row = 0; row < t; row++) {
		for (uint col = 0; col < m; col++)
			std::cout << vec[row * m + col] << " ";
		std::cout << "\n";
	}
}
// Prints t values on a single line, separated by single spaces.
template<typename T>
void print(T* vec, uint t) {
	std::cout << "\n";
	for (uint idx = 0; idx < t; idx++)
		std::cout << vec[idx] << " ";
	std::cout << "\n";
}
// Per-iteration min-search kernel of the scheduler.
//
// Launch: gridDim = (ceil(t/B), ceil(m/B)), blockDim = (B, B), dynamic
// shared memory = blockDim.x * blockDim.y * sizeof(Reduce).
// Each thread evaluates one (machine=row, task=col) cell: the machine's
// current completion time plus that task's cost on it. Deleted tasks (and,
// fixed here, out-of-range cells in edge blocks) contribute a MAX_FLOAT
// sentinel so they can never win. The block then reduces to the record
// with the smallest value (ties broken by the smaller task index) and
// writes one Reduce per block to completion_aux[blockIdx.y*gridDim.x+blockIdx.x].
__global__ void calc_completion_times(float* machines, float* completion_times, bool *task_deleted,
Reduce* completion_aux, int m, int t, float MAX_FLOAT) {
	extern __shared__ Reduce s_comp_times[];
	int row = blockIdx.y * blockDim.y + threadIdx.y;
	int col = blockIdx.x * blockDim.x + threadIdx.x;
	int tIdrow = threadIdx.y;
	int tIdcol = threadIdx.x;
	int iglobal = row * t + col;
	int ilocal = tIdrow * blockDim.x + tIdcol;

	// Bounds guard: when t or m is not a multiple of the block edge, edge
	// blocks contain threads outside the matrix. The original indexed
	// task_deleted[col] / machines[iglobal] out of bounds for them; those
	// threads now load the sentinel instead.
	if (row < m && col < t && !task_deleted[col]) {
		s_comp_times[ilocal].t = col;
		s_comp_times[ilocal].m = row;
		s_comp_times[ilocal].value = completion_times[row] + machines[iglobal];
	}
	else {
		s_comp_times[ilocal].t = col;
		s_comp_times[ilocal].m = row;
		s_comp_times[ilocal].value = MAX_FLOAT;
	}
	__syncthreads();

	// Phase 1: reduce along x within each row of the block.
	for(int e = blockDim.x/2; e > 0; e/=2)
	{
		if (tIdcol < e) {
			if ((s_comp_times[ilocal + e].value < s_comp_times[ilocal].value)
					|| (s_comp_times[ilocal + e].value == s_comp_times[ilocal].value
							&& s_comp_times[ilocal + e].t < s_comp_times[ilocal].t)) {
				s_comp_times[ilocal].t = s_comp_times[ilocal + e].t;
				s_comp_times[ilocal].m = s_comp_times[ilocal + e].m;
				s_comp_times[ilocal].value = s_comp_times[ilocal + e].value;
			}
		}
		__syncthreads();
	}

	// Phase 2: reduce along y over the column-0 partials. __syncthreads()
	// must be reached by every thread in the block, so the original's
	// divergent `if (tIdcol == 0) { ... __syncthreads(); }` wrapper
	// (undefined behavior) is folded into the per-iteration predicate.
	for(int e = blockDim.y/2; e > 0; e/=2)
	{
		if (tIdcol == 0 && tIdrow < e) {
			if ((s_comp_times[ilocal + e * blockDim.x].value < s_comp_times[ilocal].value)
					|| (s_comp_times[ilocal + e * blockDim.x].value == s_comp_times[ilocal].value
							&& s_comp_times[ilocal + e * blockDim.x].t < s_comp_times[ilocal].t)) {
				s_comp_times[ilocal].t = s_comp_times[ilocal + e * blockDim.x].t;
				s_comp_times[ilocal].m = s_comp_times[ilocal + e * blockDim.x].m;
				s_comp_times[ilocal].value = s_comp_times[ilocal + e * blockDim.x].value;
			}
		}
		__syncthreads();
	}

	// Thread (0,0) publishes this block's winner.
	if(tIdrow == 0 && tIdcol == 0) {
		iglobal = blockIdx.y * gridDim.x + blockIdx.x;
		completion_aux[iglobal].t = s_comp_times[0].t;
		completion_aux[iglobal].m = s_comp_times[0].m;
		completion_aux[iglobal].value = s_comp_times[0].value;
	}
}
// Folds the per-block partial minima produced by calc_completion_times
// further down: each block min-reduces a 2D tile of Reduce records and
// overwrites completion_aux[blockIdx.y * gridDim.x + blockIdx.x] with its
// winner (smallest value; ties go to the smaller task index).
// NOTE(review): the y-phase strides by BLOCK_SIZE, so callers must launch
// with blockDim.x == BLOCK_SIZE (the commented-out call sites do).
__global__ void reduction_two_dimensional(Reduce* completion_aux, int t) {
	extern __shared__ Reduce s_comp_times[];
	int row = blockIdx.y * blockDim.y + threadIdx.y;
	int col = blockIdx.x * blockDim.x + threadIdx.x;
	int tIdrow = threadIdx.y;
	int tIdcol = threadIdx.x;
	int iglobal = row * t + col;
	int ilocal = tIdrow * blockDim.x + tIdcol;

	// Stage this thread's record in shared memory.
	s_comp_times[ilocal].t = completion_aux[iglobal].t;
	s_comp_times[ilocal].m = completion_aux[iglobal].m;
	s_comp_times[ilocal].value = completion_aux[iglobal].value;
	__syncthreads();

	// Phase 1: reduce along x.
	for(int e = blockDim.x/2; e > 0; e/=2)
	{
		if (tIdcol < e) {
			if ((s_comp_times[ilocal + e].value < s_comp_times[ilocal].value)
					|| (s_comp_times[ilocal + e].value == s_comp_times[ilocal].value
							&& s_comp_times[ilocal + e].t < s_comp_times[ilocal].t)) {
				s_comp_times[ilocal].t = s_comp_times[ilocal + e].t;
				s_comp_times[ilocal].m = s_comp_times[ilocal + e].m;
				s_comp_times[ilocal].value = s_comp_times[ilocal + e].value;
			}
		}
		__syncthreads();
	}

	// Phase 2: reduce along y. The barrier is hoisted out of the original's
	// divergent `if (tIdcol == 0)` wrapper: __syncthreads() inside a branch
	// not taken by the whole block is undefined behavior.
	for(int e = blockDim.y/2; e > 0; e/=2)
	{
		if (tIdcol == 0 && tIdrow < e) {
			if ((s_comp_times[ilocal + e * BLOCK_SIZE].value < s_comp_times[ilocal].value)
					|| (s_comp_times[ilocal + e * BLOCK_SIZE].value == s_comp_times[ilocal].value
							&& s_comp_times[ilocal + e * BLOCK_SIZE].t < s_comp_times[ilocal].t)) {
				s_comp_times[ilocal].t = s_comp_times[ilocal + e * BLOCK_SIZE].t;
				s_comp_times[ilocal].m = s_comp_times[ilocal + e * BLOCK_SIZE].m;
				s_comp_times[ilocal].value = s_comp_times[ilocal + e * BLOCK_SIZE].value;
			}
		}
		__syncthreads();
	}

	// First thread writes the block result back in place.
	if(tIdrow == 0 && tIdcol == 0) {
		iglobal = blockIdx.y * gridDim.x + blockIdx.x;
		completion_aux[iglobal].t = s_comp_times[0].t;
		completion_aux[iglobal].m = s_comp_times[0].m;
		completion_aux[iglobal].value = s_comp_times[0].value;
	}
}
// One-dimensional min-reduction: each block collapses blockDim.x
// consecutive Reduce records into the single best one (smallest value;
// ties go to the smaller task index) and stores it at
// d_completion_aux[blockIdx.x].
// Requires blockDim.x * sizeof(Reduce) bytes of dynamic shared memory.
__global__ void reduction(Reduce* d_completion_aux) {
	extern __shared__ Reduce s_records[];

	const int lane = threadIdx.x;
	const int gid = blockIdx.x * blockDim.x + lane;

	// Stage one record per thread in shared memory.
	s_records[lane] = d_completion_aux[gid];
	__syncthreads();

	// Classic tree reduction: halve the active span each pass.
	for (int span = blockDim.x / 2; span > 0; span /= 2) {
		if (lane < span) {
			Reduce other = s_records[lane + span];
			if (other.value < s_records[lane].value
					|| (other.value == s_records[lane].value
							&& other.t < s_records[lane].t)) {
				s_records[lane] = other;
			}
		}
		__syncthreads();
	}

	// Lane 0 publishes the block winner over this block's slot.
	if (lane == 0)
		d_completion_aux[blockIdx.x] = s_records[0];
}
// Final reduction stage: collapses the remaining candidates into a single
// winner and commits the assignment — marks the winning task deleted,
// flags task_map[machine * t + task], and records the machine's new
// completion time. Intended to run as a single block covering all
// remaining candidates, with blockDim.x * sizeof(Reduce) shared bytes.
__global__ void block_reduction(float* completion_times, bool* task_map, bool* task_deleted,
Reduce* d_completion_aux, int t) {
	extern __shared__ Reduce s_records[];

	const int lane = threadIdx.x;
	const int gid = blockIdx.x * blockDim.x + lane;

	// Stage one record per thread.
	s_records[lane] = d_completion_aux[gid];
	__syncthreads();

	// Tree reduction keeping the smaller value (smaller task on ties).
	for (int span = blockDim.x / 2; span > 0; span /= 2) {
		if (lane < span) {
			Reduce other = s_records[lane + span];
			if (other.value < s_records[lane].value
					|| (other.value == s_records[lane].value
							&& other.t < s_records[lane].t)) {
				s_records[lane] = other;
			}
		}
		__syncthreads();
	}

	// Commit the winning (task, machine) pair.
	if (lane == 0) {
		Reduce best = s_records[0];
		task_deleted[best.t] = true;
		task_map[best.m * t + best.t] = true;
		completion_times[best.m] = best.value;
	}
}
// Greedy scheduling driver (Min-Min style skeleton), CUDA build.
//
// stdin: t (tasks), m (machines), then t*m costs; the cost of task i on
// machine j is stored transposed as machines[j * t + i].
// Each of the t host iterations launches calc_completion_times, which
// leaves one per-block minimum in d_completion_aux. The follow-up
// reduction / block_reduction stages are commented out below, so
// task_deleted, task_map and the completion times are never updated
// between iterations in this version.
// Output: kernel-loop time when ELAPSED_TIME == 1, else completion times.
int main(int argc, char** argv) {
int t, m;
/*if (argc < 3) {
printf("Parameters missing: <number of tasks> <number of machines>\n\n");
return 0;
}
t = atoi(argv[1]);
m = atoi(argv[2]);
*/
// NOTE(review): scanf results are stored but never validated; malformed
// input leaves t/m and the cost matrix indeterminate.
int a = scanf("%d", &t);
a = scanf("%d", &m);
// Buffer sizes in bytes.
uint mem_size_machines = sizeof(float) * (m * t);
uint mem_size_completion_times = sizeof(float) * (m);
uint mem_size_task_deleted = sizeof(bool) * (t);
uint mem_size_task_map = sizeof(bool) * (m * t);
// One Reduce partial per BLOCK_SIZE x BLOCK_SIZE grid block (ceil-div).
int dimCol = (t-1)/BLOCK_SIZE + 1;
int dimRow = (m-1)/BLOCK_SIZE + 1;
uint mem_size_completion_aux = sizeof(Reduce) * (dimCol * dimRow);
// Host-side state (malloc results are not checked).
float *machines = (float *) malloc(mem_size_machines);
float *completion_times = (float *) malloc(mem_size_completion_times);
bool *task_deleted = (bool *) malloc(mem_size_task_deleted);
bool *task_map = (bool *) malloc(mem_size_task_map);
float aux;
// Read the cost matrix (task-major on stdin, machine-major in memory)
// and zero-initialize the bookkeeping arrays.
for (int i = 0; i < t; i++) {
for (int j = 0; j < m; j++) {
int a = scanf("%f", &aux);
machines[j * t + i] = aux;
task_map[j * t + i] = false;
completion_times[j] = 0;
}
task_deleted[i] = false;
}
// Events bracket the kernel loop for the optional timing output.
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float *d_machines, *d_completion_times;
bool *d_task_deleted, *d_task_map;
Reduce *d_completion_aux;
// Sentinel the kernels use to mark deleted tasks as "never the minimum".
float MAX_FLOAT = std::numeric_limits<float>::max();
cudaTest(cudaMalloc((void **) &d_machines, mem_size_machines));
cudaTest(cudaMalloc((void **) &d_completion_times, mem_size_completion_times));
cudaTest(cudaMalloc((void **) &d_task_deleted, mem_size_task_deleted));
cudaTest(cudaMalloc((void **) &d_task_map, mem_size_task_map));
cudaTest(cudaMalloc((void **) &d_completion_aux, mem_size_completion_aux));
// copy host memory to device
cudaTest(cudaMemcpy(d_machines, machines, mem_size_machines, cudaMemcpyHostToDevice));
cudaTest(cudaMemcpy(d_completion_times, completion_times, mem_size_completion_times, cudaMemcpyHostToDevice));
cudaTest(cudaMemcpy(d_task_deleted, task_deleted, mem_size_task_deleted, cudaMemcpyHostToDevice));
cudaTest(cudaMemcpy(d_task_map, task_map, mem_size_task_map, cudaMemcpyHostToDevice));
cudaEventRecord(start);
// One scheduling step per task; only the first (per-block) reduction
// stage is active — the rest of the pipeline is commented out.
for(int k = 0; k < t; k++) {
dimCol = (t-1)/BLOCK_SIZE + 1;
dimRow = (m-1)/BLOCK_SIZE + 1;
dim3 dimB1(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimG1(dimCol, dimRow);
calc_completion_times<<<dimG1, dimB1, BLOCK_SIZE * BLOCK_SIZE * sizeof(Reduce) >>>
(d_machines, d_completion_times, d_task_deleted, d_completion_aux, m, t, MAX_FLOAT);
/* Reduce *completion_aux = (Reduce *) malloc(mem_size_completion_aux);
cudaTest(cudaMemcpy(completion_aux, d_completion_aux, mem_size_completion_aux, cudaMemcpyDeviceToHost));
print(completion_aux, dimRow, dimCol);*/
/* dimCol = (dimCol-1)/BLOCK_SIZE + 1;
dimRow = (dimRow-1)/BLOCK_SIZE + 1;
for( ; dimRow > BLOCK_SIZE; dimRow/=BLOCK_SIZE) {
dim3 dimG2(dimCol, dimRow);
reduction_two_dimensional<<<dimG2, dimB1, BLOCK_SIZE * BLOCK_SIZE * sizeof(Reduce) >>> (d_completion_aux, t);
dimCol /= BLOCK_SIZE;
}
dim3 dimB3(BLOCK_SIZE, dimRow);
dim3 dimG3(dimCol);
reduction_two_dimensional<<<dimG3, dimB3, dimRow * BLOCK_SIZE * sizeof(Reduce) >>> (d_completion_aux, t);
cudaTest(cudaMemcpy(completion_aux, d_completion_aux, mem_size_completion_aux, cudaMemcpyDeviceToHost));
print(completion_aux, dimRow, dimCol);
for( ; dimCol > BLOCK_SIZE; dimCol/=BLOCK_SIZE) {
dim3 dimB4(BLOCK_SIZE);
dim3 dimG4(dimCol);
reduction<<<dimG4, dimB4, BLOCK_SIZE * sizeof(Reduce) >>> (d_completion_aux);
}
dim3 dimB5(dimCol);
dim3 dimG5(1);
block_reduction<<<dimG5, dimB5, dimCol * sizeof(Reduce) >>> (d_completion_times, d_task_map, d_task_deleted,
d_completion_aux, t);*/
}
cudaEventRecord(stop);
// Surface both launch-time and asynchronous kernel errors.
cudaError_t errSync = cudaGetLastError();
cudaError_t errAsync = cudaDeviceSynchronize();
if (errSync != cudaSuccess)
printf("Sync kernel error: %s\n", cudaGetErrorString(errSync));
if (errAsync != cudaSuccess)
printf("Async kernel error: %s\n", cudaGetErrorString(errAsync));
if (ELAPSED_TIME == 1) {
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
std::cout << milliseconds << "\n";
}
cudaDeviceSynchronize();
// Results back to the host (only completion_times is printed below).
cudaTest(cudaMemcpy(completion_times, d_completion_times, mem_size_completion_times, cudaMemcpyDeviceToHost));
cudaTest(cudaMemcpy(task_map, d_task_map, mem_size_task_map, cudaMemcpyDeviceToHost));
cudaFree(d_machines);
cudaFree(d_completion_times);
cudaFree(d_task_map);
cudaFree(d_task_deleted);
cudaFree(d_completion_aux);
if (ELAPSED_TIME != 1) {
//print(machines, m, t);
//print(task_index, m, t);
print(completion_times, m);
}
free(task_deleted);
free(task_map);
free(machines);
free(completion_times);
return 0;
}
/*
*
*
__global__ void block_reduction_two_dimensional(float* completion_times,
bool* task_map, bool* task_deleted, Reduce* completion_aux, int t) {
extern __shared__ Reduce s_comp_times[];
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int tIdrow = threadIdx.y;
int tIdcol = threadIdx.x;
int iglobal = row * t + col;
int ilocal = tIdrow * blockDim.x + tIdcol;
s_comp_times[ilocal].t = completion_aux[iglobal].t;
s_comp_times[ilocal].m = completion_aux[iglobal].m;
s_comp_times[ilocal].value = completion_aux[iglobal].value;
__syncthreads();
for(int e = blockDim.x/2; e > 0; e/=2)
{
if (tIdcol < e) {
if ((s_comp_times[ilocal + e].value < s_comp_times[ilocal].value)
|| (s_comp_times[ilocal + e].value == s_comp_times[ilocal].value
&& s_comp_times[ilocal + e].t < s_comp_times[ilocal].t)) {
s_comp_times[ilocal].t = s_comp_times[ilocal + e].t;
s_comp_times[ilocal].m = s_comp_times[ilocal + e].m;
s_comp_times[ilocal].value = s_comp_times[ilocal + e].value;
}
}
__syncthreads();
}
if(tIdcol == 0) {
for(int e = blockDim.y/2; e > 0; e/=2)
{
if (tIdrow < e) {
if ((s_comp_times[ilocal + e * BLOCK_SIZE].value < s_comp_times[ilocal].value)
|| (s_comp_times[ilocal + e * BLOCK_SIZE].value == s_comp_times[ilocal].value
&& s_comp_times[ilocal + e * BLOCK_SIZE].t < s_comp_times[ilocal].t)) {
s_comp_times[ilocal].t = s_comp_times[ilocal + e * BLOCK_SIZE].t;
s_comp_times[ilocal].m = s_comp_times[ilocal + e * BLOCK_SIZE].m;
s_comp_times[ilocal].value = s_comp_times[ilocal + e * BLOCK_SIZE].value;
}
}
__syncthreads();
}
}
if(tIdrow == 0 && tIdcol == 0) {
task_deleted[ s_comp_times[0].t ] = true;
task_map[ s_comp_times[0].m * t + s_comp_times[0].t ] = true;
completion_times[ s_comp_times[0].m ] = s_comp_times[0].value;
}
}
__global__ void calc_completion_times(float* machines, float* completion_times, bool *task_deleted,
Reduce* completion_aux, int m, int t, float MAX_FLOAT) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(!task_deleted[i]) {
for (int j = 0; j < m; j++) {
completion_aux[j * t + i].t = i;
completion_aux[j * t + i].m = j;
completion_aux[j * t + i].value = completion_times[j] + machines[j * t + i];
}
}
else {
for (int j = 0; j < m; j++) {
completion_aux[j * t + i].t = i;
completion_aux[j * t + i].m = j;
completion_aux[j * t + i].value = MAX_FLOAT;
}
}
}
__global__ void reduction(Reduce* d_completion_aux) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int tId = threadIdx.x;
extern __shared__ Reduce s_comp_times[];
s_comp_times[tId].t = d_completion_aux[i].t;
s_comp_times[tId].m = d_completion_aux[i].m;
s_comp_times[tId].value = d_completion_aux[i].value;
__syncthreads();
for(int e = blockDim.x/2; e > 0; e/=2)
{
if (tId < e) {
if ((s_comp_times[tId + e].value < s_comp_times[tId].value)
|| (s_comp_times[tId + e].value == s_comp_times[tId].value
&& s_comp_times[tId + e].t < s_comp_times[tId].t)) {
s_comp_times[tId].t = s_comp_times[tId + e].t;
s_comp_times[tId].m = s_comp_times[tId + e].m;
s_comp_times[tId].value = s_comp_times[tId + e].value;
}
}
__syncthreads();
}
if(tId == 0) {
d_completion_aux [blockIdx.x].t = s_comp_times[0].t;
d_completion_aux[blockIdx.x].m = s_comp_times[0].m;
d_completion_aux[blockIdx.x].value = s_comp_times[0].value;
}
}
__global__ void block_reduction(float* completion_times, bool* task_map, bool* task_deleted,
Reduce* d_completion_aux, int t) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int tId = threadIdx.x;
extern __shared__ Reduce s_comp_times[];
s_comp_times[tId].t = d_completion_aux[i].t;
s_comp_times[tId].m = d_completion_aux[i].m;
s_comp_times[tId].value = d_completion_aux[i].value;
__syncthreads();
for(int e = blockDim.x/2; e > 0; e/=2)
{
if (tId < e) {
if ((s_comp_times[tId + e].value < s_comp_times[tId].value)
|| (s_comp_times[tId + e].value == s_comp_times[tId].value
&& s_comp_times[tId + e].t < s_comp_times[tId].t)) {
s_comp_times[tId].t = s_comp_times[tId + e].t;
s_comp_times[tId].m = s_comp_times[tId + e].m;
s_comp_times[tId].value = s_comp_times[tId + e].value;
}
}
__syncthreads();
}
if(tId == 0) {
task_deleted[ s_comp_times[0].t ] = true;
task_map[ s_comp_times[0].m * t + s_comp_times[0].t ] = true;
completion_times[ s_comp_times[0].m ] = s_comp_times[0].value;
}
}
int main(int argc, char** argv) {
int t, m;
if (argc < 3) {
printf("Parameters missing: <number of tasks> <number of machines>\n\n");
return 0;
}
t = atoi(argv[1]);
m = atoi(argv[2]);
uint mem_size_machines = sizeof(float) * (m * t);
uint mem_size_completion_times = sizeof(float) * (m);
uint mem_size_task_deleted = sizeof(bool) * (t);
uint mem_size_task_map = sizeof(bool) * (m * t);
uint mem_size_completion_aux = sizeof(Reduce) * (m * t);
float *machines = (float *) malloc(mem_size_machines);
float *completion_times = (float *) malloc(mem_size_completion_times);
bool *task_deleted = (bool *) malloc(mem_size_task_deleted);
bool *task_map = (bool *) malloc(mem_size_task_map);
float aux;
for (int i = 0; i < t; i++) {
for (int j = 0; j < m; j++) {
int a = scanf("%f", &aux);
machines[j * t + i] = aux;
task_map[j * t + i] = false;
completion_times[j] = 0;
}
task_deleted[i] = false;
}
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float *d_machines, *d_completion_times;
bool *d_task_deleted, *d_task_map;
Reduce *d_completion_aux;
float MAX_FLOAT = std::numeric_limits<float>::max();
cudaTest(cudaMalloc((void **) &d_machines, mem_size_machines));
cudaTest(cudaMalloc((void **) &d_completion_times, mem_size_completion_times));
cudaTest(cudaMalloc((void **) &d_task_deleted, mem_size_task_deleted));
cudaTest(cudaMalloc((void **) &d_task_map, mem_size_task_map));
cudaTest(cudaMalloc((void **) &d_completion_aux, mem_size_completion_aux));
// copy host memory to device
cudaTest(cudaMemcpy(d_machines, machines, mem_size_machines, cudaMemcpyHostToDevice));
cudaTest(cudaMemcpy(d_completion_times, completion_times, mem_size_completion_times, cudaMemcpyHostToDevice));
cudaTest(cudaMemcpy(d_task_deleted, task_deleted, mem_size_task_deleted, cudaMemcpyHostToDevice));
cudaTest(cudaMemcpy(d_task_map, task_map, mem_size_task_map, cudaMemcpyHostToDevice));
cudaEventRecord(start);
for(int k = 0; k < t; k++) {
int dimG = t * m;
int dim = t/BLOCK_SIZE;
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(dim);
calc_completion_times<<<dimGrid, dimBlock>>>
(d_machines, d_completion_times, d_task_deleted, d_completion_aux, m, t, MAX_FLOAT);
for( ; dimG > BLOCK_SIZE; dimG/=BLOCK_SIZE) {
dim3 block(BLOCK_SIZE);
dim3 grid_b(dimG/BLOCK_SIZE);
reduction<<<grid_b, block, BLOCK_SIZE * sizeof(Reduce) >>>
(d_completion_aux);
}
dim3 block(dimG);
dim3 grid_b(1);
block_reduction<<<grid_b, block, dimG * sizeof(Reduce) >>> (d_completion_times, d_task_map, d_task_deleted,
d_completion_aux, t);
}
cudaEventRecord(stop);
cudaError_t errSync = cudaGetLastError();
cudaError_t errAsync = cudaDeviceSynchronize();
if (errSync != cudaSuccess)
printf("Sync kernel error: %s\n", cudaGetErrorString(errSync));
if (errAsync != cudaSuccess)
printf("Async kernel error: %s\n", cudaGetErrorString(errAsync));
if (ELAPSED_TIME == 1) {
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
std::cout << milliseconds << "\n";
}
cudaDeviceSynchronize();
cudaTest(cudaMemcpy(completion_times, d_completion_times, mem_size_completion_times, cudaMemcpyDeviceToHost));
cudaTest(cudaMemcpy(task_map, d_task_map, mem_size_task_map, cudaMemcpyDeviceToHost));
cudaFree(d_machines);
cudaFree(d_completion_times);
cudaFree(d_task_map);
cudaFree(d_task_deleted);
cudaFree(d_completion_aux);
if (ELAPSED_TIME != 1) {
//print(machines, m, t);
//print(task_index, m, t);
print(completion_times, m);
}
free(task_deleted);
free(task_map);
free(machines);
free(completion_times);
return 0;
}
*
*/
|
c785465e95ea1644a1d15eae0275c4240043a1a2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//Udacity HW 4
//Radix Sorting
#include "utils.h"
/* Red Eye Removal
===============
For this assignment we are implementing red eye removal. This is
accomplished by first creating a score for every pixel that tells us how
likely it is to be a red eye pixel. We have already done this for you - you
are receiving the scores and need to sort them in ascending order so that we
know which pixels to alter to remove the red eye.
Note: ascending order == smallest to largest
Each score is associated with a position, when you sort the scores, you must
also move the positions accordingly.
Implementing Parallel Radix Sort with CUDA
==========================================
The basic idea is to construct a histogram on each pass of how many of each
"digit" there are. Then we scan this histogram so that we know where to put
the output of each digit. For example, the first 1 must come after all the
0s so we have to know how many 0s there are to be able to start moving 1s
into the correct position.
1) Histogram of the number of occurrences of each digit
2) Exclusive Prefix Sum of Histogram
3) Determine relative offset of each digit
For example [0 0 1 1 0 0 1]
-> [0 1 0 1 2 3 2]
4) Combine the results of steps 2 & 3 to determine the final
output location for each element and move it there
LSB Radix sort is an out-of-place sort and you will need to ping-pong values
between the input and output buffers we have provided. Make sure the final
sorted results end up in the output buffer! Hint: You may need to do a copy
at the end.
*/
// Take current digit of each element
//=====================================================================
// Extracts the radix digit of every element: the nBits-wide field whose
// least significant bit sits at `currentDigit`. nBins (= 1 << nBits) must
// be a power of two so (nBins - 1) works as a mask. One thread per element.
__global__ void takeCurrentDigit(unsigned int* inData, unsigned int* outDigit,
	int n, int currentDigit, int nBins)
{
	int idx = blockIdx.x * blockDim.x + threadIdx.x;
	if (idx >= n)
		return;
	outDigit[idx] = (inData[idx] >> currentDigit) & (nBins - 1);
}
// Histogram on digit
//=====================================================================
// Per-digit frequency count over the whole array: each thread atomically
// bumps the bucket for its element. `hist` buckets must be zeroed by the
// caller before launch.
__global__ void histogram(unsigned int* inData, unsigned int* hist, int n)
{
	int idx = blockDim.x * blockIdx.x + threadIdx.x;
	if (idx >= n)
		return;
	atomicAdd(hist + inData[idx], 1u);
}
// Inclusive scan
//=====================================================================
// Block-local inclusive prefix sum (work-efficient reduce / post-reduce
// sweep). Each thread owns two consecutive elements, so one block scans
// 2*blockDim.x values held in dynamic shared memory; the launch must pass
// 2*blockDim.x*sizeof(unsigned int) as the shared-memory size.
// Outputs:
//   outData[i]     - inclusive scan of inData, valid WITHIN each block only
//   outLastData[b] - total (last scanned value) of block b, so the caller
//                    can recursively scan the block sums and patch the
//                    partial results afterwards (see scanOnLargeSize /
//                    lastScan).
__global__ void inclusiveScan(unsigned int* inData, unsigned int* outData,
	unsigned int* outLastData, int n)
{
	// 1. Each block loads data from GMEM to SMEM
	// (each thread will load 2 elements)
	extern __shared__ unsigned int s_inScanData[]; // Its size will be 2*blockDim.x (elements)
	int idx1 = (blockDim.x * blockIdx.x + threadIdx.x) * 2;
	int idx2 = (blockDim.x * blockIdx.x + threadIdx.x) * 2 + 1;
	if (idx1 < n)
		s_inScanData[2*threadIdx.x] = inData[idx1];
	if (idx2 < n)
		s_inScanData[2*threadIdx.x+1] = inData[idx2];
	// NOTE(review): shared slots for indices >= n stay uninitialized in a
	// partial last block; both sweeps below only propagate sums toward
	// HIGHER indices, so in-range results remain correct, but zero-filling
	// the tail would be safer -- confirm.
	__syncthreads();
	// 2. Each block does scan with data on SMEM
	// 2.1. Reduction phase (upsweep): build partial sums at power-of-two strides
	for (int stride = 1; stride < 2 * blockDim.x; stride *= 2)
	{
		int s_idx = (threadIdx.x + 1) * 2 * stride - 1; // So active threads will be consecutive
		if (s_idx < 2 * blockDim.x)
			s_inScanData[s_idx] += s_inScanData[s_idx - stride];
		__syncthreads();
	}
	// 2.2. Post-reduction phase (downsweep): fill in the remaining prefixes
	for (int stride = blockDim.x / 2; stride > 0; stride /= 2)
	{
		int s_idx = (threadIdx.x + 1) * 2 * stride - 1 + stride;
		if (s_idx < 2 * blockDim.x)
			s_inScanData[s_idx] += s_inScanData[s_idx - stride];
		__syncthreads();
	}
	// 3. Each block writes result from SMEM to GMEM
	// (each thread will write 2 elements)
	if (idx1 < n)
		outData[idx1] = s_inScanData[2*threadIdx.x];
	if (idx2 < n)
		outData[idx2] = s_inScanData[2*threadIdx.x+1];
	// 4. Publish the block total for the recursion on large arrays: either
	// the last thread of a full block, or the thread owning element n-1 in
	// a partial final block (its second slot when n lands on an odd local
	// index, its first slot otherwise).
	if (threadIdx.x == (blockDim.x-1) || (blockIdx.x * blockDim.x + threadIdx.x) == (n-1)/2)
		if (idx2 < n)
			outLastData[blockIdx.x] = s_inScanData[2 * threadIdx.x + 1];
		else
			outLastData[blockIdx.x] = s_inScanData[2 * threadIdx.x];
}
// Add scanned block sum to the before scanned block sum
//=====================================================================
// Completes a multi-block prefix sum: every block except block 0 adds the
// scanned total of all preceding blocks (sums[blockIdx.x - 1]) to its own
// partial scan. Each thread patches the same two elements it owned during
// inclusiveScan.
__global__ void lastScan(unsigned int* odata, unsigned int* sums, const int n)
{
	if (blockIdx.x == 0)
		return;
	unsigned int carry = sums[blockIdx.x - 1];
	int base = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
	if (base < n)
		odata[base] += carry;
	if (base + 1 < n)
		odata[base + 1] += carry;
}
// Inclusive scan on large size array
//=====================================================================
// Host-side recursive driver for scanning device arrays larger than one
// block can handle:
//   1. scan each block locally (inclusiveScan), collecting per-block totals
//      into a temporary `outlast` buffer (one entry per block),
//   2. recursively scan those totals,
//   3. add each preceding scanned total back into the partial results
//      (lastScan).
// `in` and `out` are device pointers holding `n` elements. Temporary device
// buffers are allocated and freed at every recursion level.
void scanOnLargeSize(dim3 blockSize, dim3 gridSize, unsigned int *in, unsigned int *out, int n)
{
	dim3 _gridSize = gridSize;
	unsigned int *outlast;
	// Allocate device memory (one block-total slot per block)
	checkCudaErrors(hipMalloc((unsigned int**)&outlast, gridSize.x*sizeof(unsigned int)));
	// Launch scan kernel to find sum of each block (each thread handles 2 values)
	hipLaunchKernelGGL(( inclusiveScan), dim3(gridSize), dim3(blockSize), 2*blockSize.x*sizeof(unsigned int), 0, in, out, outlast, n);
	hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
	// Recurse until the scan fits in a single block
	if (_gridSize.x > 1)
	{
		int nsize = _gridSize.x;
		unsigned int *sums;
		// Grid for scanning the block totals themselves (2 elements/thread)
		_gridSize.x = (_gridSize.x-1)/2/blockSize.x+1;
		checkCudaErrors(hipMalloc((unsigned int**)&sums, nsize*sizeof(unsigned int)));
		scanOnLargeSize(blockSize, _gridSize, outlast, sums, nsize);
		// Add scanned block sums back into the partial per-block scans
		hipLaunchKernelGGL(( lastScan), dim3(gridSize), dim3(blockSize), 0, 0, out, sums, n);
		hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
		checkCudaErrors(hipFree(sums));
	}
	checkCudaErrors(hipFree(outlast));
}
// Compute relative offset of each digit
//=====================================================================
// Computes, for each element, its rank among elements sharing the same
// binary digit value (its "relative offset" within the digit class):
//   digit 0: idx - exScanData[idx]   (number of zeros before idx)
//   digit 1: exScanData[idx]         (number of ones  before idx)
// exScanData is the EXCLUSIVE prefix sum of the 0/1 digit array inData.
// BUG FIX: the old code added nZeros (the zero-class base) to 1-digit
// ranks here, while the scatter kernel then added exScanHist[1] (== nZeros)
// again -- double-counting the zero block and writing every 1-digit element
// out of bounds. The class base offset is applied exactly once, in scatter,
// via the exclusive-scanned histogram.
__global__ void determineRelativeOffset(unsigned int* inData, unsigned int* exScanData,
	unsigned int* outData, int n)
{
	int idx = blockDim.x * blockIdx.x + threadIdx.x;
	if (idx < n)
	{
		// Rank of this element within its own digit class only
		if (inData[idx] == 0)
			outData[idx] = idx - exScanData[idx];
		else
			outData[idx] = exScanData[idx];
	}
}
// Scatter values corresponding to its location
//=====================================================================
// Moves each element to its sorted slot for the current digit:
//   destination = start of this digit's class (exclusive-scanned histogram)
//               + the element's rank within that class (offset).
__global__ void scatter(unsigned int* inData, unsigned int* outData,
	unsigned int* inDigit, unsigned int* exScanHist,
	unsigned int* offset, int n)
{
	int idx = blockDim.x * blockIdx.x + threadIdx.x;
	if (idx >= n)
		return;
	unsigned int dst = exScanHist[inDigit[idx]] + offset[idx];
	outData[dst] = inData[idx];
}
//=====================================================================
// LSB radix sort of (value, position) pairs, 1 bit (nBits) per pass.
// Values and their positions are ping-ponged between the input and output
// device buffers; after the even number of passes (32 bits / 1 bit = 32)
// the sorted data sits back in the input buffers and is copied to the
// output buffers at the end.
// NOTE(review): determineRelativeOffset adds nZeros to 1-digit ranks while
// scatter also adds exScanHist[1] (the zero count); verify the digit-class
// base is not applied twice.
void your_sort(unsigned int* const d_inputVals,
	unsigned int* const d_inputPos,
	unsigned int* const d_outputVals,
	unsigned int* const d_outputPos,
	const size_t numElems)
{
	// Launch configurations: one thread per element for every kernel,
	// except the scan which handles two elements per thread.
	// for takeCurrentDigit kernel
	const dim3 blockDigit(256);
	const dim3 gridDigit((numElems-1)/blockDigit.x+1);
	// for histogram kernel
	const dim3 blockHist(256);
	const dim3 gridHist((numElems-1)/blockHist.x+1);
	// for inclusive scan kernel (2 elements per thread)
	const dim3 blockScan(512);
	const dim3 gridScan((numElems-1)/2/blockScan.x+1);
	// for compute offset kernel
	const dim3 blockOffset(256);
	const dim3 gridOffset((numElems-1)/blockOffset.x+1);
	// for scatter kernel
	const dim3 blockScatter(128);
	const dim3 gridScatter((numElems-1)/blockScatter.x+1);
	// Ping-pong pointers: swapped after each pass
	unsigned int* d_vals_src = d_inputVals;
	unsigned int* d_pos_src = d_inputPos;
	unsigned int* d_vals_dst = d_outputVals;
	unsigned int* d_pos_dst = d_outputPos;
	// Radix width: 1 bit per pass -> 2 bins (0/1)
	unsigned int nBits = 1;
	unsigned int nBins = 1 << nBits;
	// Scratch buffers (device): current digits, their exclusive scan,
	// digit histogram, and per-element relative offsets
	unsigned int *digit, *exScanDigit;
	unsigned int *hist;
	unsigned int *offset;
	checkCudaErrors(hipMalloc((unsigned int**)&digit, numElems*sizeof(unsigned int)));
	checkCudaErrors(hipMalloc((unsigned int**)&exScanDigit, (numElems+1)*sizeof(unsigned int)));
	checkCudaErrors(hipMalloc((unsigned int**)&hist, (nBins+1)*sizeof(unsigned int)));
	checkCudaErrors(hipMalloc((unsigned int**)&offset, numElems*sizeof(unsigned int)));
	// Loop from LSD to MSD.
	// In each pass, sort values according to the current digit (STABLE sort).
	for (unsigned int i = 0; i < 8*sizeof(unsigned int); i += nBits)
	{
		checkCudaErrors(hipMemset(hist, 0, (nBins+1)*sizeof(unsigned int)));
		checkCudaErrors(hipMemset(exScanDigit, 0, (numElems+1)*sizeof(unsigned int)));
		// Step 1: Compute histogram of the current digit. Counts are written
		// at hist+1 with hist[0] kept zero, so hist[0..nBins-1] is already
		// the EXCLUSIVE scan of the counts for the 2-bin case.
		hipLaunchKernelGGL(( takeCurrentDigit), dim3(gridDigit), dim3(blockDigit), 0, 0, d_vals_src, digit, numElems, i, nBins);
		hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
		hipLaunchKernelGGL(( histogram), dim3(gridHist), dim3(blockHist), 0, 0, digit, hist+1, numElems);
		hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
		// Step 2: Compute relative offset. The inclusive scan is written at
		// exScanDigit+1 behind a zeroed slot 0, turning it into an exclusive
		// scan when read from exScanDigit.
		scanOnLargeSize(blockScan, gridScan, digit, exScanDigit+1, numElems);
		hipLaunchKernelGGL(( determineRelativeOffset), dim3(gridOffset), dim3(blockOffset), 0, 0, digit, exScanDigit, offset, numElems);
		hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
		// Step 3: Scatter values and positions to their sorted locations
		hipLaunchKernelGGL(( scatter), dim3(gridScatter), dim3(blockScatter), 0, 0, d_vals_src, d_vals_dst, digit, hist, offset, numElems);
		hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
		hipLaunchKernelGGL(( scatter), dim3(gridScatter), dim3(blockScatter), 0, 0, d_pos_src, d_pos_dst, digit, hist, offset, numElems);
		hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
		// Swap src and dst for the next pass
		std::swap(d_vals_dst, d_vals_src);
		std::swap(d_pos_dst, d_pos_src);
	}
	// The above loop does an even number of iterations, so the sorted data
	// ends up in the input buffers; copy it into the output buffers.
	checkCudaErrors(hipMemcpy(d_outputVals, d_inputVals,
		numElems*sizeof(unsigned int), hipMemcpyDeviceToDevice));
	checkCudaErrors(hipMemcpy(d_outputPos, d_inputPos,
		numElems*sizeof(unsigned int), hipMemcpyDeviceToDevice));
	// Free device memory
	checkCudaErrors(hipFree(digit));
	checkCudaErrors(hipFree(exScanDigit));
	checkCudaErrors(hipFree(hist));
	checkCudaErrors(hipFree(offset));
}
| c785465e95ea1644a1d15eae0275c4240043a1a2.cu | //Udacity HW 4
//Radix Sorting
#include "utils.h"
/* Red Eye Removal
===============
For this assignment we are implementing red eye removal. This is
accomplished by first creating a score for every pixel that tells us how
likely it is to be a red eye pixel. We have already done this for you - you
are receiving the scores and need to sort them in ascending order so that we
know which pixels to alter to remove the red eye.
Note: ascending order == smallest to largest
Each score is associated with a position, when you sort the scores, you must
also move the positions accordingly.
Implementing Parallel Radix Sort with CUDA
==========================================
The basic idea is to construct a histogram on each pass of how many of each
"digit" there are. Then we scan this histogram so that we know where to put
the output of each digit. For example, the first 1 must come after all the
0s so we have to know how many 0s there are to be able to start moving 1s
into the correct position.
1) Histogram of the number of occurrences of each digit
2) Exclusive Prefix Sum of Histogram
3) Determine relative offset of each digit
For example [0 0 1 1 0 0 1]
-> [0 1 0 1 2 3 2]
4) Combine the results of steps 2 & 3 to determine the final
output location for each element and move it there
LSB Radix sort is an out-of-place sort and you will need to ping-pong values
between the input and output buffers we have provided. Make sure the final
sorted results end up in the output buffer! Hint: You may need to do a copy
at the end.
*/
// Take current digit of each element
//=====================================================================
// Extracts the radix digit of every element: the nBits-wide field whose
// least significant bit sits at `currentDigit`. nBins (= 1 << nBits) must
// be a power of two so (nBins - 1) works as a mask. One thread per element.
__global__ void takeCurrentDigit(unsigned int* inData, unsigned int* outDigit,
	int n, int currentDigit, int nBins)
{
	int idx = blockIdx.x * blockDim.x + threadIdx.x;
	if (idx >= n)
		return;
	outDigit[idx] = (inData[idx] >> currentDigit) & (nBins - 1);
}
// Histogram on digit
//=====================================================================
// Per-digit frequency count over the whole array: each thread atomically
// bumps the bucket for its element. `hist` buckets must be zeroed by the
// caller before launch.
__global__ void histogram(unsigned int* inData, unsigned int* hist, int n)
{
	int idx = blockDim.x * blockIdx.x + threadIdx.x;
	if (idx >= n)
		return;
	atomicAdd(hist + inData[idx], 1u);
}
// Inclusive scan
//=====================================================================
// Block-local inclusive prefix sum (work-efficient reduce / post-reduce
// sweep). Each thread owns two consecutive elements, so one block scans
// 2*blockDim.x values held in dynamic shared memory; the launch must pass
// 2*blockDim.x*sizeof(unsigned int) as the shared-memory size.
// Outputs:
//   outData[i]     - inclusive scan of inData, valid WITHIN each block only
//   outLastData[b] - total (last scanned value) of block b, so the caller
//                    can recursively scan the block sums and patch the
//                    partial results afterwards (see scanOnLargeSize /
//                    lastScan).
__global__ void inclusiveScan(unsigned int* inData, unsigned int* outData,
	unsigned int* outLastData, int n)
{
	// 1. Each block loads data from GMEM to SMEM
	// (each thread will load 2 elements)
	extern __shared__ unsigned int s_inScanData[]; // Its size will be 2*blockDim.x (elements)
	int idx1 = (blockDim.x * blockIdx.x + threadIdx.x) * 2;
	int idx2 = (blockDim.x * blockIdx.x + threadIdx.x) * 2 + 1;
	if (idx1 < n)
		s_inScanData[2*threadIdx.x] = inData[idx1];
	if (idx2 < n)
		s_inScanData[2*threadIdx.x+1] = inData[idx2];
	// NOTE(review): shared slots for indices >= n stay uninitialized in a
	// partial last block; both sweeps below only propagate sums toward
	// HIGHER indices, so in-range results remain correct, but zero-filling
	// the tail would be safer -- confirm.
	__syncthreads();
	// 2. Each block does scan with data on SMEM
	// 2.1. Reduction phase (upsweep): build partial sums at power-of-two strides
	for (int stride = 1; stride < 2 * blockDim.x; stride *= 2)
	{
		int s_idx = (threadIdx.x + 1) * 2 * stride - 1; // So active threads will be consecutive
		if (s_idx < 2 * blockDim.x)
			s_inScanData[s_idx] += s_inScanData[s_idx - stride];
		__syncthreads();
	}
	// 2.2. Post-reduction phase (downsweep): fill in the remaining prefixes
	for (int stride = blockDim.x / 2; stride > 0; stride /= 2)
	{
		int s_idx = (threadIdx.x + 1) * 2 * stride - 1 + stride;
		if (s_idx < 2 * blockDim.x)
			s_inScanData[s_idx] += s_inScanData[s_idx - stride];
		__syncthreads();
	}
	// 3. Each block writes result from SMEM to GMEM
	// (each thread will write 2 elements)
	if (idx1 < n)
		outData[idx1] = s_inScanData[2*threadIdx.x];
	if (idx2 < n)
		outData[idx2] = s_inScanData[2*threadIdx.x+1];
	// 4. Publish the block total for the recursion on large arrays: either
	// the last thread of a full block, or the thread owning element n-1 in
	// a partial final block (its second slot when n lands on an odd local
	// index, its first slot otherwise).
	if (threadIdx.x == (blockDim.x-1) || (blockIdx.x * blockDim.x + threadIdx.x) == (n-1)/2)
		if (idx2 < n)
			outLastData[blockIdx.x] = s_inScanData[2 * threadIdx.x + 1];
		else
			outLastData[blockIdx.x] = s_inScanData[2 * threadIdx.x];
}
// Add scanned block sum to the before scanned block sum
//=====================================================================
// Completes a multi-block prefix sum: every block except block 0 adds the
// scanned total of all preceding blocks (sums[blockIdx.x - 1]) to its own
// partial scan. Each thread patches the same two elements it owned during
// inclusiveScan.
__global__ void lastScan(unsigned int* odata, unsigned int* sums, const int n)
{
	if (blockIdx.x == 0)
		return;
	unsigned int carry = sums[blockIdx.x - 1];
	int base = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
	if (base < n)
		odata[base] += carry;
	if (base + 1 < n)
		odata[base + 1] += carry;
}
// Inclusive scan on large size array
//=====================================================================
// Host-side recursive driver for scanning device arrays larger than one
// block can handle:
//   1. scan each block locally (inclusiveScan), collecting per-block totals
//      into a temporary `outlast` buffer (one entry per block),
//   2. recursively scan those totals,
//   3. add each preceding scanned total back into the partial results
//      (lastScan).
// `in` and `out` are device pointers holding `n` elements. Temporary device
// buffers are allocated and freed at every recursion level.
void scanOnLargeSize(dim3 blockSize, dim3 gridSize, unsigned int *in, unsigned int *out, int n)
{
	dim3 _gridSize = gridSize;
	unsigned int *outlast;
	// Allocate device memory (one block-total slot per block)
	checkCudaErrors(cudaMalloc((unsigned int**)&outlast, gridSize.x*sizeof(unsigned int)));
	// Launch scan kernel to find sum of each block (each thread handles 2 values)
	inclusiveScan<<<gridSize, blockSize, 2*blockSize.x*sizeof(unsigned int)>>>(in, out, outlast, n);
	cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
	// Recurse until the scan fits in a single block
	if (_gridSize.x > 1)
	{
		int nsize = _gridSize.x;
		unsigned int *sums;
		// Grid for scanning the block totals themselves (2 elements/thread)
		_gridSize.x = (_gridSize.x-1)/2/blockSize.x+1;
		checkCudaErrors(cudaMalloc((unsigned int**)&sums, nsize*sizeof(unsigned int)));
		scanOnLargeSize(blockSize, _gridSize, outlast, sums, nsize);
		// Add scanned block sums back into the partial per-block scans
		lastScan<<<gridSize, blockSize>>>(out, sums, n);
		cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
		checkCudaErrors(cudaFree(sums));
	}
	checkCudaErrors(cudaFree(outlast));
}
// Compute relative offset of each digit
//=====================================================================
// Computes, for each element, its rank among elements sharing the same
// binary digit value (its "relative offset" within the digit class):
//   digit 0: idx - exScanData[idx]   (number of zeros before idx)
//   digit 1: exScanData[idx]         (number of ones  before idx)
// exScanData is the EXCLUSIVE prefix sum of the 0/1 digit array inData.
// BUG FIX: the old code added nZeros (the zero-class base) to 1-digit
// ranks here, while the scatter kernel then added exScanHist[1] (== nZeros)
// again -- double-counting the zero block and writing every 1-digit element
// out of bounds. The class base offset is applied exactly once, in scatter,
// via the exclusive-scanned histogram.
__global__ void determineRelativeOffset(unsigned int* inData, unsigned int* exScanData,
	unsigned int* outData, int n)
{
	int idx = blockDim.x * blockIdx.x + threadIdx.x;
	if (idx < n)
	{
		// Rank of this element within its own digit class only
		if (inData[idx] == 0)
			outData[idx] = idx - exScanData[idx];
		else
			outData[idx] = exScanData[idx];
	}
}
// Scatter values corresponding to its location
//=====================================================================
// Moves each element to its sorted slot for the current digit:
//   destination = start of this digit's class (exclusive-scanned histogram)
//               + the element's rank within that class (offset).
__global__ void scatter(unsigned int* inData, unsigned int* outData,
	unsigned int* inDigit, unsigned int* exScanHist,
	unsigned int* offset, int n)
{
	int idx = blockDim.x * blockIdx.x + threadIdx.x;
	if (idx >= n)
		return;
	unsigned int dst = exScanHist[inDigit[idx]] + offset[idx];
	outData[dst] = inData[idx];
}
//=====================================================================
// LSB radix sort of (value, position) pairs, 1 bit (nBits) per pass.
// Values and their positions are ping-ponged between the input and output
// device buffers; after the even number of passes (32 bits / 1 bit = 32)
// the sorted data sits back in the input buffers and is copied to the
// output buffers at the end.
// NOTE(review): determineRelativeOffset adds nZeros to 1-digit ranks while
// scatter also adds exScanHist[1] (the zero count); verify the digit-class
// base is not applied twice.
void your_sort(unsigned int* const d_inputVals,
	unsigned int* const d_inputPos,
	unsigned int* const d_outputVals,
	unsigned int* const d_outputPos,
	const size_t numElems)
{
	// Launch configurations: one thread per element for every kernel,
	// except the scan which handles two elements per thread.
	// for takeCurrentDigit kernel
	const dim3 blockDigit(256);
	const dim3 gridDigit((numElems-1)/blockDigit.x+1);
	// for histogram kernel
	const dim3 blockHist(256);
	const dim3 gridHist((numElems-1)/blockHist.x+1);
	// for inclusive scan kernel (2 elements per thread)
	const dim3 blockScan(512);
	const dim3 gridScan((numElems-1)/2/blockScan.x+1);
	// for compute offset kernel
	const dim3 blockOffset(256);
	const dim3 gridOffset((numElems-1)/blockOffset.x+1);
	// for scatter kernel
	const dim3 blockScatter(128);
	const dim3 gridScatter((numElems-1)/blockScatter.x+1);
	// Ping-pong pointers: swapped after each pass
	unsigned int* d_vals_src = d_inputVals;
	unsigned int* d_pos_src = d_inputPos;
	unsigned int* d_vals_dst = d_outputVals;
	unsigned int* d_pos_dst = d_outputPos;
	// Radix width: 1 bit per pass -> 2 bins (0/1)
	unsigned int nBits = 1;
	unsigned int nBins = 1 << nBits;
	// Scratch buffers (device): current digits, their exclusive scan,
	// digit histogram, and per-element relative offsets
	unsigned int *digit, *exScanDigit;
	unsigned int *hist;
	unsigned int *offset;
	checkCudaErrors(cudaMalloc((unsigned int**)&digit, numElems*sizeof(unsigned int)));
	checkCudaErrors(cudaMalloc((unsigned int**)&exScanDigit, (numElems+1)*sizeof(unsigned int)));
	checkCudaErrors(cudaMalloc((unsigned int**)&hist, (nBins+1)*sizeof(unsigned int)));
	checkCudaErrors(cudaMalloc((unsigned int**)&offset, numElems*sizeof(unsigned int)));
	// Loop from LSD to MSD.
	// In each pass, sort values according to the current digit (STABLE sort).
	for (unsigned int i = 0; i < 8*sizeof(unsigned int); i += nBits)
	{
		checkCudaErrors(cudaMemset(hist, 0, (nBins+1)*sizeof(unsigned int)));
		checkCudaErrors(cudaMemset(exScanDigit, 0, (numElems+1)*sizeof(unsigned int)));
		// Step 1: Compute histogram of the current digit. Counts are written
		// at hist+1 with hist[0] kept zero, so hist[0..nBins-1] is already
		// the EXCLUSIVE scan of the counts for the 2-bin case.
		takeCurrentDigit<<<gridDigit, blockDigit>>>(d_vals_src, digit, numElems, i, nBins);
		cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
		histogram<<<gridHist, blockHist>>>(digit, hist+1, numElems);
		cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
		// Step 2: Compute relative offset. The inclusive scan is written at
		// exScanDigit+1 behind a zeroed slot 0, turning it into an exclusive
		// scan when read from exScanDigit.
		scanOnLargeSize(blockScan, gridScan, digit, exScanDigit+1, numElems);
		determineRelativeOffset<<<gridOffset, blockOffset>>>(digit, exScanDigit, offset, numElems);
		cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
		// Step 3: Scatter values and positions to their sorted locations
		scatter<<<gridScatter, blockScatter>>>(d_vals_src, d_vals_dst, digit, hist, offset, numElems);
		cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
		scatter<<<gridScatter, blockScatter>>>(d_pos_src, d_pos_dst, digit, hist, offset, numElems);
		cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
		// Swap src and dst for the next pass
		std::swap(d_vals_dst, d_vals_src);
		std::swap(d_pos_dst, d_pos_src);
	}
	// The above loop does an even number of iterations, so the sorted data
	// ends up in the input buffers; copy it into the output buffers.
	checkCudaErrors(cudaMemcpy(d_outputVals, d_inputVals,
		numElems*sizeof(unsigned int), cudaMemcpyDeviceToDevice));
	checkCudaErrors(cudaMemcpy(d_outputPos, d_inputPos,
		numElems*sizeof(unsigned int), cudaMemcpyDeviceToDevice));
	// Free device memory
	checkCudaErrors(cudaFree(digit));
	checkCudaErrors(cudaFree(exScanDigit));
	checkCudaErrors(cudaFree(hist));
	checkCudaErrors(cudaFree(offset));
}
|
d147940412e75927ad99f522a4eb24aeb17df1a3.hip | // !!! This is a file automatically generated by hipify!!!
/*
Single Author info:
yjkamdar Yash J Kamdar
Group info:
vphadke Vandan V Phadke
angodse Anupam N Godse
*/
#include <stdlib.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <time.h>
#define __DEBUG
#define VSQR 0.1
#define TSCALE 1.0
#define CUDA_CALL( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CUDA_CHK_ERR() __cudaCheckError(__FILE__,__LINE__)
/**************************************
* void __cudaSafeCall(hipError_t err, const char *file, const int line)
* void __cudaCheckError(const char *file, const int line)
*
* These routines were taken from the GPU Computing SDK
* (http://developer.nvidia.com/gpu-computing-sdk) include file "cutil.h"
**************************************/
// Wrap around runtime API calls (see CUDA_CALL): prints the failing call's
// file/line and the runtime's error string, then aborts the process.
// Compiled to a no-op unless __DEBUG is defined.
inline void __cudaSafeCall( hipError_t err, const char *file, const int line )
{
#ifdef __DEBUG
#pragma warning( push )
#pragma warning( disable: 4127 ) // Prevent warning on do-while(0);
	do
	{
		if ( hipSuccess != err )
		{
			fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
				file, line, hipGetErrorString( err ) );
			exit( -1 );
		}
	} while ( 0 );
#pragma warning( pop )
#endif // __DEBUG
	return;
}
// Polls the runtime's sticky error state (see CUDA_CHK_ERR) to surface
// asynchronous errors -- e.g. a failed kernel launch -- at a known
// file/line. hipGetLastError() also CLEARS the sticky error.
// Compiled to a no-op unless __DEBUG is defined.
inline void __cudaCheckError( const char *file, const int line )
{
#ifdef __DEBUG
#pragma warning( push )
#pragma warning( disable: 4127 ) // Prevent warning on do-while(0);
	do
	{
		hipError_t err = hipGetLastError();
		if ( hipSuccess != err )
		{
			fprintf( stderr, "cudaCheckError() failed at %s:%i : %s.\n",
				file, line, hipGetErrorString( err ) );
			exit( -1 );
		}
		// More careful checking (device-wide sync catches in-kernel faults).
		// However, this will affect performance. Uncomment only if needed.
		/*err = hipDeviceSynchronize();
		if( hipSuccess != err )
		{
			fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s.\n",
				file, line, hipGetErrorString( err ) );
			exit( -1 );
		}*/
	} while ( 0 );
#pragma warning( pop )
#endif // __DEBUG
	return;
}
/* Advances simulation time *t by one step dt. Returns 1 after advancing;
 * returns 0 (leaving *t untouched) once the next step would exceed the
 * final time tf. */
int tpdt(double *t, double dt, double tf)
{
	double next = (*t) + dt;
	if (next > tf)
		return 0;
	*t = next;
	return 1;
}
/* 9-point evolution of the lake grid, one timestep, one thread per cell.
 * un = next state, uc = current, uo = previous; n is the grid edge length,
 * h the spatial step, dt the timestep, t the current simulation time.
 * Boundary cells are clamped to 0; interior cells apply a 9-point Laplacian
 * (edge neighbors weighted 1, diagonal neighbors 0.25, center -5) plus the
 * exponentially decaying pebble source term.
 * NOTE(review): the pebble term enters with the opposite sign to the
 * 5-point evolvegpu below (-exp vs +exp) -- confirm which sign is intended.
 * NOTE(review): __expf is single precision inside a double-precision
 * update; presumably a deliberate speed trade-off -- confirm. */
__global__ void evolve9ptgpu(double *un, double *uc, double *uo, double *pebbles, int n, double h, double dt, double t){
	/*Calculate the index of the current grid point calculation*/
	int idx = blockIdx.x * blockDim.x + threadIdx.x;
	int totalLength = n*n;
	/*Boundary conditions for the grid: first/last row and first/last column*/
	if (idx >= 0 && idx < totalLength) {
		if((idx % n == 0) || ((idx + 1) % n == 0) || idx < n || idx > n*(n-1) - 1)
		{
			un[idx] = 0;
		}
		/*Calculate grid point value using the 9-point stencil*/
		else
		{
			un[idx] = 2*uc[idx] - uo[idx] + VSQR *(dt * dt) *((uc[idx-1] + uc[idx+1] +
					uc[idx + n] + uc[idx - n] + 0.25 * (uc[idx -n - 1] + uc[idx - n + 1] +
					uc[idx -1 + n] + uc[idx + 1 + n]) - 5 * uc[idx])/(h * h)
					+ (-1 * __expf(-TSCALE * t) * pebbles[idx]));
		}
	}
}
/* 5-point evolution of the lake grid, one timestep, one thread per cell.
 * Same contract as evolve9ptgpu but using only the 4 edge neighbors
 * (center weight -4). Boundary cells are clamped to 0. */
__global__ void evolvegpu(double *un, double *uc, double *uo, double *pebbles, int n, double h, double dt, double t){
	int idx = blockIdx.x * blockDim.x + threadIdx.x;
	if (idx >= n * n)
		return;
	/* First/last row or first/last column -> fixed boundary */
	if ((idx % n == 0) || ((idx + 1) % n == 0) || idx < n || idx > n * (n - 1) - 1) {
		un[idx] = 0;
		return;
	}
	double lap = (uc[idx-1] + uc[idx+1] + uc[idx + n] + uc[idx - n] - 4 * uc[idx]) / (h * h);
	double source = __expf(-TSCALE * t) * pebbles[idx];
	un[idx] = 2 * uc[idx] - uo[idx] + VSQR * (dt * dt) * (lap + source);
}
/* Runs the 9-point lake simulation on the GPU.
 * u        - out: final grid state (n*n doubles, host)
 * u0, u1   - initial states at t=-dt and t=0 (host)
 * pebbles  - pebble impact map (host)
 * n        - grid edge length (assumed divisible by nthreads)
 * h        - spatial step; end_time - simulated duration
 * nthreads - block edge; launches (n/nthreads)^2 blocks of nthreads^2 threads.
 * Fixes vs. the previous version: pebbles_d was leaked (never freed), the
 * allocations were unchecked, and uc/uo were round-tripped through the host
 * on EVERY timestep -- the loop now stays on the device and rotates the
 * three grid buffers by swapping pointers, which yields identical results. */
void run_gpu(double *u, double *u0, double *u1, double *pebbles, int n, double h, double end_time, int nthreads)
{
	hipEvent_t kstart, kstop;
	float ktime;

	int nBlocks = n / nthreads;
	double t, dt;
	double *un_d, *uc_d, *uo_d, *pebbles_d;

	t = 0.;
	dt = h / 2.;

	/* Set up device timers */
	CUDA_CALL(hipSetDevice(0));
	CUDA_CALL(hipEventCreate(&kstart));
	CUDA_CALL(hipEventCreate(&kstop));

	/* Device buffers: next/current/old grid states plus the pebble map */
	CUDA_CALL(hipMalloc((void **) &un_d, sizeof(double) * n * n));
	CUDA_CALL(hipMalloc((void **) &uc_d, sizeof(double) * n * n));
	CUDA_CALL(hipMalloc((void **) &uo_d, sizeof(double) * n * n));
	CUDA_CALL(hipMalloc((void **) &pebbles_d, sizeof(double) * n * n));

	/* Upload the initial states once; the time loop runs entirely on device */
	CUDA_CALL(hipMemcpy(pebbles_d, pebbles, sizeof(double) * n * n, hipMemcpyHostToDevice));
	CUDA_CALL(hipMemcpy(uo_d, u0, sizeof(double) * n * n, hipMemcpyHostToDevice));
	CUDA_CALL(hipMemcpy(uc_d, u1, sizeof(double) * n * n, hipMemcpyHostToDevice));

	/* Start GPU computation timer */
	CUDA_CALL(hipEventRecord(kstart, 0));

	/* Main lake simulation loop: evolve one step, then rotate the buffers
	 * (old <- current <- next) by swapping device pointers; the retired
	 * "old" buffer is reused as the next step's output. */
	while(1)
	{
		hipLaunchKernelGGL(( evolve9ptgpu), dim3(nBlocks*nBlocks), dim3(nthreads*nthreads), 0, 0, un_d, uc_d, uo_d, pebbles_d, n, h, dt, t);

		double *tmp = uo_d;
		uo_d = uc_d;
		uc_d = un_d;
		un_d = tmp;

		if(!tpdt(&t,dt,end_time)) break;
	}

	/* uc_d now holds the most recently computed state; copy it back */
	CUDA_CALL(hipMemcpy(u, uc_d, sizeof(double) * n * n, hipMemcpyDeviceToHost));

	/* Stop GPU computation timer */
	CUDA_CALL(hipEventRecord(kstop, 0));
	CUDA_CALL(hipEventSynchronize(kstop));
	CUDA_CALL(hipEventElapsedTime(&ktime, kstart, kstop));
	printf("GPU computation: %f msec\n", ktime);

	/* Device cleanup (pebbles_d was previously leaked) */
	hipFree(un_d);
	hipFree(uc_d);
	hipFree(uo_d);
	hipFree(pebbles_d);

	/* timer cleanup */
	CUDA_CALL(hipEventDestroy(kstart));
	CUDA_CALL(hipEventDestroy(kstop));
}
| d147940412e75927ad99f522a4eb24aeb17df1a3.cu | /*
Single Author info:
yjkamdar Yash J Kamdar
Group info:
vphadke Vandan V Phadke
angodse Anupam N Godse
*/
#include <stdlib.h>
#include <stdio.h>
#include <cuda_runtime.h>
#include <time.h>
#define __DEBUG
#define VSQR 0.1
#define TSCALE 1.0
#define CUDA_CALL( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CUDA_CHK_ERR() __cudaCheckError(__FILE__,__LINE__)
/**************************************
* void __cudaSafeCall(cudaError err, const char *file, const int line)
* void __cudaCheckError(const char *file, const int line)
*
* These routines were taken from the GPU Computing SDK
* (http://developer.nvidia.com/gpu-computing-sdk) include file "cutil.h"
**************************************/
/* Abort with a diagnostic when a CUDA runtime call returned an error.
   Compiled in only for __DEBUG builds; adapted from the GPU Computing SDK's
   cutil.h. err: return code to check; file/line: call-site location. */
inline void __cudaSafeCall( cudaError err, const char *file, const int line )
{
#ifdef __DEBUG
	if ( err != cudaSuccess )
	{
		fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
				file, line, cudaGetErrorString( err ) );
		exit( -1 );
	}
#endif // __DEBUG
	return;
}
/* Check for a pending (sticky) CUDA error, e.g. from a failed kernel launch,
   and abort with a diagnostic. Compiled in only for __DEBUG builds; adapted
   from the GPU Computing SDK's cutil.h. */
inline void __cudaCheckError( const char *file, const int line )
{
#ifdef __DEBUG
	cudaError_t err = cudaGetLastError();
	if ( err != cudaSuccess )
	{
		fprintf( stderr, "cudaCheckError() failed at %s:%i : %s.\n",
				file, line, cudaGetErrorString( err ) );
		exit( -1 );
	}
	// More careful checking. However, this will affect performance.
	// Comment if not needed.
	/*err = cudaThreadSynchronize();
	if( cudaSuccess != err )
	{
		fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s.\n",
				file, line, cudaGetErrorString( err ) );
		exit( -1 );
	}*/
#endif // __DEBUG
	return;
}
/* Advance simulation time *t by dt unless doing so would pass tf.
   Returns 1 when time advanced, 0 when the simulation should stop. */
int tpdt(double *t, double dt, double tf)
{
	double next = *t + dt;
	if (next > tf)
		return 0;
	*t = next;
	return 1;
}
/* 9-point stencil evolution of the lake grid on the GPU, one thread per point.
   un: next state, uc: current state, uo: previous state, pebbles: impulse map,
   n: grid edge length, h: grid spacing, dt: time step, t: current time.
   NOTE(review): __expf is a single-precision fast intrinsic used inside a
   double-precision update -- presumably a deliberate speed trade-off; confirm
   against the CPU reference before relying on full double accuracy. */
__global__ void evolve9ptgpu(double *un, double *uc, double *uo, double *pebbles, int n, double h, double dt, double t){
	/* Flat global index of this thread's grid point. */
	int idx = blockIdx.x * blockDim.x + threadIdx.x;
	int totalLength = n*n;
	/* Boundary conditions for the grid: edge cells are clamped to zero. */
	if (idx >= 0 && idx < totalLength) {
		if((idx % n == 0) || ((idx + 1) % n == 0) || idx < n || idx > n*(n-1) - 1)
		{
			un[idx] = 0;
		}
		/* Interior: 9-point scheme -- side neighbours weight 1, diagonal
		   neighbours weight 0.25, centre weight 5. */
		else
		{
			un[idx] = 2*uc[idx] - uo[idx] + VSQR *(dt * dt) *((uc[idx-1] + uc[idx+1] +
				uc[idx + n] + uc[idx - n] + 0.25 * (uc[idx -n - 1] + uc[idx - n + 1] +
				uc[idx -1 + n] + uc[idx + 1 + n]) - 5 * uc[idx])/(h * h)
				+ (-1 * __expf(-TSCALE * t) * pebbles[idx]));
		}
	}
}
/* 5-point stencil evolution of the lake grid, one thread per grid point.
   un: next state, uc: current state, uo: previous state, pebbles: impulse map,
   n: grid edge length, h: grid spacing, dt: time step, t: current time.
   NOTE(review): __expf is single precision inside a double-precision update --
   confirm the precision trade-off is intended. */
__global__ void evolvegpu(double *un, double *uc, double *uo, double *pebbles, int n, double h, double dt, double t){
	int idx = blockIdx.x * blockDim.x + threadIdx.x;
	int totalLength = n*n;
	if (idx >= 0 && idx < totalLength) {
		/* Boundary cells (left/right columns, top/bottom rows) are held at zero. */
		if((idx % n == 0) || ((idx + 1) % n == 0) || idx < n || idx > n*(n-1) - 1)
		{
			un[idx] = 0;
		}
		else
		{
			/* Interior: classic 5-point wave update plus the pebble forcing term. */
			un[idx] = 2*uc[idx] - uo[idx] + VSQR *(dt * dt) *((uc[idx-1] + uc[idx+1] +
				uc[idx + n] + uc[idx - n] - 4 * uc[idx])/(h * h) + (__expf(-TSCALE * t) * pebbles[idx]));
		}
	}
}
/*
 * Run the lake wave simulation on the GPU until end_time.
 * u: output grid (n*n doubles); u0/u1: initial states at t-dt and t;
 * pebbles: pebble impact map; n: grid edge length; h: grid spacing;
 * nthreads: thread-block edge size (n is expected to be divisible by nthreads).
 * Fixes vs. original: pebbles_d was never freed (device memory leak) and the
 * cudaMalloc/cudaMemcpy calls were unchecked while the rest of the function
 * already used CUDA_CALL.
 */
void run_gpu(double *u, double *u0, double *u1, double *pebbles, int n, double h, double end_time, int nthreads)
{
	cudaEvent_t kstart, kstop;
	float ktime;
	/* HW2: Define your local variables here */
	int nBlocks = n / nthreads;
	double t, dt;
	double *uc, *uo;
	double *un_d, *uc_d, *uo_d, *pebbles_d;
	uc = (double*)malloc(sizeof(double) * n * n);
	uo = (double*)malloc(sizeof(double) * n * n);
	t = 0.;
	dt = h / 2.;
	memcpy(uo, u0, sizeof(double) * n * n);
	memcpy(uc, u1, sizeof(double) * n * n);
	/* Set up device timers */
	CUDA_CALL(cudaSetDevice(0));
	CUDA_CALL(cudaEventCreate(&kstart));
	CUDA_CALL(cudaEventCreate(&kstop));
	/* Device buffers: next/current/old grid states plus the pebble map. */
	CUDA_CALL(cudaMalloc((void **) &un_d, sizeof(double) * n * n));
	CUDA_CALL(cudaMalloc((void **) &uc_d, sizeof(double) * n * n));
	CUDA_CALL(cudaMalloc((void **) &uo_d, sizeof(double) * n * n));
	CUDA_CALL(cudaMalloc((void **) &pebbles_d, sizeof(double) * n * n));
	CUDA_CALL(cudaMemcpy(pebbles_d, pebbles, sizeof(double) * n * n, cudaMemcpyHostToDevice));
	/* Start GPU computation timer */
	CUDA_CALL(cudaEventRecord(kstart, 0));
	/* Main simulation loop: each iteration advances the grid by dt with the
	   9-point stencil kernel; host copies rotate the time levels (uo <- uc, uc <- un). */
	while(1)
	{
		CUDA_CALL(cudaMemcpy(uo_d, uo, sizeof(double) * n * n, cudaMemcpyHostToDevice));
		CUDA_CALL(cudaMemcpy(uc_d, uc, sizeof(double) * n * n, cudaMemcpyHostToDevice));
		evolve9ptgpu<<<nBlocks*nBlocks, nthreads*nthreads>>>(un_d, uc_d, uo_d, pebbles_d, n, h, dt, t);
		//evolvegpu<<<nBlocks*nBlocks, nthreads*nthreads>>>(un_d, uc_d, uo_d, pebbles_d, n, h, dt, t);
		CUDA_CALL(cudaMemcpy(uo, uc_d, sizeof(double) * n * n, cudaMemcpyDeviceToHost));
		CUDA_CALL(cudaMemcpy(uc, un_d, sizeof(double) * n * n, cudaMemcpyDeviceToHost));
		if(!tpdt(&t,dt,end_time)) break;
	}
	memcpy(u, uc, sizeof(double) * n * n);
	/* Stop GPU computation timer */
	CUDA_CALL(cudaEventRecord(kstop, 0));
	CUDA_CALL(cudaEventSynchronize(kstop));
	CUDA_CALL(cudaEventElapsedTime(&ktime, kstart, kstop));
	printf("GPU computation: %f msec\n", ktime);
	/* Cleanup: host buffers and all four device buffers (pebbles_d was leaked before). */
	free(uc);
	free(uo);
	cudaFree(un_d);
	cudaFree(uc_d);
	cudaFree(uo_d);
	cudaFree(pebbles_d);
	/* timer cleanup */
	CUDA_CALL(cudaEventDestroy(kstart));
	CUDA_CALL(cudaEventDestroy(kstop));
}
|
659e659be81d9c0523bb087eabd315604d2ade77.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2011-2016 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "rgb_to_yuv_convert_layer_updater_cuda.h"
#include <hip/hip_runtime.h>
#include <memory>
#include "util_cuda.h"
#include "../rgb_to_yuv_convert_layer.h"
#include "../neural_network_exception.h"
#define w_r 0.299F
#define w_b 0.114F
#define w_g (1.0F - w_r - w_b)
#define u_max 0.436F
#define v_max 0.615F
#define u_mult (u_max / (1.0F - w_b))
#define v_mult (v_max / (1.0F - w_r))
#define reverse_r_v_mult ((1.0F - w_r) / v_max)
#define reverse_g_u_mult (-(w_b * (1.0F - w_b)) / (u_max * w_g))
#define reverse_g_v_mult (-(w_r * (1.0F - w_r)) / (v_max * w_g))
#define reverse_b_u_mult ((1.0F - w_b) / u_max)
namespace nnforge
{
namespace cuda
{
/* Convert configured RGB feature-map triples to YUV.
 * Thread mapping: x = spatial element, y = color-triple config, z = entry.
 * color_feature_map_config_list holds triples of feature-map ids
 * (R->Y, G->U, B->V); feature maps outside the listed triples are not
 * written by this kernel. */
__global__ void rgb_to_yuv_convert_upd_kernel(
	const float * __restrict input,
	float * __restrict output,
	const int * __restrict color_feature_map_config_list,
	int feature_map_count,
	int elem_count_per_feature_map,
	int color_feature_map_config_count,
	int entry_count)
{
	int elem_id = blockDim.x * blockIdx.x + threadIdx.x;
	int color_feature_map_config_config_id = blockDim.y * blockIdx.y + threadIdx.y;
	int entry_id = blockDim.z * blockIdx.z + threadIdx.z;
	if ((elem_id < elem_count_per_feature_map) && (color_feature_map_config_config_id < color_feature_map_config_count) && (entry_id < entry_count))
	{
		/* Each config entry is a triple of feature-map ids. */
		int color_feature_map_config_id_offset = color_feature_map_config_config_id * 3;
		int red_and_y_feature_map_id = color_feature_map_config_list[color_feature_map_config_id_offset];
		int green_and_u_feature_map_id = color_feature_map_config_list[color_feature_map_config_id_offset + 1];
		int blue_and_v_feature_map_id = color_feature_map_config_list[color_feature_map_config_id_offset + 2];
		/* Offsets into the (entry, feature map, element) layout. */
		int base_offset = (entry_id * elem_count_per_feature_map * feature_map_count) + elem_id;
		int red_and_y_offset = red_and_y_feature_map_id * elem_count_per_feature_map + base_offset;
		int green_and_u_offset = green_and_u_feature_map_id * elem_count_per_feature_map + base_offset;
		int blue_and_v_offset = blue_and_v_feature_map_id * elem_count_per_feature_map + base_offset;
		float red = input[red_and_y_offset];
		float green = input[green_and_u_offset];
		float blue = input[blue_and_v_offset];
		/* YUV transform: Y is weighted luma; U/V scale the blue/red
		   differences from luma (weights w_*, u_mult, v_mult defined above). */
		float y = w_r * red + w_g * green + w_b * blue;
		float u = u_mult * (blue - y);
		float v = v_mult * (red - y);
		output[red_and_y_offset] = y;
		output[green_and_u_offset] = u;
		output[blue_and_v_offset] = v;
	}
}
/* Backward pass: map YUV-space gradients back to RGB-space gradients using
 * the transpose-derived reverse_* coefficients defined above.
 * Thread mapping: x = spatial element, y = color-triple config, z = entry.
 * When add_update_to_destination is set, gradients are accumulated into
 * input_errors instead of overwriting it. */
__global__ void rgb_to_yuv_convert_deriviative_upd_kernel(
	float * __restrict input_errors,
	const float * __restrict output_errors,
	const int * __restrict color_feature_map_config_list,
	int feature_map_count,
	int elem_count_per_feature_map,
	int color_feature_map_config_count,
	bool add_update_to_destination,
	int entry_count)
{
	int elem_id = blockDim.x * blockIdx.x + threadIdx.x;
	int color_feature_map_config_config_id = blockDim.y * blockIdx.y + threadIdx.y;
	int entry_id = blockDim.z * blockIdx.z + threadIdx.z;
	if ((elem_id < elem_count_per_feature_map) && (color_feature_map_config_config_id < color_feature_map_config_count) && (entry_id < entry_count))
	{
		/* Each config entry is a triple of feature-map ids. */
		int color_feature_map_config_id_offset = color_feature_map_config_config_id * 3;
		int red_and_y_feature_map_id = color_feature_map_config_list[color_feature_map_config_id_offset];
		int green_and_u_feature_map_id = color_feature_map_config_list[color_feature_map_config_id_offset + 1];
		int blue_and_v_feature_map_id = color_feature_map_config_list[color_feature_map_config_id_offset + 2];
		int base_offset = (entry_id * elem_count_per_feature_map * feature_map_count) + elem_id;
		int red_and_y_offset = red_and_y_feature_map_id * elem_count_per_feature_map + base_offset;
		int green_and_u_offset = green_and_u_feature_map_id * elem_count_per_feature_map + base_offset;
		int blue_and_v_offset = blue_and_v_feature_map_id * elem_count_per_feature_map + base_offset;
		float y = output_errors[red_and_y_offset];
		float u = output_errors[green_and_u_offset];
		float v = output_errors[blue_and_v_offset];
		/* Reverse linear map from (y,u,v) gradients to (r,g,b) gradients. */
		float red = y + reverse_r_v_mult * v;
		float green = y + reverse_g_u_mult * u + reverse_g_v_mult * v;
		float blue = y + reverse_b_u_mult * u;
		if (add_update_to_destination)
		{
			input_errors[red_and_y_offset] += red;
			input_errors[green_and_u_offset] += green;
			input_errors[blue_and_v_offset] += blue;
		}
		else
		{
			input_errors[red_and_y_offset] = red;
			input_errors[green_and_u_offset] = green;
			input_errors[blue_and_v_offset] = blue;
		}
	}
}
/* Forward pass: optionally copy input to output (so feature maps not covered
 * by any color triple pass through), then launch the RGB->YUV kernel on
 * stream_id for all configured triples.
 * NOTE(review): the guard compares color_feature_map_config_count against
 * feature_map_count * 3, yet each config covers 3 feature maps, so
 * color_feature_map_config_count * 3 == feature_map_count would seem to be
 * the "fully covered" case -- confirm against the layer definition. Also the
 * in-place check reads input_buffers[1] while the copy reads
 * input_buffers[0]; verify the intended buffer index. */
void rgb_to_yuv_convert_layer_updater_cuda::enqueue_forward_propagation(
	hipStream_t stream_id,
	cuda_linear_buffer_device::ptr output_buffer,
	const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data,
	const std::vector<cuda_linear_buffer_device::const_ptr>& data,
	const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom,
	const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers,
	const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data,
	cuda_linear_buffer_device::ptr temporary_working_fixed_buffer,
	cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer,
	cuda_linear_buffer_device::ptr temporary_fixed_buffer,
	cuda_linear_buffer_device::ptr temporary_per_entry_buffer,
	unsigned int entry_count)
{
	if ((color_feature_map_config_count != output_configuration_specific.feature_map_count * 3) && ((const float *)*output_buffer != (const float *)*input_buffers[1]))
	{
		cuda_util::copy_buffer(
			*cuda_config,
			*input_buffers[0],
			*output_buffer,
			output_elem_count_per_entry * entry_count,
			stream_id);
	}
	/* Grid: x = elements, y = color triples, z = entries. */
	std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
		*cuda_config,
		output_elem_count_per_feature_map,
		color_feature_map_config_count,
		entry_count);
	hipLaunchKernelGGL(( rgb_to_yuv_convert_upd_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id,
		*input_buffers[0],
		*output_buffer,
		*schema_data[0],
		output_configuration_specific.feature_map_count,
		output_elem_count_per_feature_map,
		color_feature_map_config_count,
		entry_count);
}
/* Backward pass: optionally copy output errors to input errors (covers
 * feature maps outside the color triples, and seeds the accumulate path),
 * then launch the derivative kernel on stream_id.
 * NOTE(review): as in the forward pass, the coverage guard compares
 * color_feature_map_config_count against feature_map_count * 3 rather than
 * count * 3 against feature_map_count -- confirm which is intended. */
void rgb_to_yuv_convert_layer_updater_cuda::enqueue_backward_data_propagation(
	hipStream_t stream_id,
	unsigned int input_index,
	cuda_linear_buffer_device::ptr input_errors_buffer,
	cuda_linear_buffer_device::const_ptr output_errors_buffer,
	const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data,
	const std::vector<cuda_linear_buffer_device::const_ptr>& data,
	const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom,
	const std::vector<cuda_linear_buffer_device::const_ptr>& input_neurons_buffers,
	cuda_linear_buffer_device::const_ptr output_neurons_buffer,
	const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data,
	cuda_linear_buffer_device::ptr temporary_working_fixed_buffer,
	cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer,
	cuda_linear_buffer_device::const_ptr temporary_fixed_buffer,
	cuda_linear_buffer_device::const_ptr temporary_per_entry_buffer,
	bool add_update_to_destination,
	unsigned int entry_count)
{
	if (((const float *)*output_errors_buffer != (const float *)*input_errors_buffer)
		&& ((color_feature_map_config_count != output_configuration_specific.feature_map_count * 3) || add_update_to_destination))
	{
		cuda_util::copy_buffer(
			*cuda_config,
			*output_errors_buffer,
			*input_errors_buffer,
			output_elem_count_per_entry * entry_count,
			stream_id);
	}
	/* Grid: x = elements, y = color triples, z = entries. */
	std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
		*cuda_config,
		output_elem_count_per_feature_map,
		color_feature_map_config_count,
		entry_count);
	hipLaunchKernelGGL(( rgb_to_yuv_convert_deriviative_upd_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id,
		*input_errors_buffer,
		*output_errors_buffer,
		*schema_data[0],
		output_configuration_specific.feature_map_count,
		output_elem_count_per_feature_map,
		color_feature_map_config_count,
		add_update_to_destination,
		entry_count);
}
/* Input 0 may be overwritten in place by this layer's output. */
int rgb_to_yuv_convert_layer_updater_cuda::get_input_index_layer_can_write(const layer_action& action) const
{
	return 0;
}
/* Backprop reads only the output error gradients, so neither the input
   neuron buffers ... */
bool rgb_to_yuv_convert_layer_updater_cuda::is_backward_data_dependent_on_input_buffer(unsigned int action_input_index, unsigned int data_input_index) const
{
	return false;
}
/* ... nor the output neuron buffer need to be retained for backward. */
bool rgb_to_yuv_convert_layer_updater_cuda::is_backward_data_dependent_on_output_buffer(unsigned int action_input_index) const
{
	return false;
}
/* Cache the number of configured color triples from the layer schema. */
void rgb_to_yuv_convert_layer_updater_cuda::updater_configured()
{
	std::shared_ptr<const rgb_to_yuv_convert_layer> layer_derived = std::dynamic_pointer_cast<const rgb_to_yuv_convert_layer>(layer_schema);
	color_feature_map_config_count = static_cast<int>(layer_derived->color_feature_map_config_list.size());
}
}
}
| 659e659be81d9c0523bb087eabd315604d2ade77.cu | /*
* Copyright 2011-2016 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "rgb_to_yuv_convert_layer_updater_cuda.h"
#include <cuda_runtime.h>
#include <memory>
#include "util_cuda.h"
#include "../rgb_to_yuv_convert_layer.h"
#include "../neural_network_exception.h"
#define w_r 0.299F
#define w_b 0.114F
#define w_g (1.0F - w_r - w_b)
#define u_max 0.436F
#define v_max 0.615F
#define u_mult (u_max / (1.0F - w_b))
#define v_mult (v_max / (1.0F - w_r))
#define reverse_r_v_mult ((1.0F - w_r) / v_max)
#define reverse_g_u_mult (-(w_b * (1.0F - w_b)) / (u_max * w_g))
#define reverse_g_v_mult (-(w_r * (1.0F - w_r)) / (v_max * w_g))
#define reverse_b_u_mult ((1.0F - w_b) / u_max)
namespace nnforge
{
namespace cuda
{
/* Convert configured RGB feature-map triples to YUV.
 * Thread mapping: x = spatial element, y = color-triple config, z = entry.
 * color_feature_map_config_list holds triples of feature-map ids
 * (R->Y, G->U, B->V); feature maps outside the listed triples are not
 * written by this kernel. */
__global__ void rgb_to_yuv_convert_upd_kernel(
	const float * __restrict input,
	float * __restrict output,
	const int * __restrict color_feature_map_config_list,
	int feature_map_count,
	int elem_count_per_feature_map,
	int color_feature_map_config_count,
	int entry_count)
{
	int elem_id = blockDim.x * blockIdx.x + threadIdx.x;
	int color_feature_map_config_config_id = blockDim.y * blockIdx.y + threadIdx.y;
	int entry_id = blockDim.z * blockIdx.z + threadIdx.z;
	if ((elem_id < elem_count_per_feature_map) && (color_feature_map_config_config_id < color_feature_map_config_count) && (entry_id < entry_count))
	{
		/* Each config entry is a triple of feature-map ids. */
		int color_feature_map_config_id_offset = color_feature_map_config_config_id * 3;
		int red_and_y_feature_map_id = color_feature_map_config_list[color_feature_map_config_id_offset];
		int green_and_u_feature_map_id = color_feature_map_config_list[color_feature_map_config_id_offset + 1];
		int blue_and_v_feature_map_id = color_feature_map_config_list[color_feature_map_config_id_offset + 2];
		/* Offsets into the (entry, feature map, element) layout. */
		int base_offset = (entry_id * elem_count_per_feature_map * feature_map_count) + elem_id;
		int red_and_y_offset = red_and_y_feature_map_id * elem_count_per_feature_map + base_offset;
		int green_and_u_offset = green_and_u_feature_map_id * elem_count_per_feature_map + base_offset;
		int blue_and_v_offset = blue_and_v_feature_map_id * elem_count_per_feature_map + base_offset;
		float red = input[red_and_y_offset];
		float green = input[green_and_u_offset];
		float blue = input[blue_and_v_offset];
		/* YUV transform: Y is weighted luma; U/V scale the blue/red
		   differences from luma (weights w_*, u_mult, v_mult defined above). */
		float y = w_r * red + w_g * green + w_b * blue;
		float u = u_mult * (blue - y);
		float v = v_mult * (red - y);
		output[red_and_y_offset] = y;
		output[green_and_u_offset] = u;
		output[blue_and_v_offset] = v;
	}
}
/* Backward pass: map YUV-space gradients back to RGB-space gradients using
 * the transpose-derived reverse_* coefficients defined above.
 * Thread mapping: x = spatial element, y = color-triple config, z = entry.
 * When add_update_to_destination is set, gradients are accumulated into
 * input_errors instead of overwriting it. */
__global__ void rgb_to_yuv_convert_deriviative_upd_kernel(
	float * __restrict input_errors,
	const float * __restrict output_errors,
	const int * __restrict color_feature_map_config_list,
	int feature_map_count,
	int elem_count_per_feature_map,
	int color_feature_map_config_count,
	bool add_update_to_destination,
	int entry_count)
{
	int elem_id = blockDim.x * blockIdx.x + threadIdx.x;
	int color_feature_map_config_config_id = blockDim.y * blockIdx.y + threadIdx.y;
	int entry_id = blockDim.z * blockIdx.z + threadIdx.z;
	if ((elem_id < elem_count_per_feature_map) && (color_feature_map_config_config_id < color_feature_map_config_count) && (entry_id < entry_count))
	{
		/* Each config entry is a triple of feature-map ids. */
		int color_feature_map_config_id_offset = color_feature_map_config_config_id * 3;
		int red_and_y_feature_map_id = color_feature_map_config_list[color_feature_map_config_id_offset];
		int green_and_u_feature_map_id = color_feature_map_config_list[color_feature_map_config_id_offset + 1];
		int blue_and_v_feature_map_id = color_feature_map_config_list[color_feature_map_config_id_offset + 2];
		int base_offset = (entry_id * elem_count_per_feature_map * feature_map_count) + elem_id;
		int red_and_y_offset = red_and_y_feature_map_id * elem_count_per_feature_map + base_offset;
		int green_and_u_offset = green_and_u_feature_map_id * elem_count_per_feature_map + base_offset;
		int blue_and_v_offset = blue_and_v_feature_map_id * elem_count_per_feature_map + base_offset;
		float y = output_errors[red_and_y_offset];
		float u = output_errors[green_and_u_offset];
		float v = output_errors[blue_and_v_offset];
		/* Reverse linear map from (y,u,v) gradients to (r,g,b) gradients. */
		float red = y + reverse_r_v_mult * v;
		float green = y + reverse_g_u_mult * u + reverse_g_v_mult * v;
		float blue = y + reverse_b_u_mult * u;
		if (add_update_to_destination)
		{
			input_errors[red_and_y_offset] += red;
			input_errors[green_and_u_offset] += green;
			input_errors[blue_and_v_offset] += blue;
		}
		else
		{
			input_errors[red_and_y_offset] = red;
			input_errors[green_and_u_offset] = green;
			input_errors[blue_and_v_offset] = blue;
		}
	}
}
/* Forward pass: optionally copy input to output (so feature maps not covered
 * by any color triple pass through), then launch the RGB->YUV kernel on
 * stream_id for all configured triples.
 * NOTE(review): the guard compares color_feature_map_config_count against
 * feature_map_count * 3, yet each config covers 3 feature maps, so
 * color_feature_map_config_count * 3 == feature_map_count would seem to be
 * the "fully covered" case -- confirm against the layer definition. Also the
 * in-place check reads input_buffers[1] while the copy reads
 * input_buffers[0]; verify the intended buffer index. */
void rgb_to_yuv_convert_layer_updater_cuda::enqueue_forward_propagation(
	cudaStream_t stream_id,
	cuda_linear_buffer_device::ptr output_buffer,
	const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data,
	const std::vector<cuda_linear_buffer_device::const_ptr>& data,
	const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom,
	const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers,
	const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data,
	cuda_linear_buffer_device::ptr temporary_working_fixed_buffer,
	cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer,
	cuda_linear_buffer_device::ptr temporary_fixed_buffer,
	cuda_linear_buffer_device::ptr temporary_per_entry_buffer,
	unsigned int entry_count)
{
	if ((color_feature_map_config_count != output_configuration_specific.feature_map_count * 3) && ((const float *)*output_buffer != (const float *)*input_buffers[1]))
	{
		cuda_util::copy_buffer(
			*cuda_config,
			*input_buffers[0],
			*output_buffer,
			output_elem_count_per_entry * entry_count,
			stream_id);
	}
	/* Grid: x = elements, y = color triples, z = entries. */
	std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
		*cuda_config,
		output_elem_count_per_feature_map,
		color_feature_map_config_count,
		entry_count);
	rgb_to_yuv_convert_upd_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(
		*input_buffers[0],
		*output_buffer,
		*schema_data[0],
		output_configuration_specific.feature_map_count,
		output_elem_count_per_feature_map,
		color_feature_map_config_count,
		entry_count);
}
/* Backward pass: optionally copy output errors to input errors (covers
 * feature maps outside the color triples, and seeds the accumulate path),
 * then launch the derivative kernel on stream_id.
 * NOTE(review): as in the forward pass, the coverage guard compares
 * color_feature_map_config_count against feature_map_count * 3 rather than
 * count * 3 against feature_map_count -- confirm which is intended. */
void rgb_to_yuv_convert_layer_updater_cuda::enqueue_backward_data_propagation(
	cudaStream_t stream_id,
	unsigned int input_index,
	cuda_linear_buffer_device::ptr input_errors_buffer,
	cuda_linear_buffer_device::const_ptr output_errors_buffer,
	const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data,
	const std::vector<cuda_linear_buffer_device::const_ptr>& data,
	const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom,
	const std::vector<cuda_linear_buffer_device::const_ptr>& input_neurons_buffers,
	cuda_linear_buffer_device::const_ptr output_neurons_buffer,
	const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data,
	cuda_linear_buffer_device::ptr temporary_working_fixed_buffer,
	cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer,
	cuda_linear_buffer_device::const_ptr temporary_fixed_buffer,
	cuda_linear_buffer_device::const_ptr temporary_per_entry_buffer,
	bool add_update_to_destination,
	unsigned int entry_count)
{
	if (((const float *)*output_errors_buffer != (const float *)*input_errors_buffer)
		&& ((color_feature_map_config_count != output_configuration_specific.feature_map_count * 3) || add_update_to_destination))
	{
		cuda_util::copy_buffer(
			*cuda_config,
			*output_errors_buffer,
			*input_errors_buffer,
			output_elem_count_per_entry * entry_count,
			stream_id);
	}
	/* Grid: x = elements, y = color triples, z = entries. */
	std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
		*cuda_config,
		output_elem_count_per_feature_map,
		color_feature_map_config_count,
		entry_count);
	rgb_to_yuv_convert_deriviative_upd_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(
		*input_errors_buffer,
		*output_errors_buffer,
		*schema_data[0],
		output_configuration_specific.feature_map_count,
		output_elem_count_per_feature_map,
		color_feature_map_config_count,
		add_update_to_destination,
		entry_count);
}
/* Input 0 may be overwritten in place by this layer's output. */
int rgb_to_yuv_convert_layer_updater_cuda::get_input_index_layer_can_write(const layer_action& action) const
{
	return 0;
}
/* Backprop reads only the output error gradients, so neither the input
   neuron buffers ... */
bool rgb_to_yuv_convert_layer_updater_cuda::is_backward_data_dependent_on_input_buffer(unsigned int action_input_index, unsigned int data_input_index) const
{
	return false;
}
/* ... nor the output neuron buffer need to be retained for backward. */
bool rgb_to_yuv_convert_layer_updater_cuda::is_backward_data_dependent_on_output_buffer(unsigned int action_input_index) const
{
	return false;
}
/* Cache the number of configured color triples from the layer schema. */
void rgb_to_yuv_convert_layer_updater_cuda::updater_configured()
{
	std::shared_ptr<const rgb_to_yuv_convert_layer> layer_derived = std::dynamic_pointer_cast<const rgb_to_yuv_convert_layer>(layer_schema);
	color_feature_map_config_count = static_cast<int>(layer_derived->color_feature_map_config_list.size());
}
}
}
|
694ec52399b82127eabd3d112b890fe2992c3d56.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//#include "device_launch_parameters.h"
#include <iostream>
#include <stdio.h>
#include <string.h>
#include "anyoption.h" // options parsing
/*
How to compile:
nvcc "filename" anyoption.cpp
*/
/* Sequentially sum arraySize ints from arr into *sumValue (cache-friendly
   access). The whole sum is repeated numCycles times for timing purposes;
   only the last pass's result is stored. */
void arraySum(int *arr, int arraySize, int *sumValue, int numCycles) {
	int total = 0;
	for (int pass = 0; pass < numCycles; ++pass) {
		total = 0;
		for (int pos = 0; pos < arraySize; ++pos)
			total += arr[pos];
	}
	*sumValue = total;
}
/* Sum arr into *sumValue using a strided access pattern (step of
   cacheLineSize elements) so consecutive loads land on different cache
   lines; repeated numCycles times for timing, last pass's result stored. */
void arraySumStride(int *arr, int arraySize, int *sumValue, int numCycles, int cacheLineSize) {
	int total = 0;
	for (int pass = 0; pass < numCycles; ++pass) {
		total = 0;
		for (int start = 0; start < cacheLineSize; ++start)
			for (int pos = start; pos < arraySize; pos += cacheLineSize)
				total += arr[pos];
	}
	*sumValue = total;
}
/* Sum arraySize ints from arr into *sumValue using blockDim.x threads of a
   single block; each thread sums a contiguous slice, repeated numCycles
   times for timing. Caller must zero *sumValue before the launch.
   Fixes vs. original: the partial sums are combined with atomicAdd (the
   plain "*sumValue += tempSum" was a data race across threads), and the
   last thread also covers the remainder elements when arraySize is not
   divisible by blockDim.x (previously silently dropped). */
__global__ void arraySumGPU(int *arr, int arraySize, int *sumValue, int numCycles) {
	int index = threadIdx.x; // current thread ID
	int numThreads = blockDim.x;
	int numElem = arraySize / numThreads; // # of elements per thread
	int first = index * numElem;
	// Last thread picks up the tail so every element is counted.
	int last = (index == numThreads - 1) ? arraySize : first + numElem;
	int tempSum = 0;
	for (int k = 0; k < numCycles; k++) {
		tempSum = 0;
		for (int i = first; i < last; i++) {
			tempSum += arr[i];
		}
	}
	// Race-free accumulation of the per-thread partial sums.
	atomicAdd(sumValue, tempSum);
}
/* Strided (cache-unfriendly) sum of arr into *sumValue on the GPU; repeated
   numCycles times for timing. Launched with <<<1, 1>>> by run loop below,
   so the plain (non-atomic) store to *sumValue is race-free. */
__global__ void arraySumStrideGPU(int *arr, int arraySize, int *sumValue, int numCycles, int cacheLineSize) {
	int tempSum = 0;
	for (int k = 0; k < numCycles; k++) {
		tempSum = 0;
		for (int i = 0; i < cacheLineSize; i++) {
			for (int j = i; j < arraySize; j += cacheLineSize) {
				tempSum += arr[j];
			}
		}
	}
	*sumValue = tempSum;
}
// allocate memory using "malloc" if on CPU, "hipMallocManaged" (unified
// memory, host- and device-accessible) if on GPU; size is in bytes
void genericMalloc(void **ptr, int size, int onCPU) {
	if (onCPU) {
		*ptr = malloc(size);
	}
	else {
		hipMallocManaged(ptr, size);
	}
}
// free memory, using either free or hipFree; onCPU must match the flag
// the buffer was allocated with in genericMalloc
void genericFree(void *ptr, int onCPU) {
	if (onCPU) {
		free(ptr);
	}
	else {
		hipFree(ptr);
	}
}
/* Benchmark driver: times a repeated array sum on the CPU, then doubles the
   GPU thread count until the GPU beats the CPU (or maxNumThreads is hit).
   Fix vs. original: sumValue holds a single int but was allocated
   arraySize * sizeof(int) bytes (~32 MB wasted); now sizeof(int). */
int main(int argc, char *argv[])
{
	int arraySize = 1 << 23; // 8M integers
	int usesCache = 1; // 0: don't use cache, 1: use cache (default)
	int onCPU = 0; // allocate memory with CUDA functions
	int cacheLineSize = 64 / sizeof(int); // # integers per cache line on CPU
	int cacheLineSizeGPU = 128 / sizeof(int); // # integers per cache line on GPU
	int numCycles = 500; // default # of repetitions on CPU
	int numCyclesGPU = 30; // default # of repetitions on GPU
	int numThreads = 1; // number of threads running on kernel
	int maxNumThreads = 1024;
	int *arr, *sumValue;
	printf("Default options: sum on CPU, use cache, arraySize= %d integers.\n", arraySize);
	printf("-- Default number of repetitions: %d (CPU), %d (GPU).\n", numCycles, numCyclesGPU);
	printf("-- max number of threads= %d.\n", maxNumThreads);
	// parse options
	AnyOption *opt = new AnyOption();
	// set usage
	opt->addUsage("Options usage: ");
	opt->addUsage("");
	opt->addUsage(" --no_cache \tDon't use cache ");
	opt->addUsage(" --rep <rep>\tNumber of repetitions ");
	opt->addUsage(" --size <size>\tArray size (* 2^20) elements");
	opt->addUsage("");
	opt->printUsage();
	// set options
	opt->setFlag("no_cache");
	opt->setOption("rep");
	opt->setOption("size");
	// Process commandline and get the options
	opt->processCommandArgs(argc, argv);
	// Get option values
	if (opt->getFlag("no_cache")) {
		usesCache = 0;
		printf("no_cache flag set\n");
	}
	if (opt->getValue("rep") != NULL) {
		numCycles = atoi(opt->getValue("rep"));
		numCyclesGPU = numCycles;
		printf("Number of repetitions set to: %d\n", numCycles);
	}
	if (opt->getValue("size") != NULL) {
		arraySize = (1 << 20) * atoi(opt->getValue("size"));
		printf("Array size set to: %dM integers\n", arraySize);
	}
	delete opt;
	// options parsed
	// allocate memory; sumValue is a single int (the original over-allocated
	// arraySize * sizeof(int) bytes here)
	genericMalloc((void**)&arr, arraySize * sizeof(int), onCPU);
	genericMalloc((void**)&sumValue, sizeof(int), onCPU);
	// initialize array on CPU
	for (int i = 0; i < arraySize; i++) {
		arr[i] = 1;
	}
	*sumValue = 0;
	// Time measurement
	int elapsedClocks = 0, startClock = 0, endClock = 0;
	double elapsedTimeCPU, avgElapsedTimeCPU, elapsedTimeGPU, avgElapsedTimeGPU;
	// Measure CPU execution time
	startClock = clock();
	if (usesCache) {
		arraySum(arr, arraySize, sumValue, numCycles);
	}
	else {
		arraySumStride(arr, arraySize, sumValue, numCycles, cacheLineSize);
	}
	endClock = clock();
	// Print CPU results
	printf("startClock CPU: %d, endClock CPU: %d\n", startClock, endClock);
	elapsedClocks = endClock - startClock;
	printf("elapsedClock CPU: %d\n", elapsedClocks);
	elapsedTimeCPU = ((double)(elapsedClocks)) / (CLOCKS_PER_SEC);
	avgElapsedTimeCPU = elapsedTimeCPU / numCycles;
	printf("CPU: sum= %d. \nElapsed time= %fs. Average execution time= %fs.\n\n", *sumValue, elapsedTimeCPU, avgElapsedTimeCPU);
	// Prefetch data to GPU so the first timed kernel run avoids page faults
	*sumValue = 0;
	int device = -1;
	hipGetDevice(&device);
	hipDeviceSynchronize();
	hipMemPrefetchAsync(arr, arraySize * sizeof(int), device, NULL);
	hipMemPrefetchAsync(sumValue, sizeof(int), device, NULL);
	hipDeviceSynchronize();
	// Measure GPU execution time, doubling thread count until GPU wins
	avgElapsedTimeGPU = avgElapsedTimeCPU + 10; // run on GPU at least once
	while ((avgElapsedTimeCPU < avgElapsedTimeGPU) && (numThreads <= maxNumThreads)) {
		*sumValue = 0;
		hipMemPrefetchAsync(sumValue, sizeof(int), device, NULL);
		hipDeviceSynchronize();
		startClock = clock();
		if (usesCache) {
			arraySumGPU << <1, numThreads >> > (arr, arraySize, sumValue, numCyclesGPU);
		}
		else {
			arraySumStrideGPU << <1, 1 >> > (arr, arraySize, sumValue, numCyclesGPU, cacheLineSizeGPU);
		}
		hipDeviceSynchronize();
		endClock = clock();
		// Print GPU results
		elapsedClocks = endClock - startClock;
		elapsedTimeGPU = ((double)(elapsedClocks)) / (CLOCKS_PER_SEC);
		avgElapsedTimeGPU = elapsedTimeGPU / numCyclesGPU;
		printf("GPU: %d threads.\n-- Elapsed time= %fs. Average execution time= %fs.\n", numThreads, elapsedTimeGPU, avgElapsedTimeGPU);
		if (avgElapsedTimeCPU < avgElapsedTimeGPU) {
			// double thread numbers
			numThreads *= 2;
		}
	}
	// print comparison results
	if (avgElapsedTimeCPU >= avgElapsedTimeGPU) {
		printf("GPU surpassed CPU when running %d threads\n. CPU time: %f, GPU time: %f.\n\n", numThreads, avgElapsedTimeCPU, avgElapsedTimeGPU);
	}
	else{
		printf("CPU is still faster.\n\n");
	}
	// free allocated memory
	genericFree(arr, onCPU);
	genericFree(sumValue, onCPU);
	return 0;
}
| 694ec52399b82127eabd3d112b890fe2992c3d56.cu | #include "cuda_runtime.h"
//#include "device_launch_parameters.h"
#include <iostream>
#include <stdio.h>
#include <string.h>
#include "anyoption.h" // options parsing
/*
How to compile:
nvcc "filename" anyoption.cpp
*/
/* Sequentially sum arraySize ints from arr into *sumValue (cache-friendly
   access). The whole sum is repeated numCycles times for timing purposes;
   only the last pass's result is stored. */
void arraySum(int *arr, int arraySize, int *sumValue, int numCycles) {
	int total = 0;
	for (int pass = 0; pass < numCycles; ++pass) {
		total = 0;
		for (int pos = 0; pos < arraySize; ++pos)
			total += arr[pos];
	}
	*sumValue = total;
}
/* Sum arr into *sumValue using a strided access pattern (step of
   cacheLineSize elements) so consecutive loads land on different cache
   lines; repeated numCycles times for timing, last pass's result stored. */
void arraySumStride(int *arr, int arraySize, int *sumValue, int numCycles, int cacheLineSize) {
	int total = 0;
	for (int pass = 0; pass < numCycles; ++pass) {
		total = 0;
		for (int start = 0; start < cacheLineSize; ++start)
			for (int pos = start; pos < arraySize; pos += cacheLineSize)
				total += arr[pos];
	}
	*sumValue = total;
}
/* Sum arraySize ints from arr into *sumValue using blockDim.x threads of a
   single block; each thread sums a contiguous slice, repeated numCycles
   times for timing. Caller must zero *sumValue before the launch.
   Fixes vs. original: the partial sums are combined with atomicAdd (the
   plain "*sumValue += tempSum" was a data race across threads), and the
   last thread also covers the remainder elements when arraySize is not
   divisible by blockDim.x (previously silently dropped). */
__global__ void arraySumGPU(int *arr, int arraySize, int *sumValue, int numCycles) {
	int index = threadIdx.x; // current thread ID
	int numThreads = blockDim.x;
	int numElem = arraySize / numThreads; // # of elements per thread
	int first = index * numElem;
	// Last thread picks up the tail so every element is counted.
	int last = (index == numThreads - 1) ? arraySize : first + numElem;
	int tempSum = 0;
	for (int k = 0; k < numCycles; k++) {
		tempSum = 0;
		for (int i = first; i < last; i++) {
			tempSum += arr[i];
		}
	}
	// Race-free accumulation of the per-thread partial sums.
	atomicAdd(sumValue, tempSum);
}
// sum array of integers non-sequentially (not using cache) on GPU
__global__ void arraySumStrideGPU(int *arr, int arraySize, int *sumValue, int numCycles, int cacheLineSize) {
int tempSum = 0;
for (int k = 0; k < numCycles; k++) {
tempSum = 0;
for (int i = 0; i < cacheLineSize; i++) {
for (int j = i; j < arraySize; j += cacheLineSize) {
tempSum += arr[j];
}
}
}
*sumValue = tempSum;
}
// allocate memory using "malloc" if on CPU, "cudaMallocManaged" if on GPU
void genericMalloc(void **ptr, int size, int onCPU) {
if (onCPU) {
*ptr = malloc(size);
}
else {
cudaMallocManaged(ptr, size);
}
}
// free memory, using either free or cudaFree
void genericFree(void *ptr, int onCPU) {
if (onCPU) {
free(ptr);
}
else {
cudaFree(ptr);
}
}
int main(int argc, char *argv[])
{
int arraySize = 1 << 23; // 8M integers
int usesCache = 1; // 0: don't use cache, 1: use cache (default)
int onCPU = 0; // allocate memory with CUDA functions
int cacheLineSize = 64 / sizeof(int); // # integers per cache line on CPU
int cacheLineSizeGPU = 128 / sizeof(int); // # integers per cache line on GPU
int numCycles = 500; // default # of repetitions on CPU
int numCyclesGPU = 30; // default # of repetitions on GPU
int numThreads = 1; // number of threads running on kernel
int maxNumThreads = 1024;
int *arr, *sumValue;
printf("Default options: sum on CPU, use cache, arraySize= %d integers.\n", arraySize);
printf("-- Default number of repetitions: %d (CPU), %d (GPU).\n", numCycles, numCyclesGPU);
printf("-- max number of threads= %d.\n", maxNumThreads);
// parse options
AnyOption *opt = new AnyOption();
// set usage
opt->addUsage("Options usage: ");
opt->addUsage("");
opt->addUsage(" --no_cache \tDon't use cache ");
opt->addUsage(" --rep <rep>\tNumber of repetitions ");
opt->addUsage(" --size <size>\tArray size (* 2^20) elements");
opt->addUsage("");
opt->printUsage();
// set options
opt->setFlag("no_cache");
opt->setOption("rep");
opt->setOption("size");
// Process commandline and get the options
opt->processCommandArgs(argc, argv);
// Get option values
if (opt->getFlag("no_cache")) {
usesCache = 0;
printf("no_cache flag set\n");
}
if (opt->getValue("rep") != NULL) {
numCycles = atoi(opt->getValue("rep"));
numCyclesGPU = numCycles;
printf("Number of repetitions set to: %d\n", numCycles);
}
if (opt->getValue("size") != NULL) {
arraySize = (1 << 20) * atoi(opt->getValue("size"));
printf("Array size set to: %dM integers\n", arraySize);
}
delete opt;
// options parsed
// allocate memory
genericMalloc((void**)&arr, arraySize * sizeof(int), onCPU);
genericMalloc((void**)&sumValue, arraySize * sizeof(int), onCPU);
// initialize array on CPU
for (int i = 0; i < arraySize; i++) {
arr[i] = 1;
}
*sumValue = 0;
// Time measurement
int elapsedClocks = 0, startClock = 0, endClock = 0;
double elapsedTimeCPU, avgElapsedTimeCPU, elapsedTimeGPU, avgElapsedTimeGPU;
// Measure CPU execution time
startClock = clock();
if (usesCache) {
arraySum(arr, arraySize, sumValue, numCycles);
}
else {
arraySumStride(arr, arraySize, sumValue, numCycles, cacheLineSize);
}
endClock = clock();
// Print CPU results
printf("startClock CPU: %d, endClock CPU: %d\n", startClock, endClock);
elapsedClocks = endClock - startClock;
printf("elapsedClock CPU: %d\n", elapsedClocks);
elapsedTimeCPU = ((double)(elapsedClocks)) / (CLOCKS_PER_SEC);
avgElapsedTimeCPU = elapsedTimeCPU / numCycles;
printf("CPU: sum= %d. \nElapsed time= %fs. Average execution time= %fs.\n\n", *sumValue, elapsedTimeCPU, avgElapsedTimeCPU);
// Prefetch data to GPU
*sumValue = 0;
int device = -1;
cudaGetDevice(&device);
cudaDeviceSynchronize();
cudaMemPrefetchAsync(arr, arraySize * sizeof(int), device, NULL);
cudaMemPrefetchAsync(sumValue, sizeof(int), device, NULL);
cudaDeviceSynchronize();
// Measure GPU execution time
avgElapsedTimeGPU = avgElapsedTimeCPU + 10; // run on GPU at least once
while ((avgElapsedTimeCPU < avgElapsedTimeGPU) && (numThreads <= maxNumThreads)) {
*sumValue = 0;
cudaMemPrefetchAsync(sumValue, sizeof(int), device, NULL);
cudaDeviceSynchronize();
startClock = clock();
if (usesCache) {
arraySumGPU << <1, numThreads >> > (arr, arraySize, sumValue, numCyclesGPU);
}
else {
arraySumStrideGPU << <1, 1 >> > (arr, arraySize, sumValue, numCyclesGPU, cacheLineSizeGPU);
}
cudaDeviceSynchronize();
endClock = clock();
// Print GPU results
elapsedClocks = endClock - startClock;
elapsedTimeGPU = ((double)(elapsedClocks)) / (CLOCKS_PER_SEC);
avgElapsedTimeGPU = elapsedTimeGPU / numCyclesGPU;
printf("GPU: %d threads.\n-- Elapsed time= %fs. Average execution time= %fs.\n", numThreads, elapsedTimeGPU, avgElapsedTimeGPU);
if (avgElapsedTimeCPU < avgElapsedTimeGPU) {
// double thread numbers
numThreads *= 2;
}
}
// print comparison results
if (avgElapsedTimeCPU >= avgElapsedTimeGPU) {
printf("GPU surpassed CPU when running %d threads\n. CPU time: %f, GPU time: %f.\n\n", numThreads, avgElapsedTimeCPU, avgElapsedTimeGPU);
}
else{
printf("CPU is still faster.\n\n");
}
// free allocated memory
genericFree(arr, onCPU);
genericFree(sumValue, onCPU);
return 0;
}
|
c8917213920f4b1beb05a007f39623689d70505b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#ifdef WITH_CUDA
#include "oneflow/core/framework/framework.h"
#include "oneflow/user/kernels/eye_kernel_util.h"
namespace oneflow {
namespace user_op {
template<typename T>
__global__ void EyeForwardGpuKernel(const int64_t cols, const int64_t rows, T* out) {
SetOneInDiag(cols, rows, out);
}
template<typename T>
struct EyeFunctor<DeviceType::kGPU, T> final {
void operator()(ep::Stream* stream, const int64_t& cols, const int64_t& rows, T* out) {
RUN_CUDA_KERNEL((EyeForwardGpuKernel<T>), stream, rows, cols, rows, out);
}
};
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_EYE_FUNCTOR, (DeviceType::kGPU), RANGE_DATA_TYPE_SEQ);
} // namespace user_op
} // namespace oneflow
#endif // End WITH_CUDA
| c8917213920f4b1beb05a007f39623689d70505b.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#ifdef WITH_CUDA
#include "oneflow/core/framework/framework.h"
#include "oneflow/user/kernels/eye_kernel_util.h"
namespace oneflow {
namespace user_op {
template<typename T>
__global__ void EyeForwardGpuKernel(const int64_t cols, const int64_t rows, T* out) {
SetOneInDiag(cols, rows, out);
}
template<typename T>
struct EyeFunctor<DeviceType::kGPU, T> final {
void operator()(ep::Stream* stream, const int64_t& cols, const int64_t& rows, T* out) {
RUN_CUDA_KERNEL((EyeForwardGpuKernel<T>), stream, rows, cols, rows, out);
}
};
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_EYE_FUNCTOR, (DeviceType::kGPU), RANGE_DATA_TYPE_SEQ);
} // namespace user_op
} // namespace oneflow
#endif // End WITH_CUDA
|
7d685a393d12e63ae72e3edbebbd33d4b7df05b6.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <chrono>
#include <hip/hip_runtime.h>
#define NUM_THREADS 256
#define GridDimMaxY 65536
template <typename T, bool kNFirst>
__global__ void ChannelShuffleNCHWKernel(
const int G,
const int K,
const int HxW,
const T* X,
T* Y)
{
const int C = G * K;
const int n = kNFirst ? blockIdx.x : blockIdx.y;
const int s = kNFirst ? blockIdx.y : blockIdx.x;
const int g = blockIdx.z % G;
const int k = blockIdx.z / G;
const int offset = s * NUM_THREADS + threadIdx.x;
if (offset < HxW) {
Y[(n * C + blockIdx.z) * HxW + offset] =
__ldg(X + (n * C + g * K + k) * HxW + offset);
}
}
template <typename T, int kSharedSize>
__global__ void
ChannelShuffleNHWCKernel(const int G, const int K, const T* X, T* Y)
{
__shared__ T sdata[kSharedSize];
const int C = G * K;
const int offset = blockIdx.x * C;
for (int i = threadIdx.x; i < C; i += blockDim.x) {
sdata[i] = __ldg(X + offset + i);
}
__syncthreads();
for (int i = threadIdx.x; i < C; i += blockDim.x) {
const int g = i % G;
const int k = i / G;
Y[offset + i] = sdata[g * K + k];
}
}
template <typename T>
bool ChannelShuffleNCHW (T *X, int N, int C, int G, int numel, T *Y,
long &time, int repeat)
{
if (C % G != 0 || numel < N * C) return false;
const int K = C / G;
const int HxW = numel / (N * C);
const int S = (HxW + NUM_THREADS - 1) / NUM_THREADS;
auto start = std::chrono::steady_clock::now();
if (N <= GridDimMaxY) {
const dim3 dim_grid(S, N, C);
for (int i = 0; i < repeat; i++)
hipLaunchKernelGGL(( ChannelShuffleNCHWKernel<float, false>)
, dim3(dim_grid), dim3(NUM_THREADS), 0, 0, G, K, HxW, X, Y);
} else {
const dim3 dim_grid(N, S, C);
for (int i = 0; i < repeat; i++)
hipLaunchKernelGGL(( ChannelShuffleNCHWKernel<float, true>)
, dim3(dim_grid), dim3(NUM_THREADS), 0, 0, G, K, HxW, X, Y);
}
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
return true;
}
template <typename T>
bool ChannelShuffleNHWC (T *X, int N, int C, int G, int numel, T *Y,
long &time, int repeat)
{
if (C % G != 0 || numel < N * C) return false;
const int K = C / G;
const int HxW = numel / (N * C);
const int outer_size = N * HxW;
auto start = std::chrono::steady_clock::now();
if (C <= 32) {
for (int i = 0; i < repeat; i++)
hipLaunchKernelGGL(( ChannelShuffleNHWCKernel<float, 32>)
, dim3(outer_size), dim3(NUM_THREADS), 0, 0, G, K, X, Y);
} else if (C <= 128) {
for (int i = 0; i < repeat; i++)
hipLaunchKernelGGL(( ChannelShuffleNHWCKernel<float, 128>)
, dim3(outer_size), dim3(NUM_THREADS), 0, 0, G, K, X, Y);
} else if (C <= 512) {
for (int i = 0; i < repeat; i++)
hipLaunchKernelGGL(( ChannelShuffleNHWCKernel<float, 512>)
, dim3(outer_size), dim3(NUM_THREADS), 0, 0, G, K, X, Y);
}
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
return true;
}
int main(int argc, char* argv[])
{
if (argc != 5) {
printf("Usage: %s <group size> <width> <height> <repeat>\n", argv[0]);
return 1;
}
const int G = atoi(argv[1]);
const int W = atoi(argv[2]);
const int H = atoi(argv[3]);
const int repeat = atoi(argv[4]);
long time;
// limited by the global device memory
for (int N = 1; N <= 64; N = N * 4) {
for (int C = 32; C <= 512; C = C * 4) {
printf("\n(N=%d C=%d W=%d H=%d)\n", N, C, W, H);
const int numel = N * C * W * H; // assume no integer overflow
size_t data_size_bytes = numel * sizeof(float);
float *d_X, *d_Y;
auto errorX = hipMalloc((void**)&d_X, data_size_bytes);
auto errorY = hipMalloc((void**)&d_Y, data_size_bytes);
if (errorX != hipSuccess || errorY != hipSuccess) {
if (errorX == hipSuccess) hipFree(d_X);
if (errorY == hipSuccess) hipFree(d_Y);
printf("Device memory allocation failed. Exit\n");
goto end;
}
auto ok = ChannelShuffleNHWC (d_X, N, C, G, numel, d_Y, time, repeat);
if (ok)
printf("Average time of channel shuffle (nhwc): %f (ms)\n", (time * 1e-6f) / repeat);
else
printf("Failed to execute channel shuffle (nhwc)\n");
ok = ChannelShuffleNCHW (d_X, N, C, G, numel, d_Y, time, repeat);
if (ok)
printf("Average time of channel shuffle (nchw): %f (ms)\n", (time * 1e-6f) / repeat);
else
printf("Failed to execute channel shuffle (nchw)\n");
hipFree(d_X);
hipFree(d_Y);
}
}
end: return 0;
}
| 7d685a393d12e63ae72e3edbebbd33d4b7df05b6.cu | #include <stdio.h>
#include <stdlib.h>
#include <chrono>
#include <hip/hip_runtime.h>
#define NUM_THREADS 256
#define GridDimMaxY 65536
template <typename T, bool kNFirst>
__global__ void ChannelShuffleNCHWKernel(
const int G,
const int K,
const int HxW,
const T* X,
T* Y)
{
const int C = G * K;
const int n = kNFirst ? blockIdx.x : blockIdx.y;
const int s = kNFirst ? blockIdx.y : blockIdx.x;
const int g = blockIdx.z % G;
const int k = blockIdx.z / G;
const int offset = s * NUM_THREADS + threadIdx.x;
if (offset < HxW) {
Y[(n * C + blockIdx.z) * HxW + offset] =
__ldg(X + (n * C + g * K + k) * HxW + offset);
}
}
template <typename T, int kSharedSize>
__global__ void
ChannelShuffleNHWCKernel(const int G, const int K, const T* X, T* Y)
{
__shared__ T sdata[kSharedSize];
const int C = G * K;
const int offset = blockIdx.x * C;
for (int i = threadIdx.x; i < C; i += blockDim.x) {
sdata[i] = __ldg(X + offset + i);
}
__syncthreads();
for (int i = threadIdx.x; i < C; i += blockDim.x) {
const int g = i % G;
const int k = i / G;
Y[offset + i] = sdata[g * K + k];
}
}
template <typename T>
bool ChannelShuffleNCHW (T *X, int N, int C, int G, int numel, T *Y,
long &time, int repeat)
{
if (C % G != 0 || numel < N * C) return false;
const int K = C / G;
const int HxW = numel / (N * C);
const int S = (HxW + NUM_THREADS - 1) / NUM_THREADS;
auto start = std::chrono::steady_clock::now();
if (N <= GridDimMaxY) {
const dim3 dim_grid(S, N, C);
for (int i = 0; i < repeat; i++)
ChannelShuffleNCHWKernel<float, false>
<<<dim_grid, NUM_THREADS>>>(G, K, HxW, X, Y);
} else {
const dim3 dim_grid(N, S, C);
for (int i = 0; i < repeat; i++)
ChannelShuffleNCHWKernel<float, true>
<<<dim_grid, NUM_THREADS>>>(G, K, HxW, X, Y);
}
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
return true;
}
template <typename T>
bool ChannelShuffleNHWC (T *X, int N, int C, int G, int numel, T *Y,
long &time, int repeat)
{
if (C % G != 0 || numel < N * C) return false;
const int K = C / G;
const int HxW = numel / (N * C);
const int outer_size = N * HxW;
auto start = std::chrono::steady_clock::now();
if (C <= 32) {
for (int i = 0; i < repeat; i++)
ChannelShuffleNHWCKernel<float, 32>
<<<outer_size, NUM_THREADS>>>(G, K, X, Y);
} else if (C <= 128) {
for (int i = 0; i < repeat; i++)
ChannelShuffleNHWCKernel<float, 128>
<<<outer_size, NUM_THREADS>>>(G, K, X, Y);
} else if (C <= 512) {
for (int i = 0; i < repeat; i++)
ChannelShuffleNHWCKernel<float, 512>
<<<outer_size, NUM_THREADS>>>(G, K, X, Y);
}
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
return true;
}
int main(int argc, char* argv[])
{
if (argc != 5) {
printf("Usage: %s <group size> <width> <height> <repeat>\n", argv[0]);
return 1;
}
const int G = atoi(argv[1]);
const int W = atoi(argv[2]);
const int H = atoi(argv[3]);
const int repeat = atoi(argv[4]);
long time;
// limited by the global device memory
for (int N = 1; N <= 64; N = N * 4) {
for (int C = 32; C <= 512; C = C * 4) {
printf("\n(N=%d C=%d W=%d H=%d)\n", N, C, W, H);
const int numel = N * C * W * H; // assume no integer overflow
size_t data_size_bytes = numel * sizeof(float);
float *d_X, *d_Y;
auto errorX = hipMalloc((void**)&d_X, data_size_bytes);
auto errorY = hipMalloc((void**)&d_Y, data_size_bytes);
if (errorX != hipSuccess || errorY != hipSuccess) {
if (errorX == hipSuccess) hipFree(d_X);
if (errorY == hipSuccess) hipFree(d_Y);
printf("Device memory allocation failed. Exit\n");
goto end;
}
auto ok = ChannelShuffleNHWC (d_X, N, C, G, numel, d_Y, time, repeat);
if (ok)
printf("Average time of channel shuffle (nhwc): %f (ms)\n", (time * 1e-6f) / repeat);
else
printf("Failed to execute channel shuffle (nhwc)\n");
ok = ChannelShuffleNCHW (d_X, N, C, G, numel, d_Y, time, repeat);
if (ok)
printf("Average time of channel shuffle (nchw): %f (ms)\n", (time * 1e-6f) / repeat);
else
printf("Failed to execute channel shuffle (nchw)\n");
hipFree(d_X);
hipFree(d_Y);
}
}
end: return 0;
}
|
cf3980d7905c0d170a4517acc3046af18e4a4a6e.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef _KMEANS_CUDA_KERNEL_H_
#define _KMEANS_CUDA_KERNEL_H_
#include <stdio.h>
#include <hip/hip_runtime.h>
#include "kmeans.h"
// FIXME: Make this a runtime selectable variable!
#define ASSUMED_NR_CLUSTERS 32
#define SDATA( index) CUT_BANK_CHECKER(sdata, index)
// t_features has the layout dim0[points 0-m-1]dim1[ points 0-m-1]...
texture<float, 1, hipReadModeElementType> t_features;
// t_features_flipped has the layout point0[dim 0-n-1]point1[dim 0-n-1]
texture<float, 1, hipReadModeElementType> t_features_flipped;
texture<float, 1, hipReadModeElementType> t_clusters;
__constant__ float c_clusters[ASSUMED_NR_CLUSTERS*34]; /* constant memory for cluster centers */
/* ----------------- invert_mapping() --------------------- */
/* inverts data array from row-major to column-major.
[p0,dim0][p0,dim1][p0,dim2] ...
[p1,dim0][p1,dim1][p1,dim2] ...
[p2,dim0][p2,dim1][p2,dim2] ...
to
[dim0,p0][dim0,p1][dim0,p2] ...
[dim1,p0][dim1,p1][dim1,p2] ...
[dim2,p0][dim2,p1][dim2,p2] ...
*/
__global__ void invert_mapping(float *input, /* original */
float *output, /* inverted */
int npoints, /* npoints */
int nfeatures) /* nfeatures */
{
int point_id = threadIdx.x + blockDim.x*blockIdx.x; /* id of thread */
int i;
if(point_id < npoints){
for(i=0;i<nfeatures;i++)
output[point_id + npoints*i] = input[point_id*nfeatures + i];
}
return;
}
/* ----------------- invert_mapping() end --------------------- */
/* to turn on the GPU delta and center reduction */
//#define GPU_DELTA_REDUCTION
//#define GPU_NEW_CENTER_REDUCTION
/* ----------------- kmeansPoint() --------------------- */
/* find the index of nearest cluster centers and change membership*/
__global__ void
kmeansPoint(float *features, /* in: [npoints*nfeatures] */
int nfeatures,
int npoints,
int nclusters,
int *membership,
float *clusters,
float *block_clusters,
int *block_deltas)
{
// block ID
const unsigned int block_id = gridDim.x*blockIdx.y+blockIdx.x;
// point/thread ID
const unsigned int point_id = block_id*blockDim.x*blockDim.y + threadIdx.x;
int index = -1;
if (point_id < npoints)
{
int i, j;
float min_dist = FLT_MAX;
float dist; /* distance square between a point to cluster center */
/* find the cluster center id with min distance to pt */
for (i=0; i<nclusters; i++) {
int cluster_base_index = i*nfeatures; /* base index of cluster centers for inverted array */
float ans=0.0; /* Euclidean distance sqaure */
for (j=0; j < nfeatures; j++)
{
int addr = point_id + j*npoints; /* appropriate index of data point */
float diff = (tex1Dfetch(t_features,addr) -
c_clusters[cluster_base_index + j]);
/* distance between a data point to cluster centers */
ans += diff*diff; /* sum of squares */
}
dist = ans;
/* see if distance is smaller than previous ones:
if so, change minimum distance and save index of cluster center */
if (dist < min_dist) {
min_dist = dist;
index = i;
}
}
}
#ifdef GPU_DELTA_REDUCTION
// count how many points are now closer to a different cluster center
__shared__ int deltas[THREADS_PER_BLOCK];
if(threadIdx.x < THREADS_PER_BLOCK) {
deltas[threadIdx.x] = 0;
}
#endif
if (point_id < npoints)
{
#ifdef GPU_DELTA_REDUCTION
/* if membership changes, increase delta by 1 */
if (membership[point_id] != index) {
deltas[threadIdx.x] = 1;
}
#endif
/* assign the membership to object point_id */
membership[point_id] = index;
}
#ifdef GPU_DELTA_REDUCTION
// make sure all the deltas have finished writing to shared memory
__syncthreads();
// now let's count them
// primitve reduction follows
unsigned int threadids_participating = THREADS_PER_BLOCK / 2;
for(;threadids_participating > 1; threadids_participating /= 2) {
if(threadIdx.x < threadids_participating) {
deltas[threadIdx.x] += deltas[threadIdx.x + threadids_participating];
}
__syncthreads();
}
if(threadIdx.x < 1) {deltas[threadIdx.x] += deltas[threadIdx.x + 1];}
__syncthreads();
// propagate number of changes to global counter
if(threadIdx.x == 0) {
block_deltas[blockIdx.y * gridDim.x + blockIdx.x] = deltas[0];
//printf("original id: %d, modified: %d\n", blockIdx.y*gridDim.x+blockIdx.x, blockIdx.x);
}
#endif
#ifdef GPU_NEW_CENTER_REDUCTION
int center_id = threadIdx.x / nfeatures;
int dim_id = threadIdx.x - nfeatures*center_id;
__shared__ int new_center_ids[THREADS_PER_BLOCK];
new_center_ids[threadIdx.x] = index;
__syncthreads();
/***
determine which dimension calculte the sum for
mapping of threads is
center0[dim0,dim1,dim2,...]center1[dim0,dim1,dim2,...]...
***/
int new_base_index = (point_id - threadIdx.x)*nfeatures + dim_id;
float accumulator = 0.f;
if(threadIdx.x < nfeatures * nclusters) {
// accumulate over all the elements of this threadblock
for(int i = 0; i< (THREADS_PER_BLOCK); i++) {
float val = tex1Dfetch(t_features_flipped,new_base_index+i*nfeatures);
if(new_center_ids[i] == center_id)
accumulator += val;
}
// now store the sum for this threadblock
/***
mapping to global array is
block0[center0[dim0,dim1,dim2,...]center1[dim0,dim1,dim2,...]...]block1[...]...
***/
block_clusters[(blockIdx.y*gridDim.x + blockIdx.x) * nclusters * nfeatures + threadIdx.x] = accumulator;
}
#endif
}
#endif // #ifndef _KMEANS_CUDA_KERNEL_H_
| cf3980d7905c0d170a4517acc3046af18e4a4a6e.cu | #ifndef _KMEANS_CUDA_KERNEL_H_
#define _KMEANS_CUDA_KERNEL_H_
#include <stdio.h>
#include <cuda.h>
#include "kmeans.h"
// FIXME: Make this a runtime selectable variable!
#define ASSUMED_NR_CLUSTERS 32
#define SDATA( index) CUT_BANK_CHECKER(sdata, index)
// t_features has the layout dim0[points 0-m-1]dim1[ points 0-m-1]...
texture<float, 1, cudaReadModeElementType> t_features;
// t_features_flipped has the layout point0[dim 0-n-1]point1[dim 0-n-1]
texture<float, 1, cudaReadModeElementType> t_features_flipped;
texture<float, 1, cudaReadModeElementType> t_clusters;
__constant__ float c_clusters[ASSUMED_NR_CLUSTERS*34]; /* constant memory for cluster centers */
/* ----------------- invert_mapping() --------------------- */
/* inverts data array from row-major to column-major.
[p0,dim0][p0,dim1][p0,dim2] ...
[p1,dim0][p1,dim1][p1,dim2] ...
[p2,dim0][p2,dim1][p2,dim2] ...
to
[dim0,p0][dim0,p1][dim0,p2] ...
[dim1,p0][dim1,p1][dim1,p2] ...
[dim2,p0][dim2,p1][dim2,p2] ...
*/
__global__ void invert_mapping(float *input, /* original */
float *output, /* inverted */
int npoints, /* npoints */
int nfeatures) /* nfeatures */
{
int point_id = threadIdx.x + blockDim.x*blockIdx.x; /* id of thread */
int i;
if(point_id < npoints){
for(i=0;i<nfeatures;i++)
output[point_id + npoints*i] = input[point_id*nfeatures + i];
}
return;
}
/* ----------------- invert_mapping() end --------------------- */
/* to turn on the GPU delta and center reduction */
//#define GPU_DELTA_REDUCTION
//#define GPU_NEW_CENTER_REDUCTION
/* ----------------- kmeansPoint() --------------------- */
/* find the index of nearest cluster centers and change membership*/
__global__ void
kmeansPoint(float *features, /* in: [npoints*nfeatures] */
int nfeatures,
int npoints,
int nclusters,
int *membership,
float *clusters,
float *block_clusters,
int *block_deltas)
{
// block ID
const unsigned int block_id = gridDim.x*blockIdx.y+blockIdx.x;
// point/thread ID
const unsigned int point_id = block_id*blockDim.x*blockDim.y + threadIdx.x;
int index = -1;
if (point_id < npoints)
{
int i, j;
float min_dist = FLT_MAX;
float dist; /* distance square between a point to cluster center */
/* find the cluster center id with min distance to pt */
for (i=0; i<nclusters; i++) {
int cluster_base_index = i*nfeatures; /* base index of cluster centers for inverted array */
float ans=0.0; /* Euclidean distance sqaure */
for (j=0; j < nfeatures; j++)
{
int addr = point_id + j*npoints; /* appropriate index of data point */
float diff = (tex1Dfetch(t_features,addr) -
c_clusters[cluster_base_index + j]);
/* distance between a data point to cluster centers */
ans += diff*diff; /* sum of squares */
}
dist = ans;
/* see if distance is smaller than previous ones:
if so, change minimum distance and save index of cluster center */
if (dist < min_dist) {
min_dist = dist;
index = i;
}
}
}
#ifdef GPU_DELTA_REDUCTION
// count how many points are now closer to a different cluster center
__shared__ int deltas[THREADS_PER_BLOCK];
if(threadIdx.x < THREADS_PER_BLOCK) {
deltas[threadIdx.x] = 0;
}
#endif
if (point_id < npoints)
{
#ifdef GPU_DELTA_REDUCTION
/* if membership changes, increase delta by 1 */
if (membership[point_id] != index) {
deltas[threadIdx.x] = 1;
}
#endif
/* assign the membership to object point_id */
membership[point_id] = index;
}
#ifdef GPU_DELTA_REDUCTION
// make sure all the deltas have finished writing to shared memory
__syncthreads();
// now let's count them
// primitve reduction follows
unsigned int threadids_participating = THREADS_PER_BLOCK / 2;
for(;threadids_participating > 1; threadids_participating /= 2) {
if(threadIdx.x < threadids_participating) {
deltas[threadIdx.x] += deltas[threadIdx.x + threadids_participating];
}
__syncthreads();
}
if(threadIdx.x < 1) {deltas[threadIdx.x] += deltas[threadIdx.x + 1];}
__syncthreads();
// propagate number of changes to global counter
if(threadIdx.x == 0) {
block_deltas[blockIdx.y * gridDim.x + blockIdx.x] = deltas[0];
//printf("original id: %d, modified: %d\n", blockIdx.y*gridDim.x+blockIdx.x, blockIdx.x);
}
#endif
#ifdef GPU_NEW_CENTER_REDUCTION
int center_id = threadIdx.x / nfeatures;
int dim_id = threadIdx.x - nfeatures*center_id;
__shared__ int new_center_ids[THREADS_PER_BLOCK];
new_center_ids[threadIdx.x] = index;
__syncthreads();
/***
determine which dimension calculte the sum for
mapping of threads is
center0[dim0,dim1,dim2,...]center1[dim0,dim1,dim2,...]...
***/
int new_base_index = (point_id - threadIdx.x)*nfeatures + dim_id;
float accumulator = 0.f;
if(threadIdx.x < nfeatures * nclusters) {
// accumulate over all the elements of this threadblock
for(int i = 0; i< (THREADS_PER_BLOCK); i++) {
float val = tex1Dfetch(t_features_flipped,new_base_index+i*nfeatures);
if(new_center_ids[i] == center_id)
accumulator += val;
}
// now store the sum for this threadblock
/***
mapping to global array is
block0[center0[dim0,dim1,dim2,...]center1[dim0,dim1,dim2,...]...]block1[...]...
***/
block_clusters[(blockIdx.y*gridDim.x + blockIdx.x) * nclusters * nfeatures + threadIdx.x] = accumulator;
}
#endif
}
#endif // #ifndef _KMEANS_CUDA_KERNEL_H_
|
27e0691c7e17a9ceba2dac139a9b7b3b1b138533.hip | // !!! This is a file automatically generated by hipify!!!
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/transform.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/simd_functions.hpp"
#include "arithm_func_traits.hpp"
using namespace cv::cuda;
using namespace cv::cuda::device;
namespace arithm
{
struct Mul_8uc4_32f : binary_function<uint, float, uint>
{
__device__ __forceinline__ uint operator ()(uint a, float b) const
{
uint res = 0;
res |= (saturate_cast<uchar>((0xffu & (a )) * b) );
res |= (saturate_cast<uchar>((0xffu & (a >> 8)) * b) << 8);
res |= (saturate_cast<uchar>((0xffu & (a >> 16)) * b) << 16);
res |= (saturate_cast<uchar>((0xffu & (a >> 24)) * b) << 24);
return res;
}
__host__ __device__ __forceinline__ Mul_8uc4_32f() {}
__host__ __device__ __forceinline__ Mul_8uc4_32f(const Mul_8uc4_32f&) {}
};
struct Mul_16sc4_32f : binary_function<short4, float, short4>
{
__device__ __forceinline__ short4 operator ()(short4 a, float b) const
{
return make_short4(saturate_cast<short>(a.x * b), saturate_cast<short>(a.y * b),
saturate_cast<short>(a.z * b), saturate_cast<short>(a.w * b));
}
__host__ __device__ __forceinline__ Mul_16sc4_32f() {}
__host__ __device__ __forceinline__ Mul_16sc4_32f(const Mul_16sc4_32f&) {}
};
template <typename T, typename D> struct Mul : binary_function<T, T, D>
{
__device__ __forceinline__ D operator ()(T a, T b) const
{
return saturate_cast<D>(a * b);
}
__host__ __device__ __forceinline__ Mul() {}
__host__ __device__ __forceinline__ Mul(const Mul&) {}
};
template <typename T, typename S, typename D> struct MulScale : binary_function<T, T, D>
{
S scale;
__host__ explicit MulScale(S scale_) : scale(scale_) {}
__device__ __forceinline__ D operator ()(T a, T b) const
{
return saturate_cast<D>(scale * a * b);
}
};
}
namespace cv { namespace cuda { namespace device
{
template <> struct TransformFunctorTraits<arithm::Mul_8uc4_32f> : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
template <typename T, typename D> struct TransformFunctorTraits< arithm::Mul<T, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
template <typename T, typename S, typename D> struct TransformFunctorTraits< arithm::MulScale<T, S, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
}}}
namespace arithm
{
void mulMat_8uc4_32f(PtrStepSz<uint> src1, PtrStepSzf src2, PtrStepSz<uint> dst, hipStream_t stream)
{
device::transform(src1, src2, dst, Mul_8uc4_32f(), WithOutMask(), stream);
}
void mulMat_16sc4_32f(PtrStepSz<short4> src1, PtrStepSzf src2, PtrStepSz<short4> dst, hipStream_t stream)
{
device::transform(src1, src2, dst, Mul_16sc4_32f(), WithOutMask(), stream);
}
template <typename T, typename S, typename D>
void mulMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream)
{
if (scale == 1)
{
Mul<T, D> op;
device::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
else
{
MulScale<T, S, D> op(static_cast<S>(scale));
device::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
}
template void mulMat<uchar, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<uchar, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<uchar, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<uchar, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<uchar, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<uchar, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<uchar, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<schar, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<schar, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<schar, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<schar, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<schar, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<schar, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<schar, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<ushort, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<ushort, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<ushort, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<ushort, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<ushort, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<ushort, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<ushort, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<short, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<short, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<short, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<short, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<short, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<short, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<short, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<int, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<int, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<int, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<int, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<int, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<int, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<int, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<float, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<float, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<float, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<float, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<float, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<float, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<float, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<double, double, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<double, double, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<double, double, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<double, double, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<double, double, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<double, double, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<double, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
}
#endif // CUDA_DISABLER
| 27e0691c7e17a9ceba2dac139a9b7b3b1b138533.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/transform.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/simd_functions.hpp"
#include "arithm_func_traits.hpp"
using namespace cv::cuda;
using namespace cv::cuda::device;
namespace arithm
{
struct Mul_8uc4_32f : binary_function<uint, float, uint>
{
__device__ __forceinline__ uint operator ()(uint a, float b) const
{
uint res = 0;
res |= (saturate_cast<uchar>((0xffu & (a )) * b) );
res |= (saturate_cast<uchar>((0xffu & (a >> 8)) * b) << 8);
res |= (saturate_cast<uchar>((0xffu & (a >> 16)) * b) << 16);
res |= (saturate_cast<uchar>((0xffu & (a >> 24)) * b) << 24);
return res;
}
__host__ __device__ __forceinline__ Mul_8uc4_32f() {}
__host__ __device__ __forceinline__ Mul_8uc4_32f(const Mul_8uc4_32f&) {}
};
struct Mul_16sc4_32f : binary_function<short4, float, short4>
{
__device__ __forceinline__ short4 operator ()(short4 a, float b) const
{
return make_short4(saturate_cast<short>(a.x * b), saturate_cast<short>(a.y * b),
saturate_cast<short>(a.z * b), saturate_cast<short>(a.w * b));
}
__host__ __device__ __forceinline__ Mul_16sc4_32f() {}
__host__ __device__ __forceinline__ Mul_16sc4_32f(const Mul_16sc4_32f&) {}
};
template <typename T, typename D> struct Mul : binary_function<T, T, D>
{
__device__ __forceinline__ D operator ()(T a, T b) const
{
return saturate_cast<D>(a * b);
}
__host__ __device__ __forceinline__ Mul() {}
__host__ __device__ __forceinline__ Mul(const Mul&) {}
};
template <typename T, typename S, typename D> struct MulScale : binary_function<T, T, D>
{
S scale;
__host__ explicit MulScale(S scale_) : scale(scale_) {}
__device__ __forceinline__ D operator ()(T a, T b) const
{
return saturate_cast<D>(scale * a * b);
}
};
}
namespace cv { namespace cuda { namespace device
{
template <> struct TransformFunctorTraits<arithm::Mul_8uc4_32f> : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
template <typename T, typename D> struct TransformFunctorTraits< arithm::Mul<T, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
template <typename T, typename S, typename D> struct TransformFunctorTraits< arithm::MulScale<T, S, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
}}}
namespace arithm
{
void mulMat_8uc4_32f(PtrStepSz<uint> src1, PtrStepSzf src2, PtrStepSz<uint> dst, cudaStream_t stream)
{
device::transform(src1, src2, dst, Mul_8uc4_32f(), WithOutMask(), stream);
}
void mulMat_16sc4_32f(PtrStepSz<short4> src1, PtrStepSzf src2, PtrStepSz<short4> dst, cudaStream_t stream)
{
device::transform(src1, src2, dst, Mul_16sc4_32f(), WithOutMask(), stream);
}
template <typename T, typename S, typename D>
void mulMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream)
{
if (scale == 1)
{
Mul<T, D> op;
device::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
else
{
MulScale<T, S, D> op(static_cast<S>(scale));
device::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
}
template void mulMat<uchar, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<uchar, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<uchar, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<uchar, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<uchar, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<uchar, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<uchar, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<schar, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<schar, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<schar, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<schar, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<schar, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<schar, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<schar, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<ushort, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<ushort, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<ushort, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<ushort, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<ushort, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<ushort, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<ushort, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<short, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<short, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<short, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<short, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<short, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<short, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<short, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<int, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<int, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<int, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<int, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<int, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<int, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<int, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<float, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<float, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<float, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<float, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<float, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<float, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<float, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<double, double, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<double, double, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<double, double, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<double, double, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<double, double, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<double, double, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<double, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
}
#endif // CUDA_DISABLER
|
f217f3dde93002fdc63e770db8ce01d6f3018d2e.hip | // !!! This is a file automatically generated by hipify!!!
//
// Created by mustafa on 6/3/20.
//
#include "../headers/MC_RNG.cuh"
#include "../headers/MC_Math.cuh"
__device__ float MC_RNG::generate(hiprandState_t *globalState, int const i) {
hiprandState_t localState = globalState[i];
float random = hiprand_uniform(&localState);
globalState[i] = localState;
return random;
}
__device__ float MC_RNG::getRandomNumber(hiprandState_t *states, int i) {
float step;
step = generate(states, i);
return step;
}
__device__ MC_Vector MC_RNG::getRandomDirection(hiprandState_t *globalState, int const i) {
float u = generate(globalState, i);
float v = generate(globalState, i);
float theta = 2 * (float) M_PI * u;
float phi = acos(1 - 2 * v);
// Transforming into the cartesian space
float x = sin(phi) * cos(theta);
float y = sin(phi) * sin(theta);
float z = cos(phi);
return MCMath::normalized(MC_Vector(x, y, z));
}
__device__ MC_Point MC_RNG::getRandomPoint(hiprandState_t *globalState, int const i) {
float u = generate(globalState, i);
float v = generate(globalState, i);
float theta = 2 * (float) M_PI * u;
float phi = acos(1 - 2 * v);
// Transforming into the cartesian space
float x = sin(phi) * cos(theta);
float y = sin(phi) * sin(theta);
float z = cos(phi);
return {x, y, z};
}
__device__ void MC_RNG::roulette(MC_Photon &photon, float const chance, hiprandState_t *globalState, int const i) {
if (generate(globalState, i) >= chance) {
photon.terminate();
photon.setState(MC_Photon::TERMINATED);
} else {
photon.boost(chance);
}
}
__device__ float MC_RNG::getRandomStep(hiprandState_t *states, int i, float coefficient) {
return ((-1 * log(MC_RNG::getRandomNumber(states, i))) / coefficient);
}
__device__ MC_Path MC_RNG::getRandomPath(hiprandState_t *states, int i, MC_Point origin, float coefficient) {
return {origin, MC_RNG::getRandomDirection(states, i),
MC_RNG::getRandomStep(states, i, coefficient)};
}
| f217f3dde93002fdc63e770db8ce01d6f3018d2e.cu | //
// Created by mustafa on 6/3/20.
//
#include "../headers/MC_RNG.cuh"
#include "../headers/MC_Math.cuh"
__device__ float MC_RNG::generate(curandState *globalState, int const i) {
curandState localState = globalState[i];
float random = curand_uniform(&localState);
globalState[i] = localState;
return random;
}
__device__ float MC_RNG::getRandomNumber(curandState *states, int i) {
float step;
step = generate(states, i);
return step;
}
__device__ MC_Vector MC_RNG::getRandomDirection(curandState *globalState, int const i) {
float u = generate(globalState, i);
float v = generate(globalState, i);
float theta = 2 * (float) M_PI * u;
float phi = acos(1 - 2 * v);
// Transforming into the cartesian space
float x = sin(phi) * cos(theta);
float y = sin(phi) * sin(theta);
float z = cos(phi);
return MCMath::normalized(MC_Vector(x, y, z));
}
__device__ MC_Point MC_RNG::getRandomPoint(curandState *globalState, int const i) {
float u = generate(globalState, i);
float v = generate(globalState, i);
float theta = 2 * (float) M_PI * u;
float phi = acos(1 - 2 * v);
// Transforming into the cartesian space
float x = sin(phi) * cos(theta);
float y = sin(phi) * sin(theta);
float z = cos(phi);
return {x, y, z};
}
__device__ void MC_RNG::roulette(MC_Photon &photon, float const chance, curandState *globalState, int const i) {
if (generate(globalState, i) >= chance) {
photon.terminate();
photon.setState(MC_Photon::TERMINATED);
} else {
photon.boost(chance);
}
}
__device__ float MC_RNG::getRandomStep(curandState *states, int i, float coefficient) {
return ((-1 * log(MC_RNG::getRandomNumber(states, i))) / coefficient);
}
__device__ MC_Path MC_RNG::getRandomPath(curandState *states, int i, MC_Point origin, float coefficient) {
return {origin, MC_RNG::getRandomDirection(states, i),
MC_RNG::getRandomStep(states, i, coefficient)};
}
|
axdy.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//=======================================================================
// Copyright (c) 2017 Baptiste Wicht
// Distributed under the terms of the MIT License.
// (See accompanying file LICENSE or copy at
// http://opensource.org/licenses/MIT)
//=======================================================================
#include "egblas/axdy.hpp"
template <typename T>
__global__ void axdy_kernel(size_t n, T alpha, const T* x, size_t incx, T* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
y[incy * index] = y[incy * index] / (alpha * x[incx * index]);
}
}
template <>
__global__ void axdy_kernel(size_t n, hipComplex alpha, const hipComplex* x, size_t incx, hipComplex* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
y[incy * index] = cuCdivf(y[incy * index], (cuCmulf(alpha, x[incx * index])));
}
}
template <>
__global__ void axdy_kernel(size_t n, hipDoubleComplex alpha, const hipDoubleComplex* x, size_t incx, hipDoubleComplex* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
y[incy * index] = cuCdiv(y[incy * index], (cuCmul(alpha, x[incx * index])));
}
}
template <typename T>
__global__ void axdy_kernel1(size_t n, const T* x, size_t incx, T* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
y[incy * index] = y[incy * index] / x[incx * index];
}
}
template <typename T>
void axdy_kernel_run(size_t n, T alpha, const T* x, size_t incx, T* y, size_t incy) {
int blockSize;
int minGridSize;
hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, axdy_kernel<T>, 0, 0);
int gridSize = ((n / incy) + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( axdy_kernel<T>), dim3(gridSize), dim3(blockSize), 0, 0, n, alpha, x, incx, y, incy);
hipDeviceSynchronize();
}
template <typename T>
void axdy_kernel1_run(size_t n, const T* x, size_t incx, T* y, size_t incy) {
int blockSize;
int minGridSize;
hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, axdy_kernel1<T>, 0, 0);
int gridSize = ((n / incy) + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( axdy_kernel1<T>), dim3(gridSize), dim3(blockSize), 0, 0, n, x, incx, y, incy);
hipDeviceSynchronize();
}
void egblas_saxdy(size_t n, float alpha, const float* x, size_t incx, float* y, size_t incy) {
if (alpha == 1.0f) {
axdy_kernel1_run(n, x, incx, y, incy);
} else {
axdy_kernel_run(n, alpha, x, incx, y, incy);
}
}
void egblas_daxdy(size_t n, double alpha, const double* x, size_t incx, double* y, size_t incy) {
if (alpha == 1.0) {
axdy_kernel1_run(n, x, incx, y, incy);
} else {
axdy_kernel_run(n, alpha, x, incx, y, incy);
}
}
void egblas_caxdy(size_t n, hipComplex alpha, const hipComplex* x, size_t incx, hipComplex* y, size_t incy) {
axdy_kernel_run(n, alpha, x, incx, y, incy);
}
void egblas_zaxdy(size_t n, hipDoubleComplex alpha, const hipDoubleComplex* x, size_t incx, hipDoubleComplex* y, size_t incy) {
axdy_kernel_run(n, alpha, x, incx, y, incy);
}
| axdy.cu | //=======================================================================
// Copyright (c) 2017 Baptiste Wicht
// Distributed under the terms of the MIT License.
// (See accompanying file LICENSE or copy at
// http://opensource.org/licenses/MIT)
//=======================================================================
#include "egblas/axdy.hpp"
template <typename T>
__global__ void axdy_kernel(size_t n, T alpha, const T* x, size_t incx, T* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
y[incy * index] = y[incy * index] / (alpha * x[incx * index]);
}
}
template <>
__global__ void axdy_kernel(size_t n, cuComplex alpha, const cuComplex* x, size_t incx, cuComplex* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
y[incy * index] = cuCdivf(y[incy * index], (cuCmulf(alpha, x[incx * index])));
}
}
template <>
__global__ void axdy_kernel(size_t n, cuDoubleComplex alpha, const cuDoubleComplex* x, size_t incx, cuDoubleComplex* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
y[incy * index] = cuCdiv(y[incy * index], (cuCmul(alpha, x[incx * index])));
}
}
template <typename T>
__global__ void axdy_kernel1(size_t n, const T* x, size_t incx, T* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
y[incy * index] = y[incy * index] / x[incx * index];
}
}
template <typename T>
void axdy_kernel_run(size_t n, T alpha, const T* x, size_t incx, T* y, size_t incy) {
int blockSize;
int minGridSize;
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, axdy_kernel<T>, 0, 0);
int gridSize = ((n / incy) + blockSize - 1) / blockSize;
axdy_kernel<T><<<gridSize, blockSize>>>(n, alpha, x, incx, y, incy);
cudaDeviceSynchronize();
}
template <typename T>
void axdy_kernel1_run(size_t n, const T* x, size_t incx, T* y, size_t incy) {
int blockSize;
int minGridSize;
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, axdy_kernel1<T>, 0, 0);
int gridSize = ((n / incy) + blockSize - 1) / blockSize;
axdy_kernel1<T><<<gridSize, blockSize>>>(n, x, incx, y, incy);
cudaDeviceSynchronize();
}
void egblas_saxdy(size_t n, float alpha, const float* x, size_t incx, float* y, size_t incy) {
if (alpha == 1.0f) {
axdy_kernel1_run(n, x, incx, y, incy);
} else {
axdy_kernel_run(n, alpha, x, incx, y, incy);
}
}
void egblas_daxdy(size_t n, double alpha, const double* x, size_t incx, double* y, size_t incy) {
if (alpha == 1.0) {
axdy_kernel1_run(n, x, incx, y, incy);
} else {
axdy_kernel_run(n, alpha, x, incx, y, incy);
}
}
void egblas_caxdy(size_t n, cuComplex alpha, const cuComplex* x, size_t incx, cuComplex* y, size_t incy) {
axdy_kernel_run(n, alpha, x, incx, y, incy);
}
void egblas_zaxdy(size_t n, cuDoubleComplex alpha, const cuDoubleComplex* x, size_t incx, cuDoubleComplex* y, size_t incy) {
axdy_kernel_run(n, alpha, x, incx, y, incy);
}
|
5ff19617e8a41570bd778b837355cf97f3174721.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* calcNormalThresholdWHA.cu
*
* Created on: 02-09-2013
* Author: Kamil Szewc (kamil.szewc@gmail.com)
*/
#include "../../sph.h"
__global__ void calcNormalThresholdWHA(Particle *p, Parameters *par)
{
uint tid = threadIdx.x + blockIdx.x*blockDim.x;
while (tid < par->N) {
if (par->T_NORMAL_VECTOR_TRESHOLD == 1)
{
if (p[tid].n.w > (0.01f*par->I_H)) p[tid].na = 1;
else p[tid].na = 0;
}
else
{
if (p[tid].n.w > 0.0) p[tid].na = 1;
else p[tid].na = 0;
}
tid += blockDim.x * gridDim.x;
}
}
| 5ff19617e8a41570bd778b837355cf97f3174721.cu | /*
* calcNormalThresholdWHA.cu
*
* Created on: 02-09-2013
* Author: Kamil Szewc (kamil.szewc@gmail.com)
*/
#include "../../sph.h"
__global__ void calcNormalThresholdWHA(Particle *p, Parameters *par)
{
uint tid = threadIdx.x + blockIdx.x*blockDim.x;
while (tid < par->N) {
if (par->T_NORMAL_VECTOR_TRESHOLD == 1)
{
if (p[tid].n.w > (0.01f*par->I_H)) p[tid].na = 1;
else p[tid].na = 0;
}
else
{
if (p[tid].n.w > 0.0) p[tid].na = 1;
else p[tid].na = 0;
}
tid += blockDim.x * gridDim.x;
}
}
|
aa9c3e8caf8c29bef0beed723602ee176d88883a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
#ifndef _PRESCAN_CU_
#define _PRESCAN_CU_
// includes, kernels
#include <scanLargeArray_kernel.cu>
#include <assert.h>
inline bool
isPowerOfTwo(int n)
{
return ((n&(n-1))==0) ;
}
inline int
floorPow2(int n)
{
#ifdef WIN32
// method 2
return 1 << (int)logb((float)n);
#else
// method 1
// float nf = (float)n;
// return 1 << (((*(int*)&nf) >> 23) - 127);
int exp;
frexp((float)n, &exp);
return 1 << (exp - 1);
#endif
}
#define BLOCK_SIZE 256
float** g_scanBlockSums;
unsigned int g_numEltsAllocated = 0;
unsigned int g_numLevelsAllocated = 0;
// Computes the recursion depth prescanArrayRecursive() will need for
// inputs of up to maxNumElements and allocates, per level, a device
// buffer holding one partial sum per scan block (each block covers
// 2*BLOCK_SIZE elements).  Call once before prescanArray(); pair with
// deallocBlockSums().
void preallocBlockSums(unsigned int maxNumElements)
{
    assert(g_numEltsAllocated == 0); // shouldn't be called
    g_numEltsAllocated = maxNumElements;
    unsigned int blockSize = BLOCK_SIZE; // max size of the thread blocks
    unsigned int numElts = maxNumElements;
    int level = 0;
    // First pass: count how many recursion levels need a block-sums buffer.
    do
    {
        unsigned int numBlocks =
            max(1, (int)ceil((float)numElts / (2.f * blockSize)));
        if (numBlocks > 1)
        {
            level++;
        }
        numElts = numBlocks;
    } while (numElts > 1);
    // NOTE(review): malloc result is unchecked, and level can be 0 when
    // maxNumElements <= 2*BLOCK_SIZE (malloc(0)); confirm callers only
    // index g_scanBlockSums when more than one block is required.
    g_scanBlockSums = (float**) malloc(level * sizeof(float*));
    g_numLevelsAllocated = level;
    numElts = maxNumElements;
    level = 0;
    // Second pass: allocate one device array of per-block sums per level.
    do
    {
        unsigned int numBlocks =
            max(1, (int)ceil((float)numElts / (2.f * blockSize)));
        if (numBlocks > 1)
        {
            cutilSafeCall(hipMalloc((void**) &g_scanBlockSums[level++],
                                    numBlocks * sizeof(float)));
        }
        numElts = numBlocks;
    } while (numElts > 1);
    cutilCheckMsg("preallocBlockSums");
}
// Releases every per-level device buffer created by preallocBlockSums()
// plus the host-side table that tracks them, then clears the bookkeeping
// globals so preallocBlockSums() may legally be called again.
void deallocBlockSums()
{
    for (unsigned int level = 0; level < g_numLevelsAllocated; ++level)
        hipFree(g_scanBlockSums[level]);
    cutilCheckMsg("deallocBlockSums");
    free(g_scanBlockSums);
    g_scanBlockSums = 0;
    g_numEltsAllocated = 0;
    g_numLevelsAllocated = 0;
}
// Work-efficient exclusive prefix scan of inArray[0..numElements) into
// outArray.  Each thread block scans 2*numThreads elements; when the
// input spans more than one block, each block's total is written into
// g_scanBlockSums[level], that array is scanned recursively, and the
// results are added back with uniformAdd.  A non-power-of-two tail block
// is handled by the <.., true> "NP2" kernel variants.  `level` is the
// recursion depth (0 at the top-level call); preallocBlockSums must have
// sized g_scanBlockSums for this input beforehand.
void prescanArrayRecursive(float *outArray,
                           const float *inArray,
                           int numElements,
                           int level)
{
    unsigned int blockSize = BLOCK_SIZE; // max size of the thread blocks
    unsigned int numBlocks =
        max(1, (int)ceil((float)numElements / (2.f * blockSize)));
    unsigned int numThreads;
    // Single-block case: shrink the block to the smallest power of two
    // that still covers numElements (each thread handles two elements).
    if (numBlocks > 1)
        numThreads = blockSize;
    else if (isPowerOfTwo(numElements))
        numThreads = numElements / 2;
    else
        numThreads = floorPow2(numElements);
    unsigned int numEltsPerBlock = numThreads * 2;
    // if this is a non-power-of-2 array, the last block will be non-full
    // compute the smallest power of 2 able to compute its scan.
    unsigned int numEltsLastBlock =
        numElements - (numBlocks-1) * numEltsPerBlock;
    unsigned int numThreadsLastBlock = max(1, numEltsLastBlock / 2);
    unsigned int np2LastBlock = 0;
    unsigned int sharedMemLastBlock = 0;
    if (numEltsLastBlock != numEltsPerBlock)
    {
        np2LastBlock = 1;
        if(!isPowerOfTwo(numEltsLastBlock))
            numThreadsLastBlock = floorPow2(numEltsLastBlock);
        unsigned int extraSpace = (2 * numThreadsLastBlock) / NUM_BANKS;
        sharedMemLastBlock =
            sizeof(float) * (2 * numThreadsLastBlock + extraSpace);
    }
    // padding space is used to avoid shared memory bank conflicts
    unsigned int extraSpace = numEltsPerBlock / NUM_BANKS;
    unsigned int sharedMemSize =
        sizeof(float) * (numEltsPerBlock + extraSpace);
#ifdef DEBUG
    if (numBlocks > 1)
    {
        assert(g_numEltsAllocated >= numElements);
    }
#endif
    // setup execution parameters
    // if NP2, we process the last block separately
    dim3 grid(max(1, numBlocks - np2LastBlock), 1, 1);
    dim3 threads(numThreads, 1, 1);
    // make sure there are no CUDA errors before we start
    cutilCheckMsg("prescanArrayRecursive before kernels");
    // execute the scan
    if (numBlocks > 1)
    {
        // Full blocks: scan each and record its total in the level buffer.
        hipLaunchKernelGGL(( prescan<true, false>), dim3(grid), dim3(threads), sharedMemSize , 0, outArray,
                                                 inArray,
                                                 g_scanBlockSums[level],
                                                 numThreads * 2, 0, 0);
        cutilCheckMsg("prescanWithBlockSums");
        if (np2LastBlock)
        {
            hipLaunchKernelGGL(( prescan<true, true>), dim3(1), dim3(numThreadsLastBlock), sharedMemLastBlock , 0,
                outArray, inArray, g_scanBlockSums[level], numEltsLastBlock,
                numBlocks - 1, numElements - numEltsLastBlock);
            cutilCheckMsg("prescanNP2WithBlockSums");
        }
        // After scanning all the sub-blocks, we are mostly done. But now we
        // need to take all of the last values of the sub-blocks and scan those.
        // This will give us a new value that must be added to each block to
        // get the final results.
        // recursive (CPU) call
        prescanArrayRecursive(g_scanBlockSums[level],
                              g_scanBlockSums[level],
                              numBlocks,
                              level+1);
        hipLaunchKernelGGL(( uniformAdd), dim3(grid), dim3(threads) , 0, 0, outArray,
                                        g_scanBlockSums[level],
                                        numElements - numEltsLastBlock,
                                        0, 0);
        cutilCheckMsg("uniformAdd");
        if (np2LastBlock)
        {
            hipLaunchKernelGGL(( uniformAdd), dim3(1), dim3(numThreadsLastBlock) , 0, 0, outArray,
                                                     g_scanBlockSums[level],
                                                     numEltsLastBlock,
                                                     numBlocks - 1,
                                                     numElements - numEltsLastBlock);
            cutilCheckMsg("uniformAdd");
        }
    }
    else if (isPowerOfTwo(numElements))
    {
        // Single full block, no block-sums pass needed.
        hipLaunchKernelGGL(( prescan<false, false>), dim3(grid), dim3(threads), sharedMemSize , 0, outArray, inArray,
                                                  0, numThreads * 2, 0, 0);
        cutilCheckMsg("prescan");
    }
    else
    {
        // Single, non-power-of-two block.
        hipLaunchKernelGGL(( prescan<false, true>), dim3(grid), dim3(threads), sharedMemSize , 0, outArray, inArray,
                                                 0, numElements, 0, 0);
        cutilCheckMsg("prescanNP2");
    }
}
// Public entry point: exclusive prefix scan of numElements floats from
// device array inArray into device array outArray.
// preallocBlockSums(numElements) must have been called beforehand.
void prescanArray(float *outArray, float *inArray, int numElements)
{
    prescanArrayRecursive(outArray, inArray, numElements, 0);
}
#endif // _PRESCAN_CU_
| aa9c3e8caf8c29bef0beed723602ee176d88883a.cu | /*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
#ifndef _PRESCAN_CU_
#define _PRESCAN_CU_
// includes, kernels
#include <scanLargeArray_kernel.cu>
#include <assert.h>
// Returns true iff n is a positive power of two.
// The bit trick (n & (n-1)) clears the lowest set bit, so it is zero only
// for powers of two — but also for n == 0 (and, with two's complement,
// INT_MIN), which the original form wrongly accepted.  Guarding with
// n > 0 makes the predicate correct for all int inputs.
inline bool
isPowerOfTwo(int n)
{
    return (n > 0) && ((n & (n - 1)) == 0);
}
// Returns the largest power of two that is <= n (n itself when n is a
// power of two).  Precondition: n > 0 — the shift result is undefined for
// n <= 0.  Both variants recover the binary exponent of (float)n from the
// math library instead of looping over bits.
inline int
floorPow2(int n)
{
#ifdef WIN32
    // method 2: logb returns the (truncated) base-2 exponent directly.
    return 1 << (int)logb((float)n);
#else
    // method 1
    // float nf = (float)n;
    // return 1 << (((*(int*)&nf) >> 23) - 127);
    // frexp yields a mantissa in [0.5, 1), so exp is one more than the
    // position of n's highest set bit.
    int exp;
    frexp((float)n, &exp);
    return 1 << (exp - 1);
#endif
}
#define BLOCK_SIZE 256
float** g_scanBlockSums;
unsigned int g_numEltsAllocated = 0;
unsigned int g_numLevelsAllocated = 0;
// Computes the recursion depth prescanArrayRecursive() will need for
// inputs of up to maxNumElements and allocates, per level, a device
// buffer holding one partial sum per scan block (each block covers
// 2*BLOCK_SIZE elements).  Call once before prescanArray(); pair with
// deallocBlockSums().
void preallocBlockSums(unsigned int maxNumElements)
{
    assert(g_numEltsAllocated == 0); // shouldn't be called
    g_numEltsAllocated = maxNumElements;
    unsigned int blockSize = BLOCK_SIZE; // max size of the thread blocks
    unsigned int numElts = maxNumElements;
    int level = 0;
    // First pass: count how many recursion levels need a block-sums buffer.
    do
    {
        unsigned int numBlocks =
            max(1, (int)ceil((float)numElts / (2.f * blockSize)));
        if (numBlocks > 1)
        {
            level++;
        }
        numElts = numBlocks;
    } while (numElts > 1);
    // NOTE(review): malloc result is unchecked, and level can be 0 when
    // maxNumElements <= 2*BLOCK_SIZE (malloc(0)); confirm callers only
    // index g_scanBlockSums when more than one block is required.
    g_scanBlockSums = (float**) malloc(level * sizeof(float*));
    g_numLevelsAllocated = level;
    numElts = maxNumElements;
    level = 0;
    // Second pass: allocate one device array of per-block sums per level.
    do
    {
        unsigned int numBlocks =
            max(1, (int)ceil((float)numElts / (2.f * blockSize)));
        if (numBlocks > 1)
        {
            cutilSafeCall(cudaMalloc((void**) &g_scanBlockSums[level++],
                                     numBlocks * sizeof(float)));
        }
        numElts = numBlocks;
    } while (numElts > 1);
    cutilCheckMsg("preallocBlockSums");
}
// Releases every per-level device buffer created by preallocBlockSums()
// plus the host-side table that tracks them, then clears the bookkeeping
// globals so preallocBlockSums() may legally be called again.
void deallocBlockSums()
{
    for (unsigned int level = 0; level < g_numLevelsAllocated; ++level)
        cudaFree(g_scanBlockSums[level]);
    cutilCheckMsg("deallocBlockSums");
    free(g_scanBlockSums);
    g_scanBlockSums = 0;
    g_numEltsAllocated = 0;
    g_numLevelsAllocated = 0;
}
// Work-efficient exclusive prefix scan of inArray[0..numElements) into
// outArray.  Each thread block scans 2*numThreads elements; when the
// input spans more than one block, each block's total is written into
// g_scanBlockSums[level], that array is scanned recursively, and the
// results are added back with uniformAdd.  A non-power-of-two tail block
// is handled by the <.., true> "NP2" kernel variants.  `level` is the
// recursion depth (0 at the top-level call); preallocBlockSums must have
// sized g_scanBlockSums for this input beforehand.
void prescanArrayRecursive(float *outArray,
                           const float *inArray,
                           int numElements,
                           int level)
{
    unsigned int blockSize = BLOCK_SIZE; // max size of the thread blocks
    unsigned int numBlocks =
        max(1, (int)ceil((float)numElements / (2.f * blockSize)));
    unsigned int numThreads;
    // Single-block case: shrink the block to the smallest power of two
    // that still covers numElements (each thread handles two elements).
    if (numBlocks > 1)
        numThreads = blockSize;
    else if (isPowerOfTwo(numElements))
        numThreads = numElements / 2;
    else
        numThreads = floorPow2(numElements);
    unsigned int numEltsPerBlock = numThreads * 2;
    // if this is a non-power-of-2 array, the last block will be non-full
    // compute the smallest power of 2 able to compute its scan.
    unsigned int numEltsLastBlock =
        numElements - (numBlocks-1) * numEltsPerBlock;
    unsigned int numThreadsLastBlock = max(1, numEltsLastBlock / 2);
    unsigned int np2LastBlock = 0;
    unsigned int sharedMemLastBlock = 0;
    if (numEltsLastBlock != numEltsPerBlock)
    {
        np2LastBlock = 1;
        if(!isPowerOfTwo(numEltsLastBlock))
            numThreadsLastBlock = floorPow2(numEltsLastBlock);
        unsigned int extraSpace = (2 * numThreadsLastBlock) / NUM_BANKS;
        sharedMemLastBlock =
            sizeof(float) * (2 * numThreadsLastBlock + extraSpace);
    }
    // padding space is used to avoid shared memory bank conflicts
    unsigned int extraSpace = numEltsPerBlock / NUM_BANKS;
    unsigned int sharedMemSize =
        sizeof(float) * (numEltsPerBlock + extraSpace);
#ifdef DEBUG
    if (numBlocks > 1)
    {
        assert(g_numEltsAllocated >= numElements);
    }
#endif
    // setup execution parameters
    // if NP2, we process the last block separately
    dim3 grid(max(1, numBlocks - np2LastBlock), 1, 1);
    dim3 threads(numThreads, 1, 1);
    // make sure there are no CUDA errors before we start
    cutilCheckMsg("prescanArrayRecursive before kernels");
    // execute the scan
    if (numBlocks > 1)
    {
        // Full blocks: scan each and record its total in the level buffer.
        prescan<true, false><<< grid, threads, sharedMemSize >>>(outArray,
                                                                 inArray,
                                                                 g_scanBlockSums[level],
                                                                 numThreads * 2, 0, 0);
        cutilCheckMsg("prescanWithBlockSums");
        if (np2LastBlock)
        {
            prescan<true, true><<< 1, numThreadsLastBlock, sharedMemLastBlock >>>
                (outArray, inArray, g_scanBlockSums[level], numEltsLastBlock,
                 numBlocks - 1, numElements - numEltsLastBlock);
            cutilCheckMsg("prescanNP2WithBlockSums");
        }
        // After scanning all the sub-blocks, we are mostly done. But now we
        // need to take all of the last values of the sub-blocks and scan those.
        // This will give us a new value that must be added to each block to
        // get the final results.
        // recursive (CPU) call
        prescanArrayRecursive(g_scanBlockSums[level],
                              g_scanBlockSums[level],
                              numBlocks,
                              level+1);
        uniformAdd<<< grid, threads >>>(outArray,
                                        g_scanBlockSums[level],
                                        numElements - numEltsLastBlock,
                                        0, 0);
        cutilCheckMsg("uniformAdd");
        if (np2LastBlock)
        {
            uniformAdd<<< 1, numThreadsLastBlock >>>(outArray,
                                                     g_scanBlockSums[level],
                                                     numEltsLastBlock,
                                                     numBlocks - 1,
                                                     numElements - numEltsLastBlock);
            cutilCheckMsg("uniformAdd");
        }
    }
    else if (isPowerOfTwo(numElements))
    {
        // Single full block, no block-sums pass needed.
        prescan<false, false><<< grid, threads, sharedMemSize >>>(outArray, inArray,
                                                                  0, numThreads * 2, 0, 0);
        cutilCheckMsg("prescan");
    }
    else
    {
        // Single, non-power-of-two block.
        prescan<false, true><<< grid, threads, sharedMemSize >>>(outArray, inArray,
                                                                 0, numElements, 0, 0);
        cutilCheckMsg("prescanNP2");
    }
}
// Public entry point: exclusive prefix scan of numElements floats from
// device array inArray into device array outArray.
// preallocBlockSums(numElements) must have been called beforehand.
void prescanArray(float *outArray, float *inArray, int numElements)
{
    prescanArrayRecursive(outArray, inArray, numElements, 0);
}
#endif // _PRESCAN_CU_
|
7f67e13b525e6b43243e44a9e638e87b302b0c42.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* This file belongs to the Galois project, a C++ library for exploiting parallelism.
* The code is being released under the terms of the 3-Clause BSD License (a
* copy is located in LICENSE.txt at the top-level directory).
*
* Copyright (C) 2018, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*/
/* -*- mode: c++ -*- */
#include "gg.h"
#include "ggcuda.h"
void kernel_sizing(CSRGraph &, dim3 &, dim3 &);
#define TB_SIZE 256
const char *GGC_OPTIONS = "coop_conv=False $ outline_iterate_gb=False $ backoff_blocking_factor=4 $ parcomb=True $ np_schedulers=set(['fg', 'tb', 'wp']) $ cc_disable=set([]) $ hacks=set([]) $ np_factor=8 $ instrument=set([]) $ unroll=[] $ instrument_mode=None $ read_props=None $ outline_iterate=True $ ignore_nested_errors=False $ np=True $ write_props=None $ quiet_cgen=True $ retry_backoff=True $ cuda.graph_type=basic $ cuda.use_worklist_slots=True $ cuda.worklist_type=basic";
#include "kernels/reduce.cuh"
#include "pagerank_push_cuda.cuh"
static const int __tb_PageRank = TB_SIZE;
// Clears the per-node PageRank state (value, out-degree counter, residual
// and pending delta) for every node in [__begin, __end).
// Grid-stride loop: correct for any launch configuration.
__global__ void ResetGraph(CSRGraph graph, unsigned int __begin, unsigned int __end, float * p_delta, uint32_t * p_nout, float * p_residual, float * p_value)
{
  const unsigned first  = TID_1D;
  const unsigned stride = TOTAL_THREADS_1D;
  for (index_type node = __begin + first; node < __end; node += stride)
  {
    p_value[node]    = 0;
    p_nout[node]     = 0;
    p_residual[node] = 0;
    p_delta[node]    = 0;
  }
}
// Seeds push-style PageRank: every node in [__begin, __end) gets an
// initial residual of local_alpha, its out-degree is added into
// p_nout[src] via atomicTestAdd, and src is flagged in bitset_nout so the
// runtime knows which nout entries changed and must be synchronized.
__global__ void InitializeGraph(CSRGraph graph, unsigned int __begin, unsigned int __end, const float local_alpha, uint32_t * p_nout, float * p_residual, DynamicBitset& bitset_nout)
{
  unsigned tid = TID_1D;
  unsigned nthreads = TOTAL_THREADS_1D;
  const unsigned __kernel_tb_size = TB_SIZE;
  // Re-assigned each iteration; function-scope declaration is generator residue.
  uint32_t num_edges;
  index_type src_end;
  src_end = __end;
  // Grid-stride loop over the node range.
  for (index_type src = __begin + tid; src < src_end; src += nthreads)
  {
    bool pop = src < __end;
    if (pop)
    {
      p_residual[src] = local_alpha;
      num_edges = graph.getOutDegree(src);
      atomicTestAdd(&p_nout[src], num_edges);
      bitset_nout.set(src);
    }
  }
}
// Phase 1 of a PageRank round: each node whose residual exceeds the
// tolerance folds that residual into its rank value and, if it has
// out-neighbours, stores the per-neighbour contribution
//   delta = residual * (1 - alpha) / nout
// to be pushed along its out-edges by the PageRank kernel.
__global__ void PageRank_delta(CSRGraph graph, unsigned int __begin, unsigned int __end, const float local_alpha, float local_tolerance, float * p_delta, uint32_t * p_nout, float * p_residual, float * p_value)
{
  unsigned tid = TID_1D;
  unsigned nthreads = TOTAL_THREADS_1D;
  const unsigned __kernel_tb_size = TB_SIZE;
  float residual_old;
  index_type src_end;
  src_end = __end;
  // Grid-stride loop over the node range.
  for (index_type src = __begin + tid; src < src_end; src += nthreads)
  {
    bool pop = src < __end;
    if (pop)
    {
      if (p_residual[src] > local_tolerance)
      {
        // Consume the residual: it becomes part of the node's rank now.
        residual_old = p_residual[src];
        p_residual[src] = 0;
        p_value[src] += residual_old;
        if (p_nout[src] > 0)
        {
          p_delta[src] = residual_old * (1 - local_alpha) / p_nout[src];
        }
      }
    }
  }
}
// Phase 2 of a PageRank round: every node with a pending delta pushes it
// onto the residual of each out-neighbour (atomicTestAdd), marking the
// touched destinations in bitset_residual.  DGAccumulator_accum counts
// the nodes that had work this round (drives the termination test).
//
// Edge work is load-balanced with the generated "nested parallelism"
// scheduler: after a block-wide ExclusiveSum partitions per-node degrees,
// high-degree nodes are drained by the whole thread block (tb phase),
// medium-degree ones by individual warps (wp phase) and the remainder by
// fine-grained per-thread work items (fg phase).  The loop bound src_rup
// is rounded up so all threads of a block reach the collective ops.
__global__ void PageRank(CSRGraph graph, unsigned int __begin, unsigned int __end, float * p_delta, float * p_residual, DynamicBitset& bitset_residual, HGAccumulator<unsigned int> DGAccumulator_accum)
{
  unsigned tid = TID_1D;
  unsigned nthreads = TOTAL_THREADS_1D;
  const unsigned __kernel_tb_size = __tb_PageRank;
  float _delta;
  __shared__ hipcub::BlockReduce<unsigned int, TB_SIZE>::TempStorage DGAccumulator_accum_ts;
  index_type src_end;
  index_type src_rup;
  // Degree thresholds selecting the warp (wp) and thread-block (tb) phases.
  const int _NP_CROSSOVER_WP = 32;
  const int _NP_CROSSOVER_TB = __kernel_tb_size;
  const int BLKSIZE = __kernel_tb_size;
  const int ITSIZE = BLKSIZE * 8;
  typedef hipcub::BlockScan<multiple_sum<2, index_type>, BLKSIZE> BlockScan;
  typedef union np_shared<BlockScan::TempStorage, index_type, struct tb_np, struct warp_np<__kernel_tb_size/32>, struct fg_np<ITSIZE> > npsTy;
  // Shared scratch for all three scheduling phases (union: one at a time).
  __shared__ npsTy nps ;
  DGAccumulator_accum.thread_entry();
  // NOTE(review): src_end is assigned but the loop below bounds on src_rup;
  // generator residue.
  src_end = __end;
  src_rup = ((__begin) + roundup(((__end) - (__begin)), (blockDim.x)));
  for (index_type src = __begin + tid; src < src_rup; src += nthreads)
  {
    multiple_sum<2, index_type> _np_mps;
    multiple_sum<2, index_type> _np_mps_total;
    bool pop = src < __end;
    if (pop)
    {
      // Claim this node's delta; nodes without one drop out of this round.
      if (p_delta[src] > 0)
      {
        _delta = p_delta[src];
        p_delta[src] = 0;
        DGAccumulator_accum.reduce( 1);
      }
      else
      {
        pop = false;
      }
    }
    struct NPInspector1 _np = {0,0,0,0,0,0};
    // Per-thread closure so any thread can push on behalf of the owner node.
    __shared__ struct { float _delta; } _np_closure [TB_SIZE];
    _np_closure[threadIdx.x]._delta = _delta;
    if (pop)
    {
      _np.size = (graph).getOutDegree(src);
      _np.start = (graph).getFirstEdge(src);
    }
    // Partition degrees: el[0] = big (warp/tb) work, el[1] = fine-grained.
    _np_mps.el[0] = _np.size >= _NP_CROSSOVER_WP ? _np.size : 0;
    _np_mps.el[1] = _np.size < _NP_CROSSOVER_WP ? _np.size : 0;
    BlockScan(nps.temp_storage).ExclusiveSum(_np_mps, _np_mps, _np_mps_total);
    if (threadIdx.x == 0)
    {
      nps.tb.owner = MAX_TB_SIZE + 1;
    }
    __syncthreads();
    // --- tb phase: whole block cooperates on one high-degree node at a time.
    while (true)
    {
      if (_np.size >= _NP_CROSSOVER_TB)
      {
        nps.tb.owner = threadIdx.x;
      }
      __syncthreads();
      if (nps.tb.owner == MAX_TB_SIZE + 1)
      {
        __syncthreads();
        break;
      }
      if (nps.tb.owner == threadIdx.x)
      {
        nps.tb.start = _np.start;
        nps.tb.size = _np.size;
        nps.tb.src = threadIdx.x;
        _np.start = 0;
        _np.size = 0;
      }
      __syncthreads();
      int ns = nps.tb.start;
      int ne = nps.tb.size;
      if (nps.tb.src == threadIdx.x)
      {
        nps.tb.owner = MAX_TB_SIZE + 1;
      }
      assert(nps.tb.src < __kernel_tb_size);
      _delta = _np_closure[nps.tb.src]._delta;
      for (int _np_j = threadIdx.x; _np_j < ne; _np_j += BLKSIZE)
      {
        index_type nbr;
        nbr = ns +_np_j;
        {
          index_type dst;
          dst = graph.getAbsDestination(nbr);
          atomicTestAdd(&p_residual[dst], _delta);
          bitset_residual.set(dst);
        }
      }
      __syncthreads();
    }
    // --- wp phase: each warp drains its medium-degree nodes.
    {
      const int warpid = threadIdx.x / 32;
      const int _np_laneid = cub::LaneId();
      // NOTE(review): legacy mask-less __any — removed on Volta+ CUDA;
      // generated for pre-Volta / HIP targets.  Confirm target arch.
      while (__any(_np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB))
      {
        if (_np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB)
        {
          nps.warp.owner[warpid] = _np_laneid;
        }
        if (nps.warp.owner[warpid] == _np_laneid)
        {
          nps.warp.start[warpid] = _np.start;
          nps.warp.size[warpid] = _np.size;
          nps.warp.src[warpid] = threadIdx.x;
          _np.start = 0;
          _np.size = 0;
        }
        index_type _np_w_start = nps.warp.start[warpid];
        index_type _np_w_size = nps.warp.size[warpid];
        assert(nps.warp.src[warpid] < __kernel_tb_size);
        _delta = _np_closure[nps.warp.src[warpid]]._delta;
        for (int _np_ii = _np_laneid; _np_ii < _np_w_size; _np_ii += 32)
        {
          index_type nbr;
          nbr = _np_w_start +_np_ii;
          {
            index_type dst;
            dst = graph.getAbsDestination(nbr);
            atomicTestAdd(&p_residual[dst], _delta);
            bitset_residual.set(dst);
          }
        }
      }
      __syncthreads();
    }
    __syncthreads();
    // --- fg phase: remaining low-degree edges as fine-grained work items.
    _np.total = _np_mps_total.el[1];
    _np.offset = _np_mps.el[1];
    while (_np.work())
    {
      int _np_i =0;
      _np.inspect2(nps.fg.itvalue, nps.fg.src, ITSIZE, threadIdx.x);
      __syncthreads();
      for (_np_i = threadIdx.x; _np_i < ITSIZE && _np.valid(_np_i); _np_i += BLKSIZE)
      {
        index_type nbr;
        assert(nps.fg.src[_np_i] < __kernel_tb_size);
        _delta = _np_closure[nps.fg.src[_np_i]]._delta;
        nbr= nps.fg.itvalue[_np_i];
        {
          index_type dst;
          dst = graph.getAbsDestination(nbr);
          atomicTestAdd(&p_residual[dst], _delta);
          bitset_residual.set(dst);
        }
      }
      _np.execute_round_done(ITSIZE);
      __syncthreads();
    }
    // Restore this thread's own closure value before the next node.
    assert(threadIdx.x < __kernel_tb_size);
    _delta = _np_closure[threadIdx.x]._delta;
  }
  // Block-level combine of the per-thread work counters.
  DGAccumulator_accum.thread_exit<hipcub::BlockReduce<unsigned int, TB_SIZE> >(DGAccumulator_accum_ts);
}
// Sanity/statistics pass over [__begin, __end): reduces min/max of rank
// value and residual, the sums of both, and counts nodes whose residual
// still exceeds the tolerance.
// Fix: the residual sum was previously folded into DGAccumulator_sum
// (the value sum) a second time, which both inflated the value sum and
// left DGAccumulator_sum_residual — entered and exited but never
// reduced — permanently 0.  The residual now goes into
// DGAccumulator_sum_residual as intended.
__global__ void PageRankSanity(CSRGraph graph, unsigned int __begin, unsigned int __end, float local_tolerance, float * p_residual, float * p_value, HGAccumulator<uint64_t> DGAccumulator_residual_over_tolerance, HGAccumulator<float> DGAccumulator_sum, HGAccumulator<float> DGAccumulator_sum_residual, HGReduceMax<float> max_residual, HGReduceMax<float> max_value, HGReduceMin<float> min_residual, HGReduceMin<float> min_value)
{
  unsigned tid = TID_1D;
  unsigned nthreads = TOTAL_THREADS_1D;
  const unsigned __kernel_tb_size = TB_SIZE;
  // One shared scratch area per block-level reducer.
  __shared__ hipcub::BlockReduce<uint64_t, TB_SIZE>::TempStorage DGAccumulator_residual_over_tolerance_ts;
  __shared__ hipcub::BlockReduce<float, TB_SIZE>::TempStorage DGAccumulator_sum_ts;
  __shared__ hipcub::BlockReduce<float, TB_SIZE>::TempStorage DGAccumulator_sum_residual_ts;
  __shared__ hipcub::BlockReduce<float, TB_SIZE>::TempStorage max_residual_ts;
  __shared__ hipcub::BlockReduce<float, TB_SIZE>::TempStorage max_value_ts;
  __shared__ hipcub::BlockReduce<float, TB_SIZE>::TempStorage min_residual_ts;
  __shared__ hipcub::BlockReduce<float, TB_SIZE>::TempStorage min_value_ts;
  index_type src_end;
  DGAccumulator_residual_over_tolerance.thread_entry();
  DGAccumulator_sum.thread_entry();
  DGAccumulator_sum_residual.thread_entry();
  max_residual.thread_entry();
  max_value.thread_entry();
  min_residual.thread_entry();
  min_value.thread_entry();
  src_end = __end;
  // Grid-stride loop accumulating per-thread partials.
  for (index_type src = __begin + tid; src < src_end; src += nthreads)
  {
    bool pop = src < __end;
    if (pop)
    {
      max_value.reduce(p_value[src]);
      min_value.reduce(p_value[src]);
      max_residual.reduce(p_residual[src]);
      min_residual.reduce(p_residual[src]);
      DGAccumulator_sum.reduce( p_value[src]);
      // BUGFIX: was DGAccumulator_sum.reduce(p_residual[src]).
      DGAccumulator_sum_residual.reduce( p_residual[src]);
      if (p_residual[src] > local_tolerance)
      {
        DGAccumulator_residual_over_tolerance.reduce( 1);
      }
    }
  }
  // Combine per-thread partials block-wide and publish the results.
  DGAccumulator_residual_over_tolerance.thread_exit<hipcub::BlockReduce<uint64_t, TB_SIZE> >(DGAccumulator_residual_over_tolerance_ts);
  DGAccumulator_sum.thread_exit<hipcub::BlockReduce<float, TB_SIZE> >(DGAccumulator_sum_ts);
  DGAccumulator_sum_residual.thread_exit<hipcub::BlockReduce<float, TB_SIZE> >(DGAccumulator_sum_residual_ts);
  max_residual.thread_exit<hipcub::BlockReduce<float, TB_SIZE> >(max_residual_ts);
  max_value.thread_exit<hipcub::BlockReduce<float, TB_SIZE> >(max_value_ts);
  min_residual.thread_exit<hipcub::BlockReduce<float, TB_SIZE> >(min_residual_ts);
  min_value.thread_exit<hipcub::BlockReduce<float, TB_SIZE> >(min_value_ts);
}
// Host wrapper: launches ResetGraph over [__begin, __end) with the
// runtime-chosen launch configuration, then checks for launch errors.
void ResetGraph_cuda(unsigned int __begin, unsigned int __end, struct CUDA_Context* ctx)
{
  dim3 blocks;
  dim3 threads;
  kernel_sizing(blocks, threads);
  hipLaunchKernelGGL(( ResetGraph) , dim3(blocks), dim3(threads), 0, 0, ctx->gg, __begin, __end, ctx->delta.data.gpu_wr_ptr(), ctx->nout.data.gpu_wr_ptr(), ctx->residual.data.gpu_wr_ptr(), ctx->value.data.gpu_wr_ptr());
  check_cuda_kernel;
}
// Resets PageRank state for every node of the local graph.
void ResetGraph_allNodes_cuda(struct CUDA_Context* ctx)
{
  ResetGraph_cuda(0, ctx->gg.nnodes, ctx);
}
// Resets PageRank state for this host's master (owned) node range only.
void ResetGraph_masterNodes_cuda(struct CUDA_Context* ctx)
{
  ResetGraph_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, ctx);
}
// Resets PageRank state for the prefix of nodes that have outgoing edges.
void ResetGraph_nodesWithEdges_cuda(struct CUDA_Context* ctx)
{
  ResetGraph_cuda(0, ctx->numNodesWithEdges, ctx);
}
// Host wrapper: launches InitializeGraph over [__begin, __end), passing
// the nout dirty-bitset so updated entries can be synchronized later.
void InitializeGraph_cuda(unsigned int __begin, unsigned int __end, const float & local_alpha, struct CUDA_Context* ctx)
{
  dim3 blocks;
  dim3 threads;
  kernel_sizing(blocks, threads);
  hipLaunchKernelGGL(( InitializeGraph) , dim3(blocks), dim3(threads), 0, 0, ctx->gg, __begin, __end, local_alpha, ctx->nout.data.gpu_wr_ptr(), ctx->residual.data.gpu_wr_ptr(), *(ctx->nout.is_updated.gpu_rd_ptr()));
  check_cuda_kernel;
}
// Initializes PageRank for every node of the local graph.
void InitializeGraph_allNodes_cuda(const float & local_alpha, struct CUDA_Context* ctx)
{
  InitializeGraph_cuda(0, ctx->gg.nnodes, local_alpha, ctx);
}
// Initializes PageRank for this host's master (owned) node range only.
void InitializeGraph_masterNodes_cuda(const float & local_alpha, struct CUDA_Context* ctx)
{
  InitializeGraph_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, local_alpha, ctx);
}
// Initializes PageRank for the prefix of nodes that have outgoing edges.
void InitializeGraph_nodesWithEdges_cuda(const float & local_alpha, struct CUDA_Context* ctx)
{
  InitializeGraph_cuda(0, ctx->numNodesWithEdges, local_alpha, ctx);
}
// Host wrapper: launches PageRank_delta (residual -> value/delta) over
// [__begin, __end).
void PageRank_delta_cuda(unsigned int __begin, unsigned int __end, const float & local_alpha, float local_tolerance, struct CUDA_Context* ctx)
{
  dim3 blocks;
  dim3 threads;
  kernel_sizing(blocks, threads);
  hipLaunchKernelGGL(( PageRank_delta) , dim3(blocks), dim3(threads), 0, 0, ctx->gg, __begin, __end, local_alpha, local_tolerance, ctx->delta.data.gpu_wr_ptr(), ctx->nout.data.gpu_wr_ptr(), ctx->residual.data.gpu_wr_ptr(), ctx->value.data.gpu_wr_ptr());
  check_cuda_kernel;
}
// Runs the delta phase over every node of the local graph.
void PageRank_delta_allNodes_cuda(const float & local_alpha, float local_tolerance, struct CUDA_Context* ctx)
{
  PageRank_delta_cuda(0, ctx->gg.nnodes, local_alpha, local_tolerance, ctx);
}
// Runs the delta phase over this host's master (owned) node range only.
void PageRank_delta_masterNodes_cuda(const float & local_alpha, float local_tolerance, struct CUDA_Context* ctx)
{
  PageRank_delta_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, local_alpha, local_tolerance, ctx);
}
// Runs the delta phase over the prefix of nodes that have outgoing edges.
void PageRank_delta_nodesWithEdges_cuda(const float & local_alpha, float local_tolerance, struct CUDA_Context* ctx)
{
  PageRank_delta_cuda(0, ctx->numNodesWithEdges, local_alpha, local_tolerance, ctx);
}
// Host wrapper: launches one PageRank push round over [__begin, __end).
// Returns via DGAccumulator_accum the number of nodes that had a pending
// delta (zero means the computation has converged).
void PageRank_cuda(unsigned int __begin, unsigned int __end, unsigned int & DGAccumulator_accum, struct CUDA_Context* ctx)
{
  dim3 blocks;
  dim3 threads;
  HGAccumulator<unsigned int> _DGAccumulator_accum;
  kernel_sizing(blocks, threads);
  // Zero a one-element device/host shared slot and point the kernel-side
  // reducer at it.
  Shared<unsigned int> DGAccumulator_accumval  = Shared<unsigned int>(1);
  *(DGAccumulator_accumval.cpu_wr_ptr()) = 0;
  _DGAccumulator_accum.rv = DGAccumulator_accumval.gpu_wr_ptr();
  // Note: launched with the kernel-specific block size __tb_PageRank.
  hipLaunchKernelGGL(( PageRank) , dim3(blocks), dim3(__tb_PageRank), 0, 0, ctx->gg, __begin, __end, ctx->delta.data.gpu_wr_ptr(), ctx->residual.data.gpu_wr_ptr(), *(ctx->residual.is_updated.gpu_rd_ptr()), _DGAccumulator_accum);
  check_cuda_kernel;
  // cpu_rd_ptr() copies the reduced result back to the host.
  DGAccumulator_accum = *(DGAccumulator_accumval.cpu_rd_ptr());
}
// Runs one push round over every node of the local graph.
void PageRank_allNodes_cuda(unsigned int & DGAccumulator_accum, struct CUDA_Context* ctx)
{
  PageRank_cuda(0, ctx->gg.nnodes, DGAccumulator_accum, ctx);
}
// Runs one push round over this host's master (owned) node range only.
void PageRank_masterNodes_cuda(unsigned int & DGAccumulator_accum, struct CUDA_Context* ctx)
{
  PageRank_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, DGAccumulator_accum, ctx);
}
// Runs one push round over the prefix of nodes that have outgoing edges.
void PageRank_nodesWithEdges_cuda(unsigned int & DGAccumulator_accum, struct CUDA_Context* ctx)
{
  PageRank_cuda(0, ctx->numNodesWithEdges, DGAccumulator_accum, ctx);
}
// Host wrapper for PageRankSanity: allocates and zeroes one shared slot
// per reduction (counts, sums, min/max of value and residual), wires the
// kernel-side reducers to them, launches the kernel over
// [__begin, __end), and copies every reduced result back into the
// caller's output references.
void PageRankSanity_cuda(unsigned int __begin, unsigned int __end, uint64_t & DGAccumulator_residual_over_tolerance, float & DGAccumulator_sum, float & DGAccumulator_sum_residual, float & max_residual, float & max_value, float & min_residual, float & min_value, float local_tolerance, struct CUDA_Context* ctx)
{
  dim3 blocks;
  dim3 threads;
  HGAccumulator<uint64_t> _DGAccumulator_residual_over_tolerance;
  HGAccumulator<float> _DGAccumulator_sum;
  HGAccumulator<float> _DGAccumulator_sum_residual;
  HGReduceMax<float> _max_residual;
  HGReduceMax<float> _max_value;
  HGReduceMin<float> _min_residual;
  HGReduceMin<float> _min_value;
  kernel_sizing(blocks, threads);
  // One zero-initialized, host-visible result slot per reducer.
  // NOTE(review): the min reducers also start from 0, so a graph whose
  // values/residuals are all positive reports min == 0 — confirm this is
  // the intended identity for HGReduceMin.
  Shared<uint64_t> DGAccumulator_residual_over_toleranceval  = Shared<uint64_t>(1);
  *(DGAccumulator_residual_over_toleranceval.cpu_wr_ptr()) = 0;
  _DGAccumulator_residual_over_tolerance.rv = DGAccumulator_residual_over_toleranceval.gpu_wr_ptr();
  Shared<float> DGAccumulator_sumval  = Shared<float>(1);
  *(DGAccumulator_sumval.cpu_wr_ptr()) = 0;
  _DGAccumulator_sum.rv = DGAccumulator_sumval.gpu_wr_ptr();
  Shared<float> DGAccumulator_sum_residualval  = Shared<float>(1);
  *(DGAccumulator_sum_residualval.cpu_wr_ptr()) = 0;
  _DGAccumulator_sum_residual.rv = DGAccumulator_sum_residualval.gpu_wr_ptr();
  Shared<float> max_residualval  = Shared<float>(1);
  *(max_residualval.cpu_wr_ptr()) = 0;
  _max_residual.rv = max_residualval.gpu_wr_ptr();
  Shared<float> max_valueval  = Shared<float>(1);
  *(max_valueval.cpu_wr_ptr()) = 0;
  _max_value.rv = max_valueval.gpu_wr_ptr();
  Shared<float> min_residualval  = Shared<float>(1);
  *(min_residualval.cpu_wr_ptr()) = 0;
  _min_residual.rv = min_residualval.gpu_wr_ptr();
  Shared<float> min_valueval  = Shared<float>(1);
  *(min_valueval.cpu_wr_ptr()) = 0;
  _min_value.rv = min_valueval.gpu_wr_ptr();
  hipLaunchKernelGGL(( PageRankSanity) , dim3(blocks), dim3(threads), 0, 0, ctx->gg, __begin, __end, local_tolerance, ctx->residual.data.gpu_wr_ptr(), ctx->value.data.gpu_wr_ptr(), _DGAccumulator_residual_over_tolerance, _DGAccumulator_sum, _DGAccumulator_sum_residual, _max_residual, _max_value, _min_residual, _min_value);
  check_cuda_kernel;
  // Copy the reduced results back to the host-side output references.
  DGAccumulator_residual_over_tolerance = *(DGAccumulator_residual_over_toleranceval.cpu_rd_ptr());
  DGAccumulator_sum = *(DGAccumulator_sumval.cpu_rd_ptr());
  DGAccumulator_sum_residual = *(DGAccumulator_sum_residualval.cpu_rd_ptr());
  max_residual = *(max_residualval.cpu_rd_ptr());
  max_value = *(max_valueval.cpu_rd_ptr());
  min_residual = *(min_residualval.cpu_rd_ptr());
  min_value = *(min_valueval.cpu_rd_ptr());
}
// Sanity statistics over every node of the local graph.
void PageRankSanity_allNodes_cuda(uint64_t & DGAccumulator_residual_over_tolerance, float & DGAccumulator_sum, float & DGAccumulator_sum_residual, float & max_residual, float & max_value, float & min_residual, float & min_value, float local_tolerance, struct CUDA_Context* ctx)
{
  PageRankSanity_cuda(0, ctx->gg.nnodes, DGAccumulator_residual_over_tolerance, DGAccumulator_sum, DGAccumulator_sum_residual, max_residual, max_value, min_residual, min_value, local_tolerance, ctx);
}
// Sanity statistics over this host's master (owned) node range only.
void PageRankSanity_masterNodes_cuda(uint64_t & DGAccumulator_residual_over_tolerance, float & DGAccumulator_sum, float & DGAccumulator_sum_residual, float & max_residual, float & max_value, float & min_residual, float & min_value, float local_tolerance, struct CUDA_Context* ctx)
{
  PageRankSanity_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, DGAccumulator_residual_over_tolerance, DGAccumulator_sum, DGAccumulator_sum_residual, max_residual, max_value, min_residual, min_value, local_tolerance, ctx);
}
void PageRankSanity_nodesWithEdges_cuda(uint64_t & DGAccumulator_residual_over_tolerance, float & DGAccumulator_sum, float & DGAccumulator_sum_residual, float & max_residual, float & max_value, float & min_residual, float & min_value, float local_tolerance, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
PageRankSanity_cuda(0, ctx->numNodesWithEdges, DGAccumulator_residual_over_tolerance, DGAccumulator_sum, DGAccumulator_sum_residual, max_residual, max_value, min_residual, min_value, local_tolerance, ctx);
// FP: "2 -> 3;
} | 7f67e13b525e6b43243e44a9e638e87b302b0c42.cu | /*
* This file belongs to the Galois project, a C++ library for exploiting parallelism.
* The code is being released under the terms of the 3-Clause BSD License (a
* copy is located in LICENSE.txt at the top-level directory).
*
* Copyright (C) 2018, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*/
/* -*- mode: c++ -*- */
#include "gg.h"
#include "ggcuda.h"
void kernel_sizing(CSRGraph &, dim3 &, dim3 &);
#define TB_SIZE 256
const char *GGC_OPTIONS = "coop_conv=False $ outline_iterate_gb=False $ backoff_blocking_factor=4 $ parcomb=True $ np_schedulers=set(['fg', 'tb', 'wp']) $ cc_disable=set([]) $ hacks=set([]) $ np_factor=8 $ instrument=set([]) $ unroll=[] $ instrument_mode=None $ read_props=None $ outline_iterate=True $ ignore_nested_errors=False $ np=True $ write_props=None $ quiet_cgen=True $ retry_backoff=True $ cuda.graph_type=basic $ cuda.use_worklist_slots=True $ cuda.worklist_type=basic";
#include "kernels/reduce.cuh"
#include "pagerank_push_cuda.cuh"
static const int __tb_PageRank = TB_SIZE;
__global__ void ResetGraph(CSRGraph graph, unsigned int __begin, unsigned int __end, float * p_delta, uint32_t * p_nout, float * p_residual, float * p_value)
{
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
const unsigned __kernel_tb_size = TB_SIZE;
index_type src_end;
// FP: "1 -> 2;
src_end = __end;
for (index_type src = __begin + tid; src < src_end; src += nthreads)
{
bool pop = src < __end;
if (pop)
{
p_value[src] = 0;
p_nout[src] = 0;
p_residual[src] = 0;
p_delta[src] = 0;
}
}
// FP: "10 -> 11;
}
__global__ void InitializeGraph(CSRGraph graph, unsigned int __begin, unsigned int __end, const float local_alpha, uint32_t * p_nout, float * p_residual, DynamicBitset& bitset_nout)
{
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
const unsigned __kernel_tb_size = TB_SIZE;
uint32_t num_edges;
index_type src_end;
// FP: "1 -> 2;
// FP: "2 -> 3;
src_end = __end;
for (index_type src = __begin + tid; src < src_end; src += nthreads)
{
bool pop = src < __end;
if (pop)
{
p_residual[src] = local_alpha;
num_edges = graph.getOutDegree(src);
atomicTestAdd(&p_nout[src], num_edges);
bitset_nout.set(src);
}
}
// FP: "11 -> 12;
}
__global__ void PageRank_delta(CSRGraph graph, unsigned int __begin, unsigned int __end, const float local_alpha, float local_tolerance, float * p_delta, uint32_t * p_nout, float * p_residual, float * p_value)
{
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
const unsigned __kernel_tb_size = TB_SIZE;
float residual_old;
index_type src_end;
// FP: "1 -> 2;
// FP: "2 -> 3;
src_end = __end;
for (index_type src = __begin + tid; src < src_end; src += nthreads)
{
bool pop = src < __end;
if (pop)
{
if (p_residual[src] > local_tolerance)
{
residual_old = p_residual[src];
p_residual[src] = 0;
p_value[src] += residual_old;
if (p_nout[src] > 0)
{
p_delta[src] = residual_old * (1 - local_alpha) / p_nout[src];
}
}
}
}
// FP: "15 -> 16;
}
__global__ void PageRank(CSRGraph graph, unsigned int __begin, unsigned int __end, float * p_delta, float * p_residual, DynamicBitset& bitset_residual, HGAccumulator<unsigned int> DGAccumulator_accum)
{
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
const unsigned __kernel_tb_size = __tb_PageRank;
float _delta;
__shared__ cub::BlockReduce<unsigned int, TB_SIZE>::TempStorage DGAccumulator_accum_ts;
index_type src_end;
index_type src_rup;
// FP: "1 -> 2;
const int _NP_CROSSOVER_WP = 32;
const int _NP_CROSSOVER_TB = __kernel_tb_size;
// FP: "2 -> 3;
const int BLKSIZE = __kernel_tb_size;
const int ITSIZE = BLKSIZE * 8;
// FP: "3 -> 4;
typedef cub::BlockScan<multiple_sum<2, index_type>, BLKSIZE> BlockScan;
typedef union np_shared<BlockScan::TempStorage, index_type, struct tb_np, struct warp_np<__kernel_tb_size/32>, struct fg_np<ITSIZE> > npsTy;
// FP: "4 -> 5;
__shared__ npsTy nps ;
// FP: "5 -> 6;
// FP: "6 -> 7;
// FP: "7 -> 8;
DGAccumulator_accum.thread_entry();
// FP: "8 -> 9;
src_end = __end;
src_rup = ((__begin) + roundup(((__end) - (__begin)), (blockDim.x)));
for (index_type src = __begin + tid; src < src_rup; src += nthreads)
{
multiple_sum<2, index_type> _np_mps;
multiple_sum<2, index_type> _np_mps_total;
// FP: "9 -> 10;
bool pop = src < __end;
// FP: "10 -> 11;
if (pop)
{
if (p_delta[src] > 0)
{
_delta = p_delta[src];
p_delta[src] = 0;
DGAccumulator_accum.reduce( 1);
}
else
{
pop = false;
}
}
// FP: "17 -> 18;
// FP: "20 -> 21;
struct NPInspector1 _np = {0,0,0,0,0,0};
// FP: "21 -> 22;
__shared__ struct { float _delta; } _np_closure [TB_SIZE];
// FP: "22 -> 23;
_np_closure[threadIdx.x]._delta = _delta;
// FP: "23 -> 24;
if (pop)
{
_np.size = (graph).getOutDegree(src);
_np.start = (graph).getFirstEdge(src);
}
// FP: "26 -> 27;
// FP: "27 -> 28;
_np_mps.el[0] = _np.size >= _NP_CROSSOVER_WP ? _np.size : 0;
_np_mps.el[1] = _np.size < _NP_CROSSOVER_WP ? _np.size : 0;
// FP: "28 -> 29;
BlockScan(nps.temp_storage).ExclusiveSum(_np_mps, _np_mps, _np_mps_total);
// FP: "29 -> 30;
if (threadIdx.x == 0)
{
nps.tb.owner = MAX_TB_SIZE + 1;
}
// FP: "32 -> 33;
__syncthreads();
// FP: "33 -> 34;
while (true)
{
// FP: "34 -> 35;
if (_np.size >= _NP_CROSSOVER_TB)
{
nps.tb.owner = threadIdx.x;
}
// FP: "37 -> 38;
__syncthreads();
// FP: "38 -> 39;
if (nps.tb.owner == MAX_TB_SIZE + 1)
{
// FP: "39 -> 40;
__syncthreads();
// FP: "40 -> 41;
break;
}
// FP: "42 -> 43;
if (nps.tb.owner == threadIdx.x)
{
nps.tb.start = _np.start;
nps.tb.size = _np.size;
nps.tb.src = threadIdx.x;
_np.start = 0;
_np.size = 0;
}
// FP: "45 -> 46;
__syncthreads();
// FP: "46 -> 47;
int ns = nps.tb.start;
int ne = nps.tb.size;
// FP: "47 -> 48;
if (nps.tb.src == threadIdx.x)
{
nps.tb.owner = MAX_TB_SIZE + 1;
}
// FP: "50 -> 51;
assert(nps.tb.src < __kernel_tb_size);
_delta = _np_closure[nps.tb.src]._delta;
// FP: "51 -> 52;
for (int _np_j = threadIdx.x; _np_j < ne; _np_j += BLKSIZE)
{
index_type nbr;
nbr = ns +_np_j;
{
index_type dst;
dst = graph.getAbsDestination(nbr);
atomicTestAdd(&p_residual[dst], _delta);
bitset_residual.set(dst);
}
}
// FP: "59 -> 60;
__syncthreads();
}
// FP: "61 -> 62;
// FP: "62 -> 63;
{
const int warpid = threadIdx.x / 32;
// FP: "63 -> 64;
const int _np_laneid = cub::LaneId();
// FP: "64 -> 65;
while (__any(_np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB))
{
if (_np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB)
{
nps.warp.owner[warpid] = _np_laneid;
}
if (nps.warp.owner[warpid] == _np_laneid)
{
nps.warp.start[warpid] = _np.start;
nps.warp.size[warpid] = _np.size;
nps.warp.src[warpid] = threadIdx.x;
_np.start = 0;
_np.size = 0;
}
index_type _np_w_start = nps.warp.start[warpid];
index_type _np_w_size = nps.warp.size[warpid];
assert(nps.warp.src[warpid] < __kernel_tb_size);
_delta = _np_closure[nps.warp.src[warpid]]._delta;
for (int _np_ii = _np_laneid; _np_ii < _np_w_size; _np_ii += 32)
{
index_type nbr;
nbr = _np_w_start +_np_ii;
{
index_type dst;
dst = graph.getAbsDestination(nbr);
atomicTestAdd(&p_residual[dst], _delta);
bitset_residual.set(dst);
}
}
}
// FP: "82 -> 83;
__syncthreads();
// FP: "83 -> 84;
}
// FP: "84 -> 85;
__syncthreads();
// FP: "85 -> 86;
_np.total = _np_mps_total.el[1];
_np.offset = _np_mps.el[1];
// FP: "86 -> 87;
while (_np.work())
{
// FP: "87 -> 88;
int _np_i =0;
// FP: "88 -> 89;
_np.inspect2(nps.fg.itvalue, nps.fg.src, ITSIZE, threadIdx.x);
// FP: "89 -> 90;
__syncthreads();
// FP: "90 -> 91;
// FP: "91 -> 92;
for (_np_i = threadIdx.x; _np_i < ITSIZE && _np.valid(_np_i); _np_i += BLKSIZE)
{
index_type nbr;
assert(nps.fg.src[_np_i] < __kernel_tb_size);
_delta = _np_closure[nps.fg.src[_np_i]]._delta;
nbr= nps.fg.itvalue[_np_i];
{
index_type dst;
dst = graph.getAbsDestination(nbr);
atomicTestAdd(&p_residual[dst], _delta);
bitset_residual.set(dst);
}
}
// FP: "100 -> 101;
_np.execute_round_done(ITSIZE);
// FP: "101 -> 102;
__syncthreads();
}
// FP: "103 -> 104;
assert(threadIdx.x < __kernel_tb_size);
_delta = _np_closure[threadIdx.x]._delta;
}
// FP: "106 -> 107;
DGAccumulator_accum.thread_exit<cub::BlockReduce<unsigned int, TB_SIZE> >(DGAccumulator_accum_ts);
// FP: "107 -> 108;
}
__global__ void PageRankSanity(CSRGraph graph, unsigned int __begin, unsigned int __end, float local_tolerance, float * p_residual, float * p_value, HGAccumulator<uint64_t> DGAccumulator_residual_over_tolerance, HGAccumulator<float> DGAccumulator_sum, HGAccumulator<float> DGAccumulator_sum_residual, HGReduceMax<float> max_residual, HGReduceMax<float> max_value, HGReduceMin<float> min_residual, HGReduceMin<float> min_value)
{
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
const unsigned __kernel_tb_size = TB_SIZE;
__shared__ cub::BlockReduce<uint64_t, TB_SIZE>::TempStorage DGAccumulator_residual_over_tolerance_ts;
__shared__ cub::BlockReduce<float, TB_SIZE>::TempStorage DGAccumulator_sum_ts;
__shared__ cub::BlockReduce<float, TB_SIZE>::TempStorage DGAccumulator_sum_residual_ts;
__shared__ cub::BlockReduce<float, TB_SIZE>::TempStorage max_residual_ts;
__shared__ cub::BlockReduce<float, TB_SIZE>::TempStorage max_value_ts;
__shared__ cub::BlockReduce<float, TB_SIZE>::TempStorage min_residual_ts;
__shared__ cub::BlockReduce<float, TB_SIZE>::TempStorage min_value_ts;
index_type src_end;
// FP: "1 -> 2;
// FP: "2 -> 3;
DGAccumulator_residual_over_tolerance.thread_entry();
// FP: "3 -> 4;
// FP: "4 -> 5;
DGAccumulator_sum.thread_entry();
// FP: "5 -> 6;
// FP: "6 -> 7;
DGAccumulator_sum_residual.thread_entry();
// FP: "7 -> 8;
// FP: "8 -> 9;
max_residual.thread_entry();
// FP: "9 -> 10;
// FP: "10 -> 11;
max_value.thread_entry();
// FP: "11 -> 12;
// FP: "12 -> 13;
min_residual.thread_entry();
// FP: "13 -> 14;
// FP: "14 -> 15;
min_value.thread_entry();
// FP: "15 -> 16;
src_end = __end;
for (index_type src = __begin + tid; src < src_end; src += nthreads)
{
bool pop = src < __end;
if (pop)
{
max_value.reduce(p_value[src]);
min_value.reduce(p_value[src]);
max_residual.reduce(p_residual[src]);
min_residual.reduce(p_residual[src]);
DGAccumulator_sum.reduce( p_value[src]);
DGAccumulator_sum.reduce( p_residual[src]);
if (p_residual[src] > local_tolerance)
{
DGAccumulator_residual_over_tolerance.reduce( 1);
}
}
}
// FP: "29 -> 30;
DGAccumulator_residual_over_tolerance.thread_exit<cub::BlockReduce<uint64_t, TB_SIZE> >(DGAccumulator_residual_over_tolerance_ts);
// FP: "30 -> 31;
DGAccumulator_sum.thread_exit<cub::BlockReduce<float, TB_SIZE> >(DGAccumulator_sum_ts);
// FP: "31 -> 32;
DGAccumulator_sum_residual.thread_exit<cub::BlockReduce<float, TB_SIZE> >(DGAccumulator_sum_residual_ts);
// FP: "32 -> 33;
max_residual.thread_exit<cub::BlockReduce<float, TB_SIZE> >(max_residual_ts);
// FP: "33 -> 34;
max_value.thread_exit<cub::BlockReduce<float, TB_SIZE> >(max_value_ts);
// FP: "34 -> 35;
min_residual.thread_exit<cub::BlockReduce<float, TB_SIZE> >(min_residual_ts);
// FP: "35 -> 36;
min_value.thread_exit<cub::BlockReduce<float, TB_SIZE> >(min_value_ts);
// FP: "36 -> 37;
}
void ResetGraph_cuda(unsigned int __begin, unsigned int __end, struct CUDA_Context* ctx)
{
dim3 blocks;
dim3 threads;
// FP: "1 -> 2;
// FP: "2 -> 3;
// FP: "3 -> 4;
kernel_sizing(blocks, threads);
// FP: "4 -> 5;
ResetGraph <<<blocks, threads>>>(ctx->gg, __begin, __end, ctx->delta.data.gpu_wr_ptr(), ctx->nout.data.gpu_wr_ptr(), ctx->residual.data.gpu_wr_ptr(), ctx->value.data.gpu_wr_ptr());
// FP: "5 -> 6;
check_cuda_kernel;
// FP: "6 -> 7;
}
void ResetGraph_allNodes_cuda(struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
ResetGraph_cuda(0, ctx->gg.nnodes, ctx);
// FP: "2 -> 3;
}
void ResetGraph_masterNodes_cuda(struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
ResetGraph_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, ctx);
// FP: "2 -> 3;
}
void ResetGraph_nodesWithEdges_cuda(struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
ResetGraph_cuda(0, ctx->numNodesWithEdges, ctx);
// FP: "2 -> 3;
}
void InitializeGraph_cuda(unsigned int __begin, unsigned int __end, const float & local_alpha, struct CUDA_Context* ctx)
{
dim3 blocks;
dim3 threads;
// FP: "1 -> 2;
// FP: "2 -> 3;
// FP: "3 -> 4;
kernel_sizing(blocks, threads);
// FP: "4 -> 5;
InitializeGraph <<<blocks, threads>>>(ctx->gg, __begin, __end, local_alpha, ctx->nout.data.gpu_wr_ptr(), ctx->residual.data.gpu_wr_ptr(), *(ctx->nout.is_updated.gpu_rd_ptr()));
// FP: "5 -> 6;
check_cuda_kernel;
// FP: "6 -> 7;
}
void InitializeGraph_allNodes_cuda(const float & local_alpha, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
InitializeGraph_cuda(0, ctx->gg.nnodes, local_alpha, ctx);
// FP: "2 -> 3;
}
void InitializeGraph_masterNodes_cuda(const float & local_alpha, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
InitializeGraph_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, local_alpha, ctx);
// FP: "2 -> 3;
}
void InitializeGraph_nodesWithEdges_cuda(const float & local_alpha, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
InitializeGraph_cuda(0, ctx->numNodesWithEdges, local_alpha, ctx);
// FP: "2 -> 3;
}
void PageRank_delta_cuda(unsigned int __begin, unsigned int __end, const float & local_alpha, float local_tolerance, struct CUDA_Context* ctx)
{
dim3 blocks;
dim3 threads;
// FP: "1 -> 2;
// FP: "2 -> 3;
// FP: "3 -> 4;
kernel_sizing(blocks, threads);
// FP: "4 -> 5;
PageRank_delta <<<blocks, threads>>>(ctx->gg, __begin, __end, local_alpha, local_tolerance, ctx->delta.data.gpu_wr_ptr(), ctx->nout.data.gpu_wr_ptr(), ctx->residual.data.gpu_wr_ptr(), ctx->value.data.gpu_wr_ptr());
// FP: "5 -> 6;
check_cuda_kernel;
// FP: "6 -> 7;
}
void PageRank_delta_allNodes_cuda(const float & local_alpha, float local_tolerance, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
PageRank_delta_cuda(0, ctx->gg.nnodes, local_alpha, local_tolerance, ctx);
// FP: "2 -> 3;
}
void PageRank_delta_masterNodes_cuda(const float & local_alpha, float local_tolerance, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
PageRank_delta_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, local_alpha, local_tolerance, ctx);
// FP: "2 -> 3;
}
void PageRank_delta_nodesWithEdges_cuda(const float & local_alpha, float local_tolerance, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
PageRank_delta_cuda(0, ctx->numNodesWithEdges, local_alpha, local_tolerance, ctx);
// FP: "2 -> 3;
}
void PageRank_cuda(unsigned int __begin, unsigned int __end, unsigned int & DGAccumulator_accum, struct CUDA_Context* ctx)
{
dim3 blocks;
dim3 threads;
HGAccumulator<unsigned int> _DGAccumulator_accum;
// FP: "1 -> 2;
// FP: "2 -> 3;
// FP: "3 -> 4;
kernel_sizing(blocks, threads);
// FP: "4 -> 5;
Shared<unsigned int> DGAccumulator_accumval = Shared<unsigned int>(1);
// FP: "5 -> 6;
// FP: "6 -> 7;
*(DGAccumulator_accumval.cpu_wr_ptr()) = 0;
// FP: "7 -> 8;
_DGAccumulator_accum.rv = DGAccumulator_accumval.gpu_wr_ptr();
// FP: "8 -> 9;
PageRank <<<blocks, __tb_PageRank>>>(ctx->gg, __begin, __end, ctx->delta.data.gpu_wr_ptr(), ctx->residual.data.gpu_wr_ptr(), *(ctx->residual.is_updated.gpu_rd_ptr()), _DGAccumulator_accum);
// FP: "9 -> 10;
check_cuda_kernel;
// FP: "10 -> 11;
DGAccumulator_accum = *(DGAccumulator_accumval.cpu_rd_ptr());
// FP: "11 -> 12;
}
void PageRank_allNodes_cuda(unsigned int & DGAccumulator_accum, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
PageRank_cuda(0, ctx->gg.nnodes, DGAccumulator_accum, ctx);
// FP: "2 -> 3;
}
void PageRank_masterNodes_cuda(unsigned int & DGAccumulator_accum, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
PageRank_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, DGAccumulator_accum, ctx);
// FP: "2 -> 3;
}
void PageRank_nodesWithEdges_cuda(unsigned int & DGAccumulator_accum, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
PageRank_cuda(0, ctx->numNodesWithEdges, DGAccumulator_accum, ctx);
// FP: "2 -> 3;
}
void PageRankSanity_cuda(unsigned int __begin, unsigned int __end, uint64_t & DGAccumulator_residual_over_tolerance, float & DGAccumulator_sum, float & DGAccumulator_sum_residual, float & max_residual, float & max_value, float & min_residual, float & min_value, float local_tolerance, struct CUDA_Context* ctx)
{
dim3 blocks;
dim3 threads;
HGAccumulator<uint64_t> _DGAccumulator_residual_over_tolerance;
HGAccumulator<float> _DGAccumulator_sum;
HGAccumulator<float> _DGAccumulator_sum_residual;
HGReduceMax<float> _max_residual;
HGReduceMax<float> _max_value;
HGReduceMin<float> _min_residual;
HGReduceMin<float> _min_value;
// FP: "1 -> 2;
// FP: "2 -> 3;
// FP: "3 -> 4;
kernel_sizing(blocks, threads);
// FP: "4 -> 5;
Shared<uint64_t> DGAccumulator_residual_over_toleranceval = Shared<uint64_t>(1);
// FP: "5 -> 6;
// FP: "6 -> 7;
*(DGAccumulator_residual_over_toleranceval.cpu_wr_ptr()) = 0;
// FP: "7 -> 8;
_DGAccumulator_residual_over_tolerance.rv = DGAccumulator_residual_over_toleranceval.gpu_wr_ptr();
// FP: "8 -> 9;
Shared<float> DGAccumulator_sumval = Shared<float>(1);
// FP: "9 -> 10;
// FP: "10 -> 11;
*(DGAccumulator_sumval.cpu_wr_ptr()) = 0;
// FP: "11 -> 12;
_DGAccumulator_sum.rv = DGAccumulator_sumval.gpu_wr_ptr();
// FP: "12 -> 13;
Shared<float> DGAccumulator_sum_residualval = Shared<float>(1);
// FP: "13 -> 14;
// FP: "14 -> 15;
*(DGAccumulator_sum_residualval.cpu_wr_ptr()) = 0;
// FP: "15 -> 16;
_DGAccumulator_sum_residual.rv = DGAccumulator_sum_residualval.gpu_wr_ptr();
// FP: "16 -> 17;
Shared<float> max_residualval = Shared<float>(1);
// FP: "17 -> 18;
// FP: "18 -> 19;
*(max_residualval.cpu_wr_ptr()) = 0;
// FP: "19 -> 20;
_max_residual.rv = max_residualval.gpu_wr_ptr();
// FP: "20 -> 21;
Shared<float> max_valueval = Shared<float>(1);
// FP: "21 -> 22;
// FP: "22 -> 23;
*(max_valueval.cpu_wr_ptr()) = 0;
// FP: "23 -> 24;
_max_value.rv = max_valueval.gpu_wr_ptr();
// FP: "24 -> 25;
Shared<float> min_residualval = Shared<float>(1);
// FP: "25 -> 26;
// FP: "26 -> 27;
*(min_residualval.cpu_wr_ptr()) = 0;
// FP: "27 -> 28;
_min_residual.rv = min_residualval.gpu_wr_ptr();
// FP: "28 -> 29;
Shared<float> min_valueval = Shared<float>(1);
// FP: "29 -> 30;
// FP: "30 -> 31;
*(min_valueval.cpu_wr_ptr()) = 0;
// FP: "31 -> 32;
_min_value.rv = min_valueval.gpu_wr_ptr();
// FP: "32 -> 33;
PageRankSanity <<<blocks, threads>>>(ctx->gg, __begin, __end, local_tolerance, ctx->residual.data.gpu_wr_ptr(), ctx->value.data.gpu_wr_ptr(), _DGAccumulator_residual_over_tolerance, _DGAccumulator_sum, _DGAccumulator_sum_residual, _max_residual, _max_value, _min_residual, _min_value);
// FP: "33 -> 34;
check_cuda_kernel;
// FP: "34 -> 35;
DGAccumulator_residual_over_tolerance = *(DGAccumulator_residual_over_toleranceval.cpu_rd_ptr());
// FP: "35 -> 36;
DGAccumulator_sum = *(DGAccumulator_sumval.cpu_rd_ptr());
// FP: "36 -> 37;
DGAccumulator_sum_residual = *(DGAccumulator_sum_residualval.cpu_rd_ptr());
// FP: "37 -> 38;
max_residual = *(max_residualval.cpu_rd_ptr());
// FP: "38 -> 39;
max_value = *(max_valueval.cpu_rd_ptr());
// FP: "39 -> 40;
min_residual = *(min_residualval.cpu_rd_ptr());
// FP: "40 -> 41;
min_value = *(min_valueval.cpu_rd_ptr());
// FP: "41 -> 42;
}
void PageRankSanity_allNodes_cuda(uint64_t & DGAccumulator_residual_over_tolerance, float & DGAccumulator_sum, float & DGAccumulator_sum_residual, float & max_residual, float & max_value, float & min_residual, float & min_value, float local_tolerance, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
PageRankSanity_cuda(0, ctx->gg.nnodes, DGAccumulator_residual_over_tolerance, DGAccumulator_sum, DGAccumulator_sum_residual, max_residual, max_value, min_residual, min_value, local_tolerance, ctx);
// FP: "2 -> 3;
}
void PageRankSanity_masterNodes_cuda(uint64_t & DGAccumulator_residual_over_tolerance, float & DGAccumulator_sum, float & DGAccumulator_sum_residual, float & max_residual, float & max_value, float & min_residual, float & min_value, float local_tolerance, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
PageRankSanity_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, DGAccumulator_residual_over_tolerance, DGAccumulator_sum, DGAccumulator_sum_residual, max_residual, max_value, min_residual, min_value, local_tolerance, ctx);
// FP: "2 -> 3;
}
void PageRankSanity_nodesWithEdges_cuda(uint64_t & DGAccumulator_residual_over_tolerance, float & DGAccumulator_sum, float & DGAccumulator_sum_residual, float & max_residual, float & max_value, float & min_residual, float & min_value, float local_tolerance, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
PageRankSanity_cuda(0, ctx->numNodesWithEdges, DGAccumulator_residual_over_tolerance, DGAccumulator_sum, DGAccumulator_sum_residual, max_residual, max_value, min_residual, min_value, local_tolerance, ctx);
// FP: "2 -> 3;
} |
f32c58b2c6094260c068b7957002a1149d29f92b.hip | // !!! This is a file automatically generated by hipify!!!
////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2018, Lawrence Livermore National Security, LLC. Produced at the
// Lawrence Livermore National Laboratory in collaboration with University of
// Illinois Urbana-Champaign.
//
// Written by the LBANN Research Team (N. Dryden, N. Maruyama, et al.) listed in
// the CONTRIBUTORS file. <lbann-dev@llnl.gov>
//
// LLNL-CODE-756777.
// All rights reserved.
//
// This file is part of Aluminum GPU-aware Communication Library. For details, see
// http://software.llnl.gov/Aluminum or https://github.com/LLNL/Aluminum.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#include <Al_config.hpp>
#if defined AL_HAS_ROCM
#include <hip/hip_runtime.h>
#elif defined AL_HAS_CUDA
#include <hip/hip_runtime.h>
#endif
#include "aluminum/cuda/helper_kernels.hpp"
namespace Al {
namespace internal {
namespace cuda {
__global__ void spin_wait_kernel(int32_t wait_value, volatile int32_t* wait_mem) {
for (;;)
{
__threadfence_system();
int32_t value = *wait_mem;
if (value == wait_value) break;
}
}
void launch_wait_kernel(AlGpuStream_t stream,
int32_t wait_value,
volatile int32_t* wait_mem) {
hipLaunchKernelGGL(( spin_wait_kernel), dim3(1),dim3(1),0,stream, wait_value, wait_mem);
}
#if defined AL_HAS_ROCM
void launch_wait_kernel(hipStream_t stream,
int32_t wait_value,
hipDeviceptr_t wait_mem) {
AL_CHECK_CUDA(hipStreamWaitValue32(
stream, wait_mem, wait_value, hipStreamWaitValueEq));
}
#elif defined AL_HAS_CUDA
void launch_wait_kernel(hipStream_t stream,
int32_t wait_value,
hipDeviceptr_t wait_mem) {
AL_CHECK_CUDA_DRV(hipStreamWaitValue32(
stream, wait_mem, wait_value, hipStreamWaitValueEq));
}
#endif
} // namespace cuda
} // namespace internal
} // namespace Al
| f32c58b2c6094260c068b7957002a1149d29f92b.cu | ////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2018, Lawrence Livermore National Security, LLC. Produced at the
// Lawrence Livermore National Laboratory in collaboration with University of
// Illinois Urbana-Champaign.
//
// Written by the LBANN Research Team (N. Dryden, N. Maruyama, et al.) listed in
// the CONTRIBUTORS file. <lbann-dev@llnl.gov>
//
// LLNL-CODE-756777.
// All rights reserved.
//
// This file is part of Aluminum GPU-aware Communication Library. For details, see
// http://software.llnl.gov/Aluminum or https://github.com/LLNL/Aluminum.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#include <Al_config.hpp>
#if defined AL_HAS_ROCM
#include <hip/hip_runtime.h>
#elif defined AL_HAS_CUDA
#include <cuda_runtime.h>
#endif
#include "aluminum/cuda/helper_kernels.hpp"
namespace Al {
namespace internal {
namespace cuda {
__global__ void spin_wait_kernel(int32_t wait_value, volatile int32_t* wait_mem) {
for (;;)
{
__threadfence_system();
int32_t value = *wait_mem;
if (value == wait_value) break;
}
}
void launch_wait_kernel(AlGpuStream_t stream,
int32_t wait_value,
volatile int32_t* wait_mem) {
spin_wait_kernel<<<1,1,0,stream>>>(wait_value, wait_mem);
}
#if defined AL_HAS_ROCM
void launch_wait_kernel(hipStream_t stream,
int32_t wait_value,
hipDeviceptr_t wait_mem) {
AL_CHECK_CUDA(hipStreamWaitValue32(
stream, wait_mem, wait_value, hipStreamWaitValueEq));
}
#elif defined AL_HAS_CUDA
void launch_wait_kernel(cudaStream_t stream,
int32_t wait_value,
CUdeviceptr wait_mem) {
AL_CHECK_CUDA_DRV(cuStreamWaitValue32(
stream, wait_mem, wait_value, CU_STREAM_WAIT_VALUE_EQ));
}
#endif
} // namespace cuda
} // namespace internal
} // namespace Al
|
afbaf8726e7f0b657cef0bd7b46e3bc9bdf5cff4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.2) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date May 2015
@generated from zgeelltmv.cu normal z -> s, Sun May 3 11:22:58 2015
*/
#include "common_magma.h"
#define BLOCK_SIZE 512
// ELL SpMV kernel
//Michael Garland
__global__ void
sgeelltmv_kernel(
int num_rows,
int num_cols,
int num_cols_per_row,
float alpha,
float * dval,
magma_index_t * dcolind,
float * dx,
float beta,
float * dy)
{
int row = blockDim.x * blockIdx.x + threadIdx.x ;
if(row < num_rows ){
float dot = MAGMA_S_MAKE(0.0, 0.0);
for ( int n = 0; n < num_cols_per_row ; n ++){
int col = dcolind [ num_rows * n + row ];
float val = dval [ num_rows * n + row ];
if( val != 0)
dot += val * dx[col ];
}
dy[ row ] = dot * alpha + beta * dy [ row ];
}
}
// shifted ELL SpMV kernel
//Michael Garland
__global__ void
sgeelltmv_kernel_shift(
int num_rows,
int num_cols,
int num_cols_per_row,
float alpha,
float lambda,
float * dval,
magma_index_t * dcolind,
float * dx,
float beta,
int offset,
int blocksize,
magma_index_t * addrows,
float * dy)
{
int row = blockDim.x * blockIdx.x + threadIdx.x ;
if(row < num_rows ){
float dot = MAGMA_S_MAKE(0.0, 0.0);
for ( int n = 0; n < num_cols_per_row ; n ++){
int col = dcolind [ num_rows * n + row ];
float val = dval [ num_rows * n + row ];
if( val != 0)
dot += val * dx[col ];
}
if( row<blocksize )
dy[ row ] = dot * alpha - lambda
* dx[ offset+row ] + beta * dy [ row ];
else
dy[ row ] = dot * alpha - lambda
* dx[ addrows[row-blocksize] ] + beta * dy [ row ];
}
}
/**
Purpose
-------
This routine computes y = alpha * A^t * x + beta * y on the GPU.
Input format is ELL.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
nnz_per_row magma_int_t
number of elements in the longest row
@param[in]
alpha float
scalar multiplier
@param[in]
dval magmaFloat_ptr
array containing values of A in ELL
@param[in]
dcolind magmaIndex_ptr
columnindices of A in ELL
@param[in]
dx magmaFloat_ptr
input vector x
@param[in]
beta float
scalar multiplier
@param[out]
dy magmaFloat_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_s
********************************************************************/
extern "C" magma_int_t
magma_sgeelltmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t nnz_per_row,
float alpha,
magmaFloat_ptr dval,
magmaIndex_ptr dcolind,
magmaFloat_ptr dx,
float beta,
magmaFloat_ptr dy,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
hipLaunchKernelGGL(( sgeelltmv_kernel), dim3(grid), dim3(threads), 0, queue ,
m, n, nnz_per_row, alpha, dval, dcolind, dx, beta, dy );
return MAGMA_SUCCESS;
}
/**
Purpose
-------
This routine computes y = alpha *( A - lambda I ) * x + beta * y on the GPU.
Input format is ELL.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
nnz_per_row magma_int_t
number of elements in the longest row
@param[in]
alpha float
scalar multiplier
@param[in]
lambda float
scalar multiplier
@param[in]
dval magmaFloat_ptr
array containing values of A in ELL
@param[in]
dcolind magmaIndex_ptr
columnindices of A in ELL
@param[in]
dx magmaFloat_ptr
input vector x
@param[in]
beta float
scalar multiplier
@param[in]
offset magma_int_t
in case not the main diagonal is scaled
@param[in]
blocksize magma_int_t
in case of processing multiple vectors
@param[in]
addrows magmaIndex_ptr
in case the matrixpowerskernel is used
@param[out]
dy magmaFloat_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sblas
********************************************************************/
extern "C" magma_int_t
magma_sgeelltmv_shift(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t nnz_per_row,
float alpha,
float lambda,
magmaFloat_ptr dval,
magmaIndex_ptr dcolind,
magmaFloat_ptr dx,
float beta,
int offset,
int blocksize,
magmaIndex_ptr addrows,
magmaFloat_ptr dy,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
float tmp_shift;
//magma_ssetvector(1,&lambda,1,&tmp_shift,1);
tmp_shift = lambda;
hipLaunchKernelGGL(( sgeelltmv_kernel_shift), dim3(grid), dim3(threads), 0, queue ,
m, n, nnz_per_row, alpha, tmp_shift, dval, dcolind, dx,
beta, offset, blocksize, addrows, dy );
return MAGMA_SUCCESS;
}
| afbaf8726e7f0b657cef0bd7b46e3bc9bdf5cff4.cu | /*
-- MAGMA (version 1.6.2) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date May 2015
@generated from zgeelltmv.cu normal z -> s, Sun May 3 11:22:58 2015
*/
#include "common_magma.h"
#define BLOCK_SIZE 512
// ELL SpMV kernel: one thread per row computes
//   dy[row] = alpha * (A*x)[row] + beta * dy[row]
// with A packed in ELL format: entry n of a row lives at
// dval[num_rows*n + row] (column-major packing, so consecutive threads
// read consecutive addresses -- coalesced). Zero-valued padding entries
// are skipped. num_cols is part of the interface but unused here.
//Michael Garland
__global__ void
sgeelltmv_kernel(
    int num_rows,            // number of matrix rows
    int num_cols,            // number of matrix columns (unused)
    int num_cols_per_row,    // ELL width: stored entries per row
    float alpha,             // scalar multiplier on A*x
    float * dval,            // ELL values, num_rows * num_cols_per_row
    magma_index_t * dcolind, // ELL column indices, same layout as dval
    float * dx,              // input vector x
    float beta,              // scalar multiplier on y
    float * dy)              // in/out vector y
{
    int row = blockDim.x * blockIdx.x + threadIdx.x ;
    if(row < num_rows ){
        float dot = MAGMA_S_MAKE(0.0, 0.0);
        // walk the packed row; padding entries carry val == 0 and are skipped
        for ( int n = 0; n < num_cols_per_row ; n ++){
            int col = dcolind [ num_rows * n + row ];
            float val = dval [ num_rows * n + row ];
            if( val != 0)
                dot += val * dx[col ];
        }
        dy[ row ] = dot * alpha + beta * dy [ row ];
    }
}
// Shifted ELL SpMV kernel: one thread per row computes
//   dy[row] = alpha * (A*x)[row] - lambda * dx[sel(row)] + beta * dy[row]
// where sel(row) = offset + row for the first `blocksize` rows and
// addrows[row - blocksize] for the remaining rows (used by the matrix
// powers kernel to pick shifted-diagonal entries). ELL storage matches
// sgeelltmv_kernel: entry n of a row is at dval[num_rows*n + row].
//Michael Garland
__global__ void
sgeelltmv_kernel_shift(
    int num_rows,            // number of matrix rows
    int num_cols,            // number of matrix columns (unused)
    int num_cols_per_row,    // ELL width: stored entries per row
    float alpha,             // scalar multiplier on A*x
    float lambda,            // shift subtracted via the selected x entries
    float * dval,            // ELL values
    magma_index_t * dcolind, // ELL column indices
    float * dx,              // input vector x
    float beta,              // scalar multiplier on y
    int offset,              // diagonal offset for the first blocksize rows
    int blocksize,           // rows using the offset path
    magma_index_t * addrows, // x-indices for rows >= blocksize
    float * dy)              // in/out vector y
{
    int row = blockDim.x * blockIdx.x + threadIdx.x ;
    if(row < num_rows ){
        float dot = MAGMA_S_MAKE(0.0, 0.0);
        for ( int n = 0; n < num_cols_per_row ; n ++){
            int col = dcolind [ num_rows * n + row ];
            float val = dval [ num_rows * n + row ];
            if( val != 0)
                dot += val * dx[col ];
        }
        // apply the -lambda shift from the appropriate x entry
        if( row<blocksize )
            dy[ row ] = dot * alpha - lambda
                    * dx[ offset+row ] + beta * dy [ row ];
        else
            dy[ row ] = dot * alpha - lambda
                    * dx[ addrows[row-blocksize] ] + beta * dy [ row ];
    }
}
/**
Purpose
-------
This routine computes y = alpha * A^t * x + beta * y on the GPU.
Input format is ELL.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
nnz_per_row magma_int_t
number of elements in the longest row
@param[in]
alpha float
scalar multiplier
@param[in]
dval magmaFloat_ptr
array containing values of A in ELL
@param[in]
dcolind magmaIndex_ptr
column indices of A in ELL
@param[in]
dx magmaFloat_ptr
input vector x
@param[in]
beta float
scalar multiplier
@param[out]
dy magmaFloat_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_s
********************************************************************/
// Host wrapper for sgeelltmv_kernel: y = alpha * A * x + beta * y over the
// ELL-packed data (the interface doc above describes this as A^t because of
// the transposed packing convention). One thread per row, BLOCK_SIZE threads
// per block, launched on `queue`. transA is accepted for interface
// compatibility but not consulted by the kernel.
extern "C" magma_int_t
magma_sgeelltmv(
    magma_trans_t transA,
    magma_int_t m, magma_int_t n,
    magma_int_t nnz_per_row,
    float alpha,
    magmaFloat_ptr dval,
    magmaIndex_ptr dcolind,
    magmaFloat_ptr dx,
    float beta,
    magmaFloat_ptr dy,
    magma_queue_t queue )
{
    dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );    // one thread per row
    magma_int_t threads = BLOCK_SIZE;
    sgeelltmv_kernel<<< grid, threads, 0, queue >>>
        ( m, n, nnz_per_row, alpha, dval, dcolind, dx, beta, dy );
    return MAGMA_SUCCESS;
}
/**
Purpose
-------
This routine computes y = alpha *( A - lambda I ) * x + beta * y on the GPU.
Input format is ELL.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
nnz_per_row magma_int_t
number of elements in the longest row
@param[in]
alpha float
scalar multiplier
@param[in]
lambda float
scalar multiplier
@param[in]
dval magmaFloat_ptr
array containing values of A in ELL
@param[in]
dcolind magmaIndex_ptr
column indices of A in ELL
@param[in]
dx magmaFloat_ptr
input vector x
@param[in]
beta float
scalar multiplier
@param[in]
offset magma_int_t
in case not the main diagonal is scaled
@param[in]
blocksize magma_int_t
in case of processing multiple vectors
@param[in]
addrows magmaIndex_ptr
in case the matrixpowerskernel is used
@param[out]
dy magmaFloat_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sblas
********************************************************************/
// Host wrapper: y = alpha * ( A - lambda I ) * x + beta * y for ELL-format A,
// one thread per matrix row, launched on stream `queue`.
// transA is part of the interface but is not consulted by the kernel.
extern "C" magma_int_t
magma_sgeelltmv_shift(
    magma_trans_t transA,
    magma_int_t m, magma_int_t n,
    magma_int_t nnz_per_row,
    float alpha,
    float lambda,
    magmaFloat_ptr dval,
    magmaIndex_ptr dcolind,
    magmaFloat_ptr dx,
    float beta,
    int offset,
    int blocksize,
    magmaIndex_ptr addrows,
    magmaFloat_ptr dy,
    magma_queue_t queue )
{
    // BLOCK_SIZE threads per block, enough blocks to cover all m rows.
    magma_int_t threads = BLOCK_SIZE;
    dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
    // lambda is a plain host scalar, so it is forwarded to the kernel directly.
    sgeelltmv_kernel_shift<<< grid, threads, 0, queue >>>(
        m, n, nnz_per_row, alpha, lambda, dval, dcolind, dx,
        beta, offset, blocksize, addrows, dy );
    return MAGMA_SUCCESS;
}
|
2a680a20c075d10f514dfa3a37ff2905925d2db8.hip | // !!! This is a file automatically generated by hipify!!!
#include "commonHeaders.h"
#include <hip/hip_runtime.h>
#include "Utils/CudaUtils.h"
#include "Core/Common.h"
#include "Utils/CudaAlloc.h"
#include "Core/Scene.h"
#include "Core/Film.h"
using namespace Gorilla;
// Constructs an empty host/device buffer pair; no memory is allocated until
// resize() is called. pinned_ selects page-locked host memory for later
// allocations (faster transfers).
template <typename T>
CudaAlloc<T>::CudaAlloc(bool pinned_):pinned(pinned_)
{
}
// Frees both buffers; safe to run even if resize() was never called
// (release() checks for null pointers).
template <typename T>
CudaAlloc<T>::~CudaAlloc()
{
    release();
}
// (Re)allocates the paired host and device buffers to hold `count` elements
// of T, releasing any previous allocation first. Host memory is page-locked
// when `pinned` was requested in the constructor.
// NOTE(review): the null checks after checkError suggest checkError does not
// terminate on failure -- confirm its semantics.
template <typename T>
void CudaAlloc<T>::resize(size_t count)
{
    assert(count > 0);
    release();              // drop any existing buffers before reallocating
    maxCount = count;
    if(pinned)
    {
        CudaUtils::checkError(hipHostMalloc(&hostPtr, sizeof(T) * count), "Could not allocate pinned host memory");
        if(hostPtr == nullptr)
            throw std::runtime_error("Could not allocate pinned host memory");
    }
    else
    {
        hostPtr = static_cast<T*>(malloc(sizeof(T) * count));
        if(hostPtr == nullptr)
            throw std::runtime_error("Could not allocate host memory");
    }
    CudaUtils::checkError(hipMalloc(&devicePtr, sizeof(T) * count), "Could not allocate device memory");
    if(devicePtr == nullptr)
        throw std::runtime_error("Could not allocate device memory");
}
// Copies `count` elements (an element count, not bytes) from `source` into
// the staging host buffer, then synchronously uploads them to the device.
template <typename T>
void CudaAlloc<T>::write(T* source, size_t count)
{
    assert(count <= maxCount);
    memcpy(hostPtr, source, sizeof(T) * count);
    CudaUtils::checkError(hipMemcpy(devicePtr, hostPtr, sizeof(T)*count, hipMemcpyHostToDevice), "Could not write data to device");
}
// Synchronously copies the first `count` elements (an element count, not
// bytes) from the device buffer back into the host staging buffer.
// Fix: the assertion previously used `count < maxCount`, which wrongly
// rejected reading back a completely filled buffer; write() allows
// `count <= maxCount`, so read() now matches.
template <typename T>
void CudaAlloc<T>::read(size_t count)
{
    assert(count <= maxCount);
    CudaUtils::checkError(hipMemcpy(hostPtr, devicePtr, sizeof(T) * count, hipMemcpyDeviceToHost), "Could not read data from device");
}
// Returns the pointer appropriate for the side compiling this call:
// the device pointer when compiled for the device (__CUDA_ARCH__ > 0),
// the host pointer otherwise. Callable from both host and device code.
template <typename T>
CUDA_CALLABLE T* CudaAlloc<T>::getPtr() const
{
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ > 0))
    return devicePtr;
#else
    return hostPtr;
#endif
}
// Returns the host staging buffer (null until resize() succeeds).
template <typename T>
T* CudaAlloc<T>::getHostPtr() const
{
    return hostPtr;
}
// Returns the device buffer (null until resize() succeeds).
template <typename T>
T* CudaAlloc<T>::getDevicePtr() const
{
    return devicePtr;
}
// Frees both buffers and resets capacity to zero. Idempotent: pointers are
// nulled after freeing, so repeated calls (e.g. resize() then destructor)
// are safe. Pinned host memory must be freed with hipHostFree, not free().
template <typename T>
void CudaAlloc<T>::release()
{
    maxCount = 0;
    if(hostPtr != nullptr)
    {
        if(pinned)
            CudaUtils::checkError(hipHostFree(hostPtr), "Could not free pinned host memory");
        else
            free(hostPtr);
        hostPtr = nullptr;
    }
    if(devicePtr != nullptr)
    {
        CudaUtils::checkError(hipFree(devicePtr), "Could not free device memory");
        devicePtr = nullptr;
    }
}
namespace Gorilla
{
template class CudaAlloc<uint32_t>;
template class CudaAlloc<Scene>;
template class CudaAlloc<Film>;
template class CudaAlloc<Image>;
template class CudaAlloc<Texture>;
template class CudaAlloc<Material>;
template class CudaAlloc<Triangle>;
template class CudaAlloc<BVHNode>;
template class CudaAlloc<RandomGeneratorState>;
} | 2a680a20c075d10f514dfa3a37ff2905925d2db8.cu | #include "commonHeaders.h"
#include <cuda_runtime.h>
#include "Utils/CudaUtils.h"
#include "Core/Common.h"
#include "Utils/CudaAlloc.h"
#include "Core/Scene.h"
#include "Core/Film.h"
using namespace Gorilla;
template <typename T>
CudaAlloc<T>::CudaAlloc(bool pinned_):pinned(pinned_)
{
}
template <typename T>
CudaAlloc<T>::~CudaAlloc()
{
release();
}
template <typename T>
void CudaAlloc<T>::resize(size_t count)
{
assert(count > 0);
release();
maxCount = count;
if(pinned)
{
CudaUtils::checkError(cudaMallocHost(&hostPtr, sizeof(T) * count), "Could not allocate pinned host memory");
if(hostPtr == nullptr)
throw std::runtime_error("Could not allocate pinned host memory");
}
else
{
hostPtr = static_cast<T*>(malloc(sizeof(T) * count));
if(hostPtr == nullptr)
throw std::runtime_error("Could not allocate host memory");
}
CudaUtils::checkError(cudaMalloc(&devicePtr, sizeof(T) * count), "Could not allocate device memory");
if(devicePtr == nullptr)
throw std::runtime_error("Could not allocate device memory");
}
template <typename T>
void CudaAlloc<T>::write(T* source, size_t count)
{
assert(count <= maxCount);
memcpy(hostPtr, source, sizeof(T) * count);
CudaUtils::checkError(cudaMemcpy(devicePtr, hostPtr, sizeof(T)*count, cudaMemcpyHostToDevice), "Could not write data to device");
}
// Synchronously copies the first `count` elements (an element count, not
// bytes) from the device buffer back into the host staging buffer.
// Fix: the assertion previously used `count < maxCount`, which wrongly
// rejected reading back a completely filled buffer; write() allows
// `count <= maxCount`, so read() now matches.
template <typename T>
void CudaAlloc<T>::read(size_t count)
{
    assert(count <= maxCount);
    CudaUtils::checkError(cudaMemcpy(hostPtr, devicePtr, sizeof(T) * count, cudaMemcpyDeviceToHost), "Could not read data from device");
}
template <typename T>
CUDA_CALLABLE T* CudaAlloc<T>::getPtr() const
{
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ > 0))
return devicePtr;
#else
return hostPtr;
#endif
}
template <typename T>
T* CudaAlloc<T>::getHostPtr() const
{
return hostPtr;
}
template <typename T>
T* CudaAlloc<T>::getDevicePtr() const
{
return devicePtr;
}
template <typename T>
void CudaAlloc<T>::release()
{
maxCount = 0;
if(hostPtr != nullptr)
{
if(pinned)
CudaUtils::checkError(cudaFreeHost(hostPtr), "Could not free pinned host memory");
else
free(hostPtr);
hostPtr = nullptr;
}
if(devicePtr != nullptr)
{
CudaUtils::checkError(cudaFree(devicePtr), "Could not free device memory");
devicePtr = nullptr;
}
}
namespace Gorilla
{
template class CudaAlloc<uint32_t>;
template class CudaAlloc<Scene>;
template class CudaAlloc<Film>;
template class CudaAlloc<Image>;
template class CudaAlloc<Texture>;
template class CudaAlloc<Material>;
template class CudaAlloc<Triangle>;
template class CudaAlloc<BVHNode>;
template class CudaAlloc<RandomGeneratorState>;
} |
e90df20225af542a5f2158d706d4ee17bf5e97dd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Fills `vec` with its own flat indices over the constant-memory extent
// c_size (defined elsewhere in this translation unit): for every in-bounds
// cell (i,j,k) and every w in [0, len), the element at
// w + len*(i + c_size.x*(j + k*c_size.y)) is set to that index value.
// NOTE(review): the index products are evaluated in 32-bit arithmetic and
// only then widened to `long int`, so very large volumes can overflow
// before the assignment -- confirm the intended extents stay below 2^31.
__global__ void writeKernel(float* vec, int len)
{
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int j = blockIdx.y * blockDim.y + threadIdx.y;
    unsigned int k = blockIdx.z * blockDim.z + threadIdx.z;
    if (i >= c_size.x || j >= c_size.y || k >= c_size.z)
        return;
    for(auto w = 0; w < len; ++w)
    {
        long int id = w + len * (i + c_size.x * (j + k * c_size.y));
        vec[id] = id;   // stored as float: exact only up to 2^24
    }
} | e90df20225af542a5f2158d706d4ee17bf5e97dd.cu | #include "includes.h"
// Fills `vec` with its own flat indices: for every in-bounds cell (i,j,k)
// of the constant-memory extent c_size and every w in [0, len), the element
// at w + len*(i + c_size.x*(j + k*c_size.y)) is set to that index value.
// Fix: index arithmetic is now widened to 64 bits *before* multiplying; the
// previous code evaluated the products in 32-bit int/unsigned arithmetic
// and only then assigned to `long int`, silently wrapping for large volumes.
// Note: indices are stored as float, so values are exact only up to 2^24.
__global__ void writeKernel(float* vec, int len)
{
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int j = blockIdx.y * blockDim.y + threadIdx.y;
    unsigned int k = blockIdx.z * blockDim.z + threadIdx.z;
    if (i >= c_size.x || j >= c_size.y || k >= c_size.z)
        return;
    // Loop-invariant cell index, hoisted out of the per-w loop.
    const long long cell = (long long)i
        + (long long)c_size.x * ((long long)j + (long long)k * (long long)c_size.y);
    for (int w = 0; w < len; ++w)
    {
        long long id = (long long)w + (long long)len * cell;
        vec[id] = id;
    }
}
1068cbd3ee34bc1fe816e284b904fb571cf01137.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Reference code implementing the box blur filter.
Build and execute as follows:
make clean && make
./blur_filter size
Author: Naga Kandasamy
Date created: May 3, 2019
Date modified: May 12, 2020
name: Trung Hoang
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
// #define DEBUG
/* Include the kernel code */
#include "blur_filter_kernel.cu"
extern "C" void compute_gold(const image_t, image_t);
void compute_on_device(const image_t, image_t);
int check_results(const float *, const float *, int, float);
void print_image(const image_t);
/* Entry point: builds a size x size random image, blurs it on the CPU
   (compute_gold) and on the GPU (compute_on_device), and compares the two
   results with a relative tolerance. Exits with a usage message unless the
   image size is supplied on the command line. */
int main(int argc, char **argv)
{
    if (argc < 2) {
        fprintf(stderr, "Usage: %s size\n", argv[0]);
        fprintf(stderr, "size: Height of the image. The program assumes size x size image.\n");
        exit(EXIT_FAILURE);
    }
    /* Allocate memory for the input and output images */
    int size = atoi(argv[1]);
    fprintf(stderr, "Creating %d x %d images\n", size, size);
    printf("\n");
    image_t in, out_gold, out_gpu;
    in.size = out_gold.size = out_gpu.size = size;
    in.element = (float *)malloc(sizeof(float) * size * size);
    out_gold.element = (float *)malloc(sizeof(float) * size * size);
    out_gpu.element = (float *)malloc(sizeof(float) * size * size);
    if ((in.element == NULL) || (out_gold.element == NULL) || (out_gpu.element == NULL)) {
        perror("Malloc");
        exit(EXIT_FAILURE);
    }
    /* Populate our image with random values between [-0.5 +0.5] */
    srand(time(NULL));
    int i;
    for (i = 0; i < size * size; i++)
        in.element[i] = rand()/(float)RAND_MAX - 0.5;
    struct timeval start, stop;
    /* Calculate the blur on the CPU. The result is stored in out_gold. */
    fprintf(stderr, "Calculating blur on the CPU\n");
    gettimeofday(&start, NULL);
    compute_gold(in, out_gold);
    gettimeofday(&stop, NULL);
    fprintf(stderr, "Gold execution time = %fs\n", (float)(stop.tv_sec - start.tv_sec +\
                (stop.tv_usec - start.tv_usec)/(float)1000000));
    printf("\n");
    /* GPU pass; compute_on_device reports its own kernel timing */
    fprintf(stderr, "Calculating blur on the GPU\n");
    compute_on_device(in, out_gpu);
    printf("\n");
#ifdef DEBUG
    printf("\n");
    print_image(in);
    print_image(out_gold);
    print_image(out_gpu);
#endif
    /* Check CPU and GPU results for correctness */
    fprintf(stderr, "Checking CPU and GPU results\n");
    int num_elements = out_gold.size * out_gold.size;
    float eps = 1e-6; /* Do not change */
    int check;
    check = check_results(out_gold.element, out_gpu.element, num_elements, eps);
    if (check == 0)
        fprintf(stderr, "TEST PASSED\n");
    else
        fprintf(stderr, "TEST FAILED\n");
    /* Free data structures on the host */
    free((void *)in.element);
    free((void *)out_gold.element);
    free((void *)out_gpu.element);
    exit(EXIT_SUCCESS);
}
/* Calculate the blur on the GPU: upload `in`, launch blur_filter_kernel,
   time the kernel, and download the result into `out`.
   NOTE(review): dim3 grid(1,1) launches a single 32x32 thread block no
   matter how large the image is; this is only correct if
   blur_filter_kernel (defined in blur_filter_kernel.cu, not visible here)
   iterates over the full image internally -- confirm.
   NOTE(review): hipMalloc/hipMemcpy/launch results are not checked. */
void compute_on_device(const image_t in, image_t out)
{
    struct timeval start, stop;
    float *in_on_device = NULL;
    float *out_on_device = NULL;
    int size = in.size * in.size;   /* total pixel count */
    // Allocate and transfer `in` on GPU
    hipMalloc((void**)&in_on_device, size * sizeof(float));
    hipMemcpy(in_on_device, in.element, size * sizeof(float), hipMemcpyHostToDevice);
    // Allocate `out` on GPU
    hipMalloc((void**)&out_on_device, size * sizeof(float));
    // /* Set up execution grid on the GPU */
    int BLOCK_SIZE = 32;
    dim3 thread_block(BLOCK_SIZE, BLOCK_SIZE, 1); /* Set number of threads in thread block */
    dim3 grid(1,1);
    gettimeofday(&start, NULL);
    hipLaunchKernelGGL(( blur_filter_kernel), dim3(grid), dim3(thread_block), 0, 0, in_on_device, out_on_device, in.size);
    hipDeviceSynchronize(); /* Kernel execution is asynchronous; force CPU to wait here */
    gettimeofday(&stop, NULL);
    fprintf(stderr, "Kernel execution time = %fs\n", (float)(stop.tv_sec - start.tv_sec + (stop.tv_usec - start.tv_usec)/(float)1000000));
    // Copy Result from GPU
    hipMemcpy(out.element, out_on_device, size * sizeof(float), hipMemcpyDeviceToHost);
    hipFree(in_on_device);
    hipFree(out_on_device);
}
/* Compare two pixel buffers element-wise using relative error against pix1.
   Returns 0 when every element of pix2 is within eps relative error of the
   corresponding element of pix1, and -1 at the first mismatch. */
int check_results(const float *pix1, const float *pix2, int num_elements, float eps)
{
    for (int idx = 0; idx < num_elements; idx++) {
        float rel_err = fabsf((pix1[idx] - pix2[idx]) / pix1[idx]);
        if (rel_err > eps)
            return -1;
    }
    return 0;
}
/* Dump the square image to stdout: one row per line, each value with four
   decimal places, followed by a trailing blank line. */
void print_image(const image_t img)
{
    for (int row = 0; row < img.size; row++) {
        for (int col = 0; col < img.size; col++)
            printf("%0.4f ", img.element[row * img.size + col]);
        printf("\n");
    }
    printf("\n");
}
| 1068cbd3ee34bc1fe816e284b904fb571cf01137.cu | /* Reference code implementing the box blur filter.
Build and execute as follows:
make clean && make
./blur_filter size
Author: Naga Kandasamy
Date created: May 3, 2019
Date modified: May 12, 2020
name: Trung Hoang
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
// #define DEBUG
/* Include the kernel code */
#include "blur_filter_kernel.cu"
extern "C" void compute_gold(const image_t, image_t);
void compute_on_device(const image_t, image_t);
int check_results(const float *, const float *, int, float);
void print_image(const image_t);
/* Entry point: builds a size x size random image, blurs it on the CPU
   (compute_gold) and on the GPU (compute_on_device), and compares the two
   results with a relative tolerance. Exits with a usage message unless the
   image size is supplied on the command line. */
int main(int argc, char **argv)
{
    if (argc < 2) {
        fprintf(stderr, "Usage: %s size\n", argv[0]);
        fprintf(stderr, "size: Height of the image. The program assumes size x size image.\n");
        exit(EXIT_FAILURE);
    }
    /* Allocate memory for the input and output images */
    int size = atoi(argv[1]);
    fprintf(stderr, "Creating %d x %d images\n", size, size);
    printf("\n");
    image_t in, out_gold, out_gpu;
    in.size = out_gold.size = out_gpu.size = size;
    in.element = (float *)malloc(sizeof(float) * size * size);
    out_gold.element = (float *)malloc(sizeof(float) * size * size);
    out_gpu.element = (float *)malloc(sizeof(float) * size * size);
    if ((in.element == NULL) || (out_gold.element == NULL) || (out_gpu.element == NULL)) {
        perror("Malloc");
        exit(EXIT_FAILURE);
    }
    /* Populate our image with random values between [-0.5 +0.5] */
    srand(time(NULL));
    int i;
    for (i = 0; i < size * size; i++)
        in.element[i] = rand()/(float)RAND_MAX - 0.5;
    struct timeval start, stop;
    /* Calculate the blur on the CPU. The result is stored in out_gold. */
    fprintf(stderr, "Calculating blur on the CPU\n");
    gettimeofday(&start, NULL);
    compute_gold(in, out_gold);
    gettimeofday(&stop, NULL);
    fprintf(stderr, "Gold execution time = %fs\n", (float)(stop.tv_sec - start.tv_sec +\
                (stop.tv_usec - start.tv_usec)/(float)1000000));
    printf("\n");
    /* GPU pass; compute_on_device reports its own kernel timing */
    fprintf(stderr, "Calculating blur on the GPU\n");
    compute_on_device(in, out_gpu);
    printf("\n");
#ifdef DEBUG
    printf("\n");
    print_image(in);
    print_image(out_gold);
    print_image(out_gpu);
#endif
    /* Check CPU and GPU results for correctness */
    fprintf(stderr, "Checking CPU and GPU results\n");
    int num_elements = out_gold.size * out_gold.size;
    float eps = 1e-6; /* Do not change */
    int check;
    check = check_results(out_gold.element, out_gpu.element, num_elements, eps);
    if (check == 0)
        fprintf(stderr, "TEST PASSED\n");
    else
        fprintf(stderr, "TEST FAILED\n");
    /* Free data structures on the host */
    free((void *)in.element);
    free((void *)out_gold.element);
    free((void *)out_gpu.element);
    exit(EXIT_SUCCESS);
}
/* Calculate the blur on the GPU: upload `in`, launch blur_filter_kernel,
   time the kernel, and download the result into `out`.
   NOTE(review): dim3 grid(1,1) launches a single 32x32 thread block no
   matter how large the image is; this is only correct if
   blur_filter_kernel (defined in blur_filter_kernel.cu, not visible here)
   iterates over the full image internally -- confirm.
   NOTE(review): cudaMalloc/cudaMemcpy/launch results are not checked. */
void compute_on_device(const image_t in, image_t out)
{
    struct timeval start, stop;
    float *in_on_device = NULL;
    float *out_on_device = NULL;
    int size = in.size * in.size;   /* total pixel count */
    // Allocate and transfer `in` on GPU
    cudaMalloc((void**)&in_on_device, size * sizeof(float));
    cudaMemcpy(in_on_device, in.element, size * sizeof(float), cudaMemcpyHostToDevice);
    // Allocate `out` on GPU
    cudaMalloc((void**)&out_on_device, size * sizeof(float));
    // /* Set up execution grid on the GPU */
    int BLOCK_SIZE = 32;
    dim3 thread_block(BLOCK_SIZE, BLOCK_SIZE, 1); /* Set number of threads in thread block */
    dim3 grid(1,1);
    gettimeofday(&start, NULL);
    blur_filter_kernel<<<grid, thread_block>>>(in_on_device, out_on_device, in.size);
    cudaDeviceSynchronize(); /* Kernel execution is asynchronous; force CPU to wait here */
    gettimeofday(&stop, NULL);
    fprintf(stderr, "Kernel execution time = %fs\n", (float)(stop.tv_sec - start.tv_sec + (stop.tv_usec - start.tv_usec)/(float)1000000));
    // Copy Result from GPU
    cudaMemcpy(out.element, out_on_device, size * sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(in_on_device);
    cudaFree(out_on_device);
}
/* Element-wise comparison of two pixel buffers by relative error against
   pix1: 0 if all elements agree to within eps, -1 on the first violation. */
int check_results(const float *pix1, const float *pix2, int num_elements, float eps)
{
    int mismatch = 0;
    for (int idx = 0; idx < num_elements && !mismatch; idx++)
        mismatch = (fabsf((pix1[idx] - pix2[idx]) / pix1[idx]) > eps);
    return mismatch ? -1 : 0;
}
/* Write the square image to stdout, one row per line with four-decimal
   formatting, then a trailing blank line. */
void print_image(const image_t img)
{
    const int n = img.size;
    for (int r = 0; r < n; r++) {
        for (int c = 0; c < n; c++)
            printf("%0.4f ", img.element[r * n + c]);
        printf("\n");
    }
    printf("\n");
}
|
881967d0a489357c8af23f7258d86bbbf3e2bc91.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.4.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
December 2013
Note: [ds] precisions generated from csymv_tesla.cu
*/
#include "common_magma.h"
#define PRECISION_c
/* The version for fermi can be found in chemv_fermi.cu */
#define hemv_bs 64
#define thread_x 64
#define thread_y 4
#define bank_shift 33
#define quarter_thread_x 16
#define half_thread_x 32
/*******************************************************************************
* Lower case, where n is multiple of block size (hemv_bs)
*/
__global__ void
chemv_kernel_tesla_L_special(
int n, magmaFloatComplex alpha,
const magmaFloatComplex * __restrict__ A, int lda,
const magmaFloatComplex * __restrict__ x, int incx,
magmaFloatComplex beta,
magmaFloatComplex * __restrict__ y, int incy,
magmaFloatComplex * __restrict__ WC)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
int blkc = blockIdx.x;
magmaFloatComplex res = MAGMA_C_ZERO;
magmaFloatComplex res_ = MAGMA_C_ZERO;
magmaFloatComplex res1 = MAGMA_C_ZERO;
// la must be at least half_thread_x*bank_shift = 32x33 = 1056;
// quarter_thread_x*(thread_x+2) = 16*(64+2) = 1056
__shared__ magmaFloatComplex la [quarter_thread_x][thread_x+3]; /* Why +3? */
__shared__ magmaFloatComplex buff [thread_x];
__shared__ magmaFloatComplex buff2[thread_x];
magmaFloatComplex tr[4];
magmaFloatComplex b[4];
int break_d = thread_x * blkc;
const int td = (thread_x * ty) + tx;
int tx_ = td % half_thread_x;
int ty_ = td / half_thread_x;
WC += break_d + tx;
x += (break_d + tx)*incx;
A += break_d * (lda+1);
A += ty_*lda + tx_;
// load x[block] into buff
if ( ty == 0 ) {
buff[tx] = x[0];
} // obtain the vector x store in buff;
tx = tx_; ty = ty_;
#pragma unroll
for(int j=0; j < half_thread_x; j += 8)
la[0][ bank_shift * (ty_+j) + tx_] = A[ j * lda];
__syncthreads();
#pragma unroll
for(int i=ty_*4; i<(ty_ * 4 + 4); i++) {
if ( i < tx_ ) {
la[0][bank_shift * tx_ + i] = cuConjf( la[0][ i * bank_shift + tx_] );
}
else
la[0][bank_shift * tx_ + i] = la[0][ bank_shift * tx_ + i];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res += cuConjf( la[0][bank_shift * tx_ + j + ty_ * 4] ) * buff[j + ty_ * 4];
__syncthreads();
la[0][bank_shift*tx_+ty_] = res;
__syncthreads();
if ( ty_== 0 ) {
res1 = la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7];
}
else {
res1 = MAGMA_C_ZERO;
}
__syncthreads();
res = MAGMA_C_ZERO;
A += half_thread_x + half_thread_x*lda;
#pragma unroll
for(int j=0; j < half_thread_x; j += 8)
la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda];
__syncthreads();
#pragma unroll
for(int i=ty_*4; i<(4+ty_*4); i++) {
if ( i < tx_ ) {
la[0][bank_shift*tx_+i] = cuConjf( la[0][bank_shift*i+tx_] );
}
else
la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res += cuConjf( la[0][bank_shift*tx_+j+ty_*4] ) * buff[half_thread_x + j + 4 * ty_];
__syncthreads();
la[0][bank_shift*tx_+ty_] = res;
__syncthreads();
magmaFloatComplex res2;
res2 = MAGMA_C_ZERO;
if ( ty_== 1 ) {
res2 = la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7];
}
else {
res2 = MAGMA_C_ZERO;
}
__syncthreads();
res = MAGMA_C_ZERO;
A -= half_thread_x*lda;
res_ = MAGMA_C_ZERO;
#pragma unroll
for(int j=0; j < half_thread_x; j += 8)
tr[j/8] = A[ j * lda];
#pragma unroll
for(int j=0; j < 4; j++) {
res += tr[j] * buff[ j*8 + ty_];
la[0][bank_shift*(ty_+j*8)+tx_] = tr[j];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res_ += cuConjf(la[0][bank_shift*tx_+j+ty_*4]) * buff[half_thread_x +j+ty_*4];
__syncthreads();
la[0][bank_shift*tx_+ty_] = res;
__syncthreads();
if ( ty_ == 1 ) {
res2 = res2
+ la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7];
}
else {
res2 = MAGMA_C_ZERO;
}
__syncthreads();
la[0][bank_shift*tx_+ty_] = res_;
__syncthreads();
if ( ty_ == 0 ) {
res1 = res1
+ la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7];
}
else {
res1 = MAGMA_C_ZERO;
}
A -= half_thread_x;
__syncthreads();
tx = threadIdx.x;
ty = threadIdx.y;
if ( ty_ == 0 && ty == 0 )
res = res1;
else if ( ty_ == 1 && ty == 0 )
res = res2;
else {
res = MAGMA_C_ZERO;
}
A -= ty_* lda;
A -= tx_;
A = A - lda * blkc * thread_x;
x = x - blkc * thread_x * incx;
A += 4 * ty* lda;
A += tx;
int wc_c = 0;
int count = 0;
tx_ = td % quarter_thread_x;
ty_ = td / quarter_thread_x;
WC -= tx;
WC += tx_;
if ( blkc * thread_x >= thread_x ) {
#pragma unroll
for( int i=0; i < thread_x; i += thread_x ) {
res_ = MAGMA_C_ZERO;
count++;
if ( ty == 0 )
buff2[tx] = x[i*incx];
__syncthreads();
#pragma unroll
for( int k=0; k < 4; k++ ) {
#pragma unroll
for(int j=0; j < 4; j++)
tr[j] = A[j*lda];
#pragma unroll
for(int j=0; j < 4; j++) {
res += tr[j] * buff2[ quarter_thread_x * k + ty*4 + j];
la[j + ty*4][tx] = cuConjf(tr[j]) * buff[tx];
}
__syncthreads();
res_ = MAGMA_C_ZERO;
#pragma unroll
for(int j=0; j < 4; j++) {
res_ += la[tx_][ty_*4+j];
}
b[k] = res_;
__syncthreads();
A += lda * quarter_thread_x;
}
#pragma unroll
for(int k=0; k < 4; k++) {
la[tx_][ty_+quarter_thread_x*k] = b[k];
}
__syncthreads();
if ( ty_ < 4 ) {
int k = ty_*quarter_thread_x;
res_ = la[tx_][0+k] + la[tx_][1+k]
+ la[tx_][2+k] + la[tx_][3+k]
+ la[tx_][4+k] + la[tx_][5+k]
+ la[tx_][6+k] + la[tx_][7+k]
+ la[tx_][8+k] + la[tx_][9+k]
+ la[tx_][10+k]+ la[tx_][11+k]
+ la[tx_][12+k]+ la[tx_][13+k]
+ la[tx_][14+k]+ la[tx_][15+k];
WC[k + wc_c*lda ] = res_;
}
wc_c++;
__syncthreads();
}
}
for(int i=thread_x; i < (blkc * thread_x); i += thread_x ) {
res_ = MAGMA_C_ZERO;
count++;
if ( ty == 0 )
buff2[tx] = x[i*incx];
__syncthreads();
#pragma unroll
for( int k=0; k < 4; k++ ) {
#pragma unroll
for(int j=0; j < 4; j++)
tr[j] = A[j*lda];
#pragma unroll
for(int j=0; j < 4; j++) {
res += tr[j] * buff2[ quarter_thread_x*k + ty*4 + j];
la[j + ty*4][tx] = cuConjf( tr[j] ) * buff[tx];
}
__syncthreads();
res_ = MAGMA_C_ZERO;
#pragma unroll
for(int j=0; j < 4; j++)
res_ += la[tx_][ty_*4+j];
b[k] = res_;
__syncthreads();
A += lda * quarter_thread_x;
}
#pragma unroll
for(int k=0; k < 4; k++) {
la[tx_][ty_+quarter_thread_x*k] = b[k];
}
__syncthreads();
if ( ty_ < 4 ) {
int k = ty_*quarter_thread_x;
res_ = la[tx_][0+k] + la[tx_][1+k]
+ la[tx_][2+k] + la[tx_][3+k]
+ la[tx_][4+k] + la[tx_][5+k]
+ la[tx_][6+k] + la[tx_][7+k]
+ la[tx_][8+k] + la[tx_][9+k]
+ la[tx_][10+k]+ la[tx_][11+k]
+ la[tx_][12+k]+ la[tx_][13+k]
+ la[tx_][14+k]+ la[tx_][15+k];
WC[k + wc_c*lda ] = res_;
}
wc_c++;
__syncthreads();
}
WC += tx;
WC -= tx_;
la[ty][tx] = res;
__syncthreads();
if ( ty == 0 ) {
res = la[0][tx]+ la[1][tx]
+ la[2][tx]+ la[3][tx];
WC[0+lda*(blkc) ] = res;
}
}
/**************************************************************
* Lower case for generic sizes
*/
__global__ void
chemv_kernel_tesla_L_generic(
int n, magmaFloatComplex alpha,
const magmaFloatComplex * __restrict__ A, int lda,
const magmaFloatComplex * __restrict__ x, int incx,
magmaFloatComplex beta,
magmaFloatComplex * __restrict__ y, int incy,
magmaFloatComplex * __restrict__ WC,
int m_mod_thread_x)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
int blkc = blockIdx.x;
magmaFloatComplex res = MAGMA_C_ZERO;
magmaFloatComplex res_ = MAGMA_C_ZERO;
magmaFloatComplex res1 = MAGMA_C_ZERO;
__shared__ magmaFloatComplex la [quarter_thread_x][thread_x+3];
__shared__ magmaFloatComplex buff [thread_x];
__shared__ magmaFloatComplex buff2[thread_x];
magmaFloatComplex tr[4];
magmaFloatComplex b[8];
int break_d = thread_x * blkc;
const int td = (thread_x * ty) + tx;
int tx_ = td % half_thread_x;
int ty_ = td / half_thread_x;
WC += break_d + tx;
x += (break_d + tx) * incx;
A += break_d * (lda+1);
A += lda * ty_;
int trackA;
if ( blkc == ( gridDim.x - 1 ) ) {
if ( ty == 0 ) {
if ( tx > m_mod_thread_x ) {
buff[tx] = MAGMA_C_ZERO;
}
else
buff[tx] = x[0];
}
if ( tx_ > m_mod_thread_x )
trackA=m_mod_thread_x;
else
trackA=tx_;
A += trackA;
}
else {
if ( ty == 0 ) {
buff[tx] = x[0];
}
trackA = tx_;
A += trackA;
}
// Somehow merging these two if - else creates problem
// It could be a potential bug -- from synchronization or from cuda or compiler
if ( blkc == ( gridDim.x - 1 ) ) {
#pragma unroll
for(int j=0; j < half_thread_x; j += 8) {
if ( ( ty_ + j ) > m_mod_thread_x ) {
la[0][bank_shift*(ty_+j)+tx_] = MAGMA_C_MAKE( 9999, 0 );
}
else
la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda];
}
A -= trackA;
}
else {
#pragma unroll
for(int j=0; j < half_thread_x; j += 8) {
la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda];
}
}
tx = tx_;
ty = ty_;
__syncthreads();
#pragma unroll
for(int i=ty_*4; i<(ty_*4+4); i++) {
if ( i < tx_ ) {
la[0][bank_shift*tx_+i] = cuConjf(la[0][i*bank_shift+tx_]);
}
else
la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res += cuConjf(la[0][bank_shift*tx_+j+ty_*4]) * buff[j+ty_*4];
__syncthreads();
la[0][bank_shift*tx_+ty_] = res;
__syncthreads();
if ( ty_== 0 ) {
res1 = la[0][tx_*bank_shift+0]
+ la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]
+ la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]
+ la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]
+ la[0][tx_*bank_shift+7];
}
else {
res1 = MAGMA_C_ZERO;
}
__syncthreads();
res = MAGMA_C_ZERO;
if ( blkc == ( gridDim.x - 1 ) ) {
if ( (tx_+half_thread_x) > m_mod_thread_x )
trackA = m_mod_thread_x;
else
trackA = tx_ + half_thread_x;
A += trackA+half_thread_x*lda;
#pragma unroll
for(int j=0; j < half_thread_x; j += 8) {
if ( ( ty_ + j+half_thread_x ) > m_mod_thread_x ) {
la[0][bank_shift*(ty_+j)+tx_] = MAGMA_C_MAKE( 99999, 0 );
}
else
la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda];
}
A -= trackA+half_thread_x*lda;
A += tx_;
A += half_thread_x + half_thread_x*lda;
}
else {
A += half_thread_x + half_thread_x*lda;
#pragma unroll
for(int j=0; j < half_thread_x; j += 8) {
la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda];
}
}
__syncthreads();
#pragma unroll
for(int i=ty_*4; i<(4+ty_*4); i++) {
if ( i < tx_ ) {
la[0][bank_shift*tx_+i] = cuConjf(la[0][bank_shift*i+tx_]);
}
else
la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res += cuConjf(la[0][bank_shift*tx_+j+ty_*4]) * buff[half_thread_x + j + 4 * ty_];
__syncthreads();
la[0][bank_shift*tx_+ty_] = res;
__syncthreads();
magmaFloatComplex res2;
res2 = MAGMA_C_ZERO;
if ( ty_== 1 ) {
res2 = la[0][tx_*bank_shift+0]
+ la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]
+ la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]
+ la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]
+ la[0][tx_*bank_shift+7];
}
else {
res2 = MAGMA_C_ZERO;
}
__syncthreads();
res = MAGMA_C_ZERO;
res_ = MAGMA_C_ZERO;
A -= half_thread_x*lda;
if ( blkc == ( gridDim.x - 1 ) ) {
A -= tx_;
if ( tx_ > m_mod_thread_x )
trackA=m_mod_thread_x;
else
trackA=tx_;
A += trackA;
#pragma unroll
for(int j=0; j < half_thread_x; j += 8)
if ( ( ty_ + j ) > m_mod_thread_x ) {
tr[j/8] = MAGMA_C_MAKE( 99999, 0 );
}
else
tr[j/8] = A[ j * lda];
A -= trackA;
A += tx_;
}
else {
#pragma unroll
for(int j=0; j < half_thread_x; j += 8)
tr[j/8] = A[ j * lda];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++) {
res += tr[j] * buff[ j*8 + ty_];
la[0][bank_shift*(ty_+j*8)+tx_] = tr[j];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res_ += cuConjf(la[0][bank_shift*tx_+j+ty_*4]) * buff[half_thread_x +j+ty_*4];
__syncthreads();
la[0][bank_shift*tx_+ty_] = res;
__syncthreads();
if ( ty_ == 1 ) {
res2 = res2
+ la[0][tx_*bank_shift+0]
+ la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]
+ la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]
+ la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]
+ la[0][tx_*bank_shift+7];
}
else {
res2 = MAGMA_C_ZERO;
}
__syncthreads();
la[0][bank_shift*tx_+ty_] = res_;
__syncthreads();
if ( ty_ == 0 ) {
res1 = res1
+ la[0][tx_*bank_shift+0]
+ la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]
+ la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]
+ la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]
+ la[0][tx_*bank_shift+7];
}
else {
res1 = MAGMA_C_ZERO;
}
A -= half_thread_x;
__syncthreads();
tx = threadIdx.x;
ty = threadIdx.y;
if ( ty_ == 0 && ty == 0 )
res = res1;
else if ( ty_ == 1 && ty == 0 )
res = res2;
else {
res = MAGMA_C_ZERO;
}
A -= ty_* lda;
A -= tx_;
A = A - lda*break_d;
x = x - break_d*incx;
A += 4 * ty* lda;
if ( blkc == ( gridDim.x - 1 ) ) {
if ( tx <= m_mod_thread_x )
A += tx;
else
A += m_mod_thread_x;
}
else{
A += tx;
}
int wc_c = 0;
int count = 0;
tx_ = td % quarter_thread_x;
ty_ = td / quarter_thread_x;
WC -= tx;
WC += tx_;
#pragma unroll
for(int j=0; j < 4; j++)
b[j] = buff[ty_*4+j];
if ( break_d > 0 )
#pragma unroll
for( int i=0; i < thread_x; i += thread_x ) {
res_ = MAGMA_C_ZERO;
count++;
if ( ty == 0 ) {
buff2[tx] = x[i*incx];
}
__syncthreads();
#pragma unroll
for( int k=0; k < 4; k++ ) {
#pragma unroll
for(int j=0; j < 4; j++)
tr[j] = A[j*lda];
#pragma unroll
for(int j=0; j < 4; j++) {
res += tr[j]*buff2[quarter_thread_x*k + ty*4+(j)];
la[( (j)+ty*4)][tx] = cuConjf(tr[j]);
}
__syncthreads();
res_ = MAGMA_C_ZERO;
#pragma unroll
for(int j=0; j < 4; j++)
res_ += la[tx_][ty_*4+j]* b[j];
b[4+k] = res_;
__syncthreads();
A += lda* quarter_thread_x;
}
#pragma unroll
for(int k=0; k < 4; k++) {
la[tx_][ty_+quarter_thread_x*k] = b[4+k];
}
__syncthreads();
if ( ty_ < 4 ) {
int k = ty_*quarter_thread_x;
res_ = la[tx_][0+k] + la[tx_][1+k]
+ la[tx_][2+k] + la[tx_][3+k]
+ la[tx_][4+k] + la[tx_][5+k]
+ la[tx_][6+k] + la[tx_][7+k]
+ la[tx_][8+k] + la[tx_][9+k]
+ la[tx_][10+k]+ la[tx_][11+k]
+ la[tx_][12+k]+ la[tx_][13+k]
+ la[tx_][14+k]+ la[tx_][15+k];
WC[k + wc_c*lda ] = res_;
}
wc_c++;
__syncthreads();
}
for( int i=thread_x; i < break_d; i += thread_x ) {
res_ = MAGMA_C_ZERO;
count++;
if ( ty == 0 )
buff2[tx] = x[i*incx];
__syncthreads();
#pragma unroll
for( int k=0; k < 4; k++ ) {
#pragma unroll
for(int j=0; j < 4; j++)
tr[j] = A[j*lda];
#pragma unroll
for(int j=0; j < 4; j++) {
res += tr[j]*buff2[quarter_thread_x*k + ty*4+(j)];
la[( (j)+ty*4)][tx] = cuConjf(tr[j]);
}
__syncthreads();
res_ = MAGMA_C_ZERO;
#pragma unroll
for(int j=0; j < 4; j++)
res_ += la[tx_][ty_*4+j]* b[j];
b[4+k] = res_;
__syncthreads();
A += lda* quarter_thread_x;
}
#pragma unroll
for(int k=0; k < 4; k++) {
la[tx_][ty_+quarter_thread_x*k] = b[4+k];
}
__syncthreads();
if ( ty_ < 4 ) {
int k = ty_*quarter_thread_x;
res_ = la[tx_][0+k] + la[tx_][1+k]
+ la[tx_][2+k] + la[tx_][3+k]
+ la[tx_][4+k] + la[tx_][5+k]
+ la[tx_][6+k] + la[tx_][7+k]
+ la[tx_][8+k] + la[tx_][9+k]
+ la[tx_][10+k]+ la[tx_][11+k]
+ la[tx_][12+k]+ la[tx_][13+k]
+ la[tx_][14+k]+ la[tx_][15+k];
WC[k + wc_c*lda ] = res_;
}
wc_c++;
__syncthreads();
}
WC += tx;
WC -= tx_;
la[ty][tx] = res;
__syncthreads();
if ( ty == 0 ) {
res=la[0][tx]+ la[1][tx]+ la[2][tx]+ la[3][tx];
WC[0+lda*(blkc)] = res;
}
}
__global__ void
chemv_kernel_tesla_L_update(
int n, magmaFloatComplex alpha,
const magmaFloatComplex * __restrict__ A, int lda,
const magmaFloatComplex * __restrict__ x, int incx,
magmaFloatComplex beta,
magmaFloatComplex * __restrict__ y, int incy,
magmaFloatComplex * __restrict__ WC )
{
// Second pass of the hemv: each thread owns one row index `ind` and sums
// the per-block partial dot products that the first-pass kernels stored in
// the workspace WC, then applies y = beta*y + alpha*sum.
// A, x and incx are unused here; the parameter list is kept identical to
// the other kernels of this driver.
const int tx  = threadIdx.x;
const int ind = blockIdx.x * thread_x + tx;
magmaFloatComplex total = MAGMA_C_ZERO;
// Start at this block's entry of the workspace and advance one
// thread_x-wide column block per iteration.
magmaFloatComplex *wc = WC + ind + lda * blockIdx.x;
for( int col = blockIdx.x * thread_x; col < n; col += thread_x ) {
    total += *wc;
    wc += thread_x;
}
if ( ind < n )
    y[ind * incy] = beta * y[ind * incy] + alpha * total;
}
extern "C"
void magmablas_chemv_tesla_L(
magma_int_t n, magmaFloatComplex alpha,
const magmaFloatComplex *A, magma_int_t lda,
const magmaFloatComplex *x, magma_int_t incx,
magmaFloatComplex beta,
magmaFloatComplex *y, magma_int_t incy,
magmaFloatComplex *dwork)
{
// One thread block per hemv_bs-wide row panel of A.
magma_int_t num_blocks = (n - 1)/hemv_bs + 1;
dim3 grid(num_blocks, 1, 1);
dim3 threads(thread_x, thread_y, 1);
/*
 * Dispatch: the "special" kernel assumes n is an exact multiple of
 * hemv_bs; otherwise the generic kernel handles the ragged last panel.
 */
if ( n % hemv_bs == 0 ) {
    hipLaunchKernelGGL(( chemv_kernel_tesla_L_special), dim3(grid), dim3(threads), 0, magma_stream ,
        n, alpha, A, lda, x, incx, beta, y, incy, dwork);
}
else {
    // Index of the last valid row/column inside the final partial panel.
    magma_int_t m_mod_thread_x = (n % hemv_bs) - 1;
    hipLaunchKernelGGL(( chemv_kernel_tesla_L_generic), dim3(grid), dim3(threads), 0, magma_stream ,
        n, alpha, A, lda, x, incx, beta, y, incy, dwork, m_mod_thread_x);
}
// Second pass: reduce the per-block partial results in dwork into y.
dim3 threads_update(hemv_bs, 1, 1);
hipLaunchKernelGGL(( chemv_kernel_tesla_L_update), dim3(grid), dim3(threads_update), 0, magma_stream ,
    n, alpha, A, lda, x, incx, beta, y, incy, dwork);
}
/*************************************************************************
Purpose
=======
magmablas_chemv performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n hermitian matrix.
Arguments
==========
UPLO CHARACTER*1.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
UPLO = 'U' or 'u' Only the upper triangular part of A
is to be referenced.
UPLO = 'L' or 'l' Only the lower triangular part of A
is to be referenced.
Unchanged on exit.
N INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
Unchanged on exit.
ALPHA COMPLEX.
On entry, ALPHA specifies the scalar alpha.
Unchanged on exit.
A COMPLEX array of DIMENSION ( LDA, n ).
Before entry with UPLO = 'U' or 'u', the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the hermitian matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = 'L' or 'l', the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the hermitian matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
Unchanged on exit.
LDA INTEGER.
On entry, LDA specifies the first dimension of A as declared
in the calling (sub) program. LDA must be at least
max( 1, n ).
Unchanged on exit.
It is recommended that lda is multiple of 16. Otherwise
performance would be deteriorated as the memory accesses
would not be fully coalescent.
X COMPLEX array of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
Unchanged on exit.
INCX INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
Unchanged on exit.
BETA COMPLEX.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
Unchanged on exit.
Y COMPLEX array of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
INCY INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
Unchanged on exit.
*/
extern "C"
magma_int_t
magmablas_chemv_tesla(
char uplo, magma_int_t n,
magmaFloatComplex alpha,
const magmaFloatComplex *A, magma_int_t lda,
const magmaFloatComplex *x, magma_int_t incx,
magmaFloatComplex beta,
magmaFloatComplex *y, magma_int_t incy)
{
// Driver for y := alpha*A*x + beta*y with A hermitian, single complex.
// Returns MAGMA_SUCCESS, or -i when the i-th argument is invalid
// (LAPACK-style error convention).
char uplo_[2] = {uplo, 0};
// NUL-terminated copy of uplo so it can be passed to lapackf77_lsame.
int upper = lapackf77_lsame(uplo_, "U");
/*
* Test the input parameters.
*/
if ((! upper) && (! lapackf77_lsame(uplo_, "L"))) {
return -1;
} else if ( n < 0 ) {
return -2;
} else if ( lda < max(1,n) ) {
return -5;
} else if ( incx == 0 ) {
return -7;
} else if ( incy == 0 ) {
return -10;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_C_EQUAL(alpha, MAGMA_C_ZERO) && MAGMA_C_EQUAL(beta, MAGMA_C_ONE) ) )
return MAGMA_SUCCESS;
/* TODO: Upper case is not implemented in MAGMA; fall back to the BLAS
   library (legacy v1-style hemv call — NOTE(review): confirm this matches
   the hipBLAS API in use). */
if ( upper )
hipblasChemv(uplo, n, alpha, A, lda, x, incx, beta, y, incy);
else {
magmaFloatComplex *dwork;
magma_int_t blocks = (n - 1)/thread_x + 1;
// Workspace of lda*(blocks+1) elements for the per-block partial sums.
magma_int_t lwork = lda * (blocks + 1);
// TODO deal with error: the allocation result is ignored here; on OOM
// the kernels below would run on an invalid pointer.
magma_cmalloc( &dwork, lwork );
magmablas_chemv_tesla_L(n, alpha, A, lda, x, incx, beta, y, incy, dwork);
magma_free( dwork );
}
return MAGMA_SUCCESS;
}
| 881967d0a489357c8af23f7258d86bbbf3e2bc91.cu | /*
-- MAGMA (version 1.4.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
December 2013
Note: [ds] precisions generated from csymv_tesla.cu
*/
#include "common_magma.h"
#define PRECISION_c
/* The version for fermi can be found in chemv_fermi.cu */
#define hemv_bs 64
#define thread_x 64
#define thread_y 4
#define bank_shift 33
#define quarter_thread_x 16
#define half_thread_x 32
/*******************************************************************************
* Lower case, where n is multiple of block size (hemv_bs)
*/
/* First pass for y := A*x (lower-stored hermitian A): thread block blkc
 * (thread_x x thread_y = 64x4 threads) processes the blkc-th 64-row panel.
 * Phase 1 handles the 64x64 diagonal block as 32x32 tiles; phase 2 sweeps
 * the off-diagonal column blocks left of the diagonal.  Per-block partial
 * sums go into workspace WC and are combined, with the alpha/beta scaling,
 * by chemv_kernel_tesla_L_update.  This variant assumes n is an exact
 * multiple of hemv_bs (= thread_x); the caller dispatches accordingly.
 */
__global__ void
chemv_kernel_tesla_L_special(
int n, magmaFloatComplex alpha,
const magmaFloatComplex * __restrict__ A, int lda,
const magmaFloatComplex * __restrict__ x, int incx,
magmaFloatComplex beta,
magmaFloatComplex * __restrict__ y, int incy,
magmaFloatComplex * __restrict__ WC)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
int blkc = blockIdx.x;
// res / res_: running products; res1 / res2: reduced results for the
// first / second 32-row half of the panel.
magmaFloatComplex res = MAGMA_C_ZERO;
magmaFloatComplex res_ = MAGMA_C_ZERO;
magmaFloatComplex res1 = MAGMA_C_ZERO;
// la must be at least half_thread_x*bank_shift = 32x33 = 1056;
// quarter_thread_x*(thread_x+2) = 16*(64+2) = 1056
__shared__ magmaFloatComplex la [quarter_thread_x][thread_x+3]; /* Why +3? */
// buff: x slice for this panel; buff2: x slice for the swept column blocks.
__shared__ magmaFloatComplex buff [thread_x];
__shared__ magmaFloatComplex buff2[thread_x];
magmaFloatComplex tr[4];
magmaFloatComplex b[4];
// First row/column index of this panel.
int break_d = thread_x * blkc;
// Flat thread id, re-split as a 32x8 (half_thread_x) layout for the
// diagonal-block work.
const int td = (thread_x * ty) + tx;
int tx_ = td % half_thread_x;
int ty_ = td / half_thread_x;
WC += break_d + tx;
x += (break_d + tx)*incx;
// Point A at this thread's element of the diagonal block.
A += break_d * (lda+1);
A += ty_*lda + tx_;
// load x[block] into buff
if ( ty == 0 ) {
buff[tx] = x[0];
} // stage this panel's slice of x in shared buff
tx = tx_; ty = ty_;
// Load the first 32x32 diagonal tile (lower part) into shared memory.
#pragma unroll
for(int j=0; j < half_thread_x; j += 8)
la[0][ bank_shift * (ty_+j) + tx_] = A[ j * lda];
__syncthreads();
// Mirror the lower triangle into the upper one (hermitian conjugate);
// the else branch is a no-op that leaves the lower part unchanged.
#pragma unroll
for(int i=ty_*4; i<(ty_ * 4 + 4); i++) {
if ( i < tx_ ) {
la[0][bank_shift * tx_ + i] = cuConjf( la[0][ i * bank_shift + tx_] );
}
else
la[0][bank_shift * tx_ + i] = la[0][ bank_shift * tx_ + i];
}
__syncthreads();
// Multiply the tile by the matching slice of x.
#pragma unroll
for(int j=0; j < 4; j++)
res += cuConjf( la[0][bank_shift * tx_ + j + ty_ * 4] ) * buff[j + ty_ * 4];
__syncthreads();
// Reduce the 8 per-thread partials of each row; ty_ == 0 keeps the sum.
la[0][bank_shift*tx_+ty_] = res;
__syncthreads();
if ( ty_== 0 ) {
res1 = la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7];
}
else {
res1 = MAGMA_C_ZERO;
}
__syncthreads();
res = MAGMA_C_ZERO;
// Second 32x32 diagonal tile (rows/cols 32..63 of the panel): same steps.
A += half_thread_x + half_thread_x*lda;
#pragma unroll
for(int j=0; j < half_thread_x; j += 8)
la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda];
__syncthreads();
#pragma unroll
for(int i=ty_*4; i<(4+ty_*4); i++) {
if ( i < tx_ ) {
la[0][bank_shift*tx_+i] = cuConjf( la[0][bank_shift*i+tx_] );
}
else
la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res += cuConjf( la[0][bank_shift*tx_+j+ty_*4] ) * buff[half_thread_x + j + 4 * ty_];
__syncthreads();
la[0][bank_shift*tx_+ty_] = res;
__syncthreads();
magmaFloatComplex res2;
res2 = MAGMA_C_ZERO;
if ( ty_== 1 ) {
res2 = la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7];
}
else {
res2 = MAGMA_C_ZERO;
}
__syncthreads();
res = MAGMA_C_ZERO;
// Off-diagonal 32x32 tile of the diagonal block: used directly for one
// half (res) and conjugate-transposed via shared memory for the other
// half (res_).
A -= half_thread_x*lda;
res_ = MAGMA_C_ZERO;
#pragma unroll
for(int j=0; j < half_thread_x; j += 8)
tr[j/8] = A[ j * lda];
#pragma unroll
for(int j=0; j < 4; j++) {
res += tr[j] * buff[ j*8 + ty_];
la[0][bank_shift*(ty_+j*8)+tx_] = tr[j];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res_ += cuConjf(la[0][bank_shift*tx_+j+ty_*4]) * buff[half_thread_x +j+ty_*4];
__syncthreads();
// Fold the off-diagonal contributions into res2 / res1.
la[0][bank_shift*tx_+ty_] = res;
__syncthreads();
if ( ty_ == 1 ) {
res2 = res2
+ la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7];
}
else {
res2 = MAGMA_C_ZERO;
}
__syncthreads();
la[0][bank_shift*tx_+ty_] = res_;
__syncthreads();
if ( ty_ == 0 ) {
res1 = res1
+ la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7];
}
else {
res1 = MAGMA_C_ZERO;
}
A -= half_thread_x;
__syncthreads();
// Back to the 64x4 thread layout; each thread row keeps its half-result.
tx = threadIdx.x;
ty = threadIdx.y;
if ( ty_ == 0 && ty == 0 )
res = res1;
else if ( ty_ == 1 && ty == 0 )
res = res2;
else {
res = MAGMA_C_ZERO;
}
// Rewind A and x to the start of this panel's rows at column 0.
A -= ty_* lda;
A -= tx_;
A = A - lda * blkc * thread_x;
x = x - blkc * thread_x * incx;
A += 4 * ty* lda;
A += tx;
int wc_c = 0;
// count is incremented below but never read (debug leftover).
int count = 0;
// Re-split the flat id as 16x16 (quarter_thread_x) for the sweep phase.
tx_ = td % quarter_thread_x;
ty_ = td / quarter_thread_x;
WC -= tx;
WC += tx_;
// Phase 2: sweep the column blocks left of the diagonal.  Each pass
// accumulates A_block * x_block into res and writes partial sums of the
// conjugate-transposed contribution into WC.  The condition is simply
// blkc >= 1 (at least one column block to the left).
if ( blkc * thread_x >= thread_x ) {
#pragma unroll
for( int i=0; i < thread_x; i += thread_x ) {
res_ = MAGMA_C_ZERO;
count++;
if ( ty == 0 )
buff2[tx] = x[i*incx];
__syncthreads();
#pragma unroll
for( int k=0; k < 4; k++ ) {
#pragma unroll
for(int j=0; j < 4; j++)
tr[j] = A[j*lda];
#pragma unroll
for(int j=0; j < 4; j++) {
res += tr[j] * buff2[ quarter_thread_x * k + ty*4 + j];
la[j + ty*4][tx] = cuConjf(tr[j]) * buff[tx];
}
__syncthreads();
res_ = MAGMA_C_ZERO;
#pragma unroll
for(int j=0; j < 4; j++) {
res_ += la[tx_][ty_*4+j];
}
b[k] = res_;
__syncthreads();
A += lda * quarter_thread_x;
}
#pragma unroll
for(int k=0; k < 4; k++) {
la[tx_][ty_+quarter_thread_x*k] = b[k];
}
__syncthreads();
if ( ty_ < 4 ) {
int k = ty_*quarter_thread_x;
res_ = la[tx_][0+k] + la[tx_][1+k]
+ la[tx_][2+k] + la[tx_][3+k]
+ la[tx_][4+k] + la[tx_][5+k]
+ la[tx_][6+k] + la[tx_][7+k]
+ la[tx_][8+k] + la[tx_][9+k]
+ la[tx_][10+k]+ la[tx_][11+k]
+ la[tx_][12+k]+ la[tx_][13+k]
+ la[tx_][14+k]+ la[tx_][15+k];
WC[k + wc_c*lda ] = res_;
}
wc_c++;
__syncthreads();
}
}
// Remaining column blocks (same body as the first iteration above).
for(int i=thread_x; i < (blkc * thread_x); i += thread_x ) {
res_ = MAGMA_C_ZERO;
count++;
if ( ty == 0 )
buff2[tx] = x[i*incx];
__syncthreads();
#pragma unroll
for( int k=0; k < 4; k++ ) {
#pragma unroll
for(int j=0; j < 4; j++)
tr[j] = A[j*lda];
#pragma unroll
for(int j=0; j < 4; j++) {
res += tr[j] * buff2[ quarter_thread_x*k + ty*4 + j];
la[j + ty*4][tx] = cuConjf( tr[j] ) * buff[tx];
}
__syncthreads();
res_ = MAGMA_C_ZERO;
#pragma unroll
for(int j=0; j < 4; j++)
res_ += la[tx_][ty_*4+j];
b[k] = res_;
__syncthreads();
A += lda * quarter_thread_x;
}
#pragma unroll
for(int k=0; k < 4; k++) {
la[tx_][ty_+quarter_thread_x*k] = b[k];
}
__syncthreads();
if ( ty_ < 4 ) {
int k = ty_*quarter_thread_x;
res_ = la[tx_][0+k] + la[tx_][1+k]
+ la[tx_][2+k] + la[tx_][3+k]
+ la[tx_][4+k] + la[tx_][5+k]
+ la[tx_][6+k] + la[tx_][7+k]
+ la[tx_][8+k] + la[tx_][9+k]
+ la[tx_][10+k]+ la[tx_][11+k]
+ la[tx_][12+k]+ la[tx_][13+k]
+ la[tx_][14+k]+ la[tx_][15+k];
WC[k + wc_c*lda ] = res_;
}
wc_c++;
__syncthreads();
}
// Reduce the four ty partial sums and store this block's diagonal result.
WC += tx;
WC -= tx_;
la[ty][tx] = res;
__syncthreads();
if ( ty == 0 ) {
res = la[0][tx]+ la[1][tx]
+ la[2][tx]+ la[3][tx];
WC[0+lda*(blkc) ] = res;
}
}
/**************************************************************
* Lower case for generic sizes
*/
/* Same algorithm as chemv_kernel_tesla_L_special, plus boundary handling
 * for the last, partial panel: m_mod_thread_x is the index of the last
 * valid row/column inside that panel.  Out-of-range loads are clamped to
 * row m_mod_thread_x or filled with sentinel values; the matching entries
 * of the shared x buffer are zeroed above, which should cancel the
 * sentinel contributions (NOTE(review): confirm for all paths).
 */
__global__ void
chemv_kernel_tesla_L_generic(
int n, magmaFloatComplex alpha,
const magmaFloatComplex * __restrict__ A, int lda,
const magmaFloatComplex * __restrict__ x, int incx,
magmaFloatComplex beta,
magmaFloatComplex * __restrict__ y, int incy,
magmaFloatComplex * __restrict__ WC,
int m_mod_thread_x)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
int blkc = blockIdx.x;
// res / res_: running products; res1 / res2: reduced results for the
// first / second 32-row half of the panel.
magmaFloatComplex res = MAGMA_C_ZERO;
magmaFloatComplex res_ = MAGMA_C_ZERO;
magmaFloatComplex res1 = MAGMA_C_ZERO;
__shared__ magmaFloatComplex la [quarter_thread_x][thread_x+3];
__shared__ magmaFloatComplex buff [thread_x];
__shared__ magmaFloatComplex buff2[thread_x];
magmaFloatComplex tr[4];
magmaFloatComplex b[8];
// First row/column index of this panel.
int break_d = thread_x * blkc;
// Flat thread id, re-split as 32x8 for the diagonal-block phase.
const int td = (thread_x * ty) + tx;
int tx_ = td % half_thread_x;
int ty_ = td / half_thread_x;
WC += break_d + tx;
x += (break_d + tx) * incx;
A += break_d * (lda+1);
A += lda * ty_;
int trackA;
// Last block: clamp the load row to m_mod_thread_x and zero-pad buff
// beyond the matrix edge; interior blocks load unconditionally.
if ( blkc == ( gridDim.x - 1 ) ) {
if ( ty == 0 ) {
if ( tx > m_mod_thread_x ) {
buff[tx] = MAGMA_C_ZERO;
}
else
buff[tx] = x[0];
}
if ( tx_ > m_mod_thread_x )
trackA=m_mod_thread_x;
else
trackA=tx_;
A += trackA;
}
else {
if ( ty == 0 ) {
buff[tx] = x[0];
}
trackA = tx_;
A += trackA;
}
// Somehow merging these two if - else creates problem
// It could be a potential bug -- from synchronization or from cuda or compiler
// Load the first 32x32 diagonal tile; out-of-range rows get a sentinel.
if ( blkc == ( gridDim.x - 1 ) ) {
#pragma unroll
for(int j=0; j < half_thread_x; j += 8) {
if ( ( ty_ + j ) > m_mod_thread_x ) {
la[0][bank_shift*(ty_+j)+tx_] = MAGMA_C_MAKE( 9999, 0 );
}
else
la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda];
}
A -= trackA;
}
else {
#pragma unroll
for(int j=0; j < half_thread_x; j += 8) {
la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda];
}
}
tx = tx_;
ty = ty_;
__syncthreads();
// Mirror the lower triangle into the upper one (hermitian conjugate);
// the else branch is a no-op for the lower part.
#pragma unroll
for(int i=ty_*4; i<(ty_*4+4); i++) {
if ( i < tx_ ) {
la[0][bank_shift*tx_+i] = cuConjf(la[0][i*bank_shift+tx_]);
}
else
la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i];
}
__syncthreads();
// Multiply the tile by the matching slice of x.
#pragma unroll
for(int j=0; j < 4; j++)
res += cuConjf(la[0][bank_shift*tx_+j+ty_*4]) * buff[j+ty_*4];
__syncthreads();
// Reduce the 8 per-thread partials of each row; ty_ == 0 keeps the sum.
la[0][bank_shift*tx_+ty_] = res;
__syncthreads();
if ( ty_== 0 ) {
res1 = la[0][tx_*bank_shift+0]
+ la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]
+ la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]
+ la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]
+ la[0][tx_*bank_shift+7];
}
else {
res1 = MAGMA_C_ZERO;
}
__syncthreads();
res = MAGMA_C_ZERO;
// Second 32x32 diagonal tile (rows/cols 32..63), with the same boundary
// clamping / sentinel fill for the last block.
if ( blkc == ( gridDim.x - 1 ) ) {
if ( (tx_+half_thread_x) > m_mod_thread_x )
trackA = m_mod_thread_x;
else
trackA = tx_ + half_thread_x;
A += trackA+half_thread_x*lda;
#pragma unroll
for(int j=0; j < half_thread_x; j += 8) {
if ( ( ty_ + j+half_thread_x ) > m_mod_thread_x ) {
la[0][bank_shift*(ty_+j)+tx_] = MAGMA_C_MAKE( 99999, 0 );
}
else
la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda];
}
A -= trackA+half_thread_x*lda;
A += tx_;
A += half_thread_x + half_thread_x*lda;
}
else {
A += half_thread_x + half_thread_x*lda;
#pragma unroll
for(int j=0; j < half_thread_x; j += 8) {
la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda];
}
}
__syncthreads();
#pragma unroll
for(int i=ty_*4; i<(4+ty_*4); i++) {
if ( i < tx_ ) {
la[0][bank_shift*tx_+i] = cuConjf(la[0][bank_shift*i+tx_]);
}
else
la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res += cuConjf(la[0][bank_shift*tx_+j+ty_*4]) * buff[half_thread_x + j + 4 * ty_];
__syncthreads();
la[0][bank_shift*tx_+ty_] = res;
__syncthreads();
magmaFloatComplex res2;
res2 = MAGMA_C_ZERO;
if ( ty_== 1 ) {
res2 = la[0][tx_*bank_shift+0]
+ la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]
+ la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]
+ la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]
+ la[0][tx_*bank_shift+7];
}
else {
res2 = MAGMA_C_ZERO;
}
__syncthreads();
res = MAGMA_C_ZERO;
res_ = MAGMA_C_ZERO;
// Off-diagonal 32x32 tile of the diagonal block: used directly for one
// half (res) and conjugate-transposed via shared memory for the other
// half (res_), again with boundary clamping / sentinel fill.
A -= half_thread_x*lda;
if ( blkc == ( gridDim.x - 1 ) ) {
A -= tx_;
if ( tx_ > m_mod_thread_x )
trackA=m_mod_thread_x;
else
trackA=tx_;
A += trackA;
#pragma unroll
for(int j=0; j < half_thread_x; j += 8)
if ( ( ty_ + j ) > m_mod_thread_x ) {
tr[j/8] = MAGMA_C_MAKE( 99999, 0 );
}
else
tr[j/8] = A[ j * lda];
A -= trackA;
A += tx_;
}
else {
#pragma unroll
for(int j=0; j < half_thread_x; j += 8)
tr[j/8] = A[ j * lda];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++) {
res += tr[j] * buff[ j*8 + ty_];
la[0][bank_shift*(ty_+j*8)+tx_] = tr[j];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res_ += cuConjf(la[0][bank_shift*tx_+j+ty_*4]) * buff[half_thread_x +j+ty_*4];
__syncthreads();
// Fold the off-diagonal contributions into res2 / res1.
la[0][bank_shift*tx_+ty_] = res;
__syncthreads();
if ( ty_ == 1 ) {
res2 = res2
+ la[0][tx_*bank_shift+0]
+ la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]
+ la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]
+ la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]
+ la[0][tx_*bank_shift+7];
}
else {
res2 = MAGMA_C_ZERO;
}
__syncthreads();
la[0][bank_shift*tx_+ty_] = res_;
__syncthreads();
if ( ty_ == 0 ) {
res1 = res1
+ la[0][tx_*bank_shift+0]
+ la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]
+ la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]
+ la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]
+ la[0][tx_*bank_shift+7];
}
else {
res1 = MAGMA_C_ZERO;
}
A -= half_thread_x;
__syncthreads();
// Back to the 64x4 thread layout; each thread row keeps its half-result.
tx = threadIdx.x;
ty = threadIdx.y;
if ( ty_ == 0 && ty == 0 )
res = res1;
else if ( ty_ == 1 && ty == 0 )
res = res2;
else {
res = MAGMA_C_ZERO;
}
// Rewind A and x to the start of this panel's rows at column 0; the last
// block clamps its row offset to m_mod_thread_x.
A -= ty_* lda;
A -= tx_;
A = A - lda*break_d;
x = x - break_d*incx;
A += 4 * ty* lda;
if ( blkc == ( gridDim.x - 1 ) ) {
if ( tx <= m_mod_thread_x )
A += tx;
else
A += m_mod_thread_x;
}
else{
A += tx;
}
int wc_c = 0;
// count is incremented below but never read (debug leftover).
int count = 0;
// Re-split the flat id as 16x16 (quarter_thread_x) for the sweep phase.
tx_ = td % quarter_thread_x;
ty_ = td / quarter_thread_x;
WC -= tx;
WC += tx_;
#pragma unroll
for(int j=0; j < 4; j++)
b[j] = buff[ty_*4+j];
// Phase 2: sweep the column blocks left of the diagonal; each pass
// accumulates A_block * x_block into res and writes partial sums of the
// conjugate-transposed contribution into WC.
if ( break_d > 0 )
#pragma unroll
for( int i=0; i < thread_x; i += thread_x ) {
res_ = MAGMA_C_ZERO;
count++;
if ( ty == 0 ) {
buff2[tx] = x[i*incx];
}
__syncthreads();
#pragma unroll
for( int k=0; k < 4; k++ ) {
#pragma unroll
for(int j=0; j < 4; j++)
tr[j] = A[j*lda];
#pragma unroll
for(int j=0; j < 4; j++) {
res += tr[j]*buff2[quarter_thread_x*k + ty*4+(j)];
la[( (j)+ty*4)][tx] = cuConjf(tr[j]);
}
__syncthreads();
res_ = MAGMA_C_ZERO;
#pragma unroll
for(int j=0; j < 4; j++)
res_ += la[tx_][ty_*4+j]* b[j];
b[4+k] = res_;
__syncthreads();
A += lda* quarter_thread_x;
}
#pragma unroll
for(int k=0; k < 4; k++) {
la[tx_][ty_+quarter_thread_x*k] = b[4+k];
}
__syncthreads();
if ( ty_ < 4 ) {
int k = ty_*quarter_thread_x;
res_ = la[tx_][0+k] + la[tx_][1+k]
+ la[tx_][2+k] + la[tx_][3+k]
+ la[tx_][4+k] + la[tx_][5+k]
+ la[tx_][6+k] + la[tx_][7+k]
+ la[tx_][8+k] + la[tx_][9+k]
+ la[tx_][10+k]+ la[tx_][11+k]
+ la[tx_][12+k]+ la[tx_][13+k]
+ la[tx_][14+k]+ la[tx_][15+k];
WC[k + wc_c*lda ] = res_;
}
wc_c++;
__syncthreads();
}
// Remaining column blocks (same body as the first iteration above).
for( int i=thread_x; i < break_d; i += thread_x ) {
res_ = MAGMA_C_ZERO;
count++;
if ( ty == 0 )
buff2[tx] = x[i*incx];
__syncthreads();
#pragma unroll
for( int k=0; k < 4; k++ ) {
#pragma unroll
for(int j=0; j < 4; j++)
tr[j] = A[j*lda];
#pragma unroll
for(int j=0; j < 4; j++) {
res += tr[j]*buff2[quarter_thread_x*k + ty*4+(j)];
la[( (j)+ty*4)][tx] = cuConjf(tr[j]);
}
__syncthreads();
res_ = MAGMA_C_ZERO;
#pragma unroll
for(int j=0; j < 4; j++)
res_ += la[tx_][ty_*4+j]* b[j];
b[4+k] = res_;
__syncthreads();
A += lda* quarter_thread_x;
}
#pragma unroll
for(int k=0; k < 4; k++) {
la[tx_][ty_+quarter_thread_x*k] = b[4+k];
}
__syncthreads();
if ( ty_ < 4 ) {
int k = ty_*quarter_thread_x;
res_ = la[tx_][0+k] + la[tx_][1+k]
+ la[tx_][2+k] + la[tx_][3+k]
+ la[tx_][4+k] + la[tx_][5+k]
+ la[tx_][6+k] + la[tx_][7+k]
+ la[tx_][8+k] + la[tx_][9+k]
+ la[tx_][10+k]+ la[tx_][11+k]
+ la[tx_][12+k]+ la[tx_][13+k]
+ la[tx_][14+k]+ la[tx_][15+k];
WC[k + wc_c*lda ] = res_;
}
wc_c++;
__syncthreads();
}
// Reduce the four ty partial sums and store this block's diagonal result.
WC += tx;
WC -= tx_;
la[ty][tx] = res;
__syncthreads();
if ( ty == 0 ) {
res=la[0][tx]+ la[1][tx]+ la[2][tx]+ la[3][tx];
WC[0+lda*(blkc)] = res;
}
}
__global__ void
chemv_kernel_tesla_L_update(
int n, magmaFloatComplex alpha,
const magmaFloatComplex * __restrict__ A, int lda,
const magmaFloatComplex * __restrict__ x, int incx,
magmaFloatComplex beta,
magmaFloatComplex * __restrict__ y, int incy,
magmaFloatComplex * __restrict__ WC )
{
// Second pass of the hemv: each thread owns one row index `ind` and sums
// the per-block partial dot products that the first-pass kernels stored in
// the workspace WC, then applies y = beta*y + alpha*sum.
// A, x and incx are unused here; the parameter list is kept identical to
// the other kernels of this driver.
const int tx  = threadIdx.x;
const int ind = blockIdx.x * thread_x + tx;
magmaFloatComplex total = MAGMA_C_ZERO;
// Start at this block's entry of the workspace and advance one
// thread_x-wide column block per iteration.
magmaFloatComplex *wc = WC + ind + lda * blockIdx.x;
for( int col = blockIdx.x * thread_x; col < n; col += thread_x ) {
    total += *wc;
    wc += thread_x;
}
if ( ind < n )
    y[ind * incy] = beta * y[ind * incy] + alpha * total;
}
extern "C"
void magmablas_chemv_tesla_L(
magma_int_t n, magmaFloatComplex alpha,
const magmaFloatComplex *A, magma_int_t lda,
const magmaFloatComplex *x, magma_int_t incx,
magmaFloatComplex beta,
magmaFloatComplex *y, magma_int_t incy,
magmaFloatComplex *dwork)
{
// One thread block per hemv_bs-wide row panel of A.
magma_int_t num_blocks = (n - 1)/hemv_bs + 1;
dim3 grid(num_blocks, 1, 1);
dim3 threads(thread_x, thread_y, 1);
/*
 * Dispatch: the "special" kernel assumes n is an exact multiple of
 * hemv_bs; otherwise the generic kernel handles the ragged last panel.
 */
if ( n % hemv_bs == 0 ) {
    chemv_kernel_tesla_L_special<<< grid, threads, 0, magma_stream >>>
        (n, alpha, A, lda, x, incx, beta, y, incy, dwork);
}
else {
    // Index of the last valid row/column inside the final partial panel.
    magma_int_t m_mod_thread_x = (n % hemv_bs) - 1;
    chemv_kernel_tesla_L_generic<<< grid, threads, 0, magma_stream >>>
        (n, alpha, A, lda, x, incx, beta, y, incy, dwork, m_mod_thread_x);
}
// Second pass: reduce the per-block partial results in dwork into y.
dim3 threads_update(hemv_bs, 1, 1);
chemv_kernel_tesla_L_update<<< grid, threads_update, 0, magma_stream >>>
    (n, alpha, A, lda, x, incx, beta, y, incy, dwork);
}
/*************************************************************************
Purpose
=======
magmablas_chemv performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n hermitian matrix.
Arguments
==========
UPLO CHARACTER*1.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
UPLO = 'U' or 'u' Only the upper triangular part of A
is to be referenced.
UPLO = 'L' or 'l' Only the lower triangular part of A
is to be referenced.
Unchanged on exit.
N INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
Unchanged on exit.
ALPHA COMPLEX.
On entry, ALPHA specifies the scalar alpha.
Unchanged on exit.
A COMPLEX array of DIMENSION ( LDA, n ).
Before entry with UPLO = 'U' or 'u', the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the hermitian matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = 'L' or 'l', the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the hermitian matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
Unchanged on exit.
LDA INTEGER.
On entry, LDA specifies the first dimension of A as declared
in the calling (sub) program. LDA must be at least
max( 1, n ).
Unchanged on exit.
It is recommended that lda is multiple of 16. Otherwise
performance would be deteriorated as the memory accesses
would not be fully coalescent.
X COMPLEX array of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
Unchanged on exit.
INCX INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
Unchanged on exit.
BETA COMPLEX.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
Unchanged on exit.
Y COMPLEX array of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
INCY INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
Unchanged on exit.
*/
extern "C"
magma_int_t
magmablas_chemv_tesla(
char uplo, magma_int_t n,
magmaFloatComplex alpha,
const magmaFloatComplex *A, magma_int_t lda,
const magmaFloatComplex *x, magma_int_t incx,
magmaFloatComplex beta,
magmaFloatComplex *y, magma_int_t incy)
{
// Driver for y := alpha*A*x + beta*y with A hermitian, single complex.
// Returns MAGMA_SUCCESS, -i when the i-th argument is invalid
// (LAPACK-style error convention), or MAGMA_ERR_DEVICE_ALLOC when the
// device workspace cannot be allocated.
char uplo_[2] = {uplo, 0};
// NUL-terminated copy of uplo so it can be passed to lapackf77_lsame.
int upper = lapackf77_lsame(uplo_, "U");
/*
* Test the input parameters.
*/
if ((! upper) && (! lapackf77_lsame(uplo_, "L"))) {
return -1;
} else if ( n < 0 ) {
return -2;
} else if ( lda < max(1,n) ) {
return -5;
} else if ( incx == 0 ) {
return -7;
} else if ( incy == 0 ) {
return -10;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_C_EQUAL(alpha, MAGMA_C_ZERO) && MAGMA_C_EQUAL(beta, MAGMA_C_ONE) ) )
return MAGMA_SUCCESS;
/* TODO: Upper case is not implemented in MAGMA; fall back to CUBLAS. */
if ( upper )
cublasChemv(uplo, n, alpha, A, lda, x, incx, beta, y, incy);
else {
magmaFloatComplex *dwork;
magma_int_t blocks = (n - 1)/thread_x + 1;
// Workspace of lda*(blocks+1) elements for the per-block partial sums.
magma_int_t lwork = lda * (blocks + 1);
// Fix: the allocation result used to be ignored ("TODO deal with
// error"); on out-of-memory the kernels would have been launched on an
// invalid pointer.  Report the failure to the caller instead.
if ( MAGMA_SUCCESS != magma_cmalloc( &dwork, lwork ) ) {
return MAGMA_ERR_DEVICE_ALLOC;
}
magmablas_chemv_tesla_L(n, alpha, A, lda, x, incx, beta, y, incy, dwork);
magma_free( dwork );
}
return MAGMA_SUCCESS;
}
|
100b710b5d52df566d6f5709f187d43a3363042d.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <cuda_utils.cuh>
#include <linalg/rsvd.cuh>
#include <random/rng.cuh>
#include "test_utils.h"
namespace MLCommon {
namespace LinAlg {
// Parameter record for one randomized-SVD test case.  Member order is
// significant: the inputs_fx / inputs_dx tables below use aggregate
// initialization in declaration order.
template <typename T>
struct RsvdInputs {
  // Comparison tolerance; values > 1 select the tiny 3x2 sanity-check
  // matrix path in SetUp instead of a random matrix.
  T tolerance;
  int n_row;   // m: rows of the test matrix A
  int n_col;   // n: columns of the test matrix A
  // Rank and oversampling expressed as fractions of min(m, n); used only
  // when k == 0 (the rsvdPerc code path).
  T PC_perc;
  T UpS_perc;
  int k;       // explicit target rank; 0 selects the percentage-driven path
  int p;       // explicit oversampling (rsvdFixedRank path)
  // Forwarded as the use_bbt flag of rsvdPerc / rsvdFixedRank
  // (presumably selects the B*B^T variant — confirm against rsvd.cuh).
  bool use_bbt;
  unsigned long long int seed;  // RNG seed for the random test matrix
};
template <typename T>
::std::ostream &operator<<(::std::ostream &os, const RsvdInputs<T> &dims) {
return os;
}
// Parameterized fixture: SetUp builds a test matrix, runs the randomized
// SVD under test (percentage-driven or fixed-rank API, chosen by params.k),
// and leaves U/S/V on the device for the test bodies (outside this chunk)
// to check; TearDown releases all device resources and handles.
template <typename T>
class RsvdTest : public ::testing::TestWithParam<RsvdInputs<T>> {
 protected:
  void SetUp() override {
    // Library handles, stream, and allocator used by the rsvd routines.
    CUSOLVER_CHECK(hipsolverDnCreate(&cusolverH));
    CUBLAS_CHECK(hipblasCreate(&cublasH));
    CUDA_CHECK(hipStreamCreate(&stream));
    allocator.reset(new raft::mr::device::default_allocator);
    params = ::testing::TestWithParam<RsvdInputs<T>>::GetParam();
    // rSVD seems to be very sensitive to the random number sequence as well!
    raft::random::Rng r(params.seed, raft::random::GenTaps);
    int m = params.n_row, n = params.n_col;
    // Convergence controls for the Jacobi eigen/SVD solver used inside rsvd.
    T eig_svd_tol = 1.e-7;
    int max_sweeps = 100;
    T mu = 0.0, sigma = 1.0;
    raft::allocate(A, m * n);
    if (params.tolerance > 1) {  // Sanity check
      // Hard-coded 3x2 matrix with known singular value / vector
      // references (column-major data below).
      ASSERT(m == 3, "This test only supports mxn=3x2!");
      ASSERT(m * n == 6, "This test only supports mxn=3x2!");
      T data_h[] = {1.0, 4.0, 2.0, 2.0, 5.0, 1.0};
      raft::update_device(A, data_h, m * n, stream);
      T left_eig_vectors_ref_h[] = {-0.308219, -0.906133, -0.289695};
      T right_eig_vectors_ref_h[] = {-0.638636, -0.769509};
      T sing_vals_ref_h[] = {7.065283};
      raft::allocate(left_eig_vectors_ref, m * 1);
      raft::allocate(right_eig_vectors_ref, n * 1);
      raft::allocate(sing_vals_ref, 1);
      raft::update_device(left_eig_vectors_ref, left_eig_vectors_ref_h, m * 1,
                          stream);
      raft::update_device(right_eig_vectors_ref, right_eig_vectors_ref_h, n * 1,
                          stream);
      raft::update_device(sing_vals_ref, sing_vals_ref_h, 1, stream);
    } else {  // Other normal tests
      // Random N(mu, sigma) matrix.
      r.normal(A, m * n, mu, sigma, stream);
    }
    A_backup_cpu = (T *)malloc(
      sizeof(T) * m *
      n);  // Backup A matrix as svdJacobi will destroy the content of A
    raft::update_host(A_backup_cpu, A, m * n, stream);
    // RSVD tests
    if (params.k == 0) {  // Test with PC and upsampling ratio
      // Derive rank k and oversampling p from the requested fractions
      // (at least 1 each), then exercise the percentage-based API.
      params.k = max((int)(min(m, n) * params.PC_perc), 1);
      params.p = max((int)(min(m, n) * params.UpS_perc), 1);
      raft::allocate(U, m * params.k, true);
      raft::allocate(S, params.k, true);
      raft::allocate(V, n * params.k, true);
      rsvdPerc(A, m, n, S, U, V, params.PC_perc, params.UpS_perc,
               params.use_bbt, true, true, false, eig_svd_tol, max_sweeps,
               cusolverH, cublasH, stream, allocator);
    } else {  // Test with directly given fixed rank
      raft::allocate(U, m * params.k, true);
      raft::allocate(S, params.k, true);
      raft::allocate(V, n * params.k, true);
      rsvdFixedRank(A, m, n, S, U, V, params.k, params.p, params.use_bbt, true,
                    true, true, eig_svd_tol, max_sweeps, cusolverH, cublasH,
                    stream, allocator);
    }
    // Restore A (rsvd destroyed it) and drop the host backup.
    raft::update_device(A, A_backup_cpu, m * n, stream);
    free(A_backup_cpu);
  }
  void TearDown() override {
    // Release device buffers; the reference buffers exist only for the
    // sanity-check case and are guarded by their nullptr defaults.
    CUDA_CHECK(hipFree(A));
    CUDA_CHECK(hipFree(U));
    CUDA_CHECK(hipFree(S));
    CUDA_CHECK(hipFree(V));
    if (left_eig_vectors_ref) CUDA_CHECK(hipFree(left_eig_vectors_ref));
    if (right_eig_vectors_ref) CUDA_CHECK(hipFree(right_eig_vectors_ref));
    if (sing_vals_ref) CUDA_CHECK(hipFree(sing_vals_ref));
    CUSOLVER_CHECK(hipsolverDnDestroy(cusolverH));
    CUBLAS_CHECK(hipblasDestroy(cublasH));
    CUDA_CHECK(hipStreamDestroy(stream));
  }
 protected:
  RsvdInputs<T> params;
  // A: input matrix; U/S/V: rsvd outputs; *_ref: expected values for the
  // 3x2 sanity-check case (nullptr otherwise).
  T *A, *A_backup_cpu,
    *U = nullptr, *S = nullptr, *V = nullptr, *left_eig_vectors_ref = nullptr,
    *right_eig_vectors_ref = nullptr, *sing_vals_ref = nullptr;
  hipsolverDnHandle_t cusolverH = nullptr;
  hipblasHandle_t cublasH = nullptr;
  hipStream_t stream;
  std::shared_ptr<deviceAllocator> allocator;
};
const std::vector<RsvdInputs<float>> inputs_fx = {
// Test with ratios
{0.20f, 256, 256, 0.2f, 0.05f, 0, 0, true, 4321ULL}, // Square + BBT
{0.20f, 2048, 256, 0.2f, 0.05f, 0, 0, true, 4321ULL}, // Tall + BBT
{0.20f, 256, 256, 0.2f, 0.05f, 0, 0, false, 4321ULL}, // Square + non-BBT
{0.20f, 2048, 256, 0.2f, 0.05f, 0, 0, false, 4321ULL}, // Tall + non-BBT
{0.20f, 2048, 2048, 0.2f, 0.05f, 0, 0, true, 4321ULL}, // Square + BBT
{0.60f, 16384, 2048, 0.2f, 0.05f, 0, 0, true, 4321ULL}, // Tall + BBT
{0.20f, 2048, 2048, 0.2f, 0.05f, 0, 0, false, 4321ULL}, // Square + non-BBT
{0.60f, 16384, 2048, 0.2f, 0.05f, 0, 0, false, 4321ULL} // Tall + non-BBT
, // Test with fixed ranks
{0.10f, 256, 256, 0.0f, 0.0f, 100, 5, true, 4321ULL}, // Square + BBT
{0.12f, 2048, 256, 0.0f, 0.0f, 100, 5, true, 4321ULL}, // Tall + BBT
{0.10f, 256, 256, 0.0f, 0.0f, 100, 5, false, 4321ULL}, // Square + non-BBT
{0.12f, 2048, 256, 0.0f, 0.0f, 100, 5, false, 4321ULL}, // Tall + non-BBT
{0.60f, 2048, 2048, 0.0f, 0.0f, 100, 5, true, 4321ULL}, // Square + BBT
{1.00f, 16384, 2048, 0.0f, 0.0f, 100, 5, true, 4321ULL}, // Tall + BBT
{0.60f, 2048, 2048, 0.0f, 0.0f, 100, 5, false, 4321ULL}, // Square + non-BBT
{1.00f, 16384, 2048, 0.0f, 0.0f, 100, 5, false, 4321ULL} // Tall + non-BBT
};
const std::vector<RsvdInputs<double>> inputs_dx = {
// Test with ratios
{0.20, 256, 256, 0.2, 0.05, 0, 0, true, 4321ULL}, // Square + BBT
{0.20, 2048, 256, 0.2, 0.05, 0, 0, true, 4321ULL}, // Tall + BBT
{0.20, 256, 256, 0.2, 0.05, 0, 0, false, 4321ULL}, // Square + non-BBT
{0.20, 2048, 256, 0.2, 0.05, 0, 0, false, 4321ULL}, // Tall + non-BBT
{0.20, 2048, 2048, 0.2, 0.05, 0, 0, true, 4321ULL}, // Square + BBT
{0.60, 16384, 2048, 0.2, 0.05, 0, 0, true, 4321ULL}, // Tall + BBT
{0.20, 2048, 2048, 0.2, 0.05, 0, 0, false, 4321ULL}, // Square + non-BBT
{0.60, 16384, 2048, 0.2, 0.05, 0, 0, false, 4321ULL} // Tall + non-BBT
, // Test with fixed ranks
{0.10, 256, 256, 0.0, 0.0, 100, 5, true, 4321ULL}, // Square + BBT
{0.12, 2048, 256, 0.0, 0.0, 100, 5, true, 4321ULL}, // Tall + BBT
{0.10, 256, 256, 0.0, 0.0, 100, 5, false, 4321ULL}, // Square + non-BBT
{0.12, 2048, 256, 0.0, 0.0, 100, 5, false, 4321ULL}, // Tall + non-BBT
{0.60, 2048, 2048, 0.0, 0.0, 100, 5, true, 4321ULL}, // Square + BBT
{1.00, 16384, 2048, 0.0, 0.0, 100, 5, true, 4321ULL}, // Tall + BBT
{0.60, 2048, 2048, 0.0, 0.0, 100, 5, false, 4321ULL}, // Square + non-BBT
{1.00, 16384, 2048, 0.0, 0.0, 100, 5, false, 4321ULL} // Tall + non-BBT
};
const std::vector<RsvdInputs<float>> sanity_inputs_fx = {
{100000000000000000.0f, 3, 2, 0.2f, 0.05f, 0, 0, true, 4321ULL},
{100000000000000000.0f, 3, 2, 0.0f, 0.0f, 1, 1, true, 4321ULL},
{100000000000000000.0f, 3, 2, 0.2f, 0.05f, 0, 0, false, 4321ULL},
{100000000000000000.0f, 3, 2, 0.0f, 0.0f, 1, 1, false, 4321ULL}};
const std::vector<RsvdInputs<double>> sanity_inputs_dx = {
{100000000000000000.0, 3, 2, 0.2, 0.05, 0, 0, true, 4321ULL},
{100000000000000000.0, 3, 2, 0.0, 0.0, 1, 1, true, 4321ULL},
{100000000000000000.0, 3, 2, 0.2, 0.05, 0, 0, false, 4321ULL},
{100000000000000000.0, 3, 2, 0.0, 0.0, 1, 1, false, 4321ULL}};
typedef RsvdTest<float> RsvdSanityCheckValF;
TEST_P(RsvdSanityCheckValF, Result) {
ASSERT_TRUE(devArrMatch(sing_vals_ref, S, params.k,
raft::CompareApproxAbs<float>(params.tolerance)));
}
typedef RsvdTest<double> RsvdSanityCheckValD;
TEST_P(RsvdSanityCheckValD, Result) {
ASSERT_TRUE(devArrMatch(sing_vals_ref, S, params.k,
raft::CompareApproxAbs<double>(params.tolerance)));
}
typedef RsvdTest<float> RsvdSanityCheckLeftVecF;
TEST_P(RsvdSanityCheckLeftVecF, Result) {
ASSERT_TRUE(devArrMatch(left_eig_vectors_ref, U, params.n_row * params.k,
raft::CompareApproxAbs<float>(params.tolerance)));
}
typedef RsvdTest<double> RsvdSanityCheckLeftVecD;
TEST_P(RsvdSanityCheckLeftVecD, Result) {
ASSERT_TRUE(devArrMatch(left_eig_vectors_ref, U, params.n_row * params.k,
raft::CompareApproxAbs<double>(params.tolerance)));
}
typedef RsvdTest<float> RsvdSanityCheckRightVecF;
TEST_P(RsvdSanityCheckRightVecF, Result) {
ASSERT_TRUE(devArrMatch(right_eig_vectors_ref, V, params.n_col * params.k,
raft::CompareApproxAbs<float>(params.tolerance)));
}
typedef RsvdTest<double> RsvdSanityCheckRightVecD;
TEST_P(RsvdSanityCheckRightVecD, Result) {
ASSERT_TRUE(devArrMatch(right_eig_vectors_ref, V, params.n_col * params.k,
raft::CompareApproxAbs<double>(params.tolerance)));
}
typedef RsvdTest<float> RsvdTestSquareMatrixNormF;
TEST_P(RsvdTestSquareMatrixNormF, Result) {
hipblasHandle_t cublasH;
CUBLAS_CHECK(hipblasCreate(&cublasH));
hipStream_t stream;
CUDA_CHECK(hipStreamCreate(&stream));
std::shared_ptr<deviceAllocator> allocator(
new raft::mr::device::default_allocator);
ASSERT_TRUE(evaluateSVDByL2Norm(A, U, S, V, params.n_row, params.n_col,
params.k, 4 * params.tolerance, cublasH,
stream, allocator));
CUBLAS_CHECK(hipblasDestroy(cublasH));
CUDA_CHECK(hipStreamDestroy(stream));
}
typedef RsvdTest<double> RsvdTestSquareMatrixNormD;
TEST_P(RsvdTestSquareMatrixNormD, Result) {
hipblasHandle_t cublasH;
CUBLAS_CHECK(hipblasCreate(&cublasH));
hipStream_t stream;
CUDA_CHECK(hipStreamCreate(&stream));
std::shared_ptr<deviceAllocator> allocator(
new raft::mr::device::default_allocator);
ASSERT_TRUE(evaluateSVDByL2Norm(A, U, S, V, params.n_row, params.n_col,
params.k, 4 * params.tolerance, cublasH,
stream, allocator));
CUBLAS_CHECK(hipblasDestroy(cublasH));
CUDA_CHECK(hipStreamDestroy(stream));
}
INSTANTIATE_TEST_CASE_P(RsvdTests, RsvdSanityCheckValF,
::testing::ValuesIn(sanity_inputs_fx));
INSTANTIATE_TEST_CASE_P(RsvdTests, RsvdSanityCheckValD,
::testing::ValuesIn(sanity_inputs_dx));
INSTANTIATE_TEST_CASE_P(RsvdTests, RsvdSanityCheckLeftVecF,
::testing::ValuesIn(sanity_inputs_fx));
INSTANTIATE_TEST_CASE_P(RsvdTests, RsvdSanityCheckLeftVecD,
::testing::ValuesIn(sanity_inputs_dx));
INSTANTIATE_TEST_CASE_P(RsvdTests, RsvdSanityCheckRightVecF,
::testing::ValuesIn(sanity_inputs_fx));
INSTANTIATE_TEST_CASE_P(RsvdTests, RsvdSanityCheckRightVecD,
::testing::ValuesIn(sanity_inputs_dx));
INSTANTIATE_TEST_CASE_P(RsvdTests, RsvdTestSquareMatrixNormF,
::testing::ValuesIn(inputs_fx));
INSTANTIATE_TEST_CASE_P(RsvdTests, RsvdTestSquareMatrixNormD,
::testing::ValuesIn(inputs_dx));
} // end namespace LinAlg
} // end namespace MLCommon
| 100b710b5d52df566d6f5709f187d43a3363042d.cu | /*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <cuda_utils.cuh>
#include <linalg/rsvd.cuh>
#include <random/rng.cuh>
#include "test_utils.h"
namespace MLCommon {
namespace LinAlg {
template <typename T>
struct RsvdInputs {
T tolerance;
int n_row;
int n_col;
T PC_perc;
T UpS_perc;
int k;
int p;
bool use_bbt;
unsigned long long int seed;
};
template <typename T>
::std::ostream &operator<<(::std::ostream &os, const RsvdInputs<T> &dims) {
return os;
}
template <typename T>
class RsvdTest : public ::testing::TestWithParam<RsvdInputs<T>> {
protected:
void SetUp() override {
CUSOLVER_CHECK(cusolverDnCreate(&cusolverH));
CUBLAS_CHECK(cublasCreate(&cublasH));
CUDA_CHECK(cudaStreamCreate(&stream));
allocator.reset(new raft::mr::device::default_allocator);
params = ::testing::TestWithParam<RsvdInputs<T>>::GetParam();
// rSVD seems to be very sensitive to the random number sequence as well!
raft::random::Rng r(params.seed, raft::random::GenTaps);
int m = params.n_row, n = params.n_col;
T eig_svd_tol = 1.e-7;
int max_sweeps = 100;
T mu = 0.0, sigma = 1.0;
raft::allocate(A, m * n);
if (params.tolerance > 1) { // Sanity check
ASSERT(m == 3, "This test only supports mxn=3x2!");
ASSERT(m * n == 6, "This test only supports mxn=3x2!");
T data_h[] = {1.0, 4.0, 2.0, 2.0, 5.0, 1.0};
raft::update_device(A, data_h, m * n, stream);
T left_eig_vectors_ref_h[] = {-0.308219, -0.906133, -0.289695};
T right_eig_vectors_ref_h[] = {-0.638636, -0.769509};
T sing_vals_ref_h[] = {7.065283};
raft::allocate(left_eig_vectors_ref, m * 1);
raft::allocate(right_eig_vectors_ref, n * 1);
raft::allocate(sing_vals_ref, 1);
raft::update_device(left_eig_vectors_ref, left_eig_vectors_ref_h, m * 1,
stream);
raft::update_device(right_eig_vectors_ref, right_eig_vectors_ref_h, n * 1,
stream);
raft::update_device(sing_vals_ref, sing_vals_ref_h, 1, stream);
} else { // Other normal tests
r.normal(A, m * n, mu, sigma, stream);
}
A_backup_cpu = (T *)malloc(
sizeof(T) * m *
n); // Backup A matrix as svdJacobi will destroy the content of A
raft::update_host(A_backup_cpu, A, m * n, stream);
// RSVD tests
if (params.k == 0) { // Test with PC and upsampling ratio
params.k = max((int)(min(m, n) * params.PC_perc), 1);
params.p = max((int)(min(m, n) * params.UpS_perc), 1);
raft::allocate(U, m * params.k, true);
raft::allocate(S, params.k, true);
raft::allocate(V, n * params.k, true);
rsvdPerc(A, m, n, S, U, V, params.PC_perc, params.UpS_perc,
params.use_bbt, true, true, false, eig_svd_tol, max_sweeps,
cusolverH, cublasH, stream, allocator);
} else { // Test with directly given fixed rank
raft::allocate(U, m * params.k, true);
raft::allocate(S, params.k, true);
raft::allocate(V, n * params.k, true);
rsvdFixedRank(A, m, n, S, U, V, params.k, params.p, params.use_bbt, true,
true, true, eig_svd_tol, max_sweeps, cusolverH, cublasH,
stream, allocator);
}
raft::update_device(A, A_backup_cpu, m * n, stream);
free(A_backup_cpu);
}
void TearDown() override {
CUDA_CHECK(cudaFree(A));
CUDA_CHECK(cudaFree(U));
CUDA_CHECK(cudaFree(S));
CUDA_CHECK(cudaFree(V));
if (left_eig_vectors_ref) CUDA_CHECK(cudaFree(left_eig_vectors_ref));
if (right_eig_vectors_ref) CUDA_CHECK(cudaFree(right_eig_vectors_ref));
if (sing_vals_ref) CUDA_CHECK(cudaFree(sing_vals_ref));
CUSOLVER_CHECK(cusolverDnDestroy(cusolverH));
CUBLAS_CHECK(cublasDestroy(cublasH));
CUDA_CHECK(cudaStreamDestroy(stream));
}
protected:
RsvdInputs<T> params;
T *A, *A_backup_cpu,
*U = nullptr, *S = nullptr, *V = nullptr, *left_eig_vectors_ref = nullptr,
*right_eig_vectors_ref = nullptr, *sing_vals_ref = nullptr;
cusolverDnHandle_t cusolverH = nullptr;
cublasHandle_t cublasH = nullptr;
cudaStream_t stream;
std::shared_ptr<deviceAllocator> allocator;
};
const std::vector<RsvdInputs<float>> inputs_fx = {
// Test with ratios
{0.20f, 256, 256, 0.2f, 0.05f, 0, 0, true, 4321ULL}, // Square + BBT
{0.20f, 2048, 256, 0.2f, 0.05f, 0, 0, true, 4321ULL}, // Tall + BBT
{0.20f, 256, 256, 0.2f, 0.05f, 0, 0, false, 4321ULL}, // Square + non-BBT
{0.20f, 2048, 256, 0.2f, 0.05f, 0, 0, false, 4321ULL}, // Tall + non-BBT
{0.20f, 2048, 2048, 0.2f, 0.05f, 0, 0, true, 4321ULL}, // Square + BBT
{0.60f, 16384, 2048, 0.2f, 0.05f, 0, 0, true, 4321ULL}, // Tall + BBT
{0.20f, 2048, 2048, 0.2f, 0.05f, 0, 0, false, 4321ULL}, // Square + non-BBT
{0.60f, 16384, 2048, 0.2f, 0.05f, 0, 0, false, 4321ULL} // Tall + non-BBT
, // Test with fixed ranks
{0.10f, 256, 256, 0.0f, 0.0f, 100, 5, true, 4321ULL}, // Square + BBT
{0.12f, 2048, 256, 0.0f, 0.0f, 100, 5, true, 4321ULL}, // Tall + BBT
{0.10f, 256, 256, 0.0f, 0.0f, 100, 5, false, 4321ULL}, // Square + non-BBT
{0.12f, 2048, 256, 0.0f, 0.0f, 100, 5, false, 4321ULL}, // Tall + non-BBT
{0.60f, 2048, 2048, 0.0f, 0.0f, 100, 5, true, 4321ULL}, // Square + BBT
{1.00f, 16384, 2048, 0.0f, 0.0f, 100, 5, true, 4321ULL}, // Tall + BBT
{0.60f, 2048, 2048, 0.0f, 0.0f, 100, 5, false, 4321ULL}, // Square + non-BBT
{1.00f, 16384, 2048, 0.0f, 0.0f, 100, 5, false, 4321ULL} // Tall + non-BBT
};
const std::vector<RsvdInputs<double>> inputs_dx = {
// Test with ratios
{0.20, 256, 256, 0.2, 0.05, 0, 0, true, 4321ULL}, // Square + BBT
{0.20, 2048, 256, 0.2, 0.05, 0, 0, true, 4321ULL}, // Tall + BBT
{0.20, 256, 256, 0.2, 0.05, 0, 0, false, 4321ULL}, // Square + non-BBT
{0.20, 2048, 256, 0.2, 0.05, 0, 0, false, 4321ULL}, // Tall + non-BBT
{0.20, 2048, 2048, 0.2, 0.05, 0, 0, true, 4321ULL}, // Square + BBT
{0.60, 16384, 2048, 0.2, 0.05, 0, 0, true, 4321ULL}, // Tall + BBT
{0.20, 2048, 2048, 0.2, 0.05, 0, 0, false, 4321ULL}, // Square + non-BBT
{0.60, 16384, 2048, 0.2, 0.05, 0, 0, false, 4321ULL} // Tall + non-BBT
, // Test with fixed ranks
{0.10, 256, 256, 0.0, 0.0, 100, 5, true, 4321ULL}, // Square + BBT
{0.12, 2048, 256, 0.0, 0.0, 100, 5, true, 4321ULL}, // Tall + BBT
{0.10, 256, 256, 0.0, 0.0, 100, 5, false, 4321ULL}, // Square + non-BBT
{0.12, 2048, 256, 0.0, 0.0, 100, 5, false, 4321ULL}, // Tall + non-BBT
{0.60, 2048, 2048, 0.0, 0.0, 100, 5, true, 4321ULL}, // Square + BBT
{1.00, 16384, 2048, 0.0, 0.0, 100, 5, true, 4321ULL}, // Tall + BBT
{0.60, 2048, 2048, 0.0, 0.0, 100, 5, false, 4321ULL}, // Square + non-BBT
{1.00, 16384, 2048, 0.0, 0.0, 100, 5, false, 4321ULL} // Tall + non-BBT
};
const std::vector<RsvdInputs<float>> sanity_inputs_fx = {
{100000000000000000.0f, 3, 2, 0.2f, 0.05f, 0, 0, true, 4321ULL},
{100000000000000000.0f, 3, 2, 0.0f, 0.0f, 1, 1, true, 4321ULL},
{100000000000000000.0f, 3, 2, 0.2f, 0.05f, 0, 0, false, 4321ULL},
{100000000000000000.0f, 3, 2, 0.0f, 0.0f, 1, 1, false, 4321ULL}};
const std::vector<RsvdInputs<double>> sanity_inputs_dx = {
{100000000000000000.0, 3, 2, 0.2, 0.05, 0, 0, true, 4321ULL},
{100000000000000000.0, 3, 2, 0.0, 0.0, 1, 1, true, 4321ULL},
{100000000000000000.0, 3, 2, 0.2, 0.05, 0, 0, false, 4321ULL},
{100000000000000000.0, 3, 2, 0.0, 0.0, 1, 1, false, 4321ULL}};
typedef RsvdTest<float> RsvdSanityCheckValF;
TEST_P(RsvdSanityCheckValF, Result) {
ASSERT_TRUE(devArrMatch(sing_vals_ref, S, params.k,
raft::CompareApproxAbs<float>(params.tolerance)));
}
typedef RsvdTest<double> RsvdSanityCheckValD;
TEST_P(RsvdSanityCheckValD, Result) {
ASSERT_TRUE(devArrMatch(sing_vals_ref, S, params.k,
raft::CompareApproxAbs<double>(params.tolerance)));
}
typedef RsvdTest<float> RsvdSanityCheckLeftVecF;
TEST_P(RsvdSanityCheckLeftVecF, Result) {
ASSERT_TRUE(devArrMatch(left_eig_vectors_ref, U, params.n_row * params.k,
raft::CompareApproxAbs<float>(params.tolerance)));
}
typedef RsvdTest<double> RsvdSanityCheckLeftVecD;
TEST_P(RsvdSanityCheckLeftVecD, Result) {
ASSERT_TRUE(devArrMatch(left_eig_vectors_ref, U, params.n_row * params.k,
raft::CompareApproxAbs<double>(params.tolerance)));
}
typedef RsvdTest<float> RsvdSanityCheckRightVecF;
TEST_P(RsvdSanityCheckRightVecF, Result) {
ASSERT_TRUE(devArrMatch(right_eig_vectors_ref, V, params.n_col * params.k,
raft::CompareApproxAbs<float>(params.tolerance)));
}
typedef RsvdTest<double> RsvdSanityCheckRightVecD;
TEST_P(RsvdSanityCheckRightVecD, Result) {
ASSERT_TRUE(devArrMatch(right_eig_vectors_ref, V, params.n_col * params.k,
raft::CompareApproxAbs<double>(params.tolerance)));
}
typedef RsvdTest<float> RsvdTestSquareMatrixNormF;
TEST_P(RsvdTestSquareMatrixNormF, Result) {
cublasHandle_t cublasH;
CUBLAS_CHECK(cublasCreate(&cublasH));
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
std::shared_ptr<deviceAllocator> allocator(
new raft::mr::device::default_allocator);
ASSERT_TRUE(evaluateSVDByL2Norm(A, U, S, V, params.n_row, params.n_col,
params.k, 4 * params.tolerance, cublasH,
stream, allocator));
CUBLAS_CHECK(cublasDestroy(cublasH));
CUDA_CHECK(cudaStreamDestroy(stream));
}
typedef RsvdTest<double> RsvdTestSquareMatrixNormD;
TEST_P(RsvdTestSquareMatrixNormD, Result) {
cublasHandle_t cublasH;
CUBLAS_CHECK(cublasCreate(&cublasH));
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
std::shared_ptr<deviceAllocator> allocator(
new raft::mr::device::default_allocator);
ASSERT_TRUE(evaluateSVDByL2Norm(A, U, S, V, params.n_row, params.n_col,
params.k, 4 * params.tolerance, cublasH,
stream, allocator));
CUBLAS_CHECK(cublasDestroy(cublasH));
CUDA_CHECK(cudaStreamDestroy(stream));
}
INSTANTIATE_TEST_CASE_P(RsvdTests, RsvdSanityCheckValF,
::testing::ValuesIn(sanity_inputs_fx));
INSTANTIATE_TEST_CASE_P(RsvdTests, RsvdSanityCheckValD,
::testing::ValuesIn(sanity_inputs_dx));
INSTANTIATE_TEST_CASE_P(RsvdTests, RsvdSanityCheckLeftVecF,
::testing::ValuesIn(sanity_inputs_fx));
INSTANTIATE_TEST_CASE_P(RsvdTests, RsvdSanityCheckLeftVecD,
::testing::ValuesIn(sanity_inputs_dx));
INSTANTIATE_TEST_CASE_P(RsvdTests, RsvdSanityCheckRightVecF,
::testing::ValuesIn(sanity_inputs_fx));
INSTANTIATE_TEST_CASE_P(RsvdTests, RsvdSanityCheckRightVecD,
::testing::ValuesIn(sanity_inputs_dx));
INSTANTIATE_TEST_CASE_P(RsvdTests, RsvdTestSquareMatrixNormF,
::testing::ValuesIn(inputs_fx));
INSTANTIATE_TEST_CASE_P(RsvdTests, RsvdTestSquareMatrixNormD,
::testing::ValuesIn(inputs_dx));
} // end namespace LinAlg
} // end namespace MLCommon
|
7777ac20a5fc03d76837a71288d3e69968116e4c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void sum(int* input, int* sumOut) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
for(int j = 0; j < 100/(blockDim.x*gridDim.x); j++){
if (i < 100){
atomicAdd(sumOut, input[i+(j*blockDim.x*gridDim.x)]);
printf("NUM:%d Thread: %d ||\n",input[i+(j*blockDim.x*gridDim.x)],i);
}
}
__syncthreads();
} | 7777ac20a5fc03d76837a71288d3e69968116e4c.cu | #include "includes.h"
__global__ void sum(int* input, int* sumOut) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
for(int j = 0; j < 100/(blockDim.x*gridDim.x); j++){
if (i < 100){
atomicAdd(sumOut, input[i+(j*blockDim.x*gridDim.x)]);
printf("NUM:%d Thread: %d ||\n",input[i+(j*blockDim.x*gridDim.x)],i);
}
}
__syncthreads();
} |
3d635e2dbe0112cd122565ffad22622befd6850d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "CalculateFixed.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *subBG = NULL;
hipMalloc(&subBG, XSIZE*YSIZE);
const float *subT = NULL;
hipMalloc(&subT, XSIZE*YSIZE);
const int *subM = NULL;
hipMalloc(&subM, XSIZE*YSIZE);
float *fixed = NULL;
hipMalloc(&fixed, XSIZE*YSIZE);
const int wb = 1;
const int hb = 1;
const int wt = 1;
const int ht = 1;
const int oy = 1;
const int ox = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
CalculateFixed), dim3(gridBlock),dim3(threadBlock), 0, 0, subBG,subT,subM,fixed,wb,hb,wt,ht,oy,ox);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
CalculateFixed), dim3(gridBlock),dim3(threadBlock), 0, 0, subBG,subT,subM,fixed,wb,hb,wt,ht,oy,ox);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
CalculateFixed), dim3(gridBlock),dim3(threadBlock), 0, 0, subBG,subT,subM,fixed,wb,hb,wt,ht,oy,ox);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 3d635e2dbe0112cd122565ffad22622befd6850d.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "CalculateFixed.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *subBG = NULL;
cudaMalloc(&subBG, XSIZE*YSIZE);
const float *subT = NULL;
cudaMalloc(&subT, XSIZE*YSIZE);
const int *subM = NULL;
cudaMalloc(&subM, XSIZE*YSIZE);
float *fixed = NULL;
cudaMalloc(&fixed, XSIZE*YSIZE);
const int wb = 1;
const int hb = 1;
const int wt = 1;
const int ht = 1;
const int oy = 1;
const int ox = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
CalculateFixed<<<gridBlock,threadBlock>>>(subBG,subT,subM,fixed,wb,hb,wt,ht,oy,ox);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
CalculateFixed<<<gridBlock,threadBlock>>>(subBG,subT,subM,fixed,wb,hb,wt,ht,oy,ox);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
CalculateFixed<<<gridBlock,threadBlock>>>(subBG,subT,subM,fixed,wb,hb,wt,ht,oy,ox);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
b6e4a23d34de9b69785986c15436e3c147061cd0.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2012 Susanne Kunis, Stefan Kunis
*
* This program is free software; you can redistribute it and/or modify it under
* the terms of the GNU General Public License as published by the Free Software
* Foundation; either version 2 of the License, or (at your option) any later
* version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 51
* Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
*
* $Id: c_util.c 2012-05-31 11:36:00Z sukunis $
*/
/**
* @file c_util.cpp
* @brief Utilities for C
*/
#include <CUNFFT/cunfft_util.h>
void showCPUMemUse(cunfft_plan *p)
{
hipfftType type;
#ifdef CUNFFT_DOUBLE_PRECISION
type=HIPFFT_Z2Z;
#else
type=HIPFFT_C2C;
#endif
uint_t size_g=/*pow(2,p->d)**/sizeof(gpuComplex)*p->n_total;
uint_t size_f=1*sizeof(gpuComplex)*p->M_total;
uint_t size_f_hat=1*sizeof(gpuComplex)*p->N_total;
uint_t size_x=p->d*sizeof(dTyp)*p->M_total;
uint_t neededMem = size_g+size_f+size_f_hat+size_x+sizeof(type)*size_g*2;
printf("# CPU Mem min used : " PRINT_FORMAT " bytes (" PRINT_FORMAT " KB) (" PRINT_FORMAT " MB)\n", neededMem,inKB(neededMem),inMB(neededMem));
}
//------------------------------------------------------------------------------
// TIMER OUTPUT FUNCTION
//------------------------------------------------------------------------------
void showTimes(NFFTTimeSpec *times,int tfac)
{
#ifdef MEASURED_TIMES
showTime_t("\n\tKERNEL RollOf",times->time_ROC/tfac);
showTime_t("\tKERNEL FFT",times->time_FFT/tfac);
showTime_t("\tKERNEL Conv",times->time_CONV/tfac);
showTime_t("\tCOPY IN",times->time_COPY_IN/tfac);
showTime_t("\tCOPY OUT",times->time_COPY_OUT/tfac);
#endif
showTime_t("\n\tprocess took",times->runTime/tfac);
}
//------------------------------------------------------------------------------
// Memory OUTPUT FUNCTION
//------------------------------------------------------------------------------
void printStats(unsigned long free, unsigned long total)
{
printf("\tFree : %lu bytes (%lu KB) (%lu MB)\n", free,inKB(free),inMB(free));
printf("\tTotal: %lu bytes (%lu KB) (%lu MB)\n",total,inKB(total),inMB(total));
printf("\t%f%% free, %f%% used\n", 100.0*free/(double)total,
100.0*(total - free)/(double)total);
}
void printStatsToFile(unsigned long free, unsigned long total,FILE *file,int device)
{
fprintf(file,"Use Device: %d\t Free : %lu MB (%f%%)\t Total : %lu MB\n",
device,inMB(free),100.0*free/(double)total,inMB(total));
}
//------------------------------------------------------------------------------
// OUTPUT FUNCTIONS
//------------------------------------------------------------------------------
void showCoeff_double(const dTyp *x, int n, const char* text)
{
if(text != NULL){
printf("\n %s\n",text);
}
int k;
for (k = 0; k < n; k++){
if (k%4 == 0) printf("%6d.\t", k);
printf("%+.1lE,", x[k]);
if (k%4 == 3) printf("\n");
}
if (n%4 != 0) printf("\n");
printf("\n");
}
//------------------------------------------------------------------------------
// MATH HELPER FUNCTION
//------------------------------------------------------------------------------
/** Computes \f$\prod_{t=0}^{d-1} v_t\f$.*/
uint_t prod_int(uint_t *vec, int d)
{
int t;
uint_t prod=1;
for(t=0; t<d; t++){
prod *= vec[t];
}
return prod;
}
/** Computes \f$n\ge N\f$ such that \f$n=2^j,\, j\in\mathhb{N}_0\f$.*/
//TODO berechnung mit shift: siehe mathematisches Paper/link
uint_t next_power_of_2(uint_t N)
{
uint_t n,i,logn;
uint_t N_is_not_power_of_2=0;
if (N == 0){
return 1;
}else{
n=N;
logn=0;
while (n != 1){
if (n%2 == 1){
N_is_not_power_of_2=1;
}
n = n/2; //TODO n= n>>1
logn++;
}
if (!N_is_not_power_of_2){
logn--;
}
for (i = 0; i <= logn; i++){
n = n*2; //TODO n= n<<1
}
return n;
}
}
| b6e4a23d34de9b69785986c15436e3c147061cd0.cu | /*
* Copyright (c) 2012 Susanne Kunis, Stefan Kunis
*
* This program is free software; you can redistribute it and/or modify it under
* the terms of the GNU General Public License as published by the Free Software
* Foundation; either version 2 of the License, or (at your option) any later
* version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 51
* Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
*
* $Id: c_util.c 2012-05-31 11:36:00Z sukunis $
*/
/**
* @file c_util.cpp
* @brief Utilities for C
*/
#include <CUNFFT/cunfft_util.h>
void showCPUMemUse(cunfft_plan *p)
{
cufftType type;
#ifdef CUNFFT_DOUBLE_PRECISION
type=CUFFT_Z2Z;
#else
type=CUFFT_C2C;
#endif
uint_t size_g=/*pow(2,p->d)**/sizeof(gpuComplex)*p->n_total;
uint_t size_f=1*sizeof(gpuComplex)*p->M_total;
uint_t size_f_hat=1*sizeof(gpuComplex)*p->N_total;
uint_t size_x=p->d*sizeof(dTyp)*p->M_total;
uint_t neededMem = size_g+size_f+size_f_hat+size_x+sizeof(type)*size_g*2;
printf("# CPU Mem min used : " PRINT_FORMAT " bytes (" PRINT_FORMAT " KB) (" PRINT_FORMAT " MB)\n", neededMem,inKB(neededMem),inMB(neededMem));
}
//------------------------------------------------------------------------------
// TIMER OUTPUT FUNCTION
//------------------------------------------------------------------------------
void showTimes(NFFTTimeSpec *times,int tfac)
{
#ifdef MEASURED_TIMES
showTime_t("\n\tKERNEL RollOf",times->time_ROC/tfac);
showTime_t("\tKERNEL FFT",times->time_FFT/tfac);
showTime_t("\tKERNEL Conv",times->time_CONV/tfac);
showTime_t("\tCOPY IN",times->time_COPY_IN/tfac);
showTime_t("\tCOPY OUT",times->time_COPY_OUT/tfac);
#endif
showTime_t("\n\tprocess took",times->runTime/tfac);
}
//------------------------------------------------------------------------------
// Memory OUTPUT FUNCTION
//------------------------------------------------------------------------------
void printStats(unsigned long free, unsigned long total)
{
printf("\tFree : %lu bytes (%lu KB) (%lu MB)\n", free,inKB(free),inMB(free));
printf("\tTotal: %lu bytes (%lu KB) (%lu MB)\n",total,inKB(total),inMB(total));
printf("\t%f%% free, %f%% used\n", 100.0*free/(double)total,
100.0*(total - free)/(double)total);
}
void printStatsToFile(unsigned long free, unsigned long total,FILE *file,int device)
{
fprintf(file,"Use Device: %d\t Free : %lu MB (%f%%)\t Total : %lu MB\n",
device,inMB(free),100.0*free/(double)total,inMB(total));
}
//------------------------------------------------------------------------------
// OUTPUT FUNCTIONS
//------------------------------------------------------------------------------
void showCoeff_double(const dTyp *x, int n, const char* text)
{
if(text != NULL){
printf("\n %s\n",text);
}
int k;
for (k = 0; k < n; k++){
if (k%4 == 0) printf("%6d.\t", k);
printf("%+.1lE,", x[k]);
if (k%4 == 3) printf("\n");
}
if (n%4 != 0) printf("\n");
printf("\n");
}
//------------------------------------------------------------------------------
// MATH HELPER FUNCTION
//------------------------------------------------------------------------------
/** Computes \f$\prod_{t=0}^{d-1} v_t\f$.*/
uint_t prod_int(uint_t *vec, int d)
{
int t;
uint_t prod=1;
for(t=0; t<d; t++){
prod *= vec[t];
}
return prod;
}
/** Computes \f$n\ge N\f$ such that \f$n=2^j,\, j\in\mathhb{N}_0\f$.*/
//TODO berechnung mit shift: siehe mathematisches Paper/link
uint_t next_power_of_2(uint_t N)
{
uint_t n,i,logn;
uint_t N_is_not_power_of_2=0;
if (N == 0){
return 1;
}else{
n=N;
logn=0;
while (n != 1){
if (n%2 == 1){
N_is_not_power_of_2=1;
}
n = n/2; //TODO n= n>>1
logn++;
}
if (!N_is_not_power_of_2){
logn--;
}
for (i = 0; i <= logn; i++){
n = n*2; //TODO n= n<<1
}
return n;
}
}
|
dbb11fe1c9aa0b6c065151b5a844bc8242d073f6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
#include <stdio.h>
#include <stdlib.h>
#include "cuda_utils.h"
// input: points(b, c, n) idx(b, npoints, nsample)
// output: out(b, c, npoints, nsample)
__global__ void group_points_kernel(int b, int c, int n, int npoints,
int nsample,
const float *__restrict__ points,
const int *__restrict__ idx,
float *__restrict__ out) {
int batch_index = blockIdx.x;
points += batch_index * n * c;
idx += batch_index * npoints * nsample;
out += batch_index * npoints * nsample * c;
const int index = threadIdx.y * blockDim.x + threadIdx.x;
const int stride = blockDim.y * blockDim.x;
for (int i = index; i < c * npoints; i += stride) {
const int l = i / npoints;
const int j = i % npoints;
for (int k = 0; k < nsample; ++k) {
int ii = idx[j * nsample + k];
out[(l * npoints + j) * nsample + k] = points[l * n + ii];
}
}
}
void group_points_kernel_wrapper(int b, int c, int n, int npoints, int nsample,
const float *points, const int *idx,
float *out) {
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( group_points_kernel), dim3(b), dim3(opt_block_config(npoints, c)), 0, stream,
b, c, n, npoints, nsample, points, idx, out);
CUDA_CHECK_ERRORS();
}
// input: grad_out(b, c, npoints, nsample), idx(b, npoints, nsample)
// output: grad_points(b, c, n)
// Backward pass of group_points: scatter-adds gradients back to the points.
//   grad_points[b, l, ii] += grad_out[b, l, j, k] for each (j, k) with
//   idx[b, j, k] == ii.
//   grad_out:    (b, c, npoints, nsample)
//   idx:         (b, npoints, nsample)
//   grad_points: (b, c, n) -- presumably zero-initialised by the caller
//                (TODO confirm; this kernel only accumulates).
// One block per batch element; atomicAdd is required because several
// (j, k) pairs may reference the same source point ii.
__global__ void group_points_grad_kernel(int b, int c, int n, int npoints,
                                         int nsample,
                                         const float *__restrict__ grad_out,
                                         const int *__restrict__ idx,
                                         float *__restrict__ grad_points) {
    int batch_index = blockIdx.x;
    // Advance all pointers to this batch element's slice.
    grad_out += batch_index * npoints * nsample * c;
    idx += batch_index * npoints * nsample;
    grad_points += batch_index * n * c;
    // Flatten the 2-D block into one stride loop over c * npoints items.
    const int index = threadIdx.y * blockDim.x + threadIdx.x;
    const int stride = blockDim.y * blockDim.x;
    for (int i = index; i < c * npoints; i += stride) {
        const int l = i / npoints;  // channel
        const int j = i % npoints;  // query point
        for (int k = 0; k < nsample; ++k) {
            int ii = idx[j * nsample + k];
            atomicAdd(grad_points + l * n + ii,
                      grad_out[(l * npoints + j) * nsample + k]);
        }
    }
}
// Host-side launcher for group_points_grad_kernel; same launch geometry
// as the forward wrapper (one block per batch element).
void group_points_grad_kernel_wrapper(int b, int c, int n, int npoints,
                                      int nsample, const float *grad_out,
                                      const int *idx, float *grad_points) {
    hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
    hipLaunchKernelGGL(( group_points_grad_kernel), dim3(b), dim3(opt_block_config(npoints, c)), 0, stream,
                       b, c, n, npoints, nsample, grad_out, idx, grad_points);
    CUDA_CHECK_ERRORS();  // surface any launch error immediately
}
| dbb11fe1c9aa0b6c065151b5a844bc8242d073f6.cu | // Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
#include <stdio.h>
#include <stdlib.h>
#include "cuda_utils.h"
// input: points(b, c, n) idx(b, npoints, nsample)
// output: out(b, c, npoints, nsample)
// Gathers sampled point features: out[b, l, j, k] = points[b, l, idx[b, j, k]].
//   points: (b, c, n)               per-batch channel-major features
//   idx:    (b, npoints, nsample)   indices into the n source points
//   out:    (b, c, npoints, nsample)
// Launch layout: one block per batch element (gridDim.x == b); the block's
// 2-D thread grid is flattened and strides over all c * npoints
// (channel, query point) pairs.
__global__ void group_points_kernel(int b, int c, int n, int npoints,
                                    int nsample,
                                    const float *__restrict__ points,
                                    const int *__restrict__ idx,
                                    float *__restrict__ out) {
    int batch_index = blockIdx.x;
    // Advance all pointers to this batch element's slice.
    points += batch_index * n * c;
    idx += batch_index * npoints * nsample;
    out += batch_index * npoints * nsample * c;
    // Flatten the 2-D block into one stride loop over c * npoints items.
    const int index = threadIdx.y * blockDim.x + threadIdx.x;
    const int stride = blockDim.y * blockDim.x;
    for (int i = index; i < c * npoints; i += stride) {
        const int l = i / npoints;  // channel
        const int j = i % npoints;  // query point
        for (int k = 0; k < nsample; ++k) {
            int ii = idx[j * nsample + k];  // source point for neighbour k
            out[(l * npoints + j) * nsample + k] = points[l * n + ii];
        }
    }
}
// Host-side launcher for group_points_kernel on the current PyTorch CUDA
// stream: one block per batch element, block shape chosen by the project
// helper opt_block_config(npoints, c).
void group_points_kernel_wrapper(int b, int c, int n, int npoints, int nsample,
                                 const float *points, const int *idx,
                                 float *out) {
    cudaStream_t stream = at::cuda::getCurrentCUDAStream();
    group_points_kernel<<<b, opt_block_config(npoints, c), 0, stream>>>(
        b, c, n, npoints, nsample, points, idx, out);
    CUDA_CHECK_ERRORS();  // surface any launch error immediately
}
// input: grad_out(b, c, npoints, nsample), idx(b, npoints, nsample)
// output: grad_points(b, c, n)
// Backward pass of group_points: scatter-adds gradients back to the points.
//   grad_points[b, l, ii] += grad_out[b, l, j, k] for each (j, k) with
//   idx[b, j, k] == ii.
//   grad_out:    (b, c, npoints, nsample)
//   idx:         (b, npoints, nsample)
//   grad_points: (b, c, n) -- presumably zero-initialised by the caller
//                (TODO confirm; this kernel only accumulates).
// One block per batch element; atomicAdd is required because several
// (j, k) pairs may reference the same source point ii.
__global__ void group_points_grad_kernel(int b, int c, int n, int npoints,
                                         int nsample,
                                         const float *__restrict__ grad_out,
                                         const int *__restrict__ idx,
                                         float *__restrict__ grad_points) {
    int batch_index = blockIdx.x;
    // Advance all pointers to this batch element's slice.
    grad_out += batch_index * npoints * nsample * c;
    idx += batch_index * npoints * nsample;
    grad_points += batch_index * n * c;
    // Flatten the 2-D block into one stride loop over c * npoints items.
    const int index = threadIdx.y * blockDim.x + threadIdx.x;
    const int stride = blockDim.y * blockDim.x;
    for (int i = index; i < c * npoints; i += stride) {
        const int l = i / npoints;  // channel
        const int j = i % npoints;  // query point
        for (int k = 0; k < nsample; ++k) {
            int ii = idx[j * nsample + k];
            atomicAdd(grad_points + l * n + ii,
                      grad_out[(l * npoints + j) * nsample + k]);
        }
    }
}
// Host-side launcher for group_points_grad_kernel; same launch geometry
// as the forward wrapper (one block per batch element).
void group_points_grad_kernel_wrapper(int b, int c, int n, int npoints,
                                      int nsample, const float *grad_out,
                                      const int *idx, float *grad_points) {
    cudaStream_t stream = at::cuda::getCurrentCUDAStream();
    group_points_grad_kernel<<<b, opt_block_config(npoints, c), 0, stream>>>(
        b, c, n, npoints, nsample, grad_out, idx, grad_points);
    CUDA_CHECK_ERRORS();  // surface any launch error immediately
}
|
818b5d1c5f936d236b831673be2e9afc11a74ffb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
// Element-wise accumulate: b[id] += a[id].
// NOTE(review): there is no bounds guard, so the launch configuration
// must supply exactly one thread per array element (the host below
// launches 1 block of 100 threads for the 100-element arrays).
__global__
void mean(int *a,int *b)
{
    int id = blockDim.x * blockIdx.x + threadIdx.x;
    b[id] += a[id];
}
// Demo driver: fills two 100-element arrays with 1s, adds them element-wise
// on the GPU (b[i] += a[i]), then prints the sum (expected 200) and the
// integer mean (expected 2) of the result.
// NOTE(review): return codes of the hip* calls are not checked, dev_a and
// dev_b are never freed (hipFree missing), and `&b` in the copy-back has
// the same address as `b` but `b` is the conventional spelling -- worth
// cleaning up.
int main()
{
    int a[100],b[100];
    int i,sum=0;
    int *dev_a,*dev_b;   // device copies of a and b
    // Host-side initialisation: every element of both arrays is 1.
    for(i=0;i<100;i++)
    {
        a[i] = 1;
        b[i] = 1;
    }
    printf("\n\t Printing Arrays : ");
    printf("Array A");
    for(i=0;i<100;i++)
    {
        printf("\n\t %d" ,a[i]);
    }
    printf("Array B");
    for(i=0;i<100;i++)
    {
        printf("\n\t %d" ,b[i]);
    }
    // Allocate device buffers and upload both arrays.
    hipMalloc(&dev_a,100*sizeof(int));
    hipMalloc(&dev_b,100*sizeof(int));
    hipMemcpy(dev_a,a,100*sizeof(int),hipMemcpyHostToDevice);
    hipMemcpy(dev_b,b,100*sizeof(int),hipMemcpyHostToDevice);
    // One block of 100 threads: exactly one thread per element (the kernel
    // has no bounds check).
    hipLaunchKernelGGL(( mean), dim3(1),dim3(100), 0, 0, dev_a,dev_b);
    // Blocking copy-back also synchronises with the kernel.
    hipMemcpy(&b,dev_b,100*sizeof(int),hipMemcpyDeviceToHost);
    // Reduce on the host and report sum and (integer) mean.
    for(i=0;i<100;i++)
    {
        sum+=b[i];
    }
    printf("\n\tSum = %d",sum);
    printf("\n\tMean = %d",sum/100);
} | 818b5d1c5f936d236b831673be2e9afc11a74ffb.cu | #include<stdio.h>
// Element-wise accumulate: b[id] += a[id].
// NOTE(review): there is no bounds guard, so the launch configuration
// must supply exactly one thread per array element (the host below
// launches 1 block of 100 threads for the 100-element arrays).
__global__
void mean(int *a,int *b)
{
    int id = blockDim.x * blockIdx.x + threadIdx.x;
    b[id] += a[id];
}
// Demo driver: fills two 100-element arrays with 1s, adds them element-wise
// on the GPU (b[i] += a[i]), then prints the sum (expected 200) and the
// integer mean (expected 2) of the result.
// NOTE(review): return codes of the cuda* calls are not checked, dev_a and
// dev_b are never freed (cudaFree missing), and `&b` in the copy-back has
// the same address as `b` but `b` is the conventional spelling -- worth
// cleaning up.
int main()
{
    int a[100],b[100];
    int i,sum=0;
    int *dev_a,*dev_b;   // device copies of a and b
    // Host-side initialisation: every element of both arrays is 1.
    for(i=0;i<100;i++)
    {
        a[i] = 1;
        b[i] = 1;
    }
    printf("\n\t Printing Arrays : ");
    printf("Array A");
    for(i=0;i<100;i++)
    {
        printf("\n\t %d" ,a[i]);
    }
    printf("Array B");
    for(i=0;i<100;i++)
    {
        printf("\n\t %d" ,b[i]);
    }
    // Allocate device buffers and upload both arrays.
    cudaMalloc(&dev_a,100*sizeof(int));
    cudaMalloc(&dev_b,100*sizeof(int));
    cudaMemcpy(dev_a,a,100*sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b,b,100*sizeof(int),cudaMemcpyHostToDevice);
    // One block of 100 threads: exactly one thread per element (the kernel
    // has no bounds check).
    mean<<<1,100>>>(dev_a,dev_b);
    // Blocking copy-back also synchronises with the kernel.
    cudaMemcpy(&b,dev_b,100*sizeof(int),cudaMemcpyDeviceToHost);
    // Reduce on the host and report sum and (integer) mean.
    for(i=0;i<100;i++)
    {
        sum+=b[i];
    }
    printf("\n\tSum = %d",sum);
    printf("\n\tMean = %d",sum/100);
} |
588c3065aa5b7fc4858c18acee3279eb563d5a6a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***
Copyright (c) 2017 Patryk Orzechowski
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
***/
#ifndef _CALCULATE_FITNESS_CU_
#define _CALCULATE_FITNESS_CU_
#include "evaluate_trends.cu"
// One thread block scores one (row block, bicluster) pair. After
// evaluate_trends() fills the per-thread counters in dynamic shared memory,
// each thread's counter is binarised (1 iff its row follows the bicluster's
// trend on ALL of the bicluster's columns), the flags are sum-reduced across
// the block, and thread 0 writes the number of matching rows to
// fitness_array[blockIdx.x * size_indices + index_y].
template <typename T>
__global__ void calculate_fitness(int SHARED_MEM_SIZE,
                                  const float EPSILON,
                                  float MISSING_VALUE,
                                  int *bicl_indices,
                                  int size_indices,
                                  int *compressed_biclusters,
                                  int num_rows,
                                  int num_cols,
                                  T *data,
                                  int *fitness_array) {
    long long int index_x = blockIdx.x * blockDim.x + threadIdx.x; //block of row
    long long int index_y = blockIdx.y * blockDim.y + threadIdx.y; //block of bicluster
    // Dynamic shared memory: SHARED_MEM_SIZE ints (per-thread trend
    // counters) followed by SHARED_MEM_SIZE values of type T.
    extern __shared__ int memory[];
    int *trendcheck=memory;
    T *trendvalue = (T*)&trendcheck[SHARED_MEM_SIZE];
    // Delegated per-row trend evaluation; presumably leaves the number of
    // matching bicluster columns in trendcheck[threadIdx.x] -- TODO(review):
    // confirm against evaluate_trends.cu.
    evaluate_trends(bicl_indices, compressed_biclusters, num_rows, num_cols, data, trendcheck, trendvalue, EPSILON, MISSING_VALUE);
    // bicl_indices[index_y+1]-bicl_indices[index_y] is bicluster index_y's
    // column count; the row counts only if it matched every column.
    if (trendcheck[threadIdx.x]<(bicl_indices[index_y+1]-bicl_indices[index_y])) {
        trendcheck[threadIdx.x]=0;
    }
    else {
        trendcheck[threadIdx.x]=1;
    }
    __syncthreads();
    // Block-wide tree reduction of the 0/1 flags. The add is guarded, but
    // the barrier is outside the guard so every thread reaches it.
    for(int offset = blockDim.x/2; offset > 0;offset >>= 1) {
        if(threadIdx.x < offset && index_x<num_rows) {
            trendcheck[threadIdx.x] += trendcheck[threadIdx.x+offset];
        }
        __syncthreads();
    }
    // Thread 0 publishes the match count for this (row block, bicluster).
    if (threadIdx.x==0 && index_x<num_rows) {
        fitness_array[blockIdx.x*size_indices+index_y]=trendcheck[0];
    }
}
#endif | 588c3065aa5b7fc4858c18acee3279eb563d5a6a.cu | /***
Copyright (c) 2017 Patryk Orzechowski
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
***/
#ifndef _CALCULATE_FITNESS_CU_
#define _CALCULATE_FITNESS_CU_
#include "evaluate_trends.cu"
// One thread block scores one (row block, bicluster) pair. After
// evaluate_trends() fills the per-thread counters in dynamic shared memory,
// each thread's counter is binarised (1 iff its row follows the bicluster's
// trend on ALL of the bicluster's columns), the flags are sum-reduced across
// the block, and thread 0 writes the number of matching rows to
// fitness_array[blockIdx.x * size_indices + index_y].
template <typename T>
__global__ void calculate_fitness(int SHARED_MEM_SIZE,
                                  const float EPSILON,
                                  float MISSING_VALUE,
                                  int *bicl_indices,
                                  int size_indices,
                                  int *compressed_biclusters,
                                  int num_rows,
                                  int num_cols,
                                  T *data,
                                  int *fitness_array) {
    long long int index_x = blockIdx.x * blockDim.x + threadIdx.x; //block of row
    long long int index_y = blockIdx.y * blockDim.y + threadIdx.y; //block of bicluster
    // Dynamic shared memory: SHARED_MEM_SIZE ints (per-thread trend
    // counters) followed by SHARED_MEM_SIZE values of type T.
    extern __shared__ int memory[];
    int *trendcheck=memory;
    T *trendvalue = (T*)&trendcheck[SHARED_MEM_SIZE];
    // Delegated per-row trend evaluation; presumably leaves the number of
    // matching bicluster columns in trendcheck[threadIdx.x] -- TODO(review):
    // confirm against evaluate_trends.cu.
    evaluate_trends(bicl_indices, compressed_biclusters, num_rows, num_cols, data, trendcheck, trendvalue, EPSILON, MISSING_VALUE);
    // bicl_indices[index_y+1]-bicl_indices[index_y] is bicluster index_y's
    // column count; the row counts only if it matched every column.
    if (trendcheck[threadIdx.x]<(bicl_indices[index_y+1]-bicl_indices[index_y])) {
        trendcheck[threadIdx.x]=0;
    }
    else {
        trendcheck[threadIdx.x]=1;
    }
    __syncthreads();
    // Block-wide tree reduction of the 0/1 flags. The add is guarded, but
    // the barrier is outside the guard so every thread reaches it.
    for(int offset = blockDim.x/2; offset > 0;offset >>= 1) {
        if(threadIdx.x < offset && index_x<num_rows) {
            trendcheck[threadIdx.x] += trendcheck[threadIdx.x+offset];
        }
        __syncthreads();
    }
    // Thread 0 publishes the match count for this (row block, bicluster).
    if (threadIdx.x==0 && index_x<num_rows) {
        fitness_array[blockIdx.x*size_indices+index_y]=trendcheck[0];
    }
}
#endif |
a7356b76de9b6fbc1ca6aaf3cc7309ba517bb521.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@generated from magmablas/zdiinertia.cu, normal z -> c, Thu Oct 8 23:05:31 2020
@author Stan Tomov
*/
#include "magma_internal.h"
#include "commonblas_c.h"
#include "magma_templates.h"
#define NTHREADS 64
#define NBLOCKS 40
// Counts positive / negative / zero REAL diagonal entries of dA (the
// "inertia"). Fixed launch shape of NBLOCKS blocks x NTHREADS threads:
// each thread grid-strides over the diagonal, per-thread counts are
// reduced in shared memory, and thread 0 of each block atomically
// accumulates into dneig[0..2]. dneig must be zeroed before the launch
// (the host wrapper in this file does so with a stream-ordered memset).
__global__ void
cdiinertia_kernel(int n, magmaFloatComplex_const_ptr dA, int ldda, int *dneig)
{
    const int tx = threadIdx.x;
    const int blk = blockIdx.x;
    int peig = 0, neig = 0, zeig = 0;
    __shared__ int pe[NTHREADS], ne[NTHREADS], ze[NTHREADS];
    // Each thread classifies its share of the diagonal (real part only).
    for(int i=tx + blk*NTHREADS; i<n; i+= NTHREADS*NBLOCKS) {
        float diag = MAGMA_C_REAL(dA[i+i*ldda]);
        if (diag > 0.0)
            peig++;
        else if (diag < 0.0)
            neig++;
        else
            zeig++;
    }
    pe[tx] = peig;
    ne[tx] = neig;
    ze[tx] = zeig;
    // The threads within a thread block sum their contributions to the inertia.
    magma_sum_reduce< NTHREADS >( tx, pe );
    magma_sum_reduce< NTHREADS >( tx, ne );
    magma_sum_reduce< NTHREADS >( tx, ze );
    __syncthreads();
    // Atomically combine the per-block sums across all blocks (thread 0).
    if (tx == 0){
        atomicAdd(&dneig[0], pe[0]);
        atomicAdd(&dneig[1], ne[0]);
        atomicAdd(&dneig[2], ze[0]);
    }
}
/***************************************************************************//**
Purpose
-------
magmablas_cdiinertia computes the inertia of a real diagonal matrix.
If matrix entries are complex, magmablas_cdiinertia considers the real
part of the diagonal.
Arguments
----------
@param[in]
n INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
@param[in]
dA COMPLEX array of DIMENSION ( LDDA, n ).
The input matrix A with diagonal entries for which the inertia
is computed. If dA is complex, the computation is done on the
real part of the diagonal.
@param[in]
ldda INTEGER.
On entry, LDDA specifies the leading dimension of A.
LDDA must be at least max( 1, n ).
@param[out]
dneig INTEGER array of DIMENSION 3 on the GPU memory.
The number of positive, negative, and zero eigenvalues
in this order.
@param[in]
queue magma_queue_t.
Queue to execute in.
@ingroup magma_hetrf
*******************************************************************************/
extern "C"
magma_int_t
magmablas_cdiinertia(
    magma_int_t n,
    magmaFloatComplex_const_ptr dA, magma_int_t ldda,
    int *dneig,
    magma_queue_t queue )
{
    /*
     * Test the input parameters (MAGMA convention: -index of the bad one).
     */
    magma_int_t info = 0;
    if ( n < 0 ) {
        info = -1;
    } else if ( ldda < max(1, n) ) {
        info = -3;
    }
    if (info != 0) {
        magma_xerbla( __func__, -(info) );
        return info;
    }
    /*
     * Quick return if possible.
     */
    if (n == 0)
        return info;
    dim3 grid( NBLOCKS, 1, 1 );
    dim3 threads( NTHREADS, 1, 1 );
    // Zero the three counters first. Both the memset and the kernel are
    // enqueued on the queue's stream, so the kernel's atomicAdds are
    // guaranteed to start from zeros.
    // NOTE(review): the hipMemsetAsync return status is not checked.
    hipMemsetAsync(dneig, 0, 3*sizeof(int), queue->cuda_stream() );
    hipLaunchKernelGGL(( cdiinertia_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
                       n, dA, ldda, dneig);
    return info;
}
// end magmablas_cdiinertia
| a7356b76de9b6fbc1ca6aaf3cc7309ba517bb521.cu | /*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@generated from magmablas/zdiinertia.cu, normal z -> c, Thu Oct 8 23:05:31 2020
@author Stan Tomov
*/
#include "magma_internal.h"
#include "commonblas_c.h"
#include "magma_templates.h"
#define NTHREADS 64
#define NBLOCKS 40
// Counts positive / negative / zero REAL diagonal entries of dA (the
// "inertia"). Fixed launch shape of NBLOCKS blocks x NTHREADS threads:
// each thread grid-strides over the diagonal, per-thread counts are
// reduced in shared memory, and thread 0 of each block atomically
// accumulates into dneig[0..2]. dneig must be zeroed before the launch
// (the host wrapper in this file does so with a stream-ordered memset).
__global__ void
cdiinertia_kernel(int n, magmaFloatComplex_const_ptr dA, int ldda, int *dneig)
{
    const int tx = threadIdx.x;
    const int blk = blockIdx.x;
    int peig = 0, neig = 0, zeig = 0;
    __shared__ int pe[NTHREADS], ne[NTHREADS], ze[NTHREADS];
    // Each thread classifies its share of the diagonal (real part only).
    for(int i=tx + blk*NTHREADS; i<n; i+= NTHREADS*NBLOCKS) {
        float diag = MAGMA_C_REAL(dA[i+i*ldda]);
        if (diag > 0.0)
            peig++;
        else if (diag < 0.0)
            neig++;
        else
            zeig++;
    }
    pe[tx] = peig;
    ne[tx] = neig;
    ze[tx] = zeig;
    // The threads within a thread block sum their contributions to the inertia.
    magma_sum_reduce< NTHREADS >( tx, pe );
    magma_sum_reduce< NTHREADS >( tx, ne );
    magma_sum_reduce< NTHREADS >( tx, ze );
    __syncthreads();
    // Atomically combine the per-block sums across all blocks (thread 0).
    if (tx == 0){
        atomicAdd(&dneig[0], pe[0]);
        atomicAdd(&dneig[1], ne[0]);
        atomicAdd(&dneig[2], ze[0]);
    }
}
/***************************************************************************//**
Purpose
-------
magmablas_cdiinertia computes the inertia of a real diagonal matrix.
If matrix entries are complex, magmablas_cdiinertia considers the real
part of the diagonal.
Arguments
----------
@param[in]
n INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
@param[in]
dA COMPLEX array of DIMENSION ( LDDA, n ).
The input matrix A with diagonal entries for which the inertia
is computed. If dA is complex, the computation is done on the
real part of the diagonal.
@param[in]
ldda INTEGER.
On entry, LDDA specifies the leading dimension of A.
LDDA must be at least max( 1, n ).
@param[out]
dneig INTEGER array of DIMENSION 3 on the GPU memory.
The number of positive, negative, and zero eigenvalues
in this order.
@param[in]
queue magma_queue_t.
Queue to execute in.
@ingroup magma_hetrf
*******************************************************************************/
extern "C"
magma_int_t
magmablas_cdiinertia(
    magma_int_t n,
    magmaFloatComplex_const_ptr dA, magma_int_t ldda,
    int *dneig,
    magma_queue_t queue )
{
    /*
     * Test the input parameters (MAGMA convention: -index of the bad one).
     */
    magma_int_t info = 0;
    if ( n < 0 ) {
        info = -1;
    } else if ( ldda < max(1, n) ) {
        info = -3;
    }
    if (info != 0) {
        magma_xerbla( __func__, -(info) );
        return info;
    }
    /*
     * Quick return if possible.
     */
    if (n == 0)
        return info;
    dim3 grid( NBLOCKS, 1, 1 );
    dim3 threads( NTHREADS, 1, 1 );
    // Zero the three counters first. Both the memset and the kernel are
    // enqueued on the queue's stream, so the kernel's atomicAdds are
    // guaranteed to start from zeros.
    // NOTE(review): the cudaMemsetAsync return status is not checked.
    cudaMemsetAsync(dneig, 0, 3*sizeof(int), queue->cuda_stream() );
    cdiinertia_kernel<<<grid, threads, 0, queue->cuda_stream() >>>
        (n, dA, ldda, dneig);
    return info;
}
// end magmablas_cdiinertia
99a9ce42109e5d15c1b9f0fc18eef94be665a2ed.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2019-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <limits>
#include <utility>
#include "dali/core/convert.h"
#include "dali/core/span.h"
#include "dali/operators/generic/lookup_table.h"
namespace dali {
namespace detail {
// Translates each input element through the lookup table. Each CUDA block
// processes one 1-D tile described by blocks[blockIdx.x]; its threads
// stride over the tile's [start, end) range. Per-element semantics
// (including when default_value is used, presumably for out-of-range
// keys -- TODO confirm) live in DoLookup<GPUBackend>.
template <typename OutputType, typename InputType>
__global__ void LookupValuesImpl(const LutSampleDesc *samples, const kernels::BlockDesc<1> *blocks,
                                 const OutputType *lookup_table, const OutputType default_value) {
    const auto &block = blocks[blockIdx.x];
    const auto &sample = samples[block.sample_idx];
    // Raw pointers are stored type-erased; cast to the dispatched types.
    auto *output = reinterpret_cast<OutputType *>(sample.output);
    const auto *input = reinterpret_cast<const InputType *>(sample.input);
    for (int64_t x = threadIdx.x + block.start.x; x < block.end.x; x += blockDim.x) {
        DoLookup<GPUBackend>(output[x], input[x], lookup_table, default_value);
    }
}
} // namespace detail
// GPU implementation: maps every element of the input tensor list through
// the device-resident lookup table. Per-sample raw pointers and tile
// descriptors are uploaded to the device, then a single kernel launch on
// the workspace stream handles all samples.
template<>
void LookupTable<GPUBackend>::RunImpl(Workspace &ws) {
    const auto &input = ws.Input<GPUBackend>(0);
    const auto &shape = input.shape();
    auto &output = ws.Output<GPUBackend>(0);
    output.SetLayout(input.GetLayout());  // element-wise op keeps the layout
    const auto stream = ws.stream();
    // Record the raw input/output pointer pair for each sample.
    auto num_samples = shape.num_samples();
    samples_.resize(num_samples);
    for (int sample_id = 0; sample_id < num_samples; sample_id++) {
        samples_[sample_id].output = output.raw_mutable_tensor(sample_id);
        samples_[sample_id].input = input.raw_tensor(sample_id);
    }
    samples_dev_.from_host(samples_, stream);
    // The lookup is element-wise, so all dims can be collapsed to 1-D
    // before partitioning the work into blocks.
    auto collapsed_shape = collapse_dims<1>(shape, {std::make_pair(0, shape.sample_dim())});
    block_setup_.SetupBlocks(collapsed_shape, true);
    blocks_dev_.from_host(block_setup_.Blocks(), stream);
    // Dispatch on the runtime input/output element types.
    TYPE_SWITCH(input.type(), dali::type2id, InputType, LUT_IN_TYPES, (
        TYPE_SWITCH(output_type_, dali::type2id, OutputType, LUT_OUT_TYPES, (
            const OutputType *lookup_table = lut_.data<OutputType>();
            // Saturating cast of the configured default to the output type.
            OutputType default_value = ConvertSat<OutputType>(default_value_f_);
            dim3 grid_dim = block_setup_.GridDim();
            dim3 block_dim = block_setup_.BlockDim();
            hipLaunchKernelGGL(( detail::LookupValuesImpl<OutputType, InputType>), dim3(grid_dim), dim3(block_dim), 0, stream,
                samples_dev_.data(), blocks_dev_.data(), lookup_table, default_value);
        ), DALI_FAIL(make_string("Unsupported output type: ", output_type_)); ); // NOLINT
    ), DALI_FAIL(make_string("Unsupported input type: ", input.type())); ); // NOLINT
}
DALI_REGISTER_OPERATOR(LookupTable, LookupTable<GPUBackend>, GPU);
} // namespace dali
| 99a9ce42109e5d15c1b9f0fc18eef94be665a2ed.cu | // Copyright (c) 2019-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <limits>
#include <utility>
#include "dali/core/convert.h"
#include "dali/core/span.h"
#include "dali/operators/generic/lookup_table.h"
namespace dali {
namespace detail {
// Translates each input element through the lookup table. Each CUDA block
// processes one 1-D tile described by blocks[blockIdx.x]; its threads
// stride over the tile's [start, end) range. Per-element semantics
// (including when default_value is used, presumably for out-of-range
// keys -- TODO confirm) live in DoLookup<GPUBackend>.
template <typename OutputType, typename InputType>
__global__ void LookupValuesImpl(const LutSampleDesc *samples, const kernels::BlockDesc<1> *blocks,
                                 const OutputType *lookup_table, const OutputType default_value) {
    const auto &block = blocks[blockIdx.x];
    const auto &sample = samples[block.sample_idx];
    // Raw pointers are stored type-erased; cast to the dispatched types.
    auto *output = reinterpret_cast<OutputType *>(sample.output);
    const auto *input = reinterpret_cast<const InputType *>(sample.input);
    for (int64_t x = threadIdx.x + block.start.x; x < block.end.x; x += blockDim.x) {
        DoLookup<GPUBackend>(output[x], input[x], lookup_table, default_value);
    }
}
} // namespace detail
// GPU implementation: maps every element of the input tensor list through
// the device-resident lookup table. Per-sample raw pointers and tile
// descriptors are uploaded to the device, then a single kernel launch on
// the workspace stream handles all samples.
template<>
void LookupTable<GPUBackend>::RunImpl(Workspace &ws) {
    const auto &input = ws.Input<GPUBackend>(0);
    const auto &shape = input.shape();
    auto &output = ws.Output<GPUBackend>(0);
    output.SetLayout(input.GetLayout());  // element-wise op keeps the layout
    const auto stream = ws.stream();
    // Record the raw input/output pointer pair for each sample.
    auto num_samples = shape.num_samples();
    samples_.resize(num_samples);
    for (int sample_id = 0; sample_id < num_samples; sample_id++) {
        samples_[sample_id].output = output.raw_mutable_tensor(sample_id);
        samples_[sample_id].input = input.raw_tensor(sample_id);
    }
    samples_dev_.from_host(samples_, stream);
    // The lookup is element-wise, so all dims can be collapsed to 1-D
    // before partitioning the work into blocks.
    auto collapsed_shape = collapse_dims<1>(shape, {std::make_pair(0, shape.sample_dim())});
    block_setup_.SetupBlocks(collapsed_shape, true);
    blocks_dev_.from_host(block_setup_.Blocks(), stream);
    // Dispatch on the runtime input/output element types.
    TYPE_SWITCH(input.type(), dali::type2id, InputType, LUT_IN_TYPES, (
        TYPE_SWITCH(output_type_, dali::type2id, OutputType, LUT_OUT_TYPES, (
            const OutputType *lookup_table = lut_.data<OutputType>();
            // Saturating cast of the configured default to the output type.
            OutputType default_value = ConvertSat<OutputType>(default_value_f_);
            dim3 grid_dim = block_setup_.GridDim();
            dim3 block_dim = block_setup_.BlockDim();
            detail::LookupValuesImpl<OutputType, InputType><<<grid_dim, block_dim, 0, stream>>>(
                samples_dev_.data(), blocks_dev_.data(), lookup_table, default_value);
        ), DALI_FAIL(make_string("Unsupported output type: ", output_type_)); ); // NOLINT
    ), DALI_FAIL(make_string("Unsupported input type: ", input.type())); ); // NOLINT
}
DALI_REGISTER_OPERATOR(LookupTable, LookupTable<GPUBackend>, GPU);
} // namespace dali
|
abbefb4f71732a1fe9cb941a7bf23a719ae1be26.hip | // !!! This is a file automatically generated by hipify!!!
/*
* nn.cu
* Nearest Neighbor
*
*/
#include <stdio.h>
#include <sys/time.h>
#include <float.h>
#include <vector>
#include "hip/hip_runtime.h"
#include "../../common/timing.h"
#define min( a, b ) a > b ? b : a
#define ceilDiv( a, b ) ( a + b - 1 ) / b
#define print( x ) printf( #x ": %lu\n", (unsigned long) x )
#define DEBUG false
#define DEFAULT_THREADS_PER_BLOCK 256
#define MAX_ARGS 10
#define REC_LENGTH 53 // size of a record in db
#define LATITUDE_POS 28 // character position of the latitude value in each record
#define OPEN 10000 // initial value of nearest neighbors
typedef struct latLong
{
float lat;
float lng;
} LatLong;
typedef struct record
{
char recString[REC_LENGTH];
float distance;
} Record;
int loadData(char *filename,std::vector<Record> &records,std::vector<LatLong> &locations);
void findLowest(std::vector<Record> &records,float *distances,int numRecords,int topN);
void printUsage();
int parseCommandline(int argc, char *argv[], char* filename,int *r,float *lat,float *lng,
int *q, int *t, int *p, int *d);
/**
* Kernel
* Executed on GPU
* Calculates the Euclidean distance from each record in the database to the target position
*/
// Computes the Euclidean distance from each record's (lat, lng) to the
// query position (lat, lng). Launched on a 2-D grid of 1-D blocks; the
// commented-out line preserves the original, less efficient index formula.
__global__ void euclid(LatLong *d_locations, float *d_distances, int numRecords,float lat, float lng)
{
    //int globalId = gridDim.x * blockDim.x * blockIdx.y + blockDim.x * blockIdx.x + threadIdx.x;
    int globalId = blockDim.x * ( gridDim.x * blockIdx.y + blockIdx.x ) + threadIdx.x; // more efficient
    // Address arithmetic only; the pointer is dereferenced inside the guard.
    LatLong *latLong = d_locations+globalId;
    if (globalId < numRecords) {
        float *dist=d_distances+globalId;
        *dist = (float)sqrt((lat-latLong->lat)*(lat-latLong->lat)+(lng-latLong->lng)*(lng-latLong->lng));
    }
}
/**
* This program finds the k-nearest neighbors
**/
// Entry point: parses the command line, loads the record database, computes
// all query->record distances on the GPU, then selects and prints the
// resultsCount nearest records.
// NOTE(review): lat/lng are left uninitialised when -lat/-lng are not
// passed (the usage text claims a default of 0); hipSetDevice(1) is called
// but properties are queried for device 0 -- both look like latent bugs.
int main(int argc, char* argv[])
{
    int i=0;
    float lat, lng;
    int quiet=0,timing=0,platform=0,device=0;
    std::vector<Record> records;
    std::vector<LatLong> locations;
    char filename[100];
    int resultsCount=10;
    // parse command line; any parse failure (or -h) prints usage and exits
    if (parseCommandline(argc, argv, filename,&resultsCount,&lat,&lng,
                         &quiet, &timing, &platform, &device)) {
        printUsage();
        return 0;
    }
    int numRecords = loadData(filename,records,locations);
    if (resultsCount > numRecords) resultsCount = numRecords;
    //for(i=0;i<numRecords;i++)
    // printf("%s, %f, %f\n",(records[i].recString),locations[i].lat,locations[i].lng);
    hipSetDevice(1);
    //Pointers to host memory
    float *distances;
    //Pointers to device memory
    LatLong *d_locations;
    float *d_distances;
    // Scaling calculations - added by Sam Kauffman
    // Derive a launch configuration from device 0's limits and free memory.
    // NOTE(review): `min` here is the file's macro, and device 0 is queried
    // even though device 1 was selected above.
    hipDeviceProp_t deviceProp;
    hipGetDeviceProperties( &deviceProp, 0 );
    hipDeviceSynchronize();
    unsigned long maxGridX = deviceProp.maxGridSize[0];
    unsigned long threadsPerBlock = min( deviceProp.maxThreadsPerBlock, DEFAULT_THREADS_PER_BLOCK );
    size_t totalDeviceMemory;
    size_t freeDeviceMemory;
    hipMemGetInfo( &freeDeviceMemory, &totalDeviceMemory );
    hipDeviceSynchronize();
    unsigned long usableDeviceMemory = freeDeviceMemory * 85 / 100; // 85% arbitrary throttle to compensate for known CUDA bug
    unsigned long maxThreads = usableDeviceMemory / 12; // 4 bytes in 3 vectors per thread
    if ( numRecords > maxThreads )
    {
        fprintf( stderr, "Error: Input too large.\n" );
        exit( 1 );
    }
    // Split the required blocks over a 2-D grid when they exceed maxGridX.
    unsigned long blocks = ceilDiv( numRecords, threadsPerBlock ); // extra threads will do nothing
    unsigned long gridY = ceilDiv( blocks, maxGridX );
    unsigned long gridX = ceilDiv( blocks, gridY );
    // There will be no more than (gridY - 1) extra blocks
    dim3 gridDim( gridX, gridY );
    if ( DEBUG )
    {
        print( totalDeviceMemory ); // 804454400
        print( freeDeviceMemory );
        print( usableDeviceMemory );
        print( maxGridX ); // 65535
        print( deviceProp.maxThreadsPerBlock ); // 1024
        print( threadsPerBlock );
        print( maxThreads );
        print( blocks ); // 130933
        print( gridY );
        print( gridX );
    }
    /**
     * Allocate memory on host and device
     */
    distances = (float *)malloc(sizeof(float) * numRecords);
    hipMalloc((void **) &d_locations,sizeof(LatLong) * numRecords);
    hipMalloc((void **) &d_distances,sizeof(float) * numRecords);
    /**
     * Transfer data from host to device
     */
    hipMemcpy( d_locations, &locations[0], sizeof(LatLong) * numRecords, hipMemcpyHostToDevice);
    /**
     * Execute kernel (timed; the elapsed time is appended to result_gpu.txt)
     */
    FILE *fresult = fopen("result_gpu.txt","a+");
    double s_time=gettime_ms();
    hipLaunchKernelGGL(( euclid), dim3(gridDim), dim3(threadsPerBlock) , 0, 0, d_locations,d_distances,numRecords,lat,lng);
    hipDeviceSynchronize();
    double e_time=gettime_ms();
    //fprintf(fresult,"%-10d, %-18s, %-20lf\n", affinity,filename, e_time-s_time);
    fprintf(fresult,"%-18s, %-20lf\n",filename, e_time-s_time);
    printf("\nExecution time:%lf.\n", e_time-s_time);
    fclose(fresult);
    //Copy data from device memory to host memory
    hipMemcpy( distances, d_distances, sizeof(float)*numRecords, hipMemcpyDeviceToHost );
    // find the resultsCount least distances
    findLowest(records,distances,numRecords,resultsCount);
    // print out results
    if (!quiet)
    for(i=0;i<resultsCount;i++) {
        printf("%s --> Distance=%f\n",records[i].recString,records[i].distance);
    }
    free(distances);
    //Free memory
    hipFree(d_locations);
    hipFree(d_distances);
}
// Reads the file list named by `filename`; each listed database file holds
// fixed-width records whose latitude/longitude are parsed from fixed
// character offsets (5 chars at offset 28 / LATITUDE_POS, 5 chars at
// offset 33). Appends every record and its coordinates to the output
// vectors and returns the number of records read.
// NOTE(review): fopen(filename) is not checked for NULL, and both loops
// are feof()-controlled, which only terminates after a failed read --
// the inner `if (feof(fp)) break;` compensates, but the outer loop can
// still call fscanf once past the last entry.
int loadData(char *filename,std::vector<Record> &records,std::vector<LatLong> &locations){
    FILE *flist,*fp;
    int i=0;
    char dbname[64];
    int recNum=0;
    /**Main processing **/
    flist = fopen(filename, "r");
    while(!feof(flist)) {
        /**
         * Read in all records of length REC_LENGTH
         * If this is the last file in the filelist, then done
         * else open next file to be read next iteration
         */
        if(fscanf(flist, "%s\n", dbname) != 1) {
            fprintf(stderr, "error reading filelist\n");
            exit(0);
        }
        fp = fopen(dbname, "r");
        if(!fp) {
            printf("error opening a db\n");
            exit(1);
        }
        // read each record
        while(!feof(fp)){
            Record record;
            LatLong latLong;
            fgets(record.recString,49,fp);
            fgetc(fp); // newline
            if (feof(fp)) break;
            // parse for lat and long: 5 characters each at fixed offsets
            char substr[6];
            for(i=0;i<5;i++) substr[i] = *(record.recString+i+28);
            substr[5] = '\0';
            latLong.lat = atof(substr);
            for(i=0;i<5;i++) substr[i] = *(record.recString+i+33);
            substr[5] = '\0';
            latLong.lng = atof(substr);
            locations.push_back(latLong);
            records.push_back(record);
            recNum++;
        }
        fclose(fp);
    }
    fclose(flist);
    // for(i=0;i<rec_count*REC_LENGTH;i++) printf("%c",sandbox[i]);
    return recNum;
}
// Partially selection-sorts so that records[0..topN) / distances[0..topN)
// hold the topN nearest records in ascending distance order, and copies
// each selected distance into the record's .distance field for printing.
//
// Bug fix vs. the previous version: the record swap saved a POINTER to
// records[i] (`tempRec = &records[i]`) before overwriting records[i], so
// the saved value aliased the new contents -- records[minLoc] was written
// back to itself, losing the original records[i] and duplicating
// records[minLoc] in the output. A by-value copy performs the intended
// exchange (the distance swap below was already by value and correct).
void findLowest(std::vector<Record> &records,float *distances,int numRecords,int topN){
    for (int i = 0; i < topN; i++) {
        // Locate the smallest remaining distance in [i, numRecords).
        int minLoc = i;
        for (int j = i; j < numRecords; j++) {
            if (distances[j] < distances[minLoc]) {
                minLoc = j;
            }
        }
        // Swap records and distances into position i -- by value.
        Record tempRec = records[i];
        records[i] = records[minLoc];
        records[minLoc] = tempRec;
        float tempDist = distances[i];
        distances[i] = distances[minLoc];
        distances[minLoc] = tempDist;
        // Attach the selected distance to the record for printing.
        records[i].distance = distances[i];
    }
}
// Parses the command line. argv[1] is always treated as the input file
// list; the remaining flags fill the output parameters. Returns 0 on
// success, 1 on error or when -h is given (callers then print usage).
// NOTE(review): -lat and -lng share the 'l' flag and are distinguished
// only by the third character; *lat/*lng read argv[i+1] without checking
// that it exists; strncpy with a count equal to the buffer size does not
// guarantee NUL-termination of `filename` for 100+ char arguments.
int parseCommandline(int argc, char *argv[], char* filename,int *r,float *lat,float *lng,
                     int *q, int *t, int *p, int *d){
    int i;
    if (argc < 2) return 1; // error
    strncpy(filename,argv[1],100);
    char flag;
    for(i=1;i<argc;i++) {
        if (argv[i][0]=='-') {// flag
            flag = argv[i][1];
            switch (flag) {
                case 'r': // number of results
                    i++;
                    *r = atoi(argv[i]);
                    break;
                case 'l': // lat or lng
                    if (argv[i][2]=='a') {//lat
                        *lat = atof(argv[i+1]);
                    }
                    else {//lng
                        *lng = atof(argv[i+1]);
                    }
                    i++;
                    break;
                case 'h': // help
                    return 1;
                case 'q': // quiet
                    *q = 1;
                    break;
                case 't': // timing
                    *t = 1;
                    break;
                case 'p': // platform
                    i++;
                    *p = atoi(argv[i]);
                    break;
                case 'd': // device
                    i++;
                    *d = atoi(argv[i]);
                    break;
            }
        }
    }
    if ((*d >= 0 && *p<0) || (*p>=0 && *d<0)) // both p and d must be specified if either are specified
        return 1;
    return 0;
}
// Prints the command-line help text for the nearest-neighbor tool.
// Emits exactly the same bytes to stdout as before; the per-line printf
// calls are replaced by a single fputs of one concatenated literal.
void printUsage(){
    fputs(
        "Nearest Neighbor Usage\n"
        "\n"
        "nearestNeighbor [filename] -r [int] -lat [float] -lng [float] [-hqt] [-p [int] -d [int]]\n"
        "\n"
        "example:\n"
        "$ ./nearestNeighbor filelist.txt -r 5 -lat 30 -lng 90\n"
        "\n"
        "filename the filename that lists the data input files\n"
        "-r [int] the number of records to return (default: 10)\n"
        "-lat [float] the latitude for nearest neighbors (default: 0)\n"
        "-lng [float] the longitude for nearest neighbors (default: 0)\n"
        "\n"
        "-h, --help Display the help file\n"
        "-q Quiet mode. Suppress all text output.\n"
        "-t Print timing information.\n"
        "\n"
        "-p [int] Choose the platform (must choose both platform and device)\n"
        "-d [int] Choose the device (must choose both platform and device)\n"
        "\n"
        "\n"
        "Notes: 1. The filename is required as the first parameter.\n"
        " 2. If you declare either the device or the platform,\n"
        " you must declare both.\n\n",
        stdout);
}
| abbefb4f71732a1fe9cb941a7bf23a719ae1be26.cu | /*
* nn.cu
* Nearest Neighbor
*
*/
#include <stdio.h>
#include <sys/time.h>
#include <float.h>
#include <vector>
#include "cuda.h"
#include "../../common/timing.h"
#define min( a, b ) a > b ? b : a
#define ceilDiv( a, b ) ( a + b - 1 ) / b
#define print( x ) printf( #x ": %lu\n", (unsigned long) x )
#define DEBUG false
#define DEFAULT_THREADS_PER_BLOCK 256
#define MAX_ARGS 10
#define REC_LENGTH 53 // size of a record in db
#define LATITUDE_POS 28 // character position of the latitude value in each record
#define OPEN 10000 // initial value of nearest neighbors
typedef struct latLong
{
float lat;
float lng;
} LatLong;
typedef struct record
{
char recString[REC_LENGTH];
float distance;
} Record;
int loadData(char *filename,std::vector<Record> &records,std::vector<LatLong> &locations);
void findLowest(std::vector<Record> &records,float *distances,int numRecords,int topN);
void printUsage();
int parseCommandline(int argc, char *argv[], char* filename,int *r,float *lat,float *lng,
int *q, int *t, int *p, int *d);
/**
* Kernel
* Executed on GPU
* Calculates the Euclidean distance from each record in the database to the target position
*/
/**
 * Kernel: one thread per database record. Computes the Euclidean distance in
 * (lat, lng) space from the query point to record `globalId` and writes it to
 * d_distances[globalId].
 * Launch: 2-D grid of 1-D blocks; tail threads past numRecords are masked by
 * the bounds check below.
 */
__global__ void euclid(LatLong *d_locations, float *d_distances, int numRecords,float lat, float lng)
{
//int globalId = gridDim.x * blockDim.x * blockIdx.y + blockDim.x * blockIdx.x + threadIdx.x;
// Flatten (blockIdx.y, blockIdx.x, threadIdx.x) into a linear record index.
int globalId = blockDim.x * ( gridDim.x * blockIdx.y + blockIdx.x ) + threadIdx.x; // more efficient
// Pointer is computed unconditionally but only dereferenced inside the guard.
LatLong *latLong = d_locations+globalId;
if (globalId < numRecords) {
float *dist=d_distances+globalId;
// NOTE(review): sqrt promotes to double here; sqrtf would stay single
// precision — confirm before changing, results may differ in the last ulp.
*dist = (float)sqrt((lat-latLong->lat)*(lat-latLong->lat)+(lng-latLong->lng)*(lng-latLong->lng));
}
}
/**
* This program finds the k-nearest neighbors
**/
/**
 * Entry point: loads the record database listed in the filelist argument,
 * launches the `euclid` kernel to compute per-record distances to the query
 * point, then selects and prints the `resultsCount` nearest records.
 * Fix: `lat`/`lng` were previously uninitialized — printUsage() documents a
 * default of 0, but omitting -lat/-lng searched from garbage coordinates.
 */
int main(int argc, char* argv[])
{
    int i=0;
    float lat = 0.0f, lng = 0.0f;   // defaults match the documented usage
    int quiet=0,timing=0,platform=0,device=0;

    std::vector<Record> records;
    std::vector<LatLong> locations;
    char filename[100];
    int resultsCount=10;

    // parse command line
    if (parseCommandline(argc, argv, filename,&resultsCount,&lat,&lng,
                         &quiet, &timing, &platform, &device)) {
        printUsage();
        return 0;
    }

    int numRecords = loadData(filename,records,locations);
    if (resultsCount > numRecords) resultsCount = numRecords;

    cudaSetDevice(1);

    //Pointers to host memory
    float *distances;
    //Pointers to device memory
    LatLong *d_locations;
    float *d_distances;

    // Scaling calculations - added by Sam Kauffman
    // NOTE(review): properties are queried for device 0 while cudaSetDevice
    // selected device 1 — confirm which device is intended.
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties( &deviceProp, 0 );
    cudaThreadSynchronize();
    unsigned long maxGridX = deviceProp.maxGridSize[0];
    unsigned long threadsPerBlock = min( deviceProp.maxThreadsPerBlock, DEFAULT_THREADS_PER_BLOCK );
    size_t totalDeviceMemory;
    size_t freeDeviceMemory;
    cudaMemGetInfo( &freeDeviceMemory, &totalDeviceMemory );
    cudaThreadSynchronize();
    unsigned long usableDeviceMemory = freeDeviceMemory * 85 / 100; // 85% arbitrary throttle to compensate for known CUDA bug
    unsigned long maxThreads = usableDeviceMemory / 12; // 4 bytes in 3 vectors per thread
    // cast avoids the signed/unsigned comparison warning (numRecords >= 0 here)
    if ( (unsigned long)numRecords > maxThreads )
    {
        fprintf( stderr, "Error: Input too large.\n" );
        exit( 1 );
    }
    unsigned long blocks = ceilDiv( numRecords, threadsPerBlock ); // extra threads will do nothing
    unsigned long gridY = ceilDiv( blocks, maxGridX );
    unsigned long gridX = ceilDiv( blocks, gridY );
    // There will be no more than (gridY - 1) extra blocks
    dim3 gridDim( gridX, gridY );

    if ( DEBUG )
    {
        print( totalDeviceMemory ); // 804454400
        print( freeDeviceMemory );
        print( usableDeviceMemory );
        print( maxGridX ); // 65535
        print( deviceProp.maxThreadsPerBlock ); // 1024
        print( threadsPerBlock );
        print( maxThreads );
        print( blocks ); // 130933
        print( gridY );
        print( gridX );
    }

    // Allocate memory on host and device
    distances = (float *)malloc(sizeof(float) * numRecords);
    cudaMalloc((void **) &d_locations,sizeof(LatLong) * numRecords);
    cudaMalloc((void **) &d_distances,sizeof(float) * numRecords);

    // Transfer data from host to device
    cudaMemcpy( d_locations, &locations[0], sizeof(LatLong) * numRecords, cudaMemcpyHostToDevice);

    // Execute kernel, timing the launch + device sync with wall-clock time
    FILE *fresult = fopen("result_gpu.txt","a+");
    double s_time=gettime_ms();
    euclid<<< gridDim, threadsPerBlock >>>(d_locations,d_distances,numRecords,lat,lng);
    cudaDeviceSynchronize();
    double e_time=gettime_ms();
    //fprintf(fresult,"%-10d, %-18s, %-20lf\n", affinity,filename, e_time-s_time);
    fprintf(fresult,"%-18s, %-20lf\n",filename, e_time-s_time);
    printf("\nExecution time:%lf.\n", e_time-s_time);
    fclose(fresult);

    //Copy data from device memory to host memory
    cudaMemcpy( distances, d_distances, sizeof(float)*numRecords, cudaMemcpyDeviceToHost );

    // find the resultsCount least distances
    findLowest(records,distances,numRecords,resultsCount);

    // print out results
    if (!quiet)
        for(i=0;i<resultsCount;i++) {
            printf("%s --> Distance=%f\n",records[i].recString,records[i].distance);
        }

    free(distances);
    //Free memory
    cudaFree(d_locations);
    cudaFree(d_distances);
}
/**
 * Reads the filelist named `filename`; each line names a database file whose
 * fixed-width records are appended to `records`, with their parsed (lat,lng)
 * appended in parallel to `locations`.
 * Returns the total number of records loaded.
 * Fix: the original dereferenced a NULL FILE* when the filelist could not be
 * opened; we now fail with a diagnostic instead of crashing.
 */
int loadData(char *filename,std::vector<Record> &records,std::vector<LatLong> &locations){
    FILE *flist,*fp;
    int i=0;
    char dbname[64];
    int recNum=0;

    /**Main processing **/
    flist = fopen(filename, "r");
    if (!flist) {
        perror(filename);
        exit(1);
    }
    // NOTE(review): feof() before a read is only true after a failed read, so
    // this loop relies on fscanf() failing at EOF — the != 1 check below
    // covers that, but a trailing blank line would still abort with an error.
    while(!feof(flist)) {
        /*
         * Read the next database filename; if this was the last entry we are
         * done, otherwise open the file for the record-reading loop below.
         */
        if(fscanf(flist, "%s\n", dbname) != 1) {
            fprintf(stderr, "error reading filelist\n");
            exit(0);
        }
        fp = fopen(dbname, "r");
        if(!fp) {
            printf("error opening a db\n");
            exit(1);
        }
        // read each record
        while(!feof(fp)){
            Record record;
            LatLong latLong;
            // reads at most 48 chars of the record body; the record separator
            // (newline) is consumed by the fgetc() that follows
            fgets(record.recString,49,fp);
            fgetc(fp); // newline
            if (feof(fp)) break;

            // parse the fixed-column lat/lng fields: 5 characters each at
            // offsets 28 (LATITUDE_POS) and 33
            char substr[6];
            for(i=0;i<5;i++) substr[i] = *(record.recString+i+28);
            substr[5] = '\0';
            latLong.lat = atof(substr);
            for(i=0;i<5;i++) substr[i] = *(record.recString+i+33);
            substr[5] = '\0';
            latLong.lng = atof(substr);

            locations.push_back(latLong);
            records.push_back(record);
            recNum++;
        }
        fclose(fp);
    }
    fclose(flist);
    return recNum;
}
/**
 * Partial selection sort: moves the `topN` records with the smallest
 * distances to the front of `records`/`distances` (both permuted in step)
 * and stamps each selected record's `distance` field.
 *
 * Bug fix: the original swapped records through `Record *tempRec =
 * &records[i]` — a pointer that aliases the destination of the first
 * assignment — so `records[minLoc] = *tempRec` wrote back the value it had
 * just received. The swap was a no-op: the old records[i] was lost and the
 * minimum record duplicated, pairing wrong record strings with the sorted
 * distances. Swapping by value fixes this.
 */
void findLowest(std::vector<Record> &records,float *distances,int numRecords,int topN){
    for (int i = 0; i < topN; i++) {
        // locate the smallest remaining distance in [i, numRecords)
        int minLoc = i;
        for (int j = i + 1; j < numRecords; j++) {
            if (distances[j] < distances[minLoc]) minLoc = j;
        }
        // swap records and distances by value (no aliasing)
        Record tempRec = records[i];
        records[i] = records[minLoc];
        records[minLoc] = tempRec;
        float tempDist = distances[i];
        distances[i] = distances[minLoc];
        distances[minLoc] = tempDist;
        // record the distance alongside the record we just selected
        records[i].distance = distances[i];
    }
}
/**
 * Parses the command line described in printUsage().
 * Returns 0 on success, 1 on invalid arguments (caller prints usage).
 *
 * Fixes: flags that take a value (-r, -lat, -lng, -p, -d) previously read
 * argv[i+1] without checking argc, so e.g. "./nn list.txt -r" dereferenced
 * past the argument array; now they return 1 (usage) instead. Also
 * guarantees `filename` is NUL-terminated (strncpy does not terminate when
 * the source is >= 100 characters).
 */
int parseCommandline(int argc, char *argv[], char* filename,int *r,float *lat,float *lng,
                     int *q, int *t, int *p, int *d){
    int i;
    if (argc < 2) return 1; // error
    strncpy(filename,argv[1],100);
    filename[99] = '\0';    // strncpy does not NUL-terminate on truncation
    char flag;

    for(i=1;i<argc;i++) {
        if (argv[i][0]=='-') {// flag
            flag = argv[i][1];
            switch (flag) {
                case 'r': // number of results
                    if (++i >= argc) return 1;  // missing value
                    *r = atoi(argv[i]);
                    break;
                case 'l': // lat or lng
                    if (i + 1 >= argc) return 1;  // missing value
                    if (argv[i][2]=='a') {//lat
                        *lat = atof(argv[i+1]);
                    }
                    else {//lng
                        *lng = atof(argv[i+1]);
                    }
                    i++;
                    break;
                case 'h': // help
                    return 1;
                case 'q': // quiet
                    *q = 1;
                    break;
                case 't': // timing
                    *t = 1;
                    break;
                case 'p': // platform
                    if (++i >= argc) return 1;  // missing value
                    *p = atoi(argv[i]);
                    break;
                case 'd': // device
                    if (++i >= argc) return 1;  // missing value
                    *d = atoi(argv[i]);
                    break;
            }
        }
    }
    // both platform and device must be specified if either is specified
    if ((*d >= 0 && *p<0) || (*p>=0 && *d<0))
        return 1;
    return 0;
}
/** Writes the command-line usage/help text for the program to stdout. */
void printUsage(){
    fputs(
        "Nearest Neighbor Usage\n"
        "\n"
        "nearestNeighbor [filename] -r [int] -lat [float] -lng [float] [-hqt] [-p [int] -d [int]]\n"
        "\n"
        "example:\n"
        "$ ./nearestNeighbor filelist.txt -r 5 -lat 30 -lng 90\n"
        "\n"
        "filename the filename that lists the data input files\n"
        "-r [int] the number of records to return (default: 10)\n"
        "-lat [float] the latitude for nearest neighbors (default: 0)\n"
        "-lng [float] the longitude for nearest neighbors (default: 0)\n"
        "\n"
        "-h, --help Display the help file\n"
        "-q Quiet mode. Suppress all text output.\n"
        "-t Print timing information.\n"
        "\n"
        "-p [int] Choose the platform (must choose both platform and device)\n"
        "-d [int] Choose the device (must choose both platform and device)\n"
        "\n"
        "\n"
        "Notes: 1. The filename is required as the first parameter.\n"
        " 2. If you declare either the device or the platform,\n"
        " you must declare both.\n\n",
        stdout);
}
|
8b87979dd8c6d1d9479d0638d6b2d07d6ac948f9.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "gGather.hip"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *denseData = NULL;
hipMalloc(&denseData, XSIZE*YSIZE);
float *sparseData = NULL;
hipMalloc(&sparseData, XSIZE*YSIZE);
int *sparseIndices = NULL;
hipMalloc(&sparseIndices, XSIZE*YSIZE);
int denseSize = XSIZE*YSIZE;
int sparseSize = XSIZE*YSIZE;
int offset = 2;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
gGather), dim3(gridBlock),dim3(threadBlock), 0, 0, denseData,sparseData,sparseIndices,denseSize,sparseSize,offset);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
gGather), dim3(gridBlock),dim3(threadBlock), 0, 0, denseData,sparseData,sparseIndices,denseSize,sparseSize,offset);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
gGather), dim3(gridBlock),dim3(threadBlock), 0, 0, denseData,sparseData,sparseIndices,denseSize,sparseSize,offset);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 8b87979dd8c6d1d9479d0638d6b2d07d6ac948f9.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "gGather.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *denseData = NULL;
cudaMalloc(&denseData, XSIZE*YSIZE);
float *sparseData = NULL;
cudaMalloc(&sparseData, XSIZE*YSIZE);
int *sparseIndices = NULL;
cudaMalloc(&sparseIndices, XSIZE*YSIZE);
int denseSize = XSIZE*YSIZE;
int sparseSize = XSIZE*YSIZE;
int offset = 2;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
gGather<<<gridBlock,threadBlock>>>(denseData,sparseData,sparseIndices,denseSize,sparseSize,offset);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
gGather<<<gridBlock,threadBlock>>>(denseData,sparseData,sparseIndices,denseSize,sparseSize,offset);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
gGather<<<gridBlock,threadBlock>>>(denseData,sparseData,sparseIndices,denseSize,sparseSize,offset);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
8c48d427d1f0925e87e38e84196a27d13e4e85c5.hip | // !!! This is a file automatically generated by hipify!!!
/* -*- Mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
/**
Copyright 2011 Carnegie Mellon University
Authors: Iulian Moraru and David G. Andersen
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This is the implementation of a feed-forward Bloom filter for GPGPU.
*/
#define _DARWIN_FEATURE_64_BIT_INODE 1
#include <iostream>
#include <fstream>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/mman.h>
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "math_functions.h"
#include "sbox.h"
using namespace std;
extern "C" {
int getfile(char *infile, size_t *filesize);
#include "timing.h"
}
//#define BLOOMBITS 1048576 /* 1 millllion bits */
#define BLOOMBITS 0x10000000 /* 32 MB */
#define BLOOMMASK (BLOOMBITS - 1)
#define BLOCK_SIZE 256
#define HASH_LEN 19
#define FILE_MAX 6710886400
#define NR_STREAMS 10
char *pinnedBuf;
struct countmap {
unsigned int hval;
unsigned int charloc;
};
texture<unsigned char, 1, hipReadModeElementType> tex_bloom;
// Host-side test of bit `i` in a packed bit vector stored as 32-bit words
// (bit k of word w covers global bit index w*32 + k).
bool is_bit_set(int i, unsigned int *bv) {
    return (bv[i >> 5] >> (i & 31)) & 1u;
}
// Device-side test of bit `i` of the Bloom bit vector, fetched through the
// read-only texture `tex_bloom`. The texture is bound byte-wise, hence the
// /8 and %8 addressing here rather than the 32-bit word math used elsewhere.
__device__ bool texbf_is_bit_set(int i) {
    unsigned char word = tex1Dfetch(tex_bloom, i/8);
    unsigned int bitMask = 1 << (i % 8);
    return (word & bitMask);
}
// Device-side counterpart of is_bit_set(): true when bit `i` of the packed
// 32-bit-word bit vector `bv` is set. Read-only; no synchronization needed.
__device__ bool device_is_bit_set(int i, unsigned int *bv) {
    return (bv[i >> 5] >> (i & 31)) & 1u;
}
// Atomically sets bit `i` of the packed bit vector `bv`. atomicOr on the
// containing 32-bit word makes concurrent sets from different threads safe.
__device__ void device_set_bit(int i, unsigned int *bv) {
    unsigned int bitMask = 1 << (i & 31);
    atomicOr(&bv[i >> 5], bitMask);
}
// Rotates a 32-bit word left by `shift` bits.
// NOTE(review): shift == 0 would make the right shift count 32, which is
// undefined for 32-bit operands; callers in this file only pass 1 or 3, so
// this is safe here — confirm before reusing with other shift counts.
inline __device__ unsigned int rol32(unsigned int word, int shift)
{
    return (word << shift) | (word >> (32 - shift));
}
/**
 * Builds the forward Bloom filter from the pattern file.
 * One thread per pattern line: d_a holds fixed-width patterns of HASH_LEN
 * characters each followed by a newline (stride HASH_LEN + 1). Each thread
 * computes two rolling sbox hashes over its pattern and atomically sets four
 * derived bit positions in the bit vector d_b.
 * `starting_offset` lets the host cover more patterns than one grid allows.
 */
__global__ void grepSetup(unsigned char *d_a,
                          unsigned int *d_b,
                          unsigned int starting_offset)
{
    /* SPEED: Copy into local memory coalescing and then do this
     * all locally. */
    int i = starting_offset + (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x * blockDim.y + threadIdx.x;
    int char_offset = i * (HASH_LEN + 1); /* Skip \n */
    unsigned int hval = 0, hval2 = 0;
    // two independent rolling hashes over the sbox-substituted pattern bytes
    for (int j = 0; j < HASH_LEN; j++) {
        hval = rol32(hval, 1);
        hval2 = rol32(hval2, 3);
        unsigned int sbv = sbox[d_a[char_offset + j]];
        hval ^= sbv;
        hval2 ^= sbv;
    }
    // four Bloom positions derived from the two base hashes; these must match
    // the probe order used by GrepKernel
    device_set_bit(hval & BLOOMMASK, d_b);
    device_set_bit(hval2 & BLOOMMASK, d_b);
    unsigned int hval3 = hval + hval2;
    device_set_bit(hval3 & BLOOMMASK, d_b);
    unsigned int hval4 = hval + 5 * hval2;
    device_set_bit(hval4 & BLOOMMASK, d_b);
    // fifth hash disabled in this configuration:
    // unsigned int hval5 = (hval << 16) | (hval2 >> 16);
    // device_set_bit(hval5 & BLOOMMASK, d_b);
}
/**
 * Core scan: one thread per corpus character position. Each thread hashes
 * the HASH_LEN-character window starting at its position and probes the
 * forward Bloom filter (via texture tex_bloom). On a hit it (a) marks the
 * position in dev_positions_matched and (b) sets four *different* hash bits
 * in dev_reverse_bloom, which filterPatterns later probes to identify which
 * patterns may have matched.
 * NOTE(review): the `blooms` parameter is unused (the texture is probed
 * instead), and `n_chars` is never read — no tail guard, so the device
 * buffer must carry HASH_LEN extra readable bytes past the region.
 * Requires blockDim.x == BLOCK_SIZE (shared staging buffer is sized to it).
 */
__global__ void GrepKernel(unsigned char *d_a,
                           unsigned int *blooms,
                           unsigned int *dev_reverse_bloom,
                           unsigned int *dev_positions_matched,
                           unsigned int char_offset,
                           unsigned int n_chars)
{
    __shared__ unsigned boxed[BLOCK_SIZE + HASH_LEN];
    int i = char_offset + (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x * blockDim.y + threadIdx.x;
    /* Step 1: Bring the base chars into shared memory, sboxing them on the
     * way in. (Measured faster than, or equal to, 32-bit reads + shifts.) */
    /* TIME: 0.03 seconds */
    boxed[threadIdx.x] = sbox[d_a[i]];
    /* Ugly, but let the first HASH_LEN threads pull in the window overhang */
    /* TIME: 0.01 seconds */
    int otid = threadIdx.x;
    if (otid < HASH_LEN) {
        int new_i = blockDim.x + i;
        int new_b = blockDim.x + otid;
        boxed[new_b] = sbox[d_a[new_i]];
    }
    /* Barrier before any thread reads its neighbors' staged bytes. */
    __syncthreads();
    unsigned int hval = 0, hval2 = 0;
    /* Step 2: Compute the two rolling hashes of the next HASH_LEN chars
     * (must mirror grepSetup exactly). */
    for (int j = 0; j < HASH_LEN; j++) {
        hval = rol32(hval, 1);
        hval2 = rol32(hval2, 3);
        unsigned int sbv = boxed[threadIdx.x+j];
        hval ^= sbv;
        hval2 ^= sbv;
    }
    /* Step 3: probe the four Bloom positions. The short-circuit nesting means
     * most threads stop after one texture fetch. (Original notes: this is
     * the dominant cost — ~0.22 of 0.27s; sorted-hash / blocked-bloom
     * locality tricks were tried with little benefit.) */
    unsigned int h1 = hval & BLOOMMASK;
    unsigned int h2 = hval2 & BLOOMMASK;
    unsigned int h3 = (hval + hval2) & BLOOMMASK;
    unsigned int h4 = (hval + 5 * hval2) & BLOOMMASK;
    // unsigned int h5 = ((hval << 16) | (hval2 >> 16)) & BLOOMMASK;
    /* Precompute byte index + bit mask for each probe (texture is byte-wise).
     * Original note: doesn't help with two hash functions (0.38 vs 0.37s). */
    unsigned int w1 = h1 >> 3;
    unsigned char bit1 = 1 << (h1 & 7);
    unsigned int w2 = h2 >> 3;
    unsigned char bit2 = 1 << (h2 & 7);
    unsigned int w3 = h3 >> 3;
    unsigned char bit3 = 1 << (h3 & 7);
    unsigned int w4 = h4 >> 3;
    unsigned char bit4 = 1 << (h4 & 7);
    // unsigned int w5 = h5 >> 3;
    // unsigned char bit5 = 1 << (h5 & 7);
    unsigned char t1 = tex1Dfetch(tex_bloom, w1); /* SPEED: Slowest part */
    if (t1 & bit1) {
        unsigned char t2 = tex1Dfetch(tex_bloom, w2);
        if (t2 & bit2) {
            unsigned char t3 = tex1Dfetch(tex_bloom, w3);
            if (t3 & bit3) {
                unsigned char t4 = tex1Dfetch(tex_bloom, w4);
                if (t4 & bit4) {
                    // unsigned char t5 = tex1Dfetch(tex_bloom, w5);
                    // if (t5 & bit5) {
                    /* All four probes hit: record the position and feed the
                     * reverse filter with four DIFFERENT derived hashes,
                     * which filterPatterns probes with the same formulas. */
                    unsigned int hh5 = (hval + 7 * hval2) & BLOOMMASK;
                    unsigned int h6 = (hval + 3 * hval2) & BLOOMMASK;
                    unsigned int h7 = ((hval << 1) + hval2) & BLOOMMASK;
                    unsigned int h8 = ((hval << 2) + hval2) & BLOOMMASK;
                    // unsigned int h10 = (hval * 11 + hval2) & BLOOMMASK;
                    device_set_bit(hh5, dev_reverse_bloom);
                    device_set_bit(h6, dev_reverse_bloom);
                    device_set_bit(h7, dev_reverse_bloom);
                    device_set_bit(h8, dev_reverse_bloom);
                    // device_set_bit(h10, dev_reverse_bloom);
                    /* If we hit, annotate in a bit vector.
                     * SPEED: If matches become common, stage this in shared
                     * memory and flush 64 bytes at a time. Not needed yet. */
                    device_set_bit(i, dev_positions_matched);
                }}
                // }
        }
    }
}
/**
 * Second pass over the PATTERN file: one thread per pattern line (stride
 * HASH_LEN + 1 in d_a). Recomputes the two rolling hashes and probes the
 * reverse Bloom filter d_b with the same four derived hashes GrepKernel set
 * on corpus hits; patterns whose bits all survive are marked in
 * dev_patterns_matched (bit index = pattern line number).
 */
__global__ void filterPatterns(unsigned char *d_a,
                               unsigned int *d_b,
                               unsigned int *dev_patterns_matched,
                               unsigned int starting_offset)
{
    /* SPEED: Copy into local memory coalescing and then do this
     * all locally. */
    int i = starting_offset + (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x * blockDim.y + threadIdx.x;
    int char_offset = i * (HASH_LEN + 1); /* Skip \n */
    unsigned int hval = 0, hval2 = 0;
    // identical hash recurrence to grepSetup/GrepKernel
    for (int j = 0; j < HASH_LEN; j++) {
        hval = rol32(hval, 1);
        hval2 = rol32(hval2, 3);
        unsigned int sbv = sbox[d_a[char_offset + j]];
        hval ^= sbv;
        hval2 ^= sbv;
    }
    // the same four reverse-filter positions GrepKernel sets on a corpus hit
    unsigned int h5 = (hval + 7 * hval2) & BLOOMMASK;
    unsigned int h6 = (hval + 3 * hval2) & BLOOMMASK;
    unsigned int h7 = ((hval << 1) + hval2) & BLOOMMASK;
    unsigned int h8 = ((hval << 2) + hval2) & BLOOMMASK;
    // unsigned int h10 = (hval * 11 + hval2) & BLOOMMASK;
    if (device_is_bit_set(h5, d_b)) {
        if (device_is_bit_set(h6, d_b)) {
            if (device_is_bit_set(h7, d_b)) {
                if (device_is_bit_set(h8, d_b)) {
                    // if (device_is_bit_set(h10, d_b)) {
                    device_set_bit(i, dev_patterns_matched);
                    // }
                }
            }
        }
    }
}
// Prints the most recent runtime status under the given label.
// Note: hipGetLastError() also clears the sticky error state.
void checkReportCudaStatus(const char *name) {
    hipError_t status = hipGetLastError();
    printf("CudaStatus %s: ", name);
    if (status != hipSuccess) {
        printf("Error: %s\n", hipGetErrorString(status));
    } else {
        printf("Success\n");
    }
}
// Reports a failed runtime call labeled `name` and aborts the process.
// No-op when err == hipSuccess.
// Fix: the original contained a redundant nested `if (err)` inside the
// already-guarded branch; removed (no behavior change).
void exitOnError(const char *name, hipError_t err) {
    if (err != hipSuccess) {
        printf("%s Error: %s\n", name, hipGetErrorString(err));
        exit(-1);
    }
}
/**
 * Maps `filename` into host memory and copies its contents (clamped to
 * FILE_MAX bytes) into a freshly allocated device buffer, sized with
 * HASH_LEN extra bytes so kernels can read a full hash window at the tail.
 * Returns the number of bytes copied; *devMemPtr receives the device buffer.
 *
 * Fix: mmap() reports failure by returning MAP_FAILED ((void*)-1), not NULL,
 * so the original `if (!buf)` check never fired and a failed map was passed
 * to hipMemcpy. Also unmaps the full original mapping length rather than the
 * clamped size (they differ for files larger than FILE_MAX).
 */
size_t filetodevice(char *filename, void **devMemPtr)
{
    size_t filesize;
    int f = getfile(filename, &filesize);
    if (f == -1) {
        perror(filename);
        exit(-1);
    }
    char *buf = (char *)mmap(NULL, filesize, PROT_READ, MAP_FILE | MAP_SHARED, f, 0);
    if (buf == MAP_FAILED) {
        perror("filetodevice mmap failed");
        exit(-1);
    }
    size_t mappedsize = filesize;   // full mapping length, for munmap below
    filesize = min((unsigned long long)filesize, (unsigned long long)FILE_MAX);
    posix_madvise(buf, filesize, POSIX_MADV_SEQUENTIAL);
    printf("filesize = %lu\n", filesize);
    exitOnError("hipMalloc",
                hipMalloc(devMemPtr, filesize + HASH_LEN));
    exitOnError("hipMemcpy",
                hipMemcpy(*devMemPtr, buf, filesize, hipMemcpyHostToDevice));
    munmap(buf, mappedsize);
    close(f);
    return filesize;
}
// Debug helper: copies a device-resident bit vector (`bits` bits) back to
// the host and prints the index of every set bit, one per line, after a
// "bvDump <name>" header.
void bvDump(char *bloomname, unsigned int *dev_bloom, unsigned int bits) {
    printf("bvDump %s\n", bloomname);
    const unsigned int nbytes = bits / 8;
    unsigned int *host_bits = (unsigned int *)malloc(nbytes);
    hipMemcpy(host_bits, dev_bloom, nbytes, hipMemcpyDeviceToHost);
    for (int i = 0; i < bits; i++) {
        if (is_bit_set(i, host_bits))
            printf("%d\n", i);
    }
    free(host_bits);
}
/**
 * Prints every corpus line that contains at least one matched position.
 * `bv` is the positions-matched bit vector (file_ints 32-bit words; bit =
 * byte offset into the corpus).
 * NOTE(review): the corpus bytes are read from the global `pinnedBuf` filled
 * earlier by executeGrepOverlap — the file is reopened here only via
 * getfile() for its size/existence, and its fd is never read. Confirm the
 * pinned buffer is still live at call time.
 * NOTE: ffs() and memrchr() are glibc/POSIX extensions.
 */
void printpositions(char *filename,
                    unsigned int *bv,
                    unsigned int file_ints)
{
    size_t filesize;
    int f = getfile(filename, &filesize);
    if (f == -1) {
        perror(filename);
        exit(-1);
    }
    char *buf = pinnedBuf;
    filesize = min((unsigned long long)filesize, (unsigned long long)FILE_MAX);
    // end offset of the line most recently printed; used to print each
    // matched line only once even when it holds several matched positions
    int prev_end = -1;
    for (int i = 0; i < file_ints; i++) {
        if (bv[i]) {
            // ffs() skips directly to the first set bit of the word
            for (int j = ffs(bv[i]) - 1; j < 32; j++) {
                int offset = i*32 + j;
                if (is_bit_set(offset, bv)) {
                    /* Skip positions inside the already-printed line, and
                     * matches sitting exactly on a newline. */
                    if (offset > prev_end && buf[offset] != '\n') {
                        // scan backwards for the start of this line
                        char *sol = ((char*)memrchr(buf, '\n', offset));
                        int start_line;
                        if (sol) {
                            start_line = sol - buf;
                        } else {
                            start_line = 0;
                        }
                        int end_line;
                        // NOTE(review): eol is NULL if the final line lacks a
                        // trailing '\n' — confirm the corpus always ends with one
                        char *eol = (char*)memchr(buf + offset, '\n', filesize - offset + 1);
                        end_line = eol - buf;
                        // jump j past the rest of this line
                        j += end_line - offset;
                        if (buf[start_line] == '\n') start_line++;
                        fwrite(buf + start_line, 1, end_line - start_line, stdout);
                        fputc('\n', stdout);
                        prev_end = end_line;
                    }
                }
            }
        }
    }
    close(f);
}
// Writes every pattern whose bit is set in `bv` to `out_filename`, one
// pattern per line. `bv` holds file_ints 32-bit words; bit k of word i
// selects patterns[i*32 + k], whose byte length is lengths[i*32 + k].
void printpatterns(char **patterns,
                   int *lengths,
                   unsigned int *bv,
                   unsigned int file_ints,
                   char* out_filename)
{
    FILE *out = fopen(out_filename, "w");
    if (!out) {
        perror("Error opening patterns output file");
        exit(-1);
    }
    for (unsigned int w = 0; w < file_ints; ++w) {
        unsigned int word = bv[w];
        if (word == 0)
            continue;                       // nothing matched in this word
        int idx = (int)(w << 5);            // pattern index of bit 0
        for (unsigned int probe = 1; probe != 0; probe <<= 1, ++idx) {
            if (word & probe) {
                fwrite(patterns[idx], 1, lengths[idx], out);
                fputc('\n', out);
            }
        }
    }
    fclose(out);
}
/**
 * Chooses a launch configuration covering as many of `numthreads` threads
 * as one grid allows (capped at 256 x 256 blocks of `blocksize`), writing
 * it to dimGrid/dimBlock. Returns the number of threads covered; callers
 * loop, advancing their offset, until the requested count is exhausted.
 *
 * Fix: when numthreads == blocksize the original took neither branch, so
 * threads_1d = numthreads % blocksize == 0 and the function returned 0 —
 * every caller's `while (numthreads > 0)` loop then spun forever launching
 * zero-thread grids. Using ">=" covers that case. The dead
 * `numthreads -= threads_used` store on the by-value parameter is removed.
 */
int dimPick(dim3 &dimGrid,
            dim3 &dimBlock,
            int numthreads,
            int blocksize)
{
    unsigned int blocks_y = 1;
    unsigned int blocks_x = 1;
    unsigned int threads_1d = numthreads % blocksize;   // tail-only case
    if (numthreads > (256 * blocksize)) {
        // big chunk: 256-wide rows of full blocks
        blocks_y = numthreads / (256 * blocksize);
        blocks_x = 256;
        threads_1d = blocksize;
    } else if (numthreads >= blocksize) {
        // one row of full blocks (">=" — see fix note above)
        blocks_x = numthreads / blocksize;
        threads_1d = blocksize;
    }
    unsigned int threads_used = blocks_y * blocks_x * threads_1d;
    //printf("dimPick %d %d %d\n", blocks_y, blocks_x, threads_1d);
    dimGrid = dim3(blocks_x, blocks_y);
    dimBlock = dim3(threads_1d);
    return threads_used;
}
/**
 * Clears the forward Bloom bit vector and runs grepSetup over every pattern
 * line in dev_greps (grepsize bytes of HASH_LEN+1-byte lines). dimPick may
 * not cover all lines in one grid, so the loop advances char_offset (a LINE
 * index, despite the name) until every pattern has been hashed in.
 */
void setup_bloom_search(int grepsize,
                        unsigned char *dev_greps,
                        unsigned int *dev_bloom)
{
    exitOnError("setup_bloom_search cudaMemSet dev_bloom = 0",
                hipMemset(dev_bloom, 0, BLOOMBITS/8));
    int numthreads = grepsize / (HASH_LEN + 1);   // one thread per pattern line
    unsigned int char_offset = 0;
    dim3 dimGrid, dimBlock;
    while (numthreads > 0) {
        unsigned int tu = dimPick(dimGrid, dimBlock, numthreads, BLOCK_SIZE);
        printf("Executing grepSetup (%d,%d,%d)\n", dimGrid.x, dimGrid.y, dimBlock.x);
        hipLaunchKernelGGL(( grepSetup), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_greps, dev_bloom, char_offset);
        checkReportCudaStatus("grepSetup kernel");
        numthreads -= tu;
        char_offset += tu;
    }
}
/*
void executeGrep(int filesize,
unsigned char *dev_chars,
unsigned int *dev_bloom,
unsigned int *dev_reverse_bloom,
unsigned int *dev_positions_matched)
{
int numthreads = filesize;
unsigned int char_offset = 0;
dim3 dimGrid, dimBlock;
exitOnError("executeGrep cudaMemSet dev_reverse_bloom = 0",
hipMemset(dev_reverse_bloom, 0, BLOOMBITS/8));
printf("Executing grep on %d\n", filesize);
while (numthreads > 0) {
unsigned int tu = dimPick(dimGrid, dimBlock, numthreads, BLOCK_SIZE);
printf("Executing GrepKernel (%d,%d,%d) @ %u\n", dimGrid.x, dimGrid.y, dimBlock.x, char_offset);
GrepKernel<<<dimGrid, dimBlock>>>(dev_chars, dev_bloom, dev_reverse_bloom,
dev_positions_matched, char_offset, filesize);
numthreads -= tu;
char_offset += tu;
}
}
*/
/**
 * Streams the corpus file to the device in NR_STREAMS chunks, overlapping
 * host reads, async H2D copies, and GrepKernel launches on per-chunk
 * streams. Allocates the device corpus buffer (*devMemPtr) and the global
 * pinned staging buffer `pinnedBuf`, which is deliberately left allocated:
 * printpositions() reads the corpus from it afterwards.
 * NOTE(review): the file is opened twice (getfile() for the size check, then
 * open() for reading) and read() here is synchronous, serializing the host
 * side of each iteration — confirm whether that is intentional.
 * NOTE(review): read()'s return value is unchecked; a short read would leave
 * stale pinned-buffer bytes in the scanned window.
 */
void executeGrepOverlap(char *filename,
                        unsigned char **devMemPtr,
                        unsigned int *dev_bloom,
                        unsigned int *dev_reverse_bloom,
                        unsigned int *dev_positions_matched)
{
    size_t filesize;
    int f = getfile(filename, &filesize);
    if (f == -1) {
        perror(filename);
        exit(-1);
    }
    filesize = min((unsigned long long)filesize, (unsigned long long)FILE_MAX);
    exitOnError("hipHostMalloc", hipHostMalloc(&pinnedBuf, filesize));
    // HASH_LEN extra bytes so the last positions can read a full hash window
    exitOnError("hipMalloc",
                hipMalloc(devMemPtr, filesize + HASH_LEN));
    unsigned char *dev_chars = *devMemPtr;
    int numthreads;
    dim3 dimGrid, dimBlock;
    exitOnError("executeGrep cudaMemSet dev_reverse_bloom = 0",
                hipMemset(dev_reverse_bloom, 0, BLOOMBITS/8));
    hipStream_t streams[NR_STREAMS];
    for (int i = 0; i < NR_STREAMS; i++) {
        exitOnError("hipStreamCreate",
                    hipStreamCreate(&streams[i]));
    }
    int size = filesize / NR_STREAMS;
    int fd = open(filename, O_RDONLY);
    for (int i = 0; i < NR_STREAMS; i++) {
        unsigned offset = i * size;
        if (i == NR_STREAMS - 1) {
            // last chunk absorbs the division remainder
            size = filesize - i * size;
        }
        numthreads = size;      // one thread per character position
        printf("Executing grep on %d\n", size);
        read(fd, pinnedBuf + offset, size);
        exitOnError("hipMemcpyAsync",
                    hipMemcpyAsync(dev_chars + offset, pinnedBuf + offset, size, hipMemcpyHostToDevice, streams[i]));
        unsigned int char_offset = 0;
        // one grid may not cover the chunk; loop until every position is scanned
        while (numthreads > 0) {
            unsigned int tu = dimPick(dimGrid, dimBlock, numthreads, BLOCK_SIZE);
            printf("Executing GrepKernel (%d,%d,%d) @ %u\n", dimGrid.x, dimGrid.y, dimBlock.x, (offset + char_offset));
            hipLaunchKernelGGL(( GrepKernel), dim3(dimGrid), dim3(dimBlock), 0, streams[i], dev_chars, dev_bloom, dev_reverse_bloom,
                               dev_positions_matched, offset + char_offset, size);
            checkReportCudaStatus("GrepKernel");
            numthreads -= tu;
            char_offset += tu;
        }
    }
    close(f);
    close(fd);
    // wait for all streams' copies and kernels before tearing the streams down
    hipDeviceSynchronize();
    for (int i = 0; i < NR_STREAMS; i++) {
        hipStreamDestroy(streams[i]);
    }
}
/**
 * Runs filterPatterns over every pattern line in dev_greps, probing the
 * reverse Bloom filter and marking surviving patterns in
 * dev_patterns_matched. Loops with dimPick because one grid may not cover
 * all lines; line_offset advances by the number of lines covered.
 */
void executePatternFiltering(int grepsize,
                             unsigned char *dev_greps,
                             unsigned int *dev_reverse_bloom,
                             unsigned int *dev_patterns_matched)
{
    int numthreads = grepsize / (HASH_LEN + 1);   // one thread per pattern line
    printf("NUMTHREADS = %d\n", numthreads);
    unsigned int line_offset = 0;
    dim3 dimGrid, dimBlock;
    while (numthreads > 0) {
        unsigned int tu = dimPick(dimGrid, dimBlock, numthreads, BLOCK_SIZE);
        printf("Executing pattern filtering (%d,%d,%d)\n", dimGrid.x, dimGrid.y, dimBlock.x);
        hipLaunchKernelGGL(( filterPatterns), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_greps, dev_reverse_bloom, dev_patterns_matched, line_offset);
        checkReportCudaStatus("filterPatterns kernel");
        numthreads -= tu;
        line_offset += tu;
    }
}
/**
 * Feed-forward Bloom filter grep driver. Phases:
 *   1. Build the forward Bloom filter from the truncated patterns file and
 *      bind it to the read-only texture.
 *   2. Load the full (untruncated) patterns into host arrays for reporting.
 *   3. Stream the corpus through GrepKernel (positions + reverse filter).
 *   4. Filter the patterns against the reverse filter, copy both result bit
 *      vectors back, and print matching corpus lines and matching patterns.
 * Timing checkpoints are recorded with timing_stamp()/timing_report().
 */
int main(int argc, char **argv)
{
    if (argc != 5)
    {
        cerr << "usage: ./greptest truncatedPatternsFile corpus fullPatternsFile outPatternsFile" << endl;
        return -1;
    }
    char *searchfile = argv[1];
    char *infile = argv[2];
    char *full_patterns_file = argv[3];
    char *out_patterns_file = argv[4];
    unsigned char *dev_chars;
    unsigned char *dev_greps;
    /* Bit vectors */
    unsigned int *dev_bloom, *dev_reverse_bloom, *dev_positions_matched, *dev_patterns_matched;
    timing_stamp("start", false);
    // Phase 1: truncated patterns -> device, hash into forward filter
    size_t grepsize = filetodevice(searchfile, (void **)&dev_greps);
    exitOnError("hipMalloc dev_bloom",
                hipMalloc((void **)&dev_bloom, BLOOMBITS/8));
    exitOnError("hipMalloc dev_reverse_bloom",
                hipMalloc((void **)&dev_reverse_bloom, BLOOMBITS/8));
    setup_bloom_search(grepsize, dev_greps, dev_bloom);
    /* Bind the bloom filter to a texture */
    exitOnError("hipBindTexture tex_bloom to dev_bloom",
                hipBindTexture(NULL, tex_bloom, dev_bloom, BLOOMBITS/8));
    //bvDump(searchfile, dev_bloom, BLOOMBITS);
    // Phase 2: index full patterns by line (line i <-> pattern bit i)
    int nr_patterns = grepsize / (HASH_LEN + 1);
    char **patterns = new char*[nr_patterns];
    int *lengths = new int[nr_patterns];
    ifstream in_patterns(full_patterns_file);
    printf("nr_patterns = %d\n", nr_patterns);
    for (int i = 0; i < nr_patterns; i++) {
        char line[1001];
        in_patterns.getline(line, 1000);
        patterns[i] = new char[strlen(line) + 1];
        memcpy(patterns[i], line, strlen(line) + 1);
        lengths[i] = strlen(patterns[i]);
    }
    in_patterns.close();
    timing_stamp("setup complete", false);
    printf("GPUGrep opening %s\n", infile);
    size_t filesize;
    int f = getfile(infile, &filesize);
    if (f == -1) {
        perror(infile);
        exit(-1);
    }
    // one bit per corpus byte (+1 byte slack for the rounding)
    exitOnError("hipMalloc dev_positions_matched",
                hipMalloc((void **)&dev_positions_matched, filesize / 8 + 1));
    exitOnError("hipMemset dev_positions_matched = 0",
                hipMemset(dev_positions_matched, 0, filesize/8 + 1));
    timing_stamp("posmatch init", false);
    printf("\nPhase 3: Executing kernel\n");
    executeGrepOverlap(infile, &dev_chars, dev_bloom, dev_reverse_bloom, dev_positions_matched);
    hipDeviceSynchronize();
    timing_stamp("grep done", false);
    checkReportCudaStatus("Grep Kernel");
    exitOnError("hipMalloc dev_patterns_matched",
                hipMalloc((void **)&dev_patterns_matched, nr_patterns / 8));
    exitOnError("hipMemset dev_patterns_matched = 0",
                hipMemset(dev_patterns_matched, 0, nr_patterns / 8));
    executePatternFiltering(grepsize, dev_greps, dev_reverse_bloom, dev_patterns_matched);
    hipDeviceSynchronize();
    hipFree(dev_greps);
    timing_stamp("patterns filtering done", false);
    /* Original design note (kept for context): sorting the (bit position,
     * char offset) pairs before probing could improve bit-vector locality,
     * but radixSort throughput (~20 MElements/s) likely makes it a loss. */
#if 1
    printf("\nPhase 4: Copying results to host memory.\n");
    unsigned int *host_positions_matched = (unsigned int *)malloc(filesize / 8);
    // NOTE(review): malloc(grepsize) over-allocates — only
    // grepsize/(HASH_LEN+1)/8 bytes are copied below; harmless but confirm.
    unsigned int *host_patterns_matched = (unsigned int *)malloc(grepsize);
    exitOnError("hipMemcpy corpus results to host",
                hipMemcpy(host_positions_matched, dev_positions_matched,
                          filesize / 8, hipMemcpyDeviceToHost));
    exitOnError("hipMemcpy pattern results to host",
                hipMemcpy(host_patterns_matched, dev_patterns_matched,
                          grepsize / (HASH_LEN + 1) / 8, hipMemcpyDeviceToHost));
    timing_stamp("copyout done", false);
#if 1
    printpositions(infile, host_positions_matched, filesize / 32);
    printpatterns(patterns, lengths, host_patterns_matched, grepsize / (HASH_LEN + 1) / 32, out_patterns_file);
    timing_stamp("printout done", false);
#endif
    printf("\n");
    free(host_positions_matched);
    free(host_patterns_matched);
#endif
    hipFree(dev_bloom);
    hipFree(dev_reverse_bloom);
    hipFree(dev_chars);
    hipFree(dev_positions_matched);
    timing_stamp("cleanup done", true);
    timing_report();
    struct hipDeviceProp_t cdp;
    hipGetDeviceProperties(&cdp, 0);
    printf("\ndeviceOverlap = %d\n", cdp.deviceOverlap);
}
| 8c48d427d1f0925e87e38e84196a27d13e4e85c5.cu | /* -*- Mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
/**
Copyright 2011 Carnegie Mellon University
Authors: Iulian Moraru and David G. Andersen
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This is the implementation of a feed-forward Bloom filter for GPGPU.
*/
#define _DARWIN_FEATURE_64_BIT_INODE 1
#include <iostream>
#include <fstream>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/mman.h>
#include "cuda.h"
#include "cuda_runtime.h"
#include "math_functions.h"
#include "sbox.h"
using namespace std;
extern "C" {
int getfile(char *infile, size_t *filesize);
#include "timing.h"
}
//#define BLOOMBITS 1048576 /* 1 millllion bits */
#define BLOOMBITS 0x10000000 /* 32 MB */
#define BLOOMMASK (BLOOMBITS - 1)
#define BLOCK_SIZE 256
#define HASH_LEN 19
#define FILE_MAX 6710886400
#define NR_STREAMS 10
char *pinnedBuf;
struct countmap {
unsigned int hval;
unsigned int charloc;
};
texture<unsigned char, 1, cudaReadModeElementType> tex_bloom;
/* Host-side test of bit i in bit vector bv (32-bit words, LSB-first). */
bool is_bit_set(int i, unsigned int *bv) {
    /* Word i/32, bit i%32; the masked value converts to bool. */
    return (bv[i >> 5] >> (i & 31)) & 1u;
}
/* Device-side test of bit i in the Bloom filter bound to tex_bloom.
 * Fetches byte i/8 through the texture cache and masks bit i%8 within it
 * (byte-wise LSB-first, matching how device_set_bit packs bits). */
__device__ bool texbf_is_bit_set(int i) {
    unsigned char word = tex1Dfetch(tex_bloom, i/8);
    unsigned int bitMask = 1 << (i % 8);
    return (word & bitMask);
}
/* Device-side test of bit i in bit vector bv (32-bit words, LSB-first). */
__device__ bool device_is_bit_set(int i, unsigned int *bv) {
    return (bv[i >> 5] >> (i & 31)) & 1u;
}
/* Atomically set bit i in bit vector bv.  atomicOr serializes concurrent
 * updates to the same 32-bit word, so many threads may set bits safely. */
__device__ void device_set_bit(int i, unsigned int *bv) {
    unsigned int bitMask = 1 << (i & 31);
    atomicOr(&bv[i >> 5], bitMask);
}
/* Rotate a 32-bit word left by `shift` bits.
 * The `& 31` on the right-shift count makes shift == 0 well defined
 * (a plain `word >> 32` is undefined behavior in C/C++); for shift == 0
 * the result is `word` itself.  Current callers pass only 1 and 3, for
 * which behavior is unchanged. */
inline __device__ unsigned int rol32(unsigned int word, int shift)
{
    return (word << shift) | (word >> ((32 - shift) & 31));
}
/* Bloom-filter build kernel: one thread per truncated pattern line.
 * d_a holds patterns packed as HASH_LEN chars + '\n' each; the thread's
 * pattern index is starting_offset plus its global thread id.  Two rolling
 * sbox-based hashes (hval, hval2) are computed over the pattern and mixed
 * into four bit positions set in the bloom filter d_b. */
__global__ void grepSetup(unsigned char *d_a,
                          unsigned int *d_b,
                          unsigned int starting_offset)
{
    /* SPEED: Copy into local memory coalescing and then do this
     * all locally. */
    int i = starting_offset + (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x * blockDim.y + threadIdx.x;
    int char_offset = i * (HASH_LEN + 1); /* Skip \n */
    unsigned int hval = 0, hval2 = 0;
    /* Rolling hash: rotate-then-xor of the sbox value of each byte. */
    for (int j = 0; j < HASH_LEN; j++) {
        hval = rol32(hval, 1);
        hval2 = rol32(hval2, 3);
        unsigned int sbv = sbox[d_a[char_offset + j]];
        hval ^= sbv;
        hval2 ^= sbv;
    }
    /* Four bloom bits derived from the two base hashes (h3, h4 are linear
     * combinations — the standard double-hashing trick). */
    device_set_bit(hval & BLOOMMASK, d_b);
    device_set_bit(hval2 & BLOOMMASK, d_b);
    unsigned int hval3 = hval + hval2;
    device_set_bit(hval3 & BLOOMMASK, d_b);
    unsigned int hval4 = hval + 5 * hval2;
    device_set_bit(hval4 & BLOOMMASK, d_b);
//    unsigned int hval5 = (hval << 16) | (hval2 >> 16);
//    device_set_bit(hval5 & BLOOMMASK, d_b);
}
/* Feed-forward grep kernel: one thread per corpus character position.
 * Each thread hashes the HASH_LEN-char window starting at its position
 * (chars are staged sbox-ed into shared memory) and probes four bits of
 * the pattern bloom filter through the tex_bloom texture.  On a full hit
 * it records the position in dev_positions_matched and sets four
 * differently-mixed "reverse" bloom bits in dev_reverse_bloom, which
 * filterPatterns later uses to narrow down which patterns matched.
 * NOTE(review): the n_chars parameter is never read in this body; the
 * final windows of a launch rely on the HASH_LEN extra bytes the callers
 * over-allocate past the corpus — confirm tail handling with callers. */
__global__ void GrepKernel(unsigned char *d_a,
                           unsigned int *blooms,
                           unsigned int *dev_reverse_bloom,
                           unsigned int *dev_positions_matched,
                           unsigned int char_offset,
                           unsigned int n_chars)
{
    /* BLOCK_SIZE chars for this block plus HASH_LEN overlap chars so the
     * last threads of the block can still hash a full window. */
    __shared__ unsigned boxed[BLOCK_SIZE + HASH_LEN];
    int i = char_offset + (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x * blockDim.y + threadIdx.x;
    /* SPeed: This part takes .06 seconds.  Without boxing and
     * without cleanup, it takes .04.  Without cleanup, .05. */
    /* Step 1: Bring the base chars in to local memory,
     * sboxing them on the way in. SPEED: This is faster or equiv to
     * doing 32 bit reads into a register and then shifting out the
     * chars. */
    /* TIME: 0.03 seconds */
    boxed[threadIdx.x] = sbox[d_a[i]];
    /* Ugly, but let some threads pull in the remainder */
    /* TIME: 0.01 seconds */
    int otid = threadIdx.x;
    if (otid < HASH_LEN) {
        int new_i = blockDim.x + i;
        int new_b = blockDim.x + otid;
        boxed[new_b] = sbox[d_a[new_i]];
    }
    /* TIME: Almost none. */
    /* Barrier: all shared-memory writes above must land before any thread
     * reads its neighbors' boxed[] entries below. */
    __syncthreads();
    unsigned int hval = 0, hval2 = 0;
    /* Step 2: Compute the hash of the next HASH_LEN characters */
    for (int j = 0; j < HASH_LEN; j++) {
        hval = rol32(hval, 1);
        hval2 = rol32(hval2, 3);
        unsigned int sbv = boxed[threadIdx.x+j];
        hval ^= sbv;
        hval2 ^= sbv;
    }
    /* Other idea: Steal from the blocked bloom filter idea to do two
     * bit lookups in a single bus transaction. */
    /* Attempt X: Loop over the bit vector, load into local memory,
     * do a subset of the tests. */
    /* Idea: Have 4 threads process each character position.
     * And have them only do the bit lookup if hash >> [all but 2 bits]== <index>
     * in some way -- thus forcing locality. Trading bandwidth, but hey,
     * we've got bandwidth.
     * To really do this right, we might want to optimize the
     * hash computation further so that we don't use too much
     * global bandwidth copying the post-hash results out.
     * XXX - probably not too helpful; tried using lowest of 4
     * hash functions to improve texture locality, little benefit. Maybe
     * could combine. */
    /* SPEED: This step takes 0.22 of 0.27 seconds */
    /* searchbig: 0.31 out of 0.37 */
    /* 3 version 1: Do them into global memory and let threads diverge... */
    /* Unrolling and doing a no-branch, dual fetch is slower. */
    /* Hm. With more hash functions, might be able to use the sorted
     * hash trick to improve locality at the cost of a bit more
     * computation. Can we bubble sort 5 hash functions rapidly? Does
     * that give us a cache advantage with texture memory? */
    /* Same four hash mixes grepSetup used to populate the filter. */
    unsigned int h1 = hval & BLOOMMASK;
    unsigned int h2 = hval2 & BLOOMMASK;
    unsigned int h3 = (hval + hval2) & BLOOMMASK;
    unsigned int h4 = (hval + 5 * hval2) & BLOOMMASK;
    // unsigned int h5 = ((hval << 16) | (hval2 >> 16)) & BLOOMMASK;
    /* This doesn't help with two hash functions */
    /* Kernel time: 0.38 with, 0.37 without */
    unsigned int w1 = h1 >> 3;
    unsigned char bit1 = 1 << (h1 & 7);
    unsigned int w2 = h2 >> 3;
    unsigned char bit2 = 1 << (h2 & 7);
    unsigned int w3 = h3 >> 3;
    unsigned char bit3 = 1 << (h3 & 7);
    unsigned int w4 = h4 >> 3;
    unsigned char bit4 = 1 << (h4 & 7);
    // unsigned int w5 = h5 >> 3;
    // unsigned char bit5 = 1 << (h5 & 7);
    /* Short-circuit probe chain: most positions fail on the first test,
     * so later texture fetches are usually skipped. */
    unsigned char t1 = tex1Dfetch(tex_bloom, w1); /* SPEED: Slowest part */
    if (t1 & bit1) {
        unsigned char t2 = tex1Dfetch(tex_bloom, w2);
        if (t2 & bit2) {
            unsigned char t3 = tex1Dfetch(tex_bloom, w3);
            if (t3 & bit3) {
                unsigned char t4 = tex1Dfetch(tex_bloom, w4);
                if (t4 & bit4) {
                    // unsigned char t5 = tex1Dfetch(tex_bloom, w5);
                    // if (t5 & bit5) {
                    /* Full hit: publish a different mix of the same two
                     * hashes into the reverse filter for pattern filtering. */
                    unsigned int hh5 = (hval + 7 * hval2) & BLOOMMASK;
                    unsigned int h6 = (hval + 3 * hval2) & BLOOMMASK;
                    unsigned int h7 = ((hval << 1) + hval2) & BLOOMMASK;
                    unsigned int h8 = ((hval << 2) + hval2) & BLOOMMASK;
                    // unsigned int h10 = (hval * 11 + hval2) & BLOOMMASK;
                    device_set_bit(hh5, dev_reverse_bloom);
                    device_set_bit(h6, dev_reverse_bloom);
                    device_set_bit(h7, dev_reverse_bloom);
                    device_set_bit(h8, dev_reverse_bloom);
                    // device_set_bit(h10, dev_reverse_bloom);
                    /* If we hit, annotate in a bit vector */
                    /* SPEED: If we start doing a lot of matches, do this in local
                     * memory and flush all 64 bytes out to main memory.
                     * Not needed yet. */
                    device_set_bit(i, dev_positions_matched);
                }}
            // }
        }
    }
}
/* Pattern-filter kernel: one thread per pattern line.  Recomputes the two
 * rolling hashes of its pattern (same scheme as grepSetup) and tests the
 * four "reverse" bloom bits that GrepKernel set for corpus hits (d_b is
 * the reverse filter).  Patterns passing all four probes are flagged in
 * the dev_patterns_matched bit vector, indexed by pattern line number. */
__global__ void filterPatterns(unsigned char *d_a,
                               unsigned int *d_b,
                               unsigned int *dev_patterns_matched,
                               unsigned int starting_offset)
{
    /* SPEED: Copy into local memory coalescing and then do this
     * all locally. */
    int i = starting_offset + (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x * blockDim.y + threadIdx.x;
    int char_offset = i * (HASH_LEN + 1); /* Skip \n */
    unsigned int hval = 0, hval2 = 0;
    for (int j = 0; j < HASH_LEN; j++) {
        hval = rol32(hval, 1);
        hval2 = rol32(hval2, 3);
        unsigned int sbv = sbox[d_a[char_offset + j]];
        hval ^= sbv;
        hval2 ^= sbv;
    }
    /* These mixes must match the ones GrepKernel publishes on a hit. */
    unsigned int h5 = (hval + 7 * hval2) & BLOOMMASK;
    unsigned int h6 = (hval + 3 * hval2) & BLOOMMASK;
    unsigned int h7 = ((hval << 1) + hval2) & BLOOMMASK;
    unsigned int h8 = ((hval << 2) + hval2) & BLOOMMASK;
    // unsigned int h10 = (hval * 11 + hval2) & BLOOMMASK;
    if (device_is_bit_set(h5, d_b)) {
        if (device_is_bit_set(h6, d_b)) {
            if (device_is_bit_set(h7, d_b)) {
                if (device_is_bit_set(h8, d_b)) {
                    // if (device_is_bit_set(h10, d_b)) {
                    device_set_bit(i, dev_patterns_matched);
                    // }
                }
            }
        }
    }
}
/* Print the pending CUDA error state tagged with `name`.
 * cudaGetLastError() also clears the sticky error flag. */
void checkReportCudaStatus(const char *name) {
    cudaError_t status = cudaGetLastError();
    printf("CudaStatus %s: ", name);
    if (status == cudaSuccess) {
        printf("Success\n");
    } else {
        printf("Error: %s\n", cudaGetErrorString(status));
    }
}
/* Abort the process with a diagnostic when a CUDA API call failed.
 * `name` identifies the failing call in the message; err == cudaSuccess
 * is a no-op.  (The original body re-tested `err` inside the branch that
 * had already established it was nonzero — redundant check removed.) */
void exitOnError(const char *name, cudaError_t err) {
    if (err) {
        printf("%s Error: %s\n", name, cudaGetErrorString(err));
        exit(-1);
    }
}
/* mmap `filename`, copy its contents (clamped to FILE_MAX bytes) into a
 * fresh device allocation returned via *devMemPtr, and return the byte
 * count copied.  The allocation is padded by HASH_LEN bytes so grep
 * windows starting near the end can read past the data safely.
 * NOTE(review): the FILE_MAX clamp happens after mmap(), so for files
 * larger than FILE_MAX the munmap() length differs from the mapped
 * length — confirm intended behavior for very large inputs. */
size_t filetodevice(char *filename, void **devMemPtr)
{
    size_t filesize;
    int f = getfile(filename, &filesize);
    if (f == -1) {
        perror(filename);
        exit(-1);
    }
    char *buf = (char *)mmap(NULL, filesize, PROT_READ, MAP_FILE | MAP_SHARED, f, 0);
    /* NOTE(review): mmap failure is MAP_FAILED ((void*)-1), not NULL —
     * this check would miss a real failure; verify on the target libc. */
    if (!buf) {
        perror("filetodevice mmap failed");
        exit(-1);
    }
    filesize = min((unsigned long long)filesize, (unsigned long long)FILE_MAX);
    posix_madvise(buf, filesize, POSIX_MADV_SEQUENTIAL);
    printf("filesize = %lu\n", filesize);
    //exitOnError("cudaMallocHost", cudaMallocHost(&pinnedBuf, filesize));
    //memcpy(pinnedBuf, buf, filesize);
    exitOnError("cudaMalloc",
                cudaMalloc(devMemPtr, filesize + HASH_LEN));
    exitOnError("cudaMemcpy",
                cudaMemcpy(*devMemPtr, buf, filesize, cudaMemcpyHostToDevice));
    munmap(buf, filesize);
    close(f);
    return filesize;
}
/* Debug helper: copy a device bit vector of `bits` bits back to the host
 * and print the index of every set bit, one per line.
 * NOTE(review): `int i` vs `unsigned int bits` is a signed/unsigned
 * comparison — harmless while bits < 2^31 (BLOOMBITS is 2^28). */
void bvDump(char *bloomname, unsigned int *dev_bloom, unsigned int bits) {
    printf("bvDump %s\n", bloomname);
    unsigned int *blooms = (unsigned int *)malloc(bits/8);
    cudaMemcpy(blooms, dev_bloom, bits/8, cudaMemcpyDeviceToHost);
    for (int i = 0; i < bits; i++) {
        if (is_bit_set(i, blooms)) {
            printf("%d\n", i);
        }
    }
    free(blooms);
}
/* Print every corpus line containing at least one matched position.
 * bv is the positions-matched bit vector (file_ints 32-bit words); the
 * corpus text itself is read from the global pinnedBuf, which
 * executeGrepOverlap filled earlier — the file opened here is only used
 * for its size.  Once a line is printed, the inner loop skips ahead past
 * its end so each line is emitted at most once.
 * NOTE(review): memchr() is given filesize - offset + 1 bytes — one more
 * than remains in the buffer — and a NULL eol (corpus without a trailing
 * '\n') would be dereferenced; confirm the corpus always ends in '\n'. */
void printpositions(char *filename,
                    unsigned int *bv,
                    unsigned int file_ints)
{
    size_t filesize;
    int f = getfile(filename, &filesize);
    if (f == -1) {
        perror(filename);
        exit(-1);
    }
    char *buf = pinnedBuf;
    filesize = min((unsigned long long)filesize, (unsigned long long)FILE_MAX);
    int prev_end = -1;
    for (int i = 0; i < file_ints; i++) {
        if (bv[i]) {
            /* ffs() finds the first set bit so we can start the scan there. */
            for (int j = ffs(bv[i]) - 1; j < 32; j++) {
                int offset = i*32 + j;
                if (is_bit_set(offset, bv)) {
                    /* Find end of previous line */
                    if (offset > prev_end && buf[offset] != '\n') {
                        char *sol = ((char*)memrchr(buf, '\n', offset));
                        int start_line;
                        if (sol) {
                            start_line = sol - buf;
                        } else {
                            start_line = 0;
                        }
                        int end_line;
                        char *eol = (char*)memchr(buf + offset, '\n', filesize - offset + 1);
                        end_line = eol - buf;
                        /* Skip the rest of this line's bits. */
                        j += end_line - offset;
                        if (buf[start_line] == '\n') start_line++;
                        fwrite(buf + start_line, 1, end_line - start_line, stdout);
                        fputc('\n', stdout);
                        prev_end = end_line;
                    }
                }
            }
        }
    }
    close(f);
}
/* Write every pattern whose bit is set in bv to out_filename, one per
 * line.  bv holds file_ints 32-bit words; bit b of word w selects
 * patterns[w*32 + b], written with its recorded length. */
void printpatterns(char **patterns,
                   int *lengths,
                   unsigned int *bv,
                   unsigned int file_ints,
                   char* out_filename)
{
    FILE *out = fopen(out_filename, "w");
    if (!out) {
        perror("Error opening patterns output file");
        exit(-1);
    }
    for (int word = 0; word < file_ints; word++) {
        unsigned bits = bv[word];
        if (bits == 0) {
            continue;
        }
        for (int b = 0; b < 32; b++) {
            if (bits & (1u << b)) {
                int idx = (word << 5) + b;
                fwrite(patterns[idx], 1, lengths[idx], out);
                fputc('\n', out);
            }
        }
    }
    fclose(out);
}
/* Choose a launch configuration covering up to `numthreads` threads with
 * blocks of `blocksize`, writing it into dimGrid/dimBlock and returning
 * the number of threads actually covered.  Callers loop, subtracting the
 * return value and advancing their offset, until everything is covered.
 * Bug fix: the second branch tested `numthreads > blocksize`, so an
 * exact fit (numthreads == blocksize) fell through with
 * threads_1d = blocksize % blocksize = 0, returning 0 and hanging the
 * callers' while (numthreads > 0) loops.  `>=` covers the exact fit. */
int dimPick(dim3 &dimGrid,
            dim3 &dimBlock,
            int numthreads,
            int blocksize)
{
    unsigned int blocks_y = 1;
    unsigned int blocks_x = 1;
    unsigned int threads_1d = numthreads % blocksize;   /* < blocksize leftover */
    if (numthreads > (256 * blocksize)) {
        blocks_y = numthreads / (256 * blocksize);
        blocks_x = 256;
        threads_1d = blocksize;
    } else if (numthreads >= blocksize) {
        blocks_x = numthreads / blocksize;
        threads_1d = blocksize;
    }
    unsigned int threads_used = blocks_y * blocks_x * threads_1d;
    numthreads -= threads_used;
    //printf("dimPick %d %d %d\n", blocks_y, blocks_x, threads_1d);
    dimGrid = dim3(blocks_x, blocks_y);
    dimBlock = dim3(threads_1d);
    return threads_used;
}
/* Build the pattern bloom filter: zero dev_bloom, then launch grepSetup
 * with one thread per pattern line (grepsize / (HASH_LEN + 1) lines),
 * chunking the launches with dimPick until every pattern is hashed. */
void setup_bloom_search(int grepsize,
                        unsigned char *dev_greps,
                        unsigned int *dev_bloom)
{
    exitOnError("setup_bloom_search cudaMemSet dev_bloom = 0",
                cudaMemset(dev_bloom, 0, BLOOMBITS/8));
    int numthreads = grepsize / (HASH_LEN + 1);
    unsigned int char_offset = 0;  /* pattern-line offset of the next chunk */
    dim3 dimGrid, dimBlock;
    while (numthreads > 0) {
        unsigned int tu = dimPick(dimGrid, dimBlock, numthreads, BLOCK_SIZE);
        printf("Executing grepSetup (%d,%d,%d)\n", dimGrid.x, dimGrid.y, dimBlock.x);
        grepSetup<<<dimGrid, dimBlock>>>(dev_greps, dev_bloom, char_offset);
        checkReportCudaStatus("grepSetup kernel");
        numthreads -= tu;
        char_offset += tu;
    }
}
/*
void executeGrep(int filesize,
unsigned char *dev_chars,
unsigned int *dev_bloom,
unsigned int *dev_reverse_bloom,
unsigned int *dev_positions_matched)
{
int numthreads = filesize;
unsigned int char_offset = 0;
dim3 dimGrid, dimBlock;
exitOnError("executeGrep cudaMemSet dev_reverse_bloom = 0",
cudaMemset(dev_reverse_bloom, 0, BLOOMBITS/8));
printf("Executing grep on %d\n", filesize);
while (numthreads > 0) {
unsigned int tu = dimPick(dimGrid, dimBlock, numthreads, BLOCK_SIZE);
printf("Executing GrepKernel (%d,%d,%d) @ %u\n", dimGrid.x, dimGrid.y, dimBlock.x, char_offset);
GrepKernel<<<dimGrid, dimBlock>>>(dev_chars, dev_bloom, dev_reverse_bloom,
dev_positions_matched, char_offset, filesize);
numthreads -= tu;
char_offset += tu;
}
}
*/
/* Phase 3 driver: stream the corpus file to the device in NR_STREAMS
 * chunks, overlapping each pinned-host read() + async H2D copy with
 * GrepKernel launches on the same stream.  Match positions accumulate in
 * dev_positions_matched and the reverse bloom bits in dev_reverse_bloom;
 * the device corpus buffer is returned via *devMemPtr (and pinnedBuf
 * keeps the host copy for printpositions later).
 * NOTE(review): read() return values are unchecked, and a chunk's final
 * HASH_LEN-char windows read bytes copied by the *next* stream with no
 * cross-stream ordering — confirm whether that boundary race matters. */
void executeGrepOverlap(char *filename,
                        unsigned char **devMemPtr,
                        unsigned int *dev_bloom,
                        unsigned int *dev_reverse_bloom,
                        unsigned int *dev_positions_matched)
{
    size_t filesize;
    int f = getfile(filename, &filesize);
    if (f == -1) {
        perror(filename);
        exit(-1);
    }
    filesize = min((unsigned long long)filesize, (unsigned long long)FILE_MAX);
    /* Pinned host memory is required for cudaMemcpyAsync to overlap. */
    exitOnError("cudaMallocHost", cudaMallocHost(&pinnedBuf, filesize));
    exitOnError("cudaMalloc",
                cudaMalloc(devMemPtr, filesize + HASH_LEN));
    unsigned char *dev_chars = *devMemPtr;
    int numthreads;
    dim3 dimGrid, dimBlock;
    exitOnError("executeGrep cudaMemSet dev_reverse_bloom = 0",
                cudaMemset(dev_reverse_bloom, 0, BLOOMBITS/8));
    cudaStream_t streams[NR_STREAMS];
    for (int i = 0; i < NR_STREAMS; i++) {
        exitOnError("cudaStreamCreate",
                    cudaStreamCreate(&streams[i]));
    }
    int size = filesize / NR_STREAMS;
    int fd = open(filename, O_RDONLY);
    for (int i = 0; i < NR_STREAMS; i++) {
        unsigned offset = i * size;
        /* Last chunk absorbs the division remainder. */
        if (i == NR_STREAMS - 1) {
            size = filesize - i * size;
        }
        numthreads = size;
        printf("Executing grep on %d\n", size);
        read(fd, pinnedBuf + offset, size);
        exitOnError("cudaMemcpyAsync",
                    cudaMemcpyAsync(dev_chars + offset, pinnedBuf + offset, size, cudaMemcpyHostToDevice, streams[i]));
        unsigned int char_offset = 0;
        while (numthreads > 0) {
            unsigned int tu = dimPick(dimGrid, dimBlock, numthreads, BLOCK_SIZE);
            printf("Executing GrepKernel (%d,%d,%d) @ %u\n", dimGrid.x, dimGrid.y, dimBlock.x, (offset + char_offset));
            GrepKernel<<<dimGrid, dimBlock, 0, streams[i]>>>(dev_chars, dev_bloom, dev_reverse_bloom,
                                                             dev_positions_matched, offset + char_offset, size);
            checkReportCudaStatus("GrepKernel");
            numthreads -= tu;
            char_offset += tu;
        }
    }
    close(f);
    close(fd);
    /* NOTE: cudaThreadSynchronize() is the deprecated spelling of
     * cudaDeviceSynchronize(). */
    cudaThreadSynchronize();
    for (int i = 0; i < NR_STREAMS; i++) {
        cudaStreamDestroy(streams[i]);
    }
}
/* Launch filterPatterns over every pattern line (grepsize /
 * (HASH_LEN + 1) threads), chunked via dimPick; surviving patterns are
 * marked in the dev_patterns_matched bit vector. */
void executePatternFiltering(int grepsize,
                             unsigned char *dev_greps,
                             unsigned int *dev_reverse_bloom,
                             unsigned int *dev_patterns_matched)
{
    int numthreads = grepsize / (HASH_LEN + 1);
    printf("NUMTHREADS = %d\n", numthreads);
    unsigned int line_offset = 0;  /* pattern-line offset of the next chunk */
    dim3 dimGrid, dimBlock;
    while (numthreads > 0) {
        unsigned int tu = dimPick(dimGrid, dimBlock, numthreads, BLOCK_SIZE);
        printf("Executing pattern filtering (%d,%d,%d)\n", dimGrid.x, dimGrid.y, dimBlock.x);
        filterPatterns<<<dimGrid, dimBlock>>>(dev_greps, dev_reverse_bloom, dev_patterns_matched, line_offset);
        checkReportCudaStatus("filterPatterns kernel");
        numthreads -= tu;
        line_offset += tu;
    }
}
/* Feed-forward bloom grep driver.
 * Pipeline: (1) upload truncated patterns and build the bloom filter,
 * (2) load the full pattern strings on the host, (3) stream the corpus
 * through GrepKernel, (4) filter patterns through the reverse bloom,
 * then copy both result bit vectors back and print matches. */
int main(int argc, char **argv)
{
    if (argc != 5)
    {
        cerr << "usage: ./greptest truncatedPatternsFile corpus fullPatternsFile outPatternsFile" << endl;
        return -1;
    }
    char *searchfile = argv[1];
    char *infile = argv[2];
    char *full_patterns_file = argv[3];
    char *out_patterns_file = argv[4];
    unsigned char *dev_chars;
    unsigned char *dev_greps;
    /* Bit vectors */
    unsigned int *dev_bloom, *dev_reverse_bloom, *dev_positions_matched, *dev_patterns_matched;
    timing_stamp("start", false);
    /* Phase 1: patterns to device + bloom filter build. */
    size_t grepsize = filetodevice(searchfile, (void **)&dev_greps);
    exitOnError("cudaMalloc dev_bloom",
                cudaMalloc((void **)&dev_bloom, BLOOMBITS/8));
    exitOnError("cudaMalloc dev_reverse_bloom",
                cudaMalloc((void **)&dev_reverse_bloom, BLOOMBITS/8));
    setup_bloom_search(grepsize, dev_greps, dev_bloom);
    /* Bind the bloom filter to a texture */
    exitOnError("cudaBindTexture tex_bloom to dev_bloom",
                cudaBindTexture(NULL, tex_bloom, dev_bloom, BLOOMBITS/8));
    //bvDump(searchfile, dev_bloom, BLOOMBITS);
    //index patterns by line
    /* Phase 2: read the full pattern strings for later printing. */
    int nr_patterns = grepsize / (HASH_LEN + 1);
    char **patterns = new char*[nr_patterns];
    int *lengths = new int[nr_patterns];
    ifstream in_patterns(full_patterns_file);
    printf("nr_patterns = %d\n", nr_patterns);
    for (int i = 0; i < nr_patterns; i++) {
        char line[1001];
        in_patterns.getline(line, 1000);
        patterns[i] = new char[strlen(line) + 1];
        memcpy(patterns[i], line, strlen(line) + 1);
        lengths[i] = strlen(patterns[i]);
    }
    in_patterns.close();
    timing_stamp("setup complete", false);
    printf("GPUGrep opening %s\n", infile);
    size_t filesize;
    int f = getfile(infile, &filesize);
    if (f == -1) {
        perror(infile);
        exit(-1);
    }
    exitOnError("cudaMalloc dev_positions_matched",
                cudaMalloc((void **)&dev_positions_matched, filesize / 8 + 1));
    exitOnError("cudaMemset dev_positions_matched = 0",
                cudaMemset(dev_positions_matched, 0, filesize/8 + 1));
    timing_stamp("posmatch init", false);
    printf("\nPhase 3: Executing kernel\n");
    executeGrepOverlap(infile, &dev_chars, dev_bloom, dev_reverse_bloom, dev_positions_matched);
    cudaThreadSynchronize();
    timing_stamp("grep done", false);
    checkReportCudaStatus("Grep Kernel");
    /* NOTE(review): nr_patterns / 8 rounds down, so the bitmap may be a
     * byte short when nr_patterns is not a multiple of 8 — confirm. */
    exitOnError("cudaMalloc dev_patterns_matched",
                cudaMalloc((void **)&dev_patterns_matched, nr_patterns / 8));
    exitOnError("cudaMemset dev_patterns_matched = 0",
                cudaMemset(dev_patterns_matched, 0, nr_patterns / 8));
    executePatternFiltering(grepsize, dev_greps, dev_reverse_bloom, dev_patterns_matched);
    cudaThreadSynchronize();
    cudaFree(dev_greps);
    timing_stamp("patterns filtering done", false);
    /* Idea:
     * Record array of bit positions to check + chars;
     * Sort that array.
     * Divvy up the array to threads. Compute min, max of the bit vector
     * address space accessed by that array, and pull that part of the BV array
     * (as much as fits?) into local shared memory. Check in parallel, and
     * issue atomic increments to the set bit positions into a global count
     * array (presumably somewhat rare???).
     * If that takes too long, then output the maps of counts and
     * char offsets, sort that, merge, and then do the bit sets. */
    /* But : radixSort only gets 20 MElements/sec; very possibly
     * slower than what we're already doing. */
#if 1
    printf("\nPhase 4: Copying results to host memory.\n");
    unsigned int *host_positions_matched = (unsigned int *)malloc(filesize / 8);
    /* Allocated with grepsize bytes, far more than the
     * grepsize / (HASH_LEN + 1) / 8 bytes actually copied below. */
    unsigned int *host_patterns_matched = (unsigned int *)malloc(grepsize);
    exitOnError("cudaMemcpy corpus results to host",
                cudaMemcpy(host_positions_matched, dev_positions_matched,
                           filesize / 8, cudaMemcpyDeviceToHost));
    exitOnError("cudaMemcpy pattern results to host",
                cudaMemcpy(host_patterns_matched, dev_patterns_matched,
                           grepsize / (HASH_LEN + 1) / 8, cudaMemcpyDeviceToHost));
    timing_stamp("copyout done", false);
#if 1
    printpositions(infile, host_positions_matched, filesize / 32);
    printpatterns(patterns, lengths, host_patterns_matched, grepsize / (HASH_LEN + 1) / 32, out_patterns_file);
    timing_stamp("printout done", false);
#endif
    printf("\n");
    free(host_positions_matched);
    free(host_patterns_matched);
#endif
    cudaFree(dev_bloom);
    cudaFree(dev_reverse_bloom);
    cudaFree(dev_chars);
    cudaFree(dev_positions_matched);
    timing_stamp("cleanup done", true);
    timing_report();
    struct cudaDeviceProp cdp;
    cudaGetDeviceProperties(&cdp, 0);
    printf("\ndeviceOverlap = %d\n", cdp.deviceOverlap);
}
|
8ba2647cb9fd4222963cf1244981629cafcb53bd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
/* Element-wise vector add: z[i] = x[i] + y[i] for i < n.
 * Fix: the kernel received `n` but never used it — any launch whose grid
 * covers more than n threads would read/write out of bounds.  A bounds
 * guard is the standard CUDA idiom and is a no-op for exact-fit grids. */
__global__
void add(int n, float *x, float *y, float *z){
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if (index < n) {
        z[index] = x[index] + y[index];
    }
}
int main(void){
int N = 1<<20;
float *x, *y, *z;
hipMallocManaged(&x, N * sizeof(float));
hipMallocManaged(&y, N * sizeof(float));
hipMallocManaged(&z, N * sizeof(float));
for(int i = 0; i < N; i++){
x[i] = 1;
y[i] = 2;
}
int blockSize = 256;
int numBlocks = (N + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( add), dim3(numBlocks), dim3(blockSize), 0, 0, N, x, y, z);
hipDeviceSynchronize();
float maxError = 0;
for(int i = 0; i < N; i++){
maxError = fmax(maxError, fabs(z[i] - 3));
}
std::cout << "Error: " << maxError << std::endl;
hipFree(x);
hipFree(y);
hipFree(z);
return 0;
} | 8ba2647cb9fd4222963cf1244981629cafcb53bd.cu | #include <iostream>
#include <math.h>
/* Element-wise vector add: z[i] = x[i] + y[i] for i < n.
 * Fix: the kernel received `n` but never used it — any launch whose grid
 * covers more than n threads would read/write out of bounds.  A bounds
 * guard is the standard CUDA idiom and is a no-op for exact-fit grids. */
__global__
void add(int n, float *x, float *y, float *z){
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if (index < n) {
        z[index] = x[index] + y[index];
    }
}
int main(void){
int N = 1<<20;
float *x, *y, *z;
cudaMallocManaged(&x, N * sizeof(float));
cudaMallocManaged(&y, N * sizeof(float));
cudaMallocManaged(&z, N * sizeof(float));
for(int i = 0; i < N; i++){
x[i] = 1;
y[i] = 2;
}
int blockSize = 256;
int numBlocks = (N + blockSize - 1) / blockSize;
add<<<numBlocks, blockSize>>>(N, x, y, z);
cudaDeviceSynchronize();
float maxError = 0;
for(int i = 0; i < N; i++){
maxError = fmax(maxError, fabs(z[i] - 3));
}
std::cout << "Error: " << maxError << std::endl;
cudaFree(x);
cudaFree(y);
cudaFree(z);
return 0;
} |
a3079929bc19012a97d3292356e91ed2f37817a4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "sort.h"
#include "../Helper_Code/timer.h"
#include <algorithm>
#define BLOCK_DIM 512
#define ELEMENTS_PER_MERGE_THREAD 8
#define THREADS_PER_MERGE_BLOCK 64
#define ELEMENTS_PER_MERGE_BLOCK (ELEMENTS_PER_MERGE_THREAD * THREADS_PER_MERGE_BLOCK)
/* Sequential two-way merge of sorted runs A (length n) and B (length m)
 * into C, executed by a single device thread.  Ties go to B (stable with
 * respect to A-before-B ordering is NOT guaranteed on equal keys from A). */
template <typename T>
__device__ void mergeSequential(const T *A, const T *B, T *C, unsigned int n, unsigned int m){
    unsigned int ai = 0, bi = 0, ci = 0;
    while (ai < n && bi < m) {
        C[ci++] = (A[ai] < B[bi]) ? A[ai++] : B[bi++];
    }
    while (ai < n) { C[ci++] = A[ai++]; }
    while (bi < m) { C[ci++] = B[bi++]; }
}
/* Co-rank binary search: given the output position k of the merge of
 * sorted runs A (length n) and B (length m), return i such that the first
 * k merged elements are exactly A[0..i) and B[0..k-i).  Used by
 * mergeKernel to partition the merge across threads. */
template <typename T>
__device__ unsigned int getCoRank(const T *A, const T *B, unsigned int n, unsigned int m, unsigned int k){
    /* i must satisfy max(0, k-m) <= i <= min(k, n). */
    unsigned int l = (k > m) ? (k - m) : 0;
    unsigned int r = (k < n) ? k : n;
    while(true){
        unsigned int i = (l + r) / 2;
        unsigned int j = k - i;
        /* Too many from A -> shrink right bound; too few -> grow left. */
        if (i > 0 && j < m && A[i-1] > B[j]) { r = i - 1; }
        else if (j > 0 && i < n && B[j-1] > A[i]){ l = i + 1; }
        else { return i; }
    }
}
/* Parallel merge: each thread produces ELEMENTS_PER_MERGE_THREAD
 * consecutive outputs of C = merge(A, B), finding its private input
 * slices with two co-rank searches so threads never overlap. */
template <typename T>
__global__ void mergeKernel(const T *A, const T *B, T *C, unsigned int n, unsigned int m){
    unsigned int k = (blockIdx.x * blockDim.x + threadIdx.x) * ELEMENTS_PER_MERGE_THREAD;
    if (k < m + n){
        unsigned int i = getCoRank<T>(A, B, n, m, k);
        unsigned int j = k-i;
        /* Clamp the last thread's slice to the end of the output. */
        unsigned int kNext = (k + ELEMENTS_PER_MERGE_THREAD < n + m) ? (k + ELEMENTS_PER_MERGE_THREAD) : (n + m);
        unsigned int iNext = getCoRank<T>(A, B, n, m, kNext);
        unsigned int jNext = kNext - iNext;
        mergeSequential<T>(&A[i], &B[j], &C[k], iNext - i, jNext - j);
    }
}
/* Device copy kernel: output_d[i] = input_d[i] for every i < N. */
template <typename T>
__global__ void setOutputKernel(const T* input_d, T* output_d, unsigned int N){
    unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < N) {
        output_d[idx] = input_d[idx];
    }
}
/* One pass of bottom-up merge sort: thread i sequentially merges the (up
 * to) two sorted runs of length `stride` starting at i*2*stride in
 * output_d into tempOutput_d, clamping run lengths at the array tail
 * (a lone tail run is copied via an m == 0 merge). */
template <typename T>
__global__ void mergeSortKernel(T* output_d, T* tempOutput_d, unsigned int stride, unsigned int N, unsigned int numThreadsNeeded){
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= numThreadsNeeded){ return; }
    unsigned int startIdx = i * 2 * stride;
    unsigned int n = stride, m = stride;
    if (startIdx + stride >= N){ n = N - startIdx; m = 0; }
    else if (startIdx + 2 * stride - 1 >= N) { m = N - (startIdx + stride); }
    mergeSequential<T>(&output_d[startIdx], &output_d[startIdx + n], &tempOutput_d[startIdx], n, m);
}
/* Bottom-up merge sort on the device: copy input_d into output_d, then
 * double the sorted-run length each pass, ping-ponging between output_d
 * and tempOutput_d.  Small strides use one sequential merge per thread
 * (mergeSortKernel); strides >= 10000 switch to the co-rank parallel
 * mergeKernel, one launch per run pair.  outputIsCorrect tracks which
 * buffer holds the live data so a final copy can fix the parity.
 * NOTE(review): hipMalloc's return status is not checked here. */
template <typename T>
void mergeSortGPUHelper(const T* input_d, T* output_d, unsigned int N){
    unsigned int numBlocks = (N + BLOCK_DIM - 1) / BLOCK_DIM;
    T *tempOutput_d;
    hipMalloc((void**) &tempOutput_d, N*sizeof(T));
    hipLaunchKernelGGL(( setOutputKernel<T>) , dim3(numBlocks), dim3(BLOCK_DIM) , 0, 0, input_d, output_d, N);
    bool outputIsCorrect = true;
    for (unsigned int stride = 1; stride < N; stride *= 2) {
        if (stride >= 10000){
            /* Enough blocks for one 2*stride-element merge. */
            unsigned int numBlocks = (2*stride + ELEMENTS_PER_MERGE_BLOCK - 1) / ELEMENTS_PER_MERGE_BLOCK;
            for(unsigned int i = 0; i < N; i += 2 * stride){
                unsigned int n = stride, m = stride;
                if (i + stride >= N){ n = N - i; m = 0;}
                else if (i + 2*stride >= N) { m = N - (i+stride); }
                hipLaunchKernelGGL(( mergeKernel<T>) , dim3(numBlocks), dim3(THREADS_PER_MERGE_BLOCK) , 0, 0, &output_d[i], &output_d[i+n], &tempOutput_d[i], n, m);
            }
        }
        else{
            unsigned int numThreadsNeeded = (N + 2 * stride - 1) / (2 * stride);
            numBlocks = (numThreadsNeeded + BLOCK_DIM - 1) / BLOCK_DIM;
            hipLaunchKernelGGL(( mergeSortKernel<T>) , dim3(numBlocks), dim3(BLOCK_DIM) , 0, 0, output_d, tempOutput_d, stride, N, numThreadsNeeded);
        }
        /* Swap local pointers only; caller's output_d still names the
         * original allocation, hence the parity fix-up below. */
        std::swap(tempOutput_d, output_d);
        outputIsCorrect = !outputIsCorrect;
    }
    if (!outputIsCorrect){
        std::swap(tempOutput_d, output_d);
        numBlocks = (N + BLOCK_DIM - 1) / BLOCK_DIM;
        hipLaunchKernelGGL(( setOutputKernel<T>) , dim3(numBlocks), dim3(BLOCK_DIM) , 0, 0, tempOutput_d, output_d, N);
    }
    hipFree(tempOutput_d);
}
/* Host wrapper for the GPU merge sort: allocates device buffers, copies
 * `input` up, runs mergeSortGPUHelper, copies the sorted result into
 * `output`, and frees the buffers — timing each stage with Timer. */
template <typename T>
void mergeSortGPU(const T* input, T* output, unsigned int N){
    Timer timer;
    // Allocating GPU memory
    startTime(&timer);
    T *input_d, *output_d;
    hipMalloc((void**) &input_d, N*sizeof(T));
    hipMalloc((void**) &output_d, N*sizeof(T));
    hipDeviceSynchronize();
    stopTime(&timer);
    printElapsedTime(timer, "GPU Allocation time");
    //Copying data to GPU from Host
    startTime(&timer);
    hipMemcpy(input_d, input, N*sizeof(T), hipMemcpyHostToDevice);
    hipDeviceSynchronize();
    stopTime(&timer);
    printElapsedTime(timer, "Copying to GPU time");
    // Calling kernel
    startTime(&timer);
    mergeSortGPUHelper<T>(input_d, output_d, N);
    hipDeviceSynchronize();
    stopTime(&timer);
    printElapsedTime(timer, "GPU kernel time", GREEN);
    //Copying data from GPU to Host
    startTime(&timer);
    hipMemcpy(output, output_d, N*sizeof(T), hipMemcpyDeviceToHost);
    hipDeviceSynchronize();
    stopTime(&timer);
    printElapsedTime(timer, "Copying from GPU time");
    // Freeing memory
    startTime(&timer);
    hipFree(input_d); hipFree(output_d);
    hipDeviceSynchronize();
    stopTime(&timer);
    printElapsedTime(timer, "Deallocation time");
}
//Explicit instantiation to use different types
template void mergeSortGPU(const unsigned int* input, unsigned int* output, unsigned int N); | a3079929bc19012a97d3292356e91ed2f37817a4.cu | #include "sort.h"
#include "../Helper_Code/timer.h"
#include <algorithm>
#define BLOCK_DIM 512
#define ELEMENTS_PER_MERGE_THREAD 8
#define THREADS_PER_MERGE_BLOCK 64
#define ELEMENTS_PER_MERGE_BLOCK (ELEMENTS_PER_MERGE_THREAD * THREADS_PER_MERGE_BLOCK)
/* Sequential two-way merge of sorted runs A (length n) and B (length m)
 * into C, executed by a single device thread. */
template <typename T>
__device__ void mergeSequential(const T *A, const T *B, T *C, unsigned int n, unsigned int m){
    unsigned int ai = 0, bi = 0, ci = 0;
    while (ai < n && bi < m) {
        C[ci++] = (A[ai] < B[bi]) ? A[ai++] : B[bi++];
    }
    while (ai < n) { C[ci++] = A[ai++]; }
    while (bi < m) { C[ci++] = B[bi++]; }
}
/* Co-rank binary search: given the output position k of the merge of
 * sorted runs A (length n) and B (length m), return i such that the first
 * k merged elements are exactly A[0..i) and B[0..k-i). */
template <typename T>
__device__ unsigned int getCoRank(const T *A, const T *B, unsigned int n, unsigned int m, unsigned int k){
    /* i must satisfy max(0, k-m) <= i <= min(k, n). */
    unsigned int l = (k > m) ? (k - m) : 0;
    unsigned int r = (k < n) ? k : n;
    while(true){
        unsigned int i = (l + r) / 2;
        unsigned int j = k - i;
        /* Too many from A -> shrink right bound; too few -> grow left. */
        if (i > 0 && j < m && A[i-1] > B[j]) { r = i - 1; }
        else if (j > 0 && i < n && B[j-1] > A[i]){ l = i + 1; }
        else { return i; }
    }
}
/* Parallel merge: each thread produces ELEMENTS_PER_MERGE_THREAD
 * consecutive outputs of C = merge(A, B), finding its private input
 * slices with two co-rank searches so threads never overlap. */
template <typename T>
__global__ void mergeKernel(const T *A, const T *B, T *C, unsigned int n, unsigned int m){
    unsigned int k = (blockIdx.x * blockDim.x + threadIdx.x) * ELEMENTS_PER_MERGE_THREAD;
    if (k < m + n){
        unsigned int i = getCoRank<T>(A, B, n, m, k);
        unsigned int j = k-i;
        /* Clamp the last thread's slice to the end of the output. */
        unsigned int kNext = (k + ELEMENTS_PER_MERGE_THREAD < n + m) ? (k + ELEMENTS_PER_MERGE_THREAD) : (n + m);
        unsigned int iNext = getCoRank<T>(A, B, n, m, kNext);
        unsigned int jNext = kNext - iNext;
        mergeSequential<T>(&A[i], &B[j], &C[k], iNext - i, jNext - j);
    }
}
/* Device copy kernel: output_d[i] = input_d[i] for every i < N. */
template <typename T>
__global__ void setOutputKernel(const T* input_d, T* output_d, unsigned int N){
    unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < N) {
        output_d[idx] = input_d[idx];
    }
}
/* One pass of bottom-up merge sort: thread i sequentially merges the (up
 * to) two sorted runs of length `stride` starting at i*2*stride in
 * output_d into tempOutput_d, clamping run lengths at the array tail. */
template <typename T>
__global__ void mergeSortKernel(T* output_d, T* tempOutput_d, unsigned int stride, unsigned int N, unsigned int numThreadsNeeded){
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= numThreadsNeeded){ return; }
    unsigned int startIdx = i * 2 * stride;
    unsigned int n = stride, m = stride;
    if (startIdx + stride >= N){ n = N - startIdx; m = 0; }
    else if (startIdx + 2 * stride - 1 >= N) { m = N - (startIdx + stride); }
    mergeSequential<T>(&output_d[startIdx], &output_d[startIdx + n], &tempOutput_d[startIdx], n, m);
}
/* Bottom-up merge sort on the device: copy input_d into output_d, then
 * double the sorted-run length each pass, ping-ponging between output_d
 * and tempOutput_d.  Small strides use one sequential merge per thread
 * (mergeSortKernel); strides >= 10000 switch to the co-rank parallel
 * mergeKernel, one launch per run pair.  outputIsCorrect tracks which
 * buffer holds the live data so a final copy can fix the parity.
 * NOTE(review): cudaMalloc's return status is not checked here. */
template <typename T>
void mergeSortGPUHelper(const T* input_d, T* output_d, unsigned int N){
    unsigned int numBlocks = (N + BLOCK_DIM - 1) / BLOCK_DIM;
    T *tempOutput_d;
    cudaMalloc((void**) &tempOutput_d, N*sizeof(T));
    setOutputKernel<T> <<< numBlocks, BLOCK_DIM >>> (input_d, output_d, N);
    bool outputIsCorrect = true;
    for (unsigned int stride = 1; stride < N; stride *= 2) {
        if (stride >= 10000){
            /* Enough blocks for one 2*stride-element merge. */
            unsigned int numBlocks = (2*stride + ELEMENTS_PER_MERGE_BLOCK - 1) / ELEMENTS_PER_MERGE_BLOCK;
            for(unsigned int i = 0; i < N; i += 2 * stride){
                unsigned int n = stride, m = stride;
                if (i + stride >= N){ n = N - i; m = 0;}
                else if (i + 2*stride >= N) { m = N - (i+stride); }
                mergeKernel<T> <<< numBlocks, THREADS_PER_MERGE_BLOCK >>> (&output_d[i], &output_d[i+n], &tempOutput_d[i], n, m);
            }
        }
        else{
            unsigned int numThreadsNeeded = (N + 2 * stride - 1) / (2 * stride);
            numBlocks = (numThreadsNeeded + BLOCK_DIM - 1) / BLOCK_DIM;
            mergeSortKernel<T> <<< numBlocks, BLOCK_DIM >>> (output_d, tempOutput_d, stride, N, numThreadsNeeded);
        }
        /* Swap local pointers only; caller's output_d still names the
         * original allocation, hence the parity fix-up below. */
        std::swap(tempOutput_d, output_d);
        outputIsCorrect = !outputIsCorrect;
    }
    if (!outputIsCorrect){
        std::swap(tempOutput_d, output_d);
        numBlocks = (N + BLOCK_DIM - 1) / BLOCK_DIM;
        setOutputKernel<T> <<< numBlocks, BLOCK_DIM >>> (tempOutput_d, output_d, N);
    }
    cudaFree(tempOutput_d);
}
/* Host wrapper for the GPU merge sort: allocates device buffers, copies
 * `input` up, runs mergeSortGPUHelper, copies the sorted result into
 * `output`, and frees the buffers — timing each stage with Timer. */
template <typename T>
void mergeSortGPU(const T* input, T* output, unsigned int N){
    Timer timer;
    // Allocating GPU memory
    startTime(&timer);
    T *input_d, *output_d;
    cudaMalloc((void**) &input_d, N*sizeof(T));
    cudaMalloc((void**) &output_d, N*sizeof(T));
    cudaDeviceSynchronize();
    stopTime(&timer);
    printElapsedTime(timer, "GPU Allocation time");
    //Copying data to GPU from Host
    startTime(&timer);
    cudaMemcpy(input_d, input, N*sizeof(T), cudaMemcpyHostToDevice);
    cudaDeviceSynchronize();
    stopTime(&timer);
    printElapsedTime(timer, "Copying to GPU time");
    // Calling kernel
    startTime(&timer);
    mergeSortGPUHelper<T>(input_d, output_d, N);
    cudaDeviceSynchronize();
    stopTime(&timer);
    printElapsedTime(timer, "GPU kernel time", GREEN);
    //Copying data from GPU to Host
    startTime(&timer);
    cudaMemcpy(output, output_d, N*sizeof(T), cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    stopTime(&timer);
    printElapsedTime(timer, "Copying from GPU time");
    // Freeing memory
    startTime(&timer);
    cudaFree(input_d); cudaFree(output_d);
    cudaDeviceSynchronize();
    stopTime(&timer);
    printElapsedTime(timer, "Deallocation time");
}
//Explicit instantiation to use different types
template void mergeSortGPU(const unsigned int* input, unsigned int* output, unsigned int N); |
bd0f734ff5298b9bb25491ecce54cbb1afcf5bde.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef _TIMER_
#include "hip/hip_runtime_api.h"
#endif
#include "stdio.h"
#include <rocm_smi/rocm_smi.h>
#include <assert.h>
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
input[loc] = value;
}
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
dim3 init_block(FORMA_MAX_BLOCKDIM_0);
hipLaunchKernelGGL(( __kernel_init__), dim3(init_grid),dim3(init_block), 0, 0, d_input,value);
}
void Check_CUDA_Error(const char* message);
/*Texture references */
/* Device code Begin */
__global__ void __kernel___forma_kernel__0__(struct float4 * __restrict__ input, float dx, float dy, float dz, int L, int M, int N, struct float4 * __restrict__ __var_2__){
int FORMA_BLOCKDIM_Z = (int)(blockDim.z);
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int __iter_0__;
__iter_0__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 0;
if(__iter_0__ <= (N-2)){
int __iter_1__;
__iter_1__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 0;
if(__iter_1__ <= (M-2)){
int __iter_2__;
__iter_2__ = (int)(blockIdx.z)*(int)(FORMA_BLOCKDIM_Z) + (int)(threadIdx.z) + 0;
if(__iter_2__ <= (L-2)){
float __temp_0__;
__temp_0__ = (input[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__+(1)))].y - input[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__))].y);
float __temp_1__;
__temp_1__ = (dz * __temp_0__);
float __temp_2__;
__temp_2__ = (input[__iter_0__+(N-0)*(__iter_1__+(1)+(M-0)*(__iter_2__))].z - input[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__))].z);
float __temp_3__;
__temp_3__ = (dy * __temp_2__);
float __temp_4__;
__temp_4__ = (__temp_1__ - __temp_3__);
float __temp_5__;
__temp_5__ = (input[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__+(1)))].x - input[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__))].x);
float __temp_6__;
__temp_6__ = (dz * __temp_5__);
float __temp_7__;
__temp_7__ = (input[__iter_0__+(1)+(N-0)*(__iter_1__+(M-0)*(__iter_2__))].z - input[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__))].z);
float __temp_8__;
__temp_8__ = (dx * __temp_7__);
float __temp_9__;
__temp_9__ = (__temp_6__ - __temp_8__);
float __temp_10__;
__temp_10__ = (input[__iter_0__+(N-0)*(__iter_1__+(1)+(M-0)*(__iter_2__))].x - input[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__))].x);
float __temp_11__;
__temp_11__ = (dy * __temp_10__);
float __temp_12__;
__temp_12__ = (input[__iter_0__+(1)+(N-0)*(__iter_1__+(M-0)*(__iter_2__))].y - input[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__))].y);
float __temp_13__;
__temp_13__ = (dx * __temp_12__);
float __temp_14__;
__temp_14__ = (__temp_11__ - __temp_13__);
float __temp_15__;
__temp_15__ = (__temp_4__ + input[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__))].x);
float __temp_16__;
__temp_16__ = (__temp_9__ + input[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__))].y);
float __temp_17__;
__temp_17__ = (__temp_14__ + input[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__))].z);
__var_2__[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__))].x = __temp_15__;
__var_2__[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__))].y = __temp_16__;
__var_2__[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__))].z = __temp_17__;
__var_2__[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__))].w = 0.000000f;
}
}
}
}
__global__ void __kernel___forma_kernel__1__(struct float4 * __restrict__ __var_2__, float dx, float dy, float dz, int L, int M, int N, struct float4 * __restrict__ __var_1__){
int FORMA_BLOCKDIM_Z = (int)(blockDim.z);
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int __iter_3__;
__iter_3__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 1;
if(__iter_3__ <= (N-1)){
int __iter_4__;
__iter_4__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 1;
if(__iter_4__ <= (M-1)){
int __iter_5__;
__iter_5__ = (int)(blockIdx.z)*(int)(FORMA_BLOCKDIM_Z) + (int)(threadIdx.z) + 1;
if(__iter_5__ <= (L-1)){
float __temp_18__;
__temp_18__ = (__var_2__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__))].y - __var_2__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__+(-1)))].y);
float __temp_19__;
__temp_19__ = (dz * __temp_18__);
float __temp_20__;
__temp_20__ = (__var_2__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__))].z - __var_2__[__iter_3__+(N-0)*(__iter_4__+(-1)+(M-0)*(__iter_5__))].z);
float __temp_21__;
__temp_21__ = (dy * __temp_20__);
float __temp_22__;
__temp_22__ = (__temp_19__ - __temp_21__);
float __temp_23__;
__temp_23__ = (__var_2__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__))].x - __var_2__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__+(-1)))].x);
float __temp_24__;
__temp_24__ = (dz * __temp_23__);
float __temp_25__;
__temp_25__ = (__var_2__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__))].z - __var_2__[__iter_3__+(-1)+(N-0)*(__iter_4__+(M-0)*(__iter_5__))].z);
float __temp_26__;
__temp_26__ = (dx * __temp_25__);
float __temp_27__;
__temp_27__ = (__temp_24__ - __temp_26__);
float __temp_28__;
__temp_28__ = (__var_2__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__))].x - __var_2__[__iter_3__+(N-0)*(__iter_4__+(-1)+(M-0)*(__iter_5__))].x);
float __temp_29__;
__temp_29__ = (dy * __temp_28__);
float __temp_30__;
__temp_30__ = (__var_2__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__))].y - __var_2__[__iter_3__+(-1)+(N-0)*(__iter_4__+(M-0)*(__iter_5__))].y);
float __temp_31__;
__temp_31__ = (dx * __temp_30__);
float __temp_32__;
__temp_32__ = (__temp_29__ - __temp_31__);
float __temp_33__;
__temp_33__ = (__temp_22__ + __var_2__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__))].x);
float __temp_34__;
__temp_34__ = (__temp_27__ + __var_2__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__))].y);
float __temp_35__;
__temp_35__ = (__temp_32__ + __var_2__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__))].z);
__var_1__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__))].x = __temp_33__;
__var_1__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__))].y = __temp_34__;
__var_1__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__))].z = __temp_35__;
__var_1__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__))].w = 0.000000f;
}
}
}
}
/*Device code End */
/* Host Code Begin */
extern "C" void curl(struct float4 * h_input, float dx, float dy, float dz, int L, int M, int N, struct float4 * __var_0__){
/* Host allocation Begin */
struct float4 * input;
hipMalloc(&input,sizeof(struct float4)*((L-0)*(M-0)*(N-0)));
Check_CUDA_Error("Allocation Error!! : input\n");
hipPointerAttribute_t ptrAttrib_h_input;
hipMemcpyKind memcpy_kind_h_input = hipMemcpyHostToDevice;
if (hipPointerGetAttributes(&ptrAttrib_h_input, h_input) == hipSuccess)
if (ptrAttrib_h_input.memoryType == hipMemoryTypeDevice)
memcpy_kind_h_input = hipMemcpyDeviceToDevice;
hipGetLastError();
if( memcpy_kind_h_input != hipMemcpyDeviceToDevice ){
hipMemcpy(input,h_input,sizeof(struct float4)*((L-0)*(M-0)*(N-0)), memcpy_kind_h_input);
}
struct float4 * __var_1__;
hipMalloc(&__var_1__,sizeof(struct float4)*((L-0)*(M-0)*(N-0)));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
struct float4 * __var_2__;
hipMalloc(&__var_2__,sizeof(struct float4)*((L-0)*(M-0)*(N-0)));
Check_CUDA_Error("Allocation Error!! : __var_2__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
#ifdef _TIMER_
hipEvent_t _forma_timer_start_,_forma_timer_stop_;
hipEventCreate(&_forma_timer_start_);
hipEventCreate(&_forma_timer_stop_);
hipEventRecord(_forma_timer_start_,0);
#endif
int __size_0___kernel___forma_kernel__0__ = ((N-2) - 0 ) + 1;
int __size_1___kernel___forma_kernel__0__ = ((M-2) - 0 ) + 1;
int __size_2___kernel___forma_kernel__0__ = ((L-2) - 0 ) + 1;
int __block_0___kernel___forma_kernel__0__ = 16;
int __block_1___kernel___forma_kernel__0__ = 4;
int __block_2___kernel___forma_kernel__0__ = 4;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__,__block_2___kernel___forma_kernel__0__);
int __SMemSize___kernel___forma_kernel__0__ = 0;
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__block_0___kernel___forma_kernel__0__);
int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__);
int __grid_2___kernel___forma_kernel__0__ = FORMA_CEIL(__size_2___kernel___forma_kernel__0__,__block_2___kernel___forma_kernel__0__);
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__,__grid_2___kernel___forma_kernel__0__);
unsigned int power1, power2;
rsmi_status_t result;
uint32_t device;
nvmlEnableState_t mode;
result=nvmlInit();
result = nvmlDeviceGetHandleByIndex(0, &device);
assert(RSMI_STATUS_SUCCESS == result);
result=nvmlDeviceGetPowerManagementMode(device, &mode);
printf("enabled = %d\n", mode);
result=nvmlDeviceGetPowerUsage(device,&power1);
assert(RSMI_STATUS_SUCCESS == result);
hipDeviceSynchronize();
for (int x=0; x<500; x++) {
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, input, dx, dy, dz, L, M, N, __var_2__);
hipLaunchKernelGGL(( __kernel___forma_kernel__1__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, __var_2__, dx, dy, dz, L, M, N, __var_1__);
}
hipDeviceSynchronize();
result=nvmlDeviceGetPowerUsage(device,&power2);
assert(RSMI_STATUS_SUCCESS == result);
power2 -= power1;
printf("%u\n", power2);
nvmlShutdown();
hipPointerAttribute_t ptrAttrib___var_0__;
hipMemcpyKind memcpy_kind___var_0__ = hipMemcpyDeviceToHost;
if (hipPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == hipSuccess)
if (ptrAttrib___var_0__.memoryType == hipMemoryTypeDevice)
memcpy_kind___var_0__ = hipMemcpyDeviceToDevice;
hipGetLastError();
hipMemcpy(__var_0__,__var_1__, sizeof(struct float4)*((L-0)*(M-0)*(N-0)), memcpy_kind___var_0__);
#ifdef _TIMER_
hipEventRecord(_forma_timer_stop_,0);
hipEventSynchronize(_forma_timer_stop_);
float elapsedTime;
hipEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
hipEventDestroy(_forma_timer_start_);
hipEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
hipFree(input);
hipFree(__var_1__);
hipFree(__var_2__);
}
/*Host Free End*/
| bd0f734ff5298b9bb25491ecce54cbb1afcf5bde.cu | #include "cuda.h"
#ifdef _TIMER_
#include "cuda_profiler_api.h"
#endif
#include "stdio.h"
#include <nvml.h>
#include <assert.h>
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
input[loc] = value;
}
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
dim3 init_block(FORMA_MAX_BLOCKDIM_0);
__kernel_init__<<<init_grid,init_block>>>(d_input,value);
}
void Check_CUDA_Error(const char* message);
/*Texture references */
/* Device code Begin */
__global__ void __kernel___forma_kernel__0__(struct float4 * __restrict__ input, float dx, float dy, float dz, int L, int M, int N, struct float4 * __restrict__ __var_2__){
int FORMA_BLOCKDIM_Z = (int)(blockDim.z);
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int __iter_0__;
__iter_0__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 0;
if(__iter_0__ <= (N-2)){
int __iter_1__;
__iter_1__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 0;
if(__iter_1__ <= (M-2)){
int __iter_2__;
__iter_2__ = (int)(blockIdx.z)*(int)(FORMA_BLOCKDIM_Z) + (int)(threadIdx.z) + 0;
if(__iter_2__ <= (L-2)){
float __temp_0__;
__temp_0__ = (input[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__+(1)))].y - input[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__))].y);
float __temp_1__;
__temp_1__ = (dz * __temp_0__);
float __temp_2__;
__temp_2__ = (input[__iter_0__+(N-0)*(__iter_1__+(1)+(M-0)*(__iter_2__))].z - input[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__))].z);
float __temp_3__;
__temp_3__ = (dy * __temp_2__);
float __temp_4__;
__temp_4__ = (__temp_1__ - __temp_3__);
float __temp_5__;
__temp_5__ = (input[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__+(1)))].x - input[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__))].x);
float __temp_6__;
__temp_6__ = (dz * __temp_5__);
float __temp_7__;
__temp_7__ = (input[__iter_0__+(1)+(N-0)*(__iter_1__+(M-0)*(__iter_2__))].z - input[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__))].z);
float __temp_8__;
__temp_8__ = (dx * __temp_7__);
float __temp_9__;
__temp_9__ = (__temp_6__ - __temp_8__);
float __temp_10__;
__temp_10__ = (input[__iter_0__+(N-0)*(__iter_1__+(1)+(M-0)*(__iter_2__))].x - input[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__))].x);
float __temp_11__;
__temp_11__ = (dy * __temp_10__);
float __temp_12__;
__temp_12__ = (input[__iter_0__+(1)+(N-0)*(__iter_1__+(M-0)*(__iter_2__))].y - input[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__))].y);
float __temp_13__;
__temp_13__ = (dx * __temp_12__);
float __temp_14__;
__temp_14__ = (__temp_11__ - __temp_13__);
float __temp_15__;
__temp_15__ = (__temp_4__ + input[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__))].x);
float __temp_16__;
__temp_16__ = (__temp_9__ + input[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__))].y);
float __temp_17__;
__temp_17__ = (__temp_14__ + input[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__))].z);
__var_2__[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__))].x = __temp_15__;
__var_2__[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__))].y = __temp_16__;
__var_2__[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__))].z = __temp_17__;
__var_2__[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__))].w = 0.000000f;
}
}
}
}
__global__ void __kernel___forma_kernel__1__(struct float4 * __restrict__ __var_2__, float dx, float dy, float dz, int L, int M, int N, struct float4 * __restrict__ __var_1__){
int FORMA_BLOCKDIM_Z = (int)(blockDim.z);
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int __iter_3__;
__iter_3__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 1;
if(__iter_3__ <= (N-1)){
int __iter_4__;
__iter_4__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 1;
if(__iter_4__ <= (M-1)){
int __iter_5__;
__iter_5__ = (int)(blockIdx.z)*(int)(FORMA_BLOCKDIM_Z) + (int)(threadIdx.z) + 1;
if(__iter_5__ <= (L-1)){
float __temp_18__;
__temp_18__ = (__var_2__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__))].y - __var_2__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__+(-1)))].y);
float __temp_19__;
__temp_19__ = (dz * __temp_18__);
float __temp_20__;
__temp_20__ = (__var_2__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__))].z - __var_2__[__iter_3__+(N-0)*(__iter_4__+(-1)+(M-0)*(__iter_5__))].z);
float __temp_21__;
__temp_21__ = (dy * __temp_20__);
float __temp_22__;
__temp_22__ = (__temp_19__ - __temp_21__);
float __temp_23__;
__temp_23__ = (__var_2__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__))].x - __var_2__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__+(-1)))].x);
float __temp_24__;
__temp_24__ = (dz * __temp_23__);
float __temp_25__;
__temp_25__ = (__var_2__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__))].z - __var_2__[__iter_3__+(-1)+(N-0)*(__iter_4__+(M-0)*(__iter_5__))].z);
float __temp_26__;
__temp_26__ = (dx * __temp_25__);
float __temp_27__;
__temp_27__ = (__temp_24__ - __temp_26__);
float __temp_28__;
__temp_28__ = (__var_2__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__))].x - __var_2__[__iter_3__+(N-0)*(__iter_4__+(-1)+(M-0)*(__iter_5__))].x);
float __temp_29__;
__temp_29__ = (dy * __temp_28__);
float __temp_30__;
__temp_30__ = (__var_2__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__))].y - __var_2__[__iter_3__+(-1)+(N-0)*(__iter_4__+(M-0)*(__iter_5__))].y);
float __temp_31__;
__temp_31__ = (dx * __temp_30__);
float __temp_32__;
__temp_32__ = (__temp_29__ - __temp_31__);
float __temp_33__;
__temp_33__ = (__temp_22__ + __var_2__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__))].x);
float __temp_34__;
__temp_34__ = (__temp_27__ + __var_2__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__))].y);
float __temp_35__;
__temp_35__ = (__temp_32__ + __var_2__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__))].z);
__var_1__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__))].x = __temp_33__;
__var_1__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__))].y = __temp_34__;
__var_1__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__))].z = __temp_35__;
__var_1__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__))].w = 0.000000f;
}
}
}
}
/*Device code End */
/* Host Code Begin */
extern "C" void curl(struct float4 * h_input, float dx, float dy, float dz, int L, int M, int N, struct float4 * __var_0__){
/* Host allocation Begin */
struct float4 * input;
cudaMalloc(&input,sizeof(struct float4)*((L-0)*(M-0)*(N-0)));
Check_CUDA_Error("Allocation Error!! : input\n");
cudaPointerAttributes ptrAttrib_h_input;
cudaMemcpyKind memcpy_kind_h_input = cudaMemcpyHostToDevice;
if (cudaPointerGetAttributes(&ptrAttrib_h_input, h_input) == cudaSuccess)
if (ptrAttrib_h_input.memoryType == cudaMemoryTypeDevice)
memcpy_kind_h_input = cudaMemcpyDeviceToDevice;
cudaGetLastError();
if( memcpy_kind_h_input != cudaMemcpyDeviceToDevice ){
cudaMemcpy(input,h_input,sizeof(struct float4)*((L-0)*(M-0)*(N-0)), memcpy_kind_h_input);
}
struct float4 * __var_1__;
cudaMalloc(&__var_1__,sizeof(struct float4)*((L-0)*(M-0)*(N-0)));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
struct float4 * __var_2__;
cudaMalloc(&__var_2__,sizeof(struct float4)*((L-0)*(M-0)*(N-0)));
Check_CUDA_Error("Allocation Error!! : __var_2__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
#ifdef _TIMER_
cudaEvent_t _forma_timer_start_,_forma_timer_stop_;
cudaEventCreate(&_forma_timer_start_);
cudaEventCreate(&_forma_timer_stop_);
cudaEventRecord(_forma_timer_start_,0);
#endif
int __size_0___kernel___forma_kernel__0__ = ((N-2) - 0 ) + 1;
int __size_1___kernel___forma_kernel__0__ = ((M-2) - 0 ) + 1;
int __size_2___kernel___forma_kernel__0__ = ((L-2) - 0 ) + 1;
int __block_0___kernel___forma_kernel__0__ = 16;
int __block_1___kernel___forma_kernel__0__ = 4;
int __block_2___kernel___forma_kernel__0__ = 4;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__,__block_2___kernel___forma_kernel__0__);
int __SMemSize___kernel___forma_kernel__0__ = 0;
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__block_0___kernel___forma_kernel__0__);
int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__);
int __grid_2___kernel___forma_kernel__0__ = FORMA_CEIL(__size_2___kernel___forma_kernel__0__,__block_2___kernel___forma_kernel__0__);
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__,__grid_2___kernel___forma_kernel__0__);
unsigned int power1, power2;
nvmlReturn_t result;
nvmlDevice_t device;
nvmlEnableState_t mode;
result=nvmlInit();
result = nvmlDeviceGetHandleByIndex(0, &device);
assert(NVML_SUCCESS == result);
result=nvmlDeviceGetPowerManagementMode(device, &mode);
printf("enabled = %d\n", mode);
result=nvmlDeviceGetPowerUsage(device,&power1);
assert(NVML_SUCCESS == result);
cudaDeviceSynchronize();
for (int x=0; x<500; x++) {
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (input, dx, dy, dz, L, M, N, __var_2__);
__kernel___forma_kernel__1__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (__var_2__, dx, dy, dz, L, M, N, __var_1__);
}
cudaDeviceSynchronize();
result=nvmlDeviceGetPowerUsage(device,&power2);
assert(NVML_SUCCESS == result);
power2 -= power1;
printf("%u\n", power2);
nvmlShutdown();
cudaPointerAttributes ptrAttrib___var_0__;
cudaMemcpyKind memcpy_kind___var_0__ = cudaMemcpyDeviceToHost;
if (cudaPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == cudaSuccess)
if (ptrAttrib___var_0__.memoryType == cudaMemoryTypeDevice)
memcpy_kind___var_0__ = cudaMemcpyDeviceToDevice;
cudaGetLastError();
cudaMemcpy(__var_0__,__var_1__, sizeof(struct float4)*((L-0)*(M-0)*(N-0)), memcpy_kind___var_0__);
#ifdef _TIMER_
cudaEventRecord(_forma_timer_stop_,0);
cudaEventSynchronize(_forma_timer_stop_);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
cudaEventDestroy(_forma_timer_start_);
cudaEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
cudaFree(input);
cudaFree(__var_1__);
cudaFree(__var_2__);
}
/*Host Free End*/
|
5d4eb5ecec3fefe821fab3e98469f1101e71d383.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* \file reduction_utilities.cpp
* \author Robert 'Bob' Caddy (rvc@pitt.edu)
* \brief Contains the implementation of the GPU resident reduction utilities
*
*/
// STL Includes
#include <float.h>
// External Includes
// Local Includes
#include "../utils/reduction_utilities.h"
#ifdef CUDA
namespace reduction_utilities
{
// =====================================================================
__global__ void kernelReduceMax(Real* in, Real* out, size_t N)
{
// Initialize maxVal to the smallest possible number
Real maxVal = -DBL_MAX;
// Grid stride loop to perform as much of the reduction as possible
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
// A transformation could go here
// Grid stride reduction
maxVal = max(maxVal, in[i]);
}
// Find the maximum val in the grid and write it to `out`. Note that
// there is no execution/memory barrier after this and so the
// reduced scalar is not available for use in this kernel. The grid
// wide barrier can be accomplished by ending this kernel here and
// then launching a new one or by using cooperative groups. If this
// becomes a need it can be added later
gridReduceMax(maxVal, out);
}
// =====================================================================
} // namespace reduction_utilities
#endif // CUDA | 5d4eb5ecec3fefe821fab3e98469f1101e71d383.cu | /*!
* \file reduction_utilities.cpp
* \author Robert 'Bob' Caddy (rvc@pitt.edu)
* \brief Contains the implementation of the GPU resident reduction utilities
*
*/
// STL Includes
#include <float.h>
// External Includes
// Local Includes
#include "../utils/reduction_utilities.h"
#ifdef CUDA
namespace reduction_utilities
{
// =====================================================================
__global__ void kernelReduceMax(Real* in, Real* out, size_t N)
{
// Initialize maxVal to the smallest possible number
Real maxVal = -DBL_MAX;
// Grid stride loop to perform as much of the reduction as possible
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
// A transformation could go here
// Grid stride reduction
maxVal = max(maxVal, in[i]);
}
// Find the maximum val in the grid and write it to `out`. Note that
// there is no execution/memory barrier after this and so the
// reduced scalar is not available for use in this kernel. The grid
// wide barrier can be accomplished by ending this kernel here and
// then launching a new one or by using cooperative groups. If this
// becomes a need it can be added later
gridReduceMax(maxVal, out);
}
// =====================================================================
} // namespace reduction_utilities
#endif // CUDA |
3b9e8149c34e5669f1b084496dc608b917742e54.hip | // !!! This is a file automatically generated by hipify!!!
// -------------------------------------------------------------
// cuDPP -- CUDA Data Parallel Primitives library
// -------------------------------------------------------------
// $Revision:$
// $Date:$
// -------------------------------------------------------------
// This source code is distributed under the terms of license.txt in
// the root directory of this source distribution.
// -------------------------------------------------------------
/**
* @file hash_table.cu
*
* @brief Hides all of the CUDA calls from the actual CPP file.
*/
#include <cuhash/cuda_util.h>
#include <cuhash/debugging.h>
#include <cuhash/definitions.h>
#include <cuhash/hash_table.cuh>
#include <hip/hip_runtime.h>
namespace cuhash {
namespace CUDAWrapper {
void ClearTable(const unsigned slots_in_table,
const Entry fill_value,
Entry *d_contents) {
hipLaunchKernelGGL(( clear_table), dim3(ComputeGridDim(slots_in_table)), dim3(kBlockSize), 0, 0,
slots_in_table, fill_value, d_contents);
CUDA_CHECK_ERROR("Error occurred during hash table clear.\n");
}
void CallCuckooHash(const unsigned n,
const unsigned num_hash_functions,
const unsigned *d_keys,
const unsigned *d_values,
const unsigned table_size,
const Functions<2> constants_2,
const Functions<3> constants_3,
const Functions<4> constants_4,
const Functions<5> constants_5,
const unsigned max_iterations,
Entry *d_contents,
uint2 stash_constants,
unsigned *d_stash_count,
unsigned *d_failures,
unsigned *d_iterations_taken) {
// Build the table.
hipMemset(d_failures, 0, sizeof(unsigned));
if (num_hash_functions == 2) {
hipLaunchKernelGGL(( CuckooHash), dim3(ComputeGridDim(n)), dim3(kBlockSize), 0, 0,
n,
d_keys,
d_values,
table_size,
constants_2,
max_iterations,
d_contents,
stash_constants,
d_stash_count,
d_failures,
d_iterations_taken);
} else if (num_hash_functions == 3) {
hipLaunchKernelGGL(( CuckooHash), dim3(ComputeGridDim(n)), dim3(kBlockSize), 0, 0,
n,
d_keys,
d_values,
table_size,
constants_3,
max_iterations,
d_contents,
stash_constants,
d_stash_count,
d_failures,
d_iterations_taken);
} else if (num_hash_functions == 4) {
hipLaunchKernelGGL(( CuckooHash), dim3(ComputeGridDim(n)), dim3(kBlockSize), 0, 0,
n,
d_keys,
d_values,
table_size,
constants_4,
max_iterations,
d_contents,
stash_constants,
d_stash_count,
d_failures,
d_iterations_taken);
} else {
hipLaunchKernelGGL(( CuckooHash), dim3(ComputeGridDim(n)), dim3(kBlockSize), 0, 0,
n,
d_keys,
d_values,
table_size,
constants_5,
max_iterations,
d_contents,
stash_constants,
d_stash_count,
d_failures,
d_iterations_taken);
}
CUDA_CHECK_ERROR("Error occurred during hash table build.\n");
}
void CallHashRetrieve(const unsigned n_queries,
const unsigned num_hash_functions,
const unsigned *d_keys,
const unsigned table_size,
const Entry *d_contents,
const Functions<2> constants_2,
const Functions<3> constants_3,
const Functions<4> constants_4,
const Functions<5> constants_5,
const uint2 stash_constants,
const unsigned stash_count,
unsigned *d_values) {
unsigned *d_retrieval_probes = NULL;
#ifdef TRACK_ITERATIONS
CUDA_SAFE_CALL(hipMalloc((void**)&d_retrieval_probes, sizeof(unsigned) * n_queries));
#endif
if (num_hash_functions == 2) {
hipLaunchKernelGGL(( hash_retrieve), dim3(ComputeGridDim(n_queries)), dim3(kBlockSize), 0, 0,
n_queries,
d_keys,
table_size,
d_contents,
constants_2,
stash_constants,
stash_count,
d_values,
d_retrieval_probes);
} else if (num_hash_functions == 3) {
hipLaunchKernelGGL(( hash_retrieve), dim3(ComputeGridDim(n_queries)), dim3(kBlockSize), 0, 0,
n_queries,
d_keys,
table_size,
d_contents,
constants_3,
stash_constants,
stash_count,
d_values,
d_retrieval_probes);
} else if (num_hash_functions == 4) {
hipLaunchKernelGGL(( hash_retrieve), dim3(ComputeGridDim(n_queries)), dim3(kBlockSize), 0, 0,
n_queries,
d_keys,
table_size,
d_contents,
constants_4,
stash_constants,
stash_count,
d_values,
d_retrieval_probes);
} else {
hipLaunchKernelGGL(( hash_retrieve), dim3(ComputeGridDim(n_queries)), dim3(kBlockSize), 0, 0,
n_queries,
d_keys,
table_size,
d_contents,
constants_5,
stash_constants,
stash_count,
d_values,
d_retrieval_probes);
}
CUDA_CHECK_ERROR("Retrieval failed.\n");
#ifdef TRACK_ITERATIONS
OutputRetrievalStatistics(n_queries,
d_retrieval_probes,
num_hash_functions);
CUDA_SAFE_CALL(hipFree(d_retrieval_probes));
#endif
}
}; // namespace CUDAWrapper
}; // namespace CuckooHashing
| 3b9e8149c34e5669f1b084496dc608b917742e54.cu | // -------------------------------------------------------------
// cuDPP -- CUDA Data Parallel Primitives library
// -------------------------------------------------------------
// $Revision:$
// $Date:$
// -------------------------------------------------------------
// This source code is distributed under the terms of license.txt in
// the root directory of this source distribution.
// -------------------------------------------------------------
/**
* @file hash_table.cu
*
* @brief Hides all of the CUDA calls from the actual CPP file.
*/
#include <cuhash/cuda_util.h>
#include <cuhash/debugging.h>
#include <cuhash/definitions.h>
#include <cuhash/hash_table.cuh>
#include <cuda.h>
namespace cuhash {
namespace CUDAWrapper {
void ClearTable(const unsigned slots_in_table,
const Entry fill_value,
Entry *d_contents) {
clear_table<<<ComputeGridDim(slots_in_table), kBlockSize>>>
(slots_in_table, fill_value, d_contents);
CUDA_CHECK_ERROR("Error occurred during hash table clear.\n");
}
void CallCuckooHash(const unsigned n,
const unsigned num_hash_functions,
const unsigned *d_keys,
const unsigned *d_values,
const unsigned table_size,
const Functions<2> constants_2,
const Functions<3> constants_3,
const Functions<4> constants_4,
const Functions<5> constants_5,
const unsigned max_iterations,
Entry *d_contents,
uint2 stash_constants,
unsigned *d_stash_count,
unsigned *d_failures,
unsigned *d_iterations_taken) {
// Build the table.
cudaMemset(d_failures, 0, sizeof(unsigned));
if (num_hash_functions == 2) {
CuckooHash<<<ComputeGridDim(n), kBlockSize>>>
(n,
d_keys,
d_values,
table_size,
constants_2,
max_iterations,
d_contents,
stash_constants,
d_stash_count,
d_failures,
d_iterations_taken);
} else if (num_hash_functions == 3) {
CuckooHash<<<ComputeGridDim(n), kBlockSize>>>
(n,
d_keys,
d_values,
table_size,
constants_3,
max_iterations,
d_contents,
stash_constants,
d_stash_count,
d_failures,
d_iterations_taken);
} else if (num_hash_functions == 4) {
CuckooHash<<<ComputeGridDim(n), kBlockSize>>>
(n,
d_keys,
d_values,
table_size,
constants_4,
max_iterations,
d_contents,
stash_constants,
d_stash_count,
d_failures,
d_iterations_taken);
} else {
CuckooHash<<<ComputeGridDim(n), kBlockSize>>>
(n,
d_keys,
d_values,
table_size,
constants_5,
max_iterations,
d_contents,
stash_constants,
d_stash_count,
d_failures,
d_iterations_taken);
}
CUDA_CHECK_ERROR("Error occurred during hash table build.\n");
}
// Look up n_queries keys in the hash table and write results to d_values.
// Dispatches on the number of hash functions (2-5); any other count is
// treated as 5, mirroring the build path. When TRACK_ITERATIONS is
// defined, per-query probe counts are gathered and reported.
void CallHashRetrieve(const unsigned n_queries,
                      const unsigned num_hash_functions,
                      const unsigned *d_keys,
                      const unsigned table_size,
                      const Entry *d_contents,
                      const Functions<2> constants_2,
                      const Functions<3> constants_3,
                      const Functions<4> constants_4,
                      const Functions<5> constants_5,
                      const uint2 stash_constants,
                      const unsigned stash_count,
                      unsigned *d_values) {
  unsigned *d_retrieval_probes = NULL;
#ifdef TRACK_ITERATIONS
  // Per-query probe counters, only allocated when statistics are requested.
  CUDA_SAFE_CALL(cudaMalloc((void**)&d_retrieval_probes, sizeof(unsigned) * n_queries));
#endif
  switch (num_hash_functions) {
    case 2:
      hash_retrieve<<<ComputeGridDim(n_queries), kBlockSize>>>(
          n_queries, d_keys, table_size, d_contents, constants_2,
          stash_constants, stash_count, d_values, d_retrieval_probes);
      break;
    case 3:
      hash_retrieve<<<ComputeGridDim(n_queries), kBlockSize>>>(
          n_queries, d_keys, table_size, d_contents, constants_3,
          stash_constants, stash_count, d_values, d_retrieval_probes);
      break;
    case 4:
      hash_retrieve<<<ComputeGridDim(n_queries), kBlockSize>>>(
          n_queries, d_keys, table_size, d_contents, constants_4,
          stash_constants, stash_count, d_values, d_retrieval_probes);
      break;
    default:  // 5 hash functions (and any unexpected count).
      hash_retrieve<<<ComputeGridDim(n_queries), kBlockSize>>>(
          n_queries, d_keys, table_size, d_contents, constants_5,
          stash_constants, stash_count, d_values, d_retrieval_probes);
      break;
  }
  CUDA_CHECK_ERROR("Retrieval failed.\n");
#ifdef TRACK_ITERATIONS
  OutputRetrievalStatistics(n_queries,
                            d_retrieval_probes,
                            num_hash_functions);
  CUDA_SAFE_CALL(cudaFree(d_retrieval_probes));
#endif
}
}; // namespace CUDAWrapper
}; // namespace CuckooHashing
|
9936e0763276f43ac93d3c88ba6fce9c18659837.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <fstream>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/stat.h>
#include <sys/time.h>
#include "gpu_spatial.h"
using namespace std;
#define THREAD1 128
#define BLOCK1 64
#define THREAD2 64
#define BLOCK2 128
extern hipStream_t *stream;
// Decode raw ASCII MBR text into integers on the GPU.
// Each output int comes from a 5-byte input record whose bytes at offsets
// 1..4 are decimal digits (offset 0 is skipped — presumably a delimiter;
// confirm against the file format). Each thread converts numOfCasts records.
// No bounds check: the host pads both buffers to the launch geometry
// (getSizeBasedGPUConf1), so every computed index is in-bounds by design.
__global__ void parseMBR(char *MBRRaw, int *MBR, int numOfCasts){
    for (int i = 0; i < numOfCasts; i++) {
        int dst = blockDim.x * blockIdx.x * numOfCasts + threadIdx.x + i * blockDim.x;
        int src = dst * 5;  // 5 input bytes per parsed integer
        // Horner-style accumulation of the 4 digits at offsets 1..4.
        int value = 0;
        for (int d = 1; d <= 4; d++)
            value = value * 10 + (MBRRaw[src + d] - '0');
        MBR[dst] = value;
    }
}
// Decode polygon vertex coordinates from raw ASCII into int arrays X/Y.
// One block handles one polygon per iteration (numOfPolyBlock polygons per
// block in total); threads within the block split that polygon's vertices.
// Each vertex is an 11-byte text record: digits at offsets 1..4 form X and
// digits at offsets 6..9 form Y (offsets 0 and 5 are skipped — presumably
// delimiters; confirm against the file format).
// offsetRaw[p]       : byte offset of polygon p's text in verticesRaw.
// offsetInGPUMem[p]  : element offset where polygon p's vertices are written.
__global__ void parseVertices(char *verticesRaw, int *offsetRaw, int* numOfVerticesInApoly,
int *X, int*Y, int *offsetInGPUMem, int nr_polys, int numOfPolyBlock){
for (int i=0; i<numOfPolyBlock; i++) {
// Polygons are striped across blocks: block b takes b, b+gridDim.x, ...
int polyIndx = blockIdx.x + i*gridDim.x;
if (polyIndx < nr_polys){
int numOfVertices = numOfVerticesInApoly[polyIndx];
// Round the per-polygon vertex count up to a whole number of warps of
// work so every thread executes the same number of loop iterations.
int numOfVerticePerThread = (numOfVertices/blockDim.x + 1)*blockDim.x;
int offsetInGPU = offsetInGPUMem[polyIndx];
for (int j=0; j<numOfVerticePerThread; j++) {
int verticeIndx = threadIdx.x + j*blockDim.x;
// Guard the rounded-up tail: only real vertices are decoded.
if (verticeIndx < numOfVertices) {
int srcIndx = offsetRaw[polyIndx] + verticeIndx*11;
// X digits at srcIndx+1..+4, Y digits at srcIndx+6..+9.
X[offsetInGPU + verticeIndx] = (verticesRaw[srcIndx+1] - '0') * 1000 + (verticesRaw[srcIndx + 2] - '0') * 100 +
(verticesRaw[srcIndx+3] - '0') * 10 + (verticesRaw[srcIndx + 4] - '0');
Y[offsetInGPU + verticeIndx] = (verticesRaw[srcIndx+6] - '0') * 1000 + (verticesRaw[srcIndx + 7] - '0') * 100 +
(verticesRaw[srcIndx+8] - '0') * 10 + (verticesRaw[srcIndx + 9] - '0');
}
}
}
}
}
// Return the elapsed time end - start as a normalized timespec
// (tv_nsec in [0, 1e9)). Assumes end >= start.
struct timespec diff(struct timespec start, struct timespec end){
    struct timespec out;
    long nsec = end.tv_nsec - start.tv_nsec;
    out.tv_sec = end.tv_sec - start.tv_sec;
    if (nsec < 0) {
        // Borrow one second when the nanosecond field underflows.
        out.tv_sec -= 1;
        nsec += 1000000000;
    }
    out.tv_nsec = nsec;
    return out;
}
// Round size up to a multiple of THREAD1*BLOCK1. Note the formula always
// adds at least one extra chunk (even for exact multiples), matching the
// padding assumed by the parseMBR launch geometry.
int getSizeBasedGPUConf1(int size){
    const int chunk = THREAD1 * BLOCK1;
    return (size / chunk + 1) * chunk;
}
// Round size up to a multiple of THREAD2 (always adds at least one extra
// THREAD2-sized chunk, even for exact multiples — same convention as
// getSizeBasedGPUConf1).
int getSizeBasedGPUConf2(int size){
    const int chunk = THREAD2;
    return (size / chunk + 1) * chunk;
}
// Allocate a poly_array_t's backing store as one contiguous malloc and carve
// it into the mbrs / offsets / x / y sub-arrays. offsets gets nr_polys + 1
// slots (one extra for the end sentinel). Exits the process on OOM.
// The caller frees everything with a single free(polys->mbrs).
int alloc_poly_array(poly_array_t *polys, const int nr_polys, const int nr_vertices)
{
    const int size_mbrs    = nr_polys * sizeof(mbr_t);
    const int size_offsets = (nr_polys + 1) * sizeof(int);
    const int size_coord   = nr_vertices * sizeof(int);  // each of x and y
    char *base = (char *)malloc(size_mbrs + size_offsets + 2 * size_coord);
    if (base == NULL) {
        fprintf(stderr, "failed to allocate memory for poly array\n");
        exit(1);
    }
    polys->nr_polys    = nr_polys;
    polys->nr_vertices = nr_vertices;
    // Sub-arrays are laid out back to back inside the single allocation.
    polys->mbrs    = (mbr_t *)base;
    polys->offsets = (int *)(base + size_mbrs);
    polys->x       = (int *)(base + size_mbrs + size_offsets);
    polys->y       = (int *)(base + size_mbrs + size_offsets + size_coord);
    return 0;
}
// Parse a polygon file using GPU dno and return a freshly allocated
// poly_array_t (caller frees polys->mbrs, then polys).
// Expected file layout (inferred from the offsets used below — confirm):
// first line "<nr_polys>, <nr_vertices>"; each following line holds a
// 4-digit vertex count, 20 bytes of MBR text starting at byte 5, and
// 11-byte vertex records starting at byte 26. The raw text is staged to the
// device and decoded by the parseMBR / parseVertices kernels.
// Fixes vs. original: numOfVerticesBuf is now NUL-terminated before atoi()
// (the 4-byte buffer was read past its end — UB), and the timing events are
// destroyed (they leaked one pair of handles per call).
poly_array_t *gpu_parse(int dno, char *file_name)
{
    // 4-digit count field plus a NUL terminator for atoi().
    char *MBRRaw, *verticesRaw, numOfVerticesBuf[5];
    char *dev_MBRRaw, *dev_verticesRaw;
    int *offset, *offsetRaw, *numOfVerticesInApoly;
    int *dev_offsetRaw, *dev_numOfVerticesInApoly;
    int *MBR, *X, *Y, *offsetInGPUMem;
    int *dev_MBR, *dev_X, *dev_Y, *dev_offsetInGPUMem;
    static const int parse_buf_size = 8192;
    char readbuf[parse_buf_size];
    fstream polyFile;
    int nr_polys, nr_vertices;
    numOfVerticesBuf[4] = '\0';  // only the first 4 bytes are ever rewritten
    polyFile.open(file_name, fstream::in | fstream::binary);
    polyFile.getline(readbuf, parse_buf_size);
    sscanf(readbuf, "%d, %d\n", &nr_polys, &nr_vertices);
    // Staging buffers are padded to the kernels' launch geometry so the
    // unguarded parseMBR kernel never indexes past the allocation.
    MBRRaw = (char *)malloc(getSizeBasedGPUConf1(20*nr_polys*sizeof(char)));
    MBR = (int *)malloc(getSizeBasedGPUConf1(4*nr_polys*sizeof(int)));
    hipSetDevice(dno);
    hipMalloc((void**)&dev_MBRRaw, getSizeBasedGPUConf1(20*nr_polys*sizeof(char)));
    hipMalloc((void**)&dev_MBR, getSizeBasedGPUConf1(4*nr_polys*sizeof(int)));
    verticesRaw = (char *)malloc(nr_vertices * 11);
    offset = (int *)malloc(nr_polys*sizeof(int));
    offsetRaw = (int *)malloc(nr_polys*sizeof(int));
    numOfVerticesInApoly = (int *)malloc(nr_polys*sizeof(int));
    offsetInGPUMem = (int *)malloc(nr_polys*sizeof(int));
    hipMalloc((void**)&dev_verticesRaw, nr_vertices * 11);
    hipMalloc((void**)&dev_offsetRaw, nr_polys*sizeof(int));
    hipMalloc((void**)&dev_numOfVerticesInApoly, nr_polys*sizeof(int));
    hipMalloc((void**)&dev_offsetInGPUMem, nr_polys*sizeof(int));
    int rawBufferIndx = 0;        // byte offset of next polygon in verticesRaw
    int vertexIndx = 0;           // running compact vertex count -> offset[]
    int numVertices = 0;
    int numVerticesInGPUMem = 0;  // THREAD2-padded running vertex count
    /* process first vertex line */
    polyFile.getline(readbuf, parse_buf_size);
    memcpy(numOfVerticesBuf, readbuf, 4);
    memcpy(MBRRaw, readbuf + 5, 20);
    offset[0] = vertexIndx;
    offsetRaw[0] = rawBufferIndx;
    numVertices = atoi(numOfVerticesBuf);
    offsetInGPUMem[0] = numVerticesInGPUMem;
    numVerticesInGPUMem = (numVertices/THREAD2 + 1) * THREAD2;
    numOfVerticesInApoly[0] = numVertices;
    memcpy(verticesRaw, readbuf + 26, numVertices * 11);
    vertexIndx += numVertices;
    rawBufferIndx += strlen(readbuf)-26;
    /* each iteration processes one polygon line */
    for (int i=1; i<nr_polys; i++) {
        polyFile.getline(readbuf, parse_buf_size);
        memcpy(numOfVerticesBuf, readbuf, 4);
        memcpy(MBRRaw + 20*i, readbuf + 5, 20);
        offset[i] = vertexIndx;
        offsetRaw[i] = rawBufferIndx;
        numVertices = atoi(numOfVerticesBuf);
        offsetInGPUMem[i] = numVerticesInGPUMem;
        numVerticesInGPUMem += (numVertices/THREAD2 + 1) * THREAD2;
        numOfVerticesInApoly[i] = numVertices;
        memcpy(verticesRaw + rawBufferIndx, readbuf + 26, numVertices * 11);
        vertexIndx += numVertices;
        rawBufferIndx += strlen(readbuf)-26;
    }
    X = (int *)malloc(nr_vertices * sizeof(int));
    Y = (int *)malloc(nr_vertices * sizeof(int));
    hipMalloc((void**)&dev_X, nr_vertices * sizeof(int));
    hipMalloc((void**)&dev_Y, nr_vertices * sizeof(int));
    hipMemcpy(dev_MBRRaw, MBRRaw, getSizeBasedGPUConf1(20*nr_polys*sizeof(char)), hipMemcpyHostToDevice);
    hipEvent_t start, stop;
    hipEventCreate( &start );
    hipEventCreate( &stop );
    hipEventRecord( start, 0 );
    // Decode the 4-digit MBR fields on the GPU.
    hipLaunchKernelGGL(( parseMBR), dim3(BLOCK1), dim3(THREAD1), 0, stream[dno], dev_MBRRaw, dev_MBR, getSizeBasedGPUConf1(nr_polys*4)/(BLOCK1*THREAD1));
    hipDeviceSynchronize();
    hipMemcpy(MBR, dev_MBR, getSizeBasedGPUConf1(4*nr_polys*sizeof(int)), hipMemcpyDeviceToHost);
    hipMemcpy(dev_verticesRaw, verticesRaw, nr_vertices * 11, hipMemcpyHostToDevice);
    hipMemcpy(dev_offsetRaw, offsetRaw, nr_polys*sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(dev_numOfVerticesInApoly, numOfVerticesInApoly, nr_polys*sizeof(int), hipMemcpyHostToDevice);
    // NOTE(review): 'offset' (compact per-polygon vertex offsets) is uploaded
    // as the kernel's offsetInGPUMem argument instead of the padded
    // offsetInGPUMem array. That makes parseVertices write X/Y compactly,
    // which is what the memcpy into polys->x/y below expects — confirm this
    // is intentional before "fixing" it.
    hipMemcpy(dev_offsetInGPUMem, offset, nr_polys*sizeof(int), hipMemcpyHostToDevice);
    // Decode vertex coordinates on the GPU.
    hipLaunchKernelGGL(( parseVertices), dim3(BLOCK2), dim3(THREAD2), 0, stream[dno], dev_verticesRaw, dev_offsetRaw, dev_numOfVerticesInApoly,
    dev_X, dev_Y, dev_offsetInGPUMem, nr_polys, nr_polys/BLOCK2 + 1);
    // These blocking copies also synchronize with the kernel on stream[dno]
    // only if it is the same stream the runtime serializes against — the
    // original relied on this; left unchanged.
    hipMemcpy(X, dev_X, nr_vertices * sizeof(int), hipMemcpyDeviceToHost);
    hipMemcpy(Y, dev_Y, nr_vertices * sizeof(int), hipMemcpyDeviceToHost);
    // Pack results into the caller-owned poly array.
    poly_array_t *polys = (poly_array_t *)malloc(sizeof(poly_array_t));
    alloc_poly_array(polys, nr_polys, nr_vertices);
    memcpy(polys->mbrs, MBR, sizeof(mbr_t) * nr_polys);
    memcpy(polys->offsets, offset, sizeof(int) * nr_polys);
    polys->offsets[nr_polys] = nr_vertices;  // sentinel: end of last polygon
    memcpy(polys->x, X, sizeof(int) * nr_vertices);
    memcpy(polys->y, Y, sizeof(int) * nr_vertices);
    // Fix: events were created but never destroyed (handle leak per call).
    hipEventDestroy( start );
    hipEventDestroy( stop );
    free(MBRRaw);
    free(verticesRaw);
    free(offset);
    free(offsetRaw);
    free(numOfVerticesInApoly);
    free(offsetInGPUMem);
    free(MBR);
    free(X);
    free(Y);
    hipFree(dev_MBRRaw);
    hipFree(dev_MBR);
    hipFree(dev_verticesRaw);
    hipFree(dev_offsetRaw);
    hipFree(dev_numOfVerticesInApoly);
    hipFree(dev_offsetInGPUMem);
    hipFree(dev_X);
    hipFree(dev_Y);
    return polys;
}
| 9936e0763276f43ac93d3c88ba6fce9c18659837.cu | #include <iostream>
#include <fstream>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/stat.h>
#include <sys/time.h>
#include "gpu_spatial.h"
using namespace std;
#define THREAD1 128
#define BLOCK1 64
#define THREAD2 64
#define BLOCK2 128
extern cudaStream_t *stream;
// Decode raw ASCII MBR text into integers on the GPU.
// Each output int is parsed from a 5-byte input record whose bytes at
// offsets 1..4 are decimal digits (offset 0 is skipped — presumably a
// delimiter; confirm against the file format). Each thread converts
// numOfCasts records.
// No bounds check: assumes the host padded both buffers to the launch
// geometry (getSizeBasedGPUConf1) so every index is in range — TODO confirm
// for any new call site.
__global__ void parseMBR(char *MBRRaw, int *MBR, int numOfCasts){
for (int i=0; i<numOfCasts; i++) {
// Thread t of block b handles records b*numOfCasts*blockDim + i*blockDim + t.
int dstIndx = blockDim.x * blockIdx.x * numOfCasts + threadIdx.x + i * blockDim.x;
int srcIndx = dstIndx * 5;
// Combine the 4 ASCII digits into one integer.
MBR[dstIndx] = (MBRRaw[srcIndx+1] - '0') * 1000 + (MBRRaw[srcIndx + 2] - '0') * 100 +
(MBRRaw[srcIndx+3] - '0') * 10 + (MBRRaw[srcIndx + 4] - '0');
}
}
// Decode polygon vertex coordinates from raw ASCII into int arrays X/Y.
// One block handles one polygon per iteration (numOfPolyBlock polygons per
// block in total); threads within the block split that polygon's vertices.
// Each vertex is an 11-byte text record: digits at offsets 1..4 form X and
// digits at offsets 6..9 form Y (offsets 0 and 5 are skipped — presumably
// delimiters; confirm against the file format).
// offsetRaw[p]       : byte offset of polygon p's text in verticesRaw.
// offsetInGPUMem[p]  : element offset where polygon p's vertices are written.
__global__ void parseVertices(char *verticesRaw, int *offsetRaw, int* numOfVerticesInApoly,
int *X, int*Y, int *offsetInGPUMem, int nr_polys, int numOfPolyBlock){
for (int i=0; i<numOfPolyBlock; i++) {
// Polygons are striped across blocks: block b takes b, b+gridDim.x, ...
int polyIndx = blockIdx.x + i*gridDim.x;
if (polyIndx < nr_polys){
int numOfVertices = numOfVerticesInApoly[polyIndx];
// Round the vertex count up so every thread runs the same loop count.
int numOfVerticePerThread = (numOfVertices/blockDim.x + 1)*blockDim.x;
int offsetInGPU = offsetInGPUMem[polyIndx];
for (int j=0; j<numOfVerticePerThread; j++) {
int verticeIndx = threadIdx.x + j*blockDim.x;
// Guard the rounded-up tail: only real vertices are decoded.
if (verticeIndx < numOfVertices) {
int srcIndx = offsetRaw[polyIndx] + verticeIndx*11;
// X digits at srcIndx+1..+4, Y digits at srcIndx+6..+9.
X[offsetInGPU + verticeIndx] = (verticesRaw[srcIndx+1] - '0') * 1000 + (verticesRaw[srcIndx + 2] - '0') * 100 +
(verticesRaw[srcIndx+3] - '0') * 10 + (verticesRaw[srcIndx + 4] - '0');
Y[offsetInGPU + verticeIndx] = (verticesRaw[srcIndx+6] - '0') * 1000 + (verticesRaw[srcIndx + 7] - '0') * 100 +
(verticesRaw[srcIndx+8] - '0') * 10 + (verticesRaw[srcIndx + 9] - '0');
}
}
}
}
}
// Elapsed time end - start, returned as a normalized timespec whose
// tv_nsec lies in [0, 1e9). Assumes end >= start.
struct timespec diff(struct timespec start, struct timespec end){
    struct timespec result;
    result.tv_sec  = end.tv_sec  - start.tv_sec;
    result.tv_nsec = end.tv_nsec - start.tv_nsec;
    if (result.tv_nsec < 0) {
        // Borrow a second to keep the nanosecond field non-negative.
        result.tv_sec  -= 1;
        result.tv_nsec += 1000000000;
    }
    return result;
}
// Round size up to a multiple of THREAD1*BLOCK1. The formula always adds
// one extra chunk (even for exact multiples) — the parseMBR launch geometry
// relies on this padding.
int getSizeBasedGPUConf1(int size){
    const int chunk = THREAD1 * BLOCK1;
    return (size / chunk + 1) * chunk;
}
// Round size up to a multiple of THREAD2 (always adds at least one extra
// THREAD2-sized chunk, even for exact multiples — same convention as
// getSizeBasedGPUConf1).
int getSizeBasedGPUConf2(int size){
    const int chunk = THREAD2;
    return (size / chunk + 1) * chunk;
}
// Allocate a poly_array_t's backing store with a single malloc and carve it
// into mbrs / offsets / x / y sub-arrays. offsets has nr_polys + 1 slots
// (one extra for the end sentinel). Exits the process on OOM.
// The caller releases everything with a single free(polys->mbrs).
int alloc_poly_array(poly_array_t *polys, const int nr_polys, const int nr_vertices)
{
    const int size_mbrs    = nr_polys * sizeof(mbr_t);
    const int size_offsets = (nr_polys + 1) * sizeof(int);
    const int size_coord   = nr_vertices * sizeof(int);  // each of x and y
    char *base = (char *)malloc(size_mbrs + size_offsets + 2 * size_coord);
    if (base == NULL) {
        fprintf(stderr, "failed to allocate memory for poly array\n");
        exit(1);
    }
    polys->nr_polys    = nr_polys;
    polys->nr_vertices = nr_vertices;
    // Sub-arrays live back to back inside the one allocation.
    polys->mbrs    = (mbr_t *)base;
    polys->offsets = (int *)(base + size_mbrs);
    polys->x       = (int *)(base + size_mbrs + size_offsets);
    polys->y       = (int *)(base + size_mbrs + size_offsets + size_coord);
    return 0;
}
// Parse a polygon file using GPU dno and return a freshly allocated
// poly_array_t (caller frees polys->mbrs, then polys).
// Expected file layout (inferred from the offsets used below — confirm):
// first line "<nr_polys>, <nr_vertices>"; each following line holds a
// 4-digit vertex count, 20 bytes of MBR text starting at byte 5, and
// 11-byte vertex records starting at byte 26. The raw text is staged to the
// device and decoded by the parseMBR / parseVertices kernels.
// Fixes vs. original: numOfVerticesBuf is now NUL-terminated before atoi()
// (the 4-byte buffer was read past its end — UB); deprecated
// cudaThreadSynchronize() replaced with cudaDeviceSynchronize(); timing
// events are destroyed (they leaked one pair of handles per call).
poly_array_t *gpu_parse(int dno, char *file_name)
{
    // 4-digit count field plus a NUL terminator for atoi().
    char *MBRRaw, *verticesRaw, numOfVerticesBuf[5];
    char *dev_MBRRaw, *dev_verticesRaw;
    int *offset, *offsetRaw, *numOfVerticesInApoly;
    int *dev_offsetRaw, *dev_numOfVerticesInApoly;
    int *MBR, *X, *Y, *offsetInGPUMem;
    int *dev_MBR, *dev_X, *dev_Y, *dev_offsetInGPUMem;
    static const int parse_buf_size = 8192;
    char readbuf[parse_buf_size];
    fstream polyFile;
    int nr_polys, nr_vertices;
    numOfVerticesBuf[4] = '\0';  // only the first 4 bytes are ever rewritten
    polyFile.open(file_name, fstream::in | fstream::binary);
    polyFile.getline(readbuf, parse_buf_size);
    sscanf(readbuf, "%d, %d\n", &nr_polys, &nr_vertices);
    // Staging buffers are padded to the kernels' launch geometry so the
    // unguarded parseMBR kernel never indexes past the allocation.
    MBRRaw = (char *)malloc(getSizeBasedGPUConf1(20*nr_polys*sizeof(char)));
    MBR = (int *)malloc(getSizeBasedGPUConf1(4*nr_polys*sizeof(int)));
    cudaSetDevice(dno);
    cudaMalloc((void**)&dev_MBRRaw, getSizeBasedGPUConf1(20*nr_polys*sizeof(char)));
    cudaMalloc((void**)&dev_MBR, getSizeBasedGPUConf1(4*nr_polys*sizeof(int)));
    verticesRaw = (char *)malloc(nr_vertices * 11);
    offset = (int *)malloc(nr_polys*sizeof(int));
    offsetRaw = (int *)malloc(nr_polys*sizeof(int));
    numOfVerticesInApoly = (int *)malloc(nr_polys*sizeof(int));
    offsetInGPUMem = (int *)malloc(nr_polys*sizeof(int));
    cudaMalloc((void**)&dev_verticesRaw, nr_vertices * 11);
    cudaMalloc((void**)&dev_offsetRaw, nr_polys*sizeof(int));
    cudaMalloc((void**)&dev_numOfVerticesInApoly, nr_polys*sizeof(int));
    cudaMalloc((void**)&dev_offsetInGPUMem, nr_polys*sizeof(int));
    int rawBufferIndx = 0;        // byte offset of next polygon in verticesRaw
    int vertexIndx = 0;           // running compact vertex count -> offset[]
    int numVertices = 0;
    int numVerticesInGPUMem = 0;  // THREAD2-padded running vertex count
    /* process first vertex line */
    polyFile.getline(readbuf, parse_buf_size);
    memcpy(numOfVerticesBuf, readbuf, 4);
    memcpy(MBRRaw, readbuf + 5, 20);
    offset[0] = vertexIndx;
    offsetRaw[0] = rawBufferIndx;
    numVertices = atoi(numOfVerticesBuf);
    offsetInGPUMem[0] = numVerticesInGPUMem;
    numVerticesInGPUMem = (numVertices/THREAD2 + 1) * THREAD2;
    numOfVerticesInApoly[0] = numVertices;
    memcpy(verticesRaw, readbuf + 26, numVertices * 11);
    vertexIndx += numVertices;
    rawBufferIndx += strlen(readbuf)-26;
    /* each iteration processes one polygon line */
    for (int i=1; i<nr_polys; i++) {
        polyFile.getline(readbuf, parse_buf_size);
        memcpy(numOfVerticesBuf, readbuf, 4);
        memcpy(MBRRaw + 20*i, readbuf + 5, 20);
        offset[i] = vertexIndx;
        offsetRaw[i] = rawBufferIndx;
        numVertices = atoi(numOfVerticesBuf);
        offsetInGPUMem[i] = numVerticesInGPUMem;
        numVerticesInGPUMem += (numVertices/THREAD2 + 1) * THREAD2;
        numOfVerticesInApoly[i] = numVertices;
        memcpy(verticesRaw + rawBufferIndx, readbuf + 26, numVertices * 11);
        vertexIndx += numVertices;
        rawBufferIndx += strlen(readbuf)-26;
    }
    X = (int *)malloc(nr_vertices * sizeof(int));
    Y = (int *)malloc(nr_vertices * sizeof(int));
    cudaMalloc((void**)&dev_X, nr_vertices * sizeof(int));
    cudaMalloc((void**)&dev_Y, nr_vertices * sizeof(int));
    cudaMemcpy(dev_MBRRaw, MBRRaw, getSizeBasedGPUConf1(20*nr_polys*sizeof(char)), cudaMemcpyHostToDevice);
    cudaEvent_t start, stop;
    cudaEventCreate( &start );
    cudaEventCreate( &stop );
    cudaEventRecord( start, 0 );
    // Decode the 4-digit MBR fields on the GPU.
    parseMBR<<<BLOCK1, THREAD1, 0, stream[dno]>>>( dev_MBRRaw, dev_MBR, getSizeBasedGPUConf1(nr_polys*4)/(BLOCK1*THREAD1));
    // Fix: cudaThreadSynchronize() is deprecated; same semantics.
    cudaDeviceSynchronize();
    cudaMemcpy(MBR, dev_MBR, getSizeBasedGPUConf1(4*nr_polys*sizeof(int)), cudaMemcpyDeviceToHost);
    cudaMemcpy(dev_verticesRaw, verticesRaw, nr_vertices * 11, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_offsetRaw, offsetRaw, nr_polys*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_numOfVerticesInApoly, numOfVerticesInApoly, nr_polys*sizeof(int), cudaMemcpyHostToDevice);
    // NOTE(review): 'offset' (compact per-polygon vertex offsets) is uploaded
    // as the kernel's offsetInGPUMem argument instead of the padded
    // offsetInGPUMem array. That makes parseVertices write X/Y compactly,
    // which is what the memcpy into polys->x/y below expects — confirm this
    // is intentional before "fixing" it.
    cudaMemcpy(dev_offsetInGPUMem, offset, nr_polys*sizeof(int), cudaMemcpyHostToDevice);
    // Decode vertex coordinates on the GPU.
    parseVertices<<<BLOCK2, THREAD2, 0, stream[dno]>>>(dev_verticesRaw, dev_offsetRaw, dev_numOfVerticesInApoly,
    dev_X, dev_Y, dev_offsetInGPUMem, nr_polys, nr_polys/BLOCK2 + 1);
    cudaMemcpy(X, dev_X, nr_vertices * sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(Y, dev_Y, nr_vertices * sizeof(int), cudaMemcpyDeviceToHost);
    // Pack results into the caller-owned poly array.
    poly_array_t *polys = (poly_array_t *)malloc(sizeof(poly_array_t));
    alloc_poly_array(polys, nr_polys, nr_vertices);
    memcpy(polys->mbrs, MBR, sizeof(mbr_t) * nr_polys);
    memcpy(polys->offsets, offset, sizeof(int) * nr_polys);
    polys->offsets[nr_polys] = nr_vertices;  // sentinel: end of last polygon
    memcpy(polys->x, X, sizeof(int) * nr_vertices);
    memcpy(polys->y, Y, sizeof(int) * nr_vertices);
    // Fix: events were created but never destroyed (handle leak per call).
    cudaEventDestroy( start );
    cudaEventDestroy( stop );
    free(MBRRaw);
    free(verticesRaw);
    free(offset);
    free(offsetRaw);
    free(numOfVerticesInApoly);
    free(offsetInGPUMem);
    free(MBR);
    free(X);
    free(Y);
    cudaFree(dev_MBRRaw);
    cudaFree(dev_MBR);
    cudaFree(dev_verticesRaw);
    cudaFree(dev_offsetRaw);
    cudaFree(dev_numOfVerticesInApoly);
    cudaFree(dev_offsetInGPUMem);
    cudaFree(dev_X);
    cudaFree(dev_Y);
    return polys;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.